hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5a2edae1fde1740b1290641cc2634614516685b6 | 382 | py | Python | dserver/__main__.py | dandk105/Asyncpyserver | 228671a7299214b503f440b8dea2773c3769e618 | [
"MIT"
] | null | null | null | dserver/__main__.py | dandk105/Asyncpyserver | 228671a7299214b503f440b8dea2773c3769e618 | [
"MIT"
] | null | null | null | dserver/__main__.py | dandk105/Asyncpyserver | 228671a7299214b503f440b8dea2773c3769e618 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# space level: 4
"""this module is the most importing in thie app.
"""
from dserver import ServerSocket as server
from config import ConfManage as conf
if __name__ == "__main__":
# would manage conf class
hostdata = conf("./conf/base.json")
hostdata.setup_logging()
server = server()
server.standby_server()
server.accept_client()
| 22.470588 | 49 | 0.683246 | # -*- coding: utf-8 -*-
# space level: 4
"""this module is the most importing in thie app.
"""
from dserver import ServerSocket as server
from config import ConfManage as conf
if __name__ == "__main__":
# would manage conf class
hostdata = conf("./conf/base.json")
hostdata.setup_logging()
server = server()
server.standby_server()
server.accept_client()
| 0 | 0 | 0 |
19bf9e0556d61188bc88bf1e77eb1fa49ee6106a | 239 | py | Python | app/utils.py | deviantthread/MagicFormula | f22d7d530929050c6cd2b20423c31f866751c7f6 | [
"Apache-2.0"
] | 17 | 2019-05-23T16:55:48.000Z | 2021-08-12T18:38:28.000Z | app/utils.py | deviantthread/MagicFormula | f22d7d530929050c6cd2b20423c31f866751c7f6 | [
"Apache-2.0"
] | 2 | 2020-01-30T16:45:30.000Z | 2020-05-08T21:35:00.000Z | app/utils.py | deviantthread/MagicFormula | f22d7d530929050c6cd2b20423c31f866751c7f6 | [
"Apache-2.0"
] | 6 | 2020-01-26T18:42:48.000Z | 2022-01-30T10:50:31.000Z | from . import db
# Add your own utility classes and functions here.
| 17.071429 | 50 | 0.631799 | from . import db
class ModelMixin(object):
def save(self):
# Save this model to the database.
db.session.add(self)
db.session.commit()
return self
# Add your own utility classes and functions here.
| 114 | 4 | 50 |
8122ac4323b3f9727d08324f79b9508409903093 | 4,073 | py | Python | SIGNUS/app/controllers/search.py | 837477/SIGNUS | cd395dfd45d2c36d09ec9a8069e6e52e19f058e8 | [
"MIT"
] | null | null | null | SIGNUS/app/controllers/search.py | 837477/SIGNUS | cd395dfd45d2c36d09ec9a8069e6e52e19f058e8 | [
"MIT"
] | null | null | null | SIGNUS/app/controllers/search.py | 837477/SIGNUS | cd395dfd45d2c36d09ec9a8069e6e52e19f058e8 | [
"MIT"
] | null | null | null | '''
Search Controller Module
'''
import math
from bson.json_util import dumps
from flask import current_app
from datetime import datetime
from app.models.mongodb.posts import Posts
from app.models.mongodb.user import User
from app.models.mongodb.search_log import SearchLog
def v1_search(mongo_cur, keywords, order, user=None, rank_filter=True):
'''
Search (๊ฒ์)
Params
---------
mongo_cur > ๋ชฝ๊ณ ๋๋น ์ปค๋ฅ์
Object
keywords > ๊ฒ์ ํค์๋
skip > Document num skip
limit > Document num limit
order > sort order
Return
---------
posts > ํฌ์คํธ (list)
'''
posts_model = Posts(mongo_cur)
TK = current_app.config["TK"]
# ํ๋ณด๊ตฐ ์ ์
keyword_split = keywords.lower().strip().split()
keyword_tokens = list(set(TK.get_tk(keywords) + keyword_split))
posts = posts_model.search_posts(keywords,
keyword_tokens,
current_app.config['INDICATORS']['GET_SC_POST_NUM'])
# ๋ก๊น
์์
search_log_model = SearchLog(mongo_cur)
User_model = User(mongo_cur)
log_object = {'keyword_split': keyword_split,
'keyword_tokens': keyword_tokens,
'date': datetime.now()}
if user:
log_object['user_id'] = user['user_id']
User_model.update_list_column_push(user['user_id'], "search_list", log_object)
else:
log_object['user_id'] = "guest"
search_log_model.insert_one(log_object)
# ์ ์ฌ๋ ์ธก์
set_keyword_tokens = set(keyword_tokens)
for post in posts:
post['score'] = 0
# ๊ฐ ์์ญ๋ณ ๋งค์นญ ์ ์ฌ๋ ํ๊ฐ (ํ์ฌ t_index๊ฐ ์๋ ๊ด๊ณ๋ก title_token, token ๋๊ฐ์ ์ปฌ๋ผ์ผ๋ก ์งํํจ)
weight = {'title_token': current_app.config['INDICATORS']['TITLE_WEIGHT'],
'token': current_app.config['INDICATORS']['TOKEN_WEIGHT']}
for _type in ['title_token', 'token']:
point = weight[_type]
set_post_tokens = set(post[_type])
post['score'] += match_score(set_keyword_tokens, set_post_tokens) * point
# regex ๋งค์นญ ์ต์ข
์ ์ฌ๋ ํ๊ฐ (ํ์ฌ regex_str์ด ์๋ ๊ด๊ณ๋ก title์ ๊ธฐ์ค์ผ๋ก ํ๋จ)
point = current_app.config['INDICATORS']['REGEX_WEIGHT']
if keywords in post['title']:
post['score'] = (post['score'] * point) + 2
# ๋ฐํ ์ปฌ๋ผ ์ ๋ฆฌ
del post['token']
del post['title_token']
# ์ตํ์ ๋ญํน ์ ๊ฑฐ
posts.sort(key=lambda t:(-t['score']))
if (rank_filter is True and
len(posts) != 0 and
posts[0]['score'] != posts[-1]['score'] and
posts[-1]['score'] <= current_app.config['INDICATORS']['LOWEST_RANK']):
target = get_first_min(posts,
0,
len(posts)-1,
current_app.config['INDICATORS']['LOWEST_RANK'])
posts = posts[:target]
# ์ ๋ ฌ ์ ํ
if order == 1:
posts.sort(key=lambda x:x['date'], reverse=True)
return {'posts': dumps(posts[:current_app.config['INDICATORS']['RETURN_NUM']]),
'length': len(posts)}
def match_score(set_keyword_tokens, set_post_tokens):
'''
๊ฒ์ ์ ์ฌ๋ ํ๊ฐ ์์
Params
---------
set_keyword_tokens > ๊ฒ์ ํค์๋ ํ ํฐ
set_post_tokens > Post ํ ํฐ
Return
---------
๊ฒฐ๊ณผ ์ ์ (int)
'''
mc = len(set_keyword_tokens & set_post_tokens)
if len(set_keyword_tokens) != 0:
mr = mc / len(set_keyword_tokens)
else:
mr = mc / 1
return mc * (1 + mr + math.floor(mr))
def get_first_min(data, s, e, target):
'''
์ตํ์ ํ์ง (Binary search)
Params
---------
data > Post_list
s > post ์์ idx
e > post ๋ idx
target > ์ตํ์ ๋ญํฌ ์ ์
Return
---------
์ตํ์ ๊ฒฐ๊ณผ index (int)
'''
if s > e: return None
mid = (s + e) // 2
if mid <= 0: return None
if (data[mid-1]['score'] > target and
data[mid]['score'] <= target):
return mid
elif (data[mid-1]['score'] > target and
data[mid]['score'] > target):
return get_first_min(data, mid+1, e, target)
else:
return get_first_min(data, s, mid-1, target) | 29.092857 | 89 | 0.57697 | '''
Search Controller Module
'''
import math
from bson.json_util import dumps
from flask import current_app
from datetime import datetime
from app.models.mongodb.posts import Posts
from app.models.mongodb.user import User
from app.models.mongodb.search_log import SearchLog
def v1_search(mongo_cur, keywords, order, user=None, rank_filter=True):
'''
Search (๊ฒ์)
Params
---------
mongo_cur > ๋ชฝ๊ณ ๋๋น ์ปค๋ฅ์
Object
keywords > ๊ฒ์ ํค์๋
skip > Document num skip
limit > Document num limit
order > sort order
Return
---------
posts > ํฌ์คํธ (list)
'''
posts_model = Posts(mongo_cur)
TK = current_app.config["TK"]
# ํ๋ณด๊ตฐ ์ ์
keyword_split = keywords.lower().strip().split()
keyword_tokens = list(set(TK.get_tk(keywords) + keyword_split))
posts = posts_model.search_posts(keywords,
keyword_tokens,
current_app.config['INDICATORS']['GET_SC_POST_NUM'])
# ๋ก๊น
์์
search_log_model = SearchLog(mongo_cur)
User_model = User(mongo_cur)
log_object = {'keyword_split': keyword_split,
'keyword_tokens': keyword_tokens,
'date': datetime.now()}
if user:
log_object['user_id'] = user['user_id']
User_model.update_list_column_push(user['user_id'], "search_list", log_object)
else:
log_object['user_id'] = "guest"
search_log_model.insert_one(log_object)
# ์ ์ฌ๋ ์ธก์
set_keyword_tokens = set(keyword_tokens)
for post in posts:
post['score'] = 0
# ๊ฐ ์์ญ๋ณ ๋งค์นญ ์ ์ฌ๋ ํ๊ฐ (ํ์ฌ t_index๊ฐ ์๋ ๊ด๊ณ๋ก title_token, token ๋๊ฐ์ ์ปฌ๋ผ์ผ๋ก ์งํํจ)
weight = {'title_token': current_app.config['INDICATORS']['TITLE_WEIGHT'],
'token': current_app.config['INDICATORS']['TOKEN_WEIGHT']}
for _type in ['title_token', 'token']:
point = weight[_type]
set_post_tokens = set(post[_type])
post['score'] += match_score(set_keyword_tokens, set_post_tokens) * point
# regex ๋งค์นญ ์ต์ข
์ ์ฌ๋ ํ๊ฐ (ํ์ฌ regex_str์ด ์๋ ๊ด๊ณ๋ก title์ ๊ธฐ์ค์ผ๋ก ํ๋จ)
point = current_app.config['INDICATORS']['REGEX_WEIGHT']
if keywords in post['title']:
post['score'] = (post['score'] * point) + 2
# ๋ฐํ ์ปฌ๋ผ ์ ๋ฆฌ
del post['token']
del post['title_token']
# ์ตํ์ ๋ญํน ์ ๊ฑฐ
posts.sort(key=lambda t:(-t['score']))
if (rank_filter is True and
len(posts) != 0 and
posts[0]['score'] != posts[-1]['score'] and
posts[-1]['score'] <= current_app.config['INDICATORS']['LOWEST_RANK']):
target = get_first_min(posts,
0,
len(posts)-1,
current_app.config['INDICATORS']['LOWEST_RANK'])
posts = posts[:target]
# ์ ๋ ฌ ์ ํ
if order == 1:
posts.sort(key=lambda x:x['date'], reverse=True)
return {'posts': dumps(posts[:current_app.config['INDICATORS']['RETURN_NUM']]),
'length': len(posts)}
def match_score(set_keyword_tokens, set_post_tokens):
'''
๊ฒ์ ์ ์ฌ๋ ํ๊ฐ ์์
Params
---------
set_keyword_tokens > ๊ฒ์ ํค์๋ ํ ํฐ
set_post_tokens > Post ํ ํฐ
Return
---------
๊ฒฐ๊ณผ ์ ์ (int)
'''
mc = len(set_keyword_tokens & set_post_tokens)
if len(set_keyword_tokens) != 0:
mr = mc / len(set_keyword_tokens)
else:
mr = mc / 1
return mc * (1 + mr + math.floor(mr))
def get_first_min(data, s, e, target):
'''
์ตํ์ ํ์ง (Binary search)
Params
---------
data > Post_list
s > post ์์ idx
e > post ๋ idx
target > ์ตํ์ ๋ญํฌ ์ ์
Return
---------
์ตํ์ ๊ฒฐ๊ณผ index (int)
'''
if s > e: return None
mid = (s + e) // 2
if mid <= 0: return None
if (data[mid-1]['score'] > target and
data[mid]['score'] <= target):
return mid
elif (data[mid-1]['score'] > target and
data[mid]['score'] > target):
return get_first_min(data, mid+1, e, target)
else:
return get_first_min(data, s, mid-1, target) | 0 | 0 | 0 |
ade08a55f673cefae524a9ff08cdfd340e5d3249 | 992 | py | Python | mgr/mgr/samples/urls.py | onap/vfc-gvnfm-vnfmgr | 237ff06c840543d78c715cc488692bc6a3cfb7bc | [
"Apache-2.0"
] | 1 | 2021-10-15T15:23:24.000Z | 2021-10-15T15:23:24.000Z | mgr/mgr/samples/urls.py | onap/vfc-gvnfm-vnfmgr | 237ff06c840543d78c715cc488692bc6a3cfb7bc | [
"Apache-2.0"
] | null | null | null | mgr/mgr/samples/urls.py | onap/vfc-gvnfm-vnfmgr | 237ff06c840543d78c715cc488692bc6a3cfb7bc | [
"Apache-2.0"
] | 1 | 2021-10-15T15:23:16.000Z | 2021-10-15T15:23:16.000Z | # Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from mgr.samples import views
urlpatterns = [
url(r'^samples/$', views.SampleList.as_view()),
url(r'^api/vnfmgr/v1/reloadstub/(?P<fileName>[0-9a-zA-Z\-\_\.]+)$', views.reloadstub, name='reloadstub'),
url(r'^api/vnfmgr/v1/reg2msb/(?P<msName>[0-9a-zA-Z\-\_]+)$', views.reg2msb, name='reg2msb'),
url(r'^(?P<uri>[0-9a-zA-Z\-\_/]+)$', views.stub, name='stub')
]
| 41.333333 | 109 | 0.705645 | # Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from mgr.samples import views
urlpatterns = [
url(r'^samples/$', views.SampleList.as_view()),
url(r'^api/vnfmgr/v1/reloadstub/(?P<fileName>[0-9a-zA-Z\-\_\.]+)$', views.reloadstub, name='reloadstub'),
url(r'^api/vnfmgr/v1/reg2msb/(?P<msName>[0-9a-zA-Z\-\_]+)$', views.reg2msb, name='reg2msb'),
url(r'^(?P<uri>[0-9a-zA-Z\-\_/]+)$', views.stub, name='stub')
]
| 0 | 0 | 0 |
2c7358c1382c40af8b18f20ce862f9e03188d010 | 956 | py | Python | brainex/experiments/meta.py | ebuntel/BrainExTemp | 991038155a6e9289af90da3d800210841ef23ff1 | [
"MIT"
] | 1 | 2020-09-04T16:15:26.000Z | 2020-09-04T16:15:26.000Z | brainex/experiments/meta.py | ebuntel/Brainextemp | 991038155a6e9289af90da3d800210841ef23ff1 | [
"MIT"
] | null | null | null | brainex/experiments/meta.py | ebuntel/Brainextemp | 991038155a6e9289af90da3d800210841ef23ff1 | [
"MIT"
] | null | null | null | # Jan28Report on General Accureacy #####################################################################################
# date = 'Jan-23-2020-22-N-noneSpark-R0-noOpt'
# notes = 'noneSpark-R0-noOpt'
# date = 'Jan-23-2020-21-N-UseSpark-R0-noOpt'
# notes = 'UseSpark-R0-noOpt'
# date = 'Jan-24-2020-2-N-UseSpark-R1-noOpt'
# notes = 'UseSpark-R1-noOpt'
# date = 'Jan-23-2020-22-N-noneSpark-R0-noOpt'
# notes = 'noneSpark-R0-noOpt'
# date = 'Jan-24-2020-3-N-UseSpark-R1-bsfKimOnly'
# notes = 'UseSpark-R1-bsfKimOnly'
# Jan31Report on TraditionalDTW+LBOpt vs. FastDTW+NoOpt#################################################################
# date = 'Jan-30-2020-12-N-UseSpark-R1-noOptFastDTW_numSample400'
# notes = 'UseSpark-R1-noOptFastDTW_numSample400'
date = 'Jan-30-2020-15-N-UseSpark-R1-LBOptNormalDTW_numSample400'
notes = 'UseSpark-R1-LBOptNormalDTW_numSample400'
# paa_data folder is /home/apocalyvec/PycharmProjects/Genex/genex/experiments/results/
| 45.52381 | 120 | 0.628661 | # Jan28Report on General Accureacy #####################################################################################
# date = 'Jan-23-2020-22-N-noneSpark-R0-noOpt'
# notes = 'noneSpark-R0-noOpt'
# date = 'Jan-23-2020-21-N-UseSpark-R0-noOpt'
# notes = 'UseSpark-R0-noOpt'
# date = 'Jan-24-2020-2-N-UseSpark-R1-noOpt'
# notes = 'UseSpark-R1-noOpt'
# date = 'Jan-23-2020-22-N-noneSpark-R0-noOpt'
# notes = 'noneSpark-R0-noOpt'
# date = 'Jan-24-2020-3-N-UseSpark-R1-bsfKimOnly'
# notes = 'UseSpark-R1-bsfKimOnly'
# Jan31Report on TraditionalDTW+LBOpt vs. FastDTW+NoOpt#################################################################
# date = 'Jan-30-2020-12-N-UseSpark-R1-noOptFastDTW_numSample400'
# notes = 'UseSpark-R1-noOptFastDTW_numSample400'
date = 'Jan-30-2020-15-N-UseSpark-R1-LBOptNormalDTW_numSample400'
notes = 'UseSpark-R1-LBOptNormalDTW_numSample400'
# paa_data folder is /home/apocalyvec/PycharmProjects/Genex/genex/experiments/results/
| 0 | 0 | 0 |
059ca706a2b105444ff5df27e29f6a31260c0ac2 | 6,285 | py | Python | exercises/city_temperature_prediction.py | ShaharZuntz/IML.HUJI | 5804904840ac0fbdaa5b4cbf2df9348d276927d1 | [
"MIT"
] | null | null | null | exercises/city_temperature_prediction.py | ShaharZuntz/IML.HUJI | 5804904840ac0fbdaa5b4cbf2df9348d276927d1 | [
"MIT"
] | null | null | null | exercises/city_temperature_prediction.py | ShaharZuntz/IML.HUJI | 5804904840ac0fbdaa5b4cbf2df9348d276927d1 | [
"MIT"
] | null | null | null | from typing import Tuple, Any, Union, List
import scipy.stats
from pandas import Series, DataFrame
from pandas.core.generic import NDFrame
from scipy.stats import zscore
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
Q2_SCATTER_PLOT_TITLE = ("Temperature in Israel as a function of Day of "
"year, 1995-2007")
Q2_BAR_PLOT_TITLE = ("Standard Deviation of temperature in Israel for each "
"month, 1996-2007")
Q4_PRINT_FORMAT = "k={},loss={}"
Q4_BAR_PLOT_TITLE = ("Loss values as a function of the degree (1 <= k <= 10) "
"of the polynomial fit")
Q5_BAR_PLOT_TITLE = ("Loss of Polynomial fit of degree {} trained on "
"Israel as a function of all other countries")
COUNTRY_ISRAEL = "Israel"
COUNTRY_NETHERLANDS = "The Netherlands"
COUNTRY_SOUTH_AFRICA = "South Africa"
COUNTRY_JORDAN = "Jordan"
COL_STD = "std"
COL_MEAN = "mean"
COL_YEAR = "Year"
COL_TEMP = "Temp"
COL_DAY_OF_YEAR = "DayOfYear"
COL_DATE = "Date"
COL_MONTH = "Month"
COL_COUNTRY = "Country"
CITY_TEMPERATURE_CSV_PATH = "../datasets/City_Temperature.csv"
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess loaded_data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
loaded_data = pd.read_csv(filename, parse_dates=[COL_DATE])
loaded_data = clean_data(loaded_data)
loaded_data = process_features(loaded_data)
loaded_data = loaded_data.drop([COL_DATE], axis=1)
return loaded_data
def clean_data(X: pd.DataFrame) -> pd.DataFrame:
"""
Cleans the given data from extreme anomalies
"""
X["zscore"] = zscore(X[COL_TEMP])
X = X.drop(X[(X.zscore <= -3) | (X.zscore >= 3)].index)
X = X.drop(["zscore"], axis=1)
X = X.drop(X[X.Year <= 0].index)
X = X.drop(X[(X.Month <= 0) | (X.Month >= 13)].index)
X = X.drop(X[(X.Day <= 0) | (X.Day >= 32)].index)
return X
def process_features(X: pd.DataFrame) -> pd.DataFrame:
"""
Performs a basic processing of the given data
"""
X[COL_DAY_OF_YEAR] = X[COL_DATE].dt.dayofyear
X[COL_YEAR] = X[COL_YEAR].astype(str)
return X
def explore_data_for_specific_country(
country_name: str, full_data: DataFrame) -> None:
"""
Explores the data of a given country in the data.
"""
country_data = get_country_data(country_name, full_data)
px.scatter(country_data, x=COL_DAY_OF_YEAR, y=COL_TEMP, color=COL_YEAR,
title=Q2_SCATTER_PLOT_TITLE).show()
std_per_month = country_data.groupby(COL_MONTH).Temp.agg(np.std)
px.bar(std_per_month, title=Q2_BAR_PLOT_TITLE).show()
def get_country_data(country_name: str, full_data: pd.DataFrame) -> DataFrame:
"""
Returns a subset the given data that contains samples only from the given
country.
"""
return full_data[full_data[COL_COUNTRY] == country_name]
def explore_differences_between_countries(full_data: pd.DataFrame) -> None:
"""
Explores the differences between the countries.
"""
std_and_mean_per_country_and_month = full_data.groupby(
[COL_COUNTRY, COL_MONTH]).Temp.agg([np.mean, np.std])
px.line(std_and_mean_per_country_and_month,
x=std_and_mean_per_country_and_month.index.get_level_values(1),
y=COL_MEAN,
color=std_and_mean_per_country_and_month.index.get_level_values(0),
error_y=COL_STD).show()
def fit_model_for_different_pol_deg(
min_k: int, max_k: int, country_name: str,
full_data: DataFrame) -> None:
"""
Fits a model (Polyfit) to a given country with different polynomial
degrees.
"""
country_data = get_country_data(country_name, full_data)
train_x, train_y, test_x, test_y = split_train_test(
country_data.drop([COL_TEMP], axis=1),
country_data[COL_TEMP]
)
loss_values = list()
for k in range(min_k, max_k + 1):
polyfit = PolynomialFitting(k)
polyfit.fit(train_x[COL_DAY_OF_YEAR], np.array(train_y))
loss = polyfit.loss(test_x[COL_DAY_OF_YEAR], np.array(test_y))
rounded_loss = round(loss, 2)
print(Q4_PRINT_FORMAT.format(k, rounded_loss))
loss_values.append(rounded_loss)
px.bar(x=range(min_k, max_k + 1), y=loss_values,
title=Q4_BAR_PLOT_TITLE).show()
def evaluate_fitted_model_on_different_countries(
chosen_k: int, original_country_name: str,
full_data: DataFrame, other_countries: List) -> None:
"""
Evaluates a fitted model of the given country on the given other
countries.
"""
original_country_data = get_country_data(original_country_name, full_data)
countries_losses = list()
polyfit = PolynomialFitting(chosen_k)
polyfit.fit(original_country_data[COL_DAY_OF_YEAR],
original_country_data[COL_TEMP])
for country in other_countries:
country_data = data[data[COL_COUNTRY] == country]
country_loss = polyfit.loss(country_data[COL_DAY_OF_YEAR],
country_data[COL_TEMP])
countries_losses.append(country_loss)
px.bar(x=other_countries, y=countries_losses,
title=Q5_BAR_PLOT_TITLE.format(chosen_k)).show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data(CITY_TEMPERATURE_CSV_PATH)
# Question 2 - Exploring data for specific country
explore_data_for_specific_country(COUNTRY_ISRAEL, data)
# Question 3 - Exploring differences between countries
explore_differences_between_countries(data)
# Question 4 - Fitting model for different values of `k`
fit_model_for_different_pol_deg(1, 10, COUNTRY_ISRAEL, data)
# Question 5 - Evaluating fitted model on different countries
evaluate_fitted_model_on_different_countries(
5, COUNTRY_ISRAEL, data,
[COUNTRY_JORDAN, COUNTRY_SOUTH_AFRICA, COUNTRY_NETHERLANDS]
)
| 30.509709 | 79 | 0.688465 | from typing import Tuple, Any, Union, List
import scipy.stats
from pandas import Series, DataFrame
from pandas.core.generic import NDFrame
from scipy.stats import zscore
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
Q2_SCATTER_PLOT_TITLE = ("Temperature in Israel as a function of Day of "
"year, 1995-2007")
Q2_BAR_PLOT_TITLE = ("Standard Deviation of temperature in Israel for each "
"month, 1996-2007")
Q4_PRINT_FORMAT = "k={},loss={}"
Q4_BAR_PLOT_TITLE = ("Loss values as a function of the degree (1 <= k <= 10) "
"of the polynomial fit")
Q5_BAR_PLOT_TITLE = ("Loss of Polynomial fit of degree {} trained on "
"Israel as a function of all other countries")
COUNTRY_ISRAEL = "Israel"
COUNTRY_NETHERLANDS = "The Netherlands"
COUNTRY_SOUTH_AFRICA = "South Africa"
COUNTRY_JORDAN = "Jordan"
COL_STD = "std"
COL_MEAN = "mean"
COL_YEAR = "Year"
COL_TEMP = "Temp"
COL_DAY_OF_YEAR = "DayOfYear"
COL_DATE = "Date"
COL_MONTH = "Month"
COL_COUNTRY = "Country"
CITY_TEMPERATURE_CSV_PATH = "../datasets/City_Temperature.csv"
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess loaded_data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
loaded_data = pd.read_csv(filename, parse_dates=[COL_DATE])
loaded_data = clean_data(loaded_data)
loaded_data = process_features(loaded_data)
loaded_data = loaded_data.drop([COL_DATE], axis=1)
return loaded_data
def clean_data(X: pd.DataFrame) -> pd.DataFrame:
"""
Cleans the given data from extreme anomalies
"""
X["zscore"] = zscore(X[COL_TEMP])
X = X.drop(X[(X.zscore <= -3) | (X.zscore >= 3)].index)
X = X.drop(["zscore"], axis=1)
X = X.drop(X[X.Year <= 0].index)
X = X.drop(X[(X.Month <= 0) | (X.Month >= 13)].index)
X = X.drop(X[(X.Day <= 0) | (X.Day >= 32)].index)
return X
def process_features(X: pd.DataFrame) -> pd.DataFrame:
"""
Performs a basic processing of the given data
"""
X[COL_DAY_OF_YEAR] = X[COL_DATE].dt.dayofyear
X[COL_YEAR] = X[COL_YEAR].astype(str)
return X
def explore_data_for_specific_country(
country_name: str, full_data: DataFrame) -> None:
"""
Explores the data of a given country in the data.
"""
country_data = get_country_data(country_name, full_data)
px.scatter(country_data, x=COL_DAY_OF_YEAR, y=COL_TEMP, color=COL_YEAR,
title=Q2_SCATTER_PLOT_TITLE).show()
std_per_month = country_data.groupby(COL_MONTH).Temp.agg(np.std)
px.bar(std_per_month, title=Q2_BAR_PLOT_TITLE).show()
def get_country_data(country_name: str, full_data: pd.DataFrame) -> DataFrame:
"""
Returns a subset the given data that contains samples only from the given
country.
"""
return full_data[full_data[COL_COUNTRY] == country_name]
def explore_differences_between_countries(full_data: pd.DataFrame) -> None:
"""
Explores the differences between the countries.
"""
std_and_mean_per_country_and_month = full_data.groupby(
[COL_COUNTRY, COL_MONTH]).Temp.agg([np.mean, np.std])
px.line(std_and_mean_per_country_and_month,
x=std_and_mean_per_country_and_month.index.get_level_values(1),
y=COL_MEAN,
color=std_and_mean_per_country_and_month.index.get_level_values(0),
error_y=COL_STD).show()
def fit_model_for_different_pol_deg(
min_k: int, max_k: int, country_name: str,
full_data: DataFrame) -> None:
"""
Fits a model (Polyfit) to a given country with different polynomial
degrees.
"""
country_data = get_country_data(country_name, full_data)
train_x, train_y, test_x, test_y = split_train_test(
country_data.drop([COL_TEMP], axis=1),
country_data[COL_TEMP]
)
loss_values = list()
for k in range(min_k, max_k + 1):
polyfit = PolynomialFitting(k)
polyfit.fit(train_x[COL_DAY_OF_YEAR], np.array(train_y))
loss = polyfit.loss(test_x[COL_DAY_OF_YEAR], np.array(test_y))
rounded_loss = round(loss, 2)
print(Q4_PRINT_FORMAT.format(k, rounded_loss))
loss_values.append(rounded_loss)
px.bar(x=range(min_k, max_k + 1), y=loss_values,
title=Q4_BAR_PLOT_TITLE).show()
def evaluate_fitted_model_on_different_countries(
chosen_k: int, original_country_name: str,
full_data: DataFrame, other_countries: List) -> None:
"""
Evaluates a fitted model of the given country on the given other
countries.
"""
original_country_data = get_country_data(original_country_name, full_data)
countries_losses = list()
polyfit = PolynomialFitting(chosen_k)
polyfit.fit(original_country_data[COL_DAY_OF_YEAR],
original_country_data[COL_TEMP])
for country in other_countries:
country_data = data[data[COL_COUNTRY] == country]
country_loss = polyfit.loss(country_data[COL_DAY_OF_YEAR],
country_data[COL_TEMP])
countries_losses.append(country_loss)
px.bar(x=other_countries, y=countries_losses,
title=Q5_BAR_PLOT_TITLE.format(chosen_k)).show()
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data(CITY_TEMPERATURE_CSV_PATH)
# Question 2 - Exploring data for specific country
explore_data_for_specific_country(COUNTRY_ISRAEL, data)
# Question 3 - Exploring differences between countries
explore_differences_between_countries(data)
# Question 4 - Fitting model for different values of `k`
fit_model_for_different_pol_deg(1, 10, COUNTRY_ISRAEL, data)
# Question 5 - Evaluating fitted model on different countries
evaluate_fitted_model_on_different_countries(
5, COUNTRY_ISRAEL, data,
[COUNTRY_JORDAN, COUNTRY_SOUTH_AFRICA, COUNTRY_NETHERLANDS]
)
| 0 | 0 | 0 |
c7325ba4a58ad20bab3d38c0047ad34ee324c512 | 1,736 | py | Python | scbt/actions.py | Jorch72/PythonBitTorrent | 53bb4420288e064f8ca2f6f2844a32afa0084b66 | [
"MIT"
] | 2 | 2015-06-16T02:04:03.000Z | 2017-01-05T19:44:29.000Z | scbt/actions.py | ddevault/scbt | 53bb4420288e064f8ca2f6f2844a32afa0084b66 | [
"MIT"
] | null | null | null | scbt/actions.py | ddevault/scbt | 53bb4420288e064f8ca2f6f2844a32afa0084b66 | [
"MIT"
] | null | null | null | import os
from datetime import datetime
from functools import wraps
actions = dict()
@action
@action
@action
# TODO: Remove this before releasing scbt
@action
| 30.45614 | 82 | 0.608295 | import os
from datetime import datetime
from functools import wraps
actions = dict()
def action(f):
@wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
actions[f.__name__] = f
return wrapper
@action
def add_torrent(session, payload):
path = payload.get("path")
if not path:
return { "success": False, "error": "'path' is required" }
if not os.path.exists(path):
return { "success": False, "error": "File not found" }
t = session.add_torrent(path)
return { "success": True, "info_hash": t.info_hash }
@action
def status(session, payload):
status = session.status()
tstatus = [v.status() for k, v in session.torrents.items()]
response = {
"downloading": len([s for s in tstatus if str(s.state) == "downloading"]),
"seeding": len([s for s in tstatus if str(s.state) == "seeding"]),
"idle": len([s for s in tstatus if str(s.state) == "idle"]),
"session": {
"total_download": status.total_download,
"total_upload": status.total_upload,
"ratio": status.total_upload / status.total_download \
if status.total_upload != 0 else 0,
"num_peers": status.num_peers,
"download_rate": status.download_rate,
"upload_rate": status.upload_rate,
"uptime": (datetime.now() - session.started).seconds,
"pid": os.getpid()
}
}
return response
@action
def list_torrents(session, payload):
return {
"torrents": [v.json() for k, v in session.torrents.items()]
}
# TODO: Remove this before releasing scbt
@action
def interact(session, payload):
import code
code.interact(local=locals())
| 1,461 | 0 | 111 |
24d6b029b931b5c92bfb749057647d092e4bcb99 | 4,305 | py | Python | Fb-Post-Inviter.py | rohitcoder/Automation-Scripts | 34656f2b237a582bf864570375a36eb3a3233143 | [
"Apache-2.0"
] | null | null | null | Fb-Post-Inviter.py | rohitcoder/Automation-Scripts | 34656f2b237a582bf864570375a36eb3a3233143 | [
"Apache-2.0"
] | null | null | null | Fb-Post-Inviter.py | rohitcoder/Automation-Scripts | 34656f2b237a582bf864570375a36eb3a3233143 | [
"Apache-2.0"
] | null | null | null | import requests
import json
from bs4 import BeautifulSoup
import urlparse
burp0_url = "https://business.facebook.com/ufi/reaction/profile/browser/fetch/?limit=5000&total_count=5000&ft_ent_identifier=108566350975276&fb_dtsg_ag=AQxhzRLE9rpjfoRo1DHdRZ0DSQntKVunKgX-keo45t7N2Q:AQzSCWY38h7fU8ix7KUq66CzW1lEBK6d7q6b9qF1GVuNZA&__user=100005595064283&__a=1&__dyn=7AgSXghFoHG4Q9UrJDzk2mq2W8GA5FaDJ4WqK4UqwCGawIhEnUzgjGqK5-7oG5VGwJy9pUKbnyorxuF98SmquUuF3e16xqfzQdzoS6pvh4jUXVEO489EGicGdxO3i5VokKm8yElAx6u14xl0zCypHh43Hg-ezFEmUC1uCwDxe6UGq6UpxyWBGHzooAghwyzZ5CG2e4RVo8EiyXxK9z9ooK3m6ogUkBzUy4XCxS58hx2eyojz9eawzCJ1ymiQ2q6po_zoiKm2u10zUCcx22PxuE9kbzUgxCuV8y7EKUymEjyHGiawYyHDhoG26227Rh8C9xl28rgK7lAAAzE4y2O58gyUTyUbUmDwQwxG76u4UgwNx5e8xi8KUoyE-Uqze7Vojxy2q4UrxS0D8888US2m8wHy8C6EG4u11wk8Su6EaE8K&__csr=&__req=6p&__beoa=0&__pc=PHASED:media_manager_pkg&dpr=1&__ccg=GOOD&__rev=1002593526&__s=n5qgcm:rvmawv:n7hifk&__hsi=6866983610963240940-0&__comet_req=0&jazoest=27719&__jssesw=1&ft[tn]=-a"
burp0_cookies = {"datr": "ml9IXWc-hooQAZsZyGngW7lJ", "sb": "ml9IXey4Kv58ugWsRrgQRXp0", "_ga": "GA1.2.214750873.1587879017", "locale": "en_GB", "js_ver": "3892", "m_pixel_ratio": "1", "c_user": "100005595064283", "cppo": "1", "spin": "r.1002593322_b.trunk_t.1598811628_s.1_v.2_", "xs": "21%3AsAWFX-g9ae4V2A%3A2%3A1598775154%3A13272%3A4196%3A%3AAcVcdyV0_LJR0gJjSBqRwuoQhJhaDp_QlOYidiEqYKA", "fr": "1tpMIwMyIepQeqj6i.AWVuLZHlce-HqooJiS_WNa_2Daw.BdwSBm.qI.F9L.0.0.BfTGNk.AWVJSO_U", "presence": "EDvF3EtimeF1598844249EuserFA21B05595064283A2EstateFDt3F_5bDiFA2user_3a1B02305073382A2EoF1EfF1C_5dEutc3F1598844020217G598844020283CEchF_7bCC", "wd": "799x766"}
burp0_headers = {"Connection": "close", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36", "Viewport-Width": "799", "Content-Type": "application/x-www-form-urlencoded", "Accept": "*/*", "Origin": "https://business.facebook.com", "Sec-Fetch-Site": "same-origin", "Sec-Fetch-Mode": "cors", "Sec-Fetch-Dest": "empty", "Referer": "https://business.facebook.com/creatorstudio/?mode=facebook&content_table=ALL_POSTS&post_status=ALL&tab=content_posts&post_type=ALL&collection_id=free_form_collection", "Accept-Encoding": "gzip, deflate", "Accept-Language": "en-US,en;q=0.9,hi;q=0.8,es;q=0.7,lt;q=0.6"}
list_response = requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies)
json_resp = json.loads(list_response.text[9:])
list_html = json_resp["domops"][0][3]["__html"]
soup = BeautifulSoup(list_html, 'html.parser')
peoples_ids_list = soup.findAll("a", {"class":"_42ft _4jy0 _4jy3 _517h _51sy"})
invite_count = len(peoples_ids_list)
print("Inviting "+str(invite_count)+" peoples")
for people_id in peoples_ids_list:
explode = urlparse.parse_qs(urlparse.urlparse(people_id["ajaxify"]).query)
invitee = explode["invitee"][0]
hash_value = explode["hash"][0]
content_id = explode["content_id"][0]
page_id = explode["page_id"][0]
ext = explode["ext"][0]
burp0_url = "https://www.facebook.com:443/pages/post_like_invite/send/"
burp0_data = {"invitee": invitee, "page_id": page_id, "ref": "pages_post_reactor_dialogue", "content_id": content_id, "ext": ext, "hash": hash_value, "__user": "100005595064283", "__a": "1", "__dyn": "7AgSXghFoHG4Q9UrJDzk2mq2W8GA5FaDJ4WqK4UqwCGawIhEnUzgjGqK5-7oG5VGwJy9pUKbnyorxuF98SmquUuF3e16xqfzQdzoS7_h4jUXVEO489EGicGdxO3i5VokKm8yEqx6u14xl0zCypHh43Hg-ezFEmUC1uCwDxe6UGq6UpxyWBGHzooAghwyzZ5CG2e4RVo8EiyXxK9z9ooK3m6ogUkBzUy4XCxS58hx2eyojz9eawzCJ1ymiQ2q6po_zoiKm2u10zUCcx22PxuE9kbzUgxCuV8y7EKUymEjyHGiawYyHDhoG26227Rh8C9xl28rgK7lAAAzE4y2O58gyUTyUbUmDwQwxG76u4UgwNx5e8xi8KUoyE-Uqze7Vojxy2q4UrxS0D8888US2m8wHxa6EG4u11wk8Su6EaE8K", "__csr": '', "__req": "4q", "__beoa": "0", "__pc": "PHASED:media_manager_pkg", "dpr": "1", "__ccg": "GOOD", "__rev": "1002593526", "__s": "2m0lki:rvmawv:n7hifk", "__hsi": "6866983610963240940-0", "__comet_req": "0", "fb_dtsg": "AQFcQOBaGXMB:AQH33OAOqtrg", "jazoest": "21987", "__jssesw": "1"}
response = requests.post(burp0_url, headers=burp0_headers, cookies=burp0_cookies, data=burp0_data)
final_Response = json.loads(response.text[9:])
if final_Response.has_key("error"):
print("Lets refresh our paramter values")
print(final_Response["errorDescription"])
break;
else:
print("Invited successfully...")
| 134.53125 | 923 | 0.801858 | import requests
import json
from bs4 import BeautifulSoup
import urlparse
burp0_url = "https://business.facebook.com/ufi/reaction/profile/browser/fetch/?limit=5000&total_count=5000&ft_ent_identifier=108566350975276&fb_dtsg_ag=AQxhzRLE9rpjfoRo1DHdRZ0DSQntKVunKgX-keo45t7N2Q:AQzSCWY38h7fU8ix7KUq66CzW1lEBK6d7q6b9qF1GVuNZA&__user=100005595064283&__a=1&__dyn=7AgSXghFoHG4Q9UrJDzk2mq2W8GA5FaDJ4WqK4UqwCGawIhEnUzgjGqK5-7oG5VGwJy9pUKbnyorxuF98SmquUuF3e16xqfzQdzoS6pvh4jUXVEO489EGicGdxO3i5VokKm8yElAx6u14xl0zCypHh43Hg-ezFEmUC1uCwDxe6UGq6UpxyWBGHzooAghwyzZ5CG2e4RVo8EiyXxK9z9ooK3m6ogUkBzUy4XCxS58hx2eyojz9eawzCJ1ymiQ2q6po_zoiKm2u10zUCcx22PxuE9kbzUgxCuV8y7EKUymEjyHGiawYyHDhoG26227Rh8C9xl28rgK7lAAAzE4y2O58gyUTyUbUmDwQwxG76u4UgwNx5e8xi8KUoyE-Uqze7Vojxy2q4UrxS0D8888US2m8wHy8C6EG4u11wk8Su6EaE8K&__csr=&__req=6p&__beoa=0&__pc=PHASED:media_manager_pkg&dpr=1&__ccg=GOOD&__rev=1002593526&__s=n5qgcm:rvmawv:n7hifk&__hsi=6866983610963240940-0&__comet_req=0&jazoest=27719&__jssesw=1&ft[tn]=-a"
burp0_cookies = {"datr": "ml9IXWc-hooQAZsZyGngW7lJ", "sb": "ml9IXey4Kv58ugWsRrgQRXp0", "_ga": "GA1.2.214750873.1587879017", "locale": "en_GB", "js_ver": "3892", "m_pixel_ratio": "1", "c_user": "100005595064283", "cppo": "1", "spin": "r.1002593322_b.trunk_t.1598811628_s.1_v.2_", "xs": "21%3AsAWFX-g9ae4V2A%3A2%3A1598775154%3A13272%3A4196%3A%3AAcVcdyV0_LJR0gJjSBqRwuoQhJhaDp_QlOYidiEqYKA", "fr": "1tpMIwMyIepQeqj6i.AWVuLZHlce-HqooJiS_WNa_2Daw.BdwSBm.qI.F9L.0.0.BfTGNk.AWVJSO_U", "presence": "EDvF3EtimeF1598844249EuserFA21B05595064283A2EstateFDt3F_5bDiFA2user_3a1B02305073382A2EoF1EfF1C_5dEutc3F1598844020217G598844020283CEchF_7bCC", "wd": "799x766"}
burp0_headers = {"Connection": "close", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36", "Viewport-Width": "799", "Content-Type": "application/x-www-form-urlencoded", "Accept": "*/*", "Origin": "https://business.facebook.com", "Sec-Fetch-Site": "same-origin", "Sec-Fetch-Mode": "cors", "Sec-Fetch-Dest": "empty", "Referer": "https://business.facebook.com/creatorstudio/?mode=facebook&content_table=ALL_POSTS&post_status=ALL&tab=content_posts&post_type=ALL&collection_id=free_form_collection", "Accept-Encoding": "gzip, deflate", "Accept-Language": "en-US,en;q=0.9,hi;q=0.8,es;q=0.7,lt;q=0.6"}
list_response = requests.get(burp0_url, headers=burp0_headers, cookies=burp0_cookies)
json_resp = json.loads(list_response.text[9:])
list_html = json_resp["domops"][0][3]["__html"]
soup = BeautifulSoup(list_html, 'html.parser')
peoples_ids_list = soup.findAll("a", {"class":"_42ft _4jy0 _4jy3 _517h _51sy"})
invite_count = len(peoples_ids_list)
print("Inviting "+str(invite_count)+" peoples")
for people_id in peoples_ids_list:
explode = urlparse.parse_qs(urlparse.urlparse(people_id["ajaxify"]).query)
invitee = explode["invitee"][0]
hash_value = explode["hash"][0]
content_id = explode["content_id"][0]
page_id = explode["page_id"][0]
ext = explode["ext"][0]
burp0_url = "https://www.facebook.com:443/pages/post_like_invite/send/"
burp0_data = {"invitee": invitee, "page_id": page_id, "ref": "pages_post_reactor_dialogue", "content_id": content_id, "ext": ext, "hash": hash_value, "__user": "100005595064283", "__a": "1", "__dyn": "7AgSXghFoHG4Q9UrJDzk2mq2W8GA5FaDJ4WqK4UqwCGawIhEnUzgjGqK5-7oG5VGwJy9pUKbnyorxuF98SmquUuF3e16xqfzQdzoS7_h4jUXVEO489EGicGdxO3i5VokKm8yEqx6u14xl0zCypHh43Hg-ezFEmUC1uCwDxe6UGq6UpxyWBGHzooAghwyzZ5CG2e4RVo8EiyXxK9z9ooK3m6ogUkBzUy4XCxS58hx2eyojz9eawzCJ1ymiQ2q6po_zoiKm2u10zUCcx22PxuE9kbzUgxCuV8y7EKUymEjyHGiawYyHDhoG26227Rh8C9xl28rgK7lAAAzE4y2O58gyUTyUbUmDwQwxG76u4UgwNx5e8xi8KUoyE-Uqze7Vojxy2q4UrxS0D8888US2m8wHxa6EG4u11wk8Su6EaE8K", "__csr": '', "__req": "4q", "__beoa": "0", "__pc": "PHASED:media_manager_pkg", "dpr": "1", "__ccg": "GOOD", "__rev": "1002593526", "__s": "2m0lki:rvmawv:n7hifk", "__hsi": "6866983610963240940-0", "__comet_req": "0", "fb_dtsg": "AQFcQOBaGXMB:AQH33OAOqtrg", "jazoest": "21987", "__jssesw": "1"}
response = requests.post(burp0_url, headers=burp0_headers, cookies=burp0_cookies, data=burp0_data)
final_Response = json.loads(response.text[9:])
if final_Response.has_key("error"):
print("Lets refresh our paramter values")
print(final_Response["errorDescription"])
break;
else:
print("Invited successfully...")
| 0 | 0 | 0 |
042b672d0c1ff8faf6acd9ae05f3802f8aca03f6 | 397 | py | Python | contas/models.py | acaciojunio28/CRUD-django | 62b34a544ec5a14c53172e9240a1f0b448ed7b69 | [
"Apache-2.0"
] | null | null | null | contas/models.py | acaciojunio28/CRUD-django | 62b34a544ec5a14c53172e9240a1f0b448ed7b69 | [
"Apache-2.0"
] | null | null | null | contas/models.py | acaciojunio28/CRUD-django | 62b34a544ec5a14c53172e9240a1f0b448ed7b69 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
| 24.8125 | 54 | 0.745592 | from django.db import models
# Create your models here.
class categoria(models.Model):
    """Category record: a name plus creation timestamp."""
    nome=models.CharField(max_length=100)
    # auto_now_add: stamped once when the row is first saved.
    # NOTE(review): the field name appears to be mojibake of "dt_criação";
    # renaming it would require a DB migration, so it is left as-is.
    dt_criaรงao=models.DateTimeField(auto_now_add=True)
class listar(models.Model):
    """An item with a name, value and unit; represented by its name."""
    nome = models.CharField(max_length=100)
    # NOTE(review): value/unit are CharFields, so no numeric validation
    # happens at the DB level — confirm whether DecimalField was intended.
    valor= models.CharField(max_length=100)
    unidade=models.CharField(max_length=100)

    def __str__(self):
        # Human-readable representation (Django admin / shell).
        return self.nome
| 22 | 273 | 46 |
7cb6858846589320c17fb4ea403321cc22f2fffc | 112 | py | Python | text/_cascade/element/_element.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_cascade/element/_element.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_cascade/element/_element.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | """
*Element*
A CSS Element.
"""
from abc import ABCMeta
| 8 | 27 | 0.625 | """
*Element*
A CSS Element.
"""
from abc import ABCMeta
class Element(metaclass=ABCMeta):
    """Abstract base for CSS elements.

    Bug fix: the original set ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and is silently ignored by Python 3 (the class kept
    the plain ``type`` metaclass). The ``metaclass=`` keyword below
    actually applies ``ABCMeta`` as intended.
    """
| 0 | 21 | 23 |
3e0fbcb449b4cfa8bd257a18259a5527d4dbb40f | 641 | py | Python | portfolio/models.py | Aditya-aot/ION | 4789bb2e05fae48414eb829c3607c13a24349cd7 | [
"MIT"
] | null | null | null | portfolio/models.py | Aditya-aot/ION | 4789bb2e05fae48414eb829c3607c13a24349cd7 | [
"MIT"
] | null | null | null | portfolio/models.py | Aditya-aot/ION | 4789bb2e05fae48414eb829c3607c13a24349cd7 | [
"MIT"
] | 1 | 2022-02-16T10:43:57.000Z | 2022-02-16T10:43:57.000Z | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User , auth
from django.utils import timezone
# Create your models here. | 33.736842 | 73 | 0.74103 | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User , auth
from django.utils import timezone
# Create your models here.
class stock_port(models.Model) :
    """A user's stock holding (name, price, quantity stored as text)."""
    # Row is removed together with its owning user (CASCADE); user may be NULL.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null = True)
    name = models.TextField(null = True)
    # NOTE(review): price/quantity are TextFields, so no numeric validation
    # happens at the DB level — confirm whether DecimalField was intended.
    price = models.TextField(null=True)
    quantity = models.TextField(null=True)
class crypto_port(models.Model) :
    """A user's cryptocurrency holding (name, price, quantity stored as text)."""
    # Row is removed together with its owning user (CASCADE); user may be NULL.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null = True)
    name = models.TextField(null = True)
    # NOTE(review): same text-typed numeric fields as stock_port — confirm
    # whether DecimalField was intended.
    price = models.TextField(null=True)
    quantity = models.TextField(null=True)
b9bbe40517af05264261c3f6465a6deaa46f3523 | 786 | py | Python | zero_to_one_hundred/tests/test_main.py | fossabot/0to100 | 37faa1340b2ec8b87e5d4c268c8caf521ea164cb | [
"Apache-2.0"
] | null | null | null | zero_to_one_hundred/tests/test_main.py | fossabot/0to100 | 37faa1340b2ec8b87e5d4c268c8caf521ea164cb | [
"Apache-2.0"
] | null | null | null | zero_to_one_hundred/tests/test_main.py | fossabot/0to100 | 37faa1340b2ec8b87e5d4c268c8caf521ea164cb | [
"Apache-2.0"
] | null | null | null | """Unit tests."""
# pylint: disable=C0116,R0903,E0401,W0703,W1201,redefined-outer-name,missing-function-docstring,E0401,C0114,W0511,W1203,C0200,C0103,W1203
from main import run_main
def test_run_main(
    get_args_create_section_processor,
    get_args_refresh_links_processor,
    get_args_refresh_puml_processor,
    get_args_refresh_map_processor,
):
    """logical seq

    Smoke test driving ``run_main`` through a realistic command sequence:
    create three sections, then refresh the map, the links and the puml
    output. The ``get_args_*`` parameters are presumably pytest fixtures
    supplying base argv lists — TODO confirm against conftest.
    """
    run_main(get_args_create_section_processor + ["http://google.com/docs"])
    run_main(get_args_create_section_processor + ["https://cloud.google.com/docs"])
    run_main(
        get_args_create_section_processor + ["https://cloud.google.com/docs/overview"]
    )
    run_main(get_args_refresh_map_processor)
    run_main(get_args_refresh_links_processor)
    run_main(get_args_refresh_puml_processor)
| 34.173913 | 137 | 0.774809 | """Unit tests."""
# pylint: disable=C0116,R0903,E0401,W0703,W1201,redefined-outer-name,missing-function-docstring,E0401,C0114,W0511,W1203,C0200,C0103,W1203
from main import run_main
def test_run_main(
    get_args_create_section_processor,
    get_args_refresh_links_processor,
    get_args_refresh_puml_processor,
    get_args_refresh_map_processor,
):
    """logical seq

    Smoke test driving ``run_main`` through a realistic command sequence:
    create three sections, then refresh the map, the links and the puml
    output. The ``get_args_*`` parameters are presumably pytest fixtures
    supplying base argv lists — TODO confirm against conftest.
    """
    run_main(get_args_create_section_processor + ["http://google.com/docs"])
    run_main(get_args_create_section_processor + ["https://cloud.google.com/docs"])
    run_main(
        get_args_create_section_processor + ["https://cloud.google.com/docs/overview"]
    )
    run_main(get_args_refresh_map_processor)
    run_main(get_args_refresh_links_processor)
    run_main(get_args_refresh_puml_processor)
| 0 | 0 | 0 |
787dc141c06a378f136489d0c3368808772ea325 | 372 | py | Python | imaginarium/storage/__init__.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | imaginarium/storage/__init__.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | imaginarium/storage/__init__.py | LordFeratum/Imaginarium | ce52f5cad7727aab2e81fcf36f662f55dea9330a | [
"MIT"
] | null | null | null | from imaginarium.storage.utils import create_database_pool
| 21.882353 | 60 | 0.698925 | from imaginarium.storage.utils import create_database_pool
async def init_database(app=None, config=None):
    """Create the database pool, stash it under ``app['pool']`` and return it.

    Falls back to ``app['settings']`` when no explicit config is given.
    """
    cfg = config if config else app['settings']
    created = await create_database_pool(cfg, loop=app.loop)
    app['pool'] = created
    return created
async def close_database(app=None, config=None):
    """Close the pool stored in ``app['pool']`` and wait for full shutdown.

    Bug fix: the original evaluated ``app['pool'].close`` without calling
    it (a no-op attribute access), so the pool was never actually closed
    before awaiting ``wait_closed()``. ``close()`` is synchronous and
    ``wait_closed()`` is awaited, per the aiopg/aiomysql pool protocol.
    """
    app['pool'].close()
    await app['pool'].wait_closed()
| 265 | 0 | 46 |
26d4c3e209fcea746298ce44734f73751b665d9a | 41 | py | Python | models/__init__.py | danilonumeroso/MEG | 86f2a664e22082b0ff5d01c8e0ad9618b64e9065 | [
"Apache-2.0"
] | 6 | 2020-10-26T13:53:01.000Z | 2021-03-12T14:26:43.000Z | models/__init__.py | danilonumeroso/Explainer | e133c150738f09998d0350e58dece4824ee58a76 | [
"Apache-2.0"
] | null | null | null | models/__init__.py | danilonumeroso/Explainer | e133c150738f09998d0350e58dece4824ee58a76 | [
"Apache-2.0"
] | 1 | 2021-03-13T01:08:12.000Z | 2021-03-13T01:08:12.000Z | from .GNNExplainer_ import GNNExplainer_
| 20.5 | 40 | 0.878049 | from .GNNExplainer_ import GNNExplainer_
| 0 | 0 | 0 |
0be7b146cbd549cf20b312471280a3ad2005e10a | 1,522 | py | Python | tests/import_helpers/test_internal_import_record.py | vemel/boto3_type_annotations | 88aa07a36f5626428c8d3878a4846d8cb667ea28 | [
"MIT"
] | 44 | 2019-11-09T04:29:31.000Z | 2022-02-11T10:51:41.000Z | tests/import_helpers/test_internal_import_record.py | vemel/boto3_type_annotations | 88aa07a36f5626428c8d3878a4846d8cb667ea28 | [
"MIT"
] | 28 | 2019-11-26T23:50:19.000Z | 2021-05-31T18:52:46.000Z | tests/import_helpers/test_internal_import_record.py | vemel/boto3_type_annotations | 88aa07a36f5626428c8d3878a4846d8cb667ea28 | [
"MIT"
] | 3 | 2019-11-09T16:43:04.000Z | 2019-12-20T15:05:33.000Z | import unittest
from unittest.mock import patch, MagicMock
from mypy_boto3_builder.import_helpers.internal_import_record import (
InternalImportRecord,
)
from mypy_boto3_builder.import_helpers.import_string import ImportString
| 40.052632 | 83 | 0.711564 | import unittest
from unittest.mock import patch, MagicMock
from mypy_boto3_builder.import_helpers.internal_import_record import (
InternalImportRecord,
)
from mypy_boto3_builder.import_helpers.import_string import ImportString
class ImportRecordTestCase(unittest.TestCase):
    """Unit tests for InternalImportRecord."""

    def test_init(self) -> None:
        # The record's source is derived from the service name, while
        # name/alias pass straight through to the record's attributes.
        service_name_mock = MagicMock()
        service_name_mock.name = "service_name"
        result = InternalImportRecord(service_name_mock, "name", "alias")
        self.assertEqual(result.source, ImportString("service_name"))
        self.assertEqual(result.name, "name")
        self.assertEqual(result.alias, "alias")

    @patch("mypy_boto3_builder.import_helpers.internal_import_record.ImportString")
    @patch("mypy_boto3_builder.import_helpers.internal_import_record.ImportRecord")
    def test_get_external(
        self, ImportRecordMock: MagicMock, ImportStringMock: MagicMock
    ) -> None:
        # get_external() should prefix the record's source with the given
        # module name and build a plain ImportRecord from the result.
        service_name_mock = MagicMock()
        service_name_mock.name = "service_name"
        ImportRecordMock.return_value = "ImportRecord"
        ImportStringMock().__add__.return_value = "module_name.service_name"
        self.assertEqual(
            InternalImportRecord(service_name_mock, "name", "alias").get_external(
                "module_name"
            ),
            "ImportRecord",
        )
        ImportStringMock.assert_called_with("module_name")
        ImportRecordMock.assert_called_with(
            source="module_name.service_name", name="name", alias="alias"
        )
| 1,020 | 246 | 23 |
97b25cd6b0226a96f557fe9a5069ec8525d806af | 5,335 | py | Python | src/tradingkit/statistics/statistics.py | vishalbelsare/tradingkit | a5dd5e1de828a930c3d4530a34f564b110edc076 | [
"MIT"
] | null | null | null | src/tradingkit/statistics/statistics.py | vishalbelsare/tradingkit | a5dd5e1de828a930c3d4530a34f564b110edc076 | [
"MIT"
] | null | null | null | src/tradingkit/statistics/statistics.py | vishalbelsare/tradingkit | a5dd5e1de828a930c3d4530a34f564b110edc076 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import numpy
from tradingkit.pubsub.core.event import Event
from tradingkit.pubsub.core.publisher import Publisher
from tradingkit.pubsub.core.subscriber import Subscriber
from tradingkit.pubsub.event.book import Book
from tradingkit.pubsub.event.candle import Candle
from tradingkit.pubsub.event.funding import Funding
from tradingkit.pubsub.event.liquidation import Liquidation
from tradingkit.pubsub.event.open_order import OpenOrder
from tradingkit.pubsub.event.order import Order
from tradingkit.pubsub.event.plot import Plot
from tradingkit.pubsub.event.trade import Trade
| 42.34127 | 126 | 0.653796 | from datetime import datetime, timedelta
import numpy
from tradingkit.pubsub.core.event import Event
from tradingkit.pubsub.core.publisher import Publisher
from tradingkit.pubsub.core.subscriber import Subscriber
from tradingkit.pubsub.event.book import Book
from tradingkit.pubsub.event.candle import Candle
from tradingkit.pubsub.event.funding import Funding
from tradingkit.pubsub.event.liquidation import Liquidation
from tradingkit.pubsub.event.open_order import OpenOrder
from tradingkit.pubsub.event.order import Order
from tradingkit.pubsub.event.plot import Plot
from tradingkit.pubsub.event.trade import Trade
class Statistics(Publisher, Subscriber):
    """Tracks a daily equity history and derives max drawdown / Sharpe ratio.

    Subscribes to market and account events, maintains one balance snapshot
    per day, and exposes the aggregated statistics via
    :meth:`get_statistics`.
    """

    def __init__(self):
        super().__init__()
        # List of daily snapshot dicts (date/price/balances/equity);
        # None until the first 'Equity' Plot event arrives.
        self.balance_history = None
        # NOTE(review): never written anywhere in this class — dead state?
        self.last_balance_check = None
        self.peak_equity = 0   # highest equity seen so far
        self.max_drawdown = 0  # most negative peak-to-trough ratio (<= 0)
        self.last_price = None  # latest observed market price

    def subscribed_events(self) -> list:
        # Event types this subscriber wants to receive.
        return [Order, Trade, Book, Candle, Liquidation, Funding, OpenOrder, Plot]

    def on_event(self, event: Event):
        """Dispatch an incoming event to the appropriate state update."""
        if isinstance(event, Book):
            # Best bid is used as the reference market price.
            self.last_price = event.payload['bids'][0][0]
        if isinstance(event, Candle):
            if event.payload['timeframe'] == '1d':
                self.update_balance_hist_from_candle(event)
        if isinstance(event, Trade):
            self.last_price = event.payload['price']
        if isinstance(event, Plot):
            if event.payload['name'] == 'Equity':
                self.update_balance_hist_from_plot(event)

    def get_statistics(self):
        """Return the final statistics dict (max drawdown and Sharpe ratio)."""
        max_drawdown = self.get_max_draw_down()
        sharpe_ratio = self.get_sharpe_ratio()
        return {'max_drawdown': max_drawdown, 'sharpe_ratio': sharpe_ratio}

    def update_balance_hist_from_plot(self, event):
        """Record balances from an 'Equity' Plot event into today's snapshot."""
        data = event.payload
        if self.balance_history is None:
            # First event: open the history with the event's date and price.
            date = datetime.fromisoformat(data['data']['x'][0:10])
            self.balance_history = [{'date': date, 'price': data['price']}]
        if data['has_position']:
            self.balance_history[-1]['quote_balance'] = data['data']['quote_balance']
            self.balance_history[-1]['base_balance'] = data['data']['base_balance']
            self.balance_history[-1]['position_vol'] = data['data']['position_vol']
            self.balance_history[-1]['position_price'] = data['data']['position_price']
        else:
            self.balance_history[-1]['quote_balance'] = data['data']['quote_balance']
            self.balance_history[-1]['base_balance'] = data['data']['base_balance']

    def update_balance_hist_from_candle(self, event):
        """On a new daily candle, close the previous day and start a new one."""
        data = event.payload
        date = datetime.fromisoformat(data['datetime'])
        if self.balance_history is not None and date - self.balance_history[-1]['date'] >= timedelta(days=1):
            # Seal yesterday's snapshot with the latest price and its equity.
            self.balance_history[-1]['price'] = self.last_price
            self.balance_history[-1] = self.calculate_equity(self.balance_history[-1])
            # set current balance
            current_balance = self.balance_history[-1].copy()
            current_balance['date'] = datetime.fromisoformat(data['datetime'][0:10])
            self.balance_history.append(current_balance)
            self.calculate_max_drawdown()

    def calculate_equity(self, current_balance):
        """Compute and store 'equity' for a snapshot; returns the snapshot.

        With a position, equity is base_balance + quote_balance / price
        plus an inverse-contract style PnL term; without one it is
        quote_balance + base_balance * price.
        NOTE(review): the two branches are denominated in different
        currencies (base vs quote) — confirm this asymmetry is intended.
        """
        if 'position_vol' in current_balance.keys():
            current_balance['equity'] = current_balance['base_balance'] + current_balance['quote_balance'] / \
                                        current_balance['price']
            if current_balance['position_vol'] != 0:
                # Inverse-style PnL: difference of reciprocal prices times volume.
                pnl = (1 / current_balance['position_price'] - 1 / current_balance['price']) * current_balance['position_vol']
                current_balance['equity'] += pnl
        else:
            current_balance['equity'] = current_balance['quote_balance'] + current_balance['base_balance'] * \
                                        current_balance['price']
        return current_balance

    def calculate_max_drawdown(self):
        """Update peak equity and the running maximum drawdown."""
        equity = self.balance_history[-1]['equity']
        if equity > self.peak_equity:
            self.peak_equity = equity
        # Drawdown is <= 0; keep the most negative value seen so far.
        drawdown = (equity - self.peak_equity) / self.peak_equity
        self.max_drawdown = min(self.max_drawdown, drawdown)

    def get_max_draw_down(self):
        """Finalize the last snapshot at the current price; return max drawdown."""
        # set last balance price and equity
        self.balance_history[-1]['price'] = self.last_price
        self.balance_history[-1] = self.calculate_equity(self.balance_history[-1])
        self.calculate_max_drawdown()
        return self.max_drawdown

    def get_sharpe_ratio(self):
        """Annualized Sharpe ratio of cumulative daily profits (risk-free = 5%).

        NOTE(review): divides by numpy.std of the profit series — this
        yields inf/nan when the history is constant or has fewer than two
        entries; consider guarding.
        """
        profits_history = []
        for i in range(len(self.balance_history) - 1):
            # Cumulative percentage profit of day i+1 relative to day 0.
            profit = (self.balance_history[i + 1]['equity'] / self.balance_history[0]['equity'] - 1) * 100
            profits_history.append(profit)
        standard_deviation = numpy.std(profits_history)
        time_delta_years = (self.balance_history[-1]['date'] - self.balance_history[0]['date']).days / 365
        total_profit = (self.balance_history[-1]['equity'] / self.balance_history[0]['equity'] - 1) * 100
        if time_delta_years == 0:
            return 0
        anual_profit = total_profit / time_delta_years
        no_risk_profit = 5
        sharpe_ratio = (anual_profit - no_risk_profit) / standard_deviation
        return sharpe_ratio
| 4,400 | 19 | 293 |
40214a1ab4199e7b467be137d06ccce4b5727fd5 | 4,725 | py | Python | games/migrations/0001_initial.py | RevolutionTech/revolutiontech.ca | a3f0f1526812554938674c4fc9e7ea90ed4ffe6d | [
"0BSD"
] | null | null | null | games/migrations/0001_initial.py | RevolutionTech/revolutiontech.ca | a3f0f1526812554938674c4fc9e7ea90ed4ffe6d | [
"0BSD"
] | 171 | 2017-11-02T05:39:37.000Z | 2022-03-07T01:13:53.000Z | games/migrations/0001_initial.py | RevolutionTech/revolutiontech.ca | a3f0f1526812554938674c4fc9e7ea90ed4ffe6d | [
"0BSD"
] | 1 | 2018-01-13T08:11:26.000Z | 2018-01-13T08:11:26.000Z | import django.db.models.deletion
from django.db import migrations, models
| 33.510638 | 87 | 0.385397 | import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the games app.

    Auto-generated by Django's makemigrations; schema changes should go
    into a new migration rather than edits to this file.
    """

    dependencies = [("basecategory", "0001_initial")]

    operations = [
        # Core Game model.
        migrations.CreateModel(
            name="Game",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("name", models.CharField(max_length=75, db_index=True)),
                ("slug", models.SlugField(max_length=75)),
                (
                    "release_year",
                    models.PositiveSmallIntegerField(
                        db_index=True,
                        null=True,
                        verbose_name=b"Year released",
                        blank=True,
                    ),
                ),
                (
                    "description",
                    models.TextField(
                        help_text=b"Enter valid HTML", null=True, blank=True
                    ),
                ),
                ("hero", models.BooleanField(default=False, db_index=True)),
                (
                    "min_players",
                    models.PositiveSmallIntegerField(null=True, blank=True),
                ),
                (
                    "max_players",
                    models.PositiveSmallIntegerField(null=True, blank=True),
                ),
            ],
            options={"abstract": False},
        ),
        # Call-to-action button attached to a game (local file or external URL).
        migrations.CreateModel(
            name="GameButton",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("text", models.CharField(max_length=30)),
                (
                    "css_class",
                    models.CharField(
                        max_length=15, null=True, verbose_name=b"CSS class", blank=True
                    ),
                ),
                (
                    "local_resource",
                    models.FileField(null=True, upload_to=b"download", blank=True),
                ),
                (
                    "external_url",
                    models.URLField(
                        null=True, verbose_name=b"External URL", blank=True
                    ),
                ),
                (
                    "game",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="games.Game"
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # Category lookup table.
        migrations.CreateModel(
            name="GameCategory",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("name", models.CharField(max_length=50, db_index=True)),
            ],
            options={"abstract": False, "verbose_name_plural": "Game categories"},
        ),
        # Screenshot / artwork attached to a game.
        migrations.CreateModel(
            name="GameImage",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("img", models.ImageField(upload_to=b"img")),
                ("caption", models.TextField(null=True, blank=True)),
                (
                    "game",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="games.Game"
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # Relations from Game to its category (PROTECT) and platforms (M2M).
        migrations.AddField(
            model_name="game",
            name="category",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.PROTECT, to="games.GameCategory"
            ),
        ),
        migrations.AddField(
            model_name="game",
            name="platform",
            field=models.ManyToManyField(to="basecategory.Platform"),
        ),
    ]
| 0 | 4,627 | 23 |
c49eb4ba7231c1d4454481a1dbf49b5dc3297b57 | 931 | py | Python | tests/test_data_model.py | CtheSky/namecom | 16ad4a60a0d4b6d1fea2eed57674d5b020e272cf | [
"MIT"
] | 3 | 2018-08-15T06:16:57.000Z | 2020-12-28T07:34:42.000Z | tests/test_data_model.py | CtheSky/namecom | 16ad4a60a0d4b6d1fea2eed57674d5b020e272cf | [
"MIT"
] | 1 | 2019-09-17T06:52:45.000Z | 2019-09-18T07:24:58.000Z | tests/test_data_model.py | CtheSky/namecom | 16ad4a60a0d4b6d1fea2eed57674d5b020e272cf | [
"MIT"
] | null | null | null | import unittest
from namecom import Transfer, Domain
| 33.25 | 99 | 0.662728 | import unittest
from namecom import Transfer, Domain
class DataModelTestCase(unittest.TestCase):
    """Tests for namecom data models: repr/eq/hash round-trip and from_dict."""

    def test_repr_hash(self):
        transfer = Transfer(domainName='example.org', email='cthesky@yeah.net', status='Completed')
        # repr() must be eval()-able back into an equal, equally-hashed object.
        repr_str = repr(transfer)
        got_transfer = eval(repr_str)
        self.assertEqual(transfer, got_transfer)
        self.assertEqual(hash(transfer), hash(got_transfer))
        # Comparison against a non-model object must be well-defined.
        self.assertFalse(transfer == 'transfer')
        self.assertTrue(transfer != 'transfer')

    def test_from_dict(self):
        # from_dict(None) is defined to return None rather than raise.
        result = Domain.from_dict(None)
        self.assertIsNone(result)
        transfer = Transfer(domainName='example.org', email='cthesky@yeah.net', status='Completed')
        got_transfer = Transfer.from_dict(dict(
            domainName='example.org',
            email='cthesky@yeah.net',
            status='Completed'
        ))
        self.assertEqual(transfer, got_transfer)
1225db4ffd2b439dfc6fac24400a2380348b7109 | 1,655 | py | Python | sdk/machinelearning/azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/ssl_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/machinelearning/azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/ssl_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/machinelearning/azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/ssl_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SslConfiguration(Model):
    """SSL configuration. If configured data-plane calls to user services will be
    exposed over SSL only.

    :param status: SSL status. Allowed values are Enabled and Disabled.
     Possible values include: 'Enabled', 'Disabled'. Default value: "Enabled" .
    :type status: str or ~azure.mgmt.machinelearningcompute.models.Status
    :param cert: The SSL cert data in PEM format.
    :type cert: str
    :param key: The SSL key data in PEM format. This is not returned in
     response of GET/PUT on the resource. To see this please call listKeys API.
    :type key: str
    :param cname: The CName of the certificate.
    :type cname: str
    """

    # Maps attribute names to their wire (JSON) keys and msrest types;
    # consumed by msrest's Model (de)serialization machinery.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'cert': {'key': 'cert', 'type': 'str'},
        'key': {'key': 'key', 'type': 'str'},
        'cname': {'key': 'cname', 'type': 'str'},
    }
| 37.613636 | 81 | 0.598792 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SslConfiguration(Model):
    """SSL configuration. If configured data-plane calls to user services will be
    exposed over SSL only.

    :param status: SSL status. Allowed values are Enabled and Disabled.
     Possible values include: 'Enabled', 'Disabled'. Default value: "Enabled" .
    :type status: str or ~azure.mgmt.machinelearningcompute.models.Status
    :param cert: The SSL cert data in PEM format.
    :type cert: str
    :param key: The SSL key data in PEM format. This is not returned in
     response of GET/PUT on the resource. To see this please call listKeys API.
    :type key: str
    :param cname: The CName of the certificate.
    :type cname: str
    """

    # Maps attribute names to their wire (JSON) keys and msrest types;
    # consumed by msrest's Model (de)serialization machinery.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'cert': {'key': 'cert', 'type': 'str'},
        'key': {'key': 'key', 'type': 'str'},
        'cname': {'key': 'cname', 'type': 'str'},
    }

    def __init__(self, status="Enabled", cert=None, key=None, cname=None):
        # Parameters are documented in the class docstring above.
        super(SslConfiguration, self).__init__()
        self.status = status
        self.cert = cert
        self.key = key
        self.cname = cname
| 202 | 0 | 27 |
efd8688b990c88981e88b5bbf3bf4ad0c1cba2df | 10,767 | py | Python | android_env/components/emulator_console.py | majacQ/android_env | 3703c9883aa445e93f151dad9332aa6b8f32eea4 | [
"Apache-2.0"
] | 1 | 2021-06-07T13:58:24.000Z | 2021-06-07T13:58:24.000Z | android_env/components/emulator_console.py | smbale/android_env | 5ecfbfe56bb5843b298791d3c8c73b01a79e864a | [
"Apache-2.0"
] | null | null | null | android_env/components/emulator_console.py | smbale/android_env | 5ecfbfe56bb5843b298791d3c8c73b01a79e864a | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that talks directly to the telnet console of the Android emulator.
NOTE: This class will be deprecated in favour of gRPC-based communication.
"""
import os
import telnetlib
import threading
import time
from typing import List, Optional
import uuid
from absl import logging
from android_env.components import errors
from android_env.proto import raw_observation_pb2
import numpy as np
class _FifoReader(threading.Thread):
"""A thread which reads from a Unix pipe.
This thread is meant to run indefinitely, consuming from `fifo` and providing
observations via `latest_observation()`.
Any exceptions that are caught in `run()` are forwarded to
`latest_exception()` and then execution is terminated.
Users of this thread may call `stop()` to set a signal on
`self._terminate_event`, which is checked periodically by this thread to end
execution, but the `f.read()` call below may get stuck indefinitely causing
this thread to block until the whole process is terminated. In this case, no
CPU will be used, but a file descriptor will be consumed (from the `open()`
call) and threading state will linger until the process dies.
This thread was designed to terminate when facing possibly recoverable errors
allowing its caller thread to time out when waiting on `data_ready()`, then
optionally spawning a new thread to continue the work.
"""
def data_ready(self) -> threading.Condition:
"""Returns a condition variable that protects shared state."""
return self._data_ready
class EmulatorConsole():
"""Handles communication with the emulator."""
def __init__(self,
console_port: int,
auth_code: str = '',
tmp_dir: str = '/tmp',
pipe_read_timeout_sec: float = 20.0):
"""Initializes this EmulatorConsole.
Args:
console_port: Integer
auth_code: String
tmp_dir: String
pipe_read_timeout_sec: Maximum amount of time in seconds to wait for
reading data from a pipe.
"""
self._console_port = console_port
self._tmp_dir = tmp_dir
self._pipe_read_timeout_sec = pipe_read_timeout_sec
self._read_thread = None
self._setup_fifo()
self._connect()
self._authenticate_to_console(auth_code)
self._read_thread = _FifoReader(fifo=self._fifo)
self._read_thread.daemon = True
self._read_thread.start()
def fetch_screenshot(self) -> Optional[List[np.ndarray]]:
"""Returns the observation via telnet through a pipe.
This makes use of a feature in the AndroidEmulator
(https://android-review.googlesource.com/c/platform/external/qemu/+/891716)
that saves screenshots as a binary protobuf instead of a compressed PNG,
greatly improving the performance and latency.
Returns: Observation
Raises:
errors.ReadObservationError: if the observation could not be read.
"""
# Ask the emulator for a screenshot.
self._connection.write(b'screenrecord screenshot %s\n' %
self._fifo.encode('utf-8'))
with self._read_thread.data_ready():
# Check for outstanding errors before waiting.
if self._read_thread.latest_exception():
raise self._read_thread.latest_exception()
if self._read_thread.data_ready().wait(
timeout=self._pipe_read_timeout_sec):
# Check for errors while reading observations.
if self._read_thread.latest_exception():
raise self._read_thread.latest_exception()
# Check if the observation was successfully read.
if self._read_thread.latest_observation():
return self._read_thread.latest_observation()
else:
raise errors.ObservationDecodingError(
'No observation from reader thread.')
else: # Timed out.
# _read_fifo is stuck, so we spawn a new thread.
self._read_thread = _FifoReader(fifo=self._fifo)
self._read_thread.daemon = True
self._read_thread.start()
raise errors.PipeTimedOutError()
def send_mouse_action(self, x: str, y: str, down: bool = True) -> None:
"""Sends mouse events via the emulator telnet console connection.
This functionality is already available in the emulator and is relatively
fast. It sends a "one-finger" touch event to the screen (i.e. it does not
support multitouch).
Args:
x: Integer The absolute value for the x-coordinate.
y: Integer The absolute value for the y-coordinate.
down: Boolean Whether the button is down.
Returns: None
"""
self._connection.write(
('event mouse %s %s 0 %s\n' %
(int(x), int(y), '1' if down else '0')).encode('utf-8'))
def _setup_fifo(self):
"""Creates a named pipe for receiving images from the console."""
self._fifo = os.path.join(self._tmp_dir,
'screenshot_pipe-%s.pb' % uuid.uuid4())
if os.path.isfile(self._fifo): # Remove it before trying to make a new one.
os.remove(self._fifo)
# The following call may raise OSError if it can't create the FIFO, but we
# do not want to catch it because it may hide other more serious errors.
# Because we're executing this at the start of the server, we prefer to fail
# fast and loud.
os.mkfifo(self._fifo)
def _connect(self):
"""Connects to the emulator console."""
logging.info('Connecting to Emulator console on port %s...',
self._console_port)
num_connection_attempts = 3
connected = False
retries = 0
while not connected:
try:
self._connection = telnetlib.Telnet('localhost', self._console_port)
connected = True
except ConnectionRefusedError:
retries += 1
if retries >= num_connection_attempts:
raise errors.ConsoleConnectionError()
logging.error('Console connection refused, retrying in 5 seconds.')
time.sleep(5)
logging.info('Done connecting to Emulator console on port %s.',
self._console_port)
| 37.256055 | 80 | 0.663973 | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that talks directly to the telnet console of the Android emulator.
NOTE: This class will be deprecated in favour of gRPC-based communication.
"""
import os
import telnetlib
import threading
import time
from typing import List, Optional
import uuid
from absl import logging
from android_env.components import errors
from android_env.proto import raw_observation_pb2
import numpy as np
class _FifoReader(threading.Thread):
"""A thread which reads from a Unix pipe.
This thread is meant to run indefinitely, consuming from `fifo` and providing
observations via `latest_observation()`.
Any exceptions that are caught in `run()` are forwarded to
`latest_exception()` and then execution is terminated.
Users of this thread may call `stop()` to set a signal on
`self._terminate_event`, which is checked periodically by this thread to end
execution, but the `f.read()` call below may get stuck indefinitely causing
this thread to block until the whole process is terminated. In this case, no
CPU will be used, but a file descriptor will be consumed (from the `open()`
call) and threading state will linger until the process dies.
This thread was designed to terminate when facing possibly recoverable errors
allowing its caller thread to time out when waiting on `data_ready()`, then
optionally spawning a new thread to continue the work.
"""
def __init__(self, fifo=str):
super(_FifoReader, self).__init__()
self._fifo = fifo
self._latest_observation = None
self._latest_exception = None
self._data_ready = threading.Condition()
self._terminate_event = threading.Event()
def stop(self) -> None:
self._terminate_event.set()
def data_ready(self) -> threading.Condition:
"""Returns a condition variable that protects shared state."""
return self._data_ready
def latest_observation(self) -> List[np.ndarray]:
return self._latest_observation
def latest_exception(self) -> Exception:
return self._latest_exception
def run(self):
while True:
# Check if the caller thread asked this thread to stop running.
if self._terminate_event.is_set():
self._terminate_event.clear()
return
# Read the data from the pipe.
raw_obs = None
with open(self._fifo, 'rb') as f:
data = []
# Read data from the pipe in chunks.
while True:
# The call `f.read()` may block forever for all sorts of reasons, and
# unfortunately Python does not allow specifying a timeout and there's
# no good way to clean up this thread. When that occurs, the client of
# this thread will timeout when reading from `output`.
try:
chunk = f.read()
except Exception as e: # pylint: disable=broad-except
# It's nearly impossible to be exhaustive here so we use a generic
# Exception to catch all errors, not only known ones such as IOError
# and OSError,
with self._data_ready:
self._latest_exception = e
self._data_ready.notify()
return
if not chunk: # Writer closed the pipe.
break
data.append(chunk)
data = b''.join(
data) # Joining is much faster than string concatenation.
if not data:
# Not having data here is abnormal, so terminate execution.
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
'No data from pipe.')
self._data_ready.notify()
return
try:
raw_obs = raw_observation_pb2.RawObservation.FromString(data)
if (raw_obs.screen.height <= 0 or raw_obs.screen.width <= 0 or
raw_obs.screen.num_channels <= 0):
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
f'height: {raw_obs.screen.height} '
f'width: {raw_obs.screen.width} '
f'num_channels: {raw_obs.screen.num_channels} '
f'len(data): {len(data)}')
self._data_ready.notify()
return
except: # pylint: disable=bare-except
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
f'len(data): {len(data)}')
self._data_ready.notify()
return
if not raw_obs:
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
f'No data in {self._fifo}')
self._data_ready.notify()
return
screen = raw_obs.screen
img = np.frombuffer(screen.data, dtype=np.uint8, count=len(screen.data))
img.shape = (screen.height, screen.width, screen.num_channels)
# Delete the 'Alpha' channel along the 'num_channels' axis
img = np.delete(img, 3, 2)
obs = [img, np.int64(raw_obs.timestamp_us)]
with self._data_ready:
self._latest_observation = obs
self._data_ready.notify()
class EmulatorConsole():
  """Handles communication with the emulator.
  Issues commands over the emulator's telnet console and receives screenshots
  through a named pipe consumed by a background `_FifoReader` thread.
  """
  def __init__(self,
               console_port: int,
               auth_code: str = '',
               tmp_dir: str = '/tmp',
               pipe_read_timeout_sec: float = 20.0):
    """Initializes this EmulatorConsole.
    Args:
      console_port: Integer. Local telnet console port of the emulator.
      auth_code: String. Console auth token; if empty, read from
        ~/.emulator_console_auth_token.
      tmp_dir: String. Directory in which the screenshot FIFO is created.
      pipe_read_timeout_sec: Maximum amount of time in seconds to wait for
        reading data from a pipe.
    """
    self._console_port = console_port
    self._tmp_dir = tmp_dir
    self._pipe_read_timeout_sec = pipe_read_timeout_sec
    self._read_thread = None
    self._setup_fifo()
    self._connect()
    self._authenticate_to_console(auth_code)
    # Daemon thread that blocks on the FIFO and decodes observations.
    self._read_thread = _FifoReader(fifo=self._fifo)
    self._read_thread.daemon = True
    self._read_thread.start()
  def close(self):
    """Closes the telnet connection, signals the reader thread to stop and
    removes the screenshot FIFO."""
    self._connection.close()
    self._read_thread.stop()
    if os.path.isfile(self._fifo):
      os.remove(self._fifo)
  def fetch_screenshot(self) -> Optional[List[np.ndarray]]:
    """Returns the observation via telnet through a pipe.
    This makes use of a feature in the AndroidEmulator
    (https://android-review.googlesource.com/c/platform/external/qemu/+/891716)
    that saves screenshots as a binary protobuf instead of a compressed PNG,
    greatly improving the performance and latency.
    Returns: Observation
    Raises:
      errors.ReadObservationError: if the observation could not be read.
    """
    # Ask the emulator for a screenshot.
    self._connection.write(b'screenrecord screenshot %s\n' %
                           self._fifo.encode('utf-8'))
    with self._read_thread.data_ready():
      # Check for outstanding errors before waiting.
      if self._read_thread.latest_exception():
        raise self._read_thread.latest_exception()
      if self._read_thread.data_ready().wait(
          timeout=self._pipe_read_timeout_sec):
        # Check for errors while reading observations.
        if self._read_thread.latest_exception():
          raise self._read_thread.latest_exception()
        # Check if the observation was successfully read.
        if self._read_thread.latest_observation():
          return self._read_thread.latest_observation()
        else:
          raise errors.ObservationDecodingError(
              'No observation from reader thread.')
      else: # Timed out.
        # _read_fifo is stuck, so we spawn a new thread.
        # NOTE(review): the stuck thread is abandoned (not joined); see the
        # _FifoReader class docstring for why this is accepted.
        self._read_thread = _FifoReader(fifo=self._fifo)
        self._read_thread.daemon = True
        self._read_thread.start()
        raise errors.PipeTimedOutError()
  def send_mouse_action(self, x: int, y: int, down: bool = True) -> None:
    """Sends mouse events via the emulator telnet console connection.
    This functionality is already available in the emulator and is relatively
    fast. It sends a "one-finger" touch event to the screen (i.e. it does not
    support multitouch).
    Args:
      x: Integer The absolute value for the x-coordinate.
      y: Integer The absolute value for the y-coordinate.
      down: Boolean Whether the button is down.
    Returns: None
    """
    self._connection.write(
        ('event mouse %s %s 0 %s\n' %
         (int(x), int(y), '1' if down else '0')).encode('utf-8'))
  def _setup_fifo(self):
    """Creates a named pipe for receiving images from the console."""
    # Unique name per instance so concurrent emulators do not collide.
    self._fifo = os.path.join(self._tmp_dir,
                              'screenshot_pipe-%s.pb' % uuid.uuid4())
    if os.path.isfile(self._fifo): # Remove it before trying to make a new one.
      os.remove(self._fifo)
    # The following call may raise OSError if it can't create the FIFO, but we
    # do not want to catch it because it may hide other more serious errors.
    # Because we're executing this at the start of the server, we prefer to fail
    # fast and loud.
    os.mkfifo(self._fifo)
  def _connect(self):
    """Connects to the emulator console."""
    logging.info('Connecting to Emulator console on port %s...',
                 self._console_port)
    num_connection_attempts = 3
    connected = False
    retries = 0
    while not connected:
      try:
        self._connection = telnetlib.Telnet('localhost', self._console_port)
        connected = True
      except ConnectionRefusedError:
        retries += 1
        if retries >= num_connection_attempts:
          raise errors.ConsoleConnectionError()
        logging.error('Console connection refused, retrying in 5 seconds.')
        time.sleep(5)
    logging.info('Done connecting to Emulator console on port %s.',
                 self._console_port)
  def _authenticate_to_console(self, auth_code):
    """Authenticates the telnet session with `auth_code`, falling back to the
    token stored in ~/.emulator_console_auth_token when none is given."""
    logging.info('Authenticating to console.')
    if not auth_code:
      with open(os.path.expanduser('~/.emulator_console_auth_token')) as f:
        auth_code = f.read()
    self._connection.write(b'auth %s\n' %
                           auth_code.encode('utf-8')) # Authenticate session.
    self._connection.read_until(b'OK', timeout=5) # Look for 'OK' for 5s.
| 3,951 | 0 | 175 |
c679d6b045ca8f35b52d519de118dd8764302a1b | 712 | py | Python | ebiznes/apps/service/admin.py | kaniak274/EBiznes | 5dcb10020b5af6b7ae57a70060605f6e5324b03a | [
"MIT"
] | null | null | null | ebiznes/apps/service/admin.py | kaniak274/EBiznes | 5dcb10020b5af6b7ae57a70060605f6e5324b03a | [
"MIT"
] | 1 | 2019-10-30T06:26:36.000Z | 2019-11-20T17:07:25.000Z | ebiznes/apps/service/admin.py | kaniak274/EBiznes | 5dcb10020b5af6b7ae57a70060605f6e5324b03a | [
"MIT"
] | 1 | 2019-11-12T19:09:05.000Z | 2019-11-12T19:09:05.000Z | from django.contrib import admin
from .models import *
admin.site.register(Profession)
admin.site.register(Service, ServiceAdmin)
admin.site.register(Rating, RatingAdmin)
admin.site.register(Rent, RentAdmin)
admin.site.register(PriceList, PriceListAdmin)
admin.site.register(Order)
| 25.428571 | 79 | 0.738764 | from django.contrib import admin
from .models import *
# Admin options: columns shown on each model's change-list page.
class ServiceAdmin(admin.ModelAdmin):
    """Change-list columns for Service entries."""
    list_display = ('__str__', 'owner', 'city', 'profession', 'account_number')
class RatingAdmin(admin.ModelAdmin):
    """Change-list columns for Rating entries."""
    list_display = ('__str__', 'owner', 'service', 'rating',)
class RentAdmin(admin.ModelAdmin):
    """Change-list columns for Rent entries."""
    list_display = ('__str__', 'user', 'service', 'status', 'total_price',)
class PriceListAdmin(admin.ModelAdmin):
    """Change-list columns for PriceList entries."""
    list_display = ('__str__', 'service', 'price')
# Register each model; Profession and Order use the default ModelAdmin.
admin.site.register(Profession)
admin.site.register(Service, ServiceAdmin)
admin.site.register(Rating, RatingAdmin)
admin.site.register(Rent, RentAdmin)
admin.site.register(PriceList, PriceListAdmin)
admin.site.register(Order)
| 0 | 331 | 92 |
f0b8252c4ee7fa9aab5d7395e5afdc323d3140df | 4,818 | py | Python | src/metrics/retrieval_metrics.py | philip-mueller/lovt | 91cf2094a0e140b8431b8e4ebadc56547a8df6b2 | [
"MIT"
] | 3 | 2021-12-15T07:53:36.000Z | 2022-01-05T17:02:45.000Z | src/metrics/retrieval_metrics.py | philip-mueller/lovt | 91cf2094a0e140b8431b8e4ebadc56547a8df6b2 | [
"MIT"
] | null | null | null | src/metrics/retrieval_metrics.py | philip-mueller/lovt | 91cf2094a0e140b8431b8e4ebadc56547a8df6b2 | [
"MIT"
] | 3 | 2021-12-14T11:17:43.000Z | 2021-12-16T07:35:43.000Z | import torch
from torch.nn import functional as F
from torchmetrics import Metric
from models.components.utils import AttentionMask
| 49.163265 | 125 | 0.65027 | import torch
from torch.nn import functional as F
from torchmetrics import Metric
from models.components.utils import AttentionMask
class LocalRetrievalMetrics(Metric):
    """Top-k retrieval accuracy statistics over per-sample local similarity
    matrices.

    Each `update` consumes a (B x N x N) similarity matrix whose diagonal
    holds the similarity of each local element to its true counterpart; all
    off-diagonal columns act as distractors. An element's retrieval index is
    the number of distractors scoring strictly higher (0 == top-1 hit).
    """
    def __init__(self, topk=(1, 5),
                 acc_name='top_{k}_acc',
                 weighted_acc_name='weighted_top_{k}_acc',
                 retrieval_index_name='avg_retrieval_index',
                 weighted_retrieval_index_name='weighted_avg_retrieval_index',
                 avg_local_size_name='avg_local_size',
                 dist_sync_on_step=False):
        """
        :param topk: cut-offs k at which retrieval accuracy is reported.
        :param acc_name: format string (placeholder ``{k}``) naming each
            per-k accuracy metric.
        :param weighted_acc_name: like ``acc_name`` for the weighted variant.
        :param retrieval_index_name: name of the average-retrieval-index metric.
        :param weighted_retrieval_index_name: name of its weighted variant.
        :param avg_local_size_name: name of the average-local-size metric, or
            None to omit it from ``compute()``.
        :param dist_sync_on_step: forwarded to ``torchmetrics.Metric``.
        """
        super().__init__(dist_sync_on_step=dist_sync_on_step, compute_on_step=False)
        self.acc_names = [acc_name.format(k=k) for k in topk]
        self.weighted_acc_names = [weighted_acc_name.format(k=k) for k in topk]
        self.retrieval_index_name = retrieval_index_name
        self.weighted_retrieval_index_name = weighted_retrieval_index_name
        self.avg_local_size_name = avg_local_size_name
        self.register_buffer("topk", torch.tensor(topk, dtype=int))
        # Distributed-safe accumulators; all reduced by summation.
        self.add_state("sample_acc_sum", default=torch.zeros(len(topk), dtype=float), dist_reduce_fx="sum")
        self.add_state("sample_weighted_acc_sum", default=torch.zeros(len(topk), dtype=float), dist_reduce_fx="sum")
        self.add_state("sample_avg_index_sum", default=torch.tensor(0., dtype=float), dist_reduce_fx="sum")
        self.add_state("sample_weighted_avg_index_sum", default=torch.tensor(0., dtype=float), dist_reduce_fx="sum")
        self.add_state("sample_count", default=torch.tensor(0., dtype=float), dist_reduce_fx="sum")
        self.add_state("weighted_sample_count", default=torch.tensor(0., dtype=float), dist_reduce_fx="sum")
        self.add_state("total_local", default=torch.tensor(0., dtype=float), dist_reduce_fx="sum")
    @staticmethod
    def compute_similarities(local_1: torch.Tensor, local_2: torch.Tensor):
        """Cosine similarities between two batches of local features:
        (B x N x d), (B x N x d) -> (B x N x N)."""
        local_1 = F.normalize(local_1, dim=-1, p=2) # (B x N x d)
        local_2 = F.normalize(local_2, dim=-1, p=2) # (B x N x d)
        return torch.bmm(local_1, local_2.transpose(-1, -2)) # (B x N x N)
    def update(self, similarities: torch.Tensor, mask: AttentionMask=None, weights=None):
        """
        :param similarities: (B x N x N)
        :param mask: (B x N)
        :param weights: (B x N)
        """
        B, N, _ = similarities.size()
        if mask is not None:
            # set masked columns to -inf such that they are smaller than any other similarity
            similarities = similarities + mask.additive_mask[:, None, :] # (B x N x N)
        # Diagonal = similarity of each element to its true counterpart.
        true_similarities = similarities.diagonal(dim1=1, dim2=2) # (B x N)
        # Retrieval index: how many distractors strictly beat the true match.
        retrieval_indices = (similarities > true_similarities[:, :, None]).sum(-1) # (B x N)
        # Top-k hit when fewer than k distractors rank above the true match.
        correct = retrieval_indices[None, :, :] < self.topk[:, None, None] # (numk x B x N)
        correct = correct.float()
        retrieval_indices = retrieval_indices.float()
        if mask is not None:
            # Zero-out masked elements and average over the true local count.
            retrieval_indices = mask.binary_mask * retrieval_indices
            correct = mask.binary_mask[None, :, :] * correct
            num_local = mask.binary_mask.sum(-1) # (B)
            accuracies = correct.sum(-1) / num_local[None, :] # (numk x B)
            avg_index = retrieval_indices.sum(-1) / num_local # (B)
            self.total_local += num_local.sum()
        else:
            accuracies = correct.mean(dim=-1) # (numk x B)
            avg_index = retrieval_indices.mean(-1) # (B)
            self.total_local += B * N
        self.sample_acc_sum += accuracies.sum(-1) # (numk)
        self.sample_avg_index_sum += avg_index.sum()
        if weights is not None:
            self.sample_weighted_acc_sum += (weights[None, :, :] * correct).sum((1, 2)) # (numk)
            self.sample_weighted_avg_index_sum += (weights * retrieval_indices).sum()
            self.weighted_sample_count += B
        self.sample_count += B
    def compute(self):
        """Aggregates the accumulated state into a dict of metric name -> value.
        Returns an empty dict if no samples were seen; weighted metrics are
        only present when weighted updates occurred."""
        metrics = {}
        if self.sample_count > 0:
            if self.avg_local_size_name is not None:
                metrics[self.avg_local_size_name] = self.total_local / self.sample_count
            metrics[self.retrieval_index_name] = self.sample_avg_index_sum / self.sample_count
            topk_accuracies = self.sample_acc_sum / self.sample_count # (numk)
            metrics.update({name: acc for name, acc in zip(self.acc_names, topk_accuracies)})
            if self.weighted_sample_count > 0:
                metrics[self.weighted_retrieval_index_name] = self.sample_weighted_avg_index_sum / self.weighted_sample_count
                weighted_topk_accuracies = self.sample_weighted_acc_sum / self.weighted_sample_count # (numk)
                metrics.update({name: acc for name, acc in zip(self.weighted_acc_names, weighted_topk_accuracies)})
        return metrics
569065124cbd11728d67697d3d36bb715343e14a | 294 | py | Python | pinakes/main/common/tests/functional/test_about.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 2 | 2022-03-17T18:53:58.000Z | 2022-03-17T22:04:22.000Z | pinakes/main/common/tests/functional/test_about.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 9 | 2022-03-18T08:22:57.000Z | 2022-03-30T17:14:49.000Z | pinakes/main/common/tests/functional/test_about.py | hsong-rh/pinakes | 2f08cb757ca64c866af3244686b92a3074fc7571 | [
"Apache-2.0"
] | 7 | 2022-03-17T22:03:08.000Z | 2022-03-28T21:28:34.000Z | """Test about view"""
import pytest
@pytest.mark.django_db
def test_about(api_request):
"""Test about GET endpoint"""
response = api_request("get", "common:about")
assert response.status_code == 200
assert "product_name" in response.data
assert "version" in response.data
| 24.5 | 49 | 0.707483 | """Test about view"""
import pytest
@pytest.mark.django_db
def test_about(api_request):
    """The about endpoint answers 200 and exposes product metadata."""
    about_response = api_request("get", "common:about")
    assert about_response.status_code == 200
    for required_key in ("product_name", "version"):
        assert required_key in about_response.data
| 0 | 0 | 0 |
36ba270c3402aa4a3591c37c11d1cf962bf5bbc1 | 366 | py | Python | StalinSort.py | joscha0/ExoticAlgorithms | 86601200e1bf590fbd246a6c6677ac6e850b9e1c | [
"MIT"
] | null | null | null | StalinSort.py | joscha0/ExoticAlgorithms | 86601200e1bf590fbd246a6c6677ac6e850b9e1c | [
"MIT"
] | null | null | null | StalinSort.py | joscha0/ExoticAlgorithms | 86601200e1bf590fbd246a6c6677ac6e850b9e1c | [
"MIT"
] | 2 | 2019-04-03T09:04:36.000Z | 2019-05-17T13:19:54.000Z |
if __name__ == "__main__":
list1 = [1,4,2,5,6,5,3]
print('\n\n-----Stalin Sorting Algorithm-----\n')
print('unsorted list: '+str(list1))
def stalinSort(lst):
    """Return a sorted subsequence of ``lst`` in one O(n) pass.

    Keeps an element only if it is >= the last kept element, discarding
    everything that would break non-decreasing order.

    Fix: the original crashed with IndexError on an empty input list.
    """
    if not lst:
        return []
    sorted_list = [lst[0]]
    for value in lst[1:]:
        # Keep a value only when it preserves the non-decreasing order.
        if value >= sorted_list[-1]:
            sorted_list.append(value)
    return sorted_list
# Demo: run the sorter on a sample list when executed as a script.
if __name__ == "__main__":
    list1 = [1,4,2,5,6,5,3]
    print('\n\n-----Stalin Sorting Algorithm-----\n')
    print('unsorted list: '+str(list1))
print('sorted list: '+str(stalinSort(list1))) | 138 | 0 | 22 |
f9e994ad70317ea383079aa8f894e5717769e495 | 834 | py | Python | unionchan/runlocal.py | mochidaz/union-chan | 410e77e33ce4263d0adff0c7303d9413eb5faab9 | [
"MIT"
] | 4 | 2020-07-19T13:41:42.000Z | 2020-08-03T05:47:59.000Z | unionchan/runlocal.py | mochidaz/union-chan | 410e77e33ce4263d0adff0c7303d9413eb5faab9 | [
"MIT"
] | 4 | 2020-11-13T18:59:33.000Z | 2022-02-10T02:14:45.000Z | unionchan/runlocal.py | mochidaz/union-chan | 410e77e33ce4263d0adff0c7303d9413eb5faab9 | [
"MIT"
] | 3 | 2020-07-19T16:07:25.000Z | 2021-01-31T13:45:03.000Z | import numpy
import tflearn
import tensorflow
import json
import random
import datetime
from utils.utils import net, label, kata, data, sekantung_kata
model = tflearn.DNN(net)
model.load("model/model.tfl")
if __name__ == '__main__':
chat()
| 23.828571 | 64 | 0.589928 | import numpy
import tflearn
import tensorflow
import json
import random
import datetime
from utils.utils import net, label, kata, data, sekantung_kata
# Build the DNN around the shared network graph and load the trained weights.
model = tflearn.DNN(net)
model.load("model/model.tfl")
def chat():
    """Interactive console chat loop for the Union-chan bot.

    Reads a line from stdin (prompt "Pengguna: ", Indonesian for "user"),
    predicts an intent tag for it with the loaded model and prints a reply.
    Typing "dadah" ("bye") terminates the process.
    """
    while True:
        content = str(input("Pengguna: "))
        if content == "dadah":
            # Exit keyword: stop the whole program.
            exit()
        # Encode the input (sekantung_kata — presumably a bag-of-words
        # featurizer; confirm in utils.utils) and score every intent.
        results = model.predict([sekantung_kata(content, kata)])
        results_index = numpy.argmax(results)
        tag = label[results_index]
        print(tag)
        if tag == 'datainteraksi-waktu':
            # The time intent is answered dynamically with the current time.
            print("Union-chan: ", datetime.datetime.now())
        else:
            # Otherwise reply with a random canned response for the tag.
            for tg in data["intents"]:
                if tg['tag'] == tag:
                    responses = tg['responses']
                    print("Union-chan: ",random.choice(responses))
if __name__ == '__main__':
    chat()
| 564 | 0 | 23 |
d151688d7ced74000a8d74c506785adc3588f6a1 | 668 | py | Python | pages/artefacts/contents/SSD/exercises/recursion.py | yinpinglai/ypinglai.github.io | f82e6380061ec30eb9d71739cedb9115671af565 | [
"CC-BY-3.0"
] | null | null | null | pages/artefacts/contents/SSD/exercises/recursion.py | yinpinglai/ypinglai.github.io | f82e6380061ec30eb9d71739cedb9115671af565 | [
"CC-BY-3.0"
] | null | null | null | pages/artefacts/contents/SSD/exercises/recursion.py | yinpinglai/ypinglai.github.io | f82e6380061ec30eb9d71739cedb9115671af565 | [
"CC-BY-3.0"
] | null | null | null |
if __name__ == '__main__':
n = 4
Solution().tower_of_hanoi(n, 'A', 'B', 'C')
class Solution:
    """Recursive, print-based solver for the Tower of Hanoi puzzle."""

    def tower_of_hanoi(self, n, source, destination, auxiliary):
        """Print the moves that shift ``n`` disks from ``source`` to ``destination``."""
        if n != 1:
            # Stage 1: park the n-1 smaller disks on the spare peg.
            self.tower_of_hanoi(n - 1, source, auxiliary, destination)
            # Stage 2: the largest disk of this subproblem crosses directly.
            print(f'Move disk {n} from source {source} to destination {destination}')
            # Stage 3: carry the parked stack from the spare peg across.
            self.tower_of_hanoi(n - 1, auxiliary, destination, source)
        else:
            # A single disk needs exactly one direct move.
            print(f'Move disk 1 from source {source} to destination {destination}')
# Demo: solve a 4-disk puzzle moving from peg 'A' to peg 'B' via 'C'.
if __name__ == '__main__':
    n = 4
    Solution().tower_of_hanoi(n, 'A', 'B', 'C')
| 0 | 562 | 22 |
f9075a7a8b5220a708c1cc2264c0723cb62d18d5 | 1,029 | py | Python | backend/app/app/crud/crud_item.py | L3RAT/passvortex | ddfac2d4969f2be60d19a9ca857e58939aa8f128 | [
"MIT"
] | null | null | null | backend/app/app/crud/crud_item.py | L3RAT/passvortex | ddfac2d4969f2be60d19a9ca857e58939aa8f128 | [
"MIT"
] | null | null | null | backend/app/app/crud/crud_item.py | L3RAT/passvortex | ddfac2d4969f2be60d19a9ca857e58939aa8f128 | [
"MIT"
] | null | null | null | from typing import List
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.core.security import encrypt_secret
from app.crud.base import CRUDBase
from app.models.item import Item
from app.schemas.item import ItemCreate, ItemUpdate
import json
item = CRUDItem(Item)
| 27.810811 | 76 | 0.620991 | from typing import List
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.core.security import encrypt_secret
from app.crud.base import CRUDBase
from app.models.item import Item
from app.schemas.item import ItemCreate, ItemUpdate
import json
class CRUDItem(CRUDBase[Item, ItemCreate, ItemUpdate]):
    """CRUD operations for Item rows that are scoped to an owning user."""
    def create_with_owner(
        self, db: Session, *, obj_in: ItemCreate, owner_id: int
    ) -> Item:
        """Persist a new Item for `owner_id` and return the refreshed row."""
        db_obj = Item(
            login=obj_in.login,
            # NOTE(review): the encryption key is hard-coded to "temp" — this
            # looks like a placeholder; confirm before relying on it.
            password=str(encrypt_secret(obj_in.password, "temp")),
            owner_id=owner_id,
        )
        db.add(db_obj)
        db.commit()
        # Refresh so server-generated fields (e.g. the primary key) are loaded.
        db.refresh(db_obj)
        return db_obj
    def get_multi_by_owner(
        self, db: Session, *, owner_id: int, skip: int = 0, limit: int = 100
    ) -> List[Item]:
        """Return a page (offset `skip`, size `limit`) of Items owned by `owner_id`."""
        return (
            db.query(self.model)
            .filter(Item.owner_id == owner_id)
            .offset(skip)
            .limit(limit)
            .all()
        )
# Shared module-level instance bound to the Item model.
item = CRUDItem(Item)
| 613 | 34 | 76 |
d9c9d7b3236a94973bc2fb541d5f643b9f229cdf | 38 | py | Python | for2.py | praveenpmin/Python | 513fcde7430b03a187e2c7e58302b88645388eed | [
"MIT"
] | null | null | null | for2.py | praveenpmin/Python | 513fcde7430b03a187e2c7e58302b88645388eed | [
"MIT"
] | null | null | null | for2.py | praveenpmin/Python | 513fcde7430b03a187e2c7e58302b88645388eed | [
"MIT"
] | null | null | null | for i in range(0, 10):
print (i), | 19 | 23 | 0.526316 | for i in range(0, 10):
print (i), | 0 | 0 | 0 |
64608f911d582a4617181a86fb3474aee6b5f498 | 216 | py | Python | media_library/adapters/datasources/mongo_item_datasource/mongo/model.py | jpsalamarcara/every_angle_rec_test | 1595088bfdea580f2f9bb02b7ed9e6b3c97dc17b | [
"MIT"
] | null | null | null | media_library/adapters/datasources/mongo_item_datasource/mongo/model.py | jpsalamarcara/every_angle_rec_test | 1595088bfdea580f2f9bb02b7ed9e6b3c97dc17b | [
"MIT"
] | null | null | null | media_library/adapters/datasources/mongo_item_datasource/mongo/model.py | jpsalamarcara/every_angle_rec_test | 1595088bfdea580f2f9bb02b7ed9e6b3c97dc17b | [
"MIT"
] | null | null | null |
from mongoengine import *
| 24 | 43 | 0.74537 |
from mongoengine import *
class StoredItem(DynamicDocument):
uid = SequenceField(primary_key=True)
name = StringField(required=True)
media_type = StringField(required=True)
location = StringField() | 0 | 166 | 23 |
87717b27eb0ad86fca3a1abc5a9c047b441ba6f0 | 1,707 | py | Python | vendors/spiders/goudenton.py | nl-hugo/grapy | b72216f0a1c93286575d4c1d81c1f4825021dfec | [
"MIT"
] | 2 | 2018-08-02T20:33:44.000Z | 2018-11-14T12:57:28.000Z | vendors/spiders/goudenton.py | nl-hugo/grapy | b72216f0a1c93286575d4c1d81c1f4825021dfec | [
"MIT"
] | null | null | null | vendors/spiders/goudenton.py | nl-hugo/grapy | b72216f0a1c93286575d4c1d81c1f4825021dfec | [
"MIT"
] | 1 | 2019-01-10T16:19:35.000Z | 2019-01-10T16:19:35.000Z | # -*- coding: utf-8 -*-
import scrapy
from vendors.items import VendorWine
from vendors.utils import float_or_none
| 36.319149 | 98 | 0.625073 | # -*- coding: utf-8 -*-
import scrapy
from vendors.items import VendorWine
from vendors.utils import float_or_none
def find_property(response, label):
    """Look up a spec-table value by its row header text.

    Searches the product attribute table for a ``<th>`` whose text contains
    ``label`` and returns the stripped text of the adjacent ``<td>``, or
    ``None`` when no such row exists.
    """
    query = '//th[contains(text(), "{}")]/following-sibling::td/text()'.format(label)
    value = response.css('#product-attribute-specs-table').xpath(query).get()
    return value.strip() if value is not None else value
class GoudentonSpider(scrapy.Spider):
    """Spider that scrapes red, white and rosé wine listings from degoudenton.nl."""

    name = 'goudenton'
    allowed_domains = ['degoudenton.nl']
    start_urls = [
        'https://www.degoudenton.nl/rode-wijn',
        'https://www.degoudenton.nl/witte-wijn',
        'https://www.degoudenton.nl/rose-wijn',
    ]

    def parse(self, response):
        """Follow every product link on a listing page, then paginate."""
        for product_url in response.css('.product-image::attr(href)').extract():
            yield scrapy.Request(url=product_url, callback=self.parse_item)
        # Queue the next listing page when the pager offers one.
        next_page = response.css('li.next-page > a::attr(href)').get()
        if next_page is not None:
            yield scrapy.Request(next_page)

    def parse_item(self, response):
        """Yield a VendorWine item built from a product detail page."""
        wine = VendorWine()
        wine['vendor'] = {'name': 'Wijnkoperij De Gouden Ton', 'url': self.allowed_domains[0]}
        wine['url'] = response.url
        wine['winery'] = find_property(response, 'Producent')
        wine['name'] = response.css('.product-name > h1::text').get()
        wine['price'] = float_or_none(response.css('meta[itemprop="price"]::attr(content)').get())
        wine['year'] = find_property(response, 'Oogst')
        # Volume is listed with a decimal comma; normalise before parsing.
        wine['volume'] = float_or_none(find_property(response, 'inhoud').replace(',', '.'))
        yield wine
| 249 | 1,294 | 46 |
565b740e0a2b43a85afca2d59fb2a573fac15d32 | 4,550 | py | Python | fisher_py/data/sequence_file_writer.py | abdelq/fisher_py | befb98732ba7c4e57858d158c68cda09ed829d66 | [
"MIT"
] | 3 | 2021-11-03T20:55:45.000Z | 2022-02-01T10:11:47.000Z | fisher_py/data/sequence_file_writer.py | abdelq/fisher_py | befb98732ba7c4e57858d158c68cda09ed829d66 | [
"MIT"
] | 2 | 2022-01-28T02:04:21.000Z | 2022-01-29T01:29:14.000Z | fisher_py/data/sequence_file_writer.py | abdelq/fisher_py | befb98732ba7c4e57858d158c68cda09ed829d66 | [
"MIT"
] | 1 | 2022-01-26T23:30:37.000Z | 2022-01-26T23:30:37.000Z | from fisher_py.net_wrapping import NetWrapperBase
from fisher_py.data import FileHeader, FileError, SequenceInfo
from fisher_py.data.business import SampleInformation, BracketType
from typing import List
| 29.934211 | 96 | 0.627253 | from fisher_py.net_wrapping import NetWrapperBase
from fisher_py.data import FileHeader, FileError, SequenceInfo
from fisher_py.data.business import SampleInformation, BracketType
from typing import List
class SequenceFileWriter(NetWrapperBase):
    """Write-side wrapper around the underlying .NET sequence-file object.

    Sub-object wrappers (file header, file error, sequence info) are created
    lazily on first property access and cached on the instance.
    """
    def __init__(self):
        super().__init__()
        # Lazily-built wrapper caches; populated on first property access.
        self._file_header = None
        self._file_error = None
        self._sequence_info = None
    @property
    def file_header(self) -> FileHeader:
        """
        Gets the file header for the sequence
        """
        if self._file_header is None:
            self._file_header = FileHeader._get_wrapper_(self._get_wrapped_object_().FileHeader)
        return self._file_header
    @property
    def file_error(self) -> FileError:
        """
        Gets the file error state.
        """
        if self._file_error is None:
            self._file_error = FileError._get_wrapper_(self._get_wrapped_object_().FileError)
        return self._file_error
    @property
    def is_error(self) -> bool:
        """
        Gets a value indicating whether the last file operation caused an error
        """
        return self._get_wrapped_object_().IsError
    @property
    def info(self) -> SequenceInfo:
        """
        Gets additional information about a sequence
        """
        if self._sequence_info is None:
            self._sequence_info = SequenceInfo._get_wrapper_(self._get_wrapped_object_().Info)
        return self._sequence_info
    @info.setter
    def info(self, value: SequenceInfo):
        """
        Sets additional information about a sequence
        """
        assert type(value) is SequenceInfo
        # Keep the cached wrapper and the wrapped .NET object in sync.
        self._sequence_info = value
        self._get_wrapped_object_().Info = value._get_wrapped_object_()
    @property
    def samples(self) -> List[SampleInformation]:
        """
        Gets the set of samples in the sequence
        """
        return [SampleInformation._get_wrapper_(s) for s in self._get_wrapped_object_().Samples]
    @property
    def file_name(self) -> str:
        """
        Gets the name of the sequence file.
        """
        return self._get_wrapped_object_().FileName
    @property
    def bracket(self) -> BracketType:
        """
        Gets the sequence bracket type. This determines which groups of samples
        use the same calibration curve.
        """
        return BracketType(self._get_wrapped_object_().Bracket)
    @bracket.setter
    def bracket(self, value: BracketType):
        """
        Sets the sequence bracket type. This determines which groups of samples
        use the same calibration curve.
        """
        assert type(value) is BracketType
        # The wrapped object stores the raw enum value, not the Python enum.
        self._get_wrapped_object_().Bracket = value.value
    @property
    def tray_configuration(self) -> str:
        """
        Gets a description of the auto-sampler tray
        """
        return self._get_wrapped_object_().TrayConfiguration
    @tray_configuration.setter
    def tray_configuration(self, value: str):
        """
        Sets a description of the auto-sampler tray
        """
        assert type(value) is str
        self._get_wrapped_object_().TrayConfiguration = value
    def get_user_column_label(self, index: int) -> str:
        """
        Retrieves the user label at given 0-based label index.
        Parameters:
        index:
        Index of user label to be retrieved
        Returns:
        String containing the user label at given index
        Remarks:
        SampleInformation.MaxUserTextColumnCount determines the maximum number of user
        column labels.
        """
        return self._get_wrapped_object_().GetUserColumnLabel(index)
    def save(self) -> bool:
        """
        Saves Sequence data to disk.
        Returns:
        True saved data to disk; false otherwise.
        """
        return self._get_wrapped_object_().Save()
    def set_user_column_label(self, index: int, label: str) -> bool:
        """
        Sets the user label at given 0-based label index.
        Parameters:
        index:
        Index of user label to be set
        label:
        New string value for user label to be set
        Returns:
        true if successful; false otherwise
        Remarks:
        SampleInformation.MaxUserTextColumnCount determines the maximum number of user
        column labels.
        """
        return self._get_wrapped_object_().SetUserColumnLabel(index, label)
| 125 | 4,196 | 23 |
1a716336214a081c4e22fedb7d3a76054d86896e | 3,858 | py | Python | ir_datasets/datasets/clirmatrix.py | seanmacavaney/ir_datasets | a8e56f53bb06fcf11164fb0ffc4afc497a6e7507 | [
"Apache-2.0"
] | null | null | null | ir_datasets/datasets/clirmatrix.py | seanmacavaney/ir_datasets | a8e56f53bb06fcf11164fb0ffc4afc497a6e7507 | [
"Apache-2.0"
] | null | null | null | ir_datasets/datasets/clirmatrix.py | seanmacavaney/ir_datasets | a8e56f53bb06fcf11164fb0ffc4afc497a6e7507 | [
"Apache-2.0"
] | null | null | null | import contextlib
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.util import GzipExtract, DownloadConfig, _DownloadConfig
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import TsvDocs, CLIRMatrixQueries, CLIRMatrixQrels
NAME = 'clirmatrix'
_logger = ir_datasets.log.easy()
QRELS_DEFS = {
6: "6",
5: "5",
4: "4",
3: "3",
2: "2",
1: "1",
0: "0",
}
collection = _init()
| 44.860465 | 883 | 0.608087 | import contextlib
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.util import GzipExtract, DownloadConfig, _DownloadConfig
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.formats import TsvDocs, CLIRMatrixQueries, CLIRMatrixQrels
NAME = 'clirmatrix'
_logger = ir_datasets.log.easy()
QRELS_DEFS = {
6: "6",
5: "5",
4: "4",
3: "3",
2: "2",
1: "1",
0: "0",
}
def _init():
    """Build and register all CLIRMatrix dataset variants with ir_datasets.

    Registers the base (documentation-only) dataset plus lazy pattern-based
    initializers for per-language corpora and the multi8 / bi139-base /
    bi139-full query/qrel splits. Returns the base Dataset object.
    """
    # All Wikipedia language codes covered by CLIRMatrix.
    LANGS = ('af', 'als', 'am', 'an', 'ar', 'arz', 'ast', 'az', 'azb', 'ba', 'bar', 'be', 'bg', 'bn', 'bpy', 'br', 'bs', 'bug', 'ca', 'cdo', 'ce', 'ceb', 'ckb', 'cs', 'cv', 'cy', 'da', 'de', 'diq', 'el', 'eml', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'he', 'hi', 'hr', 'hsb', 'ht', 'hu', 'hy', 'ia', 'id', 'ilo', 'io', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'kn', 'ko', 'ku', 'ky', 'la', 'lb', 'li', 'lmo', 'lt', 'lv', 'mai', 'mg', 'mhr', 'min', 'mk', 'ml', 'mn', 'mr', 'mrj', 'ms', 'my', 'mzn', 'nap', 'nds', 'ne', 'new', 'nl', 'nn', 'no', 'oc', 'or', 'os', 'pa', 'pl', 'pms', 'pnb', 'ps', 'pt', 'qu', 'ro', 'ru', 'sa', 'sah', 'scn', 'sco', 'sd', 'sh', 'si', 'simple', 'sk', 'sl', 'sq', 'sr', 'su', 'sv', 'sw', 'szl', 'ta', 'te', 'tg', 'th', 'tl', 'tr', 'tt', 'uk', 'ur', 'uz', 'vec', 'vi', 'vo', 'wa', 'war', 'wuu', 'xmf', 'yi', 'yo', 'zh')
    # Regex alternation over the language codes, used in registry patterns below.
    LANG_REGEX = '(' + '|'.join(LANGS) + ')'
    # The 8 languages available in the MULTI-8 subset.
    MULTI8_LANGS = ('ar', 'de', 'en', 'es', 'fr', 'ja', 'ru', 'zh')
    MULTI8_LANG_REGEX = '(' + '|'.join(MULTI8_LANGS) + ')'
    base_path = ir_datasets.util.home_path()/NAME
    def _dlc_init():
        # The per-file download metadata lives in a JSON file that is itself
        # fetched via the main DownloadConfig.
        dlc = DownloadConfig.context(NAME, base_path)
        clirmatrix_dlc = _DownloadConfig(dlc['downloads'].path(), parser='json')
        return clirmatrix_dlc
    _dlc = ir_datasets.util.Lazy(_dlc_init)  # deferred so module import stays cheap
    _docs_cache = {}  # lang_code -> TsvDocs, shared across all dataset variants
    def _docs_initializer(lang_code):
        # Build (and cache) the docs collection for one language.
        if lang_code not in _docs_cache:
            dlc = _dlc().context("clirmatrix_docs", base_path)
            docs = TsvDocs(GzipExtract(dlc[f'docs/{lang_code}']), namespace=f'{NAME}/{lang_code}', lang=lang_code)
            _docs_cache[lang_code] = docs
        return _docs_cache[lang_code]
    def _initializer(args, dlc_context=None):
        # args comes from the registry pattern match:
        # (docs_lang, queries_lang, split); the latter two may be None.
        docs_lang, queries_lang, split = args
        docs = _docs_initializer(docs_lang)
        components = [docs]
        if queries_lang: # queries & split are optional
            dlc = _dlc().context(dlc_context, base_path)
            dlc_key = f'queries/{queries_lang}_{docs_lang}/{split}'
            qrel_dlc = GzipExtract(dlc[dlc_key])
            # Queries and qrels are both parsed from the same downloaded file.
            qrels = CLIRMatrixQrels(qrel_dlc, QRELS_DEFS)
            queries = CLIRMatrixQueries(qrel_dlc, queries_lang)
            components += [queries, qrels]
        return Dataset(*components)
    def _multi8_initializer(args):
        return _initializer(args, 'clirmatrix_multi8')
    def _bi139_base_initializer(args):
        return _initializer(args, 'clirmatrix_bi139_base')
    def _bi139_full_initializer(args):
        return _initializer(args, 'clirmatrix_bi139_full')
    def _corpus_initializer(args):
        # Docs-only variant: no queries/qrels attached.
        return _initializer((args[0], None, None))
    documentation = YamlDocumentation(f'docs/{NAME}.yaml')
    base = Dataset(documentation('_'))
    ir_datasets.registry.register(NAME, base)
    ir_datasets.registry.register_pattern(rf'^{NAME}/{LANG_REGEX}$', _corpus_initializer)
    ir_datasets.registry.register_pattern(rf'^{NAME}/{MULTI8_LANG_REGEX}/multi8/{MULTI8_LANG_REGEX}/(train|dev|test1|test2)$', _multi8_initializer)
    ir_datasets.registry.register_pattern(rf'^{NAME}/{LANG_REGEX}/bi139-base/{LANG_REGEX}/(train|dev|test1|test2)$', _bi139_base_initializer)
    ir_datasets.registry.register_pattern(rf'^{NAME}/{LANG_REGEX}/bi139-full/{LANG_REGEX}/(train|dev|test1|test2)$', _bi139_full_initializer)
    return base
collection = _init()
| 3,348 | 0 | 23 |
f7e0dea9de8ac33acc29d4283d1db7dc1b9430a5 | 198 | py | Python | CSES/Dynamic_Programming/Dice_Combinations.py | kancharlaraju21/Competitive_Programming | 98dc9ff17ce42a5b884b0460c161b42bbb203f0e | [
"MIT"
] | null | null | null | CSES/Dynamic_Programming/Dice_Combinations.py | kancharlaraju21/Competitive_Programming | 98dc9ff17ce42a5b884b0460c161b42bbb203f0e | [
"MIT"
] | null | null | null | CSES/Dynamic_Programming/Dice_Combinations.py | kancharlaraju21/Competitive_Programming | 98dc9ff17ce42a5b884b0460c161b42bbb203f0e | [
"MIT"
] | null | null | null | n=int(input())
m=1000000007
dp=[0]*(n+1)
dp[0]=1
for i in range(1,n+1):
for j in range(1,7):
if i-j >= 0:
dp[i]=(dp[i]+dp[i-j])%m
else:
break
print(dp[n]) | 18 | 35 | 0.449495 | n=int(input())
m=1000000007
dp=[0]*(n+1)
dp[0]=1
for i in range(1,n+1):
for j in range(1,7):
if i-j >= 0:
dp[i]=(dp[i]+dp[i-j])%m
else:
break
print(dp[n]) | 0 | 0 | 0 |
035212274fe5f2c36cd9bf6f0401cf06e9c747da | 807 | py | Python | class_n_objects.py | ntnshrm87/OOP-Basics-Python | 4387c4c565266dc93fc8b03484dea59ef47f59c8 | [
"MIT"
] | 1 | 2018-06-28T16:20:45.000Z | 2018-06-28T16:20:45.000Z | class_n_objects.py | ntnshrm87/OOP-Basics-Python | 4387c4c565266dc93fc8b03484dea59ef47f59c8 | [
"MIT"
] | null | null | null | class_n_objects.py | ntnshrm87/OOP-Basics-Python | 4387c4c565266dc93fc8b03484dea59ef47f59c8 | [
"MIT"
] | null | null | null | # This is a small python snippet which introduces how to create a class with a constructor,
# some functions inside the class and their usage.
# Creating an instance of object
x = Store("Security Books")
# Adding items in stock
x.add_item("Gray Hat Hacking", 34)
x.add_item("Rafay Baloch", 34.42)
# Total of stock items
print("The total of items is: ", x.stock_price())
| 27.827586 | 91 | 0.634449 | # This is a small python snippet which introduces how to create a class with a constructor,
# some functions inside the class and their usage.
class Store:
    """A named store that tracks stocked items and their prices."""

    def __init__(self, name):
        """Create an empty store called `name`."""
        self.name = name
        self.items = []  # each entry: {"name": str, "price": number}

    def add_item(self, name, price):
        """Add an item with the given name and price to the stock list."""
        self.items.append({"name": name, "price": price})

    def stock_price(self):
        """Return the total price of all items currently in stock (0 if empty)."""
        # sum() over a generator replaces the manual accumulator loop.
        return sum(item["price"] for item in self.items)
# Build a store object to work with.
store = Store("Security Books")
# Stock it with a couple of titles.
store.add_item("Gray Hat Hacking", 34)
store.add_item("Rafay Baloch", 34.42)
# Report the combined value of the stock.
print("The total of items is: ", store.stock_price())
| 53 | 358 | 23 |
095cbd232c26ef5dc95418fa6f5dfdff8df44a43 | 717 | py | Python | examples/other_api_requests.py | miguelmartinez1024/adobe_analytics | f32ac71ce1d25478fd3085c0136b8c4f49027b58 | [
"MIT"
] | 32 | 2018-01-26T19:17:25.000Z | 2019-12-05T14:26:20.000Z | examples/other_api_requests.py | miguelmartinez1024/adobe_analytics | f32ac71ce1d25478fd3085c0136b8c4f49027b58 | [
"MIT"
] | 14 | 2018-01-22T08:41:46.000Z | 2018-12-06T06:12:24.000Z | examples/other_api_requests.py | miguelmartinez1024/adobe_analytics | f32ac71ce1d25478fd3085c0136b8c4f49027b58 | [
"MIT"
] | 15 | 2018-07-12T17:25:22.000Z | 2020-03-12T16:28:15.000Z | """
The client class has a request method that allows all sorts of generic API requests to Adobe's v1.4 REST API.
To get a comprehensive overview of available APIs and methods check out the official Adobe Analytics API Explorer:
https://marketing.adobe.com/developer/api-explorer
"""
from adobe_analytics import Client
client = Client.from_json("my_path.json")
# The request below returns a list of all evars available in all specified report suites.
result = client.request(
api="ReportSuite",
method="GetEvars",
data={
"rsid_list": [
"my_report_suite_id_1",
"my_report_suite_id_2",
"...",
"my_report_suite_id_n"
]
}
)
print(result)
| 28.68 | 114 | 0.684798 | """
The client class has a request method that allows all sorts of generic API requests to Adobe's v1.4 REST API.
To get a comprehensive overview of available APIs and methods check out the official Adobe Analytics API Explorer:
https://marketing.adobe.com/developer/api-explorer
"""
from adobe_analytics import Client
client = Client.from_json("my_path.json")
# The request below returns a list of all evars available in all specified report suites.
result = client.request(
api="ReportSuite",
method="GetEvars",
data={
"rsid_list": [
"my_report_suite_id_1",
"my_report_suite_id_2",
"...",
"my_report_suite_id_n"
]
}
)
print(result)
| 0 | 0 | 0 |
64287cc0757e35ce2d45843039a9087d0ef05d47 | 406 | py | Python | test/test_complex.py | zsennenga/luabins_py | 30e3feca7f042777a3d9bef5aa510f406427dfc1 | [
"MIT"
] | null | null | null | test/test_complex.py | zsennenga/luabins_py | 30e3feca7f042777a3d9bef5aa510f406427dfc1 | [
"MIT"
] | null | null | null | test/test_complex.py | zsennenga/luabins_py | 30e3feca7f042777a3d9bef5aa510f406427dfc1 | [
"MIT"
] | 1 | 2022-03-09T23:54:47.000Z | 2022-03-09T23:54:47.000Z | import os
from io import BytesIO
from luabins import decode_luabins, encode_luabins
| 27.066667 | 63 | 0.67734 | import os
from io import BytesIO
from luabins import decode_luabins, encode_luabins
def test_debug():
path = os.path.join(os.path.dirname(__file__), "debug.bin")
with open(path, "rb") as f:
orig = f.read()
decode1 = decode_luabins(BytesIO(orig))
re_encoded = encode_luabins(decode1)
decode2 = decode_luabins(BytesIO(re_encoded))
assert decode2 == decode1
| 297 | 0 | 23 |
ead51da1ae11b386408fc638228f708d544125c7 | 3,358 | py | Python | apps/article/tests.py | aplot249/my_blog | 7cfcd67991f0a6dc861847514e8d0fca2213fa8b | [
"MIT"
] | null | null | null | apps/article/tests.py | aplot249/my_blog | 7cfcd67991f0a6dc861847514e8d0fca2213fa8b | [
"MIT"
] | 5 | 2021-06-02T01:30:26.000Z | 2022-03-12T00:24:27.000Z | apps/article/tests.py | qq1788lover/my_blog | 7cfcd67991f0a6dc861847514e8d0fca2213fa8b | [
"MIT"
] | null | null | null | from django.test import TestCase
import datetime
from django.utils import timezone
from article.models import ArticlePost
from django.contrib.auth.models import User
from time import sleep
from django.urls import reverse
| 33.58 | 110 | 0.634306 | from django.test import TestCase
import datetime
from django.utils import timezone
from article.models import ArticlePost
from django.contrib.auth.models import User
from time import sleep
from django.urls import reverse
class ArticlePostModelTests(TestCase):
    """Unit tests for ArticlePost.was_created_recently().

    The original Chinese comments were mojibake-garbled and split across
    lines (producing invalid syntax); they are reconstructed here in English.
    """

    def test_was_created_recently_with_future_article(self):
        # An article whose created time lies in the future is not "recent".
        author = User(username='user', password='test_password')
        author.save()
        future_article = ArticlePost(
            author=author,
            title='test',
            body='test',
            created=timezone.now() + datetime.timedelta(days=30)
        )
        self.assertIs(future_article.was_created_recently(), False)

    def test_was_created_recently_with_seconds_before_article(self):
        # An article created within the last minute counts as recent.
        author = User(username='user1', password='test_password')
        author.save()
        seconds_before_article = ArticlePost(
            author=author,
            title='test1',
            body='test1',
            created=timezone.now() - datetime.timedelta(seconds=45)
        )
        self.assertIs(seconds_before_article.was_created_recently(), True)

    def test_was_created_recently_with_hours_before_article(self):
        # An article created a few hours ago is no longer recent.
        author = User(username='user2', password='test_password')
        author.save()
        hours_before_article = ArticlePost(
            author=author,
            title='test2',
            body='test2',
            created=timezone.now() - datetime.timedelta(hours=3)
        )
        self.assertIs(hours_before_article.was_created_recently(), False)

    def test_was_created_recently_with_days_before_article(self):
        # An article created days ago is not recent.
        # (local renamed from the misleading `months_before_article`)
        author = User(username='user3', password='test_password')
        author.save()
        days_before_article = ArticlePost(
            author=author,
            title='test3',
            body='test3',
            created=timezone.now() - datetime.timedelta(days=5)
        )
        self.assertIs(days_before_article.was_created_recently(), False)
class ArtitclePostViewTests(TestCase):
    """Tests for side effects of the article detail view.

    (Class name typo "Artitcle" kept: it is the public identifier.)
    """

    def test_increase_views(self):
        # Requesting the detail view increments the article's view count by 1.
        author = User(username='user4', password='test_password')
        author.save()
        article = ArticlePost(
            author=author,
            title='test4',
            body='test4',
        )
        article.save()
        self.assertIs(article.total_views, 0)
        url = reverse('article:article_detail', args=(article.id,))
        self.client.get(url)
        viewed_article = ArticlePost.objects.get(id=article.id)
        self.assertIs(viewed_article.total_views, 1)

    def test_increase_views_but_not_change_updated_field(self):
        # Requesting the detail view must not touch the `updated` timestamp.
        author = User(username='user5', password='test_password')
        author.save()
        article = ArticlePost(
            author=author,
            title='test5',
            body='test5',
        )
        article.save()
        # Let some wall-clock time pass so a wrongly bumped `updated`
        # would be measurably later than `created`.
        sleep(0.5)
        url = reverse('article:article_detail', args=(article.id,))
        self.client.get(url)
        viewed_article = ArticlePost.objects.get(id=article.id)
        self.assertIs(viewed_article.updated - viewed_article.created < timezone.timedelta(seconds=0.1), True)
402a6f87b97039e102c823f882ab0d40841d486c | 1,914 | py | Python | tempo/mlserver.py | michaelcheah/tempo | 41687e76a62619ed73bf197a557513c287fe43dc | [
"Apache-2.0"
] | null | null | null | tempo/mlserver.py | michaelcheah/tempo | 41687e76a62619ed73bf197a557513c287fe43dc | [
"Apache-2.0"
] | null | null | null | tempo/mlserver.py | michaelcheah/tempo | 41687e76a62619ed73bf197a557513c287fe43dc | [
"Apache-2.0"
] | null | null | null | import json
import os
from mlserver import MLModel
from mlserver.types import InferenceRequest, InferenceResponse
from mlserver.utils import get_model_uri
from .serve.base import BaseModel
from .serve.constants import ENV_TEMPO_RUNTIME_OPTIONS
from .serve.loader import load
from .serve.metadata import ModelFramework, RuntimeOptions
from .serve.utils import PredictMethodAttr
| 31.377049 | 76 | 0.700627 | import json
import os
from mlserver import MLModel
from mlserver.types import InferenceRequest, InferenceResponse
from mlserver.utils import get_model_uri
from .serve.base import BaseModel
from .serve.constants import ENV_TEMPO_RUNTIME_OPTIONS
from .serve.loader import load
from .serve.metadata import ModelFramework, RuntimeOptions
from .serve.utils import PredictMethodAttr
def _needs_init(model: BaseModel):
    """Return True when `model` wraps an annotated, still-unbound class-based predict method."""
    declared_on_class = model._K is not None
    annotated = hasattr(model._user_func, PredictMethodAttr)
    bound = hasattr(model._user_func, "__self__")
    return declared_on_class and annotated and not bound
class InferenceRuntime(MLModel):
async def load(self) -> bool:
self._model = await self._load_model()
await self._load_runtime()
self.ready = True
return self.ready
async def _load_model(self) -> BaseModel:
model_uri = await get_model_uri(self._settings)
model = load(model_uri)
model.details.local_folder = model_uri
if model.details.platform == ModelFramework.TempoPipeline:
# If pipeline, call children models remotely
model.set_remote(True)
if _needs_init(model):
instance = model._K()
# Make sure that the model is the instance's model (and not the
# class attribute)
model = instance.get_tempo()
if model._load_func:
model._load_func()
return model
async def _load_runtime(self):
rt_options_str = os.getenv(ENV_TEMPO_RUNTIME_OPTIONS)
if rt_options_str:
rt_options = RuntimeOptions(**json.loads(rt_options_str))
self._model.set_runtime_options_override(rt_options)
async def predict(self, payload: InferenceRequest) -> InferenceResponse:
prediction = self._model.request(payload.dict())
return InferenceResponse(**prediction)
| 1,369 | 11 | 153 |
724703dadf3641357af57821f330c5eb0d47c98b | 10,438 | py | Python | nets/joint_cnn.py | redwankarimsony/UniFAD | 0edd851573313bdf013ca8f8844f45413dfd5373 | [
"MIT"
] | null | null | null | nets/joint_cnn.py | redwankarimsony/UniFAD | 0edd851573313bdf013ca8f8844f45413dfd5373 | [
"MIT"
] | null | null | null | nets/joint_cnn.py | redwankarimsony/UniFAD | 0edd851573313bdf013ca8f8844f45413dfd5373 | [
"MIT"
] | 1 | 2022-02-11T05:02:46.000Z | 2022-02-11T05:02:46.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import tensorflow.contrib.slim as slim
model_params = {
'basic': ([0, 0, 0, 0], [16, 32, 64, 128]),
'test': ([0, 1, 2, 3, 2], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 2, 3, 4, 3], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 3, 4, 6, 3], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 0, 0, 0, 0], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
'50': ([0, 3, 4, 6, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
'101': ([0, 3, 4, 23, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
'152': ([0, 3, 8, 36, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
}
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
batch_norm_params_last = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 10e-8,
# force in-place updates of mean and variance estimates
'center': False,
# not use beta
'scale': False,
# not use gamma
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
activation = tf.nn.relu
# Convolution with special initialization
# def conv_module(net, num_res_layers, num_kernels, cardinality, stride, reuse = None, scope = None):
# with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
# # Use convolution for the first shortcut
# shortcut = convolution(net, num_kernels[1], kernel_size=1, stride=stride, padding='SAME')
# for i in range(num_res_layers):
# stride = stride if i==0 else 1
# net = residual_block(net, num_kernels, cardinality, stride,
# reuse=reuse, scope='block_%d' % i)
# print('| ---- block_%d' % i)
# net = activation(net + shortcut)
# shortcut = net
# return shortcut
# def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=512,
# weight_decay=1e-4, reuse=None, model_version=None):
# with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected],
# activation_fn=activation,
# normalizer_fn=slim.batch_norm,
# normalizer_params=batch_norm_params):
# with tf.variable_scope('ResNeXt', [images], reuse=reuse):
# with slim.arg_scope([slim.batch_norm, slim.dropout],
# is_training=phase_train):
# print('input shape:', [dim.value for dim in images.shape])
# model_version = 'test' if model_version ==None else model_version
# num_layers, num_kernels, kernel_size, cardinality = model_params[model_version]
# net = convolution(images, num_kernels[0], kernel_size=kernel_size, groups=1, stride=2, padding='SAME')
# print('module_1 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[1], num_kernels[1], cardinality, stride=1, scope='conv2')
# print('module_2 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[2], num_kernels[2], cardinality, stride=2, scope='conv3')
# print('module_3 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[3], num_kernels[3], cardinality, stride=2, scope='conv4')
# print('module_4 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[4], num_kernels[4], cardinality, stride=2, scope='conv5')
# print('module_5 shape:', [dim.value for dim in net.shape])
# net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool5')
# net = slim.flatten(net)
# net = slim.fully_connected(net, 256, scope='PreBottleneck',
# # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
# # weights_initializer=tf.constant_initializer(0.),
# weights_initializer=slim.xavier_initializer())
# net = slim.fully_connected(net, 1, scope='Bottleneck',
# # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
# # weights_initializer=tf.constant_initializer(0.),
# weights_initializer=slim.xavier_initializer(),
# activation_fn=None)
# return net
| 55.227513 | 120 | 0.587469 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import tensorflow.contrib.slim as slim
model_params = {
'basic': ([0, 0, 0, 0], [16, 32, 64, 128]),
'test': ([0, 1, 2, 3, 2], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 2, 3, 4, 3], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 3, 4, 6, 3], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
#'test': ([0, 0, 0, 0, 0], [64, [64,128], [128,256], [256,512], [256,512]], 3, 16),
'50': ([0, 3, 4, 6, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
'101': ([0, 3, 4, 23, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
'152': ([0, 3, 8, 36, 3], [64, [128,256], [256,512], [512,1024], [1024, 2048]], 7, 32),
}
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
batch_norm_params_last = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 10e-8,
# force in-place updates of mean and variance estimates
'center': False,
# not use beta
'scale': False,
# not use gamma
'updates_collections': None,
# Moving averages ends up in the trainable variables collection
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
activation = tf.nn.relu
# Convolution with special initialization
def convolution(net, num_kernels, kernel_size, groups=1, stride=1, padding='SAME'):
    """Grouped 2-D convolution with He-style weight initialization.

    net:         input feature map (NHWC; channels split on axis 3 when grouped).
    num_kernels: total number of output channels; must divide evenly by `groups`.
    groups:      cardinality; groups == 1 is a plain slim.conv2d, otherwise the
                 input is split channel-wise and each group convolved separately.
    """
    # Bug fix: the failure message previously printed kernel_size instead of
    # num_kernels, which is the value the divisibility check is about.
    assert num_kernels % groups == 0, 'num_kernels=%d must be divisible by groups=%d' % (num_kernels, groups)
    # He initialization: std = sqrt(2 / fan-out per group).
    stddev = math.sqrt(2/(kernel_size*kernel_size*num_kernels/groups))
    if groups==1:
        return slim.conv2d(net, num_kernels, kernel_size=kernel_size, stride=stride, padding=padding,
                            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                            biases_initializer=None)
    else:
        num_kernels_split = int(num_kernels / groups)
        input_splits = tf.split(net, groups, axis=3)
        output_splits = [slim.conv2d(input_split, num_kernels_split,
                            kernel_size=kernel_size, stride=stride, padding=padding,
                            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                            biases_initializer=None) for input_split in input_splits]
        return tf.concat(output_splits, axis=3)
def residual_block(net, num_kernels, cardinality, stride=1, reuse=None, scope=None):
    """ResNeXt-style bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 expand.

    num_kernels is a [reduce_channels, expand_channels] pair (see model_params).
    The final 1x1 conv has no activation; the caller is expected to add the
    shortcut and apply the nonlinearity.
    """
    with tf.variable_scope(scope, 'block', [net], reuse=reuse):
        net = convolution(net, num_kernels[0], kernel_size=1, groups=1, stride=1, padding='SAME')
        net = convolution(net, num_kernels[0], kernel_size=3, groups=cardinality, stride=stride, padding='SAME')
        print(net.shape)  # debug output left in by the author
        with slim.arg_scope([slim.conv2d], activation_fn=None):
            net = convolution(net, num_kernels[1], kernel_size=1, groups=1, stride=1, padding='SAME')
        return net
def conv_module(net, num_res_layers, num_kernels, trans_kernel_size=3, trans_stride=2,
                use_se=False, reuse=None, scope=None):
    """Downsampling transition conv followed by `num_res_layers` residual blocks.

    Each residual block is two 3x3 convs (no biases, stddev=0.01 init) added to
    a running shortcut. NOTE(review): `se_module` is not defined in this file's
    visible code or imports -- presumably provided elsewhere; confirm before
    calling with use_se=True.
    """
    with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
        # Transition conv changes channel count and (by default) halves H/W.
        net = slim.conv2d(net, num_kernels,
                kernel_size=trans_kernel_size, stride=trans_stride, padding='SAME',
                weights_initializer=slim.xavier_initializer())
        shortcut = net
        for i in range(num_res_layers):
            # num_kernels_sm = int(num_kernels / 2)
            net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                    biases_initializer=None)
            net = slim.conv2d(net, num_kernels, kernel_size=3, stride=1, padding='SAME',
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                    biases_initializer=None)
            # net = slim.conv2d(net, num_kernels, kernel_size=1, stride=1, padding='SAME',
            #         weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
            #         biases_initializer=None)
            print('| ---- block_%d' % i)  # debug trace of block construction
            if use_se:
                net = se_module(net)
            # Residual add; the summed output becomes the next block's shortcut.
            net = net + shortcut
            shortcut = net
        return net
# def conv_module(net, num_res_layers, num_kernels, cardinality, stride, reuse = None, scope = None):
# with tf.variable_scope(scope, 'conv', [net], reuse=reuse):
# # Use convolution for the first shortcut
# shortcut = convolution(net, num_kernels[1], kernel_size=1, stride=stride, padding='SAME')
# for i in range(num_res_layers):
# stride = stride if i==0 else 1
# net = residual_block(net, num_kernels, cardinality, stride,
# reuse=reuse, scope='block_%d' % i)
# print('| ---- block_%d' % i)
# net = activation(net + shortcut)
# shortcut = net
# return shortcut
# def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=512,
# weight_decay=1e-4, reuse=None, model_version=None):
# with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected],
# activation_fn=activation,
# normalizer_fn=slim.batch_norm,
# normalizer_params=batch_norm_params):
# with tf.variable_scope('ResNeXt', [images], reuse=reuse):
# with slim.arg_scope([slim.batch_norm, slim.dropout],
# is_training=phase_train):
# print('input shape:', [dim.value for dim in images.shape])
# model_version = 'test' if model_version ==None else model_version
# num_layers, num_kernels, kernel_size, cardinality = model_params[model_version]
# net = convolution(images, num_kernels[0], kernel_size=kernel_size, groups=1, stride=2, padding='SAME')
# print('module_1 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[1], num_kernels[1], cardinality, stride=1, scope='conv2')
# print('module_2 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[2], num_kernels[2], cardinality, stride=2, scope='conv3')
# print('module_3 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[3], num_kernels[3], cardinality, stride=2, scope='conv4')
# print('module_4 shape:', [dim.value for dim in net.shape])
# net = conv_module(net, num_layers[4], num_kernels[4], cardinality, stride=2, scope='conv5')
# print('module_5 shape:', [dim.value for dim in net.shape])
# net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool5')
# net = slim.flatten(net)
# net = slim.fully_connected(net, 256, scope='PreBottleneck',
# # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
# # weights_initializer=tf.constant_initializer(0.),
# weights_initializer=slim.xavier_initializer())
# net = slim.fully_connected(net, 1, scope='Bottleneck',
# # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
# # weights_initializer=tf.constant_initializer(0.),
# weights_initializer=slim.xavier_initializer(),
# activation_fn=None)
# return net
def inference(images, keep_probability, phase_train=True, bottleneck_layer_size=512,
            weight_decay=1e-4, reuse=None, model_version=None):
    """Build the network: 4 conv modules -> global avg pool -> 1-unit logit.

    Returns (net, feat): the single-logit output and the flattened pooled
    features. NOTE(review): keep_probability, bottleneck_layer_size,
    weight_decay and model_version are accepted for signature compatibility
    but unused in this body.
    """
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d, slim.fully_connected],
                        activation_fn=activation,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('ResNeXt', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                print('input shape:', [dim.value for dim in images.shape])
                net = conv_module(images, 0, 16, scope='global_conv1')
                # ('she' below is a typo for 'shape' in the original debug print)
                print('module_1 she:', [dim.value for dim in net.shape])
                net = conv_module(net, 0, 32, scope='global_conv2')
                print('module_2 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, 0, 64, scope='global_conv3')
                print('module_3 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, 0, 128, scope='global_conv4')
                print('module_4 shape:', [dim.value for dim in net.shape])
                # Global average pool over the full spatial extent, then flatten.
                net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool5')
                feat = slim.flatten(net)
                '''feat = slim.fully_connected(net, 64, scope='PreBottleneck',
                # # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                # # weights_initializer=tf.constant_initializer(0.),
                weights_initializer=slim.xavier_initializer(),
                activation_fn=None, normalizer_fn=None)'''
                # Single raw logit (no activation, no batch norm).
                net = slim.fully_connected(feat, 1, scope='Bottleneck',
                    # weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                    # weights_initializer=tf.constant_initializer(0.),
                    weights_initializer=slim.xavier_initializer(),
                    activation_fn=None, normalizer_fn=None)
                return net, feat
| 5,041 | 0 | 115 |
c68cb9d7cbf1af2d8ca985b14341bae9e6bc9925 | 6,099 | py | Python | bin/decode-kubeconfig.py | dmrub/kubespray | b9801a61d2bd82cbca68fbf2b5e25362feb44733 | [
"Apache-2.0"
] | null | null | null | bin/decode-kubeconfig.py | dmrub/kubespray | b9801a61d2bd82cbca68fbf2b5e25362feb44733 | [
"Apache-2.0"
] | null | null | null | bin/decode-kubeconfig.py | dmrub/kubespray | b9801a61d2bd82cbca68fbf2b5e25362feb44733 | [
"Apache-2.0"
] | 1 | 2020-02-12T15:50:15.000Z | 2020-02-12T15:50:15.000Z | #!/usr/bin/env python
from __future__ import print_function
import logging
import sys
import os
import yaml
import base64
import subprocess
import tempfile
logger = logging.getLogger()
def run_command(command, env=None, cwd=None, stdin=None,
get_stdout=True, get_stderr=True):
"""returns triple (returncode, stdout, stderr)
if get_stdout is False stdout tuple element will be set to None
if get_stderr is False stderr tuple element will be set to None
"""
logger.info('Run command {} in env {}, cwd {}'.format(command, env, cwd))
myenv = {}
if env is not None:
for k, v in env.items():
myenv[str(k)] = str(v)
env = myenv
with tempfile.TemporaryFile(suffix='stdout') as tmp_stdout:
with tempfile.TemporaryFile(suffix='stderr') as tmp_stderr:
if isinstance(command, list) or isinstance(command, tuple):
p = subprocess.Popen(command,
stdin=stdin,
stdout=tmp_stdout,
stderr=tmp_stderr,
env=env,
cwd=cwd,
universal_newlines=False)
else:
p = subprocess.Popen(command,
stdin=stdin,
stdout=tmp_stdout,
stderr=tmp_stderr,
env=env,
cwd=cwd,
universal_newlines=False,
shell=True)
status = p.wait()
if get_stdout:
tmp_stdout.flush()
tmp_stdout.seek(0)
out = tmp_stdout.read()
else:
out = None
if get_stderr:
tmp_stderr.flush()
tmp_stderr.seek(0)
err = tmp_stderr.read()
else:
err = None
logger.info('Command {} returned code: {}'.format(command, status))
return status, out, err
if __name__ == "__main__":
logging.basicConfig()
if len(sys.argv) <= 1:
print('Usage: {} kubeconfig-file'.format(sys.argv[0]), file=sys.stderr)
sys.exit(0)
stream = open(sys.argv[1], "r")
docs = yaml.load_all(stream)
for doc in docs:
kind = doc.get('kind')
clusters = doc.get('clusters')
for cluster in clusters:
cluster_data = cluster.get('cluster')
cluster_name = cluster.get('name')
if cluster_data:
cert = get_obj_from_dict(cluster_data,
'certificate-authority-data')
server = cluster_data.get('server')
print('Server: {}'.format(server))
print('certificate-authority-data:')
print(cert)
users = doc.get('users')
for user in users:
user_name = user.get('name')
print('User: {}'.format(user_name))
user_data = user.get('user')
if user_data:
cert = get_obj_from_dict(user_data,
'client-certificate-data')
print('client-certificate-data:')
print(cert)
cert = get_obj_from_dict(user_data,
'client-key-data')
print('client-key-data:')
print(cert)
| 34.653409 | 89 | 0.513691 | #!/usr/bin/env python
from __future__ import print_function
import logging
import sys
import os
import yaml
import base64
import subprocess
import tempfile
logger = logging.getLogger()
def run_command(command, env=None, cwd=None, stdin=None,
                get_stdout=True, get_stderr=True):
    """Run *command* and return the triple ``(returncode, stdout, stderr)``.

    ``command`` may be a list/tuple (executed directly) or a plain string
    (executed through the shell).  Output is captured through temporary
    files so arbitrarily large output cannot deadlock the child process.

    if get_stdout is False stdout tuple element will be set to None
    if get_stderr is False stderr tuple element will be set to None
    """
    logger.info('Run command {} in env {}, cwd {}'.format(command, env, cwd))
    if env is not None:
        # Popen requires string keys/values; coerce whatever was passed in.
        env = {str(k): str(v) for k, v in env.items()}
    # Single Popen call: shell mode depends only on the command's type,
    # which removes the original duplicated branch.
    use_shell = not isinstance(command, (list, tuple))
    with tempfile.TemporaryFile(suffix='stdout') as tmp_stdout:
        with tempfile.TemporaryFile(suffix='stderr') as tmp_stderr:
            p = subprocess.Popen(command,
                                 stdin=stdin,
                                 stdout=tmp_stdout,
                                 stderr=tmp_stderr,
                                 env=env,
                                 cwd=cwd,
                                 universal_newlines=False,
                                 shell=use_shell)
            status = p.wait()
            out = None
            if get_stdout:
                tmp_stdout.flush()
                tmp_stdout.seek(0)
                out = tmp_stdout.read()
            err = None
            if get_stderr:
                tmp_stderr.flush()
                tmp_stderr.seek(0)
                err = tmp_stderr.read()
    logger.info('Command {} returned code: {}'.format(command, status))
    return status, out, err
class ExecutionException(Exception):
    """Error raised when an external command fails or cannot be started.

    Carries the captured ``stdout``/``stderr`` of the failed command and,
    when the process could not be launched at all, the original ``OSError``.
    """

    def __init__(self, message, stdout=None, stderr=None, oserror=None):
        super(ExecutionException, self).__init__(message)
        self.message = message
        self.stdout = stdout
        self.stderr = stderr
        self.oserror = oserror
def process_with_cmd(args, input):
    """Pipe ``input`` into the command ``args`` and return its stdout.

    The payload is staged in a temporary file that serves as the child
    process's stdin.  Raises ExecutionException when the command cannot be
    started or exits with a non-zero status.
    """
    cwd = '.'
    with tempfile.NamedTemporaryFile() as stdin_file:
        stdin_file.write(input)
        stdin_file.flush()
        stdin_file.seek(0)
        try:
            status, out, err = run_command(args, env=os.environ, cwd=cwd,
                                           stdin=stdin_file)
        except OSError as e:
            message = 'Could not execute command line "{}" in directory "{}": {}'.format(
                ' '.join(args), cwd, e)
            logger.exception(message)
            raise ExecutionException(message=message, oserror=e)
    if status != 0:
        print(err, file=sys.stderr)
        print(input, file=sys.stderr)
        raise ExecutionException(message='computation failed',
                                 stdout=out, stderr=err)
    # Surface any diagnostics the tool wrote to stderr even on success.
    print(err)
    return out
def decode_certificate(cert):
    """Decode a PEM RSA private key into openssl's human-readable text form.

    The original body duplicated ``process_with_cmd`` line for line with the
    openssl arguments hard-coded, so it now simply delegates; behavior
    (error messages, stderr echoing, ExecutionException) is unchanged.
    """
    return process_with_cmd(["openssl", "rsa", "-text", "-noout"], cert)
def get_obj_from_dict(d, key):
    """Return the base64-decoded, openssl-pretty-printed value of ``d[key]``.

    Recognizes PEM certificates and RSA private keys.  Returns None when the
    key is absent or empty.  Unrecognized payloads are returned as raw
    decoded bytes — the original code left ``cmd`` unbound in that case and
    crashed with UnboundLocalError.
    """
    obj_data = d.get(key)
    if not obj_data:
        return None
    decoded_obj_data = base64.b64decode(obj_data)
    # b64decode returns bytes on Python 3, so compare against bytes markers
    # (the original str-in-bytes test raised TypeError on Python 3).
    if b'BEGIN CERTIFICATE' in decoded_obj_data:
        cmd = ["openssl", "x509", "-text", "-noout"]
    elif b'BEGIN RSA PRIVATE KEY' in decoded_obj_data:
        cmd = ["openssl", "rsa", "-text", "-noout"]
    else:
        # Unknown payload type: hand back the decoded data instead of failing.
        return decoded_obj_data
    return process_with_cmd(cmd, decoded_obj_data)
if __name__ == "__main__":
    logging.basicConfig()
    if len(sys.argv) <= 1:
        print('Usage: {} kubeconfig-file'.format(sys.argv[0]), file=sys.stderr)
        sys.exit(0)
    # Materialize all documents before the file closes (safe_load_all is
    # lazy).  safe_load_all avoids arbitrary Python object construction and
    # keeps working on PyYAML >= 6, where load_all requires an explicit
    # Loader argument.
    with open(sys.argv[1], "r") as stream:
        docs = list(yaml.safe_load_all(stream))
    for doc in docs:
        kind = doc.get('kind')  # currently unused; kept for future filtering
        # Guard against documents without these sections instead of crashing.
        clusters = doc.get('clusters') or []
        for cluster in clusters:
            cluster_data = cluster.get('cluster')
            cluster_name = cluster.get('name')
            if cluster_data:
                cert = get_obj_from_dict(cluster_data,
                                         'certificate-authority-data')
                server = cluster_data.get('server')
                print('Server: {}'.format(server))
                print('certificate-authority-data:')
                print(cert)
        users = doc.get('users') or []
        for user in users:
            user_name = user.get('name')
            print('User: {}'.format(user_name))
            user_data = user.get('user')
            if user_data:
                cert = get_obj_from_dict(user_data,
                                         'client-certificate-data')
                print('client-certificate-data:')
                print(cert)
                cert = get_obj_from_dict(user_data,
                                         'client-key-data')
                print('client-key-data:')
                print(cert)
| 2,362 | 15 | 118 |
7dbca43a3679dcbd7e5e666dbd9f4bab888170b5 | 218 | py | Python | app/api.py | darrenvong/local-lead-finder | 2391b46549b30eda237578f17c8f499536dd6a17 | [
"MIT"
] | 2 | 2022-02-13T08:22:56.000Z | 2022-02-14T01:31:28.000Z | app/api.py | darrenvong/local-lead-finder | 2391b46549b30eda237578f17c8f499536dd6a17 | [
"MIT"
] | null | null | null | app/api.py | darrenvong/local-lead-finder | 2391b46549b30eda237578f17c8f499536dd6a17 | [
"MIT"
] | null | null | null | from flask import render_template
from app import app
@app.route('/', methods=['GET', 'POST'])
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0")
| 18.166667 | 40 | 0.665138 | from flask import render_template
from app import app
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('home.html')
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0")
| 30 | 0 | 22 |
1384aeebc5e42d066a37d9cabd196e83c0c37c35 | 12,239 | py | Python | tests/unit/pipelines/test_base_pipeline.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | tests/unit/pipelines/test_base_pipeline.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | tests/unit/pipelines/test_base_pipeline.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from contextlib import ExitStack as does_not_raise
import pytest
from zenml.exceptions import (
PipelineConfigurationError,
PipelineInterfaceError,
StackValidationError,
)
from zenml.pipelines import pipeline
from zenml.repository import Repository
from zenml.steps import BaseStepConfig, step
from zenml.utils.yaml_utils import write_yaml
def create_pipeline_with_config_value(config_value: int):
"""Creates pipeline instance with a step named 'step' which has a
parameter named 'value'."""
@step
@pipeline
pipeline_instance = some_pipeline(
step_=step_with_config(config=Config(value=config_value))
)
return pipeline_instance
def test_initialize_pipeline_with_args(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that a pipeline can be initialized with args."""
with does_not_raise():
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(empty_step_1(), empty_step_2())
def test_initialize_pipeline_with_kwargs(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that a pipeline can be initialized with kwargs."""
with does_not_raise():
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(
step_1=empty_step_1(), step_2=empty_step_2()
)
def test_initialize_pipeline_with_args_and_kwargs(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that a pipeline can be initialized with a mix of args and kwargs."""
with does_not_raise():
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(empty_step_1(), step_2=empty_step_2())
def test_initialize_pipeline_with_too_many_args(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails when too many args
are passed."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2, empty_step_3 = generate_empty_steps(3)
unconnected_two_step_pipeline(
empty_step_1(), empty_step_2(), empty_step_3()
)
def test_initialize_pipeline_with_too_many_args_and_kwargs(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails when too many args
and kwargs are passed."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2, empty_step_3 = generate_empty_steps(3)
unconnected_two_step_pipeline(
empty_step_3(), step_1=empty_step_1(), step_2=empty_step_2()
)
def test_initialize_pipeline_with_missing_key(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when an argument
is missing."""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(step_1=empty_step())
def test_initialize_pipeline_with_unexpected_key(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails when an argument
has an unexpected key."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2, empty_step_3 = generate_empty_steps(3)
unconnected_two_step_pipeline(
step_1=empty_step_1(), step_2=empty_step_2(), step_3=empty_step_3()
)
def test_initialize_pipeline_with_repeated_args(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when same step
object is used"""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(empty_step(), empty_step())
def test_initialize_pipeline_with_repeated_kwargs(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when same step
object is used"""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(step_1=empty_step(), step_2=empty_step())
def test_initialize_pipeline_with_repeated_args_and_kwargs(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when same step
object is used"""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(empty_step(), step_2=empty_step())
def test_initialize_pipeline_with_wrong_arg_type(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when an arg has a wrong type."""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(1, empty_step())
def test_initialize_pipeline_with_wrong_kwarg_type(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when a kwarg has a wrong type."""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(step_1=1, step_2=empty_step())
def test_initialize_pipeline_with_missing_arg_step_brackets(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails with missing arg brackets."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(empty_step_1, empty_step_2)
def test_initialize_pipeline_with_missing_kwarg_step_brackets(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails with missing kwarg brackets."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(step_1=empty_step_1, step_2=empty_step_2)
def test_setting_step_parameter_with_config_object():
"""Test whether step parameters can be set using a config object."""
config_value = 0
pipeline_instance = create_pipeline_with_config_value(config_value)
step_instance = pipeline_instance.steps["step_"]
assert step_instance.PARAM_SPEC["value"] == config_value
def test_overwrite_step_parameter_with_config_yaml(tmp_path):
"""Test whether step parameters can be overwritten using a config yaml."""
config_value = 0
pipeline_instance = create_pipeline_with_config_value(config_value)
yaml_path = os.path.join(tmp_path, "config.yaml")
yaml_config_value = 1
write_yaml(
yaml_path,
{"steps": {"step_": {"parameters": {"value": yaml_config_value}}}},
)
pipeline_instance = pipeline_instance.with_config(
yaml_path, overwrite_step_parameters=True
)
step_instance = pipeline_instance.steps["step_"]
assert step_instance.PARAM_SPEC["value"] == yaml_config_value
def test_dont_overwrite_step_parameter_with_config_yaml(tmp_path):
"""Test that step parameters don't get overwritten by yaml file
if not forced."""
config_value = 0
pipeline_instance = create_pipeline_with_config_value(config_value)
yaml_path = os.path.join(tmp_path, "config.yaml")
yaml_config_value = 1
write_yaml(
yaml_path,
{"steps": {"step_": {"parameters": {"value": yaml_config_value}}}},
)
pipeline_instance = pipeline_instance.with_config(yaml_path)
step_instance = pipeline_instance.steps["step_"]
assert step_instance.PARAM_SPEC["value"] == config_value
def test_yaml_configuration_with_invalid_step_name(tmp_path):
"""Test that a config yaml with an invalid step name raises an exception"""
pipeline_instance = create_pipeline_with_config_value(0)
yaml_path = os.path.join(tmp_path, "config.yaml")
write_yaml(
yaml_path,
{"steps": {"WRONG_STEP_NAME": {"parameters": {"value": 0}}}},
)
with pytest.raises(PipelineConfigurationError):
_ = pipeline_instance.with_config(yaml_path)
def test_yaml_configuration_with_invalid_parameter_name(tmp_path):
"""Test that a config yaml with an invalid parameter
name raises an exception"""
pipeline_instance = create_pipeline_with_config_value(0)
yaml_path = os.path.join(tmp_path, "config.yaml")
write_yaml(
yaml_path,
{"steps": {"step_": {"parameters": {"WRONG_PARAMETER_NAME": 0}}}},
)
with pytest.raises(PipelineConfigurationError):
_ = pipeline_instance.with_config(yaml_path)
def test_setting_pipeline_parameter_name_when_initializing_pipeline(
one_step_pipeline, empty_step
):
"""Tests that initializing a pipeline with a step sets the attribute
`pipeline_parameter_name` of the step."""
step_instance = empty_step()
assert step_instance.pipeline_parameter_name is None
one_step_pipeline(step_instance)
assert step_instance.pipeline_parameter_name == "step_"
def test_calling_a_pipeline_twice_raises_no_exception(
one_step_pipeline, empty_step
):
"""Tests that calling one pipeline instance twice does not raise
any exception."""
pipeline_instance = one_step_pipeline(empty_step())
with does_not_raise():
pipeline_instance.run()
pipeline_instance.run()
def test_pipeline_requirements(tmp_path):
"""Tests that the pipeline requirements are a combination of the
requirements of integrations and requirements of the specified
requirements file."""
from zenml.integrations.sklearn import SklearnIntegration
requirements = tmp_path / "requirements.txt"
requirements.write_text("any_requirement")
@pipeline(required_integrations=[SklearnIntegration.NAME])
assert my_pipeline().requirements == set(SklearnIntegration.REQUIREMENTS)
@pipeline(requirements=str(requirements))
assert my_pipeline().requirements == {"any_requirement"}
@pipeline(
required_integrations=[SklearnIntegration.NAME],
requirements=str(requirements),
)
assert my_pipeline().requirements == {
"any_requirement",
*SklearnIntegration.REQUIREMENTS,
}
def test_pipeline_requirements_takes_list(tmp_path):
"""Tests that the pipeline requirements are a combination of the
requirements of integrations and requirements of the specified
requirements file."""
from zenml.integrations.sklearn import SklearnIntegration
requirements = tmp_path / "requirements.txt"
requirements.write_text("any_requirement")
@pipeline(required_integrations=[SklearnIntegration.NAME])
assert my_pipeline().requirements == set(SklearnIntegration.REQUIREMENTS)
@pipeline(requirements=["any_requirement"])
assert my_pipeline().requirements == {"any_requirement"}
@pipeline(
required_integrations=[SklearnIntegration.NAME],
requirements=["any_requirement"],
)
assert my_pipeline().requirements == {
"any_requirement",
*SklearnIntegration.REQUIREMENTS,
}
def test_pipeline_run_fails_when_required_step_operator_is_missing(
one_step_pipeline,
):
"""Tests that running a pipeline with a step that requires a custom step
operator fails if the active stack does not contain this step operator."""
@step(custom_step_operator="azureml")
assert not Repository().active_stack.step_operator
with pytest.raises(StackValidationError):
one_step_pipeline(step_that_requires_step_operator()).run()
| 33.809392 | 80 | 0.745731 | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from contextlib import ExitStack as does_not_raise
import pytest
from zenml.exceptions import (
PipelineConfigurationError,
PipelineInterfaceError,
StackValidationError,
)
from zenml.pipelines import pipeline
from zenml.repository import Repository
from zenml.steps import BaseStepConfig, step
from zenml.utils.yaml_utils import write_yaml
def create_pipeline_with_config_value(config_value: int):
    """Creates pipeline instance with a step named 'step_' which has a
    parameter named 'value' set to ``config_value``."""

    class Config(BaseStepConfig):
        # Single int parameter exposed to the step via the config object.
        value: int

    @step
    def step_with_config(config: Config) -> None:
        pass

    @pipeline
    def some_pipeline(step_):
        step_()

    # Instantiate the pipeline with the configured step; the step's
    # pipeline-parameter name becomes "step_".
    pipeline_instance = some_pipeline(
        step_=step_with_config(config=Config(value=config_value))
    )
    return pipeline_instance
def test_initialize_pipeline_with_args(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that a pipeline can be initialized with args."""
with does_not_raise():
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(empty_step_1(), empty_step_2())
def test_initialize_pipeline_with_kwargs(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that a pipeline can be initialized with kwargs."""
with does_not_raise():
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(
step_1=empty_step_1(), step_2=empty_step_2()
)
def test_initialize_pipeline_with_args_and_kwargs(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that a pipeline can be initialized with a mix of args and kwargs."""
with does_not_raise():
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(empty_step_1(), step_2=empty_step_2())
def test_initialize_pipeline_with_too_many_args(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails when too many args
are passed."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2, empty_step_3 = generate_empty_steps(3)
unconnected_two_step_pipeline(
empty_step_1(), empty_step_2(), empty_step_3()
)
def test_initialize_pipeline_with_too_many_args_and_kwargs(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails when too many args
and kwargs are passed."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2, empty_step_3 = generate_empty_steps(3)
unconnected_two_step_pipeline(
empty_step_3(), step_1=empty_step_1(), step_2=empty_step_2()
)
def test_initialize_pipeline_with_missing_key(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when an argument
is missing."""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(step_1=empty_step())
def test_initialize_pipeline_with_unexpected_key(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails when an argument
has an unexpected key."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2, empty_step_3 = generate_empty_steps(3)
unconnected_two_step_pipeline(
step_1=empty_step_1(), step_2=empty_step_2(), step_3=empty_step_3()
)
def test_initialize_pipeline_with_repeated_args(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when same step
object is used"""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(empty_step(), empty_step())
def test_initialize_pipeline_with_repeated_kwargs(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when same step
object is used"""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(step_1=empty_step(), step_2=empty_step())
def test_initialize_pipeline_with_repeated_args_and_kwargs(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when same step
object is used"""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(empty_step(), step_2=empty_step())
def test_initialize_pipeline_with_wrong_arg_type(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when an arg has a wrong type."""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(1, empty_step())
def test_initialize_pipeline_with_wrong_kwarg_type(
unconnected_two_step_pipeline, empty_step
):
"""Test that pipeline initialization fails when a kwarg has a wrong type."""
with pytest.raises(PipelineInterfaceError):
unconnected_two_step_pipeline(step_1=1, step_2=empty_step())
def test_initialize_pipeline_with_missing_arg_step_brackets(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails with missing arg brackets."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(empty_step_1, empty_step_2)
def test_initialize_pipeline_with_missing_kwarg_step_brackets(
unconnected_two_step_pipeline, generate_empty_steps
):
"""Test that pipeline initialization fails with missing kwarg brackets."""
with pytest.raises(PipelineInterfaceError):
empty_step_1, empty_step_2 = generate_empty_steps(2)
unconnected_two_step_pipeline(step_1=empty_step_1, step_2=empty_step_2)
def test_setting_step_parameter_with_config_object():
    """Step parameters set through a config object end up in PARAM_SPEC."""
    expected = 0
    instance = create_pipeline_with_config_value(expected)
    assert instance.steps["step_"].PARAM_SPEC["value"] == expected
def test_overwrite_step_parameter_with_config_yaml(tmp_path):
"""Test whether step parameters can be overwritten using a config yaml."""
config_value = 0
pipeline_instance = create_pipeline_with_config_value(config_value)
yaml_path = os.path.join(tmp_path, "config.yaml")
yaml_config_value = 1
write_yaml(
yaml_path,
{"steps": {"step_": {"parameters": {"value": yaml_config_value}}}},
)
pipeline_instance = pipeline_instance.with_config(
yaml_path, overwrite_step_parameters=True
)
step_instance = pipeline_instance.steps["step_"]
assert step_instance.PARAM_SPEC["value"] == yaml_config_value
def test_dont_overwrite_step_parameter_with_config_yaml(tmp_path):
"""Test that step parameters don't get overwritten by yaml file
if not forced."""
config_value = 0
pipeline_instance = create_pipeline_with_config_value(config_value)
yaml_path = os.path.join(tmp_path, "config.yaml")
yaml_config_value = 1
write_yaml(
yaml_path,
{"steps": {"step_": {"parameters": {"value": yaml_config_value}}}},
)
pipeline_instance = pipeline_instance.with_config(yaml_path)
step_instance = pipeline_instance.steps["step_"]
assert step_instance.PARAM_SPEC["value"] == config_value
def test_yaml_configuration_with_invalid_step_name(tmp_path):
"""Test that a config yaml with an invalid step name raises an exception"""
pipeline_instance = create_pipeline_with_config_value(0)
yaml_path = os.path.join(tmp_path, "config.yaml")
write_yaml(
yaml_path,
{"steps": {"WRONG_STEP_NAME": {"parameters": {"value": 0}}}},
)
with pytest.raises(PipelineConfigurationError):
_ = pipeline_instance.with_config(yaml_path)
def test_yaml_configuration_with_invalid_parameter_name(tmp_path):
"""Test that a config yaml with an invalid parameter
name raises an exception"""
pipeline_instance = create_pipeline_with_config_value(0)
yaml_path = os.path.join(tmp_path, "config.yaml")
write_yaml(
yaml_path,
{"steps": {"step_": {"parameters": {"WRONG_PARAMETER_NAME": 0}}}},
)
with pytest.raises(PipelineConfigurationError):
_ = pipeline_instance.with_config(yaml_path)
def test_setting_pipeline_parameter_name_when_initializing_pipeline(
one_step_pipeline, empty_step
):
"""Tests that initializing a pipeline with a step sets the attribute
`pipeline_parameter_name` of the step."""
step_instance = empty_step()
assert step_instance.pipeline_parameter_name is None
one_step_pipeline(step_instance)
assert step_instance.pipeline_parameter_name == "step_"
def test_calling_a_pipeline_twice_raises_no_exception(
    one_step_pipeline, empty_step
):
    """A single pipeline instance can be run repeatedly without errors."""
    instance = one_step_pipeline(empty_step())
    with does_not_raise():
        for _ in range(2):
            instance.run()
def test_pipeline_requirements(tmp_path):
"""Tests that the pipeline requirements are a combination of the
requirements of integrations and requirements of the specified
requirements file."""
from zenml.integrations.sklearn import SklearnIntegration
requirements = tmp_path / "requirements.txt"
requirements.write_text("any_requirement")
@pipeline(required_integrations=[SklearnIntegration.NAME])
def my_pipeline():
pass
assert my_pipeline().requirements == set(SklearnIntegration.REQUIREMENTS)
@pipeline(requirements=str(requirements))
def my_pipeline():
pass
assert my_pipeline().requirements == {"any_requirement"}
@pipeline(
required_integrations=[SklearnIntegration.NAME],
requirements=str(requirements),
)
def my_pipeline():
pass
assert my_pipeline().requirements == {
"any_requirement",
*SklearnIntegration.REQUIREMENTS,
}
def test_pipeline_requirements_takes_list(tmp_path):
"""Tests that the pipeline requirements are a combination of the
requirements of integrations and requirements of the specified
requirements file."""
from zenml.integrations.sklearn import SklearnIntegration
requirements = tmp_path / "requirements.txt"
requirements.write_text("any_requirement")
@pipeline(required_integrations=[SklearnIntegration.NAME])
def my_pipeline():
pass
assert my_pipeline().requirements == set(SklearnIntegration.REQUIREMENTS)
@pipeline(requirements=["any_requirement"])
def my_pipeline():
pass
assert my_pipeline().requirements == {"any_requirement"}
@pipeline(
required_integrations=[SklearnIntegration.NAME],
requirements=["any_requirement"],
)
def my_pipeline():
pass
assert my_pipeline().requirements == {
"any_requirement",
*SklearnIntegration.REQUIREMENTS,
}
def test_pipeline_run_fails_when_required_step_operator_is_missing(
one_step_pipeline,
):
"""Tests that running a pipeline with a step that requires a custom step
operator fails if the active stack does not contain this step operator."""
@step(custom_step_operator="azureml")
def step_that_requires_step_operator() -> None:
pass
assert not Repository().active_stack.step_operator
with pytest.raises(StackValidationError):
one_step_pipeline(step_that_requires_step_operator()).run()
| 156 | 27 | 261 |
0da39b11eb48f74048d82c3a79d5b8d12a4a9d56 | 5,454 | py | Python | mmaction/models/heads/osbp_tsm_head.py | Haawron/mmaction2 | 5927ccc2936759df8977fb588640cbf158264afc | [
"Apache-2.0"
] | null | null | null | mmaction/models/heads/osbp_tsm_head.py | Haawron/mmaction2 | 5927ccc2936759df8977fb588640cbf158264afc | [
"Apache-2.0"
] | null | null | null | mmaction/models/heads/osbp_tsm_head.py | Haawron/mmaction2 | 5927ccc2936759df8977fb588640cbf158264afc | [
"Apache-2.0"
] | 1 | 2022-03-29T13:22:40.000Z | 2022-03-29T13:22:40.000Z | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from torch.autograd import Function
import numpy as np
from ..builder import HEADS
from ...core import top_k_accuracy
from .base import AvgConsensus, BaseHead
@HEADS.register_module() | 35.415584 | 83 | 0.563073 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from torch.autograd import Function
import numpy as np
from ..builder import HEADS
from ...core import top_k_accuracy
from .base import AvgConsensus, BaseHead
class GradReverse(Function):
    """Gradient-reversal autograd function for OSBP-style adversarial training.

    Forward is the identity.  In backward, gradients at positions flagged by
    ``target_idx_mask`` (target-domain samples) are negated, while the
    remaining (source-domain) gradients pass through unchanged.
    """

    @staticmethod
    def forward(ctx, x, target_idx_mask):
        # Stash the boolean mask for backward; x is returned unchanged.
        ctx.save_for_backward(target_idx_mask)
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        target_idx_mask, = ctx.saved_tensors
        # Clone before negating: the original mutated grad_output in place,
        # which PyTorch autograd forbids (the tensor may be reused by other
        # consumers of the same upstream gradient).
        grad_input = grad_output.clone()
        grad_input[target_idx_mask] = -grad_input[target_idx_mask]
        # Second return value is the gradient w.r.t. target_idx_mask
        # (non-differentiable input).
        return grad_input, None
@HEADS.register_module()
class OSBPTSMHead(BaseHead):
    """TSM classification head with adversarial domain adaptation.

    Features of samples whose domain label is ``'target'`` are routed through
    :class:`GradReverse`, so their classification gradient is reversed during
    back-propagation (presumably the OSBP open-set back-propagation scheme -
    TODO confirm against the originating paper/config).

    Args:
        num_classes (int): Number of classes to classify.
        in_channels (int): Channel count of the input feature.
        num_layers (int): Number of fc layers in the classifier stack.
        num_segments (int): Number of frame segments. Default: 8.
        loss_cls (dict): Config for the classification loss.
        spatial_type (str): Spatial pooling type; only 'avg' adds a pool.
        consensus (dict): Consensus module config.
        dropout_ratio (float): Dropout probability. Default: 0.8.
        init_std (float): Std for weight init. Default: 0.001.
        is_shift (bool): Whether the backbone feature is temporally shifted.
        temporal_pool (bool): Whether the feature is temporally pooled.
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 num_layers=1,
                 num_segments=8,
                 loss_cls=dict(type='CrossEntropyLoss'),
                 spatial_type='avg',
                 consensus=dict(type='AvgConsensus', dim=1),
                 dropout_ratio=0.8,
                 init_std=0.001,
                 is_shift=True,
                 temporal_pool=False,
                 **kwargs):
        super().__init__(num_classes, in_channels, loss_cls, **kwargs)
        self.spatial_type = spatial_type
        self.dropout_ratio = dropout_ratio
        self.num_layers = num_layers
        self.num_segments = num_segments
        self.init_std = init_std
        self.is_shift = is_shift
        self.temporal_pool = temporal_pool
        # Pop 'type' so the remaining keys can be forwarded as kwargs.
        consensus_ = consensus.copy()
        consensus_type = consensus_.pop('type')
        if consensus_type == 'AvgConsensus':
            self.consensus = AvgConsensus(**consensus_)
        else:
            self.consensus = None
        if self.dropout_ratio != 0:
            self.dropouts = [
                nn.Dropout(p=self.dropout_ratio)
                for _ in range(self.num_layers)
            ]
        else:
            # NOTE(review): when dropout_ratio == 0 this is None, and the
            # fc_block loop below indexes self.dropouts[i] -> TypeError.
            self.dropouts = None
        # Channel width halves at each layer; last layer maps to num_classes.
        # NOTE(review): plain Python lists (not nn.ModuleList); the modules
        # are registered only via the nn.Sequential built below.
        self.fcs = [
            nn.Linear(self.in_channels//2**i, self.in_channels//2**(i+1))
            for i in range(self.num_layers-1)
        ] + [nn.Linear(self.in_channels//2**(self.num_layers-1), self.num_classes)]
        # Interleave dropout/linear pairs, with ReLU between (not after last).
        self.fc_block = []
        for i in range(self.num_layers):
            self.fc_block.append(self.dropouts[i])
            self.fc_block.append(self.fcs[i])
            if i != self.num_layers - 1:
                self.fc_block.append(nn.ReLU())
        self.fc_block = nn.Sequential(*self.fc_block)
        if self.spatial_type == 'avg':
            # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels.
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        else:
            self.avg_pool = None

    def init_weights(self):
        """Initiate the parameters from scratch."""
        # NOTE(review): `for self.fc in ...` rebinds an instance attribute on
        # each iteration; it works, but a local loop variable was likely meant.
        for self.fc in self.fcs:
            normal_init(self.fc, std=self.init_std)

    def forward(self, x, num_segs, domains=None):
        """Forward the head.

        Args:
            x: features of shape (N * num_segs, C, H, W).
            num_segs: number of clips per sample.
            domains: array-like of per-sample domain labels ('target'/...).
                NOTE(review): the default None crashes at `domains.shape` -
                callers apparently always pass an array; confirm.

        Returns:
            Class scores of shape (N, num_classes).
        """
        if domains.shape[0] > 0:
            # Build a per-frame boolean mask of target-domain samples and
            # reverse their gradients (adversarial alignment).
            target_idx_mask = torch.squeeze(torch.from_numpy(domains == 'target'))
            target_idx_mask = target_idx_mask.repeat(num_segs)
            x = GradReverse.apply(x, target_idx_mask)
        # [N * num_segs, in_channels, 7, 7]
        if self.avg_pool is not None:
            x = self.avg_pool(x)
        # [N * num_segs, in_channels, 1, 1]
        x = torch.flatten(x, 1)
        # [N * num_segs, in_channels]
        cls_score = self.fc_block(x)
        # [N * num_segs, num_classes]
        if self.is_shift and self.temporal_pool:
            # [2 * N, num_segs // 2, num_classes]
            cls_score = cls_score.view((-1, self.num_segments // 2) +
                                       cls_score.size()[1:])
        else:
            # [N, num_segs, num_classes]
            cls_score = cls_score.view((-1, self.num_segments) +
                                       cls_score.size()[1:])
        # [N, 1, num_classes]
        cls_score = self.consensus(cls_score)
        # [N, num_classes]
        return cls_score.squeeze(1)

    def loss(self, cls_score, labels, domains, **kwargs):
        """Compute the classification loss (and top-k accuracy metrics).

        Returns:
            dict: losses (and `top{k}_acc` tensors for hard-label training).
        """
        losses = dict()
        # Promote scalar labels to a 1-element batch.
        if labels.shape == torch.Size([]):
            labels = labels.unsqueeze(0)
        elif labels.dim() == 1 and labels.size()[0] == self.num_classes \
                and cls_score.size()[0] == 1:
            # Fix a bug when training with soft labels and batch size is 1.
            # When using soft labels, `labels` and `cls_socre` share the same
            # shape.
            labels = labels.unsqueeze(0)
        if not self.multi_class and cls_score.size() != labels.size():
            top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(),
                                       labels.detach().cpu().numpy(),
                                       self.topk)
            for k, a in zip(self.topk, top_k_acc):
                losses[f'top{k}_acc'] = torch.tensor(
                    a, device=cls_score.device)
        elif self.multi_class and self.label_smooth_eps != 0:
            # Standard label smoothing for soft multi-class labels.
            labels = ((1 - self.label_smooth_eps) * labels +
                      self.label_smooth_eps / self.num_classes)
        loss_cls = self.loss_cls(cls_score, labels, domains, **kwargs)
        # loss_cls may be dictionary or single tensor
        if isinstance(loss_cls, dict):
            losses.update(loss_cls)
        else:
            losses['loss_cls'] = loss_cls
        return losses
dac48f3495e352413ea7eecbbc73238e87ac1463 | 1,169 | py | Python | better_tree/parser.py | antoniouaa/better-tree | d2030e23cf5eb2d94966741fb2917759afecd9fe | [
"MIT"
] | null | null | null | better_tree/parser.py | antoniouaa/better-tree | d2030e23cf5eb2d94966741fb2917759afecd9fe | [
"MIT"
] | 2 | 2021-11-05T12:24:39.000Z | 2021-11-29T14:37:45.000Z | better_tree/parser.py | antoniouaa/better-tree | d2030e23cf5eb2d94966741fb2917759afecd9fe | [
"MIT"
] | null | null | null | import sys
import argparse
PROG = "tree"
USAGE = "%(prog)s <Path> [options]"
DESCRIPTION = "Featureful tree utility in Python"
| 21.648148 | 55 | 0.558597 | import sys
import argparse
PROG = "tree"
USAGE = "%(prog)s <Path> [options]"
DESCRIPTION = "Featureful tree utility in Python"
class CLIParser(argparse.ArgumentParser):
    """Argument parser that prints the full help text and exits on any error."""

    def error(self, *args, **kwargs):
        # Show the complete help instead of argparse's terse error message,
        # then terminate with a non-zero exit status.
        self.print_help()
        raise SystemExit(1)
def assemble_parser() -> CLIParser:
    """Build and return the command-line parser for the tree utility.

    Returns:
        CLIParser: parser with a positional ``Path`` plus depth/include/
        exclude/file options.
    """
    parser = CLIParser(
        prog=PROG,
        usage=USAGE,
        description=DESCRIPTION,
    )
    parser.add_argument(
        "Path",
        help="The path to search in",
    )
    parser.add_argument(
        "--Depth",
        dest="depth",
        default=-1,  # -1 means unlimited depth
        type=int,
        help="Number of levels of depth",
    )
    parser.add_argument(
        "--Include",
        dest="include",
        default="*",
        type=str,
        help="Glob pattern to include in the search",
    )
    parser.add_argument(
        "--Exclude",
        dest="exclude",
        default="",
        type=str,
        help="Glob pattern to exclude from the search",
    )
    parser.add_argument(
        "--File",
        dest="file",
        action="store_true",
        help="Only show files",
    )
    # Bug fix: the default handler was registered under the misspelled name
    # "fuc"; register the conventional "func" as well, keeping "fuc" so any
    # existing caller that reads the old attribute still works.
    parser.set_defaults(func=parser.print_help, fuc=parser.print_help)
    return parser
| 947 | 20 | 72 |
ac31baa26e1d3c94895d8932cf2b535d4808d75c | 638 | py | Python | backend/src/gloader/xml/sax/drivers2/drv_pyexpat.py | anrl/gini4 | d26649c8c02a1737159e48732cf1ee15ba2a604d | [
"MIT"
] | 11 | 2019-03-02T20:39:34.000Z | 2021-09-02T19:47:38.000Z | backend/src/gloader/xml/sax/drivers2/drv_pyexpat.py | anrl/gini4 | d26649c8c02a1737159e48732cf1ee15ba2a604d | [
"MIT"
] | 29 | 2019-01-17T15:44:48.000Z | 2021-06-02T00:19:40.000Z | backend/src/gloader/xml/sax/drivers2/drv_pyexpat.py | anrl/gini4 | d26649c8c02a1737159e48732cf1ee15ba2a604d | [
"MIT"
] | 11 | 2019-01-28T05:00:55.000Z | 2021-11-12T03:08:32.000Z | """
SAX driver for the Pyexpat C module, based on xml.sax.expatdriver.
$Id: drv_pyexpat.py,v 1.6 2000/09/26 19:53:43 loewis Exp $
"""
# XXX: todo list of old drv_pyexpat.py, check whether any of these
# have been fixed.
# Todo on driver:
# - make it support external entities (wait for pyexpat.c)
# - enable configuration between reset() and feed() calls
# - support lexical events?
# - proper inputsource handling
# - properties and features
# Todo on pyexpat.c:
# - support XML_ExternalEntityParserCreate
# - exceptions in callouts from pyexpat to python code lose position info
from xml.sax.expatreader import create_parser
| 30.380952 | 74 | 0.741379 | """
SAX driver for the Pyexpat C module, based on xml.sax.expatdriver.
$Id: drv_pyexpat.py,v 1.6 2000/09/26 19:53:43 loewis Exp $
"""
# XXX: todo list of old drv_pyexpat.py, check whether any of these
# have been fixed.
# Todo on driver:
# - make it support external entities (wait for pyexpat.c)
# - enable configuration between reset() and feed() calls
# - support lexical events?
# - proper inputsource handling
# - properties and features
# Todo on pyexpat.c:
# - support XML_ExternalEntityParserCreate
# - exceptions in callouts from pyexpat to python code lose position info
from xml.sax.expatreader import create_parser
| 0 | 0 | 0 |
dd220118b8a22188a6ab29bd9ce5820c867c1eb0 | 17,321 | py | Python | HW1/Roomba/Roomba.py | AnarchyKitten/AI-HOMEWORK | 79e4aa9ce897c9d83e526128fbfe57433f31d2d4 | [
"MIT"
] | 2 | 2020-02-10T19:06:20.000Z | 2020-04-01T18:23:05.000Z | HW1/Roomba/Roomba.py | AnarchyKitten/AI-HOMEWORK | 79e4aa9ce897c9d83e526128fbfe57433f31d2d4 | [
"MIT"
] | null | null | null | HW1/Roomba/Roomba.py | AnarchyKitten/AI-HOMEWORK | 79e4aa9ce897c9d83e526128fbfe57433f31d2d4 | [
"MIT"
] | 1 | 2022-03-09T19:33:22.000Z | 2022-03-09T19:33:22.000Z | import time
import random
from pprint import pprint
global env
env= {
"BATTERY_LEVEL": 100,
"SPOT": False,
"SPOT_LOCATION": (3.0, 7.0),
"GENERAL": False,
"DUSTY_SPOT": False,
"DUSTY_SPOT_LOCATION": (0.0, -4.0),
"HOME_PATH": [],
"LOCATION": (-3.0, 5.0),
"FACING": (0, 1)
}
NODE_INITIALIZED = -1
NODE_SUCCEED = 0
NODE_RUNNING = 1
NODE_FAILED = 2
#class FailedTest(Task):
# def job(self, conditions):
# time.sleep(0)
# print("Failed")
# return False
if __name__=="__main__":
roomba=Roomba()
roomba.run(env)
| 29.307953 | 119 | 0.553836 | import time
import random
from pprint import pprint
# NOTE(review): `global` at module scope is a no-op; kept for byte-compat.
global env
# Simulated world state shared by every node in the behaviour tree.
env= {
    "BATTERY_LEVEL": 100,                # percent, clamped to 0..100
    "SPOT": False,                       # spot-cleaning requested?
    "SPOT_LOCATION": (3.0, 7.0),         # where the spot is
    "GENERAL": False,                    # general cleaning requested?
    "DUSTY_SPOT": False,                 # dusty spot detected?
    "DUSTY_SPOT_LOCATION": (0.0, -4.0),  # where the dusty spot is
    "HOME_PATH": [],                     # [turn_degrees, distance] steps home
    "LOCATION": (-3.0, 5.0),             # current (x, y) position
    "FACING": (0, 1)                     # unit heading vector
}
# Behaviour-tree node status codes (see Node.evaluate_state).
NODE_INITIALIZED = -1
NODE_SUCCEED = 0
NODE_RUNNING = 1
NODE_FAILED = 2
def Update_Battery(conditions, precent):
    """Adjust BATTERY_LEVEL in *conditions* by *precent* percentage points.

    The result is clamped to the inclusive range [0, 100].

    Args:
        conditions: mutable environment dict holding "BATTERY_LEVEL".
        precent: signed delta (negative values drain the battery).

    Returns:
        True always, so the call can stand in where a task result is expected.
    """
    battery = conditions["BATTERY_LEVEL"] + precent
    # Idiomatic clamp instead of an if/elif chain.
    conditions.update({"BATTERY_LEVEL": max(0, min(100, battery))})
    return True
def RandomChangeLocation(env, degree_x, degree_y):
    """Jitter LOCATION by up to +/-0.5*degree in each axis (rounded to 0.1)."""
    # Draw the x offset first, then the y offset - order matters for callers
    # that seed the RNG.
    dx = round(random.random() - 0.5, 1) * degree_x
    dy = round(random.random() - 0.5, 1) * degree_y
    x0, y0 = env["LOCATION"]
    env.update({"LOCATION": (round(x0 + dx, 1), round(y0 + dy, 1))})
def FindCompletePath(env, path):
    """Turn a displacement vector into turn/drive instructions.

    Args:
        env: environment dict; "FACING" (a unit axis vector) is read and
            updated as the simulated robot rotates.
        path: (dx, dy) displacement to travel, axis-aligned moves only.

    Returns:
        list of [clockwise_turn_degrees, distance] steps: first the x leg,
        then the y leg.
    """
    pathi = path[0]
    pathj = path[1]
    faci = env["FACING"][0]
    facj = env["FACING"][1]
    return_path = []
    # --- x leg: rotate so we face +/-x, then drive |pathi| ---
    if pathi != 0:
        if faci*pathi > 0:
            # Already facing the right way along x; pathi/faci == |pathi|.
            return_path.append([0, pathi/faci])
            env.update({"FACING": (faci, facj)})
            faci = faci
            facj = facj
        elif faci*pathi < 0:
            # Facing the opposite way along x: U-turn.
            return_path.append([180, -pathi/faci])
            env.update({"FACING": (-faci, facj)})
            faci = -faci
            facj = facj
        elif faci == 0:
            # Facing along y: one 90-degree turn picks the correct x heading.
            # (Only one of the four cases can fire: facj is +/-1 on entry and
            # each branch zeroes it, which disables the remaining tests.)
            if (pathi > 0) & (facj > 0):
                return_path.append([90, pathi])
                env.update({"FACING": (1, 0)})
                faci = 1
                facj = 0
            if (pathi > 0) & (facj < 0):
                return_path.append([-90, pathi])
                env.update({"FACING": (1, 0)})
                faci = 1
                facj = 0
            if (pathi < 0) & (facj > 0):
                return_path.append([-90, -pathi])
                env.update({"FACING": (-1, 0)})
                faci = -1
                facj = 0
            if (pathi < 0) & (facj < 0):
                return_path.append([90, -pathi])
                env.update({"FACING": (-1, 0)})
                faci = -1
                facj = 0
    # --- y leg: same logic with axes swapped ---
    if pathj != 0:
        if facj*pathj > 0:
            return_path.append([0, pathj/facj])
            env.update({"FACING": (faci, facj)})
            faci = faci
            facj = facj
        elif facj*pathj < 0:
            return_path.append([180, -pathj/facj])
            env.update({"FACING": (faci, -facj)})
            faci = faci
            facj = -facj
        elif facj == 0:
            if (pathj > 0) & (faci > 0):
                return_path.append([-90, pathj])
                env.update({"FACING": (0, 1)})
                faci = 0
                facj = 1
            if (pathj > 0) & (faci < 0):
                return_path.append([90, pathj])
                env.update({"FACING": (0, 1)})
                faci = 0
                facj = 1
            if (pathj < 0) & (faci > 0):
                return_path.append([90, -pathj])
                env.update({"FACING": (0, -1)})
                faci = 0
                facj = -1
            if (pathj < 0) & (faci < 0):
                return_path.append([-90, -pathj])
                env.update({"FACING": (0, -1)})
                faci = 0
                facj = -1
    # Record the final heading (redundant with the per-branch updates above).
    env.update({"FACING": (faci, facj)})
    return return_path
class Node:
    """Base class for all behaviour-tree nodes.

    Carries a parent link, two (currently unused) status messages, and a
    NodeStatus drawn from the NODE_* constants.
    """

    # Class-level defaults; every instance overwrites them in __init__.
    ParentNode = None
    TrueMessage = ""
    FalseMessage = ""
    NodeStatus = NODE_INITIALIZED

    def __init__(self, TrueMessage, FalseMessage, ParentNode):
        self.TrueMessage = TrueMessage
        self.FalseMessage = FalseMessage
        self.ParentNode = ParentNode
        self.NodeStatus = NODE_INITIALIZED

    def SetParent(self, ParentNode):
        """Re-attach this node under a new parent."""
        self.ParentNode = ParentNode

    def run(self, conditions):
        """Execute the node; subclasses override (default no-op)."""
        pass

    def evaluate_state(self):
        """Return the NODE_* status recorded by the last run()."""
        return self.NodeStatus
class Decorator(Node):
    """Node that wraps exactly one child and transforms its result."""

    # Class-level default; instances set their own child in __init__.
    ChildNode = None

    def __init__(self, TrueMessage, FalseMessage, ParentNode, ChildNode):
        self.TrueMessage = TrueMessage
        self.FalseMessage = FalseMessage
        self.ParentNode = ParentNode
        self.ChildNode = ChildNode
        self.NodeStatus = NODE_INITIALIZED

    def set_child_node(self, NewNode):
        """Replace the wrapped child node."""
        self.ChildNode = NewNode
class Negation(Decorator):
    """Decorator that inverts its child's outcome (success <-> failure)."""

    def run(self, conditions):
        self.NodeStatus = NODE_RUNNING
        self.ChildNode.run(conditions)
        # NODE_SUCCEED == 0 and NODE_FAILED == 2, so ``2 - status`` swaps
        # them while leaving NODE_RUNNING (1) unchanged.
        i_status = 2 - self.ChildNode.NodeStatus
        self.NodeStatus = i_status
        return True
class Until(Decorator):
    """Decorator that re-runs its child until it succeeds (or fails).

    NOTE(review): if the child never reaches the terminating status, run()
    loops forever - there is no iteration or time cap.
    """

    # Exactly one of these is True after __init__.
    UntilSuccess = False
    UntilFail = False

    def __init__(self, TrueMessage, FalseMessage, ParentNode, ChildNode, UntilFail):
        self.TrueMessage = TrueMessage
        self.FalseMessage = FalseMessage
        self.ParentNode = ParentNode
        self.ChildNode = ChildNode
        self.NodeStatus = NODE_INITIALIZED
        # UntilFail selects the mode; the flags are kept mutually exclusive.
        if (UntilFail == True):
            self.UntilFail = True
            self.UntilSuccess = False
        elif (UntilFail == False):
            self.UntilFail = False
            self.UntilSuccess = True

    def run(self, conditions):
        if (self.UntilSuccess):
            self.NodeStatus = NODE_RUNNING
            # Repeat the child until it reports success.
            while (True):
                self.ChildNode.run(conditions)
                i_status = self.ChildNode.evaluate_state()
                if i_status == NODE_SUCCEED:
                    break
            self.NodeStatus = NODE_SUCCEED
            return True
        elif (self.UntilFail):
            self.NodeStatus = NODE_RUNNING
            # Repeat the child until it reports failure; the Until node
            # itself still finishes with NODE_SUCCEED.
            while (True):
                self.ChildNode.run(conditions)
                i_status = self.ChildNode.evaluate_state()
                if i_status == NODE_FAILED:
                    break
            self.NodeStatus = NODE_SUCCEED
            return True
class Timer(Decorator):
    """Decorator that runs its child with a time budget of ``Interval`` seconds.

    The child's ``run`` must accept ``(conditions, interval)`` - see
    ``Clean_Spot.run`` for the expected signature.
    """

    # Time budget, in seconds, handed to the child on every tick.
    Interval = 0

    def __init__(self, TrueMessage, FalseMessage, ParentNode, ChildNode, Interval):
        self.TrueMessage = TrueMessage
        self.FalseMessage = FalseMessage
        self.ParentNode = ParentNode
        self.ChildNode = ChildNode
        self.NodeStatus = NODE_INITIALIZED
        self.Interval = Interval

    def run(self, conditions):
        self.NodeStatus = NODE_RUNNING
        self.ChildNode.run(conditions, self.Interval)
        # Bug fix: this used to compute ``2 - ChildNode.run(...)`` where run()
        # returns True, i.e. always NODE_RUNNING, so a Timer never reported
        # success.  Mirror the other nodes and read the child's final state.
        self.NodeStatus = self.ChildNode.evaluate_state()
        return True
class Composite(Node):
    """Node with an ordered list of children (base for Sequence/Selection)."""

    # NOTE(review): mutable class-level default; harmless only because every
    # instance rebinds it in __init__ / set_child_nodes.
    ChildNodes = []

    def __init__(self, TrueMessage, FalseMessage, ParentNode, ChildNodes):
        self.TrueMessage = TrueMessage
        self.FalseMessage = FalseMessage
        self.ParentNode = ParentNode
        self.ChildNodes = ChildNodes
        self.NodeStatus = NODE_INITIALIZED

    def add_child_node(self, NewNode):
        """Append one child to the end of the list."""
        self.ChildNodes.append(NewNode)

    def set_child_nodes(self, NewNodes):
        """Replace all children with a copy of *NewNodes*."""
        self.ChildNodes = []
        for i in NewNodes:
            self.ChildNodes.append(i)
class Sequence(Composite):
    """Composite that runs children in order and fails on the first failure."""

    def run(self, conditions):
        self.NodeStatus = NODE_RUNNING
        for child in self.ChildNodes:
            child.run(conditions)
            # Abort the sequence as soon as one child reports failure.
            if child.evaluate_state() == NODE_FAILED:
                self.NodeStatus = NODE_FAILED
                break
        if self.NodeStatus != NODE_FAILED:
            self.NodeStatus = NODE_SUCCEED
        return True
class Selection(Composite):
    """Composite that succeeds on the first child success (fallback node)."""

    def run(self, conditions):
        self.NodeStatus = NODE_RUNNING
        for child in self.ChildNodes:
            child.run(conditions)
            # Stop as soon as one alternative works.
            if child.evaluate_state() == NODE_SUCCEED:
                self.NodeStatus = NODE_SUCCEED
                break
        if self.NodeStatus != NODE_SUCCEED:
            self.NodeStatus = NODE_FAILED
        return True
class Priority(Composite):
    """Composite that tries children in priority order (1 = highest)."""

    # NOTE(review): mutable class-level default; instances rebind in __init__.
    PriorityMap = {}

    def __init__(self, TrueMessage, FalseMessage, ParentNode, ChildNodes, PriorityMap):
        self.TrueMessage = TrueMessage
        self.FalseMessage = FalseMessage
        self.ParentNode = ParentNode
        self.ChildNodes = ChildNodes
        self.NodeStatus = NODE_INITIALIZED
        self.PriorityMap = PriorityMap

    def view_priority_map(self):
        """Debug helper: pretty-print the priority -> node mapping."""
        pprint(self.PriorityMap)

    def add_child_node(self, NewNode, Priority):
        """Insert a child at the given priority, shifting lower ones down."""
        # Walk from the end of the map down to the insertion slot, moving
        # each existing entry one priority lower (higher number).
        for i in range(len(self.PriorityMap) + 1, Priority, -1):
            self.PriorityMap.update({i: self.PriorityMap[i - 1]})
        self.PriorityMap.update({Priority: NewNode})
        self.ChildNodes.append(NewNode)

    def run(self, conditions):
        self.NodeStatus = NODE_RUNNING
        # Try children from priority 1 upward; first success wins.
        for i in range(1, len(self.PriorityMap)+1):
            node = self.PriorityMap[i]
            node.run(conditions)
            i_status = node.evaluate_state()
            if i_status == NODE_SUCCEED:
                self.NodeStatus = NODE_SUCCEED
                break
        if self.NodeStatus != NODE_SUCCEED:
            self.NodeStatus = NODE_FAILED
        return True
class Condition(Node):
    """Leaf node whose status mirrors the boolean result of judge()."""

    def judge(self, conditions):
        """Evaluate the condition; subclasses override. Default: holds."""
        return True

    def run(self, conditions):
        # Keep the strict ``== True`` comparison: only a literal True counts
        # as success, any other return value is treated as failure.
        if self.judge(conditions) == True:
            self.NodeStatus = NODE_SUCCEED
        else:
            self.NodeStatus = NODE_FAILED
        return True
class Judge_Battery(Condition):
    """Condition: holds when the battery is at 30% or below."""

    def judge(self, conditions):
        low = conditions["BATTERY_LEVEL"] <= 30
        if low:
            print("Battery lower than 30%!")
        return low
class Judge_Spot(Condition):
    """Condition: holds when a spot-cleaning request is pending."""

    def judge(self, conditions):
        needed = conditions["SPOT"] == True
        if needed:
            print("Need Spot Cleaning!")
        return needed
class Judge_General(Condition):
    """Condition: holds when a general-cleaning request is pending."""

    def judge(self, conditions):
        needed = conditions["GENERAL"] == True
        if needed:
            print("Need General Cleaning!")
        return needed
class Judge_Dusty_Spot(Condition):
    """Condition: holds when a dusty spot has been detected."""

    def judge(self, conditions):
        found = conditions["DUSTY_SPOT"] == True
        if found:
            print("Found Dusty Spot!")
        return found
class Task(Node):
    """Leaf node that performs an action via job()."""

    def job(self, conditions):
        """Do the work; subclasses override. Return True/False for
        success/failure."""
        return True

    def run(self, conditions):
        self.NodeStatus = NODE_RUNNING
        job_status = self.job(conditions)
        if (job_status):
            self.NodeStatus = NODE_SUCCEED
        elif job_status == False:
            self.NodeStatus = NODE_FAILED
        # NOTE: a job returning None (or any falsy value other than False)
        # leaves the node stuck in NODE_RUNNING - deliberate three-state logic.
        return True
class Find_Home(Task):
    """Leaf task that plans the turn/move sequence stored in HOME_PATH."""

    def job(self, conditions):
        # NOTE(review): the path is planned *along* the current LOCATION
        # vector rather than back toward the origin; Go_Home nevertheless
        # resets LOCATION to (0, 0) afterwards - confirm intent.
        conditions.update({"HOME_PATH": FindCompletePath(conditions, conditions["LOCATION"])})
        time.sleep(2)
        print("Finding Home!")
        return True
class Go_Home(Task):
    """Leaf task that replays HOME_PATH and parks the robot at (0, 0)."""

    def job(self, conditions):
        print("Going Home!")
        # Each path entry is [clockwise_turn_degrees, distance_metres].
        for i in conditions["HOME_PATH"]:
            print("Turn {} degree clockwise...".format(i[0]))
            time.sleep(1)
            print("Go {} metres ahead...".format(i[1]))
            time.sleep(2)
        # Consume the path and snap LOCATION to the dock.
        conditions.update({"HOME_PATH": []})
        conditions.update({"LOCATION": (0, 0)})
        print("Back To Home!")
        return True
class Dock(Task):
    """Leaf task that docks the robot and recharges the battery to 100%."""

    def job(self, conditions):
        print("Docked!")
        Update_Battery(conditions, 100)  # +100 is clamped to a full charge
        time.sleep(4)
        print("Charge Finished!")
        time.sleep(1)
        return True
class Clean_Spot(Task):
    """Leaf task: drive to SPOT_LOCATION, then scrub there for *interval* s.

    Unlike other tasks, job()/run() take an extra ``interval`` argument,
    supplied by the wrapping Timer decorator.
    """

    def job(self, conditions, interval):
        # TODO(review): the navigate-then-scrub logic below is duplicated in
        # Clean_Dusty_Spot.job - consider extracting a shared helper.
        print("Spot detected at {}".format(conditions["SPOT_LOCATION"]))
        # Plan an axis-aligned route from the current position to the spot.
        path_x = conditions["SPOT_LOCATION"][0] - conditions["LOCATION"][0]
        path_y = conditions["SPOT_LOCATION"][1] - conditions["LOCATION"][1]
        to_spot_path = FindCompletePath(conditions, (path_x, path_y))
        time.sleep(1)
        for i in to_spot_path:
            print("Turn {} degree clockwise...".format(i[0]))
            time.sleep(1)
            print("Go {} metres ahead...".format(i[1]))
            time.sleep(2)
        # Scrub around the spot until the time budget runs out,
        # draining 1% battery per second.
        time0 = time.time()
        while (time.time() <= time0 + interval):
            print("Cleaning Spot")
            time.sleep(1)
            RandomChangeLocation(conditions, 1, 1)
            Update_Battery(conditions, -1)
        return True

    def run(self, conditions, interval):
        # Same three-state logic as Task.run, but forwarding the interval.
        self.NodeStatus = NODE_RUNNING
        job_status = self.job(conditions, interval)
        if (job_status):
            self.NodeStatus = NODE_SUCCEED
        elif job_status == False:
            self.NodeStatus = NODE_FAILED
        return True
class Clean_Dusty_Spot(Clean_Spot):
    """Clean_Spot variant targeting DUSTY_SPOT_LOCATION (inherits run())."""

    def job(self, conditions, interval):
        # TODO(review): near-duplicate of Clean_Spot.job; also note the
        # message below says "Spot detected" although this is the dusty spot.
        print("Spot detected at {}".format(conditions["DUSTY_SPOT_LOCATION"]))
        path_x = conditions["DUSTY_SPOT_LOCATION"][0] - conditions["LOCATION"][0]
        path_y = conditions["DUSTY_SPOT_LOCATION"][1] - conditions["LOCATION"][1]
        to_spot_path = FindCompletePath(conditions, (path_x, path_y))
        time.sleep(1)
        for i in to_spot_path:
            print("Turn {} degree clockwise...".format(i[0]))
            time.sleep(1)
            print("Go {} metres ahead...".format(i[1]))
            time.sleep(2)
        # Scrub for the allotted interval, 1% battery per second.
        time0 = time.time()
        while (time.time() <= time0 + interval):
            print("Cleaning Dusty Spot")
            time.sleep(1)
            RandomChangeLocation(conditions, 1, 1)
            Update_Battery(conditions, -1)
        return True
class Done_Spot(Task):
    """Leaf task that clears a completed spot-cleaning request."""

    def job(self, conditions):
        print("Done Spot!")
        # Reset the request flag and forget the handled location.
        conditions.update({"SPOT": False})
        conditions.update({"SPOT_LOCATION": (0, 0)})
        time.sleep(1)
        return True
class Done_Dusty_Spot(Task):
    """Leaf task that clears a completed dusty-spot request."""

    def job(self, conditions):
        # Bug fix: this printed "Done Spot!", copy-pasted from Done_Spot;
        # report the dusty-spot completion instead.
        print("Done Dusty Spot!")
        conditions.update({"DUSTY_SPOT": False})
        conditions.update({"DUSTY_SPOT_LOCATION": (0, 0)})
        time.sleep(1)
        return True
class Clean(Task):
    """Leaf task performing one step of general cleaning (random wander)."""

    def job(self, conditions):
        time.sleep(1)
        # Wander up to +/-1.0 in each axis; each step costs 10% battery.
        RandomChangeLocation(conditions, 2, 2)
        Update_Battery(conditions, -10)
        print("Cleaning!")
        return True
class Done_General(Task):
    """Leaf task that clears the general-cleaning request flag."""

    def job(self, conditions):
        print("Done General!")
        conditions.update({"GENERAL": False})
        time.sleep(1)
        return True
#class FailedTest(Task):
# def job(self, conditions):
# time.sleep(0)
# print("Failed")
# return False
class Do_Nothing(Task):
    """Fallback leaf: idle for a while, still draining a little battery."""

    def job(self, conditions):
        time.sleep(5)
        Update_Battery(conditions, -5)
        print("Doing Nothing...")
        return True
class Roomba:
    """Behaviour tree for the simulated roomba.

    Tree layout (class-level singletons, shared by all Roomba instances):
      p1 Priority
        1. s1 Sequence: battery low? -> find home -> go home -> dock
        2. s2 Selection:
             s2_1 Sequence: spot requested? -> Timer(20s, Clean_Spot) -> done
             s2_2 Sequence: general requested? ->
                  s2_2_1 Sequence:
                    uf1 Until-fail over s2_2_1_1 Sequence:
                        Negation(battery low) ->
                        Selection(dusty? -> Timer(35s, Clean_Dusty_Spot)
                                  -> done dusty | Clean)
                    -> done general
        3. t9 Do_Nothing
    """

    # --- leaf and composite construction (parents wired, children below) ---
    p1 = Priority("", "", None, [], {})
    s1 = Sequence("", "", p1, [])
    j1 = Judge_Battery("", "", s1)
    t1 = Find_Home("", "", s1)
    t2 = Go_Home("", "", s1)
    t3 = Dock("", "", s1)
    s2 = Selection("", "", p1, [])
    s2_1 = Sequence("", "", s2, [])
    j2 = Judge_Spot("", "", s2_1)
    timer_1 = Timer("", "", s2_1, None, 20)
    t4 = Clean_Spot("", "", timer_1)
    t5 = Done_Spot("", "", s2_1)
    s2_2 = Sequence("", "", s2, [])
    j3 = Judge_General("", "", s2_2)
    s2_2_1 = Sequence("", "", s2_2, [])
    uf1 = Until("", "", s2_2_1, [], UntilFail=True)
    s2_2_1_1 = Sequence("", "", s2_2_1, [])
    n1 = Negation("", "", s2_2_1_1, [])
    j4 = Judge_Battery("", "", n1)
    s2_2_1_1_1 = Selection("", "", s2_2_1_1, [])
    s2_2_1_1_1_1 = Sequence("", "", s2_2_1_1_1, [])
    j5 = Judge_Dusty_Spot("", "", s2_2_1_1_1_1)
    timer_2 = Timer("", "", s2_2_1_1_1_1, None, 35)
    t6 = Clean_Dusty_Spot("", "", timer_2)
    t10 = Done_Dusty_Spot("", "", timer_2)
    t7 = Clean("", "", s2_2_1_1_1)
    t8 = Done_General("", "", s2_2_1)
    t9 = Do_Nothing("", "", p1)
    # --- child wiring (order defines execution order within composites) ---
    p1.add_child_node(s1, 1)
    p1.add_child_node(s2, 2)
    p1.add_child_node(t9, 3)
    s1.set_child_nodes([j1, t1, t2, t3])
    s2.set_child_nodes([s2_1, s2_2])
    s2_1.set_child_nodes([j2, timer_1, t5])
    timer_1.set_child_node(t4)
    s2_2.set_child_nodes([j3, s2_2_1])
    s2_2_1.set_child_nodes([uf1, t8])
    uf1.set_child_node(s2_2_1_1)
    s2_2_1_1.set_child_nodes([n1, s2_2_1_1_1])
    n1.set_child_node(j4)
    s2_2_1_1_1.set_child_nodes([s2_2_1_1_1_1, t7])
    s2_2_1_1_1_1.set_child_nodes([j5, timer_2, t10])
    timer_2.set_child_node(t6)

    def run(self,env):
        """Endlessly roll a random scenario into *env* and tick the tree."""
        while (True):
            rand = random.random()
            # rand > 0.6: spot cleaning at a random location.
            if rand > 0.6:
                env.update({"SPOT": True})
                env.update({"SPOT_LOCATION": (round(random.random() * 10 - 5, 1), round(random.random() * 10 - 5, 1))})
                env.update({"GENERAL": False})
            # 0.2 < rand <= 0.6: general cleaning, maybe with a dusty spot.
            elif rand >0.2:
                env.update({"SPOT": False})
                env.update({"GENERAL": True})
                if rand > 0.4:
                    env.update({"DUSTY_SPOT": True})
                    env.update({"DUSTY_SPOT_LOCATION": (
                        round(random.random() * 10 - 5, 1), round(random.random() * 10 - 5, 1))})
                else:
                    env.update({"DUSTY_SPOT": False})
            # rand <= 0.2: nothing requested.
            else:
                env.update({"SPOT":False})
                env.update({"GENERAL":False})
            print("--------------------")
            pprint(env)
            print("--------------------")
            self.p1.run(env)
if __name__=="__main__":
    # Entry point: build the behaviour tree and run the simulation forever.
    roomba=Roomba()
    roomba.run(env)
| 13,028 | 2,449 | 1,242 |
37546ff764e016562d941b3ee20057f64c4e4b6c | 387 | py | Python | 2020/noxfile.py | danoscarmike/adventofcode | 1395dd8dd684e4b23697c53e445b2114d64c2791 | [
"MIT"
] | null | null | null | 2020/noxfile.py | danoscarmike/adventofcode | 1395dd8dd684e4b23697c53e445b2114d64c2791 | [
"MIT"
] | null | null | null | 2020/noxfile.py | danoscarmike/adventofcode | 1395dd8dd684e4b23697c53e445b2114d64c2791 | [
"MIT"
] | null | null | null | import nox
BLACK_PATHS = ["helpers", "."]
@nox.session(python="3.8")
def blacken(session):
"""Run black.
Format code to uniform standard.
"""
session.install("black")
session.run(
"black",
*BLACK_PATHS,
)
@nox.session(python="3.8")
def unit(session):
"""Run the unit test suite."""
session.install("pytest")
session.run("pytest")
| 16.826087 | 36 | 0.591731 | import nox
BLACK_PATHS = ["helpers", "."]
@nox.session(python="3.8")
def blacken(session):
    """Run black.

    Format every path in BLACK_PATHS to the uniform black standard.
    """
    session.install("black")
    session.run(
        "black",
        *BLACK_PATHS,
    )
@nox.session(python="3.8")
def unit(session):
    """Run the unit test suite with pytest in an isolated session env."""
    session.install("pytest")
    session.run("pytest")
| 0 | 0 | 0 |
a5a9574abdde11cd0e70b802e9c87737ec3086a4 | 4,303 | py | Python | aioevent/emitter.py | travigd/aioevent | 1338e052664d0bb0767a5809f2c4fe5903b9a78f | [
"MIT"
] | null | null | null | aioevent/emitter.py | travigd/aioevent | 1338e052664d0bb0767a5809f2c4fe5903b9a78f | [
"MIT"
] | null | null | null | aioevent/emitter.py | travigd/aioevent | 1338e052664d0bb0767a5809f2c4fe5903b9a78f | [
"MIT"
] | null | null | null | """
Provides EventEmitter class.
"""
import asyncio
from functools import partial
from typing import (
Callable,
Dict,
Type,
Union,
Coroutine,
Set,
)
from .event import BaseEvent
from .subscription import Subscription
# pylint: disable=invalid-name
EventCallbackType = Callable[[BaseEvent], Union[None, Coroutine[None, None, None]]]
class EventEmitter:
"""
ABC for a class whose instances emit events.
:ivar _event_listeners: A dictionary whose keys are subclasses of BaseEvent
and whose values are lists of event handlers. Event handlers should
be callables that accept a single argument: the event being emitted.
"""
_subscriptions: Dict[Type[BaseEvent], Set[Subscription]]
def listen(self, event_type: Type[BaseEvent], callback: EventCallbackType) -> "Subscription":
"""
Register a callback to be fired when an event is emitted.
:param event_type: The type of event (subclass of :py:class:`BaseEvent`)
to listen for.
:param callback: The callback to trigger when the event is emitted;
should accept a single parameter which is the instance of
`event_type` that was emitted.
:return:
"""
if event_type not in self._subscriptions:
self._subscriptions[event_type] = set()
subscription = Subscription(
callback,
unsubscribe=partial(self._remove_subscription, event_type),
loop=self._loop,
)
self._subscriptions[event_type].add(subscription)
return subscription
def emit(self, event: BaseEvent):
"""
Emit an event.
:param event: The event to be omitted.
:return:
"""
if not isinstance(event, BaseEvent):
raise ValueError(f"Events must be subclasses of BaseEvent (got {repr(event)}).")
if event.target is None:
event.target = self
subscriptions = self._get_subscriptions_for_event_type(type(event))
for subscription in subscriptions:
subscription.invoke(event)
def proxy(self, emitter):
"""
Proxy events from another emitter.
Useful in a grandparent-parent-child type pattern where the grandparent
cares about events emitted in the child.
:param emitter:
:return:
"""
emitter.listen(BaseEvent, self._proxy_event)
def _proxy_event(self, event):
"""
Emit an event via proxy.
:param event: The event to proxy.
:return:
"""
self.emit(event)
def _get_subscriptions_for_event_type(
self, event_type: Type,
) -> Set["Subscription"]:
"""
Get all handlers for an event type.
This method will walk up the subclass graph so that a handler
for `SomeEventType` will also handle events of type
`SubclassOfSomeEventType`.
:param event_type: The event type to find handlers for.
:return: A list of event handlers.
"""
# Note: See `test_event_multiple_inheritance.py` for a description about
# why we need to use a set here (rather than a list).
handlers = set()
if not issubclass(event_type, BaseEvent):
raise ValueError(f"Event classes must extend BaseEvent (got {repr(event_type)}).")
if event_type in self._subscriptions:
handlers.update(self._subscriptions[event_type])
for event_supertype in event_type.__bases__:
if not issubclass(event_supertype, BaseEvent):
continue
handlers.update(self._get_subscriptions_for_event_type(event_supertype))
return handlers
| 33.88189 | 97 | 0.633279 | """
Provides EventEmitter class.
"""
import asyncio
from functools import partial
from typing import (
Callable,
Dict,
Type,
Union,
Coroutine,
Set,
)
from .event import BaseEvent
from .subscription import Subscription
# pylint: disable=invalid-name
EventCallbackType = Callable[[BaseEvent], Union[None, Coroutine[None, None, None]]]
class EventEmitter:
    """
    ABC for a class whose instances emit events.

    :ivar _subscriptions: A dictionary whose keys are subclasses of BaseEvent
        and whose values are sets of Subscription objects wrapping the
        registered event handlers.  (The attribute was formerly documented
        as ``_event_listeners``.)
    """
    _subscriptions: Dict[Type[BaseEvent], Set[Subscription]]

    def __init__(
            self,
            *args,
            loop: "asyncio.AbstractEventLoop" = None,
            **kwargs,
    ):
        """
        Initialize the emitter.

        :param loop: Event loop used to schedule handler invocations;
            defaults to ``asyncio.get_event_loop()`` when omitted.
        """
        super().__init__(*args, **kwargs)
        self._loop = loop or asyncio.get_event_loop()
        self._subscriptions = {}

    def listen(self, event_type: Type[BaseEvent], callback: EventCallbackType) -> "Subscription":
        """
        Register a callback to be fired when an event is emitted.

        :param event_type: The type of event (subclass of :py:class:`BaseEvent`)
            to listen for.
        :param callback: The callback to trigger when the event is emitted;
            should accept a single parameter which is the instance of
            `event_type` that was emitted.
        :return: The Subscription wrapping the callback; keep it to
            unsubscribe later.
        """
        if event_type not in self._subscriptions:
            self._subscriptions[event_type] = set()
        subscription = Subscription(
            callback,
            unsubscribe=partial(self._remove_subscription, event_type),
            loop=self._loop,
        )
        self._subscriptions[event_type].add(subscription)
        return subscription

    def emit(self, event: BaseEvent):
        """
        Emit an event.

        Handlers registered for the event's class or any of its BaseEvent
        superclasses are invoked.

        :param event: The event to be omitted.
        :return:
        """
        if not isinstance(event, BaseEvent):
            raise ValueError(f"Events must be subclasses of BaseEvent (got {repr(event)}).")
        # First emitter to see the event claims it as the target.
        if event.target is None:
            event.target = self
        subscriptions = self._get_subscriptions_for_event_type(type(event))
        for subscription in subscriptions:
            subscription.invoke(event)

    def proxy(self, emitter):
        """
        Proxy events from another emitter.

        Useful in a grandparent-parent-child type pattern where the grandparent
        cares about events emitted in the child.

        :param emitter:
        :return:
        """
        emitter.listen(BaseEvent, self._proxy_event)

    def _proxy_event(self, event):
        """
        Emit an event via proxy.

        :param event: The event to proxy.
        :return:
        """
        self.emit(event)

    def _get_subscriptions_for_event_type(
            self, event_type: Type,
    ) -> Set["Subscription"]:
        """
        Get all handlers for an event type.

        This method will walk up the subclass graph so that a handler
        for `SomeEventType` will also handle events of type
        `SubclassOfSomeEventType`.

        :param event_type: The event type to find handlers for.
        :return: A set of subscriptions.
        """
        # Note: See `test_event_multiple_inheritance.py` for a description about
        # why we need to use a set here (rather than a list).
        handlers = set()
        if not issubclass(event_type, BaseEvent):
            raise ValueError(f"Event classes must extend BaseEvent (got {repr(event_type)}).")
        if event_type in self._subscriptions:
            handlers.update(self._subscriptions[event_type])
        # Recurse into every BaseEvent-derived base class.
        for event_supertype in event_type.__bases__:
            if not issubclass(event_supertype, BaseEvent):
                continue
            handlers.update(self._get_subscriptions_for_event_type(event_supertype))
        return handlers

    def _remove_subscription(self, event_type: Type["BaseEvent"], subscription: "Subscription"):
        """
        Detach a subscription; used as the Subscription's unsubscribe hook.
        """
        if event_type in self._subscriptions:
            if subscription in self._subscriptions[event_type]:
                self._subscriptions[event_type].remove(subscription)
| 491 | 0 | 54 |
f5058bb46b1b8ce6b5bbfce558362f47d32b3f33 | 9,251 | py | Python | sandbox/src/CEDARDispersionTXTFixer.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 5 | 2016-05-28T14:12:28.000Z | 2021-04-22T10:23:12.000Z | sandbox/src/CEDARDispersionTXTFixer.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | null | null | null | sandbox/src/CEDARDispersionTXTFixer.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 2 | 2015-07-13T10:04:10.000Z | 2021-04-22T10:23:23.000Z | #! /usr/bin/env python
'''
A script to fix CEDAR's ASCII dispersion solution output.
ABOUT:
This script can be used to modify .txt files that CEDAR produce when
dispersion solution output has been saved.
USAGE:
CEDARDispersionTXTFixer.py [-h] [-v] [-r] [-c] [-f] string [-o] srtring
where:
[-h] prints help
[-v] verbose mode on
[-r] creates an output file called dispersion.txt which is
in format that can be used as an input to update_*_disp.pro scripts.
[-c] compares results to TV06 six data found from CDBS
[-f] user defined string that is used to search text files to be processed.
User can specify wild cards e.g. "*wcal.txt".
[-o] name of the output file. This does not change the name of the output
file that optional argument -c produces.
DEPENDS:
Python 2.5 or 2.6 (not version 3.x compatible)
Pyfits
EXITSTA:
0: No errors
AUTHOR :
Sami-Matias Niemi, for STScI
HISTORY:
May 15 2009: Initial Version
@author: Sami-Matias Niemi
'''
__author__ = 'Sami-Matias Niemi'
__version__ = '0.9'
#Processes command line arguments
#Main program begins
if __name__ == '__main__':
import glob
import sys
import pyfits as PF
#command line arguments
(opts, args) = process_args()
if checkZeroArguments(opts) == False:
process_args(True)
sys.exit(-9)
#verbose
verbose = False
if opts.verbose is True: verbose = True
#CDBS TV06 files
cdbspath = '/grp/hst/cdbs/lref/'
fuvfile = 't2k1224el_disp.fits'
nuvfile = 't2917094l_disp.fits'
#search string
if opts.find is not None: search = opts.find
else: search = '*wcal.txt'
if verbose: print '\nWill use string: %s to indetify ASCII files containing dispersion solutions' % search
#finds all files containing search string
txtfiles = glob.glob(search)
#outputfile
if opts.output is not None:
out = open(opts.output, 'w')
if opts.comparison is True: outcomp = open(opts.output + '.comparison', 'w')
if opts.reference is not None: outref = open('dispersion.txt', 'w')
if verbose: print '\n Output will be written to file %s' % opts.output
outfile = opts.output
else:
outfile = 'CEDARDispersionTXTFixer.output'
out = open(outfile, 'w')
if opts.comparison is True: outcomp = open('CEDARDispersionTXTFixer.output.comparison', 'w')
if opts.reference is not None: outref = open('dispersion.txt', 'w')
if verbose: print '\n You did not specify output filename. Will use default CEDARDispersionTXTFixer.output'
#main loop
for file in txtfiles:
if verbose: print '\nTrying to open file: %s' % file
try:
fulldata = open(file, 'r').readlines()
except:
if verbose: print '\nERROR: Cannot read file %s' % file
pass
#splits the file
sdata = [line.strip().split() for line in fulldata]
filename = sdata[0][0]
#gets all coefficients
coeffs = []
RMS = ''
if verbose: print '\nSearching for coefficient values...'
for line in sdata:
try:
if line[0].startswith('C'):
if verbose: print '\nFound:', line
coeffs.append([line[2], line[4]])
if line[0].startswith('RMS'):
if verbose: print '\nRMS of fit was %s' % line[4]
RMS = line[4]
except: pass
if verbose: print '\nTrying to find equivalent (%s) FITS file for header information...' % filename
#Tries to open equivalent FITS file
try:
hdr0 = PF.open(filename)[0].header
except:
if verbose: print '\nERROR: Could not open %s. Will not get header information.' % filename
pass
#Tries to get all required header keywords
try:
cenwav = hdr0['CENWAVE']
stripe = hdr0['SEGMENT']
grating = hdr0['OPT_ELEM']
fppos = hdr0['FPPOS']
aperture = hdr0['APERTURE']
except:
if verbose: print '\nERROR: Could not read all required header keywords of %s' % filename
cenwave = 'NA'
strip = 'XNA'
grating = 'NA'
fppos = 'NA'
aperture = 'NA'
#comparison to CDBS
#should be rewritten, as it now opens the file for nothing on every round...
if opts.comparison is True:
if verbose: print '\nWill try to compare calculated results to the dispersion solution of TV06 data.'
try:
if stripe.startswith('N'):
if verbose: print 'Trying to open %s' % cdbspath + nuvfile
CDBSdata = PF.open(cdbspath + nuvfile)[1].data
CDBScoeff = [line[5] for line in CDBSdata
if line[0].strip() == stripe and
line[1].strip() == grating and
line[2].strip() == aperture and
line[3] == cenwav]
if stripe.startswith('F'):
if verbose: print 'Trying to open %s' % cdbspath + fuvfile
CDBSdata = PF.open(cdbspath + fuvfile)[1].data
CDBScoeff = [line[5] for line in CDBSdata
if line[0].strip() == stripe and
line[1].strip() == grating and
line[2].strip() == aperture and
line[3] == cenwav]
except:
if verbose: print '\nERROR: Cannot open CDBS file...'
CDBScoeff = 'NA'
#lets calculate delta
delta = []
for new, old in zip(coeffs, CDBScoeff[0]):
delta.append(float(new[0]) - old)
#some quick results to screen
if verbose: print stripe, grating, aperture, cenwav, fppos, coeffs, CDBScoeff, delta
#output
cfs = ''
CDBSfs = ''
deltas = ''
for x in coeffs:
cfs += x[0] + ' ' + x[1] + ' '
if opts.comparison is True:
for x in CDBScoeff[0]: CDBSfs += str(x) + ' '
for x in delta: deltas += str(x) + ' '
else:
CDBSfs = ' '
#normal outputs
if verbose: print '\nWill output data to %s' % outfile
out.write(stripe + ' ' + grating + ' ' + aperture + ' ' + str(cenwav) +
' ' + str(fppos) + ' ' + cfs + CDBSfs + deltas + '\n')
#output in reference file format
if opts.reference is True:
if verbose: print '\nWill output data to dispersion.txt'
outref.write(stripe + ' ' + grating + ' ' + aperture + ' ' + str(cenwav) +
' ' + str(fppos) + ' ' + coeffs[0][0] + ' ' + coeffs[1][0] + ' ' + coeffs[2][0] +
' ' + coeffs[3][0] + '\n')
#comparison output
if opts.comparison is True:
if verbose: print '\nWill output data to %s' % outfile + '.comparison'
outcomp.write(stripe + ' ' + grating + ' ' + aperture + ' ' + str(cenwav) +
' ' + str(fppos) + ' ' + deltas + '\n')
#closes open files
out.close()
if opts.comparison: outcomp.close()
if opts.reference: outref.close()
#exits
if verbose: print '\n\nScripts finished...'
sys.exit(0)
| 38.227273 | 125 | 0.535726 | #! /usr/bin/env python
'''
A script to fix CEDAR's ASCII dispersion solution output.
ABOUT:
This script can be used to modify .txt files that CEDAR produce when
dispersion solution output has been saved.
USAGE:
CEDARDispersionTXTFixer.py [-h] [-v] [-r] [-c] [-f] string [-o] srtring
where:
[-h] prints help
[-v] verbose mode on
[-r] creates an output file called dispersion.txt which is
in format that can be used as an input to update_*_disp.pro scripts.
[-c] compares results to TV06 six data found from CDBS
[-f] user defined string that is used to search text files to be processed.
User can specify wild cards e.g. "*wcal.txt".
[-o] name of the output file. This does not change the name of the output
file that optional argument -c produces.
DEPENDS:
Python 2.5 or 2.6 (not version 3.x compatible)
Pyfits
EXITSTA:
0: No errors
AUTHOR :
Sami-Matias Niemi, for STScI
HISTORY:
May 15 2009: Initial Version
@author: Sami-Matias Niemi
'''
__author__ = 'Sami-Matias Niemi'
__version__ = '0.9'
#Processes command line arguments
def process_args(just_print_help = False):
    """Build the optparse command-line parser for this script.

    When *just_print_help* is truthy, only the usage text is printed and
    nothing is returned; otherwise the (options, arguments) pair produced
    by optparse is returned to the caller.
    """
    from optparse import OptionParser

    description = ('This script can be used to modify .txt files that CEDAR produce '
                   'when dispersion solution output has been saved. ')
    parser = OptionParser(usage='usage: %prog [options]',
                          version='%prog ' + __version__,
                          description=description)
    # Boolean switches.
    parser.add_option("-c", "--compare", action="store_true", dest="comparison",
                      help="Compare results to TV06 data.")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      help="Verbose mode on.")
    # String-valued options.
    parser.add_option("-f", "--find", dest="find", metavar="string",
                      help='''User define string that is used to find text files to be processed.
User must specify any wild cards in this string e.g. "*wcal.txt".''')
    parser.add_option("-o", "--output", dest="output", metavar="string",
                      help="Name of the output file.")
    parser.add_option("-r", "--reference", action="store_true", dest="reference",
                      help="Creates an output file called dispersions.txt.")

    if not just_print_help:
        return parser.parse_args()
    parser.print_help()
def checkZeroArguments(opts):
    """Return True when at least one command-line option was supplied.

    *opts* is the Values instance returned by process_args(); optparse
    leaves every unset option as None, so this detects a bare command
    line (the caller then prints the usage text and exits).
    """
    # Idiomatic replacement for the manual loop over opts.__dict__.
    return any(value is not None for value in opts.__dict__.values())
#Main program begins
if __name__ == '__main__':
    # Batch driver: finds CEDAR ASCII dispersion-solution dumps, parses the
    # fitted coefficients out of them, optionally compares them against the
    # TV06 CDBS reference tables, and writes one output line per input file.
    import glob
    import sys
    import pyfits as PF
    #command line arguments
    (opts, args) = process_args()
    # No options at all -> print usage and exit with a non-zero status.
    if checkZeroArguments(opts) == False:
        process_args(True)
        sys.exit(-9)
    #verbose
    verbose = False
    if opts.verbose is True: verbose = True
    #CDBS TV06 files
    # Reference dispersion tables (NUV/FUV) used by the optional comparison.
    cdbspath = '/grp/hst/cdbs/lref/'
    fuvfile = 't2k1224el_disp.fits'
    nuvfile = 't2917094l_disp.fits'
    #search string
    if opts.find is not None: search = opts.find
    else: search = '*wcal.txt'
    if verbose: print '\nWill use string: %s to indetify ASCII files containing dispersion solutions' % search
    #finds all files containing search string
    txtfiles = glob.glob(search)
    #outputfile
    # The two branches differ only in the main output name; the comparison
    # and reference files are opened the same way in both.
    if opts.output is not None:
        out = open(opts.output, 'w')
        if opts.comparison is True: outcomp = open(opts.output + '.comparison', 'w')
        if opts.reference is not None: outref = open('dispersion.txt', 'w')
        if verbose: print '\n Output will be written to file %s' % opts.output
        outfile = opts.output
    else:
        outfile = 'CEDARDispersionTXTFixer.output'
        out = open(outfile, 'w')
        if opts.comparison is True: outcomp = open('CEDARDispersionTXTFixer.output.comparison', 'w')
        if opts.reference is not None: outref = open('dispersion.txt', 'w')
        if verbose: print '\n You did not specify output filename. Will use default CEDARDispersionTXTFixer.output'
    #main loop
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    for file in txtfiles:
        if verbose: print '\nTrying to open file: %s' % file
        try:
            # NOTE(review): the handle returned by open() is never closed,
            # and on failure the 'pass' does not skip this iteration, so
            # 'fulldata' silently keeps the previous file's contents (and
            # raises NameError if the very first file is unreadable).
            fulldata = open(file, 'r').readlines()
        except:
            if verbose: print '\nERROR: Cannot read file %s' % file
            pass
        #splits the file
        sdata = [line.strip().split() for line in fulldata]
        # First token of the first line is expected to name the FITS file.
        filename = sdata[0][0]
        #gets all coefficients
        coeffs = []
        RMS = ''
        if verbose: print '\nSearching for coefficient values...'
        for line in sdata:
            try:
                # Coefficient rows look like 'C<i> = <value> +/- <error>';
                # keep the (value, error) pair as strings.
                if line[0].startswith('C'):
                    if verbose: print '\nFound:', line
                    coeffs.append([line[2], line[4]])
                if line[0].startswith('RMS'):
                    if verbose: print '\nRMS of fit was %s' % line[4]
                    RMS = line[4]
            except: pass
        if verbose: print '\nTrying to find equivalent (%s) FITS file for header information...' % filename
        #Tries to open equivalent FITS file
        try:
            hdr0 = PF.open(filename)[0].header
        except:
            # NOTE(review): 'pass' after the message means a stale 'hdr0'
            # from a previous iteration may be reused below.
            if verbose: print '\nERROR: Could not open %s. Will not get header information.' % filename
            pass
        #Tries to get all required header keywords
        try:
            cenwav = hdr0['CENWAVE']
            stripe = hdr0['SEGMENT']
            grating = hdr0['OPT_ELEM']
            fppos = hdr0['FPPOS']
            aperture = hdr0['APERTURE']
        except:
            if verbose: print '\nERROR: Could not read all required header keywords of %s' % filename
            # NOTE(review): typos in this fallback -- 'cenwave' and 'strip'
            # are assigned here, but the rest of the loop reads 'cenwav' and
            # 'stripe', so a header failure leads to a NameError (or stale
            # values from the previous file) further down.
            cenwave = 'NA'
            strip = 'XNA'
            grating = 'NA'
            fppos = 'NA'
            aperture = 'NA'
        #comparison to CDBS
        #should be rewritten, as it now opens the file for nothing on every round...
        if opts.comparison is True:
            if verbose: print '\nWill try to compare calculated results to the dispersion solution of TV06 data.'
            try:
                # Pick the NUV or FUV reference table from the segment name
                # and pull the coefficient column of the matching row.
                if stripe.startswith('N'):
                    if verbose: print 'Trying to open %s' % cdbspath + nuvfile
                    CDBSdata = PF.open(cdbspath + nuvfile)[1].data
                    CDBScoeff = [line[5] for line in CDBSdata
                                 if line[0].strip() == stripe and
                                 line[1].strip() == grating and
                                 line[2].strip() == aperture and
                                 line[3] == cenwav]
                if stripe.startswith('F'):
                    if verbose: print 'Trying to open %s' % cdbspath + fuvfile
                    CDBSdata = PF.open(cdbspath + fuvfile)[1].data
                    CDBScoeff = [line[5] for line in CDBSdata
                                 if line[0].strip() == stripe and
                                 line[1].strip() == grating and
                                 line[2].strip() == aperture and
                                 line[3] == cenwav]
            except:
                if verbose: print '\nERROR: Cannot open CDBS file...'
                # NOTE(review): on failure CDBScoeff becomes the *string*
                # 'NA', so CDBScoeff[0] below is the character 'N' and the
                # delta computation will misbehave instead of being skipped.
                CDBScoeff = 'NA'
            #lets calculate delta
            # Difference between the freshly fitted and reference coefficients.
            delta = []
            for new, old in zip(coeffs, CDBScoeff[0]):
                delta.append(float(new[0]) - old)
            #some quick results to screen
            if verbose: print stripe, grating, aperture, cenwav, fppos, coeffs, CDBScoeff, delta
        #output
        # Build the space-separated value strings for the output records.
        cfs = ''
        CDBSfs = ''
        deltas = ''
        for x in coeffs:
            cfs += x[0] + ' ' + x[1] + ' '
        if opts.comparison is True:
            for x in CDBScoeff[0]: CDBSfs += str(x) + ' '
            for x in delta: deltas += str(x) + ' '
        else:
            CDBSfs = ' '
        #normal outputs
        if verbose: print '\nWill output data to %s' % outfile
        out.write(stripe + ' ' + grating + ' ' + aperture + ' ' + str(cenwav) +
                  ' ' + str(fppos) + ' ' + cfs + CDBSfs + deltas + '\n')
        #output in reference file format
        # Fixed four-coefficient layout consumed by the update_*_disp.pro
        # scripts. NOTE(review): fewer than four parsed coefficients raise
        # an IndexError here.
        if opts.reference is True:
            if verbose: print '\nWill output data to dispersion.txt'
            outref.write(stripe + ' ' + grating + ' ' + aperture + ' ' + str(cenwav) +
                         ' ' + str(fppos) + ' ' + coeffs[0][0] + ' ' + coeffs[1][0] + ' ' + coeffs[2][0] +
                         ' ' + coeffs[3][0] + '\n')
        #comparison output
        if opts.comparison is True:
            # Precedence: ('%s' % outfile) + '.comparison' -- prints the
            # intended name only because % binds tighter than +.
            if verbose: print '\nWill output data to %s' % outfile + '.comparison'
            outcomp.write(stripe + ' ' + grating + ' ' + aperture + ' ' + str(cenwav) +
                          ' ' + str(fppos) + ' ' + deltas + '\n')
    #closes open files
    out.close()
    if opts.comparison: outcomp.close()
    if opts.reference: outref.close()
    #exits
    if verbose: print '\n\nScripts finished...'
    sys.exit(0)
| 1,391 | 0 | 45 |
c83e1a47a5b871d41b07e55fee7a24c1853ca798 | 1,731 | py | Python | tutorial/bn/ip_bn.py | Xiaoming-Wang1/yambopy | 5b8cb6882657a4ff70dc3cf450d7ac0b5effc952 | [
"BSD-3-Clause"
] | 21 | 2016-04-07T20:53:29.000Z | 2021-05-14T08:06:02.000Z | tutorial/bn/ip_bn.py | Xiaoming-Wang1/yambopy | 5b8cb6882657a4ff70dc3cf450d7ac0b5effc952 | [
"BSD-3-Clause"
] | 22 | 2016-06-14T22:29:47.000Z | 2021-09-16T15:36:26.000Z | tutorial/bn/ip_bn.py | Xiaoming-Wang1/yambopy | 5b8cb6882657a4ff70dc3cf450d7ac0b5effc952 | [
"BSD-3-Clause"
] | 15 | 2016-06-14T18:40:57.000Z | 2021-08-07T13:17:43.000Z | #
# Author: Henrique Pereira Coutada Miranda
# Run a IP calculation using yambo
#
# Driver script: prepares a yambo SAVE database from a Quantum ESPRESSO nscf
# run, optionally builds a double k-point grid with ypp, runs an independent
# particle (IP) absorption calculation, and packs the results to JSON.
from __future__ import print_function
import sys
from yambopy import *
from qepy import *
import argparse
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-dg','--doublegrid', action="store_true", help='Use double grid')
parser.add_argument('-c', '--calc', action="store_true", help='calculate the IP absorption')
parser.add_argument('-p', '--plot', action="store_true", help='plot the results')
args = parser.parse_args()
# No arguments at all: show the usage text and stop.
if len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)
yambo = "yambo"   # name of the yambo executable on PATH
folder = 'ip'     # working directory for the IP run
#check if the SAVE folder is present
# NOTE(review): 'os' is not imported explicitly; presumably it arrives via
# the star imports from yambopy/qepy above -- confirm.
if not os.path.isdir('database/SAVE'):
    print('preparing yambo database')
    # Convert the QE save directory into a yambo SAVE database (p2y),
    # initialize it once with yambo, then move it into ./database.
    os.system('mkdir -p database')
    os.system('cd nscf/bn.save; p2y > p2y.log')
    os.system('cd nscf/bn.save; yambo > yambo.log')
    os.system('mv nscf/bn.save/SAVE database')
if not os.path.isdir(folder):
    os.mkdir(folder)
    os.system('cp -r database/SAVE %s'%folder)
#initialize the double grid
if args.doublegrid:
    # Hand-written ypp input mapping the coarse grid onto the fine grid
    # stored in ../database_double.
    print("creating double grid")
    f = open('%s/ypp.in'%folder,'w')
    f.write("""kpts_map
%DbGd_DB1_paths
"../database_double"
%""")
    f.close()
    os.system('cd %s; ypp'%folder)
if args.calc:
    #create the yambo input file
    y = YamboIn('yambo -o g -V all',folder=folder)
    y['FFTGvecs'] = [30,'Ry']   # FFT cutoff
    y['BndsRnXs'] = [1,30]      # band range entering the response
    y['QpntsRXd'] = [[1,1],'']  # q-point selection
    y['ETStpsXd'] = 500         # number of energy steps
    y.write('%s/yambo_run.in'%folder)
    print('running yambo')
    os.system('cd %s; %s -F yambo_run.in -J yambo'%(folder,yambo))
if args.plot:
    #pack in a json file
    y = YamboOut(folder)
    y.pack()
| 26.227273 | 92 | 0.656268 | #
# Author: Henrique Pereira Coutada Miranda
# Run a IP calculation using yambo
#
from __future__ import print_function
import sys
from yambopy import *
from qepy import *
import argparse
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-dg','--doublegrid', action="store_true", help='Use double grid')
parser.add_argument('-c', '--calc', action="store_true", help='calculate the IP absorption')
parser.add_argument('-p', '--plot', action="store_true", help='plot the results')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
yambo = "yambo"
folder = 'ip'
#check if the SAVE folder is present
if not os.path.isdir('database/SAVE'):
print('preparing yambo database')
os.system('mkdir -p database')
os.system('cd nscf/bn.save; p2y > p2y.log')
os.system('cd nscf/bn.save; yambo > yambo.log')
os.system('mv nscf/bn.save/SAVE database')
if not os.path.isdir(folder):
os.mkdir(folder)
os.system('cp -r database/SAVE %s'%folder)
#initialize the double grid
if args.doublegrid:
print("creating double grid")
f = open('%s/ypp.in'%folder,'w')
f.write("""kpts_map
%DbGd_DB1_paths
"../database_double"
%""")
f.close()
os.system('cd %s; ypp'%folder)
if args.calc:
#create the yambo input file
y = YamboIn('yambo -o g -V all',folder=folder)
y['FFTGvecs'] = [30,'Ry']
y['BndsRnXs'] = [1,30]
y['QpntsRXd'] = [[1,1],'']
y['ETStpsXd'] = 500
y.write('%s/yambo_run.in'%folder)
print('running yambo')
os.system('cd %s; %s -F yambo_run.in -J yambo'%(folder,yambo))
if args.plot:
#pack in a json file
y = YamboOut(folder)
y.pack()
| 0 | 0 | 0 |
f1f7ec5076f3b1117487b59e537c55f9effc9a82 | 392 | py | Python | posthog/migrations/0169_person_properties_last_updated_at.py | asherf/posthog | 1e50704d76cba484e80b83f1e1f658bd6e98743a | [
"MIT"
] | null | null | null | posthog/migrations/0169_person_properties_last_updated_at.py | asherf/posthog | 1e50704d76cba484e80b83f1e1f658bd6e98743a | [
"MIT"
] | null | null | null | posthog/migrations/0169_person_properties_last_updated_at.py | asherf/posthog | 1e50704d76cba484e80b83f1e1f658bd6e98743a | [
"MIT"
] | null | null | null | # Generated by Django 3.1.12 on 2021-09-21 16:11
from django.db import migrations, models
| 23.058824 | 105 | 0.665816 | # Generated by Django 3.1.12 on 2021-09-21 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds Person.properties_last_updated_at (JSONField,
    # default {}) on top of migration 0168.

    dependencies = [
        ("posthog", "0168_action_step_empty_string_reset"),
    ]

    operations = [
        migrations.AddField(
            model_name="person", name="properties_last_updated_at", field=models.JSONField(default=dict),
        ),
    ]
| 0 | 277 | 23 |
b05a163fdb441b5073b8b789628c28f2e21a779e | 939 | py | Python | src/lib/datasets/dataset_factory.py | goodxue/CenterNet | 50e1726664337fb988542e3c2247a4c57ef74334 | [
"MIT"
] | null | null | null | src/lib/datasets/dataset_factory.py | goodxue/CenterNet | 50e1726664337fb988542e3c2247a4c57ef74334 | [
"MIT"
] | null | null | null | src/lib/datasets/dataset_factory.py | goodxue/CenterNet | 50e1726664337fb988542e3c2247a4c57ef74334 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .sample.ddd import DddDataset
from .sample.exdet import EXDetDataset
from .sample.ctdet import CTDetDataset
from .sample.multi_pose import MultiPoseDataset
from .dataset.coco import COCO
from .dataset.pascal import PascalVOC
from .dataset.kitti import KITTI
from .dataset.coco_hp import COCOHP
from .dataset.traffic_car_kitti import KITTI as TRATTIC_CAR
from .dataset.multiview_kitti import KITTI as MULTIVIEW
dataset_factory = {
'coco': COCO,
'pascal': PascalVOC,
'kitti': KITTI,
'coco_hp': COCOHP,
'traffic_car': TRATTIC_CAR,
'multiview': MULTIVIEW
}
_sample_factory = {
'exdet': EXDetDataset,
'ctdet': CTDetDataset,
'ddd': DddDataset,
'multi_pose': MultiPoseDataset
}
| 23.475 | 65 | 0.779553 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .sample.ddd import DddDataset
from .sample.exdet import EXDetDataset
from .sample.ctdet import CTDetDataset
from .sample.multi_pose import MultiPoseDataset
from .dataset.coco import COCO
from .dataset.pascal import PascalVOC
from .dataset.kitti import KITTI
from .dataset.coco_hp import COCOHP
from .dataset.traffic_car_kitti import KITTI as TRATTIC_CAR
from .dataset.multiview_kitti import KITTI as MULTIVIEW
dataset_factory = {
'coco': COCO,
'pascal': PascalVOC,
'kitti': KITTI,
'coco_hp': COCOHP,
'traffic_car': TRATTIC_CAR,
'multiview': MULTIVIEW
}
_sample_factory = {
'exdet': EXDetDataset,
'ctdet': CTDetDataset,
'ddd': DddDataset,
'multi_pose': MultiPoseDataset
}
def get_dataset(dataset, task):
  """Compose a concrete dataset class from a data source (e.g. 'coco',
  'kitti') and a task-specific sampling mixin (e.g. 'ctdet', 'ddd'),
  looked up in the two module-level factory dicts."""
  class Dataset(dataset_factory[dataset], _sample_factory[task]):
    pass
  return Dataset
| 102 | 0 | 23 |
5e3497501962f190bafae73c2c0a96641f61d801 | 637 | py | Python | submitify/migrations/0003_auto_20161124_0055.py | OpenFurry/submitify | 10ff1961fb76b100a5087197760611c925f295a7 | [
"MIT"
] | null | null | null | submitify/migrations/0003_auto_20161124_0055.py | OpenFurry/submitify | 10ff1961fb76b100a5087197760611c925f295a7 | [
"MIT"
] | 14 | 2016-11-20T22:07:56.000Z | 2021-06-10T18:26:45.000Z | submitify/migrations/0003_auto_20161124_0055.py | OpenFurry/submitify | 10ff1961fb76b100a5087197760611c925f295a7 | [
"MIT"
] | 1 | 2016-11-20T21:58:29.000Z | 2016-11-20T21:58:29.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 00:55
from __future__ import unicode_literals
from django.db import migrations, models
| 23.592593 | 48 | 0.585557 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 00:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: renames Guideline.value to value_raw and adds
    # value_rendered (the rendered copy of the raw markup).

    dependencies = [
        ('submitify', '0002_call_invite_only'),
    ]

    operations = [
        migrations.RenameField(
            model_name='guideline',
            old_name='value',
            new_name='value_raw',
        ),
        migrations.AddField(
            model_name='guideline',
            name='value_rendered',
            field=models.TextField(default=' '),
            preserve_default=False,
        ),
    ]
| 0 | 458 | 23 |
3651acbfff4d1dc6104b1163d9bcf7633f3d1ddf | 1,397 | py | Python | hata/discord/message/message_activity.py | Multiface24111/hata | cd28f9ef158e347363669cc8d1d49db0ff41aba0 | [
"0BSD"
] | 173 | 2019-06-14T20:25:00.000Z | 2022-03-21T19:36:10.000Z | hata/discord/message/message_activity.py | Tari-dev/hata | a5c3199c845858f997af3b0b2c18770fdc691897 | [
"0BSD"
] | 52 | 2020-01-03T17:05:14.000Z | 2022-03-31T11:39:50.000Z | hata/discord/message/message_activity.py | Tari-dev/hata | a5c3199c845858f997af3b0b2c18770fdc691897 | [
"0BSD"
] | 47 | 2019-11-09T08:46:45.000Z | 2022-03-31T14:33:34.000Z | __all__ = ('MessageActivity', )
from .preinstanced import MessageActivityType
class MessageActivity:
    """
    Might be sent with a ``Message``, if it has rich presence-related chat embeds.

    Attributes
    ----------
    party_id : `str`
        The message application's party's id. Can be empty string.
    type : ``MessageActivityType``
        The message application's type.
    """
    __slots__ = ('party_id', 'type',)

    def __init__(self, data):
        """
        Creates a new ``MessageActivity`` from message activity data included inside of a ``Message``'s data.

        Parameters
        ----------
        data : `dict` of (`str`, `Any`) items
            Message activity data.
        """
        self.type = MessageActivityType.get(data['type'])
        self.party_id = data.get('party_id', '')

    def __eq__(self, other):
        """Returns whether the two message activities are equal."""
        if type(self) is not type(other):
            return NotImplemented

        return (self.type is other.type) and (self.party_id == other.party_id)

    def __repr__(self):
        """Returns the message activity's representation."""
        return f'<{type(self).__name__} type={self.type.name} ({self.type.value}), party_id={self.party_id!r}>'
| 31.044444 | 115 | 0.582677 | __all__ = ('MessageActivity', )
from .preinstanced import MessageActivityType
class MessageActivity:
"""
Might be sent with a ``Message``, if it has rich presence-related chat embeds.
Attributes
----------
party_id : `str`
The message application's party's id. Can be empty string.
type : ``MessageActivityType``
The message application's type.
"""
__slots__ = ('party_id', 'type',)
def __init__(self, data):
"""
Creates a new ``MessageActivity`` from message activity data included inside of a ``Message``'s data.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Message activity data.
"""
self.party_id = data.get('party_id','')
self.type = MessageActivityType.get(data['type'])
def __eq__(self, other):
"""Returns whether the two message activities are equal."""
if type(self) is not type(other):
return NotImplemented
if self.type is not other.type:
return False
if self.party_id != other.party_id:
return False
return True
def __repr__(self):
"""Returns the message activity's representation."""
return f'<{self.__class__.__name__} type={self.type.name} ({self.type.value}), party_id={self.party_id!r}>'
| 0 | 0 | 0 |
4f1411287da3b757cafc4c5908bcc279b74cb074 | 362 | py | Python | tests/base.py | plafer/rply | 6e16262dc6d434fc467eed83ed31ca764ba01a34 | [
"BSD-3-Clause"
] | 265 | 2015-01-10T04:49:05.000Z | 2022-03-25T10:54:35.000Z | tests/base.py | plafer/rply | 6e16262dc6d434fc467eed83ed31ca764ba01a34 | [
"BSD-3-Clause"
] | 59 | 2015-01-02T11:39:24.000Z | 2022-02-18T18:09:37.000Z | tests/base.py | plafer/rply | 6e16262dc6d434fc467eed83ed31ca764ba01a34 | [
"BSD-3-Clause"
] | 48 | 2015-03-23T14:15:45.000Z | 2021-04-20T17:49:09.000Z | import contextlib
import warnings
| 25.857143 | 55 | 0.640884 | import contextlib
import warnings
class BaseTests(object):
    @contextlib.contextmanager
    def assert_warns(self, cls, message):
        """Context manager asserting that the wrapped body emits exactly one
        warning, of category *cls*, whose message text equals *message*."""
        with warnings.catch_warnings(record=True) as caught:
            # Record every warning, even those already seen once.
            warnings.simplefilter("always")
            yield
        assert len(caught) == 1
        only = caught[0]
        assert only.category is cls
        assert only.message.args[0] == message
| 244 | 60 | 23 |
69d66d320bc54e270f4be98c63a8ea8984c25c33 | 19 | py | Python | venv/Lib/site-packages/suit/__init__.py | ryankibayhan/ryb-ecommerce | 15fa3bcb624be528926458b466ad7fe7fef5158e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/suit/__init__.py | ryankibayhan/ryb-ecommerce | 15fa3bcb624be528926458b466ad7fe7fef5158e | [
"MIT"
] | 12 | 2019-12-04T23:48:45.000Z | 2022-03-11T23:53:30.000Z | venv/Lib/site-packages/suit/__init__.py | ryankibayhan/ryb-ecommerce | 15fa3bcb624be528926458b466ad7fe7fef5158e | [
"MIT"
] | null | null | null | VERSION = '0.2.26'
| 9.5 | 18 | 0.578947 | VERSION = '0.2.26'
| 0 | 0 | 0 |
0ee726969a8149a125a512c96fd40fc7e8e7cf2e | 868 | py | Python | app/main/forms.py | Wambuilucy/Pitch-App | 1cf4004a01e8efb483a30fb7d9a95791a42d673f | [
"Unlicense"
] | null | null | null | app/main/forms.py | Wambuilucy/Pitch-App | 1cf4004a01e8efb483a30fb7d9a95791a42d673f | [
"Unlicense"
] | 6 | 2020-02-14T12:13:40.000Z | 2020-02-14T12:13:47.000Z | app/main/forms.py | Wambuilucy/Pitch-App | 1cf4004a01e8efb483a30fb7d9a95791a42d673f | [
"Unlicense"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField,SubmitField,TextAreaField,RadioField
from wtforms.validators import Required,Email,EqualTo
from wtforms import ValidationError
| 36.166667 | 202 | 0.782258 |
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField,SubmitField,TextAreaField,RadioField
from wtforms.validators import Required,Email,EqualTo
from wtforms import ValidationError
class PitchForm(FlaskForm):
    # Form for submitting a new pitch: title, body text and one of four
    # fixed categories; all fields are required.
    title = StringField('Title', validators=[Required()])
    description = TextAreaField("What would you like to pitch ?",validators=[Required()])
    category = RadioField('Label', choices=[ ('promotionpitch','promotionpitch'), ('interviewpitch','interviewpitch'),('pickuplines','pickuplines'),('productpitch','productpitch')],validators=[Required()])
    submit = SubmitField('Submit')
class CommentForm(FlaskForm):
    # Form for commenting on an existing pitch.
    description = TextAreaField('Add comment',validators=[Required()])
    submit = SubmitField()
class UpvoteForm(FlaskForm):
    # Single-button form used as a CSRF-protected upvote action.
    submit = SubmitField()
class Downvote(FlaskForm):
    # Single-button form used as a CSRF-protected downvote action.
    # NOTE(review): named inconsistently with UpvoteForm (missing the
    # 'Form' suffix).
    submit = SubmitField()
4608e090dd04c4c1000dee1d9306310138ac2957 | 1,050 | py | Python | setup.py | deepraj1729/TChatBot | f6800d1cb4a6a33d182022056d5b2a13b5ce5499 | [
"MIT"
] | 13 | 2020-06-22T12:48:32.000Z | 2021-09-22T16:39:37.000Z | setup.py | deepraj1729/TChatBot | f6800d1cb4a6a33d182022056d5b2a13b5ce5499 | [
"MIT"
] | 4 | 2020-06-22T12:46:51.000Z | 2022-02-09T23:27:23.000Z | setup.py | deepraj1729/TChatBot | f6800d1cb4a6a33d182022056d5b2a13b5ce5499 | [
"MIT"
] | 1 | 2020-06-29T04:06:31.000Z | 2020-06-29T04:06:31.000Z | from setuptools import setup, find_packages
with open('requirements.txt') as f:
requirements = f.readlines()
setup(
name ='TChatBot',
version ='0.1.0',
author ='Deepraj Baidya',
author_email ='bdeeprajrkm1@gmail.com',
url ='https://github.com/deepraj1729/TChatBot',
description ='A ChatBot framework to create customizable all purpose Chatbots using NLP, Tensorflow, Speech Recognition',
long_description = readme(),
long_description_content_type ="text/markdown",
license ='MIT',
packages = find_packages(),
entry_points ={
'console_scripts': [
'tchatbot = TChatBot.main:main'
]
},
classifiers =(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
keywords ='A Customizable ChatBot framework with Tensorflow,NLP,Speech Recognition',
include_package_data = True,
install_requires = requirements,
zip_safe = False
)
| 28.378378 | 124 | 0.691429 | from setuptools import setup, find_packages
with open('requirements.txt') as f:
requirements = f.readlines()
def readme():
    """Return the contents of README.md, used as the package's long
    description in setup()."""
    with open('README.md') as f:
        README = f.read()
    return README
setup(
name ='TChatBot',
version ='0.1.0',
author ='Deepraj Baidya',
author_email ='bdeeprajrkm1@gmail.com',
url ='https://github.com/deepraj1729/TChatBot',
description ='A ChatBot framework to create customizable all purpose Chatbots using NLP, Tensorflow, Speech Recognition',
long_description = readme(),
long_description_content_type ="text/markdown",
license ='MIT',
packages = find_packages(),
entry_points ={
'console_scripts': [
'tchatbot = TChatBot.main:main'
]
},
classifiers =(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
keywords ='A Customizable ChatBot framework with Tensorflow,NLP,Speech Recognition',
include_package_data = True,
install_requires = requirements,
zip_safe = False
)
| 57 | 0 | 23 |
5860ee09204a4ac6d863ab5a06ec4a591bd5a665 | 7,435 | py | Python | fast_calvo_trainer.py | DDMAL/Calvo-classifier | 06e9a74b2221e130ec7eae1dfe462f9de1a5ffa3 | [
"MIT"
] | null | null | null | fast_calvo_trainer.py | DDMAL/Calvo-classifier | 06e9a74b2221e130ec7eae1dfe462f9de1a5ffa3 | [
"MIT"
] | 25 | 2017-07-17T19:49:57.000Z | 2021-05-21T14:29:34.000Z | fast_calvo_trainer.py | DDMAL/Calvo-classifier | 06e9a74b2221e130ec7eae1dfe462f9de1a5ffa3 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Program Name: calvo_trainer.py
# Program Description: Rodan wrapper for Fast Calvo's classifier training
# -----------------------------------------------------------------------------
# Core
import logging
import sys
# Third-party
from celery.utils.log import get_task_logger
# Project
from rodan.celery import app
from rodan.jobs.base import RodanTask
from rodan.jobs.Calvo_classifier.fast_trainer_lib import CalvoTrainer
"""Wrap Patchwise (Fast) Calvo classifier training in Rodan."""
logger = get_task_logger(__name__)
| 30.850622 | 139 | 0.456086 | # -----------------------------------------------------------------------------
# Program Name: calvo_trainer.py
# Program Description: Rodan wrapper for Fast Calvo's classifier training
# -----------------------------------------------------------------------------
# Core
import logging
import sys
# Third-party
from celery.utils.log import get_task_logger
# Project
from rodan.celery import app
from rodan.jobs.base import RodanTask
from rodan.jobs.Calvo_classifier.fast_trainer_lib import CalvoTrainer
"""Wrap Patchwise (Fast) Calvo classifier training in Rodan."""
logger = get_task_logger(__name__)
class FastCalvoTrainer(RodanTask):
    # Rodan job wrapper that trains one Selection Auto-Encoder per layer for
    # patchwise (pixel-level) analysis of music document images.
    name = "Training model for Patchwise Analysis of Music Document"
    author = "Jorge Calvo-Zaragoza, Francisco J. Castellanos, Gabriel Vigliensoni, and Ichiro Fujinaga"
    description = "The job performs the training of many Selection Auto-Encoder model for the pixelwise analysis of music document images."
    enabled = True
    category = "OMR - Layout analysis"
    interactive = False

    # JSON-schema-style settings form shown by Rodan; training is queued on
    # the GPU workers.
    settings = {
        "title": "Training parameters",
        "type": "object",
        "properties": {
            "Batch Size": {
                "type": "integer",
                "minimum": 1,
                "default": 16,
                "maximum": 64,
            },
            "Maximum number of training epochs": {
                "type": "integer",
                "minimum": 1,
                "default": 50,
            },
            "Maximum number of samples per label": {
                "type": "integer",
                "minimum": 100,
                "default": 2000,
            },
            "Patch height": {"type": "integer", "minimum": 32, "default": 256},
            "Patch width": {"type": "integer", "minimum": 32, "default": 256},
        },
        "job_queue": "GPU",
    }

    # Source images plus per-layer ground-truth masks: layers 0-1 are
    # mandatory, layers 2-9 optional; up to 5 pages per port.
    input_port_types = (
        {
            "name": "Image",
            "minimum": 1,
            "maximum": 5,
            "resource_types": ["image/rgb+png", "image/rgb+jpg"],
        },
        {
            "name": "rgba PNG - Selected regions",
            "minimum": 1,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        # We did not go this route because it would be more difficult for the user to track layers
        # {'name': 'rgba PNG - Layers', 'minimum': 1, 'maximum': 10, 'resource_types': ['image/rgba+png']},
        {
            "name": "rgba PNG - Layer 0 (Background)",
            "minimum": 1,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 1",
            "minimum": 1,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 2",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 3",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 4",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 5",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 6",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 7",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 8",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
        {
            "name": "rgba PNG - Layer 9",
            "minimum": 0,
            "maximum": 5,
            "resource_types": ["image/rgba+png"],
        },
    )

    # One trained HDF5 model per layer (0-1 mandatory, 2-9 optional) plus a
    # plain-text training log.
    output_port_types = (
        # We did not go this route because it would be more difficult for the user to track layers
        # {'name': 'Adjustable models', 'minimum': 1, 'maximum': 10, 'resource_types': ['keras/model+hdf5']},
        {
            "name": "Log File",
            "minimum": 1,
            "maximum": 1,
            "resource_types": ["text/plain"],
        },
        {
            "name": "Model 0",
            "minimum": 1,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 1",
            "minimum": 1,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 2",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 3",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 4",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 5",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 6",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 7",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 8",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
        {
            "name": "Model 9",
            "minimum": 0,
            "maximum": 1,
            "resource_types": ["keras/model+hdf5"],
        },
    )

    def run_my_task(self, inputs, settings, outputs):
        """Entry point invoked by Rodan: wires the job settings and ports
        into CalvoTrainer and runs the training, mirroring stdout/stderr to
        the Celery logger (and the optional log file) for the duration."""
        oldouts = sys.stdout, sys.stderr
        if "Log File" in outputs:
            # NOTE(review): this handler is added to the module-level logger
            # but never removed, so repeated runs inside one worker process
            # accumulate handlers (and duplicate log lines).
            handler = logging.FileHandler(outputs["Log File"][0]["resource_path"])
            handler.setFormatter(
                logging.Formatter("%(asctime)s - %(name)s - %(message)s")
            )
            logger.addHandler(handler)
        try:
            # Settings
            batch_size = settings["Batch Size"]
            patch_height = settings["Patch height"]
            patch_width = settings["Patch width"]
            max_number_of_epochs = settings["Maximum number of training epochs"]
            max_samples_per_class = settings["Maximum number of samples per label"]
            rlevel = app.conf.CELERY_REDIRECT_STDOUTS_LEVEL
            app.log.redirect_stdouts_to_logger(logger, rlevel)
            # Fail if arbitrary layers are not equal before training occurs.
            trainer = CalvoTrainer(
                batch_size,
                patch_height,
                patch_width,
                max_number_of_epochs,
                max_samples_per_class,
                inputs,
                outputs,
            )
            trainer.runTrainer()
            return True
        finally:
            # Always restore the interpreter's stream objects, even on error.
            sys.stdout, sys.stderr = oldouts

    def my_error_information(self, exc, traceback):
        # Required by the RodanTask interface; no custom error reporting.
        pass
| 1,321 | 5,471 | 23 |
588914580ea534a0bc749816d18c1c0951db33c0 | 1,056 | py | Python | tests/test_lz77.py | pynflate/pynflate | 3986b09fa64d28cb9e48cf84edfce67b73331e1b | [
"MIT"
] | 10 | 2019-09-09T18:09:46.000Z | 2021-12-28T07:09:31.000Z | tests/test_lz77.py | pynflate/pynflate | 3986b09fa64d28cb9e48cf84edfce67b73331e1b | [
"MIT"
] | null | null | null | tests/test_lz77.py | pynflate/pynflate | 3986b09fa64d28cb9e48cf84edfce67b73331e1b | [
"MIT"
] | 3 | 2020-03-18T12:58:47.000Z | 2021-05-29T19:26:07.000Z | from pynflate.lz77 import Lz77
| 27.076923 | 93 | 0.573864 | from pynflate.lz77 import Lz77
class TestLz77(object):
def test_empty(self):
lz77 = Lz77(6)
codewords = list(lz77.compress(''))
assert codewords == []
def test_one_char(self):
lz77 = Lz77(6)
original = 'x'
codewords = list(lz77.compress(original))
assert codewords == [(0, 0, 'x')]
decompressed = lz77.decompress(codewords)
assert decompressed == original
def test_all_same(self):
lz77 = Lz77(6)
original = 'xxxxxxxxxx'
codewords = list(lz77.compress(original))
assert codewords == [(0, 0, 'x'), (1, 8, 'x')]
decompressed = lz77.decompress(codewords)
assert decompressed == original
def test_nothing_special(self):
lz77 = Lz77(6)
original = 'aacaacabcabaaac'
codewords = list(lz77.compress(original))
assert codewords == [(0, 0, 'a'), (1, 1, 'c'), (3, 4, 'b'), (3, 3, 'a'), (1, 2, 'c')]
decompressed = lz77.decompress(codewords)
assert decompressed == original
| 892 | 2 | 130 |
588385b6915982914d17ba33c5bf2b9591cb5a8a | 4,645 | py | Python | apps/data/forms.py | akaytatsu/incricao_conferencia | 9ab774c6fe30cdb1a45d3732ade394df6e3b4258 | [
"MIT"
] | null | null | null | apps/data/forms.py | akaytatsu/incricao_conferencia | 9ab774c6fe30cdb1a45d3732ade394df6e3b4258 | [
"MIT"
] | 3 | 2019-12-22T22:07:48.000Z | 2019-12-23T21:38:31.000Z | apps/data/forms.py | akaytatsu/inscricao_conferencia | 9ab774c6fe30cdb1a45d3732ade394df6e3b4258 | [
"MIT"
] | null | null | null | from django import forms
from .models import Inscricao, Hospedagem
| 40.043103 | 102 | 0.518837 | from django import forms
from .models import Inscricao, Hospedagem
class InscricaoForm(forms.ModelForm):
_UF = [
{"value": "", "label": "Selecione ..."},
{"value": "AC", "label": "Acre"},
{"value": "AL", "label": "Alagoas"},
{"value": "AP", "label": "Amapรก"},
{"value": "AM", "label": "Amazonas"},
{"value": "BA", "label": "Bahia"},
{"value": "CE", "label": "Cearรก"},
{"value": "DF", "label": "Distrito Federal"},
{"value": "GO", "label": "Goiรกs"},
{"value": "ES", "label": "Espรญrito Santo"},
{"value": "MA", "label": "Maranhรฃo"},
{"value": "MT", "label": "Mato Grosso"},
{"value": "MS", "label": "Mato Grosso do Sul"},
{"value": "MG", "label": "Minas Gerais"},
{"value": "PA", "label": "Parรก"},
{"value": "PB", "label": "Paraiba"},
{"value": "PR", "label": "Paranรก"},
{"value": "PE", "label": "Pernambuco"},
{"value": "PI", "label": "Piauรญยญ"},
{"value": "RJ", "label": "Rio de Janeiro"},
{"value": "RN", "label": "Rio Grande do Norte"},
{"value": "RS", "label": "Rio Grande do Sul"},
{"value": "RO", "label": "Rondรดnia"},
{"value": "RR", "label": "Roraima"},
{"value": "SP", "label": "Sรฃo Paulo"},
{"value": "SC", "label": "Santa Catarina"},
{"value": "SE", "label": "Sergipe"},
{"value": "TO", "label": "Tocantins"},
{"value": "EX", "label": "Exterior"},
]
uf = forms.ChoiceField(
choices=[(doc.get("value"), doc.get("label")) for doc in _UF])
email = forms.EmailField()
class Meta:
model = Inscricao
exclude = ('idade', 'valor', 'valor_total', 'status', 'pagseguro_code',
'payment_reference', 'sit_pagseguro', 'pagseguro_transaction_id', 'usuario', )
def __init__(self, conferencia, *args, **kwargs):
super(InscricaoForm, self).__init__(*args, **kwargs)
self.fields['hospedagem'].queryset = Hospedagem.objects.filter(
conferencia_id=conferencia, ativo=True)
def clean_email(self):
data = self.cleaned_data['email']
if Inscricao.objects.filter(email=data, conferencia=self.data.get("conferencia")).count() > 0:
raise forms.ValidationError("E-mail jรก cadastrado")
return data
def clean_cpf(self):
data = self.cleaned_data['cpf']
if Inscricao.objects.filter(cpf=data, conferencia=self.data.get("conferencia")).count() > 0:
raise forms.ValidationError("CPF jรก cadastrado")
return data
class AtualizaInscricaoForm(forms.ModelForm):
_UF = [
{"value": "", "label": "Selecione ..."},
{"value": "AC", "label": "Acre"},
{"value": "AL", "label": "Alagoas"},
{"value": "AP", "label": "Amapรก"},
{"value": "AM", "label": "Amazonas"},
{"value": "BA", "label": "Bahia"},
{"value": "CE", "label": "Cearรก"},
{"value": "DF", "label": "Distrito Federal"},
{"value": "GO", "label": "Goiรกs"},
{"value": "ES", "label": "Espรญrito Santo"},
{"value": "MA", "label": "Maranhรฃo"},
{"value": "MT", "label": "Mato Grosso"},
{"value": "MS", "label": "Mato Grosso do Sul"},
{"value": "MG", "label": "Minas Gerais"},
{"value": "PA", "label": "Parรก"},
{"value": "PB", "label": "Paraiba"},
{"value": "PR", "label": "Paranรก"},
{"value": "PE", "label": "Pernambuco"},
{"value": "PI", "label": "Piauรญยญ"},
{"value": "RJ", "label": "Rio de Janeiro"},
{"value": "RN", "label": "Rio Grande do Norte"},
{"value": "RS", "label": "Rio Grande do Sul"},
{"value": "RO", "label": "Rondรดnia"},
{"value": "RR", "label": "Roraima"},
{"value": "SP", "label": "Sรฃo Paulo"},
{"value": "SC", "label": "Santa Catarina"},
{"value": "SE", "label": "Sergipe"},
{"value": "TO", "label": "Tocantins"},
{"value": "EX", "label": "Exterior"},
]
uf = forms.ChoiceField(
choices=[(doc.get("value"), doc.get("label")) for doc in _UF])
email = forms.EmailField()
class Meta:
model = Inscricao
exclude = ('idade', 'valor', 'valor_total', 'status', 'pagseguro_code',
'payment_reference', 'sit_pagseguro', 'pagseguro_transaction_id', 'usuario', )
def __init__(self, conferencia, *args, **kwargs):
super(AtualizaInscricaoForm, self).__init__(*args, **kwargs)
self.fields['hospedagem'].queryset = Hospedagem.objects.filter(
conferencia_id=conferencia, ativo=True)
| 889 | 3,664 | 46 |
41e4704bc53fa0011b5050b556c86a41c1d91375 | 1,955 | py | Python | integration/tests_ok/assert_xpath.py | jleverenz/hurl | b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3 | [
"Apache-2.0"
] | null | null | null | integration/tests_ok/assert_xpath.py | jleverenz/hurl | b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3 | [
"Apache-2.0"
] | null | null | null | integration/tests_ok/assert_xpath.py | jleverenz/hurl | b81ca8ab7e0e409ec0c074fd8e118721ff4d3fb3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from flask import Response
from app import app
@app.route("/assert-xpath")
@app.route("/assert-xpath-svg")
@app.route("/assert-xpath-simple-namespaces")
@app.route("/assert-xpath-namespaces")
| 32.04918 | 125 | 0.638875 | # coding=utf-8
from flask import Response
from app import app
@app.route("/assert-xpath")
def assert_xpath():
body = "<data>cafรฉ</data>"
return Response(body, mimetype="text/xml")
@app.route("/assert-xpath-svg")
def assert_xpath_svg():
body = """<?xml version="1.0"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg xmlns="http://www.w3.org/2000/svg">
<style type="text/css">
circle:hover {fill-opacity:0.9;}
</style>
<g style="fill-opacity:0.7;">
<circle cx="6.5cm" cy="2cm" r="100" style="fill:red; stroke:black; stroke-width:0.1cm" transform="translate(0,50)" />
<circle cx="6.5cm" cy="2cm" r="100" style="fill:blue; stroke:black; stroke-width:0.1cm" transform="translate(70,150)" />
<circle cx="6.5cm" cy="2cm" r="100" style="fill:green; stroke:black; stroke-width:0.1cm" transform="translate(-70,150)"/>
</g>
</svg>
"""
return Response(body, mimetype="text/xml")
@app.route("/assert-xpath-simple-namespaces")
def assert_xpath_simple_ns():
body = """<?xml version="1.0"?>
<!-- both namespace prefixes are available throughout -->
<bk:book xmlns:bk='urn:loc.gov:books'
xmlns:isbn='urn:ISBN:0-395-36341-6'>
<bk:title>Cheaper by the Dozen</bk:title>
<isbn:number>1568491379</isbn:number>
</bk:book>
"""
return Response(body, mimetype="text/xml")
@app.route("/assert-xpath-namespaces")
def assert_xpath_ns():
body = """<?xml version="1.0"?>
<!-- initially, the default namespace is "books" -->
<book xmlns='urn:loc.gov:books'
xmlns:isbn='urn:ISBN:0-395-36341-6'>
<title>Cheaper by the Dozen</title>
<isbn:number>1568491379</isbn:number>
<notes>
<!-- make HTML the default namespace for some commentary -->
<p xmlns='http://www.w3.org/1999/xhtml'>
This is a <i>funny</i> book!
</p>
</notes>
</book>
"""
return Response(body, mimetype="text/xml")
| 1,653 | 0 | 88 |
0cc2ebc4ec9efcdb4db40c889232a078bc8dd01c | 587 | py | Python | labs/lab11/ex1/ex1.py | jamestiotio/dbsys | 26f545a51626ce232c0dc26b70ef206e71b273fc | [
"MIT"
] | null | null | null | labs/lab11/ex1/ex1.py | jamestiotio/dbsys | 26f545a51626ce232c0dc26b70ef206e71b273fc | [
"MIT"
] | null | null | null | labs/lab11/ex1/ex1.py | jamestiotio/dbsys | 26f545a51626ce232c0dc26b70ef206e71b273fc | [
"MIT"
] | null | null | null | # The `pyspark` shell command can be used to run an interactive shell for debugging purposes (preferably only on smaller datasets)
import sys
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("Ex1")
sc = SparkContext(conf=conf)
text_file = sc.textFile("hdfs://localhost:9000/input/")
output = text_file.map(foreach)
output.saveAsTextFile("hdfs://localhost:9000/output/")
sc.stop()
| 27.952381 | 130 | 0.708688 | # The `pyspark` shell command can be used to run an interactive shell for debugging purposes (preferably only on smaller datasets)
import sys
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("Ex1")
sc = SparkContext(conf=conf)
def foreach(record):
cols = record.split(",")
if len(cols) > 1:
extra_col = str(cols[0].split(cols[1]))
cols.append(extra_col)
return ",".join(cols)
text_file = sc.textFile("hdfs://localhost:9000/input/")
output = text_file.map(foreach)
output.saveAsTextFile("hdfs://localhost:9000/output/")
sc.stop()
| 155 | 0 | 23 |
d867c00519f15569813bbab583a8d3dc80c57f7f | 23,428 | py | Python | sdk/yapily/models/payment_response.py | bs-yapily/yapily-sdk-python | 0bba45e351b674eb655425a51190f539c4e9896f | [
"MIT"
] | null | null | null | sdk/yapily/models/payment_response.py | bs-yapily/yapily-sdk-python | 0bba45e351b674eb655425a51190f539c4e9896f | [
"MIT"
] | null | null | null | sdk/yapily/models/payment_response.py | bs-yapily/yapily-sdk-python | 0bba45e351b674eb655425a51190f539c4e9896f | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from yapily.models.amount import Amount # noqa: F401,E501
from yapily.models.charge_details import ChargeDetails # noqa: F401,E501
from yapily.models.frequency_response import FrequencyResponse # noqa: F401,E501
from yapily.models.payee import Payee # noqa: F401,E501
from yapily.models.payment_status_details import PaymentStatusDetails # noqa: F401,E501
class PaymentResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'payment_idempotency_id': 'str',
'institution_consent_id': 'str',
'payment_lifecycle_id': 'str',
'status': 'str',
'status_details': 'PaymentStatusDetails',
'payee_details': 'Payee',
'reference': 'str',
'amount': 'float',
'currency': 'str',
'amount_details': 'Amount',
'first_payment_amount': 'Amount',
'first_payment_date_time': 'datetime',
'next_payment_amount': 'Amount',
'next_payment_date_time': 'datetime',
'final_payment_amount': 'Amount',
'final_payment_date_time': 'datetime',
'created_at': 'datetime',
'previous_payment_amount': 'Amount',
'previous_payment_date_time': 'datetime',
'charge_details': 'list[ChargeDetails]',
'scheduled_payment_type': 'str',
'scheduled_payment_date_time': 'datetime',
'frequency': 'FrequencyResponse'
}
attribute_map = {
'id': 'id',
'payment_idempotency_id': 'paymentIdempotencyId',
'institution_consent_id': 'institutionConsentId',
'payment_lifecycle_id': 'paymentLifecycleId',
'status': 'status',
'status_details': 'statusDetails',
'payee_details': 'payeeDetails',
'reference': 'reference',
'amount': 'amount',
'currency': 'currency',
'amount_details': 'amountDetails',
'first_payment_amount': 'firstPaymentAmount',
'first_payment_date_time': 'firstPaymentDateTime',
'next_payment_amount': 'nextPaymentAmount',
'next_payment_date_time': 'nextPaymentDateTime',
'final_payment_amount': 'finalPaymentAmount',
'final_payment_date_time': 'finalPaymentDateTime',
'created_at': 'createdAt',
'previous_payment_amount': 'previousPaymentAmount',
'previous_payment_date_time': 'previousPaymentDateTime',
'charge_details': 'chargeDetails',
'scheduled_payment_type': 'scheduledPaymentType',
'scheduled_payment_date_time': 'scheduledPaymentDateTime',
'frequency': 'frequency'
}
def __init__(self, id=None, payment_idempotency_id=None, institution_consent_id=None, payment_lifecycle_id=None, status=None, status_details=None, payee_details=None, reference=None, amount=None, currency=None, amount_details=None, first_payment_amount=None, first_payment_date_time=None, next_payment_amount=None, next_payment_date_time=None, final_payment_amount=None, final_payment_date_time=None, created_at=None, previous_payment_amount=None, previous_payment_date_time=None, charge_details=None, scheduled_payment_type=None, scheduled_payment_date_time=None, frequency=None): # noqa: E501
"""PaymentResponse - a model defined in Swagger""" # noqa: E501
self._id = None
self._payment_idempotency_id = None
self._institution_consent_id = None
self._payment_lifecycle_id = None
self._status = None
self._status_details = None
self._payee_details = None
self._reference = None
self._amount = None
self._currency = None
self._amount_details = None
self._first_payment_amount = None
self._first_payment_date_time = None
self._next_payment_amount = None
self._next_payment_date_time = None
self._final_payment_amount = None
self._final_payment_date_time = None
self._created_at = None
self._previous_payment_amount = None
self._previous_payment_date_time = None
self._charge_details = None
self._scheduled_payment_type = None
self._scheduled_payment_date_time = None
self._frequency = None
self.discriminator = None
if id is not None:
self.id = id
if payment_idempotency_id is not None:
self.payment_idempotency_id = payment_idempotency_id
if institution_consent_id is not None:
self.institution_consent_id = institution_consent_id
if payment_lifecycle_id is not None:
self.payment_lifecycle_id = payment_lifecycle_id
if status is not None:
self.status = status
if status_details is not None:
self.status_details = status_details
if payee_details is not None:
self.payee_details = payee_details
if reference is not None:
self.reference = reference
if amount is not None:
self.amount = amount
if currency is not None:
self.currency = currency
if amount_details is not None:
self.amount_details = amount_details
if first_payment_amount is not None:
self.first_payment_amount = first_payment_amount
if first_payment_date_time is not None:
self.first_payment_date_time = first_payment_date_time
if next_payment_amount is not None:
self.next_payment_amount = next_payment_amount
if next_payment_date_time is not None:
self.next_payment_date_time = next_payment_date_time
if final_payment_amount is not None:
self.final_payment_amount = final_payment_amount
if final_payment_date_time is not None:
self.final_payment_date_time = final_payment_date_time
if created_at is not None:
self.created_at = created_at
if previous_payment_amount is not None:
self.previous_payment_amount = previous_payment_amount
if previous_payment_date_time is not None:
self.previous_payment_date_time = previous_payment_date_time
if charge_details is not None:
self.charge_details = charge_details
if scheduled_payment_type is not None:
self.scheduled_payment_type = scheduled_payment_type
if scheduled_payment_date_time is not None:
self.scheduled_payment_date_time = scheduled_payment_date_time
if frequency is not None:
self.frequency = frequency
@property
def id(self):
"""Gets the id of this PaymentResponse. # noqa: E501
:return: The id of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PaymentResponse.
:param id: The id of this PaymentResponse. # noqa: E501
:type: str
"""
self._id = id
@property
def payment_idempotency_id(self):
"""Gets the payment_idempotency_id of this PaymentResponse. # noqa: E501
:return: The payment_idempotency_id of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._payment_idempotency_id
@payment_idempotency_id.setter
def payment_idempotency_id(self, payment_idempotency_id):
"""Sets the payment_idempotency_id of this PaymentResponse.
:param payment_idempotency_id: The payment_idempotency_id of this PaymentResponse. # noqa: E501
:type: str
"""
self._payment_idempotency_id = payment_idempotency_id
@property
def institution_consent_id(self):
"""Gets the institution_consent_id of this PaymentResponse. # noqa: E501
:return: The institution_consent_id of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._institution_consent_id
@institution_consent_id.setter
def institution_consent_id(self, institution_consent_id):
"""Sets the institution_consent_id of this PaymentResponse.
:param institution_consent_id: The institution_consent_id of this PaymentResponse. # noqa: E501
:type: str
"""
self._institution_consent_id = institution_consent_id
@property
def payment_lifecycle_id(self):
"""Gets the payment_lifecycle_id of this PaymentResponse. # noqa: E501
:return: The payment_lifecycle_id of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._payment_lifecycle_id
@payment_lifecycle_id.setter
def payment_lifecycle_id(self, payment_lifecycle_id):
"""Sets the payment_lifecycle_id of this PaymentResponse.
:param payment_lifecycle_id: The payment_lifecycle_id of this PaymentResponse. # noqa: E501
:type: str
"""
self._payment_lifecycle_id = payment_lifecycle_id
@property
def status(self):
"""Gets the status of this PaymentResponse. # noqa: E501
:return: The status of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this PaymentResponse.
:param status: The status of this PaymentResponse. # noqa: E501
:type: str
"""
allowed_values = ["PENDING", "FAILED", "DECLINED", "COMPLETED", "EXPIRED", "UNKNOWN", "ACTIVE", "INACTIVE"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def status_details(self):
"""Gets the status_details of this PaymentResponse. # noqa: E501
:return: The status_details of this PaymentResponse. # noqa: E501
:rtype: PaymentStatusDetails
"""
return self._status_details
@status_details.setter
def status_details(self, status_details):
"""Sets the status_details of this PaymentResponse.
:param status_details: The status_details of this PaymentResponse. # noqa: E501
:type: PaymentStatusDetails
"""
self._status_details = status_details
@property
def payee_details(self):
"""Gets the payee_details of this PaymentResponse. # noqa: E501
:return: The payee_details of this PaymentResponse. # noqa: E501
:rtype: Payee
"""
return self._payee_details
@payee_details.setter
def payee_details(self, payee_details):
"""Sets the payee_details of this PaymentResponse.
:param payee_details: The payee_details of this PaymentResponse. # noqa: E501
:type: Payee
"""
self._payee_details = payee_details
@property
def reference(self):
"""Gets the reference of this PaymentResponse. # noqa: E501
:return: The reference of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._reference
@reference.setter
def reference(self, reference):
"""Sets the reference of this PaymentResponse.
:param reference: The reference of this PaymentResponse. # noqa: E501
:type: str
"""
self._reference = reference
@property
def amount(self):
"""Gets the amount of this PaymentResponse. # noqa: E501
:return: The amount of this PaymentResponse. # noqa: E501
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this PaymentResponse.
:param amount: The amount of this PaymentResponse. # noqa: E501
:type: float
"""
self._amount = amount
@property
def currency(self):
"""Gets the currency of this PaymentResponse. # noqa: E501
:return: The currency of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this PaymentResponse.
:param currency: The currency of this PaymentResponse. # noqa: E501
:type: str
"""
self._currency = currency
@property
def amount_details(self):
"""Gets the amount_details of this PaymentResponse. # noqa: E501
:return: The amount_details of this PaymentResponse. # noqa: E501
:rtype: Amount
"""
return self._amount_details
@amount_details.setter
def amount_details(self, amount_details):
"""Sets the amount_details of this PaymentResponse.
:param amount_details: The amount_details of this PaymentResponse. # noqa: E501
:type: Amount
"""
self._amount_details = amount_details
@property
def first_payment_amount(self):
"""Gets the first_payment_amount of this PaymentResponse. # noqa: E501
:return: The first_payment_amount of this PaymentResponse. # noqa: E501
:rtype: Amount
"""
return self._first_payment_amount
@first_payment_amount.setter
def first_payment_amount(self, first_payment_amount):
"""Sets the first_payment_amount of this PaymentResponse.
:param first_payment_amount: The first_payment_amount of this PaymentResponse. # noqa: E501
:type: Amount
"""
self._first_payment_amount = first_payment_amount
@property
def first_payment_date_time(self):
"""Gets the first_payment_date_time of this PaymentResponse. # noqa: E501
:return: The first_payment_date_time of this PaymentResponse. # noqa: E501
:rtype: datetime
"""
return self._first_payment_date_time
@first_payment_date_time.setter
def first_payment_date_time(self, first_payment_date_time):
"""Sets the first_payment_date_time of this PaymentResponse.
:param first_payment_date_time: The first_payment_date_time of this PaymentResponse. # noqa: E501
:type: datetime
"""
self._first_payment_date_time = first_payment_date_time
@property
def next_payment_amount(self):
"""Gets the next_payment_amount of this PaymentResponse. # noqa: E501
:return: The next_payment_amount of this PaymentResponse. # noqa: E501
:rtype: Amount
"""
return self._next_payment_amount
@next_payment_amount.setter
def next_payment_amount(self, next_payment_amount):
"""Sets the next_payment_amount of this PaymentResponse.
:param next_payment_amount: The next_payment_amount of this PaymentResponse. # noqa: E501
:type: Amount
"""
self._next_payment_amount = next_payment_amount
@property
def next_payment_date_time(self):
"""Gets the next_payment_date_time of this PaymentResponse. # noqa: E501
:return: The next_payment_date_time of this PaymentResponse. # noqa: E501
:rtype: datetime
"""
return self._next_payment_date_time
@next_payment_date_time.setter
def next_payment_date_time(self, next_payment_date_time):
"""Sets the next_payment_date_time of this PaymentResponse.
:param next_payment_date_time: The next_payment_date_time of this PaymentResponse. # noqa: E501
:type: datetime
"""
self._next_payment_date_time = next_payment_date_time
@property
def final_payment_amount(self):
"""Gets the final_payment_amount of this PaymentResponse. # noqa: E501
:return: The final_payment_amount of this PaymentResponse. # noqa: E501
:rtype: Amount
"""
return self._final_payment_amount
@final_payment_amount.setter
def final_payment_amount(self, final_payment_amount):
"""Sets the final_payment_amount of this PaymentResponse.
:param final_payment_amount: The final_payment_amount of this PaymentResponse. # noqa: E501
:type: Amount
"""
self._final_payment_amount = final_payment_amount
@property
def final_payment_date_time(self):
"""Gets the final_payment_date_time of this PaymentResponse. # noqa: E501
:return: The final_payment_date_time of this PaymentResponse. # noqa: E501
:rtype: datetime
"""
return self._final_payment_date_time
@final_payment_date_time.setter
def final_payment_date_time(self, final_payment_date_time):
"""Sets the final_payment_date_time of this PaymentResponse.
:param final_payment_date_time: The final_payment_date_time of this PaymentResponse. # noqa: E501
:type: datetime
"""
self._final_payment_date_time = final_payment_date_time
@property
def created_at(self):
"""Gets the created_at of this PaymentResponse. # noqa: E501
:return: The created_at of this PaymentResponse. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this PaymentResponse.
:param created_at: The created_at of this PaymentResponse. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def previous_payment_amount(self):
"""Gets the previous_payment_amount of this PaymentResponse. # noqa: E501
:return: The previous_payment_amount of this PaymentResponse. # noqa: E501
:rtype: Amount
"""
return self._previous_payment_amount
@previous_payment_amount.setter
def previous_payment_amount(self, previous_payment_amount):
"""Sets the previous_payment_amount of this PaymentResponse.
:param previous_payment_amount: The previous_payment_amount of this PaymentResponse. # noqa: E501
:type: Amount
"""
self._previous_payment_amount = previous_payment_amount
@property
def previous_payment_date_time(self):
"""Gets the previous_payment_date_time of this PaymentResponse. # noqa: E501
:return: The previous_payment_date_time of this PaymentResponse. # noqa: E501
:rtype: datetime
"""
return self._previous_payment_date_time
@previous_payment_date_time.setter
def previous_payment_date_time(self, previous_payment_date_time):
"""Sets the previous_payment_date_time of this PaymentResponse.
:param previous_payment_date_time: The previous_payment_date_time of this PaymentResponse. # noqa: E501
:type: datetime
"""
self._previous_payment_date_time = previous_payment_date_time
@property
def charge_details(self):
"""Gets the charge_details of this PaymentResponse. # noqa: E501
:return: The charge_details of this PaymentResponse. # noqa: E501
:rtype: list[ChargeDetails]
"""
return self._charge_details
@charge_details.setter
def charge_details(self, charge_details):
"""Sets the charge_details of this PaymentResponse.
:param charge_details: The charge_details of this PaymentResponse. # noqa: E501
:type: list[ChargeDetails]
"""
self._charge_details = charge_details
@property
def scheduled_payment_type(self):
"""Gets the scheduled_payment_type of this PaymentResponse. # noqa: E501
:return: The scheduled_payment_type of this PaymentResponse. # noqa: E501
:rtype: str
"""
return self._scheduled_payment_type
@scheduled_payment_type.setter
def scheduled_payment_type(self, scheduled_payment_type):
"""Sets the scheduled_payment_type of this PaymentResponse.
:param scheduled_payment_type: The scheduled_payment_type of this PaymentResponse. # noqa: E501
:type: str
"""
self._scheduled_payment_type = scheduled_payment_type
@property
def scheduled_payment_date_time(self):
"""Gets the scheduled_payment_date_time of this PaymentResponse. # noqa: E501
:return: The scheduled_payment_date_time of this PaymentResponse. # noqa: E501
:rtype: datetime
"""
return self._scheduled_payment_date_time
@scheduled_payment_date_time.setter
def scheduled_payment_date_time(self, scheduled_payment_date_time):
"""Sets the scheduled_payment_date_time of this PaymentResponse.
:param scheduled_payment_date_time: The scheduled_payment_date_time of this PaymentResponse. # noqa: E501
:type: datetime
"""
self._scheduled_payment_date_time = scheduled_payment_date_time
@property
def frequency(self):
"""Gets the frequency of this PaymentResponse. # noqa: E501
:return: The frequency of this PaymentResponse. # noqa: E501
:rtype: FrequencyResponse
"""
return self._frequency
@frequency.setter
def frequency(self, frequency):
"""Sets the frequency of this PaymentResponse.
:param frequency: The frequency of this PaymentResponse. # noqa: E501
:type: FrequencyResponse
"""
self._frequency = frequency
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PaymentResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.403873 | 599 | 0.658827 | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from yapily.models.amount import Amount # noqa: F401,E501
from yapily.models.charge_details import ChargeDetails # noqa: F401,E501
from yapily.models.frequency_response import FrequencyResponse # noqa: F401,E501
from yapily.models.payee import Payee # noqa: F401,E501
from yapily.models.payment_status_details import PaymentStatusDetails # noqa: F401,E501
class PaymentResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Swagger model for a single payment. Every declared attribute is exposed
    through a simple pass-through property backed by ``_<name>``; only
    ``status`` carries validation.
    """

    # Attributes:
    #   swagger_types (dict): attribute name -> attribute type.
    #   attribute_map (dict): attribute name -> json key in the API definition.
    swagger_types = {
        'id': 'str',
        'payment_idempotency_id': 'str',
        'institution_consent_id': 'str',
        'payment_lifecycle_id': 'str',
        'status': 'str',
        'status_details': 'PaymentStatusDetails',
        'payee_details': 'Payee',
        'reference': 'str',
        'amount': 'float',
        'currency': 'str',
        'amount_details': 'Amount',
        'first_payment_amount': 'Amount',
        'first_payment_date_time': 'datetime',
        'next_payment_amount': 'Amount',
        'next_payment_date_time': 'datetime',
        'final_payment_amount': 'Amount',
        'final_payment_date_time': 'datetime',
        'created_at': 'datetime',
        'previous_payment_amount': 'Amount',
        'previous_payment_date_time': 'datetime',
        'charge_details': 'list[ChargeDetails]',
        'scheduled_payment_type': 'str',
        'scheduled_payment_date_time': 'datetime',
        'frequency': 'FrequencyResponse'
    }

    attribute_map = {
        'id': 'id',
        'payment_idempotency_id': 'paymentIdempotencyId',
        'institution_consent_id': 'institutionConsentId',
        'payment_lifecycle_id': 'paymentLifecycleId',
        'status': 'status',
        'status_details': 'statusDetails',
        'payee_details': 'payeeDetails',
        'reference': 'reference',
        'amount': 'amount',
        'currency': 'currency',
        'amount_details': 'amountDetails',
        'first_payment_amount': 'firstPaymentAmount',
        'first_payment_date_time': 'firstPaymentDateTime',
        'next_payment_amount': 'nextPaymentAmount',
        'next_payment_date_time': 'nextPaymentDateTime',
        'final_payment_amount': 'finalPaymentAmount',
        'final_payment_date_time': 'finalPaymentDateTime',
        'created_at': 'createdAt',
        'previous_payment_amount': 'previousPaymentAmount',
        'previous_payment_date_time': 'previousPaymentDateTime',
        'charge_details': 'chargeDetails',
        'scheduled_payment_type': 'scheduledPaymentType',
        'scheduled_payment_date_time': 'scheduledPaymentDateTime',
        'frequency': 'frequency'
    }

    def __init__(self, id=None, payment_idempotency_id=None, institution_consent_id=None, payment_lifecycle_id=None, status=None, status_details=None, payee_details=None, reference=None, amount=None, currency=None, amount_details=None, first_payment_amount=None, first_payment_date_time=None, next_payment_amount=None, next_payment_date_time=None, final_payment_amount=None, final_payment_date_time=None, created_at=None, previous_payment_amount=None, previous_payment_date_time=None, charge_details=None, scheduled_payment_type=None, scheduled_payment_date_time=None, frequency=None):  # noqa: E501
        """PaymentResponse - a model defined in Swagger"""  # noqa: E501
        # Snapshot the keyword arguments before any other locals are created;
        # insertion order matches the parameter order above.
        supplied = dict(locals())
        del supplied['self']
        # Every declared attribute gets a None backing field first.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Route explicitly supplied values through the property setters so
        # validation (e.g. for `status`) still applies.
        for attr, value in supplied.items():
            if value is not None:
                setattr(self, attr, value)

    def _plain_property(name):  # noqa: N805 - class-body helper, deleted below
        """Build a pass-through property reading/writing ``_<name>``."""
        def _get(self):
            return getattr(self, '_' + name)

        def _set(self, value):
            setattr(self, '_' + name, value)

        return property(_get, _set, doc="The %s of this PaymentResponse." % name)

    # One pass-through property per attribute; only `status` (below) differs.
    id = _plain_property('id')
    payment_idempotency_id = _plain_property('payment_idempotency_id')
    institution_consent_id = _plain_property('institution_consent_id')
    payment_lifecycle_id = _plain_property('payment_lifecycle_id')
    status_details = _plain_property('status_details')
    payee_details = _plain_property('payee_details')
    reference = _plain_property('reference')
    amount = _plain_property('amount')
    currency = _plain_property('currency')
    amount_details = _plain_property('amount_details')
    first_payment_amount = _plain_property('first_payment_amount')
    first_payment_date_time = _plain_property('first_payment_date_time')
    next_payment_amount = _plain_property('next_payment_amount')
    next_payment_date_time = _plain_property('next_payment_date_time')
    final_payment_amount = _plain_property('final_payment_amount')
    final_payment_date_time = _plain_property('final_payment_date_time')
    created_at = _plain_property('created_at')
    previous_payment_amount = _plain_property('previous_payment_amount')
    previous_payment_date_time = _plain_property('previous_payment_date_time')
    charge_details = _plain_property('charge_details')
    scheduled_payment_type = _plain_property('scheduled_payment_type')
    scheduled_payment_date_time = _plain_property('scheduled_payment_date_time')
    frequency = _plain_property('frequency')

    del _plain_property

    @property
    def status(self):
        """Gets the status of this PaymentResponse.

        :return: The status of this PaymentResponse.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this PaymentResponse.

        :param status: The status of this PaymentResponse.
        :type: str
        :raises ValueError: if `status` is not one of the allowed values.
        """
        allowed_values = ["PENDING", "FAILED", "DECLINED", "COMPLETED", "EXPIRED", "UNKNOWN", "ACTIVE", "INACTIVE"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
                .format(status, allowed_values)
            )
        self._status = status

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model items inside lists.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, PaymentResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 0 | 0 | 0 |
4436123fe12faadfcf9bf87e38c7f22b95826828 | 12,769 | py | Python | twitter_virtual/twitter.py | juuuuuulian/twitter-virtual | 2a5eece80f6f832aa8a8d2355ed61eabd41d5126 | [
"MIT"
] | null | null | null | twitter_virtual/twitter.py | juuuuuulian/twitter-virtual | 2a5eece80f6f832aa8a8d2355ed61eabd41d5126 | [
"MIT"
] | null | null | null | twitter_virtual/twitter.py | juuuuuulian/twitter-virtual | 2a5eece80f6f832aa8a8d2355ed61eabd41d5126 | [
"MIT"
] | null | null | null | """Classes for interacting with the Twitter API."""
import datetime
import oauth2
from urllib.parse import urlencode
import json
from typing import Any
# OAuth 1.0a three-legged flow endpoints.
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
# REST v1.1 resources used by TwitterClient.
LIST_FRIENDS_URL = "https://api.twitter.com/1.1/friends/ids.json"
CREATE_LIST_URL = "https://api.twitter.com/1.1/lists/create.json"
LIST_RATE_LIMITS_URL = "https://api.twitter.com/1.1/application/rate_limit_status.json"
ADD_LIST_MEMBERS_URL = "https://api.twitter.com/1.1/lists/members/create_all.json"
DELETE_LIST_URL = "https://api.twitter.com/1.1/lists/destroy.json"
LOOKUP_FRIENDSHIPS_URL = "https://api.twitter.com/1.1/friendships/lookup.json"
SHOW_USER_URL = "https://api.twitter.com/1.1/users/show.json"
# Base for building user-facing web links (see get_full_list_url).
BASE_WEB_URL = "https://twitter.com"
INVALIDATE_TOKEN_URL = "https://api.twitter.com/1.1/oauth/invalidate_token"
class TwitterClient:
    """Class for interacting with the Twitter API on behalf of a Twitter user via OAuth.

    All requests are signed by an ``oauth2.Client``. Non-200 responses are
    raised as :class:`TwitterError` subclasses; HTTP 429 becomes
    :class:`RateLimitHit`.
    """

    def __init__(self, oauth_client: Any = None, consumer_key: str = None, consumer_secret: str = None, callback_url: str = None) -> None:
        """Initialize an oauth2 client, or stash the one provided.

        :param oauth_client: pre-built oauth2.Client; takes precedence.
        :param consumer_key: Twitter app consumer key (used with consumer_secret).
        :param consumer_secret: Twitter app consumer secret.
        :param callback_url: OAuth callback URL for the request-token step.
        :raises Exception: if neither an oauth_client nor a key/secret pair is given.
        """
        self.callback_url = callback_url
        if oauth_client:
            self.oauth_client = oauth_client
        elif (consumer_key is not None) and (consumer_secret is not None):
            consumer = oauth2.Consumer(key=consumer_key, secret=consumer_secret)
            self.oauth_client = oauth2.Client(consumer)
        else:
            raise Exception("Please supply either an oauth_client argument or a consumer_key + consumer_secret pair")

    @classmethod
    def from_flask_app(cls, flask_app: Any) -> "TwitterClient":
        """Construct a TwitterClient using config from a Flask app."""
        return cls(consumer_key=flask_app.config["TWITTER_CONSUMER_KEY"],
                   consumer_secret=flask_app.config["TWITTER_CONSUMER_SECRET"],
                   callback_url=flask_app.config["TWITTER_CALLBACK_URL"])

    def _request_checked(self, url: str, method: str, rate_limit_msg: str, error_msg: str, body: str = None):
        """Issue a signed request and translate non-200 responses into errors.

        :param url: full request URL (query string already attached).
        :param method: HTTP method for the request.
        :param rate_limit_msg: message for a :class:`RateLimitHit` on HTTP 429.
        :param error_msg: message for a :class:`TwitterError` on other failures.
        :param body: optional urlencoded request body.
        :return: (headers, body) from the underlying oauth2 client.
        :raises RateLimitHit: on HTTP 429.
        :raises TwitterError: on any other non-200 status.
        """
        if body is None:
            headers, resp_body = self.oauth_client.request(url, method=method)
        else:
            headers, resp_body = self.oauth_client.request(url, method=method, body=body)
        if headers.status != 200:
            if headers.status == RateLimitHit.status:
                raise RateLimitHit(rate_limit_msg, headers, resp_body)
            raise TwitterError(error_msg, headers, resp_body)
        return headers, resp_body

    def get_request_token(self) -> oauth2.Token:
        """Get a Twitter OAuth request token for step 1 of OAuth flow.

        :raises OAuthRequestError: on a non-200 response.
        :raises InvalidOAuthResponseError: if the callback is not confirmed.
        """
        request_body = urlencode({'oauth_callback': self.callback_url})
        headers, body = self.oauth_client.request(REQUEST_TOKEN_URL, method='POST', body=request_body)
        if headers.status != 200:
            raise OAuthRequestError("Fetching request token failed", headers, body)
        token = self.parse_oauth_response(headers, body)
        # Twitter must confirm the registered callback, otherwise the
        # three-legged flow cannot proceed.
        if token.callback_confirmed != "true":
            raise InvalidOAuthResponseError("Bad request token response - callback unconfirmed", headers, body)
        return token

    def parse_oauth_response(self, headers: Any, body: bytes) -> oauth2.Token:
        """Parse a Twitter OAuth request token response or an authorize token response.

        :raises InvalidOAuthResponseError: if required token fields are missing.
        """
        try:
            token = oauth2.Token.from_string(body.decode())
        except ValueError:
            raise InvalidOAuthResponseError("Bad OAuth response - missing required values", headers, body)
        return token

    def parse_api_response(self, headers: Any, body: bytes) -> Any:
        """Parse a Twitter API response body and return it as a dict.

        :raises TwitterError: if the body is not valid JSON.
        """
        decoded = body.decode()
        try:
            return json.loads(decoded)
        except json.JSONDecodeError as e:
            raise TwitterError("Parsing API response failed: " + str(e), headers, decoded)

    def get_authorize_url_for_token(self, oauth_token: str) -> str:
        """Get a Twitter OAuth authorization URL for step 2 of OAuth."""
        twitter_auth_url = AUTHORIZE_URL
        # Defensive: only append '?' when the configured URL lacks one.
        if twitter_auth_url[-1] != '?':
            twitter_auth_url = twitter_auth_url + '?'
        return twitter_auth_url + urlencode({"oauth_token": oauth_token})

    def invalidate_token(self) -> bool:
        """Invalidate the current OAuth access token.

        :raises OAuthRequestError: on a non-200 response.
        """
        headers, body = self.oauth_client.request(INVALIDATE_TOKEN_URL, method="POST")
        if headers.status != 200:
            raise OAuthRequestError("Failed to invalidate OAuth access token", headers, body)
        return True

    def get_full_list_url(self, twitter_list: dict) -> str:
        """Get a full Twitter URL from a twitter list returned by the API."""
        return BASE_WEB_URL + twitter_list["uri"]

    def set_client_token(self, oauth_token: str, oauth_token_secret: str, verifier: Any = None) -> oauth2.Token:
        """Create an oauth2.Token and set it on our oauth_client."""
        token = oauth2.Token(oauth_token, oauth_token_secret)
        if verifier:
            token.set_verifier(verifier)
        self.oauth_client.token = token
        return token

    def authorize_oauth_token(self, oauth_token: str, oauth_token_secret: str, oauth_verifier: str) -> oauth2.Token:
        """Get an OAuth token from Twitter using an authorized request token - final step of three-legged OAuth.

        :raises OAuthRequestError: if the token exchange fails.
        """
        self.set_client_token(oauth_token, oauth_token_secret, oauth_verifier)
        headers, body = self.oauth_client.request(ACCESS_TOKEN_URL, method='POST')
        if headers.status != 200:
            raise OAuthRequestError("Request token exchange failed", headers, body)
        token = self.parse_oauth_response(headers, body)
        # set authorized token on our oauth client
        self.oauth_client.token = token
        return token

    def get_following_user_ids(self, screen_name: str, count: int = 5000) -> dict:
        """Get the stringified IDs of the full list of users who screen_name follows."""
        params = {"screen_name": screen_name, "stringify_ids": "true", "count": count}
        headers, body = self._request_checked(
            LIST_FRIENDS_URL + '?' + urlencode(params), 'GET',
            "Too many requests for following users in a 15-minute period!",
            "Fetch following users failed")
        return self.parse_api_response(headers, body)

    def current_user_is_following_user(self, screen_name: str) -> bool:
        """Check if the current user is following screen_name."""
        params = {"screen_name": screen_name}
        headers, body = self._request_checked(
            LOOKUP_FRIENDSHIPS_URL + '?' + urlencode(params), 'GET',
            "Too many friendships lookup requests in a 15-minute window!",
            "Friendships lookup failed")
        users = self.parse_api_response(headers, body)
        # An empty result means no relationship data was returned.
        return bool(users) and 'following' in users[0]["connections"]

    def get_user_profile_img_url(self, screen_name: str) -> "str | None":
        """Get the Twitter profile image URL for <screen_name> (original size).

        :return: the image URL, or None when no profile image URL is present.
        """
        params = {"screen_name": screen_name}
        headers, body = self._request_checked(
            SHOW_USER_URL + '?' + urlencode(params), 'GET',
            "Too many user info lookup requests in a 15-minute window!",
            "User info lookup failed")
        user_info = self.parse_api_response(headers, body)
        profile_img_url = user_info.get("profile_image_url")
        if profile_img_url:
            # Twitter returns the "_normal" thumbnail variant; stripping the
            # suffix yields the original-size image URL.
            return profile_img_url.replace("_normal.", ".")
        return None

    def create_private_list(self, screen_name: str) -> dict:
        """Create a private, empty Twitter list named '<screen_name>'."""
        list_settings = {
            "mode": "private",
            "name": screen_name,
            # NOTE(review): "%-d" (no-pad day) is a glibc strftime extension
            # and fails on Windows - confirm deployment platform.
            "description": "Feed for {} as of {}".format(screen_name, datetime.date.today().strftime("%m/%-d/%y"))
        }
        headers, body = self._request_checked(
            CREATE_LIST_URL + '?' + urlencode(list_settings), 'POST',
            "Too many lists created in a 15-minute window!",
            "Private list creation failed")
        return self.parse_api_response(headers, body)

    def delete_list(self, list_id: str) -> bool:
        """Delete a Twitter list."""
        self._request_checked(
            DELETE_LIST_URL + '?list_id=' + str(list_id), 'POST',
            "Too many delete requests within a 15-minute window!",
            "List delete failed")
        return True

    def get_rate_limit_status(self, resource_type: str, endpoint_uri: str) -> int:
        """Get the remaining number of allowed API requests for a Twitter resource type and one of its endpoints.

        https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status

        N.B. Twitter simply does not return the rate limit status for some rate-limited endpoints, like /lists/create,
        so, don't rely too heavily on what this returns. Look at API response headers instead.
        """
        headers, body = self._request_checked(
            LIST_RATE_LIMITS_URL + '?resource=' + resource_type, 'GET',
            "Too many requests for rate limit status in 15-minute window!",
            "Failed to get rate limit status")
        status_desc_res = self.parse_api_response(headers, body)
        endpoint_status_desc = status_desc_res['resources'].get(resource_type, {}).get(endpoint_uri, {})
        return endpoint_status_desc['remaining']

    def add_users_to_list(self, list_id: str, user_ids: list) -> dict:
        """Add a list of Twitter accounts (user_ids) to a Twitter List (list_id).

        :raises SoftRateLimitHit: when Twitter returns 200 but added nobody.
        """
        create_params = {
            "list_id": list_id,
            "user_id": ",".join(user_ids)
        }
        headers, body = self._request_checked(
            ADD_LIST_MEMBERS_URL, 'POST',
            "Too many members added to a list within a 15-minute window!",
            "Failed to add users to a list",
            body=urlencode(create_params))
        # check for soft rate limit hit: a 200 response whose list has zero
        # members means Twitter silently performed nothing.
        updated_list = self.parse_api_response(headers, body)
        if int(updated_list['member_count']) == 0:
            raise SoftRateLimitHit("Too many list actions performed for today!", headers, body)
        return updated_list
class TwitterError(Exception):
    """Generic Twitter API response error.

    Stashes the HTTP response headers and body (when available) so callers
    can see the exact Twitter response that triggered the failure.
    """

    def __init__(self, message: str = None, headers: Any = None, body: Any = None):
        """Provide a default message and stash API response headers and body."""
        if message is None:
            # Fall back to the concrete subclass name when no message given.
            message = str(type(self))
        super().__init__(message)
        self.message = message
        self.headers = headers
        self.body = body

    def __str__(self):
        """Print details about the API response."""
        full_desc = self.message
        if self.headers or self.body:
            full_desc = full_desc + f'. Response details (headers - body): {str(self.headers)} - {str(self.body)}'
        return full_desc
class OAuthRequestError(TwitterError):
    """Generic Twitter OAuth error (a request in the OAuth flow failed)."""
class InvalidOAuthResponseError(TwitterError):
    """Twitter either rejected our OAuth credentials, or returned an invalid response."""
class RateLimitHit(TwitterError):
    """Twitter rate limit exceeded response error."""

    # HTTP status code Twitter uses for "too many requests"; TwitterClient
    # compares response status against this to pick the exception to raise.
    status = 429  # http status
class SoftRateLimitHit(TwitterError):
    """Twitter soft (hidden) rate limit exceeded - response is 200 but no actions were performed by Twitter.

    This means that the user can't perform the action again for at least the next 24 hours.
    """

    pass
class TooManyFollowing(TwitterError):
    """Raised when the Twitter list would end up with too many members."""
class ZeroFollowing(TwitterError):
    """Raised when the Twitter list would end up with zero members."""
class UserNotFollowingTarget(TwitterError):
    """Raised when the current user isn't following the target user."""
| 43.431973 | 138 | 0.672331 | """Classes for interacting with the Twitter API."""
import datetime
import oauth2
from urllib.parse import urlencode
import json
from typing import Any
# OAuth 1.0a three-legged flow endpoints.
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
# REST v1.1 resources used by TwitterClient.
LIST_FRIENDS_URL = "https://api.twitter.com/1.1/friends/ids.json"
CREATE_LIST_URL = "https://api.twitter.com/1.1/lists/create.json"
LIST_RATE_LIMITS_URL = "https://api.twitter.com/1.1/application/rate_limit_status.json"
ADD_LIST_MEMBERS_URL = "https://api.twitter.com/1.1/lists/members/create_all.json"
DELETE_LIST_URL = "https://api.twitter.com/1.1/lists/destroy.json"
LOOKUP_FRIENDSHIPS_URL = "https://api.twitter.com/1.1/friendships/lookup.json"
SHOW_USER_URL = "https://api.twitter.com/1.1/users/show.json"
# Base for building user-facing web links (see get_full_list_url).
BASE_WEB_URL = "https://twitter.com"
INVALIDATE_TOKEN_URL = "https://api.twitter.com/1.1/oauth/invalidate_token"
class TwitterClient:
"""Class for interacting with the Twitter API on behalf of a Twitter user via OAuth."""
    def __init__(self, oauth_client: Any = None, consumer_key: str = None, consumer_secret: str = None, callback_url: str = None) -> None:
        """Initialize an oauth2 client, or stash the one provided.

        :param oauth_client: pre-built oauth2.Client; takes precedence.
        :param consumer_key: Twitter app consumer key (used with consumer_secret).
        :param consumer_secret: Twitter app consumer secret.
        :param callback_url: OAuth callback URL for the request-token step.
        :raises Exception: if neither an oauth_client nor key/secret pair is supplied.
        """
        self.callback_url = callback_url
        if oauth_client:
            self.oauth_client = oauth_client
        elif (consumer_key is not None) and (consumer_secret is not None):
            consumer = oauth2.Consumer(key=consumer_key, secret=consumer_secret)
            self.oauth_client = oauth2.Client(consumer)
        else:
            raise Exception("Please supply either an oauth_client argument or a consumer_key + consumer_secret pair")
    @classmethod
    def from_flask_app(cls, flask_app: Any):
        """Construct a TwitterClient using config from a Flask app.

        Reads TWITTER_CONSUMER_KEY / TWITTER_CONSUMER_SECRET /
        TWITTER_CALLBACK_URL from ``flask_app.config``.
        """
        return cls(consumer_key=flask_app.config["TWITTER_CONSUMER_KEY"],
                   consumer_secret=flask_app.config["TWITTER_CONSUMER_SECRET"],
                   callback_url=flask_app.config["TWITTER_CALLBACK_URL"])
def get_request_token(self) -> oauth2.Token:
"""Get a Twitter OAuth request token for step 1 of OAuth flow."""
client = self.oauth_client
callback_url = self.callback_url
request_body = urlencode({'oauth_callback': callback_url})
headers, body = client.request(REQUEST_TOKEN_URL, method='POST', body=request_body)
if headers.status != 200:
raise OAuthRequestError("Fetching request token failed", headers, body)
token = self.parse_oauth_response(headers, body)
if token.callback_confirmed != "true":
raise InvalidOAuthResponseError("Bad request token response - callback unconfirmed", headers, body)
return token
def parse_oauth_response(self, headers: Any, body: bytes) -> oauth2.Token:
"""Parse a Twitter OAuth request token response or an authorize token response."""
try:
token = oauth2.Token.from_string(body.decode())
except ValueError:
raise InvalidOAuthResponseError("Bad OAuth response - missing required values", headers, body)
return token
def parse_api_response(self, headers: Any, body: bytes) -> Any:
"""Parse a Twitter API response body and return it as a dict."""
body = body.decode()
try:
parsed_body = json.loads(body)
except json.JSONDecodeError as e:
raise TwitterError("Parsing API response failed: " + str(e), headers, body)
return parsed_body
def get_authorize_url_for_token(self, oauth_token: str) -> str:
"""Get a Twitter OAuth authorization URL for step 2 of OAuth."""
twitter_auth_url = AUTHORIZE_URL
if twitter_auth_url[-1] != '?':
twitter_auth_url = twitter_auth_url + '?'
return twitter_auth_url + urlencode({"oauth_token": oauth_token})
def invalidate_token(self) -> bool:
"""Invalidate the current OAuth access token."""
headers, body = self.oauth_client.request(INVALIDATE_TOKEN_URL, method="POST")
if headers.status != 200:
raise OAuthRequestError("Failed to invalidate OAuth access token", headers, body)
return True
def get_full_list_url(self, twitter_list: dict) -> str:
"""Get a full Twitter URL from a twitter list returned by the API."""
return BASE_WEB_URL + twitter_list["uri"]
def set_client_token(self, oauth_token: str, oauth_token_secret: str, verifier: Any = None) -> oauth2.Token:
"""Create an oauth2.Token and set it on our oauth_client."""
token = oauth2.Token(oauth_token, oauth_token_secret)
if verifier:
token.set_verifier(verifier)
self.oauth_client.token = token
return token
def authorize_oauth_token(self, oauth_token: str, oauth_token_secret: str, oauth_verifier: str) -> oauth2.Token:
"""Get an OAuth token from Twitter using an authorized request token - final step of three-legged OAuth."""
self.set_client_token(oauth_token, oauth_token_secret, oauth_verifier)
headers, body = self.oauth_client.request(ACCESS_TOKEN_URL, method='POST')
if headers.status != 200:
raise OAuthRequestError("Request token exchange failed", headers, body)
token = self.parse_oauth_response(headers, body)
# set authorized token on our oauth client
self.oauth_client.token = token
return token
def get_following_user_ids(self, screen_name: str, count=5000) -> dict:
"""Get the stringified IDs of the full list of users who screen_name follows."""
params = {"screen_name": screen_name, "stringify_ids": "true", "count": count}
headers, body = self.oauth_client.request(LIST_FRIENDS_URL + '?' + urlencode(params), method='GET')
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many requests for following users in a 15-minute period!", headers, body)
raise TwitterError("Fetch following users failed", headers, body)
return self.parse_api_response(headers, body)
def current_user_is_following_user(self, screen_name: str) -> bool:
"""Check if the current user is following screen_name."""
params = {"screen_name": screen_name}
headers, body = self.oauth_client.request(LOOKUP_FRIENDSHIPS_URL + '?' + urlencode(params))
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many friendships lookup requests in a 15-minute window!", headers, body)
raise TwitterError("Friendships lookup failed", headers, body)
users = self.parse_api_response(headers, body)
if len(users) != 0 and ('following' in users[0]["connections"]):
return True
return False
def get_user_profile_img_url(self, screen_name: str) -> bool:
"""Get the Twitter profile image URL for <screen_name> (original size)."""
params = {"screen_name": screen_name}
headers, body = self.oauth_client.request(SHOW_USER_URL + '?' + urlencode(params))
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many user info lookup requests in a 15-minute window!", headers, body)
raise TwitterError("User info lookup failed", headers, body)
user_info = self.parse_api_response(headers, body)
profile_img_url = user_info.get("profile_image_url")
if profile_img_url:
return profile_img_url.replace("_normal.", ".")
return None
def create_private_list(self, screen_name: str) -> dict:
"""Create a private, empty Twitter list named '<screen_name>'."""
list_settings = {
"mode": "private",
"name": screen_name,
"description": "Feed for {} as of {}".format(screen_name, datetime.date.today().strftime("%m/%-d/%y"))
}
headers, body = self.oauth_client.request(CREATE_LIST_URL + '?' + urlencode(list_settings), method='POST')
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many lists created in a 15-minute window!")
raise TwitterError("Private list creation failed", headers, body)
return self.parse_api_response(headers, body)
def delete_list(self, list_id: str) -> bool:
"""Delete a Twitter list."""
headers, body = self.oauth_client.request(DELETE_LIST_URL + '?list_id=' + str(list_id), method='POST')
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many delete requests within a 15-minute window!", headers, body)
raise TwitterError("List delete failed", headers, body)
return True
def get_rate_limit_status(self, resource_type: str, endpoint_uri: str) -> int:
"""Get the remaining number of allowed API requests for a Twitter resource type and one of its endpoints.
https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
N.B. Twitter simply does not return the rate limit status for some rate-limited endpoints, like /lists/create,
so, don't rely too heavily on what this returns. Look at API response headers instead.
"""
headers, body = self.oauth_client.request(LIST_RATE_LIMITS_URL + '?resource=' + resource_type, method='GET')
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many requests for rate limit status in 15-minute window!", headers, body)
raise TwitterError("Failed to get rate limit status", headers, body)
status_desc_res = self.parse_api_response(headers, body)
endpoint_status_desc = status_desc_res['resources'].get(resource_type, {}).get(endpoint_uri, {})
return endpoint_status_desc['remaining']
def add_users_to_list(self, list_id: str, user_ids: list) -> dict:
"""Add a list of Twitter accounts (user_ids) to a Twitter List (list_id)."""
create_params = {
"list_id": list_id,
"user_id": ",".join(user_ids)
}
headers, body = self.oauth_client.request(ADD_LIST_MEMBERS_URL, method='POST', body=urlencode(create_params))
if headers.status != 200:
if headers.status == RateLimitHit.status:
raise RateLimitHit("Too many members added to a list within a 15-minute window!")
raise TwitterError("Failed to add users to a list", headers, body)
# check for soft rate limit hit
updated_list = self.parse_api_response(headers, body)
if int(updated_list['member_count']) == 0:
raise SoftRateLimitHit("Too many list actions performed for today!", headers, body)
return updated_list
class TwitterError(Exception):
    """Generic Twitter API response error.

    Carries the raw API response ``headers`` and ``body`` alongside the
    human-readable ``message`` so callers can inspect what Twitter returned.
    """
    def __init__(self, message: str = None, headers: Any = None, body: Any = None):
        """Provide a default message and stash API response headers and body.

        Annotations fixed: the originals used the builtin ``any`` function as
        a type; ``typing.Any`` (already imported by this module) is intended.
        """
        if message is None:
            # Fall back to the exception's type name so the error is never blank.
            message = str(type(self))
        super().__init__(message)
        self.message = message
        self.headers = headers
        self.body = body
    def __str__(self):
        """Print details about the API response (appended only when present)."""
        full_desc = self.message
        if self.headers or self.body:
            full_desc = full_desc + f'. Response details (headers - body): {str(self.headers)} - {str(self.body)}'
        return full_desc
class OAuthRequestError(TwitterError):
"""Generic Twitter OAuth error."""
pass
class InvalidOAuthResponseError(TwitterError):
"""Twitter either rejected our OAuth credentials, or the response was invalid."""
pass
class RateLimitHit(TwitterError):
    """Twitter rate limit exceeded response error (HTTP 429 Too Many Requests)."""
    status = 429  # http status code callers compare against `headers.status`
class SoftRateLimitHit(TwitterError):
"""Twitter soft (hidden) rate limit exceeded - response is 200 but no actions were performed by Twitter.
This means that the user can't perform the action again for at least the next 24 hours.
"""
pass
class TooManyFollowing(TwitterError):
"""Twitter list would have too many members."""
pass
class ZeroFollowing(TwitterError):
"""Twitter list would have zero members."""
pass
class UserNotFollowingTarget(TwitterError):
"""Current user isn't following the target user."""
pass
| 0 | 0 | 0 |
0dc6e784914d2db2c7185e846d66295cb4f75743 | 2,566 | py | Python | Ene-Jun-2018/Ejemplos/SOLID/liskov-substitution.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ene-Jun-2018/Ejemplos/SOLID/liskov-substitution.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ene-Jun-2018/Ejemplos/SOLID/liskov-substitution.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | #snake_case
#camelCase
#StudlyCase
#kebab-case o slug
# Clase A
# Clase B
# Clase C
# Cliente utiliza Clase A o un contenedor con objetos tipo Clase A
# e.g. lista = [] de solo tipo Clase A
from typing import List
class Grupo:
"""docstring for Banda"""
class GrupoMusical(Grupo):
"""docstring for Banda"""
class GrupoBaile(Grupo):
"""docstring for Banda"""
integrantes = ['Ana', 'Claudia', 'Sleiman', 'Irving', 'Raul']
grupo = Grupo(nombre="Diseรฑo y Arquitectura de Software Sistemas UAdeC", integrantes=integrantes[:])
#print(grupo.agregarIntegrante('Carielo').getPropiedades())
grupoMusical = GrupoMusical(nombre="Los Misticos", integrantes=integrantes[:], genero='metal')
#print(grupoMusical.getPropiedades())
grupoBaile = GrupoBaile(nombre="Los Danzoneros", integrantes=integrantes[:], categoria='flamenco')
#print(grupoBaile.getPropiedades())
grupos = [grupo] # type: List[Grupo]
grupos.append(grupoMusical)
grupos.append(grupoBaile)
for grupo in grupos:
if type(grupo) is GrupoMusical:
print(grupo.retornaX())
else:
print(type(grupo))
| 28.511111 | 103 | 0.644583 | #snake_case
#camelCase
#StudlyCase
#kebab-case o slug
# Clase A
# Clase B
# Clase C
# Cliente utiliza Clase A o un contenedor con objetos tipo Clase A
# e.g. lista = [] de solo tipo Clase A
from typing import List
class Grupo:
    """A named group holding a roster of members (integrantes)."""
    def __init__(self, **args):
        """Accept 'nombre' and 'integrantes' keyword arguments."""
        self.nombre = args.get('nombre')
        self.integrantes = args.get('integrantes', [])
        self.numIntegrantes = len(self.integrantes)
    def agregarIntegrante(self, integrantes):
        """Append one member (str) or extend with many (list); returns self for chaining.

        Exact-type checks are deliberate: subclasses of list/str are rejected,
        matching the original behavior.
        """
        if type(integrantes) is list:
            self.integrantes.extend(integrantes)
            self.numIntegrantes += len(integrantes)
        elif type(integrantes) is str:
            self.integrantes.append(integrantes)
            self.numIntegrantes += 1
        else:
            print('Tipo invalido para integrantes!')
        return self
    def getPropiedades(self):
        """Return a multi-line description of the group's properties."""
        template = "Nombre: {}\nNรบmero de Integrantes: {}\nIntegrantes: {}\nTipo: {}\n"
        return template.format(self.nombre, self.numIntegrantes,
                               self.integrantes, self.__class__.__name__)
class GrupoMusical(Grupo):
    """A musical group: a Grupo with a genre and a logo."""
    def __init__(self, **args):
        """Accept Grupo kwargs plus 'genero' and 'logo'."""
        super().__init__(**args)
        self.genero = args.get('genero')
        self.logo = args.get('logo')
    def getPropiedades(self):
        """Extend the base description with genre and logo lines."""
        base = super().getPropiedades()
        # NOTE: .format is applied to the concatenated string on purpose,
        # matching the original expression exactly.
        return (base + "Gรฉnero: {}\nLogo: {}\n").format(self.genero, self.logo)
    def retornaX(self):
        """Return the literal marker string used by the demo loop below."""
        return 'X!'
class GrupoBaile(Grupo):
    """A dance group: a Grupo with a dance category."""
    def __init__(self, **args):
        """Accept Grupo kwargs plus 'categoria'."""
        super().__init__(**args)
        self.categoria = args.get('categoria')
    def getPropiedades(self):
        """Extend the base description with the dance category line."""
        base = super().getPropiedades()
        return (base + "Categoria: {}\n").format(self.categoria)
# Demo of Liskov substitution: a List[Grupo] may hold any Grupo subclass.
integrantes = ['Ana', 'Claudia', 'Sleiman', 'Irving', 'Raul']
grupo = Grupo(nombre="Diseรฑo y Arquitectura de Software Sistemas UAdeC", integrantes=integrantes[:])
#print(grupo.agregarIntegrante('Carielo').getPropiedades())
grupoMusical = GrupoMusical(nombre="Los Misticos", integrantes=integrantes[:], genero='metal')
#print(grupoMusical.getPropiedades())
grupoBaile = GrupoBaile(nombre="Los Danzoneros", integrantes=integrantes[:], categoria='flamenco')
#print(grupoBaile.getPropiedades())
grupos = [grupo] # type: List[Grupo]
grupos.append(grupoMusical)
grupos.append(grupoBaile)
# NOTE(review): branching on the concrete type defeats substitutability -
# retornaX exists only on GrupoMusical, which is the point of this example.
for grupo in grupos:
    if type(grupo) is GrupoMusical:
        print(grupo.retornaX())
    else:
        print(type(grupo))
| 1,254 | 0 | 213 |
1c5fd36ae0b1a46a987890321b0748ee13ed63f6 | 7,739 | py | Python | navrep/envs/rosnavtrainencodedenv.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | null | null | null | navrep/envs/rosnavtrainencodedenv.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | null | null | null | navrep/envs/rosnavtrainencodedenv.py | ReykCS/navrep | 22ee4727268188414a8121f069e45c2ab798ca19 | [
"MIT"
] | null | null | null | from gym import spaces
import numpy as np
from scipy import interpolate
import yaml
from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.rosnav_models.utils.reward import RewardCalculator
from navrep.rosnav_models.utils.reward import RewardCalculator
class RosnavTrainEncodedEnv(NavRepTrainEnv):
""" takes a (2) action as input
outputs encoded obs (546) """
def setup_by_configuration(
self, robot_yaml_path
):
"""get the configuration from the yaml file, including robot radius, discrete action space and continuous action space.
Args: linear_range
linear_ranger): [description]
"""
with open(robot_yaml_path, "r") as fd:
robot_data = yaml.safe_load(fd)
# get robot radius
for body in robot_data["bodies"]:
if body["name"] == "base_footprint":
for footprint in body["footprints"]:
if footprint["radius"]:
self._robot_radius = footprint["radius"] * 1.05
# get laser related information
for plugin in robot_data["plugins"]:
if plugin["type"] == "Laser":
laser_angle_min = plugin["angle"]["min"]
laser_angle_max = plugin["angle"]["max"]
laser_angle_increment = plugin["angle"]["increment"]
self.laser_range = plugin["range"]
self._laser_num_beams = int(
round(
(laser_angle_max - laser_angle_min)
/ laser_angle_increment
)
+ 1
)
self._laser_max_range = plugin["range"]
self.linear_range = robot_data["robot"]["continuous_actions"]["linear_range"]
self.angular_range = robot_data["robot"]["continuous_actions"]["angular_range"]
@staticmethod | 35.663594 | 127 | 0.580178 | from gym import spaces
import numpy as np
from scipy import interpolate
import yaml
from navrep.envs.navreptrainenv import NavRepTrainEnv
from navrep.rosnav_models.utils.reward import RewardCalculator
from navrep.rosnav_models.utils.reward import RewardCalculator
class RosnavTrainEncodedEnv(NavRepTrainEnv):
""" takes a (2) action as input
outputs encoded obs (546) """
def __init__(self, roboter_yaml_path, roboter="tb3",
reward_fnc="rule_00", scenario='test',
silent=False, adaptive=True, max_steps_per_episode=500):
super(RosnavTrainEncodedEnv, self).__init__(scenario=scenario, silent=silent, adaptive=adaptive,
legacy_mode=False, collect_statistics=True)
self.setup_by_configuration(roboter_yaml_path)
min, max = self._get_action_space(roboter)
self.action_space = spaces.Box(
low=np.array(min),
high=np.array(max),
dtype=np.float,
)
self.observation_space = spaces.Box(
low=0,
high=np.inf,
shape=(self._laser_num_beams + 2,),
dtype=np.float32,
)
self.reward_calculator = RewardCalculator(
robot_radius=self._robot_radius,
safe_dist=1.6 * self._robot_radius,
goal_radius=0.1,
rule=reward_fnc,
extended_eval=True,
)
self._steps_curr_episode = 0
self._max_steps_per_episode = max_steps_per_episode
self.last_observation = None
self.roboter = roboter
def _get_action_space(self, roboter):
if roboter == "ridgeback":
return [self.linear_range[0], 0, self.angular_range[0]], [self.linear_range[1], 0.5, self.angular_range[1]]
return [self.linear_range[0], self.angular_range[0]], [self.linear_range[1], self.angular_range[1]]
def _get_action(self, action):
if self.roboter == "ridgeback":
return np.array(action)
return np.array([action[0], 0, action[1]])
def _get_observation_from_scan(self, obs):
if self.roboter == "tb3":
lidar_upsampling = 1080 // 360
downsampled_scan = obs.reshape((-1, lidar_upsampling))
downsampled_scan = np.min(downsampled_scan, axis=1)
return downsampled_scan
if self.roboter == "jackal" or self.roboter == "ridgeback":
rotated_scan = np.zeros_like(obs)
rotated_scan[:540] = obs[540:]
rotated_scan[540:] = obs[:540]
downsampled = np.zeros(810)
downsampled[:405] = rotated_scan[135:540]
downsampled[405:] = rotated_scan[540:945]
f = interpolate.interp1d(np.arange(0, 810), downsampled)
upsampled = f(np.linspace(0, 810 - 1, 944))
lidar = upsampled.reshape((-1, 2))
lidar = np.min(lidar, axis=1)
return lidar
if self.roboter == "agv":
rotated_scan = np.zeros_like(obs)
rotated_scan[:540] = obs[540:]
rotated_scan[540:] = obs[:540]
downsampled = np.zeros(540)
downsampled[:270] = rotated_scan[270:540]
downsampled[270:] = rotated_scan[540:810]
f = interpolate.interp1d(np.arange(0, 540), downsampled)
return f(np.linspace(0.0, 540 - 1, 720))
    def step(self, action):
        """Step the wrapped env with the roboter-specific action encoding.

        Returns (observation, reward, done, info) in gym style, where the
        observation is the downsampled lidar plus (rho, theta) to the goal.
        NOTE(review): the RewardCalculator path is commented out and the
        returned reward is hardcoded to 100; `print(reward)` looks like a
        leftover debug statement - confirm before production use.
        """
        self._steps_curr_episode += 1
        action_encoded = self._get_action(action)
        obs, reward, done, info = super(RosnavTrainEncodedEnv, self).step(action_encoded)
        lidar, rho, theta = self._encode_obs(obs)
        # reward, reward_info = self.reward_calculator.get_reward(
        #     np.array(lidar),
        #     (rho, theta),
        #     action=np.array([action_encoded[0], action_encoded[2]]),
        #     global_plan=None,
        #     robot_pose=None
        # )
        # done = reward_info["is_done"]
        print(reward)  # debug output of the wrapped env's reward
        # done = reward_info["is_done"]
        observation = np.hstack([lidar, np.array([rho, theta])])
        # if done:
        #     info["done_reason"] = reward_info["done_reason"]
        #     info["is_success"] = reward_info["is_success"]
        # if self._steps_curr_episode > self._max_steps_per_episode:
        #     done = True
        #     info["done_reason"] = 0
        #     info["is_success"] = 0
        # if done:
        #     observation = self.reset()
        # Hardcoded reward (100) instead of the computed one - see NOTE above.
        return observation, 100, done, info
def reset(self, *args, **kwargs):
self.reward_calculator.reset()
self._steps_curr_episode = 0
obs = super(RosnavTrainEncodedEnv, self).reset(*args, **kwargs)
observation, rho, theta = self._encode_obs(obs)
return np.hstack([observation, np.array([rho, theta])])
def _encode_obs(self, obs):
scan, robotstate = obs
lidar = [np.min([self.laser_range, i]) for i in self._get_observation_from_scan(scan)]
self.last_rosnav_scan = lidar
rho, theta = self._get_goal_pose_in_robot_frame(robotstate[:2])
return lidar, rho, theta
def close(self):
super(RosnavTrainEncodedEnv, self).close()
def render(self, mode="human", close=False, save_to_file=False,
robocentric=False, render_decoded_scan=True):
#super(RosnavTrainEncodedEnv, self).render(
# mode=mode, close=close, lidar_scan_override=self.last_rosnav_scan, save_to_file=save_to_file,
# robocentric=robocentric)
pass
def _get_goal_pose_in_robot_frame(self, goal_pos):
y_relative = goal_pos[1]
x_relative = goal_pos[0]
rho = (x_relative ** 2 + y_relative ** 2) ** 0.5
theta = (np.arctan2(y_relative, x_relative) + 4 * np.pi) % (2 * np.pi) - np.pi
return rho, theta
def setup_by_configuration(
self, robot_yaml_path
):
"""get the configuration from the yaml file, including robot radius, discrete action space and continuous action space.
Args: linear_range
linear_ranger): [description]
"""
with open(robot_yaml_path, "r") as fd:
robot_data = yaml.safe_load(fd)
# get robot radius
for body in robot_data["bodies"]:
if body["name"] == "base_footprint":
for footprint in body["footprints"]:
if footprint["radius"]:
self._robot_radius = footprint["radius"] * 1.05
# get laser related information
for plugin in robot_data["plugins"]:
if plugin["type"] == "Laser":
laser_angle_min = plugin["angle"]["min"]
laser_angle_max = plugin["angle"]["max"]
laser_angle_increment = plugin["angle"]["increment"]
self.laser_range = plugin["range"]
self._laser_num_beams = int(
round(
(laser_angle_max - laser_angle_min)
/ laser_angle_increment
)
+ 1
)
self._laser_max_range = plugin["range"]
self.linear_range = robot_data["robot"]["continuous_actions"]["linear_range"]
self.angular_range = robot_data["robot"]["continuous_actions"]["angular_range"]
@staticmethod
def _stack_spaces(ss):
low = []
high = []
for space in ss:
low.extend(space.low.tolist())
high.extend(space.high.tolist())
return spaces.Box(np.array(low).flatten(), np.array(high).flatten()) | 5,468 | 0 | 296 |
750d4d54e1b253e23bab9ee8400fd616fe262f8d | 3,303 | py | Python | preprocess_graph.py | HongyiZhu/EHI | 9fbbc6046546dd7fc6de5d831b4c941bc4404e02 | [
"MIT"
] | null | null | null | preprocess_graph.py | HongyiZhu/EHI | 9fbbc6046546dd7fc6de5d831b4c941bc4404e02 | [
"MIT"
] | null | null | null | preprocess_graph.py | HongyiZhu/EHI | 9fbbc6046546dd7fc6de5d831b4c941bc4404e02 | [
"MIT"
] | null | null | null | from ehi_utils import load_json, dict2dotdict
from mygraph import Graph_Int, Graph_Str
import argparse
import copy
import pickle
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parser feature file generator")
parser.add_argument("--json_path", type=str, required=True,help="Path to the json config file")
args = parser.parse_args()
configs = load_json(args.json_path)
main(dict2dotdict(configs)) | 40.777778 | 108 | 0.604299 | from ehi_utils import load_json, dict2dotdict
from mygraph import Graph_Int, Graph_Str
import argparse
import copy
import pickle
def main(configs):
    """Build per-timespell graphs, accumulate them with exponential decay,
    attach node features, and pickle the result for each timespell.

    :param configs: dot-accessible config with timespells, DATA_PATH,
        weighted_graph and have_features fields.
    Side effects: writes TS<ts>/generated/graphs.pkl for every timespell.
    """
    timespells = configs.timespells
    # load topology: one int-keyed and one str-keyed graph per timespell
    graphs = []
    graphs_str = []
    for ts in range(1, int(timespells)+1):
        edgelist_filename = f"{configs.DATA_PATH}/TS{ts}/user.edgelist"
        graph = Graph_Int()
        graph.read_edgelist(filename=edgelist_filename, weighted=configs.weighted_graph, directed=False)
        graph_str = Graph_Str()
        graph_str.read_edgelist(filename=edgelist_filename, weighted=configs.weighted_graph, directed=False)
        graphs.append(graph)
        graphs_str.append(graph_str)
    # build accumulated graphs: halve old edge weights, add 1 for current edges
    accu_graphs = []
    accu_graphs_str = []
    for i in range(int(timespells)):
        if i == 0:
            accu_graphs.append(copy.deepcopy(graphs[i]))
            accu_graphs_str.append(copy.deepcopy(graphs_str[i]))
        else:
            past_graph = copy.deepcopy(accu_graphs[i-1])
            past_graph_str = copy.deepcopy(accu_graphs_str[i-1])
            # decay past edge weight (int and str graphs are kept in lockstep)
            for (src, dst) in past_graph.G.edges:
                past_graph.G[int(src)][int(dst)]['weight'] /= 2.0
                past_graph_str.G[str(src)][str(dst)]['weight'] /= 2.0
            for (src, dst) in graphs[i].G.edges:
                if past_graph.G.has_edge(int(src), int(dst)):
                    past_graph.G[int(src)][int(dst)]['weight'] += 1.0
                    past_graph_str.G[str(src)][str(dst)]['weight'] += 1.0
                else:
                    past_graph.G.add_edge(int(src), int(dst))
                    past_graph_str.G.add_edge(str(src), str(dst))
                    past_graph.G[int(src)][int(dst)]['weight'] = 1.0
                    past_graph_str.G[str(src)][str(dst)]['weight'] = 1.0
            accu_graphs.append(past_graph)
            accu_graphs_str.append(past_graph_str)
    for i in range(int(timespells)):
        accu_graphs[i].encode_node()
        accu_graphs_str[i].encode_node()
    # load features (or a constant [1] feature when none are provided)
    for ts in range(1, int(timespells)+1):
        if configs.have_features:
            feature_filename = f"{configs.DATA_PATH}/TS{ts}/generated/user.features"
            accu_graphs[ts-1].read_node_features(feature_filename)
            accu_graphs_str[ts-1].read_node_features(feature_filename)
        else:
            for node in accu_graphs[ts-1].G.nodes:
                accu_graphs[ts-1].G.nodes[node]['feature'] = [1,]
            for node in accu_graphs_str[ts-1].G.nodes:
                accu_graphs_str[ts-1].G.nodes[node]['feature'] = [1,]
    # save generated graphs; `with` guarantees the file is closed on error
    # (the original opened/closed manually and could leak on exceptions)
    for ts in range(1, int(timespells)+1):
        output_file = f"{configs.DATA_PATH}/TS{str(ts)}/generated/graphs.pkl"
        with open(output_file, 'wb') as f:
            pickle.dump([accu_graphs[ts-1], accu_graphs_str[ts-1]], f)
if __name__ == "__main__":
    # CLI entry point: --json_path points at the JSON run-configuration file.
    parser = argparse.ArgumentParser(description="Parser feature file generator")
    parser.add_argument("--json_path", type=str, required=True,help="Path to the json config file")
    args = parser.parse_args()
    configs = load_json(args.json_path)
    # Wrap the parsed dict so fields are dot-accessible inside main().
    main(dict2dotdict(configs))
7c564ae4197ee90a4317cec74fc34502972ffc25 | 711 | py | Python | src/samples/pymxs/sphere_borg.py | RegionStormer/3dsMax-Python-HowTos | b86ef45ef4d8dff373bd1cbfe5c4d5b805687339 | [
"MIT"
] | 88 | 2020-03-19T20:24:08.000Z | 2022-03-23T23:13:54.000Z | src/samples/pymxs/sphere_borg.py | RegionStormer/3dsMax-Python-HowTos | b86ef45ef4d8dff373bd1cbfe5c4d5b805687339 | [
"MIT"
] | 5 | 2020-04-09T19:56:47.000Z | 2021-11-12T20:02:18.000Z | src/samples/pymxs/sphere_borg.py | RegionStormer/3dsMax-Python-HowTos | b86ef45ef4d8dff373bd1cbfe5c4d5b805687339 | [
"MIT"
] | 22 | 2020-03-19T19:24:36.000Z | 2022-03-18T21:55:19.000Z | '''
Demonstrates creating objects, object instancing, and object translation.
'''
from pymxs import runtime as rt # pylint: disable=import-error
INST = rt.Name("instance")
def create_borg(obj, num, spacing):
"""Create a bunch of clones of the provided object"""
for i in range(num):
for j in range(num):
for k in range(num):
if i or j or k:
point = rt.Point3(i * spacing, j * spacing, k * spacing)
rt.MaxOps.CloneNodes(obj, cloneType=INST, offset=point)
def main():
"""Create a base object and turn it into a borg, whatever that is."""
obj = rt.sphere()
obj.Radius = 2.0
create_borg(obj, 4, 5.0)
main()
| 29.625 | 77 | 0.603376 | '''
Demonstrates creating objects, object instancing, and object translation.
'''
from pymxs import runtime as rt # pylint: disable=import-error
INST = rt.Name("instance")
def create_borg(obj, num, spacing):
"""Create a bunch of clones of the provided object"""
for i in range(num):
for j in range(num):
for k in range(num):
if i or j or k:
point = rt.Point3(i * spacing, j * spacing, k * spacing)
rt.MaxOps.CloneNodes(obj, cloneType=INST, offset=point)
def main():
"""Create a base object and turn it into a borg, whatever that is."""
obj = rt.sphere()
obj.Radius = 2.0
create_borg(obj, 4, 5.0)
main()
| 0 | 0 | 0 |
cb16fba592e0a2c039cc810b25e1c7db64991366 | 102 | py | Python | sandero.py | CptShock/AuroraModules | 4503326384ed34312be9d1d0f52242f1f8774eeb | [
"MIT"
] | null | null | null | sandero.py | CptShock/AuroraModules | 4503326384ed34312be9d1d0f52242f1f8774eeb | [
"MIT"
] | null | null | null | sandero.py | CptShock/AuroraModules | 4503326384ed34312be9d1d0f52242f1f8774eeb | [
"MIT"
] | null | null | null | import willie
@willie.module.commands('sandero') | 20.4 | 34 | 0.745098 | import willie
@willie.module.commands('sandero')
def sander(bot, trigger):
bot.say('PORN PORN PORN') | 31 | 0 | 22 |
e0bc0b030eda160521fc09b4031b333f5a25bc4b | 7,515 | py | Python | code/GA/mGA_2opt_numpyGA_2.py | KGJsGit/my_Optimization-studio | 1f3f78c22c58017f439c7be8b716a233be872ccc | [
"Apache-2.0"
] | 4 | 2020-06-02T08:27:36.000Z | 2022-03-16T15:09:46.000Z | code/GA/mGA_2opt_numpyGA_2.py | KGJsGit/my_Optimization-studio | 1f3f78c22c58017f439c7be8b716a233be872ccc | [
"Apache-2.0"
] | null | null | null | code/GA/mGA_2opt_numpyGA_2.py | KGJsGit/my_Optimization-studio | 1f3f78c22c58017f439c7be8b716a233be872ccc | [
"Apache-2.0"
] | 5 | 2021-04-21T11:52:33.000Z | 2022-01-15T09:14:45.000Z | import math
import numpy as np
import random
import timeit
from threading import Thread
import functools
dist_ar = [] # ๊ฑฐ๋ฆฌํ(global)
# limit_time = 36 # ์ ํ์๊ฐ(global)
cities_count = 0 # ๋์ ์(global)
dots_list = [] # ๋์ ๋ฆฌ์คํธ(global)
# Hyper Parameter
limits = (60) * 36 # ์ ํ์๊ฐ
MUT = 0.2 # ๋ณ์ดํ๋ฅ
SEL = 0.85 # ์ ํ์
chrCOUNT = 50 # ํด์ง๋จ ๋ด ์ผ์์ฒด ๊ฐ์
selCOUNT = 25 # selection์ ์ ํ๋๋ ์์ ์ผ์์ฒด์ ๊ฐ์
# ์๊ฐ์ ํ ๋ฐ์ฝ๋ ์ดํฐ
# ๊ฑฐ๋ฆฌํ ์ ์(param : ๋ฌธ์ ๊ฒฝ๋ก) : dist_df
# ๊ฑฐ๋ฆฌํ๋ฅผ ์ด์ฉํ ์ ํฉ๋ ๋งค์นญ ํจ์
# 2opt-algorithm
# 0 ~ ranges-1์ ๋ฒ์ ์ค ๋ ๊ฐ๋ฅผ ๋๋ค์ผ๋ก ์ํ๋งํด์ list ๋ฆฌํด
@timeout(limits)
try :
start = timeit.default_timer()
start_GA("2opt_dots/2opt_cycle100.in")
stop = timeit.default_timer()
print(stop - start)
except :
stop = timeit.default_timer()
print(stop - start)
'''
//์ง๋ฌธ
1. 36์ด๋ง์ 200,000์ธ๋๊น์ง ๊ณ์ฐํ๋๊ฑด ์ด๋ ค์ด ์ผ(*GA_2์ฐจ๋ณด๊ณ ์) ๊ฐ์๋ฐ... 36๋ถ์ด ์๋์ง...?
2. population ์ ์ฒด์ 2opt๋ฅผ ์คํํ๋ ์คํ๋ ค ์๋ ด์ ๋ชปํ๋ ๊ฒฐ๊ณผ ๋ฐ์...
ํ์ *selCount๊ฐ์ chromosome์ 2opt๋ฅผ ์ํํ๋๋ ๊ฒฐ๊ณผ๊ฐ ํจ์ฌ ์ข์์.
2opt๋ฅผ ๊ฑฐ์น๋ค๊ณ ํด์ fitness๊ฐ ๋ฌด์กฐ๊ฑด ์ข์์ง๋ ๊ฒ์ ์๋๊ฒ์ ์๊ฒ ๋๋ฐ ์ด๋ป๊ฒ ์ ์ฉํด์ผ ๊ฐ์ฅ ์ต์ ์ผ๊น???
ex) ํ์ ๋ช %๋ง ์ ์ฉ, ์ ์ฉ๊ณผ ๋ฏธ์ ์ฉ์ ๋น๊ตํด์ ์ข์ ๊ฒฝ์ฐ๋ง ๋์น
//์ด์
1. python์์ thread๋ก ํจ์๋ฅผ ์ค๊ฐ์ ์ฃฝ์ด๋๊ฒ windows์์ ์ผ๋ฐ์ ์ผ๋ก ๋ถ๊ฐ. ์ฝ์ง ๋๋ฌด ๋ง์ด ํ์...ใ
๋ฐ์ฝ๋ ์ดํฐ(?)๋ฅผ ๋ง๋ค๊ณ Thread.join (timeout) ๋ฉ์๋๋ฅผ ์ฌ์ฉ.
์ฃฝ์ด๋ ๊ฒ์ ์๋๋ผ์ ๋ฐฑ๊ทธ๋ผ์ด๋์์ ์คํ ์ ์ง.
2. ํน์ ๋ฒ์ ๋ด์ ์ค๋ณต๋์ง ์๋ ๋ ๊ฐ์ ๋๋ค์ ๊ณจ๋ผ๋ด๋ ๊ฒ์๋ ์ฝ์ง ๋ง์ดํจ.
3. ์ฝ๋๋ฅผ ์ข ์์ ํด์ ๋ชจ๋ํ๋ฅผ ์งํํ์ผ๋ ๊ต์๋์ด ์ง๋ํด์ฃผ์๊ธฐ์ ์ข์ง ๋ชปํ ๊ฒ ๊ฐ์์ ๋ค์ ํฉ์นจ...
//๋น๊ต
optGA : numpy + 2-opt GA
numGA : numpy GA
panGA : pandas GA
์๊ฐ์ ํ : 36s
ํ๊ฒ : 2opt_cycle100.in
panGA : generation / fitness
356/2272
375/2349
381/2218
348/2553
381/2467
numGA : generation / fitness
1171/1836
1159/2005
1175/1812
1174/1947
1131/1931
optGA : generation / fitness
1141/1182
1142/1136
1126/1205
1128/1214
1142/1219
'''
| 30.925926 | 118 | 0.578709 | import math
import numpy as np
import random
import timeit
from threading import Thread
import functools
dist_ar = [] # ๊ฑฐ๋ฆฌํ(global)
# limit_time = 36 # ์ ํ์๊ฐ(global)
cities_count = 0 # ๋์ ์(global)
dots_list = [] # ๋์ ๋ฆฌ์คํธ(global)
# Hyper Parameter
limits = (60) * 36 # ์ ํ์๊ฐ
MUT = 0.2 # ๋ณ์ดํ๋ฅ
SEL = 0.85 # ์ ํ์
chrCOUNT = 50 # ํด์ง๋จ ๋ด ์ผ์์ฒด ๊ฐ์
selCOUNT = 25 # selection์ ์ ํ๋๋ ์์ ์ผ์์ฒด์ ๊ฐ์
# ์๊ฐ์ ํ ๋ฐ์ฝ๋ ์ดํฐ
def timeout(seconds_before_timeout):
    """Decorator factory: run the wrapped call in a daemon thread and wait at
    most ``seconds_before_timeout`` seconds for it to finish.

    If the deadline passes, the pre-loaded timeout Exception is raised.
    NOTE: the worker thread is NOT killed - it keeps running in the
    background (Python threads cannot be forcibly stopped, esp. on Windows).
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Pre-load the result slot with a timeout error; the worker
            # overwrites it only if it finishes within the deadline.
            res = [Exception('function [%s] timeout [%s seconds] exceeded!' %(func.__name__, seconds_before_timeout))]
            def newFunc():
                try:
                    res[0] = func(*args, **kwargs)
                except Exception as e:
                    # Stash the worker's exception so the caller re-raises it.
                    res[0] = e
            t = Thread(target=newFunc)
            t.daemon = True  # do not block interpreter exit on a stuck worker
            try:
                t.start()
                t.join(seconds_before_timeout)  # wait up to the time limit
            except Exception as e:
                print('error starting thread')
                raise e
            ret = res[0]
            if isinstance(ret, BaseException):
                raise ret  # timeout placeholder or the worker's own error
            return ret
        return wrapper
    return deco
# ๊ฑฐ๋ฆฌํ ์ ์(param : ๋ฌธ์ ๊ฒฝ๋ก) : dist_df
def make_distDataframe(str):
    """Read a TSP instance file and populate the module-level globals.

    File format: first line is the city count, then one "x y" pair per
    line, and the last line is the time limit. Sets the globals
    cities_count, limit_time, dots_list and dist_ar (a numpy matrix of
    pairwise Euclidean distances rounded to 2 decimals).

    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    call compatibility. The original leaked the file handle - ``with``
    now guarantees it is closed.
    """
    global dist_ar
    global limit_time
    global cities_count
    global dots_list
    with open(str, mode='rt', encoding='utf-8') as reader:
        dots_list = reader.read().split("\n")  # ['x1 y1', 'x2 y2', ... 'xn yn']
    cities_count = int(dots_list.pop(0))   # first line: number of cities
    limit_time = float(dots_list.pop())    # last line: time limit in seconds
    x_list = []  # ['x1', 'x2', ... 'xn'] as floats
    y_list = []  # ['y1', 'y2', ... 'yn'] as floats
    for i in range(cities_count):
        temp = dots_list[i].split(" ")
        x_list.append(float(temp[0]))
        y_list.append(float(temp[1]))
    dist_ar = []
    for n in range(cities_count):
        row = []
        for m in range(cities_count):
            row.append(round(math.sqrt(((x_list[m] - x_list[n]) ** 2) + ((y_list[m] - y_list[n]) ** 2)), 2))
        dist_ar.append(row)
    dist_ar = np.array(dist_ar)
    print(dist_ar)  # debug dump of the distance matrix (kept from original)
# ๊ฑฐ๋ฆฌํ๋ฅผ ์ด์ฉํ ์ ํฉ๋ ๋งค์นญ ํจ์
def cal_fit(stri):
    """Return the total length of the closed tour *stri* using global dist_ar.

    Sums every consecutive leg plus the return leg from the last city back
    to the first. The original loop ran ``for i in range(len(stri)-1)`` and
    then tested ``if i == len(stri)-1`` - a branch that could never fire,
    so the closing edge of the cycle was silently omitted from the fitness.
    """
    fit = 0
    last = len(stri) - 1
    for i in range(len(stri)):
        # wrap around to the start on the final leg
        nxt = 0 if i == last else i + 1
        fit += dist_ar[stri[i], stri[nxt]]
    return fit
# 2opt-algorithm
def optFunc(stri):
    """Apply one random 2-opt move: reverse a random interior segment.

    Picks head in [1, len-3] and tail in [head+1, len-2], then returns a
    new list equal to *stri* with the slice [head..tail] reversed. The two
    randrange calls are kept in the original order so seeded runs match.
    """
    head = random.randrange(1, len(stri) - 2)
    tail = random.randrange(head + 1, len(stri) - 1)
    return stri[:head] + stri[head:tail + 1][::-1] + stri[tail + 1:]
# 0 ~ ranges-1์ ๋ฒ์ ์ค ๋ ๊ฐ๋ฅผ ๋๋ค์ผ๋ก ์ํ๋งํด์ list ๋ฆฌํด
def randomTwo(ranges):
    """Sample two distinct indices from [0, ranges) in ascending order."""
    return sorted(random.sample(range(0, ranges), 2))
def TSP_GA() :
# ํ๊ฒฝ ์ค์ ๋ฐ ์ด๊ธฐํ
generation = 0 # ํ์ฌ ์ธ๋
chromosome = [] # temp chromosome
chromosome_fit = [] # temp fitness
# initialize
for i in range(chrCOUNT) :
# 2opt ์ด์ฉํด์ ์ข ๋ ์ข์ initial chromosome ์ค์
chromosome.append(optFunc(random.sample(range(0, cities_count), cities_count)))
for i in range(chrCOUNT) :
chromosome_fit.append(round(cal_fit(chromosome[i]), 5))
populations = np.array([chromosome, chromosome_fit])
populations = populations.T
# print('์ด๊ธฐ ์ผ์์ฒด : \n', population, '\n์ผ์์ฒด ๋ณ ์ ํฉ๋ :\n', population_fit)
# print(populations)
while 1 :
generation+=1
populations = populations[np.argsort(populations[:, 1])]
# ์ต์ ํ ์๊ณ ๋ฆฌ์ฆ 2-opt ์ฌ์ฉ
for i in range(selCOUNT) :
populations[i+selCOUNT,0] = optFunc(populations[i+selCOUNT,0])
populations[i+selCOUNT,1] = cal_fit(populations[i+selCOUNT,0])
# selection : ํ ๋๋จผํธ์ ํ,
populations = populations[np.argsort(populations[:, 1])]
for endSel in range(selCOUNT) :
# ๋์๋ฃฐ ๋ฐ์์์ผ ํด์ง๋จ ๋ด ๋ ์ ์ ์ ์ ํ, ์ ํ๋์ ๋ฐ์
# ์ ํ๋์๊ฐ ์ ํ์๋ณด๋ค ์์ผ๋ฉด ๋ ์ ์ ์ ์ค ์ข์ ์ ์ ์๊ฐ ์ ํ. ์๋๋ฉด ๋ฐ๋๋ก
parents_index = [0]*2
for i in range(len(parents_index)):
selGeneNum = randomTwo((chrCOUNT-endSel))
match = random.random()
if match < SEL :
if populations[selGeneNum[0],1] < populations[selGeneNum[1],1] :
parents_index[i] = selGeneNum[0]
else:
parents_index[i] = selGeneNum[1]
else :
if populations[selGeneNum[0],1] < populations[selGeneNum[1],1] :
parents_index[i] = selGeneNum[1]
else:
parents_index[i] = selGeneNum[0]
# crossover : order-based crossover
daddy_value = populations[parents_index[0], 0].copy()
mommy_value = populations[parents_index[1], 0].copy()
CsGeneNum = randomTwo(cities_count)
offspring = daddy_value[CsGeneNum[0] : CsGeneNum[1]]
for i in daddy_value[CsGeneNum[0] : CsGeneNum[1]] :
mommy_value.remove(i)
for i in range(len(offspring)) :
mommy_value.insert(CsGeneNum[0]+i, offspring[i])
offspring = mommy_value
offspring_fit = cal_fit(offspring)
# mutation : exchange mutation
mut_p = random.random()
if mut_p < MUT :
MtGeneNum = randomTwo(cities_count)
mut_Temp = offspring[MtGeneNum[0]]
offspring[MtGeneNum[0]] = offspring[MtGeneNum[1]]
offspring[MtGeneNum[1]] = mut_Temp
offspring_fit = cal_fit(offspring)
populations = np.vstack((populations, [offspring, offspring_fit]))
# Replacement
populations = populations[np.argsort(populations[:, 1])]
for i in range(chrCOUNT-selCOUNT) :
np.delete(populations, (chrCOUNT+i), axis=0)
print(generation, '์ธ๋ ์ต์ ํด : \n', populations[0,0],"\n", populations[0,1])
@timeout(limits)
def start_GA(stri) :
make_distDataframe(stri)
TSP_GA()
try :
start = timeit.default_timer()
start_GA("2opt_dots/2opt_cycle100.in")
stop = timeit.default_timer()
print(stop - start)
except :
stop = timeit.default_timer()
print(stop - start)
'''
//์ง๋ฌธ
1. 36์ด๋ง์ 200,000์ธ๋๊น์ง ๊ณ์ฐํ๋๊ฑด ์ด๋ ค์ด ์ผ(*GA_2์ฐจ๋ณด๊ณ ์) ๊ฐ์๋ฐ... 36๋ถ์ด ์๋์ง...?
2. population ์ ์ฒด์ 2opt๋ฅผ ์คํํ๋ ์คํ๋ ค ์๋ ด์ ๋ชปํ๋ ๊ฒฐ๊ณผ ๋ฐ์...
ํ์ *selCount๊ฐ์ chromosome์ 2opt๋ฅผ ์ํํ๋๋ ๊ฒฐ๊ณผ๊ฐ ํจ์ฌ ์ข์์.
2opt๋ฅผ ๊ฑฐ์น๋ค๊ณ ํด์ fitness๊ฐ ๋ฌด์กฐ๊ฑด ์ข์์ง๋ ๊ฒ์ ์๋๊ฒ์ ์๊ฒ ๋๋ฐ ์ด๋ป๊ฒ ์ ์ฉํด์ผ ๊ฐ์ฅ ์ต์ ์ผ๊น???
ex) ํ์ ๋ช %๋ง ์ ์ฉ, ์ ์ฉ๊ณผ ๋ฏธ์ ์ฉ์ ๋น๊ตํด์ ์ข์ ๊ฒฝ์ฐ๋ง ๋์น
//์ด์
1. python์์ thread๋ก ํจ์๋ฅผ ์ค๊ฐ์ ์ฃฝ์ด๋๊ฒ windows์์ ์ผ๋ฐ์ ์ผ๋ก ๋ถ๊ฐ. ์ฝ์ง ๋๋ฌด ๋ง์ด ํ์...ใ
๋ฐ์ฝ๋ ์ดํฐ(?)๋ฅผ ๋ง๋ค๊ณ Thread.join (timeout) ๋ฉ์๋๋ฅผ ์ฌ์ฉ.
์ฃฝ์ด๋ ๊ฒ์ ์๋๋ผ์ ๋ฐฑ๊ทธ๋ผ์ด๋์์ ์คํ ์ ์ง.
2. ํน์ ๋ฒ์ ๋ด์ ์ค๋ณต๋์ง ์๋ ๋ ๊ฐ์ ๋๋ค์ ๊ณจ๋ผ๋ด๋ ๊ฒ์๋ ์ฝ์ง ๋ง์ดํจ.
3. ์ฝ๋๋ฅผ ์ข ์์ ํด์ ๋ชจ๋ํ๋ฅผ ์งํํ์ผ๋ ๊ต์๋์ด ์ง๋ํด์ฃผ์๊ธฐ์ ์ข์ง ๋ชปํ ๊ฒ ๊ฐ์์ ๋ค์ ํฉ์นจ...
//๋น๊ต
optGA : numpy + 2-opt GA
numGA : numpy GA
panGA : pandas GA
์๊ฐ์ ํ : 36s
ํ๊ฒ : 2opt_cycle100.in
panGA : generation / fitness
356/2272
375/2349
381/2218
348/2553
381/2467
numGA : generation / fitness
1171/1836
1159/2005
1175/1812
1174/1947
1131/1931
optGA : generation / fitness
1141/1182
1142/1136
1126/1205
1128/1214
1142/1219
'''
| 5,917 | 0 | 155 |
2bed9ddb6ad3a9d58c7b0651dc53406b3042e361 | 295 | py | Python | 0001/python3/euler_0001.py | jhanschoo/project_euler | 9bba193996aa6a7c39d7b254198903af2e84904c | [
"Apache-2.0"
] | null | null | null | 0001/python3/euler_0001.py | jhanschoo/project_euler | 9bba193996aa6a7c39d7b254198903af2e84904c | [
"Apache-2.0"
] | null | null | null | 0001/python3/euler_0001.py | jhanschoo/project_euler | 9bba193996aa6a7c39d7b254198903af2e84904c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
if __name__ == "__main__":
target = 999
answer = sum_divisible_upto(target, 3) + sum_divisible_upto(target, 5) - sum_divisible_upto(target, 3 * 5)
print(answer)
| 24.583333 | 110 | 0.627119 | #!/usr/bin/env python3
def sum_divisible_upto(target, d):
n = target // d
s = d * n * (n + 1) // 2
return s
if __name__ == "__main__":
target = 999
answer = sum_divisible_upto(target, 3) + sum_divisible_upto(target, 5) - sum_divisible_upto(target, 3 * 5)
print(answer)
| 75 | 0 | 23 |
eb0ca335a4680d238b42ee15cd2910e10d6942cc | 3,158 | py | Python | crawler/crawlers.py | CirXe0N/WebCrawler-Code-Challenge | 7a597759beaabf96e88ba852b883de6195bcb283 | [
"MIT"
] | null | null | null | crawler/crawlers.py | CirXe0N/WebCrawler-Code-Challenge | 7a597759beaabf96e88ba852b883de6195bcb283 | [
"MIT"
] | null | null | null | crawler/crawlers.py | CirXe0N/WebCrawler-Code-Challenge | 7a597759beaabf96e88ba852b883de6195bcb283 | [
"MIT"
] | null | null | null | import asyncio
import json
import logging
from pathlib import Path
from crawler.queues import UniqueQueue
from crawler.scrapers import URLScraper
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%H:%M:%S",
)
| 28.196429 | 79 | 0.569031 | import asyncio
import json
import logging
from pathlib import Path
from crawler.queues import UniqueQueue
from crawler.scrapers import URLScraper
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s",
datefmt="%H:%M:%S",
)
class Crawler:
def __init__(self, initial_url: str, out_path: Path, num_workers: int = 5):
self.tasks = []
self.visited_urls = {}
self.num_workers = num_workers
self.initial_url = initial_url
self.queue = UniqueQueue()
self.out_path = out_path
def _add_to_queue(self, url: str) -> None:
"""
Add URL to the queue to be crawled.
"""
has_valid_start = url.startswith(self.initial_url)
is_new = url not in self.visited_urls
if has_valid_start and is_new:
self.queue.put_nowait(url)
def _add_to_visited_urls(self, queue_url: str, url: str) -> None:
"""
Add found URL to list of the scraped URL.
"""
if queue_url not in self.visited_urls:
self.visited_urls[queue_url] = set()
if url.startswith(self.initial_url):
self.visited_urls[queue_url].add(url)
def _write_to_file(self) -> None:
"""
Write the visited URLs to the JSON file.
"""
filepath = Path(self.out_path / 'out.json')
filepath.parent.mkdir(parents=True, exist_ok=True)
with filepath.open('w') as file:
for k, v in self.visited_urls.items():
self.visited_urls[k] = list(v)
file.write(json.dumps(self.visited_urls, indent=2))
async def _start_worker(self, name):
"""
Start worker to consume the Queue with URLs.
"""
num_retries = 0
while True:
try:
queue_url = self.queue.get_nowait()
urls = URLScraper(url=queue_url).run()
for url in urls:
self._add_to_visited_urls(queue_url, url)
self._add_to_queue(url)
logging.info(f'{name} crawled URL: {queue_url}')
self.queue.task_done()
num_retries = False
except asyncio.QueueEmpty:
num_retries += 1
finally:
if num_retries > 3:
break
await asyncio.sleep(1)
async def run(self):
"""
Run crawler to walk from the specified initial URL.
"""
tasks = []
if self.num_workers <= 0:
raise ValueError('The number of workers must be higher than 1.')
await self.queue.put(self.initial_url)
for i in range(self.num_workers):
name = f'Worker-{i + 1}'
logging.info(f'Starting {name}')
task = asyncio.create_task(self._start_worker(name=name))
tasks.append(task)
await asyncio.gather(*tasks, return_exceptions=True)
self._write_to_file()
logging.info(
f'Crawled successfully! Open the file '
f'"{self.out_path / "out.json"}" for the results.'
)
| 255 | 2,607 | 23 |
8330bd7007f57511510cfe305c6c3ea32a0d30a9 | 2,518 | py | Python | alphafold/Data/Tools/hhblits.py | YaoYinYing/OpenFold2 | 57fd3cfba0bc70a2ca4c6943ba00e1c4892c1945 | [
"MIT"
] | null | null | null | alphafold/Data/Tools/hhblits.py | YaoYinYing/OpenFold2 | 57fd3cfba0bc70a2ca4c6943ba00e1c4892c1945 | [
"MIT"
] | null | null | null | alphafold/Data/Tools/hhblits.py | YaoYinYing/OpenFold2 | 57fd3cfba0bc70a2ca4c6943ba00e1c4892c1945 | [
"MIT"
] | null | null | null | from pathlib import Path
from alphafold.Data.Tools import utils
from typing import Optional, Callable, Any, Mapping, Sequence | 27.977778 | 121 | 0.656473 | from pathlib import Path
from alphafold.Data.Tools import utils
from typing import Optional, Callable, Any, Mapping, Sequence
class HHBlits:
_DEFAULT_P = 20
_DEFAULT_Z = 500
def __init__(self,
binary_path: Path,
databases: Sequence[Path],
n_cpu: int=4,
n_iter: int=3,
e_value: float=1e-3,
maxseq: int=1e6,
realign_max: int=1e5,
maxfilt: int=1e5,
min_prefilter_hits: int=1000,
all_seqs: bool=False,
alt: Optional[int]=None,
p: int=_DEFAULT_P,
z: int=_DEFAULT_Z):
self.binary_path = binary_path
self.databases = databases
for database_path in self.databases:
if not database_path.glob('_*'):
print(f'HHBlits: Cant find database {database_path}')
raise ValueError(f'HHBlits: Cant find database {database_path}')
self.n_cpu = n_cpu
self.n_iter = n_iter
self.e_value = e_value
self.maxseq = maxseq
self.realign_max = realign_max
self.maxfilt = maxfilt
self.min_prefilter_hits = min_prefilter_hits
self.all_seqs = all_seqs
self.alt = alt
self.p = p
self.z = z
def query(self, input_fasta_path: Path) -> Mapping[str, Any]:
with utils.tmpdir_manager() as query_tmp_dir:
a3m_path = query_tmp_dir / Path('output.a3m')
db_cmd = []
for db_path in self.databases:
db_cmd += ['-d', db_path.as_posix()]
cmd = [
self.binary_path,
'-i', input_fasta_path.as_posix(),
'-cpu', str(self.n_cpu),
'-oa3m', a3m_path.as_posix(),
'-o', '/dev/null',
'-n', str(self.n_iter),
'-e', str(self.e_value),
'-maxseq', str(self.maxseq),
'-realign_max', str(self.realign_max),
'-maxfilt', str(self.maxfilt),
'-min_prefilter_hits', str(self.min_prefilter_hits)
]
if self.all_seqs:
cmd += ['-all']
if self.alt:
cmd += ['-alt', str(self.alt)]
if self.p != HHBlits._DEFAULT_P:
cmd += ['-P', str(self.p)]
if self.z != HHBlits._DEFAULT_Z:
cmd += ['-Z', str(self.z)]
cmd += db_cmd
print(f'Launching subprocess {"".join(cmd)}')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with utils.timing(f'HHBlits query'):
stdout, stderr = process.communicate()
retcode = process.wait()
if retcode:
raise RuntimeError(f"HHBlits failed:\nstdout:\n{stdout.decode('utf-8')}\nstderr:\n{stderr[:500000].decode('utf-8')}")
with open(a3m_path) as f:
a3m = f.read()
raw_output = dict(
a3m=a3m,
output=stdout,
stderr=stderr,
n_iter=self.n_iter,
e_value=self.e_value
)
return raw_output | 2,294 | 76 | 23 |
c069958fbf792e5e39ba289824eccf81eabed4b8 | 2,491 | py | Python | server/simsvc/__init__.py | INTENS-FI/intens | b2a2131241a88b0d80a5091679d6efb6c56098fb | [
"MIT"
] | null | null | null | server/simsvc/__init__.py | INTENS-FI/intens | b2a2131241a88b0d80a5091679d6efb6c56098fb | [
"MIT"
] | null | null | null | server/simsvc/__init__.py | INTENS-FI/intens | b2a2131241a88b0d80a5091679d6efb6c56098fb | [
"MIT"
] | null | null | null | """Distributed simulation service.
"""
# Ensure .config is loaded - it sets up our Dask config.
from .config import Config
def create_app(**kws):
"""Create and return the simulation server Flask app.
kws is passed to the SocketIO constructor.
Also start up coroutines to periodically sync tasks and pack the ZODB
database unless socketio.async_mode is 'threading', in which case only sync
and pack once to avoid concurrency issues. async_mode can be forced by
specifying it in kws, otherwise the Sockiet.IO library auto-detects and
prefers Eventlet. We assume that greenthreads are safe against concurrency
issues, in particular the standard library has not been monkey-patched.
"""
from urllib.parse import urljoin
from flask_socketio import SocketIO
from .tasks import TaskFlask
from .auth import Auth
from . import sockio, jobs, vars
app = TaskFlask(__name__)
app.config.from_object(Config)
auth = Auth(app)
p = app.config['SIMSVC_ROOT']
socketio = sockio.bind_socketio(app, path=urljoin(p, "socket.io"), **kws)
app.register_blueprint(jobs.jobs_bp, url_prefix=urljoin(p, "jobs"))
app.register_blueprint(vars.get_vars, url_prefix=urljoin(p, "default"),
url_defaults={"vtype": "default", "job": None})
app.register_blueprint(
vars.get_vars,
url_prefix=urljoin(p, "jobs/<int:job>/<any(inputs, results):vtype>"))
app.register_blueprint(vars.set_vars, url_prefix=urljoin(p, "default"))
# As a side effect this ensures that app.db and app.client are created.
# If either one is going to fail, we want to know now.
app.logger.info("Connected to database %s", app.db.storage.getName())
cores = app.client.ncores()
app.logger.info("%d workers with %d cores",
len(cores), sum(cores.values()))
if socketio.async_mode == 'threading':
app.logger.info("Periodic sync & pack disabled; only doing once.")
app.sync_tasks()
app.db.pack(days=7)
else:
socketio.start_background_task(task_syncer)
socketio.start_background_task(zodb_packer)
return app
| 36.632353 | 79 | 0.659173 | """Distributed simulation service.
"""
# Ensure .config is loaded - it sets up our Dask config.
from .config import Config
def create_app(**kws):
"""Create and return the simulation server Flask app.
kws is passed to the SocketIO constructor.
Also start up coroutines to periodically sync tasks and pack the ZODB
database unless socketio.async_mode is 'threading', in which case only sync
and pack once to avoid concurrency issues. async_mode can be forced by
specifying it in kws, otherwise the Sockiet.IO library auto-detects and
prefers Eventlet. We assume that greenthreads are safe against concurrency
issues, in particular the standard library has not been monkey-patched.
"""
from urllib.parse import urljoin
from flask_socketio import SocketIO
from .tasks import TaskFlask
from .auth import Auth
from . import sockio, jobs, vars
app = TaskFlask(__name__)
app.config.from_object(Config)
auth = Auth(app)
p = app.config['SIMSVC_ROOT']
socketio = sockio.bind_socketio(app, path=urljoin(p, "socket.io"), **kws)
app.register_blueprint(jobs.jobs_bp, url_prefix=urljoin(p, "jobs"))
app.register_blueprint(vars.get_vars, url_prefix=urljoin(p, "default"),
url_defaults={"vtype": "default", "job": None})
app.register_blueprint(
vars.get_vars,
url_prefix=urljoin(p, "jobs/<int:job>/<any(inputs, results):vtype>"))
app.register_blueprint(vars.set_vars, url_prefix=urljoin(p, "default"))
# As a side effect this ensures that app.db and app.client are created.
# If either one is going to fail, we want to know now.
app.logger.info("Connected to database %s", app.db.storage.getName())
cores = app.client.ncores()
app.logger.info("%d workers with %d cores",
len(cores), sum(cores.values()))
if socketio.async_mode == 'threading':
app.logger.info("Periodic sync & pack disabled; only doing once.")
app.sync_tasks()
app.db.pack(days=7)
else:
def task_syncer():
while True:
app.sync_tasks()
socketio.sleep(30)
def zodb_packer():
while True:
app.logger.info("Packing the database")
app.db.pack(days=7)
socketio.sleep(86400) # 24 h
socketio.start_background_task(task_syncer)
socketio.start_background_task(zodb_packer)
return app
| 247 | 0 | 60 |
2258584dfde8e66b7e016573d08f52b0d4c15a5c | 788 | py | Python | boboleetcode/Play-Leetcode-master/0226-Invert-Binary-Tree/py-0226/Solution2.py | yaominzh/CodeLrn2019 | adc727d92904c5c5d445a2621813dfa99474206d | [
"Apache-2.0"
] | 2 | 2021-03-25T05:26:55.000Z | 2021-04-20T03:33:24.000Z | boboleetcode/Play-Leetcode-master/0226-Invert-Binary-Tree/py-0226/Solution2.py | mcuallen/CodeLrn2019 | adc727d92904c5c5d445a2621813dfa99474206d | [
"Apache-2.0"
] | 6 | 2019-12-04T06:08:32.000Z | 2021-05-10T20:22:47.000Z | boboleetcode/Play-Leetcode-master/0226-Invert-Binary-Tree/py-0226/Solution2.py | mcuallen/CodeLrn2019 | adc727d92904c5c5d445a2621813dfa99474206d | [
"Apache-2.0"
] | null | null | null | # Source : https://leetcode.com/problems/invert-binary-tree/
# Author : penpenps
# Time : 2019-08-01
# BFS, iterative
# Time Complexity: O(n), the number of tree's nodes
# Space Complexity: O(n), the max node number by level
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 28.142857 | 60 | 0.57868 | # Source : https://leetcode.com/problems/invert-binary-tree/
# Author : penpenps
# Time : 2019-08-01
# BFS, iterative
# Time Complexity: O(n), the number of tree's nodes
# Space Complexity: O(n), the max node number by level
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root: return root
queue = [root]
while queue:
node = queue.pop(0)
node.left, node.right = node.right, node.left
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return root
| 349 | -6 | 49 |
1dc3915d669aa9bfc68047f334962a2e28668c8c | 1,083 | py | Python | alipay/aop/api/response/AlipaySocialBaseQuestInstancesQueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipaySocialBaseQuestInstancesQueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipaySocialBaseQuestInstancesQueryResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.QuestInstanceDTO import QuestInstanceDTO
| 32.818182 | 116 | 0.682364 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.QuestInstanceDTO import QuestInstanceDTO
class AlipaySocialBaseQuestInstancesQueryResponse(AlipayResponse):
def __init__(self):
super(AlipaySocialBaseQuestInstancesQueryResponse, self).__init__()
self._instances = None
@property
def instances(self):
return self._instances
@instances.setter
def instances(self, value):
if isinstance(value, list):
self._instances = list()
for i in value:
if isinstance(i, QuestInstanceDTO):
self._instances.append(i)
else:
self._instances.append(QuestInstanceDTO.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipaySocialBaseQuestInstancesQueryResponse, self).parse_response_content(response_content)
if 'instances' in response:
self.instances = response['instances']
| 677 | 189 | 23 |
f7707bffeea7f123fa38fc412bc9a7d48ca9988e | 10,700 | py | Python | tacotron/models.py | rhoposit/tacotron2 | 2dad8df5ea50459789e16d9effb83fc2a25e42ed | [
"BSD-3-Clause"
] | null | null | null | tacotron/models.py | rhoposit/tacotron2 | 2dad8df5ea50459789e16d9effb83fc2a25e42ed | [
"BSD-3-Clause"
] | null | null | null | tacotron/models.py | rhoposit/tacotron2 | 2dad8df5ea50459789e16d9effb83fc2a25e42ed | [
"BSD-3-Clause"
] | null | null | null | # ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: Yusuke Yasuda (yasuda@nii.ac.jp)
# All rights reserved.
# ==============================================================================
""" Models. """
import tensorflow as tf
from tacotron.modules import Embedding
from tacotron.tacotron_v1 import EncoderV1, DecoderV1
from tacotron.hooks import MetricsSaver, PostNetMetricsSaver
from util.audio import Audio
| 57.219251 | 152 | 0.538505 | # ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: Yusuke Yasuda (yasuda@nii.ac.jp)
# All rights reserved.
# ==============================================================================
""" Models. """
import tensorflow as tf
from tacotron.modules import Embedding
from tacotron.tacotron_v1 import EncoderV1, DecoderV1
from tacotron.hooks import MetricsSaver, PostNetMetricsSaver
from util.audio import Audio
class SingleSpeakerTacotronV1Model(tf.estimator.Estimator):
def __init__(self, params, model_dir=None, config=None, warm_start_from=None):
def model_fn(features, labels, mode, params):
is_training = mode == tf.estimator.ModeKeys.TRAIN
is_validation = mode == tf.estimator.ModeKeys.EVAL
is_prediction = mode == tf.estimator.ModeKeys.PREDICT
embedding = Embedding(params.num_symbols, embedding_dim=params.embedding_dim)
encoder = EncoderV1(is_training,
cbhg_out_units=params.cbhg_out_units,
conv_channels=params.conv_channels,
max_filter_width=params.max_filter_width,
projection1_out_channels=params.projection1_out_channels,
projection2_out_channels=params.projection2_out_channels,
num_highway=params.num_highway,
prenet_out_units=params.encoder_prenet_out_units,
drop_rate=params.encoder_prenet_drop_rate)
decoder = DecoderV1(prenet_out_units=params.decoder_prenet_out_units,
drop_rate=params.decoder_prenet_drop_rate,
attention_out_units=params.attention_out_units,
decoder_out_units=params.decoder_out_units,
num_codes=params.num_codes,
outputs_per_step=params.outputs_per_step,
max_iters=params.max_iters,
n_feed_frame=params.n_feed_frame)
target = labels.codes if (is_training or is_validation) else None
embedding_output = embedding(features.source)
encoder_output = encoder(embedding_output)
codes_output, stop_token, decoder_state = decoder(encoder_output,
is_training=is_training,
is_validation=is_validation,
memory_sequence_length=features.source_length,target=target)
alignment = tf.transpose(decoder_state[0].alignment_history.stack(), [1, 2, 0])
global_step = tf.train.get_global_step()
if mode is not tf.estimator.ModeKeys.PREDICT:
codes_loss = self.codes_loss(code_output, labels.codes,
labels.codes_loss_mask)
done_loss = self.binary_loss(stop_token, labels.done, labels.binary_loss_mask)
loss = code_loss + done_loss
if is_training:
lr = self.learning_rate_decay(
params.initial_learning_rate, global_step) if params.decay_learning_rate else tf.convert_to_tensor(
params.initial_learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=params.adam_beta1,
beta2=params.adam_beta2, epsilon=params.adam_eps)
gradients, variables = zip(*optimizer.compute_gradients(loss))
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
self.add_training_stats(loss, codes_loss, done_loss, lr)
# Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. See:
# https://github.com/tensorflow/tensorflow/issues/1122
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.apply_gradients(zip(clipped_gradients, variables), global_step=global_step)
summary_writer = tf.summary.FileWriter(model_dir)
alignment_saver = MetricsSaver([alignment],
global_step,
codes_output,
labels.codes,
labels.target_length,
features.id,
features.text,
params.alignment_save_steps,
mode, summary_writer,
params.save_training_time_metrics,
params.keep_eval_results_max_epoch)
hooks = [alignment_saver]
if params.record_profile:
profileHook = tf.train.ProfilerHook(save_steps=params.profile_steps, output_dir=model_dir,
show_dataflow=True, show_memory=True)
hooks.append(profileHook)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op,
training_hooks=hooks)
if is_validation:
# validation with teacher forcing
codes_output_with_teacher, stop_token_with_teacher, _ = decoder(encoder_output,
is_training=is_training,
is_validation=is_validation,
memory_sequence_length=features.source_length,
target=target,
teacher_forcing=True)
codes_loss_with_teacher = self.spec_loss(codes_output_with_teacher, labels.codes, labels.codes_loss_mask)
done_loss_with_teacher = self.binary_loss(stop_token_with_teacher, labels.done, labels.binary_loss_mask)
loss_with_teacher = codes_loss_with_teacher + done_loss_with_teacher
eval_metric_ops = self.get_validation_metrics(codes_loss, done_loss, loss_with_teacher, codes_loss_with_teacher, done_loss_with_teacher)
summary_writer = tf.summary.FileWriter(model_dir)
alignment_saver = MetricsSaver([alignment],
global_step,
codes_output,
labels.codes,
labels.target_length,
features.id,
features.text,
1,
mode, summary_writer,
params.save_training_time_metrics,
params.keep_eval_results_max_epoch)
return tf.estimator.EstimatorSpec(mode, loss=loss,
evaluation_hooks=[alignment_saver],
eval_metric_ops=eval_metric_ops)
if is_prediction:
return tf.estimator.EstimatorSpec(mode, predictions={
"id": features.id,
"codes": codes_output,
"alignment": alignment,
"source": features.source,
"text": features.text,
})
super(SingleSpeakerTacotronV1Model, self).__init__(
model_fn=model_fn, model_dir=model_dir, config=config,
params=params, warm_start_from=warm_start_from)
@staticmethod
def codes_loss(y_hat, y, mask, n_priority_freq=None, priority_w=0):
l1_loss = tf.abs(y_hat - y)
# Priority L1 loss
if n_priority_freq is not None and priority_w > 0:
priority_loss = tf.abs(y_hat[:, :, :n_priority_freq] - y[:, :, :n_priority_freq])
l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss
return tf.losses.compute_weighted_loss(l1_loss, weights=tf.expand_dims(mask, axis=2))
@staticmethod
def binary_loss(done_hat, done, mask):
return tf.losses.sigmoid_cross_entropy(done, tf.squeeze(done_hat, axis=-1), weights=mask)
@staticmethod
def learning_rate_decay(init_rate, global_step):
warmup_steps = 4000.0
step = tf.to_float(global_step + 1)
return init_rate * warmup_steps ** 0.5 * tf.minimum(step * warmup_steps ** -1.5, step ** -0.5)
@staticmethod
def add_training_stats(loss, codes_loss, done_loss, learning_rate):
if loss is not None:
tf.summary.scalar("loss_with_teacher", loss)
if codes_loss is not None:
tf.summary.scalar("codes_loss", codes_loss)
tf.summary.scalar("codes_loss_with_teacher", codes_loss)
if done_loss is not None:
tf.summary.scalar("done_loss", done_loss)
tf.summary.scalar("done_loss_with_teacher", done_loss)
tf.summary.scalar("learning_rate", learning_rate)
return tf.summary.merge_all()
@staticmethod
def get_validation_metrics(codes_loss, done_loss, loss_with_teacher, codes_loss_with_teacher, done_loss_with_teacher):
metrics = {}
if codes_loss is not None:
metrics["codes_loss"] = tf.metrics.mean(codes_loss)
if done_loss is not None:
metrics["done_loss"] = tf.metrics.mean(done_loss)
if loss_with_teacher is not None:
metrics["loss_with_teacher"] = tf.metrics.mean(loss_with_teacher)
if codes_loss_with_teacher is not None:
metrics["codes_loss_with_teacher"] = tf.metrics.mean(codes_loss_with_teacher)
if done_loss_with_teacher is not None:
metrics["done_loss_with_teacher"] = tf.metrics.mean(done_loss_with_teacher)
return metrics
| 9,855 | 290 | 23 |
a014f0d726ae5f8dc4a25ff35886e47de774496b | 21,523 | py | Python | mirari/INT/models.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | mirari/INT/models.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | 18 | 2019-12-27T19:58:20.000Z | 2022-02-27T08:17:49.000Z | mirari/INT/models.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from mirari.mirari.models import *
from .vars import *
########################################################################################
########################################################################################
VARS = {
'NAME':'Catรกlogo',
'PLURAL':'Catรกlogos',
'MODEL':'Catalogue',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
#{
#'field': 'code',
#'title': 'Cรณdigo',
#'width': '50',
#'url': 'property_url_update'
#},
{
'field': 'get_name_with_color',
'title': 'Nombre',
'template': '{{get_name_with_color}}',
'sortable': 'desc',
'url': 'url_update'
},
{
'field': 'description',
'title': 'Descripciรณn',
'template': '<div class="kt-regular-font-size-sm5" style="line-height:15px;">{{property_get_description}}</div>',
},
],
'SERIALIZER': ('get_description','get_name_with_color'),
'SEARCH': ['name','code'],
'SORTEABLE': ['code'],
'SUMMERNOTE': ['description'],
'EXCLUDE_FORM': ['code','is_active','active','organization'],
}
########################################################################################
########################################################################################
VARS = {
'NAME':'Equipo',
'PLURAL':'Equipos',
'MODEL':'Team',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
'url': 'property_url_update',
'width':'200',
'sorteable': True,
'serchable': True,
},
{
'field': 'get_code',
'title': 'Cรณdigo',
'width':'200',
},
{
'field': 'get_members',
'title': 'Miebros',
'template': '<div class="kt-regular-font-size-sm5" style="line-height:15px;">{{get_members}}</div>',
},
],
'SELECTQ': {
'members': {
'plugin': 'select2',
},
},
}
########################################################################################
########################################################################################
VARS = {
'NAME':'Manual',
'PLURAL':'Manuales',
'MODEL':'Handbook',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
'template':
"""
<a href="{{file}}" target="_blank class="a-no">
{{name}}
</a>
""",
'url': 'url_update',
'sorteable': True,
'serchable': True,
},
{
'field': 'get_notes',
'title': 'Notas',
'template':
"""
<small>
{{get_notes}}
</small>
""",
'url': 'url_update',
},
],
'HIDE_BUTTONS_UPDATE': True,
'SUMMERNOTE': ['notes'],
}
########################################################################################
########################################################################################
VARS = {
'NAME':'Canal de comunicaciรณn',
'PLURAL':'Canales de comunicaciรณn',
'MODEL':'Channel',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'esta',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
},
{
'field': 'property_get_audience',
'title': 'Audiencia',
},
{
'field': 'property_get_list_administrators',
'title': 'Administradores',
},
],
'SEARCH': ['title'],
'SELECTQ': {
'user_admin': {
'model': ['mirari','User'],
'plugin': 'selectmultiple',
},
'team_admin': {
'model': ['INT','Team'],
'plugin': 'selectmultiple',
},
'notify_user': {
'model': ['mirari','User'],
'plugin': 'selectmultiple',
},
'notify_team': {
'model': ['INT','Team'],
'plugin': 'selectmultiple',
},
},
'SORTEABLE': ['name'],
}
#######################################################################################
#######################################################################################
VARS = {
'NAME':'Notificaciรณn',
'PLURAL':'Notificaciones',
'MODEL':'Notification',
'NEW':'NUEVA',
'NEW_GENDER': 'una nueva',
'THIS': 'esta',
'APP':APP,
'LIST': [
{
'field': 'title',
'title': 'Tรญtulo',
},
{
'field': 'get_channel',
'title': 'Canal',
},
{
'field': 'get_creation_date',
'title': 'Creado',
},
{
'field': 'get_expiration_date',
'title': 'Expira',
},
{
'field': 'get_status',
'title': 'Estatus',
},
],
'FORM': ('channel','title','message','files','status','datetime_expire','hide_content',),
'SEARCH': ['name'],
'SELECTQ': {
'channel': {
'plugin': 'select2',
},
},
'SORTEABLE': ['creation_date'],
'SUMMERNOTE': ['message'],
}
@receiver(post_save, sender=Notification)
########################################################################################
########################################################################################
VARS = {
'NAME':'Buzon Interno',
'PLURAL':'Buzones Internos',
'MODEL':'InternalMailBox',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
},
{
'field': 'emails',
'title': 'Destino',
},
{
'field': 'description',
'title': 'Descripciรณn',
},
],
'SELECTQ': {
'availability': {
'plugin': 'selectmultiple',
},
},
}
########################################################################################
########################################################################################
VARS = {
'NAME':'Email de Buzon Interno',
'PLURAL':'Emails Buzones Internos',
'MODEL':'InternalMailBox_Mail',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'SUMMERNOTE': ['message'],
'FORM': [
Div(
HTML('<div class="m--margin-bottom-10"><span>El mail que envies no se almacena y nos aseguramos que solo sea leido por los destinatarios del buzรณn</span></div>'),
Div('message'),
css_class="col-md-12"
),
],
'FORM_SIZE': 'col-xl-12',
'SUBMIT_BUTTONS': "InternalMailBox_Mail__SUBMIT_BUTTONS.html",
'EXCLUDE_PERMISSIONS': ['all'],
} | 40.609434 | 222 | 0.565256 | # -*- coding: utf-8 -*-
from mirari.mirari.models import *
from .vars import *
########################################################################################
########################################################################################
VARS = {
'NAME':'Catรกlogo',
'PLURAL':'Catรกlogos',
'MODEL':'Catalogue',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
#{
#'field': 'code',
#'title': 'Cรณdigo',
#'width': '50',
#'url': 'property_url_update'
#},
{
'field': 'get_name_with_color',
'title': 'Nombre',
'template': '{{get_name_with_color}}',
'sortable': 'desc',
'url': 'url_update'
},
{
'field': 'description',
'title': 'Descripciรณn',
'template': '<div class="kt-regular-font-size-sm5" style="line-height:15px;">{{property_get_description}}</div>',
},
],
'SERIALIZER': ('get_description','get_name_with_color'),
'SEARCH': ['name','code'],
'SORTEABLE': ['code'],
'SUMMERNOTE': ['description'],
'EXCLUDE_FORM': ['code','is_active','active','organization'],
}
class Catalogue(Model_base):
organization = models.ForeignKey('mirari.Organization', blank=True, null=True, on_delete=models.CASCADE, related_name='+',)
code = models.CharField('Cรณdigo del documento', max_length=25, blank=True, null=True)
name = models.CharField('Nombre del documento', max_length=500)
description = models.TextField('Notas', max_length=500, blank=True, null=True)
is_active = models.BooleanField('Esta activo?', default=True)
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(self.name)
def QUERY(self, view):
if view.request.user.has_perm(self.model.VARS['APP']+'.Can_Update__'+self.model.VARS['MODEL']):
return Catalogue.objects.filter(organization__pk=view.request.session.get('organization'), active=True)
else:
return Catalogue.objects.filter(organization__pk=view.request.session.get('organization'), is_active=True, active=True)
def get_description(self):
return self.render_if(self.description)
def get_name_with_color(self):
class_color = ''
if not self.is_active:
class_color = 'm--font-danger'
return '<div class="{1}">{0}</div>'.format(self, class_color)
########################################################################################
########################################################################################
VARS = {
'NAME':'Equipo',
'PLURAL':'Equipos',
'MODEL':'Team',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
'url': 'property_url_update',
'width':'200',
'sorteable': True,
'serchable': True,
},
{
'field': 'get_code',
'title': 'Cรณdigo',
'width':'200',
},
{
'field': 'get_members',
'title': 'Miebros',
'template': '<div class="kt-regular-font-size-sm5" style="line-height:15px;">{{get_members}}</div>',
},
],
'SELECTQ': {
'members': {
'plugin': 'select2',
},
},
}
class Team(Model_base):
organization = models.ForeignKey('mirari.Organization', blank=True, null=True, on_delete=models.CASCADE, related_name='+',)
name = models.CharField('Nombre', max_length=250)
code = models.CharField('Cรณdigo', max_length=250, blank=True, null=True)
members = models.ManyToManyField('mirari.User', blank=True, related_name='+', verbose_name='Miembros de equipo')
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(self.name)
def get_members(self):
return self.render_list(self.members, 'visible_username')
def get_user_team(self, user):
return Team.objects.filter(members=user)
def get_code(self):
return self.render_if(self.code)
########################################################################################
########################################################################################
VARS = {
'NAME':'Manual',
'PLURAL':'Manuales',
'MODEL':'Handbook',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
'template':
"""
<a href="{{file}}" target="_blank class="a-no">
{{name}}
</a>
""",
'url': 'url_update',
'sorteable': True,
'serchable': True,
},
{
'field': 'get_notes',
'title': 'Notas',
'template':
"""
<small>
{{get_notes}}
</small>
""",
'url': 'url_update',
},
],
'HIDE_BUTTONS_UPDATE': True,
'SUMMERNOTE': ['notes'],
}
def path_Handbook_file(self, filename):
upload_to = "companys/%s_%s/INT/Handbook/%s" % (self.organization.id, self.organization.code, filename)
return upload_to
class Handbook(Model_base):
organization = models.ForeignKey('mirari.Organization', blank=True, null=True, on_delete=models.CASCADE, related_name='+',)
name = models.CharField('Nombre', max_length=250)
file = models.FileField('Archivo', upload_to=path_Handbook_file)
notes = models.TextField('Notas', blank=True, null=True)
is_active = models.BooleanField(default=True)
creation_date = models.DateTimeField(auto_now_add=True)
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(self.name)
def QUERY(self, view):
if view.request.user.has_perm(self.model.VARS['APP']+'.Can_Update__'+self.model.VARS['MODEL']):
return Handbook.objects.filter(organization__pk=view.request.session.get('organization'), active=True)
else:
return Handbook.objects.filter(organization__pk=view.request.session.get('organization'), is_active=True, active=True)
def get_notes(self):
return self.render_if(self.notes)
########################################################################################
########################################################################################
VARS = {
'NAME':'Canal de comunicaciรณn',
'PLURAL':'Canales de comunicaciรณn',
'MODEL':'Channel',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'esta',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
},
{
'field': 'property_get_audience',
'title': 'Audiencia',
},
{
'field': 'property_get_list_administrators',
'title': 'Administradores',
},
],
'SEARCH': ['title'],
'SELECTQ': {
'user_admin': {
'model': ['mirari','User'],
'plugin': 'selectmultiple',
},
'team_admin': {
'model': ['INT','Team'],
'plugin': 'selectmultiple',
},
'notify_user': {
'model': ['mirari','User'],
'plugin': 'selectmultiple',
},
'notify_team': {
'model': ['INT','Team'],
'plugin': 'selectmultiple',
},
},
'SORTEABLE': ['name'],
}
class Channel(Model_base):
organization = models.ForeignKey('mirari.Organization', on_delete=models.CASCADE, related_name='+')
name = models.CharField('Nombre del canal', max_length=250)
user_admin = models.ManyToManyField('mirari.User', blank=True, related_name='+', verbose_name='Lo administran usuarios')
team_admin = models.ManyToManyField('Team', blank=True, related_name='+', verbose_name='Lo administran equipos')
notify_user = models.ManyToManyField('mirari.User', blank=True, related_name='+', verbose_name='Destinado a usuarios')
notify_team = models.ManyToManyField('Team', blank=True, related_name='+', verbose_name='Destinado a equipos', help_text="Si el campo anterior y este estan vacios, se notificara a todos los equipos.")
is_active = models.BooleanField('Canal activo', default=True)
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(self.name)
def get_targets(self):
users = []
for team in self.notify_team.all():
for member in team.members.all():
if not member in users:
users.append(member.pk)
return (User.objects.filter(pk__in=users).all() | self.notify_user.all()).distinct()
def get_audience(self):
return len(self.get_targets())
def get_list_administrators(self):
users = []
for team in self.team_admin.all():
for member in team.members.all():
if not member in users:
users.append(member.pk)
return self.render_list((User.objects.filter(pk__in=users).all() | self.user_admin.all()).distinct(), 'visible_username')
#######################################################################################
#######################################################################################
VARS = {
'NAME':'Notificaciรณn',
'PLURAL':'Notificaciones',
'MODEL':'Notification',
'NEW':'NUEVA',
'NEW_GENDER': 'una nueva',
'THIS': 'esta',
'APP':APP,
'LIST': [
{
'field': 'title',
'title': 'Tรญtulo',
},
{
'field': 'get_channel',
'title': 'Canal',
},
{
'field': 'get_creation_date',
'title': 'Creado',
},
{
'field': 'get_expiration_date',
'title': 'Expira',
},
{
'field': 'get_status',
'title': 'Estatus',
},
],
'FORM': ('channel','title','message','files','status','datetime_expire','hide_content',),
'SEARCH': ['name'],
'SELECTQ': {
'channel': {
'plugin': 'select2',
},
},
'SORTEABLE': ['creation_date'],
'SUMMERNOTE': ['message'],
}
def path_Notification_file(self, filename):
upload_to = "companys/%s_%s/INT/Notification/%s" % (self.organization.id, self.organization.code, filename)
return upload_to
class Notification(Model_base):
uuid = models.UUIDField(default=uuid.uuid4)
organization = models.ForeignKey('mirari.Organization', on_delete=models.CASCADE, related_name='+')
channel = models.ForeignKey('Channel', on_delete=models.PROTECT, related_name='+', verbose_name="Canal(es) por donde envias")
title = models.CharField('Tรญtulo', max_length=250)
message = models.TextField('Mensaje')
files = models.FileField('Archivo(s) adjunto(s)', upload_to=path_Notification_file, blank=True, null=True)
status = models.CharField('Estatus', max_length=250, choices=NOTIFICATION_STATUS, default='Borrador')
datetime_expire = models.DateTimeField('Fecha de expiraciรณn', blank=True, null=True, help_text='Este mensaje expira?, dejalo vacio si no expira.')
sended = models.BooleanField('Enviado?', default=False, help_text="Indica si esta notificaciรณn ya fue enviada.")
creation_date = models.DateTimeField(auto_now_add=True)
craeted_by = models.ForeignKey('mirari.User', on_delete=models.SET_NULL, blank=True, null=True, related_name='+', verbose_name="Canal(es) por donde envias")
sended_to = models.ManyToManyField('mirari.User', blank=True, related_name='+', verbose_name='Enviado a...')
readed_by = models.ManyToManyField('mirari.User', blank=True, related_name='+', verbose_name='Leido por...')
hide_content = models.BooleanField('Ocultar contenido?', default=True, help_text="Si ocultas el contenido el usuario deberรก ingresar usuario y contraseรฑa para ver el contenido.")
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(self.title)
def QUERY(self, view):
if view.request.user.is_superuser:
channel = Channel.objects.filter(organization__pk=view.request.session.get('organization'), is_active=True, active=True)
else:
channel = Channel.objects.filter(Q(team_admin__members=view.request.user, is_active=True, active=True) | Q(user_admin=view.request.user, is_active=True, active=True))
return Notification.objects.filter(Q(channel__in=channel, datetime_expire__isnull=True, active=True, is_active=True)|Q(channel__in=channel, datetime_expire__gt=datetime.datetime.now(), active=True, is_active=True))
def SELECTQ__channel(self, model=None, view=None):
if view.request.user.is_superuser:
query = model.objects.filter(organization__pk=view.request.session.get('organization'), is_active=True, active=True)
else:
query = model.objects.filter(Q(team_admin__members=view.request.user, is_active=True, active=True) | Q(user_admin=view.request.user, is_active=True, active=True))
return query
def url_detail(self):
return reverse('INT:Notification__DetailView', kwargs={'uuid':self.uuid,})
def url_update(self):
if self.sended:
return '#'
if not 'update' in self.exclude_permissions():
return reverse('mirari:Generic__UpdateView', kwargs={'app': self.VARS['APP'], 'model': self.VARS['MODEL'], 'pk': self.pk})
else:
return None
#######
def get_user_notifications(self, user):
return Notification.objects.all()
def get_channel(self):
return str(self.channel)
def get_creation_date(self):
return self.render_datetime(self.creation_date)
def get_expiration_date(self):
if self.datetime_expire:
return self.render_datetime(self.datetime_expire)
return '-'
def get_sended(self):
return self.render_boolean(self.sended)
def get_status(self):
return self.status
def send_notifications(self):
reminders = self.channel
return True
def get_targets(self):
return self.channel.get_targets()
def get_user_notification(self, user):
return Notification.objects.filter(sended_to = user)[0:50]
def send_mail(self):
email_host = HostEmail.objects.filter(module__code=APP, organization=self.organization).first()
connection = get_connection(host=email_host.host , port=email_host.port, username=email_host.username, password=email_host.password, use_tls=True)
connection.open()
for target in self.get_targets():
try:
if target.email:
context = {
'notification': self,
'destinatary': target
}
template = render_to_string('email/default/base_email.html', context)
msg = EmailMultiAlternatives(
subject=self.title,
body=template,
from_email=email_host.prefix +'<'+email_host.email+'>',
to=[target.email],
connection=connection
)
msg.attach_alternative(template, "text/html")
msg.send(True)
except:
pass
transaction.on_commit(
lambda: self.sended_to.add(*self.get_targets())
)
connection.close()
return True
@receiver(post_save, sender=Notification)
def notification_post_save(sender, instance=None, created=None, **kwargs):
if instance.sended == False and instance.status == 'Publicado':
instance.sended = True
instance.save()
send_mail_task.delay(app='INT', model='Notification', pk=instance.pk)
########################################################################################
########################################################################################
VARS = {
'NAME':'Buzon Interno',
'PLURAL':'Buzones Internos',
'MODEL':'InternalMailBox',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'LIST': [
{
'field': 'name',
'title': 'Nombre',
},
{
'field': 'emails',
'title': 'Destino',
},
{
'field': 'description',
'title': 'Descripciรณn',
},
],
'SELECTQ': {
'availability': {
'plugin': 'selectmultiple',
},
},
}
class InternalMailBox(Model_base):
organization = models.ForeignKey('mirari.Organization', blank=True, null=True, on_delete=models.CASCADE, related_name='+',)
name = models.CharField('Nombre del buzรณn', max_length=250)
slug = models.CharField('Nombre del buzรณn', max_length=250, editable=False)
emails = models.CharField("Email's de los destinatarios", max_length=500, help_text="Separa con ',' o ';' los correos de los destinatarios")
description = models.TextField("Descripciรณn del buzรณn", blank=True, null=True)
availability = models.ManyToManyField('Team', blank=True, related_name='+', verbose_name='Disponible para estos equipos', help_text="Si lo dejas en blanco, estara disponible para cualquier persona de la organizaciรณn.")
is_active = models.BooleanField('Buzรณn activo?', default=True)
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(self.VARS['NAME'])
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save()
def get_user_mailbox(self, user):
return InternalMailBox.objects.filter( Q(organization = user.organization, availability__isnull=True, is_active=True) | Q(organization = user.organization, availability__in=user.get_teams(), is_active=True) )
def url_sendmailbox(self):
return reverse('INT:InternalMailBox_Mail__CreateView', kwargs={'pk':self.pk,'slug':self.slug,'app':APP,'model':'InternalMailBox_Mail'})
########################################################################################
########################################################################################
VARS = {
'NAME':'Email de Buzon Interno',
'PLURAL':'Emails Buzones Internos',
'MODEL':'InternalMailBox_Mail',
'NEW':'NUEVO',
'NEW_GENDER': 'un nuevo',
'THIS': 'este',
'APP':APP,
'SUMMERNOTE': ['message'],
'FORM': [
Div(
HTML('<div class="m--margin-bottom-10"><span>El mail que envies no se almacena y nos aseguramos que solo sea leido por los destinatarios del buzรณn</span></div>'),
Div('message'),
css_class="col-md-12"
),
],
'FORM_SIZE': 'col-xl-12',
'SUBMIT_BUTTONS': "InternalMailBox_Mail__SUBMIT_BUTTONS.html",
'EXCLUDE_PERMISSIONS': ['all'],
}
class InternalMailBox_Mail(Model_base):
internalmailbox = models.ForeignKey('InternalMailBox', on_delete=models.CASCADE, related_name='+')
message = models.TextField("Mensaje")
creation_date = models.DateTimeField(auto_now_add=True)
VARS = VARS
class Meta(Model_base.Meta):
verbose_name = VARS['NAME']
verbose_name_plural = VARS['PLURAL']
permissions = permissions(VARS)
def __str__(self):
return '{0}'.format(str(self.pk))
def get_targets(self):
return self.internalmailbox.emails.replace(' ','').replace(',',';').split(';')
def send_mail(self):
email_host = HostEmail.objects.filter(module__code=APP, organization=self.organization).first()
connection = get_connection(host=email_host.host , port=email_host.port, username=email_host.username, password=email_host.password, use_tls=True)
connection.open()
for target in self.get_targets():
context = {
'mail': self.internalmailbox,
'message': self.message
}
template = render_to_string('email/default/InternalMailBox_Mail.html', context)
msg = EmailMultiAlternatives(
subject=self.internalmailbox,
body=template,
from_email=email_host.prefix +'<'+email_host.email+'>',
to=[target],
connection=connection
)
msg.attach_alternative(template, "text/html")
msg.send(True)
connection.close()
return True | 7,271 | 6,766 | 220 |
4394303aa2a3be1178c47d1ef15a6995ca8b3adb | 5,786 | py | Python | plateseg/predict.py | Pablo1990/platelet-unet-watershed | 66aab7620d2e1646e3d4d33fe85c1691c63353cb | [
"BSD-3-Clause"
] | null | null | null | plateseg/predict.py | Pablo1990/platelet-unet-watershed | 66aab7620d2e1646e3d4d33fe85c1691c63353cb | [
"BSD-3-Clause"
] | null | null | null | plateseg/predict.py | Pablo1990/platelet-unet-watershed | 66aab7620d2e1646e3d4d33fe85c1691c63353cb | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
import os
import itertools
import torch
from tqdm import tqdm
import numpy as np
import toolz as tz
import napari
from napari.qt import thread_worker
from skimage.exposure import rescale_intensity
from . import unet
from . import watershed as ws
u_state_fn = os.path.join(
os.path.dirname(__file__), 'data/unet-210525-zyxmc.pt'
)
u = unet.UNet(in_channels=1, out_channels=5)
IGNORE_CUDA = False
map_location = torch.device('cpu') # for loading the pre-existing unet
if torch.cuda.is_available() and not IGNORE_CUDA:
u.cuda()
map_location=None
u.load_state_dict(
torch.load(u_state_fn, map_location=map_location)
)
@tz.curry
def throttle_function(func, every_n=1000):
"""Return a copy of function that only runs every n calls.
This is useful when attaching a slow callback to a frequent event.
Parameters
----------
func : callable
The input function.
every_n : int
Number of ignored calls before letting another call through.
"""
counter = 0
return throttled
if __name__ == '__main__':
import nd2_dask as nd2
#data_fn = '/data/platelets/200519_IVMTR69_Inj4_dmso_exp3.nd2'
data_fn = os.path.expanduser(
'~/Dropbox/share-files/200519_IVMTR69_Inj4_dmso_exp3.nd2'
)
layer_list = nd2.nd2_reader.nd2_reader(data_fn)
t_idx = 114
source_vol = layer_list[2][0]
vol2predict = rescale_intensity(
np.asarray(source_vol[t_idx])
).astype(np.float32)
prediction_output = np.zeros((5,) + vol2predict.shape, dtype=np.float32)
size = (10, 256, 256)
chunk_starts, chunk_crops = make_chunks(vol2predict.shape, size, (1, 0, 0))
viewer = napari.Viewer(ndisplay=3)
l0 = viewer._add_layer_from_data(*layer_list[0])[0]
l1 = viewer._add_layer_from_data(*layer_list[1])[0]
l2 = viewer._add_layer_from_data(*layer_list[2])[0]
offsets = -0.5 * np.asarray(l0.scale)[-3:] * np.eye(5, 3)
prediction_layers = viewer.add_image(
prediction_output,
channel_axis=0,
name=['z-aff', 'y-aff', 'x-aff', 'mask', 'centroids'],
scale=l0.scale[-3:],
translate=list(np.asarray(l0.translate[-3:]) + offsets),
colormap=['bop purple', 'bop orange', 'bop orange', 'gray', 'gray'],
visible=[False, False, False, True, False],
)
viewer.dims.set_point(0, t_idx)
labels = np.pad(
np.zeros(prediction_output.shape[1:], dtype=np.uint32),
1,
mode='constant',
constant_values=0,
)
labels_layer = viewer.add_labels(
labels[1:-1, 1:-1, 1:-1],
name='watershed',
scale=prediction_layers[-1].scale,
translate=prediction_layers[-1].translate,
)
# closure to connect to threadworker signal
refresh_labels = throttle_function(labels_layer.refresh, every_n=10000)
segment_worker = thread_worker(
segment,
connect={'yielded': refresh_labels}
)
prediction_worker = thread_worker(
predict_output_chunks,
connect={
'yielded': refresh_prediction_layers,
'returned': segment_worker
},
)
prediction_worker(u, vol2predict, size, prediction_output, margin=0)
napari.run()
| 31.107527 | 90 | 0.614587 | # coding: utf-8
import os
import itertools
import torch
from tqdm import tqdm
import numpy as np
import toolz as tz
import napari
from napari.qt import thread_worker
from skimage.exposure import rescale_intensity
from . import unet
from . import watershed as ws
u_state_fn = os.path.join(
os.path.dirname(__file__), 'data/unet-210525-zyxmc.pt'
)
u = unet.UNet(in_channels=1, out_channels=5)
IGNORE_CUDA = False
map_location = torch.device('cpu') # for loading the pre-existing unet
if torch.cuda.is_available() and not IGNORE_CUDA:
u.cuda()
map_location=None
u.load_state_dict(
torch.load(u_state_fn, map_location=map_location)
)
def make_chunks(arr_shape, chunk_shape, margin):
ndim = len(arr_shape)
if type(margin) == int:
margin = [margin] * ndim
starts = []
crops = []
for dim in range(ndim):
arr = arr_shape[dim]
chk = chunk_shape[dim]
mrg = margin[dim]
start = np.arange(0, arr - 2*mrg, chk - 2*mrg)
start[-1] = arr - chk
if len(start) > 1 and start[-1] == start[-2]:
start = start[:-1] # remove duplicates in case last step is perfect
starts.append(start)
crop = np.array([(mrg, chk - mrg),] * len(start))
crop[0, 0] = 0
crop[-1, 0] = chk - (arr - np.sum(crop[:-1, 1] - crop[:-1, 0]))
crop[-1, 1] = chk
crops.append(crop)
chunk_starts = list(itertools.product(*starts))
chunk_crops = list(itertools.product(*crops))
return chunk_starts, chunk_crops
@tz.curry
def throttle_function(func, every_n=1000):
"""Return a copy of function that only runs every n calls.
This is useful when attaching a slow callback to a frequent event.
Parameters
----------
func : callable
The input function.
every_n : int
Number of ignored calls before letting another call through.
"""
counter = 0
def throttled(*args, **kwargs):
nonlocal counter
result = None
if counter % every_n == 0:
result = func(*args, **kwargs)
counter += 1
return result
return throttled
def predict_output_chunks(
unet, input_volume, chunk_size, output_volume, margin=0,
):
u = unet
ndim = len(chunk_size)
chunk_starts, chunk_crops = make_chunks(
input_volume.shape[-ndim:], chunk_size, margin=margin
)
for start, crop in tqdm(list(zip(chunk_starts, chunk_crops))):
sl = tuple(slice(start0, start0+step) for start0, step
in zip(start, chunk_size))
tensor = torch.from_numpy(input_volume[sl][np.newaxis, np.newaxis])
if torch.cuda.is_available() and not IGNORE_CUDA:
tensor = tensor.cuda()
predicted_array = u(tensor).detach().cpu().numpy()
# add slice(None) for the 5 channels
cr = (slice(None),) + tuple(slice(i, j) for i, j in crop)
output_volume[(slice(None),) + sl][cr] = predicted_array[(0,) + cr]
# print(f'output volume is prediction output', output_volume is prediction_output)
yield
return output_volume
if __name__ == '__main__':
import nd2_dask as nd2
#data_fn = '/data/platelets/200519_IVMTR69_Inj4_dmso_exp3.nd2'
data_fn = os.path.expanduser(
'~/Dropbox/share-files/200519_IVMTR69_Inj4_dmso_exp3.nd2'
)
layer_list = nd2.nd2_reader.nd2_reader(data_fn)
t_idx = 114
source_vol = layer_list[2][0]
vol2predict = rescale_intensity(
np.asarray(source_vol[t_idx])
).astype(np.float32)
prediction_output = np.zeros((5,) + vol2predict.shape, dtype=np.float32)
size = (10, 256, 256)
chunk_starts, chunk_crops = make_chunks(vol2predict.shape, size, (1, 0, 0))
viewer = napari.Viewer(ndisplay=3)
l0 = viewer._add_layer_from_data(*layer_list[0])[0]
l1 = viewer._add_layer_from_data(*layer_list[1])[0]
l2 = viewer._add_layer_from_data(*layer_list[2])[0]
offsets = -0.5 * np.asarray(l0.scale)[-3:] * np.eye(5, 3)
prediction_layers = viewer.add_image(
prediction_output,
channel_axis=0,
name=['z-aff', 'y-aff', 'x-aff', 'mask', 'centroids'],
scale=l0.scale[-3:],
translate=list(np.asarray(l0.translate[-3:]) + offsets),
colormap=['bop purple', 'bop orange', 'bop orange', 'gray', 'gray'],
visible=[False, False, False, True, False],
)
viewer.dims.set_point(0, t_idx)
def refresh_prediction_layers():
for layer in prediction_layers:
layer.refresh()
labels = np.pad(
np.zeros(prediction_output.shape[1:], dtype=np.uint32),
1,
mode='constant',
constant_values=0,
)
labels_layer = viewer.add_labels(
labels[1:-1, 1:-1, 1:-1],
name='watershed',
scale=prediction_layers[-1].scale,
translate=prediction_layers[-1].translate,
)
# closure to connect to threadworker signal
def segment(prediction):
yield from ws.segment_output_image(
prediction,
affinities_channels=(0, 1, 2),
centroids_channel=4,
thresholding_channel=3,
out=labels.ravel()
)
refresh_labels = throttle_function(labels_layer.refresh, every_n=10000)
segment_worker = thread_worker(
segment,
connect={'yielded': refresh_labels}
)
prediction_worker = thread_worker(
predict_output_chunks,
connect={
'yielded': refresh_prediction_layers,
'returned': segment_worker
},
)
prediction_worker(u, vol2predict, size, prediction_output, margin=0)
napari.run()
| 2,290 | 0 | 126 |
dceca2c42994a429c1b0e9cd4184414aa609bdac | 184 | py | Python | caixiya/20180402/3.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | caixiya/20180402/3.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | caixiya/20180402/3.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | '''
3. ๅฉ็จmap()ๅฝๆฐๅฐ็จๆท่พๅ
ฅ็ไธ่ง่็่ฑๆๅๅญ,ๅไธบ้ฆๅญๆฏๅคงๅ,ๅ
ถไปๅญ็ฌฆๅฐๅ็ๅฝขๅผ
1) ๅๅฎ็จๆท่พๅ
ฅ็ๆฏ["lambDA", โLILYโ, โaliCeโ]
'''
l=["lambDA","LILY","aliCe"]
print(list(map([i[:1].upper()+i[1:].lower()for i in l,l])))
| 26.285714 | 60 | 0.608696 | '''
3. ๅฉ็จmap()ๅฝๆฐๅฐ็จๆท่พๅ
ฅ็ไธ่ง่็่ฑๆๅๅญ,ๅไธบ้ฆๅญๆฏๅคงๅ,ๅ
ถไปๅญ็ฌฆๅฐๅ็ๅฝขๅผ
1) ๅๅฎ็จๆท่พๅ
ฅ็ๆฏ["lambDA", โLILYโ, โaliCeโ]
'''
l=["lambDA","LILY","aliCe"]
print(list(map([i[:1].upper()+i[1:].lower()for i in l,l])))
| 0 | 0 | 0 |
1a60b05152fee6b8e79a1c93f39d5b6df1b11044 | 1,267 | py | Python | Python/103 ZigzagLevelOrderTraversalB.py | Fiona08/leetcode | cee3da122e1703cfde811a8f69c207d5ae780d37 | [
"MIT"
] | null | null | null | Python/103 ZigzagLevelOrderTraversalB.py | Fiona08/leetcode | cee3da122e1703cfde811a8f69c207d5ae780d37 | [
"MIT"
] | null | null | null | Python/103 ZigzagLevelOrderTraversalB.py | Fiona08/leetcode | cee3da122e1703cfde811a8f69c207d5ae780d37 | [
"MIT"
] | null | null | null | #103
# Time: O(n)
# Space: O(n)
# Given a binary tree, return the zigzag level order traversal of
# its nodes' values. (ie, from left to right, then right to left
# for the next level and alternate between).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its zigzag level order traversal as:
# [
# [3],
# [20,9],
# [15,7]
# ]
| 25.34 | 66 | 0.58011 | #103
# Time: O(n)
# Space: O(n)
# Given a binary tree, return the zigzag level order traversal of
# its nodes' values. (ie, from left to right, then right to left
# for the next level and alternate between).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its zigzag level order traversal as:
# [
# [3],
# [20,9],
# [15,7]
# ]
class TreeNode():
def __init__(self,val):
self.val=val;
self.right=None
self.left=None
class BFSSol():
def zigzagLevelOrderTraversalBT(self,root):
level,next_level_node,zigzag_traversal=1,[root],[]
while next_level_node:
cur_level_node,cur_level_val=next_level_node,[]
next_level_node=[]
for cur_node in cur_level_node:
cur_level_val.append(cur_node.val)
if cur_node.left:
next_level_node.append(cur_node.left)
if cur_node.right:
next_level_node.append(cur_node.right)
if level%2:
zigzag_traversal.append(cur_level_val)
else:
zigzag_traversal.append(cur_level_val[::-1])
level+=1
return zigzag_traversal
| 766 | -10 | 98 |
012e78c449d04b20ba8352cfe81717ccea4c5e45 | 1,050 | py | Python | setup.py | Marcdnd/electrum-server-cesc | 6e8c107b0994e032a92f0d7c4da7cb430294d6f3 | [
"MIT"
] | null | null | null | setup.py | Marcdnd/electrum-server-cesc | 6e8c107b0994e032a92f0d7c4da7cb430294d6f3 | [
"MIT"
] | null | null | null | setup.py | Marcdnd/electrum-server-cesc | 6e8c107b0994e032a92f0d7c4da7cb430294d6f3 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name="electrum-server-cesc",
version="1.0.20160803",
scripts=['run_electrum_server_cesc','electrum-server-cesc'],
install_requires=['plyvel','jsonrpclib', 'irc >= 11, <=14.0'],
package_dir={
'electrumservercesc':'src'
},
py_modules=[
'electrumservercesc.__init__',
'electrumservercesc.utils',
'electrumservercesc.storage',
'electrumservercesc.deserialize',
'electrumservercesc.networks',
'electrumservercesc.blockchain_processor',
'electrumservercesc.server_processor',
'electrumservercesc.processor',
'electrumservercesc.version',
'electrumservercesc.ircthread',
'electrumservercesc.stratum_tcp'
],
description="Cryptoescudo Electrum Server",
author="Marcdnd",
author_email="marcdnd@gmail.com",
license="MIT Licence",
url="https://github.com/Marcdnd/electrum-server-cesc/",
long_description="""Server for the Electrum Lightweight Cryptoescudo Wallet"""
)
| 33.870968 | 82 | 0.67619 | from setuptools import setup
setup(
name="electrum-server-cesc",
version="1.0.20160803",
scripts=['run_electrum_server_cesc','electrum-server-cesc'],
install_requires=['plyvel','jsonrpclib', 'irc >= 11, <=14.0'],
package_dir={
'electrumservercesc':'src'
},
py_modules=[
'electrumservercesc.__init__',
'electrumservercesc.utils',
'electrumservercesc.storage',
'electrumservercesc.deserialize',
'electrumservercesc.networks',
'electrumservercesc.blockchain_processor',
'electrumservercesc.server_processor',
'electrumservercesc.processor',
'electrumservercesc.version',
'electrumservercesc.ircthread',
'electrumservercesc.stratum_tcp'
],
description="Cryptoescudo Electrum Server",
author="Marcdnd",
author_email="marcdnd@gmail.com",
license="MIT Licence",
url="https://github.com/Marcdnd/electrum-server-cesc/",
long_description="""Server for the Electrum Lightweight Cryptoescudo Wallet"""
)
| 0 | 0 | 0 |
5b03553fcf79469b760d1f2bea0a15ae5bbd561b | 13,549 | py | Python | toontown/estate/EstateLoader.py | AnonymousDeveloper65535/open-toontown | 3d05c22a7d960ad843dde231140447c46973dba5 | [
"BSD-3-Clause"
] | 8 | 2017-10-10T11:41:01.000Z | 2021-02-23T12:55:47.000Z | toontown/estate/EstateLoader.py | AnonymousDeveloper65535/open-toontown | 3d05c22a7d960ad843dde231140447c46973dba5 | [
"BSD-3-Clause"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/estate/EstateLoader.py | AnonymousDeveloper65535/open-toontown | 3d05c22a7d960ad843dde231140447c46973dba5 | [
"BSD-3-Clause"
] | 2 | 2019-04-06T16:18:23.000Z | 2021-02-25T06:25:01.000Z | from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from toontown.safezone import SafeZoneLoader
import random
from toontown.launcher import DownloadForceAcknowledge
import House
import Estate
import HouseGlobals
import random
import math
from toontown.coghq import MovingPlatform
from direct.directnotify import DirectNotifyGlobal
| 39.15896 | 158 | 0.624548 | from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from toontown.safezone import SafeZoneLoader
import random
from toontown.launcher import DownloadForceAcknowledge
import House
import Estate
import HouseGlobals
import random
import math
from toontown.coghq import MovingPlatform
from direct.directnotify import DirectNotifyGlobal
class EstateLoader(SafeZoneLoader.SafeZoneLoader):
    """Scene loader for a player's estate zone.

    Extends SafeZoneLoader with estate-specific content: the six house
    plots, a sun/moon billboard rig, and the optional cloud-bumper
    platforms used by the pinball-style activity.  Owns a ClassicFSM
    that switches between the 'estate' exterior, 'house' interiors, and
    the 'quietZone' transition state.

    NOTE(review): this is Python 2 code (``print`` statements, ``map``
    returning a list is assumed in ``load``); it relies on the Panda3D /
    Toontown globals ``base``, ``loader``, ``render`` and ``messenger``
    being in scope.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('EstateLoader')

    def __init__(self, hood, parentFSM, doneEvent):
        """Build the estate FSM and initialize per-visit bookkeeping.

        hood/parentFSM/doneEvent are forwarded to SafeZoneLoader; the
        base-class FSM is discarded and replaced with an estate-specific
        one.
        """
        SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
        # Replace the generic safe-zone FSM with the estate state graph.
        del self.fsm
        self.fsm = ClassicFSM.ClassicFSM('EstateLoader', [State.State('start', self.enterStart, self.exitStart, ['quietZone', 'estate', 'house']),
         State.State('estate', self.enterEstate, self.exitEstate, ['quietZone']),
         State.State('house', self.enterHouse, self.exitHouse, ['quietZone']),
         State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['house', 'estate']),
         State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
        # Asset paths consumed by SafeZoneLoader.load/createSafeZone.
        self.musicFile = 'phase_4/audio/bgm/TC_nbrhood.mid'
        self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.mid'
        self.dnaFile = 'phase_5.5/dna/estate_1.dna'
        self.safeZoneStorageDNAFile = None
        # 0 = cloud platforms stashed (hidden); see setCloudSwitch.
        self.cloudSwitch = 0
        self.id = MyEstate
        self.estateOwnerId = None
        self.branchZone = None
        # Event names used to hand control back from House/Estate places.
        self.houseDoneEvent = 'houseDone'
        self.estateDoneEvent = 'estateDone'
        self.enteredHouse = None
        # One attachment node per house plot (6 plots per estate).
        self.houseNode = [None] * 6
        self.houseModels = [None] * HouseGlobals.NUM_HOUSE_TYPES
        self.houseId2house = {}
        self.barrel = None
        # Each entry is [cloud NodePath, drift axis Vec3]; see __cloudTrack.
        self.clouds = []
        self.cloudTrack = None
        self.sunMoonNode = None
        self.fsm.enterInitialState()
        return

    def load(self):
        """Load estate audio assets (and goon inventory icons if enabled)."""
        SafeZoneLoader.SafeZoneLoader.load(self)
        self.music = base.loadMusic('phase_4/audio/bgm/TC_nbrhood.mid')
        self.underwaterSound = base.loadSfx('phase_4/audio/sfx/AV_ambient_water.mp3')
        self.swimSound = base.loadSfx('phase_4/audio/sfx/AV_swim_single_stroke.mp3')
        self.submergeSound = base.loadSfx('phase_5.5/audio/sfx/AV_jump_in_water.mp3')
        # NOTE(review): bird and cricket sounds load the same three files;
        # presumably the cricket list was meant to use different assets.
        self.birdSound = map(base.loadSfx, ['phase_4/audio/sfx/SZ_TC_bird1.mp3', 'phase_4/audio/sfx/SZ_TC_bird2.mp3', 'phase_4/audio/sfx/SZ_TC_bird3.mp3'])
        self.cricketSound = map(base.loadSfx, ['phase_4/audio/sfx/SZ_TC_bird1.mp3', 'phase_4/audio/sfx/SZ_TC_bird2.mp3', 'phase_4/audio/sfx/SZ_TC_bird3.mp3'])
        if base.goonsEnabled:
            # Pre-extract every gag icon, indexed as invModels[track][item].
            invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
            self.invModels = []
            from toontown.toonbase import ToontownBattleGlobals
            for track in range(len(ToontownBattleGlobals.AvPropsNew)):
                itemList = []
                for item in range(len(ToontownBattleGlobals.AvPropsNew[track])):
                    itemList.append(invModel.find('**/' + ToontownBattleGlobals.AvPropsNew[track][item]))
                self.invModels.append(itemList)
            invModel.removeNode()
            del invModel

    def unload(self):
        """Tear down everything loaded for the estate and leave it on the AI."""
        self.ignoreAll()
        base.cr.estateMgr.leaveEstate()
        self.estateOwnerId = None
        self.estateZoneId = None
        if self.place:
            self.place.exit()
            self.place.unload()
            del self.place
        del self.underwaterSound
        del self.swimSound
        del self.submergeSound
        del self.birdSound
        del self.cricketSound
        for node in self.houseNode:
            node.removeNode()

        del self.houseNode
        for model in self.houseModels:
            model.removeNode()

        del self.houseModels
        del self.houseId2house
        if self.sunMoonNode:
            self.sunMoonNode.removeNode()
            del self.sunMoonNode
            self.sunMoonNode = None
        if self.clouds:
            for cloud in self.clouds:
                cloud[0].removeNode()
                del cloud[1]

            del self.clouds
        if self.barrel:
            self.barrel.removeNode()
        SafeZoneLoader.SafeZoneLoader.unload(self)
        return

    def enter(self, requestStatus):
        """Enter the estate; build cloud platforms and honor the cloud switch."""
        # Default to the local avatar as owner when none is specified.
        self.estateOwnerId = requestStatus.get('ownerId', base.localAvatar.doId)
        base.localAvatar.inEstate = 1
        self.loadCloudPlatforms()
        # NOTE(review): `and 0` makes this branch dead code; it looks like a
        # debugging leftover that force-disables the config override.
        if base.cloudPlatformsEnabled and 0:
            self.setCloudSwitch(1)
        if self.cloudSwitch:
            self.setCloudSwitch(self.cloudSwitch)
        SafeZoneLoader.SafeZoneLoader.enter(self, requestStatus)

    def exit(self):
        """Leave the estate and flush cached distributed objects."""
        self.ignoreAll()
        base.cr.cache.flush()
        base.localAvatar.stopChat()
        base.localAvatar.inEstate = 0
        SafeZoneLoader.SafeZoneLoader.exit(self)

    def createSafeZone(self, dnaFile):
        """Build the safe zone geometry, then add houses and the sky bodies."""
        SafeZoneLoader.SafeZoneLoader.createSafeZone(self, dnaFile)
        self.loadHouses()
        self.loadSunMoon()

    def loadHouses(self):
        """Preload all house model variants and place the six plot nodes."""
        for i in range(HouseGlobals.NUM_HOUSE_TYPES):
            self.houseModels[i] = loader.loadModel(HouseGlobals.houseModels[i])

        for i in range(6):
            # houseDrops holds the (x, y, z, h, p, r) for each plot.
            posHpr = HouseGlobals.houseDrops[i]
            self.houseNode[i] = self.geom.attachNewNode('esHouse_' + str(i))
            self.houseNode[i].setPosHpr(*posHpr)

    def loadSunMoon(self):
        """Attach billboarded sun and moon models on a shared pivot node."""
        self.sun = loader.loadModel('phase_4/models/props/sun.bam')
        self.moon = loader.loadModel('phase_5.5/models/props/moon.bam')
        self.sunMoonNode = self.geom.attachNewNode('sunMoon')
        self.sunMoonNode.setPosHpr(0, 0, 0, 0, 0, 0)
        if self.sun:
            self.sun.reparentTo(self.sunMoonNode)
            self.sun.setY(270)
            self.sun.setScale(2)
            self.sun.setBillboardPointEye()
        if self.moon:
            # Moon sits opposite the sun (flipped pitch, negative Y).
            self.moon.setP(180)
            self.moon.reparentTo(self.sunMoonNode)
            self.moon.setY(-270)
            self.moon.setScale(15)
            self.moon.setBillboardPointEye()
        self.sunMoonNode.setP(30)

    def enterEstate(self, requestStatus):
        """Create and enter the Estate place for the requested zone."""
        self.notify.debug('enterEstate: requestStatus = %s' % requestStatus)
        ownerId = requestStatus.get('ownerId')
        if ownerId:
            self.estateOwnerId = ownerId
        zoneId = requestStatus['zoneId']
        self.notify.debug('enterEstate, ownerId = %s, zoneId = %s' % (self.estateOwnerId, zoneId))
        self.accept(self.estateDoneEvent, self.handleEstateDone)
        self.place = Estate.Estate(self, self.estateOwnerId, zoneId, self.fsm.getStateNamed('estate'), self.estateDoneEvent)
        base.cr.playGame.setPlace(self.place)
        self.place.load()
        self.place.enter(requestStatus)
        self.estateZoneId = zoneId

    def exitEstate(self):
        """Tear down the current Estate place and clear the active place."""
        self.notify.debug('exitEstate')
        self.ignore(self.estateDoneEvent)
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        base.cr.cache.flush()
        return

    def handleEstateDone(self, doneStatus = None):
        """Route the 'estate done' event: back out of the hood or transition
        through the quiet zone, depending on the requested destination."""
        if not doneStatus:
            doneStatus = self.place.getDoneStatus()
        how = doneStatus['how']
        shardId = doneStatus['shardId']
        hoodId = doneStatus['hoodId']
        zoneId = doneStatus['zoneId']
        avId = doneStatus.get('avId', -1)
        ownerId = doneStatus.get('ownerId', -1)
        if shardId != None or hoodId != MyEstate:
            # Leaving the estate entirely: bubble the status up to our owner.
            self.notify.debug('estate done, and we are backing out to a different hood/shard')
            self.notify.debug('hoodId = %s, avId = %s' % (hoodId, avId))
            self.doneStatus = doneStatus
            messenger.send(self.doneEvent)
            return
        if how in ['tunnelIn',
         'teleportIn',
         'doorIn',
         'elevatorIn']:
            self.notify.debug('staying in estateloader')
            self.fsm.request('quietZone', [doneStatus])
        else:
            self.notify.error('Exited hood with unexpected mode %s' % how)
        return

    def enterHouse(self, requestStatus):
        """Create and enter a House place (interior) for the current owner."""
        ownerId = requestStatus.get('ownerId')
        if ownerId:
            self.estateOwnerId = ownerId
        self.acceptOnce(self.houseDoneEvent, self.handleHouseDone)
        self.place = House.House(self, self.estateOwnerId, self.fsm.getStateNamed('house'), self.houseDoneEvent)
        base.cr.playGame.setPlace(self.place)
        self.place.load()
        self.place.enter(requestStatus)

    def exitHouse(self):
        """Tear down the current House place and clear the active place."""
        self.ignore(self.houseDoneEvent)
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return

    def handleHouseDone(self, doneStatus = None):
        """Route the 'house done' event, mirroring handleEstateDone."""
        if not doneStatus:
            doneStatus = self.place.getDoneStatus()
        shardId = doneStatus['shardId']
        hoodId = doneStatus['hoodId']
        if shardId != None or hoodId != MyEstate:
            self.doneStatus = doneStatus
            messenger.send(self.doneEvent)
            return
        how = doneStatus['how']
        if how in ['tunnelIn',
         'teleportIn',
         'doorIn',
         'elevatorIn']:
            self.fsm.request('quietZone', [doneStatus])
        else:
            self.notify.error('Exited hood with unexpected mode %s' % how)
        return

    def handleQuietZoneDone(self):
        """After the quiet-zone transition, jump to the requested state."""
        status = self.quietZoneStateData.getRequestStatus()
        self.fsm.request(status['where'], [status])

    def atMyEstate(self):
        """Return 1 if the local avatar owns this estate, 0 otherwise
        (None with a warning if we are not in an estate at all)."""
        if self.estateOwnerId != None:
            if self.estateOwnerId == base.localAvatar.getDoId():
                return 1
            else:
                return 0
        else:
            self.notify.warning("We aren't in an estate")
        return

    def setHouse(self, houseId):
        """Record the house the avatar just entered, looked up by doId."""
        try:
            houseDo = base.cr.doId2do[houseId]
            self.enteredHouse = houseDo.house
        except KeyError:
            self.notify.debug("can't find house: %d" % houseId)

    def startCloudPlatforms(self):
        """Start the cloud drift animation.

        NOTE(review): the bare ``return`` on the first line disables this
        method entirely; the code below it is unreachable (apparently
        intentional -- the parallel ``and 0`` in enter() suggests clouds
        are animated elsewhere or deliberately static).
        """
        return
        if len(self.clouds):
            self.cloudTrack = self.__cloudTrack()
            self.cloudTrack.loop()

    def stopCloudPlatforms(self):
        """Stop and discard the cloud drift interval, if running."""
        if self.cloudTrack:
            self.cloudTrack.pause()
            del self.cloudTrack
            self.cloudTrack = None
        return

    def __cloudTrack(self):
        """Build a Parallel interval drifting each cloud along its axis
        (out 30 units, back through 60, and return to start)."""
        track = Parallel()
        for cloud in self.clouds:
            axis = cloud[1]
            pos = cloud[0].getPos(render)
            newPos = pos + axis * 30
            reversePos = pos - axis * 30
            track.append(Sequence(LerpPosInterval(cloud[0], 10, newPos), LerpPosInterval(cloud[0], 20, reversePos), LerpPosInterval(cloud[0], 10, pos)))

        return track

    def debugGeom(self, decomposed):
        """Dump primitive structure of a decomposed Geom (debug aid)."""
        print 'numPrimitives = %d' % decomposed.getNumPrimitives()
        for primIndex in range(decomposed.getNumPrimitives()):
            prim = decomposed.getPrimitive(primIndex)
            print 'prim = %s' % prim
            print 'isIndexed = %d' % prim.isIndexed()
            print 'prim.getNumPrimitives = %d' % prim.getNumPrimitives()
            for basicPrim in range(prim.getNumPrimitives()):
                print '%d start=%d' % (basicPrim, prim.getPrimitiveStart(basicPrim))
                print '%d end=%d' % (basicPrim, prim.getPrimitiveEnd(basicPrim))

    def loadOnePlatform(self, version, radius, zOffset, score, multiplier):
        """Place one cloud bumper on a ring.

        version: index around the ring (angle = version * 2*pi/numClouds).
        radius/zOffset: ring radius and base height (plus 0-4 random jitter).
        score/multiplier: encoded into the node name for pinball scoring.
        """
        self.notify.debug('loadOnePlatform version=%d' % version)
        cloud = NodePath('cloud-%d-%d' % (score, multiplier))
        cloudModel = loader.loadModel('phase_5.5/models/estate/bumper_cloud')
        cc = cloudModel.copyTo(cloud)
        colCube = cc.find('**/collision')
        colCube.setName('cloudSphere-0')
        dTheta = 2.0 * math.pi / self.numClouds
        cloud.reparentTo(self.cloudOrigin)
        axes = [Vec3(1, 0, 0), Vec3(0, 1, 0), Vec3(0, 0, 1)]
        cloud.setPos(radius * math.cos(version * dTheta), radius * math.sin(version * dTheta), 4 * random.random() + zOffset)
        cloud.setScale(4.0)
        self.clouds.append([cloud, random.choice(axes)])

    def loadSkyCollision(self):
        """Add an intangible ceiling plane at z=300 above the cloud origin."""
        plane = CollisionPlane(Plane(Vec3(0, 0, -1), Point3(0, 0, 300)))
        plane.setTangible(0)
        planeNode = CollisionNode('cloudSphere-0')
        planeNode.addSolid(plane)
        self.cloudOrigin.attachNewNode(planeNode)

    def loadCloudPlatforms(self):
        """Build three rings of 12 cloud bumpers (low/mid/high scoring)
        under a stashed origin node; setCloudSwitch reveals them."""
        self.cloudOrigin = self.geom.attachNewNode('cloudOrigin')
        self.cloudOrigin.setZ(30)
        self.loadSkyCollision()
        self.numClouds = 12
        pinballScore = PinballScoring[PinballCloudBumperLow]
        for i in range(12):
            self.loadOnePlatform(i, 40, 0, pinballScore[0], pinballScore[1])

        pinballScore = PinballScoring[PinballCloudBumperMed]
        for i in range(12):
            self.loadOnePlatform(i, 60, 40, pinballScore[0], pinballScore[1])

        pinballScore = PinballScoring[PinballCloudBumperHigh]
        for i in range(12):
            self.loadOnePlatform(i, 20, 80, pinballScore[0], pinballScore[1])

        self.cloudOrigin.stash()

    def setCloudSwitch(self, on):
        """Show (on=1) or hide (on=0) the cloud platforms by (un)stashing."""
        self.cloudSwitch = on
        if hasattr(self, 'cloudOrigin'):
            if on:
                self.cloudOrigin.unstash()
            else:
                self.cloudOrigin.stash()
| 12,301 | 777 | 23 |
82f9d171ce9b99f1e448f065ffdf32684add905c | 22,630 | py | Python | theseus/geometry/se3.py | facebookresearch/theseus | 8e7756d88fe85a02f948c3a9337f8704f4cebda3 | [
"MIT"
] | 236 | 2021-12-03T15:59:29.000Z | 2022-03-30T23:18:33.000Z | theseus/geometry/se3.py | facebookresearch/theseus | 8e7756d88fe85a02f948c3a9337f8704f4cebda3 | [
"MIT"
] | 85 | 2021-12-06T07:04:11.000Z | 2022-03-31T20:29:26.000Z | theseus/geometry/se3.py | facebookresearch/theseus | 8e7756d88fe85a02f948c3a9337f8704f4cebda3 | [
"MIT"
] | 12 | 2021-12-03T22:02:44.000Z | 2022-03-20T14:58:27.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Union, cast
import torch
import theseus
import theseus.constants
from .lie_group import LieGroup
from .point_types import Point3
from .so3 import SO3
rand_se3 = SE3.rand
randn_se3 = SE3.randn
| 37.037643 | 94 | 0.537826 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Union, cast
import torch
import theseus
import theseus.constants
from .lie_group import LieGroup
from .point_types import Point3
from .so3 import SO3
class SE3(LieGroup):
    """Batched SE(3) rigid transformations (3D rotation + translation).

    Each element is stored as a 3x4 matrix ``[R | t]`` (see ``_init_data``),
    batched as a ``(batch, 3, 4)`` tensor.  Tangent vectors are 6-D with the
    translational part first and the rotational part last (see ``exp_map``).
    """

    def __init__(
        self,
        x_y_z_quaternion: Optional[torch.Tensor] = None,
        data: Optional[torch.Tensor] = None,
        name: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        requires_check: bool = True,
    ):
        """Construct from either an (x, y, z, quaternion) 7-vector batch or
        raw 3x4 matrix data (mutually exclusive).

        requires_check: skip the SE(3) validity check on ``data`` when the
        caller already knows it is valid (used internally by inverse/copy).
        """
        if x_y_z_quaternion is not None and data is not None:
            raise ValueError("Please provide only one of x_y_z_quaternion or data.")
        if x_y_z_quaternion is not None:
            dtype = x_y_z_quaternion.dtype
        if data is not None and requires_check:
            self._SE3_matrix_check(data)
        super().__init__(data=data, name=name, dtype=dtype)
        if x_y_z_quaternion is not None:
            self.update_from_x_y_z_quaternion(x_y_z_quaternion=x_y_z_quaternion)

    @staticmethod
    def rand(
        *size: int,
        generator: Optional[torch.Generator] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        requires_grad: bool = False,
    ) -> "SE3":
        """Return a batch of random SE(3) elements (uniform-style sampling
        delegated to SO3.rand and Point3.rand). ``size`` must be a single
        batch dimension."""
        if len(size) != 1:
            raise ValueError("The size should be 1D.")
        ret = SE3()
        rotation = SO3.rand(
            size[0],
            generator=generator,
            dtype=dtype,
            device=device,
            requires_grad=requires_grad,
        )
        translation = Point3.rand(
            size[0],
            generator=generator,
            dtype=dtype,
            device=device,
            requires_grad=requires_grad,
        )
        ret.update_from_rot_and_trans(rotation=rotation, translation=translation)
        return ret

    @staticmethod
    def randn(
        *size: int,
        generator: Optional[torch.Generator] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        requires_grad: bool = False,
    ) -> "SE3":
        """Like ``rand`` but samples via SO3.randn / Point3.randn."""
        if len(size) != 1:
            raise ValueError("The size should be 1D.")
        ret = SE3()
        rotation = SO3.randn(
            size[0],
            generator=generator,
            dtype=dtype,
            device=device,
            requires_grad=requires_grad,
        )
        translation = Point3.randn(
            size[0],
            generator=generator,
            dtype=dtype,
            device=device,
            requires_grad=requires_grad,
        )
        ret.update_from_rot_and_trans(rotation=rotation, translation=translation)
        return ret

    @staticmethod
    def _init_data() -> torch.Tensor:  # type: ignore
        # Identity transform: I_3x3 rotation, zero translation, batch of 1.
        return torch.eye(3, 4).view(1, 3, 4)

    def dof(self) -> int:
        """Degrees of freedom of SE(3): 3 translational + 3 rotational."""
        return 6

    def __repr__(self) -> str:
        return f"SE3(data={self.data}, name={self.name})"

    def __str__(self) -> str:
        with torch.no_grad():
            return f"SE3(matrix={self.data}), name={self.name})"

    def _adjoint_impl(self) -> torch.Tensor:
        """Return the (batch, 6, 6) adjoint: [[R, [t]x R], [0, R]]."""
        ret = torch.zeros(self.shape[0], 6, 6).to(dtype=self.dtype, device=self.device)
        ret[:, :3, :3] = self[:, :3, :3]
        ret[:, 3:, 3:] = self[:, :3, :3]
        ret[:, :3, 3:] = SO3.hat(self[:, :3, 3]) @ self[:, :3, :3]
        return ret

    def _project_impl(
        self, euclidean_grad: torch.Tensor, is_sparse: bool = False
    ) -> torch.Tensor:
        """Project a Euclidean gradient w.r.t. the 3x4 matrix onto the 6-D
        tangent space (translation first, then the skew-symmetric part of
        R^T * grad for rotation)."""
        self._project_check(euclidean_grad, is_sparse)
        ret = torch.zeros(
            euclidean_grad.shape[:-2] + torch.Size([6]),
            dtype=self.dtype,
            device=self.device,
        )
        if is_sparse:
            temp = torch.einsum(
                "i...jk,i...jl->i...lk", euclidean_grad, self.data[:, :, :3]
            )
        else:
            temp = torch.einsum(
                "...jk,...ji->...ik", euclidean_grad, self.data[:, :, :3]
            )
        ret[..., :3] = temp[..., 3]
        # Rotational part: vee of the antisymmetric component of temp.
        ret[..., 3] = temp[..., 2, 1] - temp[..., 1, 2]
        ret[..., 4] = temp[..., 0, 2] - temp[..., 2, 0]
        ret[..., 5] = temp[..., 1, 0] - temp[..., 0, 1]
        return ret

    @staticmethod
    def _SE3_matrix_check(matrix: torch.Tensor):
        """Raise ValueError unless ``matrix`` is (batch, 3, 4) with a valid
        rotation in its left 3x3 block."""
        if matrix.ndim != 3 or matrix.shape[1:] != (3, 4):
            raise ValueError("SE(3) can only be 3x4 matrices.")
        SO3._SO3_matrix_check(matrix.data[:, :3, :3])

    @staticmethod
    def x_y_z_unit_quaternion_to_SE3(x_y_z_quaternion: torch.Tensor) -> "SE3":
        """Build an SE3 from a (batch, 7) tensor laid out as
        (x, y, z, qw-or-quaternion...); the last 4 entries go through
        SO3.unit_quaternion_to_SO3."""
        if x_y_z_quaternion.ndim == 1:
            x_y_z_quaternion = x_y_z_quaternion.unsqueeze(0)
        # NOTE(review): this condition uses `and`, so a 2-D tensor whose
        # second dim is not 7 slips through unvalidated; it looks like it
        # was meant to be `or` -- confirm before relying on this check.
        if x_y_z_quaternion.ndim != 2 and x_y_z_quaternion.shape[1] != 7:
            raise ValueError("x_y_z_quaternion can only be 7-D vectors.")
        ret = SE3()
        batch_size = x_y_z_quaternion.shape[0]
        ret.data = torch.empty(batch_size, 3, 4).to(
            device=x_y_z_quaternion.device, dtype=x_y_z_quaternion.dtype
        )
        ret[:, :, :3] = SO3.unit_quaternion_to_SO3(x_y_z_quaternion[:, 3:]).data
        ret[:, :, 3] = x_y_z_quaternion[:, :3]
        return ret

    @staticmethod
    def _hat_matrix_check(matrix: torch.Tensor):
        """Validate that ``matrix`` is a batch of 4x4 se(3) hat matrices:
        zero last row and skew-symmetric top-left 3x3 block."""
        if matrix.ndim != 3 or matrix.shape[1:] != (4, 4):
            raise ValueError("Hat matrices of SE(3) can only be 4x4 matrices")
        if matrix[:, 3].abs().max().item() > theseus.constants.EPS:
            raise ValueError("The last row of hat matrices of SE(3) can only be zero.")
        if (
            matrix[:, :3, :3].transpose(1, 2) + matrix[:, :3, :3]
        ).abs().max().item() > theseus.constants.EPS:
            raise ValueError(
                "The 3x3 top-left corner of hat matrices of SE(3) can only be skew-symmetric."
            )

    @staticmethod
    def exp_map(
        tangent_vector: torch.Tensor, jacobians: Optional[List[torch.Tensor]] = None
    ) -> "SE3":
        """Exponential map from a (batch, 6) tangent vector to SE3.

        Layout: tangent_vector[:, :3] is translational, [:, 3:] rotational.
        Uses Taylor approximations for the theta-dependent coefficients when
        the rotation angle is near zero (theta < 5e-3) to avoid division by
        small numbers.  If ``jacobians`` is given, the (batch, 6, 6)
        right-Jacobian of the map is appended to it.
        """
        if tangent_vector.ndim != 2 or tangent_vector.shape[1] != 6:
            raise ValueError("Tangent vectors of SE(3) can only be 6-D vectors.")
        NEAR_ZERO_EPS = 5e-3
        ret = SE3(dtype=tangent_vector.dtype)
        tangent_vector_lin = tangent_vector[:, :3].view(-1, 3, 1)
        tangent_vector_ang = tangent_vector[:, 3:].view(-1, 3, 1)
        theta = torch.linalg.norm(tangent_vector_ang, dim=1).unsqueeze(1)
        theta2 = theta**2
        theta3 = theta**3
        near_zero = theta < NEAR_ZERO_EPS
        # Replace near-zero thetas by 1 in denominators; the corresponding
        # numerators are swapped for their Taylor expansions below.
        non_zero = torch.ones(
            1, dtype=tangent_vector.dtype, device=tangent_vector.device
        )
        theta_nz = torch.where(near_zero, non_zero, theta)
        theta2_nz = torch.where(near_zero, non_zero, theta2)
        theta3_nz = torch.where(near_zero, non_zero, theta3)

        # Compute the rotation
        sine = theta.sin()
        cosine = torch.where(near_zero, 8 / (4 + theta2) - 1, theta.cos())
        sine_by_theta = torch.where(
            near_zero, 0.5 * cosine + 0.5, theta.sin() / theta_nz
        )
        one_minus_cosine_by_theta2 = torch.where(
            near_zero, 0.5 * sine_by_theta, (1 - cosine) / theta2_nz
        )
        ret.data = torch.zeros(tangent_vector.shape[0], 3, 4).to(
            dtype=tangent_vector.dtype, device=tangent_vector.device
        )
        # Rodrigues' formula assembled term by term into the 3x3 block.
        ret.data[:, :3, :3] = (
            one_minus_cosine_by_theta2
            * tangent_vector_ang
            @ tangent_vector_ang.transpose(1, 2)
        )
        ret[:, 0, 0] += cosine.view(-1)
        ret[:, 1, 1] += cosine.view(-1)
        ret[:, 2, 2] += cosine.view(-1)
        temp = sine_by_theta.view(-1, 1) * tangent_vector_ang.view(-1, 3)
        ret[:, 0, 1] -= temp[:, 2]
        ret[:, 1, 0] += temp[:, 2]
        ret[:, 0, 2] += temp[:, 1]
        ret[:, 2, 0] -= temp[:, 1]
        ret[:, 1, 2] -= temp[:, 0]
        ret[:, 2, 1] += temp[:, 0]

        # Compute the translation (t = V @ v with the standard left-Jacobian V).
        sine_by_theta = torch.where(near_zero, 1 - theta2 / 6, sine_by_theta)
        one_minus_cosine_by_theta2 = torch.where(
            near_zero, 0.5 - theta2 / 24, one_minus_cosine_by_theta2
        )
        theta_minus_sine_by_theta3_t = torch.where(
            near_zero, 1.0 / 6 - theta2 / 120, (theta - sine) / theta3_nz
        )
        ret[:, :, 3:] = sine_by_theta * tangent_vector_lin
        ret[:, :, 3:] += one_minus_cosine_by_theta2 * torch.cross(
            tangent_vector_ang, tangent_vector_lin, dim=1
        )
        ret[:, :, 3:] += theta_minus_sine_by_theta3_t * (
            tangent_vector_ang
            @ (tangent_vector_ang.transpose(1, 2) @ tangent_vector_lin)
        )

        if jacobians is not None:
            SE3._check_jacobians_list(jacobians)
            theta3_nz = theta_nz * theta2_nz
            theta_minus_sine_by_theta3_rot = torch.where(
                near_zero, torch.zeros_like(theta), theta_minus_sine_by_theta3_t
            )
            jac = torch.zeros(
                tangent_vector.shape[0],
                6,
                6,
                dtype=tangent_vector.dtype,
                device=tangent_vector.device,
            )
            # Rotational diagonal blocks of the right Jacobian.
            jac[:, :3, :3] = (
                theta_minus_sine_by_theta3_rot
                * tangent_vector_ang.view(-1, 3, 1)
                @ tangent_vector_ang.view(-1, 1, 3)
            )
            diag_jac = jac.diagonal(dim1=1, dim2=2)
            diag_jac += sine_by_theta.view(-1, 1)
            jac_temp_rot = one_minus_cosine_by_theta2.view(
                -1, 1
            ) * tangent_vector_ang.view(-1, 3)
            jac[:, 0, 1] += jac_temp_rot[:, 2]
            jac[:, 1, 0] -= jac_temp_rot[:, 2]
            jac[:, 0, 2] -= jac_temp_rot[:, 1]
            jac[:, 2, 0] += jac_temp_rot[:, 1]
            jac[:, 1, 2] += jac_temp_rot[:, 0]
            jac[:, 2, 1] -= jac_temp_rot[:, 0]
            jac[:, 3:, 3:] = jac[:, :3, :3]
            # Derivatives of the V-matrix coefficients (Taylor forms near 0).
            minus_one_by_twelve = torch.tensor(
                -1 / 12.0,
                dtype=sine_by_theta.dtype,
                device=sine_by_theta.device,
            )
            d_one_minus_cosine_by_theta2 = torch.where(
                near_zero,
                minus_one_by_twelve,
                (sine_by_theta - 2 * one_minus_cosine_by_theta2) / theta2_nz,
            )
            minus_one_by_sixty = torch.tensor(
                -1 / 60.0,
                dtype=one_minus_cosine_by_theta2.dtype,
                device=one_minus_cosine_by_theta2.device,
            )
            d_theta_minus_sine_by_theta3 = torch.where(
                near_zero,
                minus_one_by_sixty,
                (one_minus_cosine_by_theta2 - 3 * theta_minus_sine_by_theta3_t)
                / theta2_nz,
            )
            w = tangent_vector[:, 3:]
            v = tangent_vector[:, :3]
            wv = w.cross(v, dim=1)
            wwv = w.cross(wv, dim=1)
            sw = theta_minus_sine_by_theta3_t.view(-1, 1) * w
            # Top-right (translation w.r.t. rotation) block.
            jac_temp_t = (
                d_one_minus_cosine_by_theta2.view(-1, 1) * wv
                + d_theta_minus_sine_by_theta3.view(-1, 1) * wwv
            ).view(-1, 3, 1) @ w.view(-1, 1, 3)
            jac_temp_t -= v.view(-1, 3, 1) @ sw.view(-1, 1, 3)
            jac_temp_v = (
                -one_minus_cosine_by_theta2.view(-1, 1) * v
                - theta_minus_sine_by_theta3_t.view(-1, 1) * wv
            )
            jac_temp_t += SO3.hat(jac_temp_v)
            diag_jac_t = torch.diagonal(jac_temp_t, dim1=1, dim2=2)
            diag_jac_t += (sw.view(-1, 1, 3) @ v.view(-1, 3, 1)).view(-1, 1)
            jac[:, :3, 3:] = ret[:, :, :3].transpose(1, 2) @ jac_temp_t

            jacobians.append(jac)

        return ret

    def _log_map_impl(
        self, jacobians: Optional[List[torch.Tensor]] = None
    ) -> torch.Tensor:
        """Logarithm map to a (batch, 6) tangent vector (translation first).

        Recovers the rotation angle/axis from the matrix, with dedicated
        branches for theta near 0 (Taylor series) and theta near pi (axis
        extracted from the dominant diagonal entry).  If ``jacobians`` is
        given, the (batch, 6, 6) Jacobian of the map is appended to it.
        """
        NEAR_PI_EPS = 1e-7
        NEAR_ZERO_EPS = 5e-3
        # sine_axis = sin(theta) * axis, read off the antisymmetric part of R.
        sine_axis = torch.zeros(self.shape[0], 3, dtype=self.dtype, device=self.device)
        sine_axis[:, 0] = 0.5 * (self[:, 2, 1] - self[:, 1, 2])
        sine_axis[:, 1] = 0.5 * (self[:, 0, 2] - self[:, 2, 0])
        sine_axis[:, 2] = 0.5 * (self[:, 1, 0] - self[:, 0, 1])
        cosine = 0.5 * (self[:, 0, 0] + self[:, 1, 1] + self[:, 2, 2] - 1)
        sine = sine_axis.norm(dim=1)
        theta = torch.atan2(sine, cosine)
        theta2 = theta**2
        non_zero = torch.ones(1, dtype=self.dtype, device=self.device)

        near_zero = theta < NEAR_ZERO_EPS

        # Compute the rotation
        not_near_pi = 1 + cosine > NEAR_PI_EPS
        # theta is not near pi
        near_zero_not_near_pi = near_zero[not_near_pi]
        # Compute the approximation of theta / sin(theta) when theta is near to 0
        sine_nz = torch.where(near_zero_not_near_pi, non_zero, sine[not_near_pi])
        scale = torch.where(
            near_zero_not_near_pi,
            1 + sine[not_near_pi] ** 2 / 6,
            theta[not_near_pi] / sine_nz,
        )
        ret_ang = torch.zeros_like(sine_axis)
        ret_ang[not_near_pi] = sine_axis[not_near_pi] * scale.view(-1, 1)

        # theta is near pi
        near_pi = ~not_near_pi
        ddiag = torch.diagonal(self[near_pi], dim1=1, dim2=2)
        # Find the index of major coloumns and diagonals
        major = torch.logical_and(
            ddiag[:, 1] > ddiag[:, 0], ddiag[:, 1] > ddiag[:, 2]
        ) + 2 * torch.logical_and(ddiag[:, 2] > ddiag[:, 0], ddiag[:, 2] > ddiag[:, 1])
        sel_rows = 0.5 * (self[near_pi, major, :3] + self[near_pi, :3, major])
        aux = torch.ones(sel_rows.shape[0], dtype=torch.bool)
        sel_rows[aux, major] -= cosine[near_pi]
        axis = sel_rows / sel_rows.norm(dim=1, keepdim=True)
        ret_ang[near_pi] = axis * (
            theta[near_pi] * sine_axis[near_pi, major].sign()
        ).view(-1, 1)

        # Compute the translation (apply V^{-1} to the stored translation).
        sine_theta = sine * theta
        two_cosine_minus_two = 2 * cosine - 2
        two_cosine_minus_two_nz = torch.where(near_zero, non_zero, two_cosine_minus_two)
        theta2_nz = torch.where(near_zero, non_zero, theta2)

        a = torch.where(
            near_zero, 1 - theta2 / 12, -sine_theta / two_cosine_minus_two_nz
        )
        b = torch.where(
            near_zero,
            1.0 / 12 + theta2 / 720,
            (sine_theta + two_cosine_minus_two) / (theta2_nz * two_cosine_minus_two_nz),
        )

        translation = self[:, :, 3].view(-1, 3, 1)
        ret_lin = a.view(-1, 1) * self[:, :, 3]
        ret_lin -= 0.5 * torch.cross(ret_ang, self[:, :, 3], dim=1)
        ret_ang_ext = ret_ang.view(-1, 3, 1)
        ret_lin += b.view(-1, 1) * (
            ret_ang_ext @ (ret_ang_ext.transpose(1, 2) @ translation)
        ).view(-1, 3)

        if jacobians is not None:
            SE3._check_jacobians_list(jacobians)
            jac = torch.zeros(self.shape[0], 6, 6, dtype=self.dtype, device=self.device)

            b_ret_ang = b.view(-1, 1) * ret_ang
            jac[:, :3, :3] = b_ret_ang.view(-1, 3, 1) * ret_ang.view(-1, 1, 3)

            half_ret_ang = 0.5 * ret_ang
            jac[:, 0, 1] -= half_ret_ang[:, 2]
            jac[:, 1, 0] += half_ret_ang[:, 2]
            jac[:, 0, 2] += half_ret_ang[:, 1]
            jac[:, 2, 0] -= half_ret_ang[:, 1]
            jac[:, 1, 2] -= half_ret_ang[:, 0]
            jac[:, 2, 1] += half_ret_ang[:, 0]

            diag_jac_rot = torch.diagonal(jac[:, :3, :3], dim1=1, dim2=2)
            diag_jac_rot += a.view(-1, 1)

            jac[:, 3:, 3:] = jac[:, :3, :3]

            theta_nz = torch.where(near_zero, non_zero, theta)
            theta4_nz = theta2_nz**2
            c = torch.where(
                near_zero,
                -1 / 360.0 - theta2 / 7560.0,
                -(2 * two_cosine_minus_two + theta * sine + theta2)
                / (theta4_nz * two_cosine_minus_two_nz),
            )
            d = torch.where(
                near_zero,
                -1 / 6.0 - theta2 / 180.0,
                (theta - sine) / (theta_nz * two_cosine_minus_two_nz),
            )
            e = (ret_ang.view(-1, 1, 3) @ ret_lin.view(-1, 3, 1)).view(-1)

            ce_ret_ang = (c * e).view(-1, 1) * ret_ang
            jac[:, :3, 3:] = ce_ret_ang.view(-1, 3, 1) * ret_ang.view(-1, 1, 3)
            jac[:, :3, 3:] += b_ret_ang.view(-1, 3, 1) * ret_lin.view(
                -1, 1, 3
            ) + ret_lin.view(-1, 3, 1) * b_ret_ang.view(-1, 1, 3)
            diag_jac_t = torch.diagonal(jac[:, :3, 3:], dim1=1, dim2=2)
            diag_jac_t += (e * d).view(-1, 1)

            half_ret_lin = 0.5 * ret_lin
            jac[:, 0, 4] -= half_ret_lin[:, 2]
            jac[:, 1, 3] += half_ret_lin[:, 2]
            jac[:, 0, 5] += half_ret_lin[:, 1]
            jac[:, 2, 3] -= half_ret_lin[:, 1]
            jac[:, 1, 5] -= half_ret_lin[:, 0]
            jac[:, 2, 4] += half_ret_lin[:, 0]

            jacobians.append(jac)

        return torch.cat([ret_lin, ret_ang], dim=1)

    def _compose_impl(self, se3_2: LieGroup) -> "SE3":
        """Group composition: [R1|t1] * [R2|t2] = [R1 R2 | R1 t2 + t1]."""
        se3_2 = cast(SE3, se3_2)
        batch_size = max(self.shape[0], se3_2.shape[0])
        ret = SE3()
        ret.data = torch.zeros(batch_size, 3, 4, dtype=self.dtype, device=self.device)
        ret[:, :, :3] = self[:, :, :3] @ se3_2[:, :, :3]
        ret[:, :, 3] = self[:, :, 3]
        ret[:, :, 3:] += self[:, :, :3] @ se3_2[:, :, 3:]
        return ret

    def _inverse_impl(self, get_jacobian: bool = False) -> "SE3":
        """Group inverse: [R|t]^-1 = [R^T | -R^T t]."""
        ret = torch.zeros(self.shape[0], 3, 4).to(dtype=self.dtype, device=self.device)
        rotT = self.data[:, :3, :3].transpose(1, 2)
        ret[:, :, :3] = rotT
        ret[:, :, 3] = -(rotT @ self.data[:, :3, 3].unsqueeze(2)).view(-1, 3)
        # if self.data is a valid SE(3), so is the inverse
        return SE3(data=ret, requires_check=False)

    def to_matrix(self) -> torch.Tensor:
        """Return the homogeneous (batch, 4, 4) matrix with [0 0 0 1] row."""
        ret = torch.zeros(self.shape[0], 4, 4).to(dtype=self.dtype, device=self.device)
        ret[:, :3] = self.data
        ret[:, 3, 3] = 1
        return ret

    def update_from_x_y_z_quaternion(self, x_y_z_quaternion: torch.Tensor):
        """Overwrite this SE3 in place from a (batch, 7) x-y-z-quaternion."""
        self.update(SE3.x_y_z_unit_quaternion_to_SE3(x_y_z_quaternion))

    def update_from_rot_and_trans(self, rotation: SO3, translation: Point3):
        """Overwrite this SE3 in place from an SO3 batch and a Point3 batch
        (must agree in batch size, dtype, and device)."""
        if rotation.shape[0] != translation.shape[0]:
            raise ValueError("rotation and translation must have the same batch size.")

        if rotation.dtype != translation.dtype:
            raise ValueError("rotation and translation must be of the same type.")

        if rotation.device != translation.device:
            raise ValueError("rotation and translation must be on the same device.")

        self.data = torch.cat((rotation.data, translation.data.unsqueeze(2)), dim=2)

    @staticmethod
    def hat(tangent_vector: torch.Tensor) -> torch.Tensor:
        """Map a (batch, 6) tangent vector to its 4x4 se(3) hat matrix
        [[ [w]x, v ], [0, 0]]."""
        _check = tangent_vector.ndim == 2 and tangent_vector.shape[1] == 6
        if not _check:
            raise ValueError("Invalid vee matrix for SE(3).")
        matrix = torch.zeros(tangent_vector.shape[0], 4, 4).to(
            dtype=tangent_vector.dtype, device=tangent_vector.device
        )
        matrix[:, :3, :3] = SO3.hat(tangent_vector[:, 3:])
        matrix[:, :3, 3] = tangent_vector[:, :3]
        return matrix

    @staticmethod
    def vee(matrix: torch.Tensor) -> torch.Tensor:
        """Inverse of ``hat``: extract the (batch, 6) tangent vector from a
        batch of validated 4x4 se(3) matrices."""
        SE3._hat_matrix_check(matrix)
        return torch.cat((matrix[:, :3, 3], SO3.vee(matrix[:, :3, :3])), dim=1)

    def _copy_impl(self, new_name: Optional[str] = None) -> "SE3":
        # if self.data is a valid SE(3), so is the copy
        return SE3(data=self.data.clone(), name=new_name, requires_check=False)

    # only added to avoid casting downstream
    def copy(self, new_name: Optional[str] = None) -> "SE3":
        return cast(SE3, super().copy(new_name=new_name))

    def _transform_shape_check(self, point: Union[Point3, torch.Tensor]):
        """Validate that ``point`` is a (batch, 3) tensor or Point3 whose
        batch size is 1, or matches ours (or ours is 1)."""
        err_msg = (
            f"SE3 can only transform vectors of shape [{self.shape[0]}, 3] or [1, 3], "
            f"but the input has shape {point.shape}."
        )
        if isinstance(point, torch.Tensor):
            if not point.ndim == 2 or point.shape[1] != 3:
                raise ValueError(err_msg)
        elif point.dof() != 3:
            raise ValueError(err_msg)
        if (
            point.shape[0] != self.shape[0]
            and point.shape[0] != 1
            and self.shape[0] != 1
        ):
            raise ValueError(err_msg)

    def transform_from(
        self,
        point: Union[Point3, torch.Tensor],
        jacobians: Optional[List[torch.Tensor]] = None,
    ) -> Point3:
        """Apply the transform to a point: R p + t.

        If ``jacobians`` is given, appends [Jg, Jpnt]: the right Jacobian
        w.r.t. the group element and the Jacobian w.r.t. the point.
        """
        self._transform_shape_check(point)
        batch_size = max(self.shape[0], point.shape[0])
        if isinstance(point, torch.Tensor):
            p = point.view(-1, 3, 1)
        else:
            p = point.data.view(-1, 3, 1)

        ret = Point3(data=(self[:, :, :3] @ p).view(-1, 3))
        ret.data += self[:, :, 3]

        if jacobians is not None:
            self._check_jacobians_list(jacobians)
            # Right jacobians for SE(3) are computed
            Jg = torch.zeros(batch_size, 3, 6, dtype=self.dtype, device=self.device)
            Jg[:, :, :3] = self[:, :, :3]
            Jg[:, :, 3:] = -self[:, :, :3] @ SO3.hat(p)
            # Jacobians for point
            Jpnt = Jg[:, :, :3]

            jacobians.extend([Jg, Jpnt])

        return ret

    def transform_to(
        self,
        point: Union[Point3, torch.Tensor],
        jacobians: Optional[List[torch.Tensor]] = None,
    ) -> Point3:
        """Apply the inverse transform to a point: R^T (p - t).

        If ``jacobians`` is given, appends [Jg, Jpnt] as in transform_from.
        """
        self._transform_shape_check(point)
        batch_size = max(self.shape[0], point.shape[0])
        if isinstance(point, torch.Tensor):
            p = point.view(-1, 3, 1)
        else:
            p = point.data.view(-1, 3, 1)

        temp = p - self[:, :, 3:]
        ret = Point3(data=(self[:, :, :3].transpose(1, 2) @ temp).view(-1, 3))

        if jacobians is not None:
            self._check_jacobians_list(jacobians)
            # Right jacobians for SE(3) are computed
            Jg = torch.zeros(batch_size, 3, 6, dtype=self.dtype, device=self.device)
            Jg[:, 0, 0] = -1
            Jg[:, 1, 1] = -1
            Jg[:, 2, 2] = -1
            Jg[:, 0, 4] = -ret[:, 2]
            Jg[:, 1, 3] = ret[:, 2]
            Jg[:, 0, 5] = ret[:, 1]
            Jg[:, 2, 3] = -ret[:, 1]
            Jg[:, 1, 5] = -ret[:, 0]
            Jg[:, 2, 4] = ret[:, 0]
            # Jacobians for point
            Jpnt = self[:, :, :3].transpose(1, 2).expand(batch_size, 3, 3)

            jacobians.extend([Jg, Jpnt])

        return ret
# Module-level convenience aliases for the SE3 random constructors, matching
# the naming convention used for other theseus geometry types.
rand_se3 = SE3.rand
randn_se3 = SE3.randn
| 21,287 | 907 | 23 |
f76f31edd94cd0e55fff67cd5ce3c1f500f7d6a4 | 958 | py | Python | bulq/core/coders.py | koji-m/bulq | 78f97d2e57d6bcb0ec3fa2b0c7539db3ebaa104a | [
"Apache-2.0"
] | null | null | null | bulq/core/coders.py | koji-m/bulq | 78f97d2e57d6bcb0ec3fa2b0c7539db3ebaa104a | [
"Apache-2.0"
] | null | null | null | bulq/core/coders.py | koji-m/bulq | 78f97d2e57d6bcb0ec3fa2b0c7539db3ebaa104a | [
"Apache-2.0"
] | null | null | null | import typing
import apache_beam as beam
| 21.288889 | 62 | 0.58142 | import typing
import apache_beam as beam
class Coders:
    """Registry resolving a user-supplied coder name to a Beam coder.

    Known names are normalized (lowercased, '-' replaced by '_') and mapped
    through TABLE to a canonical label, then to a Beam coder class in
    CODERS.  Anything not registered falls back to a StrCustomCoder that
    encodes/decodes using the raw name as a Python codec.
    """

    # Normalized alias -> canonical coder label.
    TABLE = {
        'bytes': 'Bytes',
        'utf8': 'UTF-8',
        'utf_8': 'UTF-8',
    }

    # Canonical label -> Beam coder class.
    CODERS = {
        'Bytes': beam.coders.coders.BytesCoder,
        'UTF-8': beam.coders.coders.StrUtf8Coder,
    }

    @classmethod
    def get_coder(cls, coder_name):
        """Return a coder instance for ``coder_name``.

        Bug fix: the original indexed TABLE with ``[]``, so any name not in
        TABLE (e.g. 'ascii') raised KeyError before ever reaching the
        StrCustomCoder fallback.  Using ``.get`` makes the fallback
        reachable, as the signature of this method clearly intends.
        """
        normalized = coder_name.lower().replace('-', '_')
        canonical = cls.TABLE.get(normalized)
        coder = cls.CODERS.get(canonical) if canonical is not None else None
        if coder:
            return coder()
        else:
            return StrCustomCoder(coder_name)
class StrCustomCoder(beam.coders.coders.Coder):
    """Beam coder that (de)serializes strings with an arbitrary Python codec.

    The codec name is passed verbatim to ``str.encode`` / ``bytes.decode``,
    so any encoding Python's codec registry understands is supported.
    """

    def __init__(self, coder_name):
        """Remember the codec name to use for encoding and decoding."""
        super().__init__()
        self._coder_name = coder_name

    def is_deterministic(self):
        """Encoding a given string always yields the same bytes."""
        return True

    def to_type_hint(self):
        """No specific element type is promised by this coder."""
        return typing.Any

    def encode(self, value):
        """Serialize ``value`` (str) to bytes using the configured codec."""
        encoded = value.encode(self._coder_name)
        return encoded

    def decode(self, value):
        """Deserialize ``value`` (bytes) back to str using the codec."""
        decoded = value.decode(self._coder_name)
        return decoded
| 455 | 279 | 180 |
e08170c920599293ff53c51e1245c2ecc19f92ac | 1,231 | py | Python | appengine/src/greenday_core/tests/test_youtube_thumbnails_cache_client.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 6 | 2018-07-31T16:48:07.000Z | 2020-02-01T03:17:51.000Z | appengine/src/greenday_core/tests/test_youtube_thumbnails_cache_client.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 41 | 2018-08-07T16:43:07.000Z | 2020-06-05T18:54:50.000Z | appengine/src/greenday_core/tests/test_youtube_thumbnails_cache_client.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 1 | 2018-08-07T16:40:18.000Z | 2018-08-07T16:40:18.000Z | """
Tests for :mod:`greenday_core.youtube_thumbnails_cache <greenday_core.youtube_thumbnails_cache>`
"""
from greenday_core.tests.base import AppengineTestBed
from greenday_core.youtube_thumbnails_cache import YTThumbnailsCache
class YTThumbnailClientTestCase(AppengineTestBed):
    """
    Tests for :class:`greenday_core.youtube_thumbnails_cache.YTThumbnailsCache <greenday_core.youtube_thumbnails_cache.YTThumbnailsCache>`
    """
    def test_remove_all_cached_thumbnail_images(self):
        """
        After caching thumbnails for two timestamps,
        :func:`greenday_core.youtube_thumbnails_cache.YTThumbnailsCache.remove_all_cached_thumbnail_images <greenday_core.youtube_thumbnails_cache.YTThumbnailsCache.remove_all_cached_thumbnail_images>`
        should leave no cached thumbnail images for the video.
        """
        cache = YTThumbnailsCache('ytid')
        cache_keys = [cache.create_key(timestamp) for timestamp in (42, 1024)]
        for cache_key in cache_keys:
            cache.cache_thumbnail('dummy content', cache_key)
        # Sanity check: every key must be retrievable before the purge...
        for cache_key in cache_keys:
            self.assertTrue(cache.fetch_cached_thumbnail(cache_key))
        cache.remove_all_cached_thumbnail_images()
        # ...and none after it.
        for cache_key in cache_keys:
            self.assertFalse(cache.fetch_cached_thumbnail(cache_key))
| 41.033333 | 205 | 0.74411 | """
Tests for :mod:`greenday_core.youtube_thumbnails_cache <greenday_core.youtube_thumbnails_cache>`
"""
from greenday_core.tests.base import AppengineTestBed
from greenday_core.youtube_thumbnails_cache import YTThumbnailsCache
class YTThumbnailClientTestCase(AppengineTestBed):
    """
    Tests for :class:`greenday_core.youtube_thumbnails_cache.YTThumbnailsCache <greenday_core.youtube_thumbnails_cache.YTThumbnailsCache>`
    """
    def test_remove_all_cached_thumbnail_images(self):
        """
        After caching thumbnails for two timestamps,
        :func:`greenday_core.youtube_thumbnails_cache.YTThumbnailsCache.remove_all_cached_thumbnail_images <greenday_core.youtube_thumbnails_cache.YTThumbnailsCache.remove_all_cached_thumbnail_images>`
        should leave no cached thumbnail images for the video.
        """
        cache = YTThumbnailsCache('ytid')
        cache_keys = [cache.create_key(timestamp) for timestamp in (42, 1024)]
        for cache_key in cache_keys:
            cache.cache_thumbnail('dummy content', cache_key)
        # Sanity check: every key must be retrievable before the purge...
        for cache_key in cache_keys:
            self.assertTrue(cache.fetch_cached_thumbnail(cache_key))
        cache.remove_all_cached_thumbnail_images()
        # ...and none after it.
        for cache_key in cache_keys:
            self.assertFalse(cache.fetch_cached_thumbnail(cache_key))
| 0 | 0 | 0 |
327770f23fbdf7de1641f8b54d4885dd27c86561 | 61,679 | py | Python | run_nerf_helpers.py | yashbhalgat/NeuralDiff | a480f2103384a4f5d77eb84abd977a200e6e6405 | [
"MIT"
] | 1 | 2022-02-22T13:54:41.000Z | 2022-02-22T13:54:41.000Z | run_nerf_helpers.py | yashbhalgat/NeuralDiff | a480f2103384a4f5d77eb84abd977a200e6e6405 | [
"MIT"
] | null | null | null | run_nerf_helpers.py | yashbhalgat/NeuralDiff | a480f2103384a4f5d77eb84abd977a200e6e6405 | [
"MIT"
] | null | null | null | import torch
# torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
from skimage.metrics import structural_similarity, peak_signal_noise_ratio
from lpips import LPIPS
from hash_encoding import HashEmbedder, SHEncoder, XYZplusT_HashEmbedder
from time_encoding import XYZ_TimeOnOff_Encoding, XYZ_TimePiecewiseConstant
# Misc
# Per-batch losses / image metrics used across the training code.
# img2mse: mean-squared error between rendered and ground-truth pixels.
img2mse = lambda x, y : torch.mean((x - y) ** 2)
# img2L1: scaled Charbonnier-style robust L1 on per-pixel color differences.
img2L1 = lambda x, y : torch.mean(0.01 * (torch.sqrt(1 + ((x-y)**2).sum(dim=-1)/0.0001) - 1))
# Uncertainty-weighted MSE (heteroscedastic loss); 1e-9 guards against div-by-zero.
img2mse_with_uncertainty = lambda x, y, u : torch.mean(((x - y) ** 2)/(2*(u+1e-9)**2) + torch.log((u+1e-9)**2))
# Per-ray (unreduced) variants of the two losses above.
img2mse_perray = lambda x, y : ((x - y) ** 2).sum(dim=-1)
img2mse_with_uncertainty_perray = lambda x, y, u : ((x - y) ** 2).sum(dim=-1)/(2*(u+1e-9)**2) + 0.5*torch.log((u+1e-9)**2)
# Convert MSE to PSNR (dB); clamp/convert an image to uint8 for saving.
mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch.Tensor([10.]))
to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)
# Shared activations and the lazily-initialized LPIPS network (see get_perceptual_metrics).
relu_act = nn.Softplus()
bce_loss = nn.BCELoss()
lpips_vgg = None
@torch.no_grad()
# Positional encoding (section 5.1)
# Small NeRF for Hash embeddings
# NOTE(review): this region appears to be truncated residue of a larger file.
# The @torch.no_grad() decorator above dangles onto the class below (decorating
# a class with no_grad() is almost certainly unintended), and the classes that
# follow are docstring-only stubs whose bodies are missing. Confirm against the
# full source before relying on anything here.
class NeuralDiff_BGFGSeparate(nn.Module):
    '''
    Static Background model uses a low-frequency grid,
    and Foreground model uses a high-frequency grid.
    '''
class BGFG_XYZT(nn.Module):
    '''
    XYZT grid for foreground model
    '''
class BGFG_OnOffEncoding(nn.Module):
    '''
    Uses OnOffEncoding for time
    '''
class BGFG_PiecewiseConst(nn.Module):
    '''
    Uses OnOffEncoding for time
    '''
class BGFG_XYZT_Bottleneck(nn.Module):
    '''
    XYZT grid for foreground model
    Foreground model also uses some information (encoding) from the BG model, which helps in triangulation
    '''
# Ray helpers
# Hierarchical sampling (section 5.2)
| 49.781275 | 169 | 0.564147 | import torch
# torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pdb
from skimage.metrics import structural_similarity, peak_signal_noise_ratio
from lpips import LPIPS
from hash_encoding import HashEmbedder, SHEncoder, XYZplusT_HashEmbedder
from time_encoding import XYZ_TimeOnOff_Encoding, XYZ_TimePiecewiseConstant
# Misc
# Per-batch losses / image metrics used across the training code.
# img2mse: mean-squared error between rendered and ground-truth pixels.
img2mse = lambda x, y : torch.mean((x - y) ** 2)
# img2L1: scaled Charbonnier-style robust L1 on per-pixel color differences.
img2L1 = lambda x, y : torch.mean(0.01 * (torch.sqrt(1 + ((x-y)**2).sum(dim=-1)/0.0001) - 1))
# Uncertainty-weighted MSE (heteroscedastic loss); 1e-9 guards against div-by-zero.
img2mse_with_uncertainty = lambda x, y, u : torch.mean(((x - y) ** 2)/(2*(u+1e-9)**2) + torch.log((u+1e-9)**2))
# Per-ray (unreduced) variants of the two losses above.
img2mse_perray = lambda x, y : ((x - y) ** 2).sum(dim=-1)
img2mse_with_uncertainty_perray = lambda x, y, u : ((x - y) ** 2).sum(dim=-1)/(2*(u+1e-9)**2) + 0.5*torch.log((u+1e-9)**2)
# Convert MSE to PSNR (dB); clamp/convert an image to uint8 for saving.
mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch.Tensor([10.]))
to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)
# Shared activations and the lazily-initialized LPIPS network (see get_perceptual_metrics).
relu_act = nn.Softplus()
bce_loss = nn.BCELoss()
lpips_vgg = None
@torch.no_grad()
def get_perceptual_metrics(rgbs, gts, lpips_batch_size=8, device='cuda'):
    """Compute (PSNR, SSIM, LPIPS) between rendered and ground-truth images.

    rgbs, gts: numpy arrays of identical shape, values in [0, 1]
        (per the data_range=1 passed below); can be a single H x W x 3 image.
    lpips_batch_size: chunk size for running the LPIPS network.
    device: device the LPIPS VGG network is placed and evaluated on.

    Returns a (psnr, ssim, lpips) tuple of Python floats / numpy scalars.
    Side effect: lazily builds the module-level ``lpips_vgg`` network on first
    call and reuses it afterwards (hence the ``global``).
    """
    # rgbs and gts should be numpy arrays of the same shape. Can be just 1 x H x W x 3
    # From pixelNeRF https://github.com/sxyu/pixel-nerf/blob/2929708e90b246dbd0329ce2a128ef381bd8c25d/eval/calc_metrics.py#L188
    global lpips_vgg
    # NOTE(review): `multichannel=True` is deprecated in newer scikit-image in
    # favor of `channel_axis=-1` — confirm the pinned skimage version.
    ssim = [structural_similarity(rgb, gt, multichannel=True, data_range=1) for rgb, gt in zip(rgbs, gts)]
    ssim = np.mean(ssim)
    psnr = [peak_signal_noise_ratio(rgb, gt, data_range=1) for rgb, gt in zip(rgbs, gts)]
    psnr = np.mean(psnr)
    # From pixelNeRF https://github.com/sxyu/pixel-nerf/blob/2929708e90b246dbd0329ce2a128ef381bd8c25d/eval/calc_metrics.py#L238
    if lpips_vgg is None:
        lpips_vgg = LPIPS(net="vgg").to(device=device)
    lpips_all = []
    # LPIPS expects NCHW float tensors; unsqueeze adds the batch dimension.
    preds_spl = torch.split(torch.from_numpy(rgbs).unsqueeze(0).permute(0,3,1,2).float(), lpips_batch_size, dim=0)
    gts_spl = torch.split(torch.from_numpy(gts).unsqueeze(0).permute(0,3,1,2).float(), lpips_batch_size, dim=0)
    for predi, gti in zip(preds_spl, gts_spl):
        lpips_i = lpips_vgg(predi.to(device=device), gti.to(device=device))
        lpips_all.append(lpips_i)
    lpips = torch.cat(lpips_all)
    lpips = lpips.mean().item()
    return psnr, ssim, lpips
# Positional encoding (section 5.1)
class Embedder:
    """Classic NeRF positional encoding (paper section 5.1).

    Maps each input coordinate to [x (if include_input), sin(f*x), cos(f*x), ...]
    over a bank of frequencies built from the configuration kwargs:
    include_input, input_dims, max_freq_log2, num_freqs, log_sampling,
    periodic_fns.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Build the list of encoding functions and the total output dim."""
        dims = self.kwargs['input_dims']
        fns = []
        total_dim = 0
        if self.kwargs['include_input']:
            # Identity channel passes the raw coordinates through.
            fns.append(lambda x: x)
            total_dim += dims

        n_freqs = self.kwargs['num_freqs']
        top = self.kwargs['max_freq_log2']
        if self.kwargs['log_sampling']:
            freq_bands = 2. ** torch.linspace(0., top, steps=n_freqs)
        else:
            freq_bands = torch.linspace(2. ** 0., 2. ** top, steps=n_freqs)

        for freq in freq_bands:
            for p_fn in self.kwargs['periodic_fns']:
                # Bind freq / p_fn as defaults to avoid late-binding closures.
                fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
                total_dim += dims

        self.embed_fns = fns
        self.out_dim = total_dim

    def embed(self, inputs):
        """Apply every encoding and concatenate along the last axis."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
def get_embedder(multires, args, i=0):
    """Return an (embed_fn, out_dim, time_dim) triple for embedding mode ``i``.

    i == -1: no encoding — identity over raw xyzt (out_dim 4).
    i ==  0: classic sinusoidal positional encoding with ``multires`` octaves.
    i ==  1: identity as well — hash-grid embeddings are instantiated inside
             the NeRF models themselves, so nothing is built here.
    i ==  2: spherical-harmonics encoder.

    out_dim is the size of the TOTAL embedding (xyz / xyzt / xyz+t);
    time_dim is the time-embedding size (None for the modes built here).
    """
    if i == -1:
        return nn.Identity(), 4, None
    if i == 1:
        ### I am initializing embeddings in NeRF models itself
        return nn.Identity(), 4, None

    time_dim = None
    if i == 0:
        encoder = Embedder(
            include_input=True,
            input_dims=3,
            max_freq_log2=multires - 1,
            num_freqs=multires,
            log_sampling=True,
            periodic_fns=[torch.sin, torch.cos],
        )
        # Close over the encoder via a default arg so the lambda is self-contained.
        embed = lambda x, eo=encoder: eo.embed(x)
        out_dim = encoder.out_dim
    elif i == 2:
        embed = SHEncoder()
        out_dim = embed.out_dim
    return embed, out_dim, time_dim
def create_sigma_and_color_MLP(
        num_layers, num_layers_color,
        hidden_dim, hidden_dim_color,
        input_ch,
        input_ch_views,
        geo_feat_dim,
        use_viewdirs=True
    ):
    """Build the (sigma_net, color_net) MLP pair used by the hash-grid NeRFs.

    sigma_net: input_ch -> hidden_dim ... -> 1 sigma + 1 uncertainty + geo_feat_dim.
    color_net: (input_ch_views +) geo_feat_dim -> hidden_dim ... -> 3 rgb.
    All layers are bias-free Linear modules; returns two nn.ModuleLists.

    NOTE(review): hidden_dim_color is accepted but never used — every hidden
    layer of the color net is sized by hidden_dim, and existing callers rely
    on this (one passes a throwaway 53). Kept as-is to stay checkpoint-compatible.
    """
    # sigma / geometry network
    sigma_layers = []
    for idx in range(num_layers):
        fan_in = input_ch if idx == 0 else hidden_dim
        # Last layer emits 1 sigma + 1 uncertainty + SH features for color.
        fan_out = hidden_dim if idx < num_layers - 1 else 1 + 1 + geo_feat_dim
        sigma_layers.append(nn.Linear(fan_in, fan_out, bias=False))

    # color network (optionally view-conditioned)
    first_in = input_ch_views + geo_feat_dim if use_viewdirs else geo_feat_dim
    color_layers = []
    for idx in range(num_layers_color):
        fan_in = first_in if idx == 0 else hidden_dim
        fan_out = hidden_dim if idx < num_layers_color - 1 else 3
        color_layers.append(nn.Linear(fan_in, fan_out, bias=False))

    return nn.ModuleList(sigma_layers), nn.ModuleList(color_layers)
def forward_through_MLP(sigma_net, color_net, embedded_x, embedded_views, \
                        num_layers, num_layers_color):
    """Run the sigma and color heads built by create_sigma_and_color_MLP.

    embedded_x: embedded positions; embedded_views: embedded view directions,
    or None when the color head is not view-conditioned.
    Returns (sigma, color, uncertainties); sigma and uncertainty are pushed
    through the module-level Softplus (relu_act) so both stay positive.
    """
    # geometry / sigma head
    feat = embedded_x
    for idx in range(num_layers):
        feat = sigma_net[idx](feat)
        if idx < num_layers - 1:
            feat = F.relu(feat, inplace=True)
    sigma = feat[..., 0]
    uncertainties = feat[..., 1]
    geo_feat = feat[..., 2:]

    # uncertainties are always predicted; callers decide whether to use them
    uncertainties = relu_act(uncertainties)
    sigma = relu_act(sigma)

    # color head (optionally view-conditioned)
    if embedded_views is not None:
        feat = torch.cat([embedded_views, geo_feat], dim=-1)
    else:
        feat = geo_feat
    for idx in range(num_layers_color):
        feat = color_net[idx](feat)
        if idx < num_layers_color - 1:
            feat = F.relu(feat, inplace=True)

    return sigma, feat, uncertainties
# Small NeRF for Hash embeddings
class NeRFSmall(nn.Module):
    """Compact NeRF decoder head for hash-grid embeddings.

    Input is the concatenation of an embedded position (input_ch) and an
    embedded view direction (input_ch_views); output is rgb + sigma, with an
    extra uncertainty channel appended when use_uncertainties is set.
    """

    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 hidden_dim_color=64,
                 input_ch=3, input_ch_views=3,
                 use_uncertainties=False
                 ):
        super(NeRFSmall, self).__init__()
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.use_uncertainties = use_uncertainties

        # sigma + color network configuration
        self.num_layers = num_layers
        self.num_layers_color = num_layers_color
        self.hidden_dim = hidden_dim
        self.geo_feat_dim = geo_feat_dim

        self.sigma_net, self.color_net = create_sigma_and_color_MLP(
            num_layers, num_layers_color,
            hidden_dim, hidden_dim_color,
            input_ch,
            input_ch_views,
            geo_feat_dim
        )

    def forward(self, x):
        """Split x into (embedded position, embedded viewdir) and decode."""
        input_pts, input_views = torch.split(
            x, [self.input_ch, self.input_ch_views], dim=-1)

        sigma, color, uncertainties = forward_through_MLP(
            self.sigma_net, self.color_net,
            input_pts, input_views,
            self.num_layers, self.num_layers_color)

        parts = [color, sigma.unsqueeze(dim=-1)]
        if self.use_uncertainties:
            parts.append(uncertainties.unsqueeze(dim=-1))
        return torch.cat(parts, -1)
class BackgroundForegroundNeRF(nn.Module):
    """Two-stream NeRF: a static background MLP (XYZ only) plus a dynamic
    foreground MLP (XYZ + time embedding), composited by sigma-weighted
    color mixing. Input x is [embedded XYZ+T | embedded viewdirs], where the
    last ``time_dim`` channels of the position part are the time embedding.
    """
    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 hidden_dim_color=64,
                 input_ch=3, input_ch_views=3,
                 time_dim=8,
                 use_uncertainties=False,
                 only_background=False # we will rarely use this option
                 ):
        super(BackgroundForegroundNeRF, self).__init__()
        self.input_ch = input_ch # size of XYZ and time embeddings combined!
        self.input_ch_views = input_ch_views
        self.time_dim = time_dim
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_color = num_layers_color
        self.hidden_dim_color = hidden_dim_color
        self.use_uncertainties = use_uncertainties
        self.only_background = only_background
        ### Background Network
        self.background_sigma_net, self.background_color_net = create_sigma_and_color_MLP(
            num_layers, num_layers_color,
            hidden_dim, hidden_dim_color,
            input_ch - time_dim, # only XYZ
            input_ch_views,
            geo_feat_dim
        )
        ### Foreground Network
        self.foreground_sigma_net, self.foreground_color_net = create_sigma_and_color_MLP(
            num_layers, num_layers_color,
            hidden_dim, hidden_dim_color,
            input_ch, # XYZ + time embedding
            input_ch_views,
            geo_feat_dim
        )
    def forward(self, x):
        """Decode background and foreground, then mix by relative density.

        Returns [color, sigma(, uncertainty(, foreground_sigma))] depending on
        only_background / use_uncertainties flags.
        """
        input_pts, input_views = torch.split(x, [self.input_ch, self.input_ch_views], dim=-1)
        # Background sees XYZ only; the trailing time channels are stripped.
        input_xyz, _ = input_pts[..., :-self.time_dim], input_pts[..., -self.time_dim:]
        background_sigma, background_color, background_uncertainties = forward_through_MLP( \
                                                    self.background_sigma_net, self.background_color_net, \
                                                    input_xyz, input_views, \
                                                    self.num_layers, self.num_layers_color)
        if not self.only_background:
            foreground_sigma, foreground_color, foreground_uncertainties = forward_through_MLP( \
                                                        self.foreground_sigma_net, self.foreground_color_net, \
                                                        input_pts, input_views, \
                                                        self.num_layers, self.num_layers_color)
        else:
            # dummies for foreground
            foreground_sigma, foreground_color, foreground_uncertainties = torch.zeros_like(background_sigma), torch.zeros_like(background_color), None
        # Principled color mixing
        # Total density is the sum of streams; each stream's color is weighted
        # by its share of density (1e-9 guards division by zero).
        sigma = background_sigma + foreground_sigma + 1e-9
        color = (background_sigma/sigma)[:,None] * background_color + (foreground_sigma/sigma)[:,None] * foreground_color
        if self.only_background:
            if self.use_uncertainties:
                return torch.cat([color, \
                                  sigma.unsqueeze(dim=-1), \
                                  background_uncertainties.unsqueeze(dim=-1)], -1)
            else:
                return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
        if not self.only_background:
            if self.use_uncertainties:
                return torch.cat([color, \
                                  sigma.unsqueeze(dim=-1), \
                                  foreground_uncertainties.unsqueeze(dim=-1), \
                                  foreground_sigma.unsqueeze(dim=-1)], -1)
            else:
                return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class BackgroundForegroundNeRF_separateEmbeddings(nn.Module):
    """Like BackgroundForegroundNeRF, but each stream owns its hash-grid
    embedder: the background embeds XYZ, the foreground embeds XYZ+T.
    Input x is raw [xyzt | embedded viewdirs] (input_ch = 4).
    """
    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 hidden_dim_color=64,
                 input_ch=3, input_ch_views=3,
                 use_uncertainties=False,
                 only_background=False, # we will rarely use this option
                 args=None
                 ):
        super(BackgroundForegroundNeRF_separateEmbeddings, self).__init__()
        self.input_ch = input_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views = input_ch_views # has embedded views
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_color = num_layers_color
        self.hidden_dim_color = hidden_dim_color
        self.use_uncertainties = use_uncertainties
        self.only_background = only_background
        # Spatial-only bounds for the background grid (drop the time bound).
        xyz_bounding_box = args.bounding_box[0][:3], args.bounding_box[1][:3]
        self.BG_embedder = HashEmbedder(bounding_box=xyz_bounding_box, \
                                        log2_hashmap_size=args.log2_hashmap_size, \
                                        finest_resolution=args.finest_res)
        self.FG_embedder = XYZplusT_HashEmbedder(bounding_box=args.bounding_box, \
                                        log2_hashmap_size=args.log2_hashmap_size, \
                                        finest_resolution=args.finest_res)
        ### Background Network
        self.BG_sigma_net, self.BG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                    hidden_dim, hidden_dim_color,
                                                    self.BG_embedder.out_dim, # only XYZ
                                                    input_ch_views, geo_feat_dim
                                               )
        ### Foreground Network
        self.FG_sigma_net, self.FG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                    hidden_dim, hidden_dim_color,
                                                    self.FG_embedder.out_dim, # XYZ + time embedding
                                                    input_ch_views, geo_feat_dim
                                               )
    def forward(self, x):
        """Embed xyzt per stream, decode, and sigma-weight the colors."""
        input_pts, input_views = torch.split(x, [self.input_ch, self.input_ch_views], dim=-1)
        BG_embedded_xyz = self.BG_embedder(input_pts[...,:3])
        FG_embedded_xyzt = self.FG_embedder(input_pts)
        BG_sigma, BG_color, BG_uncertainties = forward_through_MLP(self.BG_sigma_net, self.BG_color_net, \
                                                    BG_embedded_xyz, input_views, \
                                                    self.num_layers, self.num_layers_color)
        if not self.only_background:
            FG_sigma, FG_color, FG_uncertainties = forward_through_MLP(self.FG_sigma_net, self.FG_color_net, \
                                                        FG_embedded_xyzt, input_views, \
                                                        self.num_layers, self.num_layers_color)
        else:
            # dummies for foreground
            FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
        # Principled color mixing
        # Total density is the sum of streams; each stream's color is weighted
        # by its share of density (1e-9 guards division by zero).
        sigma = BG_sigma + FG_sigma + 1e-9
        color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color
        if self.only_background:
            if self.use_uncertainties:
                return torch.cat([color, sigma.unsqueeze(dim=-1), BG_uncertainties.unsqueeze(dim=-1)], -1)
            else:
                return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
        if not self.only_background:
            if self.use_uncertainties:
                return torch.cat([color, sigma.unsqueeze(dim=-1), \
                                  FG_uncertainties.unsqueeze(dim=-1), \
                                  FG_sigma.unsqueeze(dim=-1)], -1)
            else:
                return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class BackgroundForegroundActorNeRF_separateEmbeddings(nn.Module):
    """Three-stream NeRF (background / foreground / actor), one hash-grid
    embedder per stream. The actor stream works in camera coordinates
    (input_pts_cam / input_views_cam); ``big`` adds 2 layers and 64 hidden
    units to the dynamic streams.
    """
    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 hidden_dim_color=64,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 BG_embedder=None, FG_embedder=None, ACTOR_embedder=None,
                 big=False
                 ):
        super(BackgroundForegroundActorNeRF_separateEmbeddings, self).__init__()
        self.input_ch = input_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views = input_ch_views # has embedded views
        self.input_cam_ch = input_cam_ch # should be 4
        self.input_ch_views_cam = input_ch_views_cam # has embedded views
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_color = num_layers_color
        self.hidden_dim_color = hidden_dim_color
        self.use_uncertainties = use_uncertainties
        self.big = big
        self.BG_embedder, self.FG_embedder, self.ACTOR_embedder = BG_embedder, FG_embedder, ACTOR_embedder
        ### Background Network
        self.BG_sigma_net, self.BG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                    hidden_dim, hidden_dim_color,
                                                    self.BG_embedder.out_dim, # only XYZ
                                                    input_ch_views, geo_feat_dim
                                               )
        ### Foreground Network
        # big=True widens/deepens only the dynamic streams (FG and ACTOR).
        self.FG_sigma_net, self.FG_color_net = create_sigma_and_color_MLP(num_layers+2*big, num_layers_color+2*big,
                                                    hidden_dim+64*big, hidden_dim_color+64*big,
                                                    self.FG_embedder.out_dim, # XYZ + time embedding
                                                    input_ch_views, geo_feat_dim
                                               )
        ### Actor Network
        self.ACTOR_sigma_net, self.ACTOR_color_net = create_sigma_and_color_MLP(num_layers+2*big, num_layers_color+2*big,
                                                        hidden_dim+64*big, hidden_dim_color+64*big,
                                                        self.ACTOR_embedder.out_dim, # XYZ + time embedding
                                                        input_ch_views_cam, geo_feat_dim
                                                   )
    def forward(self, x):
        """Decode all three streams and mix colors by relative density.

        x packs [xyzt world | xyzt camera | embedded views | embedded views cam].
        """
        input_pts, input_pts_cam, input_views, input_views_cam = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
        BG_embedded_xyz = self.BG_embedder(input_pts[...,:3])
        FG_embedded_xyzt = self.FG_embedder(input_pts)
        ACTOR_embedded_xyzt = self.ACTOR_embedder(input_pts_cam)
        BG_sigma, BG_color, _ = forward_through_MLP(self.BG_sigma_net, self.BG_color_net, \
                                                    BG_embedded_xyz, input_views, \
                                                    self.num_layers, self.num_layers_color)
        FG_sigma, FG_color, FG_uncertainties = forward_through_MLP(self.FG_sigma_net, self.FG_color_net, \
                                                    FG_embedded_xyzt, input_views, \
                                                    self.num_layers+2*self.big, self.num_layers_color+2*self.big)
        ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = forward_through_MLP(self.ACTOR_sigma_net, self.ACTOR_color_net, \
                                                            ACTOR_embedded_xyzt, input_views_cam, \
                                                            self.num_layers+2*self.big, self.num_layers_color+2*self.big)
        # Principled color mixing
        # Total density is the sum of streams; each stream's color is weighted
        # by its share of density (1e-9 guards division by zero).
        sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
        color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color + (ACTOR_sigma/sigma)[:,None] * ACTOR_color
        if self.use_uncertainties:
            return torch.cat([color, sigma.unsqueeze(dim=-1), \
                              FG_uncertainties.unsqueeze(dim=-1), \
                              FG_sigma.unsqueeze(dim=-1), \
                              ACTOR_uncertainties.unsqueeze(dim=-1), \
                              ACTOR_sigma.unsqueeze(dim=-1)], -1)
        else:
            return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class NeuralDiff(nn.Module):
    """NeuralDiff-style decomposition with hand-built Sequential MLPs.

    Static background decodes a world hash grid; when ``coarse`` is False,
    a foreground head conditioned on (time embedding, background ray encoding)
    and an actor head conditioned on (camera-space grid, time embedding) are
    added. Coarse models skip the dynamic heads and never use uncertainties.
    """
    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 num_layers_FG=4, num_layers_ACTOR=4,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 world_grid_embed=None, world_grid_embed_FG=None, camera_grid_embed=None, time_grid_embed=None,
                 big=False,
                 coarse=True):
        super(NeuralDiff, self).__init__()
        self.input_ch, self.input_cam_ch = input_ch, input_cam_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views, self.input_ch_views_cam = input_ch_views, input_ch_views_cam # has embedded views
        self.num_layers, self.num_layers_color, self.hidden_dim = num_layers, num_layers_color, hidden_dim
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_FG, self.num_layers_ACTOR = num_layers_FG, num_layers_ACTOR
        if coarse:
            self.use_uncertainties = False
        else:
            self.use_uncertainties = use_uncertainties
        self.big = big
        self.coarse = coarse # Is this a coarse model?
        self.world_grid_embed, self.camera_grid_embed, self.time_grid_embed = world_grid_embed, camera_grid_embed, time_grid_embed
        ### Background Network
        # sigma network
        # NOTE: `big` widens hidden layers via (hidden_dim + 64)*(big+1).
        BG_sigma_net = []
        for l in range(num_layers):
            in_dim = world_grid_embed.out_dim if l == 0 else (hidden_dim + 64)*(self.big+1)
            out_dim = 1 + geo_feat_dim if l==num_layers-1 else (hidden_dim + 64)*(self.big+1) # 1 sigma + 15 SH features for color
            BG_sigma_net.append(nn.Linear(in_dim, out_dim, bias=False))
            if l!=num_layers-1:
                BG_sigma_net.append(nn.ReLU())
        # color network
        BG_color_net = []
        for l in range(num_layers_color):
            in_dim = input_ch_views + geo_feat_dim if l == 0 else hidden_dim*(self.big+1)
            out_dim = 3 if l==num_layers_color-1 else hidden_dim*(self.big+1) # 3 rgb
            BG_color_net.append(nn.Linear(in_dim, out_dim, bias=False))
            if l!=num_layers_color-1:
                BG_color_net.append(nn.ReLU())
        self.BG_sigma_net, self.BG_color_net = nn.Sequential(*BG_sigma_net), nn.Sequential(*BG_color_net)
        if not coarse: # if this is a "fine" model, use dynamic components
            ### Foreground Network
            FG_net = []
            for l in range(num_layers_FG):
                in_dim = time_grid_embed.out_dim + geo_feat_dim if l == 0 else hidden_dim*(self.big+1)
                out_dim = 3 + 1 + 1 if l==num_layers_FG-1 else hidden_dim*(self.big+1) # 3 rgb_FG + 1 sigma_FG + 1 uncertainty_FG
                FG_net.append(nn.Linear(in_dim, out_dim, bias=False))
                if l!=num_layers_FG-1:
                    FG_net.append(nn.ReLU())
            self.FG_net = nn.Sequential(*FG_net)
            ### Actor Network
            ACTOR_net = []
            for l in range(num_layers_ACTOR):
                in_dim = camera_grid_embed.out_dim + time_grid_embed.out_dim if l == 0 else hidden_dim*2
                out_dim = 3 + 1 + 1 if l==num_layers_ACTOR-1 else hidden_dim*2 # 3 rgb_ACTOR + 1 sigma_ACTOR + 1 uncertainty_ACTOR
                ACTOR_net.append(nn.Linear(in_dim, out_dim, bias=False))
                if l!=num_layers_ACTOR-1:
                    ACTOR_net.append(nn.ReLU())
            self.ACTOR_net = nn.Sequential(*ACTOR_net)
    def forward(self, x):
        """Decode static (and, for fine models, dynamic) components and mix.

        Returns per-stream colors/sigmas/uncertainties when use_uncertainties,
        else the mixed [color, sigma] (see concat comments for channel layout).
        """
        input_pts, input_pts_cam, input_views, input_views_cam = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
        # Grid embedders also return a mask of points inside the grid bounds;
        # out-of-bounds contributions are zeroed below.
        embedded_xyz, keep_mask = self.world_grid_embed(input_pts[...,:3])
        if not self.coarse:
            embedded_time = self.time_grid_embed(input_pts[...,3].unsqueeze(-1))
            embedded_xyz_cam, keep_mask_cam = self.camera_grid_embed(input_pts_cam[...,:3])
        ### Static components
        h = self.BG_sigma_net(embedded_xyz)
        BG_sigma, ray_encoding = h[..., 0], h[..., 1:]
        BG_color = self.BG_color_net(torch.cat([input_views, ray_encoding], dim=-1))
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid.
        BG_color = F.sigmoid(BG_color)
        BG_sigma = relu_act(BG_sigma)
        BG_sigma, BG_color = BG_sigma*keep_mask, BG_color*keep_mask[:,None]
        ### Dynamic components
        if not self.coarse:
            # Foreground conditions on the background's geometry encoding.
            h = self.FG_net(torch.cat([embedded_time, ray_encoding], dim=-1))
            FG_color, FG_sigma, FG_uncertainties = h[..., :3], h[..., 3], h[..., 4]
            FG_color = F.sigmoid(FG_color)
            h = self.ACTOR_net(torch.cat([embedded_xyz_cam, embedded_time], dim=-1))
            ACTOR_color, ACTOR_sigma, ACTOR_uncertainties = h[..., :3], h[..., 3], h[..., 4]
            ACTOR_color = F.sigmoid(ACTOR_color)
            FG_sigma, FG_uncertainties = relu_act(FG_sigma), relu_act(FG_uncertainties)
            ACTOR_sigma, ACTOR_uncertainties = relu_act(ACTOR_sigma), relu_act(ACTOR_uncertainties)
            FG_sigma, FG_color = FG_sigma*keep_mask, FG_color*keep_mask[:,None]
            ACTOR_sigma, ACTOR_color = ACTOR_sigma*keep_mask_cam, ACTOR_color*keep_mask_cam[:,None]
        else:
            FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
            ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
        # Principled color mixing
        sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
        color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color + (ACTOR_sigma/sigma)[:,None] * ACTOR_color
        if self.use_uncertainties:
            return torch.cat([BG_color, FG_color, ACTOR_color, # :3, 3:6, 6:9
                              BG_sigma.unsqueeze(dim=-1), FG_sigma.unsqueeze(dim=-1), ACTOR_sigma.unsqueeze(dim=-1), # 9, 10, 11
                              FG_uncertainties.unsqueeze(dim=-1), ACTOR_uncertainties.unsqueeze(dim=-1)], -1) # 12, 13
        else:
            return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class NeuralDiff_BGFGSeparate(nn.Module):
    '''
    Static Background model uses a low-frequency grid,
    and Foreground model uses a high-frequency grid.
    '''
    # Differs from NeuralDiff in that the foreground conditions on its own
    # high-frequency world grid (world_grid_embed_FG) instead of reusing the
    # background's ray encoding.
    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 num_layers_FG=4, num_layers_ACTOR=4,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 world_grid_embed=None, world_grid_embed_FG=None, camera_grid_embed=None, time_grid_embed=None,
                 big=False,
                 coarse=True):
        super(NeuralDiff_BGFGSeparate, self).__init__()
        self.input_ch, self.input_cam_ch = input_ch, input_cam_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views, self.input_ch_views_cam = input_ch_views, input_ch_views_cam # has embedded views
        self.num_layers, self.num_layers_color, self.hidden_dim = num_layers, num_layers_color, hidden_dim
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_FG, self.num_layers_ACTOR = num_layers_FG, num_layers_ACTOR
        if coarse:
            self.use_uncertainties = False
        else:
            self.use_uncertainties = use_uncertainties
        self.big = big
        self.coarse = coarse # Is this a coarse model?
        self.world_grid_embed = world_grid_embed
        if not coarse: # if this is a "fine" model, use dynamic components
            self.camera_grid_embed, self.time_grid_embed = camera_grid_embed, time_grid_embed
            self.world_grid_embed_FG = world_grid_embed_FG # separate high freq grid for FG
        ### Background Network
        # sigma network
        BG_sigma_net = []
        for l in range(num_layers):
            in_dim = world_grid_embed.out_dim if l == 0 else (hidden_dim + 64)*(self.big+1)
            out_dim = 1 + geo_feat_dim if l==num_layers-1 else (hidden_dim + 64)*(self.big+1) # 1 sigma + 15 SH features for color
            BG_sigma_net.append(nn.Linear(in_dim, out_dim, bias=False))
            if l!=num_layers-1:
                BG_sigma_net.append(nn.ReLU())
        # color network
        BG_color_net = []
        for l in range(num_layers_color):
            in_dim = input_ch_views + geo_feat_dim if l == 0 else hidden_dim*(self.big+1)
            out_dim = 3 if l==num_layers_color-1 else hidden_dim*(self.big+1) # 3 rgb
            BG_color_net.append(nn.Linear(in_dim, out_dim, bias=False))
            if l!=num_layers_color-1:
                BG_color_net.append(nn.ReLU())
        self.BG_sigma_net, self.BG_color_net = nn.Sequential(*BG_sigma_net), nn.Sequential(*BG_color_net)
        if not coarse: # if this is a "fine" model, use dynamic components
            ### Foreground Network
            FG_net = []
            for l in range(num_layers_FG):
                in_dim = world_grid_embed_FG.out_dim + time_grid_embed.out_dim if l == 0 else hidden_dim*(self.big+1)
                out_dim = 3 + 1 + 1 if l==num_layers_FG-1 else hidden_dim*(self.big+1) # 3 rgb_FG + 1 sigma_FG + 1 uncertainty_FG
                FG_net.append(nn.Linear(in_dim, out_dim, bias=False))
                if l!=num_layers_FG-1:
                    FG_net.append(nn.ReLU())
            self.FG_net = nn.Sequential(*FG_net)
            ### Actor Network
            ACTOR_net = []
            for l in range(num_layers_ACTOR):
                in_dim = camera_grid_embed.out_dim + time_grid_embed.out_dim if l == 0 else hidden_dim*2
                out_dim = 3 + 1 + 1 if l==num_layers_ACTOR-1 else hidden_dim*2 # 3 rgb_ACTOR + 1 sigma_ACTOR + 1 uncertainty_ACTOR
                ACTOR_net.append(nn.Linear(in_dim, out_dim, bias=False))
                if l!=num_layers_ACTOR-1:
                    ACTOR_net.append(nn.ReLU())
            self.ACTOR_net = nn.Sequential(*ACTOR_net)
    def forward(self, x):
        """Decode static and (for fine models) dynamic streams and mix.

        Camera-space view directions are split off but unused here (the actor
        head conditions on position and time only).
        """
        input_pts, input_pts_cam, input_views, _ = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
        # Grid embedders also return a mask of points inside the grid bounds;
        # out-of-bounds contributions are zeroed below.
        embedded_xyz, keep_mask = self.world_grid_embed(input_pts[...,:3])
        if not self.coarse:
            embedded_xyz_FG, keep_mask_FG = self.world_grid_embed_FG(input_pts[...,:3])
            embedded_time = self.time_grid_embed(input_pts[...,3].unsqueeze(-1))
            embedded_xyz_cam, keep_mask_cam = self.camera_grid_embed(input_pts_cam[...,:3])
        ### Static components
        h = self.BG_sigma_net(embedded_xyz)
        BG_sigma, ray_encoding = h[..., 0], h[..., 1:]
        BG_color = self.BG_color_net(torch.cat([input_views, ray_encoding], dim=-1))
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid.
        BG_color = F.sigmoid(BG_color)
        BG_sigma = relu_act(BG_sigma)
        BG_sigma, BG_color = BG_sigma*keep_mask, BG_color*keep_mask[:,None]
        ### Dynamic components
        if not self.coarse:
            h = self.FG_net(torch.cat([embedded_xyz_FG, embedded_time], dim=-1))
            FG_color, FG_sigma, FG_uncertainties = h[..., :3], h[..., 3], h[..., 4]
            FG_color = F.sigmoid(FG_color)
            h = self.ACTOR_net(torch.cat([embedded_xyz_cam, embedded_time], dim=-1))
            ACTOR_color, ACTOR_sigma, ACTOR_uncertainties = h[..., :3], h[..., 3], h[..., 4]
            ACTOR_color = F.sigmoid(ACTOR_color)
            FG_sigma, FG_uncertainties = relu_act(FG_sigma), relu_act(FG_uncertainties)
            ACTOR_sigma, ACTOR_uncertainties = relu_act(ACTOR_sigma), relu_act(ACTOR_uncertainties)
            FG_sigma, FG_color = FG_sigma*keep_mask_FG, FG_color*keep_mask_FG[:,None]
            ACTOR_sigma, ACTOR_color = ACTOR_sigma*keep_mask_cam, ACTOR_color*keep_mask_cam[:,None]
        else:
            FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
            ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
        # Principled color mixing
        sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
        color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color + (ACTOR_sigma/sigma)[:,None] * ACTOR_color
        if self.use_uncertainties:
            return torch.cat([BG_color, FG_color, ACTOR_color, # :3, 3:6, 6:9
                              BG_sigma.unsqueeze(dim=-1), FG_sigma.unsqueeze(dim=-1), ACTOR_sigma.unsqueeze(dim=-1), # 9, 10, 11
                              FG_uncertainties.unsqueeze(dim=-1), ACTOR_uncertainties.unsqueeze(dim=-1)], -1) # 12, 13
        else:
            return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class BGFG_XYZT(nn.Module):
'''
XYZT grid for foreground model
'''
    def __init__(self,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 num_layers_FG=4,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 static_grid=None, xyzt_grid=None, xyzt_grid_cam=None,
                 coarse=True,
                 use_viewdirs_FG=False,
                 use_actor=False,
                 small_MLPs_dyn=False):
        # Build static (XYZ grid) and, for fine models, dynamic (XYZT grid)
        # decoder MLPs; small_MLPs_dyn shrinks the dynamic heads to 1 layer.
        # Coarse models never use uncertainties and have no dynamic grids.
        super(BGFG_XYZT, self).__init__()
        self.input_ch, self.input_cam_ch = input_ch, input_cam_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views, self.input_ch_views_cam = input_ch_views, input_ch_views_cam # has embedded views
        self.num_layers, self.num_layers_color, self.hidden_dim = num_layers, num_layers_color, hidden_dim
        self.small_MLPs_dyn = small_MLPs_dyn
        if small_MLPs_dyn:
            self.num_layers_dyn, self.num_layers_color_dyn = 1, 1
        else:
            self.num_layers_dyn, self.num_layers_color_dyn = num_layers, num_layers_color
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_FG = num_layers_FG
        if coarse:
            self.use_uncertainties = False
        else:
            self.use_uncertainties = use_uncertainties
        self.coarse = coarse # Is this a coarse model?
        self.use_viewdirs_FG = use_viewdirs_FG
        self.use_actor = use_actor
        self.static_grid = static_grid
        if not coarse: # if this is a "fine" model, use dynamic components
            self.xyzt_grid = xyzt_grid # separate high freq grid for FG
            if self.use_actor:
                self.xyzt_grid_cam = xyzt_grid_cam
        ### Background Network
        # hidden_dim_color argument is ignored by create_sigma_and_color_MLP,
        # hence the throwaway 53 below.
        self.BG_sigma_net, self.BG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                    hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                    self.static_grid.out_dim, # only XYZ
                                                    input_ch_views, geo_feat_dim)
        if not coarse: # if this is a "fine" model, use dynamic components
            ### Foreground Network
            self.FG_sigma_net, self.FG_color_net = create_sigma_and_color_MLP(self.num_layers_dyn, self.num_layers_color_dyn,
                                                        hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                        self.xyzt_grid.out_dim,
                                                        input_ch_views, geo_feat_dim,
                                                        use_viewdirs=use_viewdirs_FG)
            if self.use_actor:
                ### Actor Network
                self.ACTOR_sigma_net, self.ACTOR_color_net = create_sigma_and_color_MLP(self.num_layers_dyn, self.num_layers_color_dyn,
                                                            hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                            self.xyzt_grid_cam.out_dim,
                                                            input_ch_views, geo_feat_dim,
                                                            use_viewdirs=use_viewdirs_FG)
def forward(self, x):
input_pts, input_pts_cam, input_views, input_views_cam = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
embedded_xyz, keep_mask = self.static_grid(input_pts[...,:3])
if not self.coarse:
embedded_xyzt, keep_mask_FG = self.xyzt_grid(input_pts)
if self.use_actor:
embedded_xyzt_cam, keep_mask_ACTOR = self.xyzt_grid_cam(input_pts_cam)
### Static components
BG_sigma, BG_color, _ = forward_through_MLP(self.BG_sigma_net, self.BG_color_net, \
embedded_xyz, input_views, \
self.num_layers, self.num_layers_color)
BG_color = F.sigmoid(BG_color)
BG_sigma, BG_color = BG_sigma*keep_mask, BG_color*keep_mask[:,None]
### Dynamic components
if not self.coarse:
FG_sigma, FG_color, FG_uncertainties = forward_through_MLP(self.FG_sigma_net, self.FG_color_net, \
embedded_xyzt, input_views if self.use_viewdirs_FG else None, \
self.num_layers_dyn, self.num_layers_color_dyn)
FG_color = F.sigmoid(FG_color)
FG_sigma, FG_color = FG_sigma*keep_mask_FG, FG_color*keep_mask_FG[:,None]
if self.use_actor:
ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = forward_through_MLP(self.ACTOR_sigma_net, self.ACTOR_color_net, \
embedded_xyzt_cam, input_views_cam if self.use_viewdirs_FG else None, \
self.num_layers_dyn, self.num_layers_color_dyn)
ACTOR_color = F.sigmoid(ACTOR_color)
ACTOR_sigma, ACTOR_color = ACTOR_sigma*keep_mask_ACTOR, ACTOR_color*keep_mask_ACTOR[:,None]
else:
### we don't have actor, but doing this to make the code consistent
ACTOR_sigma, ACTOR_color = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color)
ACTOR_uncertainties = torch.zeros_like(FG_uncertainties)
##################
else:
FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
# Principled color mixing
sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color + (ACTOR_sigma/sigma)[:,None] * ACTOR_color
if self.use_uncertainties:
return torch.cat([BG_color, FG_color, ACTOR_color, # :3, 3:6, 6:9
BG_sigma.unsqueeze(dim=-1), FG_sigma.unsqueeze(dim=-1), ACTOR_sigma.unsqueeze(dim=-1), # 9, 10, 11
FG_uncertainties.unsqueeze(dim=-1), ACTOR_uncertainties.unsqueeze(dim=-1), # 12, 13
embedded_xyzt], -1) # 14:
else:
return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class BGFG_OnOffEncoding(nn.Module):
    '''
    Background/foreground NeRF variant whose dynamic (foreground) branch
    uses an on/off time encoding (XYZ_TimeOnOff_Encoding).

    Coarse models only evaluate the static background branch; fine models
    additionally evaluate a view-independent foreground branch.
    '''
    def __init__(self,
                 t_bounds,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 static_grid=None, FG_xyz_grid=None,
                 coarse=True):
        super(BGFG_OnOffEncoding, self).__init__()
        self.input_ch, self.input_cam_ch = input_ch, input_cam_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views, self.input_ch_views_cam = input_ch_views, input_ch_views_cam # has embedded views
        self.num_layers, self.num_layers_color, self.hidden_dim = num_layers, num_layers_color, hidden_dim
        self.geo_feat_dim = geo_feat_dim
        # Uncertainty outputs only make sense for the fine model.
        self.use_uncertainties = False if coarse else use_uncertainties
        self.coarse = coarse # Is this a coarse model?
        self.static_grid = static_grid
        if not coarse: # if this is a "fine" model, use dynamic components
            self.FG_xyzt_encoder = XYZ_TimeOnOff_Encoding(xyz_encoder=FG_xyz_grid, t_bounds=t_bounds)
        ### Background Network
        self.BG_sigma_net, self.BG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                                          hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                                          self.static_grid.out_dim, # only XYZ
                                                                          input_ch_views, geo_feat_dim)
        if not coarse: # if this is a "fine" model, use dynamic components
            ### Foreground Network (use_viewdirs=False: FG color is view-independent)
            self.FG_sigma_net, self.FG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                                              hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                                              self.FG_xyzt_encoder.out_dim,
                                                                              input_ch_views, geo_feat_dim,
                                                                              use_viewdirs=False)
    def forward(self, x):
        # x packs [raw xyzt, raw camera-frame xyzt, embedded view dirs,
        # embedded camera-frame view dirs] along the last dimension.
        input_pts, input_pts_cam, input_views, input_views_cam = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
        embedded_xyz, keep_mask = self.static_grid(input_pts[...,:3])
        if not self.coarse:
            embedded_xyzt = self.FG_xyzt_encoder(input_pts)
        ### Static components
        BG_sigma, BG_color, _ = forward_through_MLP(self.BG_sigma_net, self.BG_color_net, \
                                                    embedded_xyz, input_views, \
                                                    self.num_layers, self.num_layers_color)
        BG_color = F.sigmoid(BG_color)
        # Zero out contributions from points outside the static grid bounds.
        BG_sigma, BG_color = BG_sigma*keep_mask, BG_color*keep_mask[:,None]
        ### Dynamic components
        if not self.coarse:
            FG_sigma, FG_color, FG_uncertainties = forward_through_MLP(self.FG_sigma_net, self.FG_color_net, \
                                                                       embedded_xyzt, None, \
                                                                       self.num_layers, self.num_layers_color)
            FG_color = F.sigmoid(FG_color)
            ### we don't have actor, but doing this to make the code consistent
            ACTOR_sigma, ACTOR_color = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color)
            ACTOR_uncertainties = torch.zeros_like(FG_uncertainties)
            ##################
        else:
            FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
            ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
        # Principled color mixing: density-weighted average of branch colors;
        # 1e-9 avoids division by zero when every branch density is zero.
        sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
        color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color + (ACTOR_sigma/sigma)[:,None] * ACTOR_color
        if self.use_uncertainties:
            return torch.cat([BG_color, FG_color, ACTOR_color, # :3, 3:6, 6:9
                              BG_sigma.unsqueeze(dim=-1), FG_sigma.unsqueeze(dim=-1), ACTOR_sigma.unsqueeze(dim=-1), # 9, 10, 11
                              FG_uncertainties.unsqueeze(dim=-1), ACTOR_uncertainties.unsqueeze(dim=-1), # 12, 13
                              embedded_xyzt], -1) # 14:
        else:
            return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class BGFG_PiecewiseConst(nn.Module):
    '''
    Background/foreground NeRF variant whose dynamic (foreground) branch
    uses a piecewise-constant time encoding (XYZ_TimePiecewiseConstant).
    (Original docstring said "OnOffEncoding" — copy-paste leftover.)

    Coarse models only evaluate the static background branch; fine models
    additionally evaluate a view-independent foreground branch.
    '''
    def __init__(self,
                 xyzt_bounds,
                 num_layers=3,
                 hidden_dim=64,
                 geo_feat_dim=15,
                 num_layers_color=4,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 static_grid=None,
                 coarse=True,
                 init_temperature=100.0,
                 n_pieces=10):
        super(BGFG_PiecewiseConst, self).__init__()
        self.input_ch, self.input_cam_ch = input_ch, input_cam_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views, self.input_ch_views_cam = input_ch_views, input_ch_views_cam # has embedded views
        self.num_layers, self.num_layers_color, self.hidden_dim = num_layers, num_layers_color, hidden_dim
        self.geo_feat_dim = geo_feat_dim
        # Uncertainty outputs only make sense for the fine model.
        self.use_uncertainties = False if coarse else use_uncertainties
        self.coarse = coarse # Is this a coarse model?
        self.static_grid = static_grid
        if not coarse: # if this is a "fine" model, use dynamic components
            self.FG_xyzt_encoder = XYZ_TimePiecewiseConstant(xyzt_bounds=xyzt_bounds,
                                                             init_temperature=init_temperature,
                                                             n_pieces=n_pieces)
        ### Background Network
        self.BG_sigma_net, self.BG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                                          hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                                          self.static_grid.out_dim, # only XYZ
                                                                          input_ch_views, geo_feat_dim)
        if not coarse: # if this is a "fine" model, use dynamic components
            ### Foreground Network (use_viewdirs=False: FG color is view-independent)
            self.FG_sigma_net, self.FG_color_net = create_sigma_and_color_MLP(num_layers, num_layers_color,
                                                                              hidden_dim, 53, # 53 is random! bad code, sorry :(
                                                                              self.FG_xyzt_encoder.out_dim,
                                                                              input_ch_views, geo_feat_dim,
                                                                              use_viewdirs=False)
    def forward(self, x):
        # x packs [raw xyzt, raw camera-frame xyzt, embedded view dirs,
        # embedded camera-frame view dirs] along the last dimension.
        input_pts, input_pts_cam, input_views, input_views_cam = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
        embedded_xyz, keep_mask = self.static_grid(input_pts[...,:3])
        if not self.coarse:
            embedded_xyzt = self.FG_xyzt_encoder(input_pts)
        ### Static components
        BG_sigma, BG_color, _ = forward_through_MLP(self.BG_sigma_net, self.BG_color_net, \
                                                    embedded_xyz, input_views, \
                                                    self.num_layers, self.num_layers_color)
        BG_color = F.sigmoid(BG_color)
        # Zero out contributions from points outside the static grid bounds.
        BG_sigma, BG_color = BG_sigma*keep_mask, BG_color*keep_mask[:,None]
        ### Dynamic components
        if not self.coarse:
            FG_sigma, FG_color, FG_uncertainties = forward_through_MLP(self.FG_sigma_net, self.FG_color_net, \
                                                                       embedded_xyzt, None, \
                                                                       self.num_layers, self.num_layers_color)
            FG_color = F.sigmoid(FG_color)
            ### we don't have actor, but doing this to make the code consistent
            ACTOR_sigma, ACTOR_color = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color)
            ACTOR_uncertainties = torch.zeros_like(FG_uncertainties)
            ##################
        else:
            FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
            ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
        # Principled color mixing: density-weighted average of branch colors;
        # 1e-9 avoids division by zero when every branch density is zero.
        sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
        color = (BG_sigma/sigma)[:,None] * BG_color + (FG_sigma/sigma)[:,None] * FG_color + (ACTOR_sigma/sigma)[:,None] * ACTOR_color
        if self.use_uncertainties:
            return torch.cat([BG_color, FG_color, ACTOR_color, # :3, 3:6, 6:9
                              BG_sigma.unsqueeze(dim=-1), FG_sigma.unsqueeze(dim=-1), ACTOR_sigma.unsqueeze(dim=-1), # 9, 10, 11
                              FG_uncertainties.unsqueeze(dim=-1), ACTOR_uncertainties.unsqueeze(dim=-1), # 12, 13
                              embedded_xyzt], -1) # 14:
        else:
            return torch.cat([color, sigma.unsqueeze(dim=-1)], -1)
class BGFG_XYZT_Bottleneck(nn.Module):
    '''
    XYZT grid for foreground model.
    Foreground model also uses some information (encoding) from the BG model,
    which helps in triangulation: the BG geometry feature is concatenated to
    the FG grid embedding before the FG MLP (the "bottleneck").
    '''
    def __init__(self,
                 num_layers=2,
                 hidden_dim=64,
                 geo_feat_dim=24,
                 num_layers_color=2,
                 num_layers_FG=4,
                 input_ch=4, input_cam_ch=4,
                 input_ch_views=3, input_ch_views_cam=3,
                 use_uncertainties=False,
                 static_grid=None, xyzt_grid=None,
                 coarse=True):
        super(BGFG_XYZT_Bottleneck, self).__init__()
        self.input_ch, self.input_cam_ch = input_ch, input_cam_ch # it's raw xyzt, so input_ch=4
        self.input_ch_views, self.input_ch_views_cam = input_ch_views, input_ch_views_cam # has embedded views
        self.num_layers, self.num_layers_color, self.hidden_dim = num_layers, num_layers_color, hidden_dim
        self.geo_feat_dim = geo_feat_dim
        self.num_layers_FG = num_layers_FG
        # Uncertainty outputs only make sense for the fine model.
        if coarse:
            self.use_uncertainties = False
        else:
            self.use_uncertainties = use_uncertainties
        self.coarse = coarse # Is this a coarse model?
        self.static_grid = static_grid
        if not coarse: # if this is a "fine" model, use dynamic components
            self.xyzt_grid = xyzt_grid # separate high freq grid for FG
        ### Static components
        self.xyz_encoder = nn.Sequential(
            nn.Linear(static_grid.out_dim, hidden_dim),
            nn.ReLU(),
        )
        self.BG_sigma_net = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
            nn.Softplus()
        )
        # Projects the shared xyz encoding down to the geometry feature that
        # feeds both the BG color head and the FG bottleneck.
        self.xyz_final_encoder = nn.Sequential(nn.Linear(hidden_dim, geo_feat_dim), nn.ReLU())
        self.BG_color_net = nn.Sequential(
            nn.Linear(geo_feat_dim + input_ch_views, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 3),
            nn.Sigmoid()
        )
        ### Dynamic components
        if not coarse:
            self.FG_encoder = nn.Sequential(
                nn.Linear(xyzt_grid.out_dim + geo_feat_dim, hidden_dim//2),
                nn.ReLU(),
                nn.Linear(hidden_dim//2, hidden_dim//2),
                nn.ReLU(),
                nn.Linear(hidden_dim//2, hidden_dim//2),
                nn.ReLU(),
                nn.Linear(hidden_dim//2, hidden_dim//2),
                nn.ReLU()
            )
            self.FG_sigma_net = nn.Sequential(nn.Linear(hidden_dim//2, 1), nn.Softplus())
            # self.FG_sigma_net[0].bias.data.fill_(-1.0)
            self.FG_color_net = nn.Sequential(nn.Linear(hidden_dim//2, 3), nn.Sigmoid())
            self.FG_uncertainty_net = nn.Sequential(nn.Linear(hidden_dim//2, 1), nn.Softplus())
    def forward(self, x):
        # x packs [raw xyzt, raw camera-frame xyzt, embedded view dirs,
        # embedded camera-frame view dirs] along the last dimension.
        input_pts, input_pts_cam, input_views, input_views_cam = torch.split(x, [self.input_ch, self.input_cam_ch, self.input_ch_views, self.input_ch_views_cam], dim=-1)
        embedded_xyz, keep_mask = self.static_grid(input_pts[...,:3])
        if not self.coarse:
            embedded_xyzt, keep_mask_FG = self.xyzt_grid(input_pts)
        ### Static components
        xyz_encoding = self.xyz_encoder(embedded_xyz)
        BG_sigma = self.BG_sigma_net(xyz_encoding)
        xyz_encoding_final = self.xyz_final_encoder(xyz_encoding) # size: (B, geo_feat_dim)
        BG_color = self.BG_color_net(torch.cat([xyz_encoding_final, input_views], dim=-1))
        # Zero out contributions from points outside the static grid bounds.
        BG_sigma, BG_color = BG_sigma*keep_mask[:,None], BG_color*keep_mask[:,None]
        ### Dynamic components
        if not self.coarse:
            # Bottleneck: FG conditions on the BG geometry feature.
            FG_encoding = self.FG_encoder(torch.cat([embedded_xyzt, xyz_encoding_final], dim=-1)) # size: (B, hidden_dim//2)
            FG_sigma = self.FG_sigma_net(FG_encoding)
            FG_color = self.FG_color_net(FG_encoding)
            FG_uncertainties = self.FG_uncertainty_net(FG_encoding)
            FG_sigma, FG_color = FG_sigma*keep_mask_FG[:,None], FG_color*keep_mask_FG[:,None]
            ### we don't have actor, but doing this to make the code consistent
            ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), torch.zeros_like(FG_uncertainties)
            ##################
        else:
            FG_sigma, FG_color, FG_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
            ACTOR_sigma, ACTOR_color, ACTOR_uncertainties = torch.zeros_like(BG_sigma), torch.zeros_like(BG_color), None
        # Principled color mixing: density-weighted average of branch colors;
        # 1e-9 avoids division by zero when every branch density is zero.
        sigma = BG_sigma + FG_sigma + ACTOR_sigma + 1e-9
        color = (BG_sigma/sigma) * BG_color + (FG_sigma/sigma) * FG_color + (ACTOR_sigma/sigma) * ACTOR_color
        if self.use_uncertainties:
            return torch.cat([BG_color, FG_color, ACTOR_color, # :3, 3:6, 6:9
                              BG_sigma, FG_sigma, ACTOR_sigma, # 9, 10, 11
                              FG_uncertainties, ACTOR_uncertainties, # 12, 13
                              FG_encoding], -1) # 14:
        else:
            return torch.cat([color, sigma], -1)
# Ray helpers
def get_rays_incameraframe(H, W, K):
    """Build per-pixel ray origins/directions in the camera frame.

    Rays originate at the camera center (all zeros) and point through each
    pixel of an H x W image with intrinsics K (looking down -z).
    """
    # torch.meshgrid defaults to 'ij' indexing, so transpose to (H, W).
    grid_x, grid_y = torch.meshgrid(torch.linspace(0, W - 1, W),
                                    torch.linspace(0, H - 1, H))
    grid_x, grid_y = grid_x.t(), grid_y.t()
    x_comp = (grid_x - K[0][2]) / K[0][0]
    y_comp = -(grid_y - K[1][2]) / K[1][1]
    z_comp = -torch.ones_like(grid_x)
    dirs = torch.stack([x_comp, y_comp, z_comp], -1)
    origins = torch.zeros_like(dirs)
    return origins, dirs
def get_rays_incameraframe_np(H, W, K):
    """NumPy twin of get_rays_incameraframe: camera-frame ray origins/dirs."""
    cols, rows = np.meshgrid(np.arange(W, dtype=np.float32),
                             np.arange(H, dtype=np.float32),
                             indexing='xy')
    x_comp = (cols - K[0][2]) / K[0][0]
    y_comp = -(rows - K[1][2]) / K[1][1]
    dirs = np.stack([x_comp, y_comp, -np.ones_like(cols)], -1)
    return np.zeros_like(dirs), dirs
def get_rays(H, W, K, c2w):
    """Compute world-space ray origins and directions for an H x W image.

    K is the pinhole intrinsics matrix; c2w is a camera-to-world transform
    whose top-left 3x3 is the rotation and last column the camera center.
    """
    # torch.meshgrid defaults to 'ij' indexing, so transpose to (H, W).
    px, py = torch.meshgrid(torch.linspace(0, W - 1, W),
                            torch.linspace(0, H - 1, H))
    px, py = px.t(), py.t()
    dirs = torch.stack([(px - K[0][2]) / K[0][0],
                        -(py - K[1][2]) / K[1][1],
                        -torch.ones_like(px)], -1)
    # Rotate ray directions into the world frame; equivalent to applying
    # c2w[:3, :3] to every per-pixel direction.
    rays_d = torch.sum(dirs[..., None, :] * c2w[:3, :3], -1)
    # Every ray starts at the camera origin, expressed in world coordinates.
    rays_o = c2w[:3, -1].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy twin of get_rays: world-space ray origins and directions."""
    cols, rows = np.meshgrid(np.arange(W, dtype=np.float32),
                             np.arange(H, dtype=np.float32),
                             indexing='xy')
    dirs = np.stack([(cols - K[0][2]) / K[0][0],
                     -(rows - K[1][2]) / K[1][1],
                     -np.ones_like(cols)], -1)
    # Rotate ray directions into the world frame (per-pixel c2w[:3,:3] @ dir).
    rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # All rays share the camera center as their origin (broadcast view).
    rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Warp rays into normalized device coordinates (NDC).

    Standard NeRF NDC transform for forward-facing scenes: ray origins are
    first advanced onto the near plane, then origins and directions are
    projected so the visible frustum maps into the [-1, 1] cube.

    Args:
        H, W: image height and width in pixels.
        focal: focal length in pixels.
        near: distance to the near plane.
        rays_o, rays_d: (..., 3) ray origins and directions.

    Returns:
        Tuple of (rays_o, rays_d) in NDC space, same shapes as inputs.
    """
    # Shift ray origins to near plane
    t = -(near + rays_o[...,2]) / rays_d[...,2]
    rays_o = rays_o + t[...,None] * rays_d
    # Projection (perspective divide folded into each component)
    o0 = -1./(W/(2.*focal)) * rays_o[...,0] / rays_o[...,2]
    o1 = -1./(H/(2.*focal)) * rays_o[...,1] / rays_o[...,2]
    o2 = 1. + 2. * near / rays_o[...,2]
    d0 = -1./(W/(2.*focal)) * (rays_d[...,0]/rays_d[...,2] - rays_o[...,0]/rays_o[...,2])
    d1 = -1./(H/(2.*focal)) * (rays_d[...,1]/rays_d[...,2] - rays_o[...,1]/rays_o[...,2])
    d2 = -2. * near / rays_o[...,2]
    rays_o = torch.stack([o0,o1,o2], -1)
    rays_d = torch.stack([d0,d1,d2], -1)
    return rays_o, rays_d
# Hierarchical sampling (section 5.2)
def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
    """Draw N_samples positions by inverse-transform sampling the
    piecewise-constant PDF defined by `weights` over `bins`.

    Args:
        bins: (batch, M) bin edge positions along each ray.
        weights: (batch, M-1) unnormalized per-bin weights.
        N_samples: number of samples to draw per batch row.
        det: if True, place samples at evenly spaced CDF quantiles
            (deterministic) instead of uniform random draws.
        pytest: if True, overwrite the draws with numpy's seeded random
            numbers so tests are reproducible.

    Returns:
        (batch, N_samples) sampled positions.
    """
    # Get pdf
    weights = weights + 1e-5 # prevent nans
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[...,:1]), cdf], -1)  # (batch, len(bins))
    # Take uniform samples
    if det:
        u = torch.linspace(0., 1., steps=N_samples)
        u = u.expand(list(cdf.shape[:-1]) + [N_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [N_samples])
    # Pytest, overwrite u with numpy's fixed random numbers
    if pytest:
        np.random.seed(0)
        new_shape = list(cdf.shape[:-1]) + [N_samples]
        if det:
            u = np.linspace(0., 1., N_samples)
            u = np.broadcast_to(u, new_shape)
        else:
            u = np.random.rand(*new_shape)
        u = torch.Tensor(u)
    # Invert CDF: locate the bin whose CDF interval contains each u.
    u = u.contiguous()
    inds = torch.searchsorted(cdf, u, right=True)
    below = torch.max(torch.zeros_like(inds-1), inds-1)
    above = torch.min((cdf.shape[-1]-1) * torch.ones_like(inds), inds)
    inds_g = torch.stack([below, above], -1)  # (batch, N_samples, 2)
    # cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
    # bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
    matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
    cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
    bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
    # Linearly interpolate within the selected bin; guard against
    # zero-width CDF intervals to avoid division by zero.
    denom = (cdf_g[...,1]-cdf_g[...,0])
    denom = torch.where(denom<1e-5, torch.ones_like(denom), denom)
    t = (u-cdf_g[...,0])/denom
    samples = bins_g[...,0] + t * (bins_g[...,1]-bins_g[...,0])
    return samples
| 58,713 | 113 | 1,037 |
6284b2b3c014222e9b83ab9f6a59216c9c7e1517 | 1,482 | py | Python | tests/test_instruction.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 9 | 2019-07-22T23:54:05.000Z | 2020-11-05T06:26:04.000Z | tests/test_instruction.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 389 | 2019-07-03T04:39:11.000Z | 2022-03-28T14:06:11.000Z | tests/test_instruction.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 1 | 2021-08-30T14:22:04.000Z | 2021-08-30T14:22:04.000Z | import configparser
import os
from pathlib import Path
import annofabapi
from annofabcli.__main__ import main
# Output and fixture directories for the "instruction" CLI tests.
out_dir = Path("./tests/out/instruction")
data_dir = Path("./tests/data/instruction")
# Move to the project root directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)) + "/../")
# Read the target Annofab project settings from pytest.ini ([annofab] section).
inifile = configparser.ConfigParser()
inifile.read("./pytest.ini", "UTF-8")
annofab_config = dict(inifile.items("annofab"))
project_id = annofab_config["project_id"]
service = annofabapi.build()
| 26.464286 | 88 | 0.566802 | import configparser
import os
from pathlib import Path
import annofabapi
from annofabcli.__main__ import main
# Output and fixture directories for the "instruction" CLI tests.
out_dir = Path("./tests/out/instruction")
data_dir = Path("./tests/data/instruction")
# Move to the project root directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)) + "/../")
# Read the target Annofab project settings from pytest.ini ([annofab] section).
inifile = configparser.ConfigParser()
inifile.read("./pytest.ini", "UTF-8")
annofab_config = dict(inifile.items("annofab"))
project_id = annofab_config["project_id"]
service = annofabapi.build()
class TestCommandLine:
    """Smoke tests for the `annofabcli instruction` subcommands.

    NOTE(review): these invoke the real CLI entry point against a live
    Annofab project (configured in pytest.ini), so they are integration
    tests with network side effects, not unit tests.
    """
    def test_copy_instruction(self):
        """Copy the instruction from a project to itself (no-op copy)."""
        src_project_id = project_id
        dest_project_id = project_id
        main(["instruction", "copy", src_project_id, dest_project_id, "--yes"])
    def test_download(self):
        """Download the instruction (including images) to the output dir."""
        main(
            [
                "instruction",
                "download",
                "--project_id",
                project_id,
                "--output_dir",
                str(out_dir / "download-out"),
                "--download_image",
            ]
        )
    def test_list_history(self):
        """Export the instruction edit history as CSV."""
        main(
            [
                "instruction",
                "list_history",
                "--project_id",
                project_id,
                "--output",
                str(out_dir / "list_history-out.csv"),
            ]
        )
    def test_upload_instruction(self):
        """Upload an instruction HTML fixture to the project."""
        html_file = str(data_dir / "instruction.html")
        main(["instruction", "upload", "--project_id", project_id, "--html", html_file])
308da623381a511c3b36a31d7f59d74d631166a4 | 435 | py | Python | pacote-download/Ex65.py | nkonai/Curso-em-video-Python | c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2 | [
"MIT"
] | null | null | null | pacote-download/Ex65.py | nkonai/Curso-em-video-Python | c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2 | [
"MIT"
] | null | null | null | pacote-download/Ex65.py | nkonai/Curso-em-video-Python | c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2 | [
"MIT"
] | null | null | null | soma = 0
count = 0      # how many numbers were entered
maior = 0      # running maximum (seeded from the first input below)
menor = 0      # running minimum (seeded from the first input below)
r = 'Y'
while r=='Y':
    n = int(input('Digite um numero: '))
    # On the first iteration (count == 0) both maior and menor are
    # initialized from n, so the 0 seeds above never leak into results.
    if count == 0 or n > maior:
        maior = n
    if count == 0 or n < menor:
        menor = n
    soma = soma + n
    count = count + 1
    r = str(input('Quer continuar? [Y/N]: ')).upper().strip()
# Average of all entered numbers (count >= 1 since the loop runs once).
media = soma/count
print('A media e {:.2f}'.format(media))
print('O menor numero e {} e o maior numero e {}'.format(menor,maior)) | 25.588235 | 70 | 0.551724 | soma = 0
count = 0      # how many numbers were entered
maior = 0      # running maximum (seeded from the first input below)
menor = 0      # running minimum (seeded from the first input below)
r = 'Y'
while r=='Y':
    n = int(input('Digite um numero: '))
    # On the first iteration (count == 0) both maior and menor are
    # initialized from n, so the 0 seeds above never leak into results.
    if count == 0 or n > maior:
        maior = n
    if count == 0 or n < menor:
        menor = n
    soma = soma + n
    count = count + 1
    r = str(input('Quer continuar? [Y/N]: ')).upper().strip()
# Average of all entered numbers (count >= 1 since the loop runs once).
media = soma/count
print('A media e {:.2f}'.format(media))
print('O menor numero e {} e o maior numero e {}'.format(menor,maior)) | 0 | 0 | 0 |
a276e3314627555c8de56733a50385417366f055 | 2,174 | py | Python | python/byteport/tests.py | gebart/byteport-api | 38504af42bd91ffafed4d813af14ccf88fdfe56d | [
"BSD-2-Clause"
] | null | null | null | python/byteport/tests.py | gebart/byteport-api | 38504af42bd91ffafed4d813af14ccf88fdfe56d | [
"BSD-2-Clause"
] | 2 | 2015-02-13T13:43:53.000Z | 2015-04-20T07:57:16.000Z | python/byteport/tests.py | gebart/byteport-api | 38504af42bd91ffafed4d813af14ccf88fdfe56d | [
"BSD-2-Clause"
] | 1 | 2017-12-18T01:38:46.000Z | 2017-12-18T01:38:46.000Z | import unittest
import datetime
from http_clients import ByteportHttpGetClient, ByteportHttpPostClient
| 36.233333 | 105 | 0.674793 | import unittest
import datetime
from http_clients import ByteportHttpGetClient, ByteportHttpPostClient
class TestHttpClients(unittest.TestCase):
    """Tests for the Byteport HTTP client's timestamp normalization.

    NOTE(review): the datetime cases build naive datetimes and expect
    epoch-based strings — this presumably assumes the machine runs in UTC;
    verify against ByteportHttpGetClient.auto_timestamp's implementation.
    """
    # Target endpoint/config for the client under test.
    hostname = 'localhost:8000'
    # hostname = 'api.byteport.se'
    byteport_api_store_url = 'http://%s/services/store/' % hostname
    namespace = 'test'
    device_uid = '6000'
    key = 'TEST'
    def test_should_handle_all_supported_timetamps_correctly(self):
        """auto_timestamp should serialize int/float/datetime inputs."""
        client = ByteportHttpGetClient(
            byteport_api_store_url=self.byteport_api_store_url,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # integer input
        int_input = 0
        expected_result = '0'
        result = client.auto_timestamp(int_input)
        self.assertEqual(expected_result, result)
        # float input with exact precision
        float_input = 1.012345
        expected_result = '1.012345'
        result = client.auto_timestamp(float_input)
        self.assertEqual(expected_result, result)
        # float input with higher precision, should round
        float_input = 1.01234599999
        expected_result = '1.012346'
        result = client.auto_timestamp(float_input)
        self.assertEqual(expected_result, result)
        # datetime input, second precision
        datetime_input = datetime.datetime.strptime('1970-01-01T00:00:02', '%Y-%m-%dT%H:%M:%S')
        expected_result = '2'
        result = client.auto_timestamp(datetime_input)
        self.assertEqual(expected_result, result)
        # datetime input, micro-second precision
        datetime_input = datetime.datetime.strptime('1970-01-01T00:00:02.012345', '%Y-%m-%dT%H:%M:%S.%f')
        expected_result = '2.012345'
        result = client.auto_timestamp(datetime_input)
        self.assertEqual(expected_result, result)
        # datetime input, micro-second precision, later timestamp
        datetime_input = datetime.datetime.strptime('2015-05-01T00:00:00.012345', '%Y-%m-%dT%H:%M:%S.%f')
        expected_result = '1430438400.012345'
        result = client.auto_timestamp(datetime_input)
        self.assertEqual(expected_result, result)
| 1,798 | 248 | 23 |
9e73b1031062b8ac79a2adf8eaf63209234ac6e0 | 17,345 | py | Python | stackinawsgi/test/test_admin_admin.py | BenjamenMeyer/stackInAWSGI | 8ac6be173bb08addc09214ba7dc9f91727d0221a | [
"Apache-2.0"
] | 2 | 2016-08-12T19:11:13.000Z | 2017-11-19T20:52:47.000Z | stackinawsgi/test/test_admin_admin.py | BenjamenMeyer/stackInAWSGI | 8ac6be173bb08addc09214ba7dc9f91727d0221a | [
"Apache-2.0"
] | 16 | 2016-05-22T05:11:12.000Z | 2016-07-14T00:57:07.000Z | stackinawsgi/test/test_admin_admin.py | BenjamenMeyer/stackInAWSGI | 8ac6be173bb08addc09214ba7dc9f91727d0221a | [
"Apache-2.0"
] | 1 | 2016-05-15T19:01:35.000Z | 2016-05-15T19:01:35.000Z | """
Stack-In-A-WSGI: stackinawsgi.admin.admin.StackInAWsgiSessionManager
"""
import datetime
import json
import unittest
import ddt
from stackinabox.services.service import StackInABoxService
from stackinabox.services.hello import HelloService
from stackinawsgi.admin.admin import StackInAWsgiAdmin
from stackinawsgi.session.service import (
global_sessions,
StackInAWsgiSessionManager
)
from stackinawsgi.wsgi.request import Request
from stackinawsgi.wsgi.response import Response
from stackinawsgi.test.helpers import make_environment
@ddt.ddt
class TestSessionManager(unittest.TestCase):
"""
Test the interaction of StackInAWSGI's Session Manager
"""
    def setUp(self):
        """
        Configure the environment for each test: a fresh session manager
        with the HelloService registered, and a base URI for the admin.
        """
        self.manager = StackInAWsgiSessionManager()
        self.manager.register_service(HelloService)
        self.base_uri = 'test://testing-url'
    def tearDown(self):
        """
        Clean up after each test by emptying the module-level
        global_sessions registry so tests stay independent.
        """
        # Snapshot the keys first: deleting while iterating a dict is invalid.
        keys = tuple(global_sessions.keys())
        for k in keys:
            del global_sessions[k]
    def test_construction(self):
        """
        Basic construction of the admin interface: it is a StackInABox
        service, keeps the exact manager instance, and exposes base_uri.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        self.assertIsInstance(admin, StackInABoxService)
        self.assertEqual(id(self.manager), id(admin.manager))
        self.assertTrue(admin.base_uri.startswith(self.base_uri))
    def test_property_base_uri_with_no_slash(self):
        """
        base_uri property: a value without slashes is kept as-is.
        """
        base_uri = 'hello'
        admin = StackInAWsgiAdmin(self.manager, base_uri)
        self.assertIsInstance(admin, StackInABoxService)
        self.assertEqual(id(self.manager), id(admin.manager))
        self.assertTrue(admin.base_uri.startswith(base_uri))
    def test_property_base_uri_start_with_slash(self):
        """
        base_uri property: a leading slash is stripped.
        """
        base_uri = '/hello'
        admin = StackInAWsgiAdmin(self.manager, base_uri)
        self.assertIsInstance(admin, StackInABoxService)
        self.assertEqual(id(self.manager), id(admin.manager))
        self.assertTrue(admin.base_uri.startswith(base_uri[1:]))
    def test_property_base_uri_ends_with_slash(self):
        """
        test the base uri property to ensure the trailing slash
        is removed
        """
        base_uri = 'hello/'
        admin = StackInAWsgiAdmin(self.manager, base_uri)
        self.assertIsInstance(admin, StackInABoxService)
        self.assertEqual(id(self.manager), id(admin.manager))
        self.assertTrue(admin.base_uri.startswith(base_uri[:-1]))
    def test_helper_get_session_id(self):
        """
        Extracting the session-id from the x-session-id request header.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        session_id = 'some-session-id'
        headers = {
            'x-session-id': session_id
        }
        extracted_session_id = admin.helper_get_session_id(headers)
        self.assertEqual(session_id, extracted_session_id)
    def test_helper_get_session_id_no_session_id(self):
        """
        Extracting the session-id returns None when the header is absent.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        headers = {}
        extracted_session_id = admin.helper_get_session_id(headers)
        self.assertIsNone(extracted_session_id)
    def test_helper_get_uri(self):
        """
        Building the session URI: '<base_uri>/<session_id>/'.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        session_id = 'some-session-id'
        expected_uri = '{0}/{1}/'.format(self.base_uri, session_id)
        result_uri = admin.helper_get_uri(session_id)
        self.assertEqual(expected_uri, result_uri)
    def test_session_creation(self):
        """
        Creating a new session via POST without supplying a session-id:
        expect 201 with a generated x-session-id and a location header.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        uri = u'/'
        environment = make_environment(
            self,
            method='POST',
            path=uri[1:]
        )
        request = Request(environment)
        response = Response()
        result = admin.create_session(
            request,
            uri,
            response.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 201)
        # validate header entries
        self.assertIn('x-session-id', response.headers)
        self.assertIn('location', response.headers)
        # validate x-session-id: the new session is registered globally
        session_id = response.headers['x-session-id']
        self.assertIn(session_id, global_sessions)
        # validate location points at the per-session URI
        self.assertEqual(
            '{0}/{1}/'.format(self.base_uri, session_id),
            response.headers['location']
        )
    def test_session_creation_with_session_id(self):
        """
        Creating a new session via POST with a caller-supplied session-id:
        expect 201 and the same id echoed back in x-session-id.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        session_id = 'my-session-id'
        uri = u'/'
        environment = make_environment(
            self,
            method='POST',
            path=uri[1:],
            headers={
                'x-session-id': session_id
            }
        )
        request = Request(environment)
        # Sanity check the fixture forwarded the custom header.
        self.assertIn('x-session-id', request.headers)
        self.assertEqual(session_id, request.headers['x-session-id'])
        response = Response()
        result = admin.create_session(
            request,
            uri,
            request.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 201)
        # validate header entries
        self.assertIn('x-session-id', response.headers)
        self.assertIn('location', response.headers)
        # validate x-session-id: the supplied id is used and registered
        extracted_session_id = response.headers['x-session-id']
        self.assertEqual(session_id, extracted_session_id)
        self.assertIn(extracted_session_id, global_sessions)
        # validate location points at the per-session URI
        self.assertEqual(
            '{0}/{1}/'.format(self.base_uri, extracted_session_id),
            response.headers['location']
        )
    def test_session_remove(self):
        """
        Removing an existing session via DELETE: expect 204 No Content.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        # Create the session directly through the manager, not the admin API.
        session_id = self.manager.create_session()
        uri = u'/'
        environment = make_environment(
            self,
            method='DELETE',
            path=uri[1:],
            headers={
                'x-session-id': session_id
            }
        )
        request = Request(environment)
        # Sanity check the fixture forwarded the custom header.
        self.assertIn('x-session-id', request.headers)
        self.assertEqual(session_id, request.headers['x-session-id'])
        response = Response()
        result = admin.remove_session(
            request,
            uri,
            request.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 204)
    def test_session_remove_invalid_session_id(self):
        """
        Removing a session with an unknown session id: expect 404.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        # This id was never registered with the manager.
        session_id = 'my-session-id'
        uri = u'/'
        environment = make_environment(
            self,
            method='DELETE',
            path=uri[1:],
            headers={
                'x-session-id': session_id
            }
        )
        request = Request(environment)
        # Sanity check the fixture forwarded the custom header.
        self.assertIn('x-session-id', request.headers)
        self.assertEqual(session_id, request.headers['x-session-id'])
        response = Response()
        result = admin.remove_session(
            request,
            uri,
            request.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 404)
    def test_session_reset(self):
        """
        Resetting an existing session via PUT: expect 205 Reset Content.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        # Create the session directly through the manager, not the admin API.
        session_id = self.manager.create_session()
        uri = u'/'
        environment = make_environment(
            self,
            method='PUT',
            path=uri[1:],
            headers={
                'x-session-id': session_id
            }
        )
        request = Request(environment)
        # Sanity check the fixture forwarded the custom header.
        self.assertIn('x-session-id', request.headers)
        self.assertEqual(session_id, request.headers['x-session-id'])
        response = Response()
        result = admin.reset_session(
            request,
            uri,
            request.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 205)
    def test_session_reset_invalid_session_id(self):
        """
        Resetting a session with an unknown session id: expect 404.
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        # This id was never registered with the manager.
        session_id = 'my-session-id'
        uri = u'/'
        environment = make_environment(
            self,
            method='PUT',
            path=uri[1:],
            headers={
                'x-session-id': session_id
            }
        )
        request = Request(environment)
        # Sanity check the fixture forwarded the custom header.
        self.assertIn('x-session-id', request.headers)
        self.assertEqual(session_id, request.headers['x-session-id'])
        response = Response()
        result = admin.reset_session(
            request,
            uri,
            request.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 404)
    @ddt.data(0, 1, 2, 3, 5, 8, 13)
    def test_get_sessions(self, session_count):
        """
        GET the session listing: the JSON body reports the base URL, the
        registered services, and exactly `session_count` sessions
        (parameterized via ddt).
        """
        admin = StackInAWsgiAdmin(self.manager, self.base_uri)
        uri = u'/'
        environment = make_environment(
            self,
            method='GET',
            path=uri[1:],
            headers={}
        )
        request = Request(environment)
        # Pre-populate the manager with the requested number of sessions.
        for _ in range(session_count):
            admin.manager.create_session()
        response = Response()
        result = admin.get_sessions(
            request,
            uri,
            request.headers
        )
        response.from_stackinabox(
            result[0],
            result[1],
            result[2]
        )
        # validate response
        self.assertEqual(response.status, 200)
        response_body = response.body
        session_data = json.loads(response_body)
        self.assertIn('base_url', session_data)
        self.assertEqual(session_data['base_url'], self.base_uri)
        # Only HelloService was registered in setUp.
        self.assertIn('services', session_data)
        self.assertEqual(len(session_data['services']), 1)
        self.assertIn('hello', session_data['services'])
        self.assertEqual(session_data['services']['hello'], 'HelloService')
        self.assertIn('sessions', session_data)
        self.assertEqual(len(session_data['sessions']), session_count)
def test_get_session_info(self):
"""
test resetting a session with an invalid session id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/{0}'.format(session_id)
environment = make_environment(
self,
method='GET',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response_created = Response()
result_create = admin.create_session(
request,
uri,
request.headers
)
response_created.from_stackinabox(
result_create[0],
result_create[1],
result_create[2]
)
self.assertEqual(response_created.status, 201)
response = Response()
result = admin.get_session_info(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 200)
response_body = response.body
session_data = json.loads(response_body)
self.assertIn('base_url', session_data)
self.assertEqual(session_data['base_url'], self.base_uri)
self.assertIn('session_valid', session_data)
self.assertTrue(session_data['session_valid'])
self.assertIn('services', session_data)
self.assertEqual(len(session_data['services']), 1)
self.assertIn('hello', session_data['services'])
self.assertEqual(session_data['services']['hello'], 'HelloService')
self.assertIn('trackers', session_data)
self.assertEqual(len(session_data['trackers']), 3)
self.assertIn('created-time', session_data['trackers'])
self.assertIsNotNone(session_data['trackers']['created-time'])
created_time = datetime.datetime.strptime(
session_data['trackers']['created-time'],
"%Y-%m-%dT%H:%M:%S.%f"
)
self.assertIn('accessed', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['accessed']), 2)
self.assertIn('time', session_data['trackers']['accessed'])
self.assertIsNotNone(session_data['trackers']['accessed']['time'])
accessed_time = datetime.datetime.strptime(
session_data['trackers']['accessed']['time'],
"%Y-%m-%dT%H:%M:%S.%f"
)
self.assertEqual(created_time, accessed_time)
self.assertIn('count', session_data['trackers']['accessed'])
self.assertIn('status', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['status']), 0)
def test_get_session_info_invalid_session(self):
"""
test resetting a session with an invalid session id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/{0}'.format(session_id)
environment = make_environment(
self,
method='PUT',
path=uri[1:],
)
request = Request(environment)
response = Response()
result = admin.get_session_info(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 200)
response_body = response.body
session_data = json.loads(response_body)
self.assertIn('base_url', session_data)
self.assertEqual(session_data['base_url'], self.base_uri)
self.assertIn('session_valid', session_data)
self.assertFalse(session_data['session_valid'])
self.assertIn('services', session_data)
self.assertEqual(len(session_data['services']), 1)
self.assertIn('hello', session_data['services'])
self.assertEqual(session_data['services']['hello'], 'HelloService')
self.assertIn('trackers', session_data)
self.assertEqual(len(session_data['trackers']), 3)
self.assertIn('created-time', session_data['trackers'])
self.assertIsNone(session_data['trackers']['created-time'])
self.assertIn('accessed', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['accessed']), 2)
self.assertIn('time', session_data['trackers']['accessed'])
self.assertIsNone(session_data['trackers']['accessed']['time'])
self.assertIn('count', session_data['trackers']['accessed'])
self.assertIn('status', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['status']), 0)
def test_extract_session_from_uri(self):
"""
test extracting a session from the URI - positive test
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/{0}'.format(session_id)
extracted_session_id = admin.helper_get_session_id_from_uri(
uri
)
self.assertEqual(session_id, extracted_session_id)
def test_extract_session_from_uri_invalid(self):
"""
test extracting a session from the URI - negative test
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
uri = u'/'
extracted_session_id = admin.helper_get_session_id_from_uri(
uri
)
self.assertIsNone(extracted_session_id)
| 30.808171 | 75 | 0.589853 | """
Stack-In-A-WSGI: stackinawsgi.admin.admin.StackInAWsgiSessionManager
"""
import datetime
import json
import unittest
import ddt
from stackinabox.services.service import StackInABoxService
from stackinabox.services.hello import HelloService
from stackinawsgi.admin.admin import StackInAWsgiAdmin
from stackinawsgi.session.service import (
global_sessions,
StackInAWsgiSessionManager
)
from stackinawsgi.wsgi.request import Request
from stackinawsgi.wsgi.response import Response
from stackinawsgi.test.helpers import make_environment
@ddt.ddt
class TestSessionManager(unittest.TestCase):
"""
Test the interaction of StackInAWSGI's Session Manager
"""
def setUp(self):
"""
configure env for the test
"""
self.manager = StackInAWsgiSessionManager()
self.manager.register_service(HelloService)
self.base_uri = 'test://testing-url'
def tearDown(self):
"""
clean up after the test
"""
keys = tuple(global_sessions.keys())
for k in keys:
del global_sessions[k]
def test_construction(self):
"""
test basic construction of the admin interface
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
self.assertIsInstance(admin, StackInABoxService)
self.assertEqual(id(self.manager), id(admin.manager))
self.assertTrue(admin.base_uri.startswith(self.base_uri))
def test_property_base_uri_with_no_slash(self):
"""
test basic construction of the admin interface
"""
base_uri = 'hello'
admin = StackInAWsgiAdmin(self.manager, base_uri)
self.assertIsInstance(admin, StackInABoxService)
self.assertEqual(id(self.manager), id(admin.manager))
self.assertTrue(admin.base_uri.startswith(base_uri))
def test_property_base_uri_start_with_slash(self):
"""
test basic construction of the admin interface
"""
base_uri = '/hello'
admin = StackInAWsgiAdmin(self.manager, base_uri)
self.assertIsInstance(admin, StackInABoxService)
self.assertEqual(id(self.manager), id(admin.manager))
self.assertTrue(admin.base_uri.startswith(base_uri[1:]))
def test_property_base_uri_ends_with_slash(self):
"""
test the base uri property to ensure the trailing slash
is removed
"""
base_uri = 'hello/'
admin = StackInAWsgiAdmin(self.manager, base_uri)
self.assertIsInstance(admin, StackInABoxService)
self.assertEqual(id(self.manager), id(admin.manager))
self.assertTrue(admin.base_uri.startswith(base_uri[:-1]))
def test_helper_get_session_id(self):
"""
test extracting the session-id from the headers
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'some-session-id'
headers = {
'x-session-id': session_id
}
extracted_session_id = admin.helper_get_session_id(headers)
self.assertEqual(session_id, extracted_session_id)
def test_helper_get_session_id_no_session_id(self):
"""
test extracting the session-id from the headers
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
headers = {}
extracted_session_id = admin.helper_get_session_id(headers)
self.assertIsNone(extracted_session_id)
def test_helper_get_uri(self):
"""
test building the URI
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'some-session-id'
expected_uri = '{0}/{1}/'.format(self.base_uri, session_id)
result_uri = admin.helper_get_uri(session_id)
self.assertEqual(expected_uri, result_uri)
def test_session_creation(self):
"""
test creating a new session
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
uri = u'/'
environment = make_environment(
self,
method='POST',
path=uri[1:]
)
request = Request(environment)
response = Response()
result = admin.create_session(
request,
uri,
response.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 201)
# validate header entries
self.assertIn('x-session-id', response.headers)
self.assertIn('location', response.headers)
# validate x-session-id
session_id = response.headers['x-session-id']
self.assertIn(session_id, global_sessions)
# validate location
self.assertEqual(
'{0}/{1}/'.format(self.base_uri, session_id),
response.headers['location']
)
def test_session_creation_with_session_id(self):
"""
test creating a new session with a session-id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/'
environment = make_environment(
self,
method='POST',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response = Response()
result = admin.create_session(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 201)
# validate header entries
self.assertIn('x-session-id', response.headers)
self.assertIn('location', response.headers)
# validate x-session-id
extracted_session_id = response.headers['x-session-id']
self.assertEqual(session_id, extracted_session_id)
self.assertIn(extracted_session_id, global_sessions)
# validate location
self.assertEqual(
'{0}/{1}/'.format(self.base_uri, extracted_session_id),
response.headers['location']
)
def test_session_remove(self):
"""
test removing a session
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = self.manager.create_session()
uri = u'/'
environment = make_environment(
self,
method='DELETE',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response = Response()
result = admin.remove_session(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 204)
def test_session_remove_invalid_session_id(self):
"""
test removing a session with an invalid session id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/'
environment = make_environment(
self,
method='DELETE',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response = Response()
result = admin.remove_session(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 404)
def test_session_reset(self):
"""
test resetting a session
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = self.manager.create_session()
uri = u'/'
environment = make_environment(
self,
method='PUT',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response = Response()
result = admin.reset_session(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 205)
def test_session_reset_invalid_session_id(self):
"""
test resetting a session with an invalid session id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/'
environment = make_environment(
self,
method='PUT',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response = Response()
result = admin.reset_session(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 404)
@ddt.data(0, 1, 2, 3, 5, 8, 13)
def test_get_sessions(self, session_count):
"""
test get sessions
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
uri = u'/'
environment = make_environment(
self,
method='GET',
path=uri[1:],
headers={}
)
request = Request(environment)
for _ in range(session_count):
admin.manager.create_session()
response = Response()
result = admin.get_sessions(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 200)
response_body = response.body
session_data = json.loads(response_body)
self.assertIn('base_url', session_data)
self.assertEqual(session_data['base_url'], self.base_uri)
self.assertIn('services', session_data)
self.assertEqual(len(session_data['services']), 1)
self.assertIn('hello', session_data['services'])
self.assertEqual(session_data['services']['hello'], 'HelloService')
self.assertIn('sessions', session_data)
self.assertEqual(len(session_data['sessions']), session_count)
def test_get_session_info(self):
"""
test resetting a session with an invalid session id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/{0}'.format(session_id)
environment = make_environment(
self,
method='GET',
path=uri[1:],
headers={
'x-session-id': session_id
}
)
request = Request(environment)
self.assertIn('x-session-id', request.headers)
self.assertEqual(session_id, request.headers['x-session-id'])
response_created = Response()
result_create = admin.create_session(
request,
uri,
request.headers
)
response_created.from_stackinabox(
result_create[0],
result_create[1],
result_create[2]
)
self.assertEqual(response_created.status, 201)
response = Response()
result = admin.get_session_info(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 200)
response_body = response.body
session_data = json.loads(response_body)
self.assertIn('base_url', session_data)
self.assertEqual(session_data['base_url'], self.base_uri)
self.assertIn('session_valid', session_data)
self.assertTrue(session_data['session_valid'])
self.assertIn('services', session_data)
self.assertEqual(len(session_data['services']), 1)
self.assertIn('hello', session_data['services'])
self.assertEqual(session_data['services']['hello'], 'HelloService')
self.assertIn('trackers', session_data)
self.assertEqual(len(session_data['trackers']), 3)
self.assertIn('created-time', session_data['trackers'])
self.assertIsNotNone(session_data['trackers']['created-time'])
created_time = datetime.datetime.strptime(
session_data['trackers']['created-time'],
"%Y-%m-%dT%H:%M:%S.%f"
)
self.assertIn('accessed', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['accessed']), 2)
self.assertIn('time', session_data['trackers']['accessed'])
self.assertIsNotNone(session_data['trackers']['accessed']['time'])
accessed_time = datetime.datetime.strptime(
session_data['trackers']['accessed']['time'],
"%Y-%m-%dT%H:%M:%S.%f"
)
self.assertEqual(created_time, accessed_time)
self.assertIn('count', session_data['trackers']['accessed'])
self.assertIn('status', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['status']), 0)
def test_get_session_info_invalid_session(self):
"""
test resetting a session with an invalid session id
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/{0}'.format(session_id)
environment = make_environment(
self,
method='PUT',
path=uri[1:],
)
request = Request(environment)
response = Response()
result = admin.get_session_info(
request,
uri,
request.headers
)
response.from_stackinabox(
result[0],
result[1],
result[2]
)
# validate response
self.assertEqual(response.status, 200)
response_body = response.body
session_data = json.loads(response_body)
self.assertIn('base_url', session_data)
self.assertEqual(session_data['base_url'], self.base_uri)
self.assertIn('session_valid', session_data)
self.assertFalse(session_data['session_valid'])
self.assertIn('services', session_data)
self.assertEqual(len(session_data['services']), 1)
self.assertIn('hello', session_data['services'])
self.assertEqual(session_data['services']['hello'], 'HelloService')
self.assertIn('trackers', session_data)
self.assertEqual(len(session_data['trackers']), 3)
self.assertIn('created-time', session_data['trackers'])
self.assertIsNone(session_data['trackers']['created-time'])
self.assertIn('accessed', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['accessed']), 2)
self.assertIn('time', session_data['trackers']['accessed'])
self.assertIsNone(session_data['trackers']['accessed']['time'])
self.assertIn('count', session_data['trackers']['accessed'])
self.assertIn('status', session_data['trackers'])
self.assertEqual(len(session_data['trackers']['status']), 0)
def test_extract_session_from_uri(self):
"""
test extracting a session from the URI - positive test
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
session_id = 'my-session-id'
uri = u'/{0}'.format(session_id)
extracted_session_id = admin.helper_get_session_id_from_uri(
uri
)
self.assertEqual(session_id, extracted_session_id)
def test_extract_session_from_uri_invalid(self):
"""
test extracting a session from the URI - negative test
"""
admin = StackInAWsgiAdmin(self.manager, self.base_uri)
uri = u'/'
extracted_session_id = admin.helper_get_session_id_from_uri(
uri
)
self.assertIsNone(extracted_session_id)
| 0 | 0 | 0 |
ea62194839d143d2c84ffdc76fc2a59e091dbad7 | 3,077 | py | Python | validation/prep_rm_subset.py | bgruening/bcbb | dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027 | [
"MIT"
] | 339 | 2015-01-04T13:23:04.000Z | 2022-03-25T23:09:09.000Z | validation/prep_rm_subset.py | bgruening/bcbb | dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027 | [
"MIT"
] | 39 | 2015-01-14T21:31:09.000Z | 2021-11-18T15:15:33.000Z | validation/prep_rm_subset.py | bgruening/bcbb | dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027 | [
"MIT"
] | 176 | 2015-01-10T17:40:44.000Z | 2022-03-25T05:14:21.000Z | """Prepare subset regions of full NIST NA12878 reference materials for evaluation.
Allows preparation of exome or targeted reference materials from
the full NIST NA12878 genome.
Requires:
vcflib: https://github.com/ekg/vcflib
bedtools: http://bedtools.readthedocs.org/en/latest/
Usage:
prep_rm_subset.py <input_config.yaml>
"""
import os
import sys
import subprocess
import yaml
import pybedtools
if __name__ == "__main__":
main(sys.argv[1])
| 37.072289 | 89 | 0.588885 | """Prepare subset regions of full NIST NA12878 reference materials for evaluation.
Allows preparation of exome or targeted reference materials from
the full NIST NA12878 genome.
Requires:
vcflib: https://github.com/ekg/vcflib
bedtools: http://bedtools.readthedocs.org/en/latest/
Usage:
prep_rm_subset.py <input_config.yaml>
"""
import os
import sys
import subprocess
import yaml
import pybedtools
def main(config_file):
config = load_config(config_file)
config["out_base"] = os.path.join(config["dirs"]["rm"],
config["subset"]["name"])
region_bed = intersect_beds(config["subset"]["interval"],
config["rm"]["interval"], config)
final_vcf = combine_subset_vcfs(config["rm"]["vcfs"],
config["rm"]["ref"],
region_bed, config)
filter_vcf(final_vcf)
def filter_vcf(in_vcf):
out_vcf = "%s-pass%s" % os.path.splitext(in_vcf)
with open(in_vcf) as in_handle:
with open(out_vcf, "w") as out_handle:
for line in in_handle:
passes = False
if line.startswith("#"):
passes = True
else:
parts = line.split("\t")
if parts[6] in [".", "PASS"]:
passes = True
if passes:
out_handle.write(line)
def combine_subset_vcfs(vcfs, ref_file, region_bed, config):
out_file = os.path.join(config["dirs"]["rm"],
"%s.vcf" % config["subset"]["name"])
tmp_files = []
for i, vcf in enumerate(vcfs):
tmp_out_file = "%s-%s.vcf" % (os.path.splitext(out_file)[0], i)
cmd = "vcfintersect -b {region_bed} {vcf} > {tmp_out_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
tmp_files.append(tmp_out_file)
# Need to generalize for multiple VCFs
one_vcf, two_vcf = tmp_files
cmd = "vcfintersect -r {ref_file} -u {two_vcf} {one_vcf} > {out_file}"
subprocess.check_call(cmd.format(**locals()), shell=True)
for tmp_file in tmp_files:
os.remove(tmp_file)
return out_file
def intersect_beds(base_bed, rm_bed, config):
out_file = os.path.join(config["dirs"]["rm"],
"%s-regions.bed" % config["subset"]["name"])
if not os.path.exists(out_file):
base_bt = pybedtools.BedTool(base_bed)
base_bt.intersect(rm_bed).saveas(out_file)
return out_file
def load_config(config_file):
with open(config_file) as in_handle:
config = yaml.load(in_handle)
dirs = config["dirs"]
config["rm"]["vcfs"] = [os.path.join(dirs["rm"], x) for x in config["rm"]["vcfs"]]
config["rm"]["interval"] = os.path.join(dirs["rm"], config["rm"]["interval"])
config["subset"]["interval"] = os.path.join(dirs["rm"], config["subset"]["interval"])
config["rm"]["ref"] = os.path.join(dirs["genome"], config["rm"]["ref"])
return config
if __name__ == "__main__":
main(sys.argv[1])
| 2,505 | 0 | 115 |
7eee2fbdb615744d318d8742d8eca13f0229393c | 2,768 | py | Python | biblification.py | olivierkes/bible_libre | 84c60e424c00da6171705b3e613240886640f3e5 | [
"Unlicense"
] | 1 | 2021-11-16T14:25:35.000Z | 2021-11-16T14:25:35.000Z | biblification.py | olivierkes/bible_libre | 84c60e424c00da6171705b3e613240886640f3e5 | [
"Unlicense"
] | null | null | null | biblification.py | olivierkes/bible_libre | 84c60e424c00da6171705b3e613240886640f3e5 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf8 -*-
import csv
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This generates a t2t bible.')
parser.add_argument('-p', '--plan', help='plan to be used',
default="nouveau-testament-commente")
#parser.add_argument('-s', '--style', help='style used',
#default="default")
parser.add_argument('-v', help='show verses references', action='store_const', const=True, default=False)
parser.add_argument('-m', help='show marks only', action='store_const', const=True, default=False)
#parser.add_argument('output', help='output file')
args = parser.parse_args()
plan = args.plan
#style = args.style
#output = args.output
showVerse = args.v
showMarks = args.m
text = ""
with open('plans/' + plan + ".csv", 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
r = 0
struct = []
for row in reader:
if r != 0:
struct.append(row)
r += 1
for i in range(len(struct)):
row = struct[i]
nextRow = -1
text += addTitle(row)
if i != len(struct) - 1: nextRow = struct[i + 1]
if nextRow != -1 and nextRow[2] == row[2] and nextRow[3] == row[3]:
pass
else:
text += getText(row[0], row[2], row[3], row[4], row[5], row[6], showVerse, showMarks)
print text
| 32.186047 | 109 | 0.501445 | #!/usr/bin/python
# -*- coding: utf8 -*-
import csv
import argparse
def parseText(t):
t = t.replace("\\n\\q", " \n\n\t")
t = t.replace("\\n", " \n")
t = t.replace("\\q", "\t")
t = t.replace("\\p", " \n\n")
return t
def addMark(t):
i = -1
while t[i] in ["\n", "\t"]:
i -= 1
return t[:i] + " ยฐ" + t[i:]
def getText(book, startChapter, startVerse, endChapter, endVerse, title, showVerses, showMarks):
r = ""
with open('textes/' + book + ".csv", 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
within = False
for row in reader:
if row[0] == startChapter and row[1] == startVerse:
within = True
if within:
rr = parseText(row[2])
if rr[-1] != "\n": rr += " "
if showVerses: rr = ":sup:`{}:{}`\xc2\xa0".format(row[0], row[1]) + rr
if showMarks and not showVerses: rr = addMark(rr)
r += rr
#r += "({}:{}) ".format(row[0], row[1])
if row[0] == endChapter and row[1] == endVerse:
break
return r
def addTitle(row):
charSet = "#=-~_"
return "\n\n" + row[6] + "\n" + charSet[int(row[1])] * len(row[6]) + "\n"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This generates a t2t bible.')
parser.add_argument('-p', '--plan', help='plan to be used',
default="nouveau-testament-commente")
#parser.add_argument('-s', '--style', help='style used',
#default="default")
parser.add_argument('-v', help='show verses references', action='store_const', const=True, default=False)
parser.add_argument('-m', help='show marks only', action='store_const', const=True, default=False)
#parser.add_argument('output', help='output file')
args = parser.parse_args()
plan = args.plan
#style = args.style
#output = args.output
showVerse = args.v
showMarks = args.m
text = ""
with open('plans/' + plan + ".csv", 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
r = 0
struct = []
for row in reader:
if r != 0:
struct.append(row)
r += 1
for i in range(len(struct)):
row = struct[i]
nextRow = -1
text += addTitle(row)
if i != len(struct) - 1: nextRow = struct[i + 1]
if nextRow != -1 and nextRow[2] == row[2] and nextRow[3] == row[3]:
pass
else:
text += getText(row[0], row[2], row[3], row[4], row[5], row[6], showVerse, showMarks)
print text
| 1,144 | 0 | 92 |
5471538f7c2ee28507998ade17fdbcced5321a8c | 1,087 | py | Python | src/foxdot/sandbox/180811_0506_compo_032.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | 4 | 2018-06-29T18:39:34.000Z | 2021-06-20T16:44:29.000Z | src/foxdot/sandbox/180811_0506_compo_032.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | null | null | null | src/foxdot/sandbox/180811_0506_compo_032.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | null | null | null | Scale.default = 'indian'
Root.default = 0
Clock.bpm = 80
print(Scale.names())
print(SynthDefs)
print(FxList)
print(PatternMethods)
print(Samples)
print(Clock.playing)
var.ch = var([0,1],[15,3])
~p1 >> play('W', amp=.5, dur=6, shape=.5, rate=.5, room=1, formant=[1,1,2])
~p2 >> play('m', amp=.8, dur=2/3, sample=[0,1,0], room=1).often('stutter', 2, dur=1.5, pan=PRand([-1,1]), rate=1.5)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.5, oct=4)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.3, oct=4, spin=1)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.2, oct=4, spin=2)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.5, oct=4, spin=4)
~s2 >> blip(var.ch, amp=PWhite(0,.6), dur=PDur(4,6)*2, oct=PRand([4,4,4,5,5,6]), lpf=800, room=1).sometimes('offadd', 4)
~s3 >> sitar(var.ch+PWalk(), dur=.25, amp=PRand([0,.8])[:36], formant=1, oct=PRand([6,7,8]), vib=12, room=1)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.5, oct=4, spin=4, formant=1)
s3.every(6, 'degrade')
Clock.future(36, s3.stop)
~p3 >> play('( k[kk ] k[ kk]) |r3|', dur=1, rate=.5, amp=.5, lpf=00, formant=1)
p_all.stop()
| 31.057143 | 120 | 0.582337 | Scale.default = 'indian'
Root.default = 0
Clock.bpm = 80
print(Scale.names())
print(SynthDefs)
print(FxList)
print(PatternMethods)
print(Samples)
print(Clock.playing)
var.ch = var([0,1],[15,3])
~p1 >> play('W', amp=.5, dur=6, shape=.5, rate=.5, room=1, formant=[1,1,2])
~p2 >> play('m', amp=.8, dur=2/3, sample=[0,1,0], room=1).often('stutter', 2, dur=1.5, pan=PRand([-1,1]), rate=1.5)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.5, oct=4)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.3, oct=4, spin=1)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.2, oct=4, spin=2)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.5, oct=4, spin=4)
~s2 >> blip(var.ch, amp=PWhite(0,.6), dur=PDur(4,6)*2, oct=PRand([4,4,4,5,5,6]), lpf=800, room=1).sometimes('offadd', 4)
~s3 >> sitar(var.ch+PWalk(), dur=.25, amp=PRand([0,.8])[:36], formant=1, oct=PRand([6,7,8]), vib=12, room=1)
~s1 >> klank(var.ch+(0,var([2,3,4],6)), amp=.5, oct=4, spin=4, formant=1)
s3.every(6, 'degrade')
Clock.future(36, s3.stop)
~p3 >> play('( k[kk ] k[ kk]) |r3|', dur=1, rate=.5, amp=.5, lpf=00, formant=1)
p_all.stop()
| 0 | 0 | 0 |
8219ece2a6ff254575b95ea24d0cd5a196a8f886 | 4,237 | py | Python | graphene_auto/graphql_auto/query.py | ssshier/meta-realize | cc13309fa9e7e59044fb1c8e6e6b0a62caa7ca8c | [
"MIT"
] | 1 | 2021-12-18T09:12:58.000Z | 2021-12-18T09:12:58.000Z | graphene_auto/graphql_auto/query.py | ssshier/meta-realize | cc13309fa9e7e59044fb1c8e6e6b0a62caa7ca8c | [
"MIT"
] | null | null | null | graphene_auto/graphql_auto/query.py | ssshier/meta-realize | cc13309fa9e7e59044fb1c8e6e6b0a62caa7ca8c | [
"MIT"
] | null | null | null | from collections import OrderedDict
import graphene
from graphene.types.generic import GenericScalar
from graphene.types.objecttype import ObjectTypeOptions
from graphene_sqlalchemy import SQLAlchemyConnectionField
from graphene_sqlalchemy.types import sort_argument_for_object_type
from graphene_sqlalchemy_auto.filter import filter_query
from graphene_sqlalchemy_auto.types import SQLAlchemyObjectTypes
from graphql_auto.filter import extend_query_filter
class CustomConnection(graphene.relay.Connection):
"""
CustomConnection
default add total count for query list
"""
total_count = graphene.Int()
@staticmethod
# first lower
| 33.896 | 98 | 0.628275 | from collections import OrderedDict
import graphene
from graphene.types.generic import GenericScalar
from graphene.types.objecttype import ObjectTypeOptions
from graphene_sqlalchemy import SQLAlchemyConnectionField
from graphene_sqlalchemy.types import sort_argument_for_object_type
from graphene_sqlalchemy_auto.filter import filter_query
from graphene_sqlalchemy_auto.types import SQLAlchemyObjectTypes
from graphql_auto.filter import extend_query_filter
class CustomConnectionField(SQLAlchemyConnectionField):
def __init__(self, connection, *args, **kwargs):
"""
add default query
filters
limit
offset
"""
model = connection.Edge.node._type._meta.model
if "filters" not in kwargs:
kwargs.setdefault("filters", sort_argument_for_object_type(model))
elif "filters" in kwargs and kwargs["filters"] is None:
del kwargs["filters"]
if "limit" not in kwargs:
kwargs.setdefault("limit", sort_argument_for_object_type(model))
elif "limit" in kwargs and kwargs["limit"] is None:
del kwargs["limit"]
if "offset" not in kwargs:
kwargs.setdefault("offset", sort_argument_for_object_type(model))
elif "offset" in kwargs and kwargs["offset"] is None:
del kwargs["offset"]
super(CustomConnectionField, self).__init__(connection, *args, **kwargs)
@classmethod
def get_query(cls, model, info, **args):
query = super(CustomConnectionField, cls).get_query(model, info, **args)
# extend query filter
args = extend_query_filter(model, args)
if args.get("filters"):
query = filter_query(query, model, args["filters"])
if "limit" in args:
query = query.limit(args["limit"])
if "offset" in args:
query = query.offset(args["offset"])
return query
class CustomConnection(graphene.relay.Connection):
"""
CustomConnection
default add total count for query list
"""
class Meta:
abstract = True
total_count = graphene.Int()
@staticmethod
def resolve_total_count(root, info):
return root.iterable.limit(None).offset(None).count()
def model_connection(model):
    """Create a paginated connection field for *model*.

    The generated connection type is named ``<Model>Connection``, carries the
    ``total_count`` field from CustomConnection, and accepts ``filters``
    (generic scalar), ``limit`` and ``offset`` arguments.
    """
    connection = CustomConnection.create_type(
        model.__name__ + "Connection", node=SQLAlchemyObjectTypes().get(model)
    )
    return CustomConnectionField(
        connection,
        filters=GenericScalar(),
        limit=graphene.types.Int(),
        offset=graphene.types.Int(),
    )
def decapitalize(s, upper_rest=False):
    """Return *s* with its first character lower-cased.

    When *upper_rest* is true the remainder of the string is upper-cased;
    otherwise it is left untouched.  An empty string is returned unchanged.
    """
    if not s:
        return s
    rest = s[1:]
    if upper_rest:
        rest = rest.upper()
    return s[0].lower() + rest
class QueryObjectType(graphene.ObjectType):
    """Auto-generated root Query type.

    For every SQLAlchemy model registered on the given declarative base(s)
    it exposes a single-node field (``user``) and a paginated list field
    (``user_list``), plus the relay ``node`` field.
    """

    @classmethod
    def __init_subclass_with_meta__(
        cls, declarative_base, exclude_models=None, _meta=None, **options
    ):
        """
        :param declarative_base: sqlalchemy's declarative base (or a list of them)
        :param exclude_models: model class names to skip
        :param _meta: optional pre-built ObjectTypeOptions
        :param options: forwarded to graphene.ObjectType
        :return: result of the parent ``__init_subclass_with_meta__``
        """
        if exclude_models is None:
            exclude_models = []
        if not _meta:
            _meta = ObjectTypeOptions(cls)
        fields = OrderedDict()
        fields["node"] = graphene.relay.Node.Field()
        if not isinstance(declarative_base, list):
            declarative_base = [declarative_base]
        for base in declarative_base:  # declarative_base can be multiple bases
            # NOTE(review): ``base.registry.mappers`` requires SQLAlchemy 1.4+ —
            # TODO confirm the project's minimum SQLAlchemy version.
            for model in base.registry.mappers:
                model_obj = model.class_
                if model_obj.__name__ in exclude_models:
                    continue
                # e.g. for a ``User`` model: ``user`` (node) and ``user_list``
                # (connection with filters/limit/offset).
                fields.update(
                    {
                        decapitalize(model_obj.__name__): graphene.relay.Node.Field(
                            SQLAlchemyObjectTypes().get(model_obj)
                        ),
                        "%s_list" % decapitalize(model_obj.__name__): model_connection(model_obj),
                    }
                )
        # Merge into any fields supplied by the caller rather than replacing them.
        if _meta.fields:
            _meta.fields.update(fields)
        else:
            _meta.fields = fields
        return super(QueryObjectType, cls).__init_subclass_with_meta__(
            _meta=_meta, **options
        )
| 915 | 2,509 | 144 |
ef4b06b997fac916729f1f37fe5a226c6c148e96 | 1,392 | py | Python | cms/stacks/templatetags/stack_tags.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
] | 2 | 2018-05-17T02:49:49.000Z | 2019-08-20T02:07:44.000Z | cms/stacks/templatetags/stack_tags.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
] | 2 | 2019-02-13T07:58:23.000Z | 2019-02-13T07:58:27.000Z | cms/stacks/templatetags/stack_tags.py | mightyiam/django-cms | 09bf76d2f3d81fdaebcfb7e9ed4ecd4769fa8c25 | [
"BSD-3-Clause"
] | null | null | null | from classytags.arguments import Argument
from classytags.core import Tag, Options
from django import template
from cms.plugin_rendering import render_placeholder
from cms.stacks.models import Stack
register = template.Library()
# NOTE(review): StackNode is referenced here but not defined in this excerpt
# (the class body appears to have been stripped by the dataset filter); as
# written this line would raise NameError at import time — confirm against
# the full module.
register.tag(StackNode)
| 33.95122 | 112 | 0.658764 | from classytags.arguments import Argument
from classytags.core import Tag, Options
from django import template
from cms.plugin_rendering import render_placeholder
from cms.stacks.models import Stack
register = template.Library()


class StackNode(Tag):
    """``{% stack "code" %}`` template tag.

    Renders the placeholder content of the Stack identified by *code*,
    creating the Stack record on first use.
    """

    name = 'stack'
    options = Options(
        Argument('code', required=True),
        'as',
        Argument('varname', required=False, resolve=False)
    )

    def render_tag(self, context, code, varname):
        """Render the stack's placeholder, or return '' when *code* or the
        request is unavailable.  *varname* is accepted by the tag grammar but
        not used in this implementation."""
        # TODO: language override (the reason this is not implemented, is that language selection is buried way
        # down somewhere in some method called in render_plugins. There it gets extracted from the request
        # and a language in request.GET always overrides everything.)
        if not code:
            # an empty string was passed in or the variable is not available in the context
            return ''
        # TODO: caching?
        # render_placeholder needs the request; bail out quietly without one.
        request = context.get('request', False)
        if not request:
            return ''
        if isinstance(code, Stack):
            # The tag may be handed a Stack instance directly instead of a code string.
            stack = code
        else:
            # Create on demand so templates may reference stacks that do not exist yet.
            stack, __ = Stack.objects.get_or_create(code=code, defaults={'name': code,
                                                    'creation_method': Stack.CREATION_BY_TEMPLATE})
        placeholder = stack.content
        return render_placeholder(placeholder, context, name_fallback=code)
register.tag(StackNode)
| 921 | 189 | 23 |
e5466609c1e8a5716819e3a17f2a6558dd33f7ad | 6,135 | py | Python | utils/lib/rr_graph/tracks.py | Fleker/symbiflow-arch-defs | 0be4b3ec85e4614b29258116492299f39cf59ae3 | [
"ISC"
] | null | null | null | utils/lib/rr_graph/tracks.py | Fleker/symbiflow-arch-defs | 0be4b3ec85e4614b29258116492299f39cf59ae3 | [
"ISC"
] | null | null | null | utils/lib/rr_graph/tracks.py | Fleker/symbiflow-arch-defs | 0be4b3ec85e4614b29258116492299f39cf59ae3 | [
"ISC"
] | null | null | null | from collections import namedtuple
import pprint
from enum import Enum
# A straight channel segment: direction is 'X' (horizontal) or 'Y' (vertical),
# with inclusive low/high grid coordinates.
Track = namedtuple('Track', 'direction x_low x_high y_low y_high')


def make_tracks(xs, ys, points):
    """ Give a list of xs columns and ys rows and points, return a list of
    Track's and connections between the tracks.

    An assert will fail if each point in the point list is not covered by
    a column in xs or a row in ys.

    Connections will be models as indicies into the track list.

    The connection list is returned sorted so the result is deterministic
    (it previously reflected arbitrary ``set`` iteration order, which is an
    implementation detail of the Python runtime).

    Return:
        [Track], [(index into track list, index into track list)]

    >>> pos = [
    ...     (0,0), (2,0),
    ...     (0,1), (1,1), (2,1),
    ...     (0,2), (2,2),
    ...     (0,3), (1,3), (2,3),
    ...     (0,4), (2,4),
    ... ]
    >>> xs = [0, 2]
    >>> ys = [1, 3]
    >>> tracks, connections = make_tracks(xs, ys, pos)
    >>> print_tracks(tracks)
    [Track(direction='Y', x_low=0, x_high=0, y_low=0, y_high=4),
     Track(direction='Y', x_low=2, x_high=2, y_low=0, y_high=4),
     Track(direction='X', x_low=0, x_high=2, y_low=1, y_high=1),
     Track(direction='X', x_low=0, x_high=2, y_low=3, y_high=3)]
    >>> print(connections)
    [(2, 0), (2, 1), (3, 0)]

    >>> pos = [
    ...     (68,48), (69,48),
    ...     (68,49), (69,49),
    ...     (69,50),
    ...     (69,51),
    ...     (69,52),
    ...     (69,53), (70,53), (71,53), (72,53)]
    >>> xs = [68, 69]
    >>> ys = [53]
    >>> tracks, connections = make_tracks(xs, ys, pos)
    >>> print_tracks(tracks)
    [Track(direction='Y', x_low=68, x_high=68, y_low=48, y_high=53),
     Track(direction='Y', x_low=69, x_high=69, y_low=48, y_high=53),
     Track(direction='X', x_low=68, x_high=72, y_low=53, y_high=53)]
    >>> print(connections)
    [(2, 0), (2, 1)]
    """
    x_set = set(xs)
    y_set = set(ys)

    # Every point must lie on at least one requested column or row.
    for x, y in points:
        assert x in x_set or y in y_set, (x, y)

    all_xs, all_ys = zip(*points)
    x_min = min(all_xs)
    x_max = max(all_xs)
    y_min = min(all_ys)
    y_max = max(all_ys)

    tracks = []
    x_tracks = []
    y_tracks = []
    # One vertical ('Y') track per requested column, spanning the full y range.
    for x in xs:
        tracks.append(Track(
            direction='Y',
            x_low=x,
            x_high=x,
            y_low=y_min,
            y_high=y_max,
        ))
        y_tracks.append(len(tracks) - 1)

    # One horizontal ('X') track per requested row, spanning the full x range.
    for y in ys:
        tracks.append(Track(
            direction='X',
            x_low=x_min,
            x_high=x_max,
            y_low=y,
            y_high=y,
        ))
        x_tracks.append(len(tracks) - 1)

    if len(tracks) == 1:
        return tracks, []

    # If there is more than 1 track, there must be a track in each dimension
    assert len(xs) >= 1 and len(ys) >= 1

    connections = set()
    # Always just connect X tracks to the first Y track, and Y tracks to the
    # first X tracks.
    #
    # To dedup connections, the x channel track will appear first in the
    # connection list.
    for idx, track in enumerate(tracks):
        if track.direction == 'X':
            connections.add((idx, y_tracks[0]))
        else:
            assert track.direction == 'Y'
            connections.add((x_tracks[0], idx))

    # Sort for a reproducible ordering regardless of set iteration order.
    return tracks, sorted(connections)
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this excerpt (it appears to have
    # been stripped by the dataset filter); running this copy directly would
    # raise NameError — confirm against the full module.
    main()
| 29.354067 | 90 | 0.545884 | from collections import namedtuple
import pprint
from enum import Enum
class Direction(Enum):
    """Side of a grid cell on which a wire can attach to a track."""
    NO_SIDE = 0
    LEFT = 1
    RIGHT = 2
    TOP = 3
    BOTTOM = 4
# A straight channel segment: direction is 'X' (horizontal) or 'Y' (vertical),
# with inclusive low/high grid coordinates.
Track = namedtuple('Track', 'direction x_low x_high y_low y_high')


def print_tracks(tracks):
    """Pretty-print a list of Track tuples (used by the doctests below)."""
    pprint.pprint(tracks)


def make_tracks(xs, ys, points):
    """ Give a list of xs columns and ys rows and points, return a list of
    Track's and connections between the tracks.

    An assert will fail if each point in the point list is not covered by
    a column in xs or a row in ys.

    Connections will be models as indicies into the track list.

    The connection list is returned sorted so the result is deterministic
    (it previously reflected arbitrary ``set`` iteration order, which is an
    implementation detail of the Python runtime).

    Return:
        [Track], [(index into track list, index into track list)]

    >>> pos = [
    ...     (0,0), (2,0),
    ...     (0,1), (1,1), (2,1),
    ...     (0,2), (2,2),
    ...     (0,3), (1,3), (2,3),
    ...     (0,4), (2,4),
    ... ]
    >>> xs = [0, 2]
    >>> ys = [1, 3]
    >>> tracks, connections = make_tracks(xs, ys, pos)
    >>> print_tracks(tracks)
    [Track(direction='Y', x_low=0, x_high=0, y_low=0, y_high=4),
     Track(direction='Y', x_low=2, x_high=2, y_low=0, y_high=4),
     Track(direction='X', x_low=0, x_high=2, y_low=1, y_high=1),
     Track(direction='X', x_low=0, x_high=2, y_low=3, y_high=3)]
    >>> print(connections)
    [(2, 0), (2, 1), (3, 0)]

    >>> pos = [
    ...     (68,48), (69,48),
    ...     (68,49), (69,49),
    ...     (69,50),
    ...     (69,51),
    ...     (69,52),
    ...     (69,53), (70,53), (71,53), (72,53)]
    >>> xs = [68, 69]
    >>> ys = [53]
    >>> tracks, connections = make_tracks(xs, ys, pos)
    >>> print_tracks(tracks)
    [Track(direction='Y', x_low=68, x_high=68, y_low=48, y_high=53),
     Track(direction='Y', x_low=69, x_high=69, y_low=48, y_high=53),
     Track(direction='X', x_low=68, x_high=72, y_low=53, y_high=53)]
    >>> print(connections)
    [(2, 0), (2, 1)]
    """
    x_set = set(xs)
    y_set = set(ys)

    # Every point must lie on at least one requested column or row.
    for x, y in points:
        assert x in x_set or y in y_set, (x, y)

    all_xs, all_ys = zip(*points)
    x_min = min(all_xs)
    x_max = max(all_xs)
    y_min = min(all_ys)
    y_max = max(all_ys)

    tracks = []
    x_tracks = []
    y_tracks = []
    # One vertical ('Y') track per requested column, spanning the full y range.
    for x in xs:
        tracks.append(Track(
            direction='Y',
            x_low=x,
            x_high=x,
            y_low=y_min,
            y_high=y_max,
        ))
        y_tracks.append(len(tracks) - 1)

    # One horizontal ('X') track per requested row, spanning the full x range.
    for y in ys:
        tracks.append(Track(
            direction='X',
            x_low=x_min,
            x_high=x_max,
            y_low=y,
            y_high=y,
        ))
        x_tracks.append(len(tracks) - 1)

    if len(tracks) == 1:
        return tracks, []

    # If there is more than 1 track, there must be a track in each dimension
    assert len(xs) >= 1 and len(ys) >= 1

    connections = set()
    # Always just connect X tracks to the first Y track, and Y tracks to the
    # first X tracks.
    #
    # To dedup connections, the x channel track will appear first in the
    # connection list.
    for idx, track in enumerate(tracks):
        if track.direction == 'X':
            connections.add((idx, y_tracks[0]))
        else:
            assert track.direction == 'Y'
            connections.add((x_tracks[0], idx))

    # Sort for a reproducible ordering regardless of set iteration order.
    return tracks, sorted(connections)
class Tracks(object):
    """A collection of Track segments plus the connections that join them."""

    def __init__(self, tracks, track_connections):
        # tracks: list of Track namedtuples.
        # track_connections: list of (track index, track index) pairs.
        self.tracks = tracks
        self.track_connections = track_connections

    def verify_tracks(self):
        """ Verify that all tracks are connected to all other tracks. """
        # Connected-component merge: map each track index to the set of
        # indices reachable from it, unioning sets along each connection.
        track_connections = {}
        for idx, _ in enumerate(self.tracks):
            track_connections[idx] = set((idx,))

        for conn_a, conn_b in self.track_connections:
            if track_connections[conn_a] is track_connections[conn_b]:
                continue
            # A connection must join tracks of different orientations.
            assert self.tracks[conn_a].direction != self.tracks[conn_b].direction
            track_connections[conn_a] |= track_connections[conn_b]
            # Point every member of the merged component at the same set object,
            # so the identity test above keeps working.
            for track_idx in track_connections[conn_a]:
                track_connections[track_idx] = track_connections[conn_a]

        # Fully connected iff exactly one shared set object remains.
        assert len(set(id(s) for s in track_connections.values())) == 1, track_connections

    def is_wire_adjacent_to_track(self, idx, coord):
        """Return the Direction on which the wire at *coord* touches track
        *idx*, or Direction.NO_SIDE when it is not adjacent."""
        track = self.tracks[idx]
        wire_x, wire_y = coord

        if track.direction == 'X':
            # Horizontal track: the wire can sit on the row of the track
            # (TOP) or on the row just above it (BOTTOM).
            pin_top = track.y_low == wire_y
            pin_bottom = track.y_low == wire_y - 1
            adjacent_channel = ((pin_top or pin_bottom) and (
                track.x_low <= wire_x and wire_x <= track.x_high))
            if adjacent_channel:
                if pin_top:
                    return Direction.TOP
                elif pin_bottom:
                    return Direction.BOTTOM
                else:
                    assert False, (coord, track)
            else:
                return Direction.NO_SIDE
        elif track.direction == 'Y':
            # Vertical track: the wire can sit on the track's column (RIGHT)
            # or on the column just right of it (LEFT).
            pin_right = track.x_low == wire_x
            pin_left = track.x_low == wire_x - 1
            adjacent_channel = ((pin_right or pin_left) and (
                track.y_low <= wire_y and wire_y <= track.y_high))
            if adjacent_channel:
                if pin_right:
                    return Direction.RIGHT
                elif pin_left:
                    return Direction.LEFT
                else:
                    assert False, (coord, track)
            else:
                return Direction.NO_SIDE
        else:
            assert False, track

    def get_tracks_for_wire_at_coord(self, coord):
        """ Returns which track indicies and direction a wire at a coord can
        be connected too. """
        wire_x, wire_y = coord
        for idx, track in enumerate(self.tracks):
            pin_dir = self.is_wire_adjacent_to_track(idx, coord)
            if pin_dir != Direction.NO_SIDE:
                yield (idx, pin_dir)
def main():
    """Run this module's doctests (ELLIPSIS enabled) and report start/end."""
    import doctest
    print('Doctest begin')
    doctest.testmod(optionflags=doctest.ELLIPSIS)
    print('Doctest end')
if __name__ == "__main__":
    # Executed directly: run the doctest suite above.
    main()
| 1,530 | 1,274 | 92 |
d5efc68128591aad4024897c7fc12d3343ac12cf | 3,708 | py | Python | easypyqt/widget/buttongroupwidget.py | mafster/easypyqt4 | e3681de1a974741e74cc3b77cad3e599acef20ae | [
"MIT"
] | null | null | null | easypyqt/widget/buttongroupwidget.py | mafster/easypyqt4 | e3681de1a974741e74cc3b77cad3e599acef20ae | [
"MIT"
] | null | null | null | easypyqt/widget/buttongroupwidget.py | mafster/easypyqt4 | e3681de1a974741e74cc3b77cad3e599acef20ae | [
"MIT"
] | null | null | null | from PyQt4 import QtGui, QtCore
from easypyqt.widget import basicwidget
class ButtonGroupWidget(basicwidget.BasicWidget):
    """
    A group of widgets with horizontal or vertical layout.

    EXAMPLE::
        buttonList = [('test1', 'TestONE'), ('test2', 'TestTWO')]
        fw = ButtonGroupWidget(button_list=buttonList, label='My Test', exclusive=True)
        fw.show()
    """
    # Style applied to de-selected buttons while in exclusive mode.
    FONT_GRAY = 'color: rgb(160, 160, 160)'
    # Emitted with the clicked QPushButton whenever any group button is pressed.
    buttonClicked = QtCore.pyqtSignal(QtGui.QPushButton)

    def __init__(self, button_list=None, label=None, vertical=False, exclusive=False, exclusive_color='#46c878'):
        """
        :param button_list: *(list(tuple))* list of string tuples. [(name, label)]
        :param label: *(str)* visible label or "title" for the button group
        :param vertical: *(bool)* if True will lay buttons out vertically
        :param exclusive: *(bool)* if True will highlight button clicked and ghost the rest. Button can be accessed
            via get_exclusive_button() or get_exclusive_button_name()
        :param exclusive_color *(str)* hex colour to use if exclusive option is True
        """
        super(ButtonGroupWidget, self).__init__(vertical=vertical)

        self.button_list = button_list or []
        self.exclusive = exclusive
        self.exclusive_color = exclusive_color

        if label:
            label = QtGui.QLabel(label)
            self.basic_layout.addWidget(label)

        # Build one QPushButton per (name, label) pair; objectName carries the
        # lookup key, the tuple's second item is the visible text.
        for each in self.button_list:
            button = QtGui.QPushButton(each[1])
            button.setObjectName(each[0])
            button.exclusive = False
            button.clicked.connect(self.button_clicked)
            self.basic_layout.addWidget(button)

    def get_button_by_name(self, name):
        """
        Returns the QPushButton that has name matching name passed
        :param name:
        :return:
        """
        # NOTE(review): get_all_buttons() is referenced here and in
        # button_clicked() but is not defined in this excerpt (it appears to
        # have been stripped by the dataset filter) — confirm against the
        # full module.
        for each in self.get_all_buttons():
            if each.objectName() == name:
                return each

    def button_clicked(self):
        """
        This executes when a button is clicked.
        :return:
        """
        button = self.sender()

        if self.exclusive:
            # Highlight the clicked button and ghost all the others.
            button.setStyleSheet('background-color: {}'.format(self.exclusive_color))
            button.exclusive = True
            for each in [x for x in self.get_all_buttons() if x.objectName() != button.objectName()]:
                each.exclusive = False
                each.setStyleSheet(self.FONT_GRAY)

        self.buttonClicked.emit(button)

    def get_exclusive_button(self):
        """
        :return: *(QtGui.QPushButton)*
        """
        if not self.exclusive:
            raise RuntimeError('This ButtonGroupWidget has not been instantiated with param exclusive = True')

        for each in self.get_all_buttons():
            if each.exclusive:
                return each

    def get_exclusive_button_name(self):
        """
        :return: *(str)* name of the exclusive button
        """
        return self.get_exclusive_button().objectName()
if __name__ == '__main__':
    # Manual smoke test: show a two-button exclusive group.
    import sys
    app = QtGui.QApplication(sys.argv)

    buttonList = [('test1', 'TestONE'), ('test2', 'TestTWO')]
    fw = ButtonGroupWidget(button_list=buttonList, label='My Test', exclusive=True)
    fw.show()

    sys.exit(app.exec_())
| 31.423729 | 121 | 0.608414 | from PyQt4 import QtGui, QtCore
from easypyqt.widget import basicwidget
class ButtonGroupWidget(basicwidget.BasicWidget):
    """
    A group of widgets with horizontal or vertical layout.

    EXAMPLE::
        buttonList = [('test1', 'TestONE'), ('test2', 'TestTWO')]
        fw = ButtonGroupWidget(button_list=buttonList, label='My Test', exclusive=True)
        fw.show()
    """
    # Style applied to de-selected buttons while in exclusive mode.
    FONT_GRAY = 'color: rgb(160, 160, 160)'
    # Emitted with the clicked QPushButton whenever any group button is pressed.
    buttonClicked = QtCore.pyqtSignal(QtGui.QPushButton)

    def __init__(self, button_list=None, label=None, vertical=False, exclusive=False, exclusive_color='#46c878'):
        """
        :param button_list: *(list(tuple))* list of string tuples. [(name, label)]
        :param label: *(str)* visible label or "title" for the button group
        :param vertical: *(bool)* if True will lay buttons out vertically
        :param exclusive: *(bool)* if True will highlight button clicked and ghost the rest. Button can be accessed
            via get_exclusive_button() or get_exclusive_button_name()
        :param exclusive_color *(str)* hex colour to use if exclusive option is True
        """
        super(ButtonGroupWidget, self).__init__(vertical=vertical)

        self.button_list = button_list or []
        self.exclusive = exclusive
        self.exclusive_color = exclusive_color

        if label:
            label = QtGui.QLabel(label)
            self.basic_layout.addWidget(label)

        # Build one QPushButton per (name, label) pair; objectName carries the
        # lookup key, the tuple's second item is the visible text.
        for each in self.button_list:
            button = QtGui.QPushButton(each[1])
            button.setObjectName(each[0])
            button.exclusive = False
            button.clicked.connect(self.button_clicked)
            self.basic_layout.addWidget(button)

    def __getattr__(self, item):
        # Get button by dot notation
        # Only called when normal attribute lookup fails; falls back to the
        # default lookup (raising AttributeError) when no button matches.
        b = self.get_button_by_name(item)
        if b:
            return b
        else:
            return super(ButtonGroupWidget, self).__getattribute__(item)

    def get_all_buttons(self):
        # All QPushButton children of this widget, in creation order.
        return self.findChildren(QtGui.QPushButton)

    def get_button_by_name(self, name):
        """
        Returns the QPushButton that has name matching name passed
        :param name:
        :return:
        """
        # Returns None implicitly when no button matches.
        for each in self.get_all_buttons():
            if each.objectName() == name:
                return each

    def button_clicked(self):
        """
        This executes when a button is clicked.
        :return:
        """
        button = self.sender()

        if self.exclusive:
            # Highlight the clicked button and ghost all the others.
            button.setStyleSheet('background-color: {}'.format(self.exclusive_color))
            button.exclusive = True
            for each in [x for x in self.get_all_buttons() if x.objectName() != button.objectName()]:
                each.exclusive = False
                each.setStyleSheet(self.FONT_GRAY)

        self.buttonClicked.emit(button)

    def get_exclusive_button(self):
        """
        :return: *(QtGui.QPushButton)*
        """
        if not self.exclusive:
            raise RuntimeError('This ButtonGroupWidget has not been instantiated with param exclusive = True')

        for each in self.get_all_buttons():
            if each.exclusive:
                return each

    def get_exclusive_button_name(self):
        """
        :return: *(str)* name of the exclusive button
        """
        return self.get_exclusive_button().objectName()
if __name__ == '__main__':
    # Manual smoke test: show a two-button exclusive group.
    import sys
    app = QtGui.QApplication(sys.argv)

    buttonList = [('test1', 'TestONE'), ('test2', 'TestTWO')]
    fw = ButtonGroupWidget(button_list=buttonList, label='My Test', exclusive=True)
    fw.show()

    sys.exit(app.exec_())
| 267 | 0 | 54 |
7eb67fb923203b51676df835efeceb20c49ef936 | 1,812 | py | Python | tests/test_core_we_get.py | asnapper-net/we-get | ca88118d6bf18485ae1c45284d14b53df6adf40c | [
"MIT"
] | 131 | 2017-05-05T01:26:21.000Z | 2022-03-31T05:41:22.000Z | tests/test_core_we_get.py | asnapper-net/we-get | ca88118d6bf18485ae1c45284d14b53df6adf40c | [
"MIT"
] | 37 | 2017-05-04T20:48:20.000Z | 2022-03-21T12:17:19.000Z | tests/test_core_we_get.py | asnapper-net/we-get | ca88118d6bf18485ae1c45284d14b53df6adf40c | [
"MIT"
] | 19 | 2017-05-04T16:18:35.000Z | 2022-01-31T11:52:48.000Z | import pytest
from docopt import docopt, DocoptExit
from we_get.core.we_get import WG
from we_get.core import we_get
# NOTE(review): these two @pytest.mark.parametrize decorators are not
# followed by test functions in this excerpt (the decorated functions appear
# to have been stripped by the dataset filter); as written this span is not
# valid standalone Python — confirm against the full test module.
@pytest.mark.parametrize(
    'argv, exp_res',
    [
        [None, {'arguments': None, 'parguments': {}, 'we_get_run': 0}],
        [['--search', 'ubuntu'], {
            'arguments': {
                '--config': [],
                '--filter': [],
                '--genre': [],
                '--get-list': 0,
                '--help': 0,
                '--json': 0,
                '--links': 0,
                '--list': 0,
                '--quality': [],
                '--results': [],
                '--search': ['ubuntu'],
                '--sfw': 0,
                '--sort-type': [],
                '--target': ['all'],
                '--version': 0
            },
            'parguments': {
                '--search': ['ubuntu'], '--target': ['all']}, 'we_get_run': 1
        }],
    ]
)
@pytest.mark.parametrize(
    'argv, exp_res',
    [
        [
            [],
            {
                '--filter': [], '--genre': [], '--get-list': 0, '--help': 0, '--json': 0,
                '--links': 0, '--list': 0, '--quality': [], '--results': [], '--search': [],
                '--sort-type': [], '--target': ['all'], '--version': 0, '--config': [], '--sfw': 0}
        ],
    ],
)
| 28.3125 | 99 | 0.418874 | import pytest
from docopt import docopt, DocoptExit
from we_get.core.we_get import WG
from we_get.core import we_get
# Cases: argv=None must raise DocoptExit on every call and leave the WG
# instance in its freshly-constructed state; a valid --search argv populates
# arguments/parguments and sets we_get_run.
@pytest.mark.parametrize(
    'argv, exp_res',
    [
        [None, {'arguments': None, 'parguments': {}, 'we_get_run': 0}],
        [['--search', 'ubuntu'], {
            'arguments': {
                '--config': [],
                '--filter': [],
                '--genre': [],
                '--get-list': 0,
                '--help': 0,
                '--json': 0,
                '--links': 0,
                '--list': 0,
                '--quality': [],
                '--results': [],
                '--search': ['ubuntu'],
                '--sfw': 0,
                '--sort-type': [],
                '--target': ['all'],
                '--version': 0
            },
            'parguments': {
                '--search': ['ubuntu'], '--target': ['all']}, 'we_get_run': 1
        }],
    ]
)
def test_parse_arguments(argv, exp_res):
    """WG.parse_arguments: DocoptExit without argv, parsed state otherwise."""
    wg = WG()
    if argv is None:
        # No argv: docopt exits both with and without the explicit argument,
        # and the instance state stays untouched after each attempt.
        with pytest.raises(DocoptExit):
            wg.parse_arguments()
        assert vars(wg) == exp_res
        with pytest.raises(DocoptExit):
            wg.parse_arguments(argv)
        assert vars(wg) == exp_res
    else:
        wg.parse_arguments(argv)
        assert vars(wg) == exp_res
# Empty argv: docopt should yield the full default option map for we_get's
# usage string.
@pytest.mark.parametrize(
    'argv, exp_res',
    [
        [
            [],
            {
                '--filter': [], '--genre': [], '--get-list': 0, '--help': 0, '--json': 0,
                '--links': 0, '--list': 0, '--quality': [], '--results': [], '--search': [],
                '--sort-type': [], '--target': ['all'], '--version': 0, '--config': [], '--sfw': 0}
        ],
    ],
)
def test_we_get_docopt(argv, exp_res):
    """docopt parses the we_get module docstring into the expected defaults."""
    res = docopt(we_get.__doc__, argv=argv)
    assert exp_res == res
| 439 | 0 | 44 |