hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2098e6c83ceaacb23c61f4fef967a43a0da02414 | 2,916 | py | Python | src/train_xgb_FE.py | sin1012/approach-tabular-competitions | 660995ba6c1d42006bce01a90fa04bce507f4c16 | [
"MIT"
] | null | null | null | src/train_xgb_FE.py | sin1012/approach-tabular-competitions | 660995ba6c1d42006bce01a90fa04bce507f4c16 | [
"MIT"
] | null | null | null | src/train_xgb_FE.py | sin1012/approach-tabular-competitions | 660995ba6c1d42006bce01a90fa04bce507f4c16 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import config
import os
import joblib
from sklearn import metrics
from sklearn import preprocessing
import xgboost as xgb
from feature_engineering import feature_engineering
import warnings
warnings.filterwarnings('ignore')
def train(fold):
df = pd.read_csv(config.training_file)
## fill all NaNs
df[df == ' ?'] = np.nan # the dataset has labelled all NaNs with ? already
## label encoding
### define numerical columns
num_feas = ['age', 'wage per hour', 'capital gains', 'capital losses',
'dividends from stocks', 'num persons worked for employer',
'own business or self employed', 'weeks worked in year', 'kfold', 'y']
## only for this competition, mapping them to 0 and 1 as suggested by the organizer
df.y = df.y.map({' - 50000.':0,' 50000+.':1 })
## define all categorical features
cat_feas = [i for i in df.columns if i not in num_feas]
## call the feature engineering function for categorical features
df = feature_engineering(df, cat_feas)
## all features
features = [i for i in df.columns if i not in ('kfold', 'y')]
## fill all NaNs with NONE
for col in features:
if col not in num_feas:
            df.loc[:, col] = df[col].astype(str).fillna('NONE')  # fill all NaNs with NONE
## label encoding each column
## add each encoder to the dictionary
encoder = {}
for col in features:
if col not in num_feas:
lbl = preprocessing.LabelEncoder()
lbl.fit(df[col])
df.loc[:,col] = lbl.transform(df[col])
encoder[col] = lbl
## create data for training and validation
df_train = df[df.kfold != fold].reset_index(drop=True)
df_valid = df[df.kfold == fold].reset_index(drop=True)
## prepare data for training
x_train = df_train.drop(['kfold', 'y'], axis=1).values
y_train = df_train[config.target].values
## similarly, we prepare data for testing
x_valid = df_valid.drop(['kfold', 'y'], axis=1).values
y_valid = df_valid[config.target].values
## initialize a model
model = xgb.XGBClassifier(n_jobs=-1)
print('Training starting!!!')
## fit
model.fit(x_train, y_train)
## predict on validation dataset
valid_preds = model.predict_proba(x_valid)[:,1]
## get roc auc score
auc = metrics.roc_auc_score(y_valid, valid_preds)
## print auc
print(f"Fold = {fold}, AUC = {auc}")
## save the model
joblib.dump(
model,
os.path.join(config.model_output, f'xgb_fe_fold{fold}.bin')
)
## save the columns used to fit the model
joblib.dump(
df_train.drop(['kfold', 'y'], axis=1).columns,
os.path.join(config.model_output, f'xgb_fe_cols_fold{fold}.pkl')
)
## save the label encoder
joblib.dump(
encoder,
os.path.join(config.model_output, f'xgb_fe_encoder_fold{fold}.pkl')
)
for i in range(5):
train(i)
| 28.038462 | 88 | 0.656379 |
362565fd4ae13ebe5c0e0c59366188f1268a50ab | 5,315 | py | Python | guide.py | NAKKA-K/raspi-pedestrian-signs | 64016db54f74ccce9c592082e894c9068da8b5c6 | [
"MIT"
] | null | null | null | guide.py | NAKKA-K/raspi-pedestrian-signs | 64016db54f74ccce9c592082e894c9068da8b5c6 | [
"MIT"
] | null | null | null | guide.py | NAKKA-K/raspi-pedestrian-signs | 64016db54f74ccce9c592082e894c9068da8b5c6 | [
"MIT"
] | null | null | null | # making now ...
import threading
import os
import time
import signal
import RPi.GPIO as GPIO
import pygame.mixer
class BoardInfo(object):
STATE_LED = 1
STATE_BLUE = 2
STATE_FLASH = 3
def __init__(self, sw_pin=18, led_LED_pin=21, blue_LED_pin=20):
self.sw_pin = sw_pin
self.led_LED_pin = led_LED_pin
self.blue_LED_pin = blue_LED_pin
self.init_rasp_board()
self._sign_state = self.STATE_LED
def __del__(self):
self.output_led(0)
self.output_blue(0)
def init_rasp_board(self):
try:
#GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.sw_pin, GPIO.IN)
GPIO.setup(self.led_LED_pin, GPIO.OUT)
GPIO.setup(self.blue_LED_pin, GPIO.OUT)
except:
            print('Error: the pins are not plugged into the board, or the board is not connected')
raise
@property
def sign_state(self):
return self._sign_state
@sign_state.setter
def sign_state(self, sign_state):
        if sign_state not in (self.STATE_LED, self.STATE_BLUE, self.STATE_FLASH):
            raise ValueError('invalid state value specified for the board')
self._sign_state = sign_state
def output_blue_flash(self, state):
if not state:
self.output_blue(0)
self.output_blue(1)
time.sleep(0.2)
self.output_blue(0)
    def output_led(self, state):
        GPIO.output(self.led_LED_pin, state)
    def output_blue(self, state):
        GPIO.output(self.blue_LED_pin, state)
def input_led(self):
return GPIO.input(self.led_LED_pin)
def input_blue(self):
return GPIO.input(self.blue_LED_pin)
def input_sw(self):
return GPIO.input(self.sw_pin)
class PedestrianSigns(object):
"""
    This class is a singleton,
    but the board information can be swapped via dependency injection (DI).
"""
_instance = None
_lock = threading.Lock()
GUIDE_VOICE = 'guide_voice.wav'
    GUIDE_VOICE_MESSAGE = 'まもなく信号が赤になります'  # "The signal will turn red soon"
GUIDE_ALERT = 'guide_alert.mp3'
# Change board info with DI
    def __init__(self, board_info):
        self.board_info = board_info
        self.func_timer = None
    def __new__(cls, *args, **kwargs):
# Thread safe
with cls._lock:
if cls._instance is not None:
return cls._instance
cls._instance = super().__new__(cls)
cls._instance.init_music()
return cls._instance
# Initialize pygame & music file
def init_music(self):
pygame.mixer.init()
        pygame.mixer.music.load(self.GUIDE_ALERT)
        if not os.path.isfile(self.GUIDE_VOICE):
            self.make_voice_wav(self.GUIDE_VOICE, self.GUIDE_VOICE_MESSAGE)
# Make guide voice from message
def make_voice_wav(self, file_name, message):
jtalk_option="\
-m /usr/share/hts-voice/mei/mei_normal.htsvoice \
-x /var/lib/mecab/dic/open-jtalk/naist-jdic \
-ow {}".format(file_name)
os.system("echo {} | open_jtalk {}".format(message, jtalk_option))
def sign_led(self):
self.board_info.output_blue(0)
self.board_info.output_led(1)
self.board_info.sign_state = self.board_info.STATE_LED
self.func_timer = None
def alert_led(self):
print('warning red!')
def sign_blue_flash(self):
self.board_info.output_blue(1)
time.sleep(0.2)
self.board_info.output_blue(0)
self.board_info.sign_state = self.board_info.STATE_FLASH
def sign_blue(self):
self.board_info.output_led(0)
self.board_info.output_blue(1)
self.board_info.sign_state = self.board_info.STATE_BLUE
print('pappo-')
    def start(self):
while True:
            # if the state is red (LED) and the red LED is not lit yet
            if self.board_info.sign_state == self.board_info.STATE_LED and self.board_info.input_led() == 0:
                self.board_info.output_led(1)
            # while waiting indefinitely, a switch press turns the signal blue
            if self.board_info.input_sw() == 1 and self.func_timer is None:
                self.func_timer = FuncExecTimer(2, self.sign_blue)
            # once the timer that turns the signal blue has fired and the state is blue
            if self.func_timer is not None and self.func_timer.executed and self.board_info.sign_state == self.board_info.STATE_BLUE:
                FuncExecTimer(2, self.alert_led)
                self.func_timer = FuncExecTimer(5, self.sign_blue_flash)
            # while the state is flashing
            if self.board_info.sign_state == self.board_info.STATE_FLASH:
                # only right after the state switched to flashing
                if self.func_timer.executed:
                    self.func_timer = FuncExecTimer(2, self.sign_led)
                self.sign_blue_flash()
class FuncExecTimer(object):
def __init__(self, time_sec, func, *args):
self.time_sec = time_sec
self.func = func
self.func_args = args
self._executed = False
self.start()
@property
def executed(self):
return self._executed
def start(self):
        signal.signal(signal.SIGALRM, self.signal_wrapper(self.func))
        signal.setitimer(signal.ITIMER_REAL, self.time_sec)
    def signal_wrapper(self, func):
        def wrapper(signum, frame):
            self._executed = True
            func(*self.func_args)
        return wrapper
if __name__ == '__main__':
try:
board_info = BoardInfo()
pedestrian_signs = PedestrianSigns(board_info)
pedestrian_signs.start()
except KeyboardInterrupt:
        print('Exiting')
| 28.88587 | 108 | 0.623518 |
f36696c80e97352d002f772de9a12918802353d0 | 459 | py | Python | training/training_code/dataset/constants.py | Bhaskers-Blu-Org1/MAX-Recommender | a39da6d7f6028b56eada029461e18d3932d9fc88 | [
"Apache-2.0"
] | 1 | 2021-01-11T18:40:13.000Z | 2021-01-11T18:40:13.000Z | training/training_code/dataset/constants.py | kiminh/MAX-Recommender | 044525fa74af25ba3b8611695527eeea4b3ccf14 | [
"Apache-2.0"
] | null | null | null | training/training_code/dataset/constants.py | kiminh/MAX-Recommender | 044525fa74af25ba3b8611695527eeea4b3ccf14 | [
"Apache-2.0"
] | 1 | 2020-07-30T10:33:47.000Z | 2020-07-30T10:33:47.000Z | # Default column names
DEFAULT_USER_COL = "userID"
DEFAULT_ITEM_COL = "itemID"
DEFAULT_RATING_COL = "rating"
DEFAULT_LABEL_COL = "label"
DEFAULT_TIMESTAMP_COL = "timestamp"
DEFAULT_PREDICTION_COL = "prediction"
COL_DICT = {
"col_user": DEFAULT_USER_COL,
"col_item": DEFAULT_ITEM_COL,
"col_rating": DEFAULT_RATING_COL,
"col_prediction": DEFAULT_PREDICTION_COL
}
# Filtering variables
DEFAULT_K = 10
DEFAULT_THRESHOLD = 10
# Other
SEED = 42 | 22.95 | 44 | 0.760349 |
5965b025a7b55c94a71b22e9c12fd24674fc3a28 | 628 | py | Python | cybot/plug/start.py | francis-taylor/Timotty-Bot | 2cf7c9897ed31d26d331594e2578b253e3b970d8 | [
"MIT"
] | 6 | 2017-10-18T14:22:48.000Z | 2017-10-26T15:14:52.000Z | cybot/plug/start.py | Fr4ncisTaylor/Timotty | 2cf7c9897ed31d26d331594e2578b253e3b970d8 | [
"MIT"
] | 1 | 2017-10-20T19:16:03.000Z | 2017-10-20T19:16:03.000Z | cybot/plug/start.py | Fr4ncisTaylor/Timotty | 2cf7c9897ed31d26d331594e2578b253e3b970d8 | [
"MIT"
] | 7 | 2017-10-18T14:19:52.000Z | 2017-10-22T15:23:33.000Z | # -*- coding: utf-8 -*-
import mensagens
from inline import inline_keyboard
from metodos import *
m = mensagens
def start(msg):
if 'text' in msg:
texto = msg[u'text']
chat_id = msg['chat']['id']
nome = msg['from'][u'first_name']
from_id = msg['from']['id']
if texto == '/start':
keyboard = [[{'text': m.start['bot1'], 'url': config.grupo_url}] +
[{'text':m.start['bot2'], 'url': mensagens.creditos['url']}]]
markup = inline_keyboard(keyboard)
sendMessage(chat_id, m.start['mensagem'].format(nome), reply_markup=markup) | 28.545455 | 87 | 0.557325 |
d0a8194e0de050382cd4161dee3beac6475c12a8 | 1,446 | py | Python | src/ttu_encoder/mentioned_persons.py | ediewilson/ddhi-encoder | 020e7e5b237ce2d1d329432d5842fbe8dc8abcca | [
"MIT"
] | null | null | null | src/ttu_encoder/mentioned_persons.py | ediewilson/ddhi-encoder | 020e7e5b237ce2d1d329432d5842fbe8dc8abcca | [
"MIT"
] | null | null | null | src/ttu_encoder/mentioned_persons.py | ediewilson/ddhi-encoder | 020e7e5b237ce2d1d329432d5842fbe8dc8abcca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import sys
import csv
from ttu_encoder.ne_extractor import NeExtractor
from ttu_encoder import __version__
__author__ = "Clifford Wulfman"
__copyright__ = "Clifford Wulfman"
__license__ = "mit"
def to_tsv(list, stream=sys.stdout):
writer = csv.DictWriter(stream,
fieldnames=[k for k in list[0].keys()],
delimiter="\t")
writer.writeheader()
for row in list:
writer.writerow(row)
def parse_args(args):
parser = argparse.ArgumentParser(
description="export standoff persons to tsv")
parser.add_argument(
"--version",
action="version",
version="ttu-encoder {ver}".format(ver=__version__))
parser.add_argument('file', help="the TEI document")
parser.add_argument('-o', '--outfile',
dest="outfile",
default=sys.stdout.buffer,
help="output file (stdout by default)")
return parser.parse_args(args)
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
extractor = NeExtractor(args.file)
person_names_list = extractor.person_names_list()
to_tsv(person_names_list)
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
| 23.704918 | 67 | 0.618257 |
13c7f604e0acf1acd8ca666cb750d2f80c9b3e28 | 926 | py | Python | 0653_TwoSumBSTree.py | zsyc/LeetAlgo | 70793a26900824e308f69ec2b2299e04eb9c7646 | [
"MIT"
] | null | null | null | 0653_TwoSumBSTree.py | zsyc/LeetAlgo | 70793a26900824e308f69ec2b2299e04eb9c7646 | [
"MIT"
] | null | null | null | 0653_TwoSumBSTree.py | zsyc/LeetAlgo | 70793a26900824e308f69ec2b2299e04eb9c7646 | [
"MIT"
] | null | null | null | """
A property worth noting:
A binary search tree (BST), also called an ordered or sorted binary tree, is either
an empty tree or a binary tree with the following properties:
- if a node's left subtree is not empty, every value in that subtree is smaller than the node's value;
- if a node's right subtree is not empty, every value in that subtree is larger than the node's value;
- the left and right subtrees are themselves binary search trees;
- no two nodes hold equal keys.
This solution first walks the whole tree, collecting every value into an unordered set,
and then searches that set for the complement."""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def findTarget(self, root: TreeNode, k: int) -> bool:
btree = set([])
self.walkthrough(root, btree)
for ele in btree:
comp = k - ele
if comp in btree - set([ele]):
return True
return False
def walkthrough(self, root: TreeNode, btree: set):
if root is not None:
btree.add(root.val)
self.walkthrough(root.left, btree)
self.walkthrough(root.right, btree)
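# A small, hedged self-check (not part of the original submission): it reuses
# the TreeNode definition above and a hand-built example tree.
if __name__ == "__main__":
    #        5
    #       / \
    #      3   6
    #     / \   \
    #    2   4   7
    root = TreeNode(5)
    root.left, root.right = TreeNode(3), TreeNode(6)
    root.left.left, root.left.right = TreeNode(2), TreeNode(4)
    root.right.right = TreeNode(7)
    solver = Solution()
    print(solver.findTarget(root, 9))   # True, because 2 + 7 == 9
    print(solver.findTarget(root, 28))  # False, no pair sums to 28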
| 26.457143 | 116 | 0.62203 |
c8bc815c4dba25c6ba27a8d715efb676a179bfdb | 1,583 | py | Python | docs/src/parse_ranking2.py | vishalbelsare/RLScore | 713f0a402f7a09e41a609f2ddcaf849b2021a0a7 | [
"MIT"
] | 61 | 2015-03-06T08:48:01.000Z | 2021-04-26T16:13:07.000Z | docs/src/parse_ranking2.py | andrecamara/RLScore | 713f0a402f7a09e41a609f2ddcaf849b2021a0a7 | [
"MIT"
] | 5 | 2016-09-08T15:47:00.000Z | 2019-02-25T17:44:55.000Z | docs/src/parse_ranking2.py | vishalbelsare/RLScore | 713f0a402f7a09e41a609f2ddcaf849b2021a0a7 | [
"MIT"
] | 31 | 2015-01-28T15:05:33.000Z | 2021-04-16T19:39:48.000Z | import numpy as np
from rlscore.learner import QueryRankRLS
from rlscore.measure import cindex
from rlscore.utilities.reader import read_sparse
from rlscore.utilities.cross_validation import map_ids
def train_rls():
    #Evaluate performance with leave-query-out cross-validation,
#where instances related to a single sentence form
#together a fold
X_train = read_sparse("train_2000_x.txt")
Y_train = np.loadtxt("train_2000_y.txt")
X_test = read_sparse("test_2000_x.txt", X_train.shape[1])
Y_test = np.loadtxt("test_2000_y.txt")
#list of sentence ids
qids_train = np.loadtxt("train_2000_qids.txt")
qids_test = np.loadtxt("test_2000_qids.txt")
learner = QueryRankRLS(X_train, Y_train, qids_train)
P_test = learner.predict(X_test)
folds = map_ids(qids_train)
perfs = []
for fold in folds:
if np.var(Y_train[fold]) != 0:
P = learner.holdout(fold)
c = cindex(Y_train[fold], P)
perfs.append(c)
perf = np.mean(perfs)
print("leave-query-out cross-validation cindex %f" %perf)
partition = map_ids(qids_test)
test_perfs = []
#compute the ranking accuracy separately for each test query
for query in partition:
#skip such queries, where all instances have the same
#score, since in this case cindex is undefined
if np.var(Y_test[query]) != 0:
perf = cindex(Y_test[query], P_test[query])
test_perfs.append(perf)
test_perf = np.mean(test_perfs)
print("test cindex %f" %test_perf)
if __name__=="__main__":
train_rls()
| 35.977273 | 64 | 0.67909 |
2d1509929b6bfd6ac54cfaab9377d8a8c6f2cd2b | 2,626 | py | Python | python/crr/scattered/roll_zeropad.py | blanton144/crr | f4edd1bf3f4f1f7ea5d0f8bc3e14e223c83e02c1 | [
"BSD-3-Clause"
] | null | null | null | python/crr/scattered/roll_zeropad.py | blanton144/crr | f4edd1bf3f4f1f7ea5d0f8bc3e14e223c83e02c1 | [
"BSD-3-Clause"
] | null | null | null | python/crr/scattered/roll_zeropad.py | blanton144/crr | f4edd1bf3f4f1f7ea5d0f8bc3e14e223c83e02c1 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def roll_zeropad(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements off the end of the array are treated as zeros.
Parameters
----------
a : array_like
Input array.
shift : int
The number of places by which elements are shifted.
axis : int, optional
The axis along which elements are shifted. By default, the array
is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
roll : Elements that roll off one end come back on the other.
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Examples
--------
>>> x = np.arange(10)
>>> roll_zeropad(x, 2)
array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7])
>>> roll_zeropad(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 0])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> roll_zeropad(x2, 1)
array([[0, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> roll_zeropad(x2, -2)
array([[2, 3, 4, 5, 6],
[7, 8, 9, 0, 0]])
>>> roll_zeropad(x2, 1, axis=0)
array([[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4]])
>>> roll_zeropad(x2, -1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 0, 0, 0, 0]])
>>> roll_zeropad(x2, 1, axis=1)
array([[0, 0, 1, 2, 3],
[0, 5, 6, 7, 8]])
>>> roll_zeropad(x2, -2, axis=1)
array([[2, 3, 4, 0, 0],
[7, 8, 9, 0, 0]])
>>> roll_zeropad(x2, 50)
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> roll_zeropad(x2, -50)
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> roll_zeropad(x2, 0)
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
"""
a = np.asanyarray(a)
if shift == 0: return a
if axis is None:
n = a.size
reshape = True
else:
n = a.shape[axis]
reshape = False
if np.abs(shift) > n:
res = np.zeros_like(a)
elif shift < 0:
shift += n
zeros = np.zeros_like(a.take(np.arange(n - shift), axis))
res = np.concatenate((a.take(np.arange(n - shift, n), axis), zeros),
axis)
else:
zeros = np.zeros_like(a.take(np.arange(n - shift, n), axis))
res = np.concatenate((zeros, a.take(np.arange(n - shift), axis)),
axis)
if reshape:
return res.reshape(a.shape)
else:
return res
| 27.072165 | 76 | 0.476009 |
689206e51b9e2a34b7e43ac4df5cd34d0608c68a | 4,853 | py | Python | utils/dataloader_medical.py | Rossettaylm/unet_for_jamming_segmentation | 4c9898e4f4093d0deb6266b0380007d729857788 | [
"MIT"
] | 2 | 2021-12-03T04:39:15.000Z | 2021-12-09T14:33:17.000Z | utils/dataloader_medical.py | Rossettaylm/unet_for_jamming_segmentation | 4c9898e4f4093d0deb6266b0380007d729857788 | [
"MIT"
] | null | null | null | utils/dataloader_medical.py | Rossettaylm/unet_for_jamming_segmentation | 4c9898e4f4093d0deb6266b0380007d729857788 | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
from utils.utils import cvtColor, preprocess_input
class UnetDataset(Dataset):
def __init__(self, annotation_lines, input_shape, num_classes, train, dataset_path):
super(UnetDataset, self).__init__()
self.annotation_lines = annotation_lines
self.length = len(annotation_lines)
self.input_shape = input_shape
self.num_classes = num_classes
self.train = train
self.dataset_path = dataset_path
def __len__(self):
return self.length
def __getitem__(self, index):
annotation_line = self.annotation_lines[index]
name = annotation_line.split()[0]
#-------------------------------#
        #   read the image and its label from file
#-------------------------------#
jpg = Image.open(os.path.join(os.path.join(self.dataset_path, "Images"), name + ".png"))
png = Image.open(os.path.join(os.path.join(self.dataset_path, "Labels"), name + ".png"))
#-------------------------------#
        #   data augmentation
#-------------------------------#
jpg, png = self.get_random_data(jpg, png, self.input_shape, random = self.train)
jpg = np.transpose(preprocess_input(np.array(jpg, np.float64)), [2,0,1])
png = np.array(png)
#-------------------------------------------------------#
        #   the label handling here differs from the usual VOC processing:
        #   pixels with values <= 127.5 are marked as target pixels.
#-------------------------------------------------------#
modify_png = np.zeros_like(png)
modify_png[png <= 127.5] = 1
seg_labels = modify_png
seg_labels = np.eye(self.num_classes + 1)[seg_labels.reshape([-1])]
seg_labels = seg_labels.reshape((int(self.input_shape[0]), int(self.input_shape[1]), self.num_classes + 1))
return jpg, modify_png, seg_labels
def rand(self, a=0, b=1):
return np.random.rand() * (b - a) + a
def get_random_data(self, image, label, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, random=True):
image = cvtColor(image)
label = Image.fromarray(np.array(label))
h, w = input_shape
if not random:
iw, ih = image.size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', [w, h], (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
label = label.resize((nw,nh), Image.NEAREST)
new_label = Image.new('L', [w, h], (0))
new_label.paste(label, ((w-nw)//2, (h-nh)//2))
return new_image, new_label
# resize image
rand_jit1 = self.rand(1-jitter,1+jitter)
rand_jit2 = self.rand(1-jitter,1+jitter)
new_ar = w/h * rand_jit1/rand_jit2
scale = self.rand(0.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
label = label.resize((nw,nh), Image.NEAREST)
flip = self.rand()<.5
if flip:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
label = label.transpose(Image.FLIP_LEFT_RIGHT)
# place image
dx = int(self.rand(0, w-nw))
dy = int(self.rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_label = Image.new('L', (w,h), (0))
new_image.paste(image, (dx, dy))
new_label.paste(label, (dx, dy))
image = new_image
label = new_label
# distort image
hue = self.rand(-hue, hue)
sat = self.rand(1, sat) if self.rand()<.5 else 1/self.rand(1, sat)
val = self.rand(1, val) if self.rand()<.5 else 1/self.rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
return image_data, label
# used as the collate_fn of the DataLoader
def unet_dataset_collate(batch):
images = []
pngs = []
seg_labels = []
for img, png, labels in batch:
images.append(img)
pngs.append(png)
seg_labels.append(labels)
images = np.array(images)
pngs = np.array(pngs)
seg_labels = np.array(seg_labels)
return images, pngs, seg_labels
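# A minimal, hedged usage sketch: the sample ids, input shape and dataset path
# below are placeholder assumptions. Building the loader needs no files, but
# iterating it requires the Images/ and Labels/ folders to exist on disk.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    example_lines = ["case_0001", "case_0002"]
    dataset = UnetDataset(example_lines, input_shape=[512, 512], num_classes=1,
                          train=True, dataset_path="Medical_Datasets")
    loader = DataLoader(dataset, batch_size=2, shuffle=True,
                        collate_fn=unet_dataset_collate)
    print(len(dataset), "samples,", len(loader), "batches per epoch")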
| 35.948148 | 116 | 0.514733 |
a9856b65dc900dfd345b892a85c6d9281d9093a5 | 1,652 | py | Python | dask/array/tests/test_wrap.py | abhinavralhan/dask | e840ba38eadfa93c3b9959347f0a43c1279a94ab | [
"BSD-3-Clause"
] | 2 | 2018-12-29T13:47:40.000Z | 2018-12-29T13:47:49.000Z | dask/array/tests/test_wrap.py | abhinavralhan/dask | e840ba38eadfa93c3b9959347f0a43c1279a94ab | [
"BSD-3-Clause"
] | 2 | 2019-03-19T22:19:04.000Z | 2019-03-26T19:04:00.000Z | dask/array/tests/test_wrap.py | abhinavralhan/dask | e840ba38eadfa93c3b9959347f0a43c1279a94ab | [
"BSD-3-Clause"
] | 1 | 2021-03-28T04:50:43.000Z | 2021-03-28T04:50:43.000Z | import pytest
pytest.importorskip('numpy')
from dask.array.wrap import ones
import dask.array as da
import numpy as np
def test_ones():
a = ones((10, 10), dtype='i4', chunks=(4, 4))
x = np.array(a)
assert (x == np.ones((10, 10), 'i4')).all()
assert a.name.startswith('ones-')
def test_size_as_list():
a = ones([10, 10], dtype='i4', chunks=(4, 4))
x = np.array(a)
assert (x == np.ones((10, 10), dtype='i4')).all()
def test_singleton_size():
a = ones(10, dtype='i4', chunks=(4,))
x = np.array(a)
assert (x == np.ones(10, dtype='i4')).all()
def test_kwargs():
a = ones(10, dtype='i4', chunks=(4,))
x = np.array(a)
assert (x == np.ones(10, dtype='i4')).all()
def test_full():
a = da.full((3, 3), 100, chunks=(2, 2), dtype='i8')
assert (a.compute() == 100).all()
assert a.dtype == a.compute(scheduler='sync').dtype == 'i8'
assert a.name.startswith('full-')
def test_can_make_really_big_array_of_ones():
ones((1000000, 1000000), chunks=(100000, 100000))
ones(shape=(1000000, 1000000), chunks=(100000, 100000))
def test_wrap_consistent_names():
assert (sorted(ones(10, dtype='i4', chunks=(4,)).dask) ==
sorted(ones(10, dtype='i4', chunks=(4,)).dask))
assert (sorted(ones(10, dtype='i4', chunks=(4,)).dask) !=
sorted(ones(10, chunks=(4,)).dask))
assert (sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask) ==
sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask))
assert (sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='i2').dask) !=
sorted(da.full((3, 3), 100, chunks=(2, 2)).dask))
| 28.482759 | 75 | 0.580508 |
8e3403d70ad12c321735861a6fc4686da1cdc3e8 | 13,002 | py | Python | disnake/asset.py | Kraots/disnake | 9eb9ab81915dae7249ffb2b757dd6dee6090341e | [
"MIT"
] | null | null | null | disnake/asset.py | Kraots/disnake | 9eb9ab81915dae7249ffb2b757dd6dee6090341e | [
"MIT"
] | null | null | null | disnake/asset.py | Kraots/disnake | 9eb9ab81915dae7249ffb2b757dd6dee6090341e | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import io
import os
from typing import Any, Literal, Optional, TYPE_CHECKING, Tuple, Union
from .errors import DiscordException
from .errors import InvalidArgument
from . import utils
import yarl
__all__ = ("Asset",)
if TYPE_CHECKING:
ValidStaticFormatTypes = Literal["webp", "jpeg", "jpg", "png"]
ValidAssetFormatTypes = Literal["webp", "jpeg", "jpg", "png", "gif"]
VALID_STATIC_FORMATS = frozenset({"jpeg", "jpg", "webp", "png"})
VALID_ASSET_FORMATS = VALID_STATIC_FORMATS | {"gif"}
MISSING = utils.MISSING
class AssetMixin:
url: str
_state: Optional[Any]
async def read(self) -> bytes:
"""|coro|
Retrieves the content of this asset as a :class:`bytes` object.
Raises
------
DiscordException
There was no internal connection state.
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
Returns
-------
:class:`bytes`
The content of the asset.
"""
if self._state is None:
raise DiscordException("Invalid state (no ConnectionState provided)")
return await self._state.http.get_from_cdn(self.url)
async def save(
self, fp: Union[str, bytes, os.PathLike, io.BufferedIOBase], *, seek_begin: bool = True
) -> int:
"""|coro|
Saves this asset into a file-like object.
Parameters
----------
fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
Raises
------
DiscordException
There was no internal connection state.
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
data = await self.read()
if isinstance(fp, io.BufferedIOBase):
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, "wb") as f:
return f.write(data)
class Asset(AssetMixin):
"""Represents a CDN asset on Discord.
.. container:: operations
.. describe:: str(x)
Returns the URL of the CDN asset.
.. describe:: len(x)
Returns the length of the CDN asset's URL.
.. describe:: x == y
Checks if the asset is equal to another asset.
.. describe:: x != y
Checks if the asset is not equal to another asset.
.. describe:: hash(x)
Returns the hash of the asset.
"""
__slots__: Tuple[str, ...] = (
"_state",
"_url",
"_animated",
"_key",
)
BASE = "https://cdn.discordapp.com"
def __init__(self, state, *, url: str, key: str, animated: bool = False):
self._state = state
self._url = url
self._animated = animated
self._key = key
@classmethod
def _from_default_avatar(cls, state, index: int) -> Asset:
return cls(
state,
url=f"{cls.BASE}/embed/avatars/{index}.png",
key=str(index),
animated=False,
)
@classmethod
def _from_avatar(cls, state, user_id: int, avatar: str) -> Asset:
animated = avatar.startswith("a_")
format = "gif" if animated else "png"
return cls(
state,
url=f"{cls.BASE}/avatars/{user_id}/{avatar}.{format}?size=1024",
key=avatar,
animated=animated,
)
@classmethod
def _from_guild_avatar(cls, state, guild_id: int, member_id: int, avatar: str) -> Asset:
animated = avatar.startswith("a_")
format = "gif" if animated else "png"
return cls(
state,
url=f"{cls.BASE}/guilds/{guild_id}/users/{member_id}/avatars/{avatar}.{format}?size=1024",
key=avatar,
animated=animated,
)
@classmethod
def _from_icon(cls, state, object_id: int, icon_hash: str, path: str) -> Asset:
return cls(
state,
url=f"{cls.BASE}/{path}-icons/{object_id}/{icon_hash}.png?size=1024",
key=icon_hash,
animated=False,
)
@classmethod
def _from_cover_image(cls, state, object_id: int, cover_image_hash: str) -> Asset:
return cls(
state,
url=f"{cls.BASE}/app-assets/{object_id}/store/{cover_image_hash}.png?size=1024",
key=cover_image_hash,
animated=False,
)
@classmethod
def _from_guild_image(cls, state, guild_id: int, image: str, path: str) -> Asset:
return cls(
state,
url=f"{cls.BASE}/{path}/{guild_id}/{image}.png?size=1024",
key=image,
animated=False,
)
@classmethod
def _from_guild_icon(cls, state, guild_id: int, icon_hash: str) -> Asset:
animated = icon_hash.startswith("a_")
format = "gif" if animated else "png"
return cls(
state,
url=f"{cls.BASE}/icons/{guild_id}/{icon_hash}.{format}?size=1024",
key=icon_hash,
animated=animated,
)
@classmethod
def _from_sticker_banner(cls, state, banner: int) -> Asset:
return cls(
state,
url=f"{cls.BASE}/app-assets/710982414301790216/store/{banner}.png",
key=str(banner),
animated=False,
)
@classmethod
def _from_banner(cls, state, id: int, banner_hash: str) -> Asset:
animated = banner_hash.startswith("a_")
format = "gif" if animated else "png"
return cls(
state,
url=f"{cls.BASE}/banners/{id}/{banner_hash}.{format}?size=1024",
key=banner_hash,
animated=animated,
)
@classmethod
def _from_role_icon(cls, state, role_id: int, icon_hash: str) -> Asset:
animated = icon_hash.startswith("a_")
format = "gif" if animated else "png"
return cls(
state,
url=f"{cls.BASE}/role-icons/{role_id}/{icon_hash}.{format}?size=1024",
key=icon_hash,
animated=animated,
)
def __str__(self) -> str:
return self._url
def __len__(self) -> int:
return len(self._url)
def __repr__(self):
shorten = self._url.replace(self.BASE, "")
return f"<Asset url={shorten!r}>"
def __eq__(self, other):
return isinstance(other, Asset) and self._url == other._url
def __hash__(self):
return hash(self._url)
@property
def url(self) -> str:
""":class:`str`: Returns the underlying URL of the asset."""
return self._url
@property
def key(self) -> str:
""":class:`str`: Returns the identifying key of the asset."""
return self._key
def is_animated(self) -> bool:
""":class:`bool`: Returns whether the asset is animated."""
return self._animated
def replace(
self,
*,
size: int = MISSING,
format: ValidAssetFormatTypes = MISSING,
static_format: ValidStaticFormatTypes = MISSING,
) -> Asset:
"""Returns a new asset with the passed components replaced.
Parameters
-----------
size: :class:`int`
The new size of the asset.
format: :class:`str`
The new format to change it to. Must be either
'webp', 'jpeg', 'jpg', 'png', or 'gif' if it's animated.
static_format: :class:`str`
The new format to change it to if the asset isn't animated.
Must be either 'webp', 'jpeg', 'jpg', or 'png'.
Raises
-------
InvalidArgument
An invalid size or format was passed.
Returns
--------
:class:`Asset`
The newly updated asset.
"""
url = yarl.URL(self._url)
path, _ = os.path.splitext(url.path)
if format is not MISSING:
if self._animated:
if format not in VALID_ASSET_FORMATS:
raise InvalidArgument(f"format must be one of {VALID_ASSET_FORMATS}")
else:
if format not in VALID_STATIC_FORMATS:
raise InvalidArgument(f"format must be one of {VALID_STATIC_FORMATS}")
url = url.with_path(f"{path}.{format}")
if static_format is not MISSING and not self._animated:
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument(f"static_format must be one of {VALID_STATIC_FORMATS}")
url = url.with_path(f"{path}.{static_format}")
if size is not MISSING:
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
url = url.with_query(size=size)
else:
url = url.with_query(url.raw_query_string)
url = str(url)
return Asset(state=self._state, url=url, key=self._key, animated=self._animated)
def with_size(self, size: int, /) -> Asset:
"""Returns a new asset with the specified size.
Parameters
------------
size: :class:`int`
The new size of the asset.
Raises
-------
InvalidArgument
The asset had an invalid size.
Returns
--------
:class:`Asset`
The new updated asset.
"""
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
url = str(yarl.URL(self._url).with_query(size=size))
return Asset(state=self._state, url=url, key=self._key, animated=self._animated)
def with_format(self, format: ValidAssetFormatTypes, /) -> Asset:
"""Returns a new asset with the specified format.
Parameters
------------
format: :class:`str`
The new format of the asset.
Raises
-------
InvalidArgument
The asset had an invalid format.
Returns
--------
:class:`Asset`
The new updated asset.
"""
if self._animated:
if format not in VALID_ASSET_FORMATS:
raise InvalidArgument(f"format must be one of {VALID_ASSET_FORMATS}")
else:
if format not in VALID_STATIC_FORMATS:
raise InvalidArgument(f"format must be one of {VALID_STATIC_FORMATS}")
url = yarl.URL(self._url)
path, _ = os.path.splitext(url.path)
url = str(url.with_path(f"{path}.{format}").with_query(url.raw_query_string))
return Asset(state=self._state, url=url, key=self._key, animated=self._animated)
def with_static_format(self, format: ValidStaticFormatTypes, /) -> Asset:
"""Returns a new asset with the specified static format.
This only changes the format if the underlying asset is
not animated. Otherwise, the asset is not changed.
Parameters
------------
format: :class:`str`
The new static format of the asset.
Raises
-------
InvalidArgument
The asset had an invalid format.
Returns
--------
:class:`Asset`
The new updated asset.
"""
if self._animated:
return self
return self.with_format(format)
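# A minimal, hedged usage sketch (not part of the public API): the user id and
# avatar hash below are made-up values, and no HTTP request is performed.
def _asset_usage_sketch():
    avatar = Asset(
        None,  # no ConnectionState, so only URL manipulation is possible
        url=f"{Asset.BASE}/avatars/1/a_0123456789abcdef.gif?size=1024",
        key="a_0123456789abcdef",
        animated=True,
    )
    small = avatar.with_size(256)             # same asset, but with size=256
    still = avatar.with_static_format("png")  # unchanged, since the asset is animated
    return small.url, still.url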
| 30.378505 | 102 | 0.581603 |
c51b5f2f26d17474545aecfe05351eeafbca8ed0 | 989 | py | Python | heat/engine/clients/os/vitrage.py | odmanV2/heat | 76c20f1fc94a06ce5a00730c50952efe19ed0e3e | [
"Apache-2.0"
] | 265 | 2015-01-02T09:33:22.000Z | 2022-03-26T23:19:54.000Z | heat/engine/clients/os/vitrage.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | [
"Apache-2.0"
] | 8 | 2015-09-01T15:43:19.000Z | 2021-12-14T05:18:23.000Z | heat/engine/clients/os/vitrage.py | HyunJin-Jeong/heat | 8353fddf9ebfb0eca67d6f2b2feb529031acff89 | [
"Apache-2.0"
] | 295 | 2015-01-06T07:00:40.000Z | 2021-09-06T08:05:06.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine.clients import client_plugin
from oslo_log import log as logging
from vitrageclient import client as vitrage_client
LOG = logging.getLogger(__name__)
CLIENT_NAME = 'vitrage'
class VitrageClientPlugin(client_plugin.ClientPlugin):
exceptions_module = None
service_types = [RCA] = ['rca']
def _create(self):
return vitrage_client.Client('1', self.context.keystone_session)
| 31.903226 | 78 | 0.741153 |
4d0e71c5cf9fd179ecdf4a35c61c8414810d6a00 | 826 | py | Python | app/main/forms.py | Vector254/pitcher | 1eccdae41b8a639aef9bb7319ec86437ded60709 | [
"MIT"
] | null | null | null | app/main/forms.py | Vector254/pitcher | 1eccdae41b8a639aef9bb7319ec86437ded60709 | [
"MIT"
] | null | null | null | app/main/forms.py | Vector254/pitcher | 1eccdae41b8a639aef9bb7319ec86437ded60709 | [
"MIT"
] | 1 | 2020-10-02T14:41:40.000Z | 2020-10-02T14:41:40.000Z | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField,SubmitField,TextAreaField,RadioField
from wtforms.validators import Required,Email,EqualTo
from wtforms import ValidationError
class PitchForm(FlaskForm):
title = StringField("title",validators=[Required()])
pitch = TextAreaField("pitch",validators=[Required()])
category = RadioField('Label', choices=[('promotionpitch','promotionpitch'), ('interviewpitch','interviewpitch'),('pickuplines','pickuplines'),('productpitch','productpitch')],validators=[Required()])
submit = SubmitField('submit')
class CommentForm(FlaskForm):
description = TextAreaField('',validators=[Required()])
submit = SubmitField()
class UpvoteForm(FlaskForm):
submit = SubmitField()
class Downvote(FlaskForm):
submit = SubmitField()
| 34.416667 | 201 | 0.780872 |
74e5ce410f0dcddfca38dddd48fd7be59c6705ec | 369 | py | Python | Leetcode/week_1/p0557_reverse_words_in_a_string_iii.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | 1 | 2021-07-07T00:55:23.000Z | 2021-07-07T00:55:23.000Z | Leetcode/week_1/p0557_reverse_words_in_a_string_iii.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | Leetcode/week_1/p0557_reverse_words_in_a_string_iii.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | class Solution:
def reverseWords(self, s: str) -> str:
return ' '.join([word[::-1] for word in s.split()])
"""
Runtime: O(4N) ~ O(N)
Space: O(N)
Runtime: 28 ms, faster than 86.92% of Python3 online submissions for Reverse Words in a String III.
Memory Usage: 13.2 MB, less than 96.15% of Python3 online submissions for Reverse Words in a String III.
"""
| 28.384615 | 104 | 0.669377 |
d7aa580558eb445a73acd6467eb92eb2bc8465c2 | 191 | py | Python | metrics-calculator/chalicelib/get_splunk_api_token.py | nhsconnect/prm-practice-migration-dashboard | 40c8760f409834d05bde4fb015aa5f8765acaa82 | [
"0BSD"
] | null | null | null | metrics-calculator/chalicelib/get_splunk_api_token.py | nhsconnect/prm-practice-migration-dashboard | 40c8760f409834d05bde4fb015aa5f8765acaa82 | [
"0BSD"
] | null | null | null | metrics-calculator/chalicelib/get_splunk_api_token.py | nhsconnect/prm-practice-migration-dashboard | 40c8760f409834d05bde4fb015aa5f8765acaa82 | [
"0BSD"
] | null | null | null | def get_splunk_api_token(ssm, parameter_name):
parameter_details = ssm.get_parameter(
Name=parameter_name, WithDecryption=True)
return parameter_details['Parameter']['Value']
| 38.2 | 50 | 0.764398 |
f6b48ccb678dee964cc7b12444188b97e73802e7 | 276 | py | Python | Task_4/Q1_part_d.py | ayyzenn/Python_Codes | cdcd9dcb9e4301c4a84637a40eaad357d59a32d8 | [
"MIT"
] | null | null | null | Task_4/Q1_part_d.py | ayyzenn/Python_Codes | cdcd9dcb9e4301c4a84637a40eaad357d59a32d8 | [
"MIT"
] | null | null | null | Task_4/Q1_part_d.py | ayyzenn/Python_Codes | cdcd9dcb9e4301c4a84637a40eaad357d59a32d8 | [
"MIT"
] | null | null | null | #Question:1 part d
with open('names.txt', 'r') as n:
    sum_of_length = 0
    for i in n:
        sum_of_length += len(i.strip())
with open('names.txt', 'r') as n:
    number_of_names = len(n.read().split())
avg = sum_of_length / number_of_names
print("The average of the lengths of the names in names.txt is", avg)
| 23 | 68 | 0.652174 |
3abe0173728a43474bf2fc0ed8b2fd9b817d4595 | 36,415 | py | Python | oletools/oleobj.py | n3m351d4/oletools | 15aad9601302c164fccb20020bf98699364ad78c | [
"BSD-2-Clause"
] | null | null | null | oletools/oleobj.py | n3m351d4/oletools | 15aad9601302c164fccb20020bf98699364ad78c | [
"BSD-2-Clause"
] | null | null | null | oletools/oleobj.py | n3m351d4/oletools | 15aad9601302c164fccb20020bf98699364ad78c | [
"BSD-2-Clause"
] | 1 | 2020-07-25T00:23:51.000Z | 2020-07-25T00:23:51.000Z | #!/usr/bin/env python
"""
oleobj.py
oleobj is a Python script and module to parse OLE objects and files stored
into various MS Office file formats (doc, xls, ppt, docx, xlsx, pptx, etc)
Author: Philippe Lagadec - http://www.decalage.info
License: BSD, see source code or documentation
oleobj is part of the python-oletools package:
http://www.decalage.info/python/oletools
"""
# === LICENSE =================================================================
# oleobj is copyright (c) 2015-2019 Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -- IMPORTS ------------------------------------------------------------------
from __future__ import print_function
import logging
import struct
import argparse
import os
import re
import sys
import io
from zipfile import is_zipfile
import olefile
# IMPORTANT: it should be possible to run oletools directly as scripts
# in any directory without installing them with pip or setup.py.
# In that case, relative imports are NOT usable.
# And to enable Python 2+3 compatibility, we need to use absolute imports,
# so we add the oletools parent folder to sys.path (absolute+normalized path):
_thismodule_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
# print('_thismodule_dir = %r' % _thismodule_dir)
_parent_dir = os.path.normpath(os.path.join(_thismodule_dir, '..'))
# print('_parent_dir = %r' % _thirdparty_dir)
if _parent_dir not in sys.path:
sys.path.insert(0, _parent_dir)
from oletools.thirdparty import xglob
from oletools.ppt_record_parser import (is_ppt, PptFile,
PptRecordExOleVbaActiveXAtom)
from oletools.ooxml import XmlParser
from oletools.common.io_encoding import ensure_stdout_handles_unicode
# -----------------------------------------------------------------------------
# CHANGELOG:
# 2015-12-05 v0.01 PL: - first version
# 2016-06 PL: - added main and process_file (not working yet)
# 2016-07-18 v0.48 SL: - added Python 3.5 support
# 2016-07-19 PL: - fixed Python 2.6-7 support
# 2016-11-17 v0.51 PL: - fixed OLE native object extraction
# 2016-11-18 PL: - added main for setup.py entry point
# 2017-05-03 PL: - fixed absolute imports (issue #141)
# 2018-01-18 v0.52 CH: - added support for zipped-xml-based types (docx, pptx,
# xlsx), and ppt
# 2018-03-27 PL: - fixed issue #274 in read_length_prefixed_string
# 2018-09-11 v0.54 PL: - olefile is now a dependency
# 2018-10-30 SA: - added detection of external links (PR #317)
__version__ = '0.55dev5'
# -----------------------------------------------------------------------------
# TODO:
# + setup logging (common with other oletools)
# -----------------------------------------------------------------------------
# REFERENCES:
# Reference for the storage of embedded OLE objects/files:
# [MS-OLEDS]: Object Linking and Embedding (OLE) Data Structures
# https://msdn.microsoft.com/en-us/library/dd942265.aspx
# - officeparser: https://github.com/unixfreak0037/officeparser
# TODO: oledump
# === LOGGING =================================================================
DEFAULT_LOG_LEVEL = "warning"
LOG_LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'debug-olefile': logging.DEBUG}
class NullHandler(logging.Handler):
"""
Log Handler without output, to avoid printing messages if logging is not
configured by the main application.
Python 2.7 has logging.NullHandler, but this is necessary for 2.6:
see https://docs.python.org/2.6/library/logging.html section
configuring-logging-for-a-library
"""
def emit(self, record):
pass
def get_logger(name, level=logging.CRITICAL+1):
"""
Create a suitable logger object for this module.
The goal is not to change settings of the root logger, to avoid getting
other modules' logs on the screen.
If a logger exists with same name, reuse it. (Else it would have duplicate
handlers and messages would be doubled.)
The level is set to CRITICAL+1 by default, to avoid any logging.
"""
# First, test if there is already a logger with the same name, else it
# will generate duplicate messages (due to duplicate handlers):
if name in logging.Logger.manager.loggerDict:
# NOTE: another less intrusive but more "hackish" solution would be to
# use getLogger then test if its effective level is not default.
logger = logging.getLogger(name)
# make sure level is OK:
logger.setLevel(level)
return logger
# get a new logger:
logger = logging.getLogger(name)
# only add a NullHandler for this logger, it is up to the application
# to configure its own logging:
logger.addHandler(NullHandler())
logger.setLevel(level)
return logger
# a global logger object used for debugging:
log = get_logger('oleobj') # pylint: disable=invalid-name
def enable_logging():
"""
Enable logging for this module (disabled by default).
This will set the module-specific logger level to NOTSET, which
means the main application controls the actual logging level.
"""
log.setLevel(logging.NOTSET)
# === CONSTANTS ===============================================================
# some str methods on Python 2.x return characters,
# while the equivalent bytes methods return integers on Python 3.x:
if sys.version_info[0] <= 2:
# Python 2.x
NULL_CHAR = '\x00'
else:
# Python 3.x
NULL_CHAR = 0 # pylint: disable=redefined-variable-type
xrange = range # pylint: disable=redefined-builtin, invalid-name
OOXML_RELATIONSHIP_TAG = '{http://schemas.openxmlformats.org/package/2006/relationships}Relationship'
# === GLOBAL VARIABLES ========================================================
# struct to parse an unsigned integer of 32 bits:
STRUCT_UINT32 = struct.Struct('<L')
assert STRUCT_UINT32.size == 4 # make sure it matches 4 bytes
# struct to parse an unsigned integer of 16 bits:
STRUCT_UINT16 = struct.Struct('<H')
assert STRUCT_UINT16.size == 2 # make sure it matches 2 bytes
# max length of a zero-terminated ansi string. Not sure what this really is
STR_MAX_LEN = 1024
# size of chunks to copy from ole stream to file
DUMP_CHUNK_SIZE = 4096
# return values from main; can be added
# (e.g.: did dump but had err parsing and dumping --> return 1+4+8 = 13)
RETURN_NO_DUMP = 0 # nothing found to dump/extract
RETURN_DID_DUMP = 1 # did dump/extract successfully
RETURN_ERR_ARGS = 2 # reserve for OptionParser.parse_args
RETURN_ERR_STREAM = 4 # error opening/parsing a stream
RETURN_ERR_DUMP = 8 # error dumping data from stream to file
# Not sure if they can all be "External", but just in case
BLACKLISTED_RELATIONSHIP_TYPES = [
'attachedTemplate',
'externalLink',
'externalLinkPath',
    'externalReference',
    'frame',
    'hyperlink',
'officeDocument',
'oleObject',
'package',
'slideUpdateUrl',
'slideMaster',
'slide',
'slideUpdateInfo',
'subDocument',
'worksheet'
]
# === FUNCTIONS ===============================================================
def read_uint32(data, index):
"""
Read an unsigned integer from the first 32 bits of data.
:param data: bytes string or stream containing the data to be extracted.
:param index: index to start reading from or None if data is stream.
:return: tuple (value, index) containing the read value (int),
and the index to continue reading next time.
"""
if index is None:
value = STRUCT_UINT32.unpack(data.read(4))[0]
else:
value = STRUCT_UINT32.unpack(data[index:index+4])[0]
index += 4
return (value, index)
def read_uint16(data, index):
"""
Read an unsigned integer from the 16 bits of data following index.
:param data: bytes string or stream containing the data to be extracted.
:param index: index to start reading from or None if data is stream
:return: tuple (value, index) containing the read value (int),
and the index to continue reading next time.
"""
if index is None:
value = STRUCT_UINT16.unpack(data.read(2))[0]
else:
value = STRUCT_UINT16.unpack(data[index:index+2])[0]
index += 2
return (value, index)
def read_length_prefixed_string(data, index):
"""
Read a length-prefixed ANSI string from data.
:param data: bytes string or stream containing the data to be extracted.
:param index: index in data where string size start or None if data is
stream
:return: tuple (value, index) containing the read value (bytes string),
and the index to start reading from next time.
"""
length, index = read_uint32(data, index)
# if length = 0, return a null string (no null character)
if length == 0:
return ('', index)
# extract the string without the last null character
if index is None:
ansi_string = data.read(length-1)
null_char = data.read(1)
else:
ansi_string = data[index:index+length-1]
null_char = data[index+length-1]
index += length
# TODO: only in strict mode:
# check the presence of the null char:
assert null_char == NULL_CHAR
return (ansi_string, index)
def guess_encoding(data):
""" guess encoding of byte string to create unicode
Since this is used to decode path names from ole objects, prefer latin1
over utf* codecs if ascii is not enough
"""
for encoding in 'ascii', 'latin1', 'utf8', 'utf-16-le', 'utf16':
try:
result = data.decode(encoding, errors='strict')
log.debug(u'decoded using {0}: "{1}"'.format(encoding, result))
return result
except UnicodeError:
pass
log.warning('failed to guess encoding for string, falling back to '
'ascii with replace')
return data.decode('ascii', errors='replace')
def read_zero_terminated_string(data, index):
"""
Read a zero-terminated string from data
:param data: bytes string or stream containing an ansi string
:param index: index at which the string should start or None if data is
stream
:return: tuple (unicode, index) containing the read string (unicode),
and the index to start reading from next time.
"""
if index is None:
result = bytearray()
for _ in xrange(STR_MAX_LEN):
char = ord(data.read(1)) # need ord() for py3
if char == 0:
return guess_encoding(result), index
result.append(char)
raise ValueError('found no string-terminating zero-byte!')
else: # data is byte array, can just search
end_idx = data.index(b'\x00', index, index+STR_MAX_LEN)
# encode and return with index after the 0-byte
return guess_encoding(data[index:end_idx]), end_idx+1
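# A minimal, hedged sketch (not part of the original API) of how the reader
# helpers above consume a length-prefixed ANSI string as stored by OLE 1.0:
# a 4-byte little-endian length followed by that many bytes, the last one
# being the terminating null. The sample bytes are hand-built for illustration.
def _demo_read_length_prefixed_string():
    sample = STRUCT_UINT32.pack(8) + b'Package\x00'
    value, next_index = read_length_prefixed_string(sample, 0)
    assert value == b'Package' and next_index == len(sample)
    return value, next_index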
# === CLASSES =================================================================
class OleNativeStream(object):
"""
OLE object contained into an OLENativeStream structure.
(see MS-OLEDS 2.3.6 OLENativeStream)
Filename and paths are decoded to unicode.
"""
# constants for the type attribute:
# see MS-OLEDS 2.2.4 ObjectHeader
TYPE_LINKED = 0x01
TYPE_EMBEDDED = 0x02
def __init__(self, bindata=None, package=False):
"""
Constructor for OleNativeStream.
If bindata is provided, it will be parsed using the parse() method.
:param bindata: forwarded to parse, see docu there
:param package: bool, set to True when extracting from an OLE Package
object
"""
self.filename = None
self.src_path = None
self.unknown_short = None
self.unknown_long_1 = None
self.unknown_long_2 = None
self.temp_path = None
self.actual_size = None
self.data = None
self.package = package
self.is_link = None
self.data_is_stream = None
if bindata is not None:
self.parse(data=bindata)
def parse(self, data):
"""
Parse binary data containing an OLENativeStream structure,
to extract the OLE object it contains.
(see MS-OLEDS 2.3.6 OLENativeStream)
:param data: bytes array or stream, containing OLENativeStream
structure containing an OLE object
:return: None
"""
# TODO: strict mode to raise exceptions when values are incorrect
# (permissive mode by default)
if hasattr(data, 'read'):
self.data_is_stream = True
index = None # marker for read_* functions to expect stream
else:
self.data_is_stream = False
index = 0 # marker for read_* functions to expect array
# An OLE Package object does not have the native data size field
if not self.package:
self.native_data_size, index = read_uint32(data, index)
log.debug('OLE native data size = {0:08X} ({0} bytes)'
.format(self.native_data_size))
# I thought this might be an OLE type specifier ???
self.unknown_short, index = read_uint16(data, index)
self.filename, index = read_zero_terminated_string(data, index)
# source path
self.src_path, index = read_zero_terminated_string(data, index)
# TODO: I bet these 8 bytes are a timestamp ==> FILETIME from olefile
self.unknown_long_1, index = read_uint32(data, index)
self.unknown_long_2, index = read_uint32(data, index)
# temp path?
self.temp_path, index = read_zero_terminated_string(data, index)
# size of the rest of the data
try:
self.actual_size, index = read_uint32(data, index)
if self.data_is_stream:
self.data = data
else:
self.data = data[index:index+self.actual_size]
self.is_link = False
# TODO: there can be extra data, no idea what it is for
# TODO: SLACK DATA
except (IOError, struct.error): # no data to read actual_size
log.debug('data is not embedded but only a link')
self.is_link = True
self.actual_size = 0
self.data = None
class OleObject(object):
"""
OLE 1.0 Object
see MS-OLEDS 2.2 OLE1.0 Format Structures
"""
# constants for the format_id attribute:
# see MS-OLEDS 2.2.4 ObjectHeader
TYPE_LINKED = 0x01
TYPE_EMBEDDED = 0x02
def __init__(self, bindata=None):
"""
Constructor for OleObject.
If bindata is provided, it will be parsed using the parse() method.
:param bindata: bytes, OLE 1.0 Object structure containing OLE object
Note: Code can easily by generalized to work with byte streams instead
of arrays just like in OleNativeStream.
"""
self.ole_version = None
self.format_id = None
self.class_name = None
self.topic_name = None
self.item_name = None
self.data = None
self.data_size = None
if bindata is not None:
self.parse(bindata)
def parse(self, data):
"""
Parse binary data containing an OLE 1.0 Object structure,
to extract the OLE object it contains.
(see MS-OLEDS 2.2 OLE1.0 Format Structures)
:param data: bytes, OLE 1.0 Object structure containing an OLE object
:return:
"""
# from ezhexviewer import hexdump3
# print("Parsing OLE object data:")
# print(hexdump3(data, length=16))
# Header: see MS-OLEDS 2.2.4 ObjectHeader
index = 0
self.ole_version, index = read_uint32(data, index)
self.format_id, index = read_uint32(data, index)
log.debug('OLE version=%08X - Format ID=%08X',
self.ole_version, self.format_id)
assert self.format_id in (self.TYPE_EMBEDDED, self.TYPE_LINKED)
self.class_name, index = read_length_prefixed_string(data, index)
self.topic_name, index = read_length_prefixed_string(data, index)
self.item_name, index = read_length_prefixed_string(data, index)
log.debug('Class name=%r - Topic name=%r - Item name=%r',
self.class_name, self.topic_name, self.item_name)
if self.format_id == self.TYPE_EMBEDDED:
# Embedded object: see MS-OLEDS 2.2.5 EmbeddedObject
# assert self.topic_name != '' and self.item_name != ''
self.data_size, index = read_uint32(data, index)
log.debug('Declared data size=%d - remaining size=%d',
self.data_size, len(data)-index)
# TODO: handle incorrect size to avoid exception
self.data = data[index:index+self.data_size]
assert len(self.data) == self.data_size
self.extra_data = data[index+self.data_size:]
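# A minimal usage sketch for OleObject, assuming 'data' already holds the bytes
# of an OLE 1.0 Object structure (for example carved out of an RTF \objdata
# block; obtaining those bytes is outside this sketch):
#
#     obj = OleObject(data)
#     if obj.format_id == OleObject.TYPE_EMBEDDED:
#         print(obj.class_name, obj.data_size)
#         payload = obj.data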
def sanitize_filename(filename, replacement='_', max_length=200):
"""compute basename of filename. Replaces all non-whitelisted characters.
    The returned filename is always an ASCII basename of the file."""
basepath = os.path.basename(filename).strip()
sane_fname = re.sub(u'[^a-zA-Z0-9.\\-_ ]', replacement, basepath)
sane_fname = str(sane_fname) # py3: does nothing; py2: unicode --> str
while ".." in sane_fname:
sane_fname = sane_fname.replace('..', '.')
while " " in sane_fname:
sane_fname = sane_fname.replace(' ', ' ')
if not filename:
sane_fname = 'NONAME'
# limit filename length
if max_length:
sane_fname = sane_fname[:max_length]
return sane_fname
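# Rough illustration of the sanitization above (example values, not test cases):
#
#     sanitize_filename('re/port*2021?.docx')   # -> 'port_2021_.docx'
#     sanitize_filename('')                     # -> 'NONAME'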
def find_ole_in_ppt(filename):
""" find ole streams in ppt
This may be a bit confusing: we get an ole file (or its name) as input and
as output we produce possibly several ole files. This is because the
data structure can be pretty nested:
A ppt file has many streams that consist of records. Some of these records
can contain data which contains data for another complete ole file (which
we yield). This embedded ole file can have several streams, one of which
can contain the actual embedded file we are looking for (caller will check
for these).
"""
ppt_file = None
try:
ppt_file = PptFile(filename)
for stream in ppt_file.iter_streams():
for record_idx, record in enumerate(stream.iter_records()):
if isinstance(record, PptRecordExOleVbaActiveXAtom):
ole = None
try:
data_start = next(record.iter_uncompressed())
if data_start[:len(olefile.MAGIC)] != olefile.MAGIC:
continue # could be ActiveX control / VBA Storage
# otherwise, this should be an OLE object
log.debug('Found record with embedded ole object in '
'ppt (stream "{0}", record no {1})'
.format(stream.name, record_idx))
ole = record.get_data_as_olefile()
yield ole
except IOError:
log.warning('Error reading data from {0} stream or '
'interpreting it as OLE object'
.format(stream.name))
log.debug('', exc_info=True)
finally:
if ole is not None:
ole.close()
finally:
if ppt_file is not None:
ppt_file.close()
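# Typical iteration over the generator above; 'slides.ppt' is an assumed
# legacy binary PowerPoint file:
#
#     for ole in find_ole_in_ppt('slides.ppt'):
#         print(ole.listdir())   # each ole is closed again by the generator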
class FakeFile(io.RawIOBase):
""" create file-like object from data without copying it
BytesIO is what I would like to use but it copies all the data. This class
does not. On the downside: data can only be read and seeked, not written.
Assume that given data is bytes (str in py2, bytes in py3).
See also (and maybe can put into common file with):
ppt_record_parser.IterStream, ooxml.ZipSubFile
"""
def __init__(self, data):
""" create FakeFile with given bytes data """
super(FakeFile, self).__init__()
self.data = data # this does not actually copy (python is lazy)
self.pos = 0
self.size = len(data)
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return True
def readinto(self, target):
""" read into pre-allocated target """
n_data = min(len(target), self.size-self.pos)
if n_data == 0:
return 0
target[:n_data] = self.data[self.pos:self.pos+n_data]
self.pos += n_data
return n_data
def read(self, n_data=-1):
""" read and return data """
if self.pos >= self.size:
return bytes()
if n_data == -1:
n_data = self.size - self.pos
result = self.data[self.pos:self.pos+n_data]
self.pos += n_data
return result
def seek(self, pos, offset=io.SEEK_SET):
""" jump to another position in file """
# calc target position from self.pos, pos and offset
if offset == io.SEEK_SET:
new_pos = pos
elif offset == io.SEEK_CUR:
new_pos = self.pos + pos
elif offset == io.SEEK_END:
new_pos = self.size + pos
else:
raise ValueError("invalid offset {0}, need SEEK_* constant"
.format(offset))
if new_pos < 0:
raise IOError('Seek beyond start of file not allowed')
self.pos = new_pos
def tell(self):
""" tell where in file we are positioned """
return self.pos
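# Usage sketch: wrap in-memory bytes so that modules expecting a file object
# (e.g. zipfile) can work on them without copying; 'blob.docx' is illustrative:
#
#     with open('blob.docx', 'rb') as handle:
#         data = handle.read()
#     fake = FakeFile(data)
#     print(is_zipfile(fake))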
def find_ole(filename, data, xml_parser=None):
""" try to open somehow as zip/ole/rtf/... ; yield None if fail
If data is given, filename is (mostly) ignored.
yields embedded ole streams in form of OleFileIO.
"""
if data is not None:
# isOleFile and is_ppt can work on data directly but zip need file
# --> wrap data in a file-like object without copying data
log.debug('working on data, file is not touched below')
arg_for_ole = data
arg_for_zip = FakeFile(data)
else:
# we only have a file name
log.debug('working on file by name')
arg_for_ole = filename
arg_for_zip = filename
ole = None
try:
if olefile.isOleFile(arg_for_ole):
if is_ppt(arg_for_ole):
log.info('is ppt file: ' + filename)
for ole in find_ole_in_ppt(arg_for_ole):
yield ole
ole = None # is closed in find_ole_in_ppt
# in any case: check for embedded stuff in non-sectored streams
log.info('is ole file: ' + filename)
ole = olefile.OleFileIO(arg_for_ole)
yield ole
elif xml_parser is not None or is_zipfile(arg_for_zip):
# keep compatibility with 3rd-party code that calls this function
# directly without providing an XmlParser instance
if xml_parser is None:
xml_parser = XmlParser(arg_for_zip)
# force iteration so XmlParser.iter_non_xml() returns data
[x for x in xml_parser.iter_xml()]
log.info('is zip file: ' + filename)
# we looped through the XML files before, now we can
# iterate the non-XML files looking for ole objects
for subfile, _, file_handle in xml_parser.iter_non_xml():
try:
head = file_handle.read(len(olefile.MAGIC))
except RuntimeError:
log.error('zip is encrypted: ' + filename)
yield None
continue
if head == olefile.MAGIC:
file_handle.seek(0)
log.info(' unzipping ole: ' + subfile)
try:
ole = olefile.OleFileIO(file_handle)
yield ole
except IOError:
log.warning('Error reading data from {0}/{1} or '
'interpreting it as OLE object'
.format(filename, subfile))
log.debug('', exc_info=True)
finally:
if ole is not None:
ole.close()
ole = None
else:
log.debug('unzip skip: ' + subfile)
else:
log.warning('open failed: {0} (or its data) is neither zip nor OLE'
.format(filename))
yield None
except Exception:
log.error('Caught exception opening {0}'.format(filename),
exc_info=True)
yield None
finally:
if ole is not None:
ole.close()
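# Usage sketch for the generator above ('report.doc' is an assumed input file;
# passing data=None makes it read from disk):
#
#     for ole in find_ole('report.doc', None):
#         if ole is None:          # nothing recognizable was found
#             continue
#         print(ole.listdir())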
def find_external_relationships(xml_parser):
""" iterate XML files looking for relationships to external objects
"""
for _, elem, _ in xml_parser.iter_xml(None, False, OOXML_RELATIONSHIP_TAG):
try:
if elem.attrib['TargetMode'] == 'External':
relationship_type = elem.attrib['Type'].rsplit('/', 1)[1]
if relationship_type in BLACKLISTED_RELATIONSHIP_TYPES:
yield relationship_type, elem.attrib['Target']
except (AttributeError, KeyError):
# ignore missing attributes - Word won't detect
# external links anyway
pass
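# Usage sketch, assuming 'letter.docx' is an OOXML file:
#
#     xml_parser = XmlParser('letter.docx')
#     for relationship, target in find_external_relationships(xml_parser):
#         print(relationship, target)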
def process_file(filename, data, output_dir=None):
""" find embedded objects in given file
if data is given (from xglob for encrypted zip files), then filename is
not used for reading. If not (usual case), then data is read from filename
on demand.
If output_dir is given and does not exist, it is created. If it is not
given, data is saved to same directory as the input file.
"""
if output_dir:
if not os.path.isdir(output_dir):
log.info('creating output directory %s', output_dir)
os.mkdir(output_dir)
fname_prefix = os.path.join(output_dir,
sanitize_filename(filename))
else:
base_dir = os.path.dirname(filename)
sane_fname = sanitize_filename(filename)
fname_prefix = os.path.join(base_dir, sane_fname)
# TODO: option to extract objects to files (false by default)
print('-'*79)
print('File: %r' % filename)
index = 1
# do not throw errors but remember them and try continue with other streams
err_stream = False
err_dumping = False
did_dump = False
xml_parser = None
if is_zipfile(filename):
log.info('file could be an OOXML file, looking for relationships with '
'external links')
xml_parser = XmlParser(filename)
for relationship, target in find_external_relationships(xml_parser):
did_dump = True
print("Found relationship '%s' with external link %s" % (relationship, target))
# look for ole files inside file (e.g. unzip docx)
# have to finish work on every ole stream inside iteration, since handles
# are closed in find_ole
for ole in find_ole(filename, data, xml_parser):
if ole is None: # no ole file found
continue
for path_parts in ole.listdir():
stream_path = '/'.join(path_parts)
log.debug('Checking stream %r', stream_path)
if path_parts[-1] == '\x01Ole10Native':
stream = None
try:
stream = ole.openstream(path_parts)
print('extract file embedded in OLE object from stream %r:'
% stream_path)
print('Parsing OLE Package')
opkg = OleNativeStream(stream)
# leave stream open until dumping is finished
except Exception:
log.warning('*** Not an OLE 1.0 Object')
err_stream = True
if stream is not None:
stream.close()
continue
# print info
if opkg.is_link:
log.debug('Object is not embedded but only linked to '
'- skip')
continue
print(u'Filename = "%s"' % opkg.filename)
print(u'Source path = "%s"' % opkg.src_path)
print(u'Temp path = "%s"' % opkg.temp_path)
if opkg.filename:
fname = '%s_%s' % (fname_prefix,
sanitize_filename(opkg.filename))
else:
fname = '%s_object_%03d.noname' % (fname_prefix, index)
# dump
try:
print('saving to file %s' % fname)
with open(fname, 'wb') as writer:
n_dumped = 0
next_size = min(DUMP_CHUNK_SIZE, opkg.actual_size)
while next_size:
data = stream.read(next_size)
writer.write(data)
n_dumped += len(data)
if len(data) != next_size:
log.warning('Wanted to read {0}, got {1}'
.format(next_size, len(data)))
break
next_size = min(DUMP_CHUNK_SIZE,
opkg.actual_size - n_dumped)
did_dump = True
except Exception as exc:
log.warning('error dumping to {0} ({1})'
.format(fname, exc))
err_dumping = True
finally:
stream.close()
index += 1
return err_stream, err_dumping, did_dump
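# Usage sketch: dump embedded objects of one file into a separate directory
# ('invoice.docx' and 'extracted' are illustrative values):
#
#     err_stream, err_dumping, did_dump = process_file('invoice.docx', None,
#                                                      output_dir='extracted')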
# === MAIN ====================================================================
def existing_file(filename):
""" called by argument parser to see whether given file exists """
if not os.path.isfile(filename):
raise argparse.ArgumentTypeError('{0} is not a file.'.format(filename))
return filename
def main(cmd_line_args=None):
""" main function, called when running this as script
    By default (cmd_line_args=None) uses sys.argv. For testing, however, can
provide other arguments.
"""
# print banner with version
ensure_stdout_handles_unicode()
print('oleobj %s - http://decalage.info/oletools' % __version__)
print('THIS IS WORK IN PROGRESS - Check updates regularly!')
print('Please report any issue at '
'https://github.com/decalage2/oletools/issues')
print('')
usage = 'usage: %(prog)s [options] <filename> [filename2 ...]'
parser = argparse.ArgumentParser(usage=usage)
# parser.add_argument('-o', '--outfile', dest='outfile',
# help='output file')
# parser.add_argument('-c', '--csv', dest='csv',
# help='export results to a CSV file')
parser.add_argument("-r", action="store_true", dest="recursive",
help='find files recursively in subdirectories.')
parser.add_argument("-d", type=str, dest="output_dir", default=None,
help='use specified directory to output files.')
parser.add_argument("-z", "--zip", dest='zip_password', type=str,
default=None,
help='if the file is a zip archive, open first file '
'from it, using the provided password (requires '
'Python 2.6+)')
parser.add_argument("-f", "--zipfname", dest='zip_fname', type=str,
default='*',
help='if the file is a zip archive, file(s) to be '
'opened within the zip. Wildcards * and ? are '
'supported. (default:*)')
parser.add_argument('-l', '--loglevel', dest="loglevel", action="store",
default=DEFAULT_LOG_LEVEL,
help='logging level debug/info/warning/error/critical '
'(default=%(default)s)')
parser.add_argument('input', nargs='*', type=existing_file, metavar='FILE',
help='Office files to parse (same as -i)')
# options for compatibility with ripOLE
parser.add_argument('-i', '--more-input', type=str, metavar='FILE',
help='Additional file to parse (same as positional '
'arguments)')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose mode, set logging to DEBUG '
'(overwrites -l)')
options = parser.parse_args(cmd_line_args)
if options.more_input:
options.input += [options.more_input, ]
if options.verbose:
options.loglevel = 'debug'
# Print help if no arguments are passed
if not options.input:
parser.print_help()
return RETURN_ERR_ARGS
# Setup logging to the console:
# here we use stdout instead of stderr by default, so that the output
# can be redirected properly.
logging.basicConfig(level=LOG_LEVELS[options.loglevel], stream=sys.stdout,
format='%(levelname)-8s %(message)s')
# enable logging in the modules:
log.setLevel(logging.NOTSET)
if options.loglevel == 'debug-olefile':
olefile.enable_logging()
# remember if there was a problem and continue with other data
any_err_stream = False
any_err_dumping = False
any_did_dump = False
for container, filename, data in \
xglob.iter_files(options.input, recursive=options.recursive,
zip_password=options.zip_password,
zip_fname=options.zip_fname):
# ignore directory names stored in zip files:
if container and filename.endswith('/'):
continue
err_stream, err_dumping, did_dump = \
process_file(filename, data, options.output_dir)
any_err_stream |= err_stream
any_err_dumping |= err_dumping
any_did_dump |= did_dump
# assemble return value
return_val = RETURN_NO_DUMP
if any_did_dump:
return_val += RETURN_DID_DUMP
if any_err_stream:
return_val += RETURN_ERR_STREAM
if any_err_dumping:
return_val += RETURN_ERR_DUMP
return return_val
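# Example invocation of this script (the file name is a placeholder):
#
#     python oleobj.py -d extracted suspicious.doc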
if __name__ == '__main__':
sys.exit(main())
| 38.616119 | 101 | 0.595798 |
a94f0352cd02843bf926b926dd2be63d28db4695 | 1,211 | py | Python | framebuf/framebuf.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 126 | 2019-07-19T14:42:41.000Z | 2022-03-21T22:22:19.000Z | framebuf/framebuf.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 38 | 2019-08-28T01:46:31.000Z | 2022-03-17T05:46:51.000Z | framebuf/framebuf.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 55 | 2019-08-02T09:32:33.000Z | 2021-12-22T11:25:51.000Z | # (c) 2018 Paul Sokolovsky. MIT license.
from usdl2 import *
RGB888 = 1
class FrameBuffer:
def __init__(self, buffer, width, height, format, stride=None):
if stride is None:
stride = width
self.win = SDL_CreateWindow(
"Pycopy FrameBuffer",
SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
width, height, 0)
self.renderer = SDL_CreateRenderer(self.win, -1, 0)
def _set_color(self, c):
SDL_SetRenderDrawColor(self.renderer, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff, 255);
def fill(self, c):
self._set_color(c)
SDL_RenderClear(self.renderer)
def pixel(self, x, y, c):
self._set_color(c)
SDL_RenderDrawPoint(self.renderer, x, y)
def line(self, x1, y1, x2, y2, c):
self._set_color(c)
SDL_RenderDrawLine(self.renderer, x1, y1, x2, y2)
def rect(self, x, y, w, h, c):
self._set_color(c)
SDL_RenderDrawRect(self.renderer, SDL_Rect(x, y, w, h))
def fill_rect(self, x, y, w, h, c):
self._set_color(c)
SDL_RenderFillRect(self.renderer, SDL_Rect(x, y, w, h))
def show(self):
SDL_RenderPresent(self.renderer)
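# Usage sketch: this SDL2-backed shim ignores the 'buffer' argument that
# MicroPython's framebuf normally requires, so None can be passed for it
# (window size and colours below are arbitrary):
#
#     fb = FrameBuffer(None, 320, 240, RGB888)
#     fb.fill(0x000000)
#     fb.line(0, 0, 319, 239, 0xff0000)
#     fb.rect(10, 10, 100, 60, 0x00ff00)
#     fb.show()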
| 27.522727 | 96 | 0.603633 |
79f4ce6e7a80503e20806a19c7e5886f1a8558c3 | 2,150 | py | Python | pysaint/constants.py | puroong/pysaint | 484cc4e819b61068e628ae70f96fc4e4a8213c27 | [
"MIT"
] | 33 | 2018-08-21T05:48:01.000Z | 2021-07-09T04:46:36.000Z | pysaint/constants.py | puroong/pysaint | 484cc4e819b61068e628ae70f96fc4e4a8213c27 | [
"MIT"
] | 9 | 2019-07-26T20:56:36.000Z | 2021-02-02T03:51:54.000Z | pysaint/constants.py | puroong/pysaint | 484cc4e819b61068e628ae70f96fc4e4a8213c27 | [
"MIT"
] | 10 | 2019-07-25T14:50:00.000Z | 2021-07-11T22:53:08.000Z | SOURCE_URL = "http://ecc.ssu.ac.kr/sap/bc/webdynpro/sap/zcmw2100?sap-language=KO"
ECC_URL = "http://ecc.ssu.ac.kr"
SAINT_URL = "https://saint.ssu.ac.kr"
PORTAL_URL = "https://saint.ssu.ac.kr/irj/portal"
POPUP_URL = "https://saint.ssu.ac.kr/ssu_logon/jsp/popupCheck.jsp"
REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'DNT': '1',
'Host': 'saint.ssu.ac.kr',
'Origin': 'https://saint.ssu.ac.kr',
'Referer': 'http://saint.ssu.ac.kr/irj/portal',
'Upgrade-Insecure-Requests': '1',
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)" +
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
}
SESSION_HEADERS = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'DNT': '1',
'Host': 'saint.ssu.ac.kr',
'Origin': 'https://saint.ssu.ac.kr',
'Referer': 'https://ecc.ssu.ac.kr/sap/bc/webdynpro/sap/zcmw2100?sap-language=KO',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'X-Requested-With': 'XMLHttpRequest',
'X-XHR-Logon': 'accept',
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) " +
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
}
class Line:
TEN = 10
TWENTY = 20
FIFTY = 50
HUNDRED = 100
TWO_HUNDRED = 200
FIVE_HUNDRED = 500
@classmethod
def list(cls):
members = [getattr(Line, attr) for attr in dir(Line) if not callable(getattr(Line, attr)) and not attr.startswith("__")]
return sorted(members)
@classmethod
def has_value(cls, val):
return val in cls.list() | 37.068966 | 128 | 0.618605 |
887276956a9e20429defc1119ff63b8b7d7ccec7 | 1,515 | py | Python | pythonrpc.py | fujimotomh/python-rpc | 7b59724986dc4751bb80c165604e368f079fa833 | [
"MIT"
] | 1 | 2017-06-08T11:03:34.000Z | 2017-06-08T11:03:34.000Z | pythonrpc.py | fujimotomh/python-rpc | 7b59724986dc4751bb80c165604e368f079fa833 | [
"MIT"
] | null | null | null | pythonrpc.py | fujimotomh/python-rpc | 7b59724986dc4751bb80c165604e368f079fa833 | [
"MIT"
] | null | null | null | from __future__ import print_function
import io
import time
import requests
import pickle
from six.moves.BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class RPCServer(HTTPServer):
def __init__(self, func, local=False, port=8080, verbose=False):
self.func = func
self.verbose = verbose
if local:
ip = '127.0.0.1'
else:
ip = '0.0.0.0'
super(RPCServer, self).__init__((ip, port), RPCHandler)
class RPCHandler(BaseHTTPRequestHandler):
def do_POST(self):
        start = _clock()
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.send_header('access-control-allow-origin', '*')
self.end_headers()
args, kwargs = pickle.load(self.rfile)
out = self.server.func(*args, **kwargs)
pickle.dump(out, self.wfile)
if self.server.verbose:
print("finished POST")
print("ms/call: {:.3f}".format((time.clock() - start) * 1000))
class RPCClient(object):
def __init__(self, addr):
self.addr = addr
def __call__(self, *args, **kwargs):
up = io.BytesIO()
pickle.dump((args, kwargs), up)
post = requests.post(self.addr, data=up.getvalue(), \
headers={'content-type': 'application/octet-stream',
'access-control-allow-origin': '*'})
down = io.BytesIO(post.content)
out = pickle.load(down)
return out
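# Usage sketch (one process runs the server, another the client; the function
# and port below are arbitrary examples):
#
#     # server process
#     def add(a, b):
#         return a + b
#     RPCServer(add, local=True, port=8080).serve_forever()
#
#     # client process
#     add_remote = RPCClient('http://127.0.0.1:8080')
#     print(add_remote(1, 2))   # -> 3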
| 25.25 | 74 | 0.60132 |
ad4a10a8ad27e8ae24eeff6369762fea64a00c6e | 20,083 | py | Python | openstack_dashboard/dashboards/admin/networks/subnets/tests.py | kbujold/stx-horizon | a9454b7fe7ac74f76f0362b8c978673b89e5aa0c | [
"Apache-2.0"
] | 37 | 2018-10-30T02:47:24.000Z | 2021-12-04T10:29:40.000Z | openstack_dashboard/dashboards/admin/networks/subnets/tests.py | kbujold/stx-horizon | a9454b7fe7ac74f76f0362b8c978673b89e5aa0c | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/networks/subnets/tests.py | kbujold/stx-horizon | a9454b7fe7ac74f76f0362b8c978673b89e5aa0c | [
"Apache-2.0"
] | 35 | 2018-11-26T03:36:31.000Z | 2021-12-04T10:29:41.000Z | # Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import tests
from openstack_dashboard.test import helpers as test
DETAIL_URL = 'horizon:admin:networks:subnets:detail'
NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index')
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkSubnetTests(test.BaseAdminViewTests):
@test.create_stubs({api.neutron: ('network_get',
'subnet_get',
'is_extension_supported')})
def test_subnet_detail(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.MultipleTimes().AndReturn(network)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
self.mox.ReplayAll()
url = reverse(DETAIL_URL, args=[subnet.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['subnet'].id, subnet.id)
@test.create_stubs({api.neutron: ('subnet_get',)})
def test_subnet_detail_exception(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse(DETAIL_URL, args=[subnet.id])
res = self.client.get(url)
redir_url = NETWORKS_INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnets)
self.mox.ReplayAll()
url = reverse('horizon:admin:networks:createsubnet',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',
'subnet_create',)})
def test_subnet_create_post(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.MultipleTimes().AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.MultipleTimes().AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnets)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
tenant_id=subnet.tenant_id)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = tests.form_data_subnet(subnet)
url = reverse('horizon:admin:networks:createsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = tests.form_data_subnet(subnet, allocation_pools=[])
url = reverse('horizon:admin:networks:createsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
# admin DetailView is shared with userpanel one, so
# redirection URL on error is userpanel index.
redir_url = reverse('horizon:project:networks:index')
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',
'subnet_create',)})
def test_subnet_create_post_subnet_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.MultipleTimes().AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnets)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
tenant_id=subnet.tenant_id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = tests.form_data_subnet(subnet, allocation_pools=[])
url = reverse('horizon:admin:networks:createsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_cidr_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnets)
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = tests.form_data_subnet(
subnet, cidr=cidr, allocation_pools=[])
url = reverse('horizon:admin:networks:createsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
@test.create_stubs({api.neutron: ('network_get',
'is_extension_supported',
'subnetpool_list',)})
def test_subnet_create_post_gw_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnets)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = tests.form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:admin:networks:createsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',
'is_extension_supported',
'subnetpool_list')})
def test_subnet_update_post(self):
subnet = self.subnets.first()
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnetpools.list())
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = tests.form_data_subnet(subnet, allocation_pools=[])
url = reverse('horizon:admin:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',
'is_extension_supported',
'subnetpool_list')})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.neutron.subnetpool_list(IsA(http.HttpRequest))\
.AndReturn(self.subnetpools.list())
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = tests.form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:admin:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'port_list',
'is_extension_supported',
'show_network_ip_availability',
'list_dhcp_agent_hosting_networks',)})
def test_subnet_delete(self):
self._test_subnet_delete()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'port_list',
'is_extension_supported',
'show_network_ip_availability',
'list_dhcp_agent_hosting_networks',)})
def test_subnet_delete_with_mac_learning(self):
self._test_subnet_delete(mac_learning=True)
def _test_subnet_delete(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
ip_availability = self.ip_availability.get()
api.neutron.show_network_ip_availability(IsA(http.HttpRequest),
network_id). \
MultipleTimes().AndReturn(ip_availability)
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'network-ip-availability')\
.MultipleTimes().AndReturn(True)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'port_list',
'is_extension_supported',
'show_network_ip_availability',
'list_dhcp_agent_hosting_networks',)})
def test_subnet_delete_exception(self):
self._test_subnet_delete_exception()
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'port_list',
'is_extension_supported',
'show_network_ip_availability',
'list_dhcp_agent_hosting_networks',)})
def test_subnet_delete_exception_with_mac_learning(self):
self._test_subnet_delete_exception(mac_learning=True)
def _test_subnet_delete_exception(self, mac_learning=False):
subnet = self.subnets.first()
network_id = subnet.network_id
ip_availability = self.ip_availability.get()
api.neutron.show_network_ip_availability(IsA(http.HttpRequest),
network_id).\
MultipleTimes().AndReturn(ip_availability)
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'network-ip-availability') \
.MultipleTimes().AndReturn(True)
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',
'show_network_ip_availability',
'list_dhcp_agent_hosting_networks',)})
def test_network_detail_ip_availability_exception(self):
self._test_network_detail_ip_availability_exception()
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',
'is_extension_supported',
'show_network_ip_availability',
'list_dhcp_agent_hosting_networks',)})
def test_network_detail_ip_availability_exception_with_mac_learning(self):
self._test_network_detail_ip_availability_exception(mac_learning=True)
def _test_network_detail_ip_availability_exception(self,
mac_learning=False):
network_id = self.networks.first().id
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.show_network_ip_availability(IsA(http.HttpRequest),
network_id).\
MultipleTimes().AndRaise(self.exceptions.neutron)
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning') \
.AndReturn(mac_learning)
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'dhcp_agent_scheduler')\
.MultipleTimes().AndReturn(True)
self.mox.ReplayAll()
from django.utils.http import urlunquote
url = urlunquote(reverse('horizon:admin:networks:subnets_tab',
args=[network_id]))
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
subnets = res.context['subnets_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
| 45.956522 | 79 | 0.566001 |
7c6d31b297ba398435ddbad716a9d15745082f68 | 17,368 | py | Python | tests/python/unittest/test_hybrid_script.py | liwchang/tvm | 00f80d12bbaa7efd1a1e8e4fbee023bfd81be9f9 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_hybrid_script.py | liwchang/tvm | 00f80d12bbaa7efd1a1e8e4fbee023bfd81be9f9 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_hybrid_script.py | liwchang/tvm | 00f80d12bbaa7efd1a1e8e4fbee023bfd81be9f9 | [
"Apache-2.0"
] | null | null | null | import tvm, inspect, sys, traceback, numpy, nose, types
from tvm.hybrid import script
from tvm.hybrid.intrin import HYBRID_GLOBALS
@nose.tools.nottest
def run_and_check(func, args, var_dict={}, target='llvm'):
def tvm_val_2_py_val(val):
val = tvm.ir_pass.Substitute(val, var_dict)
val = tvm.ir_pass.Simplify(val)
assert isinstance(val, (tvm.expr.IntImm, tvm.expr.UIntImm))
return val.value
ctx = tvm.context(target, 0)
op = None
outs = func(*tuple(tvm.convert(i) if isinstance(i, list) else i for i in args))
op = outs[0].op if isinstance(outs, list) else outs.op
emu_args = []
nd_args = []
for i in args:
if isinstance(i, tvm.tensor.Tensor):
shape = [tvm_val_2_py_val(j) for j in i.shape]
emu_args.append(numpy.random.randn(*shape).astype(i.dtype))
nd_args.append(tvm.nd.array(emu_args[-1], ctx))
elif isinstance(i, tvm.expr.Var):
emu_args.append(tvm_val_2_py_val(i))
nd_args.append(emu_args[-1])
else:
assert isinstance(i, list)
emu_args.append(numpy.array(i))
sch = tvm.create_schedule(op)
module = tvm.build(sch,
[i for i in args if isinstance(i, (tvm.tensor.Tensor, tvm.expr.Var))] + \
(outs if isinstance(outs, list) else [outs]),
target=target)
assert module
out_tensors = []
for i in range(op.num_outputs):
output = op.output(i)
shape = [tvm_val_2_py_val(j) for j in output.shape]
nd_args.append(tvm.nd.array(numpy.zeros(shape).astype(output.dtype), ctx))
out_tensors.append(nd_args[-1])
ref_data = func(*emu_args)
if isinstance(ref_data, numpy.ndarray):
ref_data = [ref_data]
module(*nd_args)
for nd, np in zip(out_tensors, ref_data):
tvm.testing.assert_allclose(nd.asnumpy(), np, rtol=1e-5, atol=1e-5)
@script
def outer_product(n, m, a, b):
"""This is a simple outer product.
Actually this function is not required to be documented.
I write this docstring to test skipping docstring functionality.
"""
c = output_tensor((n, m), a.dtype)
for i in range(n):
for j in range(m):
c[i, j] = a[i] * b[j]
return c
#Test global function
#Test bridge between frontend and backend
def test_outer_product():
n = tvm.var('n')
m = tvm.var('m')
a = tvm.placeholder((n, ), name='a')
b = tvm.placeholder((m, ), name='b')
try:
c = outer_product(n, m, a, b)
ir = c.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == 'could not get source code'
return
#Check for i in (0, n)
assert isinstance(ir, tvm.stmt.For)
assert ir.loop_var.name == 'i'
assert ir.min.value == 0
assert ir.extent.name == 'n'
ibody = ir.body
assert isinstance(ibody, tvm.stmt.For)
#Check for j in (0, m)
assert ibody.loop_var.name == 'j'
assert ibody.min.value == 0
assert ibody.extent.name == 'm'
#Check loop body
jbody = ibody.body
assert isinstance(jbody, tvm.stmt.Provide)
assert jbody.func.name == 'c'
assert len(jbody.args) == 2
assert jbody.args[0].name == 'i'
assert jbody.args[1].name == 'j'
assert isinstance(jbody.value, tvm.expr.Mul)
mul = jbody.value
assert isinstance(mul.a, tvm.expr.Call)
assert mul.a.name == 'a'
assert mul.b.name == 'b'
run_and_check(outer_product, [n, m, a, b], {n: 99, m: 101})
for key, _ in HYBRID_GLOBALS.items():
assert key not in globals().keys()
assert key not in outer_product.__globals__.keys()
#Test local function
#Test allocation of local variable
def test_fanout():
@script
def fanout(n, a):
three = 3.0
b = output_tensor((a.shape[0] - 3, ), a.dtype)
for i in range(a.shape[0] - 3):
sigma = 0.0
for j in range(3):
sigma += a[i + j]
sigma = sigma / three
b[i] = sigma
return b
n = tvm.var('n')
a = tvm.placeholder((n, ), 'float32', name='a')
try:
b = fanout(n, a)
ir = b.op.body
except IOError as err:
assert sys.version_info[0] == 2 and str(err) == 'could not get source code'
return
#Check for i in (0, n-3)
assert isinstance(ir, tvm.stmt.For)
assert ir.loop_var.name == 'i'
assert ir.min.value == 0
assert tvm.ir_pass.Equal(ir.extent, n - 3)
#Check loopbody
ibody = ir.body
assert isinstance(ibody, tvm.stmt.AttrStmt)
abody = ibody.body
assert isinstance(abody, tvm.stmt.Realize)
assert abody.bounds[0].min.value == 0
assert abody.bounds[0].extent.value == 1
assert abody.func.name == 'sigma'
#Check i loop body
rbody = abody.body
assert isinstance(rbody.first, tvm.stmt.Provide)
assert rbody.first.func.name == 'sigma'
assert len(rbody.first.args) == 1
assert rbody.first.args[0].value == 0
#Check fanout loop
jloop = rbody.rest.first
assert jloop.loop_var.name == 'j'
assert jloop.min.value == 0
assert jloop.extent.value == 3
jbody = jloop.body
assert isinstance(jbody, tvm.stmt.Provide)
assert len(jbody.args) == 1
assert jbody.args[0].value == 0
assert jbody.func.name == 'sigma'
assert isinstance(jbody.value, tvm.expr.Add)
value = jbody.value
assert isinstance(value.a, tvm.expr.Call)
assert value.a.name == 'sigma'
assert len(value.a.args) == 1
assert value.a.args[0].value == 0
assert value.b.name == 'a'
assert len(value.b.args) == 1
assert tvm.ir_pass.Equal(value.b.args[0], ir.loop_var + jloop.loop_var)
    divide = rbody.rest.rest.first
assert isinstance(divide, tvm.stmt.Provide)
assert len(divide.args) == 1
assert divide.args[0].value == 0
value = divide.value
assert isinstance(value, tvm.expr.Mul)
assert value.a.name == 'sigma'
assert len(value.a.args) == 1
assert value.a.args[0].value == 0
assert abs(value.b.value - (1 / 3.0)) < 1e-5
write = rbody.rest.rest.rest
assert isinstance(write, tvm.stmt.Provide)
assert write.func.name == 'b'
assert write.value.name == 'sigma'
assert len(write.value.args) == 1
assert write.value.args[0].value == 0
run_and_check(fanout, [n, a], {n: 10})
def test_looptype():
@script
def looptype(a, b, c):
d = output_tensor((16, ), 'int32')
e = output_tensor((16, ), 'int32')
f = output_tensor((16, ), 'int32')
for i in parallel(16):
d[i] = a[i]
for j in vectorize(16):
e[j] = b[j]
for k in unroll(16):
f[k] = c[k]
return d, e, f
a = tvm.placeholder((16, ), name='a', dtype='int32')
b = tvm.placeholder((16, ), name='b', dtype='int32')
c = tvm.placeholder((16, ), name='c', dtype='int32')
try:
d, e, f = looptype(a, b, c)
ir = d.op.body
except:
return
iloop = ir.first
jloop = ir.rest.first
kloop = ir.rest.rest
assert iloop.for_type == tvm.stmt.For.Parallel
assert jloop.for_type == tvm.stmt.For.Vectorized
assert kloop.for_type == tvm.stmt.For.Unrolled
run_and_check(looptype, [a, b, c])
def test_if():
@script
def if_then_else(a):
b = output_tensor((10, ), 'int32')
c = output_tensor((10, ), 'int32')
for i in range(10):
if i % 2 == 0:
c[i] = a[i]
else:
c[i] = b[i]
for i in unroll(10):
b[i] = -1 if i % 2 == 0 else 1
return b, c
a = tvm.placeholder((10, ), dtype='int32', name='a')
run_and_check(if_then_else, [a])
@script
def if_triple_condition(a):
b = output_tensor((10, ), 'int32')
for i in range(10):
if 0 <= i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
run_and_check(if_triple_condition, [a])
@script
def if_and(a):
b = output_tensor((10, ), 'int32')
for i in range(10):
if i >= 0 and i < 5:
b[i] = a[i]
else:
b[i] = a[i] + 1
return b
run_and_check(if_and, [a])
def test_bind():
if not tvm.gpu(0).exist:
print('[Warning] No GPU found! Skip bind test!')
return
@script
def vec_add(a, b):
c = output_tensor((1000, ), 'float32')
for tx in bind('threadIdx.x', 1000):
c[tx] = a[tx] + b[tx]
return c
a = tvm.placeholder((1000, ), dtype='float32', name='a')
b = tvm.placeholder((1000, ), dtype='float32', name='b')
run_and_check(vec_add, [a, b], target='cuda')
def test_math_intrin():
@script
def intrin_real(a):
b = output_tensor((8, ), 'float32')
b[0] = sqrt(a[0])
b[1] = log(a[1])
b[2] = exp(a[2])
b[3] = sigmoid(a[3])
b[4] = power(a[4], a[5])
b[5] = tanh(a[5])
b[6] = min(a[4], a[5])
b[7] = max(a[5], a[6])
return b
a8 = tvm.placeholder((8, ), dtype='float32', name='a')
b8 = intrin_real(a8)
sch = tvm.create_schedule(b8.op)
func = tvm.build(sch, [a8, b8])
assert func
a = numpy.arange(2, 10).astype('float32')
tvm_a = tvm.ndarray.array(a)
tvm_b = tvm.ndarray.array(numpy.zeros((8, ), dtype='float32'))
b = intrin_real(a)
func(tvm_a, tvm_b)
tvm.testing.assert_allclose(b, tvm_b.asnumpy(), rtol=1e-5)
@script
def intrin_int(a):
b = output_tensor((1, ), 'int32')
b[0] = popcount(a[0])
return b
a1 = tvm.placeholder((1, ), dtype='int32')
b1 = intrin_int(a1)
sch = tvm.create_schedule(b1.op)
func = tvm.build(sch, [a1, b1])
assert func
a = numpy.array([114514]).astype('int32')
tvm_a = tvm.ndarray.array(a)
tvm_b = tvm.ndarray.array(numpy.array([0]).astype('int32'))
b = intrin_int(a)
func(tvm_a, tvm_b)
assert tvm_b.asnumpy()[0] == b[0]
# test non caconical loops
def test_non_zero():
@tvm.hybrid.script
def blur(a):
b = output_tensor((30, 30), 'float32')
for i in range(2, 32):
for j in range(2, 32):
s = 0.0
for di in range(3):
for dj in range(3):
s += a[i-di, j-dj]
b[i-2, j-2] = s / 9.0
return b
a = tvm.placeholder((32, 32), 'float32', 'a')
run_and_check(blur, [a])
@tvm.hybrid.script
def triangle(a, b):
c = output_tensor((10, 10), dtype='float32')
for i in range(10):
for j in range(i, 10):
c[i, j] = a[i] * b[j]
return c
a = tvm.placeholder((10, ), dtype='float32', name='a')
b = tvm.placeholder((10, ), dtype='float32', name='b')
run_and_check(triangle, [a, b])
def test_allocate():
@tvm.hybrid.script
def blur2d(a):
b = output_tensor((30, 30), 'float32')
for i in range(30):
ha = allocate((3, 30), 'float32')
for j in range(3):
for k in range(30):
ha[j, k] = a[i+j, k] + a[i+j, k+1] + a[i+j, k+2]
for j in range(30):
b[i, j] = (ha[0, j] + ha[1, j] + ha[2, j]) / 9.0
return b
a = tvm.placeholder((32, 32), 'float32', 'a')
run_and_check(blur2d, [a])
if tvm.gpu().exist:
@tvm.hybrid.script
def share_vec_add(a, b):
c = output_tensor((256, ), 'float32')
shared = allocate((256, ), 'float32', 'shared')
for i in bind("threadIdx.x", 256):
shared[i] = a[i]
local = allocate((256, ), 'float32', 'local')
for i in bind("threadIdx.x", 256):
local[i] = b[i]
for i in bind("threadIdx.x", 256):
c[i] = shared[i] + local[i]
return c
a = tvm.placeholder((256, ), dtype='float32', name='a')
b = tvm.placeholder((256, ), dtype='float32', name='b')
run_and_check(share_vec_add, [a, b], target='cuda')
else:
print('[Warning] No GPU found! Skip shared mem test!')
def test_upstream():
@tvm.hybrid.script
def upstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
b[i] = a[i] * i
return b
a = tvm.placeholder((20, ), 'float32')
b = tvm.placeholder((20, ), 'float32')
c = tvm.compute((20, ), lambda x: a[x] + b[x])
d = upstream(c)
sch = tvm.create_schedule([c.op, d.op])
ir = tvm.lower(sch, [a, b, d], simple_mode=True)
func = tvm.build(sch, [a, b, d])
assert(func)
a = numpy.random.randn(20).astype('float32')
b = numpy.random.randn(20).astype('float32')
ref = numpy.zeros((20, ), 'float32')
for i in range(20):
ref[i] = (a[i] + b[i]) * i
tvm_a = tvm.nd.array(a)
tvm_b = tvm.nd.array(b)
tvm_d = tvm.nd.array(numpy.zeros((20, )).astype('float32'))
func(tvm_a, tvm_b, tvm_d)
tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5)
def test_downstream():
@tvm.hybrid.script
def downstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
b[i] = a[i] * i
return b
a = tvm.placeholder((20, ), 'float32')
b = downstream(a)
c = tvm.compute((20, ), lambda x: b[x] + 1.0)
sch = tvm.create_schedule(c.op)
module = tvm.build(sch, [a, c])
assert module
a = numpy.random.randn(20).astype('float32')
ref = numpy.zeros((20, )).astype('float32')
for i in range(20):
ref[i] = (a[i] * i) + 1.0
tvm_a = tvm.nd.array(a)
tvm_c = tvm.nd.array(numpy.zeros((20, )).astype('float32'))
module(tvm_a, tvm_c)
tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5)
def test_const_param():
@tvm.hybrid.script
def add_something(a, b):
c = output_tensor((11, ), 'int32')
for i in range(11):
c[i] = a[i] + b
return c
a = tvm.placeholder((11, ), dtype='int32', name='a')
b = tvm.const(11, 'int32')
c = add_something(a, b)
sch = tvm.create_schedule(c.op)
module = tvm.build(sch, [a, c], 'llvm')
assert(module)
np_a = numpy.arange(11).astype('int32')
np_b = 11
np_c = numpy.zeros((11, )).astype('int32')
nd_a = tvm.ndarray.array(np_a)
nd_c = tvm.ndarray.array(numpy.zeros((11, )).astype('int32'))
module(nd_a, nd_c)
ref = add_something(np_a, 11)
tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5)
def test_value_index():
@tvm.hybrid.script
def kernel_a(a):
b = output_tensor((16, ), 'int32')
c = output_tensor((4, 4), 'int32')
for i in range(16):
b[i] = a[i] + 2
c[i // 4, i % 4] = a[i] + 1
return b, c
@tvm.hybrid.script
def kernel_b(b, a):
c = output_tensor((4, 4), 'int32')
for i in range(4):
for j in range(4):
c[i, j] = a[i * 4 + j] * b[i, j]
return c
a = tvm.placeholder((16, ), 'int32')
b, c = kernel_a(a)
d = kernel_b(c, b)
sch = tvm.create_schedule(d.op)
module = tvm.build(sch, [a, d])
assert module
np_a = numpy.arange(16).astype('int32')
np_b, np_c = kernel_a(np_a)
ref = kernel_b(np_c, np_b)
res = tvm.ndarray.array(numpy.zeros((4, 4)).astype('int32'))
module(tvm.ndarray.array(np_a), res)
tvm.testing.assert_allclose(res.asnumpy(), ref)
def test_func_call():
@tvm.hybrid.script
def foo(a, b):
for i in range(len(a)):
a[i] = i + 1.0
for i in range(len(a)):
b[i] = i + 1.0
c = outer_product(10, 10, a, b)
d = output_tensor(c.shape, c.dtype)
for i in range(10):
for j in range(10):
d[i, j] = c[i, j] + i * j
return d
a = tvm.placeholder((10, ), name='a')
b = tvm.placeholder((10, ), name='b')
run_and_check(foo, [a, b])
def test_bool():
@tvm.hybrid.script
def foo(a):
b = output_tensor(a.shape, a.dtype)
b[0] = 1.2
for i in range(1, a.shape[0] - 1):
if a[i] * a[i - 1] < a[i] or a[i] * a[i - 1] < a[i - 1] or i * a[i] == a[i]:
b[i] = a[i]
else:
b[i] = 0.0
return b
a = tvm.placeholder((10, ), name='a')
run_and_check(foo, [a])
def test_const_range():
@tvm.hybrid.script
def foo(a, b):
c = output_tensor(a.shape, a.dtype)
d = output_tensor(a.shape, a.dtype)
for i in const_range(2):
for j in const_range(5):
c[i, j] = a[i, j] + b[i, j]
for i in const_range(len(b)):
for j in const_range(len(b[0])):
d[i, j] = a[i, j] + b[i, j]
return c, d
a = tvm.placeholder((2, 5), name='a', dtype='int32')
b = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]
run_and_check(foo, [a, b])
if __name__ == "__main__":
test_outer_product()
test_fanout()
test_looptype()
test_if()
test_bind()
test_math_intrin()
test_non_zero()
test_allocate()
test_upstream()
test_downstream()
test_const_param()
test_value_index()
test_func_call()
test_bool()
test_const_range()
# TODO:
# test_inplace()
| 29.739726 | 96 | 0.545947 |
c213c2497628c22e01d1462416d868581d5910e7 | 4,419 | py | Python | hpobench/container/benchmarks/nas/tabular_benchmarks.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 78 | 2017-01-14T14:25:55.000Z | 2020-09-30T22:57:14.000Z | hpobench/container/benchmarks/nas/tabular_benchmarks.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 84 | 2016-11-24T15:19:20.000Z | 2020-11-09T11:34:19.000Z | hpobench/container/benchmarks/nas/tabular_benchmarks.py | pfistfl/HPOBench | a7ad8807bd2e058ff99f703ad057b64ecadd4b66 | [
"Apache-2.0"
] | 31 | 2016-11-29T19:56:06.000Z | 2020-07-10T04:13:33.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
""" Benchmark for the Tabular Benchmark from hpobench/benchmarks/nas/tabular_benchmarks.py """
from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient
class SliceLocalizationBenchmark(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SliceLocalizationBenchmark')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(SliceLocalizationBenchmark, self).__init__(**kwargs)
class ProteinStructureBenchmark(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ProteinStructureBenchmark')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(ProteinStructureBenchmark, self).__init__(**kwargs)
class NavalPropulsionBenchmark(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NavalPropulsionBenchmark')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(NavalPropulsionBenchmark, self).__init__(**kwargs)
class ParkinsonsTelemonitoringBenchmark(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ParkinsonsTelemonitoringBenchmark')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(ParkinsonsTelemonitoringBenchmark, self).__init__(**kwargs)
class SliceLocalizationBenchmarkOriginal(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SliceLocalizationBenchmarkOriginal')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(SliceLocalizationBenchmarkOriginal, self).__init__(**kwargs)
class ProteinStructureBenchmarkOriginal(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ProteinStructureBenchmarkOriginal')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(ProteinStructureBenchmarkOriginal, self).__init__(**kwargs)
class NavalPropulsionBenchmarkOriginal(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NavalPropulsionBenchmarkOriginal')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(NavalPropulsionBenchmarkOriginal, self).__init__(**kwargs)
class ParkinsonsTelemonitoringBenchmarkOriginal(AbstractBenchmarkClient):
def __init__(self, **kwargs):
kwargs['data_path'] = '/home/fcnet_tabular_benchmarks'
kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ParkinsonsTelemonitoringBenchmarkOriginal')
kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks')
kwargs['latest'] = kwargs.get('container_tag', '0.0.5')
super(ParkinsonsTelemonitoringBenchmarkOriginal, self).__init__(**kwargs)
__all__ = ["SliceLocalizationBenchmark", "SliceLocalizationBenchmarkOriginal",
"ProteinStructureBenchmark", "ProteinStructureBenchmarkOriginal",
"NavalPropulsionBenchmark", "NavalPropulsionBenchmarkOriginal",
"ParkinsonsTelemonitoringBenchmark", "ParkinsonsTelemonitoringBenchmarkOriginal"]
| 51.988235 | 108 | 0.728898 |
7f5652e0f111dfec2b7dfd1863d1975415ccc374 | 2,524 | py | Python | nnvm/examples/benchmark/cuda_imagenet_bench.py | CynthiaProtector/helo | ad9e22363a92389b3fa519ecae9061c6ead28b05 | [
"Apache-2.0"
] | 9 | 2019-04-19T04:45:18.000Z | 2021-01-07T06:31:15.000Z | src/nnvm/examples/benchmark/cuda_imagenet_bench.py | tashby/turicreate | 7f07ce795833d0c56c72b3a1fb9339bed6d178d1 | [
"BSD-3-Clause"
] | 3 | 2021-09-08T02:18:00.000Z | 2022-03-12T00:39:44.000Z | src/nnvm/examples/benchmark/cuda_imagenet_bench.py | tashby/turicreate | 7f07ce795833d0c56c72b3a1fb9339bed6d178d1 | [
"BSD-3-Clause"
] | 5 | 2019-09-18T20:21:23.000Z | 2020-11-22T11:18:15.000Z | """ Benchmark script for performance on GPUs. For example, run the file with: `python cuda_imagenet_bench.py --model='mobilenet'`. For more details about how to set up the inference environment on GPUs, please refer to NNVM Tutorial: ImageNet Inference on the GPU """
import time
import argparse
import numpy as np
import tvm
import nnvm.compiler
import nnvm.testing
from tvm.contrib import util, nvcc
from tvm.contrib import graph_runtime as runtime
@tvm.register_func
def tvm_callback_cuda_compile(code):
ptx = nvcc.compile_cuda(code, target="ptx")
return ptx
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, choices=['resnet', 'mobilenet'],
help="The model type.")
parser.add_argument('--opt-level', type=int, default=1, help="Level of optimization.")
parser.add_argument('--num-iter', type=int, default=1000, help="Number of iteration during benchmark.")
args = parser.parse_args()
opt_level = args.opt_level
num_iter = args.num_iter
target = "cuda"
ctx = tvm.gpu(0)
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
out_shape = (batch_size, num_classes)
if args.model == 'resnet':
net, params = nnvm.testing.resnet.get_workload(
batch_size=1, image_shape=image_shape)
elif args.model == 'mobilenet':
net, params = nnvm.testing.mobilenet.get_workload(
batch_size=1, image_shape=image_shape)
else:
raise ValueError('no benchmark prepared for {}.'.format(args.model))
with nnvm.compiler.build_config(opt_level=opt_level):
with tvm.build_config(auto_unroll_max_step=32,
auto_unroll_min_depth=0,
unroll_explicit=False):
graph, lib, params = nnvm.compiler.build(
net, target, shape={"data": data_shape}, params=params)
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
module = runtime.create(graph, lib, ctx)
module.set_input(**params)
module.set_input("data", data)
module.run()
out = module.get_output(0, tvm.nd.empty(out_shape))
out.asnumpy()
print('benchmark args: {}'.format(args))
ftimer = module.module.time_evaluator("run", ctx, num_iter)
for i in range(3):
prof_res = ftimer()
print(prof_res)
# sleep for avoiding cpu overheat
time.sleep(45)
if __name__ == '__main__':
main()
| 37.671642 | 267 | 0.66878 |
4ff4671ef7ff6d402f4c0dcf834c666e0c0d16da | 7,742 | py | Python | isso/ext/notifications.py | matolivier/isso | 784e08170ab8694ef610564305e6382a482f78b0 | [
"MIT"
] | null | null | null | isso/ext/notifications.py | matolivier/isso | 784e08170ab8694ef610564305e6382a482f78b0 | [
"MIT"
] | null | null | null | isso/ext/notifications.py | matolivier/isso | 784e08170ab8694ef610564305e6382a482f78b0 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import sys
import io
import time
import json
import socket
import smtplib
from email.utils import formatdate
from email.header import Header
from email.mime.text import MIMEText
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
import logging
logger = logging.getLogger("isso")
try:
import uwsgi
except ImportError:
uwsgi = None
from isso.compat import PY2K
from isso import local
if PY2K:
from thread import start_new_thread
else:
from _thread import start_new_thread
class SMTPConnection(object):
def __init__(self, conf):
self.conf = conf
def __enter__(self):
klass = (smtplib.SMTP_SSL if self.conf.get(
'security') == 'ssl' else smtplib.SMTP)
self.client = klass(host=self.conf.get('host'),
port=self.conf.getint('port'),
timeout=self.conf.getint('timeout'))
if self.conf.get('security') == 'starttls':
if sys.version_info >= (3, 4):
import ssl
self.client.starttls(context=ssl.create_default_context())
else:
self.client.starttls()
username = self.conf.get('username')
password = self.conf.get('password')
if username and password:
if PY2K:
username = username.encode('ascii')
password = password.encode('ascii')
self.client.login(username, password)
return self.client
def __exit__(self, exc_type, exc_value, traceback):
self.client.quit()
class SMTP(object):
def __init__(self, isso):
self.isso = isso
self.conf = isso.conf.section("smtp")
self.public_endpoint = isso.conf.get("server", "public-endpoint") or local("host")
self.admin_notify = any((n in ("smtp", "SMTP")) for n in isso.conf.getlist("general", "notify"))
self.reply_notify = isso.conf.getboolean("general", "reply-notifications")
# test SMTP connectivity
try:
with SMTPConnection(self.conf):
logger.info("connected to SMTP server")
except (socket.error, smtplib.SMTPException):
logger.exception("unable to connect to SMTP server")
if uwsgi:
def spooler(args):
try:
self._sendmail(args[b"subject"].decode("utf-8"),
args["body"].decode("utf-8"),
args[b"to"].decode("utf-8"))
except smtplib.SMTPConnectError:
return uwsgi.SPOOL_RETRY
else:
return uwsgi.SPOOL_OK
uwsgi.spooler = spooler
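    # isso's extension loader iterates over this object, so each yielded
    # (event name, handler) pair below subscribes the SMTP backend to that
    # comment event.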
def __iter__(self):
yield "comments.new:after-save", self.notify_new
yield "comments.activate", self.notify_activated
def format(self, thread, comment, parent_comment, recipient=None, admin=False):
rv = io.StringIO()
author = comment["author"] or "Anonymous"
if comment["email"]:
author += " <%s>" % comment["email"]
rv.write(author + " wrote:\n")
rv.write("\n")
rv.write(comment["text"] + "\n")
rv.write("\n")
if admin:
if comment["website"]:
rv.write("User's URL: %s\n" % comment["website"])
rv.write("IP address: %s\n" % comment["remote_addr"])
rv.write("Link to comment: %s\n" %
(local("origin") + thread["uri"] + "#isso-%i" % comment["id"]))
rv.write("\n")
rv.write("---\n")
if admin:
uri = self.public_endpoint + "/id/%i" % comment["id"]
key = self.isso.sign(comment["id"])
rv.write("Delete comment: %s\n" % (uri + "/delete/" + key))
if comment["mode"] == 2:
rv.write("Activate comment: %s\n" % (uri + "/activate/" + key))
else:
uri = self.public_endpoint + "/id/%i" % parent_comment["id"]
key = self.isso.sign(('unsubscribe', recipient))
rv.write("Unsubscribe from this conversation: %s\n" % (uri + "/unsubscribe/" + quote(recipient) + "/" + key))
rv.seek(0)
return rv.read()
def notify_new(self, thread, comment):
if self.admin_notify:
body = self.format(thread, comment, None, admin=True)
self.sendmail(thread["title"], body, thread, comment)
if comment["mode"] == 1:
self.notify_users(thread, comment)
def notify_activated(self, thread, comment):
self.notify_users(thread, comment)
def notify_users(self, thread, comment):
if self.reply_notify and "parent" in comment and comment["parent"] is not None:
# Notify interested authors that a new comment is posted
notified = []
parent_comment = self.isso.db.comments.get(comment["parent"])
comments_to_notify = [parent_comment] if parent_comment is not None else []
comments_to_notify += self.isso.db.comments.fetch(thread["uri"], mode=1, parent=comment["parent"])
for comment_to_notify in comments_to_notify:
email = comment_to_notify["email"]
if "email" in comment_to_notify and comment_to_notify["notification"] and email not in notified \
and comment_to_notify["id"] != comment["id"] and email != comment["email"]:
body = self.format(thread, comment, parent_comment, email, admin=False)
subject = "Re: New comment posted on %s" % thread["title"]
self.sendmail(subject, body, thread, comment, to=email)
notified.append(email)
def sendmail(self, subject, body, thread, comment, to=None):
to = to or self.conf.get("to")
if uwsgi:
uwsgi.spool({b"subject": subject.encode("utf-8"),
b"body": body.encode("utf-8"),
b"to": to.encode("utf-8")})
else:
start_new_thread(self._retry, (subject, body, to))
def _sendmail(self, subject, body, to_addr):
from_addr = self.conf.get("from")
msg = MIMEText(body, 'plain', 'utf-8')
msg['From'] = from_addr
msg['To'] = to_addr
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = Header(subject, 'utf-8')
with SMTPConnection(self.conf) as con:
con.sendmail(from_addr, to_addr, msg.as_string())
def _retry(self, subject, body, to):
for x in range(5):
try:
self._sendmail(subject, body, to)
except smtplib.SMTPConnectError:
time.sleep(60)
else:
break
class Stdout(object):
def __init__(self, conf):
pass
def __iter__(self):
yield "comments.new:new-thread", self._new_thread
yield "comments.new:finish", self._new_comment
yield "comments.edit", self._edit_comment
yield "comments.delete", self._delete_comment
yield "comments.activate", self._activate_comment
def _new_thread(self, thread):
logger.info("new thread %(id)s: %(title)s" % thread)
def _new_comment(self, thread, comment):
logger.info("comment created: %s", json.dumps(comment))
def _edit_comment(self, comment):
logger.info('comment %i edited: %s',
comment["id"], json.dumps(comment))
def _delete_comment(self, id):
logger.info('comment %i deleted', id)
def _activate_comment(self, thread, comment):
logger.info("comment %(id)s activated" % thread)
| 32.805085 | 121 | 0.574787 |
0de39e43b1d3159c7f32b3f2717c2b3b35bd27de | 1,005 | py | Python | startup/12-xbpms.py | mrakitin/profile_collection-smi | 1eea45a3b886b2c0daeec715ce94f27da24d0ba3 | [
"BSD-3-Clause"
] | null | null | null | startup/12-xbpms.py | mrakitin/profile_collection-smi | 1eea45a3b886b2c0daeec715ce94f27da24d0ba3 | [
"BSD-3-Clause"
] | null | null | null | startup/12-xbpms.py | mrakitin/profile_collection-smi | 1eea45a3b886b2c0daeec715ce94f27da24d0ba3 | [
"BSD-3-Clause"
] | null | null | null | print(f'Loading {__file__}')
from ophyd import EpicsMotor, EpicsSignalRO, EpicsSignal, Device, Component as Cpt, PseudoPositioner
# xbpm2 positioner shortcut for scans 5 april 2017
# Note: this class isn't really different from the APER file; only motor axes
# are here. But I can imagine inputting numerical values pertaining
# to each diamond, or special in/out or quad1/quad2 commands that would
# be specific to the xbpms.
# need to verify the PVs, only xbpm2 was checked:
class XBPM(Device):
x = Cpt(EpicsMotor, 'X}Mtr')
y = Cpt(EpicsMotor, 'Y}Mtr')
xbpm1 = XBPM('XF:12IDA-BI:2{XBPM:1-Ax:', name='xbpm1')
xbpm2 = XBPM('XF:12IDA-BI:2{XBPM:2-Ax:', name='xbpm2')
xbpm3 = XBPM('XF:12IDB1-BI:2{XBPM:3-Ax:', name='xbpm3')
# need work to input electrometer current PVs - what headers are needed?
xbpm3y = EpicsSignal('XF:12IDB-BI:2{EM:BPM3}PosY:MeanValue_RBV', name='xbpm3y')
#Prototype new electrometer, currently looking at XBPM2.
#ch1,2,3,4 = pads 2,3,5,4 respectively; thick active area
| 29.558824 | 100 | 0.728358 |
e8804c193abead81f086a2968812e96faf1cd938 | 3,555 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/page_block_embed_post.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/types/page_block_embed_post.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/page_block_embed_post.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class PageBlockEmbedPost(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.PageBlock`.
Details:
- Layer: ``126``
- ID: ``0xf259a80b``
Parameters:
url: ``str``
webpage_id: ``int`` ``64-bit``
author_photo_id: ``int`` ``64-bit``
author: ``str``
date: ``int`` ``32-bit``
blocks: List of :obj:`PageBlock <pyrogram.raw.base.PageBlock>`
caption: :obj:`PageCaption <pyrogram.raw.base.PageCaption>`
"""
__slots__: List[str] = ["url", "webpage_id", "author_photo_id", "author", "date", "blocks", "caption"]
ID = 0xf259a80b
QUALNAME = "types.PageBlockEmbedPost"
def __init__(self, *, url: str, webpage_id: int, author_photo_id: int, author: str, date: int, blocks: List["raw.base.PageBlock"], caption: "raw.base.PageCaption") -> None:
self.url = url # string
self.webpage_id = webpage_id # long
self.author_photo_id = author_photo_id # long
self.author = author # string
self.date = date # int
self.blocks = blocks # Vector<PageBlock>
self.caption = caption # PageCaption
@staticmethod
def read(data: BytesIO, *args: Any) -> "PageBlockEmbedPost":
# No flags
url = String.read(data)
webpage_id = Long.read(data)
author_photo_id = Long.read(data)
author = String.read(data)
date = Int.read(data)
blocks = TLObject.read(data)
caption = TLObject.read(data)
return PageBlockEmbedPost(url=url, webpage_id=webpage_id, author_photo_id=author_photo_id, author=author, date=date, blocks=blocks, caption=caption)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(String(self.url))
data.write(Long(self.webpage_id))
data.write(Long(self.author_photo_id))
data.write(String(self.author))
data.write(Int(self.date))
data.write(Vector(self.blocks))
data.write(self.caption.write())
return data.getvalue()
| 33.857143 | 176 | 0.607032 |
63db13c56ba78d2fd4e8bb3a47237113853905ea | 872 | py | Python | samples/regression/models/L0RF1/train_model.py | jimthompson5802/model-stacking-framework | 486461340c41072627a8c6c69ef6902297d2ada7 | [
"MIT"
] | 3 | 2018-06-10T23:45:52.000Z | 2019-08-04T08:08:05.000Z | samples/regression/models/L0RF1/train_model.py | jimthompson5802/model-stacking-framework | 486461340c41072627a8c6c69ef6902297d2ada7 | [
"MIT"
] | 25 | 2018-06-09T11:08:00.000Z | 2019-02-06T22:47:26.000Z | samples/regression/models/L0RF1/train_model.py | jimthompson5802/model-stacking-framework | 486461340c41072627a8c6c69ef6902297d2ada7 | [
"MIT"
] | 1 | 2018-06-10T23:45:53.000Z | 2018-06-10T23:45:53.000Z | # -*- coding: utf-8 -*-
#%%
from msw.model_stacking import ModelTrainer, ModelPerformanceTracker
from sklearn.ensemble import RandomForestRegressor as ThisModel
#%%
#
# Set up model for training
#
this_model = ModelTrainer(
ModelClass=ThisModel, #Model algorithm
model_params=dict(n_estimators=20,n_jobs=-1), #hyper-parameters
model_id='L0RF1', # Model Identifier
feature_set='KFSBSLN' # feature set to use
)
model_tracker = ModelPerformanceTracker(model_trainer=this_model)
#%%
#
# clear out old results
#
this_model.cleanPriorResults()
#%%
#
# train model on all the data
#
this_model.trainModel()
#%%
# create Test predictions
this_model.createTestPredictions()
#%%
#
# create Kaggle submission
#
this_model.createKaggleSubmission()
#%%
#
# record model performance metrics
#
model_tracker.recordModelPerformance()
#%% | 18.956522 | 71 | 0.727064 |
090cee2fdc20a20b06651e2935aa08456e8e3ded | 55 | py | Python | readability/__init__.py | anekos/python-readability | ede4d015abd1829cc723ad59bf2db3c487fda66f | [
"Apache-2.0"
] | null | null | null | readability/__init__.py | anekos/python-readability | ede4d015abd1829cc723ad59bf2db3c487fda66f | [
"Apache-2.0"
] | null | null | null | readability/__init__.py | anekos/python-readability | ede4d015abd1829cc723ad59bf2db3c487fda66f | [
"Apache-2.0"
] | null | null | null | __version__ = "0.8"
from .readability import Document
| 13.75 | 33 | 0.763636 |
c0b68474f685bd3330fb107544b2f5c8e78d13cd | 399 | py | Python | GUI Features/Mouse/MouseCallBack.py | hardy8059/OpenCV_Examples | 8cdba1a72374b4cb9f8aa293a4b88edc7d1f341d | [
"MIT"
] | null | null | null | GUI Features/Mouse/MouseCallBack.py | hardy8059/OpenCV_Examples | 8cdba1a72374b4cb9f8aa293a4b88edc7d1f341d | [
"MIT"
] | null | null | null | GUI Features/Mouse/MouseCallBack.py | hardy8059/OpenCV_Examples | 8cdba1a72374b4cb9f8aa293a4b88edc7d1f341d | [
"MIT"
] | null | null | null | import cv2
import numpy as np
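# Mouse callback: on a left-button double-click, draw a filled blue circle
# (radius 100, BGR colour (255, 0, 0)) centred on the click position.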
def draw_circle(event,x,y,flag,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img,(x,y),100,(255,0,0),-1)
img = np.zeros((512,512,3),np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)
while(1):
cv2.imshow('image',img)
    if cv2.waitKey(20) & 0xFF == 27:  # exit when the Esc key (code 27) is pressed
break
cv2.destroyAllWindows()
| 22.166667 | 47 | 0.631579 |
129487ee6494ade899fa812ba522f50e0e84706a | 783 | py | Python | calcpayment.py | Alfareiza/calc-payment | c0ad005795a1841da82ae1c8092c65233510fd99 | [
"MIT"
] | null | null | null | calcpayment.py | Alfareiza/calc-payment | c0ad005795a1841da82ae1c8092c65233510fd99 | [
"MIT"
] | null | null | null | calcpayment.py | Alfareiza/calc-payment | c0ad005795a1841da82ae1c8092c65233510fd99 | [
"MIT"
] | null | null | null | import os
import pathlib
from src.facade import EXPLANATION
from src.tools import main
FOLDER = pathlib.Path(__file__).parent.resolve()
def ask():
txt_files = [file for file in os.listdir(FOLDER) if file.endswith(".txt") and not file.startswith('requirements')]
if not txt_files:
        print(f'Please place a .txt file in the folder {FOLDER}\\ and then execute the program again.\n')
else:
start = input('Should I start to calculate the information (S/N): ')
if start.upper() == 'S':
main(txt_files[0])
elif start.upper() == 'N':
            print('Thanks for using our software')
else:
            print('Invalid input, press S or N')
ask()
if __name__ == '__main__':
print(EXPLANATION)
ask()
| 27.964286 | 118 | 0.624521 |
03fba1c732f75be3ee10f321b26a15b5180d72a7 | 970 | py | Python | fixture/application.py | anna-inboxx/python_training | 0a2c1a393894eb3d999d77a0ae1f9366ecb88185 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | anna-inboxx/python_training | 0a2c1a393894eb3d999d77a0ae1f9366ecb88185 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | anna-inboxx/python_training | 0a2c1a393894eb3d999d77a0ae1f9366ecb88185 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.wd.implicitly_wait(3)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
| 27.714286 | 65 | 0.604124 |
743fc4e262defa340d527119fec34fe047d43321 | 6,672 | py | Python | homeassistant/components/media_player/russound_rio.py | EmitKiwi/home-assistant | 0999e2ddc476f4bddf710005168b082f03a7cdc0 | [
"Apache-2.0"
] | 4 | 2019-05-14T20:33:43.000Z | 2021-09-25T14:56:08.000Z | homeassistant/components/media_player/russound_rio.py | EmitKiwi/home-assistant | 0999e2ddc476f4bddf710005168b082f03a7cdc0 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/media_player/russound_rio.py | EmitKiwi/home-assistant | 0999e2ddc476f4bddf710005168b082f03a7cdc0 | [
"Apache-2.0"
] | null | null | null | """
Support for Russound multizone controllers using RIO Protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.russound_rio/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.media_player import (
SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice, PLATFORM_SCHEMA,
MEDIA_TYPE_MUSIC)
from homeassistant.const import (
CONF_HOST, CONF_PORT, STATE_OFF, STATE_ON,
CONF_NAME, EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['russound_rio==0.1.3']
_LOGGER = logging.getLogger(__name__)
SUPPORT_RUSSOUND = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=9621): cv.port,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Russound RIO platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
from russound_rio import Russound
russ = Russound(hass.loop, host, port)
yield from russ.connect()
# Discover sources
sources = yield from russ.enumerate_sources()
# Discover zones
valid_zones = yield from russ.enumerate_zones()
devices = []
for zone_id, name in valid_zones:
yield from russ.watch_zone(zone_id)
dev = RussoundZoneDevice(russ, zone_id, name, sources)
devices.append(dev)
@callback
def on_stop(event):
"""Shutdown cleanly when hass stops."""
hass.loop.create_task(russ.close())
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_stop)
async_add_devices(devices)
class RussoundZoneDevice(MediaPlayerDevice):
"""Representation of a Russound Zone."""
def __init__(self, russ, zone_id, name, sources):
"""Initialize the zone device."""
super().__init__()
self._name = name
self._russ = russ
self._zone_id = zone_id
self._sources = sources
def _zone_var(self, name, default=None):
return self._russ.get_cached_zone_variable(self._zone_id,
name,
default)
def _source_var(self, name, default=None):
current = int(self._zone_var('currentsource', 0))
if current:
return self._russ.get_cached_source_variable(
current, name, default)
return default
def _source_na_var(self, name):
"""Will replace invalid values with None."""
current = int(self._zone_var('currentsource', 0))
if current:
value = self._russ.get_cached_source_variable(
current, name, None)
if value in (None, "", "------"):
return None
return value
else:
return None
def _zone_callback_handler(self, zone_id, *args):
if zone_id == self._zone_id:
self.schedule_update_ha_state()
def _source_callback_handler(self, source_id, *args):
current = int(self._zone_var('currentsource', 0))
if source_id == current:
self.schedule_update_ha_state()
@asyncio.coroutine
def async_added_to_hass(self):
"""Register callback handlers."""
self._russ.add_zone_callback(self._zone_callback_handler)
self._russ.add_source_callback(self._source_callback_handler)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the zone."""
return self._zone_var('name', self._name)
@property
def state(self):
"""Return the state of the device."""
status = self._zone_var('status', "OFF")
if status == 'ON':
return STATE_ON
elif status == 'OFF':
return STATE_OFF
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_RUSSOUND
@property
def source(self):
"""Get the currently selected source."""
return self._source_na_var('name')
@property
def source_list(self):
"""Return a list of available input sources."""
return [x[1] for x in self._sources]
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._source_na_var('songname')
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._source_na_var('artistname')
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._source_na_var('albumname')
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._source_na_var('coverarturl')
@property
def volume_level(self):
"""Volume level of the media player (0..1).
Value is returned based on a range (0..50).
Therefore float divide by 50 to get to the required range.
"""
return float(self._zone_var('volume', 0)) / 50.0
def async_turn_off(self):
"""Turn off the zone."""
return self._russ.send_zone_event(self._zone_id,
"ZoneOff")
def async_turn_on(self):
"""Turn on the zone."""
return self._russ.send_zone_event(self._zone_id,
"ZoneOn")
def async_set_volume_level(self, volume):
"""Set the volume level."""
rvol = int(volume * 50.0)
return self._russ.send_zone_event(self._zone_id,
"KeyPress",
"Volume",
rvol)
def async_select_source(self, source):
"""Select the source input for this zone."""
for source_id, name in self._sources:
if name.lower() != source.lower():
continue
return self._russ.send_zone_event(
self._zone_id, "SelectSource", source_id)
| 31.17757 | 79 | 0.623951 |
2204d449944993f79beecd881dc403a5862b8285 | 1,918 | py | Python | day7/bags.py | PatrickKalkman/Advent-of-Code-2020 | 74e6dad69eb82f16c58db7a589720cf6d21d1a8f | [
"MIT"
] | null | null | null | day7/bags.py | PatrickKalkman/Advent-of-Code-2020 | 74e6dad69eb82f16c58db7a589720cf6d21d1a8f | [
"MIT"
] | null | null | null | day7/bags.py | PatrickKalkman/Advent-of-Code-2020 | 74e6dad69eb82f16c58db7a589720cf6d21d1a8f | [
"MIT"
] | null | null | null |
def construct_rules(rules_list):
rules = []
for rule_line in rules_list:
parts = rule_line.split("bags contain")
color_key = parts[0].strip()
contents = parts[1].split(",")
internal_colors = []
for content_part in contents:
content_part = content_part.strip().replace(
"bags", "").replace("bag", "").replace(".", "").strip()
if content_part == "no other":
color = ""
amount = 1
else:
amount = int(content_part[0:1])
color = content_part[1:].strip()
internal_colors.append((color, amount))
rules.append((color_key, internal_colors))
return rules
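# Shape of the parsed rules (illustrative line, not from the real input):
#   "light red bags contain 1 bright white bag, 2 muted yellow bags."
#   -> ('light red', [('bright white', 1), ('muted yellow', 2)])
# Bags that contain nothing are parsed as [('', 1)].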
color_counted = []
def construct_rules_tree(rules, sep, start_color):
number_of_bags = 0
for top_color, internal_color in rules:
for color, amount in internal_color:
if color == start_color:
if top_color not in color_counted:
color_counted.append(top_color)
# print(top_color + sep + color)
number_of_bags += 1
number_of_bags += construct_rules_tree(
rules, sep + " -> ", top_color)
return number_of_bags
color_count = {}
def count_bags_contained(rules, start_color):
    # Count how many bags a bag of start_color must contain in total.
    total_bags = 0
    for top_color, internal_color in rules:
        if top_color == start_color:
            for color, amount in internal_color:
                if color == "":  # sentinel for "no other bags"
                    continue
                print(f"{top_color}->{amount}*{color}")
                # each contained bag counts itself plus everything inside it
                total_bags += amount + amount * count_bags_contained(rules, color)
    return total_bags
with open('input.txt', 'r') as reader:
rules = reader.readlines()
rules_parsed = construct_rules(rules)
amount = construct_rules_tree(rules_parsed, " -> ", "shiny gold")
print(amount)
result = count_bags_contained(rules_parsed, "shiny gold")
print(result)
| 29.96875 | 71 | 0.578206 |
ae18cb38b9ef54e9bc09b4b2f4fbaa46556af681 | 462 | py | Python | setup.py | xavierhardy/harbinger | 66455ee77280c4779f3a57ffa42f6f728812197f | [
"Apache-2.0"
] | 1 | 2018-07-24T20:09:27.000Z | 2018-07-24T20:09:27.000Z | setup.py | xavierhardy/harbinger | 66455ee77280c4779f3a57ffa42f6f728812197f | [
"Apache-2.0"
] | 13 | 2018-07-27T22:55:54.000Z | 2022-03-29T21:54:19.000Z | setup.py | xavierhardy/harbinger | 66455ee77280c4779f3a57ffa42f6f728812197f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
setup(
name="harbinger",
version="0.0.1",
description="Abstract remote shell connection through several libraries, "
"designed for performance",
author="Xavier Hardy",
url="https://github.com/xavierhardy/harbinger",
packages=[],
extras_require={
"docs": ["sphinx"],
"paramiko": ["paramiko==2.4.1"],
"ssh2": ["ssh2-python==0.15.0.post9"],
},
)
| 22 | 78 | 0.614719 |
1d528b838c024d430c37e4c77e5b4605c8e095e9 | 3,209 | py | Python | appEngine-DataStore/labs/fortune-teller/solution/main.py | silpillasil/DANK_CSSI_STUFFS | e1aa1d2b86cfe69a359d3084f0ed899102f7fe6c | [
"Apache-2.0"
] | null | null | null | appEngine-DataStore/labs/fortune-teller/solution/main.py | silpillasil/DANK_CSSI_STUFFS | e1aa1d2b86cfe69a359d3084f0ed899102f7fe6c | [
"Apache-2.0"
] | null | null | null | appEngine-DataStore/labs/fortune-teller/solution/main.py | silpillasil/DANK_CSSI_STUFFS | e1aa1d2b86cfe69a359d3084f0ed899102f7fe6c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os
import jinja2
import random
from models import Movie
def get_fortune():
fortune_list=['Tomorrow, you will meet a life-changing new friend.',
'Fame and Instagram followers are headed your way.',
'On the Tuesday after next, an odd meeting will lead to a new opportunity.',
'Despite dry skies, bring an umbrella tomorrow.',
'A thrilling time is in your immediate future.',
'Someone has Googled you recently.',
'Stay alert. You will be part of a rescue mission.',
'You will beat Watson in a game of Jeopardy. Start studying though']
return(random.choice(fortune_list))
#remember, you can get this by searching for jinja2 google app engine
jinja_current_directory = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class FortuneHandler(webapp2.RequestHandler):
def get(self):
start_template=jinja_current_directory.get_template("templates/fortune_welcome.html")
self.response.write(start_template.render())
def post(self):
random_fortune = get_fortune()
astro_sign = self.request.get('user_astrological_sign')
my_dict={'the_fortune':random_fortune, 'the_astro_sign':astro_sign}
end_template=jinja_current_directory.get_template("templates/fortune_results.html")
#astro_sign = request.form.get('user_astrological_sign')
self.response.write(end_template.render(my_dict))
class DataDemoHandler(webapp2.RequestHandler):
def get(self):
return
def post(self):
title = self.request.get('movie_title')
runtime = int(self.request.get('movie_runtime'))
rating = float(self.request.get('movie_rating'))
        my_movie = Movie(title=title, runtime_mins=runtime, rating=rating)
        # persist the new entity (assumes Movie is a Datastore model exposing put())
        my_movie.put()
app = webapp2.WSGIApplication([
('/', FortuneHandler)
], debug=True)
| 38.202381 | 94 | 0.712371 |
eb29e30de11c78ea1955b7c988986ec3b04b31b3 | 16,687 | py | Python | gevent/queue.py | patricklx/gevent | 4bfd06c708e7f408a086398babe8edeb9e74b094 | [
"MIT"
] | null | null | null | gevent/queue.py | patricklx/gevent | 4bfd06c708e7f408a086398babe8edeb9e74b094 | [
"MIT"
] | null | null | null | gevent/queue.py | patricklx/gevent | 4bfd06c708e7f408a086398babe8edeb9e74b094 | [
"MIT"
] | null | null | null | # Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
"""Synchronized queues.
The :mod:`gevent.queue` module implements multi-producer, multi-consumer queues
that work across greenlets, with the API similar to the classes found in the
standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>` modules.
Changed in version 1.0: Queue(0) now means queue of infinite size, not a channel.
The classes in this module implement iterator protocol. Iterating over queue
means repeatedly calling :meth:`get <Queue.get>` until :meth:`get <Queue.get>` returns ``StopIteration``.
>>> queue = gevent.queue.Queue()
>>> queue.put(1)
>>> queue.put(2)
>>> queue.put(StopIteration)
>>> for item in queue:
... print(item)
1
2
"""
from __future__ import absolute_import
import sys
import heapq
import collections
if sys.version_info[0] == 2:
import Queue as __queue__
else:
import queue as __queue__
Full = __queue__.Full
Empty = __queue__.Empty
from gevent.timeout import Timeout
from gevent.hub import get_hub, Waiter, getcurrent, PY3
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue', 'Channel']
class Queue(object):
"""Create a queue object with a given maximum size.
If *maxsize* is less than or equal to zero or ``None``, the queue size is infinite.
"""
def __init__(self, maxsize=None, items=None):
if maxsize is not None and maxsize <= 0:
self.maxsize = None
if maxsize == 0:
import warnings
warnings.warn('Queue(0) now equivalent to Queue(None); if you want a channel, use Channel',
DeprecationWarning, stacklevel=2)
else:
self.maxsize = maxsize
self.getters = set()
self.putters = set()
self.hub = get_hub()
self._event_unlock = None
if items:
self._init(maxsize, items)
else:
self._init(maxsize)
# QQQ make maxsize into a property with setter that schedules unlock if necessary
def copy(self):
return type(self)(self.maxsize, self.queue)
def _init(self, maxsize, items=None):
if items:
self.queue = collections.deque(items)
else:
self.queue = collections.deque()
def _get(self):
return self.queue.popleft()
def _peek(self):
return self.queue[0]
def _put(self, item):
self.queue.append(item)
def __repr__(self):
return '<%s at %s%s>' % (type(self).__name__, hex(id(self)), self._format())
def __str__(self):
return '<%s%s>' % (type(self).__name__, self._format())
def _format(self):
result = []
if self.maxsize is not None:
result.append('maxsize=%r' % (self.maxsize, ))
if getattr(self, 'queue', None):
result.append('queue=%r' % (self.queue, ))
if self.getters:
result.append('getters[%s]' % len(self.getters))
if self.putters:
result.append('putters[%s]' % len(self.putters))
if result:
return ' ' + ' '.join(result)
else:
return ''
def qsize(self):
"""Return the size of the queue."""
return len(self.queue)
def empty(self):
"""Return ``True`` if the queue is empty, ``False`` otherwise."""
return not self.qsize()
def full(self):
"""Return ``True`` if the queue is full, ``False`` otherwise.
``Queue(None)`` is never full.
"""
return self.maxsize is not None and self.qsize() >= self.maxsize
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional arg *block* is true and *timeout* is ``None`` (the default),
block if necessary until a free slot is available. If *timeout* is
a positive number, it blocks at most *timeout* seconds and raises
the :class:`Full` exception if no free slot was available within that time.
Otherwise (*block* is false), put an item on the queue if a free slot
is immediately available, else raise the :class:`Full` exception (*timeout*
is ignored in that case).
"""
if self.maxsize is None or self.qsize() < self.maxsize:
# there's a free slot, put an item right away
self._put(item)
if self.getters:
self._schedule_unlock()
elif self.hub is getcurrent():
# We're in the mainloop, so we cannot wait; we can switch to other greenlets though.
# Check if possible to get a free slot in the queue.
while self.getters and self.qsize() and self.qsize() >= self.maxsize:
getter = self.getters.pop()
getter.switch(getter)
if self.qsize() < self.maxsize:
self._put(item)
return
raise Full
elif block:
waiter = ItemWaiter(item, self)
self.putters.add(waiter)
timeout = Timeout.start_new(timeout, Full)
try:
if self.getters:
self._schedule_unlock()
result = waiter.get()
assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
finally:
timeout.cancel()
self.putters.discard(waiter)
else:
raise Full
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the :class:`Full` exception.
"""
self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args *block* is true and *timeout* is ``None`` (the default),
block if necessary until an item is available. If *timeout* is a positive number,
it blocks at most *timeout* seconds and raises the :class:`Empty` exception
if no item was available within that time. Otherwise (*block* is false), return
an item if one is immediately available, else raise the :class:`Empty` exception
(*timeout* is ignored in that case).
"""
if self.qsize():
if self.putters:
self._schedule_unlock()
return self._get()
elif self.hub is getcurrent():
# special case to make get_nowait() runnable in the mainloop greenlet
# there are no items in the queue; try to fix the situation by unlocking putters
while self.putters:
self.putters.pop().put_and_switch()
if self.qsize():
return self._get()
raise Empty
elif block:
waiter = Waiter()
timeout = Timeout.start_new(timeout, Empty)
try:
self.getters.add(waiter)
if self.putters:
self._schedule_unlock()
result = waiter.get()
assert result is waiter, 'Invalid switch into Queue.get: %r' % (result, )
return self._get()
finally:
self.getters.discard(waiter)
timeout.cancel()
else:
raise Empty
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the :class:`Empty` exception.
"""
return self.get(False)
def peek(self, block=True, timeout=None):
"""Return an item from the queue without removing it.
If optional args *block* is true and *timeout* is ``None`` (the default),
block if necessary until an item is available. If *timeout* is a positive number,
it blocks at most *timeout* seconds and raises the :class:`Empty` exception
if no item was available within that time. Otherwise (*block* is false), return
an item if one is immediately available, else raise the :class:`Empty` exception
(*timeout* is ignored in that case).
"""
if self.qsize():
return self._peek()
elif self.hub is getcurrent():
# special case to make peek(False) runnable in the mainloop greenlet
# there are no items in the queue; try to fix the situation by unlocking putters
while self.putters:
self.putters.pop().put_and_switch()
if self.qsize():
return self._peek()
raise Empty
elif block:
waiter = Waiter()
timeout = Timeout.start_new(timeout, Empty)
try:
self.getters.add(waiter)
if self.putters:
self._schedule_unlock()
result = waiter.get()
assert result is waiter, 'Invalid switch into Queue.peek: %r' % (result, )
return self._peek()
finally:
self.getters.discard(waiter)
timeout.cancel()
else:
raise Empty
def peek_nowait(self):
return self.peek(False)
def _unlock(self):
while True:
repeat = False
if self.putters and (self.maxsize is None or self.qsize() < self.maxsize):
repeat = True
try:
putter = self.putters.pop()
self._put(putter.item)
except:
putter.throw(*sys.exc_info())
else:
putter.switch(putter)
if self.getters and self.qsize():
repeat = True
getter = self.getters.pop()
getter.switch(getter)
if not repeat:
return
def _schedule_unlock(self):
if not self._event_unlock:
self._event_unlock = self.hub.loop.run_callback(self._unlock)
def __iter__(self):
return self
def next(self):
result = self.get()
if result is StopIteration:
raise result
return result
if PY3:
__next__ = next
del next
class ItemWaiter(Waiter):
__slots__ = ['item', 'queue']
def __init__(self, item, queue):
Waiter.__init__(self)
self.item = item
self.queue = queue
def put_and_switch(self):
self.queue._put(self.item)
self.queue = None
self.item = None
return self.switch(self)
class PriorityQueue(Queue):
'''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: ``(priority number, data)``.
'''
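    # e.g. q.put((2, 'low')); q.put((1, 'high')); q.get() returns (1, 'high')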
def _init(self, maxsize, items=None):
if items:
self.queue = list(items)
else:
self.queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
class LifoQueue(Queue):
'''A subclass of :class:`Queue` that retrieves most recently added entries first.'''
def _init(self, maxsize, items=None):
if items:
self.queue = list(items)
else:
self.queue = []
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
class JoinableQueue(Queue):
'''A subclass of :class:`Queue` that additionally has :meth:`task_done` and :meth:`join` methods.'''
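    # Typical pattern (sketch; handle() is a placeholder for real work):
    #   q = JoinableQueue(); q.put(job)               # producer
    #   item = q.get(); handle(item); q.task_done()   # consumer
    #   q.join()   # blocks until every queued item has been task_done()'d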
def __init__(self, maxsize=None, items=None, unfinished_tasks=None):
from gevent.event import Event
Queue.__init__(self, maxsize, items)
self.unfinished_tasks = unfinished_tasks or 0
self._cond = Event()
self._cond.set()
def copy(self):
return type(self)(self.maxsize, self.queue, self.unfinished_tasks)
def _format(self):
result = Queue._format(self)
if self.unfinished_tasks:
result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
return result
def _put(self, item):
Queue._put(self, item)
self.unfinished_tasks += 1
self._cond.clear()
def task_done(self):
'''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
that the processing on the task is complete.
If a :meth:`join` is currently blocking, it will resume when all items have been processed
(meaning that a :meth:`task_done` call was received for every item that had been
:meth:`put <Queue.put>` into the queue).
Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
'''
if self.unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self.unfinished_tasks -= 1
if self.unfinished_tasks == 0:
self._cond.set()
def join(self):
'''Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the queue.
The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
that the item was retrieved and all work on it is complete. When the count of
unfinished tasks drops to zero, :meth:`join` unblocks.
'''
self._cond.wait()
class Channel(object):
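    # A zero-buffer, rendezvous-style queue: qsize() is always 0 and a put()
    # only completes once a get() is waiting for it (and vice versa).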
def __init__(self):
self.getters = collections.deque()
self.putters = collections.deque()
self.hub = get_hub()
self._event_unlock = None
def __repr__(self):
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
def __str__(self):
return '<%s %s>' % (type(self).__name__, self._format())
def _format(self):
result = ''
if self.getters:
result += ' getters[%s]' % len(self.getters)
if self.putters:
result += ' putters[%s]' % len(self.putters)
return result
@property
def balance(self):
return len(self.putters) - len(self.getters)
def qsize(self):
return 0
def empty(self):
return True
def full(self):
return True
def put(self, item, block=True, timeout=None):
if self.hub is getcurrent():
if self.getters:
getter = self.getters.popleft()
getter.switch(item)
return
raise Full
if not block:
timeout = 0
waiter = Waiter()
item = (item, waiter)
self.putters.append(item)
timeout = Timeout.start_new(timeout, Full)
try:
if self.getters:
self._schedule_unlock()
result = waiter.get()
assert result is waiter, "Invalid switch into Channel.put: %r" % (result, )
except:
self._discard(item)
raise
finally:
timeout.cancel()
def _discard(self, item):
try:
self.putters.remove(item)
except ValueError:
pass
def put_nowait(self, item):
self.put(item, False)
def get(self, block=True, timeout=None):
if self.hub is getcurrent():
if self.putters:
item, putter = self.putters.popleft()
self.hub.loop.run_callback(putter.switch, putter)
return item
if not block:
timeout = 0
waiter = Waiter()
timeout = Timeout.start_new(timeout, Empty)
try:
self.getters.append(waiter)
if self.putters:
self._schedule_unlock()
return waiter.get()
except:
self.getters.remove(waiter)
raise
finally:
timeout.cancel()
def get_nowait(self):
return self.get(False)
def _unlock(self):
while self.putters and self.getters:
getter = self.getters.popleft()
item, putter = self.putters.popleft()
getter.switch(item)
putter.switch(putter)
def _schedule_unlock(self):
if not self._event_unlock:
self._event_unlock = self.hub.loop.run_callback(self._unlock)
def __iter__(self):
return self
def next(self):
result = self.get()
if result is StopIteration:
raise result
return result
| 32.719608 | 117 | 0.577515 |
84f0d9b3238e80cb7f217330856ae199823eea36 | 4,755 | py | Python | django/engagementmanager/service/checklist_audit_log_service.py | onap/vvp-engagementmgr | 8d2108708e7c55cc753b956563c535177f92d0d9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | django/engagementmanager/service/checklist_audit_log_service.py | onap/vvp-engagementmgr | 8d2108708e7c55cc753b956563c535177f92d0d9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | django/engagementmanager/service/checklist_audit_log_service.py | onap/vvp-engagementmgr | 8d2108708e7c55cc753b956563c535177f92d0d9 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-19T15:17:09.000Z | 2021-10-19T15:17:09.000Z | #
# ============LICENSE_START==========================================
# org.onap.vvp/engagementmgr
# ===================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the “License”);
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the “License”);
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
import json
from engagementmanager.models import ChecklistDecision,\
ChecklistAuditLog, Checklist
from engagementmanager.serializers import ThinChecklistAuditLogModelSerializer
from engagementmanager.service.logging_service import LoggingServiceFactory
logger = LoggingServiceFactory.get_logger()
def addAuditLogToDecision(decision, description, user, category=''):
"""
expected: decisionUuid(string), description(string),\
user(object), category is optional(string)
    result: a new audit log object will be created and \
attached to a decision object.
"""
audit = ChecklistAuditLog.objects.create(decision=decision,
description=description,
category=category, creator=user)
auditData = ThinChecklistAuditLogModelSerializer(audit).data
return auditData
def getAuditLogsWithDecision(decisionUuid, user):
"""
expected: decisionUuid(string), user(object)
    result: all audit log objects attached to the decision \
    will be returned as JSON.
"""
data = dict()
if decisionUuid == '' or not user:
msg = "checklistUuid or user == None"
logger.error(msg)
msg = "AuditLogs were not retrieved due to bad parameters"
raise KeyError(msg)
decision = ChecklistDecision.objects.get(uuid=decisionUuid)
audits = ChecklistAuditLog.objects.filter(decision=decision)
data['audits'] = ThinChecklistAuditLogModelSerializer(
audits, many=True).data
auditsData = json.dumps(data, ensure_ascii=False)
return auditsData
def addAuditLogToChecklist(checklist, description, user, category=''):
"""
expected: checklistUuid(string), description(string), user(object), \
category is optional(string)
    result: a new audit log object will be created and attached \
to a checklist object.
"""
audit = ChecklistAuditLog.objects.create(checklist=checklist,
description=description,
category=category, creator=user)
auditData = ThinChecklistAuditLogModelSerializer(audit).data
logger.debug("audit log was successfully updated")
return auditData
def getAuditLogsWithChecklist(checklistUuid, user):
"""
expected: checklistUuid(string), user(object)
    result: all audit log objects attached to the checklist \
    will be returned as JSON.
"""
data = dict()
if checklistUuid == '' or not user: # @UndefinedVariable
msg = "checklistUuid or user == None"
logger.error(msg)
msg = "AuditLogs were not retrieved due to bad parameters"
raise KeyError(msg)
checklist = Checklist.objects.get(uuid=checklistUuid)
audits = ChecklistAuditLog.objects.filter(checklist=checklist)
data['audits'] = ThinChecklistAuditLogModelSerializer(
audits, many=True).data
auditsData = json.dumps(data, ensure_ascii=False)
return auditsData
| 40.641026 | 78 | 0.674238 |
c47b3e37ecf172fa15b89215170930cbad17de6c | 1,063 | py | Python | src/python/WMCore/MicroService/Unified/Injector.py | ramonbrugman/WMCore | 7faea49e870a1a08459509c7fc681b0ce09cb2b9 | [
"Apache-2.0"
] | null | null | null | src/python/WMCore/MicroService/Unified/Injector.py | ramonbrugman/WMCore | 7faea49e870a1a08459509c7fc681b0ce09cb2b9 | [
"Apache-2.0"
] | null | null | null | src/python/WMCore/MicroService/Unified/Injector.py | ramonbrugman/WMCore | 7faea49e870a1a08459509c7fc681b0ce09cb2b9 | [
"Apache-2.0"
] | null | null | null | """
File : UnifiedInjectorManager.py
Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
Description: UnifiedInjectorManager class provides full functionality of the UnifiedInjector service.
"""
# futures
from __future__ import division
# system modules
from builtins import object
import time
# Unified modules
from Utils.ProcessStats import processStatus, threadStack
class UnifiedInjectorManager(object):
"""
Initialize UnifiedInjectorManager class
"""
def __init__(self, config=None):
self.config = config
self.time0 = time.time()
self.state = None
self.ncalls = 0
def status(self):
"Return current status about UnifiedInjector"
sdict = {'server': processStatus()}
sdict['server'].update({'uptime': time.time()-self.time0,\
'ncalls': self.ncalls, 'state': self.state})
sdict.update(threadStack())
return sdict
def request(self, **kwargs):
"Process request given to UnifiedInjector"
return {'state': self.state}
| 27.25641 | 101 | 0.672625 |
4317febdeb4c88700a1a498533207847d8eae8b3 | 37,805 | py | Python | plotly/offline/offline.py | malmaud/plotly.py | bcf4a2a0ca01b2568ccc521fc0cd6fd78c2edb0f | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/offline/offline.py | Abd-Elrazek/plotly.py | 568f577cbee3ecca3fd7b6493b7f0c9901acfb65 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/offline/offline.py | Abd-Elrazek/plotly.py | 568f577cbee3ecca3fd7b6493b7f0c9901acfb65 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | """ Plotly Offline
A module to use Plotly's graphing library with Python
without connecting to a public or private plotly enterprise
server.
"""
from __future__ import absolute_import
import os
import uuid
import warnings
import pkgutil
import time
import webbrowser
import six
from requests.compat import json as _json
import plotly
from plotly import optional_imports, tools, utils
from plotly.exceptions import PlotlyError
from ._plotlyjs_version import __plotlyjs_version__
ipython = optional_imports.get_module('IPython')
ipython_display = optional_imports.get_module('IPython.display')
matplotlib = optional_imports.get_module('matplotlib')
__PLOTLY_OFFLINE_INITIALIZED = False
__IMAGE_FORMATS = ['jpeg', 'png', 'webp', 'svg']
def download_plotlyjs(download_url):
warnings.warn('''
`download_plotlyjs` is deprecated and will be removed in the
next release. plotly.js is shipped with this module, it is no
longer necessary to download this bundle separately.
''', DeprecationWarning)
pass
def get_plotlyjs_version():
"""
Returns the version of plotly.js that is bundled with plotly.py.
Returns
-------
str
Plotly.js version string
"""
return __plotlyjs_version__
def get_plotlyjs():
"""
Return the contents of the minified plotly.js library as a string.
This may be useful when building standalone HTML reports.
Returns
-------
str
Contents of the minified plotly.js library as a string
Examples
--------
Here is an example of creating a standalone HTML report that contains
two plotly figures, each in their own div. The include_plotlyjs argument
is set to False when creating the divs so that we don't include multiple
copies of the plotly.js library in the output. Instead, a single copy
of plotly.js is included in a script tag in the html head element.
>>> import plotly.graph_objs as go
>>> from plotly.offline import plot, get_plotlyjs
>>> fig1 = go.Figure(data=[{'type': 'bar', 'y': [1, 3, 2]}],
... layout={'height': 400})
>>> fig2 = go.Figure(data=[{'type': 'scatter', 'y': [1, 3, 2]}],
... layout={'height': 400})
>>> div1 = plot(fig1, output_type='div', include_plotlyjs=False)
>>> div2 = plot(fig2, output_type='div', include_plotlyjs=False)
>>> html = '''
... <html>
... <head>
... <script type="text/javascript">{plotlyjs}</script>
... </head>
... <body>
... {div1}
... {div2}
... </body>
... </html>
...'''.format(plotlyjs=get_plotlyjs(), div1=div1, div2=div2)
>>> with open('multi_plot.html', 'w') as f:
... f.write(html)
"""
path = os.path.join('package_data', 'plotly.min.js')
plotlyjs = pkgutil.get_data('plotly', path).decode('utf-8')
return plotlyjs
def _build_resize_script(plotdivid, plotly_root='Plotly'):
resize_script = (
'<script type="text/javascript">'
'window.addEventListener("resize", function(){{'
'{plotly_root}.Plots.resize(document.getElementById("{id}"));}});'
'</script>'
).format(plotly_root=plotly_root, id=plotdivid)
return resize_script
def _build_mathjax_script(url):
return ('<script src="{url}?config=TeX-AMS-MML_SVG"></script>'
.format(url=url))
def _get_jconfig(config):
configkeys = (
'staticPlot',
'plotlyServerURL',
'editable',
'edits',
'autosizable',
'responsive',
'queueLength',
'fillFrame',
'frameMargins',
'scrollZoom',
'doubleClick',
'showTips',
'showAxisDragHandles',
'showAxisRangeEntryBoxes',
'showLink',
'sendData',
'showSendToCloud',
'linkText',
'showSources',
'displayModeBar',
'modeBarButtonsToRemove',
'modeBarButtonsToAdd',
'modeBarButtons',
'toImageButtonOptions',
'displaylogo',
'watermark',
'plotGlPixelRatio',
'setBackground',
'topojsonURL',
'mapboxAccessToken',
'logging',
'globalTransforms',
'locale',
'locales',
)
if config and isinstance(config, dict):
# Warn user on unrecognized config options. We make this a warning
# rather than an error since we don't have code generation logic in
# place yet to guarantee that the config options in plotly.py are up
# to date
bad_config = [k for k in config if k not in configkeys]
if bad_config:
warnings.warn("""
Unrecognized config options supplied: {bad_config}"""
.format(bad_config=bad_config))
clean_config = config
else:
config = plotly.plotly.get_config()
clean_config = dict((k, config[k]) for k in configkeys if k in config)
# TODO: The get_config 'source of truth' should
# really be somewhere other than plotly.plotly
plotly_platform_url = plotly.plotly.get_config().get('plotly_domain',
'https://plot.ly')
clean_config['plotlyServerURL'] = plotly_platform_url
if (plotly_platform_url != 'https://plot.ly' and
clean_config.get('linkText', None) == 'Export to plot.ly'):
link_domain = plotly_platform_url\
.replace('https://', '')\
.replace('http://', '')
link_text = clean_config['linkText'].replace('plot.ly', link_domain)
clean_config['linkText'] = link_text
return clean_config
# Build script to set global PlotlyConfig object. This must execute before
# plotly.js is loaded.
_window_plotly_config = """\
<script type="text/javascript">\
window.PlotlyConfig = {MathJaxConfig: 'local'};\
</script>"""
_mathjax_config = """\
<script type="text/javascript">\
if (window.MathJax) {MathJax.Hub.Config({SVG: {font: "STIX-Web"}});}\
</script>"""
def get_image_download_script(caller):
"""
This function will return a script that will download an image of a Plotly
plot.
Keyword Arguments:
caller ('plot', 'iplot') -- specifies which function made the call for the
download script. If `iplot`, then an extra condition is added into the
download script to ensure that download prompts aren't initiated on
page reloads.
"""
if caller == 'iplot':
check_start = 'if(document.readyState == \'complete\') {{'
check_end = '}}'
elif caller == 'plot':
check_start = ''
check_end = ''
else:
raise ValueError('caller should only be one of `iplot` or `plot`')
return(
('<script>'
'function downloadimage(format, height, width,'
' filename) {{'
'var p = document.getElementById(\'{plot_id}\');'
'Plotly.downloadImage(p, {{format: format, height: height, '
'width: width, filename: filename}});}};' +
check_start +
'{{downloadimage(\'{format}\', {height}, {width}, '
'\'{filename}\');}}' +
check_end +
'</script>')
)
def init_notebook_mode(connected=False):
"""
Initialize plotly.js in the browser if it hasn't been loaded into the DOM
yet. This is an idempotent method and can and should be called from any
offline methods that require plotly.js to be loaded into the notebook dom.
Keyword arguments:
connected (default=False) -- If True, the plotly.js library will be loaded
from an online CDN. If False, the plotly.js library will be loaded locally
from the plotly python package
Use `connected=True` if you want your notebooks to have smaller file sizes.
In the case where `connected=False`, the entirety of the plotly.js library
will be loaded into the notebook, which will result in a file-size increase
of a couple megabytes. Additionally, because the library will be downloaded
from the web, you and your viewers must be connected to the internet to be
able to view charts within this notebook.
Use `connected=False` if you want you and your collaborators to be able to
create and view these charts regardless of the availability of an internet
connection. This is the default option since it is the most predictable.
Note that under this setting the library will be included inline inside
your notebook, resulting in much larger notebook sizes compared to the case
where `connected=True`.
"""
if not ipython:
raise ImportError('`iplot` can only run inside an IPython Notebook.')
global __PLOTLY_OFFLINE_INITIALIZED
if connected:
# Inject plotly.js into the output cell
script_inject = (
'{win_config}'
'{mathjax_config}'
'<script>'
'requirejs.config({{'
'paths: {{ '
# Note we omit the extension .js because require will include it.
'\'plotly\': [\'https://cdn.plot.ly/plotly-latest.min\']}},'
'}});'
'if(!window._Plotly) {{'
'require([\'plotly\'],'
'function(plotly) {{window._Plotly=plotly;}});'
'}}'
'</script>'
).format(win_config=_window_plotly_config,
mathjax_config=_mathjax_config)
else:
# Inject plotly.js into the output cell
script_inject = (
'{win_config}'
'{mathjax_config}'
'<script type=\'text/javascript\'>'
'if(!window._Plotly){{'
'define(\'plotly\', function(require, exports, module) {{'
'{script}'
'}});'
'require([\'plotly\'], function(Plotly) {{'
'window._Plotly = Plotly;'
'}});'
'}}'
'</script>'
'').format(script=get_plotlyjs(),
win_config=_window_plotly_config,
mathjax_config=_mathjax_config)
display_bundle = {
'text/html': script_inject,
'text/vnd.plotly.v1+html': script_inject
}
ipython_display.display(display_bundle, raw=True)
__PLOTLY_OFFLINE_INITIALIZED = True
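# Typical notebook usage of the function above (an illustrative sketch, not part
# of the original module):
#   from plotly.offline import init_notebook_mode, iplot
#   init_notebook_mode(connected=True)  # load plotly.js from the online CDN
#   iplot([{'x': [1, 2, 3], 'y': [3, 1, 2]}])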
def _plot_html(figure_or_data, config, validate, default_width,
default_height, global_requirejs):
figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)
width = figure.get('layout', {}).get('width', default_width)
height = figure.get('layout', {}).get('height', default_height)
try:
float(width)
except (ValueError, TypeError):
pass
else:
width = str(width) + 'px'
try:
float(height)
except (ValueError, TypeError):
pass
else:
height = str(height) + 'px'
plotdivid = uuid.uuid4()
jdata = _json.dumps(figure.get('data', []), cls=utils.PlotlyJSONEncoder)
jlayout = _json.dumps(figure.get('layout', {}),
cls=utils.PlotlyJSONEncoder)
if figure.get('frames', None):
jframes = _json.dumps(figure.get('frames', []),
cls=utils.PlotlyJSONEncoder)
else:
jframes = None
jconfig = _json.dumps(_get_jconfig(config))
plotly_platform_url = plotly.plotly.get_config().get('plotly_domain',
'https://plot.ly')
if jframes:
script = '''
Plotly.plot(
'{id}',
{data},
{layout},
{config}
).then(function () {add_frames}).then(function(){animate})
'''.format(
id=plotdivid,
data=jdata,
layout=jlayout,
config=jconfig,
add_frames="{" + "return Plotly.addFrames('{id}',{frames}".format(
id=plotdivid, frames=jframes
) + ");}",
animate="{" + "Plotly.animate('{id}');".format(id=plotdivid) + "}"
)
else:
script = 'Plotly.newPlot("{id}", {data}, {layout}, {config})'.format(
id=plotdivid,
data=jdata,
layout=jlayout,
config=jconfig)
optional_line1 = ('require(["plotly"], function(Plotly) {{ '
if global_requirejs else '')
optional_line2 = ('}});' if global_requirejs else '')
plotly_html_div = (
''
'<div id="{id}" style="height: {height}; width: {width};" '
'class="plotly-graph-div">'
'</div>'
'<script type="text/javascript">' +
optional_line1 +
'window.PLOTLYENV=window.PLOTLYENV || {{}};'
'window.PLOTLYENV.BASE_URL="' + plotly_platform_url + '";'
'{script}' +
optional_line2 +
'</script>'
'').format(
id=plotdivid, script=script,
height=height, width=width)
return plotly_html_div, plotdivid, width, height
def iplot(figure_or_data, show_link=False, link_text='Export to plot.ly',
validate=True, image=None, filename='plot_image', image_width=800,
image_height=600, config=None):
"""
Draw plotly graphs inside an IPython or Jupyter notebook without
connecting to an external server.
To save the chart to Plotly Cloud or Plotly Enterprise, use
`plotly.plotly.iplot`.
To embed an image of the chart, use `plotly.image.ishow`.
figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or
dict or list that describes a Plotly graph.
See https://plot.ly/python/ for examples of
graph descriptions.
Keyword arguments:
show_link (default=False) -- display a link in the bottom-right corner of
of the chart that will export the chart to
Plotly Cloud or Plotly Enterprise
link_text (default='Export to plot.ly') -- the text of export link
validate (default=True) -- validate that all of the keys in the figure
are valid? omit if your version of plotly.js
has become outdated with your version of
graph_reference.json or if you need to include
extra, unnecessary keys in your figure.
image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
the format of the image to be downloaded, if we choose to download an
image. This parameter has a default value of None indicating that no
image should be downloaded. Please note: for higher resolution images
and more export options, consider making requests to our image servers.
Type: `help(py.image)` for more details.
    filename (default='plot_image') -- Sets the name of the file your image
will be saved to. The extension should not be included.
image_height (default=600) -- Specifies the height of the image in `px`.
image_width (default=800) -- Specifies the width of the image in `px`.
config (default=None) -- Plot view options dictionary. Keyword arguments
`show_link` and `link_text` set the associated options in this
dictionary if it doesn't contain them already.
Example:
```
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode()
iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}])
# We can also download an image of the plot by setting the image to the
    # format you want. e.g. `image='png'`
iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}], image='png')
```
"""
if not ipython:
raise ImportError('`iplot` can only run inside an IPython Notebook.')
config = dict(config) if config else {}
config.setdefault('showLink', show_link)
config.setdefault('linkText', link_text)
jconfig = _get_jconfig(config)
figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)
# Though it can add quite a bit to the display-bundle size, we include
# multiple representations of the plot so that the display environment can
# choose which one to act on.
data = _json.loads(_json.dumps(figure['data'],
cls=plotly.utils.PlotlyJSONEncoder))
layout = _json.loads(_json.dumps(figure.get('layout', {}),
cls=plotly.utils.PlotlyJSONEncoder))
frames = _json.loads(_json.dumps(figure.get('frames', None),
cls=plotly.utils.PlotlyJSONEncoder))
fig = {'data': data, 'layout': layout, 'config': jconfig}
if frames:
fig['frames'] = frames
display_bundle = {'application/vnd.plotly.v1+json': fig}
if __PLOTLY_OFFLINE_INITIALIZED:
plot_html, plotdivid, width, height = _plot_html(
figure_or_data, config, validate, '100%', 525, True
)
resize_script = ''
if width == '100%' or height == '100%':
resize_script = _build_resize_script(
plotdivid, 'window._Plotly')
display_bundle['text/html'] = plot_html + resize_script
display_bundle['text/vnd.plotly.v1+html'] = plot_html + resize_script
ipython_display.display(display_bundle, raw=True)
if image:
if not __PLOTLY_OFFLINE_INITIALIZED:
raise PlotlyError('\n'.join([
'Plotly Offline mode has not been initialized in this notebook. '
'Run: ',
'',
'import plotly',
'plotly.offline.init_notebook_mode() '
'# run at the start of every ipython notebook',
]))
if image not in __IMAGE_FORMATS:
raise ValueError('The image parameter must be one of the following'
': {}'.format(__IMAGE_FORMATS)
)
# if image is given, and is a valid format, we will download the image
script = get_image_download_script('iplot').format(format=image,
width=image_width,
height=image_height,
filename=filename,
plot_id=plotdivid)
# allow time for the plot to draw
time.sleep(1)
# inject code to download an image of the plot
ipython_display.display(ipython_display.HTML(script))
def plot(figure_or_data, show_link=False, link_text='Export to plot.ly',
validate=True, output_type='file', include_plotlyjs=True,
filename='temp-plot.html', auto_open=True, image=None,
image_filename='plot_image', image_width=800, image_height=600,
config=None, include_mathjax=False):
""" Create a plotly graph locally as an HTML document or string.
Example:
```
from plotly.offline import plot
import plotly.graph_objs as go
plot([go.Scatter(x=[1, 2, 3], y=[3, 2, 6])], filename='my-graph.html')
# We can also download an image of the plot by setting the image parameter
# to the image format we want
    plot([go.Scatter(x=[1, 2, 3], y=[3, 2, 6])], filename='my-graph.html',
image='jpeg')
```
More examples below.
figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or
dict or list that describes a Plotly graph.
See https://plot.ly/python/ for examples of
graph descriptions.
Keyword arguments:
show_link (default=False) -- display a link in the bottom-right corner of
of the chart that will export the chart to Plotly Cloud or
Plotly Enterprise
link_text (default='Export to plot.ly') -- the text of export link
validate (default=True) -- validate that all of the keys in the figure
are valid? omit if your version of plotly.js has become outdated
with your version of graph_reference.json or if you need to include
extra, unnecessary keys in your figure.
output_type ('file' | 'div' - default 'file') -- if 'file', then
the graph is saved as a standalone HTML file and `plot`
        returns the `file://` URL of the saved file.
If 'div', then `plot` returns a string that just contains the
HTML <div> that contains the graph and the script to generate the
graph.
Use 'file' if you want to save and view a single graph at a time
in a standalone HTML file.
Use 'div' if you are embedding these graphs in an HTML file with
        other graphs or HTML markup, like an HTML report or a website.
include_plotlyjs (True | False | 'cdn' | 'directory' | path - default=True)
Specifies how the plotly.js library is included in the output html
file or div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. HTML files generated with this option are about 3MB
smaller than those generated with include_plotlyjs=True, but they
require an active internet connection in order to load the plotly.js
library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file. If output_type='file' then the
plotly.min.js bundle is copied into the directory of the resulting
HTML file. If a file named plotly.min.js already exists in the output
directory then this file is left unmodified and no copy is performed.
HTML files generated with this option can be used offline, but they
require a copy of the plotly.min.js bundle in the same directory.
This option is useful when many figures will be saved as HTML files in
the same directory because the plotly.js source code will be included
only once per output directory, rather than once per output file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN.
If False, no script tag referencing plotly.js is included. This is
useful when output_type='div' and the resulting div string will be
placed inside an HTML document that already loads plotly.js. This
option is not advised when output_type='file' as it will result in
a non-functional html file.
filename (default='temp-plot.html') -- The local filename to save the
outputted chart to. If the filename already exists, it will be
overwritten. This argument only applies if `output_type` is 'file'.
auto_open (default=True) -- If True, open the saved file in a
web browser after saving.
This argument only applies if `output_type` is 'file'.
image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
the format of the image to be downloaded, if we choose to download an
image. This parameter has a default value of None indicating that no
image should be downloaded. Please note: for higher resolution images
and more export options, consider making requests to our image servers.
Type: `help(py.image)` for more details.
image_filename (default='plot_image') -- Sets the name of the file your
image will be saved to. The extension should not be included.
image_height (default=600) -- Specifies the height of the image in `px`.
image_width (default=800) -- Specifies the width of the image in `px`.
config (default=None) -- Plot view options dictionary. Keyword arguments
`show_link` and `link_text` set the associated options in this
dictionary if it doesn't contain them already.
include_mathjax (False | 'cdn' | path - default=False) --
Specifies how the MathJax.js library is included in the output html
file or div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output. HTML files generated with this option will not be able to
display LaTeX typesetting.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML files generated with this option will be
able to display LaTeX typesetting as long as they have internet access.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML file to an alternative CDN.
"""
if output_type not in ['div', 'file']:
raise ValueError(
"`output_type` argument must be 'div' or 'file'. "
"You supplied `" + output_type + "``")
if not filename.endswith('.html') and output_type == 'file':
warnings.warn(
"Your filename `" + filename + "` didn't end with .html. "
"Adding .html to the end of your file.")
filename += '.html'
config = dict(config) if config else {}
config.setdefault('showLink', show_link)
config.setdefault('linkText', link_text)
plot_html, plotdivid, width, height = _plot_html(
figure_or_data, config, validate,
'100%', '100%', global_requirejs=False)
# Build resize_script
resize_script = ''
if width == '100%' or height == '100%':
resize_script = _build_resize_script(plotdivid)
# Process include_plotlyjs and build plotly_js_script
include_plotlyjs_orig = include_plotlyjs
if isinstance(include_plotlyjs, six.string_types):
include_plotlyjs = include_plotlyjs.lower()
if include_plotlyjs == 'cdn':
plotly_js_script = _window_plotly_config + """\
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>"""
elif include_plotlyjs == 'directory':
plotly_js_script = (_window_plotly_config +
'<script src="plotly.min.js"></script>')
elif (isinstance(include_plotlyjs, six.string_types) and
include_plotlyjs.endswith('.js')):
plotly_js_script = (_window_plotly_config +
'<script src="{url}"></script>'.format(
url=include_plotlyjs_orig))
elif include_plotlyjs:
plotly_js_script = ''.join([
_window_plotly_config,
'<script type="text/javascript">',
get_plotlyjs(),
'</script>',
])
else:
plotly_js_script = ''
# Process include_mathjax and build mathjax_script
include_mathjax_orig = include_mathjax
if isinstance(include_mathjax, six.string_types):
include_mathjax = include_mathjax.lower()
if include_mathjax == 'cdn':
mathjax_script = _build_mathjax_script(
url=('https://cdnjs.cloudflare.com'
'/ajax/libs/mathjax/2.7.5/MathJax.js')) + _mathjax_config
elif (isinstance(include_mathjax, six.string_types) and
include_mathjax.endswith('.js')):
mathjax_script = _build_mathjax_script(
url=include_mathjax_orig) + _mathjax_config
elif not include_mathjax:
mathjax_script = ''
else:
raise ValueError("""\
Invalid value of type {typ} received as the include_mathjax argument
Received value: {val}
include_mathjax may be specified as False, 'cdn', or a string ending with '.js'
""".format(typ=type(include_mathjax), val=repr(include_mathjax)))
if output_type == 'file':
with open(filename, 'w') as f:
if image:
if image not in __IMAGE_FORMATS:
raise ValueError('The image parameter must be one of the '
'following: {}'.format(__IMAGE_FORMATS)
)
# if the check passes then download script is injected.
# write the download script:
script = get_image_download_script('plot')
script = script.format(format=image,
width=image_width,
height=image_height,
filename=image_filename,
plot_id=plotdivid)
else:
script = ''
f.write(''.join([
'<html>',
'<head><meta charset="utf-8" /></head>',
'<body>',
mathjax_script,
plotly_js_script,
plot_html,
resize_script,
script,
'</body>',
'</html>']))
# Check if we should copy plotly.min.js to output directory
if include_plotlyjs == 'directory':
bundle_path = os.path.join(
os.path.dirname(filename), 'plotly.min.js')
if not os.path.exists(bundle_path):
with open(bundle_path, 'w') as f:
f.write(get_plotlyjs())
url = 'file://' + os.path.abspath(filename)
if auto_open:
webbrowser.open(url)
return url
elif output_type == 'div':
return ''.join([
'<div>',
mathjax_script,
plotly_js_script,
plot_html,
resize_script,
'</div>',
])
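# Illustrative use of the output_type/include_plotlyjs options documented above
# (a sketch, not part of the original module):
#   import plotly.graph_objs as go
#   div = plot([go.Bar(x=['a', 'b'], y=[1, 3])], output_type='div',
#              include_plotlyjs='cdn')
#   # `div` can then be embedded in a page that loads plotly.js from the CDN.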
def plot_mpl(mpl_fig, resize=False, strip_style=False,
verbose=False, show_link=False, link_text='Export to plot.ly',
validate=True, output_type='file', include_plotlyjs=True,
filename='temp-plot.html', auto_open=True,
image=None, image_filename='plot_image',
image_height=600, image_width=800):
"""
Convert a matplotlib figure to a Plotly graph stored locally as HTML.
For more information on converting matplotlib visualizations to plotly
graphs, call help(plotly.tools.mpl_to_plotly)
For more information on creating plotly charts locally as an HTML document
or string, call help(plotly.offline.plot)
mpl_fig -- a matplotlib figure object to convert to a plotly graph
Keyword arguments:
resize (default=False) -- allow plotly to choose the figure size.
strip_style (default=False) -- allow plotly to choose style options.
verbose (default=False) -- print message.
show_link (default=False) -- display a link in the bottom-right corner of
of the chart that will export the chart to Plotly Cloud or
Plotly Enterprise
link_text (default='Export to plot.ly') -- the text of export link
validate (default=True) -- validate that all of the keys in the figure
are valid? omit if your version of plotly.js has become outdated
with your version of graph_reference.json or if you need to include
extra, unnecessary keys in your figure.
output_type ('file' | 'div' - default 'file') -- if 'file', then
the graph is saved as a standalone HTML file and `plot`
        returns the `file://` URL of the saved file.
If 'div', then `plot` returns a string that just contains the
HTML <div> that contains the graph and the script to generate the
graph.
Use 'file' if you want to save and view a single graph at a time
in a standalone HTML file.
Use 'div' if you are embedding these graphs in an HTML file with
        other graphs or HTML markup, like an HTML report or a website.
include_plotlyjs (default=True) -- If True, include the plotly.js
source code in the output file or string.
Set as False if your HTML file already contains a copy of the plotly.js
library.
filename (default='temp-plot.html') -- The local filename to save the
outputted chart to. If the filename already exists, it will be
overwritten. This argument only applies if `output_type` is 'file'.
auto_open (default=True) -- If True, open the saved file in a
web browser after saving.
This argument only applies if `output_type` is 'file'.
image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
the format of the image to be downloaded, if we choose to download an
image. This parameter has a default value of None indicating that no
image should be downloaded.
image_filename (default='plot_image') -- Sets the name of the file your
image will be saved to. The extension should not be included.
image_height (default=600) -- Specifies the height of the image in `px`.
image_width (default=800) -- Specifies the width of the image in `px`.
Example:
```
from plotly.offline import init_notebook_mode, plot_mpl
import matplotlib.pyplot as plt
init_notebook_mode()
fig = plt.figure()
x = [10, 15, 20, 25, 30]
y = [100, 250, 200, 150, 300]
plt.plot(x, y, "o")
plot_mpl(fig)
    # If you want to download an image of the figure as well
plot_mpl(fig, image='png')
```
"""
plotly_plot = tools.mpl_to_plotly(mpl_fig, resize, strip_style, verbose)
return plot(plotly_plot, show_link, link_text, validate, output_type,
include_plotlyjs, filename, auto_open,
image=image, image_filename=image_filename,
image_height=image_height, image_width=image_width)
def iplot_mpl(mpl_fig, resize=False, strip_style=False,
verbose=False, show_link=False,
link_text='Export to plot.ly', validate=True,
image=None, image_filename='plot_image',
image_height=600, image_width=800):
"""
Convert a matplotlib figure to a plotly graph and plot inside an IPython
notebook without connecting to an external server.
To save the chart to Plotly Cloud or Plotly Enterprise, use
`plotly.plotly.plot_mpl`.
For more information on converting matplotlib visualizations to plotly
graphs call `help(plotly.tools.mpl_to_plotly)`
For more information on plotting plotly charts offline in an Ipython
notebook call `help(plotly.offline.iplot)`
mpl_fig -- a matplotlib.figure to convert to a plotly graph
Keyword arguments:
resize (default=False) -- allow plotly to choose the figure size.
strip_style (default=False) -- allow plotly to choose style options.
verbose (default=False) -- print message.
show_link (default=False) -- display a link in the bottom-right corner of
of the chart that will export the chart to
Plotly Cloud or Plotly Enterprise
link_text (default='Export to plot.ly') -- the text of export link
validate (default=True) -- validate that all of the keys in the figure
are valid? omit if your version of plotly.js
has become outdated with your version of
graph_reference.json or if you need to include
extra, unnecessary keys in your figure.
image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
the format of the image to be downloaded, if we choose to download an
image. This parameter has a default value of None indicating that no
image should be downloaded.
image_filename (default='plot_image') -- Sets the name of the file your
image will be saved to. The extension should not be included.
image_height (default=600) -- Specifies the height of the image in `px`.
image_width (default=800) -- Specifies the width of the image in `px`.
Example:
```
from plotly.offline import init_notebook_mode, iplot_mpl
import matplotlib.pyplot as plt
fig = plt.figure()
x = [10, 15, 20, 25, 30]
y = [100, 250, 200, 150, 300]
plt.plot(x, y, "o")
init_notebook_mode()
iplot_mpl(fig)
# and if you want to download an image of the figure as well
iplot_mpl(fig, image='jpeg')
```
"""
plotly_plot = tools.mpl_to_plotly(mpl_fig, resize, strip_style, verbose)
return iplot(plotly_plot, show_link, link_text, validate,
image=image, filename=image_filename,
image_height=image_height, image_width=image_width)
def enable_mpl_offline(resize=False, strip_style=False,
verbose=False, show_link=False,
link_text='Export to plot.ly', validate=True):
"""
Convert mpl plots to locally hosted HTML documents.
This function should be used with the inline matplotlib backend
that ships with IPython that can be enabled with `%pylab inline`
or `%matplotlib inline`. This works by adding an HTML formatter
for Figure objects; the existing SVG/PNG formatters will remain
enabled.
(idea taken from `mpld3._display.enable_notebook`)
Example:
```
from plotly.offline import enable_mpl_offline
import matplotlib.pyplot as plt
enable_mpl_offline()
fig = plt.figure()
x = [10, 15, 20, 25, 30]
y = [100, 250, 200, 150, 300]
plt.plot(x, y, "o")
fig
```
"""
init_notebook_mode()
ip = ipython.core.getipython.get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.for_type(matplotlib.figure.Figure,
lambda fig: iplot_mpl(fig, resize, strip_style, verbose,
show_link, link_text, validate))
| 40.260916 | 81 | 0.619442 |
b5a88653ed37c0aa5d084b01d87b0012ccb89a3a | 925 | py | Python | meerkat/columns/video_column.py | HazyResearch/meerkat | e3b437d47809ef8e856a5f732ac1e11a1176ba1f | [
"Apache-2.0"
] | 65 | 2021-06-25T06:51:15.000Z | 2022-02-08T22:25:27.000Z | meerkat/columns/video_column.py | HazyResearch/meerkat | e3b437d47809ef8e856a5f732ac1e11a1176ba1f | [
"Apache-2.0"
] | 111 | 2021-06-24T01:16:36.000Z | 2022-03-07T17:54:58.000Z | meerkat/columns/video_column.py | HazyResearch/meerkat | e3b437d47809ef8e856a5f732ac1e11a1176ba1f | [
"Apache-2.0"
] | 4 | 2021-08-03T18:24:22.000Z | 2022-03-09T21:06:07.000Z | from __future__ import annotations
import logging
from typing import Callable, Optional, Sequence
from meerkat.cells.video import VideoCell
from meerkat.columns.cell_column import CellColumn
logger = logging.getLogger(__name__)
class VideoColumn(CellColumn):
"""Interface for creating a CellColumn from VideoCell objects."""
def __init__(self, *args, **kwargs):
super(VideoColumn, self).__init__(*args, **kwargs)
@classmethod
def from_filepaths(
cls,
filepaths: Optional[Sequence[str]] = None,
time_dim: Optional[int] = 1,
# TODO: add different loaders to VideoCell
transform: Optional[Callable] = None,
*args,
**kwargs,
):
cells = [
VideoCell(fp, time_dim=time_dim, transform=transform) for fp in filepaths
]
return cls(
cells=cells,
*args,
**kwargs,
)
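# Illustrative usage (a sketch, not part of the original module; the file paths
# are placeholders):
#   paths = ["videos/clip_0.mp4", "videos/clip_1.mp4"]
#   column = VideoColumn.from_filepaths(filepaths=paths, time_dim=1)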
| 25.694444 | 85 | 0.633514 |
ada6e9bd00edabf3a538e81d169fc66239f94d30 | 28,379 | py | Python | tools/webidl_binder.py | DingYong4223/emscripten | 1fc7303e52cce207822977fa6770f1aae371cc43 | [
"MIT"
] | 1 | 2020-02-22T14:55:15.000Z | 2020-02-22T14:55:15.000Z | tools/webidl_binder.py | DingYong4223/emscripten | 1fc7303e52cce207822977fa6770f1aae371cc43 | [
"MIT"
] | null | null | null | tools/webidl_binder.py | DingYong4223/emscripten | 1fc7303e52cce207822977fa6770f1aae371cc43 | [
"MIT"
] | null | null | null | # Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
'''
WebIDL binder
http://kripken.github.io/emscripten-site/docs/porting/connecting_cpp_and_javascript/WebIDL-Binder.html
'''
from __future__ import print_function
import os, sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tools import shared
sys.path.append(shared.path_from_root('third_party'))
sys.path.append(shared.path_from_root('third_party', 'ply'))
import WebIDL
# CHECKS='FAST' will skip most argument type checks in the wrapper methods for
# performance (~3x faster than default).
# CHECKS='ALL' will do extensive argument type checking (~5x slower than default).
# This will catch invalid numbers, invalid pointers, invalid strings, etc.
# Anything else defaults to legacy mode for backward compatibility.
CHECKS = os.environ.get('IDL_CHECKS') or 'DEFAULT'
# DEBUG=1 will print debug info in render_function
DEBUG = os.environ.get('IDL_VERBOSE') == '1'
if DEBUG: print("Debug print ON, CHECKS=%s" % CHECKS)
# We need to avoid some closure errors on the constructors we define here.
CONSTRUCTOR_CLOSURE_SUPPRESSIONS = '/** @suppress {undefinedVars, duplicate} */'
class Dummy(object):
def __init__(self, init):
for k, v in init.items():
self.__dict__[k] = v
def getExtendedAttribute(self, name):
return None
input_file = sys.argv[1]
output_base = sys.argv[2]
shared.try_delete(output_base + '.cpp')
shared.try_delete(output_base + '.js')
p = WebIDL.Parser()
p.parse(r'''
interface VoidPtr {
};
''' + open(input_file).read())
data = p.finish()
interfaces = {}
implements = {}
enums = {}
for thing in data:
if isinstance(thing, WebIDL.IDLInterface):
interfaces[thing.identifier.name] = thing
elif isinstance(thing, WebIDL.IDLImplementsStatement):
implements.setdefault(thing.implementor.identifier.name, []).append(thing.implementee.identifier.name)
elif isinstance(thing, WebIDL.IDLEnum):
enums[thing.identifier.name] = thing
#print interfaces
#print implements
pre_c = []
mid_c = []
mid_js = []
pre_c += [r'''
#include <emscripten.h>
''']
mid_c += [r'''
extern "C" {
''']
def build_constructor(name):
implementing_name = implements[name][0] if implements.get(name) else 'WrapperObject'
return [r'''{name}.prototype = Object.create({implementing}.prototype);
{name}.prototype.constructor = {name};
{name}.prototype.__class__ = {name};
{name}.__cache__ = {{}};
Module['{name}'] = {name};
'''.format(name=name, implementing=implementing_name)]
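# For a hypothetical interface named Foo that implements nothing, the snippet
# built above expands (roughly) to:
#   Foo.prototype = Object.create(WrapperObject.prototype);
#   Foo.prototype.constructor = Foo;
#   Foo.prototype.__class__ = Foo;
#   Foo.__cache__ = {};
#   Module['Foo'] = Foo;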
mid_js += ['''
// Bindings utilities
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function WrapperObject() {
}
''']
mid_js += build_constructor('WrapperObject')
mid_js += ['''
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function getCache(__class__) {
return (__class__ || WrapperObject).__cache__;
}
Module['getCache'] = getCache;
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function wrapPointer(ptr, __class__) {
var cache = getCache(__class__);
var ret = cache[ptr];
if (ret) return ret;
ret = Object.create((__class__ || WrapperObject).prototype);
ret.ptr = ptr;
return cache[ptr] = ret;
}
Module['wrapPointer'] = wrapPointer;
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function castObject(obj, __class__) {
return wrapPointer(obj.ptr, __class__);
}
Module['castObject'] = castObject;
Module['NULL'] = wrapPointer(0);
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function destroy(obj) {
if (!obj['__destroy__']) throw 'Error: Cannot destroy object. (Did you create it yourself?)';
obj['__destroy__']();
// Remove from cache, so the object can be GC'd and refs added onto it released
delete getCache(obj.__class__)[obj.ptr];
}
Module['destroy'] = destroy;
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function compare(obj1, obj2) {
return obj1.ptr === obj2.ptr;
}
Module['compare'] = compare;
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function getPointer(obj) {
return obj.ptr;
}
Module['getPointer'] = getPointer;
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function getClass(obj) {
return obj.__class__;
}
Module['getClass'] = getClass;
// Converts big (string or array) values into a C-style storage, in temporary space
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
var ensureCache = {
buffer: 0, // the main buffer of temporary storage
size: 0, // the size of buffer
pos: 0, // the next free offset in buffer
temps: [], // extra allocations
needed: 0, // the total size we need next time
prepare: function() {
if (ensureCache.needed) {
// clear the temps
for (var i = 0; i < ensureCache.temps.length; i++) {
Module['_free'](ensureCache.temps[i]);
}
ensureCache.temps.length = 0;
// prepare to allocate a bigger buffer
Module['_free'](ensureCache.buffer);
ensureCache.buffer = 0;
ensureCache.size += ensureCache.needed;
// clean up
ensureCache.needed = 0;
}
if (!ensureCache.buffer) { // happens first time, or when we need to grow
ensureCache.size += 128; // heuristic, avoid many small grow events
ensureCache.buffer = Module['_malloc'](ensureCache.size);
assert(ensureCache.buffer);
}
ensureCache.pos = 0;
},
alloc: function(array, view) {
assert(ensureCache.buffer);
var bytes = view.BYTES_PER_ELEMENT;
var len = array.length * bytes;
len = (len + 7) & -8; // keep things aligned to 8 byte boundaries
var ret;
if (ensureCache.pos + len >= ensureCache.size) {
// we failed to allocate in the buffer, this time around :(
assert(len > 0); // null terminator, at least
ensureCache.needed += len;
ret = Module['_malloc'](len);
ensureCache.temps.push(ret);
} else {
// we can allocate in the buffer
ret = ensureCache.buffer + ensureCache.pos;
ensureCache.pos += len;
}
return ret;
},
copy: function(array, view, offset) {
var offsetShifted = offset;
var bytes = view.BYTES_PER_ELEMENT;
switch (bytes) {
case 2: offsetShifted >>= 1; break;
case 4: offsetShifted >>= 2; break;
case 8: offsetShifted >>= 3; break;
}
for (var i = 0; i < array.length; i++) {
view[offsetShifted + i] = array[i];
}
},
};
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function ensureString(value) {
if (typeof value === 'string') {
var intArray = intArrayFromString(value);
var offset = ensureCache.alloc(intArray, HEAP8);
ensureCache.copy(intArray, HEAP8, offset);
return offset;
}
return value;
}
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function ensureInt8(value) {
if (typeof value === 'object') {
var offset = ensureCache.alloc(value, HEAP8);
ensureCache.copy(value, HEAP8, offset);
return offset;
}
return value;
}
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function ensureInt16(value) {
if (typeof value === 'object') {
var offset = ensureCache.alloc(value, HEAP16);
ensureCache.copy(value, HEAP16, offset);
return offset;
}
return value;
}
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function ensureInt32(value) {
if (typeof value === 'object') {
var offset = ensureCache.alloc(value, HEAP32);
ensureCache.copy(value, HEAP32, offset);
return offset;
}
return value;
}
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function ensureFloat32(value) {
if (typeof value === 'object') {
var offset = ensureCache.alloc(value, HEAPF32);
ensureCache.copy(value, HEAPF32, offset);
return offset;
}
return value;
}
/** @suppress {duplicate} (TODO: avoid emitting this multiple times, it is redundant) */
function ensureFloat64(value) {
if (typeof value === 'object') {
var offset = ensureCache.alloc(value, HEAPF64);
ensureCache.copy(value, HEAPF64, offset);
return offset;
}
return value;
}
''']
mid_c += ['''
// Not using size_t for array indices as the values used by the javascript code are signed.
void array_bounds_check(const int array_size, const int array_idx) {
if (array_idx < 0 || array_idx >= array_size) {
EM_ASM({
throw 'Array index ' + $0 + ' out of bounds: [0,' + $1 + ')';
}, array_idx, array_size);
}
}
''']
C_FLOATS = ['float', 'double']
def full_typename(arg):
return ('const ' if arg.getExtendedAttribute('Const') else '') + arg.type.name + ('[]' if arg.type.isArray() else '')
def type_to_c(t, non_pointing=False):
#print 'to c ', t
def base_type_to_c(t):
if t == 'Long':
ret = 'int'
elif t == 'UnsignedLong':
ret = 'unsigned int'
elif t == 'LongLong':
ret = 'long long'
elif t == 'UnsignedLongLong':
ret = 'unsigned long long'
elif t == 'Short':
ret = 'short'
elif t == 'UnsignedShort':
ret = 'unsigned short'
elif t == 'Byte':
ret = 'char'
elif t == 'Octet':
ret = 'unsigned char'
elif t == 'Void':
ret = 'void'
elif t == 'String':
ret = 'char*'
elif t == 'Float':
ret = 'float'
elif t == 'Double':
ret = 'double'
elif t == 'Boolean':
ret = 'bool'
elif t == 'Any' or t == 'VoidPtr':
ret = 'void*'
elif t in interfaces:
ret = (interfaces[t].getExtendedAttribute('Prefix') or [''])[0] + t + ('' if non_pointing else '*')
else:
ret = t
return ret
t = t.replace(' (Wrapper)', '')
prefix = ''
suffix = ''
if '[]' in t:
t = t.replace('[]', '')
suffix = '*'
if 'const ' in t:
t = t.replace('const ', '')
prefix = 'const '
return prefix + base_type_to_c(t) + suffix
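# A few mappings implied by the helper above (illustrative, assuming an interface
# named Foo with no Prefix attribute):
#   type_to_c('Long') -> 'int'
#   type_to_c('String') -> 'char*'
#   type_to_c('const Foo[]') -> 'const Foo**'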
def take_addr_if_nonpointer(m):
if m.getExtendedAttribute('Ref') or m.getExtendedAttribute('Value'):
return '&'
return ''
def deref_if_nonpointer(m):
if m.getExtendedAttribute('Ref') or m.getExtendedAttribute('Value'):
return '*'
return ''
def type_to_cdec(raw):
name = ret = type_to_c(raw.type.name, non_pointing=True)
if raw.getExtendedAttribute('Const'): ret = 'const ' + ret
if raw.type.name not in interfaces: return ret
if raw.getExtendedAttribute('Ref'):
return ret + '&'
if raw.getExtendedAttribute('Value'):
return ret
return ret + '*'
def render_function(class_name, func_name, sigs, return_type, non_pointer, copy, operator, constructor, func_scope, call_content=None, const=False, array_attribute=False):
global mid_c, mid_js, js_impl_methods
legacy_mode = CHECKS not in ['ALL', 'FAST']
all_checks = CHECKS == 'ALL'
bindings_name = class_name + '_' + func_name
min_args = min(sigs.keys())
max_args = max(sigs.keys())
all_args = sigs.get(max_args)
if DEBUG:
print('renderfunc', class_name, func_name, list(sigs.keys()), return_type, constructor)
for i in range(max_args):
a = all_args[i]
if isinstance(a, WebIDL.IDLArgument):
print(' ', a.identifier.name, a.identifier, a.type, a.optional)
else:
print(" arg%d" % i)
# JS
cache = ('getCache(%s)[this.ptr] = this;' % class_name) if constructor else ''
call_prefix = '' if not constructor else 'this.ptr = '
call_postfix = ''
if return_type != 'Void' and not constructor: call_prefix = 'return '
if not constructor:
if return_type in interfaces:
call_prefix += 'wrapPointer('
call_postfix += ', ' + return_type + ')'
elif return_type == 'String':
call_prefix += 'UTF8ToString('
call_postfix += ')'
elif return_type == 'Boolean':
call_prefix += '!!('
call_postfix += ')'
args = [(all_args[i].identifier.name if isinstance(all_args[i], WebIDL.IDLArgument) else ('arg%d' % i)) for i in range(max_args)]
if not constructor:
body = ' var self = this.ptr;\n'
pre_arg = ['self']
else:
body = ''
pre_arg = []
if any(arg.type.isString() or arg.type.isArray() for arg in all_args):
body += ' ensureCache.prepare();\n'
full_name = "%s::%s" % (class_name, func_name)
for i, (js_arg, arg) in enumerate(zip(args, all_args)):
if i >= min_args:
optional = True
else:
optional = False
do_default = False
# Filter out arguments we don't know how to parse. Fast casing only common cases.
compatible_arg = isinstance(arg, Dummy) or (isinstance(arg, WebIDL.IDLArgument) and arg.optional is False)
# note: null has typeof object, but is ok to leave as is, since we are calling into asm code where null|0 = 0
if not legacy_mode and compatible_arg:
if isinstance(arg, WebIDL.IDLArgument):
arg_name = arg.identifier.name
else:
arg_name = ''
# Format assert fail message
check_msg = "[CHECK FAILED] %s(%s:%s): " % (full_name, js_arg, arg_name)
if isinstance(arg.type, WebIDL.IDLWrapperType):
inner = arg.type.inner
else:
inner = ""
# Print type info in comments.
body += " /* %s <%s> [%s] */\n" % (js_arg, arg.type.name, inner)
# Wrap asserts with existence check when argument is optional.
if all_checks and optional: body += "if(typeof {0} !== 'undefined' && {0} !== null) {{\n".format(js_arg)
# Special case argument types.
if arg.type.isNumeric():
if arg.type.isInteger():
if all_checks: body += " assert(typeof {0} === 'number' && !isNaN({0}), '{1}Expecting <integer>');\n".format(js_arg, check_msg)
else:
if all_checks: body += " assert(typeof {0} === 'number', '{1}Expecting <number>');\n".format(js_arg, check_msg)
# No transform needed for numbers
elif arg.type.isBoolean():
if all_checks: body += " assert(typeof {0} === 'boolean' || (typeof {0} === 'number' && !isNaN({0})), '{1}Expecting <boolean>');\n".format(js_arg, check_msg)
# No transform needed for booleans
elif arg.type.isString():
# Strings can be DOM strings or pointers.
if all_checks: body += " assert(typeof {0} === 'string' || ({0} && typeof {0} === 'object' && typeof {0}.ptr === 'number'), '{1}Expecting <string>');\n".format(js_arg, check_msg)
do_default = True # legacy path is fast enough for strings.
elif arg.type.isInterface():
if all_checks: body += " assert(typeof {0} === 'object' && typeof {0}.ptr === 'number', '{1}Expecting <pointer>');\n".format(js_arg, check_msg)
if optional:
body += " if(typeof {0} !== 'undefined' && {0} !== null) {{ {0} = {0}.ptr }};\n".format(js_arg)
else:
# No checks in fast mode when the arg is required
body += " {0} = {0}.ptr;\n".format(js_arg)
else:
do_default = True
if all_checks and optional: body += "}\n"
else:
do_default = True
if do_default:
if not (arg.type.isArray() and not array_attribute):
body += " if ({0} && typeof {0} === 'object') {0} = {0}.ptr;\n".format(js_arg)
if arg.type.isString():
body += " else {0} = ensureString({0});\n".format(js_arg)
else:
# an array can be received here
arg_type = arg.type.name
if arg_type in ['Byte', 'Octet']:
body += " if (typeof {0} == 'object') {{ {0} = ensureInt8({0}); }}\n".format(js_arg)
elif arg_type in ['Short', 'UnsignedShort']:
body += " if (typeof {0} == 'object') {{ {0} = ensureInt16({0}); }}\n".format(js_arg)
elif arg_type in ['Long', 'UnsignedLong']:
body += " if (typeof {0} == 'object') {{ {0} = ensureInt32({0}); }}\n".format(js_arg)
elif arg_type == 'Float':
body += " if (typeof {0} == 'object') {{ {0} = ensureFloat32({0}); }}\n".format(js_arg)
elif arg_type == 'Double':
body += " if (typeof {0} == 'object') {{ {0} = ensureFloat64({0}); }}\n".format(js_arg)
c_names = {}
for i in range(min_args, max_args):
c_names[i] = 'emscripten_bind_%s_%d' % (bindings_name, i)
body += ' if (%s === undefined) { %s%s(%s)%s%s }\n' % (args[i], call_prefix, '_' + c_names[i], ', '.join(pre_arg + args[:i]), call_postfix, '' if 'return ' in call_prefix else '; ' + (cache or ' ') + 'return')
c_names[max_args] = 'emscripten_bind_%s_%d' % (bindings_name, max_args)
body += ' %s%s(%s)%s;\n' % (call_prefix, '_' + c_names[max_args], ', '.join(pre_arg + args), call_postfix)
if cache:
body += ' ' + cache + '\n'
mid_js += [r'''%sfunction%s(%s) {
%s
};''' % (CONSTRUCTOR_CLOSURE_SUPPRESSIONS, (' ' + func_name) if constructor else '', ', '.join(args), body[:-1])]
# C
for i in range(min_args, max_args+1):
raw = sigs.get(i)
if raw is None: continue
sig = list(map(full_typename, raw))
if array_attribute:
sig = [x.replace('[]', '') for x in sig] # for arrays, ignore that this is an array - our get/set methods operate on the elements
c_arg_types = list(map(type_to_c, sig))
normal_args = ', '.join(['%s %s' % (c_arg_types[j], args[j]) for j in range(i)])
if constructor:
full_args = normal_args
else:
full_args = type_to_c(class_name, non_pointing=True) + '* self' + ('' if not normal_args else ', ' + normal_args)
call_args = ', '.join(['%s%s' % ('*' if raw[j].getExtendedAttribute('Ref') else '', args[j]) for j in range(i)])
if constructor:
call = 'new ' + type_to_c(class_name, non_pointing=True)
call += '(' + call_args + ')'
elif call_content is not None:
call = call_content
else:
call = 'self->' + func_name
call += '(' + call_args + ')'
if operator:
cast_self = 'self'
if class_name != func_scope:
# this function comes from an ancestor class; for operators, we must cast it
cast_self = 'dynamic_cast<' + type_to_c(func_scope) + '>(' + cast_self + ')'
maybe_deref = deref_if_nonpointer(raw[0])
if '=' in operator:
call = '(*%s %s %s%s)' % (cast_self, operator, maybe_deref, args[0])
elif operator == '[]':
call = '((*%s)[%s%s])' % (cast_self, maybe_deref, args[0])
else:
raise Exception('unfamiliar operator ' + operator)
pre = ''
basic_return = 'return ' if constructor or return_type != 'Void' else ''
return_prefix = basic_return
return_postfix = ''
if non_pointer:
return_prefix += '&';
if copy:
pre += ' static %s temp;\n' % type_to_c(return_type, non_pointing=True)
return_prefix += '(temp = '
return_postfix += ', &temp)'
c_return_type = type_to_c(return_type)
maybe_const = 'const ' if const else ''
mid_c += [r'''
%s%s EMSCRIPTEN_KEEPALIVE %s(%s) {
%s %s%s%s;
}
''' % (maybe_const, type_to_c(class_name) if constructor else c_return_type, c_names[i], full_args, pre, return_prefix, call, return_postfix)]
if not constructor:
if i == max_args:
dec_args = ', '.join([type_to_cdec(raw[j]) + ' ' + args[j] for j in range(i)])
js_call_args = ', '.join(['%s%s' % (('(int)' if sig[j] in interfaces else '') + take_addr_if_nonpointer(raw[j]), args[j]) for j in range(i)])
js_impl_methods += [r''' %s %s(%s) %s {
%sEM_ASM_%s({
var self = Module['getCache'](Module['%s'])[$0];
if (!self.hasOwnProperty('%s')) throw 'a JSImplementation must implement all functions, you forgot %s::%s.';
%sself['%s'](%s)%s;
}, (int)this%s);
}''' % (c_return_type, func_name, dec_args, maybe_const,
basic_return, 'INT' if c_return_type not in C_FLOATS else 'DOUBLE',
class_name,
func_name, class_name, func_name,
return_prefix,
func_name,
','.join(['$%d' % i for i in range(1, max_args + 1)]),
return_postfix,
(', ' if js_call_args else '') + js_call_args)]
for name, interface in interfaces.items():
js_impl = interface.getExtendedAttribute('JSImplementation')
if not js_impl: continue
implements[name] = [js_impl[0]]
# Compute the height in the inheritance tree of each node. Note that the order of iteration
# of `implements` is irrelevant.
#
# After one iteration of the loop, all ancestors of child are guaranteed to have a larger
# height number than the child, and this is recursively true for each ancestor. If the height
# of child is later increased, all its ancestors will be readjusted at that time to maintain
# that invariant. Further, the height of a node never decreases. Therefore, when the loop
# finishes, all ancestors of a given node should have a larger height number than that node.
nodeHeight = {}
for child, parent in implements.items():
parent = parent[0]
while parent:
nodeHeight[parent] = max(nodeHeight.get(parent, 0), nodeHeight.get(child, 0) + 1)
grandParent = implements.get(parent)
if grandParent:
child = parent
parent = grandParent[0]
else:
parent = None
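# Illustrative example (not in the original source): with
# implements = {'B': ['A'], 'C': ['B']} the loop produces nodeHeight = {'A': 2, 'B': 1}
# ('C' stays at height 0), so the sorted() call below emits base classes first.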
names = sorted(interfaces.keys(), key=lambda x: nodeHeight.get(x, 0), reverse=True)
for name in names:
interface = interfaces[name]
mid_js += ['\n// ' + name + '\n']
mid_c += ['\n// ' + name + '\n']
global js_impl_methods
js_impl_methods = []
cons = interface.getExtendedAttribute('Constructor')
if type(cons) == list: raise Exception('do not use "Constructor", instead create methods with the name of the interface')
js_impl = interface.getExtendedAttribute('JSImplementation')
if js_impl:
js_impl = js_impl[0]
# Methods
# Ensure a constructor even if one is not specified.
if not any(m.identifier.name == name for m in interface.members):
mid_js += ['%sfunction %s() { throw "cannot construct a %s, no constructor in IDL" }\n' % (CONSTRUCTOR_CLOSURE_SUPPRESSIONS, name, name)]
mid_js += build_constructor(name)
for m in interface.members:
if not m.isMethod(): continue
constructor = m.identifier.name == name
if not constructor:
parent_constructor = False
temp = m.parentScope
while temp.parentScope:
if temp.identifier.name == m.identifier.name:
parent_constructor = True
temp = temp.parentScope
if parent_constructor:
continue
if not constructor:
mid_js += [r'''
%s.prototype['%s'] = %s.prototype.%s = ''' % (name, m.identifier.name, name, m.identifier.name)]
sigs = {}
return_type = None
for ret, args in m.signatures():
if return_type is None:
return_type = ret.name
else:
assert return_type == ret.name, 'overloads must have the same return type'
for i in range(len(args)+1):
if i == len(args) or args[i].optional:
assert i not in sigs, 'overloading must differentiate by # of arguments (cannot have two signatures that differ by types but not by length)'
sigs[i] = args[:i]
render_function(name,
m.identifier.name, sigs, return_type,
m.getExtendedAttribute('Ref'),
m.getExtendedAttribute('Value'),
(m.getExtendedAttribute('Operator') or [None])[0],
constructor,
func_scope=m.parentScope.identifier.name,
const=m.getExtendedAttribute('Const'))
mid_js += [';\n']
if constructor:
mid_js += build_constructor(name)
for m in interface.members:
if not m.isAttr(): continue
attr = m.identifier.name
if m.type.isArray():
get_sigs = { 1: [Dummy({ 'type': WebIDL.BuiltinTypes[WebIDL.IDLBuiltinType.Types.long] })] }
set_sigs = { 2: [Dummy({ 'type': WebIDL.BuiltinTypes[WebIDL.IDLBuiltinType.Types.long] }),
Dummy({ 'type': m.type })] }
get_call_content = take_addr_if_nonpointer(m) + 'self->' + attr + '[arg0]'
set_call_content = 'self->' + attr + '[arg0] = ' + deref_if_nonpointer(m) + 'arg1'
if m.getExtendedAttribute('BoundsChecked'):
bounds_check = "array_bounds_check(sizeof(self->%s) / sizeof(self->%s[0]), arg0)" % (attr, attr)
get_call_content = "(%s, %s)" % (bounds_check, get_call_content)
set_call_content = "(%s, %s)" % (bounds_check, set_call_content)
else:
get_sigs = { 0: [] }
set_sigs = { 1: [Dummy({ 'type': m.type })] }
get_call_content = take_addr_if_nonpointer(m) + 'self->' + attr
set_call_content = 'self->' + attr + ' = ' + deref_if_nonpointer(m) + 'arg0'
get_name = 'get_' + attr
mid_js += [r'''
%s.prototype['%s'] = %s.prototype.%s = ''' % (name, get_name, name, get_name)]
render_function(name,
get_name, get_sigs, m.type.name,
None,
None,
None,
False,
func_scope=interface,
call_content=get_call_content,
const=m.getExtendedAttribute('Const'),
array_attribute=m.type.isArray())
if m.readonly:
mid_js += [r'''
Object.defineProperty(%s.prototype, '%s', { get: %s.prototype.%s });''' % (name, attr, name, get_name)]
else:
set_name = 'set_' + attr
mid_js += [r'''
%s.prototype['%s'] = %s.prototype.%s = ''' % (name, set_name, name, set_name)]
render_function(name,
set_name, set_sigs, 'Void',
None,
None,
None,
False,
func_scope=interface,
call_content=set_call_content,
const=m.getExtendedAttribute('Const'),
array_attribute=m.type.isArray())
mid_js += [r'''
Object.defineProperty(%s.prototype, '%s', { get: %s.prototype.%s, set: %s.prototype.%s });''' % (name, attr, name, get_name, name, set_name)]
if not interface.getExtendedAttribute('NoDelete'):
mid_js += [r'''
%s.prototype['__destroy__'] = %s.prototype.__destroy__ = ''' % (name, name)]
render_function(name,
'__destroy__', { 0: [] }, 'Void',
None,
None,
None,
False,
func_scope=interface,
call_content='delete self')
# Emit C++ class implementation that calls into JS implementation
if js_impl:
pre_c += [r'''
class %s : public %s {
public:
%s
};
''' % (name, type_to_c(js_impl, non_pointing=True), '\n'.join(js_impl_methods))]
deferred_js = []
for name, enum in enums.items():
mid_c += ['\n// ' + name + '\n']
deferred_js += ['\n', '// ' + name + '\n']
for value in enum.values():
function_id = "%s_%s" % (name, value.split('::')[-1])
function_id = 'emscripten_enum_%s' % function_id
mid_c += [r'''%s EMSCRIPTEN_KEEPALIVE %s() {
return %s;
}
''' % (name, function_id, value)]
symbols = value.split('::')
if len(symbols) == 1:
identifier = symbols[0]
deferred_js += ["Module['%s'] = _%s();\n" % (identifier, function_id)]
elif len(symbols) == 2:
[namespace, identifier] = symbols
if namespace in interfaces:
# namespace is a class
deferred_js += ["Module['%s']['%s'] = _%s();\n" % \
(namespace, identifier, function_id)]
else:
# namespace is a namespace, so the enums get collapsed into the top level namespace.
deferred_js += ["Module['%s'] = _%s();\n" % (identifier, function_id)]
else:
raise Exception("Illegal enum value %s" % value)
mid_c += ['\n}\n\n']
if len(deferred_js):
mid_js += ['''
(function() {
function setupEnums() {
%s
}
if (runtimeInitialized) setupEnums();
else addOnPreMain(setupEnums);
})();
''' % '\n '.join(deferred_js)]
# Write
with open(output_base + '.cpp', 'w') as c:
for x in pre_c: c.write(x)
for x in mid_c: c.write(x)
with open(output_base + '.js', 'w') as js:
for x in mid_js: js.write(x)
| 35.607277 | 214 | 0.618133 |
6017e59cb0752a26f3454ddbc603f1ca25926af6 | 607 | py | Python | qiskit/aqua/components/eigs/__init__.py | gawelk/aqua | 98ce06289cc6e12d087982cba95a45353ce0cb3b | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/components/eigs/__init__.py | gawelk/aqua | 98ce06289cc6e12d087982cba95a45353ce0cb3b | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/components/eigs/__init__.py | gawelk/aqua | 98ce06289cc6e12d087982cba95a45353ce0cb3b | [
"Apache-2.0"
] | 1 | 2019-06-05T01:08:23.000Z | 2019-06-05T01:08:23.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from .eigs import Eigenvalues
from .eigs_qpe import EigsQPE
__all__ = ['EigsQPE', 'Eigenvalues']
| 31.947368 | 77 | 0.742998 |
9ecd4c740b29ddfa800016dc25ad3a4aa6ca754c | 5,769 | py | Python | examples/task_seq2seq_autotitle_csl.py | EthanChen1234/bert4keras | 149b8abe4f5696f7762f49547533873b935f85b9 | [
"Apache-2.0"
] | 2 | 2020-11-24T02:58:24.000Z | 2021-08-18T06:50:28.000Z | examples/task_seq2seq_autotitle_csl.py | ZhenHengDong/bert4keras | de66f9b66a57152816920a6b068a3f28648dd547 | [
"Apache-2.0"
] | null | null | null | examples/task_seq2seq_autotitle_csl.py | ZhenHengDong/bert4keras | de66f9b66a57152816920a6b068a3f28648dd547 | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
# BERT for a Seq2Seq task, using the UNILM scheme
# Introduction: https://kexue.fm/archives/6933
# Dataset: the CSL dataset from https://github.com/CLUEbenchmark/CLGE
# Also reports the evaluation metrics BLEU, ROUGE-1, ROUGE-2 and ROUGE-L
from __future__ import print_function
import numpy as np
from tqdm import tqdm
from bert4keras.backend import keras, K
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer, load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, open
from bert4keras.snippets import DataGenerator, AutoRegressiveDecoder
from keras.models import Model
from rouge import Rouge # pip install rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
# Basic parameters
maxlen = 256
batch_size = 16
epochs = 20
# BERT configuration
config_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/vocab.txt'
def load_data(filename):
    D = []
    with open(filename, encoding='utf-8') as f:
        for l in f:
            title, content = l.strip().split('\t')
            D.append((title, content))
    return D
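# Each line of the TSV files is expected to hold one "title<TAB>content" pair
# (illustrative), so load_data() returns a list like [(title_1, content_1), ...].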
# Load the datasets
train_data = load_data('/root/csl/train.tsv')
valid_data = load_data('/root/csl/val.tsv')
test_data = load_data('/root/csl/test.tsv')
# Load and simplify the vocabulary, and build the tokenizer
token_dict, keep_tokens = load_vocab(
    dict_path=dict_path,
    simplified=True,
    startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids = [], []
for is_end, (title, content) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(
content, title, max_length=maxlen
)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
yield [batch_token_ids, batch_segment_ids], None
batch_token_ids, batch_segment_ids = [], []
class CrossEntropy(Loss):
"""交叉熵作为loss,并mask掉输入部分
"""
def compute_loss(self, inputs, mask=None):
y_true, y_mask, y_pred = inputs
y_true = y_true[:, 1:] # 目标token_ids
y_mask = y_mask[:, 1:] # segment_ids,刚好指示了要预测的部分
y_pred = y_pred[:, :-1] # 预测序列,错开一位
loss = K.sparse_categorical_crossentropy(y_true, y_pred)
loss = K.sum(loss * y_mask) / K.sum(y_mask)
return loss
model = build_transformer_model(
    config_path,
    checkpoint_path,
    application='unilm',
    keep_tokens=keep_tokens,  # keep only the tokens in keep_tokens, simplifying the original vocab
)
output = CrossEntropy(2)(model.inputs + model.outputs)
model = Model(model.inputs, output)
model.compile(optimizer=Adam(1e-5))
model.summary()
class AutoTitle(AutoRegressiveDecoder):
"""seq2seq解码器
"""
@AutoRegressiveDecoder.set_rtype('probas')
def predict(self, inputs, output_ids, step):
token_ids, segment_ids = inputs
token_ids = np.concatenate([token_ids, output_ids], 1)
segment_ids = np.concatenate([segment_ids, np.ones_like(output_ids)], 1)
return model.predict([token_ids, segment_ids])[:, -1]
def generate(self, text, topk=1):
max_c_len = maxlen - self.maxlen
token_ids, segment_ids = tokenizer.encode(text, max_length=max_c_len)
output_ids = self.beam_search([token_ids, segment_ids],
topk) # 基于beam search
return tokenizer.decode(output_ids)
autotitle = AutoTitle(start_id=None, end_id=tokenizer._token_end_id, maxlen=32)
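# Illustrative usage (not in the original script):
#   predicted_title = autotitle.generate(content, topk=2)  # beam search with beam size 2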
class Evaluate(keras.callbacks.Callback):
    def __init__(self):
        self.rouge = Rouge()
        self.smooth = SmoothingFunction().method1
        self.best_bleu = 0.
    def on_epoch_end(self, epoch, logs=None):
        metrics = self.evaluate(valid_data)  # evaluate the model
        if metrics['bleu'] > self.best_bleu:
            self.best_bleu = metrics['bleu']
            model.save_weights('./best_model.weights')  # save the model
        metrics['best_bleu'] = self.best_bleu
        print('valid_data:', metrics)
    def evaluate(self, data, topk=1):
        total = 0
        rouge_1, rouge_2, rouge_l, bleu = 0, 0, 0, 0
        for title, content in tqdm(data):
            total += 1
            title = ' '.join(title)
            pred_title = ' '.join(autotitle.generate(content, topk))
            if pred_title.strip():
                scores = self.rouge.get_scores(hyps=pred_title, refs=title)
                rouge_1 += scores[0]['rouge-1']['f']
                rouge_2 += scores[0]['rouge-2']['f']
                rouge_l += scores[0]['rouge-l']['f']
                bleu += sentence_bleu(
                    references=[title.split(' ')],
                    hypothesis=pred_title.split(' '),
                    smoothing_function=self.smooth
                )
        rouge_1 /= total
        rouge_2 /= total
        rouge_l /= total
        bleu /= total
        return {
            'rouge-1': rouge_1,
            'rouge-2': rouge_2,
            'rouge-l': rouge_l,
            'bleu': bleu,
        }
if __name__ == '__main__':
evaluator = Evaluate()
train_generator = data_generator(train_data, batch_size)
model.fit_generator(
train_generator.forfit(),
steps_per_epoch=len(train_generator),
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.weights')
| 32.22905 | 80 | 0.641186 |
1b14dfa994e955b906622ea478e98066af21a4be | 301 | py | Python | desafio063.py | EdnaMota/python3_estudos | 78fd4cf0063329d02b1e8b6f0094679e8922d5d7 | [
"MIT"
] | null | null | null | desafio063.py | EdnaMota/python3_estudos | 78fd4cf0063329d02b1e8b6f0094679e8922d5d7 | [
"MIT"
] | null | null | null | desafio063.py | EdnaMota/python3_estudos | 78fd4cf0063329d02b1e8b6f0094679e8922d5d7 | [
"MIT"
] | null | null | null | print('-'*30)
print('Sequência de Fibonacci')
print('-'*30)
n = int(input('Quantos termos você quer mostrar? '))
t1 = 0
t2 = 1
print('~'*30)
print(f'{t1} -> {t2}', end='')
cont = 3
while cont <= n:
t3 = t1 + t2
print(f' -> {t3}', end='')
t1 = t2
t2 = t3
cont += 1
print(' -> FIM')
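# Worked example (added note, not part of the original exercise): for n = 5 the
# program above prints "0 -> 1 -> 1 -> 2 -> 3 -> FIM".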
| 17.705882 | 52 | 0.51495 |
8a6da2bfc8b49642b93ac006e1ff225d829adc12 | 3,978 | py | Python | isi_sdk_9_0_0/isi_sdk_9_0_0/models/member_object.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/models/member_object.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_9_0_0/isi_sdk_9_0_0/models/member_object.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MemberObject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'type': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'type': 'type'
}
def __init__(self, id=None, name=None, type=None): # noqa: E501
"""MemberObject - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._type = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if type is not None:
self.type = type
@property
def id(self):
"""Gets the id of this MemberObject. # noqa: E501
:return: The id of this MemberObject. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MemberObject.
:param id: The id of this MemberObject. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this MemberObject. # noqa: E501
:return: The name of this MemberObject. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this MemberObject.
:param name: The name of this MemberObject. # noqa: E501
:type: str
"""
self._name = name
@property
def type(self):
"""Gets the type of this MemberObject. # noqa: E501
:return: The type of this MemberObject. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this MemberObject.
:param type: The type of this MemberObject. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MemberObject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.109091 | 80 | 0.529915 |
a0a0da885c48fd14cf08145cc2b01bfe750a9979 | 1,423 | py | Python | skills-sdk-setup.py | sowmyavasudeva/SmartBookmark | 797a90cfea624d2ab977e5aa78614c0db1177a23 | [
"Apache-2.0"
] | 2 | 2018-12-16T15:55:04.000Z | 2018-12-29T19:52:38.000Z | skills-sdk-setup.py | sowmyavasudeva/SmartBookmark | 797a90cfea624d2ab977e5aa78614c0db1177a23 | [
"Apache-2.0"
] | null | null | null | skills-sdk-setup.py | sowmyavasudeva/SmartBookmark | 797a90cfea624d2ab977e5aa78614c0db1177a23 | [
"Apache-2.0"
] | 1 | 2018-03-14T07:07:56.000Z | 2018-03-14T07:07:56.000Z | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup
from mycroft.util.setup_base import get_version, place_manifest
place_manifest("skills-sdk-MANIFEST.in")
setup(
name="mycroft-skills-sdk",
version=get_version(),
install_requires=[
"mustache==0.1.4",
"configobj==5.0.6",
"pyee==1.0.1",
"adapt-parser==0.2.1",
"padatious==0.1.4"
"websocket-client==0.32.0"
],
packages=[
"mycroft.configuration",
"mycroft.dialog",
"mycroft.filesystem",
"mycroft.messagebus",
"mycroft.messagebus.client",
"mycroft.session",
"mycroft.skills",
"mycroft.util",
"mycroft"
],
include_package_data=True,
entry_points={
'console_scripts': [
'mycroft-skill-container=mycroft.skills.container:main'
]
}
)
| 27.365385 | 74 | 0.653549 |
b35b2faf9a08b510bf13baac66c9b32a7ce819b8 | 3,775 | py | Python | res_build/build.py | carlosgim/cagimenez.github.io | 7b6a89e709fa43ea790ed62cba59a7a88c6ee66e | [
"MIT"
] | null | null | null | res_build/build.py | carlosgim/cagimenez.github.io | 7b6a89e709fa43ea790ed62cba59a7a88c6ee66e | [
"MIT"
] | null | null | null | res_build/build.py | carlosgim/cagimenez.github.io | 7b6a89e709fa43ea790ed62cba59a7a88c6ee66e | [
"MIT"
] | null | null | null | """
Simple HTML and PDF resume generator from structurized YAML files.
Usage:
build.py [-o=<DIR>] [-f=<FORMAT>] [-t=<THEME>] <resume_file>
Options:
-o=<DIR>, --output_dir=<DIR> Output directory for the build files. [default: build].
-f=<FORMAT>, --format=<FORMAT> Format of the build [default: html].
-t=<NAME>, --theme=<NAME> Name of the theme to use.
"""
import os
import yaml
import shutil
import docopt
import jinja2
import helpers
# Template defaults
defaults = {
'labels': None,
}
def read_yaml(filename):
"""
Read Yaml file given by ``filename`` and return dictionary data.
"""
with open(filename, 'rt') as f:
return yaml.load(f)
def render_template(tpl, vars):
"""
Render template file with ``vars`` arguments.
"""
with open(tpl, 'rt') as f:
tpl = jinja2.Template(f.read())
return tpl.render(**vars)
def copy_static_data(theme_dir, output_dir):
"""
Copy contents of theme directory skipping all jinja template files.
"""
def ignored_files(src, names):
return [name for name in names if name.endswith('.jinja2')]
shutil.copytree(theme_dir, output_dir, ignore=ignored_files)
def clean(output_dir):
"""
Remove the output directory.
"""
shutil.rmtree(output_dir, ignore_errors=True)
def build(data, config, output_dir):
"""
Build the final directory, rendering all templates and copying source files
"""
theme_name = config.get('theme', 'simple')
vars = defaults.copy()
vars.update(data)
vars['config'] = config
vars['h'] = helpers # make helpers module accessible via 'h' shortcut.
theme_location = os.path.join('themes', theme_name)
clean(output_dir)
copy_static_data(theme_location, output_dir)
for filename in os.listdir(theme_location):
if not filename.endswith('.jinja2'):
continue
html = render_template(os.path.join(theme_location, filename),
vars)
rendered_file = filename.replace('.jinja2', '.html')
with open(os.path.join(output_dir, rendered_file), 'wt') as f:
f.write(html)
def make_html(config, data):
"""
Generate static html build of the resume given by input `data`.
"""
output_dir = config.get('output_dir', 'build')
build(data, config, output_dir)
def make_pdf(config, data):
"""
Generate PDF file out of generated 'index.html' page.
"""
from weasyprint import HTML
output_dir = config.get('output_dir', 'build')
output_file = os.path.join(output_dir, config.get('pdf_file', 'resume.pdf'))
input_file = os.path.join(output_dir, 'index.html')
theme_location = os.path.join('themes', config['theme'])
html = HTML(input_file, base_url=theme_location)
html.write_pdf(output_file)
def main():
"""
Entry function for the script to handle command arguments
and run appropriate build like 'html' and 'pdf'.
"""
args = docopt.docopt(__doc__)
output_format = args['--format']
# read resume data and config with some defaults
resume_data = read_yaml(args['<resume_file>'])
config = resume_data.get('config', {})
config.setdefault('output_dir', args['--output_dir'])
config['theme'] = args['--theme'] or config.get('theme')
config.setdefault('theme', 'simple')
# build based on the given format
cmds = {'html': make_html, 'pdf': make_pdf}
return cmds[output_format](config, resume_data)
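# Illustrative sketch (an addition, not part of the original script): the same
# build can be driven programmatically, bypassing docopt, by assembling the
# config the way main() does. The file name below is a placeholder.
def _demo_programmatic_build(resume_file='resume.yaml'):
    data = read_yaml(resume_file)
    config = data.get('config', {})
    config.setdefault('output_dir', 'build')
    config.setdefault('theme', 'simple')
    make_html(config, data)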
if __name__ == '__main__':
main()
"""
Move the build content to the main folder - carlosgim
"""
from distutils.dir_util import copy_tree
src = os.getcwd() +"/build/"
dest = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
copy_tree(src, dest)
| 26.215278 | 92 | 0.651921 |
4f461f3c4ec8d01985ca4cc27c43e12abc439726 | 436 | py | Python | backend/global/settings/prod.py | kevindice/cnap-dms | edb850412b6f95d1d4e057674e5cd899ee0b444e | [
"MIT"
] | 1 | 2018-11-01T22:16:02.000Z | 2018-11-01T22:16:02.000Z | backend/global/settings/prod.py | kevindice/cnap-dms | edb850412b6f95d1d4e057674e5cd899ee0b444e | [
"MIT"
] | 128 | 2018-04-19T08:28:03.000Z | 2018-12-20T19:02:06.000Z | backend/global/settings/prod.py | cnap-cobre/hyperion | edb850412b6f95d1d4e057674e5cd899ee0b444e | [
"MIT"
] | 2 | 2018-04-24T20:04:55.000Z | 2018-04-25T12:17:29.000Z | from .base import *
from .secret import Secret
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1',
'172.18.0.1',
'127.18.0.3',
'localhost',
'synapse.ksu.edu',
'172.18.0.4']
CSRF_TRUSTED_ORIGINS += ['localhost']
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'apprelay.smtp.ksu.edu'
DEFAULT_FROM_EMAIL = 'noreply@synapse.ksu.edu'
| 27.25 | 61 | 0.582569 |
709c77bab92408e1a384c591359ccf2ad062f494 | 87,532 | py | Python | psutil/tests/test_linux.py | marza-animation-planet/psutil | e1ea2bccf8aea404dca0f79398f36f37217c45f6 | [
"BSD-3-Clause"
] | null | null | null | psutil/tests/test_linux.py | marza-animation-planet/psutil | e1ea2bccf8aea404dca0f79398f36f37217c45f6 | [
"BSD-3-Clause"
] | null | null | null | psutil/tests/test_linux.py | marza-animation-planet/psutil | e1ea2bccf8aea404dca0f79398f36f37217c45f6 | [
"BSD-3-Clause"
] | 1 | 2020-05-07T08:37:52.000Z | 2020-05-07T08:37:52.000Z | #!/usr/bin/env python3
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux specific tests."""
from __future__ import division
import collections
import contextlib
import errno
import glob
import io
import os
import re
import shutil
import socket
import struct
import textwrap
import time
import warnings
import psutil
from psutil import LINUX
from psutil._compat import basestring
from psutil._compat import FileNotFoundError
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import call_until
from psutil.tests import HAS_BATTERY
from psutil.tests import HAS_CPU_FREQ
from psutil.tests import HAS_GETLOADAVG
from psutil.tests import HAS_RLIMIT
from psutil.tests import mock
from psutil.tests import PsutilTestCase
from psutil.tests import PYPY
from psutil.tests import reload_module
from psutil.tests import retry_on_failure
from psutil.tests import safe_rmpath
from psutil.tests import sh
from psutil.tests import skip_on_not_implemented
from psutil.tests import SYSMEM_TOLERANCE
from psutil.tests import ThreadTask
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import which
HERE = os.path.abspath(os.path.dirname(__file__))
SIOCGIFADDR = 0x8915
SIOCGIFCONF = 0x8912
SIOCGIFHWADDR = 0x8927
if LINUX:
SECTOR_SIZE = 512
EMPTY_TEMPERATURES = not glob.glob('/sys/class/hwmon/hwmon*')
# =====================================================================
# --- utils
# =====================================================================
def get_ipv4_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
return socket.inet_ntoa(
fcntl.ioctl(s.fileno(),
SIOCGIFADDR,
struct.pack('256s', ifname))[20:24])
def get_mac_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
info = fcntl.ioctl(
s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname))
if PY3:
def ord(x):
return x
else:
import __builtin__
ord = __builtin__.ord
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
def free_swap():
"""Parse 'free' cmd and return swap memory's s total, used and free
values.
"""
out = sh('free -b', env={"LANG": "C.UTF-8"})
lines = out.split('\n')
for line in lines:
if line.startswith('Swap'):
_, total, used, free = line.split()
nt = collections.namedtuple('free', 'total used free')
return nt(int(total), int(used), int(free))
raise ValueError(
"can't find 'Swap' in 'free' output:\n%s" % '\n'.join(lines))
def free_physmem():
"""Parse 'free' cmd and return physical memory's total, used
and free values.
"""
# Note: free can have 2 different formats, invalidating 'shared'
# and 'cached' memory which may have different positions so we
# do not return them.
# https://github.com/giampaolo/psutil/issues/538#issuecomment-57059946
out = sh('free -b', env={"LANG": "C.UTF-8"})
lines = out.split('\n')
for line in lines:
if line.startswith('Mem'):
total, used, free, shared = \
[int(x) for x in line.split()[1:5]]
nt = collections.namedtuple(
'free', 'total used free shared output')
return nt(total, used, free, shared, out)
raise ValueError(
"can't find 'Mem' in 'free' output:\n%s" % '\n'.join(lines))
def vmstat(stat):
out = sh("vmstat -s", env={"LANG": "C.UTF-8"})
for line in out.split("\n"):
line = line.strip()
if stat in line:
return int(line.split(' ')[0])
raise ValueError("can't find %r in 'vmstat' output" % stat)
def get_free_version_info():
out = sh("free -V").strip()
return tuple(map(int, out.split()[-1].split('.')))
@contextlib.contextmanager
def mock_open_content(for_path, content):
"""Mock open() builtin and forces it to return a certain `content`
on read() if the path being opened matches `for_path`.
"""
def open_mock(name, *args, **kwargs):
if name == for_path:
if PY3:
if isinstance(content, basestring):
return io.StringIO(content)
else:
return io.BytesIO(content)
else:
return io.BytesIO(content)
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
yield m
@contextlib.contextmanager
def mock_open_exception(for_path, exc):
"""Mock open() builtin and raises `exc` if the path being opened
matches `for_path`.
"""
def open_mock(name, *args, **kwargs):
if name == for_path:
raise exc
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
yield m
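# Illustrative usage sketch (an addition, not one of the original tests): the
# two helpers above let a test fake the content of, or the error raised by,
# one specific file while leaving every other open() call untouched.
def _demo_mock_open_helpers():
    with mock_open_content('/proc/uptime', b'100.00 200.00'):
        with open('/proc/uptime', 'rb') as f:
            assert f.read() == b'100.00 200.00'
    with mock_open_exception('/proc/uptime', IOError(errno.ENOENT, '')):
        try:
            open('/proc/uptime')
        except IOError:
            pass  # expected
        else:
            raise AssertionError('expected IOError')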
# =====================================================================
# --- system virtual memory
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemVirtualMemory(PsutilTestCase):
def test_total(self):
# free_value = free_physmem().total
# psutil_value = psutil.virtual_memory().total
# self.assertEqual(free_value, psutil_value)
vmstat_value = vmstat('total memory') * 1024
psutil_value = psutil.virtual_memory().total
self.assertAlmostEqual(vmstat_value, psutil_value)
# Older versions of procps used slab memory to calculate used memory.
# This got changed in:
# https://gitlab.com/procps-ng/procps/commit/
# 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
@unittest.skipIf(LINUX and get_free_version_info() < (3, 3, 12),
"old free version")
@retry_on_failure()
def test_used(self):
free = free_physmem()
free_value = free.used
psutil_value = psutil.virtual_memory().used
self.assertAlmostEqual(
free_value, psutil_value, delta=SYSMEM_TOLERANCE,
msg='%s %s \n%s' % (free_value, psutil_value, free.output))
@unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
@retry_on_failure()
def test_free(self):
vmstat_value = vmstat('free memory') * 1024
psutil_value = psutil.virtual_memory().free
self.assertAlmostEqual(
vmstat_value, psutil_value, delta=SYSMEM_TOLERANCE)
@retry_on_failure()
def test_buffers(self):
vmstat_value = vmstat('buffer memory') * 1024
psutil_value = psutil.virtual_memory().buffers
self.assertAlmostEqual(
vmstat_value, psutil_value, delta=SYSMEM_TOLERANCE)
# https://travis-ci.org/giampaolo/psutil/jobs/226719664
@unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
@retry_on_failure()
def test_active(self):
vmstat_value = vmstat('active memory') * 1024
psutil_value = psutil.virtual_memory().active
self.assertAlmostEqual(
vmstat_value, psutil_value, delta=SYSMEM_TOLERANCE)
# https://travis-ci.org/giampaolo/psutil/jobs/227242952
@unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
@retry_on_failure()
def test_inactive(self):
vmstat_value = vmstat('inactive memory') * 1024
psutil_value = psutil.virtual_memory().inactive
self.assertAlmostEqual(
vmstat_value, psutil_value, delta=SYSMEM_TOLERANCE)
@retry_on_failure()
def test_shared(self):
free = free_physmem()
free_value = free.shared
if free_value == 0:
raise unittest.SkipTest("free does not support 'shared' column")
psutil_value = psutil.virtual_memory().shared
self.assertAlmostEqual(
free_value, psutil_value, delta=SYSMEM_TOLERANCE,
msg='%s %s \n%s' % (free_value, psutil_value, free.output))
@retry_on_failure()
def test_available(self):
# "free" output format has changed at some point:
# https://github.com/giampaolo/psutil/issues/538#issuecomment-147192098
out = sh("free -b")
lines = out.split('\n')
if 'available' not in lines[0]:
raise unittest.SkipTest("free does not support 'available' column")
else:
free_value = int(lines[1].split()[-1])
psutil_value = psutil.virtual_memory().available
self.assertAlmostEqual(
free_value, psutil_value, delta=SYSMEM_TOLERANCE,
msg='%s %s \n%s' % (free_value, psutil_value, out))
def test_warnings_on_misses(self):
        # Emulate a case where /proc/meminfo provides little info.
# psutil is supposed to set the missing fields to 0 and
# raise a warning.
with mock_open_content(
'/proc/meminfo',
textwrap.dedent("""\
Active(anon): 6145416 kB
Active(file): 2950064 kB
Inactive(anon): 574764 kB
Inactive(file): 1567648 kB
MemAvailable: -1 kB
MemFree: 2057400 kB
MemTotal: 16325648 kB
SReclaimable: 346648 kB
""").encode()) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil.virtual_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
assert w.filename.endswith('psutil/_pslinux.py')
self.assertIn(
"memory stats couldn't be determined", str(w.message))
self.assertIn("cached", str(w.message))
self.assertIn("shared", str(w.message))
self.assertIn("active", str(w.message))
self.assertIn("inactive", str(w.message))
self.assertIn("buffers", str(w.message))
self.assertIn("available", str(w.message))
self.assertEqual(ret.cached, 0)
self.assertEqual(ret.active, 0)
self.assertEqual(ret.inactive, 0)
self.assertEqual(ret.shared, 0)
self.assertEqual(ret.buffers, 0)
self.assertEqual(ret.available, 0)
self.assertEqual(ret.slab, 0)
@retry_on_failure()
def test_avail_old_percent(self):
# Make sure that our calculation of avail mem for old kernels
# is off by max 15%.
from psutil._pslinux import calculate_avail_vmem
from psutil._pslinux import open_binary
mems = {}
with open_binary('/proc/meminfo') as f:
for line in f:
fields = line.split()
mems[fields[0]] = int(fields[1]) * 1024
a = calculate_avail_vmem(mems)
if b'MemAvailable:' in mems:
b = mems[b'MemAvailable:']
diff_percent = abs(a - b) / a * 100
self.assertLess(diff_percent, 15)
def test_avail_old_comes_from_kernel(self):
# Make sure "MemAvailable:" coluimn is used instead of relying
# on our internal algorithm to calculate avail mem.
with mock_open_content(
'/proc/meminfo',
textwrap.dedent("""\
Active: 9444728 kB
Active(anon): 6145416 kB
Active(file): 2950064 kB
Buffers: 287952 kB
Cached: 4818144 kB
Inactive(file): 1578132 kB
Inactive(anon): 574764 kB
Inactive(file): 1567648 kB
MemAvailable: 6574984 kB
MemFree: 2057400 kB
MemTotal: 16325648 kB
Shmem: 577588 kB
SReclaimable: 346648 kB
""").encode()) as m:
with warnings.catch_warnings(record=True) as ws:
ret = psutil.virtual_memory()
assert m.called
self.assertEqual(ret.available, 6574984 * 1024)
w = ws[0]
self.assertIn(
"inactive memory stats couldn't be determined", str(w.message))
def test_avail_old_missing_fields(self):
# Remove Active(file), Inactive(file) and SReclaimable
# from /proc/meminfo and make sure the fallback is used
# (free + cached),
with mock_open_content(
"/proc/meminfo",
textwrap.dedent("""\
Active: 9444728 kB
Active(anon): 6145416 kB
Buffers: 287952 kB
Cached: 4818144 kB
Inactive(file): 1578132 kB
Inactive(anon): 574764 kB
MemFree: 2057400 kB
MemTotal: 16325648 kB
Shmem: 577588 kB
""").encode()) as m:
with warnings.catch_warnings(record=True) as ws:
ret = psutil.virtual_memory()
assert m.called
self.assertEqual(ret.available, 2057400 * 1024 + 4818144 * 1024)
w = ws[0]
self.assertIn(
"inactive memory stats couldn't be determined", str(w.message))
def test_avail_old_missing_zoneinfo(self):
# Remove /proc/zoneinfo file. Make sure fallback is used
# (free + cached).
with mock_open_content(
"/proc/meminfo",
textwrap.dedent("""\
Active: 9444728 kB
Active(anon): 6145416 kB
Active(file): 2950064 kB
Buffers: 287952 kB
Cached: 4818144 kB
Inactive(file): 1578132 kB
Inactive(anon): 574764 kB
Inactive(file): 1567648 kB
MemFree: 2057400 kB
MemTotal: 16325648 kB
Shmem: 577588 kB
SReclaimable: 346648 kB
""").encode()):
with mock_open_exception(
"/proc/zoneinfo",
IOError(errno.ENOENT, 'no such file or directory')):
with warnings.catch_warnings(record=True) as ws:
ret = psutil.virtual_memory()
self.assertEqual(
ret.available, 2057400 * 1024 + 4818144 * 1024)
w = ws[0]
self.assertIn(
"inactive memory stats couldn't be determined",
str(w.message))
def test_virtual_memory_mocked(self):
# Emulate /proc/meminfo because neither vmstat nor free return slab.
def open_mock(name, *args, **kwargs):
if name == '/proc/meminfo':
return io.BytesIO(textwrap.dedent("""\
MemTotal: 100 kB
MemFree: 2 kB
MemAvailable: 3 kB
Buffers: 4 kB
Cached: 5 kB
SwapCached: 6 kB
Active: 7 kB
Inactive: 8 kB
Active(anon): 9 kB
Inactive(anon): 10 kB
Active(file): 11 kB
Inactive(file): 12 kB
Unevictable: 13 kB
Mlocked: 14 kB
SwapTotal: 15 kB
SwapFree: 16 kB
Dirty: 17 kB
Writeback: 18 kB
AnonPages: 19 kB
Mapped: 20 kB
Shmem: 21 kB
Slab: 22 kB
SReclaimable: 23 kB
SUnreclaim: 24 kB
KernelStack: 25 kB
PageTables: 26 kB
NFS_Unstable: 27 kB
Bounce: 28 kB
WritebackTmp: 29 kB
CommitLimit: 30 kB
Committed_AS: 31 kB
VmallocTotal: 32 kB
VmallocUsed: 33 kB
VmallocChunk: 34 kB
HardwareCorrupted: 35 kB
AnonHugePages: 36 kB
ShmemHugePages: 37 kB
ShmemPmdMapped: 38 kB
CmaTotal: 39 kB
CmaFree: 40 kB
HugePages_Total: 41 kB
HugePages_Free: 42 kB
HugePages_Rsvd: 43 kB
HugePages_Surp: 44 kB
Hugepagesize: 45 kB
DirectMap46k: 46 kB
DirectMap47M: 47 kB
DirectMap48G: 48 kB
""").encode())
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
mem = psutil.virtual_memory()
assert m.called
self.assertEqual(mem.total, 100 * 1024)
self.assertEqual(mem.free, 2 * 1024)
self.assertEqual(mem.buffers, 4 * 1024)
# cached mem also includes reclaimable memory
self.assertEqual(mem.cached, (5 + 23) * 1024)
self.assertEqual(mem.shared, 21 * 1024)
self.assertEqual(mem.active, 7 * 1024)
self.assertEqual(mem.inactive, 8 * 1024)
self.assertEqual(mem.slab, 22 * 1024)
self.assertEqual(mem.available, 3 * 1024)
# =====================================================================
# --- system swap memory
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemSwapMemory(PsutilTestCase):
@staticmethod
def meminfo_has_swap_info():
"""Return True if /proc/meminfo provides swap metrics."""
with open("/proc/meminfo") as f:
data = f.read()
return 'SwapTotal:' in data and 'SwapFree:' in data
def test_total(self):
free_value = free_swap().total
psutil_value = psutil.swap_memory().total
return self.assertAlmostEqual(
free_value, psutil_value, delta=SYSMEM_TOLERANCE)
@retry_on_failure()
def test_used(self):
free_value = free_swap().used
psutil_value = psutil.swap_memory().used
return self.assertAlmostEqual(
free_value, psutil_value, delta=SYSMEM_TOLERANCE)
@retry_on_failure()
def test_free(self):
free_value = free_swap().free
psutil_value = psutil.swap_memory().free
return self.assertAlmostEqual(
free_value, psutil_value, delta=SYSMEM_TOLERANCE)
def test_missing_sin_sout(self):
with mock.patch('psutil._common.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
assert w.filename.endswith('psutil/_pslinux.py')
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
def test_no_vmstat_mocked(self):
# see https://github.com/giampaolo/psutil/issues/722
with mock_open_exception(
"/proc/vmstat",
IOError(errno.ENOENT, 'no such file or directory')) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
assert w.filename.endswith('psutil/_pslinux.py')
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined and were set to 0",
str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
def test_meminfo_against_sysinfo(self):
# Make sure the content of /proc/meminfo about swap memory
# matches sysinfo() syscall, see:
# https://github.com/giampaolo/psutil/issues/1015
if not self.meminfo_has_swap_info():
            raise unittest.SkipTest("/proc/meminfo has no swap metrics")
with mock.patch('psutil._pslinux.cext.linux_sysinfo') as m:
swap = psutil.swap_memory()
assert not m.called
import psutil._psutil_linux as cext
_, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
total *= unit_multiplier
free *= unit_multiplier
self.assertEqual(swap.total, total)
self.assertAlmostEqual(swap.free, free, delta=SYSMEM_TOLERANCE)
def test_emulate_meminfo_has_no_metrics(self):
# Emulate a case where /proc/meminfo provides no swap metrics
# in which case sysinfo() syscall is supposed to be used
# as a fallback.
with mock_open_content("/proc/meminfo", b"") as m:
psutil.swap_memory()
assert m.called
# =====================================================================
# --- system CPU
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemCPUTimes(PsutilTestCase):
@unittest.skipIf(TRAVIS, "unknown failure on travis")
def test_fields(self):
fields = psutil.cpu_times()._fields
kernel_ver = re.findall(r'\d+\.\d+\.\d+', os.uname()[2])[0]
kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
if kernel_ver_info >= (2, 6, 11):
self.assertIn('steal', fields)
else:
self.assertNotIn('steal', fields)
if kernel_ver_info >= (2, 6, 24):
self.assertIn('guest', fields)
else:
self.assertNotIn('guest', fields)
if kernel_ver_info >= (3, 2, 0):
self.assertIn('guest_nice', fields)
else:
self.assertNotIn('guest_nice', fields)
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemCPUCountLogical(PsutilTestCase):
@unittest.skipIf(not os.path.exists("/sys/devices/system/cpu/online"),
"/sys/devices/system/cpu/online does not exist")
def test_against_sysdev_cpu_online(self):
with open("/sys/devices/system/cpu/online") as f:
value = f.read().strip()
if "-" in str(value):
value = int(value.split('-')[1]) + 1
self.assertEqual(psutil.cpu_count(), value)
@unittest.skipIf(not os.path.exists("/sys/devices/system/cpu"),
"/sys/devices/system/cpu does not exist")
def test_against_sysdev_cpu_num(self):
ls = os.listdir("/sys/devices/system/cpu")
count = len([x for x in ls if re.search(r"cpu\d+$", x) is not None])
self.assertEqual(psutil.cpu_count(), count)
@unittest.skipIf(not which("nproc"), "nproc utility not available")
def test_against_nproc(self):
num = int(sh("nproc --all"))
self.assertEqual(psutil.cpu_count(logical=True), num)
@unittest.skipIf(not which("lscpu"), "lscpu utility not available")
def test_against_lscpu(self):
out = sh("lscpu -p")
num = len([x for x in out.split('\n') if not x.startswith('#')])
self.assertEqual(psutil.cpu_count(logical=True), num)
def test_emulate_fallbacks(self):
import psutil._pslinux
original = psutil._pslinux.cpu_count_logical()
# Here we want to mock os.sysconf("SC_NPROCESSORS_ONLN") in
# order to cause the parsing of /proc/cpuinfo and /proc/stat.
with mock.patch(
'psutil._pslinux.os.sysconf', side_effect=ValueError) as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
assert m.called
        # Let's have open() return empty data and make sure None is
        # returned ('cause we mimic os.cpu_count()).
with mock.patch('psutil._common.open', create=True) as m:
self.assertIsNone(psutil._pslinux.cpu_count_logical())
self.assertEqual(m.call_count, 2)
# /proc/stat should be the last one
self.assertEqual(m.call_args[0][0], '/proc/stat')
# Let's push this a bit further and make sure /proc/cpuinfo
# parsing works as expected.
with open('/proc/cpuinfo', 'rb') as f:
cpuinfo_data = f.read()
fake_file = io.BytesIO(cpuinfo_data)
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
# Finally, let's make /proc/cpuinfo return meaningless data;
# this way we'll fall back on relying on /proc/stat
with mock_open_content('/proc/cpuinfo', b"") as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
            assert m.called
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemCPUCountPhysical(PsutilTestCase):
@unittest.skipIf(not which("lscpu"), "lscpu utility not available")
def test_against_lscpu(self):
out = sh("lscpu -p")
core_ids = set()
for line in out.split('\n'):
if not line.startswith('#'):
fields = line.split(',')
core_ids.add(fields[1])
self.assertEqual(psutil.cpu_count(logical=False), len(core_ids))
def test_emulate_none(self):
with mock.patch('glob.glob', return_value=[]) as m1:
with mock.patch('psutil._common.open', create=True) as m2:
self.assertIsNone(psutil._pslinux.cpu_count_physical())
assert m1.called
assert m2.called
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemCPUFrequency(PsutilTestCase):
@unittest.skipIf(TRAVIS, "fails on Travis")
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_emulate_use_second_file(self):
# https://github.com/giampaolo/psutil/issues/981
def path_exists_mock(path):
if path.startswith("/sys/devices/system/cpu/cpufreq/policy"):
return False
else:
return orig_exists(path)
orig_exists = os.path.exists
with mock.patch("os.path.exists", side_effect=path_exists_mock,
create=True):
assert psutil.cpu_freq()
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_emulate_use_cpuinfo(self):
# Emulate a case where /sys/devices/system/cpu/cpufreq* does not
# exist and /proc/cpuinfo is used instead.
def path_exists_mock(path):
if path.startswith('/sys/devices/system/cpu/'):
return False
else:
if path == "/proc/cpuinfo":
flags.append(None)
return os_path_exists(path)
flags = []
os_path_exists = os.path.exists
try:
with mock.patch("os.path.exists", side_effect=path_exists_mock):
reload_module(psutil._pslinux)
ret = psutil.cpu_freq()
assert ret
assert flags
self.assertEqual(ret.max, 0.0)
self.assertEqual(ret.min, 0.0)
for freq in psutil.cpu_freq(percpu=True):
self.assertEqual(ret.max, 0.0)
self.assertEqual(ret.min, 0.0)
finally:
reload_module(psutil._pslinux)
reload_module(psutil)
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_emulate_data(self):
def open_mock(name, *args, **kwargs):
if (name.endswith('/scaling_cur_freq') and
name.startswith("/sys/devices/system/cpu/cpufreq/policy")):
return io.BytesIO(b"500000")
elif (name.endswith('/scaling_min_freq') and
name.startswith("/sys/devices/system/cpu/cpufreq/policy")):
return io.BytesIO(b"600000")
elif (name.endswith('/scaling_max_freq') and
name.startswith("/sys/devices/system/cpu/cpufreq/policy")):
return io.BytesIO(b"700000")
elif name == '/proc/cpuinfo':
return io.BytesIO(b"cpu MHz : 500")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
with mock.patch(
'os.path.exists', return_value=True):
freq = psutil.cpu_freq()
self.assertEqual(freq.current, 500.0)
# when /proc/cpuinfo is used min and max frequencies are not
# available and are set to 0.
if freq.min != 0.0:
self.assertEqual(freq.min, 600.0)
if freq.max != 0.0:
self.assertEqual(freq.max, 700.0)
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_emulate_multi_cpu(self):
def open_mock(name, *args, **kwargs):
n = name
if (n.endswith('/scaling_cur_freq') and
n.startswith("/sys/devices/system/cpu/cpufreq/policy0")):
return io.BytesIO(b"100000")
elif (n.endswith('/scaling_min_freq') and
n.startswith("/sys/devices/system/cpu/cpufreq/policy0")):
return io.BytesIO(b"200000")
elif (n.endswith('/scaling_max_freq') and
n.startswith("/sys/devices/system/cpu/cpufreq/policy0")):
return io.BytesIO(b"300000")
elif (n.endswith('/scaling_cur_freq') and
n.startswith("/sys/devices/system/cpu/cpufreq/policy1")):
return io.BytesIO(b"400000")
elif (n.endswith('/scaling_min_freq') and
n.startswith("/sys/devices/system/cpu/cpufreq/policy1")):
return io.BytesIO(b"500000")
elif (n.endswith('/scaling_max_freq') and
n.startswith("/sys/devices/system/cpu/cpufreq/policy1")):
return io.BytesIO(b"600000")
elif name == '/proc/cpuinfo':
return io.BytesIO(b"cpu MHz : 100\n"
b"cpu MHz : 400")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
with mock.patch('os.path.exists', return_value=True):
with mock.patch('psutil._pslinux.cpu_count_logical',
return_value=2):
freq = psutil.cpu_freq(percpu=True)
self.assertEqual(freq[0].current, 100.0)
if freq[0].min != 0.0:
self.assertEqual(freq[0].min, 200.0)
if freq[0].max != 0.0:
self.assertEqual(freq[0].max, 300.0)
self.assertEqual(freq[1].current, 400.0)
if freq[1].min != 0.0:
self.assertEqual(freq[1].min, 500.0)
if freq[1].max != 0.0:
self.assertEqual(freq[1].max, 600.0)
@unittest.skipIf(TRAVIS, "fails on Travis")
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_emulate_no_scaling_cur_freq_file(self):
# See: https://github.com/giampaolo/psutil/issues/1071
def open_mock(name, *args, **kwargs):
if name.endswith('/scaling_cur_freq'):
raise IOError(errno.ENOENT, "")
elif name.endswith('/cpuinfo_cur_freq'):
return io.BytesIO(b"200000")
elif name == '/proc/cpuinfo':
return io.BytesIO(b"cpu MHz : 200")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
with mock.patch('os.path.exists', return_value=True):
with mock.patch('psutil._pslinux.cpu_count_logical',
return_value=1):
freq = psutil.cpu_freq()
self.assertEqual(freq.current, 200)
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemCPUStats(PsutilTestCase):
@unittest.skipIf(TRAVIS, "fails on Travis")
def test_ctx_switches(self):
vmstat_value = vmstat("context switches")
psutil_value = psutil.cpu_stats().ctx_switches
self.assertAlmostEqual(vmstat_value, psutil_value, delta=500)
@unittest.skipIf(TRAVIS, "fails on Travis")
def test_interrupts(self):
vmstat_value = vmstat("interrupts")
psutil_value = psutil.cpu_stats().interrupts
self.assertAlmostEqual(vmstat_value, psutil_value, delta=500)
@unittest.skipIf(not LINUX, "LINUX only")
class TestLoadAvg(PsutilTestCase):
@unittest.skipIf(not HAS_GETLOADAVG, "not supported")
def test_getloadavg(self):
psutil_value = psutil.getloadavg()
with open("/proc/loadavg", "r") as f:
proc_value = f.read().split()
self.assertAlmostEqual(float(proc_value[0]), psutil_value[0], delta=1)
self.assertAlmostEqual(float(proc_value[1]), psutil_value[1], delta=1)
self.assertAlmostEqual(float(proc_value[2]), psutil_value[2], delta=1)
# =====================================================================
# --- system network
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemNetIfAddrs(PsutilTestCase):
def test_ips(self):
for name, addrs in psutil.net_if_addrs().items():
for addr in addrs:
if addr.family == psutil.AF_LINK:
self.assertEqual(addr.address, get_mac_address(name))
elif addr.family == socket.AF_INET:
self.assertEqual(addr.address, get_ipv4_address(name))
# TODO: test for AF_INET6 family
# XXX - not reliable when having virtual NICs installed by Docker.
# @unittest.skipIf(not which('ip'), "'ip' utility not available")
# @unittest.skipIf(TRAVIS, "skipped on Travis")
# def test_net_if_names(self):
# out = sh("ip addr").strip()
# nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x]
# found = 0
# for line in out.split('\n'):
# line = line.strip()
# if re.search(r"^\d+:", line):
# found += 1
# name = line.split(':')[1].strip()
# self.assertIn(name, nics)
# self.assertEqual(len(nics), found, msg="%s\n---\n%s" % (
# pprint.pformat(nics), out))
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemNetIfStats(PsutilTestCase):
def test_against_ifconfig(self):
for name, stats in psutil.net_if_stats().items():
try:
out = sh("ifconfig %s" % name)
except RuntimeError:
pass
else:
# Not always reliable.
# self.assertEqual(stats.isup, 'RUNNING' in out, msg=out)
self.assertEqual(stats.mtu,
int(re.findall(r'(?i)MTU[: ](\d+)', out)[0]))
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemNetIOCounters(PsutilTestCase):
@retry_on_failure()
def test_against_ifconfig(self):
def ifconfig(nic):
ret = {}
out = sh("ifconfig %s" % name)
ret['packets_recv'] = int(
re.findall(r'RX packets[: ](\d+)', out)[0])
ret['packets_sent'] = int(
re.findall(r'TX packets[: ](\d+)', out)[0])
ret['errin'] = int(re.findall(r'errors[: ](\d+)', out)[0])
ret['errout'] = int(re.findall(r'errors[: ](\d+)', out)[1])
ret['dropin'] = int(re.findall(r'dropped[: ](\d+)', out)[0])
ret['dropout'] = int(re.findall(r'dropped[: ](\d+)', out)[1])
ret['bytes_recv'] = int(
re.findall(r'RX (?:packets \d+ +)?bytes[: ](\d+)', out)[0])
ret['bytes_sent'] = int(
re.findall(r'TX (?:packets \d+ +)?bytes[: ](\d+)', out)[0])
return ret
nio = psutil.net_io_counters(pernic=True, nowrap=False)
for name, stats in nio.items():
try:
ifconfig_ret = ifconfig(name)
except RuntimeError:
continue
self.assertAlmostEqual(
stats.bytes_recv, ifconfig_ret['bytes_recv'], delta=1024 * 5)
self.assertAlmostEqual(
stats.bytes_sent, ifconfig_ret['bytes_sent'], delta=1024 * 5)
self.assertAlmostEqual(
stats.packets_recv, ifconfig_ret['packets_recv'], delta=1024)
self.assertAlmostEqual(
stats.packets_sent, ifconfig_ret['packets_sent'], delta=1024)
self.assertAlmostEqual(
stats.errin, ifconfig_ret['errin'], delta=10)
self.assertAlmostEqual(
stats.errout, ifconfig_ret['errout'], delta=10)
self.assertAlmostEqual(
stats.dropin, ifconfig_ret['dropin'], delta=10)
self.assertAlmostEqual(
stats.dropout, ifconfig_ret['dropout'], delta=10)
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemNetConnections(PsutilTestCase):
@mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError)
@mock.patch('psutil._pslinux.supports_ipv6', return_value=False)
def test_emulate_ipv6_unsupported(self, supports_ipv6, inet_ntop):
# see: https://github.com/giampaolo/psutil/issues/623
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(("::1", 0))
except socket.error:
pass
psutil.net_connections(kind='inet6')
def test_emulate_unix(self):
with mock_open_content(
'/proc/net/unix',
textwrap.dedent("""\
0: 00000003 000 000 0001 03 462170 @/tmp/dbus-Qw2hMPIU3n
0: 00000003 000 000 0001 03 35010 @/tmp/dbus-tB2X8h69BQ
0: 00000003 000 000 0001 03 34424 @/tmp/dbus-cHy80Y8O
000000000000000000000000000000000000000000000000000000
""")) as m:
psutil.net_connections(kind='unix')
assert m.called
# =====================================================================
# --- system disks
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemDiskPartitions(PsutilTestCase):
@unittest.skipIf(not hasattr(os, 'statvfs'), "os.statvfs() not available")
@skip_on_not_implemented()
def test_against_df(self):
# test psutil.disk_usage() and psutil.disk_partitions()
# against "df -a"
def df(path):
out = sh('df -P -B 1 "%s"' % path).strip()
lines = out.split('\n')
lines.pop(0)
line = lines.pop(0)
dev, total, used, free = line.split()[:4]
if dev == 'none':
dev = ''
total, used, free = int(total), int(used), int(free)
return dev, total, used, free
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
dev, total, used, free = df(part.mountpoint)
self.assertEqual(usage.total, total)
            # 10 MB tolerance
if abs(usage.free - free) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.free, free))
if abs(usage.used - used) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.used, used))
def test_zfs_fs(self):
# Test that ZFS partitions are returned.
with open("/proc/filesystems", "r") as f:
data = f.read()
if 'zfs' in data:
for part in psutil.disk_partitions():
if part.fstype == 'zfs':
break
else:
self.fail("couldn't find any ZFS partition")
else:
# No ZFS partitions on this system. Let's fake one.
fake_file = io.StringIO(u("nodev\tzfs\n"))
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m1:
with mock.patch(
'psutil._pslinux.cext.disk_partitions',
return_value=[('/dev/sdb3', '/', 'zfs', 'rw')]) as m2:
ret = psutil.disk_partitions()
assert m1.called
assert m2.called
assert ret
self.assertEqual(ret[0].fstype, 'zfs')
def test_emulate_realpath_fail(self):
# See: https://github.com/giampaolo/psutil/issues/1307
try:
with mock.patch('os.path.realpath',
return_value='/non/existent') as m:
with self.assertRaises(FileNotFoundError):
psutil.disk_partitions()
assert m.called
finally:
psutil.PROCFS_PATH = "/proc"
@unittest.skipIf(not LINUX, "LINUX only")
class TestSystemDiskIoCounters(PsutilTestCase):
def test_emulate_kernel_2_4(self):
# Tests /proc/diskstats parsing format for 2.4 kernels, see:
# https://github.com/giampaolo/psutil/issues/767
with mock_open_content(
'/proc/diskstats',
" 3 0 1 hda 2 3 4 5 6 7 8 9 10 11 12"):
with mock.patch('psutil._pslinux.is_storage_device',
return_value=True):
ret = psutil.disk_io_counters(nowrap=False)
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.read_merged_count, 2)
self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE)
self.assertEqual(ret.read_time, 4)
self.assertEqual(ret.write_count, 5)
self.assertEqual(ret.write_merged_count, 6)
self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE)
self.assertEqual(ret.write_time, 8)
self.assertEqual(ret.busy_time, 10)
def test_emulate_kernel_2_6_full(self):
# Tests /proc/diskstats parsing format for 2.6 kernels,
# lines reporting all metrics:
# https://github.com/giampaolo/psutil/issues/767
with mock_open_content(
'/proc/diskstats',
" 3 0 hda 1 2 3 4 5 6 7 8 9 10 11"):
with mock.patch('psutil._pslinux.is_storage_device',
return_value=True):
ret = psutil.disk_io_counters(nowrap=False)
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.read_merged_count, 2)
self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE)
self.assertEqual(ret.read_time, 4)
self.assertEqual(ret.write_count, 5)
self.assertEqual(ret.write_merged_count, 6)
self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE)
self.assertEqual(ret.write_time, 8)
self.assertEqual(ret.busy_time, 10)
def test_emulate_kernel_2_6_limited(self):
# Tests /proc/diskstats parsing format for 2.6 kernels,
# where one line of /proc/partitions return a limited
# amount of metrics when it bumps into a partition
# (instead of a disk). See:
# https://github.com/giampaolo/psutil/issues/767
with mock_open_content(
'/proc/diskstats',
" 3 1 hda 1 2 3 4"):
with mock.patch('psutil._pslinux.is_storage_device',
return_value=True):
ret = psutil.disk_io_counters(nowrap=False)
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.read_bytes, 2 * SECTOR_SIZE)
self.assertEqual(ret.write_count, 3)
self.assertEqual(ret.write_bytes, 4 * SECTOR_SIZE)
self.assertEqual(ret.read_merged_count, 0)
self.assertEqual(ret.read_time, 0)
self.assertEqual(ret.write_merged_count, 0)
self.assertEqual(ret.write_time, 0)
self.assertEqual(ret.busy_time, 0)
def test_emulate_include_partitions(self):
# Make sure that when perdisk=True disk partitions are returned,
# see:
# https://github.com/giampaolo/psutil/pull/1313#issuecomment-408626842
with mock_open_content(
'/proc/diskstats',
textwrap.dedent("""\
3 0 nvme0n1 1 2 3 4 5 6 7 8 9 10 11
3 0 nvme0n1p1 1 2 3 4 5 6 7 8 9 10 11
""")):
with mock.patch('psutil._pslinux.is_storage_device',
return_value=False):
ret = psutil.disk_io_counters(perdisk=True, nowrap=False)
self.assertEqual(len(ret), 2)
self.assertEqual(ret['nvme0n1'].read_count, 1)
self.assertEqual(ret['nvme0n1p1'].read_count, 1)
self.assertEqual(ret['nvme0n1'].write_count, 5)
self.assertEqual(ret['nvme0n1p1'].write_count, 5)
def test_emulate_exclude_partitions(self):
# Make sure that when perdisk=False partitions (e.g. 'sda1',
# 'nvme0n1p1') are skipped and not included in the total count.
# https://github.com/giampaolo/psutil/pull/1313#issuecomment-408626842
with mock_open_content(
'/proc/diskstats',
textwrap.dedent("""\
3 0 nvme0n1 1 2 3 4 5 6 7 8 9 10 11
3 0 nvme0n1p1 1 2 3 4 5 6 7 8 9 10 11
""")):
with mock.patch('psutil._pslinux.is_storage_device',
return_value=False):
ret = psutil.disk_io_counters(perdisk=False, nowrap=False)
self.assertIsNone(ret)
#
def is_storage_device(name):
return name == 'nvme0n1'
with mock_open_content(
'/proc/diskstats',
textwrap.dedent("""\
3 0 nvme0n1 1 2 3 4 5 6 7 8 9 10 11
3 0 nvme0n1p1 1 2 3 4 5 6 7 8 9 10 11
""")):
with mock.patch('psutil._pslinux.is_storage_device',
create=True, side_effect=is_storage_device):
ret = psutil.disk_io_counters(perdisk=False, nowrap=False)
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.write_count, 5)
def test_emulate_use_sysfs(self):
def exists(path):
if path == '/proc/diskstats':
return False
return True
wprocfs = psutil.disk_io_counters(perdisk=True)
with mock.patch('psutil._pslinux.os.path.exists',
create=True, side_effect=exists):
wsysfs = psutil.disk_io_counters(perdisk=True)
self.assertEqual(len(wprocfs), len(wsysfs))
def test_emulate_not_impl(self):
def exists(path):
return False
with mock.patch('psutil._pslinux.os.path.exists',
create=True, side_effect=exists):
self.assertRaises(NotImplementedError, psutil.disk_io_counters)
# =====================================================================
# --- misc
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestMisc(PsutilTestCase):
def test_boot_time(self):
vmstat_value = vmstat('boot time')
psutil_value = psutil.boot_time()
self.assertEqual(int(vmstat_value), int(psutil_value))
def test_no_procfs_on_import(self):
my_procfs = self.get_testfn()
os.mkdir(my_procfs)
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 0 0 0 0 0 0 0 0 0 0\n')
try:
orig_open = open
def open_mock(name, *args, **kwargs):
if name.startswith('/proc'):
raise IOError(errno.ENOENT, 'rejecting access for test')
return orig_open(name, *args, **kwargs)
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
reload_module(psutil)
self.assertRaises(IOError, psutil.cpu_times)
self.assertRaises(IOError, psutil.cpu_times, percpu=True)
self.assertRaises(IOError, psutil.cpu_percent)
self.assertRaises(IOError, psutil.cpu_percent, percpu=True)
self.assertRaises(IOError, psutil.cpu_times_percent)
self.assertRaises(
IOError, psutil.cpu_times_percent, percpu=True)
psutil.PROCFS_PATH = my_procfs
self.assertEqual(psutil.cpu_percent(), 0)
self.assertEqual(sum(psutil.cpu_times_percent()), 0)
# since we don't know the number of CPUs at import time,
# we awkwardly say there are none until the second call
per_cpu_percent = psutil.cpu_percent(percpu=True)
self.assertEqual(sum(per_cpu_percent), 0)
# ditto awkward length
per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0)
# much user, very busy
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 1 0 0 0 0 0 0 0 0 0\n')
self.assertNotEqual(psutil.cpu_percent(), 0)
self.assertNotEqual(
sum(psutil.cpu_percent(percpu=True)), 0)
self.assertNotEqual(sum(psutil.cpu_times_percent()), 0)
self.assertNotEqual(
sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0)
finally:
shutil.rmtree(my_procfs)
reload_module(psutil)
self.assertEqual(psutil.PROCFS_PATH, '/proc')
def test_cpu_steal_decrease(self):
# Test cumulative cpu stats decrease. We should ignore this.
# See issue #1210.
with mock_open_content(
"/proc/stat",
textwrap.dedent("""\
cpu 0 0 0 0 0 0 0 1 0 0
cpu0 0 0 0 0 0 0 0 1 0 0
cpu1 0 0 0 0 0 0 0 1 0 0
""").encode()) as m:
# first call to "percent" functions should read the new stat file
# and compare to the "real" file read at import time - so the
# values are meaningless
psutil.cpu_percent()
assert m.called
psutil.cpu_percent(percpu=True)
psutil.cpu_times_percent()
psutil.cpu_times_percent(percpu=True)
with mock_open_content(
"/proc/stat",
textwrap.dedent("""\
cpu 1 0 0 0 0 0 0 0 0 0
cpu0 1 0 0 0 0 0 0 0 0 0
cpu1 1 0 0 0 0 0 0 0 0 0
""").encode()) as m:
# Increase "user" while steal goes "backwards" to zero.
cpu_percent = psutil.cpu_percent()
assert m.called
cpu_percent_percpu = psutil.cpu_percent(percpu=True)
cpu_times_percent = psutil.cpu_times_percent()
cpu_times_percent_percpu = psutil.cpu_times_percent(percpu=True)
self.assertNotEqual(cpu_percent, 0)
self.assertNotEqual(sum(cpu_percent_percpu), 0)
self.assertNotEqual(sum(cpu_times_percent), 0)
self.assertNotEqual(sum(cpu_times_percent), 100.0)
self.assertNotEqual(sum(map(sum, cpu_times_percent_percpu)), 0)
self.assertNotEqual(sum(map(sum, cpu_times_percent_percpu)), 100.0)
self.assertEqual(cpu_times_percent.steal, 0)
self.assertNotEqual(cpu_times_percent.user, 0)
def test_boot_time_mocked(self):
with mock.patch('psutil._common.open', create=True) as m:
self.assertRaises(
RuntimeError,
psutil._pslinux.boot_time)
assert m.called
def test_users_mocked(self):
# Make sure ':0' and ':0.0' (returned by C ext) are converted
# to 'localhost'.
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', ':0',
1436573184.0, True, 2)]) as m:
self.assertEqual(psutil.users()[0].host, 'localhost')
assert m.called
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', ':0.0',
1436573184.0, True, 2)]) as m:
self.assertEqual(psutil.users()[0].host, 'localhost')
assert m.called
# ...otherwise it should be returned as-is
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', 'foo',
1436573184.0, True, 2)]) as m:
self.assertEqual(psutil.users()[0].host, 'foo')
assert m.called
def test_procfs_path(self):
tdir = self.get_testfn()
os.mkdir(tdir)
try:
psutil.PROCFS_PATH = tdir
self.assertRaises(IOError, psutil.virtual_memory)
self.assertRaises(IOError, psutil.cpu_times)
self.assertRaises(IOError, psutil.cpu_times, percpu=True)
self.assertRaises(IOError, psutil.boot_time)
# self.assertRaises(IOError, psutil.pids)
self.assertRaises(IOError, psutil.net_connections)
self.assertRaises(IOError, psutil.net_io_counters)
self.assertRaises(IOError, psutil.net_if_stats)
# self.assertRaises(IOError, psutil.disk_io_counters)
self.assertRaises(IOError, psutil.disk_partitions)
self.assertRaises(psutil.NoSuchProcess, psutil.Process)
finally:
psutil.PROCFS_PATH = "/proc"
@retry_on_failure()
def test_issue_687(self):
# In case of thread ID:
# - pid_exists() is supposed to return False
# - Process(tid) is supposed to work
# - pids() should not return the TID
# See: https://github.com/giampaolo/psutil/issues/687
t = ThreadTask()
t.start()
try:
p = psutil.Process()
tid = p.threads()[1].id
assert not psutil.pid_exists(tid), tid
pt = psutil.Process(tid)
pt.as_dict()
self.assertNotIn(tid, psutil.pids())
finally:
t.stop()
def test_pid_exists_no_proc_status(self):
# Internally pid_exists relies on /proc/{pid}/status.
# Emulate a case where this file is empty in which case
# psutil is supposed to fall back on using pids().
with mock_open_content("/proc/%s/status", "") as m:
assert psutil.pid_exists(os.getpid())
assert m.called
# =====================================================================
# --- sensors
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
@unittest.skipIf(not HAS_BATTERY, "no battery")
class TestSensorsBattery(PsutilTestCase):
@unittest.skipIf(not which("acpi"), "acpi utility not available")
def test_percent(self):
out = sh("acpi -b")
acpi_value = int(out.split(",")[1].strip().replace('%', ''))
psutil_value = psutil.sensors_battery().percent
self.assertAlmostEqual(acpi_value, psutil_value, delta=1)
@unittest.skipIf(not which("acpi"), "acpi utility not available")
def test_power_plugged(self):
out = sh("acpi -b")
if 'unknown' in out.lower():
            raise unittest.SkipTest("acpi output not reliable")
if 'discharging at zero rate' in out:
plugged = True
else:
plugged = "Charging" in out.split('\n')[0]
self.assertEqual(psutil.sensors_battery().power_plugged, plugged)
def test_emulate_power_plugged(self):
# Pretend the AC power cable is connected.
def open_mock(name, *args, **kwargs):
if name.endswith("AC0/online") or name.endswith("AC/online"):
return io.BytesIO(b"1")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
self.assertEqual(psutil.sensors_battery().power_plugged, True)
self.assertEqual(
psutil.sensors_battery().secsleft, psutil.POWER_TIME_UNLIMITED)
assert m.called
def test_emulate_power_plugged_2(self):
# Same as above but pretend /AC0/online does not exist in which
# case code relies on /status file.
def open_mock(name, *args, **kwargs):
if name.endswith("AC0/online") or name.endswith("AC/online"):
raise IOError(errno.ENOENT, "")
elif name.endswith("/status"):
return io.StringIO(u("charging"))
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
self.assertEqual(psutil.sensors_battery().power_plugged, True)
assert m.called
def test_emulate_power_not_plugged(self):
# Pretend the AC power cable is not connected.
def open_mock(name, *args, **kwargs):
if name.endswith("AC0/online") or name.endswith("AC/online"):
return io.BytesIO(b"0")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
self.assertEqual(psutil.sensors_battery().power_plugged, False)
assert m.called
def test_emulate_power_not_plugged_2(self):
# Same as above but pretend /AC0/online does not exist in which
# case code relies on /status file.
def open_mock(name, *args, **kwargs):
if name.endswith("AC0/online") or name.endswith("AC/online"):
raise IOError(errno.ENOENT, "")
elif name.endswith("/status"):
return io.StringIO(u("discharging"))
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
self.assertEqual(psutil.sensors_battery().power_plugged, False)
assert m.called
def test_emulate_power_undetermined(self):
# Pretend we can't know whether the AC power cable not
# connected (assert fallback to False).
def open_mock(name, *args, **kwargs):
if name.startswith("/sys/class/power_supply/AC0/online") or \
name.startswith("/sys/class/power_supply/AC/online"):
raise IOError(errno.ENOENT, "")
elif name.startswith("/sys/class/power_supply/BAT0/status"):
return io.BytesIO(b"???")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
self.assertIsNone(psutil.sensors_battery().power_plugged)
assert m.called
def test_emulate_no_base_files(self):
# Emulate a case where base metrics files are not present,
# in which case we're supposed to get None.
with mock_open_exception(
"/sys/class/power_supply/BAT0/energy_now",
IOError(errno.ENOENT, "")):
with mock_open_exception(
"/sys/class/power_supply/BAT0/charge_now",
IOError(errno.ENOENT, "")):
self.assertIsNone(psutil.sensors_battery())
def test_emulate_energy_full_0(self):
# Emulate a case where energy_full files returns 0.
with mock_open_content(
"/sys/class/power_supply/BAT0/energy_full", b"0") as m:
self.assertEqual(psutil.sensors_battery().percent, 0)
assert m.called
def test_emulate_energy_full_not_avail(self):
# Emulate a case where energy_full file does not exist.
# Expected fallback on /capacity.
with mock_open_exception(
"/sys/class/power_supply/BAT0/energy_full",
IOError(errno.ENOENT, "")):
with mock_open_exception(
"/sys/class/power_supply/BAT0/charge_full",
IOError(errno.ENOENT, "")):
with mock_open_content(
"/sys/class/power_supply/BAT0/capacity", b"88"):
self.assertEqual(psutil.sensors_battery().percent, 88)
def test_emulate_no_power(self):
# Emulate a case where /AC0/online file nor /BAT0/status exist.
with mock_open_exception(
"/sys/class/power_supply/AC/online",
IOError(errno.ENOENT, "")):
with mock_open_exception(
"/sys/class/power_supply/AC0/online",
IOError(errno.ENOENT, "")):
with mock_open_exception(
"/sys/class/power_supply/BAT0/status",
IOError(errno.ENOENT, "")):
self.assertIsNone(psutil.sensors_battery().power_plugged)
@unittest.skipIf(not LINUX, "LINUX only")
class TestSensorsTemperatures(PsutilTestCase):
def test_emulate_class_hwmon(self):
def open_mock(name, *args, **kwargs):
if name.endswith('/name'):
return io.StringIO(u("name"))
elif name.endswith('/temp1_label'):
return io.StringIO(u("label"))
elif name.endswith('/temp1_input'):
return io.BytesIO(b"30000")
elif name.endswith('/temp1_max'):
return io.BytesIO(b"40000")
elif name.endswith('/temp1_crit'):
return io.BytesIO(b"50000")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
# Test case with /sys/class/hwmon
with mock.patch('glob.glob',
return_value=['/sys/class/hwmon/hwmon0/temp1']):
temp = psutil.sensors_temperatures()['name'][0]
self.assertEqual(temp.label, 'label')
self.assertEqual(temp.current, 30.0)
self.assertEqual(temp.high, 40.0)
self.assertEqual(temp.critical, 50.0)
def test_emulate_class_thermal(self):
def open_mock(name, *args, **kwargs):
if name.endswith('0_temp'):
return io.BytesIO(b"50000")
elif name.endswith('temp'):
return io.BytesIO(b"30000")
elif name.endswith('0_type'):
return io.StringIO(u("critical"))
elif name.endswith('type'):
return io.StringIO(u("name"))
else:
return orig_open(name, *args, **kwargs)
def glob_mock(path):
if path == '/sys/class/hwmon/hwmon*/temp*_*':
return []
elif path == '/sys/class/hwmon/hwmon*/device/temp*_*':
return []
elif path == '/sys/class/thermal/thermal_zone*':
return ['/sys/class/thermal/thermal_zone0']
elif path == '/sys/class/thermal/thermal_zone0/trip_point*':
return ['/sys/class/thermal/thermal_zone1/trip_point_0_type',
'/sys/class/thermal/thermal_zone1/trip_point_0_temp']
return []
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
with mock.patch('glob.glob', create=True, side_effect=glob_mock):
temp = psutil.sensors_temperatures()['name'][0]
self.assertEqual(temp.label, '')
self.assertEqual(temp.current, 30.0)
self.assertEqual(temp.high, 50.0)
self.assertEqual(temp.critical, 50.0)
@unittest.skipIf(not LINUX, "LINUX only")
class TestSensorsFans(PsutilTestCase):
def test_emulate_data(self):
def open_mock(name, *args, **kwargs):
if name.endswith('/name'):
return io.StringIO(u("name"))
elif name.endswith('/fan1_label'):
return io.StringIO(u("label"))
elif name.endswith('/fan1_input'):
return io.StringIO(u("2000"))
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
with mock.patch('glob.glob',
return_value=['/sys/class/hwmon/hwmon2/fan1']):
fan = psutil.sensors_fans()['name'][0]
self.assertEqual(fan.label, 'label')
self.assertEqual(fan.current, 2000)
# =====================================================================
# --- test process
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestProcess(PsutilTestCase):
@retry_on_failure()
def test_memory_full_info(self):
testfn = self.get_testfn()
src = textwrap.dedent("""
import time
with open("%s", "w") as f:
time.sleep(10)
""" % testfn)
sproc = self.pyrun(src)
call_until(lambda: os.listdir('.'), "'%s' not in ret" % testfn)
p = psutil.Process(sproc.pid)
time.sleep(.1)
mem = p.memory_full_info()
maps = p.memory_maps(grouped=False)
self.assertAlmostEqual(
mem.uss, sum([x.private_dirty + x.private_clean for x in maps]),
delta=4096)
self.assertAlmostEqual(
mem.pss, sum([x.pss for x in maps]), delta=4096)
self.assertAlmostEqual(
mem.swap, sum([x.swap for x in maps]), delta=4096)
def test_memory_full_info_mocked(self):
# See: https://github.com/giampaolo/psutil/issues/1222
with mock_open_content(
"/proc/%s/smaps" % os.getpid(),
textwrap.dedent("""\
fffff0 r-xp 00000000 00:00 0 [vsyscall]
Size: 1 kB
Rss: 2 kB
Pss: 3 kB
Shared_Clean: 4 kB
Shared_Dirty: 5 kB
Private_Clean: 6 kB
Private_Dirty: 7 kB
Referenced: 8 kB
Anonymous: 9 kB
LazyFree: 10 kB
AnonHugePages: 11 kB
ShmemPmdMapped: 12 kB
Shared_Hugetlb: 13 kB
Private_Hugetlb: 14 kB
Swap: 15 kB
SwapPss: 16 kB
KernelPageSize: 17 kB
MMUPageSize: 18 kB
Locked: 19 kB
VmFlags: rd ex
""").encode()) as m:
p = psutil.Process()
mem = p.memory_full_info()
assert m.called
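            # With the smaps content mocked above, uss = Private_Clean (6 kB) +
            # Private_Dirty (7 kB) + Private_Hugetlb (14 kB), while pss and swap
            # map directly to the Pss and Swap fields.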
self.assertEqual(mem.uss, (6 + 7 + 14) * 1024)
self.assertEqual(mem.pss, 3 * 1024)
self.assertEqual(mem.swap, 15 * 1024)
# On PYPY file descriptors are not closed fast enough.
@unittest.skipIf(PYPY, "unreliable on PYPY")
def test_open_files_mode(self):
def get_test_file(fname):
p = psutil.Process()
giveup_at = time.time() + 2
while True:
for file in p.open_files():
if file.path == os.path.abspath(fname):
return file
                if time.time() > giveup_at:
break
raise RuntimeError("timeout looking for test file")
#
testfn = self.get_testfn()
with open(testfn, "w"):
self.assertEqual(get_test_file(testfn).mode, "w")
with open(testfn, "r"):
self.assertEqual(get_test_file(testfn).mode, "r")
with open(testfn, "a"):
self.assertEqual(get_test_file(testfn).mode, "a")
#
with open(testfn, "r+"):
self.assertEqual(get_test_file(testfn).mode, "r+")
with open(testfn, "w+"):
self.assertEqual(get_test_file(testfn).mode, "r+")
with open(testfn, "a+"):
self.assertEqual(get_test_file(testfn).mode, "a+")
# note: "x" bit is not supported
if PY3:
safe_rmpath(testfn)
with open(testfn, "x"):
self.assertEqual(get_test_file(testfn).mode, "w")
safe_rmpath(testfn)
with open(testfn, "x+"):
self.assertEqual(get_test_file(testfn).mode, "r+")
def test_open_files_file_gone(self):
# simulates a file which gets deleted during open_files()
# execution
p = psutil.Process()
files = p.open_files()
with open(self.get_testfn(), 'w'):
# give the kernel some time to see the new file
call_until(p.open_files, "len(ret) != %i" % len(files))
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.ENOENT, "")) as m:
files = p.open_files()
assert not files
assert m.called
# also simulate the case where os.readlink() returns EINVAL
# in which case psutil is supposed to 'continue'
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.EINVAL, "")) as m:
self.assertEqual(p.open_files(), [])
assert m.called
def test_open_files_fd_gone(self):
# Simulate a case where /proc/{pid}/fdinfo/{fd} disappears
# while iterating through fds.
# https://travis-ci.org/giampaolo/psutil/jobs/225694530
p = psutil.Process()
files = p.open_files()
with open(self.get_testfn(), 'w'):
# give the kernel some time to see the new file
call_until(p.open_files, "len(ret) != %i" % len(files))
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point,
side_effect=IOError(errno.ENOENT, "")) as m:
files = p.open_files()
assert not files
assert m.called
# --- mocked tests
def test_terminal_mocked(self):
with mock.patch('psutil._pslinux._psposix.get_terminal_map',
return_value={}) as m:
self.assertIsNone(psutil._pslinux.Process(os.getpid()).terminal())
assert m.called
# TODO: re-enable this test.
# def test_num_ctx_switches_mocked(self):
# with mock.patch('psutil._common.open', create=True) as m:
# self.assertRaises(
# NotImplementedError,
# psutil._pslinux.Process(os.getpid()).num_ctx_switches)
# assert m.called
def test_cmdline_mocked(self):
# see: https://github.com/giampaolo/psutil/issues/639
p = psutil.Process()
fake_file = io.StringIO(u('foo\x00bar\x00'))
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m:
self.assertEqual(p.cmdline(), ['foo', 'bar'])
assert m.called
fake_file = io.StringIO(u('foo\x00bar\x00\x00'))
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m:
self.assertEqual(p.cmdline(), ['foo', 'bar', ''])
assert m.called
def test_cmdline_spaces_mocked(self):
# see: https://github.com/giampaolo/psutil/issues/1179
p = psutil.Process()
fake_file = io.StringIO(u('foo bar '))
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m:
self.assertEqual(p.cmdline(), ['foo', 'bar'])
assert m.called
fake_file = io.StringIO(u('foo bar '))
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m:
self.assertEqual(p.cmdline(), ['foo', 'bar', ''])
assert m.called
def test_cmdline_mixed_separators(self):
# https://github.com/giampaolo/psutil/issues/
# 1179#issuecomment-552984549
p = psutil.Process()
fake_file = io.StringIO(u('foo\x20bar\x00'))
with mock.patch('psutil._common.open',
return_value=fake_file, create=True) as m:
self.assertEqual(p.cmdline(), ['foo', 'bar'])
assert m.called
def test_readlink_path_deleted_mocked(self):
with mock.patch('psutil._pslinux.os.readlink',
return_value='/home/foo (deleted)'):
self.assertEqual(psutil.Process().exe(), "/home/foo")
self.assertEqual(psutil.Process().cwd(), "/home/foo")
def test_threads_mocked(self):
# Test the case where os.listdir() returns a file (thread)
# which no longer exists by the time we open() it (race
# condition). threads() is supposed to ignore that instead
# of raising NSP.
def open_mock(name, *args, **kwargs):
if name.startswith('/proc/%s/task' % os.getpid()):
raise IOError(errno.ENOENT, "")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
ret = psutil.Process().threads()
assert m.called
self.assertEqual(ret, [])
# ...but if it bumps into something != ENOENT we want an
# exception.
def open_mock(name, *args, **kwargs):
if name.startswith('/proc/%s/task' % os.getpid()):
raise IOError(errno.EPERM, "")
else:
return orig_open(name, *args, **kwargs)
with mock.patch(patch_point, side_effect=open_mock):
self.assertRaises(psutil.AccessDenied, psutil.Process().threads)
def test_exe_mocked(self):
with mock.patch('psutil._pslinux.readlink',
side_effect=OSError(errno.ENOENT, "")) as m1:
with mock.patch('psutil.Process.cmdline',
side_effect=psutil.AccessDenied(0, "")) as m2:
# No such file error; might be raised also if /proc/pid/exe
# path actually exists for system processes with low pids
# (about 0-20). In this case psutil is supposed to return
# an empty string.
ret = psutil.Process().exe()
assert m1.called
assert m2.called
self.assertEqual(ret, "")
# ...but if /proc/pid no longer exist we're supposed to treat
# it as an alias for zombie process
with mock.patch('psutil._pslinux.os.path.lexists',
return_value=False):
self.assertRaises(
psutil.ZombieProcess, psutil.Process().exe)
def test_issue_1014(self):
# Emulates a case where smaps file does not exist. In this case
# wrap_exception decorator should not raise NoSuchProcess.
with mock_open_exception(
'/proc/%s/smaps' % os.getpid(),
IOError(errno.ENOENT, "")) as m:
p = psutil.Process()
with self.assertRaises(FileNotFoundError):
p.memory_maps()
assert m.called
@unittest.skipIf(not HAS_RLIMIT, "not supported")
def test_rlimit_zombie(self):
# Emulate a case where rlimit() raises ENOSYS, which may
# happen in case of zombie process:
# https://travis-ci.org/giampaolo/psutil/jobs/51368273
with mock.patch("psutil._pslinux.cext.linux_prlimit",
side_effect=OSError(errno.ENOSYS, "")) as m:
p = psutil.Process()
p.name()
with self.assertRaises(psutil.ZombieProcess) as exc:
p.rlimit(psutil.RLIMIT_NOFILE)
assert m.called
self.assertEqual(exc.exception.pid, p.pid)
self.assertEqual(exc.exception.name, p.name())
def test_cwd_zombie(self):
with mock.patch("psutil._pslinux.os.readlink",
side_effect=OSError(errno.ENOENT, "")) as m:
p = psutil.Process()
p.name()
with self.assertRaises(psutil.ZombieProcess) as exc:
p.cwd()
assert m.called
self.assertEqual(exc.exception.pid, p.pid)
self.assertEqual(exc.exception.name, p.name())
def test_stat_file_parsing(self):
from psutil._pslinux import CLOCK_TICKS
args = [
"0", # pid
"(cat)", # name
"Z", # status
"1", # ppid
"0", # pgrp
"0", # session
"0", # tty
"0", # tpgid
"0", # flags
"0", # minflt
"0", # cminflt
"0", # majflt
"0", # cmajflt
"2", # utime
"3", # stime
"4", # cutime
"5", # cstime
"0", # priority
"0", # nice
"0", # num_threads
"0", # itrealvalue
"6", # starttime
"0", # vsize
"0", # rss
"0", # rsslim
"0", # startcode
"0", # endcode
"0", # startstack
"0", # kstkesp
"0", # kstkeip
"0", # signal
"0", # blocked
"0", # sigignore
"0", # sigcatch
"0", # wchan
"0", # nswap
"0", # cnswap
"0", # exit_signal
"6", # processor
"0", # rt priority
"0", # policy
"7", # delayacct_blkio_ticks
]
content = " ".join(args).encode()
with mock_open_content('/proc/%s/stat' % os.getpid(), content):
p = psutil.Process()
self.assertEqual(p.name(), 'cat')
self.assertEqual(p.status(), psutil.STATUS_ZOMBIE)
self.assertEqual(p.ppid(), 1)
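            # create_time is derived from the "starttime" field above (6), scaled by
            # CLOCK_TICKS and offset by the system boot time.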
self.assertEqual(
p.create_time(), 6 / CLOCK_TICKS + psutil.boot_time())
cpu = p.cpu_times()
self.assertEqual(cpu.user, 2 / CLOCK_TICKS)
self.assertEqual(cpu.system, 3 / CLOCK_TICKS)
self.assertEqual(cpu.children_user, 4 / CLOCK_TICKS)
self.assertEqual(cpu.children_system, 5 / CLOCK_TICKS)
self.assertEqual(cpu.iowait, 7 / CLOCK_TICKS)
self.assertEqual(p.cpu_num(), 6)
def test_status_file_parsing(self):
with mock_open_content(
'/proc/%s/status' % os.getpid(),
textwrap.dedent("""\
Uid:\t1000\t1001\t1002\t1003
Gid:\t1004\t1005\t1006\t1007
Threads:\t66
Cpus_allowed:\tf
Cpus_allowed_list:\t0-7
voluntary_ctxt_switches:\t12
nonvoluntary_ctxt_switches:\t13""").encode()):
p = psutil.Process()
self.assertEqual(p.num_ctx_switches().voluntary, 12)
self.assertEqual(p.num_ctx_switches().involuntary, 13)
self.assertEqual(p.num_threads(), 66)
uids = p.uids()
self.assertEqual(uids.real, 1000)
self.assertEqual(uids.effective, 1001)
self.assertEqual(uids.saved, 1002)
gids = p.gids()
self.assertEqual(gids.real, 1004)
self.assertEqual(gids.effective, 1005)
self.assertEqual(gids.saved, 1006)
self.assertEqual(p._proc._get_eligible_cpus(), list(range(0, 8)))
@unittest.skipIf(not LINUX, "LINUX only")
class TestProcessAgainstStatus(PsutilTestCase):
"""/proc/pid/stat and /proc/pid/status have many values in common.
Whenever possible, psutil uses /proc/pid/stat (it's faster).
For all those cases we check that the value found in
/proc/pid/stat (by psutil) matches the one found in
/proc/pid/status.
"""
@classmethod
def setUpClass(cls):
cls.proc = psutil.Process()
def read_status_file(self, linestart):
with psutil._psplatform.open_text(
'/proc/%s/status' % self.proc.pid) as f:
for line in f:
line = line.strip()
if line.startswith(linestart):
value = line.partition('\t')[2]
try:
return int(value)
except ValueError:
return value
raise ValueError("can't find %r" % linestart)
def test_name(self):
value = self.read_status_file("Name:")
self.assertEqual(self.proc.name(), value)
def test_status(self):
value = self.read_status_file("State:")
value = value[value.find('(') + 1:value.rfind(')')]
value = value.replace(' ', '-')
self.assertEqual(self.proc.status(), value)
def test_ppid(self):
value = self.read_status_file("PPid:")
self.assertEqual(self.proc.ppid(), value)
def test_num_threads(self):
value = self.read_status_file("Threads:")
self.assertEqual(self.proc.num_threads(), value)
def test_uids(self):
value = self.read_status_file("Uid:")
value = tuple(map(int, value.split()[1:4]))
self.assertEqual(self.proc.uids(), value)
def test_gids(self):
value = self.read_status_file("Gid:")
value = tuple(map(int, value.split()[1:4]))
self.assertEqual(self.proc.gids(), value)
@retry_on_failure()
def test_num_ctx_switches(self):
value = self.read_status_file("voluntary_ctxt_switches:")
self.assertEqual(self.proc.num_ctx_switches().voluntary, value)
value = self.read_status_file("nonvoluntary_ctxt_switches:")
self.assertEqual(self.proc.num_ctx_switches().involuntary, value)
def test_cpu_affinity(self):
value = self.read_status_file("Cpus_allowed_list:")
if '-' in str(value):
min_, max_ = map(int, value.split('-'))
self.assertEqual(
self.proc.cpu_affinity(), list(range(min_, max_ + 1)))
def test_cpu_affinity_eligible_cpus(self):
value = self.read_status_file("Cpus_allowed_list:")
with mock.patch("psutil._pslinux.per_cpu_times") as m:
self.proc._proc._get_eligible_cpus()
if '-' in str(value):
assert not m.called
else:
assert m.called
# =====================================================================
# --- test utils
# =====================================================================
@unittest.skipIf(not LINUX, "LINUX only")
class TestUtils(PsutilTestCase):
def test_readlink(self):
with mock.patch("os.readlink", return_value="foo (deleted)") as m:
self.assertEqual(psutil._psplatform.readlink("bar"), "foo")
assert m.called
def test_cat(self):
testfn = self.get_testfn()
with open(testfn, "wt") as f:
f.write("foo ")
self.assertEqual(psutil._psplatform.cat(testfn, binary=False), "foo")
self.assertEqual(psutil._psplatform.cat(testfn, binary=True), b"foo")
self.assertEqual(
psutil._psplatform.cat(testfn + '??', fallback="bar"), "bar")
if __name__ == '__main__':
from psutil.tests.runner import run_from_name
run_from_name(__file__)
| 41.386288 | 79 | 0.549605 |
cea4c9da31b01c7d4e4b6a9b839c758c40069a56 | 165 | py | Python | 0046_Permutations.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0046_Permutations.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0046_Permutations.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | 1 | 2020-03-18T05:23:40.000Z | 2020-03-18T05:23:40.000Z | from itertools import permutations
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
perm = permutations(nums)
return perm
| 23.571429 | 58 | 0.666667 |
01e353276546d78fffc512db94d1b4a02f93c335 | 3,240 | py | Python | tools/generate_EMP_statement.py | bert-kapsarc/KEMChinaCoal | 34d321c406f24a0f023ec0c616e350b66c2b1ce6 | [
"MIT"
] | null | null | null | tools/generate_EMP_statement.py | bert-kapsarc/KEMChinaCoal | 34d321c406f24a0f023ec0c616e350b66c2b1ce6 | [
"MIT"
] | null | null | null | tools/generate_EMP_statement.py | bert-kapsarc/KEMChinaCoal | 34d321c406f24a0f023ec0c616e350b66c2b1ce6 | [
"MIT"
] | 2 | 2020-08-24T00:25:57.000Z | 2021-04-12T09:33:36.000Z | from argparse import ArgumentParser
import os, re
parser = ArgumentParser()
parser.add_argument('--agents', type=str, default='')
parser.add_argument('--input', type=str, default='KEM_EMP.lst')
parser.add_argument('--output', type=str, default='EMP_statement.gms')
parser.add_argument('--models', type=str, default='integrated_model.info')
args = parser.parse_args()
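# Typical invocation (illustrative only; the file names are the argparse defaults above):
#   python generate_EMP_statement.py --input KEM_EMP.lst --output EMP_statement.gms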
# Getting the agents prefix and countries
input_file = args.input
output_file = args.output
models_file = args.models
# Loading the integrated sectors from the info file
myFile = open(models_file, 'r')
keys = [(line[:-1].split()[0], line[:-1].split()[1]) for line in myFile]
sectors = set([item[0] for item in keys])
myFile.close()
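# Each line of the info file is expected to carry a "<sector> <country>" pair
# (hypothetical example: "EL china"), so keys becomes a list of (sector, country)
# tuples and sectors the set of distinct sector prefixes; the 'EL' sector is
# special-cased when the dual variables are written at the end of the script.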
# removing the info file
os.remove(models_file)
list_of_e_dict = {key: [] for key in keys}
list_of_v_dict = {key: [] for key in keys}
myFile = open(input_file, 'r')
file_content = [line[:-1] for line in myFile]
myFile.close()
in_out = False
i = 0
# reading the list file and grepping the equations and variables
while i < len(file_content):
if file_content[i][:5] == '---- ':
in_out = True
while in_out:
i += 1
line = file_content[i]
if line == 'MODEL STATISTICS':
in_out = False
break
if '..' in line:
if 'objective' in line or re.match(r'([a-z]).*(_eqn)', line.lower()) is not None:
continue
line = line[0:line.find('..')]
country = line[line.find('(')+1:line.find(')')].split(',')[-1]
line = re.sub(r"\(", "(\'", line)
line = re.sub(r"\)", "\')", line)
line = re.sub(r",", "\',\'", line)
key = (line[0:2], country)
try:
list_of_e_dict[key].append(line)
except KeyError:
continue
elif re.match(r'([a-z]).*(\))', line.lower()) is not None and line[0] != 'D':
country = line[line.find('(') + 1:line.find(')')].split(',')[-1]
line = re.sub(r"\(", "(\'", line)
line = re.sub(r"\)", "\')", line)
line = re.sub(r",", "\',\'", line)
key = (line[0:2], country)
try:
list_of_v_dict[key].append(line)
except KeyError:
continue
if '---- ' in line:
in_out = False
i -= 1
i += 1
# Writing the EMP file
with open(output_file, 'w') as f:
print('put myinfo \'equilibrium\';', file=f)
for sec, country in list_of_v_dict.keys():
print("put / 'min', %sobjval('%s');" % (sec, country), file=f)
print(' /* variables */', file=f)
for item in list_of_v_dict[(sec, country)]:
if 'objval' not in item:
print('\tput %s;' % item, file=f)
print('\n /* Equations */', file=f)
print(" put %sobjective('%s');" % (sec, country), file=f)
for item in list_of_e_dict[(sec, country)]:
print('\tput %s;' % item, file=f)
for sec in sectors:
if sec == 'EL':
print('put \'dualvar D%ssup %ssup\';' % (sec, sec), file=f)
else:
print('put \'dualvar D%sdem %sdem\';' % (sec, sec), file=f)
print('putclose myinfo;', file=f)
f.close() | 34.83871 | 93 | 0.540432 |
16480e5e97633771f61d4cf208994db35108bfd0 | 8,287 | py | Python | Mi Lenguaje/tests/lexer_test.py | DataEngel/Creando-mi-primer-lenguaje-de-programaci-n- | 92434f89c62b6cec0114441c669952450ba21b79 | [
"MIT"
] | null | null | null | Mi Lenguaje/tests/lexer_test.py | DataEngel/Creando-mi-primer-lenguaje-de-programaci-n- | 92434f89c62b6cec0114441c669952450ba21b79 | [
"MIT"
] | null | null | null | Mi Lenguaje/tests/lexer_test.py | DataEngel/Creando-mi-primer-lenguaje-de-programaci-n- | 92434f89c62b6cec0114441c669952450ba21b79 | [
"MIT"
] | null | null | null | from unittest import TestCase
from typing import List
from lpp.token import (
Token,
TokenType,
)
from lpp.lexer import Lexer
class LexerTest(TestCase):
def test_illegal(self) -> None:
source: str = '¡¿@'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source)):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.ILLEGAL, '¡'),
Token(TokenType.ILLEGAL, '¿'),
Token(TokenType.ILLEGAL, '@'),
]
self.assertEquals(tokens, expected_tokens)
def test_one_character_operator(self) -> None:
source: str = '=+-/*<>!'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source)):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.ASSIGN, '='),
Token(TokenType.PLUS, '+'),
Token(TokenType.MINUS, '-'),
Token(TokenType.DIVISION, '/'),
Token(TokenType.MULTIPLICATION, '*'),
Token(TokenType.LT, '<'),
Token(TokenType.GT, '>'),
Token(TokenType.NEGATION, '!'),
]
self.assertEquals(tokens, expected_tokens)
def test_eof(self) -> None:
source: str = '+'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source) + 1):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.PLUS, '+'),
Token(TokenType.EOF, ''),
]
self.assertEquals(tokens, expected_tokens)
def test_delimiters(self) -> None:
source = '(){},;'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(len(source)):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.LPAREN, '('),
Token(TokenType.RPAREN, ')'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.RBRACE, '}'),
Token(TokenType.COMMA, ','),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEquals(tokens, expected_tokens)
def test_assignment(self) -> None:
source: str = 'variable cinco = 5;'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(5):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.LET, 'variable'),
Token(TokenType.IDENT, 'cinco'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.INT, '5'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEquals(tokens, expected_tokens)
def test_function_declaration(self) -> None:
source: str = '''
variable suma = procedimiento(x, y) {
x + y;
};
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(16):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.LET, 'variable'),
Token(TokenType.IDENT, 'suma'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.FUNCTION, 'procedimiento'),
Token(TokenType.LPAREN, '('),
Token(TokenType.IDENT, 'x'),
Token(TokenType.COMMA, ','),
Token(TokenType.IDENT, 'y'),
Token(TokenType.RPAREN, ')'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.IDENT, 'x'),
Token(TokenType.PLUS, '+'),
Token(TokenType.IDENT, 'y'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.RBRACE, '}'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEquals(tokens, expected_tokens)
def test_function_call(self) -> None:
source: str = 'variable resultado = suma(dos, tres);'
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(10):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.LET, 'variable'),
Token(TokenType.IDENT, 'resultado'),
Token(TokenType.ASSIGN, '='),
Token(TokenType.IDENT, 'suma'),
Token(TokenType.LPAREN, '('),
Token(TokenType.IDENT, 'dos'),
Token(TokenType.COMMA, ','),
Token(TokenType.IDENT, 'tres'),
Token(TokenType.RPAREN, ')'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEquals(tokens, expected_tokens)
def test_control_statement(self) -> None:
source: str = '''
si (5 < 10) {
regresa verdadero;
} si_no {
regresa falso;
}
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(17):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.IF, 'si'),
Token(TokenType.LPAREN, '('),
Token(TokenType.INT, '5'),
Token(TokenType.LT, '<'),
Token(TokenType.INT, '10'),
Token(TokenType.RPAREN, ')'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.RETURN, 'regresa'),
Token(TokenType.TRUE, 'verdadero'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.RBRACE, '}'),
Token(TokenType.ELSE, 'si_no'),
Token(TokenType.LBRACE, '{'),
Token(TokenType.RETURN, 'regresa'),
Token(TokenType.FALSE, 'falso'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.RBRACE, '}'),
]
self.assertEquals(tokens, expected_tokens)
def test_two_character_operator(self) -> None:
source: str = '''
10 == 10;
10 != 9;
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
for i in range(8):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
Token(TokenType.INT, '10'),
Token(TokenType.EQ, '=='),
Token(TokenType.INT, '10'),
Token(TokenType.SEMICOLON, ';'),
Token(TokenType.INT, '10'),
Token(TokenType.NOT_EQ, '!='),
Token(TokenType.INT, '9'),
Token(TokenType.SEMICOLON, ';'),
]
self.assertEquals(tokens, expected_tokens)
def test_for_loop(self) -> None:
source: str = '''
para ( 0; 0<10; 0 = suma(0, 1)){
;}
'''
lexer: Lexer = Lexer(source)
tokens: List[Token] = []
        for i in range(20):
tokens.append(lexer.next_token())
expected_tokens: List[Token] = [
            Token(TokenType.FOR, 'para'),
            Token(TokenType.LPAREN, '('),
            Token(TokenType.INT, '0'),
            Token(TokenType.SEMICOLON, ';'),
            Token(TokenType.INT, '0'),
            Token(TokenType.LT, '<'),
            Token(TokenType.INT, '10'),
            Token(TokenType.SEMICOLON, ';'),
            Token(TokenType.INT, '0'),
            Token(TokenType.ASSIGN, '='),
            Token(TokenType.IDENT, 'suma'),
            Token(TokenType.LPAREN, '('),
            Token(TokenType.INT, '0'),
            Token(TokenType.COMMA, ','),
            Token(TokenType.INT, '1'),
            Token(TokenType.RPAREN, ')'),
            Token(TokenType.RPAREN, ')'),
            Token(TokenType.LBRACE, '{'),
            Token(TokenType.SEMICOLON, ';'),
            Token(TokenType.RBRACE, '}'),
]
self.assertEquals(tokens, expected_tokens) | 32.120155 | 62 | 0.492941 |
5ce5aed85795130801b6ab7bae2c441b2d15db5e | 33,487 | py | Python | parser/team06/TablaDeSimbolos.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team06/TablaDeSimbolos.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team06/TablaDeSimbolos.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from enum import Enum
import pandas as pd
import reportes as h
class TIPO_DE_DATO(Enum) :
NUMERO = 1
FLOTANTE=2
CARACTER=3
    # TODO: add the remaining data types needed for type checking in operations
class Simbolo() :
    'This class represents a symbol inside our symbol table'
def __init__(self, id, nombre, tipo, tamanoCadena, BD, tabla, obligatorio, pk, FK, referenciaTablaFK, referenciaCampoFK, unique, idUnique, check, condicionCheck, idCheck,valor,default, idConstraintFK, idConstraintPK, tipoIndex, sortIndex, ambito, rol) :
self.id = id
self.nombre = nombre
self.tipo = tipo
self.tamanoCadena = tamanoCadena
self.BD = BD
self.tabla = tabla
self.obligatorio = obligatorio
self.pk = pk
self.FK = FK
self.referenciaTablaFK = referenciaTablaFK
self.referenciaCampoFK = referenciaCampoFK
self.unique = unique
self.idUnique = idUnique
self.check = check
self.condicionCheck = condicionCheck
self.idCheck = idCheck
self.valor = valor
self.default = default
self.idConstraintFK = idConstraintFK
self.idConstraintPK = idConstraintPK
self.tipoIndex = tipoIndex
self.sortIndex = sortIndex
self.ambito = ambito
self.rol = rol
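    # Attribute groups, for reference: identity (id, nombre, tipo, tamanoCadena, BD, tabla),
    # column constraints (obligatorio, pk, FK, unique, check, plus their ids and conditions),
    # stored data (valor keeps the column's inserted values as a list, default its DEFAULT),
    # index metadata (tipoIndex, sortIndex) and procedure scope info (ambito, rol).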
class TablaDeSimbolos() :
    'This class represents the symbol table'
    def __init__(self, simbolos = None) :
        # Use None as the default to avoid sharing one dict between instances.
        self.simbolos = simbolos if simbolos is not None else {}
def agregar(self, simbolo) :
self.simbolos[simbolo.nombre] = simbolo
def obtener(self, id) :
print("a este entra")
if not id in self.simbolos :
print('Error1: variable ', id, ' no definida.')
return("no definida")
return self.simbolos[id]
def obtener2(self, nombre) :
print("a este entra")
if not nombre in self.simbolos :
print('Error1: variable ', nombre, ' no definida.')
return 0
return self.simbolos[nombre]
def actualizar(self, simbolo) :
if not simbolo.nombre in self.simbolos :
print('Error2: variable ', simbolo.nombre, ' no definida.')
else :
self.simbolos[simbolo.nombre] = simbolo
def mostrar(self,var):
print(str(var))
for x in self.simbolos:
print(x)
def destruir(self,simbolo):
print("########################### simbolos>",str(simbolo.id))
if not simbolo.id in self.simbolos :
print('Error3: variable ', simbolo.id, ' no definida.')
else :
self.simbolos[simbolo.id] = simbolo
del self.simbolos[simbolo.id]
print("si lo elimina")
def destruirColumna(self,nombre,BD,tabla):
clave = str(nombre)+str(BD)+str(tabla)
print(clave)
for simb in self.simbolos:
print (simb)
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla and self.simbolos[simb].tipo != None:
del self.simbolos[simb]
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def obtenerColumnas(self,tabla,BD):
#print("EMPIEZO A BORRAR LA TABLA: ",tabla)
print("DE MOMENTO IMPRIMIRÉ ACÁ ABAJO CUALES SON LAS COLUMNAS QUE PERTENECEN A LA TABLA")
listaColumnas = []
for simb in self.simbolos:
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == BD and self.simbolos[simb].tipo != None:
listaColumnas.append(self.simbolos[simb].nombre)
#print(self.simbolos[simb].nombre)
return listaColumnas
def destruirTabla(self,nombre,BD):
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD:
del self.simbolos[simb]
return
'''claveTabla = str(tabla)+str(BD)
for simb in self.simbolos:
if simb == claveTabla:
del self.simbolos[simb]
print("SE ACABARON LAS COLUMNAS DE LA TABLA: ",tabla)
return 0 '''
def destruirConstraint(self,nombre,BD,tabla):
print("aca estoy meeeeen!")
print(nombre)
print(BD)
print(tabla)
for simb in self.simbolos:
print("xdddx")
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == BD:
print("encontre una entrada posible")
print(self.simbolos[simb].idConstraintFK)
print(self.simbolos[simb].idConstraintPK)
if self.simbolos[simb].idConstraintFK == nombre:
print("ENCONTRE EL CONSTRAINTFK, KEMOSION")
self.simbolos[simb].idConstraintFK=None
self.simbolos[simb].FK = 0
self.simbolos[simb].referenciaTablaFK=None
self.simbolos[simb].referenciaCampoFK=None
elif self.simbolos[simb].idConstraintPK == nombre:
print("ENCONTRE EL CONSTRAINTPK, KEMOSION")
self.simbolos[simb].idConstraintPK=None
self.simbolos[simb].pk = 0
#-------------------------------------------------------------------
'''print("########################### simbolos>",str(simbolo.id))
if not simbolo.id in self.simbolos :
print('Error3: variable ', simbolo.id, ' no definida.')
else :
self.simbolos[simbolo.id] = simbolo
del self.simbolos[simbolo.id]
print("si lo elimina")'''
#-----------------------------------------------------------------------------------------------------------------------
def obtenerDato(self, nombre):
print("a este entra")
if not nombre in self.simbolos :
print('Error1: variable ', nombre, ' no definida.')
return("no definida")
return self.simbolos[nombre]
#-----------------------------------------------------------------------------------------------------------------------
    # Functions
def agregarSimbolo(self,simbolo):
clave = str(simbolo.nombre)+str(simbolo.BD)
self.simbolos[clave] = simbolo
def agregarVariable(self, simbolo):
clave = str(simbolo.nombre)+str(simbolo.BD)+str(simbolo.ambito)
self.simbolos[clave] = simbolo
def verificarFuncion(self,nombre,BD):
clave = str(nombre)+str(BD)
if not clave in self.simbolos:
return 0
return 1
    def eliminarVariablesFuncion(self,BD,ambito):
        # Collect the keys first: every local variable of the scope must be removed,
        # and deleting entries while iterating over the dict would fail.
        claves = []
        for simb in self.simbolos:
            if self.simbolos[simb].BD == BD and self.simbolos[simb].ambito == ambito:
                claves.append(simb)
        for clave in claves:
            del self.simbolos[clave]
        return 1 if len(claves) > 0 else 0
def contVariablesFunction(self,BD,ambito):
contt=0
for simb in self.simbolos:
if self.simbolos[simb].BD == BD and self.simbolos[simb].ambito == ambito:
contt+=1
return contt
def eliminarFunction(self,nombre,BD):
clave = str(nombre)+str(BD)
for simb in self.simbolos:
if clave == simb:
del self.simbolos[simb]
return 1
return 0
#-----------------------------------------------------------------------------------------------------------------------
def agregarnuevTablaBD(self,simbolo):
clave = str(simbolo.nombre)+str(simbolo.BD)
self.simbolos[clave] = simbolo
def validarTabla(self,nombre,BD):
clave = str(nombre)+str(BD)
if not clave in self.simbolos:
return 0
return 1
def obtenerTablaBD(self, nombre):
print("a este entra")
if not nombre in self.simbolos :
print('Error: La tabla: ', nombre, ' no definida.')
return 0
return self.simbolos[nombre]
#-----------------------------------------------------------------------------------------------------------------------
    # Table creation starts here
def agregarnuevaColumna(self,simbolo):
clave = str(simbolo.nombre) + str(simbolo.BD) + str(simbolo.tabla)
self.simbolos[clave] = simbolo
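    # Note on keys: entries are stored under plain string concatenations
    # (column name + database + table here; table/function name + database elsewhere),
    # so every lookup below rebuilds the same concatenated key before indexing self.simbolos.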
def verificarcolumnaBD(self,nombre,BD,tabla):
clave = str(nombre) + str(BD) + str(tabla)
if not clave in self.simbolos :
print('Error: La columna: ', nombre, ' no definida.')
return 0
return 1
def verificarcolumnaBDAT(self,nombre,BD,tabla):
clave = str(nombre) + str(BD) + str(tabla)
if not clave in self.simbolos :
print('Error: La tabla: ', nombre, ' no definida.')
return 0
return self.simbolos[clave]
def actualizauniqueColumna(self,nombre,BD,tabla):
clave = str(nombre) + str(BD) + str(tabla)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].unique = 1
print("se actualizao restriccion unique en columna")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def actualizauniqueColumnaAT(self,nombre,BD,tabla,idConstraint):
clave = str(nombre) + str(BD) + str(tabla)
print(clave)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].unique = 1
self.simbolos[simb].idConstraintFK = idConstraint
print("**********************************")
print(self.simbolos[simb].idConstraintFK)
print("**********************************")
print("se actualizao restriccion unique en columna")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def actualizarcheckColumna(self,nombre,BD,tabla,idchk,condchk):
clave = str(nombre) + str(BD) + str(tabla)
print(clave)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].check = 1
                    self.simbolos[simb].condicionCheck = condchk
self.simbolos[simb].idCheck = idchk
print("se actualizo restricion check en columna")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def actualizapkcolumna(self,nombre,BD,tabla):
clave = str(nombre) + str(BD) + str(tabla)
print(clave)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].pk = 1
print("se actualizo restricion llave primaria en columna")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def actualizapkcolumnaAT(self,nombre,BD,tabla,idConstraint):
clave = str(nombre) + str(BD) + str(tabla)
print(clave)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].pk = 1
self.simbolos[simb].unique = 1
self.simbolos[simb].obligatorio = 0
self.simbolos[simb].idConstraintPK = idConstraint
print("se actualizo restricion llave primaria en columna")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def actualizafkcolumna(self,nombre,BD,tabla,idrefcolumna,idreftabla):
clave = str(nombre) + str(BD) + str(tabla)
print(clave)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].FK = 1
self.simbolos[simb].referenciaCampoFK = idrefcolumna
self.simbolos[simb].referenciaTablaFK = idreftabla
print("se actualizo columna como llave foranea")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def actualizafkcolumnaAT(self,nombre,BD,tabla,idrefcolumna,idreftabla,idConstraint):
clave = str(nombre) + str(BD) + str(tabla)
print(clave)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
self.simbolos[simb].FK = 1
self.simbolos[simb].referenciaCampoFK = idrefcolumna
self.simbolos[simb].referenciaTablaFK = idreftabla
self.simbolos[simb].idConstraintFK = idConstraint
print("se actualizo columna como llave foranea")
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def numerodeColumnas(self,BD,tabla):
cont = 0
for simb in self.simbolos:
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == BD and self.simbolos[simb].tipo != None:
cont=cont+1
return cont
def numerodeDatosenColumna(self,nombre,BD,tabla):
clave = str(nombre)+str(BD)+str(tabla)
if self.simbolos[clave].valor == None:
return 0
return len(self.simbolos[clave].valor)
def numerodeDatosenprimeraColumna(self,tabla,BD):
for simb in self.simbolos:
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == BD and self.simbolos[simb].id == 0 and self.simbolos[simb].tipo != None:
if self.simbolos[simb].valor == None:
return 0
return len(self.simbolos[simb].valor)
return 0
def actualizandoDefaultColumna(self,nombre,BD,tabla):
clave = str(nombre)+str(BD)+str(tabla)
if self.simbolos[clave].valor == None:
if self.simbolos[clave].default != None:
self.simbolos[clave].valor = [self.simbolos[clave].default]
else:
self.simbolos[clave].valor = ["NULL"]
else:
if self.simbolos[clave].default != None:
                self.simbolos[clave].valor.append(self.simbolos[clave].default)
else:
self.simbolos[clave].valor.append("NULL")
#-----------------------------------------------------------------------------------------------------------------------
    # Table INSERT handling starts here
    # Called when the INSERT statement only lists the values, without naming the target columns
def obtenersinNombreColumna(self,nombre,BD,id):
for simb in self.simbolos:
if self.simbolos[simb].tabla == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].id == id and self.simbolos[simb].tipo != None:
return self.simbolos[simb]
return 0
    # Called when the INSERT statement names the target column explicitly
def obtenerconNombreColumna(self,nombre,BD,tabla):
clave = str(nombre) + str(BD) + str(tabla)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla and self.simbolos[simb].tipo != None:
return self.simbolos[simb]
return 0
    # Used to append the inserted value to the column's data in the symbol table
def actualizarValorColumna(self,nombre,BD,tabla,dato):
clave = str(nombre) + str(BD) + str(tabla)
for simb in self.simbolos:
if simb == clave:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla and self.simbolos[simb].tipo != None:
if self.simbolos[simb].valor == None:
self.simbolos[simb].valor = [dato]
else:
self.simbolos[simb].valor.append(dato)
print("se agrego un dato a la columna: ",nombre," en tabla: ",tabla)
return
#print(self.simbolos[simb].id," ",self.simbolos[simb].nombre," ",self.simbolos[simb].BD," ",self.simbolos[simb].tabla)
print("la columna no existe")
return 0
def columnasPrimaria(self,BD,tabla):
listpk = []
for simb in self.simbolos:
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == BD and self.simbolos[simb].pk == 1 and self.simbolos[simb].tipo != None:
listpk.append(self.simbolos[simb].id)
return listpk
    #-------------- Record delete
def eliminarRegistroTabla(self,BD,tabla,posvalor):
for simb in self.simbolos:
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == BD and self.simbolos[simb].tipo != None:
self.simbolos[simb].valor.pop(posvalor)
return 0
    #--------------- Record update
def UpdateRegistro(self,nombre,BD,tabla,dato,pos):
clave = str(nombre) + str(BD) + str(tabla)
if not clave in self.simbolos :
print('Error: La tabla: ', nombre, ' no definida.')
return 0
self.simbolos[clave].valor[pos] = dato
return 1
def printcontsimbolos(self):
tm = 0
for simb in self.simbolos:
print("----------Columna ",tm,"----------")
print(self.simbolos[simb].id)
print(self.simbolos[simb].nombre)
print(self.simbolos[simb].tipo)
print(self.simbolos[simb].tamanoCadena)
print(self.simbolos[simb].BD)
print(self.simbolos[simb].tabla)
print(self.simbolos[simb].obligatorio)
print(self.simbolos[simb].pk)
print(self.simbolos[simb].FK)
print(self.simbolos[simb].referenciaTablaFK)
print(self.simbolos[simb].referenciaCampoFK)
print(self.simbolos[simb].unique)
print(self.simbolos[simb].idUnique)
print(self.simbolos[simb].check)
print(self.simbolos[simb].condicionCheck)
print(self.simbolos[simb].idCheck)
print(self.simbolos[simb].valor)
print(self.simbolos[simb].default)
print(self.simbolos[simb].idConstraintFK)
print(self.simbolos[simb].idConstraintPK)
tm=tm+1
return 0
    # -------------------- CREATE, ALTER, USE AND DROP DATABASE ---------------------------------------------------------------------
def agregarCrearBD(self, simbolo) :
self.simbolos[simbolo.nombre] = simbolo
def verificacionCrearBD(self, nombre) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
print('Error1: base de datos ', nombre, ' ya definida.')
return 1
return 0
def verificacionUseBD(self, nombre) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
print('BD ', nombre, ' existente.')
return 1
return 0
def verificacionAlterBD(self, nombre) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
return 1
return 0
def verificacionAlterBD_2(self, nombre) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
return 1
return 0
def actualizarAlterBD(self, old, alter) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == old and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
print("SIMB",self.simbolos[simb])
self.simbolos[alter] = self.simbolos.pop(simb)
self.simbolos[alter].nombre = alter
return 2
return 1
def destruirBD(self,nombre):
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
print('Se elimino ', nombre)
self.simbolos.pop(simb)
return 1
return 0
def verificacionShowBD(self) :
bd = []
for simb in self.simbolos:
print("entro a for")
if self.simbolos[simb].nombre != None and self.simbolos[simb].BD == None and self.simbolos[simb].tabla == None:
bd.append(self.simbolos[simb].nombre)
return bd
    # Returns the data when one table is requested with all of its columns
def obtenerSelect1A(self, tabla, bd) :
print("a este entra metodo")
print("la bd: ",bd)
if tabla=="" or bd=="": return 0
a=""
columnas=[]
datos={}
for simb in self.simbolos:
print(simb)
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == bd:
print("res: ",self.simbolos[simb].valor)
print( simb," = ",self.simbolos[simb].valor)
a+=str(simb)+" = "+str(self.simbolos[simb].valor)+"\n"
datos[simb]=self.simbolos[simb].valor
#columnas.append(simb)
if a=="":
print("A va vacio")
return 0
else:
print("vera si genera el dataframe")
df=pd.DataFrame(datos)
print(df)
print("si termino")
print("A es: ",a)
return df
    # Returns the data when several columns of a single table are requested
def obtenerSelect2B(self, tabla, bd, campos) :
print("a este entra metodo")
print("la bd: ",bd)
print("la tabla: ",tabla)
print("campos: ",campos)
if tabla=="" or bd=="" or len(campos)==0: return 0
a=""
columnas=[]
datos={}
for x in range(0,len(campos)):
for simb in self.simbolos:
print(simb)
key=str(self.simbolos[simb].nombre)+str(self.simbolos[simb].BD)+str(self.simbolos[simb].tabla)
print("el nombre sera ====",key)
if self.simbolos[simb].tabla == tabla and self.simbolos[simb].BD == bd and (self.simbolos[simb].nombre+self.simbolos[simb].BD+self.simbolos[simb].tabla)==campos[x]:
print("res: ",self.simbolos[simb].valor)
print( simb," = ",self.simbolos[simb].valor)
a+=str(simb)+" = "+str(self.simbolos[simb].valor)+"\n"
datos[simb]=self.simbolos[simb].valor
#columnas.append(simb)
if a=="":
print("A va vacio")
return 0
else:
print("vera si genera el dataframe")
df=pd.DataFrame(datos)
print(df)
print("si termino")
print("A es: ",a)
return df
def obtenerSelect2E(self, identificador):
if len(identificador)==0: return "no se encontro la variable"
a=""
for x in range(0,len(identificador)):
for simb in self.simbolos:
if self.simbolos[simb].nombre == identificador[x]:
a+= str(self.simbolos[simb].nombre)+" = "+ str(self.simbolos[simb].valor)+"\n"
if a=="":
print("A va vacio")
return 0
else:
return a
    # Returns the data when several columns from the listed tables are requested
def obtenerSelect4(self, tabla, bd, campos) :
print("a este entra metodo----------------------")
print("la bd: ",bd)
print("la tabla: ",tabla)
print("campos: ",campos)
if tabla=="" or bd=="" or len(campos)==0: return 0
a=""
columnas=[]
datos={}
for x in range(0,len(tabla)):
for y in range(0,len(campos)):
for simb in self.simbolos:
print(simb)
key=str(self.simbolos[simb].nombre)+str(self.simbolos[simb].BD)+str(self.simbolos[simb].tabla)
print("el nombre sera ====",key)
if self.simbolos[simb].tabla == tabla[x] and self.simbolos[simb].BD == bd and (self.simbolos[simb].nombre+self.simbolos[simb].BD+self.simbolos[simb].tabla)==campos[y]:
print("res: ",self.simbolos[simb].valor)
print( simb," = ",self.simbolos[simb].valor)
a+=str(simb)+" = "+str(self.simbolos[simb].valor)+"\n"
datos[simb]=self.simbolos[simb].valor
#columnas.append(simb)
if a=="":
print("A va vacio")
return 0
else:
print("vera si genera el dataframe")
df=pd.DataFrame(datos)
print(df)
print("si termino")
print("A es: ",a)
return df
    # Returns the data when several tables are requested with all of their data
def obtenerSelect5Todo(self, tabla, bd) :
print("a este entra metodo----------------------")
print("la bd: ",bd)
print("la tabla: ",tabla)
if bd=="" or len(tabla)==0: return 0
a=""
columnas=[]
datos={}
for x in range(0,len(tabla)):
for simb in self.simbolos:
print(simb)
key=str(self.simbolos[simb].nombre)+str(self.simbolos[simb].BD)+str(self.simbolos[simb].tabla)
print("el nombre sera ====",key)
if self.simbolos[simb].tabla == tabla[x] and self.simbolos[simb].BD == bd:
print("res: ",self.simbolos[simb].valor)
print( simb," = ",self.simbolos[simb].valor)
a+=str(simb)+" = "+str(self.simbolos[simb].valor)+"\n"
datos[simb]=self.simbolos[simb].valor
#columnas.append(simb)
if a=="":
print("A va vacio")
return 0
else:
print("vera si genera el dataframe")
df=pd.DataFrame(datos)
print(df)
print("si termino")
print("A es: ",a)
return df
def agregarnuevoIndex(self,simbolo):
clave = str(simbolo.nombre) + str(simbolo.BD) + str(simbolo.tabla)
self.simbolos[clave] = simbolo
def verificacionAlterColumnIndex(self, nombre, BD,idcolumn) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD:
print(self.simbolos[simb].tabla)
return self.simbolos[simb].tabla
return 0
def obtenerTablasIndex(self,nombre,BD,idcolumn):
for simb in self.simbolos:
if self.simbolos[simb].tabla == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].id == idcolumn:
print(self.simbolos[simb].nombre)
return self.simbolos[simb].nombre
return 0
def verificacionAlterStringColumIndex(self, nombre, BD,idcolumn) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD:
print(self.simbolos[simb].tabla)
return self.simbolos[simb].tabla
return 0
def obtenerTablasStringIndex(self,nombre,BD,idcolumn):
for simb in self.simbolos:
if self.simbolos[simb].tabla == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].nombre == idcolumn:
print(self.simbolos[simb].nombre)
return self.simbolos[simb].nombre
return 0
def verificarIndex(self,nombre,BD,tabla):
clave = str(nombre) + str(BD) + str(tabla)
if not clave in self.simbolos :
for simb in self.simbolos:
if self.simbolos[simb].BD == BD and self.simbolos[simb].tabla == tabla:
return 0
else:
return 1
def verificarTablaIndex(self, nombre, BD, idcolumn):
for simb in self.simbolos:
if self.simbolos[simb].tabla == nombre and self.simbolos[simb].BD == BD:
print("TABLA:",self.simbolos[simb].tabla)
return self.simbolos[simb].tabla
return 0
def obtenerColumnaIndex(self,nombre,BD,idcolumn):
print("COLL:",idcolumn)
for simb in self.simbolos:
for col in idcolumn:
if self.simbolos[simb].tabla == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].nombre == col:
print(self.simbolos[simb].nombre)
return 0
return 1
def obtenerColumnaUnicaIndex(self,nombre,BD,idcolumn):
for simb in self.simbolos:
if self.simbolos[simb].tabla == nombre and self.simbolos[simb].BD == BD and self.simbolos[simb].nombre == idcolumn:
print(self.simbolos[simb].nombre)
return 0
return 1
def verificacionAlterIndex(self, nombre, BD) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD:
return 1
return 0
def deleteAlterIndex(self, nombre, BD) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD:
print("SIMB",self.simbolos[simb])
del self.simbolos[simb]
return 2
return 1
def actualizarAlterIndex(self, old, alter, BD) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == old and self.simbolos[simb].BD == BD:
print("SIMB",self.simbolos[simb])
clave = alter + BD + self.simbolos[simb].tabla
tipo = self.simbolos[simb].tipoIndex
sort = self.simbolos[simb].sortIndex
tabla = self.simbolos[simb].tabla
valores = self.simbolos[simb].valor
BDatos = BD
simbolo = Simbolo(None,alter,None,None,BDatos,tabla,None,None,None,None,None,None,None,None,None,None,valores,None,None,None,tipo,sort,None,None)
print(simbolo)
self.simbolos[clave] = simbolo
del self.simbolos[simb]
return 2
return 1
def actualizarAlterColumnIndex(self, nombre, nombreColumna, BD) :
for simb in self.simbolos:
if self.simbolos[simb].nombre == nombre and self.simbolos[simb].BD == BD:
print("SIMB",self.simbolos[simb])
clave = nombre + BD + self.simbolos[simb].tabla
tipo = self.simbolos[simb].tipoIndex
sort = self.simbolos[simb].sortIndex
tabla = self.simbolos[simb].tabla
valores = [nombreColumna]
BDatos = BD
simbolo = Simbolo(None,nombre,None,None,BDatos,tabla,None,None,None,None,None,None,None,None,None,None,valores,None,None,None,tipo,sort,None,None)
print(simbolo)
self.simbolos[clave] = simbolo
#del self.simbolos[simb]
return 2
return 1
| 42.71301 | 257 | 0.545943 |
ef577e05aa83900eeb732ad94aa60eb48f74c640 | 1,600 | py | Python | scalpr/core/state.py | TvanMeer/scalpr | c4d2e07da60663f77c3d17875aa61ad9d215a08d | [
"MIT"
] | 1 | 2022-02-14T22:48:58.000Z | 2022-02-14T22:48:58.000Z | scalpr/core/state.py | TvanMeer/scalpr | c4d2e07da60663f77c3d17875aa61ad9d215a08d | [
"MIT"
] | null | null | null | scalpr/core/state.py | TvanMeer/scalpr | c4d2e07da60663f77c3d17875aa61ad9d215a08d | [
"MIT"
] | 1 | 2022-02-14T22:49:01.000Z | 2022-02-14T22:49:01.000Z | import asyncio
from dataclasses import dataclass, field
from queue import Queue
from typing import Dict
from ..database.database import DataBase
@dataclass
class SharedState:
"""Holds data that is shared between all concurrent loops.
Contains both state variables for internal usage, and
the database instance.
"""
db: DataBase
user_input_queue: Queue
stop: bool = False
    # default_factory ensures every SharedState gets its own queue instead of one
    # shared instance created at import time
    queue: asyncio.Queue = field(default_factory=asyncio.Queue)
history_downloaded: Dict = field(init=False)
last_candles_update: Dict = field(init=False)
last_candles_update_closed: Dict = field(init=False)
def __post_init__(self):
self.history_downloaded = self.init_history_downloaded(self.db)
self.last_candles_update = self.init_last_candles_update(self.db)
self.last_candles_update_closed = self.init_last_candles_update_closed(self.db)
def init_history_downloaded(self, db: DataBase) -> Dict:
hist = {}
for s in db.selected_symbols:
hist[s] = {}
for iv in db.options.window_intervals:
hist[s][iv] = False
return hist
def init_last_candles_update(self, db: DataBase) -> Dict:
update = {}
for s in db.selected_symbols:
update[s] = None
return update
def init_last_candles_update_closed(self, db: DataBase) -> Dict:
closed = {}
for s in db.selected_symbols:
closed[s] = False
return closed
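# Illustrative sketch (not part of the original module): with a hypothetical DataBase
# whose selected_symbols == ["BTCUSDT"] and options.window_intervals == ["1m", "5m"],
# SharedState(db, Queue()) initializes
#   history_downloaded         == {"BTCUSDT": {"1m": False, "5m": False}}
#   last_candles_update        == {"BTCUSDT": None}
#   last_candles_update_closed == {"BTCUSDT": False}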
| 30.769231 | 87 | 0.615625 |
61197612f2dc57e38dc0a6d7c579e473985b0251 | 19,178 | py | Python | model/transform.py | ZENGXH/NPDRAW | 339d1d9b4880cce891cafe7c20198ef7c121a29e | [
"MIT"
] | 21 | 2021-06-28T18:29:28.000Z | 2022-03-13T09:12:07.000Z | model/transform.py | ZENGXH/NPDRAW | 339d1d9b4880cce891cafe7c20198ef7c121a29e | [
"MIT"
] | null | null | null | model/transform.py | ZENGXH/NPDRAW | 339d1d9b4880cce891cafe7c20198ef7c121a29e | [
"MIT"
] | 2 | 2021-07-05T02:29:32.000Z | 2021-11-02T08:25:14.000Z | import torch
import numpy as np
import utils
from loguru import logger
from utils.checker import *
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from utils import data_helper, ph_helper
# PIL is needed by vis_gen_process below for drawing the per-step bounding boxes
from PIL import Image, ImageDraw
class CanvasPlotter(object):
'''
one class hold several function within transform file;
'''
def __init__(self, n_class_loc, n_class_sel, nloc, dataset,
loc_dist, device, patch_bank, patch_size, add_stop, loc_stride,
loc_transform_i2f, loc_transform_f2i, use_bg_mean=False
):
'''
patch_bank in shape [1,nH,nW,K,ph,pw,imgd]
'''
CHECK7D(patch_bank)
self.add_stop = add_stop
# assert(self.add_stop), 'requried'
self.loc_stride = loc_stride
self.fcn = 0
self.loc_dist = loc_dist
self.n_class_loc = n_class_loc
self.n_class_sel = n_class_sel
self.img_size = data_helper.get_imgsize(dataset)
self.canvas_size = data_helper.get_canvsize(dataset)
self.imgd = data_helper.get_imgd(dataset)
if use_bg_mean:
self.bg_mean = data_helper.get_img_mean(dataset)
self.use_bg_mean = use_bg_mean
self.nloc = nloc
self.categorical_dim = n_class_sel
self.patch_size = patch_size
self.device = torch.device(device)
logger.info('[INIT CanvasPlotter]: use_bg_mean={}, img_size={}, nloc={}, canvas_size={}, imgd={}, patch_size={}',
self.use_bg_mean, self.img_size, self.nloc, self.canvas_size, self.imgd, self.patch_size)
#if self.loc_dist == 'Gaussian' and self.fcn:
# self.loc_discrete = 0
# self.loc_reg_from_anchor = 1
#elif self.loc_dist == 'Gaussian':
# self.loc_discrete = self.loc_reg_from_anchor = 0
if self.loc_dist == 'cat':
self.loc_discrete = 1
self.loc_reg_from_anchor = 0
assert(self.loc_stride), 'require loc_stride value'
# -- location transform --
# float_matrix = ph_helper.prepare_loc_float_matrix(
# self.canvas_size, self.loc_stride)
self.loc_transform_i2f = loc_transform_i2f #partial(ph_helper.loc_transform_i2f, float_matrix)
self.loc_transform_f2i = loc_transform_f2i #partial(ph_helper.loc_transform_f2i, float_matrix)
else:
raise ValueError('not support loc_dist: %s'%self.loc_dist)
self.info_dict = {'categorical_dim': self.categorical_dim,
'nloci': self.nloc,
'nlocj':1, 'ph': self.patch_size, 'pw':self.patch_size,
'canvas_size':self.canvas_size, 'img_size':self.img_size,
### 'latent_dim': self.latent_dim,
'imgd':self.imgd, 'canvasd': self.imgd
}
self.canvasd = self.imgd
self.grid_H = torch.arange(1., 1.+self.canvas_size).to(device).detach()
self.zcls_BHWK_to_psel_BHW_phw = partial(zcls_BHWK_to_psel_BHW_phw,
info_dict=self.info_dict, patch_bank=patch_bank)
self.coords_ij_to_distmap = partial(coords_ij_to_distmap,
info_dict=self.info_dict, grid_H=self.grid_H)
self.transform_ij = partial(transform_ij,
info_dict=self.info_dict, grid_H=self.grid_H)
def vis_gen_process(self, z, break_in_step=True, customize_nloc=0):
""" visulize the how generation process, with bounding box draw on canvas
Args:
z: dict of z sampled from q(z|x)
sampled_sel: B.nloc.K1, can be soft
sampled_loc: B.nloc.K2, one-hot | top-left or center ??
sampled_stp: B.nloc.1 #?
customize_nloc: support taking different length of seq as input
"""
z_cls, z_loc, z_stp = z['sampled_sel'], z['sampled_loc'], z['sampled_stp']
B = z_cls.shape[0]
nloc, img_size, canvas_size = self.nloc, self.img_size, self.canvas_size
if customize_nloc:
nloc = customize_nloc
Hc = Wc = canvas_size
K,ph = self.n_class_sel, self.patch_size
CHECKSIZE(z_cls, (B,nloc,K))
# -- decode location output to (x,y) --
assert(not self.loc_reg_from_anchor and self.loc_discrete)
k_loc = self.n_class_loc
CHECKSIZE(z_loc, (B,nloc,k_loc))
# -- transform from categorical_dim --
z_loc = self.loc_transform_i2f(z_loc.view(B*nloc,k_loc)).view(B,nloc,2)
g_i = z_loc[:,:,0].view(B*nloc,1)
g_j = z_loc[:,:,1].view(B*nloc,1)
# map to location; [0,Wc], [0,Hc]
if customize_nloc:
psel = self.zcls_BHWK_to_psel_BHW_phw(z_cls.unsqueeze(2), nloc=customize_nloc) # B,N,1,K
else:
psel = self.zcls_BHWK_to_psel_BHW_phw(z_cls.unsqueeze(2)) # B,N,1,K
# center location of the patches
map_shape = [B,nloc,canvas_size, canvas_size, self.imgd]
pat_map = self.transform_ij(g_i, g_j, psel).view(*map_shape).cpu().numpy()
out_list = []
        if break_in_step:
            for b in range(B):
                for locid in range(nloc):
                    img = (pat_map[b,locid] * 255).astype(np.uint8) # canvas_size, canvas_size, imgd
                    img = np.squeeze(img) if self.imgd == 1 else img
                    img = Image.fromarray(img).convert('RGB')
                    draw = ImageDraw.Draw(img)
                    ri_color = tuple([0,255,255])
                    ti = int(g_i[b*nloc+locid] - ph*0.5)
                    tj = int(g_j[b*nloc+locid] - ph*0.5)
                    draw.rectangle([ (tj,ti), (tj+ph,ti+ph)], outline=ri_color)
                    out_list.append(img)
        else:
            for b in range(B):
                # compose the full canvas for this sample: max over all plotting steps
                img = (pat_map[b].max(0) * 255).astype(np.uint8) # canvas_size, canvas_size, imgd
                img = np.squeeze(img) if self.imgd == 1 else img
                out_list.append(Image.fromarray(img))
        return out_list
@logger.catch(reraise=True)
def create_canvas(self, z, break_in_step=False, customize_nloc=0, return_per_step_loc=0):
'''
Args:
z: dict of z sampled from q(z|x)
sampled_sel: B.nloc.K1, can be soft
sampled_loc: B.nloc.K2, one-hot | top-left or center ??
sampled_stp: B.nloc.1 #?
customize_nloc: support taking different length of seq as input
Return:
z shape: (B,1,canv_size,canvas_size)
latent: (B,nloc,2,cs,cs)
if break_in_step:
return z: (B*nloc,D,canvas_size,canvas_size)
                the canvas after each step is returned, so the drawing progress can be visualized
'''
z_cls, z_loc, z_stp = z['sampled_sel'], z['sampled_loc'], z['sampled_stp']
B = z_cls.shape[0]
nloc, img_size, canvas_size = self.nloc, self.img_size, self.canvas_size
if customize_nloc:
nloc = customize_nloc
Hc = Wc = canvas_size
K,ph = self.n_class_sel, self.patch_size
CHECKSIZE(z_cls, (B,nloc,K))
if self.loc_reg_from_anchor:
CHECKSIZE(z_loc, (B,nloc,2))
z_loc = self.loc_trans_anchor(z_loc)
# map to location; [0,Wc], [0,Hc]
elif self.loc_discrete:
k_loc = self.n_class_loc
CHECKSIZE(z_loc, (B,nloc,k_loc))
# -- transform from categorical_dim --
z_loc = self.loc_transform_i2f(z_loc.view(B*nloc,k_loc)).view(B,nloc,2)
# map to location; [0,Wc], [0,Hc]
else:
CHECKSIZE(z_loc, (B,nloc,2))
# map to location; [0,Wc], [0,Hc]
g_i = z_loc[:,:,0].view(B*nloc,1)
g_j = z_loc[:,:,1].view(B*nloc,1)
if customize_nloc:
psel = self.zcls_BHWK_to_psel_BHW_phw(z_cls.unsqueeze(2), nloc=customize_nloc) # B,N,1,K
else:
psel = self.zcls_BHWK_to_psel_BHW_phw(z_cls.unsqueeze(2)) # B,N,1,K
# center location of the patches
map_shape = [B,nloc,canvas_size, canvas_size, self.imgd]
pat_map = self.transform_ij(g_i, g_j, psel).view(*map_shape)
if self.add_stop:
CHECKSIZE(z_stp, (B,nloc,1))
# loc_map: B,nloc,csize,csize
z_stp = z_stp.view(B,nloc,1,1,1)
## loc_map = loc_map * z_stp
pat_map = pat_map * z_stp
# B,nloc,Hc,Wc,D -> (max over the nloc) -> B,Hc,Wc,D -> (permute) -> B,D,Hc,Wc
if not break_in_step:
canvas = pat_map.max(1)[0].view(B,Hc,Wc,self.imgd).permute(0,3,1,2).contiguous() # select the max value over Nsteps
else:
loc_map = self.transform_ij(g_i, g_j, psel*0+1).view(*map_shape)[...,0] # only select the first one C
#B,nloc,cs,cs,D
canvas = []
loc_map_list = []
for locid in range(nloc):
canvas.append(pat_map[:,:locid+1].max(1)[0].view(B,Hc,Wc,self.imgd).permute(0,3,1,2).contiguous())
loc_map_list.append(loc_map[:,:locid+1].max(1)[0].view(B,1,Hc,Wc))
canvas = torch.stack(canvas, dim=1)
loc_map_list = torch.cat(loc_map_list, dim=1)
if not return_per_step_loc:
return canvas.view(B*nloc,self.imgd,Hc,Wc),loc_map_list.view(B*nloc,1,Hc,Wc)
else:
return canvas.view(B*nloc,self.imgd,Hc,Wc),loc_map.view(B*nloc, Hc,Wc,self.imgd)
# get pasted patch
# shape: B,nloc,2,canvas_size, canvas_size
#logger.info('pat_map: {}, loc_map: {}', pat_map.shape, loc_map.shape)
## latent = torch.cat([pat_map, loc_map.view(B,nloc,1,Hc,Wc)],dim=2)
z = canvas.view(B, -1) # output for plot?
z = z.view(B, self.imgd, Hc, Wc)
#logger.info('output: {}', latent.shape)
return z ## , latent
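# Illustrative shape walk-through for create_canvas above (MNIST-like defaults, numbers
# are made up): with B=2 samples, nloc=4 steps, K=10 prototypes, patch_size=6 and a
# 28x28 canvas, sampled_sel is (2,4,10), sampled_loc is (2,4,n_class_loc) and
# sampled_stp is (2,4,1); each step pastes its selected 6x6 prototype at the decoded
# (i,j) centre via transform_ij, and the final canvas is the element-wise max over the
# 4 steps, returned as (2,1,28,28) when break_in_step is False.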
def zcls_BHWK_to_psel_BHW_phw(z_cls, info_dict, patch_bank, nloc=None): #, z_cls):
''' obtain selected patches
Args:
z_cls (tensor): B,H,W,K output of ?
Returns:
psel (tensor): B,H,W,K,ph,pw,canvasd -> BHW,ph,pw,canvasd
'''
B = z_cls.shape[0]
categorical_dim, nloci, nlocj, ph, pw, canvasd = info_dict['categorical_dim'], info_dict['nloci'], \
info_dict['nlocj'], info_dict['ph'], info_dict['pw'], info_dict['canvasd']
if nloc:
nloci = nloc
nlocj = 1
K,H,W = categorical_dim, nloci, nlocj
CHECKSIZE(z_cls, (B,H,W,K))
# logger.info('check selectk: {}', z_cls.mean(3).view(-1))
patch_bank = patch_bank[:,:nloc]
CHECKSIZE(patch_bank, (1,H,W,K,ph,pw,canvasd))
target_shape = [B,H,W,K,ph,pw,canvasd]
z_cls = z_cls.view(B,H,W,K,1,1,1).expand(*target_shape) #-1,-1,-1,-1,ph,pw,canvasd) # B,nH,nW,K,ph,pw
psel = z_cls * patch_bank.expand(*target_shape) #B,-1,-1,-1,-1,-1) # B,nh,nw,K,ph,pw
psel_sum = psel.sum(3).view(B*H*W,ph,pw,canvasd) # sum along K dimension
else:
K,H,W = categorical_dim, nloci, nlocj
CHECKSIZE(z_cls, (B,H,W,K))
# logger.info('check selectk: {}', z_cls.mean(3).view(-1))
CHECKSIZE(patch_bank, (1,H,W,K,ph,pw,canvasd))
target_shape = [B,H,W,K,ph,pw,canvasd]
z_cls = z_cls.view(B,H,W,K,1,1,1).expand(*target_shape) #-1,-1,-1,-1,ph,pw,canvasd) # B,nH,nW,K,ph,pw
psel = z_cls * patch_bank.expand(*target_shape) #B,-1,-1,-1,-1,-1) # B,nh,nw,K,ph,pw
psel_sum = psel.sum(3).view(B*H*W,ph,pw,canvasd) # sum along K dimension
return psel_sum
def diff_round(mu_x):
mu_x_int = torch.round(mu_x).float()
mu_x_diff = mu_x_int - mu_x
mu_x = mu_x + mu_x_diff.detach() # make it to closest int, while support gradient backprop
return mu_x
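# Minimal sketch of the straight-through rounding above: values are snapped to the
# nearest integer in the forward pass while gradients pass through unchanged.
#   x = torch.tensor([1.4, 2.6], requires_grad=True)
#   y = diff_round(x)        # tensor([1., 3.])
#   y.sum().backward()
#   x.grad                   # tensor([1., 1.])  -- identity gradient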
def compute_filterbank_matrices(g_x, g_y, H, W, patch_size, grid, var=0.001):
""" DRAW section 3.2 -- computes the parameters for an NxN grid of Gaussian filters over the input image.
Args
g_x, g_y -- tensors of shape (B, 1); unnormalized center coords for the attention window, suppose to
be in range [-1,1]; but the DRAW model does not enforce that
logvar -- tensor of shape (B, 1); log variance for the Gaussian filters (filterbank matrices) on the attention window
logdelta -- tensor of shape (B, 1); unnormalized stride for the spacing of the filters in the attention window
H, W -- scalars; original image dimensions
attn_window_size -- scalar; size of the attention window (specified by the read_size / write_size input args
Returns
g_x, g_y -- tensors of shape (B, 1); normalized center coords of the attention window;
delta -- tensor of shape (B, 1); stride for the spacing of the filters in the attention window
mu_x, mu_y -- tensors of shape (B, attn_window_size); means location of the filters at row and column
F_x, F_y -- tensors of shape (B, N, W) and (B, N, H) where N=attention_window_size; filterbank matrices
"""
B = g_x.shape[0]
ph = patch_size
device = g_x.device
# rescale attention window center coords and stride to ensure the initial patch covers the whole input image
# eq 22 - 24
delta = 1 # (B, 1)
# compute the means of the filter
# eq 19 - 20 [1,2,....10] - 5 = [-4,-3,....5]
offset_sampled_loc = torch.arange(1.0, 1.0+ph).to(device) - 0.5*ph
## offset_sampled_loc = torch.arange(1.0, 1.0+ph).to(device) - 0.5*ph
offset_sampled_loc = offset_sampled_loc.view(1,ph).expand(B,-1) # B,ph
g_x = g_x.view(B,1).expand(-1,ph) # B,ph
g_y = g_y.view(B,1).expand(-1,ph) # B,ph
mu_x = g_x + offset_sampled_loc # B,ph
mu_x = diff_round(mu_x)
# g_y shape: B; + size (1,10) -> B,10
mu_y = g_y + offset_sampled_loc # [B,ph]
mu_y = diff_round(mu_y)
# mu_x = g_x + (torch.arange(1., 1. + attn_window_size).to(device) - 0.5*(attn_window_size + 1)) * delta # (B, N)
# mu_y = g_y + (torch.arange(1., 1. + attn_window_size).to(device) - 0.5*(attn_window_size + 1)) * delta # (B, N)
# compute the filterbank matrices
# B = batch dim; N = attn window size; H = original heigh; W = original width
# eq 25 -- combines logvar=(B, 1, 1) * ( range=(B, 1, W) - mu=(B, N, 1) ) = out (B, N, W); then normalizes over W dimension;
grid = grid.view(1,1,H).expand(B,ph,-1) # B,N,28
mu_x = mu_x.view(B,ph,1).expand(-1,-1,H) # B,N,H
mu_y = mu_y.view(B,ph,1).expand(-1,-1,H) # B,N,H
F_x = torch.exp(-0.5/var * (grid - mu_x)**2) #B.N.H
# eq 26
# F_x shape: B,1,28; mu_y shape: B,10,28
F_y = torch.exp(-0.5/var * (grid - mu_y)**2)
# since Gaussian Filter Bank: require the intergration of F_y over its space is 1
return g_x, g_y, F_x, F_y, mu_x, mu_y #, grid
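# Shape sketch for the filterbank above (illustrative, assuming a 28x28 canvas and a
# 5x5 patch): with g_x, g_y of shape (B, 1), grid = torch.arange(1., 29.) and
# patch_size=5, F_x and F_y come back as (B, 5, 28) (and mu_x, mu_y likewise), so a
# patch p of shape (B, 5, 5) is written onto the canvas as
#   F_y.transpose(-2, -1) @ p @ F_x   # -> (B, 28, 28)
# which is exactly how transform_ij below pastes patches.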
#def zcls_BHWK_to_psel_BHW_phw(z_cls, info_dict, patch_bank): #, z_cls):
# ''' obtain selected patches
# Args:
# z_cls (tensor): B,H,W,K output of ?
# Returns:
# psel (tensor): B,H,W,K,ph,pw,canvasd -> BHW,ph,pw,canvasd
# '''
# B = z_cls.shape[0]
# categorical_dim, nloci, nlocj, ph, pw, canvasd = info_dict['categorical_dim'], info_dict['nloci'], \
# info_dict['nlocj'], info_dict['ph'], info_dict['pw'], info_dict['canvasd']
# K,H,W = categorical_dim, nloci, nlocj
# CHECKSIZE(z_cls, (B,H,W,K))
# # logger.info('check selectk: {}', z_cls.mean(3).view(-1))
# CHECKSIZE(patch_bank, (1,H,W,K,ph,pw,canvasd))
# target_shape = [B,H,W,K,ph,pw,canvasd]
# z_cls = z_cls.view(B,H,W,K,1,1,1).expand(*target_shape) #-1,-1,-1,-1,ph,pw,canvasd) # B,nH,nW,K,ph,pw
# psel = z_cls * patch_bank.expand(*target_shape) #B,-1,-1,-1,-1,-1) # B,nh,nw,K,ph,pw
# psel_sum = psel.sum(3).view(B*H*W,ph,pw,canvasd) # sum along K dimension
# return psel_sum
def coords_ij_to_distmap(g_i, g_j,info_dict, grid_H,customize_nloc=0):
'''
Args:
g_i (tensor): [B,Nloc], range(0,canvas_size)
Returns:
loc (tensor): [B*Nloc,canvas_size,canvas_size]
convert location i,j to a heatmap with peak at i,j
and radius as the patch-size
'''
B = g_i.shape[0]
csize = info_dict['canvas_size']
categorical_dim, nloci, nlocj, ph = info_dict['categorical_dim'], info_dict['nloci'], \
info_dict['nlocj'], info_dict['ph']
if customize_nloc:
nloci=customize_nloc
nlocj=1
K,H,W = categorical_dim, nloci, nlocj
CHECKSIZE(g_i, (B,H*W))
cover = data_helper.get_cover(ph, csize)
var_img = ((cover // 2)/3.0) ** 2
Nloc = H*W
grid = grid_H.view(1,1,csize).expand(B*Nloc, 1, -1) # BN,1,28
# B,1,28 - B,Nloc,1
loc_x = torch.exp(- 0.5 / var_img * (grid - g_j.view(B*Nloc,1,1))**2) # BHW, 1, 28
loc_y = torch.exp(- 0.5 / var_img * (grid - g_i.view(B*Nloc,1,1))**2) # B*NLoc,1, 28
CHECKSIZE(loc_x, (B*Nloc,1,csize))
loc = loc_y.transpose(-2,-1) @ loc_x # expect: BHW,28,28
CHECKSIZE(loc, (B*Nloc,csize,csize))
g_i_mask = g_i.ne(-1).view(B,Nloc,1,1).expand(-1,-1,csize,csize).view(
B*Nloc,csize,csize).float()
loc = loc * g_i_mask # mask out those g_i = -1
return loc
def transform_ij(g_i, g_j, psel, info_dict, grid_H, return_writted_loc=False):
''' transform patches to canvas
grid_H = torch.arange(1., 1.+self.canvas_size).cuda().detach()
Args:
g_i (tensor): BNloc,1; the (center?) location of the selected patch
psel (tensor): BNloc,ph,pw,canvasd; the selected patches
Returns:
w (tensor): BNloc,canvas_size,canvas_size,canvasd
if return_writted_loc:
writed_loc: same shape as w, for the location being writed, entry is 1
H,W: number of location, put it in the batch dim,
if canvasd > 1: will also be intergrated into batch-dim
'''
BNloc = g_j.shape[0]
CHECKSIZE(g_j, (BNloc,1))
canvas_size = info_dict['canvas_size']
ph = info_dict['ph']
canvasd = info_dict['canvasd']
CHECK4D(psel) # last dim is the canvasd
canvasd = psel.shape[-1]
CHECKSIZE(psel, (BNloc,ph,ph,canvasd))
## K,H,W = categorical_dim, nloci, nlocj
# given the center location of the patches, and canvas_size, patch size
# F_x: shape (BNloc,10,28)
g0_x, g0_y, F_x, F_y, mu_x, mu_y = compute_filterbank_matrices(
g_j, g_i, canvas_size, canvas_size, ph, grid_H)
CHECKSIZE(F_x, (BNloc,ph,canvas_size)) # BNloc,patch_size,canvas_size
if canvasd > 1: # make if C,BNloc,1
F_y = F_y.unsqueeze(0).expand(canvasd,-1,-1,-1).reshape(canvasd*BNloc,ph,canvas_size)
F_x = F_x.unsqueeze(0).expand(canvasd,-1,-1,-1).reshape(canvasd*BNloc,ph,canvas_size)
psel = psel.permute(3,0,1,2).contiguous().view(canvasd*BNloc,ph,ph) #
F_y_t = F_y.transpose(-2, -1) # BNloc,1
w = F_y_t @ psel @ F_x # B*Nloc, canvas_size, canvas_size
w = w.view(canvasd,BNloc,canvas_size,canvas_size).permute(1,2,3,0).contiguous()
w = w.view(BNloc,canvas_size,canvas_size,canvasd)
if return_writted_loc:
writed_loc = F_y_t @ F_x # B*Nloc, canvas_size, canvas_size
writed_loc = writed_loc.view(canvasd,BNloc,canvas_size,canvas_size).permute(1,2,3,0).contiguous()
writed_loc = writed_loc.view(BNloc,canvas_size,canvas_size,canvasd)
return w, writed_loc
return w
| 46.6618 | 130 | 0.60366 |
ca1c3e665c4fb93b9b24cddc99a1d7592e11a07f | 1,534 | py | Python | python/dynamic_programming/coin_change.py | SounakMandal/AlgoBook | 3952cb49ef12f1c00e97e0cf25810170f8585748 | [
"MIT"
] | 191 | 2020-09-28T10:00:20.000Z | 2022-03-06T14:36:55.000Z | python/dynamic_programming/coin_change.py | SounakMandal/AlgoBook | 3952cb49ef12f1c00e97e0cf25810170f8585748 | [
"MIT"
] | 210 | 2020-09-28T10:06:36.000Z | 2022-03-05T03:44:24.000Z | python/dynamic_programming/coin_change.py | SounakMandal/AlgoBook | 3952cb49ef12f1c00e97e0cf25810170f8585748 | [
"MIT"
] | 320 | 2020-09-28T09:56:14.000Z | 2022-02-12T16:45:57.000Z | """
THE COIN CHANGE PROBLEM (SOLVED FROM HACKERRANK- PROBLEM SOLVING)
PROBLEM:
You are working at the cash counter at a fun-fair, and you have different types of coins available
to you in infinite quantities. The value of each coin is already given.
Can you determine the number of ways of making change for a particular number of units using the given types of coins?
For example, if you have 4 types of coins, and the value of each type is given as 8,3,1,2 respectively,
you can make change for 3 units in three ways:{1,1,1} ,{1,2} and {3} .
Input Format
The first line contains two space-separated integers describing the respective values of n and m, where:
n is the number of units
m is the number of coin types
The second line contains m space-separated integers describing the respective values of each coin type :
c=c[0],c[1],c[2]...c[m-1] (the list of distinct coins available in infinite amounts).
Output Format
Print a long integer denoting the number of ways we can get a sum of n
from the given infinite supply of m types of coins
Sample Input 0
4 3
1 2 3
Sample Output 0
4
Explanation 0
There are four ways to make change for N=4 using coins with values given by C=[1,2,3] :
1. {1,1,1}
2. {1,1,2}
3. {2,2}
4. {1,3}
Thus, we print 4 as our answer.
"""
def getWays(n, c):
n_perms = [1]+[0]*n
for coin in c:
for i in range(coin, n+1):
n_perms[i] += n_perms[i-coin]
return n_perms[n]
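# Quick sanity checks for the DP above, using the examples from the problem statement.
assert getWays(4, [1, 2, 3]) == 4       # {1,1,1}, {1,1,2}, {2,2}, {1,3}
assert getWays(3, [8, 3, 1, 2]) == 3    # {1,1,1}, {1,2}, {3}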
n, m = map(int, input().split())
c = list(map(int, input().split()))
print(getWays(n, c)) | 31.306122 | 119 | 0.709257 |
57756ed7ac47dcd7de8ad5bc88ea6813c505120a | 1,388 | py | Python | 03_rabbitmq/doctor.py | wgslr/agh-distributed | ce10efbf24af26bde03cbfe051f01e784b1d4222 | [
"MIT"
] | 1 | 2019-05-28T07:21:16.000Z | 2019-05-28T07:21:16.000Z | 03_rabbitmq/doctor.py | wgslr/agh-distributed | ce10efbf24af26bde03cbfe051f01e784b1d4222 | [
"MIT"
] | null | null | null | 03_rabbitmq/doctor.py | wgslr/agh-distributed | ce10efbf24af26bde03cbfe051f01e784b1d4222 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pika
import sys
import select
import time
from typing import Optional
from interactive_server import InteractiveServer
import common
from common import errprint
def get_injury(line: str) -> Optional[str]:
if not line:
return None
parts = line.split(maxsplit=1)
if len(parts) != 2:
print("Invalid input, discarding. Should be: <injury> <name>")
return None
return parts[0]
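# Example (illustrative): get_injury("burn John Doe") returns "burn";
# get_injury("burn") warns and returns None, as does an empty line.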
class Doctor(InteractiveServer):
def __init__(self):
queues = [
('', self._on_result),
('info', self._on_info)
]
super().__init__(self._handle_line, queues)
def _handle_line(self, line):
injury = get_injury(line)
if injury:
routing_key = 'request.' + injury
message = line
properties = pika.BasicProperties(reply_to=self.auto_queue)
self.channel.basic_publish(exchange=common.EXCHANGE,
routing_key=routing_key,
body=message,
properties=properties)
def _on_result(self, ch, method, properties, body):
print(body.decode())
def _on_info(self, ch, method, properties, body):
print("[INFO] {}".format(body.decode()))
if __name__ == '__main__':
doctor = Doctor()
doctor.start()
| 24.785714 | 71 | 0.585735 |
3d50f6c16e44c15ac564ff8cfaf1329cc7b09720 | 2,573 | py | Python | applications/view/manage/manage.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [
"MIT"
] | null | null | null | applications/view/manage/manage.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [
"MIT"
] | null | null | null | applications/view/manage/manage.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, request
from flask_login import login_required, current_user
from sqlalchemy import desc
from applications.models import Recruitment
from applications.common import curd
from applications.common.curd import model_to_dicts, enable_status, disable_status
from applications.common.helper import ModelFilter
from applications.common.utils.http import table_api, fail_api, success_api
from applications.common.utils.rights import authorize
from applications.common.utils.validate import xss_escape
from applications.extensions import db
from applications.models import AdminLog,Recruitment,Employ
from applications.schemas import recruitmentOutSchema
manage_BP = Blueprint('manage', __name__, url_prefix='/manage')
# Recruitment info review
@manage_BP.get('/')
@authorize("admin:manage:info")
def index():
    # Fetch all records that currently need review
recruitment_list = Recruitment.query.filter(Recruitment.status == 1).all()
print(recruitment_list)
    # Pass them to the front-end template
return render_template("manage/index.html", recruitment_list=recruitment_list)
@manage_BP.get('/postresume')
def post_resume():
    # Fetch the employment relations created by the current user that still need review
pass
# Edit recruitment info
@authorize("admin:manage:info")
@manage_BP.get('/edit/<int:id>')
def edit(id):
    # The incoming id is the record id
recruitment_Update = curd.get_one_by_id(Recruitment, id)
return render_template("manage/edit.html",recruitment_Update=recruitment_Update)
# Update a record
@manage_BP.put('/update')
@authorize("admin:manage:info")
def update():
req_json = request.json
    # Review fields submitted by the client
id = xss_escape(req_json.get("id"))
status = xss_escape(req_json.get("status"))
result = xss_escape(req_json.get("result"))
Recruitment.query.filter_by(id=id).update({'status':status,'result':result})
db.session.commit()
return success_api(msg="更新成功")
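# Illustrative request body for PUT /manage/update (field names follow the code above;
# the concrete values are made up):
#   {"id": 3, "status": 2, "result": "approved"}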
# Paginated query
@manage_BP.get('/data')
@authorize("admin:manage:info")
def data():
    # Read request parameters
info = xss_escape(request.args.get('info', type=str))
remark = xss_escape(request.args.get('remark', type=str))
    # Build the query filter
mf = ModelFilter()
if info:
mf.contains(field_name="info", value=info)
if remark:
mf.contains(field_name="remark", value=remark)
    # ORM query
    # when paginating, the rows live under .items
    #recruitment_ = Recruitment.query.filter(mf.get_filter(model=Recruitment)).layui_paginate()
    # only show records awaiting review
recruitment_ = Recruitment.query.filter(Recruitment.status==1).layui_paginate()
count = recruitment_.total
    # Return the API response
return table_api(data=model_to_dicts(schema=recruitmentOutSchema, data=recruitment_.items), count=count)
| 25.73 | 108 | 0.745433 |
83dc75755dac84466a159c3a4e7170b4ddbaf21d | 7,923 | py | Python | cloudsplaining/scan/principal_detail.py | jhutchings1/cloudsplaining | 7c94658a4f6e087a2d0e1a1b87df668e8eaa6ab6 | [
"BSD-3-Clause"
] | null | null | null | cloudsplaining/scan/principal_detail.py | jhutchings1/cloudsplaining | 7c94658a4f6e087a2d0e1a1b87df668e8eaa6ab6 | [
"BSD-3-Clause"
] | null | null | null | cloudsplaining/scan/principal_detail.py | jhutchings1/cloudsplaining | 7c94658a4f6e087a2d0e1a1b87df668e8eaa6ab6 | [
"BSD-3-Clause"
] | null | null | null | """Objects abstracting UserDetailList, GroupDetailList, or RoleDetailList sections of the output generated by the
command `aws iam get-account-authorization-details`"""
# Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import logging
from policy_sentry.util.arns import get_account_from_arn, get_resource_path_from_arn
from cloudsplaining.shared.exclusions import is_name_excluded
from cloudsplaining.scan.policy_document import PolicyDocument
from cloudsplaining.scan.assume_role_policy_document import AssumeRolePolicyDocument
from cloudsplaining.shared.exclusions import DEFAULT_EXCLUSIONS
logger = logging.getLogger(__name__)
class PrincipalTypeDetails:
"""This covers UserDetailList, GroupDetailList, or RoleDetailList"""
def __init__(self, principal_type_details):
self.principals = []
for principal_detail in principal_type_details:
self.principals.append(PrincipalDetail(principal_detail))
# pylint: disable=too-many-instance-attributes
class PrincipalDetail:
"""
Holds data for individual Principal Entries from the Authz file
"""
def __init__(self, principal_detail):
self.principal_detail = principal_detail
if "UserName" in principal_detail:
self.principal_type = "User"
elif "GroupName" in principal_detail:
self.principal_type = "Group"
elif "RoleName" in principal_detail:
self.principal_type = "Role"
# self.principal_policy_list = self._inline_principal_policies()
self.policy_list = []
self.path = principal_detail.get("Path", None)
self.arn = principal_detail.get("Arn", None)
self.tags = principal_detail.get("Tags", None)
self.create_date = principal_detail.get("CreateDate", None)
# This will only appear for Roles, not Users or Groups
self.assume_role_policy_document = self._assume_role_policy_document()
# self.assume_role_policy_document = principal_detail.get("AssumeRolePolicyDocument", None)
self.attached_managed_policies = principal_detail.get("AttachedManagedPolicies")
self.group_member = []
self.members = []
self.inline_principal_policies = self._inline_principal_policies()
if self.principal_type == "User":
self.name = principal_detail.get("UserName", None)
self.id = principal_detail.get("UserId", None)
self.group_member = principal_detail.get("GroupList", None)
policy_list = principal_detail.get("UserPolicyList", None)
if policy_list:
logger.debug(f"Adding {self.principal_type}: {self.name}")
for policy in policy_list:
logger.debug(
f"\tAdding {policy.get('PolicyName', None)} which is attached to the {self.principal_type} {self.name}"
)
self.policy_list.append(
{
"PolicyName": policy.get("PolicyName", None),
"PolicyDocument": PolicyDocument(
policy.get("PolicyDocument", None)
),
}
)
elif self.principal_type == "Group":
self.name = principal_detail.get("GroupName", None)
self.id = principal_detail.get("GroupId", None)
policy_list = principal_detail.get("GroupPolicyList", None)
if policy_list:
logger.debug(f"Adding {self.principal_type}: {self.name}")
for policy in policy_list:
logger.debug(
f"\tAdding {policy.get('PolicyName', None)} which is attached to the {self.principal_type} {self.name}"
)
self.policy_list.append(
{
"PolicyName": policy.get("PolicyName", None),
"PolicyDocument": PolicyDocument(
policy.get("PolicyDocument", None)
),
}
)
elif self.principal_type == "Role":
self.name = principal_detail.get("RoleName", None)
self.id = principal_detail.get("RoleId", None)
policy_list = principal_detail.get("RolePolicyList", None)
if policy_list:
logger.debug(f"Adding {self.principal_type}: {self.name}")
for policy in policy_list:
logger.debug(
f"\tAdding {policy.get('PolicyName', None)} which is attached to the {self.principal_type} {self.name}"
)
self.policy_list.append(
{
"PolicyName": policy.get("PolicyName", None),
"PolicyDocument": PolicyDocument(
policy.get("PolicyDocument", None)
),
}
)
def _inline_principal_policies(self):
"""Stores the list of policies attached to the Principal."""
inline_principal_policies = []
if self.principal_type == "User":
inline_principal_policies = self.principal_detail.get(
"UserPolicyList", None
)
if self.principal_type == "Group":
inline_principal_policies = self.principal_detail.get(
"GroupPolicyList", None
)
if self.principal_type == "Role":
inline_principal_policies = self.principal_detail.get(
"RolePolicyList", None
)
return inline_principal_policies
def _assume_role_policy_document(self):
"""Set the assume role policy document"""
if self.principal_type == "Role":
this_document = self.principal_detail.get("AssumeRolePolicyDocument", None)
if this_document:
assume_role_policy_document = AssumeRolePolicyDocument(this_document)
return assume_role_policy_document
# For roles, a trust policy is required. I'm simply including an else statement here
# to make Pylint and coverage happy.
else:
return None # pragma: no cover
else:
return None
@property
def assume_role_from_compute(self): # pragma: no cover
"""Parse the Trust Policy and determine if an AWS Compute service (EC2, ECS, EKS, Lambda)
is able to assume the role."""
if self.principal_type == "Role":
return self.assume_role_policy_document
else:
return None
@property
def account_id(self):
"""Return the account ID"""
account_id = get_account_from_arn(self.arn)
return account_id
def is_principal_excluded(self, exclusions=DEFAULT_EXCLUSIONS):
"""According to the exclusions configuration, determine whether or not to skip the Principal according
to their name."""
decision = False
name = get_resource_path_from_arn(self.arn)
if self.principal_type == "User":
if is_name_excluded(name, exclusions.users):
print(f"\tExcluded user: {name}")
decision = True
if self.principal_type == "Group":
if is_name_excluded(name, exclusions.groups):
print(f"\tExcluded group: {name}")
decision = True
if self.principal_type == "Role":
if is_name_excluded(name, exclusions.roles):
print(f"\tExcluded role: {name}")
decision = True
return decision
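# Illustrative sketch (names are made up): if the exclusions config lists "ci-user"
# under users, the PrincipalDetail built for that IAM user reports
#   principal.is_principal_excluded(exclusions)  # -> True
# and the scan prints "Excluded user: ci-user" and skips it.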
| 45.017045 | 127 | 0.598511 |
5580ba1d70a7b2884c87ced02d6686dc5f1d8193 | 2,304 | py | Python | .tox/scenario/lib/python2.7/site-packages/oslo_reports/views/json/generic.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | 21 | 2015-06-23T08:06:36.000Z | 2021-04-24T19:16:46.000Z | .tox/scenario/lib/python2.7/site-packages/oslo_reports/views/json/generic.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/oslo_reports/views/json/generic.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | 7 | 2015-07-02T13:48:34.000Z | 2020-02-04T15:51:39.000Z | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides generic JSON views
This modules defines several basic views for serializing
data to JSON. Submodels that have already been serialized
as JSON may have their string values marked with `__is_json__
= True` using :class:`oslo_reports._utils.StringWithAttrs`
(each of the classes within this module does this automatically,
and non-naive serializers check for this attribute and handle
such strings specially)
"""
import copy
from oslo_serialization import jsonutils as json
from oslo_reports import _utils as utils
class BasicKeyValueView(object):
"""A Basic Key-Value JSON View
This view performs a naive serialization of a model
into JSON by simply calling :func:`json.dumps` on the model
"""
def __call__(self, model):
res = utils.StringWithAttrs(json.dumps(model.data, sort_keys=True))
res.__is_json__ = True
return res
class KeyValueView(object):
"""A Key-Value JSON View
This view performs advanced serialization to a model
into JSON. It does so by first checking all values to
see if they are marked as JSON. If so, they are deserialized
using :func:`json.loads`. Then, the copy of the model with all
JSON deserialized is reserialized into proper nested JSON using
:func:`json.dumps`.
"""
def __call__(self, model):
# this part deals with subviews that were already serialized
cpy = copy.deepcopy(model)
for key in model.keys():
if getattr(model[key], '__is_json__', False):
cpy[key] = json.loads(model[key])
res = utils.StringWithAttrs(json.dumps(cpy.data, sort_keys=True))
res.__is_json__ = True
return res
| 34.38806 | 78 | 0.711806 |
68a3e341fa10593cd76664fe28bb40ad78aec2ff | 41,064 | py | Python | pytorch_translate/ensemble_export.py | batikim09/translate | ae275549c414a68af783a87cb2eacc303a0553b0 | [
"BSD-3-Clause"
] | null | null | null | pytorch_translate/ensemble_export.py | batikim09/translate | ae275549c414a68af783a87cb2eacc303a0553b0 | [
"BSD-3-Clause"
] | null | null | null | pytorch_translate/ensemble_export.py | batikim09/translate | ae275549c414a68af783a87cb2eacc303a0553b0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import logging
import os
import tempfile
import numpy as np
import onnx
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
from caffe2.python import core, workspace
from caffe2.python.onnx import backend as caffe2_backend
from caffe2.python.predictor import predictor_exporter
from fairseq import utils
from pytorch_translate.word_prediction import word_prediction_model
from torch.onnx import ExportTypes, OperatorExportTypes
from pytorch_translate import ( # noqa; noqa
char_source_model,
dictionary,
rnn,
tasks,
transformer,
)
logger = logging.getLogger(__name__)
def onnx_export_ensemble(module, output_path, input_tuple, input_names, output_names):
# include parameters as inputs of exported graph
for name, _ in module.named_parameters():
input_names.append(name)
with open(output_path, "w+b") as netdef_file:
torch.onnx._export(
module,
input_tuple,
netdef_file,
verbose=False,
input_names=input_names,
output_names=output_names,
export_type=ExportTypes.ZIP_ARCHIVE,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
)
def load_models_from_checkpoints(
checkpoint_filenames, src_dict_filename, dst_dict_filename, lexical_dict_paths=None
):
src_dict = dictionary.Dictionary.load(src_dict_filename)
dst_dict = dictionary.Dictionary.load(dst_dict_filename)
models = []
for filename in checkpoint_filenames:
checkpoint_data = torch.load(filename, map_location="cpu")
if lexical_dict_paths is not None:
assert (
checkpoint_data["args"].vocab_reduction_params is not None
), "lexical dictionaries can only be replaced in vocab-reduction models"
checkpoint_data["args"].vocab_reduction_params[
"lexical_dictionaries"
] = lexical_dict_paths
task = tasks.DictionaryHolderTask(src_dict, dst_dict)
architecture = checkpoint_data["args"].arch
if architecture == "rnn":
model = rnn.RNNModel.build_model(checkpoint_data["args"], task)
elif architecture == "char_source":
model = char_source_model.CharSourceModel.build_model(
checkpoint_data["args"], task
)
elif architecture == "rnn_word_pred":
model = word_prediction_model.RNNWordPredictionModel.build_model(
checkpoint_data["args"], task
)
elif architecture == "ptt_transformer":
model = transformer.TransformerModel.build_model(
checkpoint_data["args"], task
)
else:
raise RuntimeError("Architecture not supported: {architecture}")
model.load_state_dict(checkpoint_data["model"])
models.append(model)
return models, src_dict, dst_dict
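# Illustrative usage (file names are placeholders, not shipped with the repo):
#   models, src_dict, dst_dict = load_models_from_checkpoints(
#       ["checkpoint_best.pt"], "dict.src.txt", "dict.tgt.txt"
#   )
#   encoder_ensemble = EncoderEnsemble(models, src_dict=src_dict)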
def save_caffe2_rep_to_db(
caffe2_backend_rep, output_path, input_names, output_names, num_workers
):
# netdef external_input includes internally produced blobs
actual_external_inputs = set()
produced = set()
for operator in caffe2_backend_rep.predict_net.op:
for blob in operator.input:
if blob not in produced:
actual_external_inputs.add(blob)
for blob in operator.output:
produced.add(blob)
for blob in output_names:
if blob not in produced:
actual_external_inputs.add(blob)
param_names = [blob for blob in actual_external_inputs if blob not in input_names]
init_net = core.Net(caffe2_backend_rep.init_net)
predict_net = core.Net(caffe2_backend_rep.predict_net)
# predictor_exporter requires disjoint params, inputs and outputs
for i, param in enumerate(param_names):
if param in output_names:
saved_name = param + "_PARAM"
init_net.Copy(param, saved_name)
predict_net.Copy(saved_name, param)
param_names[i] = saved_name
output_shapes = {}
for blob in output_names:
output_shapes[blob] = (0,)
# Required because of https://github.com/pytorch/pytorch/pull/6456/files
with caffe2_backend_rep.workspace._ctx:
workspace.RunNetOnce(init_net)
predictor_export_meta = predictor_exporter.PredictorExportMeta(
predict_net=predict_net,
parameters=param_names,
inputs=input_names,
outputs=output_names,
shapes=output_shapes,
net_type="dag",
num_workers=num_workers,
)
predictor_exporter.save_to_db(
db_type="minidb",
db_destination=output_path,
predictor_export_meta=predictor_export_meta,
)
logger.info(f"Caffe2 predictor net saved as: {output_path}")
class EncoderEnsemble(nn.Module):
def __init__(self, models, src_dict=None):
super().__init__()
self.models = models
self.src_dict = src_dict
for i, model in enumerate(self.models):
model.prepare_for_onnx_export_()
self._modules[f"model_{i}"] = model
def forward(self, src_tokens, src_lengths):
outputs = []
output_names = []
states = []
# (seq_length, batch_size) for compatibility with Caffe2
src_tokens_seq_first = src_tokens.t()
for i, model in enumerate(self.models):
# evaluation mode
model.eval()
encoder_out = model.encoder(src_tokens_seq_first, src_lengths)
# evaluation mode
model.eval()
# "primary" encoder output (vector representations per source token)
encoder_outputs = encoder_out[0]
outputs.append(encoder_outputs)
output_names.append(f"encoder_output_{i}")
if hasattr(model.decoder, "_init_prev_states"):
states.extend(model.decoder._init_prev_states(encoder_out))
# underlying assumption is each model has same vocab_reduction_module
vocab_reduction_module = self.models[0].decoder.vocab_reduction_module
if vocab_reduction_module is not None:
possible_translation_tokens = vocab_reduction_module(
src_tokens=src_tokens, decoder_input_tokens=None
)
outputs.append(possible_translation_tokens)
output_names.append("possible_translation_tokens")
for i, state in enumerate(states):
outputs.append(state)
output_names.append(f"initial_state_{i}")
self.output_names = output_names
return tuple(outputs)
def onnx_export(self, output_path):
# The discrepancy in types here is a temporary expedient.
# PyTorch indexing requires int64 while support for tracing
# pack_padded_sequence() requires int32.
length = 5
src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
# generate output names
self.forward(src_tokens, src_lengths)
onnx_export_ensemble(
module=self,
output_path=output_path,
input_tuple=(src_tokens, src_lengths),
input_names=["encoder_inputs", "encoder_lengths"],
output_names=self.output_names,
)
def save_to_db(self, output_path):
"""
Save encapsulated encoder export file.
"""
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, "encoder.pb")
self.onnx_export(tmp_file)
onnx_encoder = caffe2_backend.prepare_zip_archive(tmp_file)
save_caffe2_rep_to_db(
caffe2_backend_rep=onnx_encoder,
output_path=output_path,
input_names=["encoder_inputs", "encoder_lengths"],
output_names=self.output_names,
num_workers=2 * len(self.models),
)
@classmethod
def build_from_checkpoints(
cls,
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths=None,
):
models, src_dict, _ = load_models_from_checkpoints(
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths,
)
return cls(models, src_dict=src_dict)
class DecoderBatchedStepEnsemble(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size,
word_reward=0,
unk_reward=0,
tile_internal=False,
):
super().__init__()
self.models = models
for i, model in enumerate(self.models):
model.prepare_for_onnx_export_()
self._modules[f"model_{i}"] = model
self.tgt_dict = tgt_dict
self.beam_size = beam_size
self.word_reward = word_reward
self.unk_reward = unk_reward
vocab_size = len(tgt_dict.indices)
self.word_rewards = torch.FloatTensor(vocab_size).fill_(word_reward)
self.word_rewards[tgt_dict.eos()] = 0
self.word_rewards[tgt_dict.unk()] = word_reward + unk_reward
self.tile_internal = tile_internal
def forward(self, input_tokens, prev_scores, timestep, *inputs):
"""
Decoder step inputs correspond one-to-one to encoder outputs.
HOWEVER: after the first step, encoder outputs (i.e, the first
len(self.models) elements of inputs) must be tiled k (beam size)
times on the batch dimension (axis 1).
"""
log_probs_per_model = []
attn_weights_per_model = []
state_outputs = []
beam_axis_per_state = []
# from flat to (batch x 1)
input_tokens = input_tokens.unsqueeze(1)
next_state_input = len(self.models)
# size of "batch" dimension of input as tensor
batch_size = torch.onnx.operators.shape_as_tensor(input_tokens)[0]
# underlying assumption is each model has same vocab_reduction_module
vocab_reduction_module = self.models[0].decoder.vocab_reduction_module
if vocab_reduction_module is not None:
possible_translation_tokens = inputs[len(self.models)]
next_state_input += 1
else:
possible_translation_tokens = None
for i, model in enumerate(self.models):
if (
isinstance(model, rnn.RNNModel)
or isinstance(model, char_source_model.CharSourceModel)
or isinstance(model, word_prediction_model.WordPredictionModel)
):
encoder_output = inputs[i]
prev_hiddens = []
prev_cells = []
for _ in range(len(model.decoder.layers)):
prev_hiddens.append(inputs[next_state_input])
prev_cells.append(inputs[next_state_input + 1])
next_state_input += 2
# ensure previous attention context has batch dimension
input_feed_shape = torch.cat(
(batch_size.view(1), torch.LongTensor([-1]))
)
prev_input_feed = torch.onnx.operators.reshape_from_tensor_shape(
inputs[next_state_input], input_feed_shape
)
next_state_input += 1
# no batching, we only care about care about "max" length
src_length_int = int(encoder_output.size()[0])
src_length = torch.LongTensor(np.array([src_length_int]))
# notional, not actually used for decoder computation
src_tokens = torch.LongTensor(np.array([[0] * src_length_int]))
src_embeddings = encoder_output.new_zeros(encoder_output.shape)
encoder_out = (
encoder_output,
prev_hiddens,
prev_cells,
src_length,
src_tokens,
src_embeddings,
)
# store cached states, use evaluation mode
model.decoder._is_incremental_eval = True
model.eval()
# placeholder
incremental_state = {}
# cache previous state inputs
utils.set_incremental_state(
model.decoder,
incremental_state,
"cached_state",
(prev_hiddens, prev_cells, prev_input_feed),
)
decoder_output = model.decoder(
input_tokens,
encoder_out,
incremental_state=incremental_state,
possible_translation_tokens=possible_translation_tokens,
)
logits, attn_scores, _ = decoder_output
log_probs = F.log_softmax(logits, dim=2)
log_probs_per_model.append(log_probs)
attn_weights_per_model.append(attn_scores)
(
next_hiddens,
next_cells,
next_input_feed,
) = utils.get_incremental_state(
model.decoder, incremental_state, "cached_state"
)
for h, c in zip(next_hiddens, next_cells):
state_outputs.extend([h, c])
beam_axis_per_state.extend([0, 0])
state_outputs.append(next_input_feed)
beam_axis_per_state.append(0)
elif isinstance(model, transformer.TransformerModel):
encoder_output = inputs[i]
# store cached states, use evaluation mode
model.decoder._is_incremental_eval = True
model.eval()
# placeholder
incremental_state = {}
state_inputs = []
for _ in model.decoder.layers:
# (prev_key, prev_value) for self- and encoder-attention
state_inputs.extend(inputs[next_state_input : next_state_input + 4])
next_state_input += 4
encoder_out = (encoder_output, None, None)
decoder_output = model.decoder(
input_tokens,
encoder_out,
incremental_state=state_inputs,
possible_translation_tokens=possible_translation_tokens,
timestep=timestep,
)
logits, attn_scores, _, attention_states = decoder_output
log_probs = F.log_softmax(logits, dim=2)
log_probs_per_model.append(log_probs)
attn_weights_per_model.append(attn_scores)
state_outputs.extend(attention_states)
beam_axis_per_state.extend([1 for _ in attention_states])
else:
raise RuntimeError(f"Not a supported model: {type(model)}")
average_log_probs = torch.mean(
torch.cat(log_probs_per_model, dim=1), dim=1, keepdim=True
)
average_attn_weights = torch.mean(
torch.cat(attn_weights_per_model, dim=1), dim=1, keepdim=True
)
best_scores_k_by_k, best_tokens_k_by_k = torch.topk(
average_log_probs.squeeze(1), k=self.beam_size
)
prev_scores_k_by_k = prev_scores.view(-1, 1).expand(-1, self.beam_size)
total_scores_k_by_k = best_scores_k_by_k + prev_scores_k_by_k
# flatten to take top k over all (beam x beam) hypos
total_scores_flat = total_scores_k_by_k.view(-1)
best_tokens_flat = best_tokens_k_by_k.view(-1)
best_scores, best_indices = torch.topk(total_scores_flat, k=self.beam_size)
best_tokens = best_tokens_flat.index_select(dim=0, index=best_indices).view(-1)
# integer division to determine which input produced each successor
prev_hypos = best_indices / self.beam_size
attention_weights = average_attn_weights.index_select(dim=0, index=prev_hypos)
if possible_translation_tokens is not None:
best_tokens = possible_translation_tokens.index_select(
dim=0, index=best_tokens
)
word_rewards_for_best_tokens = self.word_rewards.index_select(0, best_tokens)
best_scores += word_rewards_for_best_tokens
self.input_names = ["prev_tokens", "prev_scores", "timestep"]
for i in range(len(self.models)):
self.input_names.append(f"fixed_input_{i}")
if possible_translation_tokens is not None:
self.input_names.append("possible_translation_tokens")
# 'attention_weights_average' output shape: (src_length x beam_size)
attention_weights = attention_weights.squeeze(1)
outputs = [best_tokens, best_scores, prev_hypos, attention_weights]
self.output_names = [
"best_tokens_indices",
"best_scores",
"prev_hypos_indices",
"attention_weights_average",
]
for i in range(len(self.models)):
self.output_names.append(f"fixed_input_{i}")
if self.tile_internal:
outputs.append(inputs[i].repeat(1, self.beam_size, 1))
else:
outputs.append(inputs[i])
if possible_translation_tokens is not None:
self.output_names.append("possible_translation_tokens")
outputs.append(possible_translation_tokens)
for i, state in enumerate(state_outputs):
beam_axis = beam_axis_per_state[i]
next_state = state.index_select(dim=beam_axis, index=prev_hypos)
outputs.append(next_state)
self.output_names.append(f"state_output_{i}")
self.input_names.append(f"state_input_{i}")
return tuple(outputs)
def onnx_export(self, output_path, encoder_ensemble_outputs):
# single EOS (as flat array)
input_token = torch.LongTensor(np.array([self.tgt_dict.eos()]))
prev_scores = torch.FloatTensor(np.array([0.0]))
timestep = torch.LongTensor(np.array([0]))
# generate input and output names
self.forward(input_token, prev_scores, timestep, *encoder_ensemble_outputs)
onnx_export_ensemble(
module=self,
output_path=output_path,
input_tuple=tuple(
[input_token, prev_scores, timestep] + list(encoder_ensemble_outputs)
),
input_names=self.input_names,
output_names=self.output_names,
)
@classmethod
def build_from_checkpoints(
cls,
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
beam_size,
word_reward=0,
unk_reward=0,
lexical_dict_paths=None,
):
models, _, tgt_dict = load_models_from_checkpoints(
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths,
)
return cls(
models,
tgt_dict,
beam_size=beam_size,
word_reward=word_reward,
unk_reward=unk_reward,
)
def save_to_db(self, output_path, encoder_ensemble_outputs):
"""
Save encapsulated decoder step export file.
Example encoder_ensemble_outputs (PyTorch tensors) from corresponding
encoder are necessary to run through network once.
"""
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, "decoder_step.pb")
self.onnx_export(tmp_file, encoder_ensemble_outputs)
onnx_decoder_step = caffe2_backend.prepare_zip_archive(tmp_file)
save_caffe2_rep_to_db(
caffe2_backend_rep=onnx_decoder_step,
output_path=output_path,
input_names=self.input_names,
output_names=self.output_names,
num_workers=2 * len(self.models),
)
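# Small numeric sketch of the beam bookkeeping in DecoderBatchedStepEnsemble.forward
# above (beam_size = 2, scores made up):
#   total_scores_k_by_k = torch.tensor([[0.5, 0.1],
#                                       [0.4, 0.3]])    # (beam x beam)
#   flat = total_scores_k_by_k.view(-1)                 # [0.5, 0.1, 0.4, 0.3]
#   best_scores, best_indices = torch.topk(flat, k=2)   # [0.5, 0.4], indices [0, 2]
#   prev_hypos = best_indices // 2                      # [0, 1]
# so hypothesis 0 descends from beam 0 and hypothesis 1 from beam 1, which is the
# index used to gather the matching decoder states for the next step.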
class BeamSearch(torch.jit.ScriptModule):
__constants__ = ["beam_size"]
def __init__(
self,
model_list,
tgt_dict,
src_tokens,
src_lengths,
beam_size=1,
word_reward=0,
unk_reward=0,
):
super().__init__()
self.models = model_list
self.tgt_dict = tgt_dict
self.beam_size = beam_size
self.word_reward = word_reward
self.unk_reward = unk_reward
encoder_ens = EncoderEnsemble(self.models)
example_encoder_outs = encoder_ens(src_tokens, src_lengths)
self.encoder_ens = torch.jit.trace(encoder_ens, (src_tokens, src_lengths))
decoder_ens = DecoderBatchedStepEnsemble(
self.models,
tgt_dict,
beam_size,
word_reward,
unk_reward,
tile_internal=False,
)
decoder_ens_tile = DecoderBatchedStepEnsemble(
self.models,
tgt_dict,
beam_size,
word_reward,
unk_reward,
tile_internal=True,
)
prev_token = torch.LongTensor([0])
prev_scores = torch.FloatTensor([0.0])
ts = torch.LongTensor([0])
_, _, _, _, *tiled_states = decoder_ens_tile(
prev_token, prev_scores, ts, *example_encoder_outs
)
self.decoder_ens_tile = torch.jit.trace(
decoder_ens_tile, (prev_token, prev_scores, ts, *example_encoder_outs)
)
self.decoder_ens = torch.jit.trace(
decoder_ens,
(
prev_token.repeat(self.beam_size),
prev_scores.repeat(self.beam_size),
ts,
*tiled_states,
),
)
self.input_names = [
"src_tokens",
"src_lengths",
"prev_token",
"prev_scores",
"attn_weights",
"prev_hypos_indices",
"num_steps",
]
self.output_names = [
"all_tokens",
"all_scores",
"all_weights",
"all_prev_indices",
]
@torch.jit.script_method
def forward(
self,
src_tokens: torch.Tensor,
src_lengths: torch.Tensor,
prev_token: torch.Tensor,
prev_scores: torch.Tensor,
attn_weights: torch.Tensor,
prev_hypos_indices: torch.Tensor,
num_steps: int,
):
enc_states = self.encoder_ens(src_tokens, src_lengths)
all_tokens = prev_token.repeat(repeats=[self.beam_size]).unsqueeze(dim=0)
all_scores = prev_scores.repeat(repeats=[self.beam_size]).unsqueeze(dim=0)
all_weights = (
attn_weights.unsqueeze(dim=0)
.repeat(repeats=[self.beam_size, 1])
.unsqueeze(dim=0)
)
all_prev_indices = prev_hypos_indices.unsqueeze(dim=0)
prev_token, prev_scores, prev_hypos_indices, attn_weights, *states = self.decoder_ens_tile(
prev_token, prev_scores, _to_tensor(0), *enc_states # noqa
)
all_tokens = torch.cat((all_tokens, prev_token.unsqueeze(dim=0)), dim=0)
all_scores = torch.cat((all_scores, prev_scores.unsqueeze(dim=0)), dim=0)
all_weights = torch.cat((all_weights, attn_weights.unsqueeze(dim=0)), dim=0)
all_prev_indices = torch.cat(
(all_prev_indices, prev_hypos_indices.unsqueeze(dim=0)), dim=0
)
for i in range(num_steps - 1):
(
prev_token,
prev_scores,
prev_hypos_indices,
attn_weights,
*states,
) = self.decoder_ens(
prev_token, prev_scores, _to_tensor(i + 1), *states # noqa
)
all_tokens = torch.cat((all_tokens, prev_token.unsqueeze(dim=0)), dim=0)
all_scores = torch.cat((all_scores, prev_scores.unsqueeze(dim=0)), dim=0)
all_weights = torch.cat((all_weights, attn_weights.unsqueeze(dim=0)), dim=0)
all_prev_indices = torch.cat(
(all_prev_indices, prev_hypos_indices.unsqueeze(dim=0)), dim=0
)
return all_tokens, all_scores, all_weights, all_prev_indices
def onnx_export(self, output_path):
length = 10
src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
prev_token = torch.LongTensor([self.tgt_dict.eos()])
prev_scores = torch.FloatTensor([0.0])
attn_weights = torch.zeros(length)
prev_hypos_indices = torch.zeros(self.beam_size, dtype=torch.int64)
num_steps = torch.LongTensor([20])
input_tuple = (
src_tokens,
src_lengths,
prev_token,
prev_scores,
attn_weights,
prev_hypos_indices,
num_steps,
)
example_outputs = self.forward(*input_tuple)
with open(output_path, "w+b") as netdef_file:
torch.onnx._export(
self,
input_tuple,
netdef_file,
verbose=False,
input_names=self.input_names,
output_names=self.output_names,
example_outputs=example_outputs,
export_type=ExportTypes.ZIP_ARCHIVE,
)
@classmethod
def build_from_checkpoints(
cls,
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
beam_size,
word_reward=0,
unk_reward=0,
lexical_dict_paths=None,
):
length = 10
models, _, tgt_dict = load_models_from_checkpoints(
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths,
)
src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
return cls(
models,
tgt_dict,
src_tokens,
src_lengths,
beam_size=beam_size,
word_reward=word_reward,
unk_reward=unk_reward,
)
def save_to_db(self, output_path):
"""
Save encapsulated beam search.
"""
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, "beam_search.pb")
self.onnx_export(tmp_file)
beam_search = caffe2_backend.prepare_zip_archive(tmp_file, no_check_UNSAFE=True)
save_caffe2_rep_to_db(
caffe2_backend_rep=beam_search,
output_path=output_path,
input_names=self.input_names,
output_names=self.output_names,
num_workers=2 * len(self.models),
)
class KnownOutputDecoderStepEnsemble(nn.Module):
def __init__(self, models, tgt_dict, word_reward=0, unk_reward=0):
super().__init__()
self.models = models
self.tgt_dict = tgt_dict
for i, model in enumerate(self.models):
model.prepare_for_onnx_export_()
self._modules[f"model_{i}"] = model
self.word_reward = word_reward
self.unk_reward = unk_reward
vocab_size = len(tgt_dict.indices)
self.word_rewards = torch.FloatTensor(vocab_size).fill_(word_reward)
self.word_rewards[tgt_dict.eos()] = 0
self.word_rewards[tgt_dict.unk()] = word_reward + unk_reward
self.vocab_size = vocab_size
self.unk_token = tgt_dict.unk()
def forward(self, input_token, target_token, timestep, *inputs):
"""
Decoder step inputs correspond one-to-one to encoder outputs.
"""
log_probs_per_model = []
state_outputs = []
next_state_input = len(self.models)
# underlying assumption is each model has same vocab_reduction_module
vocab_reduction_module = self.models[0].decoder.vocab_reduction_module
if vocab_reduction_module is not None:
possible_translation_tokens = inputs[len(self.models)]
next_state_input += 1
else:
possible_translation_tokens = None
for i, model in enumerate(self.models):
encoder_output = inputs[i]
prev_hiddens = []
prev_cells = []
for _ in range(len(model.decoder.layers)):
prev_hiddens.append(inputs[next_state_input])
prev_cells.append(inputs[next_state_input + 1])
next_state_input += 2
prev_input_feed = inputs[next_state_input].view(1, -1)
next_state_input += 1
# no batching, we only care about care about "max" length
src_length_int = int(encoder_output.size()[0])
src_length = torch.LongTensor(np.array([src_length_int]))
# notional, not actually used for decoder computation
src_tokens = torch.LongTensor(np.array([[0] * src_length_int]))
src_embeddings = encoder_output.new_zeros(encoder_output.shape)
encoder_out = (
encoder_output,
prev_hiddens,
prev_cells,
src_length,
src_tokens,
src_embeddings,
)
# store cached states, use evaluation mode
model.decoder._is_incremental_eval = True
model.eval()
# placeholder
incremental_state = {}
# cache previous state inputs
utils.set_incremental_state(
model.decoder,
incremental_state,
"cached_state",
(prev_hiddens, prev_cells, prev_input_feed),
)
decoder_output = model.decoder(
input_token.view(1, 1),
encoder_out,
incremental_state=incremental_state,
possible_translation_tokens=possible_translation_tokens,
)
logits, _, _ = decoder_output
log_probs = F.log_softmax(logits, dim=2)
log_probs_per_model.append(log_probs)
(next_hiddens, next_cells, next_input_feed) = utils.get_incremental_state(
model.decoder, incremental_state, "cached_state"
)
for h, c in zip(next_hiddens, next_cells):
state_outputs.extend([h, c])
state_outputs.append(next_input_feed)
average_log_probs = torch.mean(
torch.cat(log_probs_per_model, dim=0), dim=0, keepdim=True
)
if possible_translation_tokens is not None:
reduced_indices = torch.zeros(self.vocab_size).long().fill_(self.unk_token)
# ONNX-exportable arange (ATen op)
possible_translation_token_range = torch._dim_arange(
like=possible_translation_tokens, dim=0
)
reduced_indices[
possible_translation_tokens
] = possible_translation_token_range
reduced_index = reduced_indices.index_select(dim=0, index=target_token)
score = average_log_probs.view((-1,)).index_select(
dim=0, index=reduced_index
)
else:
score = average_log_probs.view((-1,)).index_select(
dim=0, index=target_token
)
word_reward = self.word_rewards.index_select(0, target_token)
score += word_reward
self.input_names = ["prev_token", "target_token", "timestep"]
for i in range(len(self.models)):
self.input_names.append(f"fixed_input_{i}")
if possible_translation_tokens is not None:
self.input_names.append("possible_translation_tokens")
outputs = [score]
self.output_names = ["score"]
for i in range(len(self.models)):
self.output_names.append(f"fixed_input_{i}")
outputs.append(inputs[i])
if possible_translation_tokens is not None:
self.output_names.append("possible_translation_tokens")
outputs.append(possible_translation_tokens)
for i, state in enumerate(state_outputs):
outputs.append(state)
self.output_names.append(f"state_output_{i}")
self.input_names.append(f"state_input_{i}")
return tuple(outputs)
class ForcedDecoder(torch.jit.ScriptModule):
def __init__(self, model_list, tgt_dict, word_reward=0, unk_reward=0):
super().__init__()
self.models = model_list
self.tgt_dict = tgt_dict
self.word_reward = word_reward
self.unk_reward = unk_reward
source_tokens = torch.LongTensor(np.ones((5, 1), dtype="int64"))
source_length = torch.LongTensor([5])
encoder_ens = EncoderEnsemble(self.models)
example_encoder_outs = encoder_ens(source_tokens, source_length)
self.encoder_ens = torch.jit.trace(encoder_ens, (source_tokens, source_length))
decoder_ens = KnownOutputDecoderStepEnsemble(
self.models, tgt_dict, word_reward, unk_reward
)
prev_token = torch.LongTensor([0])
target_token = torch.LongTensor([0])
ts = torch.LongTensor([0])
_, *states = decoder_ens(prev_token, target_token, ts, *example_encoder_outs)
self.decoder_ens = torch.jit.trace(
decoder_ens, (prev_token, target_token, ts, *example_encoder_outs)
)
self.input_names = [
"source_tokens",
"source_length",
"target_tokens",
"target_length",
"eos_token",
"zero",
]
self.output_names = ["score"]
@torch.jit.script_method
def forward(
self,
source_tokens,
source_length,
target_tokens,
target_length,
eos_token,
zero,
):
# EncoderEnsemble expects tokens in sequence_length-first shape
source_tokens = source_tokens.view((-1, 1))
states = self.encoder_ens(source_tokens, source_length)
target_tokens = target_tokens.view((1, -1))
eos_token = eos_token.view((1, 1))
input_tokens = torch.cat([eos_token, target_tokens], dim=1)
output_tokens = torch.cat([target_tokens, eos_token], dim=1)
num_steps = int(target_length + 1)
score = zero
for i in range(num_steps):
# Lint error expected (see @jamesreed's comment on D9021140)
index_t = _to_tensor(i) # noqa F821
(step_score, *states) = self.decoder_ens(
input_tokens.index_select(dim=1, index=index_t).view((1, 1)),
output_tokens.index_select(dim=1, index=index_t).view((1,)),
index_t,
*states,
)
score += step_score
return score
def onnx_export(self, output_path):
source_tokens = torch.LongTensor(np.ones((1, 5), dtype="int64"))
source_length = torch.LongTensor([5])
target_tokens = torch.LongTensor(np.ones((1, 7), dtype="int64"))
target_length = torch.LongTensor([7])
eos_token = torch.LongTensor([[self.tgt_dict.eos()]])
zero = torch.FloatTensor([0.0])
input_tuple = (
source_tokens,
source_length,
target_tokens,
target_length,
eos_token,
zero,
)
example_outputs = self.forward(*input_tuple)
with open(output_path, "w+b") as netdef_file:
torch.onnx._export(
self,
input_tuple,
netdef_file,
verbose=False,
input_names=self.input_names,
output_names=self.output_names,
example_outputs=example_outputs,
export_type=ExportTypes.ZIP_ARCHIVE,
)
@classmethod
def build_from_checkpoints(
cls,
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
word_reward=0,
unk_reward=0,
lexical_dict_paths=None,
):
models, _, tgt_dict = load_models_from_checkpoints(
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths,
)
return cls(models, tgt_dict, word_reward=word_reward, unk_reward=unk_reward)
def save_to_db(self, output_path):
"""
Save encapsulated forced decoder.
"""
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, "forced_decoder.pb")
self.onnx_export(tmp_file)
forced_decoder = caffe2_backend.prepare_zip_archive(tmp_file)
save_caffe2_rep_to_db(
caffe2_backend_rep=forced_decoder,
output_path=output_path,
input_names=self.input_names,
output_names=self.output_names,
num_workers=2 * len(self.models),
)
class CharSourceEncoderEnsemble(nn.Module):
def __init__(self, models, src_dict=None):
super().__init__()
self.models = models
self.src_dict = src_dict
for i, model in enumerate(self.models):
model.prepare_for_onnx_export_()
self._modules[f"model_{i}"] = model
def forward(self, src_tokens, src_lengths, char_inds, word_lengths):
outputs = []
output_names = []
states = []
# (seq_length, batch_size) for compatibility with Caffe2
src_tokens_seq_first = src_tokens.t()
for i, model in enumerate(self.models):
# evaluation mode
model.eval()
encoder_out = model.encoder(
src_tokens_seq_first, src_lengths, char_inds, word_lengths
)
# evaluation mode
model.eval()
# "primary" encoder output (vector representations per source token)
encoder_outputs = encoder_out[0]
outputs.append(encoder_outputs)
output_names.append(f"encoder_output_{i}")
if hasattr(model.decoder, "_init_prev_states"):
states.extend(model.decoder._init_prev_states(encoder_out))
# underlying assumption is each model has same vocab_reduction_module
vocab_reduction_module = self.models[0].decoder.vocab_reduction_module
if vocab_reduction_module is not None:
possible_translation_tokens = vocab_reduction_module(
src_tokens=src_tokens, decoder_input_tokens=None
)
outputs.append(possible_translation_tokens)
output_names.append("possible_translation_tokens")
for i, state in enumerate(states):
outputs.append(state)
output_names.append(f"initial_state_{i}")
self.output_names = output_names
return tuple(outputs)
def onnx_export(self, output_path):
# The discrepancy in types here is a temporary expedient.
# PyTorch indexing requires int64 while support for tracing
# pack_padded_sequence() requires int32.
length = 5
src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
word_length = 3
char_inds = torch.LongTensor(np.ones((1, length, word_length), dtype="int64"))
word_lengths = torch.IntTensor(
np.array([word_length] * length, dtype="int32")
).reshape((1, length))
# generate output names
self.forward(src_tokens, src_lengths, char_inds, word_lengths)
onnx_export_ensemble(
module=self,
output_path=output_path,
input_tuple=(src_tokens, src_lengths, char_inds, word_lengths),
input_names=["src_tokens", "src_lengths", "char_inds", "word_lengths"],
output_names=self.output_names,
)
@classmethod
def build_from_checkpoints(
cls,
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths=None,
):
models, src_dict, _ = load_models_from_checkpoints(
checkpoint_filenames,
src_dict_filename,
dst_dict_filename,
lexical_dict_paths,
)
return cls(models, src_dict=src_dict)
def save_to_db(self, output_path):
"""
Save encapsulated encoder export file.
"""
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, "encoder.pb")
self.onnx_export(tmp_file)
onnx_encoder = caffe2_backend.prepare_zip_archive(tmp_file)
save_caffe2_rep_to_db(
caffe2_backend_rep=onnx_encoder,
output_path=output_path,
input_names=["src_tokens", "src_lengths", "char_inds", "word_lengths"],
output_names=self.output_names,
num_workers=2 * len(self.models),
)
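# A minimal usage sketch (hypothetical checkpoint/dictionary paths; only the
# classmethods and save_to_db defined above are assumed):
#   encoder = CharSourceEncoderEnsemble.build_from_checkpoints(
#       checkpoint_filenames=["model.ckpt"],
#       src_dict_filename="src.dict",
#       dst_dict_filename="dst.dict",
#   )
#   encoder.save_to_db("/tmp/char_encoder.db")
# ForcedDecoder.build_from_checkpoints follows the same pattern, adding
# word_reward/unk_reward for scoring a fixed target sequence.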
| 35.067464 | 99 | 0.606833 |
0e75f1e7d82eafae6bb7f3bed3f5b4c96adde373 | 1,154 | py | Python | lv3/integer_triangle.py | 5joon2/algorithm_programmers | 72f04ad610e1700f13d0657fffb4ab952fefa047 | [
"MIT"
] | null | null | null | lv3/integer_triangle.py | 5joon2/algorithm_programmers | 72f04ad610e1700f13d0657fffb4ab952fefa047 | [
"MIT"
] | null | null | null | lv3/integer_triangle.py | 5joon2/algorithm_programmers | 72f04ad610e1700f13d0657fffb4ab952fefa047 | [
"MIT"
] | null | null | null | def solution(triangle):
for i, numbers in enumerate(triangle):
if i == 0:
continue
elif i > 0:
for j, number in enumerate(numbers):
if j == 0: # left end
numbers[j] += triangle[i-1][0]
elif j == len(numbers)-1: # right end
numbers[j] += triangle[i-1][-1]
else: # in the middle
numbers[j] += max(triangle[i-1][j-1], triangle[i-1][j])
return max(triangle[-1])
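# Example (sample case from the problem statement):
#   solution([[7], [3, 8], [8, 1, 0], [2, 7, 4, 4], [4, 5, 2, 6, 5]])  # -> 30
# Each row accumulates the best path sum from the row above, so the answer is
# the maximum of the (mutated) last row.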
# Test 1 〉 Passed (0.02ms, 10.2MB)
# Test 2 〉 Passed (0.02ms, 10.3MB)
# Test 3 〉 Passed (0.05ms, 10.2MB)
# Test 4 〉 Passed (0.32ms, 10.2MB)
# Test 5 〉 Passed (1.15ms, 10.3MB)
# Test 6 〉 Passed (0.59ms, 10.2MB)
# Test 7 〉 Passed (2.17ms, 10.3MB)
# Test 8 〉 Passed (0.26ms, 10.3MB)
# Test 9 〉 Passed (0.01ms, 10.2MB)
# Test 10 〉 Passed (0.17ms, 10.3MB)
# Efficiency tests
# Test 1 〉 Passed (42.25ms, 14.2MB)
# Test 2 〉 Passed (32.65ms, 13.1MB)
# Test 3 〉 Passed (47.93ms, 14.6MB)
# Test 4 〉 Passed (44.63ms, 14.2MB)
# Test 5 〉 Passed (39.36ms, 13.9MB)
# Test 6 〉 Passed (49.06ms, 14.6MB)
# Test 7 〉 Passed (46.04ms, 14.4MB)
# Test 8 〉 Passed (45.60ms, 13.7MB)
# Test 9 〉 Passed (35.64ms, 14MB)
# Test 10 〉 Passed (46.49ms, 14.4MB)
| 30.368421 | 75 | 0.495667 |
5790eaa9e058afa62ea1e1554f2b1d9e569043ba | 3,359 | py | Python | src/data/dataset_factory.py | benihime91/leaf-disease-classification-kaggle | e1c8b1c6b88fe199a09e9d956db7b9baa3fb9dcc | [
"MIT"
] | null | null | null | src/data/dataset_factory.py | benihime91/leaf-disease-classification-kaggle | e1c8b1c6b88fe199a09e9d956db7b9baa3fb9dcc | [
"MIT"
] | null | null | null | src/data/dataset_factory.py | benihime91/leaf-disease-classification-kaggle | e1c8b1c6b88fe199a09e9d956db7b9baa3fb9dcc | [
"MIT"
] | 3 | 2020-12-14T04:22:31.000Z | 2021-04-22T19:40:34.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01b_data.datasests_factory.ipynb (unless otherwise specified).
__all__ = ["create_transform", "DatasetMapper"]
# Cell
from omegaconf import DictConfig
from sklearn.model_selection import train_test_split
from .datasets import CassavaDataset, load_data
from .transforms_factory import create_transform
# Cell
class DatasetMapper:
"A convenince class for CassavaImageClassification task"
def __init__(self, cfg: DictConfig):
"Note: `cfg` has to be the global hydra config"
self.dset_cfg = cfg.data.dataset
self.tfm_config = cfg.augmentations
self.cfg = cfg
self.fold = self.dset_cfg.fold
def generate_datasets(self):
"generates datasets and repective transformations from HYDRA config file"
# loads the data corresponding to the current fold
# and does some data preprocessing
self.data = load_data(
self.dset_cfg.csv, self.dset_cfg.image_dir, self.fold, shuffle=True
)
self.train_data = self.data.loc[self.data["is_valid"] == False]
self.valid_data = self.data.loc[self.data["is_valid"] == True]
self.train_data = self.train_data.sample(frac=1).reset_index(
inplace=False, drop=True
)
self.valid_data = self.valid_data.sample(frac=1).reset_index(
inplace=False, drop=True
)
# Train Test split for validation and test dataset
# self.test_data, self.valid_data = train_test_split(
# self.valid_data,
# shuffle=True,
# test_size=0.5,
# random_state=self.cfg.training.random_seed,
# stratify=self.valid_data["label"],
# )
# self.test_data = self.test_data.sample(frac=1).reset_index(
# inplace=False, drop=True
# )
# self.valid_data = self.valid_data.sample(frac=1).reset_index(
# inplace=False, drop=True
# )
self.test_data = self.valid_data
# Loads transformations from the HYDRA config file
self.augs_initial, self.augs_final, self.augs_valid = create_transform(
self.tfm_config, self.cfg
)
# Instantiate the Datasets for Training
self.train_ds = CassavaDataset(
self.train_data,
fn_col="filePath",
label_col="label",
transform=self.augs_initial,
backend=self.tfm_config.backend,
)
self.valid_ds = CassavaDataset(
self.valid_data,
fn_col="filePath",
label_col="label",
transform=self.augs_valid,
backend=self.tfm_config.backend,
)
self.test_ds = CassavaDataset(
self.test_data,
fn_col="filePath",
label_col="label",
transform=self.augs_valid,
backend=self.tfm_config.backend,
)
def get_train_dataset(self):
"returns the train dataset"
return self.train_ds
def get_valid_dataset(self):
"returns the validation dataset"
return self.valid_ds
def get_test_dataset(self):
"return the test dataset"
return self.test_ds
def get_transforms(self):
"returns the transformations to be applied after mixmethod"
return self.augs_final
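# A minimal usage sketch (assumes a Hydra config exposing the fields accessed
# above, e.g. cfg.data.dataset.{csv,image_dir,fold} and cfg.augmentations):
#   mapper = DatasetMapper(cfg)
#   mapper.generate_datasets()
#   train_ds = mapper.get_train_dataset()
#   valid_ds = mapper.get_valid_dataset()
#   post_mix_tfms = mapper.get_transforms()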
| 32.298077 | 110 | 0.629949 |
4987387dc302368a9c3556b5a50a78b321d51812 | 68,385 | py | Python | tensorflow/python/training/checkpointable/util_test.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | 5 | 2019-01-13T16:15:25.000Z | 2019-07-07T16:17:32.000Z | tensorflow/python/training/checkpointable/util_test.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/checkpointable/util_test.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | 1 | 2019-04-16T13:48:37.000Z | 2019-04-16T13:48:37.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl.testing import parameterized
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util as checkpointable_utils
class NonLayerCheckpointable(tracking.Checkpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = checkpointable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class InterfaceTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAddVariable(self):
obj = NonLayerCheckpointable()
with self.assertRaisesRegexp(ValueError, "do not specify shape"):
checkpointable_utils.add_variable(
obj, name="shape_specified_twice", shape=[], initializer=1)
constant_initializer = checkpointable_utils.add_variable(
obj, name="constant_initializer", initializer=1)
with variable_scope.variable_scope("some_variable_scope"):
ones_initializer = checkpointable_utils.add_variable(
obj,
name="ones_initializer",
shape=[2],
initializer=init_ops.ones_initializer(dtype=dtypes.float32))
bare_initializer = checkpointable_utils.add_variable(
obj,
name="bare_initializer",
shape=[2, 2],
dtype=dtypes.float64,
initializer=init_ops.zeros_initializer)
# Even in graph mode, there are no naming conflicts between objects, only
# naming conflicts within an object.
other_duplicate = resource_variable_ops.ResourceVariable(
name="duplicate", initial_value=1.)
duplicate = checkpointable_utils.add_variable(
obj, name="duplicate", shape=[])
with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
checkpointable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(checkpointable_utils.gather_initializers(obj))
self.assertEqual("constant_initializer:0", constant_initializer.name)
self.assertEqual(1, self.evaluate(constant_initializer))
self.assertEqual("some_variable_scope/ones_initializer:0",
ones_initializer.name)
self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
self.assertAllEqual([[0., 0.],
[0., 0.]], self.evaluate(bare_initializer))
self.assertEqual("a_variable:0", obj.a_variable.name)
self.assertEqual("duplicate:0", other_duplicate.name)
if context.executing_eagerly():
# When executing eagerly, there's no uniquification of variable names. The
# checkpoint name will be the same.
self.assertEqual("duplicate:0", duplicate.name)
else:
# The .name attribute may be globally influenced, but the checkpoint name
# won't be (tested below).
self.assertEqual("duplicate_1:0", duplicate.name)
named_variables, _, _ = checkpointable_utils._serialize_object_graph(
obj, saveables_cache=None)
expected_checkpoint_names = (
"a_variable/.ATTRIBUTES/VARIABLE_VALUE",
"bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"duplicate/.ATTRIBUTES/VARIABLE_VALUE",
"ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
)
six.assertCountEqual(
self, expected_checkpoint_names, [v.name for v in named_variables])
def testInitNotCalled(self):
class NoInit(tracking.Checkpointable):
def __init__(self):
pass
# __init__ for Checkpointable will be called implicitly.
checkpointable_utils.add_variable(NoInit(), "var", shape=[])
def testShapeDtype(self):
root = tracking.Checkpointable()
v1 = checkpointable_utils.add_variable(
root, name="v1", initializer=3., dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v1.dtype)
v2 = checkpointable_utils.add_variable(
root,
name="v2",
shape=[3],
initializer=init_ops.ones_initializer,
dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v2.dtype)
self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
def testObjectMetadata(self):
with context.eager_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dense = core.Dense(1)
checkpoint = checkpointable_utils.Checkpoint(dense=dense)
dense(constant_op.constant([[1.]]))
save_path = checkpoint.save(checkpoint_prefix)
objects = checkpointable_utils.object_metadata(save_path)
all_variable_names = []
for obj in objects.nodes:
for attribute in obj.attributes:
all_variable_names.append(attribute.full_name)
self.assertIn("dense/kernel", all_variable_names)
def testNotCheckpointable(self):
class CallsFunctionalStuff(
tracking.NotCheckpointable, tracking.Checkpointable):
pass
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
checkpoint = checkpointable_utils.Checkpoint(x=CallsFunctionalStuff())
with self.assertRaises(NotImplementedError):
checkpoint.save(prefix)
class CallsFunctionalStuffOtherMRO(
tracking.Checkpointable, tracking.NotCheckpointable):
pass
checkpoint_reversed = checkpointable_utils.Checkpoint(
x=CallsFunctionalStuffOtherMRO())
with self.assertRaises(NotImplementedError):
checkpoint_reversed.save(prefix)
class _MirroringSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
tensor = self._primary_variable.read_value()
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=tensor,
slice_spec="",
name=name)
super(_MirroringSaveable, self).__init__(
tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(base.CheckpointableBase):
"""A Checkpointable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
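# Note on the pattern above: _gather_saveables_for_checkpoint returns a dict
# mapping attribute keys to SaveableObject factories. The single factory here
# builds a _MirroringSaveable, which serializes only the primary variable but
# restores that value into both the primary and the mirrored variable, which
# is the behavior the tests below rely on.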
class CheckpointingTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.Adam(0.001)
step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, step=step)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = control_flow_ops.group(
optimizer.apply_gradients(zip(gradients, variables)),
step.assign_add(1))
with backprop.GradientTape() as tape:
loss = other_model(input_value)
variables = other_model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = (
checkpointable_utils._serialize_object_graph(
root_checkpointable, saveables_cache=None))
expected_slot_keys = (
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
"optimizer/learning_rate",
"optimizer/beta_1",
"optimizer/beta_2",
"optimizer/epsilon",
"optimizer/iter",
"optimizer/decay",
) + expected_slot_keys
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
expected_checkpoint_names.append(
"optimizer/.ATTRIBUTES/OBJECT_CONFIG_JSON")
# The Dense layers also save get_config() JSON
expected_checkpoint_names.extend(
["model/_second/.ATTRIBUTES/OBJECT_CONFIG_JSON",
"model/_named_dense/.ATTRIBUTES/OBJECT_CONFIG_JSON"])
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual(
"beta_1",
named_variables["optimizer/beta_1" + suffix].full_name)
self.assertEqual(
"beta_2",
named_variables["optimizer/beta_2" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
children = [node.local_name for node in optimizer_node.children]
six.assertCountEqual(
self,
# Non-slot dependencies
["beta_1", "beta_2", "iter", "decay", "epsilon", "learning_rate"],
children)
serialized_slot_keys = []
for slot in optimizer_node.slot_variables:
for attribute in (
serialized_graph.nodes[slot.slot_variable_node_id].attributes):
serialized_slot_keys.append(attribute.checkpoint_key)
six.assertCountEqual(
self,
[key + suffix for key in expected_slot_keys],
serialized_slot_keys)
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
checkpoint = checkpointable_utils.Checkpoint(v=v)
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
self.evaluate(v.non_dep_variable.assign(44.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(45.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
saver = saver_lib.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.Adam(0.001)
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
root_checkpointable.save_counter # pylint: disable=pointless-statement
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_checkpointable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
optimizer_variables = self.evaluate(
sorted(optimizer.variables(), key=lambda v: v.name))
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_checkpointable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.Adam(0.001)
on_create_root = checkpointable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value,
var_list=[dummy_var])
status.assert_existing_objects_matched()
status.assert_consumed()
self.assertAllEqual(
optimizer_variables,
# Creation order is different, so .variables() needs to be re-sorted.
self.evaluate(sorted(optimizer.variables(), key=lambda v: v.name)))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.Adam(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer.iterations.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.Adam(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
def _train_fn(model, input_value):
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=1)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(_train_fn, model, input_value)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testFreezing(self):
with test_util.use_gpu():
# Save an object-based checkpoint using a frozen saver
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = checkpointable_utils.Checkpoint(v=v)
self.evaluate(v.assign(3))
# Create the save counter so assert_consumed doesn't complain about it not
# existing in the checkpoint on restore.
self.evaluate(checkpoint.save_counter.assign(12))
saver = checkpointable_utils.frozen_saver(checkpoint)
with ops.device("cpu:0"):
prefix_tensor = constant_op.constant(prefix)
save_path = self.evaluate(saver.save(prefix_tensor))
self.evaluate(v.assign(10))
# Use the frozen saver to restore the same object graph
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore using another frozen saver on an identical object graph
del v, checkpoint, saver
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = checkpointable_utils.Checkpoint(v=v)
saver = checkpointable_utils.frozen_saver(checkpoint)
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore as an object-based checkpoint
del v, checkpoint, saver
checkpoint = checkpointable_utils.Checkpoint()
status = checkpoint.restore(save_path)
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
if context.executing_eagerly():
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
self.assertEqual(0, self.evaluate(v))
checkpoint.v = v
status.assert_consumed().run_restore_ops()
self.assertEqual(3, self.evaluate(v))
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
@test_util.run_in_graph_and_eager_modes
def testCustomNumbering(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = checkpointable_utils.Checkpoint(step=step)
self.evaluate(step.initializer)
for i in range(5):
path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step)))
expected_suffix = "-%d" % (2 * i,)
if not path.endswith(expected_suffix):
self.fail("%s should have suffix %s" % (path, expected_suffix))
self.evaluate(step.assign_add(2))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.Adam(0.)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@def_function.function
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables))
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = tracking.Checkpointable()
checkpointable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = checkpointable_utils._serialize_object_graph(
root, saveables_cache=None)
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNumberedPath(self):
root = tracking.Checkpointable()
leaf = tracking.Checkpointable()
root.leaf = leaf
checkpointable_utils.add_variable(leaf, name="v", shape=[])
(named_variable,), _, _ = checkpointable_utils._serialize_object_graph(
root, saveables_cache=None)
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name)
@test_util.run_in_graph_and_eager_modes
def testLocalNameValidation(self):
root = tracking.Checkpointable()
leaf = tracking.Checkpointable()
# Dots are escaped, which avoids conflicts with reserved names.
root._track_checkpointable(leaf, name=".ATTRIBUTES")
checkpointable_utils.add_variable(checkpointable=leaf, name="a", shape=[])
(named_variable,), _, _ = checkpointable_utils._serialize_object_graph(
root, saveables_cache=None)
self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE",
named_variable.name)
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.Adam(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = checkpointable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testLateDependencyTracking(self):
class Dependency(tracking.Checkpointable):
def build(self):
self.var = checkpointable_utils.add_variable(
self, "var", initializer=0.)
class LateDependencies(tracking.Checkpointable):
def add_dep(self):
self.dep = Dependency()
self.dep.build()
original = LateDependencies()
original.add_dep()
self.evaluate(state_ops.assign(original.dep.var, 123.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpointable_utils.CheckpointableSaver(
original).save(checkpoint_prefix)
load_into = LateDependencies()
status = checkpointable_utils.CheckpointableSaver(
load_into).restore(save_path)
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
load_into.add_dep()
status.assert_consumed()
status.assert_existing_objects_matched().run_restore_ops()
self.assertEqual(123., self.evaluate(load_into.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDepAfterVar(self):
class Dependency(tracking.Checkpointable):
def build(self):
self.var = checkpointable_utils.add_variable(
self, "var", initializer=0.)
class DepAfterVar(tracking.Checkpointable):
def add_dep(self):
dep = Dependency()
dep.build()
self.dep = dep
dep_after_var = DepAfterVar()
dep_after_var.add_dep()
self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpointable_utils.CheckpointableSaver(dep_after_var).save(
checkpoint_prefix)
loaded_dep_after_var = DepAfterVar()
status = checkpointable_utils.CheckpointableSaver(
loaded_dep_after_var).restore(save_path)
loaded_dep_after_var.add_dep()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = tracking.Checkpointable()
root.var = checkpointable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.Adam(0.1)
variables = [root.var]
gradients = [1.]
train_op = optimizer.apply_gradients(zip(gradients, variables))
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(checkpointable_utils.gather_initializers(
checkpointable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(
optimizer.get_slot(slot_name="m", var=root.var),
14.))
slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = tracking.Checkpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = checkpointable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.Adam(0.1)
slot_status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "Unresolved object"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
else:
# Slot variables are not created eagerly when graph building.
with self.assertRaises(KeyError):
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)
variables = [new_root.var]
gradients = [1.]
train_op = new_root.optimizer.apply_gradients(zip(gradients, variables))
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
if not context.executing_eagerly():
# The train op hasn't run when graph building, so the slot variable has
# its restored value. It has run in eager, so the value will be different.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
@test_util.run_in_graph_and_eager_modes
def testOverlappingRestores(self):
checkpoint_directory = self.get_temp_dir()
save_root = tracking.Checkpointable()
save_root.dep = tracking.Checkpointable()
save_root.dep.var = checkpointable_utils.add_variable(
save_root.dep, name="var", initializer=0.)
self.evaluate(state_ops.assign(save_root.dep.var, 12.))
saver = checkpointable_utils.CheckpointableSaver(save_root)
first_path = saver.save(os.path.join(checkpoint_directory, "first"))
self.evaluate(state_ops.assign(save_root.dep.var, 13.))
second_path = saver.save(os.path.join(checkpoint_directory, "second"))
first_root = tracking.Checkpointable()
second_root = tracking.Checkpointable()
first_status = checkpointable_utils.CheckpointableSaver(
first_root).restore(first_path)
second_status = checkpointable_utils.CheckpointableSaver(
second_root).restore(second_path)
load_dep = tracking.Checkpointable()
load_dep.var = checkpointable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(13., self.evaluate(load_dep.var))
# Try again with the order of the restore() reversed. The last restore
# determines the final value.
first_root = tracking.Checkpointable()
second_root = tracking.Checkpointable()
second_status = checkpointable_utils.CheckpointableSaver(
second_root).restore(second_path)
first_status = checkpointable_utils.CheckpointableSaver(
first_root).restore(first_path)
load_dep = tracking.Checkpointable()
load_dep.var = checkpointable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes
def testAmbiguousLoad(self):
# Not OK to split one checkpoint object into two
checkpoint_directory = self.get_temp_dir()
save_root = tracking.Checkpointable()
save_root.dep_one = tracking.Checkpointable()
save_root.dep_two = tracking.Checkpointable()
dep_three = tracking.Checkpointable()
save_root.dep_one.dep_three = dep_three
save_root.dep_two.dep_three = dep_three
checkpointable_utils.add_variable(dep_three, name="var", initializer=0.)
self.evaluate(checkpointable_utils.gather_initializers(save_root))
save_path = checkpointable_utils.CheckpointableSaver(save_root).save(
os.path.join(checkpoint_directory, "ckpt"))
load_root = tracking.Checkpointable()
status = checkpointable_utils.CheckpointableSaver(load_root).restore(
save_path)
load_root.dep_one = tracking.Checkpointable()
load_root.dep_two = tracking.Checkpointable()
load_root.dep_one.dep_three = tracking.Checkpointable()
load_root.dep_two.dep_three = tracking.Checkpointable()
checkpointable_utils.add_variable(
load_root.dep_one.dep_three, name="var", initializer=0.)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
@test_util.run_in_graph_and_eager_modes
def testObjectsCombined(self):
# Currently fine to load two checkpoint objects into one Python object
checkpoint_directory = self.get_temp_dir()
save_root = tracking.Checkpointable()
save_root.dep_one = tracking.Checkpointable()
save_root.dep_two = tracking.Checkpointable()
checkpointable_utils.add_variable(
save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
checkpointable_utils.add_variable(
save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
self.evaluate(checkpointable_utils.gather_initializers(save_root))
save_path = checkpointable_utils.CheckpointableSaver(save_root).save(
os.path.join(checkpoint_directory, "ckpt"))
load_root = tracking.Checkpointable()
load_root.dep_one = tracking.Checkpointable()
load_root.dep_two = load_root.dep_one
v1 = checkpointable_utils.add_variable(
load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
v2 = checkpointable_utils.add_variable(
load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
status = checkpointable_utils.CheckpointableSaver(load_root).restore(
save_path).assert_consumed().assert_existing_objects_matched()
status.run_restore_ops()
self.assertEqual(32., self.evaluate(v1))
self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def testDependencyLoop(self):
# Note: this test creates garbage during eager execution because it
# purposefully creates a reference cycle.
first = tracking.Checkpointable()
second = tracking.Checkpointable()
first.second = second
second.first = first
first.v = checkpointable_utils.add_variable(
first, "v1", initializer=[3., 1., 4.])
second.v = checkpointable_utils.add_variable(
second, "v2", initializer=[1., 1., 2., 3.])
self.evaluate(checkpointable_utils.gather_initializers(first))
checkpoint_directory = self.get_temp_dir()
save_path = checkpointable_utils.CheckpointableSaver(first).save(
os.path.join(checkpoint_directory, "ckpt"))
# Test deferred loading
first_load = tracking.Checkpointable()
status = checkpointable_utils.CheckpointableSaver(
first_load).restore(save_path)
second_load = tracking.Checkpointable()
first_load.second = second_load
second_load.first = first_load
with self.assertRaises(AssertionError):
status.assert_consumed()
first_load.v = checkpointable_utils.add_variable(
first_load, "v1", shape=[3])
second_load.v = checkpointable_utils.add_variable(
second_load, "v2", shape=[4])
status.assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
# Test loading when variables have already been created
self.evaluate(first_load.v.assign([2., 7., 1.]))
self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
status = checkpointable_utils.CheckpointableSaver(first_load).restore(
save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first = tracking.Checkpointable()
first.var1 = variables_lib.Variable(0., name="outside_var")
first.var2 = variables_lib.Variable(0., name="blah")
self.evaluate(first.var1.assign(4.))
self.evaluate(first.var2.assign(8.))
save_path = checkpointable_utils.CheckpointableSaver(first).save(
checkpoint_prefix)
second = tracking.Checkpointable()
second.var2 = variables_lib.Variable(0., name="blah")
status = checkpointable_utils.CheckpointableSaver(
second).restore(save_path)
recreated_var1 = variables_lib.Variable(0., name="outside_var")
status.run_restore_ops()
self.assertEqual(8., self.evaluate(second.var2))
self.evaluate(recreated_var1.assign(-2.))
self.assertEqual(-2., self.evaluate(recreated_var1))
second.var1 = recreated_var1
status.run_restore_ops()
self.assertEqual(4., self.evaluate(recreated_var1))
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.Checkpointable()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
graph.finalize()
saver.save(checkpoint_prefix)
@test_util.run_in_graph_and_eager_modes
def testCheckpointState(self):
# No checkpoints are deleted by default
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.Checkpoint(obj=obj)
for _ in range(10):
saver.save(checkpoint_prefix)
expected_filenames = ["checkpoint"]
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
expected_filenames.append(
"ckpt-%d.data-00000-of-00001" % (checkpoint_number,))
six.assertCountEqual(
self,
expected_filenames,
os.listdir(checkpoint_directory))
@test_util.run_in_graph_and_eager_modes
def testCheckpointStateChangingVarList(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(checkpointable_utils.gather_initializers(obj))
checkpoint = checkpointable_utils.Checkpoint(obj=obj)
looped_variables = []
for iteration in range(10):
new_variable = resource_variable_ops.ResourceVariable(iteration)
self.evaluate(new_variable.initializer)
setattr(checkpoint, "var_%d" % iteration, new_variable)
checkpoint.save(checkpoint_prefix)
looped_variables.append(new_variable)
expected_filenames = ["checkpoint"]
# We've copied the saver each time, but checkpoint management should still
# be consistent. Nothing gets deleted.
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
expected_filenames.append(
"ckpt-%d.data-00000-of-00001" % (checkpoint_number,))
six.assertCountEqual(
self,
expected_filenames,
os.listdir(checkpoint_directory))
self.assertEqual(
checkpoint_prefix + "-10",
checkpoint_management.latest_checkpoint(checkpoint_directory))
# The checkpoint list only contains the most recent checkpoint, but they're
# all on disk. This means we won't eventually run into proto size limits.
self.assertEqual(
[checkpoint_prefix + "-10"],
(checkpoint_management.get_checkpoint_state(checkpoint_directory)
.all_model_checkpoint_paths))
for v in looped_variables:
self.evaluate(v.assign(314))
checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
self.assertEqual(314, self.evaluate(checkpoint.var_9))
self.assertEqual(314, self.evaluate(checkpoint.var_8))
self.assertEqual(314, self.evaluate(checkpoint.var_6))
self.assertEqual(5, self.evaluate(checkpoint.var_5))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
self.assertEqual(9, self.evaluate(checkpoint.var_9))
self.assertEqual(8, self.evaluate(checkpoint.var_8))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.Checkpointable()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
graph.finalize()
saver.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def test_sequential(self):
model = sequential.Sequential()
checkpoint = checkpointable_utils.Checkpoint(model=model)
model.add(core.Dense(4))
second_dense = core.Dense(5)
model.add(second_dense)
model(constant_op.constant([[1.]]))
checkpoint.restore(None).initialize_or_restore()
self.evaluate(second_dense.bias.assign(
constant_op.constant([1., 2., 3., 4., 5.])))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(second_dense.bias.assign(
constant_op.constant([5., 6., 7., 8., 9.])))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.], self.evaluate(second_dense.bias))
deferred_sequential = sequential.Sequential()
deferred_sequential_checkpoint = checkpointable_utils.Checkpoint(
model=deferred_sequential)
status = deferred_sequential_checkpoint.restore(save_path)
deferred_sequential.add(core.Dense(4))
deferred_sequential(constant_op.constant([[1.]]))
deferred_second_dense = core.Dense(5)
deferred_sequential.add(deferred_second_dense)
deferred_sequential(constant_op.constant([[1.]]))
status.run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.],
self.evaluate(deferred_second_dense.bias))
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = checkpointable_utils.Checkpoint(
model=model) # Do not save the optimizer with the checkpoint.
optimizer_checkpoint = checkpointable_utils.Checkpoint(
optimizer=optimizer)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
def train_fn():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
# TODO(tanzheny): Add hyper variables to .variables(), and set them with
# set_weights etc.
variables_not_in_the_variables_property = [
obj for obj in optimizer._hyper.values()
if isinstance(obj, variables_lib.Variable)]
self.evaluate([v.initializer for v
in optimizer.variables()
+ variables_not_in_the_variables_property])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.beta_1.assign(42.))
optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
del train_fn
# Restore into a graph with the optimizer
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
status = root.restore(save_path=model_save_path)
input_value = constant_op.constant([[3.]])
def train_fn1():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn1 = functools.partial(self.evaluate, train_fn1())
status.initialize_or_restore()
train_fn1()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
del train_fn1
# Make sure initialization doesn't clobber later restores
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001, beta_1=1.0)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
opt_root = checkpointable_utils.Checkpoint(
optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(save_path=optimizer_save_path)
input_value = constant_op.constant([[3.]])
def train_fn2():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn2 = functools.partial(self.evaluate, train_fn2())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn2()
self.assertEqual(42., self.evaluate(optimizer.beta_1))
@test_util.run_in_graph_and_eager_modes
def test_restore_after_adding_empty_checkpointable_data_structure(self):
model = NonLayerCheckpointable()
checkpoint = checkpointable_utils.Checkpoint(model=model)
checkpoint.restore(None).initialize_or_restore()
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
del model, checkpoint
model = NonLayerCheckpointable()
model.dict = {"a": 1}
model.list = {"b": 1}
checkpoint = checkpointable_utils.Checkpoint(model=model)
load_status = checkpoint.restore(save_path)
load_status.assert_existing_objects_matched().run_restore_ops()
class _ManualScope(tracking.Checkpointable):
def __call__(self):
with variable_scope.variable_scope("ManualScope") as vs:
self.variable_scope = vs
with checkpointable_utils.capture_dependencies(template=self):
return self._build()
def _build(self):
return variable_scope.get_variable(name="in_manual_scope", shape=[])
class TemplateTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
manual = _ManualScope()
return v, v + 1., v2, manual, manual()
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
six.assertCountEqual(
self,
[v1_save, v2_save, manual_scope, manual_scope_v, save_template],
checkpointable_utils.list_objects(save_template))
manual_dep, = manual_scope._checkpoint_dependencies
self.assertEqual("in_manual_scope", manual_dep.name)
self.assertIs(manual_scope_v, manual_dep.ref)
optimizer = adam.Adam(0.0)
save_root = checkpointable_utils.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value,
var_list=[v1_save])
self.evaluate([v.initializer for v in save_template.variables])
self.evaluate([v.initializer for v in optimizer.variables()])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.Adam(0.0)
load_root = checkpointable_utils.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2, _, _ = load_template()
load_optimizer.minimize(var.read_value, var_list=[var])
self.assertLen(load_template._checkpoint_dependencies, 3)
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
self.assertEqual("ManualScope",
load_template._checkpoint_dependencies[2].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore_nested(self):
def _inner_template():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer())
return v
def _outer_template():
first_inner = template.make_template("i1", _inner_template)
second_inner = template.make_template("i2", _inner_template)
v1 = first_inner()
v2 = second_inner()
v3 = second_inner()
return (first_inner, second_inner), (v1, v2, v3)
with variable_scope.variable_scope("ignored"):
save_template = template.make_template("s1", _outer_template)
save_root = checkpointable_utils.Checkpoint(my_template=save_template)
(inner_template_one, inner_template_two), _ = save_template()
self.evaluate(inner_template_one.variables[0].assign([20.]))
self.evaluate(inner_template_two.variables[0].assign([25.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _outer_template)
load_root = checkpointable_utils.Checkpoint(my_template=load_template)
status = load_root.restore(save_path)
(inner_template_one, inner_template_two), (v1, v2, v3) = load_template()
outer_template_dependencies = load_root.my_template._checkpoint_dependencies
self.assertLen(outer_template_dependencies, 2)
self.assertEqual("i1", outer_template_dependencies[0].name)
self.assertIs(inner_template_one, outer_template_dependencies[0].ref)
self.assertEqual("i2", outer_template_dependencies[1].name)
self.assertIs(inner_template_two, outer_template_dependencies[1].ref)
self.assertLen(inner_template_one._checkpoint_dependencies, 1)
self.assertEqual("v", inner_template_one._checkpoint_dependencies[0].name)
self.assertLen(inner_template_two._checkpoint_dependencies, 1)
self.assertEqual("v", inner_template_two._checkpoint_dependencies[0].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([20.], self.evaluate(v1))
self.assertAllEqual([25.], self.evaluate(v2))
self.assertAllEqual([25.], self.evaluate(v3))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.Adam(0.001)
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, slot_name="m").assign([2.]))
self.evaluate(optimizer.beta_1.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, slot_name="m")
.assign([102.]))
self.evaluate(root_checkpointable.optimizer.beta_1.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, slot_name="m")))
self.assertAllEqual(3.,
self.evaluate(root_checkpointable.optimizer.beta_1))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = saver_lib.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer.iterations)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = checkpointable_utils.CheckpointableSaver(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
status.assert_consumed()
with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the name-based
# checkpoint.
root.not_in_name_checkpoint = resource_variable_ops.ResourceVariable([1.])
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
save_path = root.save(session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
class PythonMetadataTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSaveLoad(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dense = core.Dense(1)
checkpoint = checkpointable_utils.Checkpoint(dense=dense)
dense(constant_op.constant([[1.]]))
checkpoint.restore(None).initialize_or_restore()
save_path = checkpoint.save(checkpoint_prefix)
def _get_dense_node_from_object_graph(object_graph_proto):
root_node = object_graph_proto.nodes[0]
for child in root_node.children:
if child.local_name == "dense":
break
else:
raise AssertionError(
"Expected a 'dense' dependency of root, didn't find one.")
dense_node = object_graph_proto.nodes[child.node_id] # pylint: disable=undefined-loop-variable
self.assertEqual(1, len(dense_node.attributes))
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
layer_json = reader.get_tensor(dense_node.attributes[0].checkpoint_key)
return json.loads(layer_json.decode("utf-8"))
layer_data = _get_dense_node_from_object_graph(
checkpointable_utils.object_metadata(save_path))
self.assertEqual("Dense", layer_data["class_name"])
self.assertEqual(1, layer_data["config"]["units"])
# Check that no new ops are added to the graph the second time we save.
ops.get_default_graph().finalize()
dense.units = 42
save_path = checkpoint.save(checkpoint_prefix)
layer_data = _get_dense_node_from_object_graph(
checkpointable_utils.object_metadata(save_path))
self.assertEqual("Dense", layer_data["class_name"])
self.assertEqual(42, layer_data["config"]["units"])
if __name__ == "__main__":
test.main()
| 43.529599 | 101 | 0.715669 |
1e09d08ef44dc21563bbb9795dbdbcbe3b94be7d | 1,504 | py | Python | PycharmProjects/PythonTest/DistributionProgramming.py | FLYKingdom/CodeSeeker | f09b84ea3d04631aa36de98a9d6ad549cbaffebd | [
"MIT"
] | null | null | null | PycharmProjects/PythonTest/DistributionProgramming.py | FLYKingdom/CodeSeeker | f09b84ea3d04631aa36de98a9d6ad549cbaffebd | [
"MIT"
] | null | null | null | PycharmProjects/PythonTest/DistributionProgramming.py | FLYKingdom/CodeSeeker | f09b84ea3d04631aa36de98a9d6ad549cbaffebd | [
"MIT"
] | null | null | null | # Distributed programming
import random, time, queue
from multiprocessing.managers import BaseManager
# Queue for sending tasks
task_queue = queue.Queue()
# Queue for receiving results
result_queue = queue.Queue()
class QueueManager(BaseManager):
pass
QueueManager.register('get_task_queue', callable=lambda: task_queue)
QueueManager.register('get_result_queue', callable=lambda: result_queue)
# Bind port 5000 and set the authkey
manager = QueueManager(address=('', 5000), authkey=b'123456')
# Start the queue server
manager.start()
# Get the queue objects exposed over the network
task = manager.get_task_queue()
result = manager.get_result_queue()
# Put a few tasks in
for i in range(10):
n = random.randint(0, 10000)
print('Put task %d ...' % n)
task.put(n)
# Read results from the result queue
print('Try get result ...')
for i in range(10):
r = result.get(timeout=10)
print('Result : %s ' % r)
# Shut down
manager.shutdown()
print('master exit')
# import time, sys, queue
# from multiprocessing.managers import BaseManager
#
# class QueueManager(BaseManager):
# pass
#
# QueueManager.register('get_task_queue')
# QueueManager.register('get_result_queue')
#
# server_addr = '127.0.0.1'
# manager1 = QueueManager((server_addr, 5000), authkey=b'123456')
#
# manager1.connect()
# task1 = manager1.get_task_queue()
# result1 = manager1.get_result_queue()
#
# for i in range(10):
#     try:
#         n = task1.get(timeout=1)
#         print('run task %d * %d ...' % (n, n))
#         r = '%d * %d = %d ' % (n, n, n*n)
#         time.sleep(1)
#         result1.put(r)
#     except queue.Empty:
#         print('task queue is empty')
# print('worker exit') | 22.787879 | 72 | 0.678191 |
91e37d431d776fdbe95a199561c7f9e49a4474f8 | 5,262 | py | Python | auth/ops/cert_auth.py | socrateslee/pauli | 983b317febf1b3b78b8b1b9b7aaa21a32fe0aa2b | [
"MIT"
] | 2 | 2019-09-02T02:59:09.000Z | 2021-05-20T09:43:54.000Z | auth/ops/cert_auth.py | socrateslee/pauli | 983b317febf1b3b78b8b1b9b7aaa21a32fe0aa2b | [
"MIT"
] | null | null | null | auth/ops/cert_auth.py | socrateslee/pauli | 983b317febf1b3b78b8b1b9b7aaa21a32fe0aa2b | [
"MIT"
] | 1 | 2019-10-10T07:38:13.000Z | 2019-10-10T07:38:13.000Z | # coding:utf-8
import os
import re
import time
import uuid
import random
import sys
import getopt
import logging
from ... import conf
from ..models import User, CertLogin
logger = logging.getLogger(__name__)
def get_user(**headers):
if headers.get('Ssl-Client-Verified') != 'SUCCESS':
return None
if not headers.get('Ssl-Client-S-Dn'):
return None
serial = headers.get('Ssl-Client-Serial')
if not serial:
return None
else:
serial = str(int(serial, 16))
cert_login = CertLogin.objects(serial=serial, soft_del=False).first()
if not cert_login:
return None
user = User.objects(id=cert_login.user_id, soft_del=False).first()
return user
def revoke(user_id=None, serial=None):
    cert_login_list = []
if user_id:
cert_login_list = list(CertLogin.objects(user_id=user_id, soft_del=False))
elif serial:
cert_login_list = list(CertLogin.objects(serial=serial, soft_del=False))
    for cert_login in cert_login_list:
        cert_login.soft_del = True
        cert_login.info['revoke_timestamp'] = int(time.time())
        cert_login.save()
    if cert_login_list:
        return True
    return False
def generate_cert(user, ca_key_path, ca_cert_path, csr_path='', key_path=''):
user = user if hasattr(user, 'id')\
else User.objects(id=user).first()
if not user:
        return False, 'User does not exist'
serial = str(int(time.time() * 1000)) + str(random.randint(0, 10000))
if CertLogin.objects(serial=serial).first():
        return False, 'Certificate serial number already exists'
if key_path:
if not key_path.endswith('.key'):
            return False, 'key file must end with .key'
dirname = os.path.dirname(key_path)
basename = os.path.basename(key_path)
csr_name = basename.replace('.key', '.csr')
csr_path = '%s/%s' % (dirname, csr_name) if dirname else csr_name
csr_cmd = 'openssl req -new -key %s -out %s -subj '\
+ '"/C=CN/ST=Beijing/L=Beijing/O=Gsitanfu Ltd./OU=/CN=gstianfu.com/serialNumber=%s"'
print(csr_cmd % (key_path, csr_path, serial))
ret = os.system(csr_cmd % (key_path, csr_path, serial))
if ret != 0:
            return False, 'Failed to generate the csr'
if csr_path:
if not csr_path.endswith('.csr'):
            return False, 'csr file must end with .csr'
dirname = os.path.dirname(csr_path)
basename = os.path.basename(csr_path)
crt_name = basename.replace('.csr', '.crt')
crt_path = "%s/%s" % (dirname, crt_name) if dirname else crt_name
crt_cmd = 'openssl x509 -req -days 1000 -in %s -CA %s -CAkey %s -set_serial %s -out %s'
ret = os.system(crt_cmd % (csr_path, ca_cert_path, ca_key_path, serial, crt_path))
if ret != 0:
            return False, 'Failed to generate the crt file'
print("Serial: %s" % serial)
print("Certificate:")
os.system("cat %s" % crt_path)
cert_login = CertLogin(user_id=str(user.id), serial=serial)
cert_login.save()
return True, cert_login.serial
def generate_cert_user(name, ca_key_path, ca_cert_path, csr_path='', key_path=''):
name = name.strip()
if not name:
return False, "用户名字不能为空"
user = User(name=name)
user.save()
succ, msg = generate_cert(user, ca_key_path, ca_cert_path,
csr_path=csr_path, key_path=key_path)
if not succ:
user.delete()
return succ, msg
else:
ret = {'serial': msg,
'user_id': str(user.id)}
return succ, ret
def help():
docstr = '''
Generate user and cert:
        python -m auth.ops.cert_auth genuser <NAME OF USER> <CA_KEY_PATH> <CA_CERT_PATH> --key_path <KEY_PATH>
        python -m auth.ops.cert_auth genuser <NAME OF USER> <CA_KEY_PATH> <CA_CERT_PATH> --csr_path <CSR_PATH>
Generate cert:
        python -m auth.ops.cert_auth gencert <CA_KEY_PATH> <CA_CERT_PATH> --user_id <USER_ID> --key_path <KEY_PATH>
        python -m auth.ops.cert_auth gencert <CA_KEY_PATH> <CA_CERT_PATH> --user_id <USER_ID> --csr_path <CSR_PATH>
Revoke cert:
python -m auth.ops.cert_auth revoke --user_id <USER_ID>
python -m auth.ops.cert_auth revoke --serial <SERIAL>
'''
print(docstr)
def main():
opts, args = getopt.gnu_getopt(sys.argv[1:], "",
["help", "user_id=", "serial=",
"csr_path=", "key_path="])
opts = dict(opts)
if '--help' in opts:
help()
return
if args[0] == 'revoke':
print(revoke(user_id=opts.get('--user_id'),
serial=opts.get('--serial')))
elif args[0] == 'gencert':
ca_key_path = args[1]
ca_cert_path = args[2]
user_id = opts.get('--user_id')
print(generate_cert(user_id, ca_key_path, ca_cert_path,
csr_path=opts.get('--csr_path'),
key_path=opts.get('--key_path')))
elif args[0] == 'genuser':
name = args[1]
ca_key_path = args[2]
ca_cert_path = args[3]
print(generate_cert_user(name, ca_key_path, ca_cert_path,
csr_path=opts.get('--csr_path'),
key_path=opts.get('--key_path')))
else:
help()
if __name__ == '__main__':
main()
| 34.847682 | 112 | 0.603193 |
1ba1f05007dbd4086c75735bf947439378f9b25b | 1,045 | py | Python | setup.py | shu11/django-s3-backup | 2f2fe1e43fdcef2cf7dc250c1f8511e112439185 | [
"MIT"
] | 1 | 2018-01-21T03:06:11.000Z | 2018-01-21T03:06:11.000Z | setup.py | shu11/django-s3-backup | 2f2fe1e43fdcef2cf7dc250c1f8511e112439185 | [
"MIT"
] | null | null | null | setup.py | shu11/django-s3-backup | 2f2fe1e43fdcef2cf7dc250c1f8511e112439185 | [
"MIT"
] | null | null | null | import os
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
packages = []
package_dir = "django_s3_backup"
for dirpath, dirnames, filenames in os.walk(package_dir):
# ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith("."):
del dirnames[i]
if "__init__.py" in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
setup(
name='django-s3-backup',
version='0.8',
    description='Back up all applications in INSTALLED_APPS of a Django project to Amazon S3, similar to loaddata/dumpdata.',
long_description=read('README.md'),
author='Shuichi Mitarai',
author_email='',
install_requires=['boto'],
license='MIT',
url='https://github.com/lukas-hetzenecker/django-s3-backup',
keywords=['django', 'database', 'backup', 'amazon', 's3'],
packages=packages
)
| 29.027778 | 115 | 0.656459 |
9d55fcf1121a7558c558900c0eb7a8a6e97d6aa9 | 436 | py | Python | test/test_del_group.py | vp96288/python_training | 31ad72969712c25a52698ee82c156e3abd91d087 | [
"Apache-2.0"
] | null | null | null | test/test_del_group.py | vp96288/python_training | 31ad72969712c25a52698ee82c156e3abd91d087 | [
"Apache-2.0"
] | null | null | null | test/test_del_group.py | vp96288/python_training | 31ad72969712c25a52698ee82c156e3abd91d087 | [
"Apache-2.0"
] | null | null | null | from model.group import Group
import random
def test_delete_some_group(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(name = "test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.group.delete_group_by_id(group.id)
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(group)
assert old_groups == new_groups | 33.538462 | 49 | 0.706422 |
d48f5d5623cff8a54f366e1b901b296cb64ff63a | 3,279 | py | Python | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/DescribeRestoreJobsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/DescribeRestoreJobsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/DescribeRestoreJobsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class DescribeRestoreJobsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'DescribeRestoreJobs','hbr')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SnapshotId(self):
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self,SnapshotId):
self.add_query_param('SnapshotId',SnapshotId)
def get_VaultId(self):
return self.get_query_params().get('VaultId')
def set_VaultId(self,VaultId):
self.add_query_param('VaultId',VaultId)
def get_RestoreId(self):
return self.get_query_params().get('RestoreId')
def set_RestoreId(self,RestoreId):
self.add_query_param('RestoreId',RestoreId)
def get_Source(self):
return self.get_query_params().get('Source')
def set_Source(self,Source):
self.add_query_param('Source',Source)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Token(self):
return self.get_query_params().get('Token')
def set_Token(self,Token):
self.add_query_param('Token',Token)
def get_Target(self):
return self.get_query_params().get('Target')
def set_Target(self,Target):
self.add_query_param('Target',Target)
def get_RestoreType(self):
return self.get_query_params().get('RestoreType')
def set_RestoreType(self,RestoreType):
self.add_query_param('RestoreType',RestoreType)
def get_VaultRegionId(self):
return self.get_query_params().get('VaultRegionId')
def set_VaultRegionId(self,VaultRegionId):
self.add_query_param('VaultRegionId',VaultRegionId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status) | 31.228571 | 78 | 0.754498 |
af3f04906ce387709f98d9ff00f6b9573cbd9fa4 | 122 | py | Python | tests/test_extract_nominations.py | jwilk-forks/and-the-award-goes-to | 94d05babf85bd06ad2aea42054e949940875cc22 | [
"MIT"
] | 1 | 2017-12-26T15:43:31.000Z | 2017-12-26T15:43:31.000Z | tests/test_extract_nominations.py | jwilk-forks/and-the-award-goes-to | 94d05babf85bd06ad2aea42054e949940875cc22 | [
"MIT"
] | null | null | null | tests/test_extract_nominations.py | jwilk-forks/and-the-award-goes-to | 94d05babf85bd06ad2aea42054e949940875cc22 | [
"MIT"
] | 1 | 2018-08-14T06:49:51.000Z | 2018-08-14T06:49:51.000Z | import unittest
class TestExtractAwards(unittest.TestCase):
def test_xyz(self):
self.assertEqual(True, True) | 20.333333 | 43 | 0.737705 |
04456412785b9ce57c62ad43238a41b952b1b1fa | 28,534 | py | Python | simulators/lakeshore372/ls372_simulator.py | simonsobs/socs | ef1c0f6e3620dcbb574aea64b5eb1b633ce87b07 | [
"BSD-2-Clause"
] | 6 | 2019-09-02T14:16:53.000Z | 2022-01-19T20:49:35.000Z | simulators/lakeshore372/ls372_simulator.py | simonsobs/socs | ef1c0f6e3620dcbb574aea64b5eb1b633ce87b07 | [
"BSD-2-Clause"
] | 183 | 2019-06-04T20:38:07.000Z | 2022-03-28T18:45:17.000Z | simulators/lakeshore372/ls372_simulator.py | simonsobs/socs | ef1c0f6e3620dcbb574aea64b5eb1b633ce87b07 | [
"BSD-2-Clause"
] | 7 | 2019-06-28T15:55:16.000Z | 2022-02-02T16:27:44.000Z | # To run the simulator: python3 ls372_simulator.py -p 7777
# (Since it automatically tries nearby ports, sometimes it will connect to 7778 when restarted,
# so you may need to temporarily change the port in Lakeshore372.py or just try running it again)
#
# To interact with the simulator:
# Connect 372 agent: python3 -i Lakeshore372.py 'localhost'
# There are two ways to communicate -- either by using specific functions from the agent, such as
# ls.get_autoscan(), ls.channels[1].get_input_setup(), ls.sample_heater.get_output_mode()
# or by using the ls.msg() function and the interface command formatting from the 372 manual, such as
# ls.msg('SCAN?'), ls.msg('INTYPE? 1'), ls.msg('OUTMODE? 0')
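#
# A minimal raw-socket sketch of the same interaction (assumes the simulator is
# already listening on localhost:7777; adjust the port if it fell back to 7778):
#
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.connect(('localhost', 7777))
#     s.send(b'*IDN?')                      # identification query
#     print(s.recv(4096).decode())          # e.g. LSCI,MODEL372,LSASIM,0.0
#     s.send(b'SCAN?')                      # active channel and autoscan state
#     print(s.recv(4096).decode())          # e.g. 01,1
#     s.close()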
import os
import socket
import argparse
import numpy as np
import logging
import time
BUFF_SIZE = 4096
voltage_excitation_key = {1: 2.0e-6,
2: 6.32e-6,
3: 20.0e-6,
4: 63.2e-6,
5: 200.0e-6,
6: 632.0e-6,
7: 2.0e-3,
8: 6.32e-3,
9: 20.0e-3,
10: 63.2e-3,
11: 200.0e-3,
12: 632.0e-3}
current_excitation_key = {1: 1.0e-12,
2: 3.16e-12,
3: 10.0e-12,
4: 31.6e-12,
5: 100.0e-12,
6: 316.0e-12,
7: 1.0e-9,
8: 3.16e-9,
9: 10.0e-9,
10: 31.6e-9,
11: 100.0e-9,
12: 316.0e-9,
13: 1.0e-6,
14: 3.16e-6,
15: 10.0e-6,
16: 31.6e-6,
17: 100.0e-6,
18: 316.0e-6,
19: 1.0e-3,
20: 3.16e-3,
                          21: 10.0e-3,
                          22: 31.6e-3}
class Lakeshore372_Simulator:
def __init__(self, port, num_channels=16, sn="LSASIM"):
self.log = logging.getLogger()
self.port = port
self.sn = sn
self.num_channels = num_channels
self.channels = []
for i in range(self.num_channels + 1):
if i == 0:
c = ChannelSim('A', "Channel A")
else:
c = ChannelSim(i, "Channel {}".format(i))
self.channels.append(c)
self.log.debug(f'Created channel "{self.channels[i].name}"')
self.scanner = 1 # 0 = autoscan off; 1 = autoscan on
self.active_channel = 1 # start on channel 1
self.heaters = []
for i in range(3):
h = Heater(i)
self.heaters.append(h)
self.curves = []
for i in range(60):
v = Curve(i)
self.curves.append(v)
self.cmds = {
# Lakeshore and channel commands
"*IDN?": self.get_idn,
"RDGK?": lambda x: self.get_reading(chan=x, unit='1'),
"RDGR?": lambda x: self.get_reading(chan=x, unit='2'),
"SRDG?": lambda x: self.get_reading(chan=x, unit='2'),
"KRDG?": lambda x: self.get_reading(chan=x, unit='1'),
"RDGST?": self.get_reading_status,
"INNAME": self.set_channel_name,
"INNAME?": self.get_channel_name,
"INTYPE": self.set_channel_intype,
"INTYPE?": self.get_channel_intype,
"SET_VALUE": self.set_channel_value,
"SCAN": self.set_scanner,
"SCAN?": self.get_scanner,
"INSET": self.set_input_parameters,
"INSET?": self.get_input_parameters,
"TLIMIT": self.set_tlimit,
"TLIMIT?": self.get_tlimit,
"RDGPWR?": self.get_rdgpwr,
# Heater commands
"OUTMODE?": self.get_outmode,
"OUTMODE": self.set_outmode,
"HTR?": self.get_htr,
"HTRSET?": self.get_htrset,
"HTRSET": self.set_htrset,
"MOUT?": self.get_mout,
"MOUT": self.set_mout,
"RAMP?": self.get_ramp,
"RAMP": self.set_ramp,
"RAMPST": self.get_ramp_status,
"RANGE?": self.get_heater_range,
"RANGE": self.set_heater_range,
"SETP?": self.get_setpoint,
"SETP": self.set_setpoint,
"STILL?": self.get_still,
"STILL": self.set_still,
"PID?": self.get_pid,
"PID": self.set_pid,
# Curve commands
"CRVHDR?": self.get_curve_header,
"CRVHDR": self.set_curve_header,
"CRVPT?": self.get_curve_data,
"CRVPT": self.set_curve_data,
"CRVDEL": self.delete_curve,
}
def set_channel_value(self, chan, value):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
self.channels[chan_index].set_value(value)
def get_channel_intype(self, chan):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
return self.channels[chan_index].get_intype()
def set_channel_intype(self, chan, *args):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
args = map(int, args)
self.channels[chan_index].set_intype(*args)
def get_channel_name(self, chan):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
return self.channels[chan_index].name
def set_channel_name(self, chan, name):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
self.channels[chan_index].name = name
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Checks self.port plus the next ten to see if they're open
for p in range(self.port, self.port + 10):
try:
self.log.info(f"Trying to listen on port {p}")
sock.bind(('', p))
break
except OSError as e:
if e.errno == 48:
self.log.warning(f"Address {p} is already in use")
else:
raise (e)
else:
print(f"Could not connect to ports in {range(self.port, self.port + 5)}")
sock.listen(1)
while True:
self.log.info('waiting for a connection....')
conn, client_address = sock.accept()
self.log.info(f"Made connection with {client_address}")
with conn:
# Main data loop
while True:
data = conn.recv(BUFF_SIZE)
elapsed_time = time.time() - start_time
#self.log.debug('time:', elapsed_time) # timestamp printed every time a command is received
if not data:
self.log.info("Connection closed by client")
break
clean_cmd = data.decode().strip()
self.log.info(f"Received command: {clean_cmd}")
self.log.debug("Raw Command: {}".format(data))
                    # A compound message may contain several commands separated by ';'; each is handled in turn below
cmds = data.decode().split(';')
if int(self.scanner) == 1: # useful only if all channels have the same dwell and pause settings
channel_change = int(elapsed_time // (self.channels[int(self.active_channel)].dwell +
self.channels[int(self.active_channel)].pause))
# print(channel_change)
if 0 < channel_change < 16:
self.active_channel = 1 + channel_change
elif channel_change >= 16:
new_channel_change = int(channel_change % 16)
self.active_channel = 1 + new_channel_change
self.log.debug(f"Active channel: {self.active_channel}")
elif int(self.scanner) == 0:
pass
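                    # Worked example of the autoscan arithmetic above (assuming the
                    # default dwell=10 s and pause=3 s on every channel): the active
                    # channel advances every 13 s, so 40 s after start-up
                    # channel_change = 40 // 13 = 3 and channel 4 is reported; once
                    # channel_change reaches 16 it wraps back around to channel 1.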
for c in cmds:
if c.strip() == '':
continue
cmd_list = c.strip().split(' ')
if len(cmd_list) == 1:
cmd, args = cmd_list[0], []
else:
cmd, args = cmd_list[0], cmd_list[1].split(',')
self.log.debug(f"{cmd} {args}")
try:
cmd_fn = self.cmds.get(cmd)
if cmd_fn is None:
self.log.warning(f"Command {cmd} is not registered")
continue
resp = cmd_fn(*args)
self.log.info(f"Sent response: {resp}")
except TypeError as e:
self.log.error(f"Command error: {e}")
continue
if resp is not None:
conn.send(resp.encode())
def get_idn(self):
return ','.join([
"LSCI",
"MODEL372",
self.sn,
'0.0'
])
def get_reading(self, chan, unit='S'):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
return self.channels[chan_index].get_reading(unit=unit)
def get_reading_status(self, chan):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
bit_string = "000"
return bit_string
def get_scanner(self):
msg_string = '{:02d},{} '.format(int(self.active_channel), str(self.scanner))
self.log.debug(f"get_scanner: {msg_string}")
return msg_string
def set_scanner(self, chan, auto):
if not 0 <= int(chan) <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
self.active_channel = int(chan)
self.scanner = int(auto)
def get_input_parameters(self, chan):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
return self.channels[chan_index].get_inset()
def set_input_parameters(self, chan, *args):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
args = map(int, args)
self.channels[chan_index].set_inset(*args)
def get_tlimit(self, chan):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
temp = str(self.channels[chan_index].temp_limit)
return temp
def set_tlimit(self, chan, limit):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
self.channels[chan_index].temp_limit = float(limit)
def get_rdgpwr(self, chan):
if chan == 'A':
chan_index = 0
else:
chan_index = int(chan)
if not 0 <= chan_index <= self.num_channels:
self.log.warning(f"chan num must be A or between 1 and {self.num_channels}")
return
return self.channels[chan_index].get_excitation_power()
def get_outmode(self, heater_output):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
return self.heaters[int(heater_output)].get_output_mode()
def set_outmode(self, heater_output, *args):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
args = map(str, args)
        self.heaters[int(heater_output)].set_output_mode(int(heater_output), *args)
def get_htr(self):
"""Random sample heater value."""
return f"+{np.random.rand():.4f}E+00"
def get_htrset(self, heater_output):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be 0 or 1")
return
return self.heaters[int(heater_output)].get_heater_setup()
def set_htrset(self, heater_output, *args):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be 0 or 1")
return
args = map(int, args)
        self.heaters[int(heater_output)].set_heater_setup(int(heater_output), *args)
def get_mout(self, heater_output):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
return str(self.heaters[int(heater_output)].output_value)
def set_mout(self, heater_output, value):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
self.heaters[int(heater_output)].output_value = float(value)
def get_ramp(self, heater_output):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
ramp_string = '{},{}'.format(str(self.heaters[int(heater_output)].ramp),
str(self.heaters[int(heater_output)].rate))
return ramp_string
def set_ramp(self, heater_output, enabled, value):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
if int(enabled) in [0,1]:
self.heaters[int(heater_output)].ramp = int(enabled)
else:
self.log.warning("0 = ramping off, 1 = ramping on")
return
if 0.001 <= float(value) <= 100:
self.heaters[int(heater_output)].rate = float(value)
else:
self.log.warning("setpoint ramp rate must be between 0.001 and 100 k/min")
return
def get_ramp_status(self, heater_output):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
return str(self.heaters[int(heater_output)].status)
def get_heater_range(self, heater_output):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
return str(self.heaters[int(heater_output)].rng)
def set_heater_range(self, heater_output, heater_range):
if not 0 <= int(heater_output) <= 2:
self.log.warning("heater output must be between 0 and 2")
return
self.heaters[int(heater_output)].rng = int(heater_range)
def get_setpoint(self, heater_output):
if not 0 <= int(heater_output) < 2:
self.log.warning("heater output must be 0 or 1")
return
return str(self.heaters[int(heater_output)].setpoint)
def set_setpoint(self, heater_output, setp):
if not 0 <= int(heater_output) < 2:
self.log.warning("heater output must be 0 or 1")
return
self.heaters[int(heater_output)].setpoint = setp
def get_still(self):
return str(self.heaters[2].output_value)
def set_still(self, output):
self.heaters[2].output_value = float(output)
    def get_pid(self, heater_output=0):
        h = self.heaters[int(heater_output)]
        return ','.join([str(h.P), str(h.I), str(h.D)])
def set_pid(self, heater_output, p, i, d):
if not 0 <= int(heater_output) < 2:
self.log.warning("heater output must be 0 or 1")
return
if 0.0 <= float(p) <= 1000:
self.heaters[int(heater_output)].P = float(p)
else:
self.log.warning("P value must be between 0.0 and 1000")
return
if 0 <= float(i) <= 10000:
self.heaters[int(heater_output)].I = float(i)
else:
self.log.warning("I value must be between 0 and 10000")
return
if 0 <= float(d) <= 2500:
self.heaters[int(heater_output)].D = float(d)
else:
self.log.warning("D value must be between 0 and 2500")
return
def get_curve_header(self, curve):
curve_index = int(curve)
if not 1 <= curve_index <= 59:
self.log.warning(f"curve num must be between 1 and 59")
return
return self.curves[curve_index].get_header()
def set_curve_header(self, curve, *args):
curve_index = int(curve)
if not 21 <= curve_index <= 59:
self.log.warning(f"curve num must be between 21 and 59")
return
args = map(str, args)
self.curves[curve_index].set_header(*args)
def get_curve_data(self, curve, index):
curve_index = int(curve)
if not 1 <= curve_index <= 59:
self.log.warning(f"curve num must be between 1 and 59")
return
return '{},{},{}'.format(str(self.curves[curve_index].data[int(index)][0]),
str(self.curves[curve_index].data[int(index)][1]),
str(self.curves[curve_index].data[int(index)][2]))
def set_curve_data(self, curve, index, units, kelvin, curvature=0):
curve_index = int(curve)
if not 21 <= curve_index <= 59:
self.log.warning(f"curve num must be between 21 and 59")
return
self.curves[curve_index].data[int(index)][0] = float(units)
self.curves[curve_index].data[int(index)][1] = float(kelvin)
self.curves[curve_index].data[int(index)][2] = float(curvature)
def delete_curve(self, curve):
curve_index = int(curve)
if not 21 <= curve_index <= 59:
self.log.warning(f"curve num must be between 21 and 59")
return
for i in range(1, 201):
self.curves[curve_index].data[i][0] = 0
self.curves[curve_index].data[i][1] = 0
self.curves[curve_index].data[i][2] = 0
class ChannelSim:
def __init__(self, channel_num, name):
self.log = logging.getLogger()
self.channel_num = channel_num
self.name = name
self.temp_limit = 0
self.enabled = 1
self.dwell = 10
self.pause = 3
self.curve_number = 0
self.tempco = 1
if channel_num == 'A':
self.mode = 1
else:
self.mode = 0
self.excitation = 1
self.autorange = 0
self.rng = 1
self.cs_shunt = 0
self.units = 1
self.value = 100
def get_intype(self):
return ','.join([
str(self.mode),
str(self.excitation),
str(self.autorange),
str(self.rng),
str(self.cs_shunt),
str(self.units)
])
def set_intype(self, mode, excitation, autorange, rng, cs_shunt, units):
if mode in [0,1]:
self.log.debug(f"Setting mode to {mode}")
self.mode = mode
if excitation in range(1,13) and self.mode == 0:
self.excitation = excitation
if excitation in range(1,23) and self.mode == 1:
self.excitation = excitation
if autorange in [0,1,2]:
self.autorange = autorange
if rng in range(1,23):
self.rng = rng
if cs_shunt in [0,1]:
self.cs_shunt = cs_shunt
if units in [1,2]:
self.units = units
def set_value(self, value):
self.log.debug(f"Setting value to {value}")
self.value = float(value)
def get_reading(self, unit='S'):
if self.enabled == 0:
rv = 0
else:
rv = np.random.normal(self.value)
return str(rv)
def get_inset(self):
return ','.join([
str(self.enabled),
str(self.dwell),
str(self.pause),
str(self.curve_number),
str(self.tempco)
])
def set_inset(self, enabled, dwell, pause, curve_number, tempco):
if enabled in [0, 1]:
self.log.debug(f"Setting mode to {enabled}")
self.enabled = enabled
if dwell in range(1, 201) and self.channel_num != 'A':
self.dwell = dwell
if pause in range(3, 201):
self.pause = pause
if curve_number in range(59):
self.curve_number = curve_number
if tempco in [1, 2]:
self.tempco = tempco
def get_excitation_power(self):
if self.mode == 0:
pwr = (voltage_excitation_key[int(self.excitation)]**2) / (float(self.get_reading()))
if self.mode == 1:
pwr = (current_excitation_key[int(self.excitation)]**2) * (float(self.get_reading()))
return str(pwr)
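    # Worked example of the excitation-power arithmetic above (sketch, values
    # assumed): in voltage mode (mode 0) with excitation code 7 (2 mV) and a
    # reading of 100 ohms, P = V**2 / R = (2.0e-3)**2 / 100 = 4.0e-8 W; in
    # current mode (mode 1) with code 13 (1 uA) and the same reading,
    # P = I**2 * R = (1.0e-6)**2 * 100 = 1.0e-10 W.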
class Heater:
def __init__(self, output):
        self.log = logging.getLogger()
        self.output = output
self.mode = 0
self.input = 1
self.powerup = 0
self.polarity = 0
self.filter = 0
self.delay = 5
self.rng = 0
self.resistance = 1
self.max_current = 0
self.max_user_current = 0
self.display = 1
self.output_value = 0
self.ramp = 0
self.rate = 0
self.status = 0
self.setpoint = 0
self.P = 0
self.I = 0
self.D = 0
def get_output_mode(self):
return ','.join([
str(self.mode),
str(self.input),
str(self.powerup),
str(self.polarity),
str(self.filter),
str(self.delay)
])
def set_output_mode(self, output, mode, input, powerup, polarity, filter, delay):
if int(output) in [0,1,2]:
self.log.debug(f"Setting output to {output}")
self.output = int(output)
if (int(output) == 0 and int(mode) in [0,2,3,5]) or (int(output) == 1 and int(mode) in [0,2,3,5,6]) or (int(output) == 2 and int(mode) in [0,1,2,4]):
self.mode = int(mode)
if input == 'A':
self.input = input
elif int(input) in range(1,17):
self.input = int(input)
if int(powerup) in [0, 1]:
self.powerup = int(powerup)
if int(polarity) in [0, 1] and int(output) != 1:
self.polarity = int(polarity)
if int(filter) in [0,1]:
self.filter = int(filter)
if int(delay) in range(1,256):
self.delay = int(delay)
def get_heater_setup(self):
return ','.join([
str(self.resistance),
str(self.max_current),
str(self.max_user_current),
str(self.display)
])
def set_heater_setup(self, output, resistance, max_current, max_user_current, display):
if output in [0,1]:
self.output = output
if (output == 0 and resistance in range(1,2001)) or (output == 1 and resistance in [1,2]):
self.resistance = resistance
if max_current in [0,1,2]:
self.max_current = max_current
# if max_user_current in []
# not sure what condition to use
self.max_user_current = max_user_current
if display in [1,2]:
self.display = display
class Curve:
def __init__(self, num):
self.curve_num = num
self.name = "User Curve"
self.serial_number = None
self.format = 4
self.limit = 40.0
self.coefficient = 1
self.get_header()
self.units = np.random.random([201])
self.temp = np.random.random([201])
self.curvature = 0
self.data = {i: [self.units[i], self.temp[i], self.curvature] for i in range(1,201)}
def get_header(self):
return ','.join([
str(self.name),
str(self.serial_number),
str(self.format),
str(self.limit),
str(self.coefficient)
])
def set_header(self, name, serial_number, format, limit, coefficient):
self.name = name
if len(str(serial_number)) <= 10:
self.serial_number = serial_number
if int(format) in [3,4,7]:
self.format = format
        if 0 <= float(limit) < 1000:
self.limit = float(limit)
if int(coefficient) in [1,2]:
self.coefficient = int(coefficient)
def make_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=50000,
help="Port which simulator will wait for a connection. "
"If taken, it will test several consecutive ports "
"until it finds one that is free.")
parser.add_argument('--num-channels', type=int, default=16,
help="Number of channels which the simulator will have.")
parser.add_argument('--sn', type=str, default='LSASIM',
help="Serial number for the device")
parser.add_argument('--log-file', type=str, default=None,
help="File where logs are written")
parser.add_argument('-o', '--log-stdout', action="store_true",
help="Log to stdout")
return parser
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
level = os.environ.get('LOGLEVEL', 'info')
log_level = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR
}[level]
format_string = '%(asctime)-15s [%(levelname)s]: %(message)s'
# logging.basicConfig(level=log_level, format=format_string)
formatter = logging.Formatter(format_string)
log = logging.getLogger()
log.setLevel(log_level)
if args.log_file is None or args.log_stdout:
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
log.addHandler(consoleHandler)
if args.log_file is not None:
fileHandler = logging.FileHandler(args.log_file)
fileHandler.setFormatter(formatter)
log.addHandler(fileHandler)
ls = Lakeshore372_Simulator(args.port,
num_channels=args.num_channels,
sn=args.sn)
start_time = time.time() # begins timer as soon as simulator is created
ls.run()
| 33.25641 | 157 | 0.530245 |
c2fbc956a069c7139ebc5f8b5074082d11cb5eff | 3,248 | py | Python | scraper.py | creat00r/EarningSurpriseScraper | db01b1ba23e649bc513f9dc87a2aaca288a208c3 | [
"MIT"
] | null | null | null | scraper.py | creat00r/EarningSurpriseScraper | db01b1ba23e649bc513f9dc87a2aaca288a208c3 | [
"MIT"
] | null | null | null | scraper.py | creat00r/EarningSurpriseScraper | db01b1ba23e649bc513f9dc87a2aaca288a208c3 | [
"MIT"
] | null | null | null |
def ES(symbol):  # get earnings surprises for the last four quarters
    # example: ES("AAPL")
import time
from selenium import webdriver
#from selenium import driver
#CLICK ANY BUTTONS
#driver.get("https://www.nasdaq.com/market-activity/stocks/aapl/earnings")
#more_buttons = driver.find_elements_by_class_name("moreLink")
#for x in range(len(more_buttons)):
# if more_buttons[x].is_displayed():
# driver.execute_script("arguments[0].click();", more_buttons[x])
from bs4 import BeautifulSoup
#import requests
import time
#import pandas as pd
import re
header = {'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36"}
url = "https://www.nasdaq.com/market-activity/stocks/"+str(symbol)+"/earnings"
#page_source = requests.get(url, headers= header)#used to be in geckodriver-master/geckodriver
#driver = webdriver.Firefox()
driver = webdriver.Firefox(executable_path ='/home/or/Desktop/here/geckodriver')
#from selenium import webdriver
driver.get(url)
driver.execute_script("window.scrollTo(0,document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
print ('\n' + '\n')
#time.sleep(1)#only human after all
#time.sleep(.3)
page_source = driver.page_source
#print (page_source)
soup = BeautifulSoup(page_source, features="lxml")#'lxml')
driver.quit()
QE = soup.find_all("td", {"class": "earnings-surprise__table-cell"})
#print ("this is qe:" + str(QE))
    sum = 0  # sum of % earnings surprises for the last 4 quarters
    av = 0  # average earnings surprise
    var = 0  # surprise variance
    sup = 0  # float version of the earnings surprise string
    sups = []  # list of the last 4 quarters' earnings surprises
    j = 0  # loop counter
#for surprise in QE:
for i in range (3, len(QE), 4):
sup = re.findall(r"[-+]?\d*\.\d+|\d+|[-+]?\d*\.\d+\d+",str(QE[i]))#The site has earning surprises in various formats, both single digit, double digit and integer.
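        # e.g. (illustrative): for '<td class="earnings-surprise__table-cell">-3.5</td>'
        # the findall above returns ['-3.5'], and for an integer cell such as '12' it returns ['12'].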
#print (sup)
#sup = surprise.strip('<td class="earnings-surprise__table-cell">)').strip("</td>")
#if (len(sup)>1):
# continue
s = float(sup[0])
#if (s < 0 ):
# break#underperformed
sups.append(s)
#print(s)
sum += s
i += 4
j += 1
#print (sups )
print("quarter surprises for " + symbol + ": " + str(sups[0]) + ", " + str(sups[1]) + ", " + str(sups[2]) + ", " + str(sups[3]) )
for i in range(0,4):
av += sups[i]
av = av/4
print (av)
for i in range(0,4):
var += (abs(sups[i]-av))**2#variance is the average of squared differences from the mean.
var = var / 4
print(var)
si = ((av**2) / var)#One potential "sustainability" indicator
if (av<0):
si = -si
    # A high earnings-surprise average and low variance indicate growth and stability, so the higher this number is the better.
    # It is desirable for a stock to beat its profit projections (high earnings surprise average) and to do so consistently (low variance), which indicates a company is ready to expand.
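    # Worked example (illustrative numbers): surprises of 4, 6, 5, 5 give av = 5.0
    # and var = 0.5, so si = 5.0**2 / 0.5 = 50.0; the same average from 0, 10, 2, 8
    # gives var = 17.0 and si ~= 1.47, i.e. a much less "sustainable" profile.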
result = [sups, av, var, si]
return result
| 36.909091 | 187 | 0.637931 |
c79e67969299905ae09959eea517176ca47ace1e | 321 | py | Python | 25.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | 25.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | 25.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | times = int(input())
lst = []
for i in range(times):
k = int(input())
lst.append(k)
for word in lst:
word = int(word)
if word >= 1900:
print("Division 1")
elif word >= 1600:
print("Division 2")
elif word >= 1400:
print("Division 3")
else:
print("Division 4") | 17.833333 | 27 | 0.523364 |
3bb5ca9abd3e86c6a9fb59c33743fc3f5907dd82 | 9,049 | py | Python | tests/test_exceptions.py | HazenBabcock/pytest-qt | ae4947d5a5ab5799c5153f4fe2c0af938bd07ffe | [
"MIT"
] | 5 | 2015-02-07T14:37:53.000Z | 2021-03-19T11:00:40.000Z | tests/test_exceptions.py | HazenBabcock/pytest-qt | ae4947d5a5ab5799c5153f4fe2c0af938bd07ffe | [
"MIT"
] | 1 | 2015-01-07T15:35:47.000Z | 2015-01-07T15:35:47.000Z | tests/test_exceptions.py | HazenBabcock/pytest-qt | ae4947d5a5ab5799c5153f4fe2c0af938bd07ffe | [
"MIT"
] | 1 | 2018-08-18T17:01:37.000Z | 2018-08-18T17:01:37.000Z | from pytestqt.exceptions import capture_exceptions, format_captured_exceptions
import pytest
import sys
@pytest.mark.parametrize('raise_error', [False, True])
def test_catch_exceptions_in_virtual_methods(testdir, raise_error):
"""
Catch exceptions that happen inside Qt virtual methods and make the
tests fail if any.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile('''
from pytestqt.qt_compat import qt_api
class Receiver(qt_api.QtCore.QObject):
def event(self, ev):
if {raise_error}:
raise ValueError('mistakes were made')
return qt_api.QtCore.QObject.event(self, ev)
def test_exceptions(qtbot):
v = Receiver()
app = qt_api.QApplication.instance()
app.sendEvent(v, qt_api.QtCore.QEvent(qt_api.QtCore.QEvent.User))
app.sendEvent(v, qt_api.QtCore.QEvent(qt_api.QtCore.QEvent.User))
app.processEvents()
'''.format(raise_error=raise_error))
result = testdir.runpytest()
if raise_error:
result.stdout.fnmatch_lines([
'*Qt exceptions in virtual methods:*',
'*ValueError: mistakes were made*',
'*1 failed*',
])
assert 'pytest.fail' not in '\n'.join(result.outlines)
else:
result.stdout.fnmatch_lines('*1 passed*')
def test_format_captured_exceptions():
try:
raise ValueError('errors were made')
except ValueError:
exceptions = [sys.exc_info()]
obtained_text = format_captured_exceptions(exceptions)
lines = obtained_text.splitlines()
assert 'Qt exceptions in virtual methods:' in lines
assert 'ValueError: errors were made' in lines
@pytest.mark.parametrize('no_capture_by_marker', [True, False])
def test_no_capture(testdir, no_capture_by_marker):
"""
Make sure options that disable exception capture are working (either marker
or ini configuration value).
:type testdir: TmpTestdir
"""
if no_capture_by_marker:
marker_code = '@pytest.mark.qt_no_exception_capture'
else:
marker_code = ''
testdir.makeini('''
[pytest]
qt_no_exception_capture = 1
''')
testdir.makepyfile('''
import pytest
import sys
from pytestqt.qt_compat import qt_api
QWidget = qt_api.QWidget
QtCore = qt_api.QtCore
# PyQt 5.5+ will crash if there's no custom exception handler installed
sys.excepthook = lambda *args: None
class MyWidget(QWidget):
def mouseReleaseEvent(self, ev):
raise RuntimeError
{marker_code}
def test_widget(qtbot):
w = MyWidget()
qtbot.addWidget(w)
qtbot.mouseClick(w, QtCore.Qt.LeftButton)
'''.format(marker_code=marker_code))
res = testdir.runpytest()
res.stdout.fnmatch_lines(['*1 passed*'])
def test_no_capture_preserves_custom_excepthook(testdir):
"""
Capturing must leave custom excepthooks alone when disabled.
:type testdir: TmpTestdir
"""
testdir.makepyfile('''
import pytest
import sys
from pytestqt.qt_compat import qt_api
QWidget = qt_api.QWidget
QtCore = qt_api.QtCore
def custom_excepthook(*args):
sys.__excepthook__(*args)
sys.excepthook = custom_excepthook
@pytest.mark.qt_no_exception_capture
def test_no_capture(qtbot):
assert sys.excepthook is custom_excepthook
def test_capture(qtbot):
assert sys.excepthook is not custom_excepthook
''')
res = testdir.runpytest()
res.stdout.fnmatch_lines(['*2 passed*'])
def test_exception_capture_on_call(testdir):
"""
Exceptions should also be captured during test execution.
:type testdir: TmpTestdir
"""
testdir.makepyfile('''
import pytest
from pytestqt.qt_compat import qt_api
QWidget = qt_api.QWidget
QtCore = qt_api.QtCore
QEvent = qt_api.QtCore.QEvent
class MyWidget(QWidget):
def event(self, ev):
raise RuntimeError('event processed')
def test_widget(qtbot, qapp):
w = MyWidget()
qapp.postEvent(w, QEvent(QEvent.User))
qapp.processEvents()
''')
res = testdir.runpytest('-s')
res.stdout.fnmatch_lines([
"*RuntimeError('event processed')*",
'*1 failed*',
])
def test_exception_capture_on_widget_close(testdir):
"""
Exceptions should also be captured when widget is being closed.
:type testdir: TmpTestdir
"""
testdir.makepyfile('''
import pytest
from pytestqt.qt_compat import qt_api
QWidget = qt_api.QWidget
QtCore = qt_api.QtCore
QEvent = qt_api.QtCore.QEvent
class MyWidget(QWidget):
def closeEvent(self, ev):
raise RuntimeError('close error')
def test_widget(qtbot, qapp):
w = MyWidget()
test_widget.w = w # keep it alive
qtbot.addWidget(w)
''')
res = testdir.runpytest('-s')
res.stdout.fnmatch_lines([
"*RuntimeError('close error')*",
'*1 error*',
])
@pytest.mark.parametrize('mode', ['setup', 'teardown'])
def test_exception_capture_on_fixture_setup_and_teardown(testdir, mode):
"""
Setup/teardown exception capturing as early/late as possible to catch
all exceptions, even from other fixtures (#105).
:type testdir: TmpTestdir
"""
if mode == 'setup':
setup_code = 'send_event(w, qapp)'
teardown_code = ''
else:
setup_code = ''
teardown_code = 'send_event(w, qapp)'
testdir.makepyfile('''
import pytest
from pytestqt.qt_compat import qt_api
QWidget = qt_api.QWidget
QtCore = qt_api.QtCore
QEvent = qt_api.QtCore.QEvent
QApplication = qt_api.QApplication
class MyWidget(QWidget):
def event(self, ev):
if ev.type() == QEvent.User:
raise RuntimeError('event processed')
return True
@pytest.yield_fixture
def widget(qapp):
w = MyWidget()
{setup_code}
yield w
{teardown_code}
def send_event(w, qapp):
qapp.postEvent(w, QEvent(QEvent.User))
qapp.processEvents()
def test_capture(widget):
pass
'''.format(setup_code=setup_code, teardown_code=teardown_code))
res = testdir.runpytest('-s')
res.stdout.fnmatch_lines([
'*__ ERROR at %s of test_capture __*' % mode,
"*RuntimeError('event processed')*",
'*1 error*',
])
@pytest.mark.qt_no_exception_capture
def test_capture_exceptions_context_manager(qapp):
"""Test capture_exceptions() context manager.
While not used internally anymore, it is still part of the API and therefore
should be properly tested.
"""
from pytestqt.qt_compat import qt_api
from pytestqt.plugin import capture_exceptions
class Receiver(qt_api.QtCore.QObject):
def event(self, ev):
raise ValueError('mistakes were made')
r = Receiver()
with capture_exceptions() as exceptions:
qapp.sendEvent(r, qt_api.QtCore.QEvent(qt_api.QtCore.QEvent.User))
qapp.processEvents()
assert [str(e) for (t, e, tb) in exceptions] == ['mistakes were made']
def test_capture_exceptions_qtbot_context_manager(testdir):
"""Test capturing exceptions in a block by using `capture_exceptions` method provided
by `qtbot`.
"""
testdir.makepyfile('''
import pytest
from pytestqt.qt_compat import qt_api
QWidget = qt_api.QWidget
Signal = qt_api.Signal
class MyWidget(QWidget):
on_event = Signal()
def test_widget(qtbot):
widget = MyWidget()
qtbot.addWidget(widget)
def raise_on_event():
raise RuntimeError("error")
widget.on_event.connect(raise_on_event)
with qtbot.capture_exceptions() as exceptions:
widget.on_event.emit()
assert len(exceptions) == 1
assert str(exceptions[0][1]) == "error"
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
def test_exceptions_to_stderr(qapp, capsys):
"""
Exceptions should still be reported to stderr.
"""
called = []
from pytestqt.qt_compat import qt_api
class MyWidget(qt_api.QWidget):
def event(self, ev):
called.append(1)
raise RuntimeError('event processed')
w = MyWidget()
with capture_exceptions() as exceptions:
qapp.postEvent(w, qt_api.QEvent(qt_api.QEvent.User))
qapp.processEvents()
assert called
del exceptions[:]
_out, err = capsys.readouterr()
assert "raise RuntimeError('event processed')" in err
| 28.545741 | 89 | 0.622942 |
8ceba797990701e0ee8b22847ce34d5002546627 | 2,231 | py | Python | jaguarete_store/views.py | Alexander0z/trabajo-final-django-v2 | 09f394ecc7876bd0c96a43239f049a06c5d05e06 | [
"CC0-1.0"
] | 1 | 2021-06-06T20:27:51.000Z | 2021-06-06T20:27:51.000Z | jaguarete_store/views.py | Alexander0z/trabajo-final-django-v2 | 09f394ecc7876bd0c96a43239f049a06c5d05e06 | [
"CC0-1.0"
] | null | null | null | jaguarete_store/views.py | Alexander0z/trabajo-final-django-v2 | 09f394ecc7876bd0c96a43239f049a06c5d05e06 | [
"CC0-1.0"
] | 1 | 2021-07-06T01:42:45.000Z | 2021-07-06T01:42:45.000Z | from categories.models import Category
from django.http.response import HttpResponseRedirect
from products.models import Product
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth import authenticate, login, logout
from users.models import User
from django.contrib import messages
from .forms import RegisterForm
from django.contrib.auth.decorators import user_passes_test
def index(request):
products = Product.objects.all().order_by('-id')
print('hola')
return render(request,'index.html',{
'products': products
})
def login_view(request):
if request.user.is_authenticated:
return redirect('index')
if request.method =='POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
login(request, user)
messages.success(request, 'Bienvenido {}.'.format(username))
if request.GET.get('next'):
return HttpResponseRedirect(request.GET['next'])
return redirect('index')
else:
messages.error(request, 'Usuario y/o contraseña invalido/s')
return render(request, 'users/login.html', {
'title': 'Iniciar sesión'
})
def logout_view(request):
logout(request)
messages.success(request, 'La sesión se cerró exitosamente')
return redirect('login')
def register(request):
if request.user.is_authenticated:
return redirect('index')
form = RegisterForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
username = form.cleaned_data.get('username')
email = form.cleaned_data.get('email')
password = form.cleaned_data.get('password')
user = User.objects.create_user(username, email, password)
if user:
login(request, user)
messages.success(request, 'Usuario creado')
return redirect('index')
return render(request, 'users/register.html',{
'form': form,
'title': 'Registro'
})
def about(request):
return render(request, 'about.html',{
'title': 'Acerca de...'
})
| 33.80303 | 72 | 0.659794 |
cf581f051234bc9017d7e9ffb8b1fca83e44e70d | 2,377 | py | Python | happyball.py | FreshConsulting/happyball | 52b067c1fb8b340aaeb5cec47ebce90150f33b95 | [
"MIT"
] | null | null | null | happyball.py | FreshConsulting/happyball | 52b067c1fb8b340aaeb5cec47ebce90150f33b95 | [
"MIT"
] | null | null | null | happyball.py | FreshConsulting/happyball | 52b067c1fb8b340aaeb5cec47ebce90150f33b95 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""happyball.py: A basic ball bouncing program to demonstrate programming
during the Fresh Future Innovators Workshop."""
__author__ = "Sean Patterson"
__copyright__ = "Copyright 2019, Fresh Consulting"
__credits__ = ["Randall Tateishi", "Sean McKay"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Sean Patterson"
__email__ = "sean@freshconsulting.com"
import sys, pygame
# Settings for our game area
screen_size = width, height = 900, 600
white_color = 255, 255, 255
# Settings for bouncing happy ball
move_down_by = 5
move_right_by = 5
app_is_running = True
# Startup our environment.
pygame.init()
# This is a little trick to keep Happy Ball from moving too fast.
game_clock=pygame.time.Clock()
frames_per_second = 60
# Build our environment for Happy Ball and put it in there.
screen = pygame.display.set_mode(screen_size)
happy_ball = pygame.image.load('ball.png').convert_alpha()
fresh_background = pygame.image.load('fresh_background.png').convert_alpha()
# The frame holds Happy Ball and that is how we move it around.
happy_ball_frame = happy_ball.get_rect()
# Let's move happy ball around!!!
while app_is_running:
# Pygame continuously responds to events that happen while the app is
# running. The only event we care about right now is if the user wants
# to quit, by closing the window or pressing CTRL + C. In that case
# we close up our environment and exit.
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit()
# Since we weren't told to quit, we clear out any other events that
# might occur (like moving the mouse or keyboard) that would stop
# Happy Ball from moving.
pygame.event.clear()
# Move our ball by moving the frame that holds it.
happy_ball_frame = happy_ball_frame.move(move_right_by, move_down_by)
# Don't move into the nothingness...
if happy_ball_frame.left < 0 or happy_ball_frame.right > width:
move_right_by = -move_right_by
if happy_ball_frame.top < 0 or happy_ball_frame.bottom > height:
move_down_by = -move_down_by
# Now that we know where the ball moved to, redraw this on the screen.
screen.fill(white_color)
screen.blit(fresh_background, [0,0])
screen.blit(happy_ball, happy_ball_frame)
pygame.display.flip()
# Tick our clock to refresh the display.
game_clock.tick(frames_per_second)
| 31.276316 | 76 | 0.755995 |
d7d6ed5b318e3c9cd0e9490c3fd6c85bd0a82f5b | 154 | py | Python | create_box/main.py | ine-rmotr-projects/itp-w1-create-box | 0b3bbec88b2858a10bad3eb9c267ffd839f17bf6 | [
"MIT"
] | null | null | null | create_box/main.py | ine-rmotr-projects/itp-w1-create-box | 0b3bbec88b2858a10bad3eb9c267ffd839f17bf6 | [
"MIT"
] | null | null | null | create_box/main.py | ine-rmotr-projects/itp-w1-create-box | 0b3bbec88b2858a10bad3eb9c267ffd839f17bf6 | [
"MIT"
] | null | null | null | """This is the entry point of the program."""
def create_box(height, width, character):
pass
if __name__ == '__main__':
create_box(3, 4, '*')
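# Hypothetical sketch (not the official solution; the exercise above intentionally
# leaves the body as `pass`): one way create_box could be implemented is
#
#   def create_box(height, width, character):
#       for _ in range(height):
#           print(character * width)
#
#   create_box(3, 4, '*')   # would print three rows of "****"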
| 15.4 | 45 | 0.636364 |
4787fb30faa5e31ccaf72e590ccd251890fff3aa | 212 | py | Python | beneficiaries/beneficiaries/doctype/material_status/test_material_status.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/material_status/test_material_status.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/material_status/test_material_status.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | 1 | 2021-08-31T18:47:58.000Z | 2021-08-31T18:47:58.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Baida and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestMaterialStatus(unittest.TestCase):
pass
| 19.272727 | 44 | 0.764151 |
a17feda15fe8eac649b81dce19cdd5796eb371a0 | 7,089 | py | Python | app/api/event_invoices.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | 3 | 2019-09-05T05:28:49.000Z | 2020-06-10T09:03:37.000Z | app/api/event_invoices.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | null | null | null | app/api/event_invoices.py | akashtalole/python-flask-restful-api | 475d8fd7be1724183716a197aac4257f8fbbeac4 | [
"MIT"
] | null | null | null | import datetime
from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
from flask import jsonify, request
from app.api.bootstrap import api
from app.api.helpers.db import safe_query
from app.api.helpers.permissions import is_admin
from app.api.helpers.query import event_query
from app.api.helpers.utilities import require_relationship
from app.api.schema.event_invoices import EventInvoiceSchema
from app.models import db
from app.models.discount_code import DiscountCode
from app.models.event_invoice import EventInvoice
from app.models.user import User
from app.api.helpers.payment import PayPalPaymentsManager
from app.api.helpers.errors import BadRequestError
from app.api.helpers.db import save_to_db
from app.api.helpers.permissions import jwt_required
from app.api.orders import order_misc_routes
class EventInvoiceList(ResourceList):
"""
List and Create Event Invoices
"""
def before_post(self, args, kwargs, data):
"""
before post method to check for required relationship and proper permission
:param args:
:param kwargs:
:param data:
:return:
"""
require_relationship(['event'], data)
def query(self, view_kwargs):
"""
query method for event invoice list
:param view_kwargs:
:return:
"""
query_ = self.session.query(EventInvoice)
query_ = event_query(self, query_, view_kwargs)
if view_kwargs.get('user_id'):
user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
query_ = query_.join(User).filter(User.id == user.id)
if view_kwargs.get('discount_code_id'):
discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
query_ = query_.join(DiscountCode).filter(DiscountCode.id == discount_code.id)
return query_
view_kwargs = True
methods = ['GET', ]
decorators = (api.has_permission('is_organizer', ), )
schema = EventInvoiceSchema
data_layer = {'session': db.session,
'model': EventInvoice,
'methods': {
'query': query
}}
class EventInvoiceDetail(ResourceDetail):
"""
Event Invoice detail by id
"""
def before_get_object(self, view_kwargs):
"""
before get method to get the resource id for fetching details
:param view_kwargs:
:return:
"""
if view_kwargs.get('event_invoice_identifier'):
event_invoice = safe_query(self, EventInvoice, 'identifier', view_kwargs['event_invoice_identifier'],
'event_invoice_identifier')
view_kwargs['id'] = event_invoice.id
decorators = (is_admin,)
schema = EventInvoiceSchema
data_layer = {'session': db.session,
'model': EventInvoice,
'methods': {
'before_get_object': before_get_object
}}
class EventInvoiceRelationshipRequired(ResourceRelationship):
"""
Event Invoice Relationship for required entities
"""
def before_get_object(self, view_kwargs):
"""
before get method to get the resource id for fetching details
:param view_kwargs:
:return:
"""
if view_kwargs.get('event_invoice_identifier'):
event_invoice = safe_query(self, EventInvoice, 'identifier', view_kwargs['event_invoice_identifier'],
'event_invoice_identifier')
view_kwargs['id'] = event_invoice.id
decorators = (is_admin,)
methods = ['GET', 'PATCH']
schema = EventInvoiceSchema
data_layer = {'session': db.session,
'model': EventInvoice,
'methods': {
'before_get_object': before_get_object
}}
class EventInvoiceRelationshipOptional(ResourceRelationship):
"""
Event Invoice Relationship
"""
def before_get_object(self, view_kwargs):
"""
before get method to get the resource id for fetching details
:param view_kwargs:
:return:
"""
if view_kwargs.get('event_invoice_identifier'):
event_invoice = safe_query(self, EventInvoice, 'identifier', view_kwargs['event_invoice_identifier'],
'event_invoice_identifier')
view_kwargs['id'] = event_invoice.id
decorators = (is_admin,)
schema = EventInvoiceSchema
data_layer = {'session': db.session,
'model': EventInvoice,
'methods': {
'before_get_object': before_get_object
}}
@order_misc_routes.route('/event-invoices/<string:invoice_identifier>/create-paypal-payment', methods=['POST', 'GET'])
@jwt_required
def create_paypal_payment_invoice(invoice_identifier):
"""
Create a paypal payment.
:return: The payment id of the created payment.
"""
try:
return_url = request.json['data']['attributes']['return-url']
cancel_url = request.json['data']['attributes']['cancel-url']
except TypeError:
return BadRequestError({'source': ''}, 'Bad Request Error').respond()
event_invoice = safe_query(db, EventInvoice, 'identifier', invoice_identifier, 'identifier')
status, response = PayPalPaymentsManager.create_payment(event_invoice, return_url, cancel_url)
if status:
return jsonify(status=True, payment_id=response)
else:
return jsonify(status=False, error=response)
@order_misc_routes.route('/event-invoices/<string:invoice_identifier>/charge', methods=['POST', 'GET'])
@jwt_required
def charge_paypal_payment_invoice(invoice_identifier):
"""
Execute (charge) a previously created paypal payment.
:return: The status of the charge.
"""
try:
paypal_payment_id = request.json['data']['attributes']['paypal_payment_id']
paypal_payer_id = request.json['data']['attributes']['paypal_payer_id']
except Exception as e:
return BadRequestError({'source': e}, 'Bad Request Error').respond()
event_invoice = safe_query(db, EventInvoice, 'identifier', invoice_identifier, 'identifier')
# save the paypal payment_id with the order
event_invoice.paypal_token = paypal_payment_id
save_to_db(event_invoice)
# execute the invoice transaction.
status, error = PayPalPaymentsManager.execute_payment(paypal_payer_id, paypal_payment_id)
if status:
# successful transaction hence update the order details.
event_invoice.paid_via = 'paypal'
event_invoice.status = 'paid'
event_invoice.transaction_id = paypal_payment_id
event_invoice.completed_at = datetime.datetime.now()
save_to_db(event_invoice)
return jsonify(status="Charge Successful", payment_id=paypal_payment_id)
else:
# return the error message from Paypal
return jsonify(status="Charge Unsuccessful", error=error)
| 36.353846 | 118 | 0.656933 |
c25006b03bc13043ce425cfc55c91918537d479d | 4,377 | py | Python | pytorch_widedeep/utils/dense_utils.py | yuanzhiKe/pytorch-widedeep | 65465a454de6cd917a0927eb16b0137170ee4dc9 | [
"MIT"
] | null | null | null | pytorch_widedeep/utils/dense_utils.py | yuanzhiKe/pytorch-widedeep | 65465a454de6cd917a0927eb16b0137170ee4dc9 | [
"MIT"
] | null | null | null | pytorch_widedeep/utils/dense_utils.py | yuanzhiKe/pytorch-widedeep | 65465a454de6cd917a0927eb16b0137170ee4dc9 | [
"MIT"
] | null | null | null | import warnings
import numpy as np
import pandas as pd
from sklearn.exceptions import NotFittedError
from ..wdtypes import *
warnings.filterwarnings("ignore")
pd.options.mode.chained_assignment = None
__all__ = ["LabelEncoder"]
class LabelEncoder(object):
"""Class to Label Encode categorical values for multiple columns at once
.. note:: LabelEncoder will automatically add a new category and label for
`unseen` new categories
Parameters
----------
columns_to_encode: List[str], Optional
List of strings containing the names of the columns to encode. If
``None`` all columns of type ``object`` in the dataframe will be label
encoded.
Attributes
----------
encoding_dict: :obj:`Dict`
Dictionary containing the encoding mappings in the format, e.g.
`{'colname1': {'cat1': 0, 'cat2': 1, ...}, 'colname2': {'cat1': 0, 'cat2': 1, ...}, ...}`
inverse_encoding_dict: :obj:`Dict`
Dictionary containing the insverse encoding mappings in the format, e.g.
`{'colname1': {0: 'cat1', 1: 'cat2', ...}, 'colname2': {0: 'cat1', 1: 'cat2', ...}, ...}`
"""
def __init__(self, columns_to_encode: Optional[List[str]] = None):
super(LabelEncoder, self).__init__()
self.columns_to_encode = columns_to_encode
def fit(self, df: pd.DataFrame) -> "LabelEncoder":
"""Creates encoding attributes
"""
df_inp = df.copy()
if self.columns_to_encode is None:
self.columns_to_encode = list(
df_inp.select_dtypes(include=["object"]).columns
)
else:
# sanity check to make sure all categorical columns are in an adequate
# format
for col in self.columns_to_encode:
df_inp[col] = df_inp[col].astype("O")
unique_column_vals = dict()
for c in self.columns_to_encode:
unique_column_vals[c] = df_inp[c].unique()
self.encoding_dict = dict()
for k, v in unique_column_vals.items():
self.encoding_dict[k] = {o: i for i, o in enumerate(unique_column_vals[k])}
self.encoding_dict[k]["unseen"] = len(self.encoding_dict[k])
self.inverse_encoding_dict = dict()
for c in self.encoding_dict:
self.inverse_encoding_dict[c] = {
v: k for k, v in self.encoding_dict[c].items()
}
return self
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Label Encoded the categories in ``columns_to_encode``
"""
try:
self.encoding_dict
except AttributeError:
raise NotFittedError(
"This LabelEncoder instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this LabelEncoder."
)
df_inp = df.copy()
# sanity check to make sure all categorical columns are in an adequate
# format
for col in self.columns_to_encode: # type: ignore
df_inp[col] = df_inp[col].astype("O")
for k, v in self.encoding_dict.items():
original_values = [f for f in v.keys() if f != "unseen"]
df_inp[k] = np.where(df_inp[k].isin(original_values), df_inp[k], "unseen")
df_inp[k] = df_inp[k].apply(lambda x: v[x])
return df_inp
def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Applies the full process
Examples
--------
>>> import pandas as pd
>>> from pytorch_widedeep.utils import LabelEncoder
>>> df = pd.DataFrame({'col1': [1,2,3], 'col2': ['me', 'you', 'him']})
>>> columns_to_encode = ['col2']
>>> encoder = LabelEncoder(columns_to_encode)
>>> encoder.fit_transform(df)
col1 col2
0 1 0
1 2 1
2 3 2
>>> encoder.encoding_dict
{'col2': {'me': 0, 'you': 1, 'him': 2, 'unseen': 3}}
.. note:: a new category (`unseen`) and its label have been created
"""
return self.fit(df).transform(df)
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Returns the original categories
"""
for k, v in self.inverse_encoding_dict.items():
df[k] = df[k].apply(lambda x: v[x])
return df
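# Illustrative round trip (sketch; assumes the same toy DataFrame used in the
# fit_transform docstring above):
#
#   encoder = LabelEncoder(['col2'])
#   encoded = encoder.fit_transform(df)             # 'me'/'you'/'him' -> 0/1/2
#   restored = encoder.inverse_transform(encoded)   # back to the original strings
#
# Categories not seen during fit() are mapped to the extra "unseen" label at
# transform() time.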
| 32.422222 | 97 | 0.581448 |
a366634dd92c16fc459460ebba81cef33488c5ce | 4,466 | py | Python | day_19/part_2.py | Akarys42/aoc-2021 | 610d32777d91caada78036b6ed864dc871901ee3 | [
"MIT"
] | 2 | 2021-12-12T11:31:03.000Z | 2022-01-02T11:32:53.000Z | day_19/part_2.py | Akarys42/aoc-2021 | 610d32777d91caada78036b6ed864dc871901ee3 | [
"MIT"
] | null | null | null | day_19/part_2.py | Akarys42/aoc-2021 | 610d32777d91caada78036b6ed864dc871901ee3 | [
"MIT"
] | 1 | 2021-12-18T05:06:26.000Z | 2021-12-18T05:06:26.000Z | import dataclasses
from decimal import Decimal
from itertools import count, chain, product
from typing import Optional, Literal
REQUIRED_COMMON = 12
AXIS = [*"xyz"]
Axis = Literal["x", "y", "z"]
@dataclasses.dataclass(eq=True, frozen=True)
class Point:
x: int
y: int
z: int
def get(self, axis: Axis) -> int:
return {"x": self.x, "y": self.y, "z": self.z}[axis]
@dataclasses.dataclass()
class Alignment:
axis_mapping: dict[Axis, Axis]
axis_direction: dict[Axis, int]
offsets: dict[Axis, int]
class Scanner:
id_counter = count(0)
def __init__(self, points: list[Point]) -> None:
self.id = next(self.id_counter)
self.points = points
self._distances: Optional[list[set[Decimal]]] = None
self.to_origin = None
@property
def distances(self) -> list[set[Decimal]]:
if not self._distances:
self._distances = [set() for _ in range(len(self.points))]
for i, p1 in enumerate(self.points):
for j, p2 in enumerate(self.points):
self._distances[i].add(
Decimal(
(p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2 + (p1.z - p2.z) ** 2
).sqrt()
)
self._distances[i].remove(Decimal(0))
return self._distances
def find_commons(self, other: "Scanner") -> dict[Point, Point]:
mapping = {}
for i, distances_1 in enumerate(self.distances):
for j, distances_2 in enumerate(other.distances):
if len(distances_1 & distances_2) >= REQUIRED_COMMON - 1:
mapping[self.points[i]] = other.points[j]
return mapping
def align(self, commons: dict[Point, Point]) -> Alignment:
assert len(commons) == 12
axis_mapping = {}
axis_direction = {}
offsets = {}
# Try each possible mapping for each axis
for axis in AXIS:
for mapped_axis, direction in product(AXIS, (-1, 1)):
deltas = set()
for p1, p2 in commons.items():
deltas.add(p2.get(axis) - p1.get(mapped_axis) * direction)
# All the points are correctly aligned
if len(deltas) == 1:
axis_mapping[mapped_axis] = axis
axis_direction[axis] = direction
offsets[axis] = next(iter(deltas))
break
return Alignment(axis_mapping, axis_direction, offsets)
def set_coordinates(self, al: Alignment) -> None:
self.to_origin = [
-1
* al.offsets[al.axis_mapping[axis]]
* al.axis_direction[al.axis_mapping[axis]]
for axis in AXIS
]
new_points = []
for point in self.points:
coords = [
-1
* al.axis_direction[al.axis_mapping[axis]]
* (al.offsets[al.axis_mapping[axis]] - point.get(al.axis_mapping[axis]))
for axis in AXIS
]
new_points.append(Point(*coords))
self.points = new_points
def __repr__(self) -> str:
return f"<Scanner {self.id}>"
scanners: list[Scanner] = []
with open("day_19/input.txt") as file:
points = []
for line in chain(file.readlines(), ("--- end of input ---",)):
# Skip empty lines
if len(line) == 1:
continue
if line.startswith("---"):
if points:
scanners.append(Scanner(points))
points = []
else:
points.append(Point(*map(int, line.split(","))))
scanners[0].to_origin = (0, 0, 0)
unchecked = scanners[1:]
checked = [scanners[0]]
while unchecked:
match = False
for current, other in product(checked, unchecked):
commons = current.find_commons(other)
if len(commons) < REQUIRED_COMMON:
continue
alignment = other.align(commons)
other.set_coordinates(alignment)
assert len(set(current.points) & set(other.points)) == 12
match = True
break
if not match:
print(f"Couldn't find any match! {unchecked = }")
exit(1)
else:
unchecked.remove(other)
checked.append(other)
print(
max(
sum(abs(a - b) for a, b in zip(o1.to_origin, o2.to_origin))
for o1, o2 in product(scanners, scanners)
)
)
| 27.066667 | 88 | 0.545007 |
fe6bd7c24f340fc9dfb522103d87491f01ee60f2 | 3,803 | py | Python | utils/dicomutils.py | Whtie4622/dicom2stl | 35e64e9c80e81a7519407199a6d93c59778ad34c | [
"Apache-2.0"
] | null | null | null | utils/dicomutils.py | Whtie4622/dicom2stl | 35e64e9c80e81a7519407199a6d93c59778ad34c | [
"Apache-2.0"
] | null | null | null | utils/dicomutils.py | Whtie4622/dicom2stl | 35e64e9c80e81a7519407199a6d93c59778ad34c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
"""
Function to load the largest Dicom series in a directory.
It scans the directory recursively and checks each file's header to decide
whether it is a DICOM file, so files are found even when they do not carry
the ".dcm" suffix.
Written by David T. Chen from the National Institute of Allergy
and Infectious Diseases, dchen@mail.nih.gov.
It is covered by the Apache License, Version 2.0:
http://www.apache.org/licenses/LICENSE-2.0
"""
from __future__ import print_function
import sys
import os
import fnmatch
import zipfile
import SimpleITK as sitk
from pydicom.filereader import read_file_meta_info
from pydicom.errors import InvalidDicomError
def testDicomFile(file_path):
"""Test if given file is in DICOM format."""
try:
read_file_meta_info(file_path)
return True
except InvalidDicomError:
return False
def scanDirForDicom(dicomdir):
matches = []
dirs = []
for root, dirnames, filenames in os.walk(dicomdir):
for filename in filenames:
abs_file_path = os.path.join(root, filename)
if not testDicomFile(abs_file_path):
continue
matches.append(abs_file_path)
if root not in dirs:
dirs.append(root)
return (matches, dirs)
def getAllSeries(dirs):
"""Get all the Dicom series in a set of directories."""
isr = sitk.ImageSeriesReader()
seriessets = []
for d in dirs:
series = isr.GetGDCMSeriesIDs(d)
for s in series:
files = isr.GetGDCMSeriesFileNames(d, s)
print(s, d, len(files))
seriessets.append([s, d, files])
return seriessets
def getModality(img):
"""Get an image's modality, as stored in the Dicom meta data."""
modality = ""
if (sitk.Version.MinorVersion() > 8) or (sitk.Version.MajorVersion() > 0):
try:
modality = img.GetMetaData("0008|0060")
except BaseException:
modality = ""
return modality
def loadLargestSeries(dicomdir):
"""
Load the largest Dicom series it finds in a recursive scan of
a directory.
Largest means the one with the most slices. It also returns the modality
of the series.
"""
files, dirs = scanDirForDicom(dicomdir)
seriessets = getAllSeries(dirs)
maxsize = 0
maxindex = -1
count = 0
for ss in seriessets:
size = len(ss[2])
if size > maxsize:
maxsize = size
maxindex = count
count = count + 1
if maxindex < 0:
print("Error: no series found")
return None
isr = sitk.ImageSeriesReader()
ss = seriessets[maxindex]
files = ss[2]
isr.SetFileNames(files)
print("\nLoading series", ss[0], "in directory", ss[1])
img = isr.Execute()
firstslice = sitk.ReadImage(files[0])
modality = getModality(firstslice)
return img, modality
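# Typical use (illustrative sketch; the paths are placeholders):
#
#   img, modality = loadLargestSeries("/path/to/dicom/dir")
#   sitk.WriteImage(img, "volume.nii.gz")
#
# loadLargestSeries() keeps the series with the most slices found in a recursive scan.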
def loadZipDicom(name, tempDir):
""" Unzip a zipfile of dicom images into a temp directory, then
load the series that has the most slices.
"""
print("Reading Dicom zip file:", name)
myzip = zipfile.ZipFile(name, 'r')
try:
myzip.extractall(tempDir)
except BaseException:
print("Zip extract failed")
return loadLargestSeries(tempDir)
#
# Main (test code)
#
if __name__ == "__main__":
print("")
print("dicomutils.py")
print(sys.argv[1])
# img = loadLargestSeries(sys.argv[1])
# print (img)
# sys.exit(0)
files, dirs = scanDirForDicom(sys.argv[1])
print("")
print("files")
print(files)
print("")
print("dirs")
print(dirs)
print("series")
seriessets = getAllSeries(dirs)
for ss in seriessets:
print(ss[0], " ", ss[1])
print(len(ss[2]))
print("")
| 24.22293 | 78 | 0.632395 |
d5d2654be1ad312fee08dc1fe969500697c12c1f | 10,280 | py | Python | src/python/paperai/query.py | personx000/paperai | f23bb99b6cb0522a600487286c73b570740d795c | [
"Apache-2.0"
] | 743 | 2020-07-21T18:46:37.000Z | 2022-03-28T22:27:02.000Z | src/python/paperai/query.py | gobbletown/paperai | 5c0efd7931d287f3f372fecb57f5d31e5957ac2b | [
"Apache-2.0"
] | 56 | 2020-07-24T20:17:26.000Z | 2022-03-17T01:44:26.000Z | src/python/paperai/query.py | gobbletown/paperai | 5c0efd7931d287f3f372fecb57f5d31e5957ac2b | [
"Apache-2.0"
] | 72 | 2020-07-22T16:23:58.000Z | 2022-03-18T20:32:42.000Z | """
Query module
"""
import datetime
import re
import sys
import html2markdown
import mdv
from txtai.pipeline import Tokenizer
from .highlights import Highlights
from .models import Models
class Query:
"""
Methods to query an embeddings index.
"""
@staticmethod
def markdown(text):
"""
Converts html text to markdown.
Args:
text: html text
Returns:
text as markdown
"""
# Remove rel attributes as they are not supported by html2markdown
text = re.sub(r' rel=".+?">', ">", text)
# Convert html to markdown
text = html2markdown.convert(text)
# Decode [<>&] characters
return text.replace("<", "<").replace(">", ">").replace("&", "&")
@staticmethod
def escape(text):
"""
Escapes text to work around issues with mdv double escaping characters.
Args:
text: input text
Returns:
escaped text
"""
text = text.replace("<", "¿")
text = text.replace(">", "Ñ")
text = text.replace("&", "ž")
return text
@staticmethod
def unescape(text):
"""
Un-escapes text to work around issues with mdv double escaping characters.
Args:
text: input text
Returns:
unescaped text
"""
text = text.replace("¿", "<")
text = text.replace("Ñ", ">")
text = text.replace("ž", "&")
return text
@staticmethod
def render(text, theme="592.2129", html=True, tab_length=0):
"""
Renders input text to formatted text ready to send to the terminal.
Args:
text: input html text
Returns:
text formatted for print to terminal
"""
if html:
# Convert html to markdown
text = Query.markdown(text)
text = Query.escape(text)
text = mdv.main(
text, theme=theme, c_theme="953.3567", cols=180, tab_length=tab_length
)
if html:
text = Query.unescape(text)
return text.strip()
@staticmethod
def search(embeddings, cur, query, topn, threshold):
"""
Executes an embeddings search for the input query. Each returned result is resolved
to the full section row.
Args:
embeddings: embeddings model
cur: database cursor
query: query text
topn: number of documents to return
threshold: require at least this score to include result
Returns:
search results
"""
if query == "*":
return []
# Default threshold if None
threshold = threshold if threshold is not None else 0.6
results = []
# Get list of required and prohibited tokens
must = [
token.strip("+")
for token in query.split()
if token.startswith("+") and len(token) > 1
]
mnot = [
token.strip("-")
for token in query.split()
if token.startswith("-") and len(token) > 1
]
# Tokenize search query
query = Tokenizer.tokenize(query)
# Retrieve topn * 5 to account for duplicate matches
for uid, score in embeddings.search(query, topn * 5):
if score >= threshold:
cur.execute("SELECT Article, Text FROM sections WHERE id = ?", [uid])
# Get matching row
sid, text = cur.fetchone()
# Add result if:
# - all required tokens are present, or there are no required tokens, AND
# - all prohibited tokens are absent, or there are no prohibited tokens
if (
not must or all(token.lower() in text.lower() for token in must)
) and (
not mnot or all(token.lower() not in text.lower() for token in mnot)
):
# Save result
results.append((uid, score, sid, text))
return results
@staticmethod
def highlights(results, topn):
"""
Builds a list of highlights for the search results. Returns top ranked sections by importance
over the result list.
Args:
results: search results
topn: number of highlights to extract
Returns:
top ranked sections
"""
sections = {}
for uid, score, _, text in results:
# Filter out lower scored results
if score >= 0.35:
sections[text] = (uid, text)
# Return up to 5 highlights
return Highlights.build(sections.values(), min(topn, 5))
@staticmethod
def documents(results, topn):
"""
Processes search results and groups by article.
Args:
results: search results
topn: number of documents to return
Returns:
results grouped by article
"""
documents = {}
# Group by article
for _, score, article, text in results:
if article not in documents:
documents[article] = set()
documents[article].add((score, text))
# Sort based on section id, which preserves original order
for uid in documents:
documents[uid] = sorted(list(documents[uid]), reverse=True)
# Get documents with top n best sections
topn = sorted(
documents, key=lambda k: max([x[0] for x in documents[k]]), reverse=True
)[:topn]
return {uid: documents[uid] for uid in topn}
@staticmethod
def all(cur):
"""
Gets a list of all article ids.
Args:
cur: database cursor
Returns:
list of all ids as a dict
"""
cur.execute("SELECT Id FROM articles")
return {row[0]: None for row in cur.fetchall()}
@staticmethod
def authors(authors):
"""
Formats a short authors string
Args:
authors: full authors string
Returns:
short author string
"""
if authors:
authors = authors.split("; ")[0]
if "," in authors:
authors = authors.split(",")[0]
else:
authors = authors.split()[-1]
return f"{authors} et al"
return None
@staticmethod
def date(date):
"""
Formats a date string.
Args:
date: input date string
Returns:
formatted date
"""
if date:
date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
# 1/1 dates had no month/day specified, use only year
if date.month == 1 and date.day == 1:
return date.strftime("%Y")
return date.strftime("%Y-%m-%d")
return None
@staticmethod
def text(text):
"""
Formats match text.
Args:
text: input text
Returns:
formatted text
"""
if text:
# Remove reference links ([1], [2], etc)
text = re.sub(r"\s*[\[(][0-9, ]+[\])]\s*", " ", text)
# Remove •
text = text.replace("•", "")
# Remove http links
text = re.sub(r"http.+?\s", " ", text)
return text
@staticmethod
def query(embeddings, db, query, topn, threshold):
"""
Executes a query against the embeddings model.
Args:
embeddings: embeddings model
db: open SQLite database
query: query string
topn: number of query results
threshold: query match score threshold
"""
# Default to 10 results if not specified
topn = topn if topn else 10
cur = db.cursor()
print(Query.render(f"#Query: {query}", theme="729.8953") + "\n")
# Query for best matches
results = Query.search(embeddings, cur, query, topn, threshold)
# Extract top sections as highlights
print(Query.render("# Highlights"))
for highlight in Query.highlights(results, int(topn / 5)):
print(Query.render(f"## - {Query.text(highlight)}"))
print()
# Get results grouped by document
documents = Query.documents(results, topn)
print(Query.render("# Articles") + "\n")
# Print each result, sorted by max score descending
for uid in sorted(
documents, key=lambda k: sum([x[0] for x in documents[k]]), reverse=True
):
cur.execute(
"SELECT Title, Published, Publication, Entry, Id, Reference FROM articles WHERE id = ?",
[uid],
)
article = cur.fetchone()
print(f"Title: {article[0]}")
print(f"Published: {Query.date(article[1])}")
print(f"Publication: {article[2]}")
print(f"Entry: {article[3]}")
print(f"Id: {article[4]}")
print(f"Reference: {article[5]}")
# Print top matches
for score, text in documents[uid]:
print(
Query.render(f"## - ({score:.4f}): {Query.text(text)}", html=False)
)
print()
@staticmethod
def run(query, topn=None, path=None, threshold=None):
"""
Executes a query against an index.
Args:
query: input query
topn: number of results
path: model path
threshold: query match score threshold
"""
# Load model
embeddings, db = Models.load(path)
# Query the database
Query.query(embeddings, db, query, topn, threshold)
# Free resources
Models.close(db)
if __name__ == "__main__":
if len(sys.argv) > 1:
Query.run(
sys.argv[1],
int(sys.argv[2]) if len(sys.argv) > 2 else None,
sys.argv[3] if len(sys.argv) > 3 else None,
float(sys.argv[4]) if len(sys.argv) > 4 else None,
)
| 25.894207 | 104 | 0.521595 |
afd7326ff4fd417f73a980183d93c1e2f12ce1db | 3,190 | py | Python | configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py | Naoki-Wake/mmaction2 | a2032605db82509744a18d993c94a06feb1efd15 | [
"Apache-2.0"
] | 648 | 2021-06-24T19:33:09.000Z | 2022-03-31T06:27:24.000Z | configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py | jayleicn/mmaction2-1 | 0a6fde1abb8403f1f68b568f5b4694c6f828e27e | [
"Apache-2.0"
] | 53 | 2021-07-01T03:07:52.000Z | 2022-03-27T16:15:29.000Z | configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py | jayleicn/mmaction2-1 | 0a6fde1abb8403f1f68b568f5b4694c6f828e27e | [
"Apache-2.0"
] | 117 | 2021-06-25T01:22:32.000Z | 2022-03-31T08:33:55.000Z | _base_ = [
'../../_base_/models/slowonly_r50.py', '../../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(
type='SGD', lr=0.01, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[90, 130],
warmup='linear',
warmup_by_epoch=True,
warmup_iters=10)
total_epochs = 150
# runtime settings
checkpoint_config = dict(interval=4)
work_dir = ('./work_dirs/slowonly_imagenet_pretrained_r50_8x8x1_150e'
'_kinetics400_rgb')
find_unused_parameters = False
| 32.55102 | 78 | 0.66489 |
67e347f5eb6300f504633834d5bd13ec5fba360b | 851 | py | Python | encuestas/migrations/0005_userprofile.py | cjpm1983/tesoreriapy | 552cf5ab4237ced086b54d5a0552fd607704cec5 | [
"MIT"
] | null | null | null | encuestas/migrations/0005_userprofile.py | cjpm1983/tesoreriapy | 552cf5ab4237ced086b54d5a0552fd607704cec5 | [
"MIT"
] | null | null | null | encuestas/migrations/0005_userprofile.py | cjpm1983/tesoreriapy | 552cf5ab4237ced086b54d5a0552fd607704cec5 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-08-16 07:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('encuestas', '0004_calificacion_curso_persona'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatar', models.ImageField(upload_to='')),
('background', models.ImageField(upload_to='')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 32.730769 | 121 | 0.638073 |
2be8c7d2108d7a0e4f3a5add0eb7c222a9ce2971 | 1,013 | py | Python | darktrace/icon_darktrace/connection/connection.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | darktrace/icon_darktrace/connection/connection.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | darktrace/icon_darktrace/connection/connection.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import insightconnect_plugin_runtime
from .schema import ConnectionSchema, Input
# Custom imports below
from icon_darktrace.util.api import DarkTraceAPI
from insightconnect_plugin_runtime.exceptions import PluginException, ConnectionTestException
class Connection(insightconnect_plugin_runtime.Connection):
def __init__(self):
super(self.__class__, self).__init__(input=ConnectionSchema())
self.client = None
def connect(self, params={}):
self.logger.info("Connect: Connecting...")
self.client = DarkTraceAPI(
params.get(Input.URL),
params.get(Input.API_PUBLIC_TOKEN).get("secretKey"),
params.get(Input.API_PRIVATE_TOKEN).get("secretKey"),
self.logger
)
def test(self):
try:
return {
"success": self.client.get_status() != {}
}
except PluginException as e:
raise ConnectionTestException(cause=e.cause, assistance=e.assistance, data=e.data)
| 33.766667 | 94 | 0.670286 |
d164987319d5454975b8d872e788fcdf296def53 | 50,969 | py | Python | shibgreen/consensus/block_header_validation.py | BTCgreen-Network/shibgreen-blockchain | b1e41e82ad849775543aa36fefc0c0d03e13f6e8 | [
"Apache-2.0"
] | 12 | 2021-11-10T02:52:38.000Z | 2022-03-22T10:19:45.000Z | shibgreen/consensus/block_header_validation.py | BTCgreen-Network/shibgreen-blockchain | b1e41e82ad849775543aa36fefc0c0d03e13f6e8 | [
"Apache-2.0"
] | 13 | 2021-11-16T03:09:34.000Z | 2022-03-09T00:45:05.000Z | shibgreen/consensus/block_header_validation.py | BTCgreen-Network/shibgreen-blockchain | b1e41e82ad849775543aa36fefc0c0d03e13f6e8 | [
"Apache-2.0"
] | 1 | 2022-03-15T08:25:06.000Z | 2022-03-15T08:25:06.000Z | import dataclasses
import logging
import time
from typing import Optional, Tuple
from blspy import AugSchemeMPL
from shibgreen.consensus.block_record import BlockRecord
from shibgreen.consensus.blockchain_interface import BlockchainInterface
from shibgreen.consensus.constants import ConsensusConstants
from shibgreen.consensus.deficit import calculate_deficit
from shibgreen.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from shibgreen.consensus.get_block_challenge import final_eos_is_already_included, get_block_challenge
from shibgreen.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from shibgreen.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from shibgreen.consensus.vdf_info_computation import get_signage_point_vdf_info
from shibgreen.types.blockchain_format.classgroup import ClassgroupElement
from shibgreen.types.blockchain_format.sized_bytes import bytes32
from shibgreen.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot, SubSlotProofs
from shibgreen.types.blockchain_format.vdf import VDFInfo, VDFProof
from shibgreen.types.end_of_slot_bundle import EndOfSubSlotBundle
from shibgreen.types.header_block import HeaderBlock
from shibgreen.types.unfinished_header_block import UnfinishedHeaderBlock
from shibgreen.util.errors import Err, ValidationError
from shibgreen.util.hash import std_hash
from shibgreen.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
# noinspection PyCallByClass
def validate_unfinished_header_block(
constants: ConsensusConstants,
blocks: BlockchainInterface,
header_block: UnfinishedHeaderBlock,
check_filter: bool,
expected_difficulty: uint64,
expected_sub_slot_iters: uint64,
skip_overflow_last_ss_validation: bool = False,
skip_vdf_is_valid: bool = False,
check_sub_epoch_summary=True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
"""
Validates an unfinished header block. This is a block without the infusion VDFs (unfinished)
and without transactions and transaction info (header). Returns (required_iters, error).
This method is meant to validate only the unfinished part of the block. However, the finished_sub_slots
refers to all sub-slots that were finished from the previous block's infusion point, up to this block's
infusion point. Therefore, in the case where this is an overflow block, and the last sub-slot is not yet
released, header_block.finished_sub_slots will be missing one sub-slot. In this case,
skip_overflow_last_ss_validation must be set to True. This will skip validation of end of slots, sub-epochs,
and lead to other small tweaks in validation.
"""
# 1. Check that the previous block exists in the blockchain, or that it is correct
prev_b = blocks.try_block_record(header_block.prev_header_hash)
genesis_block = prev_b is None
if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
if skip_overflow_last_ss_validation and overflow:
if final_eos_is_already_included(header_block, blocks, expected_sub_slot_iters):
skip_overflow_last_ss_validation = False
finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
else:
finished_sub_slots_since_prev = len(header_block.finished_sub_slots) + 1
else:
finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
new_sub_slot: bool = finished_sub_slots_since_prev > 0
can_finish_se: bool = False
can_finish_epoch: bool = False
if genesis_block:
height: uint32 = uint32(0)
assert expected_difficulty == constants.DIFFICULTY_STARTING
assert expected_sub_slot_iters == constants.SUB_SLOT_ITERS_STARTING
else:
assert prev_b is not None
height = uint32(prev_b.height + 1)
if new_sub_slot:
can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
constants,
blocks,
prev_b.height,
prev_b.prev_hash,
prev_b.deficit,
prev_b.sub_epoch_summary_included is not None,
)
else:
can_finish_se = False
can_finish_epoch = False
# 2. Check finished slots that have been crossed since prev_b
ses_hash: Optional[bytes32] = None
if new_sub_slot and not skip_overflow_last_ss_validation:
# Finished a slot(s) since previous block. The first sub-slot must have at least one block, and all
# subsequent sub-slots must be empty
for finished_sub_slot_n, sub_slot in enumerate(header_block.finished_sub_slots):
# Start of slot challenge is fetched from SP
challenge_hash: bytes32 = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
if finished_sub_slot_n == 0:
if genesis_block:
# 2a. check sub-slot challenge hash for genesis block
if challenge_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
else:
assert prev_b is not None
curr: BlockRecord = prev_b
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_challenge_slot_hashes is not None
# 2b. check sub-slot challenge hash for non-genesis block
if not curr.finished_challenge_slot_hashes[-1] == challenge_hash:
print(curr.finished_challenge_slot_hashes[-1], challenge_hash)
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
else:
# 2c. check sub-slot challenge hash for empty slot
if (
not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash()
== challenge_hash
):
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
if genesis_block:
# 2d. Validate that genesis block has no ICC
if sub_slot.infused_challenge_chain is not None:
return None, ValidationError(Err.SHOULD_NOT_HAVE_ICC)
else:
assert prev_b is not None
icc_iters_committed: Optional[uint64] = None
icc_iters_proof: Optional[uint64] = None
icc_challenge_hash: Optional[bytes32] = None
icc_vdf_input = None
if prev_b.deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# There should be no ICC chain if the last block's deficit is 16
# Prev sb's deficit is 0, 1, 2, 3, or 4
if finished_sub_slot_n == 0:
# This is the first sub slot after the last sb, which must have deficit 1-4, and thus an ICC
curr = prev_b
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(constants):
icc_challenge_hash = curr.challenge_block_info_hash
icc_iters_committed = uint64(prev_b.sub_slot_iters - curr.ip_iters(constants))
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
icc_iters_committed = prev_b.sub_slot_iters
icc_iters_proof = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
if prev_b.is_challenge_block(constants):
icc_vdf_input = ClassgroupElement.get_default_element()
else:
icc_vdf_input = prev_b.infused_challenge_vdf_output
else:
# This is not the first sub slot after the last block, so we might not have an ICC
if (
header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit
< constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
):
finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1]
assert finished_ss.infused_challenge_chain is not None
# Only sets the icc iff the previous sub slots deficit is 4 or less
icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash()
icc_iters_committed = prev_b.sub_slot_iters
icc_iters_proof = icc_iters_committed
icc_vdf_input = ClassgroupElement.get_default_element()
# 2e. Validate that there is not icc iff icc_challenge hash is None
assert (sub_slot.infused_challenge_chain is None) == (icc_challenge_hash is None)
if sub_slot.infused_challenge_chain is not None:
assert icc_vdf_input is not None
assert icc_iters_proof is not None
assert icc_challenge_hash is not None
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
# 2f. Check infused challenge chain sub-slot VDF
# Only validate from prev_b to optimize
target_vdf_info = VDFInfo(
icc_challenge_hash,
icc_iters_proof,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
target_vdf_info,
number_of_iterations=icc_iters_committed,
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if not skip_vdf_is_valid:
if (
not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
constants, icc_vdf_input, target_vdf_info, None
)
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if (
sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
)
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
if (
sub_slot.infused_challenge_chain.get_hash()
!= sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash
):
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
else:
# 2h. Check infused challenge sub-slot hash not included for other deficits
if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
# 2i. Check infused challenge sub-slot hash in reward sub-slot
if (
sub_slot.infused_challenge_chain.get_hash()
!= sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash
):
return None, ValidationError(Err.INVALID_ICC_HASH_RC)
else:
# 2j. If no icc, check that the cc doesn't include it
if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
# 2k. If no icc, check that the cc doesn't include it
if sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_RC)
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
assert ses_hash is None # Only one of the slots can have it
ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
# 2l. check sub-epoch summary hash is None for empty slots
if finished_sub_slot_n != 0:
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
return None, ValidationError(Err.INVALID_SUB_EPOCH_SUMMARY_HASH)
if can_finish_epoch and sub_slot.challenge_chain.subepoch_summary_hash is not None:
# 2m. Check new difficulty and ssi
if sub_slot.challenge_chain.new_sub_slot_iters != expected_sub_slot_iters:
return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
if sub_slot.challenge_chain.new_difficulty != expected_difficulty:
return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
else:
# 2n. Check new difficulty and ssi are None if we don't finish epoch
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
if sub_slot.challenge_chain.new_difficulty is not None:
return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
# 2o. Check challenge sub-slot hash in reward sub-slot
if sub_slot.challenge_chain.get_hash() != sub_slot.reward_chain.challenge_chain_sub_slot_hash:
return (
None,
ValidationError(
Err.INVALID_CHALLENGE_SLOT_HASH_RC,
"sub-slot hash in reward sub-slot mismatch",
),
)
eos_vdf_iters: uint64 = expected_sub_slot_iters
cc_start_element: ClassgroupElement = ClassgroupElement.get_default_element()
cc_eos_vdf_challenge: bytes32 = challenge_hash
if genesis_block:
if finished_sub_slot_n == 0:
# First block, one empty slot. prior_point is the initial challenge
rc_eos_vdf_challenge: bytes32 = constants.GENESIS_CHALLENGE
cc_eos_vdf_challenge = constants.GENESIS_CHALLENGE
else:
# First block, but have at least two empty slots
rc_eos_vdf_challenge = header_block.finished_sub_slots[
finished_sub_slot_n - 1
].reward_chain.get_hash()
else:
assert prev_b is not None
if finished_sub_slot_n == 0:
# No empty slots, so the starting point of VDF is the last reward block. Uses
# the same IPS as the previous block, since it's the same slot
rc_eos_vdf_challenge = prev_b.reward_infusion_new_challenge
eos_vdf_iters = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
cc_start_element = prev_b.challenge_vdf_output
else:
# At least one empty slot, so use previous slot hash. IPS might change because it's a new slot
rc_eos_vdf_challenge = header_block.finished_sub_slots[
finished_sub_slot_n - 1
].reward_chain.get_hash()
# 2p. Check end of reward slot VDF
target_vdf_info = VDFInfo(
rc_eos_vdf_challenge,
eos_vdf_iters,
sub_slot.reward_chain.end_of_slot_vdf.output,
)
if not skip_vdf_is_valid and not sub_slot.proofs.reward_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.reward_chain.end_of_slot_vdf,
target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_EOS_VDF)
# 2q. Check challenge chain sub-slot VDF
partial_cc_vdf_info = VDFInfo(
cc_eos_vdf_challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
if genesis_block:
cc_eos_vdf_info_iters = constants.SUB_SLOT_ITERS_STARTING
else:
assert prev_b is not None
if finished_sub_slot_n == 0:
cc_eos_vdf_info_iters = prev_b.sub_slot_iters
else:
cc_eos_vdf_info_iters = expected_sub_slot_iters
# Check that the modified data is correct
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=cc_eos_vdf_info_iters,
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")
if not skip_vdf_is_valid:
# Pass in None for target info since we are only checking the proof from the temporary point,
# but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
if (
not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
constants, cc_start_element, partial_cc_vdf_info, None
)
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
if (
sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
if genesis_block:
# 2r. Check deficit (MIN_SUB.. deficit edge case for genesis block)
if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
return (
None,
ValidationError(
Err.INVALID_DEFICIT,
f"genesis, expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}",
),
)
else:
assert prev_b is not None
if prev_b.deficit == 0:
                # 2s. If the prev sb had deficit 0, reset the deficit to MIN_BLOCKS_PER_CHALLENGE_BLOCK
if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
log.error(
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return (
None,
ValidationError(
Err.INVALID_DEFICIT,
f"expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}, saw "
f"{sub_slot.reward_chain.deficit}",
),
)
else:
# 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0
if sub_slot.reward_chain.deficit != prev_b.deficit:
return None, ValidationError(Err.INVALID_DEFICIT, "deficit is wrong at slot end")
# 3. Check sub-epoch summary
# Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished)
if not skip_overflow_last_ss_validation:
if ses_hash is not None:
# 3a. Check that genesis block does not have sub-epoch summary
if genesis_block:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
"genesis with sub-epoch-summary hash",
),
)
assert prev_b is not None
# 3b. Check that we finished a slot and we finished a sub-epoch
if not new_sub_slot or not can_finish_se:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
f"new sub-slot: {new_sub_slot} finishes sub-epoch {can_finish_se}",
),
)
# 3c. Check the actual sub-epoch is correct
if check_sub_epoch_summary:
expected_sub_epoch_summary = make_sub_epoch_summary(
constants,
blocks,
height,
blocks.block_record(prev_b.prev_hash),
expected_difficulty if can_finish_epoch else None,
expected_sub_slot_iters if can_finish_epoch else None,
)
expected_hash = expected_sub_epoch_summary.get_hash()
if expected_hash != ses_hash:
log.error(f"{expected_sub_epoch_summary}")
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY,
f"expected ses hash: {expected_hash} got {ses_hash} ",
),
)
elif new_sub_slot and not genesis_block:
# 3d. Check that we don't have to include a sub-epoch summary
if can_finish_se or can_finish_epoch:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY,
"block finishes sub-epoch but ses-hash is None",
),
)
# 4. Check if the number of blocks is less than the max
if not new_sub_slot and not genesis_block:
assert prev_b is not None
num_blocks = 2 # This includes the current block and the prev block
curr = prev_b
while not curr.first_in_sub_slot:
num_blocks += 1
curr = blocks.block_record(curr.prev_hash)
if num_blocks > constants.MAX_SUB_SLOT_BLOCKS:
return None, ValidationError(Err.TOO_MANY_BLOCKS)
# If block state is correct, we should always find a challenge here
# This computes what the challenge should be for this block
challenge = get_block_challenge(
constants,
header_block,
blocks,
genesis_block,
overflow,
skip_overflow_last_ss_validation,
)
# 5a. Check proof of space
if challenge != header_block.reward_chain_block.pos_ss_cc_challenge_hash:
log.error(f"Finished slots: {header_block.finished_sub_slots}")
log.error(
f"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} "
f"{header_block.reward_chain_block.signage_point_index}"
f"Prev: {prev_b}"
)
log.error(f"Challenge {challenge} provided {header_block.reward_chain_block.pos_ss_cc_challenge_hash}")
return None, ValidationError(Err.INVALID_CC_CHALLENGE)
# 5b. Check proof of space
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = header_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
q_str: Optional[bytes32] = header_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, cc_sp_hash
)
if q_str is None:
return None, ValidationError(Err.INVALID_POSPACE)
# 6. check signage point index
# no need to check negative values as this is uint 8
if header_block.reward_chain_block.signage_point_index >= constants.NUM_SPS_SUB_SLOT:
return None, ValidationError(Err.INVALID_SP_INDEX)
# Note that required iters might be from the previous slot (if we are in an overflow block)
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
header_block.reward_chain_block.proof_of_space.size,
expected_difficulty,
cc_sp_hash,
)
# 7. check required iters
if required_iters >= calculate_sp_interval_iters(constants, expected_sub_slot_iters):
return None, ValidationError(Err.INVALID_REQUIRED_ITERS)
# 8a. check signage point index 0 has no cc sp
if (header_block.reward_chain_block.signage_point_index == 0) != (
header_block.reward_chain_block.challenge_chain_sp_vdf is None
):
return None, ValidationError(Err.INVALID_SP_INDEX)
# 8b. check signage point index 0 has no rc sp
if (header_block.reward_chain_block.signage_point_index == 0) != (
header_block.reward_chain_block.reward_chain_sp_vdf is None
):
return None, ValidationError(Err.INVALID_SP_INDEX)
sp_iters: uint64 = calculate_sp_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
)
ip_iters: uint64 = calculate_ip_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
required_iters,
)
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
# Blocks with very low required iters are not overflow blocks
assert not overflow
# 9. Check no overflows in the first sub-slot of a new epoch
# (although they are OK in the second sub-slot), this is important
if overflow and can_finish_epoch:
if finished_sub_slots_since_prev < 2:
return None, ValidationError(Err.NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH)
# 10. Check total iters
if genesis_block:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
else:
assert prev_b is not None
if new_sub_slot:
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_b.sub_slot_iters - prev_b.ip_iters(constants))
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
total_iters = uint128(prev_b.total_iters - prev_b.ip_iters(constants))
total_iters = uint128(total_iters + ip_iters)
if total_iters != header_block.reward_chain_block.total_iters:
return (
None,
ValidationError(
Err.INVALID_TOTAL_ITERS,
f"expected {total_iters} got {header_block.reward_chain_block.total_iters}",
),
)
sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - (expected_sub_slot_iters if overflow else 0))
if overflow and skip_overflow_last_ss_validation:
dummy_vdf_info = VDFInfo(
bytes32([0] * 32),
uint64(1),
ClassgroupElement.get_default_element(),
)
dummy_sub_slot = EndOfSubSlotBundle(
ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
None,
RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),
SubSlotProofs(VDFProof(uint8(0), b"", False), None, VDFProof(uint8(0), b"", False)),
)
sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]
else:
sub_slots_to_pass_in = header_block.finished_sub_slots
(
cc_vdf_challenge,
rc_vdf_challenge,
cc_vdf_input,
rc_vdf_input,
cc_vdf_iters,
rc_vdf_iters,
) = get_signage_point_vdf_info(
constants,
sub_slots_to_pass_in,
overflow,
prev_b,
blocks,
sp_total_iters,
sp_iters,
)
# 11. Check reward chain sp proof
if sp_iters != 0:
assert (
header_block.reward_chain_block.reward_chain_sp_vdf is not None
and header_block.reward_chain_sp_proof is not None
)
target_vdf_info = VDFInfo(
rc_vdf_challenge,
rc_vdf_iters,
header_block.reward_chain_block.reward_chain_sp_vdf.output,
)
if not skip_vdf_is_valid and not header_block.reward_chain_sp_proof.is_valid(
constants,
rc_vdf_input,
header_block.reward_chain_block.reward_chain_sp_vdf,
target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_SP_VDF)
rc_sp_hash = header_block.reward_chain_block.reward_chain_sp_vdf.output.get_hash()
else:
# Edge case of first sp (start of slot), where sp_iters == 0
assert overflow is not None
if header_block.reward_chain_block.reward_chain_sp_vdf is not None:
return None, ValidationError(Err.INVALID_RC_SP_VDF)
if new_sub_slot:
rc_sp_hash = header_block.finished_sub_slots[-1].reward_chain.get_hash()
else:
if genesis_block:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
curr = prev_b
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
# 12. Check reward chain sp signature
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
rc_sp_hash,
header_block.reward_chain_block.reward_chain_sp_signature,
):
return None, ValidationError(Err.INVALID_RC_SIGNATURE)
# 13. Check cc sp vdf
if sp_iters != 0:
assert header_block.reward_chain_block.challenge_chain_sp_vdf is not None
assert header_block.challenge_chain_sp_proof is not None
target_vdf_info = VDFInfo(
cc_vdf_challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
if header_block.reward_chain_block.challenge_chain_sp_vdf != dataclasses.replace(
target_vdf_info,
number_of_iterations=sp_iters,
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
if not skip_vdf_is_valid:
if (
not header_block.challenge_chain_sp_proof.normalized_to_identity
and not header_block.challenge_chain_sp_proof.is_valid(constants, cc_vdf_input, target_vdf_info, None)
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
if (
header_block.challenge_chain_sp_proof.normalized_to_identity
and not header_block.challenge_chain_sp_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
header_block.reward_chain_block.challenge_chain_sp_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
else:
assert overflow is not None
if header_block.reward_chain_block.challenge_chain_sp_vdf is not None:
return None, ValidationError(Err.INVALID_CC_SP_VDF)
# 14. Check cc sp sig
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
cc_sp_hash,
header_block.reward_chain_block.challenge_chain_sp_signature,
):
return None, ValidationError(Err.INVALID_CC_SIGNATURE, "invalid cc sp sig")
# 15. Check is_transaction_block
if genesis_block:
if header_block.foliage.foliage_transaction_block_hash is None:
return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK, "invalid genesis")
else:
assert prev_b is not None
# Finds the previous block
curr = prev_b
while not curr.is_transaction_block:
curr = blocks.block_record(curr.prev_hash)
# The first block to have an sp > the last tx block's infusion iters, is a tx block
if overflow:
our_sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - expected_sub_slot_iters)
else:
our_sp_total_iters = uint128(total_iters - ip_iters + sp_iters)
if (our_sp_total_iters > curr.total_iters) != (header_block.foliage.foliage_transaction_block_hash is not None):
return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)
if (our_sp_total_iters > curr.total_iters) != (
header_block.foliage.foliage_transaction_block_signature is not None
):
return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)
# 16. Check foliage block signature by plot key
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
header_block.foliage.foliage_block_data.get_hash(),
header_block.foliage.foliage_block_data_signature,
):
return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)
# 17. Check foliage block signature by plot key
if header_block.foliage.foliage_transaction_block_hash is not None:
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
header_block.foliage.foliage_transaction_block_hash,
header_block.foliage.foliage_transaction_block_signature,
):
return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)
# 18. Check unfinished reward chain block hash
if (
header_block.reward_chain_block.get_hash()
!= header_block.foliage.foliage_block_data.unfinished_reward_block_hash
):
return None, ValidationError(Err.INVALID_URSB_HASH)
# 19. Check pool target max height
if (
header_block.foliage.foliage_block_data.pool_target.max_height != 0
and header_block.foliage.foliage_block_data.pool_target.max_height < height
):
return None, ValidationError(Err.OLD_POOL_TARGET)
# 20a. Check pre-farm puzzle hashes for genesis block.
if genesis_block:
if (
header_block.foliage.foliage_block_data.pool_target.puzzle_hash
!= constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH
):
log.error(f"Pool target {header_block.foliage.foliage_block_data.pool_target} hb {header_block}")
return None, ValidationError(Err.INVALID_PREFARM)
if (
header_block.foliage.foliage_block_data.farmer_reward_puzzle_hash
!= constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
):
return None, ValidationError(Err.INVALID_PREFARM)
else:
        # 20b. If pospace has a pool pk, check pool target signature. Should not check this for genesis block.
if header_block.reward_chain_block.proof_of_space.pool_public_key is not None:
assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is None
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.pool_public_key,
bytes(header_block.foliage.foliage_block_data.pool_target),
header_block.foliage.foliage_block_data.pool_signature,
):
return None, ValidationError(Err.INVALID_POOL_SIGNATURE)
else:
# 20c. Otherwise, the plot is associated with a contract puzzle hash, not a public key
assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is not None
if (
header_block.foliage.foliage_block_data.pool_target.puzzle_hash
!= header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash
):
return None, ValidationError(Err.INVALID_POOL_TARGET)
# 21. Check extension data if applicable. None for mainnet.
# 22. Check if foliage block is present
if (header_block.foliage.foliage_transaction_block_hash is not None) != (
header_block.foliage_transaction_block is not None
):
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
if (header_block.foliage.foliage_transaction_block_signature is not None) != (
header_block.foliage_transaction_block is not None
):
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
if header_block.foliage_transaction_block is not None:
# 23. Check foliage block hash
if header_block.foliage_transaction_block.get_hash() != header_block.foliage.foliage_transaction_block_hash:
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_HASH)
if genesis_block:
# 24a. Check prev block hash for genesis
if header_block.foliage_transaction_block.prev_transaction_block_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
else:
assert prev_b is not None
# 24b. Check prev block hash for non-genesis
curr_b: BlockRecord = prev_b
while not curr_b.is_transaction_block:
curr_b = blocks.block_record(curr_b.prev_hash)
if not header_block.foliage_transaction_block.prev_transaction_block_hash == curr_b.header_hash:
log.error(
f"Prev BH: {header_block.foliage_transaction_block.prev_transaction_block_hash} "
f"{curr_b.header_hash} curr sb: {curr_b}"
)
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
# 25. The filter hash in the Foliage Block must be the hash of the filter
if check_filter:
if header_block.foliage_transaction_block.filter_hash != std_hash(header_block.transactions_filter):
return None, ValidationError(Err.INVALID_TRANSACTIONS_FILTER_HASH)
# 26a. The timestamp in Foliage Block must not be over 5 minutes in the future
if header_block.foliage_transaction_block.timestamp > int(time.time() + constants.MAX_FUTURE_TIME):
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)
if prev_b is not None:
# 26b. The timestamp must be greater than the previous transaction block timestamp
prev_transaction_b = blocks.block_record(header_block.foliage_transaction_block.prev_transaction_block_hash)
assert prev_transaction_b.timestamp is not None
if header_block.foliage_transaction_block.timestamp <= prev_transaction_b.timestamp:
return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)
return required_iters, None # Valid unfinished header block
def validate_finished_header_block(
constants: ConsensusConstants,
blocks: BlockchainInterface,
header_block: HeaderBlock,
check_filter: bool,
expected_difficulty: uint64,
expected_sub_slot_iters: uint64,
check_sub_epoch_summary=True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
"""
Fully validates the header of a block. A header block is the same as a full block, but
without transactions and transaction info. Returns (required_iters, error).
"""
unfinished_header_block = UnfinishedHeaderBlock(
header_block.finished_sub_slots,
header_block.reward_chain_block.get_unfinished(),
header_block.challenge_chain_sp_proof,
header_block.reward_chain_sp_proof,
header_block.foliage,
header_block.foliage_transaction_block,
header_block.transactions_filter,
)
required_iters, validate_unfinished_err = validate_unfinished_header_block(
constants,
blocks,
unfinished_header_block,
check_filter,
expected_difficulty,
expected_sub_slot_iters,
False,
check_sub_epoch_summary=check_sub_epoch_summary,
)
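    # The remaining checks below cover only the finished-header fields: block height and
    # weight against the previous block, the challenge / reward / infused challenge chain
    # infusion-point VDFs, and the reward block hash / transaction-block flag.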
genesis_block = False
if validate_unfinished_err is not None:
return None, validate_unfinished_err
assert required_iters is not None
if header_block.height == 0:
prev_b: Optional[BlockRecord] = None
genesis_block = True
else:
prev_b = blocks.block_record(header_block.prev_header_hash)
new_sub_slot: bool = len(header_block.finished_sub_slots) > 0
ip_iters: uint64 = calculate_ip_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
required_iters,
)
if not genesis_block:
assert prev_b is not None
# 27. Check block height
if header_block.height != prev_b.height + 1:
return None, ValidationError(Err.INVALID_HEIGHT)
# 28. Check weight
if header_block.weight != prev_b.weight + expected_difficulty:
log.error(f"INVALID WEIGHT: {header_block} {prev_b} {expected_difficulty}")
return None, ValidationError(Err.INVALID_WEIGHT)
else:
# 27b. Check genesis block height, weight, and prev block hash
if header_block.height != uint32(0):
return None, ValidationError(Err.INVALID_HEIGHT)
if header_block.weight != constants.DIFFICULTY_STARTING:
return None, ValidationError(Err.INVALID_WEIGHT)
if header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
# RC vdf challenge is taken from more recent of (slot start, prev_block)
if genesis_block:
cc_vdf_output = ClassgroupElement.get_default_element()
ip_vdf_iters = ip_iters
if new_sub_slot:
rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()
else:
rc_vdf_challenge = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
if new_sub_slot:
# slot start is more recent
rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()
ip_vdf_iters = ip_iters
cc_vdf_output = ClassgroupElement.get_default_element()
else:
# Prev sb is more recent
rc_vdf_challenge = prev_b.reward_infusion_new_challenge
ip_vdf_iters = uint64(header_block.reward_chain_block.total_iters - prev_b.total_iters)
cc_vdf_output = prev_b.challenge_vdf_output
# 29. Check challenge chain infusion point VDF
if new_sub_slot:
cc_vdf_challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
# Not first block in slot
if genesis_block:
# genesis block
cc_vdf_challenge = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
# Not genesis block, go back to first block in slot
curr = prev_b
while curr.finished_challenge_slot_hashes is None:
curr = blocks.block_record(curr.prev_hash)
cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
cc_target_vdf_info = VDFInfo(
cc_vdf_challenge,
ip_vdf_iters,
header_block.reward_chain_block.challenge_chain_ip_vdf.output,
)
if header_block.reward_chain_block.challenge_chain_ip_vdf != dataclasses.replace(
cc_target_vdf_info,
number_of_iterations=ip_iters,
):
expected = dataclasses.replace(
cc_target_vdf_info,
number_of_iterations=ip_iters,
)
log.error(f"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}")
log.error(f"Block: {header_block}")
return None, ValidationError(Err.INVALID_CC_IP_VDF)
if (
not header_block.challenge_chain_ip_proof.normalized_to_identity
and not header_block.challenge_chain_ip_proof.is_valid(
constants,
cc_vdf_output,
cc_target_vdf_info,
None,
)
):
log.error(f"Did not validate, output {cc_vdf_output}")
log.error(f"Block: {header_block}")
return None, ValidationError(Err.INVALID_CC_IP_VDF)
if (
header_block.challenge_chain_ip_proof.normalized_to_identity
and not header_block.challenge_chain_ip_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
header_block.reward_chain_block.challenge_chain_ip_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_IP_VDF)
# 30. Check reward chain infusion point VDF
rc_target_vdf_info = VDFInfo(
rc_vdf_challenge,
ip_vdf_iters,
header_block.reward_chain_block.reward_chain_ip_vdf.output,
)
if not header_block.reward_chain_ip_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
header_block.reward_chain_block.reward_chain_ip_vdf,
rc_target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_IP_VDF)
# 31. Check infused challenge chain infusion point VDF
if not genesis_block:
overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
deficit = calculate_deficit(
constants,
header_block.height,
prev_b,
overflow,
len(header_block.finished_sub_slots),
)
if header_block.reward_chain_block.infused_challenge_chain_ip_vdf is None:
# If we don't have an ICC chain, deficit must be 4 or 5
if deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return None, ValidationError(Err.INVALID_ICC_VDF)
else:
assert header_block.infused_challenge_chain_ip_proof is not None
# If we have an ICC chain, deficit must be 0, 1, 2 or 3
if deficit >= constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return (
None,
ValidationError(
Err.INVALID_ICC_VDF,
f"icc vdf and deficit is bigger or equal to {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1}",
),
)
if new_sub_slot:
last_ss = header_block.finished_sub_slots[-1]
assert last_ss.infused_challenge_chain is not None
icc_vdf_challenge: bytes32 = last_ss.infused_challenge_chain.get_hash()
icc_vdf_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()
else:
assert prev_b is not None
if prev_b.is_challenge_block(constants):
icc_vdf_input = ClassgroupElement.get_default_element()
else:
icc_vdf_input = prev_b.infused_challenge_vdf_output
curr = prev_b
while curr.finished_infused_challenge_slot_hashes is None and not curr.is_challenge_block(constants):
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(constants):
icc_vdf_challenge = curr.challenge_block_info_hash
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_vdf_challenge = curr.finished_infused_challenge_slot_hashes[-1]
icc_target_vdf_info = VDFInfo(
icc_vdf_challenge,
ip_vdf_iters,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf.output,
)
if icc_vdf_input is None or not header_block.infused_challenge_chain_ip_proof.is_valid(
constants,
icc_vdf_input,
header_block.reward_chain_block.infused_challenge_chain_ip_vdf,
icc_target_vdf_info,
):
return None, ValidationError(Err.INVALID_ICC_VDF, "invalid icc proof")
else:
if header_block.infused_challenge_chain_ip_proof is not None:
return None, ValidationError(Err.INVALID_ICC_VDF)
# 32. Check reward block hash
if header_block.foliage.reward_block_hash != header_block.reward_chain_block.get_hash():
return None, ValidationError(Err.INVALID_REWARD_BLOCK_HASH)
# 33. Check reward block is_transaction_block
if (
header_block.foliage.foliage_transaction_block_hash is not None
) != header_block.reward_chain_block.is_transaction_block:
return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)
return required_iters, None
| 47.903195 | 120 | 0.640193 |
4299983e535919c1e7cd881d874a61d2d3ee1550 | 15,172 | py | Python | tests/features/steps/intersections.py | darsovit/pyRayTracerChallenge | 5ca833c3606c67c3113d91e5de0750cbfd73660d | [
"MIT"
] | 2 | 2020-05-13T20:55:23.000Z | 2020-10-26T02:54:40.000Z | tests/features/steps/intersections.py | darsovit/pyRayTracerChallenge | 5ca833c3606c67c3113d91e5de0750cbfd73660d | [
"MIT"
] | null | null | null | tests/features/steps/intersections.py | darsovit/pyRayTracerChallenge | 5ca833c3606c67c3113d91e5de0750cbfd73660d | [
"MIT"
] | 1 | 2020-05-15T02:18:15.000Z | 2020-05-15T02:18:15.000Z | #! python
#
#
from behave import given,then,when
from renderer.bolts import IdentifyHit, EPSILON, Vector
from math import isclose
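# behave step definitions for the ray tracer intersection features; each decorator
# pattern below matches Gherkin lines such as (illustrative only):
#   When i ← intersection(3.5, s)
#   Then i.t = 3.5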
@given(u'{intersectionvar:w} ← intersection({time:S}, {objectvar:w})')
@when(u'{intersectionvar:w} ← intersection({time:S}, {objectvar:w})')
def step_impl(context, intersectionvar, time, objectvar):
print(u'STEP: When {} ← intersection({}, {})'.format(intersectionvar, time, objectvar))
assert objectvar in context.result
determineNumeric = context.helpers['determineNumeric']
context.result[intersectionvar] = {'time': determineNumeric(time), 'object': context.result[objectvar]}
@then(u'{intersectionvar:w}.t = {expectedtime:g}')
def step_impl(context, intersectionvar, expectedtime):
print(u'STEP: Then {}.t = {}'.format(intersectionvar, expectedtime))
assert intersectionvar in context.result
result = context.result[intersectionvar]['time']
assert expectedtime == result, 'Expected intersection {} time to be {}, found it was {}'.format(intersectionvar, expectedtime, result)
@then(u'{intersectionvar:w}.object = {objectvar:w}')
def step_impl(context, intersectionvar, objectvar):
print(u'STEP: Then {}.object = {}'.format(intersectionvar, objectvar))
assert intersectionvar in context.result
assert context.result[intersectionvar]['object'] == context.result[objectvar], 'Expected intersection {} object to be {}, found it was {}'.format( intersectionvar, context.result[objectvar], context.result[intersectionvar]['object'])
@given(u'{intersectionvar:w} ← intersection({time:g}, {objectvar:w})')
def step_impl(context, intersectionvar, time, objectvar):
print(u'STEP: Given {} ← intersection({}, {})'.format(intersectionvar, time, objectvar))
assert objectvar in context.result
context.result[intersectionvar] = {'time': time, 'object': context.result[objectvar]}
@given(u'{var:w} ← intersections({intersection1var:w})')
def step_impl(context, var, intersection1var):
print(u'STEP: When {} ← intersections({})'.format(var, intersection1var))
assert intersection1var in context.result
context.result[var] = [ context.result[intersection1var] ]
@then(u'{var:w}[{instance:d}].t = {timeval:g}')
def step_impl(context, var, instance, timeval):
print(u'STEP: Then {}[{}].t = {}'.format(var, instance, timeval))
assert var in context.result
result = context.result[var][instance]['time']
assert timeval == result, 'Expected time of {}[{}] to be {}, found {} instead'.format(var, instance, timeval, result)
@when(u'{var:w} ← intersections({intersection1var:w}, {intersection2var})')
@given(u'{var:w} ← intersections({intersection1var:w}, {intersection2var:w})')
def step_impl(context, var, intersection1var, intersection2var):
print(u'STEP: Given {} ← intersections({}, {})'.format(var, intersection1var, intersection2var))
assert intersection1var in context.result
assert intersection2var in context.result
context.result[var] = [ context.result[intersection1var], context.result[intersection2var] ]
@when(u'{resultvar:w} ← hit({intersectionsvar:w})')
def step_impl(context, resultvar, intersectionsvar):
print(u'STEP: When {} ← hit({})'.format(resultvar, intersectionsvar))
assert intersectionsvar in context.result
context.result[resultvar] = IdentifyHit(context.result[intersectionsvar])
@then(u'{var1:w} = {var2:w}')
def step_impl(context, var1, var2):
print(u'STEP: Then {} = {}'.format(var1, var2))
assert context.result[var1] == context.result[var2], 'Expected {} to be equal to {}'.format(var1, var2)
@then(u'{var:w} is nothing')
def step_impl(context, var):
print(u'STEP: Then {} is nothing'.format(var))
assert var in context.result
assert context.result[var] == None, 'Expected {} in context to be nothing'.format(var)
@given(u'{resultvar:w} ← intersections({var1:w}, {var2:w}, {var3:w}, {var4:w})')
def step_impl(context, resultvar, var1, var2, var3, var4):
print(u'STEP: Given {} ← intersections({}, {}, {}, {})'.format(resultvar, var1, var2, var3, var4))
assert var1 in context.result
assert var2 in context.result
assert var3 in context.result
assert var4 in context.result
context.result[resultvar] = [ context.result[var1]
, context.result[var2]
, context.result[var3]
, context.result[var4] ]
@then(u'{intersectsvar:w}[{instance:d}].object = {objectvar:w}')
def step_impl(context, intersectsvar, instance, objectvar):
print(u'STEP: Then {}[{}].object = {}'.format(intersectsvar, instance, objectvar))
assert intersectsvar in context.result
assert objectvar in context.result
result = context.result[intersectsvar][instance]['object']
expected = context.result[objectvar]
assert expected == result, 'Expected {}[{}].object to be {}, but found {} instead'.format(intersectsvar, instance, expected, result)
@when(u'{compsvar:w} ← prepare_computations({intersectionvar:w}, {rayvar:w})')
def step_impl(context, compsvar, intersectionvar, rayvar):
print(u'STEP: When {} ← prepare_computations({}, {})'.format(compsvar, intersectionvar, rayvar))
assert intersectionvar in context.result
assert rayvar in context.result
context.result[compsvar] = context.result[intersectionvar]['object'].PrepareComputations(context.result[rayvar], context.result[intersectionvar]['time'])
@when(u'{compsvar:w} ← prepare_computations({intersectionvar:w}, {rayvar:w}, {intersectionsvar:w})')
def step_impl(context, compsvar, intersectionvar, rayvar, intersectionsvar):
print(u'STEP: When {} ← prepare_computations({}, {}, {})'.format(compsvar, intersectionvar, rayvar, intersectionsvar))
assert intersectionvar in context.result
assert intersectionsvar in context.result
assert rayvar in context.result
context.result[compsvar] = context.result[intersectionvar]['object'].PrepareComputations(context.result[rayvar], context.result[intersectionvar]['time'], context.result[intersectionsvar])
@when(u'{compsvar:w} ← prepare_computations({intersectionsvar:w}[{instance:d}], {rayvar:w}, {intersections2var:w})')
def step_impl(context, compsvar, intersectionsvar, instance, rayvar, intersections2var):
print(u'STEP: When {} ← prepare_computations({}[{}], {}, {})'.format(compsvar, intersectionsvar, instance, rayvar, intersections2var))
assert intersectionsvar in context.result
assert intersectionsvar == intersections2var
assert len(context.result[intersectionsvar]) > instance
assert rayvar in context.result
context.result[compsvar] = context.result[intersectionsvar][instance]['object'].PrepareComputations(context.result[rayvar], context.result[intersectionsvar][instance]['time'], context.result[intersectionsvar])
@then(u'{compsvar:w}.t = {intersectionvar:w}.t')
def step_impl(context, compsvar, intersectionvar):
print(u'STEP: Then {}.t = {}.t'.format(compsvar, intersectionvar))
assert compsvar in context.result
assert intersectionvar in context.result
expected = context.result[intersectionvar]['time']
result = context.result[compsvar]['time']
assert expected == result, 'Expected time in intersection {} to match time in computations {}, but {} != {}'.format(intersectionvar, compsvar, expected, result)
@then(u'{compsvar:w}.object = {intersectionvar:w}.object')
def step_impl(context, compsvar, intersectionvar):
print(u'STEP: Then {}.object = {}.object'.format(compsvar, intersectionvar))
assert compsvar in context.result
assert intersectionvar in context.result
expected = context.result[intersectionvar]['object']
result = context.result[compsvar]['object']
assert expected == result, 'Expected object in intersection {} to match object in computations {}, but they are not the same'.format(intersectionvar, compsvar)
@then(u'{compsvar:w}.inside = false')
def step_impl(context, compsvar):
expected = False
print(u'STEP: Then {}.inside = {}'.format(compsvar, expected))
assert compsvar in context.result
result = context.result[compsvar]['inside']
assert expected == result, 'Expected computations {} to indicate inside was {}, but found it as {}'.format(compsvar, expected, result)
@then(u'{compsvar:w}.inside = true')
def step_impl(context, compsvar):
expected = True
print(u'STEP: Then {}.inside = {}'.format(compsvar, expected))
assert compsvar in context.result
result = context.result[compsvar]['inside']
assert expected == result, 'Expected computations {} to indicate inside was {}, but found it as {}'.format(compsvar, expected, result)
@then(u'{compsvar:w}.over_point.z < -EPSILON/2')
def step_impl(context, compsvar):
print(u'STEP: Then {}.over_point.z < -EPSILON/2'.format(compsvar))
assert compsvar in context.result
expected = -EPSILON / 2
result = context.result[compsvar]['over_point'][2]
assert result < expected, 'Expected {}.over_point.z ({}) < -EPSILON / 2 ({})'.format(compsvar, result, expected)
@then(u'{compsvar:w}.under_point.z > EPSILON/2')
def step_impl(context, compsvar):
print(u'STEP: Then {}.under_point.z > EPSILON/2'.format(compsvar))
assert compsvar in context.result
expected = EPSILON / 2
result = context.result[compsvar]['under_point'][2]
assert result > expected, 'Expected {}.under_point.z ({}) < EPSILON / 2 ({})'.format(compsvar, result, expected)
@then(u'{compsvar:w}.point.z < {comps2var}.under_point.z')
def step_impl(context, compsvar, comps2var):
print(u'STEP: Then {}.point.z < {}.under_point.z'.format(compsvar, comps2var))
assert compsvar in context.result
assert comps2var == compsvar
lhs = context.result[compsvar]['point'][2]
rhs = context.result[compsvar]['under_point'][2]
assert lhs < rhs, 'Expected {}.point.z ({}) < {}.under_point.z ({})'.format(compsvar, lhs, compsvar, rhs)
@then(u'{compsvar:w}.point.z > {comps2var:w}.over_point.z')
def step_impl(context, compsvar, comps2var):
print(u'STEP: Then {}.point.z > {}.over_point.z'.format(compsvar, comps2var))
assert compsvar in context.result
assert comps2var in context.result
lhs = context.result[compsvar]['point'][2]
rhs = context.result[comps2var]['over_point'][2]
assert lhs > rhs, 'Expected {}.point.z ({}) > {}.over_point.z ({})'.format(compsvar, lhs, comps2var, rhs)
@then(u'{compsvar:w}.reflectv = vector({x:S}, {y:S}, {z:S})')
def step_impl(context, compsvar, x, y, z):
print(u'STEP: Then {}.reflectv = vector({}, {}, {})'.format(compsvar, x, y, z))
assert compsvar in context.result
determineNumeric = context.helpers['determineNumeric']
expected = Vector( determineNumeric(x), determineNumeric(y), determineNumeric(z) )
result = context.result[compsvar]['reflectv']
assert expected.compare(result), 'Expected {}.reflectv to equal Vector({}, {}, {}) = {}, but it is {}'.format(compsvar, x, y, z, expected, result)
@then(u'{compsvar:w}.n1 = {expected:g}')
def step_impl(context, compsvar, expected):
print(u'STEP: Then {}.n1 = {}'.format(compsvar, expected))
assert compsvar in context.result
result = context.result[compsvar]['n1']
assert isclose(expected, result), 'Expected Computations {} n1 value to be {}, found it to be {}'.format(compsvar, expected, result)
@then(u'{compsvar:w}.n2 = {expected:g}')
def step_impl(context, compsvar, expected):
print(u'STEP: Then {}.n2 = {}'.format(compsvar, expected))
assert compsvar in context.result
result = context.result[compsvar]['n2']
assert isclose(expected, result), 'Expected Computations {} n2 value to be {}, found it to be {}'.format(compsvar, expected, result)
@then(u'{intersectionsvar:w} is empty')
def step_impl(context, intersectionsvar):
print(u'STEP: Then {} is empty'.format(intersectionsvar))
assert intersectionsvar in context.result
result = len( context.result[intersectionsvar] )
assert result == 0, 'Expected {} is empty, but it has {} elements'.format(intersectionsvar, result)
@given(u'{intersectionsvar:w} ← intersections({time1:g}:{obj1:w}, {time2:g}:{obj2:w}, {time3:g}:{obj3:w}, {time4:g}:{obj4:w}, {time5:g}:{obj5:w}, {time6:g}:{obj6:w})')
def step_impl(context, intersectionsvar, time1, obj1, time2, obj2, time3, obj3, time4, obj4, time5, obj5, time6, obj6):
print(u'STEP: Given {} ← intersections({}:{}, {}:{}, {}:{}, {}:{}, {}:{}, {}:{})'.format(intersectionsvar,
time1, obj1, time2, obj2, time3, obj3, time4, obj4, time5, obj5, time6, obj6))
intersections = []
intersections += [ {'object': context.result[obj1], 'time': time1 } ]
intersections += [ {'object': context.result[obj2], 'time': time2 } ]
intersections += [ {'object': context.result[obj3], 'time': time3 } ]
intersections += [ {'object': context.result[obj4], 'time': time4 } ]
intersections += [ {'object': context.result[obj5], 'time': time5 } ]
intersections += [ {'object': context.result[obj6], 'time': time6 } ]
context.result[intersectionsvar] = intersections
@given(u'{xsvar:w} ← intersections({time1:S}:{obj1:w}, {time2:S}:{obj2:w})')
def step_impl(context, xsvar, time1, obj1, time2, obj2 ):
print(u'STEP: Given {} ← intersections({}:{}, {}:{})'.format(xsvar, time1, obj1, time2, obj2))
assert obj1 in context.result
assert obj2 in context.result
determineNumeric = context.helpers['determineNumeric']
intersections = []
intersections += [ {'object': context.result[obj1], 'time': determineNumeric(time1)} ]
intersections += [ {'object': context.result[obj2], 'time': determineNumeric(time2)} ]
context.result[xsvar] = intersections
@given(u'{xsvar:w} ← intersections({time1:S}:{obj1:w})')
def step_impl(context, xsvar, time1, obj1):
print(u'STEP: Given {} ← intersections({}:{})'.format(xsvar, time1, obj1))
assert obj1 in context.result
determineNumeric = context.helpers['determineNumeric']
context.result[xsvar] = [ {'object': context.result[obj1], 'time': determineNumeric(time1)} ]
@given(u'{xsvar:w} ← intersections({time1:g}:{obj1:w}, {time2:g}:{obj2:w}, {time3:g}:{obj3:w}, {time4:g}:{obj4:w})')
def step_impl(context, xsvar, time1, obj1, time2, obj2, time3, obj3, time4, obj4):
print(u'STEP: Given {} ← intersections({}:{}, {}:{}, {}:{}, {}:{})'.format(xsvar, time1, obj1, time2, obj2, time3, obj3, time4, obj4))
assert obj1 in context.result
assert obj2 in context.result
assert obj3 in context.result
assert obj4 in context.result
intersections = []
intersections += [ {'object': context.result[obj1], 'time': time1} ]
intersections += [ {'object': context.result[obj2], 'time': time2} ]
intersections += [ {'object': context.result[obj3], 'time': time3} ]
intersections += [ {'object': context.result[obj4], 'time': time4} ]
context.result[xsvar] = intersections
@when(u'{varname:w} ← schlick({compsvar:w})')
def step_impl(context, varname, compsvar):
print(u'STEP: When {} ← schlick({})'.format(varname, compsvar))
assert compsvar in context.result
assert 'reflectance' in context.result[compsvar]
context.result[varname] = context.result[compsvar]['reflectance']
| 54.971014 | 237 | 0.693383 |
f8afa468b5180fb0ff8f959889875f56e695185b | 3,866 | py | Python | src/evaluation/utils.py | lyonva/Nue | 90680de00b0c76f6bfdbed71b785671e7c3a3f54 | [
"Apache-2.0"
] | null | null | null | src/evaluation/utils.py | lyonva/Nue | 90680de00b0c76f6bfdbed71b785671e7c3a3f54 | [
"Apache-2.0"
] | null | null | null | src/evaluation/utils.py | lyonva/Nue | 90680de00b0c76f6bfdbed71b785671e7c3a3f54 | [
"Apache-2.0"
] | null | null | null | from copy import copy
def get_metrics_dataset(df, metrics, problem, names = None):
"""
Function:
get_metrics_dataset
Description:
From a list of Metrics, returns only those that are applicable to the dataset.
That means:
        1) they match the type of problem, and
        2) MetricX objects use the secondary objectives from the dataset;
           if so, one MetricX is included per secondary objective
Input:
- df,Dataset: Dataset object
- metrics,list: List of Metric objects
- problem,str: Type of problem
- names,list: If not none, only picks the names selected
Output:
List of metric objects that match problem
"""
metrics = get_metrics_problem(metrics, problem)
all_metrics = metrics
if names is not None:
metrics = get_metrics_by_name( metrics, names )
new_metrics = []
for m in metrics:
if m.unifeature:
new_metrics += get_metricx_list( m.__class__, df.secondary )
else:
new_metrics += [copy(m)]
    # Now, if metrics are composite, add dependent sub-metrics
for m in new_metrics:
if hasattr(m, 'composite') and type(m.composite) is list:
m.name += " (" + "+".join( m.composite ) + ")"
m.composite = [ mi for mi in all_metrics if mi.name in m.composite ] # Get metric object
m.composite = get_metrics_dataset(df, m.composite, problem) # Get correct settings
return new_metrics
def get_metrics_problem(metrics, problem):
"""
Function:
get_metrics_problem
Description:
        From a list of Metrics, returns only those that match the given problem type.
Input:
- metrics,list: List of Metric objects
- problem,str: Type of problem
Output:
List of metric objects that match problem
"""
return [ e for e in metrics if (e.problem == problem or e.problem == "both") ]
def evaluate(y, y_pred, X, estimator, metrics):
"""
Function:
evaluate
Description:
        Evaluate all the metrics for a list of predicted values,
        for a given problem type.
Input:
- y,list: List of the true y values.
- y_pred,list: List of predicted y values.
- X,dataframe: The data.
        - estimator: The fitted estimator, passed through to each metric.
        - metrics,list: List of Metric objects.
    Output:
        Dictionary of format { name : score }
"""
return dict( [(m.name, m.evaluate(y, y_pred, X, estimator)) for m in metrics ] )
def get_all_scorers( metrics ):
"""
Function:
get_all_scorers
Description:
Return the scorer of all metrics.
Input:
- metrics,list: List of Metric objects
Output:
        Dictionary of format { name : scorer }
"""
return dict( [(m.name, m.make_scorer()) for m in metrics] )
def get_metrics_by_name(metrics, names):
"""
Function:
get_metrics_by_name
Description:
Return metrics that match the selected names.
Input:
- metrics,list: List of Metric objects
- names,list: List of metric names
Output:
List of metric objects that match names
"""
return [ m for m in metrics if m.name in names ]
def get_metricx_list(type, features):
"""
Function:
get_metricx_list
Description:
Return an instance of a MetricX for each feature name on a list.
Input:
- type,class: The MetricX object to instance.
- features,list: List of feature names.
Output:
List of MetricX objects, one per feature.
"""
return [ type(None, feature = f) for f in features ]
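# Illustrative usage sketch (variable names are hypothetical, not part of this module):
#   ms = get_metrics_dataset(df, all_metrics, "regression", names=["MAE"])
#   scores = evaluate(y_true, y_pred, X, model, ms)  # -> { metric name : score }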
| 34.828829 | 100 | 0.584066 |
565c471061e790c5e812561e8dcf9fd545d93316 | 1,691 | py | Python | spiders/mps/all/guangxi.py | JJYYYY/policy_crawl | e5f7612163c00049f2e6859e81babb3e0f30aca4 | [
"Apache-2.0"
] | 3 | 2020-04-15T07:17:04.000Z | 2020-09-21T13:06:57.000Z | spiders/mps/all/guangxi.py | JJYYYY/policy_crawl | e5f7612163c00049f2e6859e81babb3e0f30aca4 | [
"Apache-2.0"
] | null | null | null | spiders/mps/all/guangxi.py | JJYYYY/policy_crawl | e5f7612163c00049f2e6859e81babb3e0f30aca4 | [
"Apache-2.0"
] | 4 | 2020-03-23T02:09:18.000Z | 2021-04-18T08:30:08.000Z | import re
import time
from pyquery import PyQuery as pq
from policy_crawl.common.fetch import get,post
from policy_crawl.common.save import save
from policy_crawl.common.logger import alllog,errorlog
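# Crawler for the Guangxi Provincial Public Security Department (广西省公安厅) document
# pages on gat.gxzf.gov.cn: parse_index() extracts article ids from a listing page and
# parse_detail() scrapes each article and saves it.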
def parse_detail(html,url):
alllog.logger.info("广西省公安厅: %s"%url)
doc=pq(html)
data={}
data["title"]=doc("title").text()
data["content"]=doc(".major").text().replace("\n","")
data["content_url"]=[item.attr("href") for item in doc(".major a").items()]
try:
# data["publish_time"]=re.findall("(\d{4}年\d{1,2}月\d{1,2}日)",html)[0]
# data["publish_time"]=re.findall("(\d{4}/\d{1,2}/\d{1,2})",html)[0]
data["publish_time"]=re.findall("(\d{4}-\d{1,2}-\d{1,2})",html)[0]
except:
data["publish_time"]=""
        errorlog.logger.error("url:%s publish_time not found"%url)
# if not data["content"]:
# data["content"] = doc(".TRS_Editor").text()
# data["content_url"] = [item.attr("href") for item in doc(".TRS_Editor a").items()]
data["classification"]="广西省公安厅"
data["url"]=url
print(data)
save(data)
def parse_index(html):
ids=re.findall('<a href="../../../n895440/n895445/n895495/c(.+?)/content.html"',html)
for id in ids:
url="http://gat.gxzf.gov.cn/zwgk/jcxxgk/bbmwj/c"+str(id)+"/content.html"
try:
html=get(url)
except:
            errorlog.logger.error("url error:%s"%url)
            continue
parse_detail(html,url)
time.sleep(1)
def main():
for i in range(1,13):
print(i)
url="http://gat.gxzf.gov.cn/n895440/n895445/n895495/index_898744_"+str(i)+".html"
html=get(url)
parse_index(html)
if __name__ == '__main__':
    main()
| 31.314815 | 92 | 0.59728 |
14a6a8c497e8980313fd2f5b476694f939c8172b | 9,046 | py | Python | projects/mechanism_config_files/input_Propane_v7/local_settings.py | AdamPI314/SOHR | eec472ec98c69ce58d8dee1bc5bfc4a2bf9063c6 | [
"MIT"
] | 2 | 2017-08-11T23:29:56.000Z | 2020-07-22T18:13:50.000Z | projects/mechanism_config_files/input_Propane_v7/local_settings.py | AdamPI314/SOHR | eec472ec98c69ce58d8dee1bc5bfc4a2bf9063c6 | [
"MIT"
] | null | null | null | projects/mechanism_config_files/input_Propane_v7/local_settings.py | AdamPI314/SOHR | eec472ec98c69ce58d8dee1bc5bfc4a2bf9063c6 | [
"MIT"
] | null | null | null | """
local settings, include settings from C++ code and fast transitions
"""
from collections import OrderedDict
def get_local_settings():
"""
local settings for C++ codes
"""
setting = {
"system": {
"condition": "cv",
"initializer": "dlsode"
},
"network": {
"merge_chatterings": "yes"
},
"propagator": {
"primary_type": "from_file",
"type": "dlsode",
"sub_type": "time_propagator_cv_s2m_pgt",
"convert_molar_concentration_to_mole_fraction": "no",
"normalize_initial_concentration": "yes"
},
        # trajectory max time, used to solve the reference trajectory
"traj_max_t": 0.779074999626780951,
# trajectory critical time, after which print out more data points
"traj_critical_t": 0.751999999880706205,
# reference time, to a combustion system, this is gonna be the ignition delay time
# for Propane, time when temperature=1800K
"tau": 0.777655955130997,
# time at which using MC to generate pathway list, time=mc_t*tau
"mc_t": 0.25718313951098054,
# beginning time, for pathway or for trajectory, exact time = begin_t*tau
"begin_t": 0.0,
# end time, for pathway or for trajectory, exact time = end_t*tau
# here 0.25718313951098054 is actually 0.2 seconds
"end_t": 1.001,
# "end_t": 0.9,
# species oriented, if true, pick pathways ending with top_n species,
# if False, just top n pathway
"spe_oriented": False,
# condense species path, no reactions
"species_path": False,
# atom followed
"atom_f": "HA3",
"init_s": 60,
# terminal species for file ./setting.json, either None, or [] or [14, 15]
"terminal_spe": [],
# end species index, either None, or [] or [14, 15]
"end_s_idx": [],
# top n path
"top_n_p": 500,
# top n path for gephi to generate coordinates
"top_n_p_gephi": 500,
# top n species
"top_n_s": 10,
# number of trajectory used to generate pathway list running mc simulation
"mc_n_traj": 1000000,
# path integral number of trajectory
"pi_n_traj": 10000,
# number of time points when prepare path integral time points
"pi_n_time": 50,
# tag, M or fraction
"tag": "M"
}
return setting
def get_chattering_species(atom_followed="C"):
"""
return chattering species
    the chattering reaction information is just for reference and will not be used;
    as long as the paired chattering species are provided, it should be fine
    better to keep them in the same order
"""
fast_transitions = [
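        # For reference: with atom_followed="C" this function returns
        # OrderedDict({'1': [60, 78], '2': [61, 80], '3': [87, 90], ...}),
        # built from the species pairs listed below.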
# 1068 549 O2+npropyl=npropyloo
# reactants 9 O2 60 npropyl products 78 npropyloo
# 1069 -549 O2+npropyl=npropyloo
{
"rxn": [1068, 1069],
"spe": {
"H": [60, 78],
"O": [78, 9],
"C": [60, 78],
"HA1": [60, 78],
"HA2": [60, 78],
"HA3": [60, 78],
"HA4": [60, 78]
}
},
# 1096 565 O2+ipropyl=ipropyloo
# reactants 9 O2 61 ipropyl products 80 ipropyloo
# 1097 -565 O2+ipropyl=ipropyloo
{
"rxn": [1096, 1097],
"spe": {
"H": [61, 80],
"O": [80, 9],
"C": [61, 80],
"HA1": [61, 80],
"HA2": [61, 80],
"HA3": [61, 80],
"HA4": [61, 80]
}
},
# 1116 575 O2+QOOH_1=well_1
# reactants 9 O2 87 QOOH_1 products 90 well_1
# 1117 -575 O2+QOOH_1=well_1
{
"rxn": [1116, 1117],
"spe": {
"H": [87, 90],
"O": [90, 9],
"C": [87, 90],
"HA1": [87, 90],
"HA2": [87, 90],
"HA3": [87, 90],
"HA4": [87, 90]
}
},
# 1080 556 npropyloo=QOOH_1 557 npropyloo=QOOH_1
# reactants 78 npropyloo products 87 QOOH_1
# 1081 -556 npropyloo=QOOH_1 -557 npropyloo=QOOH_1
{
"rxn": [1080, 1081],
"spe": {
"H": [78, 87],
"C": [78, 87],
"HA1": [78, 87],
"HA2": [78, 87],
"HA3": [78, 87],
"HA4": [78, 87]
}
},
# 1124 579 O2 + QOOH_2 = well_2
# reactants 9 O2 88 QOOH_2 products 91 well_2
# net_reactants 9 O2 88 QOOH_2 net_products 91 well_2
{
"rxn": [1124, 1125],
"spe": {
"H": [88, 91],
"C": [88, 91],
"HA1": [88, 91],
"HA2": [88, 91],
"HA3": [88, 91],
"HA4": [88, 91]
}
},
# 1146 590 O2 + QOOH_3 = well_3
# reactants 9 O2 89 QOOH_3 products 92 well_3
# net_reactants 9 O2 89 QOOH_3 net_products 92 well_3
{
"rxn": [1146, 1147],
"spe": {
"H": [89, 92],
"C": [89, 92],
"HA1": [89, 92],
"HA2": [89, 92],
"HA3": [89, 92],
"HA4": [89, 92]
}
},
# 1214 624 prod_1=frag_1+OH
# reactants 94 prod_1 products 10 OH 101 frag_1
# net_reactants 94 prod_1 net_products 10 OH 101 frag_1
{
"rxn": [1214, 1215],
"spe": {
"H": [94, 101],
"C": [94, 101],
"HA1": [94, 101],
"HA2": [94, 101],
"HA3": [94, 101],
"HA4": [94, 101]
}
},
# 1042 536 allyloxy=vinoxylmethyl
# reactants 72 allyloxy products 108 vinoxylmethyl
# 1043 -536 allyloxy=vinoxylmethyl
{
"rxn": [1042, 1043],
"spe": {
"H": [72, 108],
"C": [72, 108],
"O": [72, 108],
"HA1": [72, 108],
"HA2": [72, 108],
"HA3": [72, 108],
"HA4": [72, 108]
}
},
# 348 180 C2H5+O2=CH3CH2OO
# reactants 39 C2H5 9 O2 products 50 CH3CH2OO
# 349 -180 C2H5+O2=CH3CH2OO
{
"rxn": [348, 349],
"spe": {
"H": [39, 50],
"C": [39, 50],
"O": [9, 50],
"HA1": [39, 50],
"HA2": [39, 50],
"HA3": [39, 50],
"HA4": [39, 50]
}
},
# 132 69 CH3+O2(+M)=CH3OO(+M)
# reactants 25 CH3 9 O2 products 27 CH3OO
# 133 -69 CH3+O2(+M)=CH3OO(+M)
{
"rxn": [132, 133],
"spe": {
"H": [25, 27],
"C": [25, 27],
"O": [25, 27],
"HA1": [25, 27],
"HA2": [25, 27],
"HA3": [25, 27],
"HA4": [25, 27]
}
}
# # 586 300 O2C2H4OH=CH2CH2OH+O2
# # reactants 85 O2C2H4OH products 54 CH2CH2OH 9 O2
# # 587 -300 O2C2H4OH=CH2CH2OH+O2
# {
# "rxn": [586, 587],
# "spe": {
# "C": [85, 54]
# }
# },
# # 434 224 acetyl+O2=acetylperoxy
# # reactants 9 O2 45 acetyl products 47 acetylperoxy
# # 435 -224 acetyl+O2=acetylperoxy
# {
# "rxn": [434, 435],
# "spe": {
# "C": [45, 47]
# }
# }
]
trapped_species_list = []
for _, r_s in enumerate(fast_transitions):
print(r_s)
if 'spe' not in r_s:
continue
if atom_followed not in r_s['spe']:
continue
if len(r_s['spe'][atom_followed]) != 2:
continue
trapped_species_list.append(
[int(r_s['spe'][atom_followed][0]), int(r_s['spe'][atom_followed][1])])
print(trapped_species_list)
chattering_species = {}
for idx, val in enumerate(trapped_species_list):
print(idx, val)
chattering_species.update({str(idx + 1): val})
chattering_species = OrderedDict(chattering_species)
print(chattering_species)
return chattering_species
if __name__ == '__main__':
get_chattering_species("HA2")
| 31.964664 | 102 | 0.426818 |
e7f1ebcb334a8dd0788111ae95d97cc639e6cc9e | 684 | py | Python | vol/save/fixsave.py | Newer-Team/NewerSMBU | 3f91885234852b5763f5c8815ea14816f3baffff | [
"MIT"
] | 7 | 2021-05-22T21:59:42.000Z | 2022-01-03T16:33:03.000Z | vol/save/fixsave.py | Newer-Team/NewerSMBU | 3f91885234852b5763f5c8815ea14816f3baffff | [
"MIT"
] | null | null | null | vol/save/fixsave.py | Newer-Team/NewerSMBU | 3f91885234852b5763f5c8815ea14816f3baffff | [
"MIT"
] | 2 | 2021-05-24T03:38:18.000Z | 2021-08-12T01:25:36.000Z |
import binascii
import struct
def crc32(dat):
return struct.pack('>i', binascii.crc32(dat))
with open('rp_savedata.dat', 'rb') as f:
data = f.read()
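# Recompute the checksums embedded in the save data (offsets follow the layout assumed
# by this script): a CRC32 of the 0xC-byte header stored at 0xC, one CRC32 per
# 0x204-byte block for 12 blocks, CRC32s over several larger regions, and a final CRC32
# appended at offset 0xB130.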
data = data[:0xC] + crc32(data[:0xC]) + data[0x10:]
for i in range(12):
data = data[:0x210 + 0x204 * i] + crc32(data[0x10 + 0x204 * i : 0x210 + 0x204 * i]) + data[0x214 + 0x204 * i:]
data = data[:0x1840 + 0x3608] + crc32(data[0x1840:0x1840 + 0x3608]) + data[0x1840 + 0x360C:]
data = data[:0x4EF0] + crc32(data[0x4E4C:0x4EF0]) + data[0x4EF4:]
data = data[:0xAE28] + crc32(data[0x4EF4:0xAE28]) + data[0xAE2C:]
data = data[:0xB130] + crc32(data[0xAEC0:0xB130])
with open('rp_savedata.dat', 'wb') as f:
f.write(data)
| 31.090909 | 114 | 0.641813 |
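A self-contained sketch of the checksum-patching pattern used in fixsave.py above; patch_checksum is a hypothetical helper and the offsets below are made up, not real save-file offsets.
import binascii
import struct
def crc32_be(dat):
    # Same four bytes as struct.pack('>i', binascii.crc32(dat)) in the script above,
    # but packed unsigned so it also works with Python 3's unsigned crc32().
    return struct.pack('>I', binascii.crc32(dat) & 0xFFFFFFFF)
def patch_checksum(data, src_start, src_end, dest):
    # Recompute the CRC over data[src_start:src_end] and overwrite the 4 bytes at `dest`.
    return data[:dest] + crc32_be(data[src_start:src_end]) + data[dest + 4:]
blob = bytes(range(64))                    # fabricated bytes, not a real save file
patched = patch_checksum(blob, 0x0, 0xC, 0xC)
assert len(patched) == len(blob)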
ede0f1ce7b32bfc610dca0d6c3bc405e74e20dc8 | 45,047 | py | Python | ttrv/content.py | jspanos/ttrv | 45838659a8184a33d02560e265ad7c3ce05e83e2 | [
"MIT"
] | 248 | 2019-12-12T08:09:27.000Z | 2021-11-14T22:28:01.000Z | ttrv/content.py | jspanos/ttrv | 45838659a8184a33d02560e265ad7c3ce05e83e2 | [
"MIT"
] | 29 | 2019-12-29T20:43:25.000Z | 2021-05-14T07:06:55.000Z | ttrv/content.py | jspanos/ttrv | 45838659a8184a33d02560e265ad7c3ce05e83e2 | [
"MIT"
] | 30 | 2020-03-05T12:59:59.000Z | 2021-08-08T07:02:53.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import time
import logging
from datetime import datetime
from timeit import default_timer as timer
import six
from bs4 import BeautifulSoup
from kitchen.text.display import wrap
from . import exceptions
from .packages import praw
from .packages.praw.errors import InvalidSubreddit
from .packages.praw.helpers import normalize_url
from .packages.praw.handlers import DefaultHandler
_logger = logging.getLogger(__name__)
class Content(object):
def get(self, index, n_cols):
"""
Grab the item at the given index, and format the text to fit a width of
n columns.
"""
raise NotImplementedError
def iterate(self, index, step, n_cols=70):
"""
Return an iterator that starts at the current index and increments
by the given step.
"""
while True:
if step < 0 and index < 0:
# Hack to prevent displaying a submission's post if iterating
# comments in the negative direction
break
try:
yield self.get(index, n_cols=n_cols)
except IndexError:
break
index += step
@property
def range(self):
"""
Return the minimum and maximum valid indices.
"""
raise NotImplementedError
@staticmethod
def flatten_comments(comments, root_level=0):
"""
Flatten a PRAW comment tree while preserving the nested level of each
comment via the `nested_level` attribute.
There are a couple of different ways that the input comment list can be
organized depending on its source:
1. Comments that are returned from the get_submission() api call.
In this case, the comments list will contain only top level
comments and replies will be attached to those comments via
the `comment.replies` property.
2. Comments that are returned from the comments() method on a
MoreComments object. In this case, the api returns all of the
comments and replies as a flat list. We need to sort out which
ones are replies to other comments by looking at the parent_id
parameter and checking if the id matches another comment.
In addition, there is a bug in praw where a MoreComments object that is
also a reply will be added below the comment as a sibling instead of
a child. So it is especially important that this method is robust and
double-checks all of the parent_id's of the comments.
Reference:
https://github.com/praw-dev/praw/issues/391
"""
stack = comments[:]
for item in stack:
item.nested_level = root_level
retval, parent_candidates = [], {}
while stack:
item = stack.pop(0)
# The MoreComments item count should never be zero, discard it if
# it is. Need to look into this further.
if isinstance(item, praw.objects.MoreComments) and item.count == 0:
continue
if item.parent_id:
# Search the list of previous comments for a possible parent
# The match is based off of the parent_id parameter E.g.
# parent.id = c0tprcm
# child.parent_id = t1_c0tprcm
parent = parent_candidates.get(item.parent_id[3:])
if parent:
item.nested_level = parent.nested_level + 1
# Add all of the attached replies to the front of the stack to be
# parsed separately
if hasattr(item, 'replies'):
for n in item.replies:
n.nested_level = item.nested_level + 1
stack[0:0] = item.replies
# The comment is now a potential parent for the items that are
# remaining on the stack.
parent_candidates[item.id] = item
retval.append(item)
return retval
@classmethod
def strip_praw_comment(cls, comment):
"""
Parse through a submission comment and return a dict with data ready to
be displayed through the terminal.
"""
data = {}
data['object'] = comment
if isinstance(comment, praw.objects.MoreComments):
data['type'] = 'MoreComments'
data['level'] = comment.nested_level
data['count'] = comment.count
data['body'] = 'More comments'
data['hidden'] = True
elif hasattr(comment, 'nested_level'):
author = getattr(comment, 'author', '[deleted]')
name = getattr(author, 'name', '[deleted]')
sub = getattr(comment, 'submission', '[deleted]')
sub_author = getattr(sub, 'author', '[deleted]')
sub_name = getattr(sub_author, 'name', '[deleted]')
flair = getattr(comment, 'author_flair_text', '')
permalink = getattr(comment, 'permalink', None)
stickied = getattr(comment, 'stickied', False)
data['type'] = 'Comment'
data['level'] = comment.nested_level
data['body'] = comment.body
data['html'] = comment.body_html
data['created'] = cls.humanize_timestamp(comment.created_utc)
data['score'] = '{0} pts'.format(
'-' if comment.score_hidden else comment.score)
data['author'] = name
data['is_author'] = (name == sub_name)
data['flair'] = flair
data['likes'] = comment.likes
data['gold'] = comment.gilded
data['permalink'] = permalink
data['stickied'] = stickied
data['hidden'] = False
data['saved'] = comment.saved
if comment.edited:
data['edited'] = '(edit {})'.format(
cls.humanize_timestamp(comment.edited))
else:
data['edited'] = ''
else:
# Saved comments don't have a nested level and are missing a couple
# of fields like ``submission``. As a result, we can only load a
# subset of fields to avoid triggering a separate api call to load
# the full comment.
author = getattr(comment, 'author', '[deleted]')
stickied = getattr(comment, 'stickied', False)
flair = getattr(comment, 'author_flair_text', '')
data['type'] = 'SavedComment'
data['level'] = None
data['title'] = '[Comment] {0}'.format(comment.body)
data['comments'] = None
data['url_full'] = comment._fast_permalink
data['url'] = comment._fast_permalink
data['permalink'] = comment._fast_permalink
data['nsfw'] = comment.over_18
data['subreddit'] = six.text_type(comment.subreddit)
data['url_type'] = 'selfpost'
data['score'] = '{0} pts'.format(
'-' if comment.score_hidden else comment.score)
data['likes'] = comment.likes
data['created'] = cls.humanize_timestamp(comment.created_utc)
data['saved'] = comment.saved
data['stickied'] = stickied
data['gold'] = comment.gilded
data['author'] = author
data['flair'] = flair
data['hidden'] = False
if comment.edited:
data['edited'] = '(edit {})'.format(
cls.humanize_timestamp(comment.edited))
else:
data['edited'] = ''
return data
@classmethod
def strip_praw_submission(cls, sub):
"""
Parse through a submission and return a dict with data ready to be
displayed through the terminal.
Definitions:
permalink - URL to the reddit page with submission comments.
url_full - URL that the submission points to.
url - URL that will be displayed on the subreddit page, may be
"selfpost", "x-post submission", "x-post subreddit", or an
external link.
"""
reddit_link = re.compile(
r'https?://(www\.)?(np\.)?redd(it\.com|\.it)/r/.*')
author = getattr(sub, 'author', '[deleted]')
name = getattr(author, 'name', '[deleted]')
flair = getattr(sub, 'link_flair_text', '')
data = {}
data['object'] = sub
data['type'] = 'Submission'
data['title'] = sub.title
data['text'] = sub.selftext
data['html'] = sub.selftext_html or ''
data['created'] = cls.humanize_timestamp(sub.created_utc)
data['created_long'] = cls.humanize_timestamp(sub.created_utc, True)
data['comments'] = '{0} comments'.format(sub.num_comments)
data['score'] = '{0} pts'.format('-' if sub.hide_score else sub.score)
data['author'] = name
data['permalink'] = sub.permalink
data['subreddit'] = six.text_type(sub.subreddit)
data['flair'] = '[{0}]'.format(flair.strip(' []')) if flair else ''
data['url_full'] = sub.url
data['likes'] = sub.likes
data['gold'] = sub.gilded
data['nsfw'] = sub.over_18
data['stickied'] = sub.stickied
data['hidden'] = sub.hidden
data['xpost_subreddit'] = None
data['index'] = None # This is filled in later by the method caller
data['saved'] = sub.saved
if sub.edited:
data['edited'] = '(edit {})'.format(
cls.humanize_timestamp(sub.edited))
data['edited_long'] = '(edit {})'.format(
cls.humanize_timestamp(sub.edited, True))
else:
data['edited'] = ''
data['edited_long'] = ''
if sub.url.split('/r/')[-1] == sub.permalink.split('/r/')[-1]:
data['url'] = 'self.{0}'.format(data['subreddit'])
data['url_type'] = 'selfpost'
elif reddit_link.match(sub.url):
# Strip the subreddit name from the permalink to avoid having
# submission.subreddit.url make a separate API call
url_parts = sub.url.split('/')
data['xpost_subreddit'] = url_parts[4]
data['url'] = 'self.{0}'.format(url_parts[4])
if 'comments' in url_parts:
data['url_type'] = 'x-post submission'
else:
data['url_type'] = 'x-post subreddit'
else:
data['url'] = sub.url
data['url_type'] = 'external'
return data
@staticmethod
def strip_praw_subscription(subscription):
"""
Parse through a subscription and return a dict with data ready to be
displayed through the terminal.
"""
data = {}
data['object'] = subscription
if isinstance(subscription, praw.objects.Multireddit):
data['type'] = 'Multireddit'
data['name'] = subscription.path
data['title'] = subscription.description_md
else:
data['type'] = 'Subscription'
data['name'] = "/r/" + subscription.display_name
data['title'] = subscription.title
return data
@classmethod
def strip_praw_message(cls, msg):
"""
Parse through a message and return a dict with data ready to be
displayed through the terminal. Messages can be of either type
praw.objects.Message or praw.objects.Comment. The comments returned will
contain special fields unique to messages and can't be parsed as normal
comment objects.
"""
author = getattr(msg, 'author', None)
data = {}
data['object'] = msg
if isinstance(msg, praw.objects.Message):
data['type'] = 'Message'
data['level'] = msg.nested_level
data['distinguished'] = msg.distinguished
data['permalink'] = None
data['submission_permalink'] = None
data['subreddit_name'] = None
data['link_title'] = None
data['context'] = None
else:
data['type'] = 'InboxComment'
data['level'] = 0
data['distinguished'] = None
data['permalink'] = msg._fast_permalink
data['submission_permalink'] = '/'.join(data['permalink'].split('/')[:-2])
data['subreddit_name'] = msg.subreddit_name_prefixed
data['link_title'] = msg.link_title
data['context'] = msg.context
data['id'] = msg.id
data['subject'] = msg.subject
data['body'] = msg.body
data['html'] = msg.body_html
data['created'] = cls.humanize_timestamp(msg.created_utc)
data['created_long'] = cls.humanize_timestamp(msg.created_utc, True)
data['recipient'] = msg.dest
data['distinguished'] = msg.distinguished
data['author'] = author.name if author else '[deleted]'
data['is_new'] = msg.new
data['was_comment'] = msg.was_comment
return data
@staticmethod
def humanize_timestamp(utc_timestamp, verbose=False):
"""
Convert a utc timestamp into a human readable relative-time.
"""
timedelta = datetime.utcnow() - datetime.utcfromtimestamp(utc_timestamp)
seconds = int(timedelta.total_seconds())
if seconds < 60:
return 'moments ago' if verbose else '0min'
minutes = seconds // 60
if minutes < 60:
if verbose and minutes == 1:
return '1 minute ago'
elif verbose:
return '%d minutes ago' % minutes
else:
return '%dmin' % minutes
hours = minutes // 60
if hours < 24:
if verbose and hours == 1:
return '1 hour ago'
elif verbose:
return '%d hours ago' % hours
else:
return '%dhr' % hours
days = hours // 24
if days < 30:
if verbose and days == 1:
return '1 day ago'
elif verbose:
return '%d days ago' % days
else:
return '%dday' % days
months = days // 30.4
if months < 12:
if verbose and months == 1:
return '1 month ago'
elif verbose:
return '%d months ago' % months
else:
return '%dmonth' % months
years = months // 12
if verbose and years == 1:
return '1 year ago'
elif verbose:
return '%d years ago' % years
else:
return '%dyr' % years
@staticmethod
def wrap_text(text, width):
"""
Wrap text paragraphs to the given character width while preserving
newlines.
"""
out = []
for paragraph in text.splitlines():
# Wrap returns an empty list when paragraph is a newline. In order
# to preserve newlines we substitute a list containing an empty
# string.
lines = wrap(paragraph, width=width) or ['']
out.extend(lines)
return out
@staticmethod
def extract_links(html):
"""
Extract a list of hyperlinks from an HTML document.
"""
links = []
soup = BeautifulSoup(html, 'html.parser')
for link in soup.findAll('a'):
href = link.get('href')
if not href:
continue
if href.startswith('/'):
href = 'https://www.reddit.com' + href
links.append({'text': link.text, 'href': href})
return links
class SubmissionContent(Content):
"""
Grab a submission from PRAW and lazily store comments to an internal
list for repeat access.
"""
def __init__(self, submission, loader, indent_size=2, max_indent_level=8,
order=None, max_comment_cols=120):
submission_data = self.strip_praw_submission(submission)
comments = self.flatten_comments(submission.comments)
self.indent_size = indent_size
self.max_indent_level = max_indent_level
self.name = submission_data['permalink']
self.order = order
self.query = None
self._loader = loader
self._submission = submission
self._submission_data = submission_data
self._comment_data = [self.strip_praw_comment(c) for c in comments]
self._max_comment_cols = max_comment_cols
@classmethod
def from_url(cls, reddit, url, loader, indent_size=2, max_indent_level=8,
order=None, max_comment_cols=120):
# Reddit forces SSL
url = url.replace('http:', 'https:')
# Sometimes reddit will return a 403 FORBIDDEN when trying to access an
# np link while using OAUTH. Cause is unknown.
url = url.replace('https://np.', 'https://www.')
# Sometimes reddit will return internal links like "context" as
# relative URLs.
if url.startswith('/'):
url = 'https://www.reddit.com' + url
submission = reddit.get_submission(url, comment_sort=order)
return cls(submission, loader, indent_size, max_indent_level, order,
max_comment_cols)
@property
def range(self):
return -1, len(self._comment_data) - 1
def get(self, index, n_cols=70):
"""
Grab the `i`th submission, with the title field formatted to fit inside
of a window of width `n_cols`
"""
if index < -1:
raise IndexError
elif index == -1:
data = self._submission_data
data['split_title'] = self.wrap_text(data['title'], width=n_cols-2)
data['split_text'] = self.wrap_text(data['text'], width=n_cols-2)
data['n_rows'] = len(data['split_title'] + data['split_text']) + 5
data['h_offset'] = 0
else:
data = self._comment_data[index]
indent_level = min(data['level'], self.max_indent_level)
data['h_offset'] = indent_level * self.indent_size
if data['type'] == 'Comment':
width = min(n_cols - data['h_offset'], self._max_comment_cols)
data['split_body'] = self.wrap_text(data['body'], width=width)
data['n_rows'] = len(data['split_body']) + 1
else:
data['n_rows'] = 1
return data
def toggle(self, index, n_cols=70):
"""
Toggle the state of the object at the given index.
If it is a comment, pack it into a hidden comment.
If it is a hidden comment, unpack it.
If it is more comments, load the comments.
"""
data = self.get(index)
if data['type'] == 'Submission':
# Can't hide the submission!
pass
elif data['type'] == 'Comment':
cache = [data]
count = 1
for d in self.iterate(index + 1, 1, n_cols):
if d['level'] <= data['level']:
break
count += d.get('count', 1)
cache.append(d)
comment = {
'type': 'HiddenComment',
'cache': cache,
'count': count,
'level': data['level'],
'body': 'Hidden',
'hidden': True}
self._comment_data[index:index + len(cache)] = [comment]
elif data['type'] == 'HiddenComment':
self._comment_data[index:index + 1] = data['cache']
elif data['type'] == 'MoreComments':
with self._loader('Loading comments'):
# Undefined behavior if using a nested loader here
assert self._loader.depth == 1
comments = data['object'].comments(update=True)
if not self._loader.exception:
comments = self.flatten_comments(comments, data['level'])
comment_data = [self.strip_praw_comment(c) for c in comments]
self._comment_data[index:index + 1] = comment_data
else:
raise ValueError('%s type not recognized' % data['type'])
class SubredditContent(Content):
"""
Grab a subreddit from PRAW and lazily store submissions to an internal
list for repeat access.
"""
def __init__(self, name, submissions, loader, order=None,
max_title_rows=4, query=None, filter_nsfw=False):
self.name = name
self.order = order
self.query = query
self.max_title_rows = max_title_rows
self.filter_nsfw = filter_nsfw
self._loader = loader
self._submissions = submissions
self._submission_data = []
# Verify that content exists for the given submission generator.
# This is necessary because PRAW loads submissions lazily, and
there is no other way to check things like multireddits that
# don't have a real corresponding subreddit object.
try:
self.get(0)
except IndexError:
full_name = self.name
if self.order:
full_name += '/' + self.order
raise exceptions.NoSubmissionsError(full_name)
@classmethod
def from_name(cls, reddit, name, loader, order=None, query=None):
"""
Params:
reddit (praw.Reddit): Instance of the reddit api.
name (text): The name of the desired subreddit, user, multireddit,
etc. In most cases this translates directly from the URL that
reddit itself uses. This is what users will type in the command
prompt when they navigate to a new location.
loader (terminal.loader): Handler for the load screen that will be
displayed when making http requests.
order (text): If specified, the order that posts will be sorted in.
For `top` and `controversial`, you can specify the time frame
by including a dash, e.g. "top-year". If an order is not
specified, it will be extracted from the name.
query (text): Content to search for on the given subreddit or
user's page.
"""
# TODO: This desperately needs to be refactored
# Strip leading, trailing, and redundant slashes
parts = [seg for seg in name.strip(' /').split('/') if seg]
# Check for the resource type, assume /r/ as the default
if len(parts) >= 3 and parts[2] == 'm':
# E.g. /u/civilization_phaze_3/m/multireddit ->
# resource_root = "u/civilization_phaze_3/m"
# parts = ["multireddit"]
resource_root, parts = '/'.join(parts[:3]), parts[3:]
elif len(parts) > 1 and parts[0] in ['r', 'u', 'user', 'domain']:
# E.g. /u/civilization_phaze_3 ->
# resource_root = "u"
# parts = ["civilization_phaze_3"]
#
# E.g. /r/python/top-week ->
# resource_root = "r"
# parts = ["python", "top-week"]
resource_root = parts.pop(0)
else:
resource_root = 'r'
if resource_root == 'user':
resource_root = 'u'
elif resource_root.startswith('user/'):
# Special check for multi-reddit resource roots
# E.g.
# before: resource_root = "user/civilization_phaze_3/m"
# After: resource_root = "u/civilization_phaze_3/m"
resource_root = 'u' + resource_root[4:]
# The parts left should be in one of the following forms:
# [resource]
# [resource, order]
# [resource, user_room, order]
user_rooms = ['overview', 'submitted', 'comments']
private_user_rooms = ['upvoted', 'downvoted', 'hidden', 'saved']
user_room = None
if len(parts) == 1:
# E.g. /r/python
# parts = ["python"]
# resource = "python"
# resource_order = None
resource, resource_order = parts[0], None
elif resource_root == 'u' and len(parts) in [2, 3] \
and parts[1] in user_rooms + private_user_rooms:
# E.g. /u/spez/submitted/top ->
# parts = ["spez", "submitted", "top"]
# resource = "spez"
# user_room = "submitted"
# resource_order = "top"
resource, user_room = parts[:2]
resource_order = parts[2] if len(parts) == 3 else None
elif len(parts) == 2:
# E.g. /r/python/top
# parts = ["python", "top"]
# resource = "python
# resource_order = "top"
resource, resource_order = parts
else:
raise InvalidSubreddit('`{}` is an invalid format'.format(name))
if not resource:
# Praw does not correctly handle empty strings
# https://github.com/praw-dev/praw/issues/615
raise InvalidSubreddit('Subreddit cannot be empty')
# If the order was explicitly passed in, it will take priority over
# the order that was extracted from the name
order = order or resource_order
display_order = order
display_name = '/'.join(['', resource_root, resource])
if user_room and resource_root == 'u':
display_name += '/' + user_room
# Split the order from the period E.g. controversial-all, top-hour
if order and '-' in order:
order, period = order.split('-', 1)
else:
period = None
if query:
# The allowed orders for sorting search results are different
orders = ['relevance', 'top', 'comments', 'new', None]
period_allowed = ['top', 'comments']
else:
orders = ['hot', 'top', 'rising', 'new', 'controversial', 'gilded', None]
period_allowed = ['top', 'controversial']
if order not in orders:
raise InvalidSubreddit('Invalid order `%s`' % order)
if period not in ['all', 'day', 'hour', 'month', 'week', 'year', None]:
raise InvalidSubreddit('Invalid period `%s`' % period)
if period and order not in period_allowed:
raise InvalidSubreddit(
'`%s` order does not allow sorting by period' % order)
# On some objects, praw doesn't allow you to pass arguments for the
# order and period. Instead you need to call special helper functions
# such as Multireddit.get_controversial_from_year(). Build the method
# name here for convenience.
if period:
method_alias = 'get_{0}_from_{1}'.format(order, period)
elif order:
method_alias = 'get_{0}'.format(order)
else:
method_alias = 'get_hot'
# Here's where we start to build the submission generators
if query:
if resource_root == 'u':
search = '/r/{subreddit}/search'
author = reddit.user.name if resource == 'me' else resource
query = 'author:{0} {1}'.format(author, query)
subreddit = None
else:
search = resource_root + '/{subreddit}/search'
subreddit = None if resource == 'front' else resource
reddit.config.API_PATHS['search'] = search
submissions = reddit.search(query, subreddit=subreddit,
sort=order, period=period)
elif resource_root == 'domain':
order = order or 'hot'
submissions = reddit.get_domain_listing(
resource, sort=order, period=period, limit=None)
elif resource_root.endswith('/m'):
redditor = resource_root.split('/')[1]
if redditor == 'me':
if not reddit.is_oauth_session():
raise exceptions.AccountError('Not logged in')
else:
redditor = reddit.user.name
display_name = display_name.replace(
'/me/', '/{0}/'.format(redditor))
multireddit = reddit.get_multireddit(redditor, resource)
submissions = getattr(multireddit, method_alias)(limit=None)
elif resource_root == 'u' and resource == 'me':
if not reddit.is_oauth_session():
raise exceptions.AccountError('Not logged in')
else:
user_room = user_room or 'overview'
order = order or 'new'
period = period or 'all'
method = getattr(reddit.user, 'get_%s' % user_room)
submissions = method(sort=order, time=period, limit=None)
elif resource_root == 'u':
user_room = user_room or 'overview'
if user_room not in user_rooms:
# Tried to access a private room like "u/me/hidden" for a
# different redditor
raise InvalidSubreddit('Unavailable Resource')
order = order or 'new'
period = period or 'all'
redditor = reddit.get_redditor(resource)
method = getattr(redditor, 'get_%s' % user_room)
submissions = method(sort=order, time=period, limit=None)
elif resource == 'front':
if order in (None, 'hot'):
submissions = reddit.get_front_page(limit=None)
elif period:
# For the front page, praw makes you send the period as `t`
# instead of calling reddit.get_hot_from_week()
method_alias = 'get_{0}'.format(order)
method = getattr(reddit, method_alias)
submissions = method(limit=None, params={'t': period})
else:
submissions = getattr(reddit, method_alias)(limit=None)
else:
subreddit = reddit.get_subreddit(resource)
submissions = getattr(subreddit, method_alias)(limit=None)
# For special subreddits like /r/random we want to replace the
# display name with the one returned by the request.
display_name = '/r/{0}'.format(subreddit.display_name)
filter_nsfw = (reddit.user and reddit.user.over_18 is False)
# We made it!
return cls(display_name, submissions, loader, order=display_order,
query=query, filter_nsfw=filter_nsfw)
@property
def range(self):
# Note that for subreddits, the submissions are generated lazily and
# there is no actual "end" index. Instead, we return the bottom index
# that we have loaded so far.
return 0, len(self._submission_data) - 1
def get(self, index, n_cols=70):
"""
Grab the `i`th submission, with the title field formatted to fit inside
of a window of width `n_cols`
"""
if index < 0:
raise IndexError
nsfw_count = 0
while index >= len(self._submission_data):
try:
with self._loader('Loading more submissions'):
submission = next(self._submissions)
if self._loader.exception:
raise IndexError
except StopIteration:
raise IndexError
else:
# Skip NSFW posts based on the reddit user's profile settings.
# If we see 20+ NSFW posts at the beginning, assume the subreddit
# only has NSFW content and abort. This allows us to avoid making
# an additional API call to check if a subreddit is over18 (which
# doesn't work for things like multireddits anyway)
if self.filter_nsfw and submission.over_18:
nsfw_count += 1
if not self._submission_data and nsfw_count >= 20:
raise exceptions.SubredditError(
'You must be over 18+ to view this subreddit')
continue
else:
nsfw_count = 0
if hasattr(submission, 'title'):
data = self.strip_praw_submission(submission)
else:
# when submission is a saved comment
data = self.strip_praw_comment(submission)
data['index'] = len(self._submission_data) + 1
# Add the post number to the beginning of the title
data['title'] = '{0}. {1}'.format(data['index'], data['title'])
self._submission_data.append(data)
# Modifies the original dict, faster than copying
data = self._submission_data[index]
data['split_title'] = self.wrap_text(data['title'], width=n_cols)
if len(data['split_title']) > self.max_title_rows:
data['split_title'] = data['split_title'][:self.max_title_rows-1]
data['split_title'].append('(Not enough space to display)')
data['n_rows'] = len(data['split_title']) + 3
data['h_offset'] = 0
return data
class SubscriptionContent(Content):
def __init__(self, name, subscriptions, loader):
self.name = name
self.order = None
self.query = None
self._loader = loader
self._subscriptions = subscriptions
self._subscription_data = []
try:
self.get(0)
except IndexError:
raise exceptions.SubscriptionError('No content')
# Load 1024 subscriptions up front (one http request's worth)
# For most people this should be all of their subscriptions. This
# allows the user to jump to the end of the page with `G`.
if name != 'Popular Subreddits':
try:
self.get(1023)
except IndexError:
pass
@classmethod
def from_user(cls, reddit, loader, content_type='subreddit'):
if content_type == 'subreddit':
name = 'My Subreddits'
items = reddit.get_my_subreddits(limit=None)
elif content_type == 'multireddit':
name = 'My Multireddits'
# Multireddits are returned as a list
items = iter(reddit.get_my_multireddits())
elif content_type == 'popular':
name = 'Popular Subreddits'
items = reddit.get_popular_subreddits(limit=None)
else:
raise exceptions.SubscriptionError('Invalid type %s' % content_type)
return cls(name, items, loader)
@property
def range(self):
return 0, len(self._subscription_data) - 1
def get(self, index, n_cols=70):
"""
Grab the `i`th object, with the title field formatted to fit
inside of a window of width `n_cols`
"""
if index < 0:
raise IndexError
while index >= len(self._subscription_data):
try:
with self._loader('Loading content'):
subscription = next(self._subscriptions)
if self._loader.exception:
raise IndexError
except StopIteration:
raise IndexError
else:
data = self.strip_praw_subscription(subscription)
self._subscription_data.append(data)
data = self._subscription_data[index]
data['split_title'] = self.wrap_text(data['title'], width=n_cols)
data['n_rows'] = len(data['split_title']) + 1
data['h_offset'] = 0
return data
class InboxContent(Content):
def __init__(self, order, content_generator, loader,
indent_size=2, max_indent_level=8):
self.name = 'My Inbox'
self.order = order
self.query = None
self.indent_size = indent_size
self.max_indent_level = max_indent_level
self._loader = loader
self._content_generator = content_generator
self._content_data = []
try:
self.get(0)
except IndexError:
if order == 'all':
raise exceptions.InboxError('Empty Inbox')
else:
raise exceptions.InboxError('Empty Inbox [%s]' % order)
@classmethod
def from_user(cls, reddit, loader, order='all'):
if order == 'all':
items = reddit.get_inbox(limit=None)
elif order == 'unread':
items = reddit.get_unread(limit=None)
elif order == 'messages':
items = reddit.get_messages(limit=None)
elif order == 'comments':
items = reddit.get_comment_replies(limit=None)
elif order == 'posts':
items = reddit.get_post_replies(limit=None)
elif order == 'mentions':
items = reddit.get_mentions(limit=None)
elif order == 'sent':
items = reddit.get_sent(limit=None)
else:
raise exceptions.InboxError('Invalid order %s' % order)
return cls(order, items, loader)
@property
def range(self):
return 0, len(self._content_data) - 1
def get(self, index, n_cols=70):
"""
Grab the `i`th object, with the title field formatted to fit
inside of a window of width `n_cols`
"""
if index < 0:
raise IndexError
while index >= len(self._content_data):
try:
with self._loader('Loading content'):
item = next(self._content_generator)
if self._loader.exception:
raise IndexError
except StopIteration:
raise IndexError
else:
if isinstance(item, praw.objects.Message):
# Message chains can be treated like comment trees
for child_message in self.flatten_comments([item]):
data = self.strip_praw_message(child_message)
self._content_data.append(data)
else:
# Comments also return children, but we don't display them
# in the Inbox page so they don't need to be parsed here.
data = self.strip_praw_message(item)
self._content_data.append(data)
data = self._content_data[index]
indent_level = min(data['level'], self.max_indent_level)
data['h_offset'] = indent_level * self.indent_size
width = n_cols - data['h_offset']
data['split_body'] = self.wrap_text(data['body'], width=width)
data['n_rows'] = len(data['split_body']) + 2
return data
class RequestHeaderRateLimiter(DefaultHandler):
"""Custom PRAW request handler for rate-limiting requests.
This is an alternative to PRAW 3's DefaultHandler that uses
Reddit's modern API guidelines to rate-limit requests based
on the X-Ratelimit-* headers returned from Reddit. Most of
these methods are copied from or derived from the DefaultHandler.
References:
https://github.com/reddit/reddit/wiki/API
https://github.com/praw-dev/prawcore/blob/master/prawcore/rate_limit.py
"""
def __init__(self):
# In PRAW's convention, these variables were bound to the
# class so the cache could be shared among all of the ``reddit``
# instances. In TTRV's use-case there is only ever a single reddit
# instance, so it made sense to clean up the globals and transfer them
# to instance variables.
self.cache = {}
self.timeouts = {}
# These are used for the header rate-limiting
self.used = None
self.remaining = None
self.seconds_to_reset = None
self.next_request_timestamp = None
super(RequestHeaderRateLimiter, self).__init__()
def _delay(self):
"""
Pause before making the next HTTP request.
"""
if self.next_request_timestamp is None:
return
sleep_seconds = self.next_request_timestamp - time.time()
if sleep_seconds <= 0:
return
time.sleep(sleep_seconds)
def _update(self, response_headers):
"""
Update the state of the rate limiter based on the response headers:
X-Ratelimit-Used: Approximate number of requests used this period
X-Ratelimit-Remaining: Approximate number of requests left to use
X-Ratelimit-Reset: Approximate number of seconds to end of period
PRAW 5's rate limiting logic is structured for making hundreds of
evenly-spaced API requests, which makes sense for running something
like a bot or crawler.
This handler's logic, on the other hand, is geared more towards
interactive usage. It allows for short, sporadic bursts of requests.
The assumption is that actual users browsing reddit shouldn't ever be
in danger of hitting the rate limit. If they do hit the limit, they
will be cut off until the period resets.
"""
if 'x-ratelimit-remaining' not in response_headers:
# This could be because the API returned an error response, or it
# could be because we're using something like read-only credentials
# which Reddit doesn't appear to rate limit.
return
self.used = float(response_headers['x-ratelimit-used'])
self.remaining = float(response_headers['x-ratelimit-remaining'])
self.seconds_to_reset = int(response_headers['x-ratelimit-reset'])
_logger.debug('Rate limit: %s used, %s remaining, %s reset',
self.used, self.remaining, self.seconds_to_reset)
if self.remaining <= 0:
self.next_request_timestamp = time.time() + self.seconds_to_reset
else:
self.next_request_timestamp = None
def _clear_timeouts(self, cache_timeout):
"""
Clear the cache of timed out results.
"""
for key in list(self.timeouts):
if timer() - self.timeouts[key] > cache_timeout:
del self.timeouts[key]
del self.cache[key]
def clear_cache(self):
"""Remove all items from the cache."""
self.cache = {}
self.timeouts = {}
def evict(self, urls):
"""Remove items from cache matching URLs.
Return the number of items removed.
"""
if isinstance(urls, six.text_type):
urls = [urls]
urls = set(normalize_url(url) for url in urls)
retval = 0
for key in list(self.cache):
if key[0] in urls:
retval += 1
del self.cache[key]
del self.timeouts[key]
return retval
def request(self, _cache_key, _cache_ignore, _cache_timeout, **kwargs):
"""
This is a wrapper function that handles the caching of the request.
See DefaultHandler.with_cache for reference.
"""
if _cache_key:
# Pop the request's session cookies from the cache key.
# These appear to be unreliable and change with every
# request. Also, with the introduction of OAuth I don't think
# that cookies are being used to store anything that
# differentiates API requests anyways
url, items = _cache_key
_cache_key = (url, (items[0], items[1], items[3], items[4]))
if kwargs['request'].method != 'GET':
# I added this check for TTRV, I have no idea why PRAW would ever
# want to cache POST/PUT/DELETE requests
_cache_ignore = True
if _cache_ignore:
return self._request(**kwargs)
self._clear_timeouts(_cache_timeout)
if _cache_key in self.cache:
return self.cache[_cache_key]
result = self._request(**kwargs)
# The handlers don't call `raise_for_status` so we need to ignore
# status codes that will result in an exception that should not be
# cached.
if result.status_code not in (200, 302):
return result
self.timeouts[_cache_key] = timer()
self.cache[_cache_key] = result
return result
def _request(self, request, proxies, timeout, verify, **_):
"""
This is where we apply rate limiting and make the HTTP request.
"""
settings = self.http.merge_environment_settings(
request.url, proxies, False, verify, None)
self._delay()
response = self.http.send(
request, timeout=timeout, allow_redirects=False, **settings)
self._update(response.headers)
return response
| 37.854622 | 86 | 0.567629 |
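A stripped-down sketch of the header-driven throttling described by RequestHeaderRateLimiter above; the class name and header values are illustrative, not the handler's actual request path.
import time
class HeaderRateLimiter:
    # Track Reddit's X-Ratelimit-* headers and only sleep once the remaining
    # budget is exhausted, mirroring the burst-friendly policy explained above.
    def __init__(self):
        self.next_request_timestamp = None
    def delay(self):
        if self.next_request_timestamp is None:
            return
        sleep_seconds = self.next_request_timestamp - time.time()
        if sleep_seconds > 0:
            time.sleep(sleep_seconds)
    def update(self, headers):
        if 'x-ratelimit-remaining' not in headers:
            return
        remaining = float(headers['x-ratelimit-remaining'])
        reset = int(headers['x-ratelimit-reset'])
        self.next_request_timestamp = time.time() + reset if remaining <= 0 else None
limiter = HeaderRateLimiter()
limiter.update({'x-ratelimit-remaining': '0', 'x-ratelimit-reset': '1'})
limiter.delay()  # sleeps about one second because the example budget is used up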
547b0ef99dfb6e1f9d42572329f92f1b37b9ba98 | 258 | py | Python | db_connection/db_conn.py | scorelab/DroneMap | 73d620caf3e0d8b1101059aee844c0200858493f | [
"Apache-2.0"
] | 2 | 2018-10-02T12:02:42.000Z | 2018-10-20T06:00:06.000Z | db_connection/db_conn.py | horizon00/DroneMap | 73d620caf3e0d8b1101059aee844c0200858493f | [
"Apache-2.0"
] | 2 | 2017-12-06T11:56:24.000Z | 2019-04-07T00:55:32.000Z | db_connection/db_conn.py | horizon00/DroneMap | 73d620caf3e0d8b1101059aee844c0200858493f | [
"Apache-2.0"
] | 17 | 2016-11-30T09:56:28.000Z | 2021-05-25T12:25:29.000Z | #Created by Imal thiunuwan using Intellij Idea
import pymongo
client = pymongo.MongoClient("link to the remote database/dbname") # defaults to port 27017
db = client.db_name
# print the number of documents in a collection
print db.collection_name.count() | 25.8 | 91 | 0.790698 |
2150c266da8860dc3572de1db6cfe427289df233 | 524 | py | Python | authserver/mailauth/migrations/0010_domain_redirect_to.py | yopiti/authserver | 0a1f7f5a83d03963d1ecfb5199be8e05d3068dfd | [
"MIT"
] | 8 | 2017-07-04T10:07:32.000Z | 2022-01-02T10:31:43.000Z | authserver/mailauth/migrations/0010_domain_redirect_to.py | yopiti/authserver | 0a1f7f5a83d03963d1ecfb5199be8e05d3068dfd | [
"MIT"
] | 14 | 2020-02-11T21:42:38.000Z | 2022-03-28T16:00:55.000Z | authserver/mailauth/migrations/0010_domain_redirect_to.py | yopiti/authserver | 0a1f7f5a83d03963d1ecfb5199be8e05d3068dfd | [
"MIT"
] | 1 | 2020-03-01T10:39:28.000Z | 2020-03-01T10:39:28.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-25 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mailauth', '0009_auto_20170912_2158'),
]
operations = [
migrations.AddField(
model_name='domain',
name='redirect_to',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='Redirect all mail to domain'),
),
]
| 24.952381 | 119 | 0.637405 |
72184825f18a8e172c908cbb33ce04a9ac90fc4a | 17,398 | py | Python | mypy/semanal_pass1.py | AlexTalks/mypy | e35b642d1f679ba8dd656091d9641ac24f481bbe | [
"PSF-2.0"
] | null | null | null | mypy/semanal_pass1.py | AlexTalks/mypy | e35b642d1f679ba8dd656091d9641ac24f481bbe | [
"PSF-2.0"
] | 7 | 2018-12-11T20:01:53.000Z | 2018-12-12T12:06:33.000Z | mypy/semanal_pass1.py | AlexTalks/mypy | e35b642d1f679ba8dd656091d9641ac24f481bbe | [
"PSF-2.0"
] | null | null | null | """The semantic analyzer pass 1.
This sets up externally visible names defined in a module but doesn't
follow imports and mostly ignores local definitions. It helps enable
(some) cyclic references between modules, such as module 'a' that
imports module 'b' and uses names defined in 'b' *and* vice versa. The
first pass can be performed before dependent modules have been
processed.
Since this pass can't assume that other modules have been processed,
this pass cannot detect certain definitions that can only be recognized
in later passes. Examples of these include TypeVar and NamedTuple
definitions, as these look like regular assignments until we are able to
bind names, which only happens in pass 2.
This pass also infers the reachability of certain if statements, such as
those with platform checks.
"""
from typing import List, Tuple
from mypy import experiments
from mypy.nodes import (
MypyFile, SymbolTable, SymbolTableNode, Var, Block, AssignmentStmt, FuncDef, Decorator,
ClassDef, TypeInfo, ImportFrom, Import, ImportAll, IfStmt, WhileStmt, ForStmt, WithStmt,
TryStmt, OverloadedFuncDef, Lvalue, Context, ImportedName, LDEF, GDEF, MDEF, UNBOUND_IMPORTED,
MODULE_REF, implicit_module_attrs, AssertStmt,
)
from mypy.types import Type, UnboundType, UnionType, AnyType, TypeOfAny, NoneTyp, CallableType
from mypy.semanal import SemanticAnalyzerPass2
from mypy.reachability import infer_reachability_of_if_statement, assert_will_always_fail
from mypy.semanal_shared import create_indirect_imported_name
from mypy.options import Options
from mypy.sametypes import is_same_type
from mypy.visitor import NodeVisitor
class SemanticAnalyzerPass1(NodeVisitor[None]):
"""First phase of semantic analysis.
See docstring of 'visit_file()' below and the module docstring for a
description of what this does.
"""
def __init__(self, sem: SemanticAnalyzerPass2) -> None:
self.sem = sem
def visit_file(self, file: MypyFile, fnam: str, mod_id: str, options: Options) -> None:
"""Perform the first analysis pass.
Populate module global table. Resolve the full names of
definitions not nested within functions and construct type
info structures, but do not resolve inter-definition
references such as base classes.
Also add implicit definitions such as __name__.
In this phase we don't resolve imports. For 'from ... import',
we generate dummy symbol table nodes for the imported names,
and these will get resolved in later phases of semantic
analysis.
"""
sem = self.sem
self.sem.options = options # Needed because we sometimes call into it
self.pyversion = options.python_version
self.platform = options.platform
sem.cur_mod_id = mod_id
sem.cur_mod_node = file
sem.errors.set_file(fnam, mod_id, scope=sem.scope)
sem.globals = SymbolTable()
sem.global_decls = [set()]
sem.nonlocal_decls = [set()]
sem.block_depth = [0]
sem.scope.enter_file(mod_id)
defs = file.defs
with experiments.strict_optional_set(options.strict_optional):
# Add implicit definitions of module '__name__' etc.
for name, t in implicit_module_attrs.items():
# unicode docstrings should be accepted in Python 2
if name == '__doc__':
if self.pyversion >= (3, 0):
typ = UnboundType('__builtins__.str') # type: Type
else:
typ = UnionType([UnboundType('__builtins__.str'),
UnboundType('__builtins__.unicode')])
else:
assert t is not None, 'type should be specified for {}'.format(name)
typ = UnboundType(t)
v = Var(name, typ)
v._fullname = self.sem.qualified_name(name)
self.sem.globals[name] = SymbolTableNode(GDEF, v)
for i, d in enumerate(defs):
d.accept(self)
if isinstance(d, AssertStmt) and assert_will_always_fail(d, options):
# We've encountered an assert that's always false,
# e.g. assert sys.platform == 'lol'. Truncate the
# list of statements. This mutates file.defs too.
del defs[i + 1:]
break
# Add implicit definition of literals/keywords to builtins, as we
# cannot define a variable with them explicitly.
if mod_id == 'builtins':
literal_types = [
('None', NoneTyp()),
# reveal_type is a mypy-only function that gives an error with
# the type of its arg.
('reveal_type', AnyType(TypeOfAny.special_form)),
# reveal_locals is a mypy-only function that gives an error with the types of
# locals
('reveal_locals', AnyType(TypeOfAny.special_form)),
] # type: List[Tuple[str, Type]]
# TODO(ddfisher): This guard is only needed because mypy defines
# fake builtins for its tests which often don't define bool. If
# mypy is fast enough that we no longer need those, this
# conditional check should be removed.
if 'bool' in self.sem.globals:
bool_type = self.sem.named_type('bool')
literal_types.extend([
('True', bool_type),
('False', bool_type),
('__debug__', bool_type),
])
else:
# We are running tests without 'bool' in builtins.
# TODO: Find a permanent solution to this problem.
# Maybe add 'bool' to all fixtures?
literal_types.append(('True', AnyType(TypeOfAny.special_form)))
for name, typ in literal_types:
v = Var(name, typ)
v._fullname = self.sem.qualified_name(name)
self.sem.globals[name] = SymbolTableNode(GDEF, v)
del self.sem.options
sem.scope.leave()
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
return
self.sem.block_depth[-1] += 1
for node in b.body:
node.accept(self)
self.sem.block_depth[-1] -= 1
def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
if self.sem.is_module_scope():
for lval in s.lvalues:
self.analyze_lvalue(lval, explicit_type=s.type is not None)
def visit_func_def(self, func: FuncDef, decorated: bool = False) -> None:
"""Process a func def.
decorated is true if we are processing a func def in a
Decorator that needs a _fullname and to have its body analyzed but
does not need to be added to the symbol table.
"""
sem = self.sem
if sem.type is not None:
# Don't process methods during pass 1.
return
func.is_conditional = sem.block_depth[-1] > 0
func._fullname = sem.qualified_name(func.name())
at_module = sem.is_module_scope() and not decorated
if (at_module and func.name() == '__getattr__' and
self.sem.cur_mod_node.is_package_init_file() and self.sem.cur_mod_node.is_stub):
if isinstance(func.type, CallableType):
ret = func.type.ret_type
if isinstance(ret, UnboundType) and not ret.args:
sym = self.sem.lookup_qualified(ret.name, func, suppress_errors=True)
# We only interpret a package as partial if the __getattr__ return type
# is either types.ModuleType or Any.
if sym and sym.node and sym.node.fullname() in ('types.ModuleType',
'typing.Any'):
self.sem.cur_mod_node.is_partial_stub_package = True
if at_module and func.name() in sem.globals:
# Already defined in this module.
original_sym = sem.globals[func.name()]
if (original_sym.kind == UNBOUND_IMPORTED or
isinstance(original_sym.node, ImportedName)):
# Ah this is an imported name. We can't resolve them now, so we'll postpone
# this until the main phase of semantic analysis.
return
if not sem.set_original_def(original_sym.node, func):
# Report error.
sem.check_no_global(func.name(), func)
else:
if at_module:
sem.globals[func.name()] = SymbolTableNode(GDEF, func)
# Also analyze the function body (needed in case there are unreachable
# conditional imports).
sem.function_stack.append(func)
sem.scope.enter_function(func)
sem.enter()
func.body.accept(self)
sem.leave()
sem.scope.leave()
sem.function_stack.pop()
def visit_overloaded_func_def(self, func: OverloadedFuncDef) -> None:
if self.sem.type is not None:
# Don't process methods during pass 1.
return
kind = self.kind_by_scope()
if kind == GDEF:
self.sem.check_no_global(func.name(), func, True)
func._fullname = self.sem.qualified_name(func.name())
if kind == GDEF:
self.sem.globals[func.name()] = SymbolTableNode(kind, func)
if func.impl:
impl = func.impl
# Also analyze the function body (in case there are conditional imports).
sem = self.sem
if isinstance(impl, FuncDef):
sem.function_stack.append(impl)
sem.scope.enter_function(func)
sem.enter()
impl.body.accept(self)
elif isinstance(impl, Decorator):
sem.function_stack.append(impl.func)
sem.scope.enter_function(func)
sem.enter()
impl.func.body.accept(self)
else:
assert False, "Implementation of an overload needs to be FuncDef or Decorator"
sem.leave()
sem.scope.leave()
sem.function_stack.pop()
def visit_class_def(self, cdef: ClassDef) -> None:
kind = self.kind_by_scope()
if kind == LDEF:
return
elif kind == GDEF:
self.sem.check_no_global(cdef.name, cdef)
cdef.fullname = self.sem.qualified_name(cdef.name)
info = TypeInfo(SymbolTable(), cdef, self.sem.cur_mod_id)
info.set_line(cdef.line, cdef.column)
cdef.info = info
if kind == GDEF:
self.sem.globals[cdef.name] = SymbolTableNode(kind, info)
self.process_nested_classes(cdef)
def process_nested_classes(self, outer_def: ClassDef) -> None:
self.sem.enter_class(outer_def.info)
for node in outer_def.defs.body:
if isinstance(node, ClassDef):
node.info = TypeInfo(SymbolTable(), node, self.sem.cur_mod_id)
if outer_def.fullname:
node.info._fullname = outer_def.fullname + '.' + node.info.name()
else:
node.info._fullname = node.info.name()
node.fullname = node.info._fullname
symbol = SymbolTableNode(MDEF, node.info)
outer_def.info.names[node.name] = symbol
self.process_nested_classes(node)
elif isinstance(node, (ImportFrom, Import, ImportAll, IfStmt)):
node.accept(self)
self.sem.leave_class()
def visit_import_from(self, node: ImportFrom) -> None:
# We can't bind module names during the first pass, as the target module might be
# unprocessed. However, we add dummy unbound imported names to the symbol table so
# that we at least know that the name refers to a module.
at_module = self.sem.is_module_scope()
node.is_top_level = at_module
if not at_module:
return
for name, as_name in node.names:
imported_name = as_name or name
if imported_name not in self.sem.globals:
sym = create_indirect_imported_name(self.sem.cur_mod_node,
node.id,
node.relative,
name)
if sym:
self.add_symbol(imported_name, sym, context=node)
def visit_import(self, node: Import) -> None:
node.is_top_level = self.sem.is_module_scope()
# This is similar to visit_import_from -- see the comment there.
if not self.sem.is_module_scope():
return
for id, as_id in node.ids:
imported_id = as_id or id
# For 'import a.b.c' we create symbol 'a'.
imported_id = imported_id.split('.')[0]
if imported_id not in self.sem.globals:
self.add_symbol(imported_id, SymbolTableNode(UNBOUND_IMPORTED, None), node)
def visit_import_all(self, node: ImportAll) -> None:
node.is_top_level = self.sem.is_module_scope()
def visit_while_stmt(self, s: WhileStmt) -> None:
if self.sem.is_module_scope():
s.body.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_for_stmt(self, s: ForStmt) -> None:
if self.sem.is_module_scope():
self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
s.body.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_with_stmt(self, s: WithStmt) -> None:
if self.sem.is_module_scope():
for n in s.target:
if n:
self.analyze_lvalue(n, explicit_type=s.target_type is not None)
s.body.accept(self)
def visit_decorator(self, d: Decorator) -> None:
if self.sem.type is not None:
# Don't process methods during pass 1.
return
d.var._fullname = self.sem.qualified_name(d.var.name())
self.add_symbol(d.var.name(), SymbolTableNode(self.kind_by_scope(), d), d)
self.visit_func_def(d.func, decorated=True)
def visit_if_stmt(self, s: IfStmt) -> None:
infer_reachability_of_if_statement(s, self.sem.options)
for node in s.body:
node.accept(self)
if s.else_body:
s.else_body.accept(self)
def visit_try_stmt(self, s: TryStmt) -> None:
if self.sem.is_module_scope():
self.sem.analyze_try_stmt(s, self, add_global=self.sem.is_module_scope())
def analyze_lvalue(self, lvalue: Lvalue, explicit_type: bool = False) -> None:
self.sem.analyze_lvalue(lvalue, add_global=self.sem.is_module_scope(),
explicit_type=explicit_type)
def kind_by_scope(self) -> int:
if self.sem.is_module_scope():
return GDEF
elif self.sem.is_class_scope():
return MDEF
elif self.sem.is_func_scope():
return LDEF
else:
assert False, "Couldn't determine scope"
def add_symbol(self, name: str, node: SymbolTableNode,
context: Context) -> None:
# NOTE: This is closely related to SemanticAnalyzerPass2.add_symbol. Since both methods
# will be called on top-level definitions, they need to co-operate. If you change
# this, you may have to change the other method as well.
if self.sem.is_func_scope():
assert self.sem.locals[-1] is not None
if name in self.sem.locals[-1]:
# Flag redefinition unless this is a reimport of a module.
if not (node.kind == MODULE_REF and
self.sem.locals[-1][name].node == node.node):
self.sem.name_already_defined(name, context, self.sem.locals[-1][name])
return
self.sem.locals[-1][name] = node
else:
assert self.sem.type is None # Pass 1 doesn't look inside classes
existing = self.sem.globals.get(name)
if (existing
and (not isinstance(node.node, MypyFile) or existing.node != node.node)
and existing.kind != UNBOUND_IMPORTED
and not isinstance(existing.node, ImportedName)):
# Modules can be imported multiple times to support import
# of multiple submodules of a package (e.g. a.x and a.y).
ok = False
# Only report an error if the symbol collision provides a different type.
if existing.type and node.type and is_same_type(existing.type, node.type):
ok = True
if not ok:
self.sem.name_already_defined(name, context, existing)
return
elif not existing:
self.sem.globals[name] = node
| 45.072539 | 98 | 0.589953 |
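A toy illustration of the statement truncation performed in SemanticAnalyzerPass1.visit_file above; real mypy evaluates AST asserts with assert_will_always_fail, so the string/tuple representation here is a deliberately simplified stand-in.
def truncate_after_failing_assert(defs, platform):
    # A "definition" is either a plain string or ('assert_platform', name),
    # which can never pass when name differs from the build platform.
    out = []
    for d in defs:
        out.append(d)
        if isinstance(d, tuple) and d[0] == 'assert_platform' and d[1] != platform:
            break  # everything after an always-false assert is unreachable
    return out
defs = ['import sys', ('assert_platform', 'win32'), 'x = 1', 'y = 2']
print(truncate_after_failing_assert(defs, 'linux'))
# ['import sys', ('assert_platform', 'win32')]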
7f9a63858dba2966583a4fc571d65b19932e61e6 | 3,223 | py | Python | experiments/ops.py | LizhengMathAi/svgd | 9606388cf4565e4fafe82869feef7a7ba8986ef2 | [
"Apache-2.0"
] | 3 | 2019-07-10T16:48:53.000Z | 2019-11-03T11:23:06.000Z | experiments/ops.py | LizhengMathAi/svgd | 9606388cf4565e4fafe82869feef7a7ba8986ef2 | [
"Apache-2.0"
] | null | null | null | experiments/ops.py | LizhengMathAi/svgd | 9606388cf4565e4fafe82869feef7a7ba8986ef2 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow.python.framework import ops
OP_MODULE = tf.load_op_library('./src/ops.so')
SCALE = 0.1
# ---------------------------------------- for Variable ----------------------------------------
def identity(input_tensor):
dim = input_tensor.get_shape().as_list().__len__()
return OP_MODULE.def_identity(input_tensor, dim=dim)
@ops.RegisterGradient('DefIdentity')
def gradients(op, grad):
r = tf.Variable(initial_value=tf.zeros_like(grad), trainable=False)
r_ = 0.9 * r + 0.1 * tf.square(grad)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, r.assign(r_))
return grad * tf.rsqrt(r + 1e-6)
# ---------------------------------------- for Tensor ----------------------------------------
def grad_transform(grad):
shape = grad.get_shape().as_list()[1:]
r = tf.Variable(initial_value=tf.zeros([1] + shape), trainable=False, dtype=grad.dtype)
r_ = 0.9 * r + tf.reduce_mean(0.1 * tf.square(grad), axis=0, keepdims=True)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, r.assign(r_))
return grad * tf.rsqrt(r + 1e-6)
def matmul(mat1, mat2):
return OP_MODULE.def_mat_mul(mat1, mat2, transpose=False)
@ops.RegisterGradient('DefMatMul')
def gradients(op, grad):
mat1_tensor, mat2_tensor = op.inputs
transpose_mat1_tensor = tf.transpose(mat1_tensor, perm=[1, 0])
transpose_mat2_tensor = tf.transpose(mat2_tensor, perm=[1, 0])
grad_mat1 = tf.matmul(grad, transpose_mat2_tensor)
grad_mat2 = tf.matmul(transpose_mat1_tensor, grad_transform(grad))
return grad_mat1, SCALE * grad_mat2
def conv2d(input_tensor, kernel, strides, padding):
return OP_MODULE.def_conv2d(input_tensor, kernel, strides=strides, padding=padding)
@ops.RegisterGradient('DefConv2d')
def gradients(op, grad):
input_tensor, kernel = op.inputs
grad_input_tensor = OP_MODULE.def_conv2d_grad_input(
grad, input_tensor, kernel, strides=op.get_attr("strides"), padding=op.get_attr("padding"))
grad_kernel = OP_MODULE.def_conv2d_grad_kernel(
grad_transform(grad), input_tensor, kernel, strides=op.get_attr("strides"), padding=op.get_attr("padding"))
return grad_input_tensor, SCALE * grad_kernel
# def bias_add(input_tensor, bias):
# assert input_tensor.get_shape().as_list().__len__() in [2, 4]
# return OP_MODULE.def_bias_add(input_tensor, bias, dim=input_tensor.get_shape().as_list().__len__())
#
#
# @ops.RegisterGradient('DefBiasAdd')
# def gradients(op, grad_tensor):
# dim = op.get_attr("dim")
#
# if dim == 4:
# return grad_tensor, tf.einsum("nhwc->c", grad_tensor)
# else:
# return grad_tensor, tf.einsum("nc->c", grad_tensor)
def max_pool(input_tensor, ksize, strides, padding):
return OP_MODULE.def_max_pool(input_tensor, ksize=ksize, strides=strides, padding=padding)
@ops.RegisterGradient('DefMaxPool')
def gradients(op, grad_output_tensor):
input_tensor = op.inputs[0]
output_tensor = op.outputs[0]
ksize = op.get_attr("ksize")
strides = op.get_attr("strides")
padding = op.get_attr("padding")
return OP_MODULE.def_max_pool_grad(
grad_output_tensor, input_tensor, output_tensor, ksize=ksize, strides=strides, padding=padding)
| 33.226804 | 115 | 0.681663 |
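A NumPy sketch of the running root-mean-square gradient scaling applied inside the registered gradients above (r <- 0.9*r + 0.1*g**2, then g * rsqrt(r + eps)); it omits the per-batch mean used by grad_transform and the SCALE factor.
import numpy as np
def rms_scale(grad, r, decay=0.9, eps=1e-6):
    # Running second-moment estimate, as in the DefIdentity/DefMatMul gradients above.
    r_new = decay * r + (1.0 - decay) * np.square(grad)
    return grad / np.sqrt(r_new + eps), r_new
g = np.array([0.5, -2.0, 10.0])
r = np.zeros_like(g)
for _ in range(200):                # after many identical steps r converges to g**2
    scaled, r = rms_scale(g, r)
print(np.round(scaled, 3))          # roughly [ 1., -1.,  1.], i.e. sign(g)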
8513f067433fb807a3381a376d67367c61960958 | 6,057 | py | Python | src/models/main.py | AsgerG/cookie | 73b9dbd708564e7ae03a671b21969d3746b3eaac | [
"MIT"
] | null | null | null | src/models/main.py | AsgerG/cookie | 73b9dbd708564e7ae03a671b21969d3746b3eaac | [
"MIT"
] | null | null | null | src/models/main.py | AsgerG/cookie | 73b9dbd708564e7ae03a671b21969d3746b3eaac | [
"MIT"
] | null | null | null | import argparse
import sys
import matplotlib.pyplot as plt
import torch
import tqdm
from src.data.make_dataset import load_mnist
import torchvision
from torch import nn, optim
#from torch.utils.tensorboard import SummaryWriter
import wandb
from model import Classifier
class TrainOREvaluate(object):
"""Helper class that will help launch class methods as commands
from a single script
"""
def __init__(self):
parser = argparse.ArgumentParser(
description="Script for either training or evaluating",
usage="python main.py <command>",
)
parser.add_argument("command", help="Subcommand to run")
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print("Unrecognized command")
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
def train(self):
print("Training day and night")
parser = argparse.ArgumentParser(description="Training arguments")
parser.add_argument("--lr", default=0.1)
parser.add_argument("--n_epochs", default=30, type=int)
parser.add_argument("--run_name", default="new")
# add any additional argument that you want
args = parser.parse_args(sys.argv[2:])
print(args)
wandb.init(config=args)
# ____ Setup model, loss and optimizer _____
#tb = SummaryWriter()
model = Classifier()
wandb.watch(model, log_freq=100)
optimizer = optim.Adam(model.parameters(), lr=0.003)
criterion = nn.NLLLoss()
# ____ Loop Variables ____
n_epochs = args.n_epochs
max_steps = 10
train_losses, test_losses, epochs = [], [], []
train_set, test_set = load_mnist()
trainloader = torch.utils.data.DataLoader(
train_set, batch_size=256, shuffle=True
)
testloader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=True)
# ____Training loop _____
for e in range(n_epochs):
train_loss = 0
test_loss = 0
steps = 0
for images, labels in tqdm.tqdm(trainloader, total=max_steps):
                if steps >= max_steps:  # stop after max_steps batches, matching the tqdm total
                    break
# TRAIN
model.train()
optimizer.zero_grad()
log_ps = model(images)
loss = criterion(log_ps, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
steps += 1
# VALIDATION
with torch.no_grad():
model.eval()
res = torch.zeros(0)
for images, labels in testloader:
# Get the val loss
log_ps = model(images)
loss = criterion(log_ps, labels)
test_loss += loss.item()
# Get the class probabilities
ps = torch.exp(log_ps)
_, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
res = torch.cat((res, equals), dim=0)
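                # res now holds one correctness flag per validation example, so its mean
                # is the epoch's top-1 validation accuracy.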
accuracy = torch.mean(res.type(torch.FloatTensor))
# Wandb
wandb.log({"train_loss": train_loss, "test_loss": test_loss,
"accuracy": accuracy})
wandb.log({"examples" : [wandb.Image(i) for i in images]})
# Tensorboard
#tb.add_scalar("Test_loss", test_loss, e)
#tb.add_scalar("Train_loss", train_loss, e)
#tb.add_histogram("conv1.weight", model.conv1.weight, e)
# Save current model
if e % 5 == 0:
torch.save(model.state_dict(), f"models/{args.run_name}_model{e}.pth")
# Sum up epoch
epochs += [e]
train_losses += [train_loss]
test_losses += [test_loss]
print(f"Epoch {e}: acc={round(accuracy.item()*100, 4)}%")
plt.plot(epochs, train_losses, label="Train")
plt.plot(epochs, test_losses, label="Test")
plt.legend()
plt.savefig(f"reports/figures/loss_curve_{args.run_name}.pdf")
plt.close()
#grid = torchvision.utils.make_grid(images)
#tb.add_image("images", grid)
#tb.add_graph(model, images)
#tb.add_hparams({"lr": args.lr,
# "epochs": args.n_epochs},
# {"accuracy": round(accuracy.item()*100, 4),
# "test_loss": test_losses[-1]})
#tb.close()
def evaluate(self):
print("Evaluating until hitting the ceiling")
parser = argparse.ArgumentParser(description="Training arguments")
parser.add_argument("--load_model_from", default="")
# add any additional argument that you want
args = parser.parse_args(sys.argv[2:])
print(args)
# load model
model = Classifier()
if args.load_model_from:
state_dict = torch.load(args.load_model_from)
model.load_state_dict(state_dict)
# Evaluation
_, test_set = load_mnist()
testloader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=True)
with torch.no_grad():
model.eval()
res = torch.zeros(0)
for images, labels in testloader:
# Get the val loss
log_ps = model(images)
# Get the class probabilities
ps = torch.exp(log_ps)
_, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
res = torch.cat((res, equals), dim=0)
accuracy = torch.mean(res.type(torch.FloatTensor))
print("Accuracy is: ", accuracy)
if __name__ == "__main__":
TrainOREvaluate()
| 33.28022 | 88 | 0.552749 |
c294e0b51673392abe8847884c5ae8b55c85903a | 844 | py | Python | tests/test_validators.py | Kristianuruplarsen/pydst-1 | 6bb7d96a01d09cf326b0894b0a1ba54c486e0ec3 | [
"MIT"
] | 1 | 2020-03-11T15:42:08.000Z | 2020-03-11T15:42:08.000Z | tests/test_validators.py | Kristianuruplarsen/pydst-1 | 6bb7d96a01d09cf326b0894b0a1ba54c486e0ec3 | [
"MIT"
] | 53 | 2018-06-12T14:36:58.000Z | 2021-11-15T17:48:19.000Z | tests/test_validators.py | Kristianuruplarsen/pydst-1 | 6bb7d96a01d09cf326b0894b0a1ba54c486e0ec3 | [
"MIT"
] | 1 | 2020-03-10T09:06:11.000Z | 2020-03-10T09:06:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pydst` package."""
import pytest
from pydst import validators
from pydst import utils
def test_ValueError_if_lang_wrong_type():
with pytest.raises(ValueError):
validators.lang_validator(2, ['da', 'en'])
def test_ValueError_if_valid_langs_wrong_type():
with pytest.raises(ValueError):
validators.lang_validator('da', 'da')
def test_ValueError_if_lang_not_in_valid_langs():
with pytest.raises(ValueError):
validators.lang_validator('da', ['en'])
def test_returns_None_if_correctly_specified():
assert validators.lang_validator('da', ['da', 'en']) == None
def test_False_if_str_not_in_list():
    assert validators.str_in_list('da', ['en']) is False
def test_True_if_str_in_list():
    assert validators.str_in_list('da', ['da']) is True
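# A minimal implementation sketch (not the actual pydst.validators code) that would
# satisfy the six tests above; the helper names avoid the test_ prefix so pytest does
# not collect them.
def _lang_validator_sketch(lang, valid_langs):
    # Raise ValueError on wrong types or an unsupported language, return None otherwise.
    if not isinstance(lang, str):
        raise ValueError("lang must be a str")
    if not isinstance(valid_langs, list):
        raise ValueError("valid_langs must be a list of language codes")
    if lang not in valid_langs:
        raise ValueError("lang must be one of {}".format(valid_langs))
    return None
def _str_in_list_sketch(string, valid_strings):
    # Membership test mirroring validators.str_in_list.
    return string in valid_strings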
| 27.225806 | 64 | 0.71564 |