| Column | Type | Range / values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 7–1.04M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4–247 |
| max_stars_repo_name | stringlengths | 4–125 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count | int64 | 1–368k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_path | stringlengths | 4–247 |
| max_issues_repo_name | stringlengths | 4–125 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count | int64 | 1–116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_path | stringlengths | 4–247 |
| max_forks_repo_name | stringlengths | 4–125 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 ⌀ |
| content | stringlengths | 1–1.04M |
| avg_line_length | float64 | 1.77–618k |
| max_line_length | int64 | 1–1.02M |
| alphanum_fraction | float64 | 0–1 |
| original_content | stringlengths | 7–1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0–60.1k |
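The table above is the per-row schema. As a reading aid only, here is a minimal sketch of that row layout expressed as Python type hints; the `CodeFileRow` name and the underscore spellings of the `filtered:*` columns are illustrative choices, not names defined by the dataset itself, and the `max_issues_*` / `max_forks_*` groups are elided because they repeat the `max_stars_*` pattern.

```python
from typing import List, Optional, TypedDict


class CodeFileRow(TypedDict):
    """Illustrative layout of one dataset row; field names mirror the column table above."""
    # file identity and basic metadata
    hexsha: str                     # 40-character blob hash
    size: int                       # file size in bytes
    ext: str                        # file extension, e.g. "py"
    lang: str                       # language (single class: "Python")
    # repository metadata for the most-starred copy of the file;
    # the max_issues_* and max_forks_* column groups repeat this pattern
    max_stars_repo_path: str
    max_stars_repo_name: str
    max_stars_repo_head_hexsha: str
    max_stars_repo_licenses: List[str]
    max_stars_count: Optional[int]  # ⌀ in the schema appears to mark nullable columns
    max_stars_repo_stars_event_min_datetime: Optional[str]
    max_stars_repo_stars_event_max_datetime: Optional[str]
    # source text and derived statistics
    content: str                    # filtered source, shown first in each row below
    avg_line_length: float
    max_line_length: int
    alphanum_fraction: float
    original_content: str           # unfiltered source, shown second in each row below
    # per-filter integer statistics; the dataset spells these with a colon,
    # e.g. "filtered:remove_function_no_docstring", which is not a valid identifier
    filtered_remove_function_no_docstring: int
    filtered_remove_class_no_docstring: int
    filtered_remove_delete_markers: int
```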
b6564c976990b7fa6fc560e5b2308ec16d5f0a89 | 492 | py | Python |
ABC/210/c.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | ["MIT"] | null | null | null |
ABC/210/c.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | ["MIT"] | null | null | null |
ABC/210/c.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | ["MIT"] | null | null | null |
from collections import defaultdict
if __name__ == '__main__':
main()
| 18.923077 | 37 | 0.497967 |
from collections import defaultdict
def main():
# input
N, K = map(int, input().split())
cs = [*map(int, input().split())]
# compute
ddict = defaultdict(int)
for i in range(K):
ddict[cs[i]] += 1
ans = len(ddict)
for i in range(N-K):
ddict[cs[i]] -= 1
if ddict[cs[i]] == 0:
del ddict[cs[i]]
ddict[cs[i+K]] += 1
ans = max(ans, len(ddict))
# output
print(ans)
if __name__ == '__main__':
main()
| 393 | 0 | 23 |
da6567f46ca5114b211c9251f1de332e436be104 | 1,892 | py | Python |
code/model/intent_classification/intent_classifier_inference.py | vipulraheja/IteraTeR | 80c1939969de909c39e41e16b8866355c038b6d2 | ["Apache-2.0"] | 11 | 2022-03-23T21:41:54.000Z | 2022-03-26T13:41:01.000Z |
code/model/intent_classification/intent_classifier_inference.py | vipulraheja/IteraTeR | 80c1939969de909c39e41e16b8866355c038b6d2 | ["Apache-2.0"] | null | null | null |
code/model/intent_classification/intent_classifier_inference.py | vipulraheja/IteraTeR | 80c1939969de909c39e41e16b8866355c038b6d2 | ["Apache-2.0"] | 1 | 2022-03-24T15:55:16.000Z | 2022-03-24T15:55:16.000Z |
import json
import torch
import argparse
import numpy as np
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import Trainer, TrainingArguments, RobertaTokenizer, RobertaModel, RobertaConfig, RobertaForSequenceClassification
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', '-c', required=True,
help='path to Pegasus model checkpoint')
args = parser.parse_args()
main(args)
| 35.037037 | 132 | 0.691332 |
import json
import torch
import argparse
import numpy as np
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import Trainer, TrainingArguments, RobertaTokenizer, RobertaModel, RobertaConfig, RobertaForSequenceClassification
def main(args):
checkpoint = args.checkpoint
model_name = 'roberta-large'
model_cache_dir='roberta-large-model-cache/'
model_type = RobertaForSequenceClassification
config_type = RobertaConfig
tokenizer_type = RobertaTokenizer
tokenizer = tokenizer_type.from_pretrained(
model_name,
cache_dir=model_cache_dir
)
id2label = {0: "clarity", 1: "fluency", 2: "coherence", 3: "style", 4: "meaning-changed"}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
checkpoint = args.checkpoint
model = model_type.from_pretrained(checkpoint)
model.eval()
model.to(device)
before_text = 'I likes coffee.'
after_text = 'I like coffee.'
def score_text(before_text, after_text, tokenizer, model):
input_ids = tokenizer(before_text, after_text, return_tensors='pt', padding=True, truncation=True)
with torch.no_grad():
input_ids = input_ids.to(device)
outputs = model(**input_ids)
softmax_scores = torch.softmax(outputs.logits, dim=1)
softmax_scores = softmax_scores[0].cpu().numpy()
index = np.argmax(softmax_scores)
return index, softmax_scores[index]
index, confidence = score_text([before_text], [after_text], tokenizer, model)
label = id2label[index]
print(label)
print(confidence)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', '-c', required=True,
help='path to Pegasus model checkpoint')
args = parser.parse_args()
main(args)
| 1,370 | 0 | 23 |
740141a9f07306e45b5ebf41d61bb31b1b134c05 | 2,035 | py | Python |
Forms/mengban_seed.py | UlordChain/uwallet-client | c41f89f34dd17699cb4b285dbba9053f28be5603 | ["MIT"] | 19 | 2018-08-21T06:25:30.000Z | 2018-08-21T12:34:14.000Z |
Forms/mengban_seed.py | UlordChain/uwallet-client | c41f89f34dd17699cb4b285dbba9053f28be5603 | ["MIT"] | 1 | 2018-06-01T09:14:36.000Z | 2018-06-01T09:20:49.000Z |
Forms/mengban_seed.py | UlordChain/uwallet-client | c41f89f34dd17699cb4b285dbba9053f28be5603 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2017/12/18
# @Author : Shu
# @Email : httpservlet@yeah.net
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from FormUI.ui_getseed import Ui_getseedWD
| 40.7 | 89 | 0.584767 |
# -*- coding: utf-8 -*-
# @Time : 2017/12/18
# @Author : Shu
# @Email : httpservlet@yeah.net
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from FormUI.ui_getseed import Ui_getseedWD
class SeedWidget(QWidget, Ui_getseedWD):
def __init__(self, parent=None):
super(SeedWidget, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.setStyleSheet("""QFrame#frame_left{border-image:url(:/images/heisemengban)}
QFrame#frame_top{border-image:url(:/images/baisemengban)}
QFrame#frame_rigth{background-color:white;}
""")
self.ted_setting_getseed.setReadOnly(True)
self.btn_seed_password.clicked.connect(self.slot_password)
self.frame_left.installEventFilter(self.parent)
self.frame_top.installEventFilter(self.parent)
def slot_password(self):
"""查看seed之前, 需要输入密码"""
if self.led_seed_password.text().isEmpty():
self.led_seed_password.setStyleSheet("""border:1px solid red;""")
else:
self.led_seed_password.setStyleSheet("")
password = unicode(self.led_seed_password.text()).encode('utf-8')
try:
args = ['getseed', '--client']
if password:
args.append('-W')
args.append(password)
rs = self.parent.bwallet_main(*args, thread_safe=True)
except Exception as e:
print (e)
if 'Incorrect password' in str(e):
self.led_seed_password.setStyleSheet("""border:1px solid red;""")
else:
self.led_seed_password.setStyleSheet("""border:1px solid yellow;""")
else:
self.ted_setting_getseed.setText(rs)
self.ted_setting_getseed.setVisible(True)
self.led_seed_password.setVisible(False)
self.btn_seed_password.setVisible(False)
| 637 | 1,197 | 23 |
f52c7e893c3ecdab0771489d791ee3bc29fa08c0 | 325 | py | Python |
test/test_json_equal.py | dakotahawkins/MCSchematicIntersection | a5bc130c9f887ca6a253c0a6508fcbca5f164df5 | ["MIT"] | null | null | null |
test/test_json_equal.py | dakotahawkins/MCSchematicIntersection | a5bc130c9f887ca6a253c0a6508fcbca5f164df5 | ["MIT"] | null | null | null |
test/test_json_equal.py | dakotahawkins/MCSchematicIntersection | a5bc130c9f887ca6a253c0a6508fcbca5f164df5 | ["MIT"] | null | null | null |
"""Tests two schematic json files to ensure they're equal
"""
import json
import sys
INPUT_A: str = sys.argv[1]
INPUT_B: str = sys.argv[2]
with open(INPUT_A, 'r') as infile_a:
with open(INPUT_B, 'r') as infile_b:
if json.load(infile_a)['nbt'] != json.load(infile_b)['nbt']:
sys.exit(1)
sys.exit(0)
| 21.666667 | 68 | 0.64 |
"""Tests two schematic json files to ensure they're equal
"""
import json
import sys
INPUT_A: str = sys.argv[1]
INPUT_B: str = sys.argv[2]
with open(INPUT_A, 'r') as infile_a:
with open(INPUT_B, 'r') as infile_b:
if json.load(infile_a)['nbt'] != json.load(infile_b)['nbt']:
sys.exit(1)
sys.exit(0)
| 0 | 0 | 0 |
ae2007157bf4f09f792df527fa386d5e97a2fa2a | 54 | py | Python |
kenning/resources/reports/__init__.py | antmicro/edge-ai-tester | 6b145145ed1cec206ae0229c846fb33d272f3ffa | ["Apache-2.0"] | 20 | 2021-06-24T13:37:21.000Z | 2022-03-25T10:50:26.000Z |
kenning/resources/reports/__init__.py | antmicro/edge-ai-tester | 6b145145ed1cec206ae0229c846fb33d272f3ffa | ["Apache-2.0"] | null | null | null |
kenning/resources/reports/__init__.py | antmicro/edge-ai-tester | 6b145145ed1cec206ae0229c846fb33d272f3ffa | ["Apache-2.0"] | 1 | 2021-11-09T17:23:04.000Z | 2021-11-09T17:23:04.000Z |
"""
Contains the templates for benchmark reports.
"""
| 13.5 | 45 | 0.722222 |
"""
Contains the templates for benchmark reports.
"""
| 0 | 0 | 0 |
8150d7ae07dca58de8cc781d35d56112a702254d | 20,928 | py | Python |
tests/stockfish/test_models.py | guidopetri/stockfish | 8140df45cbec9a2bce41d2f71c1b7b2c9c7036a2 | ["MIT"] | null | null | null |
tests/stockfish/test_models.py | guidopetri/stockfish | 8140df45cbec9a2bce41d2f71c1b7b2c9c7036a2 | ["MIT"] | null | null | null |
tests/stockfish/test_models.py | guidopetri/stockfish | 8140df45cbec9a2bce41d2f71c1b7b2c9c7036a2 | ["MIT"] | null | null | null |
import pytest
from timeit import default_timer
from stockfish import Stockfish
| 39.711575 | 113 | 0.565845 |
import pytest
from timeit import default_timer
from stockfish import Stockfish
class TestStockfish:
@pytest.fixture
def stockfish(self):
return Stockfish()
def test_get_best_move_first_move(self, stockfish):
best_move = stockfish.get_best_move()
assert best_move in (
"e2e3",
"e2e4",
"g1f3",
"b1c3",
"d2d4",
)
def test_get_best_move_time_first_move(self, stockfish):
best_move = stockfish.get_best_move_time(1000)
assert best_move in ("e2e3", "e2e4", "g1f3", "b1c3", "d2d4")
def test_set_position_resets_info(self, stockfish):
stockfish.set_position(["e2e4", "e7e6"])
stockfish.get_best_move()
assert stockfish.info != ""
stockfish.set_position(["e2e4", "e7e6"])
assert stockfish.info == ""
def test_get_best_move_not_first_move(self, stockfish):
stockfish.set_position(["e2e4", "e7e6"])
best_move = stockfish.get_best_move()
assert best_move in ("d2d4", "g1f3")
def test_get_best_move_time_not_first_move(self, stockfish):
stockfish.set_position(["e2e4", "e7e6"])
best_move = stockfish.get_best_move_time(1000)
assert best_move in ("d2d4", "g1f3")
def test_get_best_move_checkmate(self, stockfish):
stockfish.set_position(["f2f3", "e7e5", "g2g4", "d8h4"])
assert stockfish.get_best_move() is None
def test_get_best_move_time_checkmate(self, stockfish):
stockfish.set_position(["f2f3", "e7e5", "g2g4", "d8h4"])
assert stockfish.get_best_move_time(1000) is None
def test_set_fen_position(self, stockfish):
stockfish.set_fen_position(
"7r/1pr1kppb/2n1p2p/2NpP2P/5PP1/1P6/P6K/R1R2B2 w - - 1 27"
)
assert stockfish.is_move_correct("f4f5") is True
assert stockfish.is_move_correct("a1c1") is False
def test_castling(self, stockfish):
assert stockfish.is_move_correct("e1g1") is False
stockfish.set_fen_position(
"rnbqkbnr/ppp3pp/3ppp2/8/4P3/5N2/PPPPBPPP/RNBQK2R w KQkq - 0 4"
)
assert stockfish.is_move_correct("e1g1") is True
def test_set_fen_position_mate(self, stockfish):
stockfish.set_fen_position("8/8/8/6pp/8/4k1PP/8/r3K3 w - - 12 53")
assert stockfish.get_best_move() is None
assert stockfish.info == ""
def test_clear_info_after_set_new_fen_position(self, stockfish):
stockfish.set_fen_position("8/8/8/6pp/8/4k1PP/r7/4K3 b - - 11 52")
stockfish.get_best_move()
stockfish.set_fen_position("8/8/8/6pp/8/4k1PP/8/r3K3 w - - 12 53")
assert stockfish.info == ""
stockfish.set_fen_position("8/8/8/6pp/8/4k1PP/r7/4K3 b - - 11 52")
stockfish.get_best_move()
stockfish.set_fen_position("8/8/8/6pp/8/4k1PP/8/r3K3 w - - 12 53", False)
assert stockfish.info == ""
def test_set_fen_position_starts_new_game(self, stockfish):
stockfish.set_fen_position(
"7r/1pr1kppb/2n1p2p/2NpP2P/5PP1/1P6/P6K/R1R2B2 w - - 1 27"
)
stockfish.get_best_move()
assert stockfish.info != ""
stockfish.set_fen_position("3kn3/p5rp/1p3p2/3B4/3P1P2/2P5/1P3K2/8 w - - 0 53")
assert stockfish.info == ""
def test_set_fen_position_second_argument(self, stockfish):
stockfish.set_depth(16)
stockfish.set_fen_position(
"rnbqk2r/pppp1ppp/3bpn2/8/3PP3/2N5/PPP2PPP/R1BQKBNR w KQkq - 0 1", True
)
assert stockfish.get_best_move() == "e4e5"
stockfish.set_fen_position(
"rnbqk2r/pppp1ppp/3bpn2/4P3/3P4/2N5/PPP2PPP/R1BQKBNR b KQkq - 0 1", False
)
assert stockfish.get_best_move() == "d6e7"
stockfish.set_fen_position(
"rnbqk2r/pppp1ppp/3bpn2/8/3PP3/2N5/PPP2PPP/R1BQKBNR w KQkq - 0 1", False
)
assert stockfish.get_best_move() == "e4e5"
def test_is_move_correct_first_move(self, stockfish):
assert stockfish.is_move_correct("e2e1") is False
assert stockfish.is_move_correct("a2a3") is True
def test_is_move_correct_not_first_move(self, stockfish):
stockfish.set_position(["e2e4", "e7e6"])
assert stockfish.is_move_correct("e2e1") is False
assert stockfish.is_move_correct("a2a3") is True
@pytest.mark.parametrize(
"value",
[
"info",
"depth",
"seldepth",
"multipv",
"score",
"mate",
"-1",
"nodes",
"nps",
"tbhits",
"time",
"pv",
"h2g1",
"h4g3",
],
)
def test_last_info(self, stockfish, value):
stockfish.set_fen_position("r6k/6b1/2b1Q3/p6p/1p5q/3P2PP/5r1K/8 w - - 1 31")
stockfish.get_best_move()
assert value in stockfish.info
def test_set_skill_level(self, stockfish):
stockfish.set_fen_position(
"rnbqkbnr/ppp2ppp/3pp3/8/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 1"
)
assert stockfish.get_parameters()["Skill Level"] == 20
stockfish.set_skill_level(1)
assert stockfish.get_best_move() in (
"b2b3",
"b2b3",
"d2d3",
"d2d4",
"b1c3",
"d1e2",
"g2g3",
"c2c4",
"f1e2",
)
assert stockfish.get_parameters()["Skill Level"] == 1
stockfish.set_skill_level(20)
assert stockfish.get_best_move() in (
"d2d4",
"b1c3",
)
assert stockfish.get_parameters()["Skill Level"] == 20
def test_set_elo_rating(self, stockfish):
stockfish.set_depth(2)
stockfish.set_fen_position(
"rnbqkbnr/ppp2ppp/3pp3/8/4P3/5N2/PPPP1PPP/RNBQKB1R w KQkq - 0 1"
)
assert stockfish.get_parameters()["UCI_Elo"] == 1350
stockfish.set_elo_rating(2000)
assert stockfish.get_best_move() in (
"b2b3",
"b2b3",
"d2d3",
"d2d4",
"b1c3",
"d1e2",
"g2g3",
"c2c4",
"f1e2",
)
assert stockfish.get_parameters()["UCI_Elo"] == 2000
stockfish.set_elo_rating(1350)
assert stockfish.get_best_move() in (
"d1e2",
"b1c3",
"d2d3",
"d2d4",
"c2c4",
"f1e2",
)
assert stockfish.get_parameters()["UCI_Elo"] == 1350
def test_stockfish_constructor_with_custom_params(self, stockfish):
stockfish.set_skill_level(1)
assert stockfish.get_parameters() == {
"Write Debug Log": "false",
"Contempt": 0,
"Min Split Depth": 0,
"Threads": 1,
"Ponder": "false",
"Hash": 16,
"MultiPV": 1,
"Skill Level": 1,
"Move Overhead": 30,
"Minimum Thinking Time": 20,
"Slow Mover": 80,
"UCI_Chess960": "false",
"UCI_LimitStrength": "false",
"UCI_Elo": 1350,
}
def test_get_board_visual(self, stockfish):
stockfish.set_position(["e2e4", "e7e6", "d2d4", "d7d5"])
if stockfish.get_stockfish_major_version() >= 12:
expected_result = (
"+---+---+---+---+---+---+---+---+\n"
"| r | n | b | q | k | b | n | r | 8\n"
"+---+---+---+---+---+---+---+---+\n"
"| p | p | p | | | p | p | p | 7\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | | p | | | | 6\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | p | | | | | 5\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | P | P | | | | 4\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | | | | | | 3\n"
"+---+---+---+---+---+---+---+---+\n"
"| P | P | P | | | P | P | P | 2\n"
"+---+---+---+---+---+---+---+---+\n"
"| R | N | B | Q | K | B | N | R | 1\n"
"+---+---+---+---+---+---+---+---+\n"
" a b c d e f g h\n"
)
else:
expected_result = (
"+---+---+---+---+---+---+---+---+\n"
"| r | n | b | q | k | b | n | r |\n"
"+---+---+---+---+---+---+---+---+\n"
"| p | p | p | | | p | p | p |\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | | p | | | |\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | p | | | | |\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | P | P | | | |\n"
"+---+---+---+---+---+---+---+---+\n"
"| | | | | | | | |\n"
"+---+---+---+---+---+---+---+---+\n"
"| P | P | P | | | P | P | P |\n"
"+---+---+---+---+---+---+---+---+\n"
"| R | N | B | Q | K | B | N | R |\n"
"+---+---+---+---+---+---+---+---+\n"
)
assert stockfish.get_board_visual() == expected_result
def test_get_fen_position(self, stockfish):
assert (
stockfish.get_fen_position()
== "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
)
def test_get_fen_position_after_some_moves(self, stockfish):
stockfish.set_position(["e2e4", "e7e6"])
assert (
stockfish.get_fen_position()
== "rnbqkbnr/pppp1ppp/4p3/8/4P3/8/PPPP1PPP/RNBQKBNR w KQkq - 0 2"
)
def test_get_stockfish_major_version(self, stockfish):
assert stockfish.get_stockfish_major_version() in (8, 9, 10, 11, 12, 13, 14)
def test_get_evaluation_cp(self, stockfish):
stockfish.set_fen_position(
"r4rk1/pppb1p1p/2nbpqp1/8/3P4/3QBN2/PPP1BPPP/R4RK1 w - - 0 11"
)
evaluation = stockfish.get_evaluation()
assert evaluation["type"] == "cp" and evaluation["value"] > 0
def test_get_evaluation_checkmate(self, stockfish):
stockfish.set_fen_position("1nb1k1n1/pppppppp/8/6r1/5bqK/6r1/8/8 w - - 2 2")
assert stockfish.get_evaluation() == {"type": "mate", "value": 0}
def test_get_evaluation_stalemate(self, stockfish):
stockfish.set_fen_position("1nb1kqn1/pppppppp/8/6r1/5b1K/6r1/8/8 w - - 2 2")
assert stockfish.get_evaluation() == {"type": "cp", "value": 0}
def test_set_depth(self, stockfish):
stockfish.set_depth(12)
assert stockfish.depth == "12"
stockfish.get_best_move()
assert "depth 12" in stockfish.info
def test_get_best_move_wrong_position(self, stockfish):
wrong_fen = "3kk3/8/8/8/8/8/8/3KK3 w - - 0 0"
stockfish.set_fen_position(wrong_fen)
assert stockfish.get_best_move() in (
"d1e2",
"d1c1",
)
def test_get_parameters(self, stockfish):
stockfish._set_option("Minimum Thinking Time", 10)
parameters = stockfish.get_parameters()
assert parameters["Minimum Thinking Time"] == 10
def test_get_top_moves(self, stockfish):
stockfish.set_depth(15)
stockfish._set_option("MultiPV", 4)
stockfish.set_fen_position("1rQ1r1k1/5ppp/8/8/1R6/8/2r2PPP/4R1K1 w - - 0 1")
assert stockfish.get_top_moves(2) == [
{"Move": "e1e8", "Centipawn": None, "Mate": 1},
{"Move": "c8e8", "Centipawn": None, "Mate": 2},
]
stockfish.set_fen_position("8/8/8/8/8/3r2k1/8/6K1 w - - 0 1")
assert stockfish.get_top_moves(2) == [
{"Move": "g1f1", "Centipawn": None, "Mate": -2},
{"Move": "g1h1", "Centipawn": None, "Mate": -1},
]
def test_get_top_moves_mate(self, stockfish):
stockfish.set_depth(10)
stockfish._set_option("MultiPV", 3)
stockfish.set_fen_position("8/8/8/8/8/6k1/8/3r2K1 w - - 0 1")
assert stockfish.get_top_moves() == []
assert stockfish.get_parameters()["MultiPV"] == 3
def test_get_top_moves_raising_error(self, stockfish):
stockfish.set_fen_position(
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
)
with pytest.raises(ValueError):
stockfish.get_top_moves(0)
assert len(stockfish.get_top_moves(2)) == 2
assert stockfish.get_parameters()["MultiPV"] == 1
def test_make_moves_from_current_position(self, stockfish):
stockfish.set_fen_position(
"r1bqkb1r/pppp1ppp/2n2n2/1B2p3/4P3/5N2/PPPP1PPP/RNBQK2R w KQkq - 0 1"
)
with pytest.raises(ValueError):
stockfish.make_moves_from_current_position([])
stockfish.make_moves_from_current_position(["e1g1"])
assert (
stockfish.get_fen_position()
== "r1bqkb1r/pppp1ppp/2n2n2/1B2p3/4P3/5N2/PPPP1PPP/RNBQ1RK1 b kq - 1 1"
)
stockfish.make_moves_from_current_position(
["f6e4", "d2d4", "e4d6", "b5c6", "d7c6", "d4e5", "d6f5"]
)
assert (
stockfish.get_fen_position()
== "r1bqkb1r/ppp2ppp/2p5/4Pn2/8/5N2/PPP2PPP/RNBQ1RK1 w kq - 1 5"
)
stockfish.make_moves_from_current_position(
["d1d8", "e8d8", "b1c3", "d8e8", "f1d1", "f5e7", "h2h3", "f7f5"]
)
assert (
stockfish.get_fen_position()
== "r1b1kb1r/ppp1n1pp/2p5/4Pp2/8/2N2N1P/PPP2PP1/R1BR2K1 w - f6 0 9"
)
def test_make_moves_transposition_table_speed(self, stockfish):
"""
make_moves_from_current_position won't send the "ucinewgame" token to Stockfish, since it
will reach a new position similar to the current one. Meanwhile, set_fen_position will send this
token (unless the user specifies otherwise), since it could be going to a completely new position.
A big effect of sending this token is that it resets SF's transposition table. If the
new position is similar to the current one, this will affect SF's speed. This function tests
that make_moves_from_current_position doesn't reset the transposition table, by verifying SF is faster in
evaluating a consecutive set of positions when the make_moves_from_current_position function is used.
"""
stockfish.set_depth(16)
positions_considered = []
stockfish.set_fen_position(
"rnbqkbnr/ppp1pppp/8/3p4/2PP4/8/PP2PPPP/RNBQKBNR b KQkq - 0 2"
)
total_time_calculating_first = 0.0
for i in range(5):
start = default_timer()
chosen_move = stockfish.get_best_move()
total_time_calculating_first += default_timer() - start
positions_considered.append(stockfish.get_fen_position())
stockfish.make_moves_from_current_position([chosen_move])
total_time_calculating_second = 0.0
for i in range(len(positions_considered)):
stockfish.set_fen_position(positions_considered[i])
start = default_timer()
stockfish.get_best_move()
total_time_calculating_second += default_timer() - start
assert total_time_calculating_first < total_time_calculating_second
def test_get_wdl_stats(self, stockfish):
stockfish.set_depth(15)
stockfish._set_option("MultiPV", 2)
if stockfish.does_current_engine_version_have_wdl_option():
stockfish.set_fen_position("7k/4R3/4P1pp/7N/8/8/1q5q/3K4 w - - 0 1")
stockfish.set_show_wdl_option(True)
wdl_stats = stockfish.get_wdl_stats()
assert wdl_stats[1] > wdl_stats[0] * 7
assert abs(wdl_stats[0] - wdl_stats[2]) / wdl_stats[0] < 0.1
assert stockfish._parameters["UCI_ShowWDL"] == "true"
stockfish.set_fen_position(
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
)
stockfish.set_show_wdl_option(False)
wdl_stats = stockfish.get_wdl_stats()
assert wdl_stats[1] > wdl_stats[0] * 4
assert wdl_stats[0] > wdl_stats[2] * 1.8
assert stockfish._parameters["UCI_ShowWDL"] == "false"
stockfish.set_fen_position("8/8/8/8/8/6k1/6p1/6K1 w - - 0 1")
assert stockfish.get_wdl_stats() is None
else:
with pytest.raises(RuntimeError):
stockfish.get_wdl_stats()
def test_does_current_engine_version_have_wdl_option(self, stockfish):
if stockfish.get_stockfish_major_version() <= 11:
assert not stockfish.does_current_engine_version_have_wdl_option()
assert "UCI_ShowWDL" not in stockfish._parameters
with pytest.raises(RuntimeError):
stockfish.get_wdl_stats()
def test_set_show_wdl_option(self, stockfish):
stockfish.set_fen_position(
"rnbqkb1r/pp3ppp/3p1n2/1B2p3/3NP3/2N5/PPP2PPP/R1BQK2R b KQkq - 0 6"
)
if stockfish.does_current_engine_version_have_wdl_option():
stockfish.set_show_wdl_option(True)
assert stockfish._parameters["UCI_ShowWDL"] == "true"
assert len(Stockfish.get_wdl_stats()) == 3
assert stockfish._parameters["UCI_ShowWDL"] == "true"
stockfish.set_show_wdl_option(False)
assert stockfish._parameters["UCI_ShowWDL"] == "false"
stockfish.set_fen_position("8/8/8/8/8/3k4/3p4/3K4 w - - 0 1")
assert Stockfish.get_wdl_stats() is None
assert stockfish._parameters["UCI_ShowWDL"] == "false"
else:
with pytest.raises(RuntimeError):
stockfish.set_show_wdl_option(True)
with pytest.raises(RuntimeError):
stockfish.set_show_wdl_option(False)
def test_benchmark_result_with_defaults(self, stockfish):
params = stockfish.BenchmarkParameters()
result = stockfish.benchmark(params)
# result should contain the last line of a successful method call
assert result.split(" ")[0] == "Nodes/second"
def test_benchmark_result_with_valid_options(self, stockfish):
params = stockfish.BenchmarkParameters(
ttSize=64, threads=2, limit=1000, limitType="movetime", evalType="classical"
)
result = stockfish.benchmark(params)
# result should contain the last line of a successful method call
assert result.split(" ")[0] == "Nodes/second"
def test_benchmark_result_with_invalid_options(self, stockfish):
params = stockfish.BenchmarkParameters(
ttSize=2049,
threads=0,
limit=0,
fenFile="./fakefile.fen",
limitType="fghthtr",
evalType="",
)
result = stockfish.benchmark(params)
# result should contain the last line of a successful method call
assert result.split(" ")[0] == "Nodes/second"
def test_benchmark_result_with_invalid_type(self, stockfish):
params = {
"ttSize": 16,
"threads": 1,
"limit": 13,
"fenFile": "./fakefile.fen",
"limitType": "depth",
"evalType": "mixed",
}
result = stockfish.benchmark(params)
# result should contain the last line of a successful method call
assert result.split(" ")[0] == "Nodes/second"
def test_multiple_calls_to_del(self, stockfish):
assert stockfish._stockfish.poll() is None
assert not stockfish._has_quit_command_been_sent
stockfish.__del__()
assert stockfish._stockfish.poll() is not None
assert stockfish._has_quit_command_been_sent
stockfish.__del__()
assert stockfish._stockfish.poll() is not None
assert stockfish._has_quit_command_been_sent
def test_multiple_quit_commands(self, stockfish):
# Test multiple quit commands, and include a call to del too. All of
# them should run without causing some Exception.
assert stockfish._stockfish.poll() is None
assert not stockfish._has_quit_command_been_sent
stockfish._put("quit")
assert stockfish._has_quit_command_been_sent
stockfish._put("quit")
assert stockfish._has_quit_command_been_sent
stockfish.__del__()
assert stockfish._stockfish.poll() is not None
assert stockfish._has_quit_command_been_sent
stockfish._put(f"go depth {10}")
# Should do nothing, and change neither of the values below.
assert stockfish._stockfish.poll() is not None
assert stockfish._has_quit_command_been_sent
| 17,526 | 3,298 | 23 |
ae71a18b83fe0ab9540c787415a3e73b56ccb447 | 1,884 | py | Python |
fs.py | mission-liao/fin-stmt-additional | da9ef5299e6ff10406996d0cb0975b46498d3c39 | ["MIT"] | null | null | null |
fs.py | mission-liao/fin-stmt-additional | da9ef5299e6ff10406996d0cb0975b46498d3c39 | ["MIT"] | null | null | null |
fs.py | mission-liao/fin-stmt-additional | da9ef5299e6ff10406996d0cb0975b46498d3c39 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import click
from fstmt import TableAdaptorFactory, DashboardFactory, table
@click.group()
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--year', type=int)
@click.option('--quarter', type=int, default=4)
@click.option('--col', type=(str, str), multiple=True)
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--year')
@click.option('--quarter', type=int, default=4)
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--arg', type=(str, str), multiple=True)
@cli.command()
@click.argument('target')
if __name__ == '__main__':
cli()
| 28.984615 | 75 | 0.685775 |
# -*- coding: utf-8 -*-
import os
import click
from fstmt import TableAdaptorFactory, DashboardFactory, table
def get_data_dir():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def get_database_path():
return os.path.join(get_data_dir(), 'fstmt.sqlite')
@click.group()
def cli():
pass
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--year', type=int)
@click.option('--quarter', type=int, default=4)
@click.option('--col', type=(str, str), multiple=True)
def insert(target, market, symbol, year, quarter, col):
t = TableAdaptorFactory(get_database_path()).by_shortcut(target)
market = market.upper()
if isinstance(t, table.Stock):
if year is not None:
raise Exception("Providing 'year' when creating stocks")
if col :
raise Exception("Providing 'col' when creating stocks")
t.insert(market, symbol)
else:
t.insert(market, symbol, year, quarter, col)
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--year')
@click.option('--quarter', type=int, default=4)
def delete(target, market, symbol, year, quarter):
t = TableAdaptorFactory(get_database_path()).by_shortcut(target)
market = market.upper()
t.delete(market, symbol, year, quarter)
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--arg', type=(str, str), multiple=True)
def query(target, market, symbol, arg):
d = DashboardFactory(get_database_path()).by_shortcut(target)
market = market.upper()
d.draw(market, symbol, arg)
@cli.command()
@click.argument('target')
def migrate(target):
t = TableAdaptorFactory(get_database_path()).by_shortcut(target)
t.migrate()
if __name__ == '__main__':
cli()
| 974 | 0 | 156 |
46a9f3dcdb026e9a896987b4bac29a4f48c1cfbc | 2,073 | py | Python |
tests/ar/test_news_blockchain.py | OpenDataCordoba/whoare | e6be8c4c15239054b546c468305860265668bac9 | ["MIT"] | null | null | null |
tests/ar/test_news_blockchain.py | OpenDataCordoba/whoare | e6be8c4c15239054b546c468305860265668bac9 | ["MIT"] | 5 | 2020-10-20T20:09:19.000Z | 2020-12-28T00:39:28.000Z |
tests/ar/test_news_blockchain.py | OpenDataCordoba/whoare | e6be8c4c15239054b546c468305860265668bac9 | ["MIT"] | null | null | null |
from datetime import date
from whoare.zone_parsers.ar.news_from_blockchain import NewDomains
| 28.013514 | 66 | 0.682103 |
from datetime import date
from whoare.zone_parsers.ar.news_from_blockchain import NewDomains
def read_csv(dated):
nd = NewDomains()
nd.data_path = 'tests/ar/samples'
results = nd.get_from_date(dated)
urls = []
for zona, lista in results['zonas'].items():
for dom in lista:
urls.append(dom)
return urls
def test_new_domains_2020_12_15():
urls = read_csv(date(2020, 12, 15))
assert 'd2creativos.com.ar' in urls
assert 'danielojeda.com.ar' in urls
assert 'deseame.ar' in urls
assert 'desstek.com.ar' in urls
assert 'diamondprotein.ar' in urls
assert 'diamondprotein.com.ar' in urls
assert 'diarioelliberal.ar' in urls
def test_new_domains_2020_12_20():
urls = read_csv(date(2020, 12, 20))
assert 'xaragon.com.ar' in urls
# # xn--expodiseo-s6a.com.ar
assert 'expodiseño.com.ar' in urls
assert 'zion.ar' in urls
def test_new_domains_2018_10_08():
urls = read_csv(date(2018, 10, 8))
# TODO si empieza con cero el CSV viene malo
# assert '341.com.ar' in urls
assert 'abogadosdecordoba.net.ar' in urls
assert 'abogadosdepueblosfumigados.com.ar' in urls
assert 'aesucm.org.ar' in urls
assert 'agenciatimonel.com.ar' in urls
assert 'agustinpayges.com.ar' in urls
assert 'forastero.tur.ar' in urls
assert 'patinesconi.com.ar' in urls
assert 'pcyrma.org.ar' in urls
# # xn--diseowebrosario-1qb com.ar
assert 'diseñowebrosario.com.ar' in urls
# # xn--santuariodelapea-lub com.ar
assert 'santuariodelapeña.com.ar' in urls
def test_new_domains_2020_11_26():
urls = read_csv(date(2020, 11, 26))
assert 'sanro.ar' in urls
# xn--cabaasatrapasueos-ixbl com.ar
assert 'cabañasatrapasueños.com.ar' in urls
assert 'zenlab.com.ar' in urls
def test_new_domains_2019_01_15():
urls = read_csv(date(2019, 1, 15))
assert 'wtracker.com.ar' in urls
# xn--tartamudezenaccin-vyb com.ar
assert 'tartamudezenacción.com.ar' in urls
assert 'yoquierocalzados.com.ar' in urls
| 1,842 | 0 | 138 |
b3a45dcb40d939002cd6cc74fed37e8c87cd19b8 | 2,539 | py | Python |
rbtools/clients/tests/test_scanning.py | fangwentong/rbtools | c09f5c93fd61d447dee19b643ddfcf00ba92f920 | ["MIT"] | null | null | null |
rbtools/clients/tests/test_scanning.py | fangwentong/rbtools | c09f5c93fd61d447dee19b643ddfcf00ba92f920 | ["MIT"] | null | null | null |
rbtools/clients/tests/test_scanning.py | fangwentong/rbtools | c09f5c93fd61d447dee19b643ddfcf00ba92f920 | ["MIT"] | 1 | 2020-06-27T23:08:47.000Z | 2020-06-27T23:08:47.000Z |
"""Unit tests for client scanning."""
from __future__ import unicode_literals
import os
from rbtools.clients import scan_usable_client
from rbtools.clients.git import GitClient
from rbtools.clients.svn import SVNClient
from rbtools.clients.tests import SCMClientTests
from rbtools.utils.process import execute
class ScanningTests(SCMClientTests):
"""Unit tests for client scanning."""
def test_scanning_nested_repos_1(self):
"""Testing scan_for_usable_client with nested repositories (git inside
svn)
"""
git_dir = os.path.join(self.testdata_dir, 'git-repo')
svn_dir = os.path.join(self.testdata_dir, 'svn-repo')
# Check out SVN first.
clone_dir = self.chdir_tmp()
execute(['svn', 'co', 'file://%s' % svn_dir, 'svn-repo'],
env=None, ignore_errors=False, extra_ignore_errors=())
svn_clone_dir = os.path.join(clone_dir, 'svn-repo')
# Now check out git.
git_clone_dir = os.path.join(svn_clone_dir, 'git-repo')
os.mkdir(git_clone_dir)
execute(['git', 'clone', git_dir, git_clone_dir],
env=None, ignore_errors=False, extra_ignore_errors=())
os.chdir(git_clone_dir)
repository_info, tool = scan_usable_client({}, self.options)
self.assertEqual(repository_info.local_path,
os.path.realpath(git_clone_dir))
self.assertEqual(type(tool), GitClient)
def test_scanning_nested_repos_2(self):
"""Testing scan_for_usable_client with nested repositories (svn inside
git)
"""
git_dir = os.path.join(self.testdata_dir, 'git-repo')
svn_dir = os.path.join(self.testdata_dir, 'svn-repo')
# Check out git first
clone_dir = self.chdir_tmp()
git_clone_dir = os.path.join(clone_dir, 'git-repo')
os.mkdir(git_clone_dir)
execute(['git', 'clone', git_dir, git_clone_dir],
env=None, ignore_errors=False, extra_ignore_errors=())
# Now check out svn.
svn_clone_dir = os.path.join(git_clone_dir, 'svn-repo')
os.chdir(git_clone_dir)
execute(['svn', 'co', 'file://%s' % svn_dir, 'svn-repo'],
env=None, ignore_errors=False, extra_ignore_errors=())
os.chdir(svn_clone_dir)
repository_info, tool = scan_usable_client({}, self.options)
self.assertEqual(repository_info.local_path,
os.path.realpath(svn_clone_dir))
self.assertEqual(type(tool), SVNClient)
| 35.760563 | 78 | 0.648681 |
"""Unit tests for client scanning."""
from __future__ import unicode_literals
import os
from rbtools.clients import scan_usable_client
from rbtools.clients.git import GitClient
from rbtools.clients.svn import SVNClient
from rbtools.clients.tests import SCMClientTests
from rbtools.utils.process import execute
class ScanningTests(SCMClientTests):
"""Unit tests for client scanning."""
def test_scanning_nested_repos_1(self):
"""Testing scan_for_usable_client with nested repositories (git inside
svn)
"""
git_dir = os.path.join(self.testdata_dir, 'git-repo')
svn_dir = os.path.join(self.testdata_dir, 'svn-repo')
# Check out SVN first.
clone_dir = self.chdir_tmp()
execute(['svn', 'co', 'file://%s' % svn_dir, 'svn-repo'],
env=None, ignore_errors=False, extra_ignore_errors=())
svn_clone_dir = os.path.join(clone_dir, 'svn-repo')
# Now check out git.
git_clone_dir = os.path.join(svn_clone_dir, 'git-repo')
os.mkdir(git_clone_dir)
execute(['git', 'clone', git_dir, git_clone_dir],
env=None, ignore_errors=False, extra_ignore_errors=())
os.chdir(git_clone_dir)
repository_info, tool = scan_usable_client({}, self.options)
self.assertEqual(repository_info.local_path,
os.path.realpath(git_clone_dir))
self.assertEqual(type(tool), GitClient)
def test_scanning_nested_repos_2(self):
"""Testing scan_for_usable_client with nested repositories (svn inside
git)
"""
git_dir = os.path.join(self.testdata_dir, 'git-repo')
svn_dir = os.path.join(self.testdata_dir, 'svn-repo')
# Check out git first
clone_dir = self.chdir_tmp()
git_clone_dir = os.path.join(clone_dir, 'git-repo')
os.mkdir(git_clone_dir)
execute(['git', 'clone', git_dir, git_clone_dir],
env=None, ignore_errors=False, extra_ignore_errors=())
# Now check out svn.
svn_clone_dir = os.path.join(git_clone_dir, 'svn-repo')
os.chdir(git_clone_dir)
execute(['svn', 'co', 'file://%s' % svn_dir, 'svn-repo'],
env=None, ignore_errors=False, extra_ignore_errors=())
os.chdir(svn_clone_dir)
repository_info, tool = scan_usable_client({}, self.options)
self.assertEqual(repository_info.local_path,
os.path.realpath(svn_clone_dir))
self.assertEqual(type(tool), SVNClient)
| 0 | 0 | 0 |
0662e69a71e1cc9d3473c7b9d5a6fe55d4510954 | 2,858 | py | Python |
tests/test_archive.py | lgq2015/ubuntu-isign | 2b72d0c260d13e1dce4a9438a9b0cb566a0fcdaf | ["Apache-2.0"] | null | null | null |
tests/test_archive.py | lgq2015/ubuntu-isign | 2b72d0c260d13e1dce4a9438a9b0cb566a0fcdaf | ["Apache-2.0"] | null | null | null |
tests/test_archive.py | lgq2015/ubuntu-isign | 2b72d0c260d13e1dce4a9438a9b0cb566a0fcdaf | ["Apache-2.0"] | 1 | 2020-10-26T17:36:54.000Z | 2020-10-26T17:36:54.000Z |
from isign_base_test import IsignBaseTest
from isign.archive import archive_factory, Archive, AppArchive, AppZipArchive, IpaArchive
from isign.utils import PY3
import logging
log = logging.getLogger(__name__)
| 32.477273 | 89 | 0.731631 |
from isign_base_test import IsignBaseTest
from isign.archive import archive_factory, Archive, AppArchive, AppZipArchive, IpaArchive
from isign.utils import PY3
import logging
log = logging.getLogger(__name__)
class TestArchive(IsignBaseTest):
def _test_good(self, filename, klass):
archive = archive_factory(filename)
assert archive is not None
assert archive.__class__ is klass
assert isinstance(archive, Archive)
def test_archive_factory_app(self):
self._test_good(self.TEST_APP, AppArchive)
def test_archive_factory_appzip(self):
self._test_good(self.TEST_APPZIP, AppZipArchive)
def test_archive_factory_ipa(self):
self._test_good(self.TEST_IPA, IpaArchive)
def test_archive_factory_nonapp_dir(self):
archive = archive_factory(self.TEST_NONAPP_DIR)
assert archive is None
def test_archive_factory_nonapp_ipa(self):
archive = archive_factory(self.TEST_NONAPP_IPA)
assert archive is None
def test_archive_factory_nonapp_txt(self):
archive = archive_factory(self.TEST_NONAPP_TXT)
assert archive is None
def test_archive_factory_nonapp_simulator_app(self):
archive = archive_factory(self.TEST_SIMULATOR_APP)
assert archive is None
class TestBundleInfo(IsignBaseTest):
def _test_bundle_info(self, filename):
archive = archive_factory(filename)
assert archive is not None
assert archive.bundle_info is not None
if PY3:
assert archive.bundle_info[b'CFBundleName'] == b'isignTestApp'
else:
assert archive.bundle_info['CFBundleName'] == 'isignTestApp'
def test_app_archive_info(self):
self._test_bundle_info(self.TEST_APP)
def test_appzip_archive_info(self):
self._test_bundle_info(self.TEST_APPZIP)
def test_ipa_archive_info(self):
self._test_bundle_info(self.TEST_IPA)
class TestArchivePrecheck(IsignBaseTest):
def test_precheck_app(self):
assert AppArchive.precheck(self.TEST_APP)
def test_precheck_appzip(self):
assert AppZipArchive.precheck(self.TEST_APPZIP)
def test_precheck_ipa(self):
assert IpaArchive.precheck(self.TEST_IPA)
def test_bad_precheck_app(self):
assert AppArchive.precheck(self.TEST_NONAPP_DIR) is False
assert AppArchive.precheck(self.TEST_APPZIP) is False
assert AppArchive.precheck(self.TEST_IPA) is False
def test_bad_precheck_appzip(self):
assert AppZipArchive.precheck(self.TEST_APP) is False
assert AppZipArchive.precheck(self.TEST_IPA) is False
def test_bad_precheck_ipa(self):
assert IpaArchive.precheck(self.TEST_APP) is False
assert IpaArchive.precheck(self.TEST_APPZIP) is False
assert IpaArchive.precheck(self.TEST_NONAPP_IPA) is False
| 2,043 | 47 | 555 |
628390e7b0e104bdccc43edd629d89f2f161d0b5 | 4,769 | py | Python |
cotrendy/lightcurves.py | PLATO-Mission/cotrendy | 31d03f0cfd8329f72d897e84d2aa6c0ca8865dfe | ["MIT"] | null | null | null |
cotrendy/lightcurves.py | PLATO-Mission/cotrendy | 31d03f0cfd8329f72d897e84d2aa6c0ca8865dfe | ["MIT"] | null | null | null |
cotrendy/lightcurves.py | PLATO-Mission/cotrendy | 31d03f0cfd8329f72d897e84d2aa6c0ca8865dfe | ["MIT"] | null | null | null |
"""
Light curves components for Cotrendy
"""
import sys
import logging
import numpy as np
from scipy.stats import median_absolute_deviation
import cotrendy.utils as cuts
def load_photometry(config, apply_object_mask=True):
"""
Read in a photometry file
Parameters
----------
config : dict
Configuration file loaded via TOML
apply_object_mask : boolean
Mask our a subset of stars?
Returns
-------
times : array
Array of times of observation
lightcurves : list
List of Lightcurve objects, one per star
Raises
------
None
"""
root = config['global']['root']
time_file = config['data']['time_file']
flux_file = config['data']['flux_file']
error_file = config['data']['error_file']
times = cuts.depicklify(f"{root}/{time_file}")
if times is None:
logging.critical(f"Could not load {root}/{time_file}...")
sys.exit(1)
fluxes = cuts.depicklify(f"{root}/{flux_file}")
if fluxes is None:
logging.critical(f"Could not load {root}/{flux_file}...")
sys.exit(1)
errors = cuts.depicklify(f"{root}/{error_file}")
if errors is None:
logging.critical(f"Could not load {root}/{error_file}...")
sys.exit(1)
if fluxes.shape != errors.shape or len(times) != len(fluxes[0]):
logging.critical("Data arrays have mismatched shapes...")
sys.exit(1)
# now apply the mask if needed
if apply_object_mask:
objects_mask_file = config['data']['objects_mask_file']
mask = cuts.depicklify(f"{root}/{objects_mask_file}")
fluxes = fluxes[mask]
errors = errors[mask]
# now make list of Lightcurves objects
lightcurves = []
n_stars = len(fluxes)
i = 0
for star, star_err in zip(fluxes, errors):
logging.info(f"{i+1}/{n_stars}")
lightcurves.append(Lightcurve(star, star_err, config['data']['reject_outliers']))
i += 1
return times, lightcurves
class Lightcurve():
"""
Lightcurve object of real object
"""
def __init__(self, flux, flux_err, filter_outliers=False):
"""
Initialise the class
Parameters
----------
flux : array-like
list of flux values
flux_err : array-like
list of flux error values
filter_outliers : boolean
turn on PLATO outlier rejection?
default = False
Returns
-------
None
Raises
------
None
"""
# Initialise variables to hold data when trend is applied
self.flux_wtrend = flux
self.fluxerr_wtrend = flux_err
self.median_flux = np.median(flux)
self.outlier_indices = None
# store the lightcurve after removing outliers
if filter_outliers:
self.filter_outliers()
def filter_outliers(self, alpha=5, beta=12):
"""
Filter out data points that are > alpha*local MAD
within a window ±beta around a given data point.
Replace the data point with the local median
as to not introduce gaps
Parameters
----------
alpha : int
Scaling factor for number of MADs to reject outside
beta : int
Half width of sliding window for MAD rejection
Returns
-------
None
Outliers indices are included in self.outlier_indices
Raises
------
None
"""
# could imaging this having a voting system where each beta*2+1 slice
# votes on an outlier and if >N votes it gets nuked
outlier_indices = []
for i in np.arange(beta, len(self.flux_wtrend)-beta-1):
window = self.flux_wtrend[i-beta: i+beta+1]
med = np.median(window)
mad = median_absolute_deviation(window)
outlier_positions = np.where(((window >= med+alpha*mad) |
(window <= med-alpha*mad)))[0] + i - beta
# gather them up and then correct them with a median
# window centered on them
for outlier_position in outlier_positions:
if outlier_position not in outlier_indices:
outlier_indices.append(outlier_position)
# now go back and fix the outliers
for outlier in outlier_indices:
lower = outlier-beta
upper = outlier+beta+1
if lower < 0:
lower = 0
if upper > len(self.flux_wtrend):
upper = len(self.flux_wtrend)
med = np.median(self.flux_wtrend[lower:upper])
self.flux_wtrend[outlier] = med
self.outlier_indices = outlier_indices
| 29.993711 | 89 | 0.585657 |
"""
Light curves components for Cotrendy
"""
import sys
import logging
import numpy as np
from scipy.stats import median_absolute_deviation
import cotrendy.utils as cuts
def load_photometry(config, apply_object_mask=True):
"""
Read in a photometry file
Parameters
----------
config : dict
Configuration file loaded via TOML
apply_object_mask : boolean
Mask our a subset of stars?
Returns
-------
times : array
Array of times of observation
lightcurves : list
List of Lightcurve objects, one per star
Raises
------
None
"""
root = config['global']['root']
time_file = config['data']['time_file']
flux_file = config['data']['flux_file']
error_file = config['data']['error_file']
times = cuts.depicklify(f"{root}/{time_file}")
if times is None:
logging.critical(f"Could not load {root}/{time_file}...")
sys.exit(1)
fluxes = cuts.depicklify(f"{root}/{flux_file}")
if fluxes is None:
logging.critical(f"Could not load {root}/{flux_file}...")
sys.exit(1)
errors = cuts.depicklify(f"{root}/{error_file}")
if errors is None:
logging.critical(f"Could not load {root}/{error_file}...")
sys.exit(1)
if fluxes.shape != errors.shape or len(times) != len(fluxes[0]):
logging.critical("Data arrays have mismatched shapes...")
sys.exit(1)
# now apply the mask if needed
if apply_object_mask:
objects_mask_file = config['data']['objects_mask_file']
mask = cuts.depicklify(f"{root}/{objects_mask_file}")
fluxes = fluxes[mask]
errors = errors[mask]
# now make list of Lightcurves objects
lightcurves = []
n_stars = len(fluxes)
i = 0
for star, star_err in zip(fluxes, errors):
logging.info(f"{i+1}/{n_stars}")
lightcurves.append(Lightcurve(star, star_err, config['data']['reject_outliers']))
i += 1
return times, lightcurves
class Lightcurve():
"""
Lightcurve object of real object
"""
def __init__(self, flux, flux_err, filter_outliers=False):
"""
Initialise the class
Parameters
----------
flux : array-like
list of flux values
flux_err : array-like
list of flux error values
filter_outliers : boolean
turn on PLATO outlier rejection?
default = False
Returns
-------
None
Raises
------
None
"""
# Initialise variables to hold data when trend is applied
self.flux_wtrend = flux
self.fluxerr_wtrend = flux_err
self.median_flux = np.median(flux)
self.outlier_indices = None
# store the lightcurve after removing outliers
if filter_outliers:
self.filter_outliers()
def filter_outliers(self, alpha=5, beta=12):
"""
Filter out data points that are > alpha*local MAD
within a window ±beta around a given data point.
Replace the data point with the local median
as to not introduce gaps
Parameters
----------
alpha : int
Scaling factor for number of MADs to reject outside
beta : int
Half width of sliding window for MAD rejection
Returns
-------
None
Outliers indices are included in self.outlier_indices
Raises
------
None
"""
# could imaging this having a voting system where each beta*2+1 slice
# votes on an outlier and if >N votes it gets nuked
outlier_indices = []
for i in np.arange(beta, len(self.flux_wtrend)-beta-1):
window = self.flux_wtrend[i-beta: i+beta+1]
med = np.median(window)
mad = median_absolute_deviation(window)
outlier_positions = np.where(((window >= med+alpha*mad) |
(window <= med-alpha*mad)))[0] + i - beta
# gather them up and then correct them with a median
# window centered on them
for outlier_position in outlier_positions:
if outlier_position not in outlier_indices:
outlier_indices.append(outlier_position)
# now go back and fix the outliers
for outlier in outlier_indices:
lower = outlier-beta
upper = outlier+beta+1
if lower < 0:
lower = 0
if upper > len(self.flux_wtrend):
upper = len(self.flux_wtrend)
med = np.median(self.flux_wtrend[lower:upper])
self.flux_wtrend[outlier] = med
self.outlier_indices = outlier_indices
| 0 | 0 | 0 |
c4d9e25825d0a67968b72afbc467451be752f281 | 1,954 | py | Python |
BigQuery_Script.py | rezaho/iipp_patstat2018 | b83e913a124113052dfbfc5d43ef9d9f6a3f7af0 | ["Apache-2.0"] | null | null | null |
BigQuery_Script.py | rezaho/iipp_patstat2018 | b83e913a124113052dfbfc5d43ef9d9f6a3f7af0 | ["Apache-2.0"] | null | null | null |
BigQuery_Script.py | rezaho/iipp_patstat2018 | b83e913a124113052dfbfc5d43ef9d9f6a3f7af0 | ["Apache-2.0"] | null | null | null |
# Script for creating and loading PatStat2018b dataset into Big Query tables
# coding: utf-8
###############################################
###### Importing Libraries and functions ######
from google.cloud import bigquery
from open_patstat.utils.gcp import create_table, load_gcs_file, delete_table
from open_patstat.utils.schema import Schema
####################################################
###### Initializing the Client anf Job Config ######
# Before running this line, make sure that you have defined the environment variable...
# ..."GOOGLE_APPLICATION_CREDENTIALS" which points to the JSON file containing authentication key
client = bigquery.Client()
# Initializing the Job_config
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 1
job_config.max_bad_records = 10
job_config.source_format = bigquery.SourceFormat.CSV
dataset_ref = client.dataset('patstat')
###########################################
####### Creating and Adding Tables ########
# Tables list to be loaded
tables_list = ['tls201', 'tls209', 'tls204', 'tls207', 'tls206', 'tls211', 'tls212']
# Google Bucket directory address, which contains all data files
gs_add = 'gs://patstat_2018g/data_PATSTAT_Global_2018_Autumn/'
# Loading the tables in the list
for table in tables_list:
# Creating the table
create_table(client,
dataset_id='patstat',
table_id=table,
schema=getattr(Schema(),table))
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
load_job = client.load_table_from_uri(
source_uris=gs_add+table+'_*.gz',
destination=table_ref,
# job_id=job_id,
job_id_prefix='lgs-',
job_config=job_config,
)
load_job.result()
| 34.892857 | 97 | 0.665814 |
# Script for creating and loading PatStat2018b dataset into Big Query tables
# coding: utf-8
###############################################
###### Importing Libraries and functions ######
from google.cloud import bigquery
from open_patstat.utils.gcp import create_table, load_gcs_file, delete_table
from open_patstat.utils.schema import Schema
####################################################
###### Initializing the Client anf Job Config ######
# Before running this line, make sure that you have defined the environment variable...
# ..."GOOGLE_APPLICATION_CREDENTIALS" which points to the JSON file containing authentication key
client = bigquery.Client()
# Initializing the Job_config
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 1
job_config.max_bad_records = 10
job_config.source_format = bigquery.SourceFormat.CSV
dataset_ref = client.dataset('patstat')
###########################################
####### Creating and Adding Tables ########
# Tables list to be loaded
tables_list = ['tls201', 'tls209', 'tls204', 'tls207', 'tls206', 'tls211', 'tls212']
# Google Bucket directory address, which contains all data files
gs_add = 'gs://patstat_2018g/data_PATSTAT_Global_2018_Autumn/'
# Loading the tables in the list
for table in tables_list:
# Creating the table
create_table(client,
dataset_id='patstat',
table_id=table,
schema=getattr(Schema(),table))
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
load_job = client.load_table_from_uri(
source_uris=gs_add+table+'_*.gz',
destination=table_ref,
# job_id=job_id,
job_id_prefix='lgs-',
job_config=job_config,
)
load_job.result()
| 0 | 0 | 0 |
fc24f84cff67f66fdbc72dc2ba547c523b3814fe | 828 | py | Python |
external/cclib/bridge/cclib2pyquante.py | faribas/RMG-Py | 6149e29b642bf8da9537e2db98f15121f0e040c7 | ["MIT"] | 1 | 2017-12-18T18:43:22.000Z | 2017-12-18T18:43:22.000Z |
external/cclib/bridge/cclib2pyquante.py | speth/RMG-Py | 1d2c2b684580396e984459d9347628a5ceb80e2e | ["MIT"] | 72 | 2016-06-06T18:18:49.000Z | 2019-11-17T03:21:10.000Z |
external/cclib/bridge/cclib2pyquante.py | speth/RMG-Py | 1d2c2b684580396e984459d9347628a5ceb80e2e | ["MIT"] | 3 | 2017-09-22T15:47:37.000Z | 2021-12-30T23:51:47.000Z |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 737 $"
from PyQuante.Molecule import Molecule
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
>>> print int(en * 10) / 10. # Should be around -73.8
-73.8
"""
return Molecule("notitle", zip(atomnos, atomcoords), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29.571429 | 74 | 0.621981 |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 737 $"
from PyQuante.Molecule import Molecule
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
>>> print int(en * 10) / 10. # Should be around -73.8
-73.8
"""
return Molecule("notitle", zip(atomnos, atomcoords), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 | 0 |
f9a937ded3908623f9ea6aa4b476025ff2324f45 | 1,106 | py | Python |
NBATextAlerts/Alerts.py | kevinfjiang/NBATextAlerts | 0ddd4fc0fa7a272191c422167350d8813581675b | ["MIT"] | 1 | 2021-03-24T04:39:40.000Z | 2021-03-24T04:39:40.000Z |
NBATextAlerts/Alerts.py | kevinfjiang/NBATextAlerts | 0ddd4fc0fa7a272191c422167350d8813581675b | ["MIT"] | 1 | 2021-03-24T05:33:20.000Z | 2021-03-24T05:36:28.000Z |
NBATextAlerts/Alerts.py | kevinfjiang/NBATextAlerts | 0ddd4fc0fa7a272191c422167350d8813581675b | ["MIT"] | null | null | null |
"""
https://www.twilio.com/
This link is the basis for the text messaging, make sure to sign up!
After registering, press the home buton and click "Dashboard", both in the top left
You will see the following lines
"cellphone" -> Paste verified Twilio number as string
"ACCOUNT SID" -> Paste that number into account as string
"AUTH TOKEN" -> click show and paste that into token as string
"PHONE NUMBER" -> Paste that into token as string
Remember to verify your phone number
"""
from twilio.rest import Client
cellphone = "" #Input the phone number you want to send texts too (the phone number verified by twilio)
twilio_number = ""#Twilio provides a PHONE NUMBER, input it here
account = ""#Input ACCOUNT SID
token = ""#AUTH TOKEN, press show
#Test message if calling alerts. Run Alerts.py to test the system is working
if __name__ == "__main__":
send_message("Test message. Did you receive it?")
| 30.722222 | 103 | 0.711573 |
"""
https://www.twilio.com/
This link is the basis for the text messaging, make sure to sign up!
After registering, press the home buton and click "Dashboard", both in the top left
You will see the following lines
"cellphone" -> Paste verified Twilio number as string
"ACCOUNT SID" -> Paste that number into account as string
"AUTH TOKEN" -> click show and paste that into token as string
"PHONE NUMBER" -> Paste that into token as string
Remember to verify your phone number
"""
from twilio.rest import Client
cellphone = "" #Input the phone number you want to send texts too (the phone number verified by twilio)
twilio_number = ""#Twilio provides a PHONE NUMBER, input it here
account = ""#Input ACCOUNT SID
token = ""#AUTH TOKEN, press show
def send_message(message):
client = Client(account, token)
client.messages.create(to=cellphone,
from_=twilio_number,
body=message)
#Test message if calling alerts. Run Alerts.py to test the system is working
if __name__ == "__main__":
send_message("Test message. Did you receive it?")
| 170 | 0 | 23 |
abfd30e1b28d8aa306ca97c0ff99e36c6c64c29c | 2,546 | py | Python |
utils/timer.py | FanmingL/ESCP | 518f13f8b002d142f670f52d9ef34778e2c2d59f | ["MIT"] | null | null | null |
utils/timer.py | FanmingL/ESCP | 518f13f8b002d142f670f52d9ef34778e2c2d59f | ["MIT"] | null | null | null |
utils/timer.py | FanmingL/ESCP | 518f13f8b002d142f670f52d9ef34778e2c2d59f | ["MIT"] | null | null | null |
import time
import inspect
import numpy as np
if __name__ == '__main__':
test_timer()
| 33.064935
| 89
| 0.569521
|
import time
import inspect
import numpy as np
class Timer:
def __init__(self):
self.check_points = {}
self.points_time = {}
self.need_summary = {}
self.init_time = time.time()
def reset(self):
self.check_points = {}
self.points_time = {}
self.need_summary = {}
@staticmethod
def file_func_line(stack=1):
frame = inspect.stack()[stack][0]
info = inspect.getframeinfo(frame)
return info.filename, info.function, info.lineno
@staticmethod
def line(stack=2, short=False):
file, func, lineo = Timer.file_func_line(stack)
if short:
return f"line_{lineo}_func_{func}"
return f"line: {lineo}, func: {func}, file: {file}"
def register_point(self, tag=None, stack=3, short=True, need_summary=True, level=0):
if tag is None:
tag = self.line(stack, short)
if False and not tag.startswith('__'):
print(f'arrive {tag}, time: {time.time() - self.init_time}, level: {level}')
if level not in self.check_points:
self.check_points[level] = []
self.points_time[level] = []
self.need_summary[level] = set()
self.check_points[level].append(tag)
self.points_time[level].append(time.time())
if need_summary:
self.need_summary[level].add(tag)
def register_end(self, stack=4, level=0):
self.register_point('__timer_end_unique', stack, need_summary=False, level=level)
def summary(self):
if len(self.check_points) == 0:
return dict()
res = {}
for level in self.check_points:
self.register_point('__timer_finale_unique', level=level)
res_tmp = {}
for ind, item in enumerate(self.check_points[level][:-1]):
time_now = self.points_time[level][ind]
time_next = self.points_time[level][ind + 1]
if item in res_tmp:
res_tmp[item].append(time_next - time_now)
else:
res_tmp[item] = [time_next - time_now]
for k, v in res_tmp.items():
if k in self.need_summary[level]:
res['period_' + k] = np.mean(v)
self.reset()
return res
def test_timer():
timer = Timer()
for i in range(4):
timer.register_point()
time.sleep(1)
for k, v in timer.summary().items():
print(f'{k}, {v}')
if __name__ == '__main__':
test_timer()
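A minimal usage sketch of the level argument for timing nested regions separately; the tags and sleep duration below are made up for illustration.
timer = Timer()
timer.register_point(tag='outer', level=0)
for _ in range(3):
    timer.register_point(tag='inner', level=1)
    time.sleep(0.05)
timer.register_end(level=1)
timer.register_end(level=0)
print(timer.summary())  # reports mean durations as 'period_outer' and 'period_inner'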
| 2,194
| 215
| 46
|
f7642e021866ac47a0bcd5fd062c3e4fbd79be21
| 4,042
|
py
|
Python
|
src/interface_py/h2o4gpu/util/lightgbm_dynamic.py
|
pnijhara/h2o4gpu
|
6257112c134136471420b68241f57190a445b67d
|
[
"Apache-2.0"
] | 458
|
2017-09-20T08:32:10.000Z
|
2022-02-28T18:40:57.000Z
|
src/interface_py/h2o4gpu/util/lightgbm_dynamic.py
|
Jun-NIBS/h2o4gpu
|
9885416deb3285f5d0f33023d6c07373ac4fc0b7
|
[
"Apache-2.0"
] | 461
|
2017-09-20T11:39:04.000Z
|
2021-11-21T15:51:42.000Z
|
src/interface_py/h2o4gpu/util/lightgbm_dynamic.py
|
Jun-NIBS/h2o4gpu
|
9885416deb3285f5d0f33023d6c07373ac4fc0b7
|
[
"Apache-2.0"
] | 114
|
2017-09-20T12:08:07.000Z
|
2021-11-29T14:15:40.000Z
|
# pylint: skip-file
import os
import importlib.util
got_cpu_lgb = False
got_gpu_lgb = False
from h2o4gpu.util.gpu import device_count
_, ngpus_vis_global = device_count()
enable_lightgbm_import = True
if enable_lightgbm_import:
lgb_loader = importlib.util.find_spec('lightgbm')
lgb_found = lgb_loader is not None
always_do_dynamic_lgb_selection = True # False will take existing lightgbm package if exists, True will always overwrite existing
do_dynamic_lgb_selection = True
link_method = False # False (default now) is to directly load from path
if not lgb_found and do_dynamic_lgb_selection or always_do_dynamic_lgb_selection:
numpy_loader = importlib.util.find_spec('numpy')
found = numpy_loader is not None
if found:
numpy_path = os.path.dirname(numpy_loader.origin)
dirname = "/".join(numpy_path.split("/")[:-1])
lgb_path_gpu = os.path.join(dirname, "lightgbm_gpu")
lgb_path_cpu = os.path.join(dirname, "lightgbm_cpu")
lgb_path_new = os.path.join(dirname, "lightgbm")
got_lgb = False
expt_gpu = ""
expt_cpu = ""
expt_other = ""
            # This binds lgb locally as if we had done "import lightgbm as lgb"; any other file that imports lightgbm then gets this module immediately, even though no module named "lightgbm" has a path in site-packages.
try:
if ngpus_vis_global > 0:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_gpu, '__init__.py'))
lgb = loader.load_module()
print("Selected GPU version of lightgbm to import\n")
got_lgb = True
                    # This binds lgb locally as if we had done "import lightgbm as lgb"; any other file that imports lightgbm then gets this module immediately, even though no module named "lightgbm" has a path in site-packages.
got_gpu_lgb = True
except Exception as e:
expt_gpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_cpu, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_gpu)
else:
print("Selected CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_cpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_new, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected non-dynamic CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_other)
else:
print("Selected non-dynamic CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_other = str(e)
pass
if not got_lgb:
print(
"Unable to dynamically or non-dynamically import either GPU or CPU version of lightgbm: expt_gpu=%s expt_cpu=%s expt_other=%s\n" % (
expt_gpu, expt_cpu, expt_other))
else:
print("Did not find lightgbm or numpy\n")
| 47.552941
| 222
| 0.543295
|
# pylint: skip-file
import os
import importlib.util
got_cpu_lgb = False
got_gpu_lgb = False
from h2o4gpu.util.gpu import device_count
_, ngpus_vis_global = device_count()
enable_lightgbm_import = True
if enable_lightgbm_import:
lgb_loader = importlib.util.find_spec('lightgbm')
lgb_found = lgb_loader is not None
always_do_dynamic_lgb_selection = True # False will take existing lightgbm package if exists, True will always overwrite existing
do_dynamic_lgb_selection = True
link_method = False # False (default now) is to directly load from path
if not lgb_found and do_dynamic_lgb_selection or always_do_dynamic_lgb_selection:
numpy_loader = importlib.util.find_spec('numpy')
found = numpy_loader is not None
if found:
numpy_path = os.path.dirname(numpy_loader.origin)
dirname = "/".join(numpy_path.split("/")[:-1])
lgb_path_gpu = os.path.join(dirname, "lightgbm_gpu")
lgb_path_cpu = os.path.join(dirname, "lightgbm_cpu")
lgb_path_new = os.path.join(dirname, "lightgbm")
got_lgb = False
expt_gpu = ""
expt_cpu = ""
expt_other = ""
            # This binds lgb locally as if we had done "import lightgbm as lgb"; any other file that imports lightgbm then gets this module immediately, even though no module named "lightgbm" has a path in site-packages.
try:
if ngpus_vis_global > 0:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_gpu, '__init__.py'))
lgb = loader.load_module()
print("Selected GPU version of lightgbm to import\n")
got_lgb = True
                    # This binds lgb locally as if we had done "import lightgbm as lgb"; any other file that imports lightgbm then gets this module immediately, even though no module named "lightgbm" has a path in site-packages.
got_gpu_lgb = True
except Exception as e:
expt_gpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_cpu, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_gpu)
else:
print("Selected CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_cpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_new, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected non-dynamic CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_other)
else:
print("Selected non-dynamic CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_other = str(e)
pass
if not got_lgb:
print(
"Unable to dynamically or non-dynamically import either GPU or CPU version of lightgbm: expt_gpu=%s expt_cpu=%s expt_other=%s\n" % (
expt_gpu, expt_cpu, expt_other))
else:
print("Did not find lightgbm or numpy\n")
| 0
| 0
| 0
|
188ee1b65907db67dfd917f80e2a5d76fdb2dca5
| 1,967
|
py
|
Python
|
google-cloud-sdk/lib/surface/resource_manager/folders/undelete.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/.install/.backup/lib/surface/resource_manager/folders/undelete.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/.install/.backup/lib/surface/resource_manager/folders/undelete.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:09:01.000Z
|
2020-07-25T12:09:01.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to undelete a folder."""
import textwrap
from googlecloudsdk.api_lib.resource_manager import folders
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.resource_manager import flags
from googlecloudsdk.command_lib.resource_manager import folders_base
from googlecloudsdk.core import log
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Undelete(folders_base.FolderCommand):
"""Undelete a folder.
Undeletes the folder with the given folder ID.
This command can fail for the following reasons:
* There is no folder with the given ID.
* The active account does not have Owner or Editor permissions for the
given folder.
* When the folder to be undeleted has the same display name as an active
folder under this folder's parent.
"""
detailed_help = {
'EXAMPLES': textwrap.dedent("""\
The following command undeletes the folder with the ID
`3589215982`:
$ {command} 3589215982
"""),
}
@staticmethod
| 33.338983
| 74
| 0.744281
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to undelete a folder."""
import textwrap
from googlecloudsdk.api_lib.resource_manager import folders
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.resource_manager import flags
from googlecloudsdk.command_lib.resource_manager import folders_base
from googlecloudsdk.core import log
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Undelete(folders_base.FolderCommand):
"""Undelete a folder.
Undeletes the folder with the given folder ID.
This command can fail for the following reasons:
* There is no folder with the given ID.
* The active account does not have Owner or Editor permissions for the
given folder.
* When the folder to be undeleted has the same display name as an active
folder under this folder's parent.
"""
detailed_help = {
'EXAMPLES': textwrap.dedent("""\
The following command undeletes the folder with the ID
`3589215982`:
$ {command} 3589215982
"""),
}
@staticmethod
def Args(parser):
flags.FolderIdArg('you want to undelete.').AddToParser(parser)
def Run(self, args):
service = folders.FoldersService()
messages = folders.FoldersMessages()
restored = service.Undelete(
messages.CloudresourcemanagerFoldersUndeleteRequest(
foldersId=args.id))
log.RestoredResource(restored)
| 303
| 0
| 49
|
5a28b79a46e2fcfa07d776568c13a7328fded066
| 417
|
py
|
Python
|
contract/tests/ownership.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 7
|
2018-02-10T22:57:28.000Z
|
2020-11-20T14:46:18.000Z
|
contract/tests/ownership.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 5
|
2020-10-30T18:43:27.000Z
|
2021-02-04T12:39:30.000Z
|
contract/tests/ownership.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 5
|
2017-07-06T14:14:13.000Z
|
2019-02-22T14:40:16.000Z
|
#!/usr/bin/python3
import pytest
from utils import ZERO_ADDRESS
from brownie import accounts
def test_ownership(Ebb):
"""Get Owner"""
assert Ebb.getOwner() == accounts[0]
with pytest.reverts(): # transferOwnership should revert
Ebb.transferOwnership(ZERO_ADDRESS, {"from": accounts[0]})
Ebb.transferOwnership(accounts[1], {"from": accounts[0]})
assert Ebb.getOwner() == accounts[1]
| 23.166667
| 66
| 0.695444
|
#!/usr/bin/python3
import pytest
from utils import ZERO_ADDRESS
from brownie import accounts
def test_ownership(Ebb):
"""Get Owner"""
assert Ebb.getOwner() == accounts[0]
with pytest.reverts(): # transferOwnership should revert
Ebb.transferOwnership(ZERO_ADDRESS, {"from": accounts[0]})
Ebb.transferOwnership(accounts[1], {"from": accounts[0]})
assert Ebb.getOwner() == accounts[1]
| 0
| 0
| 0
|
d4bf808de2a868ba73315da564d256636fe0b32b
| 2,858
|
py
|
Python
|
gd/api/_property.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
gd/api/_property.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
gd/api/_property.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
"""Automatic object property code generator."""
from gd.typing import Enum, Union
from gd.api.enums import (
ColorChannelProperties,
LevelDataEnum,
LevelHeaderEnum,
ObjectDataEnum,
PlayerColor,
)
from gd.api.parser import ( # type: ignore
_INT,
_BOOL,
_FLOAT,
_HSV,
_ENUMS,
_TEXT,
_GROUPS,
_COLOR_INT,
_COLOR_BOOL,
_COLOR_PLAYER,
_COLOR_FLOAT,
_COLOR_HSV,
_HEADER_INT,
_HEADER_BOOL,
_HEADER_FLOAT,
_HEADER_COLORS,
_COLORS,
_GUIDELINES,
_HEADER_ENUMS,
)
from gd.api.hsv import HSV
__all__ = ("_template", "_create", "_object_code", "_color_code", "_header_code", "_level_code")
_template = """
@property
def {name}(self):
\"\"\":class:`{cls}`: Property ({desc}).\"\"\"
return self.data.get({enum!r})
@{name}.setter
def {name}(self, value):
self.data[{enum!r}] = value
@{name}.deleter
def {name}(self):
try:
del self.data[{enum!r}]
except KeyError:
pass
""".strip()
_container = "_container = {}"
_object_code = _create(ObjectDataEnum, "object")
_color_code = _create(ColorChannelProperties, "color")
_header_code = _create(LevelHeaderEnum, "header")
_level_code = _create(LevelDataEnum, "level")
| 23.816667
| 96
| 0.573828
|
"""Automatic object property code generator."""
from gd.typing import Enum, Union
from gd.api.enums import (
ColorChannelProperties,
LevelDataEnum,
LevelHeaderEnum,
ObjectDataEnum,
PlayerColor,
)
from gd.api.parser import ( # type: ignore
_INT,
_BOOL,
_FLOAT,
_HSV,
_ENUMS,
_TEXT,
_GROUPS,
_COLOR_INT,
_COLOR_BOOL,
_COLOR_PLAYER,
_COLOR_FLOAT,
_COLOR_HSV,
_HEADER_INT,
_HEADER_BOOL,
_HEADER_FLOAT,
_HEADER_COLORS,
_COLORS,
_GUIDELINES,
_HEADER_ENUMS,
)
from gd.api.hsv import HSV
__all__ = ("_template", "_create", "_object_code", "_color_code", "_header_code", "_level_code")
_template = """
@property
def {name}(self):
\"\"\":class:`{cls}`: Property ({desc}).\"\"\"
return self.data.get({enum!r})
@{name}.setter
def {name}(self, value):
self.data[{enum!r}] = value
@{name}.deleter
def {name}(self):
try:
del self.data[{enum!r}]
except KeyError:
pass
""".strip()
_container = "_container = {}"
def _get_type(n: Union[int, str], ts: str = "object") -> str:
t = {
"object": {
n in _INT: int,
n in _BOOL: bool,
n in _FLOAT: float,
n in _HSV: HSV,
n in _ENUMS: _ENUMS.get(n),
n == _TEXT: str,
n == _GROUPS: set,
},
"color": {
n in _COLOR_INT: int,
n in _COLOR_BOOL: bool,
n == _COLOR_PLAYER: PlayerColor,
n == _COLOR_FLOAT: float,
n == _COLOR_HSV: HSV,
},
"header": {
n in _HEADER_INT: int,
n in _HEADER_BOOL: bool,
n == _HEADER_FLOAT: float,
n in _HEADER_COLORS: "ColorChannel",
n == _COLORS: list,
n == _GUIDELINES: list,
n in _HEADER_ENUMS: _HEADER_ENUMS.get(n),
},
"level": {True: "soon"}, # yikes!
}
r = t.get(ts, {}).get(1, str)
try:
return r.__name__
except AttributeError:
return r
def _create(enum: Enum, ts: str) -> str:
final = []
for name, value in enum.as_dict().items():
desc = enum(value).desc
value = str(value)
cls = _get_type(value, ts=ts)
final.append(_template.format(name=name, enum=value, desc=desc, cls=cls))
property_container = {}
for name, value in enum.as_dict().items():
value = str(value) # we are going with str from now on
if value not in property_container:
property_container[value] = name
final.append(_container.format(property_container))
return ("\n\n").join(final)
_object_code = _create(ObjectDataEnum, "object")
_color_code = _create(ColorChannelProperties, "color")
_header_code = _create(LevelHeaderEnum, "header")
_level_code = _create(LevelDataEnum, "level")
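For illustration, formatting _template with made-up values (name='glow', enum='96', desc='glow enabled', cls='bool' are hypothetical, not taken from the real enums) produces property code like:
@property
def glow(self):
    """:class:`bool`: Property (glow enabled)."""
    return self.data.get('96')
@glow.setter
def glow(self, value):
    self.data['96'] = value
@glow.deleter
def glow(self):
    try:
        del self.data['96']
    except KeyError:
        pass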
| 1,575
| 0
| 46
|
ffc6fc0c01a161fba017b7f74580eecc40db4a94
| 286
|
py
|
Python
|
test.py
|
picturate/picturate
|
9f8e69fef7b600b6d8c1ade41a0ccfc382992e8b
|
[
"Apache-2.0"
] | 4
|
2020-08-03T04:16:53.000Z
|
2020-11-02T20:11:16.000Z
|
test.py
|
picturate/picturate
|
9f8e69fef7b600b6d8c1ade41a0ccfc382992e8b
|
[
"Apache-2.0"
] | 6
|
2020-09-04T12:36:08.000Z
|
2021-06-18T04:31:29.000Z
|
test.py
|
picturate/picturate
|
9f8e69fef7b600b6d8c1ade41a0ccfc382992e8b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T07:29:46.000Z
|
2020-07-24T07:29:46.000Z
|
from picturate.config import CAttnGANConfig
from picturate.nets import CAttnGAN
config = CAttnGANConfig('bird')
gan = CAttnGAN(config, pretrained=True)
caption = "This little bird is blue with short beak and white underbelly"
filename = 'bird'
gan.generate_image(caption, filename)
| 23.833333
| 73
| 0.793706
|
from picturate.config import CAttnGANConfig
from picturate.nets import CAttnGAN
config = CAttnGANConfig('bird')
gan = CAttnGAN(config, pretrained=True)
caption = "This little bird is blue with short beak and white underbelly"
filename = 'bird'
gan.generate_image(caption, filename)
| 0
| 0
| 0
|
409729662516480907dfc439cb222223768f41e8
| 14,838
|
py
|
Python
|
tests/unit/test_maxmin.py
|
mzelling/syndata
|
bba1c4a7b142f1da332d6613baae30b8b97c4e9b
|
[
"MIT"
] | null | null | null |
tests/unit/test_maxmin.py
|
mzelling/syndata
|
bba1c4a7b142f1da332d6613baae30b8b97c4e9b
|
[
"MIT"
] | null | null | null |
tests/unit/test_maxmin.py
|
mzelling/syndata
|
bba1c4a7b142f1da332d6613baae30b8b97c4e9b
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
from syndata.core import ClusterData
from syndata.maxmin import MaxMinClusters, MaxMinCov, MaxMinBal, maxmin_sampler
# Test Cases for maxmin_sampler
def test_maxmin_sampler():
"""
Make sure the sampling mechanism doesn't break when wrong inputs
are supplied.
"""
# Test cases throwing exceptions
args_causing_exception = [ # negative vals
{'n_samples': 10, 'ref': -2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': -1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': -1.5},
# zeros vals
{'n_samples': 0, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 0, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 0, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0},
# ref < min
{'n_samples': 10, 'ref': 1, 'min_val': 2, 'maxmin_ratio': 1.5},
# ref > max
{'n_samples': 10, 'ref': 10, 'min_val': 1, 'maxmin_ratio': 1.5},
# maxmin_ratio < 1
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0.7},
# maxmin_ratio = 1, ref != min_val
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1},
]
with pytest.raises(ValueError):
for args in args_causing_exception:
args['f_constrain'] = lambda x: 2*args['ref'] - x
maxmin_sampler(**args)
# Test cases with appropriate inputs (randomized)
args_appropriate_input = []
max_ref_val = 10; max_min_val = 10
for i in range(100):
min_val = np.random.default_rng(seed=i).uniform(0,max_min_val)
ref = np.random.uniform(min_val, max_ref_val)
maxmin_ratio = np.random.uniform(ref/min_val, 10*(ref/min_val))
args_appropriate_input.append(
{
# Do the first 10 tests on the edge case n_samples=1
'n_samples': np.random.choice(np.arange(2,15)) if i>10 else 1,
'min_val': min_val,
'ref': ref,
'maxmin_ratio': maxmin_ratio,
}
)
print('making the args', 'ref', ref, 'min_val', min_val, 'max_val', min_val*maxmin_ratio)
# Add test case with large sample size
args_appropriate_input.append({'n_samples': 10000, 'ref': 2, \
'min_val': 1, 'maxmin_ratio': 3})
for args in args_appropriate_input:
args['f_constrain'] = lambda x: 2*args['ref'] - x
out = maxmin_sampler(**args)
print(out)
assert check_maxmin_sampler_output(out,
args['f_constrain'])
def check_maxmin_sampler_output(sampled_vals, f_constrain):
"""
Check that output satisfies lower and upper bounds.
Check min, max values are related through the constraint.
Check that output is sorted.
"""
    return is_sorted(sampled_vals, order='ascending') \
        and (f_constrain(np.max(sampled_vals)) == np.min(sampled_vals)) \
        and (f_constrain(np.min(sampled_vals)) == np.max(sampled_vals))
def is_sorted(vals, order='ascending'):
"""
Check if values are sorted.
"""
if order=='ascending':
return np.all(vals[1:] - vals[:-1] >= 0)
elif order=='descending':
return np.all(vals[1:] - vals[:-1] <= 0)
# Test Cases for MaxMinCov
def test_init_maxmincov():
"""
Make sure that no illicit values can be used to construct MaxMinCov.
"""
# appropriate values of attributes
interior_cases = np.random.uniform(1,10,size=(100,3)) # random appropriate values
edge_cases = np.concatenate([2-np.eye(3),np.ones(3)[np.newaxis,:]],axis=0) # edge and corner cases
Z_appropriate = np.concatenate([interior_cases,edge_cases],axis=0)
args_appropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_appropriate]
for args in args_appropriate:
my_maxmincov = MaxMinCov(**args)
for attr in ['ref_aspect','aspect_maxmin','radius_maxmin']:
assert hasattr(my_maxmincov, attr)
# inappropriate values of attributes
Z_inappropriate = np.concatenate([np.ones(3) - 0.5*np.eye(3), (1-0.01)*np.ones(3)[np.newaxis,:]])
args_inappropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_inappropriate]
with pytest.raises(ValueError):
for args in args_inappropriate:
MaxMinCov(**args)
@pytest.fixture()
def setup_maxmincov():
"""
Initialize a valid MaxMinCov instance to test its methods.
"""
maxmincov = MaxMinCov(ref_aspect=1.5,
aspect_maxmin=1.5,
radius_maxmin=1.5)
yield maxmincov
def test_make_cluster_aspects(setup_maxmincov):
"""
Make sure that valid cluster aspect ratios are sampled.
Test the range of acceptable numbers of clusters, and
make sure setting a seed works.
"""
maxmincov = setup_maxmincov
with pytest.raises(ValueError):
maxmincov.make_cluster_aspects(0,seed=None)
maxmincov.make_cluster_aspects(0.99,seed=None)
# test different numbers of clusters
for n_clusters in range(1,100):
cluster_aspects = maxmincov.make_cluster_aspects(n_clusters,seed=None)
assert np.all(cluster_aspects >= 1)
assert np.max(cluster_aspects) >= maxmincov.ref_aspect
assert np.min(cluster_aspects) <= maxmincov.ref_aspect
# test seed
seed = 23
for i in range(10):
cluster_aspects_new = maxmincov.make_cluster_aspects(2,seed=23)
# make sure that each successive output is the same as the previous output
if i >= 1:
assert np.all(cluster_aspects_new == cluster_aspects_prev)
cluster_aspects_prev = cluster_aspects_new
def test_make_cluster_radii(setup_maxmincov):
"""
Make sure valid cluster radii are sampled.
Test the range of acceptable inputs, and make sure setting a seed works.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(1,20+1)[:,np.newaxis],
np.random.uniform(0,10,size=20)[:,np.newaxis],
np.random.choice(np.arange(2,100),size=20)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,1e-3,2], [1,1e-3,1],[2,100,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_clusters': z[0], 'ref_radius': z[1], 'n_dim': z[2]} for z in Z_appropriate]
for args in args_appropriate:
tol = 1e-12
print(args)
cluster_radii = maxmincov.make_cluster_radii(**args)
print(cluster_radii)
assert np.all(cluster_radii > 0)
assert (np.min(cluster_radii) <= args['ref_radius'] + tol) and \
(np.max(cluster_radii) >= args['ref_radius'] - tol)
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_cluster_radii(n_clusters=0, ref_radius=1, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=0, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=1, n_dim=0)
# test seeds
seed = 717
for i in range(10):
cluster_radii_new = maxmincov.make_cluster_radii(n_clusters=5,ref_radius=4,n_dim=25, seed=seed)
if (i >= 1):
assert np.all(cluster_radii_new == cluster_radii_prev)
cluster_radii_prev = cluster_radii_new
def test_make_axis_sd(setup_maxmincov):
"""
Make sure valid standard deviations are sampled (>0).
    Ensure that ref_sd is between min and max, and that the maxmin ratio
equals the desired aspect ratio.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(2,50+2)[:,np.newaxis],
np.random.uniform(0,10,size=50)[:,np.newaxis],
np.random.uniform(1,10,size=50)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,0.5,1.5], [1,0.5,1], [2,0.1,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_axes': z[0], 'sd': z[1], 'aspect': z[2]} for z in Z_appropriate]
for args in args_appropriate:
out = maxmincov.make_axis_sd(**args)
assert (np.min(out) <= args['sd']) and (np.max(out) >= args['sd'])
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_axis_sd(n_axes=0, sd=1, aspect=2)
maxmincov.make_axis_sd(n_axes=0.5, sd=0, aspect=2)
maxmincov.make_axis_sd(n_axes=1, sd=1, aspect=0.5)
maxmincov.make_axis_sd(n_axes=2, sd=1, aspect=-2)
maxmincov.make_axis_sd(n_axes=2, sd=-1, aspect=2)
# test seed
seed = 123
for i in range(10):
axis_sd_new = maxmincov.make_axis_sd(n_axes=5,sd=4,aspect=25, seed=seed)
if (i >= 1):
assert np.all(axis_sd_new == axis_sd_prev)
axis_sd_prev = axis_sd_new
def test_make_cov(setup_maxmincov, setup_clusterdata):
"""
Make sure axes are orthogonal
Make sure cov = axis * sd**2 * axis', similar for cov_inv
"""
clusterdata = setup_clusterdata
maxmincov = setup_maxmincov
# ensure output makes mathematical sense
for i in range(10):
(axis, sd, cov, cov_inv) = maxmincov.make_cov(clusterdata)
for cluster_idx in range(clusterdata.n_clusters):
# test orthogonality of cluster axes
assert np.all(np.allclose(axis[cluster_idx] @ np.transpose(axis[cluster_idx]),
np.eye(axis[cluster_idx].shape[0])))
# test covariance matrix is correct
assert np.all(np.allclose(cov[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**2) \
@ axis[cluster_idx]))
# test inverse covariance matrix is correct
assert np.all(np.allclose(cov_inv[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**(-2)) \
@ axis[cluster_idx]))
# test seed
seed = 123
for i in range(10):
cov_structure_new = maxmincov.make_cov(clusterdata, seed=seed)
if (i >= 1):
for cluster_idx in range(clusterdata.n_clusters):
for j in range(4): # iterate through axis, sd, cov, cov_inv
assert np.all(np.allclose(cov_structure_prev[j][cluster_idx],
cov_structure_new[j][cluster_idx]))
# set previous covariance structure for next iteration:
cov_structure_prev = cov_structure_new
# Test Cases for MaxMinBal
@pytest.fixture(params = np.linspace(1,10,10))
def test_init_maxminbal(setup_maxminbal):
"""
Ensure imbalance ratio is properly specified.
"""
maxminbal = setup_maxminbal
assert maxminbal.imbal_ratio >= 1
# test input check for inappropriate arguments
with pytest.raises(ValueError):
MaxMinBal(imbal_ratio = 0.5)
MaxMinBal(imbal_ratio = -2)
def test_make_class_sizes(setup_maxminbal,setup_clusterdata):
"""
"""
maxminbal = setup_maxminbal
clusterdata = setup_clusterdata
# test with appropriate input
Z_appropriate = [[500,5],[200,1],[100,2],[1000,10],[1500,3], [100,100]]
args_appropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_appropriate]
for args in args_appropriate:
clusterdata.n_samples = args['n_samples']
clusterdata.n_clusters = args['n_clusters']
out = maxminbal.make_class_sizes(clusterdata)
assert np.issubdtype(out.dtype, np.integer) and np.all(out >= 1) and \
(np.sum(out) == args['n_samples'])
# test with inappropriate input
Z_inappropriate = [[500,0],[0,10],[100,-1],[-0.5,5],[10,11]]
args_inappropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_inappropriate]
for args in args_inappropriate:
with pytest.raises(ValueError):
clusterdata.n_clusters = args['n_clusters']
clusterdata.n_samples = args['n_samples']
maxminbal.make_class_sizes(clusterdata)
def test_float_to_int(setup_maxminbal):
"""
    Make sure float class sizes are converted to positive integers that sum to n_samples.
"""
maxminbal = setup_maxminbal
# test appropriate inputs
for float_class_sz, n_samples in [(np.array([23.2, 254.7, 0.1, 35.6]), 100), \
(np.array([0.2, 0.7, 0.1, 0.5]), 10),
(np.array([2.5,1.5,5.2]), 3),
(np.array([0.5]), 1)]:
out = maxminbal.float_to_int(float_class_sz,n_samples)
print(len(float_class_sz), float_class_sz, n_samples)
assert (np.sum(out) == n_samples) and (np.all(out >= 1)) \
and np.issubdtype(out.dtype,np.integer)
# test inputs that should be left unchanged
assert np.all(maxminbal.float_to_int(np.array([5,10,25,7]), 5+10+25+7) \
== np.sort(np.array([5,10,25,7])))
# test inappropriate inputs
for float_class_sz, n_samples in [(np.array([0.5,1.5]), 1),
(np.array([0.5,1.5]), 0),
(np.array([2.5,1.5,5.2]), 2)]:
with pytest.raises(ValueError):
maxminbal.float_to_int(float_class_sz,n_samples)
# Test Cases for MaxMinClusters
def test_init_maxminclusters():
"""
Make sure to throw an error when inappropriate arguments are given.
"""
# edge and interior test cases for n_clusters, n_samples, n_dim
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=1)
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=100,n_dim=2)
MaxMinClusters(n_clusters=10,n_samples=200,n_dim=5)
# edge and interior test cases for testing maxmin ratios
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1,radius_maxmin=1, aspect_ref=1)
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1.1,radius_maxmin=1.1,aspect_ref=1.5)
MaxMinClusters(imbal_maxmin=1.2,aspect_maxmin=1,radius_maxmin=1.5,aspect_ref=7)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=1,aspect_ref=5)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=1)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=4)
# edge and interior test cases for overlap
MaxMinClusters(alpha_max=0.5, alpha_min=0.01)
MaxMinClusters(alpha_max=0.05, alpha_min=0)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
# testing the distributions
MaxMinClusters(dist='exp')
MaxMinClusters(dist='gaussian')
MaxMinClusters(dist='t')
# testing packing and scale
MaxMinClusters(packing=0.5)
MaxMinClusters(packing=0.01)
MaxMinClusters(packing=0.99)
MaxMinClusters(scale=0.01)
MaxMinClusters(scale=0.05)
MaxMinClusters(scale=5)
MaxMinClusters(scale=10)
with pytest.raises(ValueError):
# must have n_dim, n_clusters, n_samples >= 1
# and n_clusters <= n_samples
MaxMinClusters(n_clusters=10,n_samples=100,n_dim=0)
MaxMinClusters(n_clusters=10,n_samples=9,n_dim=10)
MaxMinClusters(n_clusters=0,n_samples=100,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
# maxmin_ratios must be >= 1
MaxMinClusters(imbal_maxmin=0.98)
MaxMinClusters(imbal_maxmin=-1.1)
MaxMinClusters(aspect_maxmin=0.35)
MaxMinClusters(aspect_maxmin=-1.5)
MaxMinClusters(radius_maxmin=0.21)
MaxMinClusters(radius_maxmin=-1)
MaxMinClusters(aspect_ref=0.99)
MaxMinClusters(aspect_ref=-2)
# must have alpha_max > 0, alpha_min >= 0, alpha_max > alpha_min
MaxMinClusters(alpha_max=0, alpha_min=0)
MaxMinClusters(alpha_max=0.05, alpha_min=0.1)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
MaxMinClusters(alpha_max=0.025, alpha_min=-1.0)
MaxMinClusters(alpha_max=-0.5, alpha_min=0.05)
# packing must be strictly between 0 and 1, scale must be >0
MaxMinClusters(packing=0)
MaxMinClusters(packing=1)
MaxMinClusters(scale=0)
MaxMinClusters(scale=-0.5)
# currently only support dist in {'gaussian','exp','t'}
MaxMinClusters(dist='foo')
MaxMinClusters(dist='bar')
| 33.722727
| 100
| 0.709934
|
import pytest
import numpy as np
from syndata.core import ClusterData
from syndata.maxmin import MaxMinClusters, MaxMinCov, MaxMinBal, maxmin_sampler
# Test Cases for maxmin_sampler
def test_maxmin_sampler():
"""
Make sure the sampling mechanism doesn't break when wrong inputs
are supplied.
"""
# Test cases throwing exceptions
args_causing_exception = [ # negative vals
{'n_samples': 10, 'ref': -2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': -1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': -1.5},
# zeros vals
{'n_samples': 0, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 0, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 0, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0},
# ref < min
{'n_samples': 10, 'ref': 1, 'min_val': 2, 'maxmin_ratio': 1.5},
# ref > max
{'n_samples': 10, 'ref': 10, 'min_val': 1, 'maxmin_ratio': 1.5},
# maxmin_ratio < 1
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0.7},
# maxmin_ratio = 1, ref != min_val
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1},
]
with pytest.raises(ValueError):
for args in args_causing_exception:
args['f_constrain'] = lambda x: 2*args['ref'] - x
maxmin_sampler(**args)
# Test cases with appropriate inputs (randomized)
args_appropriate_input = []
max_ref_val = 10; max_min_val = 10
for i in range(100):
min_val = np.random.default_rng(seed=i).uniform(0,max_min_val)
ref = np.random.uniform(min_val, max_ref_val)
maxmin_ratio = np.random.uniform(ref/min_val, 10*(ref/min_val))
args_appropriate_input.append(
{
# Do the first 10 tests on the edge case n_samples=1
'n_samples': np.random.choice(np.arange(2,15)) if i>10 else 1,
'min_val': min_val,
'ref': ref,
'maxmin_ratio': maxmin_ratio,
}
)
print('making the args', 'ref', ref, 'min_val', min_val, 'max_val', min_val*maxmin_ratio)
# Add test case with large sample size
args_appropriate_input.append({'n_samples': 10000, 'ref': 2, \
'min_val': 1, 'maxmin_ratio': 3})
for args in args_appropriate_input:
args['f_constrain'] = lambda x: 2*args['ref'] - x
out = maxmin_sampler(**args)
print(out)
assert check_maxmin_sampler_output(out,
args['f_constrain'])
def check_maxmin_sampler_output(sampled_vals, f_constrain):
"""
Check that output satisfies lower and upper bounds.
Check min, max values are related through the constraint.
Check that output is sorted.
"""
    return is_sorted(sampled_vals, order='ascending') \
        and (f_constrain(np.max(sampled_vals)) == np.min(sampled_vals)) \
        and (f_constrain(np.min(sampled_vals)) == np.max(sampled_vals))
def is_sorted(vals, order='ascending'):
"""
Check if values are sorted.
"""
if order=='ascending':
return np.all(vals[1:] - vals[:-1] >= 0)
elif order=='descending':
return np.all(vals[1:] - vals[:-1] <= 0)
# Test Cases for MaxMinCov
def test_init_maxmincov():
"""
Make sure that no illicit values can be used to construct MaxMinCov.
"""
# appropriate values of attributes
interior_cases = np.random.uniform(1,10,size=(100,3)) # random appropriate values
edge_cases = np.concatenate([2-np.eye(3),np.ones(3)[np.newaxis,:]],axis=0) # edge and corner cases
Z_appropriate = np.concatenate([interior_cases,edge_cases],axis=0)
args_appropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_appropriate]
for args in args_appropriate:
my_maxmincov = MaxMinCov(**args)
for attr in ['ref_aspect','aspect_maxmin','radius_maxmin']:
assert hasattr(my_maxmincov, attr)
# inappropriate values of attributes
Z_inappropriate = np.concatenate([np.ones(3) - 0.5*np.eye(3), (1-0.01)*np.ones(3)[np.newaxis,:]])
args_inappropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_inappropriate]
with pytest.raises(ValueError):
for args in args_inappropriate:
MaxMinCov(**args)
@pytest.fixture()
def setup_maxmincov():
"""
Initialize a valid MaxMinCov instance to test its methods.
"""
maxmincov = MaxMinCov(ref_aspect=1.5,
aspect_maxmin=1.5,
radius_maxmin=1.5)
yield maxmincov
def test_make_cluster_aspects(setup_maxmincov):
"""
Make sure that valid cluster aspect ratios are sampled.
Test the range of acceptable numbers of clusters, and
make sure setting a seed works.
"""
maxmincov = setup_maxmincov
with pytest.raises(ValueError):
maxmincov.make_cluster_aspects(0,seed=None)
maxmincov.make_cluster_aspects(0.99,seed=None)
# test different numbers of clusters
for n_clusters in range(1,100):
cluster_aspects = maxmincov.make_cluster_aspects(n_clusters,seed=None)
assert np.all(cluster_aspects >= 1)
assert np.max(cluster_aspects) >= maxmincov.ref_aspect
assert np.min(cluster_aspects) <= maxmincov.ref_aspect
# test seed
seed = 23
for i in range(10):
cluster_aspects_new = maxmincov.make_cluster_aspects(2,seed=23)
# make sure that each successive output is the same as the previous output
if i >= 1:
assert np.all(cluster_aspects_new == cluster_aspects_prev)
cluster_aspects_prev = cluster_aspects_new
def test_make_cluster_radii(setup_maxmincov):
"""
Make sure valid cluster radii are sampled.
Test the range of acceptable inputs, and make sure setting a seed works.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(1,20+1)[:,np.newaxis],
np.random.uniform(0,10,size=20)[:,np.newaxis],
np.random.choice(np.arange(2,100),size=20)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,1e-3,2], [1,1e-3,1],[2,100,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_clusters': z[0], 'ref_radius': z[1], 'n_dim': z[2]} for z in Z_appropriate]
for args in args_appropriate:
tol = 1e-12
print(args)
cluster_radii = maxmincov.make_cluster_radii(**args)
print(cluster_radii)
assert np.all(cluster_radii > 0)
assert (np.min(cluster_radii) <= args['ref_radius'] + tol) and \
(np.max(cluster_radii) >= args['ref_radius'] - tol)
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_cluster_radii(n_clusters=0, ref_radius=1, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=0, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=1, n_dim=0)
# test seeds
seed = 717
for i in range(10):
cluster_radii_new = maxmincov.make_cluster_radii(n_clusters=5,ref_radius=4,n_dim=25, seed=seed)
if (i >= 1):
assert np.all(cluster_radii_new == cluster_radii_prev)
cluster_radii_prev = cluster_radii_new
def test_make_axis_sd(setup_maxmincov):
"""
Make sure valid standard deviations are sampled (>0).
    Ensure that ref_sd is between min and max, and that the maxmin ratio
equals the desired aspect ratio.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(2,50+2)[:,np.newaxis],
np.random.uniform(0,10,size=50)[:,np.newaxis],
np.random.uniform(1,10,size=50)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,0.5,1.5], [1,0.5,1], [2,0.1,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_axes': z[0], 'sd': z[1], 'aspect': z[2]} for z in Z_appropriate]
for args in args_appropriate:
out = maxmincov.make_axis_sd(**args)
assert (np.min(out) <= args['sd']) and (np.max(out) >= args['sd'])
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_axis_sd(n_axes=0, sd=1, aspect=2)
maxmincov.make_axis_sd(n_axes=0.5, sd=0, aspect=2)
maxmincov.make_axis_sd(n_axes=1, sd=1, aspect=0.5)
maxmincov.make_axis_sd(n_axes=2, sd=1, aspect=-2)
maxmincov.make_axis_sd(n_axes=2, sd=-1, aspect=2)
# test seed
seed = 123
for i in range(10):
axis_sd_new = maxmincov.make_axis_sd(n_axes=5,sd=4,aspect=25, seed=seed)
if (i >= 1):
assert np.all(axis_sd_new == axis_sd_prev)
axis_sd_prev = axis_sd_new
def test_make_cov(setup_maxmincov, setup_clusterdata):
"""
Make sure axes are orthogonal
Make sure cov = axis * sd**2 * axis', similar for cov_inv
"""
clusterdata = setup_clusterdata
maxmincov = setup_maxmincov
# ensure output makes mathematical sense
for i in range(10):
(axis, sd, cov, cov_inv) = maxmincov.make_cov(clusterdata)
for cluster_idx in range(clusterdata.n_clusters):
# test orthogonality of cluster axes
assert np.all(np.allclose(axis[cluster_idx] @ np.transpose(axis[cluster_idx]),
np.eye(axis[cluster_idx].shape[0])))
# test covariance matrix is correct
assert np.all(np.allclose(cov[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**2) \
@ axis[cluster_idx]))
# test inverse covariance matrix is correct
assert np.all(np.allclose(cov_inv[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**(-2)) \
@ axis[cluster_idx]))
# test seed
seed = 123
for i in range(10):
cov_structure_new = maxmincov.make_cov(clusterdata, seed=seed)
if (i >= 1):
for cluster_idx in range(clusterdata.n_clusters):
for j in range(4): # iterate through axis, sd, cov, cov_inv
assert np.all(np.allclose(cov_structure_prev[j][cluster_idx],
cov_structure_new[j][cluster_idx]))
# set previous covariance structure for next iteration:
cov_structure_prev = cov_structure_new
# Test Cases for MaxMinBal
@pytest.fixture(params = np.linspace(1,10,10))
def setup_maxminbal(request):
return MaxMinBal(request.param)
def test_init_maxminbal(setup_maxminbal):
"""
Ensure imbalance ratio is properly specified.
"""
maxminbal = setup_maxminbal
assert maxminbal.imbal_ratio >= 1
# test input check for inappropriate arguments
with pytest.raises(ValueError):
MaxMinBal(imbal_ratio = 0.5)
MaxMinBal(imbal_ratio = -2)
def test_make_class_sizes(setup_maxminbal,setup_clusterdata):
"""
"""
maxminbal = setup_maxminbal
clusterdata = setup_clusterdata
# test with appropriate input
Z_appropriate = [[500,5],[200,1],[100,2],[1000,10],[1500,3], [100,100]]
args_appropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_appropriate]
for args in args_appropriate:
clusterdata.n_samples = args['n_samples']
clusterdata.n_clusters = args['n_clusters']
out = maxminbal.make_class_sizes(clusterdata)
assert np.issubdtype(out.dtype, np.integer) and np.all(out >= 1) and \
(np.sum(out) == args['n_samples'])
# test with inappropriate input
Z_inappropriate = [[500,0],[0,10],[100,-1],[-0.5,5],[10,11]]
args_inappropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_inappropriate]
for args in args_inappropriate:
with pytest.raises(ValueError):
clusterdata.n_clusters = args['n_clusters']
clusterdata.n_samples = args['n_samples']
maxminbal.make_class_sizes(clusterdata)
def test_float_to_int(setup_maxminbal):
"""
    Make sure float class sizes are converted to positive integers that sum to n_samples.
"""
maxminbal = setup_maxminbal
# test appropriate inputs
for float_class_sz, n_samples in [(np.array([23.2, 254.7, 0.1, 35.6]), 100), \
(np.array([0.2, 0.7, 0.1, 0.5]), 10),
(np.array([2.5,1.5,5.2]), 3),
(np.array([0.5]), 1)]:
out = maxminbal.float_to_int(float_class_sz,n_samples)
print(len(float_class_sz), float_class_sz, n_samples)
assert (np.sum(out) == n_samples) and (np.all(out >= 1)) \
and np.issubdtype(out.dtype,np.integer)
# test inputs that should be left unchanged
assert np.all(maxminbal.float_to_int(np.array([5,10,25,7]), 5+10+25+7) \
== np.sort(np.array([5,10,25,7])))
# test inappropriate inputs
for float_class_sz, n_samples in [(np.array([0.5,1.5]), 1),
(np.array([0.5,1.5]), 0),
(np.array([2.5,1.5,5.2]), 2)]:
with pytest.raises(ValueError):
maxminbal.float_to_int(float_class_sz,n_samples)
# Test Cases for MaxMinClusters
def test_init_maxminclusters():
"""
Make sure to throw an error when inappropriate arguments are given.
"""
# edge and interior test cases for n_clusters, n_samples, n_dim
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=1)
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=100,n_dim=2)
MaxMinClusters(n_clusters=10,n_samples=200,n_dim=5)
# edge and interior test cases for testing maxmin ratios
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1,radius_maxmin=1, aspect_ref=1)
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1.1,radius_maxmin=1.1,aspect_ref=1.5)
MaxMinClusters(imbal_maxmin=1.2,aspect_maxmin=1,radius_maxmin=1.5,aspect_ref=7)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=1,aspect_ref=5)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=1)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=4)
# edge and interior test cases for overlap
MaxMinClusters(alpha_max=0.5, alpha_min=0.01)
MaxMinClusters(alpha_max=0.05, alpha_min=0)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
# testing the distributions
MaxMinClusters(dist='exp')
MaxMinClusters(dist='gaussian')
MaxMinClusters(dist='t')
# testing packing and scale
MaxMinClusters(packing=0.5)
MaxMinClusters(packing=0.01)
MaxMinClusters(packing=0.99)
MaxMinClusters(scale=0.01)
MaxMinClusters(scale=0.05)
MaxMinClusters(scale=5)
MaxMinClusters(scale=10)
with pytest.raises(ValueError):
# must have n_dim, n_clusters, n_samples >= 1
# and n_clusters <= n_samples
MaxMinClusters(n_clusters=10,n_samples=100,n_dim=0)
MaxMinClusters(n_clusters=10,n_samples=9,n_dim=10)
MaxMinClusters(n_clusters=0,n_samples=100,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
# maxmin_ratios must be >= 1
MaxMinClusters(imbal_maxmin=0.98)
MaxMinClusters(imbal_maxmin=-1.1)
MaxMinClusters(aspect_maxmin=0.35)
MaxMinClusters(aspect_maxmin=-1.5)
MaxMinClusters(radius_maxmin=0.21)
MaxMinClusters(radius_maxmin=-1)
MaxMinClusters(aspect_ref=0.99)
MaxMinClusters(aspect_ref=-2)
# must have alpha_max > 0, alpha_min >= 0, alpha_max > alpha_min
MaxMinClusters(alpha_max=0, alpha_min=0)
MaxMinClusters(alpha_max=0.05, alpha_min=0.1)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
MaxMinClusters(alpha_max=0.025, alpha_min=-1.0)
MaxMinClusters(alpha_max=-0.5, alpha_min=0.05)
# packing must be strictly between 0 and 1, scale must be >0
MaxMinClusters(packing=0)
MaxMinClusters(packing=1)
MaxMinClusters(scale=0)
MaxMinClusters(scale=-0.5)
# currently only support dist in {'gaussian','exp','t'}
MaxMinClusters(dist='foo')
MaxMinClusters(dist='bar')
| 41
| 0
| 22
|
f547cb46376f6cd48fe72244973add9c82d457c0
| 122
|
py
|
Python
|
configs/scheduler_cfgs/multi_step_lr_cfg.py
|
slothfulxtx/TransLoc3D
|
0ac324b1dcec456c76d7db2f87d13c076f2d55e4
|
[
"MIT"
] | 5
|
2021-09-30T08:12:26.000Z
|
2022-01-19T16:20:10.000Z
|
configs/scheduler_cfgs/multi_step_lr_cfg.py
|
slothfulxtx/TransLoc3D
|
0ac324b1dcec456c76d7db2f87d13c076f2d55e4
|
[
"MIT"
] | null | null | null |
configs/scheduler_cfgs/multi_step_lr_cfg.py
|
slothfulxtx/TransLoc3D
|
0ac324b1dcec456c76d7db2f87d13c076f2d55e4
|
[
"MIT"
] | null | null | null |
scheduler_type = 'MultiStepLR'
scheduler_cfg = dict(
gamma=0.5,
milestones=(50, 100, 150, 200)
)
end_epoch = 250
| 15.25
| 34
| 0.672131
|
scheduler_type = 'MultiStepLR'
scheduler_cfg = dict(
gamma=0.5,
milestones=(50, 100, 150, 200)
)
end_epoch = 250
| 0
| 0
| 0
|
517a5b82716bd7c535ee53011b12813c5f3bf87e
| 392
|
py
|
Python
|
back/webapi/views/SystemDateView.py
|
stimulee/piclodio3
|
09f23d608b36cfd0e2e4aec3310c57752e8b7c59
|
[
"MIT"
] | null | null | null |
back/webapi/views/SystemDateView.py
|
stimulee/piclodio3
|
09f23d608b36cfd0e2e4aec3310c57752e8b7c59
|
[
"MIT"
] | null | null | null |
back/webapi/views/SystemDateView.py
|
stimulee/piclodio3
|
09f23d608b36cfd0e2e4aec3310c57752e8b7c59
|
[
"MIT"
] | null | null | null |
from time import strftime
from rest_framework.permissions import AllowAny
from rest_framework.views import APIView
from rest_framework.response import Response
| 24.5
| 47
| 0.721939
|
from time import strftime
from rest_framework.permissions import AllowAny
from rest_framework.views import APIView
from rest_framework.response import Response
class SystemDateList(APIView):
permission_classes = (AllowAny,)
def get(self, request, format=None):
# get the local system date
clock = strftime("%Y-%m-%dT%H:%M:%S")
return Response(str(clock))
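For reference, the strftime pattern used above yields timestamps of the form below (the value shown is illustrative):
>>> strftime("%Y-%m-%dT%H:%M:%S")
'2020-09-01T14:54:14'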
| 134
| 73
| 23
|
d9f7438220a4ebe74beaea888af37f17f5bfb665
| 721
|
py
|
Python
|
levenshtein_distance.py
|
int2str/catbot
|
d6279845eb51eaa9c9e9f2aef2f7a521432d7851
|
[
"MIT"
] | null | null | null |
levenshtein_distance.py
|
int2str/catbot
|
d6279845eb51eaa9c9e9f2aef2f7a521432d7851
|
[
"MIT"
] | null | null | null |
levenshtein_distance.py
|
int2str/catbot
|
d6279845eb51eaa9c9e9f2aef2f7a521432d7851
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 14:54:14 2020
@author: Mei
"""
@memoize
| 18.487179
| 74
| 0.468793
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 14:54:14 2020
@author: Mei
"""
def memoize(func):
mem = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in mem:
mem[key] = func(*args, **kwargs)
return mem[key]
return memoizer
@memoize
def levenshtein(s, t):
if s == "":
return len(t)
if t == "":
return len(s)
if s[-1] == t[-1]:
cost = 0
else:
cost = 1
res = min([levenshtein(s[:-1], t) + 1, # char is inserted
levenshtein(s, t[:-1]) + 1, # char is deleted
levenshtein(s[:-1], t[:-1]) + cost]) # char is substituted
return res
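A quick check of the memoized recursion: the edit distance between "kitten" and "sitting" is 3 (two substitutions and one insertion).
print(levenshtein("kitten", "sitting"))  # prints 3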
| 582
| 0
| 45
|
131c12d042555b54873fdce0f237aab3ccf4db7f
| 37
|
py
|
Python
|
src/repconc/models/repconc/__init__.py
|
jingtaozhan/RepCONC
|
64f3f8ac265e33a8abcd8d9d750e8a170b739f3b
|
[
"MIT"
] | 37
|
2021-10-16T07:38:44.000Z
|
2022-03-18T17:54:10.000Z
|
src/repconc/models/repconc/__init__.py
|
jingtaozhan/RepCONC
|
64f3f8ac265e33a8abcd8d9d750e8a170b739f3b
|
[
"MIT"
] | 4
|
2021-11-09T15:57:59.000Z
|
2022-03-01T09:10:32.000Z
|
src/repconc/models/repconc/__init__.py
|
jingtaozhan/RepCONC
|
64f3f8ac265e33a8abcd8d9d750e8a170b739f3b
|
[
"MIT"
] | 5
|
2021-11-08T02:58:24.000Z
|
2022-02-22T05:22:37.000Z
|
from .modeling_repconc import RepCONC
| 37
| 37
| 0.891892
|
from .modeling_repconc import RepCONC
| 0
| 0
| 0
|
b2e9ce95b9c470541c1124a564f290f253410919
| 9,658
|
py
|
Python
|
applications/FluidDynamicsApplication/tests/embedded_reservoir_test.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 2
|
2020-04-30T19:13:08.000Z
|
2021-04-14T19:40:47.000Z
|
applications/FluidDynamicsApplication/tests/embedded_reservoir_test.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 1
|
2020-04-30T19:19:09.000Z
|
2020-05-02T14:22:36.000Z
|
applications/FluidDynamicsApplication/tests/embedded_reservoir_test.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 1
|
2020-06-12T08:51:24.000Z
|
2020-06-12T08:51:24.000Z
|
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
import KratosMultiphysics.kratos_utilities as KratosUtilities
have_external_solvers = KratosUtilities.IsApplicationAvailable("ExternalSolversApplication")
import KratosMultiphysics.KratosUnittest as UnitTest
@UnitTest.skipUnless(have_external_solvers,"Missing required application: ExternalSolversApplication")
if __name__ == '__main__':
test = EmbeddedReservoirTest()
test.setUp()
test.distance = 0.5
test.slip_level_set = False
test.print_output = False
test.print_reference_values = False
test.work_folder = "EmbeddedReservoirTest"
test.reference_file = "reference_slip_reservoir_2D"
test.settings = "EmbeddedReservoir2DTest_parameters.json"
test.setUpProblem()
test.setUpDistanceField()
test.runTest()
test.tearDown()
test.checkResults()
| 45.130841
| 203
| 0.657693
|
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
import KratosMultiphysics.kratos_utilities as KratosUtilities
have_external_solvers = KratosUtilities.IsApplicationAvailable("ExternalSolversApplication")
import KratosMultiphysics.KratosUnittest as UnitTest
@UnitTest.skipUnless(have_external_solvers,"Missing required application: ExternalSolversApplication")
class EmbeddedReservoirTest(UnitTest.TestCase):
def testEmbeddedReservoir2D(self):
self.distance = 0.5
self.slip_level_set = False
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_reservoir_2D"
self.settings = "EmbeddedReservoir2DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def testEmbeddedReservoir3D(self):
self.distance = 0.5
self.slip_level_set = False
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_reservoir_3D"
self.settings = "EmbeddedReservoir3DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def testEmbeddedSlipReservoir2D(self):
self.distance = 0.5
self.slip_level_set = True
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_slip_reservoir_2D"
self.settings = "EmbeddedReservoir2DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def testEmbeddedSlipReservoir3D(self):
self.distance = 0.5
self.slip_level_set = True
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_slip_reservoir_3D"
self.settings = "EmbeddedReservoir3DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def ExecuteEmbeddedReservoirTest(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
self.setUp()
self.setUpProblem()
self.setUpDistanceField()
self.runTest()
self.tearDown()
self.checkResults()
def setUp(self):
self.check_tolerance = 1e-6
self.print_output = False
self.print_reference_values = False
def tearDown(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
KratosUtilities.DeleteFileIfExisting(
self.ProjectParameters["solver_settings"]["model_import_settings"]["input_filename"].GetString()+'.time')
def setUpProblem(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
with open(self.settings, 'r') as parameter_file:
self.ProjectParameters = KratosMultiphysics.Parameters(parameter_file.read())
self.model = KratosMultiphysics.Model()
## Solver construction
import python_solvers_wrapper_fluid
self.solver = python_solvers_wrapper_fluid.CreateSolver(self.model, self.ProjectParameters)
## Set the "is_slip" field in the json settings (to avoid duplication it is set to false in all tests)
if self.slip_level_set and self.solver.settings.Has("is_slip"):
self.ProjectParameters["solver_settings"]["is_slip"].SetBool(True)
self.solver.AddVariables()
## Read the model - note that SetBufferSize is done here
self.solver.ImportModelPart()
self.solver.PrepareModelPart()
## Add AddDofs
self.solver.AddDofs()
## Solver initialization
self.solver.Initialize()
## Processes construction
import process_factory
self.list_of_processes = process_factory.KratosProcessFactory(self.model).ConstructListOfProcesses( self.ProjectParameters["processes"]["gravity"] )
self.list_of_processes += process_factory.KratosProcessFactory(self.model).ConstructListOfProcesses( self.ProjectParameters["processes"]["boundary_conditions_process_list"] )
## Processes initialization
for process in self.list_of_processes:
process.ExecuteInitialize()
self.main_model_part = self.model.GetModelPart(self.ProjectParameters["problem_data"]["model_part_name"].GetString())
def setUpDistanceField(self):
# Set the distance function
if (self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 2):
for node in self.main_model_part.Nodes:
distance = node.Y-self.distance
node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, distance)
elif (self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 3):
for node in self.main_model_part.Nodes:
distance = node.Z-self.distance
node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, distance)
# Set the ELEMENTAL_DISTANCES value
n_nodes = len(self.main_model_part.Elements[1].GetNodes())
for element in self.main_model_part.Elements:
elem_dist = KratosMultiphysics.Vector(n_nodes)
elem_nodes = element.GetNodes()
for i_node in range(0,n_nodes):
elem_dist[i_node] = elem_nodes[i_node].GetSolutionStepValue(KratosMultiphysics.DISTANCE)
element.SetValue(KratosMultiphysics.ELEMENTAL_DISTANCES, elem_dist)
def runTest(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
if (self.print_output):
gid_mode = KratosMultiphysics.GiDPostMode.GiD_PostBinary
multifile = KratosMultiphysics.MultiFileFlag.SingleFile
deformed_mesh_flag = KratosMultiphysics.WriteDeformedMeshFlag.WriteUndeformed
write_conditions = KratosMultiphysics.WriteConditionsFlag.WriteElementsOnly
gid_io = KratosMultiphysics.GidIO(self.ProjectParameters["solver_settings"]["model_import_settings"]["input_filename"].GetString(),gid_mode,multifile,deformed_mesh_flag, write_conditions)
mesh_name = 0.0
gid_io.InitializeMesh( mesh_name)
gid_io.WriteMesh( self.main_model_part.GetMesh() )
gid_io.FinalizeMesh()
gid_io.InitializeResults(mesh_name,(self.main_model_part).GetMesh())
end_time = self.ProjectParameters["problem_data"]["end_time"].GetDouble()
time = 0.0
step = 0
for process in self.list_of_processes:
process.ExecuteBeforeSolutionLoop()
while(time <= end_time):
time = self.solver.AdvanceInTime(time)
for process in self.list_of_processes:
process.ExecuteInitializeSolutionStep()
self.solver.InitializeSolutionStep()
self.solver.Predict()
self.solver.SolveSolutionStep()
self.solver.FinalizeSolutionStep()
for process in self.list_of_processes:
process.ExecuteFinalizeSolutionStep()
for process in self.list_of_processes:
process.ExecuteBeforeOutputStep()
if (self.print_output):
gid_io.WriteNodalResults(KratosMultiphysics.VELOCITY,self.main_model_part.Nodes,time,0)
gid_io.WriteNodalResults(KratosMultiphysics.PRESSURE,self.main_model_part.Nodes,time,0)
gid_io.WriteNodalResults(KratosMultiphysics.DISTANCE,self.main_model_part.Nodes,time,0)
for process in self.list_of_processes:
process.ExecuteAfterOutputStep()
for process in self.list_of_processes:
process.ExecuteFinalize()
if (self.print_output):
gid_io.FinalizeResults()
def checkResults(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
if self.print_reference_values:
with open(self.reference_file+'.csv','w') as ref_file:
ref_file.write("#ID, PRESSURE\n")
for node in self.main_model_part.Nodes:
pres = node.GetSolutionStepValue(KratosMultiphysics.PRESSURE)
ref_file.write("{0}, {1}\n".format(node.Id, pres))
else:
with open(self.reference_file+'.csv','r') as reference_file:
reference_file.readline() # skip header
line = reference_file.readline()
for node in self.main_model_part.Nodes:
values = [ float(i) for i in line.rstrip('\n ').split(',') ]
node_id = values[0]
reference_pres = values[1]
pres = node.GetSolutionStepValue(KratosMultiphysics.PRESSURE)
self.assertAlmostEqual(reference_pres, pres, delta = self.check_tolerance)
line = reference_file.readline()
if line != '': # If we did not reach the end of the reference file
self.fail("The number of nodes in the mdpa is smaller than the number of nodes in the output file")
if __name__ == '__main__':
test = EmbeddedReservoirTest()
test.setUp()
test.distance = 0.5
test.slip_level_set = False
test.print_output = False
test.print_reference_values = False
test.work_folder = "EmbeddedReservoirTest"
test.reference_file = "reference_slip_reservoir_2D"
test.settings = "EmbeddedReservoir2DTest_parameters.json"
test.setUpProblem()
test.setUpDistanceField()
test.runTest()
test.tearDown()
test.checkResults()
| 8,421
| 26
| 318
|
68e1ed0ef59a3040f7e29f35297d861200c09805
| 454
|
py
|
Python
|
tests/conftest.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | 1
|
2021-01-18T09:38:53.000Z
|
2021-01-18T09:38:53.000Z
|
tests/conftest.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | 1
|
2021-01-15T05:15:08.000Z
|
2021-01-15T05:15:08.000Z
|
import pytest
def pytest_collection_modifyitems(config, items):
"""If async dependencies is not available skip async tests."""
try:
import treq # noqa
skip_async = False
except ImportError:
skip_async = True
    skip_async_marker = pytest.mark.skip(reason="async dependencies (treq) are not installed")
for item in items:
if "requires_async" in item.keywords and skip_async is True:
            item.add_marker(skip_async_marker)
| 23.894737
| 71
| 0.665198
|
import pytest
def pytest_collection_modifyitems(config, items):
"""If async dependencies is not available skip async tests."""
try:
import treq # noqa
skip_async = False
except ImportError:
skip_async = True
    skip_async_marker = pytest.mark.skip(reason="async dependencies (treq) are not installed")
for item in items:
if "requires_async" in item.keywords and skip_async is True:
            item.add_marker(skip_async_marker)
| 0
| 0
| 0
|
1f318af426ba6effdcc824c35b1410a508967992
| 605
|
py
|
Python
|
python/train_model.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
python/train_model.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
python/train_model.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import random
import numpy as np
import deepmind_lab
import tensorflow as tf
import sys
print('PYTHON VERSION - ', sys.version)
# For the DML random agent dataset
import random_dataset
# For the model that we will train
import model
# For debugging
import os
for i in range(10):
print(os.getcwd())
ds = random_dataset.dml_dataset()
model = model.Model(ds.shape)
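# Training loop: repeatedly pull a batch from the random-agent dataset and run a single training step on it.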
for i in range(1000000):
batch = ds.get_batch()
model.train_step(batch, i)
| 18.90625
| 39
| 0.771901
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import random
import numpy as np
import deepmind_lab
import tensorflow as tf
import sys
print('PYTHON VERSION - ', sys.version)
# For the DML random agent dataset
import random_dataset
# For the model that we will train
import model
# For debugging
import os
for i in range(10):
print(os.getcwd())
ds = random_dataset.dml_dataset()
model = model.Model(ds.shape)
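# Training loop: repeatedly pull a batch from the random-agent dataset and run a single training step on it.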
for i in range(1000000):
batch = ds.get_batch()
model.train_step(batch, i)
| 0
| 0
| 0
|
61db0562dc232d4ff5aad924e5350c8b5a68b06a
| 503
|
py
|
Python
|
hub/models/types.py
|
harenlewis/api-hub
|
f79cd8b82e95c039269765a4542866286803a322
|
[
"MIT"
] | null | null | null |
hub/models/types.py
|
harenlewis/api-hub
|
f79cd8b82e95c039269765a4542866286803a322
|
[
"MIT"
] | 2
|
2020-06-05T19:41:09.000Z
|
2021-06-10T21:07:30.000Z
|
hub/models/types.py
|
harenlewis/api-hub
|
f79cd8b82e95c039269765a4542866286803a322
|
[
"MIT"
] | null | null | null |
GET = 100
POST = 200
PUT = 300
DELETE = 400
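# (value, label) pairs in the shape used for Django-style model field choices.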
METHOD_TYPES = (
(GET, 'GET'),
(POST, 'POST'),
(PUT, 'PUT'),
(DELETE, 'DELETE'),
)
METHOD_TYPES_DICT = {
'GET': GET,
'POST': POST,
'PUT': PUT,
'DELETE': DELETE,
}
JSON = 500
HTML = 600
TEXT = 700
RESP_TYPES = (
(JSON, 'JSON'),
(HTML, 'HTML'),
(TEXT, 'TEXT'),
)
RESP_TYPES_DICT = {
'JSON': 'application/json; charset=utf-8',
'HTML': 'text/html; charset=utf-8',
'TEXT': 'text/plain; charset=utf-8',
}
| 14.794118
| 46
| 0.532803
|
GET = 100
POST = 200
PUT = 300
DELETE = 400
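# (value, label) pairs in the shape used for Django-style model field choices.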
METHOD_TYPES = (
(GET, 'GET'),
(POST, 'POST'),
(PUT, 'PUT'),
(DELETE, 'DELETE'),
)
METHOD_TYPES_DICT = {
'GET': GET,
'POST': POST,
'PUT': PUT,
'DELETE': DELETE,
}
JSON = 500
HTML = 600
TEXT = 700
RESP_TYPES = (
(JSON, 'JSON'),
(HTML, 'HTML'),
(TEXT, 'TEXT'),
)
RESP_TYPES_DICT = {
'JSON': 'application/json; charset=utf-8',
'HTML': 'text/html; charset=utf-8',
'TEXT': 'text/plain; charset=utf-8',
}
| 0
| 0
| 0
|
052a4a4bdf56d5e8dedd6dfe0080f6b2a2e65602
| 145
|
py
|
Python
|
app/adapters/api/dtos/message_dto.py
|
jmp/fast1
|
2fb0283168d93b258da15e12af530c50de2dba75
|
[
"MIT"
] | 1
|
2021-11-23T13:27:21.000Z
|
2021-11-23T13:27:21.000Z
|
app/adapters/api/dtos/message_dto.py
|
jmp/fast1
|
2fb0283168d93b258da15e12af530c50de2dba75
|
[
"MIT"
] | null | null | null |
app/adapters/api/dtos/message_dto.py
|
jmp/fast1
|
2fb0283168d93b258da15e12af530c50de2dba75
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
| 14.5
| 30
| 0.641379
|
from pydantic import BaseModel
class MessageDto(BaseModel):
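    # Immutable (frozen) response body carrying a single detail string; the schema title is set to "Message".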
detail: str
class Config:
frozen = True
title = "Message"
| 0
| 90
| 23
|
69dfa3f3f3c61dd8f1cd49fd9d62071055662676
| 3,962
|
py
|
Python
|
genome_designer/tests/integration/test_pipeline_integration.py
|
churchlab/millstone
|
ddb5d003a5b8a7675e5a56bafd5c432d9642b473
|
[
"MIT"
] | 45
|
2015-09-30T14:55:33.000Z
|
2021-06-28T02:33:30.000Z
|
genome_designer/tests/integration/test_pipeline_integration.py
|
churchlab/millstone
|
ddb5d003a5b8a7675e5a56bafd5c432d9642b473
|
[
"MIT"
] | 261
|
2015-06-03T20:41:56.000Z
|
2022-03-07T08:46:10.000Z
|
genome_designer/tests/integration/test_pipeline_integration.py
|
churchlab/millstone
|
ddb5d003a5b8a7675e5a56bafd5c432d9642b473
|
[
"MIT"
] | 22
|
2015-06-04T20:43:10.000Z
|
2022-02-27T08:27:34.000Z
|
"""Alignment pipeline integration tests.
"""
import os
import time
from django.conf import settings
from djcelery_testworker.testcase import CeleryWorkerTestCase
from main.models import AlignmentGroup
from main.models import Dataset
from main.models import ExperimentSample
from main.testing_util import create_common_entities
from pipeline.pipeline_runner import run_pipeline
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import import_reference_genome_from_ncbi
from utils import internet_on
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'test_genome.fa')
TEST_FASTQ1 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.1.fq')
TEST_FASTQ2 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.2.fq')
| 38.096154
| 80
| 0.69687
|
"""Alignment pipeline integration tests.
"""
import os
import time
from django.conf import settings
from djcelery_testworker.testcase import CeleryWorkerTestCase
from main.models import AlignmentGroup
from main.models import Dataset
from main.models import ExperimentSample
from main.testing_util import create_common_entities
from pipeline.pipeline_runner import run_pipeline
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import import_reference_genome_from_ncbi
from utils import internet_on
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'test_genome.fa')
TEST_FASTQ1 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.1.fq')
TEST_FASTQ2 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.2.fq')
class TestAlignmentPipeline(CeleryWorkerTestCase):
def setUp(self):
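        # Import the test reference genome and attach the paired-end FASTQ files to a single sample shared by both tests.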
common_entities = create_common_entities()
self.project = common_entities['project']
self.reference_genome = import_reference_genome_from_local_file(
self.project, 'ref_genome', TEST_FASTA, 'fasta')
self.experiment_sample = ExperimentSample.objects.create(
project=self.project, label='sample1')
copy_and_add_dataset_source(self.experiment_sample, Dataset.TYPE.FASTQ1,
Dataset.TYPE.FASTQ1, TEST_FASTQ1)
copy_and_add_dataset_source(self.experiment_sample, Dataset.TYPE.FASTQ2,
Dataset.TYPE.FASTQ2, TEST_FASTQ2)
def test_run_pipeline(self):
"""Tests running the full pipeline.
"""
sample_list = [self.experiment_sample]
alignment_group_obj, async_result = run_pipeline('name_placeholder',
self.reference_genome, sample_list)
# Block until pipeline finishes.
while not async_result.ready():
time.sleep(1)
if async_result.status == 'FAILURE':
self.fail('Async task failed.')
# Refresh the object.
alignment_group_obj = AlignmentGroup.objects.get(
id=alignment_group_obj.id)
# Verify the AlignmentGroup object is created.
self.assertEqual(1,
len(alignment_group_obj.experimentsampletoalignment_set.all()))
self.assertEqual(AlignmentGroup.STATUS.COMPLETED,
alignment_group_obj.status)
# Make sure the initial JBrowse config has been created.
jbrowse_dir = self.reference_genome.get_jbrowse_directory_path()
self.assertTrue(os.path.exists(jbrowse_dir))
self.assertTrue(os.path.exists(os.path.join(jbrowse_dir,
'indiv_tracks')))
def test_run_pipeline__genbank_from_ncbi_with_spaces_in_label(self):
"""Tests the pipeline where the genome is imported from NCBI with
spaces in the name.
"""
if not internet_on():
return
MG1655_ACCESSION = 'NC_000913.3'
MG1655_LABEL = 'mg1655 look a space'
ref_genome = import_reference_genome_from_ncbi(self.project,
MG1655_LABEL, MG1655_ACCESSION, 'genbank')
sample_list = [self.experiment_sample]
alignment_group_obj, async_result = run_pipeline('name_placeholder',
ref_genome, sample_list)
# Block until pipeline finishes.
while not async_result.ready():
time.sleep(1)
if async_result.status == 'FAILURE':
self.fail('Async task failed.')
alignment_group_obj = AlignmentGroup.objects.get(
id=alignment_group_obj.id)
self.assertEqual(1,
len(alignment_group_obj.experimentsampletoalignment_set.all()))
self.assertEqual(AlignmentGroup.STATUS.COMPLETED,
alignment_group_obj.status)
| 618
| 2,336
| 23
|
c03319542f2244c2d4ef46ea8722b2475a06c15b
| 793
|
py
|
Python
|
topics/Array/Best_Time_to_Buy_and_Sell_Stock_121/Best_Time_to_Buy_and_Sell_Stock_121.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | 1
|
2019-10-31T11:06:23.000Z
|
2019-10-31T11:06:23.000Z
|
topics/Array/Best_Time_to_Buy_and_Sell_Stock_121/Best_Time_to_Buy_and_Sell_Stock_121.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | null | null | null |
topics/Array/Best_Time_to_Buy_and_Sell_Stock_121/Best_Time_to_Buy_and_Sell_Stock_121.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
# ---------------------------------------------------
from typing import List
# Runtime Complexity: O(N)
# Space Complexity: O(1)
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# 5
print(solution.maxProfit([7, 1, 5, 3, 6, 4]))
# 0
print(solution.maxProfit([7, 6, 4, 3, 1]))
| 27.344828
| 64
| 0.461538
|
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
# ---------------------------------------------------
from typing import List
# Runtime Complexity: O(N)
# Space Complexity: O(1)
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if len(prices) == 0:
return 0
cur_min = prices[0]
max_diff = 0
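        # Invariant: cur_min is the lowest price seen so far and max_diff is the best profit achievable by selling at any index processed so far.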
for i in range(1, len(prices)):
cur_min = min(prices[i], cur_min)
max_diff = max(prices[i] - cur_min, max_diff)
return max_diff
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# 5
print(solution.maxProfit([7, 1, 5, 3, 6, 4]))
# 0
print(solution.maxProfit([7, 6, 4, 3, 1]))
| 294
| -6
| 48
|
651987d7de3aff6142ce2f122b6b368e0940755f
| 6,839
|
py
|
Python
|
main.py
|
GunnarHolwerda/PiWallGuiController
|
cc90e5f6fd6f13fdfdcabcc8e6b195bf01cb440f
|
[
"MIT"
] | 5
|
2017-03-29T20:44:42.000Z
|
2020-06-26T23:11:34.000Z
|
main.py
|
GunnarHolwerda/PiWallGuiController
|
cc90e5f6fd6f13fdfdcabcc8e6b195bf01cb440f
|
[
"MIT"
] | null | null | null |
main.py
|
GunnarHolwerda/PiWallGuiController
|
cc90e5f6fd6f13fdfdcabcc8e6b195bf01cb440f
|
[
"MIT"
] | 1
|
2021-03-08T14:57:09.000Z
|
2021-03-08T14:57:09.000Z
|
"""
GUI Application to control the PiWall from
"""
#!/usr/bin/python3
# Author: Gunnar Holwerda
# GUI to control a PiWall
from tkinter import Frame, StringVar, OptionMenu, Listbox, Button, Label, Tk, END
from piwallcontroller.piwallcontroller import PiWallController
from piwallcontroller.playlist import Playlist
from threading import Thread
class SelectorWindow(Frame):
"""
GUI Class extending the tkinter.Frame class
"""
TIMEOUTS = {
'1 hour ': 3600,
'2 hours': 7200,
'3 hours': 10800,
'Infinite': -1,
}
def create_video_file_dropdown(self):
"""
Creates the dropdown to display the video files from
"""
videos = self.__controller.get_video_file_list()
if videos:
self.__dropdown_selection.set(videos[0])
else:
videos.append(None)
self.video_dropdown = OptionMenu(
None, self.__dropdown_selection, *videos)
self.video_dropdown.config(width=10)
self.video_dropdown.grid(row=0, column=0)
def create_timeout_dropdown(self):
"""
Creates the dropdown that displays the timeouts
"""
timeouts = list(self.TIMEOUTS.keys())
timeouts.sort()
self.__timeout_selection.set(timeouts[0])
self.timeout_dropdown = OptionMenu(
None, self.__timeout_selection, *timeouts)
self.timeout_dropdown.config(width=5)
self.timeout_dropdown.grid(row=0, column=1)
def create_display_box(self):
"""
Creates display box that displays all current items in the playlist
"""
self.display_box = Listbox(width=30, height=10)
self.display_box.grid(row=0, column=2, columnspan=2)
def create_play_button(self):
"""
Creates the play button
"""
self.submit_button = Button(text="Play", width=10)
self.submit_button['command'] = self.play_wall
self.submit_button.grid(row=1, column=2, pady=5)
def create_add_button(self):
"""
Creates the button to add the current values in the video and timeout dropdown
into the playlist
"""
self.add_button = Button(text='Add', fg='green', width=10)
self.add_button['command'] = self.update_display_box
self.add_button.grid(row=1, column=0, pady=5)
def create_delete_button(self):
"""
        Creates the delete button to delete items from the display box
"""
self.delete_button = Button(text='Delete', fg='red', width=10)
self.delete_button['command'] = self.delete_selected_item
self.delete_button.grid(row=1, column=1, pady=5)
def create_reboot_button(self):
"""
        Creates the button that reboots the Pis
"""
self.reboot_button = Button(text='Reboot Tiles', fg='red', width=10)
self.reboot_button['command'] = self.reboot_pressed
self.reboot_button.grid(row=1, column=3, pady=5)
def create_status_label(self):
"""
Creates label to display current status of the wall
"""
self.status_label = Label(relief="ridge", width=11)
self.set_status_label(0)
self.status_label.grid(row=2, column=3, pady=5)
def create_stop_button(self):
"""
Creates stop button to stop PiWall
"""
self.stop_button = Button(text='Stop Playing')
self.set_status_label(0)
self.stop_button['command'] = self.stop_pressed
self.stop_button.grid(row=2, column=2, pady=5)
def delete_selected_item(self):
"""
        Deletes the currently selected item from the display box
"""
self.__playlist.remove_playlist_item(self.display_box.curselection())
self.display_box.delete(self.display_box.curselection())
def play_wall(self):
"""
        Submits this form to be played on the Pis
"""
if self.__playlist.is_empty():
return
self.set_status_label(1)
self.display_box.delete(0, END)
# If there is a thread running, we need to stop the wall, which will
# end the thread
        if self.__command_thread.is_alive():
print("Stopping Wall")
self.__controller.stop_wall()
self.__command_thread.join()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.__command_thread.start()
def update_display_box(self):
"""
Button listener for the Add Button (create_add_button)
"""
video_file = self.__dropdown_selection.get()
timeout = self.__timeout_selection.get()
self.__playlist.add_playlist_item(video_file, self.TIMEOUTS[timeout])
self.display_box.insert(END, "{0} {1}".format(timeout, video_file))
def stop_pressed(self):
"""
Button listener for the Stop Button (create_stop_button)
"""
self.__controller.stop_wall()
self.set_status_label(0)
def reboot_pressed(self):
"""
Button listener for the Reboot Button (create_reboot_button)
"""
self.set_status_label(0)
self.__controller.reboot_pis()
return True
def set_status_label(self, state):
"""
Updates the status label to the current status of the PiWall
"""
if state == 1:
self.status_label.config(text='Playing', fg='green')
return True
elif state == 0:
self.status_label.config(text='Not Playing', fg='red')
return True
else:
            raise Exception(
                'Status label state {0} not supported. Try 0 or 1'.format(state))
def get_controller(self):
"""
        Returns the PiWallController instance
"""
return self.__controller
# Run the GUI
if __name__ == "__main__":
tk_window = Tk(className="PiWall")
frame = SelectorWindow(master=tk_window)
tk_window.mainloop()
frame.get_controller().stop_wall()
| 33.360976
| 90
| 0.619389
|
"""
GUI Application to control the PiWall from
"""
#!/usr/bin/python3
# Author: Gunnar Holwerda
# GUI to control a PiWall
from tkinter import Frame, StringVar, OptionMenu, Listbox, Button, Label, Tk, END
from piwallcontroller.piwallcontroller import PiWallController
from piwallcontroller.playlist import Playlist
from threading import Thread
class SelectorWindow(Frame):
"""
GUI Class extending the tkinter.Frame class
"""
TIMEOUTS = {
'1 hour ': 3600,
'2 hours': 7200,
'3 hours': 10800,
'Infinite': -1,
}
def __init__(self, master=None):
Frame.__init__(self, master)
self.__playlist = Playlist()
self.__controller = PiWallController()
self.__dropdown_selection = StringVar()
self.__timeout_selection = StringVar()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.grid()
self.create_video_file_dropdown()
self.create_timeout_dropdown()
self.create_display_box()
self.create_add_button()
self.create_delete_button()
self.create_play_button()
self.create_reboot_button()
self.create_status_label()
self.create_stop_button()
def create_video_file_dropdown(self):
"""
Creates the dropdown to display the video files from
"""
videos = self.__controller.get_video_file_list()
if videos:
self.__dropdown_selection.set(videos[0])
else:
videos.append(None)
self.video_dropdown = OptionMenu(
None, self.__dropdown_selection, *videos)
self.video_dropdown.config(width=10)
self.video_dropdown.grid(row=0, column=0)
def create_timeout_dropdown(self):
"""
Creates the dropdown that displays the timeouts
"""
timeouts = list(self.TIMEOUTS.keys())
timeouts.sort()
self.__timeout_selection.set(timeouts[0])
self.timeout_dropdown = OptionMenu(
None, self.__timeout_selection, *timeouts)
self.timeout_dropdown.config(width=5)
self.timeout_dropdown.grid(row=0, column=1)
def create_display_box(self):
"""
Creates display box that displays all current items in the playlist
"""
self.display_box = Listbox(width=30, height=10)
self.display_box.grid(row=0, column=2, columnspan=2)
def create_play_button(self):
"""
Creates the play button
"""
self.submit_button = Button(text="Play", width=10)
self.submit_button['command'] = self.play_wall
self.submit_button.grid(row=1, column=2, pady=5)
def create_add_button(self):
"""
Creates the button to add the current values in the video and timeout dropdown
into the playlist
"""
self.add_button = Button(text='Add', fg='green', width=10)
self.add_button['command'] = self.update_display_box
self.add_button.grid(row=1, column=0, pady=5)
def create_delete_button(self):
"""
        Creates the delete button to delete items from the display box
"""
self.delete_button = Button(text='Delete', fg='red', width=10)
self.delete_button['command'] = self.delete_selected_item
self.delete_button.grid(row=1, column=1, pady=5)
def create_reboot_button(self):
"""
        Creates the button that reboots the Pis
"""
self.reboot_button = Button(text='Reboot Tiles', fg='red', width=10)
self.reboot_button['command'] = self.reboot_pressed
self.reboot_button.grid(row=1, column=3, pady=5)
def create_status_label(self):
"""
Creates label to display current status of the wall
"""
self.status_label = Label(relief="ridge", width=11)
self.set_status_label(0)
self.status_label.grid(row=2, column=3, pady=5)
def create_stop_button(self):
"""
Creates stop button to stop PiWall
"""
self.stop_button = Button(text='Stop Playing')
self.set_status_label(0)
self.stop_button['command'] = self.stop_pressed
self.stop_button.grid(row=2, column=2, pady=5)
def delete_selected_item(self):
"""
        Deletes the currently selected item from the display box
"""
self.__playlist.remove_playlist_item(self.display_box.curselection())
self.display_box.delete(self.display_box.curselection())
def play_wall(self):
"""
        Submits this form to be played on the Pis
"""
if self.__playlist.is_empty():
return
self.set_status_label(1)
self.display_box.delete(0, END)
# If there is a thread running, we need to stop the wall, which will
# end the thread
        if self.__command_thread.is_alive():
print("Stopping Wall")
self.__controller.stop_wall()
self.__command_thread.join()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.__command_thread.start()
def update_display_box(self):
"""
Button listener for the Add Button (create_add_button)
"""
video_file = self.__dropdown_selection.get()
timeout = self.__timeout_selection.get()
self.__playlist.add_playlist_item(video_file, self.TIMEOUTS[timeout])
self.display_box.insert(END, "{0} {1}".format(timeout, video_file))
def stop_pressed(self):
"""
Button listener for the Stop Button (create_stop_button)
"""
self.__controller.stop_wall()
self.set_status_label(0)
def reboot_pressed(self):
"""
Button listener for the Reboot Button (create_reboot_button)
"""
self.set_status_label(0)
self.__controller.reboot_pis()
return True
def set_status_label(self, state):
"""
Updates the status label to the current status of the PiWall
"""
if state == 1:
self.status_label.config(text='Playing', fg='green')
return True
elif state == 0:
self.status_label.config(text='Not Playing', fg='red')
return True
else:
            raise Exception(
                'Status label state {0} not supported. Try 0 or 1'.format(state))
def get_controller(self):
"""
        Returns the PiWallController instance
"""
return self.__controller
# Run the GUI
if __name__ == "__main__":
tk_window = Tk(className="PiWall")
frame = SelectorWindow(master=tk_window)
tk_window.mainloop()
frame.get_controller().stop_wall()
| 686
| 0
| 27
|
4e411687a292bc56a0037b2e523555237471ea26
| 765
|
py
|
Python
|
libraries/botbuilder-schema/botbuilder/schema/_sign_in_enums.py
|
victor-kironde/botbuilder-python
|
e893d9b036d7cf33cf9c9afd1405450c354cdbcd
|
[
"MIT"
] | 1
|
2020-07-12T21:04:08.000Z
|
2020-07-12T21:04:08.000Z
|
libraries/botbuilder-schema/botbuilder/schema/_sign_in_enums.py
|
Fortune-Adekogbe/botbuilder-python
|
4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4
|
[
"MIT"
] | null | null | null |
libraries/botbuilder-schema/botbuilder/schema/_sign_in_enums.py
|
Fortune-Adekogbe/botbuilder-python
|
4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4
|
[
"MIT"
] | 1
|
2020-10-01T07:34:07.000Z
|
2020-10-01T07:34:07.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
| 40.263158
| 94
| 0.60915
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
class SignInConstants(str, Enum):
# Name for the signin invoke to verify the 6-digit authentication code as part of sign-in.
verify_state_operation_name = "signin/verifyState"
# Name for signin invoke to perform a token exchange.
token_exchange_operation_name = "signin/tokenExchange"
# The EventActivity name when a token is sent to the bot.
token_response_event_name = "tokens/response"
| 0
| 392
| 23
|
cedce4854061d9a8c9e7cb1c10204a423754caa1
| 220
|
py
|
Python
|
verifyage.py
|
cheesyc/basicpython
|
9a055e4f813c6caa601ba00da939439b0bc82a3f
|
[
"MIT"
] | null | null | null |
verifyage.py
|
cheesyc/basicpython
|
9a055e4f813c6caa601ba00da939439b0bc82a3f
|
[
"MIT"
] | null | null | null |
verifyage.py
|
cheesyc/basicpython
|
9a055e4f813c6caa601ba00da939439b0bc82a3f
|
[
"MIT"
] | null | null | null |
from datetime import datetime
# def days (d):
# now = datetime.now
if __name__ == "__main__":
# u = int(input("What is your age?"))
# d = int(input("What month were you born in?"")
    print(datetime.now())
| 22
| 52
| 0.613636
|
from datetime import datetime
# def days (d):
# now = datetime.now
if __name__ == "__main__":
# u = int(input("What is your age?"))
# d = int(input("What month were you born in?"")
    print(datetime.now())
| 0
| 0
| 0
|
2ab9ebef051b3056bedabb899617bd511e5cce45
| 3,546
|
py
|
Python
|
acceptance/harness/acceptance_test.py
|
ismacaulay/qtcwatchdog
|
72f3588eef1019bac8788fa58c52722dfa7c4d28
|
[
"MIT"
] | null | null | null |
acceptance/harness/acceptance_test.py
|
ismacaulay/qtcwatchdog
|
72f3588eef1019bac8788fa58c52722dfa7c4d28
|
[
"MIT"
] | 12
|
2015-10-22T15:38:28.000Z
|
2016-03-22T18:53:57.000Z
|
acceptance/harness/acceptance_test.py
|
ismacaulay/qtcwatchdog
|
72f3588eef1019bac8788fa58c52722dfa7c4d28
|
[
"MIT"
] | null | null | null |
import os, mock
from pyfakefs import fake_filesystem_unittest
from observer import FakeObserver
from qtcwatchdog.qtcwatchdog import QtcWatchdog
from qtcwatchdog.watcher import ProjectWatcher
| 38.543478
| 101
| 0.663283
|
import os, mock
from pyfakefs import fake_filesystem_unittest
from observer import FakeObserver
from qtcwatchdog.qtcwatchdog import QtcWatchdog
from qtcwatchdog.watcher import ProjectWatcher
class WatchdogAcceptanceTest(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.fs_observer = FakeObserver(self.fs)
self.project_settings = {}
self.sleep_patcher = mock.patch('time.sleep')
self.addCleanup(self.sleep_patcher.stop)
self.mock_sleep = self.sleep_patcher.start()
self.running_patcher = mock.patch('qtcwatchdog.watcher.running')
self.addCleanup(self.running_patcher.stop)
self.mock_running = self.running_patcher.start()
self.mock_running.side_effect = [True, False]
self.observer_patcher = mock.patch('qtcwatchdog.watcher.Observer')
self.addCleanup(self.observer_patcher.stop)
self.mock_observer = self.observer_patcher.start()
self.mock_observer.return_value = self.fs_observer
self.watcher_patcher = mock.patch('qtcwatchdog.qtcwatchdog.ProjectWatcher')
self.addCleanup(self.watcher_patcher.stop)
self.mock_watcher = self.watcher_patcher.start()
self.mock_watcher.side_effect = self.save_updater
self.setup_project_directory()
def tearDown(self):
pass
def setup_project_directory(self):
self.project_settings = {
'project': 'watchdog',
'project_path': os.path.relpath('/project/watchdog'),
'files': {},
'includes': {},
}
self.files_file = os.path.join(self.project_settings['project_path'], 'watchdog.files')
self.includes_file = os.path.join(self.project_settings['project_path'], 'watchdog.includes')
os.makedirs(self.project_settings['project_path'])
self.fs.CreateFile(self.files_file)
self.fs.CreateFile(self.includes_file)
self.initial_files = [
os.path.join(self.project_settings['project_path'], 'initial_file.txt'),
os.path.join(self.project_settings['project_path'], 'initial_file.cxx'),
os.path.join(self.project_settings['project_path'], 'initial_file.h'),
]
for f in self.initial_files:
self.fs.CreateFile(f)
self.initial_directories = [
os.path.join(self.project_settings['project_path'], 'directory1'),
os.path.join(self.project_settings['project_path'], 'directory2'),
os.path.join(self.project_settings['project_path'], 'directory3'),
]
for d in self.initial_directories:
self.fs.CreateDirectory(d)
def create_and_start_watchdog(self):
self.watchdog = QtcWatchdog(self.project_settings)
self.watchdog.start()
def create_file_with_contents(self, path, contents):
try:
self.fs.RemoveObject(path)
finally:
self.fs.CreateFile(path, contents=contents)
def save_updater(self, project_path_arg, updater_arg):
self.file_updater = updater_arg
return ProjectWatcher(project_path_arg, updater_arg)
def file_contains_paths(self, file_path, paths=[]):
with open(file_path) as f:
            lines = [line.strip('\n') for line in f.readlines()]
for path in paths:
if path not in lines:
return False, '{} does not contain path {}'.format(file_path, path)
return True, 'All paths in {}. paths: {}'.format(file_path, str(paths))
| 3,098
| 43
| 211
|
7d5e808698d08d5b754ad10b30667e0affcf369b
| 9,023
|
py
|
Python
|
predictive-horizontal-pod-autoscaler/short/analyse.py
|
jthomperoo/custom-pod-autoscaler-experiments
|
f065bee72391dff008a388d46cba40df3fb23c98
|
[
"Apache-2.0"
] | 4
|
2020-02-26T14:00:01.000Z
|
2022-02-25T15:23:09.000Z
|
predictive-horizontal-pod-autoscaler/short/analyse.py
|
jthomperoo/custom-pod-autoscaler-experiments
|
f065bee72391dff008a388d46cba40df3fb23c98
|
[
"Apache-2.0"
] | 1
|
2021-06-12T09:40:56.000Z
|
2021-06-12T09:51:45.000Z
|
predictive-horizontal-pod-autoscaler/short/analyse.py
|
jthomperoo/custom-pod-autoscaler-experiments
|
f065bee72391dff008a388d46cba40df3fb23c98
|
[
"Apache-2.0"
] | 1
|
2021-07-07T09:58:23.000Z
|
2021-07-07T09:58:23.000Z
|
# Copyright 2020 Jamie Thompson.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
from tabulate import tabulate
from matplotlib import pyplot as plt
if __name__ == "__main__":
main()
| 54.355422
| 161
| 0.729469
|
# Copyright 2020 Jamie Thompson.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
from tabulate import tabulate
from matplotlib import pyplot as plt
def plot_replica_comparison(horizontal_replicas, predictive_replicas):
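    # The x axis runs in 0.5-minute steps over the 30-minute experiment window (60 samples per series).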
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_replicas, "r", list(np.arange(0, 30, 0.5)), predictive_replicas, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("number of replicas")
plt.savefig("results/predictive_vs_horizontal_replicas.svg")
def plot_avg_latency_comparison(horizontal_latencies, predictive_latencies):
horizontal_avg_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("avg_response_time"))
predictive_avg_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("avg_response_time"))
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_avg_latencies, "r", list(np.arange(0, 30, 0.5)), predictive_avg_latencies, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("average latency")
plt.savefig("results/avg_latency_comparison.svg")
def plot_max_latency_comparison(horizontal_latencies, predictive_latencies):
horizontal_max_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("max_response_time"))
predictive_max_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("max_response_time"))
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_max_latencies, "r", list(np.arange(0, 30, 0.5)), predictive_max_latencies, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("maximum latency")
plt.savefig("results/max_latency_comparison.svg")
def plot_failed_to_success_request_percentage(horizontal_latencies, predictive_latencies):
horizontal_fail_percentages = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
predictive_fail_percentages = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_fail_percentages, "r", list(np.arange(0, 30, 0.5)), predictive_fail_percentages, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("failed requests (%)")
plt.savefig("results/fail_percentage_comparison.svg")
def create_table(horizontal_replicas, predictive_replicas, horizontal_latencies, predictive_latencies):
horizontal_num_requests = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_num_requests.append(result["num_requests"])
predictive_num_requests = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_num_requests.append(result["num_requests"])
horizontal_avg_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("avg_response_time"))
predictive_avg_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("avg_response_time"))
horizontal_max_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("max_response_time"))
predictive_max_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("max_response_time"))
horizontal_fail_percentages = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
predictive_fail_percentages = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
table = {
"time (mins)": list(np.arange(0, 30, 0.5)),
"hpa num requests": horizontal_num_requests,
"phpa num requests": predictive_num_requests,
"hpa replicas": horizontal_replicas,
"phpa replicas": predictive_replicas,
"hpa avg latencies": horizontal_avg_latencies,
"phpa avg latencies": predictive_avg_latencies,
"hpa max latencies": horizontal_max_latencies,
"phpa max latencies": predictive_max_latencies,
"hpa fail requests (%)": horizontal_fail_percentages,
"phpa fail requests (%)": predictive_fail_percentages
}
with open("results/predictive_vs_horizontal_table.md", "w") as table_file:
table_file.write(tabulate(table, tablefmt="pipe", headers="keys"))
def main():
with open("results/results.json") as json_file:
results = json.load(json_file)
horizontal_replicas = results["horizontal"]["replicas"]
predictive_replicas = results["predictive"]["replicas"]
horizontal_latencies = results["horizontal"]["latency"]
predictive_latencies = results["predictive"]["latency"]
horizontal_latencies = sorted(horizontal_latencies, key=lambda k: k["start_time"])
predictive_latencies = sorted(predictive_latencies, key=lambda k: k["start_time"])
create_table(horizontal_replicas, predictive_replicas, horizontal_latencies, predictive_latencies)
plot_replica_comparison(horizontal_replicas, predictive_replicas)
plot_avg_latency_comparison(horizontal_latencies, predictive_latencies)
plot_max_latency_comparison(horizontal_latencies, predictive_latencies)
plot_failed_to_success_request_percentage(horizontal_latencies, predictive_latencies)
if __name__ == "__main__":
main()
| 8,167
| 0
| 138
|
21699970a803f9a1e84a84d986852609b75c11f8
| 2,747
|
py
|
Python
|
fmformatter/Sites2Query.py
|
wassermanlab/OpenFlexTyper
|
35edbf2c29f20ccec20baaaf46cc2382b7defda6
|
[
"MIT"
] | 7
|
2019-11-26T00:01:58.000Z
|
2021-04-03T05:31:44.000Z
|
fmformatter/Sites2Query.py
|
wassermanlab/OpenFlexTyper_restore
|
f599011a8f856bd81e73e5472d50980b4695055c
|
[
"MIT"
] | 33
|
2019-10-22T22:23:51.000Z
|
2020-10-02T20:14:17.000Z
|
fmformatter/Sites2Query.py
|
wassermanlab/OpenFlexTyper_restore
|
f599011a8f856bd81e73e5472d50980b4695055c
|
[
"MIT"
] | 4
|
2019-11-29T23:16:57.000Z
|
2020-03-07T19:04:26.000Z
|
import pybedtools
import sys
import argparse
# Function which takes in a sites file and produces a query file.
# Sites file looks like (these are 1-based coords):
# 22:50988105:G:A
#
# Query file looks like:
# #Index Reference Alternate Chrom Pos Ref Alt Identifier DataType
#0 TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACATTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACACTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT 16 27557749 T C rs7198785_S-3AAAA cytoscan
# Given a 1-based position, extract flanking sequence from the FASTA and return the two query sequences: one with the reference allele and one with the alternate allele.
# reftest,alttest = Site2Seqs(22,50988105,'G','A',ARGS.Fasta)
# print(reftest)
# print(alttest)
if __name__=="__main__":
Main()
| 41.621212
| 649
| 0.790681
|
import pybedtools
import sys
import argparse
def GetArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-I","--Infile",help="Infile in the format of Sites: Chrom:position:ref:alt", required=True)
parser.add_argument("-F","--Fasta",help="Input fasta file corresponding to the positions", required=True)
parser.add_argument("-O","--Outfile",help="Output file for the queries for FlexTyper", required=True)
parser.add_argument("-S","--Source",help="Source acquired from,e.g. PeddyGRCh37Sites", required=True)
args = parser.parse_args()
return args
# Function which takes in a sites file and produces a query file.
# Sites file looks like (these are 1-based coords):
# 22:50988105:G:A
#
# Query file looks like:
# #Index Reference Alternate Chrom Pos Ref Alt Identifier DataType
#0 TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACATTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACACTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT 16 27557749 T C rs7198785_S-3AAAA cytoscan
def ParseSitesGetQuery(SitesInfile,Fasta,QueryOutfile,Source):
infile = open(SitesInfile,'r')
outfile = open(QueryOutfile,'w')
counter = 0
outfile.write("#Index\tReference\tAlternate\tChrom\tPos\tRef\tAlt\tIdentifier\tDataType\n")
for line in infile:
line = line.strip('\n')
cols=line.split(':')
chrom = cols[0]
pos = int(cols[1]) - 1 # 1-based transition
ref = cols[2]
alt = cols[3]
Source='PeddySitesGRCh37'
refSeq,altSeq = Site2Seqs(chrom,pos,ref,alt,Fasta)
outfile.write("%d\t%s\t%s\t%s\t%d\t%s\t%s\t%s\t%s\n"%(counter,refSeq,altSeq,chrom,pos,ref,alt,line,Source))
counter += 1
# Given a 1-based position, extract flanking sequence from the FASTA and return the two query sequences: one with the reference allele and one with the alternate allele.
def Site2Seqs(chrom,pos,ref,alt,fasta):
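    # Build two query sequences centred on the variant (301 bp for SNVs): one carrying the reference allele and one with the alternate allele spliced between the 150 bp flanks.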
pos = pos-1
refSeq = pybedtools.BedTool.seq((chrom,pos-150,pos+151),fasta)
altSeqleft = pybedtools.BedTool.seq((chrom,pos-150,pos),fasta)
altSeqright = pybedtools.BedTool.seq((chrom,pos+1,pos+151),fasta)
altSeq = altSeqleft + alt + altSeqright
return refSeq,altSeq
def Main():
ARGS = GetArgs()
ParseSitesGetQuery(ARGS.Infile,ARGS.Fasta,ARGS.Outfile,ARGS.Source)
# test Site2Seqs
# reftest,alttest = Site2Seqs(22,50988105,'G','A',ARGS.Fasta)
# print(reftest)
# print(alttest)
if __name__=="__main__":
Main()
| 1,466
| 0
| 91
|
fac204b97e11e17794e1161b7bf560750117f3ce
| 49
|
py
|
Python
|
src/thekpi_node/__init__.py
|
keeplerteam/thekpi
|
082258c26909254caf46caec1da89438a43548c3
|
[
"MIT"
] | 2
|
2022-01-21T14:37:50.000Z
|
2022-01-21T16:06:27.000Z
|
src/thekpi_node/__init__.py
|
keeplerteam/thekpi
|
082258c26909254caf46caec1da89438a43548c3
|
[
"MIT"
] | null | null | null |
src/thekpi_node/__init__.py
|
keeplerteam/thekpi
|
082258c26909254caf46caec1da89438a43548c3
|
[
"MIT"
] | null | null | null |
from .node import KpiNode
__all__ = ["KpiNode"]
| 12.25
| 25
| 0.714286
|
from .node import KpiNode
__all__ = ["KpiNode"]
| 0
| 0
| 0
|
1fa0e3b8383b8f9f172b6decfb3c6c2eff282ed3
| 4,727
|
py
|
Python
|
python/Tests/TestStatic.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/Tests/TestStatic.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/Tests/TestStatic.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import unittest
import numpy as np
from Solvers.Frank_Wolfe_Solver_Static import Frank_Wolfe_Solver
from Solvers.Path_Based_Frank_Wolfe_Solver import Path_Based_Frank_Wolfe_Solver
#from Solvers.Decomposition_Solver import Decomposition_Solver
from Model_Manager.Link_Model_Manager import Link_Model_Manager_class
from Java_Connection import Java_Connection
from Data_Types.Demand_Assignment_Class import Demand_Assignment_class
import os
import inspect
class TestStatic(unittest.TestCase):
@classmethod
'''
def test_decomposition_solver(self):
number_of_subproblems = 1
start_time1 = timeit.default_timer()
assignment_dec, error = Decomposition_Solver(self.traffic_scenario, self.Cost_Function, number_of_subproblems)
print "Decomposition finished with error ", error
elapsed1 = timeit.default_timer() - start_time1
print ("Decomposition Path-based took %s seconds" % elapsed1)
'''
| 41.464912
| 121
| 0.675904
|
import unittest
import numpy as np
from Solvers.Frank_Wolfe_Solver_Static import Frank_Wolfe_Solver
from Solvers.Path_Based_Frank_Wolfe_Solver import Path_Based_Frank_Wolfe_Solver
#from Solvers.Decomposition_Solver import Decomposition_Solver
from Model_Manager.Link_Model_Manager import Link_Model_Manager_class
from Java_Connection import Java_Connection
from Data_Types.Demand_Assignment_Class import Demand_Assignment_class
import os
import inspect
class TestStatic(unittest.TestCase):
@classmethod
def setUpClass(cls):
# make Java connection
cls.connection = Java_Connection()
# create a static/bpr model manager
this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
configfile = os.path.join(this_folder, os.path.pardir, 'configfiles', 'seven_links.xml')
bpr_coefficients = {0L: [1, 0, 0, 0, 1], 1L: [1, 0, 0, 0, 1], 2L: [5, 0, 0, 0, 5], 3L: [2, 0, 0, 0, 2],
4L: [2, 0, 0, 0, 2], 5L: [1, 0, 0, 0, 1], 6L: [5, 0, 0, 0, 5]}
cls.model_manager = Link_Model_Manager_class(configfile, "static", cls.connection, None, "bpr", bpr_coefficients)
# create a demand assignment
api = TestStatic.model_manager.beats_api
time_period = 1 # Only have one time period for static model
paths_list = list(api.get_path_ids())
commodity_list = list(api.get_commodity_ids())
route_list = {}
for path_id in paths_list:
route_list[path_id] = api.get_subnetwork_with_id(path_id).get_link_ids()
# Creating the demand assignment for initialization
cls.demand_assignments = Demand_Assignment_class(route_list, commodity_list, time_period, dt=time_period)
demands = {}
demand_value = np.zeros(time_period)
demand_value1 = np.zeros(time_period)
demand_value[0] = 2
demand_value1[0] = 2
demands[(1L, 1L)] = demand_value
demands[(2L, 1L)] = demand_value1
demands[(3L, 1L)] = demand_value
cls.demand_assignments.set_all_demands(demands)
def check_manager(self):
self.assertTrue(TestStatic.model_manager.is_valid())
def test_model_run(self):
traffic_model = TestStatic.model_manager.traffic_model
link_states = traffic_model.Run_Model(TestStatic.demand_assignments)
self.assertTrue(self.check_assignments(link_states))
def test_link_cost(self):
traffic_model = TestStatic.model_manager.traffic_model
link_states = traffic_model.Run_Model(TestStatic.demand_assignments)
link_costs = TestStatic.model_manager.cost_function.evaluate_Cost_Function(link_states)
self.assertTrue(self.check_link_costs(link_costs))
def test_link_based_fw(self):
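        # Smoke test: passes as long as the link-based Frank-Wolfe solver runs without raising.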
frank_sol = Frank_Wolfe_Solver(self.model_manager)
def test_path_based_fw(self):
num_steps = 1
eps = 1e-2
frank_sol = Frank_Wolfe_Solver(self.model_manager)
assignment_seq = Path_Based_Frank_Wolfe_Solver(self.model_manager, num_steps)
# Cost resulting from the path_based Frank-Wolfe
link_states = self.model_manager.traffic_model.Run_Model(assignment_seq)
cost_path_based = self.model_manager.cost_function.evaluate_BPR_Potential(link_states)
# Cost resulting from link-based Frank-Wolfe
cost_link_based = self.model_manager.cost_function.evaluate_BPR_Potential_FW(frank_sol)
self.assertTrue(np.abs(cost_link_based-cost_path_based) < eps)
'''
def test_decomposition_solver(self):
number_of_subproblems = 1
start_time1 = timeit.default_timer()
assignment_dec, error = Decomposition_Solver(self.traffic_scenario, self.Cost_Function, number_of_subproblems)
print "Decomposition finished with error ", error
elapsed1 = timeit.default_timer() - start_time1
print ("Decomposition Path-based took %s seconds" % elapsed1)
'''
def check_assignments(self, link_states):
links_flows = {(0L,1L): [6], (1L,1L): [4], (2L,1L): [2], (3L,1L): [2],
(4L,1L): [2], (5L,1L): [2], (6L,1L): [4]}
states = link_states.get_all_states()
for key in states.keys():
if states[key][0].get_flow() != links_flows[key][0]:
return False
return True
def check_link_costs(self, link_costs):
cost_links = {(0L,1L): [1297], (1L,1L): [257], (2L,1L): [85], (3L,1L): [34],
(4L,1L): [34], (5L,1L): [17], (6L,1L): [1285]}
states = link_costs.get_all_costs()
for key in states.keys():
if states[key][0] != cost_links[key][0]:
return False
return True
| 3,559
| 0
| 214
|
7678dbedc0d00e401fec232c6c04c058318a2f5c
| 3,363
|
py
|
Python
|
tests/test_latency_host_filter.py
|
luos/nova-latency-scheduler
|
8e83539ce1dfd080ba86e4e71a2b999e56a91ec8
|
[
"MIT"
] | 1
|
2017-03-28T19:02:23.000Z
|
2017-03-28T19:02:23.000Z
|
tests/test_latency_host_filter.py
|
luos/nova-latency-scheduler
|
8e83539ce1dfd080ba86e4e71a2b999e56a91ec8
|
[
"MIT"
] | null | null | null |
tests/test_latency_host_filter.py
|
luos/nova-latency-scheduler
|
8e83539ce1dfd080ba86e4e71a2b999e56a91ec8
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from network_filters import LatencyFilter, HostLatencyService
| 35.03125
| 88
| 0.652691
|
from unittest import TestCase
from network_filters import LatencyFilter, HostLatencyService
class TestLatencyHostFilter(TestCase):
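    # Scheduler hints take the form 'latency_to': ['<max_latency>,<target_host>', ...].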
def setUp(self):
super(TestLatencyHostFilter, self).setUp()
self.latencies = MockHostLatencyService()
self.filter = LatencyFilter(self.latencies)
def test_given_a_host_with_no_hints_passes(self):
self.assertPasses("test-host", {})
def test_given_a_host_with_a_latency_hint_but_no_latency_info_fails(self):
self.latencies.returns({})
self.assertFails('test-host', {'latency_to': ['50,target1']})
def test_given_a_host_with_a_latency_hint_and_latency_info_passes(self):
self.latencies.returns({'target1': 30})
self.assertPasses('test-host', {'latency_to': ['50,target1']})
def test_given_a_host_with_higher_latency_than_the_hint_fails(self):
self.latencies.returns({'target1': 1000})
self.assertFails('test-host', {'latency_to': ['50,target1']})
def test_given_a_host_with_multiple_latencies_if_no_less_than_expected_fails(self):
self.latencies.returns({
'target2': 10000,
'target1': 1000,
'target24': 3333
})
self.assertFails('test-host', {'latency_to': ['50,target1']})
def test_given_a_host_with_multiple_latencies_with_less_than_the_hint_passes(self):
self.latencies.returns({
'target2': 10000,
'target1': 1000,
'target24': 3333
})
self.assertPasses('test-host', {'latency_to': ['1001,target1']})
def test_given_multiple_expectations_when_meets_expectations_passes(self):
self.latencies.returns({
'target2': 50,
'target1': 60,
})
self.assertPasses('test-host', {'latency_to': ['1001,target1', '500,target2']})
def test_given_multiple_expectations_when_doesnt_meet_expectations_fails(self):
self.latencies.returns({
'target2': 5000,
'target1': 6000,
})
self.assertFails('test-host', {'latency_to': ['1001,target1', '500,target2']})
def test_given_multiple_expectations_when_one_host_doesnt_exist_fails(self):
self.latencies.returns({
'target1': 5000,
})
self.assertFails('test-host', {'latency_to': ['1001,target1', '500,target2']})
def test_given_multiple_expectations_when_successful_passes(self):
self.latencies.returns({
'target3': 24234,
'target2': 2000,
'target1': 1000,
})
self.assertPasses('test-host', {'latency_to': ['1001,target1', '2001,target2']})
def test_given_multiple_expectations_one_of_them_fails_then_fails(self):
self.latencies.returns({
'target3': 24234,
'target2': 2002,
'target1': 1000,
})
self.assertFails('test-host', {'latency_to': ['1001,target1', '2001,target2']})
def assertFails(self, host, hints):
assert self.filter.host_passes(host, hints) == False
def assertPasses(self, host, hints):
assert self.filter.host_passes(host, hints) == True
class MockHostLatencyService(HostLatencyService):
latencies = {}
def get_latencies_from_host(self, host):
return self.latencies
def returns(self, latencies):
self.latencies = latencies
| 2,725
| 118
| 423
|
7c364bc32aba99d22e5967788cc363abdd9e9b31
| 484
|
py
|
Python
|
api/setup.py
|
jim8786453/kiln_share
|
2d70c8863f7db18069d13cdea319cd113a2d0bbb
|
[
"BSD-3-Clause"
] | 1
|
2018-03-21T12:27:56.000Z
|
2018-03-21T12:27:56.000Z
|
api/setup.py
|
jim8786453/kiln_share
|
2d70c8863f7db18069d13cdea319cd113a2d0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
api/setup.py
|
jim8786453/kiln_share
|
2d70c8863f7db18069d13cdea319cd113a2d0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import platform
from setuptools import setup
from pip.req import parse_requirements
req_file = 'requirements.txt'
install_reqs = parse_requirements(req_file, session=False)
reqs = [str(ir.req) for ir in install_reqs]
del os.link
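# Deleting os.link makes setuptools copy files instead of hard-linking them, a common workaround for filesystems (e.g. Vagrant shared folders) where hard links are not supported.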
setup(
author='Jim Kennedy',
author_email='jim@kohlstudios.co.uk',
description='Api for kilnshare.co.uk',
install_requires=reqs,
name='kiln_share',
packages=['kiln_share'],
version='0.0.1',
)
| 22
| 58
| 0.727273
|
#!/usr/bin/env python
import os
import platform
from setuptools import setup
from pip.req import parse_requirements
req_file = 'requirements.txt'
install_reqs = parse_requirements(req_file, session=False)
reqs = [str(ir.req) for ir in install_reqs]
del os.link
setup(
author='Jim Kennedy',
author_email='jim@kohlstudios.co.uk',
description='Api for kilnshare.co.uk',
install_requires=reqs,
name='kiln_share',
packages=['kiln_share'],
version='0.0.1',
)
| 0
| 0
| 0
|
adeea26af730e012cda2bb7d0ba780ef3a185e64
| 4,228
|
py
|
Python
|
backend/foodgram/recipes/views.py
|
solilov/foodgram_project_react
|
9b0194f912ff881cd2213550d6b4be71e7587403
|
[
"MIT"
] | null | null | null |
backend/foodgram/recipes/views.py
|
solilov/foodgram_project_react
|
9b0194f912ff881cd2213550d6b4be71e7587403
|
[
"MIT"
] | null | null | null |
backend/foodgram/recipes/views.py
|
solilov/foodgram_project_react
|
9b0194f912ff881cd2213550d6b4be71e7587403
|
[
"MIT"
] | null | null | null |
from api.filters import IngredientFilter, TagOrAuthorFilter
from api.pagination import CustomPagination
from api.serializers import (CustomRecipeSerializer, IngredientSerializer,
RecipeSerializer, TagSerializer)
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from recipes.models import (Favorite, Ingredient, IngredientRecipe, Recipe,
Shopping_Cart, Tag)
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet
| 37.415929
| 79
| 0.705061
|
from api.filters import IngredientFilter, TagOrAuthorFilter
from api.pagination import CustomPagination
from api.serializers import (CustomRecipeSerializer, IngredientSerializer,
RecipeSerializer, TagSerializer)
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from recipes.models import (Favorite, Ingredient, IngredientRecipe, Recipe,
Shopping_Cart, Tag)
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet
class TagViewSet(ReadOnlyModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
filter_backends = [DjangoFilterBackend]
filter_fields = ['name', 'slug', 'id']
class IngredientViewSet(ReadOnlyModelViewSet):
queryset = Ingredient.objects.all()
serializer_class = IngredientSerializer
filter_backends = [DjangoFilterBackend]
filter_class = IngredientFilter
class RecipeViewSet(viewsets.ModelViewSet):
serializer_class = RecipeSerializer
filter_backends = [DjangoFilterBackend]
filter_class = TagOrAuthorFilter
pagination_class = CustomPagination
def get_queryset(self):
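        # narrow the queryset when the is_favorited / is_in_shopping_cart query params are present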
if self.request.query_params.get('is_favorited'):
return Recipe.objects.filter(favorites__user=self.request.user)
elif self.request.query_params.get('is_in_shopping_cart'):
return Recipe.objects.filter(shopping_cart__user=self.request.user)
return Recipe.objects.all()
def perform_create(self, serializer):
serializer.save(author=self.request.user)
@action(detail=False, methods=['get'])
def download_shopping_cart(self, request):
user = request.user
shopping_list = IngredientRecipe.objects.filter(
recipe__shopping_cart__user=user
).values(
'ingredient__name', 'ingredient__measurement_unit'
).annotate(amount=Sum('amount')).order_by()
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = (
'attachment; filename="shopping_list.pdf"'
)
pdfmetrics.registerFont(TTFont('Petersburg', 'PetersburgITT.ttf'))
p = canvas.Canvas(response)
p.setFont('Petersburg', 24)
p.drawString(200, 800, 'Список покупок')
p.setFont('Petersburg', 20)
number = 1
height = 750
for i in shopping_list:
p.drawString(100, height, text=(
f'{number}) {i["ingredient__name"]} - {i["amount"]}'
f'{i["ingredient__measurement_unit"]}'
))
height -= 20
number += 1
p.showPage()
p.save()
return response
class FavoriteView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, id):
recipe = get_object_or_404(Recipe, id=id)
Favorite.objects.get_or_create(user=request.user, recipe=recipe)
serializer = CustomRecipeSerializer(recipe)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def delete(self, request, id):
Favorite.objects.filter(user=request.user, recipe_id=id).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class Shopping_CartView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, id):
recipe = get_object_or_404(Recipe, id=id)
Shopping_Cart.objects.create(user=request.user, recipe=recipe)
serializer = CustomRecipeSerializer(recipe)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def delete(self, request, id):
Shopping_Cart.objects.filter(
user=request.user,
recipe_id=id
).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| 2,284
| 884
| 115
|
a7da0dc79993ceee28e11231a75e0d28a5195097
| 784
|
py
|
Python
|
back/infolica/alembic/versions/20210906_5a8069c68433.py
|
maltaesousa/infolica
|
9b510b706daba8f8a04434d281c1f8730651f25f
|
[
"MIT"
] | null | null | null |
back/infolica/alembic/versions/20210906_5a8069c68433.py
|
maltaesousa/infolica
|
9b510b706daba8f8a04434d281c1f8730651f25f
|
[
"MIT"
] | 327
|
2019-10-29T13:35:25.000Z
|
2022-03-03T10:01:46.000Z
|
back/infolica/alembic/versions/20210906_5a8069c68433.py
|
maltaesousa/infolica
|
9b510b706daba8f8a04434d281c1f8730651f25f
|
[
"MIT"
] | 5
|
2019-11-07T15:49:05.000Z
|
2021-03-08T08:59:56.000Z
|
"""fix affaire abandon default value
Revision ID: 5a8069c68433
Revises: ee79f1259c77
Create Date: 2021-09-06 16:28:58.437853
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a8069c68433'
down_revision = 'ee79f1259c77'
branch_labels = None
depends_on = None
| 25.290323
| 65
| 0.655612
|
"""fix affaire abandon default value
Revision ID: 5a8069c68433
Revises: ee79f1259c77
Create Date: 2021-09-06 16:28:58.437853
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a8069c68433'
down_revision = 'ee79f1259c77'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('affaire', 'abandon',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('affaire', 'abandon',
existing_type=sa.BOOLEAN(),
nullable=True)
# ### end Alembic commands ###
| 421
| 0
| 46
|
93f461036d6eba069464298f7bf6843f8d47e919
| 1,074
|
py
|
Python
|
fNb-end/src/backend/models/Hangar.py
|
kauereblin/ifc
|
071103c4b87a158754f1fe6751984ed0b1760fed
|
[
"MIT"
] | 4
|
2020-07-23T18:20:00.000Z
|
2020-11-17T02:38:31.000Z
|
fNb-end/src/backend/models/Hangar.py
|
kauereblin/ifc
|
071103c4b87a158754f1fe6751984ed0b1760fed
|
[
"MIT"
] | null | null | null |
fNb-end/src/backend/models/Hangar.py
|
kauereblin/ifc
|
071103c4b87a158754f1fe6751984ed0b1760fed
|
[
"MIT"
] | null | null | null |
from config import db
from models.Pilot import Pilot
from models.HelicopteroDeCombate import HelicopteroDeCombate
| 31.588235
| 79
| 0.647114
|
from config import db
from models.Pilot import Pilot
from models.HelicopteroDeCombate import HelicopteroDeCombate
class Hangar(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(254), nullable=False)
country = db.Column(db.String(254), nullable=False)
pilot_id = db.Column(db.Integer, db.ForeignKey(Pilot.id), nullable=False)
pilot = db.relationship("Pilot")
helicopter_id = db.Column(db.Integer, db.ForeignKey(HelicopteroDeCombate.id),
nullable=False)
helicopter = db.relationship("HelicopteroDeCombate")
def __str__(self):
return f'''{self.id} - {self.name}, {self.country};
Piloto: {self.pilot_id} - {self.pilot};
Helicóptero: {self.helicopter_id} - {self.helicopter}'''
def json(self):
return {
"id": self.id,
"name": self.name,
"country": self.country,
"pilot_id": self.pilot_id,
"pilot": self.pilot.json(),
"helicopter_id": self.helicopter_id,
"helicopter": self.helicopter.json()
}
| 440
| 498
| 23
|
36df2a65cfecf0f2d8cef146751f1d40789fd2ae
| 1,128
|
py
|
Python
|
Code_Python/Exercicio-03/Leia-três-numeros.py
|
gabrielf7/code-exercises
|
b3a8661fadc133395f3c6fb7e926317acf7fa539
|
[
"MIT"
] | null | null | null |
Code_Python/Exercicio-03/Leia-três-numeros.py
|
gabrielf7/code-exercises
|
b3a8661fadc133395f3c6fb7e926317acf7fa539
|
[
"MIT"
] | null | null | null |
Code_Python/Exercicio-03/Leia-três-numeros.py
|
gabrielf7/code-exercises
|
b3a8661fadc133395f3c6fb7e926317acf7fa539
|
[
"MIT"
] | null | null | null |
#questão 4
num1 = float(input("Digite o primeiro valor: \n"))
num2 = float(input("Digite o segundo valor: \n"))
num3 = float(input("Digite o terceiro valor: \n"))
if(num1 > num2 > num3 or num1 == num2 > num3 or num1 > num2 == num3):
maior = num1
segundo = num2
menor = num3
elif num1 > num2 < num3 or num1 == num2 < num3 or num1 > num2 == num3:
maior = num1
segundo = num3
menor = num2
if(num2 > num1 > num3 or num2 == num1 > num3 or num2 > num1 == num3):
maior = num2
segundo = num1
menor = num3
elif (num2 > num1 < num3 or num2 == num1 < num3 or num2 > num1 == num3):
maior = num2
segundo = num3
menor = num1
if(num3 > num1 > num2 or num3 == num1 > num2 or num3 > num1 == num2):
maior = num3
segundo = num1
menor = num2
elif (num3 > num1 < num2 or num3 == num1 < num2 or num3 > num1 == num2):
maior = num3
segundo = num2
menor = num1
if num1 == num2 == num3:
maior = num1
iguais = maior
print("Iguais: [", iguais,"]")
exit()
print("Maior: [", maior, "] | Segundo: [", segundo, "] | Menor: ", [menor])
| 31.333333
| 75
| 0.565603
|
#questão 4
num1 = float(input("Digite o primeiro valor: \n"))
num2 = float(input("Digite o segundo valor: \n"))
num3 = float(input("Digite o terceiro valor: \n"))
if(num1 > num2 > num3 or num1 == num2 > num3 or num1 > num2 == num3):
maior = num1
segundo = num2
menor = num3
elif num1 > num2 < num3 or num1 == num2 < num3 or num1 > num2 == num3:
maior = num1
segundo = num3
menor = num2
if(num2 > num1 > num3 or num2 == num1 > num3 or num2 > num1 == num3):
maior = num2
segundo = num1
menor = num3
elif (num2 > num1 < num3 or num2 == num1 < num3 or num2 > num1 == num3):
maior = num2
segundo = num3
menor = num1
if(num3 > num1 > num2 or num3 == num1 > num2 or num3 > num1 == num2):
maior = num3
segundo = num1
menor = num2
elif (num3 > num1 < num2 or num3 == num1 < num2 or num3 > num1 == num2):
maior = num3
segundo = num2
menor = num1
if num1 == num2 == num3:
maior = num1
iguais = maior
print("Iguais: [", iguais,"]")
exit()
print("Maior: [", maior, "] | Segundo: [", segundo, "] | Menor: ", [menor])
| 0
| 0
| 0
|
ec43363c255f6adb5d1411a40a6f397b07037274
| 383
|
py
|
Python
|
Economic_Dispatch/plot_results.py
|
asuncionjc/Pyomo_Playground
|
b81a12905fb6cdd041b11f89ee4bbbc20168d4d2
|
[
"Apache-2.0"
] | 1
|
2019-04-12T14:47:58.000Z
|
2019-04-12T14:47:58.000Z
|
Economic_Dispatch/plot_results.py
|
asuncionjc/Pyomo_Playground
|
b81a12905fb6cdd041b11f89ee4bbbc20168d4d2
|
[
"Apache-2.0"
] | null | null | null |
Economic_Dispatch/plot_results.py
|
asuncionjc/Pyomo_Playground
|
b81a12905fb6cdd041b11f89ee4bbbc20168d4d2
|
[
"Apache-2.0"
] | 1
|
2021-02-14T18:40:13.000Z
|
2021-02-14T18:40:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 15:53:54 2019
@author: Asun
"""
import matplotlib.pyplot as plt
import numpy as np
| 25.533333
| 99
| 0.665796
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 15:53:54 2019
@author: Asun
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_results(model):
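    # plot the flow of each generating unit in period 1 and save the figure to flow_plot.pdf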
x = np.arange(0, 3)
y = [model.flow[generating_unit,1].value for generating_unit in model.indexes_generating_units]
plt.plot(x, y, color = 'red', marker = 'o', linestyle = "--")
plt.savefig('flow_plot.pdf')
| 226
| 0
| 23
|
de05130838373479be28ff8059892d8eb6a14633
| 1,787
|
py
|
Python
|
download_hype.py
|
woctezuma/steam-hype
|
cb885f8c1c2a4e7b8d344401207e3a7634f52317
|
[
"MIT"
] | 1
|
2019-08-15T18:52:55.000Z
|
2019-08-15T18:52:55.000Z
|
download_hype.py
|
woctezuma/steam-hype
|
cb885f8c1c2a4e7b8d344401207e3a7634f52317
|
[
"MIT"
] | 10
|
2019-08-15T19:05:10.000Z
|
2020-07-24T05:07:28.000Z
|
download_hype.py
|
woctezuma/steam-hype
|
cb885f8c1c2a4e7b8d344401207e3a7634f52317
|
[
"MIT"
] | 1
|
2019-08-20T03:32:25.000Z
|
2019-08-20T03:32:25.000Z
|
import time
import requests
from utils import save_results
if __name__ == '__main__':
main()
| 20.078652
| 75
| 0.604365
|
import time
import requests
from utils import save_results
def get_steam_hype_url():
# This is not my API. Please use with moderation!
url = 'https://steamhype-api.herokuapp.com/calendar'
return url
def get_time_stamp():
time_stamp = int(time.time() * 1000)
return time_stamp
def get_steam_hype_params(num_followers=0):
params = dict()
params['start'] = get_time_stamp()
params['current'] = 0
params['followers'] = num_followers
params['includedlc'] = 'false'
params['price'] = 100
params['discount'] = 0
params['reviews'] = 0
params['score'] = 0
return params
def request_data(params=None):
if params is None:
params = get_steam_hype_params()
resp_data = requests.get(url=get_steam_hype_url(),
params=params)
result = resp_data.json()
return result
def batch_request_data(params,
save_results_to_disk=True,
verbose=False):
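    # page through the calendar endpoint, bumping params['current'] until the
    # API returns an empty list, and index the accumulated games by app id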
results = dict()
while True:
print('Request n°{}'.format(params['current'] + 1))
result = request_data(params)
if len(result) == 0:
break
else:
for game in result:
app_id = game['id']
results[app_id] = game
params['current'] += 1
if verbose:
print(results)
if save_results_to_disk:
save_results(results=results)
return results
def main(num_followers=5000,
save_results_to_disk=True):
params = get_steam_hype_params(num_followers=num_followers)
results = batch_request_data(params=params,
save_results_to_disk=save_results_to_disk)
return True
if __name__ == '__main__':
main()
| 1,543
| 0
| 138
|
538fa6ef11f1d9c920a5d631b5035786fcade951
| 2,881
|
py
|
Python
|
examples/sine.py
|
bjodah/finitediff
|
bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8
|
[
"BSD-2-Clause"
] | 27
|
2016-09-14T11:40:35.000Z
|
2022-03-05T18:48:26.000Z
|
examples/sine.py
|
tutoushaonian/finitediff
|
bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8
|
[
"BSD-2-Clause"
] | 4
|
2016-04-08T03:55:14.000Z
|
2018-06-27T11:18:58.000Z
|
examples/sine.py
|
tutoushaonian/finitediff
|
bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8
|
[
"BSD-2-Clause"
] | 5
|
2017-05-25T06:50:40.000Z
|
2021-09-13T14:16:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function # Python 3 behaviour in Py2
import numpy as np
from finitediff import derivatives_at_point_by_finite_diff, interpolate_by_finite_diff
def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0):
"""
Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot
"""
x0, xend = 0, 5
# shaky linspace -5% to +5% noise
x_data = (
np.linspace(x0, xend, n_data)
+ np.random.rand(n_data) * (xend - x0) / n_data / 1.5
)
y_data = np.sin(x_data) * (1.0 + 0.1 * (np.random.rand(n_data) - 0.5))
x_fit = np.linspace(x0, xend, n_fit)
# Edges behave badly, work around:
x_fit[0] = x_fit[0] + (x_fit[1] - x_fit[0]) / 2
x_fit[-1] = x_fit[-2] + (x_fit[-1] - x_fit[-2]) / 2
if alt:
y_fit = np.empty(n_fit)
dydx_fit = np.empty(n_fit)
for i, xf in enumerate(x_fit):
# get index j of first data point beyond xf
j = np.where(x_data > xf)[0][0]
lower_bound = max(0, j - alt)
upper_bound = min(n_data - 1, j + alt)
y_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 0
)
dydx_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 1
)[1]
else:
interp = interpolate_by_finite_diff(x_data, y_data, x_fit, 1, nhead, ntail)
y_fit = interp[:, 0]
dydx_fit = interp[:, 1]
if plot:
import matplotlib.pyplot as plt
plt.subplot(221)
plt.plot(x_data, y_data, "x", label="Data points (sin)")
plt.plot(x_fit, y_fit, "-", label="Fitted curve (order=0)")
plt.plot(x_data, np.sin(x_data), "-", label="Analytic sin(x)")
plt.legend()
plt.subplot(222)
plt.plot(x_fit, y_fit - np.sin(x_fit), label="Error in order=0")
plt.legend()
plt.subplot(223)
plt.plot(x_fit, dydx_fit, "-", label="Fitted derivative (order=1)")
plt.plot(x_data, np.cos(x_data), "-", label="Analytic cos(x)")
plt.legend()
plt.subplot(224)
plt.plot(x_fit, dydx_fit - np.cos(x_fit), label="Error in order=1")
plt.legend()
plt.show()
if __name__ == "__main__":
    # dispatch via argh when it is installed; otherwise run the demo directly
    try:
        from argh import dispatch_command
        dispatch_command(demo_usage)
    except ImportError:
        demo_usage()
| 30.648936
| 87
| 0.596321
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function # Python 3 behaviour in Py2
import numpy as np
from finitediff import derivatives_at_point_by_finite_diff, interpolate_by_finite_diff
def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0):
"""
Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot
"""
x0, xend = 0, 5
# shaky linspace -5% to +5% noise
x_data = (
np.linspace(x0, xend, n_data)
+ np.random.rand(n_data) * (xend - x0) / n_data / 1.5
)
y_data = np.sin(x_data) * (1.0 + 0.1 * (np.random.rand(n_data) - 0.5))
x_fit = np.linspace(x0, xend, n_fit)
# Edges behave badly, work around:
x_fit[0] = x_fit[0] + (x_fit[1] - x_fit[0]) / 2
x_fit[-1] = x_fit[-2] + (x_fit[-1] - x_fit[-2]) / 2
if alt:
y_fit = np.empty(n_fit)
dydx_fit = np.empty(n_fit)
for i, xf in enumerate(x_fit):
# get index j of first data point beyond xf
j = np.where(x_data > xf)[0][0]
lower_bound = max(0, j - alt)
upper_bound = min(n_data - 1, j + alt)
y_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 0
)
dydx_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 1
)[1]
else:
interp = interpolate_by_finite_diff(x_data, y_data, x_fit, 1, nhead, ntail)
y_fit = interp[:, 0]
dydx_fit = interp[:, 1]
if plot:
import matplotlib.pyplot as plt
plt.subplot(221)
plt.plot(x_data, y_data, "x", label="Data points (sin)")
plt.plot(x_fit, y_fit, "-", label="Fitted curve (order=0)")
plt.plot(x_data, np.sin(x_data), "-", label="Analytic sin(x)")
plt.legend()
plt.subplot(222)
plt.plot(x_fit, y_fit - np.sin(x_fit), label="Error in order=0")
plt.legend()
plt.subplot(223)
plt.plot(x_fit, dydx_fit, "-", label="Fitted derivative (order=1)")
plt.plot(x_data, np.cos(x_data), "-", label="Analytic cos(x)")
plt.legend()
plt.subplot(224)
plt.plot(x_fit, dydx_fit - np.cos(x_fit), label="Error in order=1")
plt.legend()
plt.show()
if __name__ == "__main__":
try:
from argh import dispatch_command
except ImportError:
def dispatch_command(cb):
return cb()
dispatch_command(demo_usage)
| 28
| 0
| 31
|
9271f1a5455a7ecdd71cc83dbca5ba4c204b255a
| 1,173
|
py
|
Python
|
packages/mdspan/package.py
|
pdidev/spack
|
32151f29738895e1f7d96e496c084d6349a9277b
|
[
"Apache-2.0",
"MIT"
] | 2
|
2020-04-09T11:39:41.000Z
|
2021-12-10T17:45:42.000Z
|
packages/mdspan/package.py
|
pdidev/spack
|
32151f29738895e1f7d96e496c084d6349a9277b
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-08-12T10:03:26.000Z
|
2021-08-12T10:03:26.000Z
|
packages/mdspan/package.py
|
pdidev/spack
|
32151f29738895e1f7d96e496c084d6349a9277b
|
[
"Apache-2.0",
"MIT"
] | 3
|
2020-03-27T15:41:45.000Z
|
2022-02-01T15:03:11.000Z
|
# Copyright (C) 2020 Commissariat a l'energie atomique et aux energies alternatives (CEA)
# and others. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mdspan(CMakePackage):
"""Reference implementation of mdspan targeting C++23."""
homepage = "https://github.com/Kokkos/mdspan"
git = "https://github.com/Kokkos/mdspan.git"
url = "https://github.com/kokkos/mdspan/archive/refs/tags/mdspan-0.2.0.tar.gz"
maintainers = ['crtrott']
version('stable', branch='stable', preferred=True)
version('0.2.0', sha256='1ce8e2be0588aa6f2ba34c930b06b892182634d93034071c0157cb78fa294212', extension='tar.gz')
version('0.1.0', sha256='24c1e4be4870436c6c5e80d38870721b0b6252185b8288d00d8f3491dfba754b', extension='tar.gz')
depends_on("cmake@3.12:", type='build')
variant('cxx_standard', default='DETECT', description="Override the default CXX_STANDARD to compile with.",
values=('DETECT', '14', '17', '20'))
| 35.545455
| 115
| 0.695652
|
# Copyright (C) 2020 Commissariat a l'energie atomique et aux energies alternatives (CEA)
# and others. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mdspan(CMakePackage):
"""Reference implementation of mdspan targeting C++23."""
homepage = "https://github.com/Kokkos/mdspan"
git = "https://github.com/Kokkos/mdspan.git"
url = "https://github.com/kokkos/mdspan/archive/refs/tags/mdspan-0.2.0.tar.gz"
maintainers = ['crtrott']
version('stable', branch='stable', preferred=True)
version('0.2.0', sha256='1ce8e2be0588aa6f2ba34c930b06b892182634d93034071c0157cb78fa294212', extension='tar.gz')
version('0.1.0', sha256='24c1e4be4870436c6c5e80d38870721b0b6252185b8288d00d8f3491dfba754b', extension='tar.gz')
depends_on("cmake@3.12:", type='build')
variant('cxx_standard', default='DETECT', description="Override the default CXX_STANDARD to compile with.",
values=('DETECT', '14', '17', '20'))
def cmake_args(self):
args = [
self.define_from_variant('MDSPAN_CXX_STANDARD', 'cxx_standard')
]
return args
| 124
| 0
| 27
|
cdb3f49fb732beb3ef7f5d4eef3c47dfc48b1951
| 307
|
py
|
Python
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/solids_pipelines/linear_pipeline.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-07-03T09:05:58.000Z
|
2021-07-03T09:05:58.000Z
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/solids_pipelines/linear_pipeline.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-06-21T18:30:02.000Z
|
2021-06-25T21:18:39.000Z
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/solids_pipelines/linear_pipeline.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-09-26T07:29:17.000Z
|
2021-09-26T07:29:17.000Z
|
# pylint: disable=unused-argument
# start_marker
from dagster import pipeline, solid
@solid
@solid
@pipeline
# end_marker
| 13.347826
| 43
| 0.710098
|
# pylint: disable=unused-argument
# start_marker
from dagster import pipeline, solid
@solid
def return_one(context) -> int:
return 1
@solid
def add_one(context, number: int) -> int:
return number + 1
@pipeline
def linear_pipeline():
add_one(add_one(add_one(return_one())))
# end_marker
| 110
| 0
| 66
|
18b2f34f7078f46737a2a88c1ad04524675a51e2
| 1,147
|
py
|
Python
|
snakeskin/sources/source.py
|
ewanbarr/snakeskin
|
b41a5393e9b4ab42fd6245e022dd4923be01815b
|
[
"Apache-2.0"
] | null | null | null |
snakeskin/sources/source.py
|
ewanbarr/snakeskin
|
b41a5393e9b4ab42fd6245e022dd4923be01815b
|
[
"Apache-2.0"
] | null | null | null |
snakeskin/sources/source.py
|
ewanbarr/snakeskin
|
b41a5393e9b4ab42fd6245e022dd4923be01815b
|
[
"Apache-2.0"
] | null | null | null |
import ephem as eph
import numpy as np
from snakeskin.constants import SEC_TO_SIDRAD
| 32.771429
| 82
| 0.632084
|
import ephem as eph
import numpy as np
from snakeskin.constants import SEC_TO_SIDRAD
class Source(eph.FixedBody):
def __init__(self,ra,dec,tobs=1800.0,name="none",value=1.,obs_config=None):
super(Source,self).__init__()
self.name = name
coords = eph.Equatorial(ra,dec)
self._ra = coords.ra
self._dec = coords.dec
self.tobs = tobs
self.value = value
self.obs_config = obs_config
def azalt(self,telescope):
self.compute(telescope)
return self.az,self.alt
def path(self,telescope,lmst):
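        # equatorial -> horizontal transform for hour angle ha = lmst - ra:
        # sin(alt) = sin(lat)*cos... more precisely
        #   sin(alt) = sin(lat)*sin(dec) + cos(lat)*cos(dec)*cos(ha)
        # with the matching arctan2 expression (plus pi) giving the azimuth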
lat = telescope.lat
ha = lmst-self.ra
cosha = np.cos(ha)
coslat = np.cos(lat)
sinlat = np.sin(lat)
alt = np.arcsin(sinlat*np.sin(self.dec)+coslat*np.cos(self.dec)*cosha)
az = np.arctan2(np.sin(ha),(cosha*sinlat - np.tan(self.dec)*coslat))+np.pi
return az,alt
def trail(self,telescope,duration=600.0):
start_lmst = telescope.sidereal_time()
end_lmst = start_lmst+SEC_TO_SIDRAD*duration
lmst = np.linspace(start_lmst,end_lmst,100)%(np.pi*2)
        return self.path(telescope,lmst)  # path() is a method of this class, not a module-level function
| 925
| 7
| 130
|
ac9f99f6f60b9becd44d5f1c6fefe4639be389b0
| 474
|
py
|
Python
|
xastropy/relativity/__init__.py
|
bpholden/xastropy
|
66aff0995a84c6829da65996d2379ba4c946dabe
|
[
"BSD-3-Clause"
] | 3
|
2015-08-23T00:32:58.000Z
|
2020-12-31T02:37:52.000Z
|
xastropy/relativity/__init__.py
|
Kristall-WangShiwei/xastropy
|
723fe56cb48d5a5c4cdded839082ee12ef8c6732
|
[
"BSD-3-Clause"
] | 104
|
2015-07-17T18:31:54.000Z
|
2018-06-29T17:04:09.000Z
|
xastropy/relativity/__init__.py
|
Kristall-WangShiwei/xastropy
|
723fe56cb48d5a5c4cdded839082ee12ef8c6732
|
[
"BSD-3-Clause"
] | 16
|
2015-07-17T15:50:37.000Z
|
2019-04-21T03:42:47.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" astropy.cosmology contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<http://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .velocities import *
| 36.461538
| 69
| 0.767932
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" astropy.cosmology contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<http://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .velocities import *
| 0
| 0
| 0
|
4bfa262067e0d0cd970b7cd29211db1db46e96fe
| 651
|
py
|
Python
|
app/migrations/0003_auto_20181022_1601.py
|
Evohmike/Nyumba-Kumi-App
|
80ba9ded12bda6e41c9395a4e439e80f8840d295
|
[
"MIT"
] | null | null | null |
app/migrations/0003_auto_20181022_1601.py
|
Evohmike/Nyumba-Kumi-App
|
80ba9ded12bda6e41c9395a4e439e80f8840d295
|
[
"MIT"
] | null | null | null |
app/migrations/0003_auto_20181022_1601.py
|
Evohmike/Nyumba-Kumi-App
|
80ba9ded12bda6e41c9395a4e439e80f8840d295
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-22 13:01
from __future__ import unicode_literals
from django.db import migrations, models
| 25.038462
| 71
| 0.605223
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-22 13:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_neighbourhood_hood_photo'),
]
operations = [
migrations.AddField(
model_name='neighbourhood',
name='health',
field=models.CharField(default='071000000', max_length=15),
),
migrations.AddField(
model_name='neighbourhood',
name='police',
field=models.CharField(default='9999', max_length=15),
),
]
| 0
| 474
| 23
|
30d1cfa49c2d708d5f169d7bff5b66ab9dc3fbca
| 2,138
|
py
|
Python
|
test_laylib/test_default_engine.py
|
Layto888/laylib-1.0.1
|
c7317c29659a476adf6e90eb729b09ce4c49e219
|
[
"MIT"
] | 1
|
2018-08-04T14:44:42.000Z
|
2018-08-04T14:44:42.000Z
|
test_laylib/test_default_engine.py
|
Layto888/laylib-1.0
|
c7317c29659a476adf6e90eb729b09ce4c49e219
|
[
"MIT"
] | null | null | null |
test_laylib/test_default_engine.py
|
Layto888/laylib-1.0
|
c7317c29659a476adf6e90eb729b09ce4c49e219
|
[
"MIT"
] | null | null | null |
# test module default_engine.py
import pytest
import logging
import os
import inspect
import sys
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from laylib import default_engine
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
"""
@pytest.fixture
def surface_env(scope="function"):
pg.init()
if not pg.display.get_init():
logging.info('unable to init display pygame')
set_env = pg.display.set_mode((200, 200))
yield set_env
# pg.quit()
"""
@pytest.fixture
@pytest.mark.skip(reason="unskip this test if you're not using travis CI.")
@pytest.mark.skip(reason="We can't exit the main_loop this way")
@pytest.mark.skip(reason="will not be tested. User interaction")
@pytest.mark.skip(reason="will be tested with resources module.")
| 26.395062
| 87
| 0.755379
|
# test module default_engine.py
import pytest
import logging
import os
import inspect
import sys
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from laylib import default_engine
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
"""
@pytest.fixture
def surface_env(scope="function"):
pg.init()
if not pg.display.get_init():
logging.info('unable to init display pygame')
set_env = pg.display.set_mode((200, 200))
yield set_env
# pg.quit()
"""
class _ObjClass(default_engine.DefaultEngine):
pass
@pytest.fixture
def class_default_engine():
new_class = _ObjClass()
return new_class
@pytest.mark.skip(reason="unskip this test if you're not using travis CI.")
def test_surface_env(surface_env):
# the screen should not be none.
assert surface_env is not None
assert surface_env.get_size() == (200, 200)
def test_default_engine_attr(class_default_engine):
assert isinstance(class_default_engine, default_engine.DefaultEngine)
assert class_default_engine.running is True
assert class_default_engine.playing is False
assert class_default_engine._time_unit == 1000.0
def test_time_setget(class_default_engine):
class_default_engine.time_unit = 20.0
assert class_default_engine.time_unit == 20.0
class_default_engine.time_unit = -50.0
assert class_default_engine.time_unit == 1000.0
@pytest.mark.skip(reason="We can't exit the main_loop this way")
def test_delta_time_main_loop(class_default_engine):
pass
@pytest.mark.skip(reason="will not be tested. User interaction")
def test_event_listener():
pass
@pytest.mark.skip(reason="will be tested with resources module.")
def test_load_game():
pass
def test_destroy_game(class_default_engine):
class_default_engine._destroy_game()
assert class_default_engine.all_sprites is not None
assert class_default_engine.img is None
assert class_default_engine.snd is None
assert class_default_engine.fnt is None
| 966
| 34
| 202
|
e0296db2c64142c0262d853517a11e247c329f34
| 3,886
|
py
|
Python
|
lingvo/core/base_decoder.py
|
pizzahan/lingvo
|
9b85b7ba5d037701302efa807841c05223bc7d1d
|
[
"Apache-2.0"
] | 4
|
2019-06-08T00:19:06.000Z
|
2020-08-03T16:28:53.000Z
|
lingvo/core/base_decoder.py
|
pizzahan/lingvo
|
9b85b7ba5d037701302efa807841c05223bc7d1d
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/base_decoder.py
|
pizzahan/lingvo
|
9b85b7ba5d037701302efa807841c05223bc7d1d
|
[
"Apache-2.0"
] | 5
|
2018-12-11T08:05:16.000Z
|
2020-05-30T03:40:13.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common decoder interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import base_layer
from lingvo.core import beam_search_helper
from lingvo.core import target_sequence_sampler
class BaseDecoder(base_layer.BaseLayer):
"""Base class for all decoders."""
@classmethod
def FProp(self, theta, encoder_outputs, targets):
"""Decodes `targets` given encoded source.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder.
targets: A dict of string to tensors representing the targets one try to
predict.
Returns:
A map from metric name (a python string) to a tuple (value, weight).
Both value and weight are scalar Tensors.
"""
predictions = self.ComputePredictions(theta, encoder_outputs, targets)
return self.ComputeLoss(theta, predictions, targets)[0]
class BaseBeamSearchDecoder(BaseDecoder):
"""Decoder that does beam search."""
@classmethod
@base_layer.initializer
def BeamSearchDecode(self, encoder_outputs):
# pylint: disable=line-too-long
"""Performs beam search based decoding.
Args:
encoder_outputs: the outputs of the encoder.
returns:
`.BeamSearchDecodeOutput`, A namedtuple whose elements are tensors.
"""
# pylint: enable=line-too-long
raise NotImplementedError('Abstract method')
| 37.728155
| 80
| 0.717962
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common decoder interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import base_layer
from lingvo.core import beam_search_helper
from lingvo.core import target_sequence_sampler
class BaseDecoder(base_layer.BaseLayer):
"""Base class for all decoders."""
@classmethod
def Params(cls):
p = super(BaseDecoder, cls).Params()
p.Define(
'packed_input', False, 'If True, decoder and all layers support '
'multiple examples in a single sequence.')
return p
def FProp(self, theta, encoder_outputs, targets):
"""Decodes `targets` given encoded source.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder.
targets: A dict of string to tensors representing the targets one try to
predict.
Returns:
A map from metric name (a python string) to a tuple (value, weight).
Both value and weight are scalar Tensors.
"""
predictions = self.ComputePredictions(theta, encoder_outputs, targets)
return self.ComputeLoss(theta, predictions, targets)[0]
def ComputePredictions(self, theta, encoder_outputs, targets):
raise NotImplementedError('Abstract method: %s' % type(self))
def ComputeLoss(self, theta, predictions, targets):
raise NotImplementedError('Abstract method: %s' % type(self))
class BaseBeamSearchDecoder(BaseDecoder):
"""Decoder that does beam search."""
@classmethod
def Params(cls):
p = super(BaseBeamSearchDecoder, cls).Params()
p.Define('target_sos_id', 1, 'Id of the target sequence sos symbol.')
p.Define('target_eos_id', 2, 'Id of the target sequence eos symbol.')
# TODO(rpang): remove target_seq_len and use beam_search.target_seq_len
# instead.
p.Define('target_seq_len', 0, 'Target seq length.')
p.Define('beam_search', beam_search_helper.BeamSearchHelper.Params(),
'BeamSearchHelper params.')
p.Define('target_sequence_sampler',
target_sequence_sampler.TargetSequenceSampler.Params(),
'TargetSequenceSampler params.')
return p
@base_layer.initializer
def __init__(self, params):
super(BaseBeamSearchDecoder, self).__init__(params)
p = self.params
p.beam_search.target_seq_len = p.target_seq_len
p.beam_search.target_sos_id = p.target_sos_id
p.beam_search.target_eos_id = p.target_eos_id
self.CreateChild('beam_search', p.beam_search)
p.target_sequence_sampler.target_seq_len = p.target_seq_len
p.target_sequence_sampler.target_sos_id = p.target_sos_id
p.target_sequence_sampler.target_eos_id = p.target_eos_id
self.CreateChild('target_sequence_sampler', p.target_sequence_sampler)
def BeamSearchDecode(self, encoder_outputs):
# pylint: disable=line-too-long
"""Performs beam search based decoding.
Args:
encoder_outputs: the outputs of the encoder.
returns:
`.BeamSearchDecodeOutput`, A namedtuple whose elements are tensors.
"""
# pylint: enable=line-too-long
raise NotImplementedError('Abstract method')
| 1,563
| 0
| 122
|
85d102b6cba4ef055e73d753952668f328b5a301
| 1,225
|
py
|
Python
|
tests/runtime/asset/test_persistent.py
|
formlio/forml
|
fd070da74a0107e37c0c643dd8df8680618fef74
|
[
"Apache-2.0"
] | 78
|
2020-11-04T18:27:20.000Z
|
2022-02-07T03:32:53.000Z
|
tests/runtime/asset/test_persistent.py
|
formlio/forml
|
fd070da74a0107e37c0c643dd8df8680618fef74
|
[
"Apache-2.0"
] | 3
|
2020-11-05T20:42:15.000Z
|
2021-01-13T19:57:01.000Z
|
tests/runtime/asset/test_persistent.py
|
formlio/forml
|
fd070da74a0107e37c0c643dd8df8680618fef74
|
[
"Apache-2.0"
] | 7
|
2020-11-18T17:18:15.000Z
|
2021-03-24T05:14:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
ForML persistent unit tests.
"""
# pylint: disable=no-self-use
from forml.runtime import asset
class TestRegistry:
"""Registry unit tests."""
def test_get(self, registry: asset.Registry, project_name: asset.Project.Key, populated_lineage: asset.Lineage.Key):
"""Test lineage get."""
lineage = asset.Directory(registry).get(project_name).get(populated_lineage)
assert lineage.key == populated_lineage
| 38.28125
| 120
| 0.75102
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
ForML persistent unit tests.
"""
# pylint: disable=no-self-use
from forml.runtime import asset
class TestRegistry:
"""Registry unit tests."""
def test_get(self, registry: asset.Registry, project_name: asset.Project.Key, populated_lineage: asset.Lineage.Key):
"""Test lineage get."""
lineage = asset.Directory(registry).get(project_name).get(populated_lineage)
assert lineage.key == populated_lineage
| 0
| 0
| 0
|
6929622484867a36adedfe910766d009df4df761
| 491
|
py
|
Python
|
teste_requests.py
|
stevillis/gu-escola
|
2b26ec53e63fb70447c7a0eb13ab9c6e473122e0
|
[
"MIT"
] | null | null | null |
teste_requests.py
|
stevillis/gu-escola
|
2b26ec53e63fb70447c7a0eb13ab9c6e473122e0
|
[
"MIT"
] | null | null | null |
teste_requests.py
|
stevillis/gu-escola
|
2b26ec53e63fb70447c7a0eb13ab9c6e473122e0
|
[
"MIT"
] | null | null | null |
import requests
BASE_URL = 'http://localhost:8000/api/v2/'
# GET Avaliacoes
"""
response = requests.get(f'{BASE_URL}avaliacoes')
print(response)
print(response.status_code)
avaliacoes = response.json()
print(avaliacoes)
print(avaliacoes.get('count'))
print(avaliacoes.get('results'))
"""
# GET Cursos
headers = {
'Authorization': 'Token 6e6ab3885e67fcc06fabc926a277b07c3bd86be8'
}
response = requests.get(f'{BASE_URL}cursos', headers=headers)
print(response.json().get('results'))
| 19.64
| 69
| 0.745418
|
import requests
BASE_URL = 'http://localhost:8000/api/v2/'
# GET Avaliacoes
"""
response = requests.get(f'{BASE_URL}avaliacoes')
print(response)
print(response.status_code)
avaliacoes = response.json()
print(avaliacoes)
print(avaliacoes.get('count'))
print(avaliacoes.get('results'))
"""
# GET Cursos
headers = {
'Authorization': 'Token 6e6ab3885e67fcc06fabc926a277b07c3bd86be8'
}
response = requests.get(f'{BASE_URL}cursos', headers=headers)
print(response.json().get('results'))
| 0
| 0
| 0
|
d5b5b6fef388dc9909b4b8f5f7507dcc08300c41
| 4,852
|
py
|
Python
|
submissions/aartiste/myKMeans.py
|
dillonpoff/aima-python
|
2eadb43f6ede9c7a2e211ea38dff3fa5fd5c91df
|
[
"MIT"
] | 1
|
2018-08-24T14:04:18.000Z
|
2018-08-24T14:04:18.000Z
|
submissions/aartiste/myKMeans.py
|
dillonpoff/aima-python
|
2eadb43f6ede9c7a2e211ea38dff3fa5fd5c91df
|
[
"MIT"
] | null | null | null |
submissions/aartiste/myKMeans.py
|
dillonpoff/aima-python
|
2eadb43f6ede9c7a2e211ea38dff3fa5fd5c91df
|
[
"MIT"
] | null | null | null |
from sklearn.cluster import KMeans
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The Naive Bayesian network is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Make a custom classifier.
'''
km = KMeans(
n_clusters=2,
# max_iter=300,
# n_init=10,
# init='k-means++',
# algorithm='auto',
# precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=0,
# copy_x=True,
)
Examples = {
'Trump': {
'frame': trumpScaled,
},
'TrumpCustom': {
'frame': trumpScaled,
'kmeans': km
},
}
| 27.568182
| 99
| 0.620569
|
from sklearn.cluster import KMeans
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
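    # minimal sklearn-style dataset container: raw rows, feature names,
    # targets and human-readable target names held as class-level lists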
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The Naive Bayesian network is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
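    # record the per-column minimum and maximum of the grid; scaleGrid below
    # uses these globals for min-max normalisation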
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
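    # rescale every column to the [0, 1] range using the min/max gathered by setupScales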
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Make a custom classifier.
'''
km = KMeans(
n_clusters=2,
# max_iter=300,
# n_init=10,
# init='k-means++',
# algorithm='auto',
# precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=0,
# copy_x=True,
)
Examples = {
'Trump': {
'frame': trumpScaled,
},
'TrumpCustom': {
'frame': trumpScaled,
'kmeans': km
},
}
| 759
| 70
| 92
|
f45d5ecb43560f81497d317a23712bf1eaf8d15f
| 603
|
py
|
Python
|
initialize_points_w.py
|
NCBI-Hackathons/McDiff
|
43037967e65e8dbdda18c891175c93537b98a238
|
[
"MIT"
] | 3
|
2018-06-21T15:16:25.000Z
|
2018-06-21T22:42:17.000Z
|
initialize_points_w.py
|
NCBI-Hackathons/McDiff
|
43037967e65e8dbdda18c891175c93537b98a238
|
[
"MIT"
] | null | null | null |
initialize_points_w.py
|
NCBI-Hackathons/McDiff
|
43037967e65e8dbdda18c891175c93537b98a238
|
[
"MIT"
] | 1
|
2018-06-25T16:17:04.000Z
|
2018-06-25T16:17:04.000Z
|
from shapely import geometry
# import random
# import numpy as np
# numParticles = 120
# point_list = [[0,0],[0,1],[1,1],[1,0]]
# poly = geometry.Polygon(point_list)
# print generate_random_points(numParticles, poly)
| 25.125
| 71
| 0.706468
|
from shapely import geometry
# import random
import numpy as np  # generate_random_points below uses np for sampling and array storage
# numParticles = 120
# point_list = [[0,0],[0,1],[1,1],[1,0]]
# poly = geometry.Polygon(point_list)
def generate_random_points(N, poly):
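    # rejection sampling: draw uniform points inside the polygon's bounding box
    # and keep only those that fall within the polygon, until N points are found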
list_of_points = np.zeros((2, N))
minx,miny,maxx,maxy = poly.bounds
counter = 0
while counter < N:
punto = (np.random.uniform(minx, maxx), np.random.uniform(miny,maxy))
p = geometry.Point(punto)
if poly.contains(p):
list_of_points[0,counter] = punto[0]
list_of_points[1,counter] = punto[1]
counter += 1
return list_of_points
# print generate_random_points(numParticles, poly)
| 360
| 0
| 23
|
c67cc3624a702cafd7e7246abe8b88132e111d61
| 53
|
py
|
Python
|
modules/__init__.py
|
richardHaw/nagare
|
4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
richardHaw/nagare
|
4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
richardHaw/nagare
|
4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c
|
[
"MIT"
] | null | null | null |
# this file is needed for python2, delete for python3
| 53
| 53
| 0.792453
|
# this file is needed for python2, delete for python3
| 0
| 0
| 0
|
54cd06ce2ea0585ac5ee273e70cb010a30aa3f06
| 9,713
|
py
|
Python
|
python/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS_test.py
|
docebo/aws-config-rules
|
75f92bcad644bd71f19bbc15cf99e6d6de6b8227
|
[
"CC0-1.0"
] | 1,295
|
2016-03-01T23:06:33.000Z
|
2022-03-31T07:17:53.000Z
|
python/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS_test.py
|
tied/aws-config-rules
|
7c66e109c1225111d2ab8d1811d6e80dea0affcb
|
[
"CC0-1.0"
] | 287
|
2016-03-01T19:51:43.000Z
|
2022-01-06T04:59:55.000Z
|
python/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS_test.py
|
tied/aws-config-rules
|
7c66e109c1225111d2ab8d1811d6e80dea0affcb
|
[
"CC0-1.0"
] | 744
|
2016-03-01T18:33:00.000Z
|
2022-03-31T18:46:44.000Z
|
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
SAGEMAKER_CLIENT_MOCK = MagicMock()
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS')
####################
# Helper Functions #
####################
| 50.853403
| 182
| 0.724699
|
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
SAGEMAKER_CLIENT_MOCK = MagicMock()
class Boto3Mock():
@staticmethod
def client(client_name, *args, **kwargs):
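        # hand back the pre-built MagicMock matching the requested boto3 service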
if client_name == 'config':
return CONFIG_CLIENT_MOCK
if client_name == 'sts':
return STS_CLIENT_MOCK
if client_name == 'sagemaker':
return SAGEMAKER_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS')
class ComplianceTest(unittest.TestCase):
notebook_instances_list = [{'NotebookInstances': [{'NotebookInstanceName': 'trial12'}, {'NotebookInstanceName': 'trial123'}]}]
notebooks_direct_internet = [{'NotebookInstanceName': 'trial12', 'DirectInternetAccess': 'Enabled'}, {'NotebookInstanceName': 'trial123', 'DirectInternetAccess': 'Enabled'}]
notebooks_no_direct_internet = [{'NotebookInstanceName': 'trial12', 'DirectInternetAccess': 'Disabled'}, {'NotebookInstanceName': 'trial123', 'DirectInternetAccess': 'Disabled'}]
notebooks_both = [{'NotebookInstanceName': 'trial12', 'DirectInternetAccess': 'Disabled'}, {'NotebookInstanceName': 'trial123', 'DirectInternetAccess': 'Enabled'}]
#SCENARIO 1: No Amazon SageMaker notebook instances exist
def test_scenario_1_no_notebooks(self):
notebook_instances_list = [{'NotebookInstances': []}]
RULE.ASSUME_ROLE_MODE = False
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": notebook_instances_list})
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('NOT_APPLICABLE', '123456789012', 'AWS::::Account')]
assert_successful_evaluation(self, response, resp_expected)
#SCENARIO 2: DirectInternetAccess is set to Enabled for the Amazon SageMaker notebook instances
def test_scenario_2_direct_internet_access(self):
RULE.ASSUME_ROLE_MODE = False
annotation = "This Amazon SageMaker Notebook Instance has direct internet access."
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": self.notebook_instances_list})
SAGEMAKER_CLIENT_MOCK.describe_notebook_instance = MagicMock(side_effect=self.notebooks_direct_internet)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('NON_COMPLIANT', compliance_resource_id='trial12', annotation=annotation),
build_expected_response('NON_COMPLIANT', compliance_resource_id='trial123', annotation=annotation)]
assert_successful_evaluation(self, response, resp_expected, evaluations_count=2)
#SCENARIO 3: DirectInternetAccess is set to Disabled for the Amazon SageMaker notebook instances
def test_scenario_3_no_direct_internet_access(self):
RULE.ASSUME_ROLE_MODE = False
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": self.notebook_instances_list})
SAGEMAKER_CLIENT_MOCK.describe_notebook_instance = MagicMock(side_effect=self.notebooks_no_direct_internet)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('COMPLIANT', compliance_resource_id='trial12'),
build_expected_response('COMPLIANT', compliance_resource_id='trial123')]
assert_successful_evaluation(self, response, resp_expected, evaluations_count=2)
#Test for a mix of compliance types
def test_scenario_2_and_3(self):
RULE.ASSUME_ROLE_MODE = False
annotation = "This Amazon SageMaker Notebook Instance has direct internet access."
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": self.notebook_instances_list})
SAGEMAKER_CLIENT_MOCK.describe_notebook_instance = MagicMock(side_effect=self.notebooks_both)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('COMPLIANT', compliance_resource_id='trial12'),
build_expected_response('NON_COMPLIANT', compliance_resource_id='trial123', annotation=annotation)]
assert_successful_evaluation(self, response, resp_expected, evaluations_count=2)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
test_class.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
test_class.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
test_class.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
test_class.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
test_class.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
test_class.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
test_class.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
test_class.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
test_class.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
test_class.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
test_class.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
| 7,249
| 1,132
| 184
|
0e295a939cb3bb447622e932af4f06083d13ea4b
| 75
|
py
|
Python
|
starling_sim/basemodel/topology/__init__.py
|
tellae/starling
|
56121c728eb5de3dfc77cdf08da89548f3315c87
|
[
"CECILL-B"
] | 19
|
2021-02-16T12:32:22.000Z
|
2022-01-06T11:16:44.000Z
|
starling_sim/basemodel/topology/__init__.py
|
tellae/starling
|
56121c728eb5de3dfc77cdf08da89548f3315c87
|
[
"CECILL-B"
] | 20
|
2021-01-13T20:58:07.000Z
|
2022-03-21T15:53:07.000Z
|
starling_sim/basemodel/topology/__init__.py
|
tellae/starling
|
56121c728eb5de3dfc77cdf08da89548f3315c87
|
[
"CECILL-B"
] | null | null | null |
"""
This package contains the modules related to simulation topologies
"""
| 18.75
| 66
| 0.773333
|
"""
This package contains the modules related to simulation topologies
"""
| 0
| 0
| 0
|
7a9c4005ae9ed6fcb141368f64486d286ecf01ed
| 3,288
|
py
|
Python
|
networking_onos/extensions/callback.py
|
sanghoshin/networking-onos
|
2baec5f74e2721e5f8dffd57b3ef7a27034fa54a
|
[
"Apache-2.0"
] | null | null | null |
networking_onos/extensions/callback.py
|
sanghoshin/networking-onos
|
2baec5f74e2721e5f8dffd57b3ef7a27034fa54a
|
[
"Apache-2.0"
] | null | null | null |
networking_onos/extensions/callback.py
|
sanghoshin/networking-onos
|
2baec5f74e2721e5f8dffd57b3ef7a27034fa54a
|
[
"Apache-2.0"
] | 1
|
2017-10-19T04:23:14.000Z
|
2017-10-19T04:23:14.000Z
|
# Copyright (c) 2017 SK Telecom Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from networking_onos.extensions import constant as onos_const
_OPERATION_MAPPING = {
events.PRECOMMIT_CREATE: onos_const.ONOS_CREATE,
events.PRECOMMIT_UPDATE: onos_const.ONOS_UPDATE,
events.PRECOMMIT_DELETE: onos_const.ONOS_DELETE,
events.AFTER_CREATE: onos_const.ONOS_CREATE,
events.AFTER_UPDATE: onos_const.ONOS_UPDATE,
events.AFTER_DELETE: onos_const.ONOS_DELETE,
}
_RESOURCE_MAPPING = {
resources.SECURITY_GROUP: onos_const.ONOS_SG,
resources.SECURITY_GROUP_RULE: onos_const.ONOS_SG_RULE,
}
| 39.614458
| 79
| 0.680961
|
# Copyright (c) 2017 SK Telecom Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from networking_onos.extensions import constant as onos_const
_OPERATION_MAPPING = {
events.PRECOMMIT_CREATE: onos_const.ONOS_CREATE,
events.PRECOMMIT_UPDATE: onos_const.ONOS_UPDATE,
events.PRECOMMIT_DELETE: onos_const.ONOS_DELETE,
events.AFTER_CREATE: onos_const.ONOS_CREATE,
events.AFTER_UPDATE: onos_const.ONOS_UPDATE,
events.AFTER_DELETE: onos_const.ONOS_DELETE,
}
_RESOURCE_MAPPING = {
resources.SECURITY_GROUP: onos_const.ONOS_SG,
resources.SECURITY_GROUP_RULE: onos_const.ONOS_SG_RULE,
}
class OnosSecurityGroupHandler(object):
def __init__(self, precommit, postcommit):
assert postcommit is not None
self._precommit = precommit
self._postcommit = postcommit
self._subscribe()
def _subscribe(self):
if self._precommit is not None:
for event in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE):
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP, event)
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP_RULE, event)
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP,
events.PRECOMMIT_UPDATE)
for event in (events.AFTER_CREATE, events.AFTER_DELETE):
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP, event)
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP_RULE, event)
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP, events.AFTER_UPDATE)
def _sg_callback(self, callback, resource, event, trigger, **kwargs):
context = kwargs['context']
res = kwargs.get(resource)
res_id = kwargs.get("%s_id" % resource)
if res_id is None:
res_id = res.get('id')
ops = _OPERATION_MAPPING[event]
res_type = _RESOURCE_MAPPING[resource]
res_dict = res
callback(context, ops, res_type, res_id, res_dict)
def sg_callback_precommit(self, resource, event, trigger, **kwargs):
self._sg_callback(self._precommit, resource, event, trigger, **kwargs)
def sg_callback_postcommit(self, resource, event, trigger, **kwargs):
self._sg_callback(self._postcommit, resource, event, trigger, **kwargs)
| 1,819
| 18
| 158
|
2e38813849e7b8d4b409de57f658a7d182ad66aa
| 3,682
|
py
|
Python
|
play.py
|
ksu-is/guesswordgame
|
65478e24c1fc834e43ab9dd3d00c8429fbe96f22
|
[
"Apache-2.0"
] | 7
|
2015-10-03T04:10:57.000Z
|
2021-04-02T14:43:21.000Z
|
play.py
|
ksu-is/guesswordgame
|
65478e24c1fc834e43ab9dd3d00c8429fbe96f22
|
[
"Apache-2.0"
] | 1
|
2016-04-20T17:11:22.000Z
|
2016-04-26T18:08:23.000Z
|
play.py
|
ksu-is/guesswordgame
|
65478e24c1fc834e43ab9dd3d00c8429fbe96f22
|
[
"Apache-2.0"
] | 5
|
2016-02-26T09:42:48.000Z
|
2021-05-09T17:32:04.000Z
|
import game.main as game
import time
import sys
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print "\n Recieved Interrupt Signal. Bye...."
import sys
sys.exit()
| 32.017391
| 110
| 0.560565
|
import game.main as game
import time
import sys
def main():
play = "--++playtheguesswordgame++--"
if len(sys.argv) > 1 and sys.argv[1] == "tut":
print """
Enter your guess that must be containing 4 letters:
"""
time.sleep(3)
print """
# now the player types the word 'buff'
"""
time.sleep(5)
print """
Enter your guess that must be containing 4 letters: buff
_ _ _ _ **
"""
time.sleep(6)
print """
# the above is the clues for the player from his word buff
# that is, the computer is saying that there are two characters
# in the word 'buff' that exactly exists (and buff wasn't that
# word) in the word the computer has in it's mind.
# Now the player tries to find which are those two characters
# were exactly in its place and which two aren't part of the word
# that computer have in its mind.
loading .......
"""
time.sleep(20)
print """
# Now again the user tries the word 'lube'
Enter your guess that must be containing 4 letters: lube
_ _ _ _ *!!
"""
time.sleep(6)
print """
# from the above clue the player gets to know that the character 'u'
# lies exactly at the second position on the word that he has to guess
# and 'b' should be at the first position, from the previous clue (no 'f' here).
# The player has now only a one ! to figure out. i,e either 'l' or 'b' exists in the
# word but misplaced. now he is going to figure it out by trying the word 'bulk'.
"""
time.sleep(10)
print """
Enter your guess that must be containing 4 letters: bulk
_ _ _ _ ***
"""
print """
# Here, the player knows, one '*' for 'b', one '*' for 'u' and the last star for 'l' (from
# previous clue). Now, he knows first three chars and he thinks the word might be 'bulb'
"""
print """
Enter your guess that must be containing 4 letters: bulb
Congrats! you've got the right word. To continue playing the game please enter 1 and to quit enter 2:
1. play
2. quit
# so, that's it we guess!
"""
play = raw_input("Do you want to play the game now! (y/n) :")
while play != 'y' and play != 'Y' and play != 'n' and play != 'N':
print "please type either 'y' or 'n' without single quote"
play = raw_input("Do you want to play the game now! (y/n) :")
if play == "--++playtheguesswordgame++--" or play == 'y' or play == 'Y':
print """
Welcome to Guess Word game
Game: Computer will think a word and you should guess it. It would be easy to win
the game if you apply the basic logic.
Play the game by typing your guess word.
For each word you type, the game will output the number of characters that exactly
match the word that computer have in its mind (yes! the mind) as the number
of stars and the number of characters that exist in the word but not in the appropriate
position with the number of exclamation symbol.
"""
guess_word = game.GuessWord()
guess_word.start_game()
else:
print "Good bye!"
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print "\n Recieved Interrupt Signal. Bye...."
import sys
sys.exit()
| 3,423
| 0
| 23
|
b7325eaebdbd28f2ed8cbfb180708a24650dee3d
| 5,258
|
py
|
Python
|
mysite/users/models.py
|
2021fallCMPUT404/group-cmput404-project
|
985b76dc6c554caf77e7cf5788355cca22a26e74
|
[
"Apache-2.0"
] | 2
|
2021-12-06T06:42:41.000Z
|
2022-03-29T21:40:14.000Z
|
mysite/users/models.py
|
2021fallCMPUT404/group-cmput404-project
|
985b76dc6c554caf77e7cf5788355cca22a26e74
|
[
"Apache-2.0"
] | 7
|
2021-10-29T20:31:44.000Z
|
2021-12-05T06:55:58.000Z
|
mysite/users/models.py
|
2021fallCMPUT404/group-cmput404-project
|
985b76dc6c554caf77e7cf5788355cca22a26e74
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from django.http import HttpResponse
import uuid
from django import forms
from django.forms.widgets import Textarea
import datetime
from posts.models import Post, Like, CommentLike#, InboxLike
from django.urls import reverse
SITE_URL = "https://cmput404-socialdist-project.herokuapp.com"
'''
#TODO: MERGE USER_PROFILE INTO USER
class User(AbstractUser):
pass
'''
# Create your models here.
| 38.661765
| 105
| 0.621909
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from django.http import HttpResponse
import uuid
from django import forms
from django.forms.widgets import Textarea
import datetime
from posts.models import Post, Like, CommentLike#, InboxLike
from django.urls import reverse
SITE_URL = "https://cmput404-socialdist-project.herokuapp.com"
'''
#TODO: MERGE USER_PROFILE INTO USER
class User(AbstractUser):
pass
'''
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT / user_<id>/<filename>
#return 'user_{0}/{1}'.format(instance.user.id, filename)
return 'images/users/user_{0}/{1}'.format(instance.user.id, filename)
# Create your models here.
class Create_user(forms.Form):
username = forms.CharField(initial='')
password = forms.CharField(widget=forms.PasswordInput())
confirm_password = forms.CharField(widget=forms.PasswordInput())
class User_Profile(models.Model):
type = "author"
user = models.OneToOneField(User,
on_delete=models.CASCADE,
related_name='user_profile')
host = SITE_URL + '/'
url = SITE_URL
displayName = models.CharField(max_length=60, blank=True)
email = models.CharField(max_length=60, blank=True)
first_name = models.CharField(max_length=69, blank=True)
last_name = models.CharField(max_length=69, blank=True)
profileImage = models.ImageField(
upload_to='profile_picture',
blank=True,
default='profile_picture/default_picture.png')
github = models.CharField(blank=True, default="", max_length=100)
#user = models.ForeignKey(User, on_delete=models.CASCADE)
bio = models.CharField(max_length=256, unique=False)
#user_posts = models.ForeignKey(Post, on_delete=models.CASCADE, null=True)
def __str__(self):
return ', '.join((self.displayName, str(self.id), str(self.user.id)))
def get_absolute_url(self):
return SITE_URL + reverse('users:user_crud', args=[str(self.user.id)])
class Inbox(models.Model):
type = 'inbox'
author = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ManyToManyField(Post, null=True, blank=True)
follow = models.ManyToManyField("users.FriendRequest", null=True, blank=True)
like = models.ManyToManyField(Like, null=True, blank=True)
#comment_like = models.ManyToManyField(CommentLike, null=True, blank=True, on_delete=models.CASCADE)
#inbox_like = models.ManyToManyField(InboxLike, null=True, blank=True, on_delete=models.CASCADE)
class UserFollows(models.Model):
#following
actor = models.ForeignKey(User_Profile,
related_name="following",
on_delete=models.CASCADE,
default='')
#Got followed
object = models.ForeignKey(User_Profile,
related_name="followers",
on_delete=models.CASCADE,
default='')
#Creates new instance of Userfollow with the actor following the object
#Parameters are User_Profile objects
def create_user_follow(actor, object):
UserFollows.objects.get_or_create(actor=actor, object=object)
#The actor will stop following the object
def delete_user_follow(actor, object):
instance = UserFollows.objects.filter(actor=actor, object=object)
if instance.exists():
instance.delete()
return None
class FriendRequest(models.Model):
type = "Follow"
actor = models.ForeignKey(User_Profile,
on_delete=models.CASCADE,
related_name="actor",
default='')
object = models.ForeignKey(User_Profile,
on_delete=models.CASCADE,
related_name="object",
default='')
def create_friend_request(actor, object):
'''Creates a friend request instance with the actor being the person who follows
        and the object being the person who is being followed. The actor and object parameters
are user_profile objects.'''
print(actor, object)
if UserFollows.objects.filter(actor=object, object=actor).exists(
): #Checks if the object is already following the actor
# Returns so it doesn't create constant friend requests
print("{} is already following {}".format(object.displayName,
actor.displayName))
return
f_request, created = FriendRequest.objects.get_or_create(actor=actor,
object=object)
print("Friend request created")
print(f_request.summary())
return f_request
def summary(self):
return '{} wants to follow {}'.format(self.actor.displayName,
self.object.displayName)
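# Illustrative usage sketch (not part of the original models): assuming two saved User_Profile
# rows, alice_profile and bob_profile (hypothetical names), a follow flow built from the helpers
# above would be:
#   FriendRequest.create_friend_request(actor=alice_profile, object=bob_profile)
#   UserFollows.create_user_follow(actor=alice_profile, object=bob_profile)   # on accept
#   UserFollows.delete_user_follow(actor=alice_profile, object=bob_profile)   # to unfollow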
| 794
| 3,760
| 146
|
414ed76ae0b89becc26b055e4a79ab7edd82af55
| 8,528
|
py
|
Python
|
clickup/client.py
|
skwaugh/ClickUp
|
3f9fb2d1e8cc8cd4e95cd46030e1265aefa5541d
|
[
"MIT"
] | 3
|
2019-12-17T13:38:50.000Z
|
2021-05-31T13:47:50.000Z
|
clickup/client.py
|
secdevopsai/ClickUp
|
3f9fb2d1e8cc8cd4e95cd46030e1265aefa5541d
|
[
"MIT"
] | null | null | null |
clickup/client.py
|
secdevopsai/ClickUp
|
3f9fb2d1e8cc8cd4e95cd46030e1265aefa5541d
|
[
"MIT"
] | 4
|
2020-07-31T08:50:56.000Z
|
2022-02-14T18:58:04.000Z
|
import requests
from collections import defaultdict
import datetime
| 32.8
| 505
| 0.569536
|
import requests
from collections import defaultdict
import datetime
class Client:
def __init__(self, email, password, api):
self.server = "https://api.clickup.com/"
self.email = email
self.password = password
self.api = api
self.bearer = self.login(email, password)
user_response = self.get_user()
self.username = user_response['user']['username']
self.user_id = user_response['user']['id']
team_response = self.get_teams()
self.teams = {}
self.subcategories = {}
for team in team_response['teams']:
self.teams[team['id']] = team['name']
self.spaces = {}
for team in self.teams:
spaces_response = self.get_team_spaces(team)
for space in spaces_response['spaces']:
self.spaces[space['id']] = {
"name": space["name"], "team": team}
def login(self, email, password):
"""Login to clickup and retrieve bearer token
Arguments:
email {str}
password {str}
Returns:
token -- Bearer Token
"""
uri = "v1/login?include_teams=true"
data = {"email": email, "password": password}
response = requests.request(
method="GET", url=self.server + uri, data=data).json()
return response["token"]
def send_request(self, method="GET", uri=None, version="v1", **kwargs):
"""Send HTTP Request to ClickUP API
Keyword Arguments:
method {str} -- HTTP Request Method (default: {"GET"})
uri {str} -- URI
version {str} -- API Endpoint version (default: {"v1"})
Returns:
response -- JSON object from ClickUP HTTP Response
"""
if version == "v1":
headers = {"Authorization": self.api}
else:
headers = {"Authorization": "Bearer {}".format(self.bearer)}
response = requests.request(
method=method, url=self.server + uri, headers=headers, **kwargs).json()
return response
def get_user(self):
"""Retrieve user information
Returns:
dict -- JSON Object of user information
"""
uri = "api/v1/user"
response = self.send_request("GET", uri=uri)
return response
def get_teams(self):
"""Retrieve teams
Returns:
response -- JSON Object of team information
"""
uri = "api/v1/team"
response = self.send_request("GET", uri=uri)
return response
def get_team_spaces(self, team_id):
"""Retrieve Team Spaces
Arguments:
team_id
Returns:
response -- JSON Object of team spaces
"""
uri = "api/v1/team/{}/space".format(team_id)
response = self.send_request("GET", uri=uri)
return response
def get_tasks_by_team(self, team_id, space_id=None, include_closed="true", version='v1'):
"""Get tasks associated with team id
Arguments:
team_id {str}
Keyword Arguments:
space_id {str} -- Space ID (default: {None})
include_closed {str} -- Include closed tasks (default: {"true"})
version {str} -- API Version (default: {'v1'})
Returns:
response -- JSON Object of team spaces
"""
if version == 'v1':
uri = "api/{}/team/{}/task?include_closed={}".format(
version, team_id, include_closed)
if space_id:
uri += "&space_ids%5B%5D={}".format(space_id)
response = self.send_request("GET", uri=uri)
return response
def enrich_task_ids(self, team_id, space_id, task_ids):
"""Retrieve more detailed task information
Arguments:
team_id {str} -- Team ID
space_id {str} -- Space ID
task_ids {list} -- Collection of task ids
Returns:
response -- JSON Object of enriched tasks
"""
uri = "v2/task?team_id={}&project_ids%5B%5D={}?fields%5B%5D=assignees&fields%5B%5D=assigned_comments_count&fields%5B%5D=assigned_checklist_items&fields%5B%5D=attachments_thumbnail_count&fields%5B%5D=dependency_state&fields%5B%5D=parent_task&fields%5B%5D=attachments_count&fields%5B%5D=followers&fields%5B%5D=totalTimeSpent&fields%5B%5D=subtasks_count&fields%5B%5D=subtasks_by_status&fields%5B%5D=tags&fields%5B%5D=simple_statuses&fields%5B%5D=fallback_coverimage&fields%5B%5D=customFields".format(
team_id, space_id)
uri_args = "&task_ids[]=".join([task_id for task_id in task_ids])
response = self.send_request("GET", uri=uri + uri_args, version="v2")
return response
def enrich_task(self, task_id):
"""Retrieve basic task information (not including time estimates)
Arguments:
task_id {str} -- Task ID
Returns:
response -- JSON Object of task details
"""
uri = "v1/task/{}".format(task_id)
response = self.send_request("GET", uri=uri, version="v2")
return response
def get_task_ids(self, team_id, project_id, category_id, show_all=False):
"""[summary]
Arguments:
team_id {str} -- Team ID
project_id {[type]} -- Project ID
category_id {[type]} -- Category ID
Keyword Arguments:
show_all {bool} -- Show all tasks - including open (default: {False})
Returns:
response -- JSON Object of task ids
"""
uri = "v2/taskId?team_id={}&project_ids%5B%5D={}&category_ids%5B%5D={}".format(
team_id, project_id, category_id)
if not show_all:
uri += "&statuses%5B%5D=Open"
response = self.send_request("GET", uri=uri, version="v2")
return response
def get_categories(self, space_id):
"""Retrieve Categories
Arguments:
space_id {str} - Space ID
Returns:
response -- JSON Object and appends subcategories to object instance
"""
uri = "v1/project/{}/category".format(space_id)
response = self.send_request("GET", uri=uri, version="v2")
self.get_subcategories(response, space_id)
return response
def get_subcategories(self, categories, space_id):
"""Retrieve Subcategories from get_categories() response
Arguments:
categories {dict}
space_id -- Space ID
"""
for category in categories["categories"]:
for subcategory in category['subcategories']:
name = subcategory['name']
category_id = category['id']
subcategory_id = subcategory['id']
self.subcategories[subcategory_id] = {
"name": name,
"category_id": category_id,
"space_id": space_id
}
def create_task(self, subcategory, name, timestamp, estimate=None):
"""Create task in ClickUp subcategory
Arguments:
subcategory str -- Subcategory
name str -- Task name
timestamp int -- Due Date (unix timestamp)
estimate int -- Minutes estimated to complete task
Returns:
response -- JSON Object of task creation
"""
uri = "v1/subcategory/{}/task".format(
subcategory)
data = {
"name": name, "assignees": [],
"due_date": int(timestamp) * 1000,
"start_date": None, "due_date_time": False, "status": "Open",
"priority": "none", "position_wide": "subcategory",
"position": 0
}
response = self.send_request("POST", uri=uri, version="v2", data=data)
if estimate:
task_id = response['id']
estimate_time = {"time_estimate": 60000 * estimate,
"time_estimate_string": "{} minutes".format(estimate)}
uri = "v1/task/{}".format(task_id)
response = self.send_request(
"PUT", uri=uri, version="v2", data=estimate_time)
return response
def get_tags(self, project_id):
"""Retrieve task tags from Project
Arguments:
project_id
"""
uri = "v1/tag?project_id={}".format(project_id)
response = self.send_request("GET", uri=uri, version="v2")
return response
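# Illustrative usage sketch (not part of the original file); the credentials below are
# placeholders and only methods defined on Client above are used.
#   client = Client(email="user@example.com", password="not-a-real-password", api="pk_xxx")
#   print(client.teams)                                  # {team_id: team_name, ...}
#   for team_id in client.teams:
#       tasks = client.get_tasks_by_team(team_id, include_closed="false")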
| 813
| 7,623
| 23
|
ff134e64e57b7ca7080b40af0e3f390aa9a3db33
| 1,305
|
py
|
Python
|
.env/lib/python2.7/site-packages/skimage/viewer/tests/test_utils.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
.env/lib/python2.7/site-packages/skimage/viewer/tests/test_utils.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
.env/lib/python2.7/site-packages/skimage/viewer/tests/test_utils.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 2
|
2020-03-12T23:20:22.000Z
|
2021-02-15T21:54:02.000Z
|
# -*- coding: utf-8 -*-
from skimage.viewer import utils
from skimage.viewer.utils import dialogs
from skimage.viewer.qt import QtCore, QtWidgets, has_qt
from skimage._shared import testing
@testing.skipif(not has_qt, reason="Qt not installed")
@testing.skipif(not has_qt, reason="Qt not installed")
@testing.skipif(True, reason="Can't automatically close window. See #3081.")
@testing.skipif(not has_qt, reason="Qt not installed")
@testing.skipif(True, reason="Can't automatically close window. See #3081.")
@testing.skipif(not has_qt, reason="Qt not installed")
| 31.071429
| 76
| 0.724904
|
# -*- coding: utf-8 -*-
from skimage.viewer import utils
from skimage.viewer.utils import dialogs
from skimage.viewer.qt import QtCore, QtWidgets, has_qt
from skimage._shared import testing
@testing.skipif(not has_qt, reason="Qt not installed")
def test_event_loop():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(10, QtWidgets.QApplication.quit)
utils.start_qtapp()
@testing.skipif(not has_qt, reason="Qt not installed")
def test_format_filename():
fname = dialogs._format_filename(('apple', 2))
assert fname == 'apple'
fname = dialogs._format_filename('')
assert fname is None
@testing.skipif(True, reason="Can't automatically close window. See #3081.")
@testing.skipif(not has_qt, reason="Qt not installed")
def test_open_file_dialog():
QApp = utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QApp.quit())
filename = dialogs.open_file_dialog()
assert filename is None
@testing.skipif(True, reason="Can't automatically close window. See #3081.")
@testing.skipif(not has_qt, reason="Qt not installed")
def test_save_file_dialog():
QApp = utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QApp.quit())
filename = dialogs.save_file_dialog()
assert filename is None
| 645
| 0
| 88
|
e4eed4150a0020f361e02176075753236176288a
| 269
|
py
|
Python
|
RasaNLU/pending_actions.py
|
naikshubham/Rasa-Introduction
|
93b1c6428879e49ddd93d7a5ec5a4eb52fb9bab2
|
[
"BSD-2-Clause"
] | 1
|
2021-06-15T09:58:15.000Z
|
2021-06-15T09:58:15.000Z
|
RasaNLU/pending_actions.py
|
naikshubham/Rasa-Introduction
|
93b1c6428879e49ddd93d7a5ec5a4eb52fb9bab2
|
[
"BSD-2-Clause"
] | null | null | null |
RasaNLU/pending_actions.py
|
naikshubham/Rasa-Introduction
|
93b1c6428879e49ddd93d7a5ec5a4eb52fb9bab2
|
[
"BSD-2-Clause"
] | null | null | null |
# Pending actions
# we can improve the user experience of our bot by asking the user simple yes or no followup questions
# one easy way to handle these followups is to define a pending action which gets executed as soon as the user says "yes"
# and wiped if the user says "no"
| 44.833333
| 115
| 0.769517
|
# Pending actions
# we can improve the user experience of our bot by asking the user simple yes or no followup questions
# one easy way to handle these followups is to define a pending action which gets executed as soon as the user says "yes"
# and wiped if the user says "no"
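# A minimal, framework-agnostic sketch of the pending-action idea described above, added for
# illustration only; the function and intent names (handle_message, "affirm", "deny") are made up.
pending_action = None  # holds a zero-argument callable until the user confirms or declines
def order_pizza():
    print("Ordering pizza...")
def handle_message(intent):
    global pending_action
    if intent == "order_pizza":
        pending_action = order_pizza            # remember what to do once confirmed
        return "Should I order a pizza for you? (yes/no)"
    if intent == "affirm" and pending_action is not None:
        action, pending_action = pending_action, None
        action()                                # executed as soon as the user says "yes"
        return "Done!"
    if intent == "deny":
        pending_action = None                   # wiped if the user says "no"
        return "Okay, I won't do that."
    return "Sorry, I didn't get that."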
| 0
| 0
| 0
|
28ea666798dad6da46886eee004f74017eb3e201
| 2,573
|
py
|
Python
|
get_ip_pool.py
|
vbertcen/ajk_sp_sale_rent_ratio
|
bd477441fde3ccbe396b68dba2418ec0b9aa558e
|
[
"Apache-2.0"
] | 1
|
2019-08-30T10:54:06.000Z
|
2019-08-30T10:54:06.000Z
|
get_ip_pool.py
|
vbertcen/ajk_sp_sale_rent_ratio
|
bd477441fde3ccbe396b68dba2418ec0b9aa558e
|
[
"Apache-2.0"
] | null | null | null |
get_ip_pool.py
|
vbertcen/ajk_sp_sale_rent_ratio
|
bd477441fde3ccbe396b68dba2418ec0b9aa558e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import sys
import pymysql
import requests
import datetime
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
now_str = datetime.datetime.now().strftime('%Y-%m-%d')
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116", }
if __name__ == '__main__':
init_ip_pool()
| 34.306667
| 156
| 0.539059
|
# coding=utf-8
import sys
import pymysql
import requests
import datetime
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
now_str = datetime.datetime.now().strftime('%Y-%m-%d')
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116", }
def init_ip_pool():
conn = pymysql.connect(host='localhost', user='root', password='Scholl7fcb', database='house_spider')
cursor = conn.cursor()
cursor.execute("truncate ip_pool")
conn.commit()
cursor.close()
index = 1
while True:
print "当前查询到第{}页".format(index)
url = 'http://www.66ip.cn/{}.html'.format(index)
html = requests.get(url=url, headers=headers)
selector = etree.HTML(html.content)
page_count = len(selector.xpath('//*[@id="main"]/div/div[1]/table/tr'))
if page_count == 0:
break
print page_count
ip = '//*[@id="main"]/div/div[1]/table/tr[{}]/td[1]'
port = '//*[@id="main"]/div/div[1]/table/tr[{}]/td[2]'
location = '//*[@id="main"]/div/div[1]/table/tr[{}]/td[3]'
for i in range(2, page_count):
ip_text = selector.xpath(ip.format(i))[0].text
port_text = selector.xpath(port.format(i))[0].text
location_text = selector.xpath(location.format(i))[0].text
cursor = conn.cursor()
if verify_available(ip_text, port_text):
cursor.execute(
"insert into ip_pool values(null,'{}','{}','{}',1,'{}')".format(ip_text, port_text, location_text,
now_str))
print "ip={},available={}".format(ip_text, "true")
else:
cursor.execute(
"insert into ip_pool values(null,'{}','{}','{}',0,'{}')".format(ip_text, port_text, location_text,
now_str))
print "ip={},available={}".format(ip_text, "false")
cursor.close()
conn.commit()
index += 1
conn.close()
def verify_available(ip, port):
pro = dict()
pro['http'] = "http://{}:{}".format(ip, port)
try:
html = requests.get(url='http://www.baidu.com', headers=headers, proxies=pro, timeout=2)
except Exception:
return False
else:
return html.content.count('百度') > 0
if __name__ == '__main__':
init_ip_pool()
| 2,130
| 0
| 46
|
dd8bdd0ca9cd34cb385f46afc75b4e9cf95ab521
| 476
|
py
|
Python
|
jython/jython/java_usage_examples.py
|
JohannesDienst/polyglot_integration
|
ee0936539282e82d4d0605ed564389c0539ede40
|
[
"MIT"
] | null | null | null |
jython/jython/java_usage_examples.py
|
JohannesDienst/polyglot_integration
|
ee0936539282e82d4d0605ed564389c0539ede40
|
[
"MIT"
] | null | null | null |
jython/jython/java_usage_examples.py
|
JohannesDienst/polyglot_integration
|
ee0936539282e82d4d0605ed564389c0539ede40
|
[
"MIT"
] | null | null | null |
from java.lang import System as javasystem
javasystem.out.println("Hello")
from java.util import Random
r = rand(100, 23)
for i in range(10):
print r.nextDouble()
| 23.8
| 56
| 0.653361
|
from java.lang import System as javasystem
javasystem.out.println("Hello")
from java.util import Random
class rand(Random):
def __init__(self, multiplier=1.0, seed=None):
self.multiplier = multiplier
if seed is None:
Random.__init__(self)
else:
Random.__init__(self, seed)
def nextDouble(self):
return Random.nextDouble(self) * self.multiplier
r = rand(100, 23)
for i in range(10):
print r.nextDouble()
| 233
| -2
| 76
|
da4b679b11109485dccab6378be56da7adfaca21
| 321
|
py
|
Python
|
5-loops/exercise_3.1.py
|
wgatharia/csci131
|
50d76603863c9a9932634fdf2e48594f8dc673d2
|
[
"MIT"
] | null | null | null |
5-loops/exercise_3.1.py
|
wgatharia/csci131
|
50d76603863c9a9932634fdf2e48594f8dc673d2
|
[
"MIT"
] | null | null | null |
5-loops/exercise_3.1.py
|
wgatharia/csci131
|
50d76603863c9a9932634fdf2e48594f8dc673d2
|
[
"MIT"
] | null | null | null |
"""
File: exercise_3.1.py
Author: William Gatharia
This code demonstrates using a for loop.
"""
#loop and print numbers from 1 to 10 using a for loop and range
# range creates a list of numbers
# starting from 1 to 10.
# Note that 11 = 10 + 1 is the upper limit for range
for i in range(1, 11):
print(i)
| 24.692308
| 63
| 0.682243
|
"""
File: exercise_3.1.py
Author: William Gatharia
This code demonstrates using a for loop.
"""
#loop and print numbers from 1 to 10 using a for loop and range
# range creates a list of numbers
# starting from 1 to 10.
# Note that 11 = 10 + 1 is the upper limit for range
for i in range(1, 11):
print(i)
| 0
| 0
| 0
|
cd438ed3e070272f3b23e2778ba9493fd02837f8
| 1,919
|
py
|
Python
|
Data_preparation/test/video_subcrop.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T05:57:32.000Z
|
2020-11-07T05:57:32.000Z
|
Data_preparation/test/video_subcrop.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T00:30:22.000Z
|
2021-01-26T02:22:16.000Z
|
Data_preparation/test/video_subcrop.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T05:57:52.000Z
|
2020-11-07T05:57:52.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 21:19:37 2020
@author: miyazakishinichi
"""
import pandas as pd
from tkinter import messagebox
from tkinter import filedialog
import tkinter
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import os, sys, cv2
from tqdm import tqdm
####Tk root generate####
root = tkinter.Tk()
root.withdraw()
####ROI setting####
messagebox.showinfo('selectfiles', 'select csvfile for ROI setting')
ROI_file_path = tkinter.filedialog.askopenfilename(initialdir = dir)
if ROI_file_path == "":
messagebox.showinfo('cancel', 'stop before ROI setting')
sys.exit()
roi_data = csv_file_read(ROI_file_path)
roi_data['left'] = roi_data['BX']
roi_data['right'] = roi_data['BX'] + roi_data['Width']
roi_data['low'] = roi_data['BY']
roi_data['high'] = roi_data['BY'] + roi_data['Height']
roi = roi_data.loc[3]['left':'high']
####file select & directory setting####
messagebox.showinfo('selectfiles', 'select image files')
path = filedialog.askopenfilename()
if path != False:
pass
else:
messagebox.showinfo('quit', 'stop the script')
sys.exit()
folderpath = os.path.dirname(path)
os.chdir(folderpath)
imlist = os.listdir("./")
os.makedirs("../chamber3", exist_ok = True)
for i in tqdm(range(len(imlist))):
tempimage = cv2.imread(imlist[i])
left, right, low, high = int(roi['left']),\
int(roi['right']),int(roi['low']),int(roi['high'])
subimage = tempimage[low:high,left:right]
cv2.imwrite("../chamber3/{}.jpg".format(str(i).zfill(5)), subimage)
| 27.028169
| 71
| 0.668056
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 21:19:37 2020
@author: miyazakishinichi
"""
import pandas as pd
from tkinter import messagebox
from tkinter import filedialog
import tkinter
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import os, sys, cv2
from tqdm import tqdm
def csv_file_read(filepath):
file_dir, file_name = os.path.split(filepath)
base, ext = os.path.splitext(file_name)
if ext == '.csv':
data = pd.read_csv(filepath, index_col = 0)
return data
else:
return messagebox.showinfo('error',
'selected file is not csv file')
####Tk root generate####
root = tkinter.Tk()
root.withdraw()
####ROI setting####
messagebox.showinfo('selectfiles', 'select csvfile for ROI setting')
ROI_file_path = tkinter.filedialog.askopenfilename(initialdir = dir)
if ROI_file_path == "":
messagebox.showinfo('cancel', 'stop before ROI setting')
sys.exit()
roi_data = csv_file_read(ROI_file_path)
roi_data['left'] = roi_data['BX']
roi_data['right'] = roi_data['BX'] + roi_data['Width']
roi_data['low'] = roi_data['BY']
roi_data['high'] = roi_data['BY'] + roi_data['Height']
roi = roi_data.loc[3]['left':'high']
####file select & directory setting####
messagebox.showinfo('selectfiles', 'select image files')
path = filedialog.askopenfilename()
if path != False:
pass
else:
messagebox.showinfo('quit', 'stop the script')
sys.exit()
folderpath = os.path.dirname(path)
os.chdir(folderpath)
imlist = os.listdir("./")
os.makedirs("../chamber3", exist_ok = True)
for i in tqdm(range(len(imlist))):
tempimage = cv2.imread(imlist[i])
left, right, low, high = int(roi['left']),\
int(roi['right']),int(roi['low']),int(roi['high'])
subimage = tempimage[low:high,left:right]
cv2.imwrite("../chamber3/{}.jpg".format(str(i).zfill(5)), subimage)
| 310
| 0
| 23
|
aedaa1eb60c8454a5adaa3d060aa87eba4684ba7
| 207
|
py
|
Python
|
getFrame.py
|
divakar-lakhera/Partial-Encryption
|
0fc6537b4a23848b21618e906a22920bd00b7c41
|
[
"MIT"
] | null | null | null |
getFrame.py
|
divakar-lakhera/Partial-Encryption
|
0fc6537b4a23848b21618e906a22920bd00b7c41
|
[
"MIT"
] | null | null | null |
getFrame.py
|
divakar-lakhera/Partial-Encryption
|
0fc6537b4a23848b21618e906a22920bd00b7c41
|
[
"MIT"
] | null | null | null |
import cv2
INPUT_FILE='input_encode.avi'
FRAME_NUMBER=70
cap=cv2.VideoCapture(INPUT_FILE)
cap.set(cv2.CAP_PROP_POS_FRAMES, FRAME_NUMBER)
ret,frame=cap.read()
cv2.imwrite("frame_"+INPUT_FILE+".png",frame)
| 18.818182
| 46
| 0.797101
|
import cv2
INPUT_FILE='input_encode.avi'
FRAME_NUMBER=70
cap=cv2.VideoCapture(INPUT_FILE)
cap.set(cv2.CAP_PROP_POS_FRAMES, FRAME_NUMBER)
ret,frame=cap.read()
cv2.imwrite("frame_"+INPUT_FILE+".png",frame)
| 0
| 0
| 0
|
78169f18b371e12087115a1c033f6919a0a32815
| 27,978
|
py
|
Python
|
brainda/algorithms/decomposition/csp.py
|
TBC-TJU/MetaBCI-brainda
|
d2dc655163b771ca22e43432d886ece3d98235c8
|
[
"MIT"
] | null | null | null |
brainda/algorithms/decomposition/csp.py
|
TBC-TJU/MetaBCI-brainda
|
d2dc655163b771ca22e43432d886ece3d98235c8
|
[
"MIT"
] | null | null | null |
brainda/algorithms/decomposition/csp.py
|
TBC-TJU/MetaBCI-brainda
|
d2dc655163b771ca22e43432d886ece3d98235c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Authors: Swolf <swolfforever@gmail.com>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from copy import deepcopy
from typing import Union, Optional, List, Dict, Tuple
from functools import partial
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh, pinv, solve
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, ShuffleSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.pipeline import make_pipeline
from .base import robust_pattern, FilterBank
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel in CSP algorithm based on paper [1]_.
Parameters
----------
X: ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y: ndarray
labels of X, shape (n_trials,).
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
if len(labels) != 2:
raise ValueError("the current kernel is for 2-class problem.")
C1 = covariances(X[y==labels[0]])
C2 = covariances(X[y==labels[1]])
# # trace normalization
# # this operation equals to trial normalization
# C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
# C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C1 = np.mean(C1, axis=0)
C2 = np.mean(C2, axis=0)
Cc = C1 + C2
# check positive-definiteness
Cc = nearestPD(Cc)
# generalized eigenvalue problem
D, W = eigh(C1, Cc)
ix = np.argsort(D)[::-1]
W = W[:, ix]
D = D[ix]
A = robust_pattern(W, C1, W.T@C1@W)
return W, D, A
def csp_feature(W: ndarray, X: ndarray,
n_components: int = 2) -> ndarray:
"""Return CSP features in paper [1]_.
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
the first k components to use, usually even number, by default 2
Returns
-------
ndarray
features of shape (n_trials, n_features)
Raises
------
ValueError
        n_components should be less than the number of channels
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
raise ValueError("n_components should less than the number of channels")
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
# normalized variance
features = np.mean(np.square(np.matmul(W[:, :n_components].T, X)), axis=-1)
features = features / (np.sum(features, axis=-1, keepdims=True) + eps)
# log-transformation
features = np.log(np.clip(features, eps, None))
return features
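# Illustrative usage of the two helpers above (not part of the original module), kept as a
# comment so nothing runs at import time; X_demo/y_demo are made-up synthetic arrays whose
# shapes follow the docstrings: X is (n_trials, n_channels, n_samples).
#   rng = np.random.RandomState(0)
#   X_demo = rng.randn(40, 8, 128)
#   y_demo = np.array([0] * 20 + [1] * 20)
#   W_demo, _, _ = csp_kernel(X_demo, y_demo)
#   feats = csp_feature(W_demo, X_demo, n_components=4)  # -> shape (40, 4)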
def _rjd(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on jacobi angle.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for the stopping criterion (default 1e-9).
    n_iter_max : int, optional
        The maximum number of iterations to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
JADE. The code is a translation of the matlab code provided in the author
website.
References
----------
.. [1] Cardoso, Jean-Francois, and Antoine Souloumiac. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
"""
# reshape input matrix
A = np.concatenate(X, 0).T
# init variables
m, nm = A.shape
V = np.eye(m)
encore = True
k = 0
while encore:
encore = False
k += 1
if k > n_iter_max:
break
for p in range(m - 1):
for q in range(p + 1, m):
Ip = np.arange(p, nm, m)
Iq = np.arange(q, nm, m)
# computation of Givens angle
g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
gg = np.dot(g, g.T)
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton +
np.sqrt(ton * ton + toff * toff))
c = np.cos(theta)
s = np.sin(theta)
encore = encore | (np.abs(s) > eps)
if (np.abs(s) > eps):
tmp = A[:, Ip].copy()
A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
A[:, Iq] = c * A[:, Iq] - s * tmp
tmp = A[p, :].copy()
A[p, :] = c * A[p, :] + s * A[q, :]
A[q, :] = c * A[q, :] - s * tmp
tmp = V[:, p].copy()
V[:, p] = c * V[:, p] + s * V[:, q]
V[:, q] = c * V[:, q] - s * tmp
D = np.reshape(A, (m, int(nm / m), m)).transpose(1, 0, 2)
return V, D
def _ajd_pham(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on pham's algorithm.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for the stopping criterion (default 1e-9).
    n_iter_max : int, optional
        The maximum number of iterations to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the PHAM's AJD algorithm [1]_.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive definite Hermitian matrices." SIAM Journal on Matrix Analysis and Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(n_iter_max):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V.T, D
def _uwedge(X, init=None, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization algorithm UWEDGE.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
init : None | ndarray, optional
Initialization for the diagonalizer, shape (n_channels, n_channels).
eps : float, optional
        Tolerance for the stopping criterion (default 1e-9).
    n_iter_max : int
        The maximum number of iterations to reach convergence (default 1000).
Returns
-------
W_est : ndarray
The diagonalizer, shape (n_filters, n_channels), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
Uniformly Weighted Exhaustive Diagonalization using Gauss iteration
(U-WEDGE). Implementation of the AJD algorithm by Tichavsky and Yeredor [1]_ [2]_.
This is a translation from the matlab code provided by the authors.
References
----------
.. [1] P. Tichavsky, A. Yeredor and J. Nielsen, "A Fast Approximate Joint Diagonalization Algorithm Using a Criterion with a Block Diagonal Weight Matrix", ICASSP 2008, Las Vegas.
.. [2] P. Tichavsky and A. Yeredor, "Fast Approximate Joint Diagonalization Incorporating Weight Matrices" IEEE Transactions of Signal Processing, 2009.
"""
L, d, _ = X.shape
# reshape input matrix
M = np.concatenate(X, 0).T
# init variables
d, Md = M.shape
iteration = 0
improve = 10
if init is None:
E, H = np.linalg.eig(M[:, 0:d])
W_est = np.dot(np.diag(1. / np.sqrt(np.abs(E))), H.T)
else:
W_est = init
Ms = np.array(M)
Rs = np.zeros((d, L))
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
M[:, Il] = 0.5*(M[:, Il] + M[:, Il].T)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit = np.sum(Ms**2) - np.sum(Rs**2)
while (improve > eps) & (iteration < n_iter_max):
B = np.dot(Rs, Rs.T)
C1 = np.zeros((d, d))
for i in range(d):
C1[:, i] = np.sum(Ms[:, i:Md:d]*Rs, axis=1)
D0 = B*B.T - np.outer(np.diag(B), np.diag(B))
A0 = (C1 * B - np.dot(np.diag(np.diag(B)), C1.T)) / (D0 + np.eye(d))
A0 += np.eye(d)
W_est = np.linalg.solve(A0, W_est)
Raux = np.dot(np.dot(W_est, M[:, 0:d]), W_est.T)
aux = 1./np.sqrt(np.abs(np.diag(Raux)))
W_est = np.dot(np.diag(aux), W_est)
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit_new = np.sum(Ms**2) - np.sum(Rs**2)
improve = np.abs(crit_new - crit)
crit = crit_new
iteration += 1
D = np.reshape(Ms, (d, L, d)).transpose(1, 0, 2)
return W_est.T, D
ajd_methods = {
'rjd': _rjd,
'ajd_pham': _ajd_pham,
'uwedge': _uwedge
}
def _check_ajd_method(method):
"""Check if a given method is valid.
Parameters
----------
method : callable object or str
Could be the name of ajd_method or a callable method itself.
Returns
-------
method: callable object
A callable ajd method.
"""
if callable(method):
pass
elif method in ajd_methods.keys():
method = ajd_methods[method]
else:
raise ValueError(
"""%s is not an valid method ! Valid methods are : %s or a
callable function""" % (method, (' , ').join(ajd_methods.keys())))
return method
def ajd(X: ndarray, method: str ='uwedge') -> Tuple[ndarray, ndarray]:
"""Wrapper of AJD methods.
Parameters
----------
X : ndarray
Input covariance matrices, shape (n_trials, n_channels, n_channels)
method : str, optional
AJD method (default uwedge).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The mean of quasi diagonal matrices, shape (n_channels,).
"""
method = _check_ajd_method(method)
V, D = method(X)
D = np.diag(np.mean(D, axis=0))
ind = np.argsort(D)[::-1]
D = D[ind]
V = V[:, ind]
return V, D
def gw_csp_kernel(X: ndarray, y: ndarray,
ajd_method: str = 'uwedge') -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""Grosse-Wentrup AJD method based on paper [1]_.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y : ndarray
labels, shape (n_trials).
ajd_method : str, optional
ajd methods, 'uwedge' 'rjd' and 'ajd_pham', by default 'uwedge'.
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
mutual_info: ndarray
        Mutual information values, shape (n_filters,).
References
----------
.. [1] Grosse-Wentrup, Moritz, and Martin Buss. "Multiclass common spatial patterns and information theoretic feature extraction." Biomedical Engineering, IEEE Transactions on 55, no. 8 (2008): 1991-2000.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
Cx = []
for label in labels:
C = covariances(X[y==label])
# trace normalization
C = C / np.trace(C, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
Cx.append(np.mean(C, axis=0))
Cx = np.stack(Cx)
W, D = ajd(Cx, method=ajd_method)
# Ctot = np.mean(Cx, axis=0)
# W = W / np.sqrt(np.diag(W.T@Ctot@W))
W = W / np.sqrt(D)
# compute mutual information values
Pc = [np.mean(y == label) for label in labels]
mutual_info = []
for j in range(W.shape[-1]):
a = 0
b = 0
for i in range(len(labels)):
# tmp = np.dot(np.dot(W[j], self.C_[i]), W[j].T)
tmp = W[:, j].T@Cx[i]@W[:, j]
a += Pc[i] * np.log(np.sqrt(tmp))
b += Pc[i] * (tmp ** 2 - 1)
mi = - (a + (3.0 / 16) * (b ** 2))
mutual_info.append(mi)
mutual_info = np.array(mutual_info)
ix = np.argsort(mutual_info)[::-1]
W = W[:, ix]
mutual_info = mutual_info[ix]
D = D[ix]
A = robust_pattern(W, Cx[0], W.T@Cx[0]@W)
return W, D, A, mutual_info
class CSP(BaseEstimator, TransformerMixin):
"""Common Spatial Pattern.
    If n_components is None, the best number of components is found automatically with a grid search. The upper search limit is determined by max_components, which defaults to half of the number of channels.
"""
def spoc_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""Source Power Comodulation (SPoC) based on paper [1]_.
    It is a continuous CSP-like method.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
y : ndarray
labels, shape (n_trials)
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Sven Dähne, Frank C. Meinecke, Stefan Haufe, Johannes Höhne, Michael Tangermann, Klaus-Robert Müller, and Vadim V. Nikulin. SPoC: a novel framework for relating the amplitude of neuronal oscillations to behaviorally relevant parameters. NeuroImage, 86:111–122, 2014. doi:10.1016/j.neuroimage.2013.07.079.
"""
X, weights = np.copy(X), np.copy(y)
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
weights = weights - np.mean(weights)
weights = weights / np.std(weights)
Cx = covariances(X)
# trace normalization
Cx = Cx / np.trace(Cx, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C = np.mean(Cx, axis=0)
Cz = np.mean(weights[:, np.newaxis, np.newaxis]*Cx, axis=0)
# check positive-definiteness
C = nearestPD(C)
Cz = nearestPD(Cz)
# TODO: direct copy from pyriemann, need verify
D, W = eigh(Cz, C)
ind = np.argsort(D)[::-1]
D = D[ind]
W = W[:, ind]
A = robust_pattern(W, Cz, W.T@Cz@W)
return W, D, A
class SPoC(BaseEstimator, TransformerMixin):
"""Source Power Comodulation (SPoC).
For continuous data, not verified.
"""
class FBCSP(FilterBank):
"""FBCSP.
FilterBank CSP based on paper [1]_.
References
----------
.. [1] Ang K K, Chin Z Y, Zhang H, et al. Filter bank common spatial pattern (FBCSP) in brain-computer interface[C]//2008 IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence). IEEE, 2008: 2390-2397.
"""
| 36.240933
| 315
| 0.589356
|
# -*- coding: utf-8 -*-
#
# Authors: Swolf <swolfforever@gmail.com>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from copy import deepcopy
from typing import Union, Optional, List, Dict, Tuple
from functools import partial
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh, pinv, solve
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, ShuffleSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.pipeline import make_pipeline
from .base import robust_pattern, FilterBank
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel in CSP algorithm based on paper [1]_.
Parameters
----------
X: ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y: ndarray
labels of X, shape (n_trials,).
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
if len(labels) != 2:
        raise ValueError("the current kernel is for a 2-class problem.")
C1 = covariances(X[y==labels[0]])
C2 = covariances(X[y==labels[1]])
# # trace normalization
# # this operation equals to trial normalization
# C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
# C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C1 = np.mean(C1, axis=0)
C2 = np.mean(C2, axis=0)
Cc = C1 + C2
# check positive-definiteness
Cc = nearestPD(Cc)
# generalized eigenvalue problem
D, W = eigh(C1, Cc)
ix = np.argsort(D)[::-1]
W = W[:, ix]
D = D[ix]
A = robust_pattern(W, C1, W.T@C1@W)
return W, D, A
def csp_feature(W: ndarray, X: ndarray,
n_components: int = 2) -> ndarray:
"""Return CSP features in paper [1]_.
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
the first k components to use, usually even number, by default 2
Returns
-------
ndarray
features of shape (n_trials, n_features)
Raises
------
ValueError
        n_components should be less than the number of channels
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
        raise ValueError("n_components should be less than the number of channels")
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
# normalized variance
features = np.mean(np.square(np.matmul(W[:, :n_components].T, X)), axis=-1)
features = features / (np.sum(features, axis=-1, keepdims=True) + eps)
# log-transformation
features = np.log(np.clip(features, eps, None))
return features
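# Illustrative usage sketch (hypothetical, not part of the original module):
# it shows how csp_kernel and csp_feature defined above could be chained on
# synthetic 2-class data. The random arrays are placeholder assumptions, not
# meaningful EEG recordings.
def _example_csp_usage():
    """Return the feature shape obtained from a toy CSP pipeline."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 8, 256)           # 20 trials, 8 channels, 256 samples
    y = np.array([0] * 10 + [1] * 10)   # two balanced classes
    W, D, A = csp_kernel(X, y)          # spatial filters, eigenvalues, patterns
    features = csp_feature(W, X, n_components=4)
    return features.shape               # expected: (20, 4)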
def _rjd(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on jacobi angle.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for the stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
JADE. The code is a translation of the matlab code provided in the author
website.
References
----------
.. [1] Cardoso, Jean-Francois, and Antoine Souloumiac. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
"""
# reshape input matrix
A = np.concatenate(X, 0).T
# init variables
m, nm = A.shape
V = np.eye(m)
encore = True
k = 0
while encore:
encore = False
k += 1
if k > n_iter_max:
break
for p in range(m - 1):
for q in range(p + 1, m):
Ip = np.arange(p, nm, m)
Iq = np.arange(q, nm, m)
# computation of Givens angle
g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
gg = np.dot(g, g.T)
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton +
np.sqrt(ton * ton + toff * toff))
c = np.cos(theta)
s = np.sin(theta)
encore = encore | (np.abs(s) > eps)
if (np.abs(s) > eps):
tmp = A[:, Ip].copy()
A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
A[:, Iq] = c * A[:, Iq] - s * tmp
tmp = A[p, :].copy()
A[p, :] = c * A[p, :] + s * A[q, :]
A[q, :] = c * A[q, :] - s * tmp
tmp = V[:, p].copy()
V[:, p] = c * V[:, p] + s * V[:, q]
V[:, q] = c * V[:, q] - s * tmp
D = np.reshape(A, (m, int(nm / m), m)).transpose(1, 0, 2)
return V, D
def _ajd_pham(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on pham's algorithm.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for the stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the PHAM's AJD algorithm [1]_.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive definite Hermitian matrices." SIAM Journal on Matrix Analysis and Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(n_iter_max):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V.T, D
def _uwedge(X, init=None, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization algorithm UWEDGE.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
init : None | ndarray, optional
Initialization for the diagonalizer, shape (n_channels, n_channels).
eps : float, optional
        Tolerance for the stopping criterion (default 1e-9).
n_iter_max : int
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
W_est : ndarray
The diagonalizer, shape (n_filters, n_channels), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
Uniformly Weighted Exhaustive Diagonalization using Gauss iteration
(U-WEDGE). Implementation of the AJD algorithm by Tichavsky and Yeredor [1]_ [2]_.
This is a translation from the matlab code provided by the authors.
References
----------
.. [1] P. Tichavsky, A. Yeredor and J. Nielsen, "A Fast Approximate Joint Diagonalization Algorithm Using a Criterion with a Block Diagonal Weight Matrix", ICASSP 2008, Las Vegas.
.. [2] P. Tichavsky and A. Yeredor, "Fast Approximate Joint Diagonalization Incorporating Weight Matrices" IEEE Transactions of Signal Processing, 2009.
"""
L, d, _ = X.shape
# reshape input matrix
M = np.concatenate(X, 0).T
# init variables
d, Md = M.shape
iteration = 0
improve = 10
if init is None:
E, H = np.linalg.eig(M[:, 0:d])
W_est = np.dot(np.diag(1. / np.sqrt(np.abs(E))), H.T)
else:
W_est = init
Ms = np.array(M)
Rs = np.zeros((d, L))
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
M[:, Il] = 0.5*(M[:, Il] + M[:, Il].T)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit = np.sum(Ms**2) - np.sum(Rs**2)
while (improve > eps) & (iteration < n_iter_max):
B = np.dot(Rs, Rs.T)
C1 = np.zeros((d, d))
for i in range(d):
C1[:, i] = np.sum(Ms[:, i:Md:d]*Rs, axis=1)
D0 = B*B.T - np.outer(np.diag(B), np.diag(B))
A0 = (C1 * B - np.dot(np.diag(np.diag(B)), C1.T)) / (D0 + np.eye(d))
A0 += np.eye(d)
W_est = np.linalg.solve(A0, W_est)
Raux = np.dot(np.dot(W_est, M[:, 0:d]), W_est.T)
aux = 1./np.sqrt(np.abs(np.diag(Raux)))
W_est = np.dot(np.diag(aux), W_est)
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit_new = np.sum(Ms**2) - np.sum(Rs**2)
improve = np.abs(crit_new - crit)
crit = crit_new
iteration += 1
D = np.reshape(Ms, (d, L, d)).transpose(1, 0, 2)
return W_est.T, D
ajd_methods = {
'rjd': _rjd,
'ajd_pham': _ajd_pham,
'uwedge': _uwedge
}
def _check_ajd_method(method):
"""Check if a given method is valid.
Parameters
----------
method : callable object or str
Could be the name of ajd_method or a callable method itself.
Returns
-------
method: callable object
A callable ajd method.
"""
if callable(method):
pass
elif method in ajd_methods.keys():
method = ajd_methods[method]
else:
        raise ValueError(
            """%s is not a valid method! Valid methods are: %s or a
            callable function""" % (method, (', ').join(ajd_methods.keys())))
return method
def ajd(X: ndarray, method: str ='uwedge') -> Tuple[ndarray, ndarray]:
"""Wrapper of AJD methods.
Parameters
----------
X : ndarray
Input covariance matrices, shape (n_trials, n_channels, n_channels)
method : str, optional
AJD method (default uwedge).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The mean of quasi diagonal matrices, shape (n_channels,).
"""
method = _check_ajd_method(method)
V, D = method(X)
D = np.diag(np.mean(D, axis=0))
ind = np.argsort(D)[::-1]
D = D[ind]
V = V[:, ind]
return V, D
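# Illustrative usage sketch (hypothetical, not part of the original module):
# approximate joint diagonalization of a small stack of symmetric
# positive-definite matrices with the ajd wrapper defined above.
def _example_ajd_usage():
    """Return the shapes of the diagonalizer and its averaged eigenvalues."""
    rng = np.random.RandomState(42)
    A = rng.randn(5, 6, 6)
    covs = A @ A.transpose(0, 2, 1) + 1e-3 * np.eye(6)   # (5, 6, 6) SPD stack
    V, D = ajd(covs, method='uwedge')
    return V.shape, D.shape                              # expected: (6, 6), (6,)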
def gw_csp_kernel(X: ndarray, y: ndarray,
ajd_method: str = 'uwedge') -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""Grosse-Wentrup AJD method based on paper [1]_.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y : ndarray
labels, shape (n_trials).
ajd_method : str, optional
        ajd methods, 'uwedge', 'rjd' and 'ajd_pham', by default 'uwedge'.
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
mutual_info: ndarray
        Mutual information values, shape (n_filters,).
References
----------
.. [1] Grosse-Wentrup, Moritz, and Martin Buss. "Multiclass common spatial patterns and information theoretic feature extraction." Biomedical Engineering, IEEE Transactions on 55, no. 8 (2008): 1991-2000.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
Cx = []
for label in labels:
C = covariances(X[y==label])
# trace normalization
C = C / np.trace(C, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
Cx.append(np.mean(C, axis=0))
Cx = np.stack(Cx)
W, D = ajd(Cx, method=ajd_method)
# Ctot = np.mean(Cx, axis=0)
# W = W / np.sqrt(np.diag(W.T@Ctot@W))
W = W / np.sqrt(D)
# compute mutual information values
Pc = [np.mean(y == label) for label in labels]
mutual_info = []
for j in range(W.shape[-1]):
a = 0
b = 0
for i in range(len(labels)):
# tmp = np.dot(np.dot(W[j], self.C_[i]), W[j].T)
tmp = W[:, j].T@Cx[i]@W[:, j]
a += Pc[i] * np.log(np.sqrt(tmp))
b += Pc[i] * (tmp ** 2 - 1)
mi = - (a + (3.0 / 16) * (b ** 2))
mutual_info.append(mi)
mutual_info = np.array(mutual_info)
ix = np.argsort(mutual_info)[::-1]
W = W[:, ix]
mutual_info = mutual_info[ix]
D = D[ix]
A = robust_pattern(W, Cx[0], W.T@Cx[0]@W)
return W, D, A, mutual_info
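# Illustrative usage sketch (hypothetical, not part of the original module):
# the Grosse-Wentrup kernel on synthetic 3-class data; filters come back
# ordered by decreasing mutual information.
def _example_gw_csp_usage():
    """Return the shapes of the multiclass spatial filters and their MI values."""
    rng = np.random.RandomState(1)
    X = rng.randn(30, 8, 128)
    y = np.repeat([0, 1, 2], 10)
    W, D, A, mutual_info = gw_csp_kernel(X, y, ajd_method='uwedge')
    return W.shape, mutual_info.shape   # expected: (8, 8), (8,)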
class CSP(BaseEstimator, TransformerMixin):
"""Common Spatial Pattern.
    If n_components is None, the best number of components is found automatically with a grid search. The upper search limit is determined by max_components; if max_components is None, the search covers all spatial filters.
"""
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None):
self.n_components = n_components
self.max_components = max_components
def fit(self, X: ndarray, y: ndarray):
self.classes_ = np.unique(y)
self.W_, self.D_, self.A_ = csp_kernel(X, y)
# resorting with 0.5 threshold
self.D_ = np.abs(self.D_ - 0.5)
ind = np.argsort(self.D_, axis=-1)[::-1]
self.W_, self.D_, self.A_ = self.W_[:, ind], self.D_[ind], self.A_[:, ind]
# auto-tuning
if self.n_components is None:
estimator = make_pipeline(*[CSP(n_components=self.n_components), SVC()])
if self.max_components is None:
params = {'csp__n_components': np.arange(1, self.W_.shape[1]+1)}
else:
params = {'csp__n_components': np.arange(1, self.max_components+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(X, y)
self.best_n_components_ = gs.best_params_['csp__n_components']
return self
def transform(self, X: ndarray):
n_components = self.best_n_components_ if self.n_components is None else self.n_components
return csp_feature(self.W_, X, n_components=n_components)
class MultiCSP(BaseEstimator, TransformerMixin):
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None,
multiclass: str = 'ovr', ajd_method: str ='uwedge'):
self.n_components = n_components
self.max_components = max_components
self.multiclass = multiclass
self.ajd_method = ajd_method
def fit(self, X: ndarray, y: ndarray):
self.classes_ = np.unique(y)
if self.multiclass == 'ovr':
self.estimator_ = OneVsRestClassifier(
make_pipeline(*[
CSP(n_components=self.n_components, max_components=self.max_components), SVC()
]), n_jobs=-1)
self.estimator_.fit(X, y)
elif self.multiclass == 'ovo':
self.estimator_ = OneVsOneClassifier(
make_pipeline(*[
CSP(n_components=self.n_components, max_components=self.max_components), SVC()
]), n_jobs=-1)
# patching avoiding 2d array check
self.estimator_._validate_data = partial(self.estimator_._validate_data, allow_nd=True)
self.estimator_.fit(X, y)
elif self.multiclass == 'grosse-wentrup':
self.W_, _, self.A_, self.mutualinfo_values_ = gw_csp_kernel(
X, y, ajd_method=self.ajd_method)
if self.n_components is None:
estimator = make_pipeline(*[
MultiCSP(n_components=self.n_components, multiclass='grosse-wentrup', ajd_method=self.ajd_method), SVC()
])
if self.max_components is None:
params = {'multicsp__n_components': np.arange(1, self.W_.shape[1]+1)}
else:
params = {'multicsp__n_components': np.arange(1, self.max_components+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(X, y)
self.best_n_components_ = gs.best_params_['multicsp__n_components']
else:
raise ValueError("not a valid multiclass strategy")
return self
def transform(self, X: ndarray):
if self.multiclass == 'grosse-wentrup':
n_components = self.best_n_components_ if self.n_components is None else self.n_components
features = csp_feature(self.W_, X, n_components=n_components)
else:
features = np.concatenate([est[0].transform(X) for est in self.estimator_.estimators_], axis=-1)
return features
def spoc_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""Source Power Comodulation (SPoC) based on paper [1]_.
    It is a continuous CSP-like method.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
y : ndarray
labels, shape (n_trials)
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Sven Dähne, Frank C. Meinecke, Stefan Haufe, Johannes Höhne, Michael Tangermann, Klaus-Robert Müller, and Vadim V. Nikulin. SPoC: a novel framework for relating the amplitude of neuronal oscillations to behaviorally relevant parameters. NeuroImage, 86:111–122, 2014. doi:10.1016/j.neuroimage.2013.07.079.
"""
X, weights = np.copy(X), np.copy(y)
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
weights = weights - np.mean(weights)
weights = weights / np.std(weights)
Cx = covariances(X)
# trace normalization
Cx = Cx / np.trace(Cx, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C = np.mean(Cx, axis=0)
Cz = np.mean(weights[:, np.newaxis, np.newaxis]*Cx, axis=0)
# check positive-definiteness
C = nearestPD(C)
Cz = nearestPD(Cz)
# TODO: direct copy from pyriemann, need verify
D, W = eigh(Cz, C)
ind = np.argsort(D)[::-1]
D = D[ind]
W = W[:, ind]
A = robust_pattern(W, Cz, W.T@Cz@W)
return W, D, A
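# Illustrative usage sketch (hypothetical, not part of the original module):
# SPoC relates oscillatory power to a continuous target, so the second
# argument is a behavioural variable rather than class labels.
def _example_spoc_usage():
    """Return the shapes of the SPoC filters and eigenvalues on toy data."""
    rng = np.random.RandomState(2)
    X = rng.randn(40, 8, 128)
    z = rng.randn(40)            # continuous behavioural variable
    W, D, A = spoc_kernel(X, z)
    return W.shape, D.shape      # expected: (8, 8), (8,)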
class SPoC(BaseEstimator, TransformerMixin):
"""Source Power Comodulation (SPoC).
For continuous data, not verified.
"""
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None):
self.n_components = n_components
self.max_components = max_components
def fit(self, X: ndarray, y: ndarray):
self.W_, self.D_, self.A_ = spoc_kernel(X, y)
# auto-tuning
if self.n_components is None:
estimator = make_pipeline(*[SPoC(n_components=self.n_components), Ridge(alpha=0.5)])
if self.max_components is None:
params = {'spoc__n_components': np.arange(1, self.W_.shape[1]+1)}
else:
params = {'spoc__n_components': np.arange(1, self.max_components+1)}
test_size = 0.2 if len(y) > 5 else 1/len(y)
gs = GridSearchCV(estimator,
param_grid=params, scoring='neg_root_mean_squared_error',
cv=ShuffleSplit(n_splits=5, test_size=test_size), refit=False, n_jobs=-1, verbose=False)
gs.fit(X, y)
self.best_n_components_ = gs.best_params_['spoc__n_components']
def transform(self, X: ndarray):
n_components = self.best_n_components_ if self.n_components is None else self.n_components
return csp_feature(self.W_, X, n_components=n_components)
class FBCSP(FilterBank):
"""FBCSP.
FilterBank CSP based on paper [1]_.
References
----------
.. [1] Ang K K, Chin Z Y, Zhang H, et al. Filter bank common spatial pattern (FBCSP) in brain-computer interface[C]//2008 IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence). IEEE, 2008: 2390-2397.
"""
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None,
n_mutualinfo_components: Optional[int] = None,
filterbank: Optional[List[ndarray]] = None):
self.n_components = n_components
self.max_components = max_components
self.n_mutualinfo_components = n_mutualinfo_components
self.filterbank = filterbank
super().__init__(CSP(n_components=n_components, max_components=max_components), filterbank=filterbank)
def fit(self, X: ndarray, y: ndarray):
super().fit(X, y)
features = super().transform(X)
if self.n_mutualinfo_components is None:
estimator = make_pipeline(*[
SelectKBest(score_func=mutual_info_classif, k='all'),
SVC()
])
params = {'selectkbest__k': np.arange(1, features.shape[1]+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(features, y)
self.best_n_mutualinfo_components_ = gs.best_params_['selectkbest__k']
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.best_n_mutualinfo_components_)
else:
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.n_mutualinfo_components)
self.selector_.fit(features, y)
return self
def transform(self, X: ndarray):
features = super().transform(X)
features = self.selector_.transform(features)
return features
class FBMultiCSP(FilterBank):
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None,
multiclass: str = 'ovr', ajd_method: str ='uwedge',
n_mutualinfo_components: Optional[int] = None,
filterbank: Optional[List[ndarray]] = None):
self.n_components = n_components
self.max_components = max_components
self.multiclass = multiclass
self.ajd_method = ajd_method
self.n_mutualinfo_components = n_mutualinfo_components
        self.filterbank = filterbank
super().__init__(MultiCSP(n_components=n_components, max_components=max_components, multiclass=multiclass, ajd_method=ajd_method))
def fit(self, X: ndarray, y: ndarray):
super().fit(X, y)
features = super().transform(X)
if self.n_mutualinfo_components is None:
estimator = make_pipeline(*[
SelectKBest(score_func=mutual_info_classif, k='all'),
SVC()
])
params = {'selectkbest__k': np.arange(1, features.shape[1]+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(features, y)
self.best_n_mutualinfo_components_ = gs.best_params_['selectkbest__k']
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.best_n_mutualinfo_components_)
else:
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.n_mutualinfo_components)
self.selector_.fit(features, y)
return self
def transform(self, X: ndarray):
features = super().transform(X)
features = self.selector_.transform(features)
return features
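# Illustrative usage sketch (hypothetical, not part of the original module):
# the CSP transformer above follows the scikit-learn fit/transform protocol,
# so it can be used directly as a feature extraction step.
def _example_csp_transformer():
    """Return the feature shape produced by a fitted CSP transformer."""
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8, 128)
    y = np.array([0, 1] * 15)
    csp = CSP(n_components=4)            # fixed size, so no grid search is run
    features = csp.fit(X, y).transform(X)
    return features.shape                # expected: (30, 4)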
| 9,176
| 35
| 446
|
a213ac945ac3eff393596fccbd49623779d35895
| 16,917
|
py
|
Python
|
teller/explainer/explainer.py
|
Techtonique/teller
|
3571353b843179335e3995a0128d4a0c54c2b905
|
[
"BSD-3-Clause-Clear"
] | 5
|
2021-07-14T11:57:36.000Z
|
2022-03-26T19:47:54.000Z
|
teller/explainer/explainer.py
|
Techtonique/teller
|
3571353b843179335e3995a0128d4a0c54c2b905
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-21T17:53:37.000Z
|
2022-01-26T11:36:32.000Z
|
teller/explainer/explainer.py
|
Techtonique/teller
|
3571353b843179335e3995a0128d4a0c54c2b905
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-21T17:51:00.000Z
|
2021-12-21T17:51:00.000Z
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.style as style
from sklearn.base import BaseEstimator
from ..utils import (
is_factor,
numerical_gradient,
numerical_gradient_jackknife,
numerical_gradient_gaussian,
numerical_interactions,
numerical_interactions_jackknife,
numerical_interactions_gaussian,
Progbar,
score_regression,
score_classification,
)
class Explainer(BaseEstimator):
"""Class Explainer: effects of features on the response.
Attributes:
obj: an object;
fitted object containing methods `fit` and `predict`
n_jobs: an integer;
number of jobs for parallel computing
y_class: an integer;
class whose probability has to be explained (for classification only)
normalize: a boolean;
whether the features must be normalized or not (changes the effects)
"""
def fit(
self,
X,
y,
X_names,
method="avg",
type_ci="jackknife",
scoring=None,
level=95,
col_inters=None,
):
"""Fit the explainer's attribute `obj` to training data (X, y).
Args:
X: array-like, shape = [n_samples, n_features];
Training vectors, where n_samples is the number
of samples and n_features is the number of features.
y: array-like, shape = [n_samples, ]; Target values.
X_names: {array-like}, shape = [n_features, ];
Column names (strings) for training vectors.
method: str;
Type of summary requested for effects. Either `avg`
(for average effects), `inters` (for interactions)
or `ci` (for effects including confidence intervals
around them).
type_ci: str;
Type of resampling for `method == 'ci'` (confidence
intervals around effects). Either `jackknife`
                bootstrapping or `gaussian` (gaussian white noise with
standard deviation equal to `0.01` applied to the
features).
scoring: str;
measure of errors must be in ("explained_variance",
"neg_mean_absolute_error", "neg_mean_squared_error",
"neg_mean_squared_log_error", "neg_median_absolute_error",
"r2", "rmse") (default: "rmse").
level: int; Level of confidence required for
`method == 'ci'` (in %).
col_inters: str; Name of column for computing interactions.
"""
assert method in (
"avg",
"ci",
"inters",
), "must have: `method` in ('avg', 'ci', 'inters')"
n, p = X.shape
self.X_names = X_names
self.level = level
self.method = method
self.type_ci = type_ci
if is_factor(y): # classification ---
self.n_classes = len(np.unique(y))
assert (
self.y_class <= self.n_classes
), "self.y_class must be <= number of classes"
assert hasattr(
self.obj, "predict_proba"
), "`self.obj` must be a classifier and have a method `predict_proba`"
self.type_fit = "classification"
if scoring is None:
self.scoring = "accuracy"
self.score_ = score_classification(self.obj, X, y, scoring=self.scoring)
y_hat = predict_proba(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
else: # is_factor(y) == False # regression ---
self.type_fit = "regression"
if scoring is None:
self.scoring = "rmse"
self.score_ = score_regression(self.obj, X, y, scoring=self.scoring)
y_hat = self.obj.predict(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
self.y_mean_ = np.mean(y)
ss_tot = np.sum((y - self.y_mean_) ** 2)
ss_reg = np.sum((y_hat - self.y_mean_) ** 2)
ss_res = np.sum((y - y_hat) ** 2)
self.residuals_ = y - y_hat
self.r_squared_ = 1 - ss_res / ss_tot
self.adj_r_squared_ = 1 - (1 - self.r_squared_) * (n - 1) / (
n - p - 1
)
# classification and regression ---
if method == "avg":
res_df = pd.DataFrame(data=self.grad_, columns=X_names)
res_df_mean = res_df.mean()
res_df_std = res_df.std()
res_df_median = res_df.median()
res_df_min = res_df.min()
res_df_max = res_df.max()
data = pd.concat(
[res_df_mean, res_df_std, res_df_median, res_df_min, res_df_max],
axis=1
)
df_effects = pd.DataFrame(
data=data.values,
columns=["mean", "std", "median", "min", "max"],
index=X_names,
)
# heterogeneity of effects
self.effects_ = df_effects.sort_values(by=["mean"], ascending=False)
return self
def summary(self):
"""Summarise results
a method in class Explainer
Args:
None
"""
assert (
(self.ci_ is not None)
| (self.effects_ is not None)
| (self.ci_inters_ is not None)
), "object not fitted, fit the object first"
if (self.ci_ is not None) & (self.method == "ci"):
# (mean_est, se_est,
# mean_est + qt*se_est, mean_est - qt*se_est,
# p_values, signif_codes)
df_mean = pd.Series(data=self.ci_[0], index=self.X_names)
df_se = pd.Series(data=self.ci_[1], index=self.X_names)
df_ubound = pd.Series(data=self.ci_[2], index=self.X_names)
df_lbound = pd.Series(data=self.ci_[3], index=self.X_names)
df_pvalue = pd.Series(data=self.ci_[4], index=self.X_names)
df_signif = pd.Series(data=self.ci_[5], index=self.X_names)
data = pd.concat(
[df_mean, df_se, df_lbound, df_ubound, df_pvalue, df_signif],
axis=1,
)
self.ci_summary_ = pd.DataFrame(
data=data.values,
columns=[
"Estimate",
"Std. Error",
str(self.level) + "% lbound",
str(self.level) + "% ubound",
"Pr(>|t|)",
"",
],
index=self.X_names,
).sort_values(by=["Estimate"], ascending=False)
print("\n")
print(f"Score ({self.scoring}): \n {np.round(self.score_, 3)}")
if self.type_fit == "regression":
print("\n")
print("Residuals: ")
self.residuals_dist_ = pd.DataFrame(
pd.Series(
data=np.quantile(
self.residuals_, q=[0, 0.25, 0.5, 0.75, 1]
),
index=["Min", "1Q", "Median", "3Q", "Max"],
)
).transpose()
print(self.residuals_dist_.to_string(index=False))
print("\n")
if self.type_ci=="jackknife":
print("Tests on marginal effects (Jackknife): ")
if self.type_ci=="gaussian":
print("Tests on marginal effects (Gaussian noise): ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.ci_summary_)
print("\n")
print(
"Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘-’ 1"
)
if self.type_fit == "regression":
print("\n")
print(
f"Multiple R-squared: {np.round(self.r_squared_, 3)}, Adjusted R-squared: {np.round(self.adj_r_squared_, 3)}"
)
if (self.effects_ is not None) & (self.method == "avg"):
print("\n")
print("Heterogeneity of marginal effects: ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.effects_)
print("\n")
if (self.ci_inters_ is not None) & (self.method == "inters"):
print("\n")
print("Interactions with " + self.col_inters + ": ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(
pd.DataFrame(
self.ci_inters_,
index=[
"Estimate",
"Std. Error",
str(95) + "% lbound",
str(95) + "% ubound",
"Pr(>|t|)",
"",
],
).transpose()
)
def plot(self, what):
"""Plot average effects, heterogeneity of effects, ...
Args:
what: a string;
                either 'average_effects' or 'hetero_effects'.
"""
assert self.effects_ is not None, "Call method 'fit' before plotting"
assert self.grad_ is not None, "Call method 'fit' before plotting"
# For method == "avg"
if (self.method == "avg"):
if(what == "average_effects"):
sns.set(style="darkgrid")
fi = pd.DataFrame()
fi['features'] = self.effects_.index.values
fi['effect'] = self.effects_['mean'].values
sns.barplot(x='effect', y='features',
data=fi.sort_values(by='effect', ascending=False))
if(what == "hetero_effects"):
grads_df = pd.DataFrame(data=self.grad_, columns=self.X_names)
sorted_columns = list(self.effects_.index.values) # by mean
sorted_columns.reverse()
grads_df = grads_df.reindex(sorted_columns, axis=1)
sns.set(style="darkgrid")
grads_df.boxplot(vert=False)
# For method == "ci"
if (self.method == "ci"):
assert self.ci_ is not None, "Call method 'fit' before plotting"
raise NotImplementedError("No plot for method == 'ci' yet")
| 32.284351
| 131
| 0.435952
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.style as style
from sklearn.base import BaseEstimator
from ..utils import (
is_factor,
numerical_gradient,
numerical_gradient_jackknife,
numerical_gradient_gaussian,
numerical_interactions,
numerical_interactions_jackknife,
numerical_interactions_gaussian,
Progbar,
score_regression,
score_classification,
)
class Explainer(BaseEstimator):
"""Class Explainer: effects of features on the response.
Attributes:
obj: an object;
fitted object containing methods `fit` and `predict`
n_jobs: an integer;
number of jobs for parallel computing
y_class: an integer;
class whose probability has to be explained (for classification only)
normalize: a boolean;
whether the features must be normalized or not (changes the effects)
"""
def __init__(self, obj, n_jobs=None, y_class=0, normalize=False):
self.obj = obj
self.n_jobs = n_jobs
self.y_mean_ = None
self.effects_ = None
self.residuals_ = None
self.r_squared_ = None
self.adj_r_squared_ = None
self.effects_ = None
self.ci_ = None
self.ci_inters_ = {}
self.type_fit = None
self.y_class = y_class # classification only
self.normalize = normalize
self.type_ci = None
def fit(
self,
X,
y,
X_names,
method="avg",
type_ci="jackknife",
scoring=None,
level=95,
col_inters=None,
):
"""Fit the explainer's attribute `obj` to training data (X, y).
Args:
X: array-like, shape = [n_samples, n_features];
Training vectors, where n_samples is the number
of samples and n_features is the number of features.
y: array-like, shape = [n_samples, ]; Target values.
X_names: {array-like}, shape = [n_features, ];
Column names (strings) for training vectors.
method: str;
Type of summary requested for effects. Either `avg`
(for average effects), `inters` (for interactions)
or `ci` (for effects including confidence intervals
around them).
type_ci: str;
Type of resampling for `method == 'ci'` (confidence
intervals around effects). Either `jackknife`
                bootstrapping or `gaussian` (gaussian white noise with
standard deviation equal to `0.01` applied to the
features).
scoring: str;
measure of errors must be in ("explained_variance",
"neg_mean_absolute_error", "neg_mean_squared_error",
"neg_mean_squared_log_error", "neg_median_absolute_error",
"r2", "rmse") (default: "rmse").
level: int; Level of confidence required for
`method == 'ci'` (in %).
col_inters: str; Name of column for computing interactions.
"""
assert method in (
"avg",
"ci",
"inters",
), "must have: `method` in ('avg', 'ci', 'inters')"
n, p = X.shape
self.X_names = X_names
self.level = level
self.method = method
self.type_ci = type_ci
if is_factor(y): # classification ---
self.n_classes = len(np.unique(y))
assert (
self.y_class <= self.n_classes
), "self.y_class must be <= number of classes"
assert hasattr(
self.obj, "predict_proba"
), "`self.obj` must be a classifier and have a method `predict_proba`"
self.type_fit = "classification"
if scoring is None:
self.scoring = "accuracy"
self.score_ = score_classification(self.obj, X, y, scoring=self.scoring)
def predict_proba(x):
return self.obj.predict_proba(x)[:, self.y_class]
y_hat = predict_proba(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
else: # is_factor(y) == False # regression ---
self.type_fit = "regression"
if scoring is None:
self.scoring = "rmse"
self.score_ = score_regression(self.obj, X, y, scoring=self.scoring)
y_hat = self.obj.predict(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
self.y_mean_ = np.mean(y)
ss_tot = np.sum((y - self.y_mean_) ** 2)
ss_reg = np.sum((y_hat - self.y_mean_) ** 2)
ss_res = np.sum((y - y_hat) ** 2)
self.residuals_ = y - y_hat
self.r_squared_ = 1 - ss_res / ss_tot
self.adj_r_squared_ = 1 - (1 - self.r_squared_) * (n - 1) / (
n - p - 1
)
# classification and regression ---
if method == "avg":
res_df = pd.DataFrame(data=self.grad_, columns=X_names)
res_df_mean = res_df.mean()
res_df_std = res_df.std()
res_df_median = res_df.median()
res_df_min = res_df.min()
res_df_max = res_df.max()
data = pd.concat(
[res_df_mean, res_df_std, res_df_median, res_df_min, res_df_max],
axis=1
)
df_effects = pd.DataFrame(
data=data.values,
columns=["mean", "std", "median", "min", "max"],
index=X_names,
)
# heterogeneity of effects
self.effects_ = df_effects.sort_values(by=["mean"], ascending=False)
return self
def summary(self):
"""Summarise results
a method in class Explainer
Args:
None
"""
assert (
(self.ci_ is not None)
| (self.effects_ is not None)
| (self.ci_inters_ is not None)
), "object not fitted, fit the object first"
if (self.ci_ is not None) & (self.method == "ci"):
# (mean_est, se_est,
# mean_est + qt*se_est, mean_est - qt*se_est,
# p_values, signif_codes)
df_mean = pd.Series(data=self.ci_[0], index=self.X_names)
df_se = pd.Series(data=self.ci_[1], index=self.X_names)
df_ubound = pd.Series(data=self.ci_[2], index=self.X_names)
df_lbound = pd.Series(data=self.ci_[3], index=self.X_names)
df_pvalue = pd.Series(data=self.ci_[4], index=self.X_names)
df_signif = pd.Series(data=self.ci_[5], index=self.X_names)
data = pd.concat(
[df_mean, df_se, df_lbound, df_ubound, df_pvalue, df_signif],
axis=1,
)
self.ci_summary_ = pd.DataFrame(
data=data.values,
columns=[
"Estimate",
"Std. Error",
str(self.level) + "% lbound",
str(self.level) + "% ubound",
"Pr(>|t|)",
"",
],
index=self.X_names,
).sort_values(by=["Estimate"], ascending=False)
print("\n")
print(f"Score ({self.scoring}): \n {np.round(self.score_, 3)}")
if self.type_fit == "regression":
print("\n")
print("Residuals: ")
self.residuals_dist_ = pd.DataFrame(
pd.Series(
data=np.quantile(
self.residuals_, q=[0, 0.25, 0.5, 0.75, 1]
),
index=["Min", "1Q", "Median", "3Q", "Max"],
)
).transpose()
print(self.residuals_dist_.to_string(index=False))
print("\n")
if self.type_ci=="jackknife":
print("Tests on marginal effects (Jackknife): ")
if self.type_ci=="gaussian":
print("Tests on marginal effects (Gaussian noise): ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.ci_summary_)
print("\n")
print(
"Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘-’ 1"
)
if self.type_fit == "regression":
print("\n")
print(
f"Multiple R-squared: {np.round(self.r_squared_, 3)}, Adjusted R-squared: {np.round(self.adj_r_squared_, 3)}"
)
if (self.effects_ is not None) & (self.method == "avg"):
print("\n")
print("Heterogeneity of marginal effects: ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.effects_)
print("\n")
if (self.ci_inters_ is not None) & (self.method == "inters"):
print("\n")
print("Interactions with " + self.col_inters + ": ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(
pd.DataFrame(
self.ci_inters_,
index=[
"Estimate",
"Std. Error",
str(95) + "% lbound",
str(95) + "% ubound",
"Pr(>|t|)",
"",
],
).transpose()
)
def plot(self, what):
"""Plot average effects, heterogeneity of effects, ...
Args:
what: a string;
                either 'average_effects' or 'hetero_effects'.
"""
assert self.effects_ is not None, "Call method 'fit' before plotting"
assert self.grad_ is not None, "Call method 'fit' before plotting"
# For method == "avg"
if (self.method == "avg"):
if(what == "average_effects"):
sns.set(style="darkgrid")
fi = pd.DataFrame()
fi['features'] = self.effects_.index.values
fi['effect'] = self.effects_['mean'].values
sns.barplot(x='effect', y='features',
data=fi.sort_values(by='effect', ascending=False))
if(what == "hetero_effects"):
grads_df = pd.DataFrame(data=self.grad_, columns=self.X_names)
sorted_columns = list(self.effects_.index.values) # by mean
sorted_columns.reverse()
grads_df = grads_df.reindex(sorted_columns, axis=1)
sns.set(style="darkgrid")
grads_df.boxplot(vert=False)
# For method == "ci"
if (self.method == "ci"):
assert self.ci_ is not None, "Call method 'fit' before plotting"
raise NotImplementedError("No plot for method == 'ci' yet")
def get_individual_effects(self):
assert self.grad_ is not None, "Call method 'fit' before calling this method"
if self.method == "avg":
return pd.DataFrame(data=self.grad_, columns=self.X_names)
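# Illustrative usage sketch (hypothetical, not part of the original module):
# explaining a fitted scikit-learn regressor on a toy dataset. The dataset,
# model and parameter values are assumptions made only for this example.
def _example_explainer_usage():
    """Fit an Explainer with method='avg' and return the effects table."""
    from sklearn.datasets import load_diabetes
    from sklearn.ensemble import RandomForestRegressor
    data = load_diabetes()
    X, y, names = data.data, data.target, np.asarray(data.feature_names)
    model = RandomForestRegressor(n_estimators=50, random_state=0).fit(X, y)
    expl = Explainer(obj=model)               # obj must expose fit/predict
    expl.fit(X, y, X_names=names, method="avg")
    expl.summary()                            # prints the available summary
    return expl.effects_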
| 748
| 0
| 93
|
70d25e9deb9ce5482aecfe92367ea925fc132f5b
| 4,271
|
py
|
Python
|
script/extract_spotting_area.py
|
jingyonghou/XY_QByE_STD
|
ca2a07c70ea7466ee363cd0b81808c6794a400e5
|
[
"Apache-2.0"
] | null | null | null |
script/extract_spotting_area.py
|
jingyonghou/XY_QByE_STD
|
ca2a07c70ea7466ee363cd0b81808c6794a400e5
|
[
"Apache-2.0"
] | null | null | null |
script/extract_spotting_area.py
|
jingyonghou/XY_QByE_STD
|
ca2a07c70ea7466ee363cd0b81808c6794a400e5
|
[
"Apache-2.0"
] | 1
|
2020-07-28T06:02:03.000Z
|
2020-07-28T06:02:03.000Z
|
import numpy as np
import sys
import wavedata
import random
import os
if __name__=="__main__":
if len(sys.argv) < 7:
print("USAGE: python %s result_dir keywordlist testlist testscp textfile ourdir"%sys.argv[0])
exit(1)
result_dir = sys.argv[1]
keywordlist = open(sys.argv[2]).readlines()
testlist = open(sys.argv[3]).readlines()
doc_scp_file = sys.argv[4]
relevant_dict = build_relevant_dict(sys.argv[5])
out_dir = sys.argv[6]
scorelist_all = []
arealist_all = []
for keyword in keywordlist:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
resultlist = result_fid.readlines()
result_fid.close()
scorelist = []
arealist = []
for res in resultlist:
fields =res.strip().split()
score = float(fields[0])
start_point = int(fields[1])
end_point = int(fields[2])
scorelist.append(score)
arealist.append((start_point, end_point))
scorelist_all.append(scorelist)
arealist_all.append(arealist)
extract_list_all = extract_spotting_area(scorelist_all, arealist_all, keywordlist, testlist, relevant_dict)
write_spot_wave(extract_list_all, doc_scp_file, out_dir)
| 36.818966
| 199
| 0.618356
|
import numpy as np
import sys
import wavedata
import random
import os
def relevant(query, text_id, relevant_dict):
if text_id in relevant_dict[query]:
return True
return False
def build_relevant_dict(text_file):
relevant_dict = {}
for line in open(text_file).readlines():
fields = line.strip().split()
text_id = fields[0]
for i in range(1, len(fields)):
keyword_id = fields[i]
            if keyword_id not in relevant_dict:
relevant_dict[keyword_id]=set()
relevant_dict[keyword_id].add(text_id)
return relevant_dict
def extract_spotting_area(scorelist_all, arealist_all, querylist, doclist, relevant_dict):
extract_list_all = []
for i in range(len(querylist)):
true_list=[]
false_list=[]
extract_list=[]
ranklist = np.array(scorelist_all[i]).argsort()
for j in range(len(ranklist)):
j_r = ranklist[j]
keyword_id = querylist[i].strip()
keyword = querylist[i].strip().split("_")[0]
utt_id = doclist[j_r].strip()
doc_id = "_".join(doclist[j_r].strip().split("_")[:-1])
if relevant(keyword, doc_id, relevant_dict):
true_list.append([ keyword_id, utt_id, 1, scorelist_all[i][j_r], j, arealist_all[i][j_r] ])
else:
false_list.append([ keyword_id, utt_id, 0, scorelist_all[i][j_r], j, arealist_all[i][j_r] ])
true_num = len(true_list)
extract_list = true_list + false_list[0:true_num]
extract_list_all.append(extract_list)
return extract_list_all
def frame_to_point(frame_pair):
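    # Added descriptive comment (assumption inferred from the 8000 Hz write
    # rate below): frames appear to use a 10 ms shift, so a frame index maps
    # to frame * 10 ms * 8 samples/ms; the end point adds one 25 ms window.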
return (frame_pair[0]*10*8, frame_pair[1]*10*8+25*8)
def write_spot_wave(extract_list_all, doc_scp, out_dir):
doc_dic = {}
for line in open(doc_scp).readlines():
fields = line.strip().split()
if len(fields) != 2:
            print("Error: the doc scp file line does not have 2 fields\n")
exit(1)
doc_id = fields[0]
wav_path = fields[1]
        if doc_id in doc_dic:
print("Error: repeat key in doc scp file\n")
doc_dic[doc_id] = wav_path
for extract_list in extract_list_all:
keyword_id = extract_list[0][0]
keyword_out_dir = out_dir + "-".join(keyword_id.split("'"))
cmd = "mkdir -p " + keyword_out_dir
os.system(cmd)
for item in extract_list:
doc_id = item[1]
has_keyword = item[2]
score = item[3]
rank_position = item[4]
extract_point = frame_to_point(item[5])
inputfilename = doc_dic[doc_id]
data = wavedata.readwave(inputfilename)
spotting_data = data[extract_point[0]:extract_point[1]]
outputfilename = keyword_out_dir + "/%s_%s_%s_%s_%s_%s_%s.wav"%(str(rank_position).zfill(4), str(has_keyword), str(score), str(extract_point[0]), str(extract_point[1]),keyword_id, doc_id)
wavedata.writewave(outputfilename, spotting_data, 1, 2, 8000)
if __name__=="__main__":
if len(sys.argv) < 7:
print("USAGE: python %s result_dir keywordlist testlist testscp textfile ourdir"%sys.argv[0])
exit(1)
result_dir = sys.argv[1]
keywordlist = open(sys.argv[2]).readlines()
testlist = open(sys.argv[3]).readlines()
doc_scp_file = sys.argv[4]
relevant_dict = build_relevant_dict(sys.argv[5])
out_dir = sys.argv[6]
scorelist_all = []
arealist_all = []
for keyword in keywordlist:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
resultlist = result_fid.readlines()
result_fid.close()
scorelist = []
arealist = []
for res in resultlist:
fields =res.strip().split()
score = float(fields[0])
start_point = int(fields[1])
end_point = int(fields[2])
scorelist.append(score)
arealist.append((start_point, end_point))
scorelist_all.append(scorelist)
arealist_all.append(arealist)
extract_list_all = extract_spotting_area(scorelist_all, arealist_all, keywordlist, testlist, relevant_dict)
write_spot_wave(extract_list_all, doc_scp_file, out_dir)
| 2,873
| 0
| 116
|
271deb29b66fe4e4014e52baf2d9509cf8f631f6
| 427
|
py
|
Python
|
src/smallest_integer.py
|
marco-zangari/code-katas
|
1dfda1cfbbe8687b17e97e414358b38d964df675
|
[
"MIT"
] | null | null | null |
src/smallest_integer.py
|
marco-zangari/code-katas
|
1dfda1cfbbe8687b17e97e414358b38d964df675
|
[
"MIT"
] | null | null | null |
src/smallest_integer.py
|
marco-zangari/code-katas
|
1dfda1cfbbe8687b17e97e414358b38d964df675
|
[
"MIT"
] | null | null | null |
"""Find the smallest integer in the array, Kata in Codewars."""
def smallest(alist):
"""Return the smallest integer in the list.
input: a list of integers
output: a single integer
    ex: [34, 15, 88, 2] should return 2
ex: [34, -345, -1, 100] should return -345
"""
res = [alist[0]]
for num in alist:
if res[0] > num:
res.pop()
res.append(num)
return res[0]
| 23.722222
| 63
| 0.569087
|
"""Find the smallest integer in the array, Kata in Codewars."""
def smallest(alist):
"""Return the smallest integer in the list.
input: a list of integers
output: a single integer
    ex: [34, 15, 88, 2] should return 2
ex: [34, -345, -1, 100] should return -345
"""
res = [alist[0]]
for num in alist:
if res[0] > num:
res.pop()
res.append(num)
return res[0]
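# Illustrative usage added for clarity (not part of the original kata solution).
if __name__ == '__main__':
    assert smallest([34, 15, 88, 2]) == 2
    assert smallest([34, -345, -1, 100]) == -345
    print('smallest() sanity checks passed')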
| 0
| 0
| 0
|
e3f4e7de367fe4adbb1c08ed45342cc24a82354b
| 1,810
|
py
|
Python
|
extract-code.py
|
aaw/commafree
|
6ee17fdf1e7858546782f81b1f004659c03661d3
|
[
"Unlicense"
] | null | null | null |
extract-code.py
|
aaw/commafree
|
6ee17fdf1e7858546782f81b1f004659c03661d3
|
[
"Unlicense"
] | null | null | null |
extract-code.py
|
aaw/commafree
|
6ee17fdf1e7858546782f81b1f004659c03661d3
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
# Extracts a commafree code from a CNF file created by commafree.py and
# the output of a SAT solver on that CNF file. Only works on satisfiable
# instances.
#
# Usage: extract-code.py <cnf-file> <sat-solver-output-file>
import re
import sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: %s <cnf-file> <sat-solver-output-file>' % sys.argv[0])
sys.exit(1)
mapping = strip_cnf_mapping(sys.argv[1])
solution = strip_sat_solution(sys.argv[2])
code = [mapping[code_id] for code_id in solution
if mapping.get(code_id) is not None]
assert verify_commafree(code)
print('{' + ', '.join(sorted(code)) + '}')
print('')
print('size: %s' % len(code))
| 31.754386
| 78
| 0.570718
|
#!/usr/bin/python3
# Extracts a commafree code from a CNF file created by commafree.py and
# the output of a SAT solver on that CNF file. Only works on satisfiable
# instances.
#
# Usage: extract-code.py <cnf-file> <sat-solver-output-file>
import re
import sys
def strip_cnf_mapping(filename):
# lines look like 'c var 1 == 000001 chosen'
mapping = {}
pattern = re.compile('c var ([^\\s]+) == ([^\\s]+) chosen')
with open(filename) as f:
for line in f:
if line.startswith('p'): continue
if not line.startswith('c'): return mapping
m = re.match(pattern, line)
if m is None: continue
mapping[int(m.groups()[0])] = m.groups()[1]
return mapping
def strip_sat_solution(filename):
pos = []
with open(filename) as f:
for line in f:
if not line.startswith('v'): continue
pos += [int(x) for x in line[1:].strip().split(' ') if int(x) > 0]
return pos
def verify_commafree(codewords):
n = len(codewords[0])
cws = set(c for c in codewords)
for x in codewords:
for y in codewords:
for i in range(1,n):
cw = x[i:]+y[:i]
if cw in cws:
print("CONFLICT: %s, %s, and %s." % (x,y,cw))
return False
return True
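# Illustrative sanity check (hypothetical, not part of the original script):
# a single codeword is always commafree, while {'abc', 'bca'} is not because
# 'abc'+'abc' contains 'bca' starting at offset 1.
def _example_verify_commafree():
    assert verify_commafree(['abc'])
    assert not verify_commafree(['abc', 'bca'])
    return True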
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: %s <cnf-file> <sat-solver-output-file>' % sys.argv[0])
sys.exit(1)
mapping = strip_cnf_mapping(sys.argv[1])
solution = strip_sat_solution(sys.argv[2])
code = [mapping[code_id] for code_id in solution
if mapping.get(code_id) is not None]
assert verify_commafree(code)
print('{' + ', '.join(sorted(code)) + '}')
print('')
print('size: %s' % len(code))
| 1,004
| 0
| 69
|
31784bf0310bf0d5bfc0d90a75df67dd15a12b22
| 2,039
|
py
|
Python
|
bindings/python/native/tests/test_event.py
|
lmy441900/wallet.rs
|
4810f8205c3a3e1b7177d5fc6be92c714e0ef6eb
|
[
"Apache-2.0"
] | 135
|
2020-08-27T15:31:16.000Z
|
2022-03-28T07:52:07.000Z
|
bindings/python/native/tests/test_event.py
|
lmy441900/wallet.rs
|
4810f8205c3a3e1b7177d5fc6be92c714e0ef6eb
|
[
"Apache-2.0"
] | 263
|
2020-08-28T00:12:19.000Z
|
2022-03-29T18:54:29.000Z
|
bindings/python/native/tests/test_event.py
|
lmy441900/wallet.rs
|
4810f8205c3a3e1b7177d5fc6be92c714e0ef6eb
|
[
"Apache-2.0"
] | 56
|
2020-11-02T05:52:06.000Z
|
2022-03-13T00:21:12.000Z
|
import iota_wallet as iw
| 31.369231
| 76
| 0.772928
|
import iota_wallet as iw
def test_event_balance_change():
def on_balance_changed(event):
assert isinstance(event, str)
event_id = iw.on_balance_change(on_balance_changed)
iw.remove_balance_change_listener(bytes(event_id))
def test_event_new_transaction():
def on_new_transaction(event):
assert isinstance(event, str)
event_id = iw.on_new_transaction(on_new_transaction)
iw.remove_new_transaction_listener(bytes(event_id))
def test_event_confirmation_state_change():
def on_confirmation_state_change(event):
assert isinstance(event, str)
event_id = iw.on_confirmation_state_change(on_confirmation_state_change)
iw.remove_confirmation_state_change_listener(bytes(event_id))
def test_event_reattachment():
def on_reattachment(event):
assert isinstance(event, str)
event_id = iw.on_reattachment(on_reattachment)
iw.remove_reattachment_listener(bytes(event_id))
def test_event_broadcast():
def on_broadcast(event):
assert isinstance(event, str)
event_id = iw.on_broadcast(on_broadcast)
iw.remove_broadcast_listener(bytes(event_id))
def test_event_error():
def on_error(event):
assert isinstance(event, str)
event_id = iw.on_error(on_error)
iw.remove_error_listener(bytes(event_id))
def test_event_stronghold_status_change():
def on_stronghold_status_change(event):
assert isinstance(event, str)
event_id = iw.on_stronghold_status_change(on_stronghold_status_change)
iw.remove_stronghold_status_change_listener(bytes(event_id))
def test_on_transfer_progress():
def on_transfer_progress(event):
assert isinstance(event, str)
event_id = iw.on_transfer_progress(on_transfer_progress)
iw.remove_transfer_progress_listener(bytes(event_id))
def test_on_migration_progress():
def on_migration_progress(event):
assert isinstance(event, str)
event_id = iw.on_migration_progress(on_migration_progress)
iw.remove_migration_progress_listener(bytes(event_id))
| 1,798
| 0
| 207
|
6496c26b86b5e1c0f0f3e63c148cc42bb42f3e84
| 22,361
|
py
|
Python
|
chrome/test/functional/autofill.py
|
meego-tablet-ux/meego-app-browser
|
0f4ef17bd4b399c9c990a2f6ca939099495c2b9c
|
[
"BSD-3-Clause"
] | 1
|
2015-10-12T09:14:22.000Z
|
2015-10-12T09:14:22.000Z
|
chrome/test/functional/autofill.py
|
meego-tablet-ux/meego-app-browser
|
0f4ef17bd4b399c9c990a2f6ca939099495c2b9c
|
[
"BSD-3-Clause"
] | null | null | null |
chrome/test/functional/autofill.py
|
meego-tablet-ux/meego-app-browser
|
0f4ef17bd4b399c9c990a2f6ca939099495c2b9c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T07:22:28.000Z
|
2020-11-04T07:22:28.000Z
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import re
import autofill_dataset_converter
import autofill_dataset_generator
import pyauto_functional # Must be imported before pyauto
import pyauto
class AutofillTest(pyauto.PyUITest):
"""Tests that autofill works correctly"""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
while True:
raw_input('Hit <enter> to dump info.. ')
info = self.GetAutofillProfile()
pp.pprint(info)
def testFillProfile(self):
"""Test filling profiles and overwriting with new profiles."""
profiles = [{'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith', 'ADDRESS_HOME_ZIP': '94043',},
{'EMAIL_ADDRESS': 'sue@example.com',
'COMPANY_NAME': 'Company X',}]
credit_cards = [{'CREDIT_CARD_NUMBER': '6011111111111117',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2011'},
{'CREDIT_CARD_NAME': 'Bob C. Smith'}]
self.FillAutofillProfile(profiles=profiles, credit_cards=credit_cards)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
profiles = [ {'NAME_FIRST': 'Larry'}]
self.FillAutofillProfile(profiles=profiles)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
def testFillProfileCrazyCharacters(self):
"""Test filling profiles with unicode strings and crazy characters."""
# Adding autofill profiles.
file_path = os.path.join(self.DataDir(), 'autofill', 'crazy_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
self.assertEqual(profiles, self.GetAutofillProfile()['profiles'])
# Adding credit cards.
file_path = os.path.join(self.DataDir(), 'autofill',
'crazy_creditcards.txt')
test_data = self.EvalDataFrom(file_path)
credit_cards_input = test_data['input']
self.FillAutofillProfile(credit_cards=credit_cards_input)
self.assertEqual(test_data['expected'],
self.GetAutofillProfile()['credit_cards'])
def testGetProfilesEmpty(self):
"""Test getting profiles when none have been filled."""
profile = self.GetAutofillProfile()
self.assertEqual([], profile['profiles'])
self.assertEqual([], profile['credit_cards'])
def testAutofillInvalid(self):
"""Test filling in invalid values for profiles."""
# First try profiles with invalid input.
without_invalid = {'NAME_FIRST': u'Will',
'ADDRESS_HOME_CITY': 'Sunnyvale',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': 'my_zip',
'ADDRESS_HOME_COUNTRY': 'United States'}
# Add some invalid fields.
with_invalid = without_invalid.copy()
with_invalid['PHONE_HOME_WHOLE_NUMBER'] = 'Invalid_Phone_Number'
with_invalid['PHONE_FAX_WHOLE_NUMBER'] = 'Invalid_Fax_Number'
self.FillAutofillProfile(profiles=[with_invalid])
self.assertEqual([without_invalid],
self.GetAutofillProfile()['profiles'])
def testAutofillPrefsStringSavedAsIs(self):
"""Test invalid credit card numbers typed in prefs should be saved as-is."""
credit_card = {'CREDIT_CARD_NUMBER': 'Not_0123-5Checked'}
self.FillAutofillProfile(credit_cards=[credit_card])
self.assertEqual([credit_card],
self.GetAutofillProfile()['credit_cards'],
msg='Credit card number in prefs not saved as-is.')
def _LuhnCreditCardNumberValidator(self, number):
"""Validates whether a number is valid or invalid using the Luhn test.
Validation example:
1. Example number: 49927398716
2. Reverse the digits: 61789372994
3. Sum the digits in the odd-numbered position for s1:
6 + 7 + 9 + 7 + 9 + 4 = 42
4. Take the digits in the even-numbered position: 1, 8, 3, 2, 9
4.1. Two times each digit in the even-numbered position: 2, 16, 6, 4, 18
4.2. For each resulting value that is now 2 digits, add the digits
together: 2, 7, 6, 4, 9
(0 + 2 = 2, 1 + 6 = 7, 0 + 6 = 6, 0 + 4 = 4, 1 + 8 = 9)
4.3. Sum together the digits for s2: 2 + 7 + 6 + 4 + 9 = 28
5. Sum together s1 + s2 and if the sum ends in zero, the number passes the
Luhn test: 42 + 28 = 70 which is a valid credit card number.
Args:
number: the credit card number being validated, as a string.
Return:
boolean whether the credit card number is valid or not.
"""
# Filters out non-digit characters.
number = re.sub('[^0-9]', '', number)
reverse = [int(ch) for ch in str(number)][::-1]
    # divmod splits each doubled value into its two digits, ready for summing.
return ((sum(reverse[0::2]) + sum(sum(divmod(d*2, 10))
for d in reverse[1::2])) % 10 == 0)
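  # Illustrative note (added, not part of the original test): for '49927398716'
  # the reversed digits are [6,1,7,8,9,3,7,2,9,9,4]; sum(reverse[0::2]) gives
  # 6+7+9+7+9+4 = 42, the doubled even-position digits contribute
  # 2+7+6+4+9 = 28, and 42+28 = 70 is divisible by 10, so the number is valid.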
def testInvalidCreditCardNumberIsNotAggregated(self):
"""Test credit card info with an invalid number is not aggregated.
When filling out a form with an invalid credit card number (one that
does not pass the Luhn test) the credit card info should not be saved into
Autofill preferences.
"""
invalid_cc_info = {'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408 0412 3456 7890',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
cc_number = invalid_cc_info['CREDIT_CARD_NUMBER']
self.assertFalse(self._LuhnCreditCardNumberValidator(cc_number),
msg='This test requires an invalid credit card number.')
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'autofill_creditcard_form.html'))
self.NavigateToURL(url)
for key, value in invalid_cc_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until the form is submitted and the page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
self.assertFalse(
cc_infobar, msg='Save credit card infobar offered to save CC info.')
def testWhitespacesAndSeparatorCharsStrippedForValidCCNums(self):
"""Test whitespaces and separator chars are stripped for valid CC numbers.
The credit card numbers used in this test pass the Luhn test.
For reference: http://www.merriampark.com/anatomycc.htm
"""
credit_card_info = [{'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408 0412 3456 7893',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'},
{'CREDIT_CARD_NAME': 'Jane Doe',
'CREDIT_CARD_NUMBER': '4417-1234-5678-9113',
'CREDIT_CARD_EXP_MONTH': '10',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2013'}]
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'autofill_creditcard_form.html'))
for cc_info in credit_card_info:
self.NavigateToURL(url)
for key, value in cc_info.iteritems():
cc_number = cc_info['CREDIT_CARD_NUMBER']
self.assertTrue(self._LuhnCreditCardNumberValidator(cc_number),
msg='This test requires a valid credit card number.')
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until form is submitted and page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
self.PerformActionOnInfobar('accept', infobar_index=0)
# Verify the filled-in credit card number against the aggregated number.
aggregated_cc_1 = (
self.GetAutofillProfile()['credit_cards'][0]['CREDIT_CARD_NUMBER'])
aggregated_cc_2 = (
self.GetAutofillProfile()['credit_cards'][1]['CREDIT_CARD_NUMBER'])
self.assertFalse((' ' in aggregated_cc_1 or ' ' in aggregated_cc_2 or
'-' in aggregated_cc_1 or '-' in aggregated_cc_2),
msg='Whitespaces or separator chars not stripped.')
def testProfilesNotAggregatedWithNoAddress(self):
"""Test Autofill does not aggregate profiles with no address info."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'bsmith@example.com',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '650-123-4567',}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("merge_dup").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
self.assertFalse(self.GetAutofillProfile()['profiles'],
msg='Profile with no address info was aggregated.')
def testProfilesNotAggregatedWithInvalidEmail(self):
"""Test Autofill does not aggregate profiles with an invalid email."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'garbage',
'ADDRESS_HOME_LINE1': '1234 H St.',
'ADDRESS_HOME_CITY': 'San Jose',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': '95110',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '408-123-4567',}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("merge_dup").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
self.assertFalse(self.GetAutofillProfile()['profiles'],
msg='Profile with invalid email was aggregated.')
def _SendKeyEventsToPopulateForm(self, tab_index=0, windex=0):
"""Send key events to populate a web form with Autofill profile data.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
TAB_KEYPRESS = 0x09 # Tab keyboard key press.
DOWN_KEYPRESS = 0x28 # Down arrow keyboard key press.
RETURN_KEYPRESS = 0x0D # Return keyboard key press.
self.SendWebkitKeypressEvent(TAB_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(DOWN_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(DOWN_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(RETURN_KEYPRESS, tab_index, windex)
def testComparePhoneNumbers(self):
"""Test phone fields parse correctly from a given profile.
The high level key presses execute the following: Select the first text
field, invoke the autofill popup list, select the first profile within the
list, and commit to the profile to populate the form.
"""
profile_path = os.path.join(self.DataDir(), 'autofill',
'phone_pinput_autofill.txt')
profile_expected_path = os.path.join(self.DataDir(), 'autofill',
'phone_pexpected_autofill.txt')
profiles = self.EvalDataFrom(profile_path)
profiles_expected = self.EvalDataFrom(profile_expected_path)
self.FillAutofillProfile(profiles=profiles)
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'form_phones.html'))
for profile_expected in profiles_expected:
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
form_values = {}
for key, value in profile_expected.iteritems():
js_returning_field_value = (
'var field_value = document.getElementById("%s").value;'
'window.domAutomationController.send(field_value);'
) % key
form_values[key] = self.ExecuteJavascript(
js_returning_field_value, 0, 0)
self.assertEqual(
form_values[key], value,
msg=('Original profile not equal to expected profile at key: "%s"\n'
'Expected: "%s"\nReturned: "%s"' % (
key, value, form_values[key])))
def testCCInfoNotStoredWhenAutocompleteOff(self):
"""Test CC info not offered to be saved when autocomplete=off for CC field.
If the credit card number field has autocomplete turned off, then the credit
card infobar should not offer to save the credit card info. The credit card
number must be a valid Luhn number.
"""
credit_card_info = {'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408041234567893',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'cc_autocomplete_off_test.html'))
self.NavigateToURL(url)
for key, value in credit_card_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until form is submitted and page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
self.assertFalse(cc_infobar,
msg='Save credit card infobar offered to save CC info.')
def testNoAutofillForReadOnlyFields(self):
"""Test that Autofill does not fill in read-only fields."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'bsmith@gmail.com',
'ADDRESS_HOME_LINE1': '1234 H St.',
'ADDRESS_HOME_CITY': 'San Jose',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': '95110',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '408-123-4567',}
self.FillAutofillProfile(profiles=[profile])
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'read_only_field_test.html'))
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
js_return_readonly_field = (
'var field_value = document.getElementById("email").value;'
'window.domAutomationController.send(field_value);')
readonly_field_value = self.ExecuteJavascript(
js_return_readonly_field, 0, 0)
js_return_addrline1_field = (
'var field_value = document.getElementById("address").value;'
'window.domAutomationController.send(field_value);')
addrline1_field_value = self.ExecuteJavascript(
js_return_addrline1_field, 0, 0)
self.assertNotEqual(
readonly_field_value, profile['EMAIL_ADDRESS'],
'Autofill filled in value "%s" for a read-only field.'
% readonly_field_value)
self.assertEqual(
addrline1_field_value, profile['ADDRESS_HOME_LINE1'],
'Unexpected value "%s" in the Address field.' % addrline1_field_value)
def FormFillLatencyAfterSubmit(self):
"""Test latency time on form submit with lots of stored Autofill profiles.
This test verifies when a profile is selected from the Autofill dictionary
that consists of thousands of profiles, the form does not hang after being
submitted.
The high level key presses execute the following: Select the first text
field, invoke the autofill popup list, select the first profile within the
list, and commit to the profile to populate the form.
This test is partially automated. The bulk of the work is done, such as
generating 1500 plus profiles, inserting those profiles into Autofill,
selecting a profile from the list. The tester will need to click on the
submit button and check if the browser hangs.
"""
# HTML file needs to be run from a http:// url.
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'latency_after_submit_test.html'))
# Run the generator script to generate the dictionary list needed for the
# profiles.
gen = autofill_dataset_generator.DatasetGenerator(
logging_level=logging.ERROR)
list_of_dict = gen.GenerateDataset(num_of_dict_to_generate=1501)
self.FillAutofillProfile(profiles=list_of_dict)
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
# TODO(dyu): add automated form hang or crash verification.
raw_input(
'Verify the test manually. Test hang time after submitting the form.')
def AutofillCrowdsourcing(self):
"""Test able to send POST request of web form to Autofill server.
The Autofill server processes the data offline, so it can take a few days
for the result to be detectable. Manual verification is required.
"""
# HTML file needs to be run from a specific http:// url to be able to verify
# the results a few days later by visiting the same url.
url = 'http://www.corp.google.com/~dyu/autofill/crowdsourcing-test.html'
# Adding crowdsourcing Autofill profile.
file_path = os.path.join(self.DataDir(), 'autofill',
'crowdsource_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
# Autofill server captures 2.5% of the data posted.
# Looping 1000 times is a safe minimum to exceed the server's threshold or
# noise.
for i in range(1000):
fname = self.GetAutofillProfile()['profiles'][0]['NAME_FIRST']
lname = self.GetAutofillProfile()['profiles'][0]['NAME_LAST']
email = self.GetAutofillProfile()['profiles'][0]['EMAIL_ADDRESS']
# Submit form to collect crowdsourcing data for Autofill.
self.NavigateToURL(url, 0, 0)
fname_field = ('document.getElementById("fn").value = "%s"; '
'window.domAutomationController.send("done");') % fname
lname_field = ('document.getElementById("ln").value = "%s"; '
'window.domAutomationController.send("done");') % lname
email_field = ('document.getElementById("em").value = "%s"; '
'window.domAutomationController.send("done");') % email
      self.ExecuteJavascript(fname_field, 0, 0)
      self.ExecuteJavascript(lname_field, 0, 0)
      self.ExecuteJavascript(email_field, 0, 0)
self.ExecuteJavascript('document.getElementById("frmsubmit").submit();'
'window.domAutomationController.send("done");',
0, 0)
def MergeDuplicateProfilesInAutofill(self):
"""Test Autofill ability to merge duplicate profiles and throw away junk."""
# HTML file needs to be run from a http:// url.
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
# Run the parser script to generate the dictionary list needed for the
# profiles.
c = autofill_dataset_converter.DatasetConverter(
os.path.join(self.DataDir(), 'autofill', 'dataset.txt'),
logging_level=logging.INFO) # Set verbosity to INFO, WARNING, ERROR.
list_of_dict = c.Convert()
for profile in list_of_dict:
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
self.ExecuteJavascript('document.getElementById("merge_dup").submit();'
'window.domAutomationController.send("done");',
0, 0)
# Verify total number of inputted profiles is greater than the final number
# of profiles after merging.
self.assertTrue(
len(list_of_dict) > len(self.GetAutofillProfile()['profiles']))
# Write profile dictionary to a file.
merged_profile = os.path.join(self.DataDir(), 'autofill',
'merged-profiles.txt')
profile_dict = self.GetAutofillProfile()['profiles']
output = open(merged_profile, 'wb')
pickle.dump(profile_dict, output)
output.close()
if __name__ == '__main__':
pyauto_functional.Main()
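The tests above inline the same JavaScript-injection snippet many times (set a field value by element id, then signal the automation controller). A small helper along those lines, shown here only as a hypothetical sketch built on the ExecuteJavascript call the tests already use, might look like this:
def _fill_fields_by_id(test, field_values, tab_index=0, windex=0):
    """Hypothetical helper: set each DOM field by id and signal completion."""
    for element_id, value in field_values.iteritems():
        script = ('document.getElementById("%s").value = "%s"; '
                  'window.domAutomationController.send("done");' %
                  (element_id, value))
        test.ExecuteJavascript(script, tab_index, windex)
Inside a test method it would be called as, for example, _fill_fields_by_id(self, invalid_cc_info).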
| 45.449187
| 80
| 0.653414
|
| 0
| 0
| 0
|
707d8212ab78ecedd3b8526ac78feab6240c7ea9
| 531
|
py
|
Python
|
test/utils/assertions.py
|
wileykestner/falcon-sqlalchemy-demo
|
a1c8bdf212bafc4b577dbebab57753d724871572
|
[
"MIT"
] | 41
|
2016-10-21T04:08:05.000Z
|
2020-11-27T22:07:18.000Z
|
test/utils/assertions.py
|
wileykestner/falcon-sqlalchemy-demo
|
a1c8bdf212bafc4b577dbebab57753d724871572
|
[
"MIT"
] | null | null | null |
test/utils/assertions.py
|
wileykestner/falcon-sqlalchemy-demo
|
a1c8bdf212bafc4b577dbebab57753d724871572
|
[
"MIT"
] | 8
|
2017-12-19T21:56:49.000Z
|
2022-01-30T12:29:05.000Z
|
import pytest
from test.utils.helpers import get_header_value, get_json_from_response
| 33.1875
| 91
| 0.783427
|
import pytest
from test.utils.helpers import get_header_value, get_json_from_response
def assert_header_value(header_key, expected_value, response_headers):
header_value = get_header_value(header_key, response_headers)
if header_value is not None:
assert header_value == expected_value
else:
pytest.fail("The response headers do not contain the key: '{}'".format(header_key))
def assert_json_response(expected_json_body, response):
assert get_json_from_response(response) == expected_json_body
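A brief pytest-style usage sketch for the two helpers above; the client fixture, endpoint, payload, and header key are placeholders rather than anything defined in this repository:
def test_widget_endpoint_returns_expected_json(client):
    # `client` is assumed to be a test-client fixture; simulate_post and the
    # '/widgets' route are illustrative placeholders only.
    response = client.simulate_post('/widgets', json={'name': 'example'})
    assert_header_value('content-type', 'application/json', response.headers)
    assert_json_response({'name': 'example'}, response)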
| 396
| 0
| 46
|
818be53fc6b29242febd7a21069a6faefa42f81e
| 616
|
py
|
Python
|
src/lib/jeffos/user.py
|
JeffTheK/Jeff-OS
|
8db91673c82bfad69076a10bce0ded376c0dd58b
|
[
"MIT"
] | null | null | null |
src/lib/jeffos/user.py
|
JeffTheK/Jeff-OS
|
8db91673c82bfad69076a10bce0ded376c0dd58b
|
[
"MIT"
] | null | null | null |
src/lib/jeffos/user.py
|
JeffTheK/Jeff-OS
|
8db91673c82bfad69076a10bce0ded376c0dd58b
|
[
"MIT"
] | null | null | null |
from .__init__ import *
from .color import ERR
| 29.333333
| 53
| 0.650974
|
from .__init__ import *
from .color import ERR
def get_current_user() -> str:
if not os.path.isfile(OS_PATH+"sys/var/usr.cfg"):
print(ERR+"usr.cfg not found")
return "ERROR"
usr_cfg = open(OS_PATH+"sys/var/usr.cfg", 'r')
current_user = usr_cfg.readlines()[0].strip()
usr_cfg.close()
return current_user
def change_current_user(user_name: str):
usr_cfg = open(OS_PATH+"sys/var/usr.cfg", 'r')
lines = usr_cfg.readlines()
lines[0] = user_name+"\n"
usr_cfg.close()
usr_cfg = open(OS_PATH+"sys/var/usr.cfg", 'w')
usr_cfg.writelines(lines)
usr_cfg.close()
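A short round-trip sketch using the two functions above, assuming the Jeff-OS tree is present and sys/var/usr.cfg already holds a user name on its first line (change_current_user rewrites that first line in place):
def switch_user_temporarily(new_user):
    # Remember the current user, switch, verify, then restore.
    previous = get_current_user()
    change_current_user(new_user)
    assert get_current_user() == new_user
    change_current_user(previous)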
| 524
| 0
| 46
|
8d53c50312ab63f92c7d0a794e02e685e48f61a5
| 17,069
|
py
|
Python
|
fred/clients/eseries.py
|
dmpe/FRB
|
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
|
[
"MIT"
] | 107
|
2016-01-19T15:13:07.000Z
|
2022-03-25T03:51:16.000Z
|
fred/clients/eseries.py
|
dmpe/FRB
|
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
|
[
"MIT"
] | 8
|
2016-02-05T20:07:51.000Z
|
2021-08-11T17:05:02.000Z
|
fred/clients/eseries.py
|
dmpe/FRB
|
692bcf576e17bd1a81db2b7644f4f61aeb39e5c7
|
[
"MIT"
] | 37
|
2016-01-19T15:13:11.000Z
|
2021-05-21T10:10:41.000Z
|
from fred.utils import NamespacedClient, query_params
from fred.helpers import _get_request
class ESeriesClient(NamespacedClient):
"""
Class for working with FRED series
"""
@query_params('realtime_start','realtime_end')
def details(self,series_id=None,response_type=None,params=None):
"""
Function to request a series of economic data.
`<https://research.stlouisfed.org/docs/api/fred/release.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg bool ssl_verify: To verify HTTPs.
"""
path='/series?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end')
def categories(self,series_id=None,response_type=None,params=None):
"""
Function to request the categories for an economic data series.
`<https://research.stlouisfed.org/docs/api/fred/release.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg bool ssl_verify: To verify HTTPs.
"""
path='/series/categories?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end')
def release(self,series_id=None,response_type=None,params=None):
"""
Function to request the release for an economic data series.
`<https://research.stlouisfed.org/docs/api/fred/series_release.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg bool ssl_verify: To verify HTTPs.
"""
path='/series/release?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end',
'order_by','sort_order')
def tags(self,series_id=None,response_type=None,params=None):
"""
Function to request FRED tags for a particular series.
FRED tags are attributes assigned to series.
`<https://research.stlouisfed.org/docs/api/fred/series_tags.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/tags?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end','limit',
'offset','filter_value')
def updates(self,series_id=None,response_type=None,params=None):
"""
Function to request economic data series sorted by when observations
were updated on the FRED server (attribute last_updated). Results are
limited to series updated within the last two weeks.
`<https://research.stlouisfed.org/docs/api/fred/series_updates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str filter_value: Limit results by geographic type of economic data series. Options are 'macro',
'regional', and 'all'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/updates?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end','limit',
'offset','sort_order')
def vintage_dates(self,series_id=None,response_type=None,params=None):
"""
Function to request the dates in history when a series' data values were
revised or new data values were released. Vintage dates are the release dates
for a series excluding release dates when the data for the series did not change.
`<https://research.stlouisfed.org/docs/api/fred/series_vintagedates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results by vintage_date. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/vintagedates?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end','limit',
'offset','sort_order','observation_start','observation_end',
'units','frequency','aggregation_method','output_type',
'vintage_dates')
def observations(self,series_id=None,response_type=None,params=None):
"""
Function to request the observations or data values for an economic data series.
`<https://research.stlouisfed.org/docs/api/fred/series_observations.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 100000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results is ascending or descending observation_date order. Options are 'asc','desc'
:arg str observation_start: The start of the observation period. Format "YYYY-MM-DD"
:arg str observation_end: The end of the observation period. Format "YYYY-MM-DD"
:arg str units: A key that indicates a data value transformation. Options are 'lin', 'chg', 'ch1', 'pch',
'pc1', 'pca', 'cch', 'cca', 'log'
:arg str frequency: Indicates a lower frequency to aggregate values. Options are 'd', 'w',
'bw', 'm', 'q', 'sa', 'a', 'wef', 'weth', 'wew', 'wetu', 'wem',
'wesu', 'wesa', 'bwew', 'bwem'
:arg str aggregation_method: Indicates the aggregation method used for frequency aggregation. Options are 'avg',
'sum', 'eop'
:arg int output_type: Output type. Options are 1, 2, 3, 4
:arg str vintage_dates: Date(s) in history. Format "YYYY-MM-DD". Example for multiple dates "2000-01-01,2005-02-24,..."
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/observations?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
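    # Illustrative note (added, not part of the original client): a typical call
    # combines the options documented above, for example units='pch',
    # frequency='m', aggregation_method='avg' and output_type=1 together with an
    # observation_start/observation_end window.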
@query_params('search_type','realtime_start','realtime_end',
'limit','offset','order_by','sort_order','filter_variable',
'filter_value','tag_names','exclude_tag_names')
def search(self,search_text=None,response_type=None,params=None):
"""
Function to request economic data series that match search text.
`<https://research.stlouisfed.org/docs/api/fred/series_search.html>`_
:arg str search_text: The words to match against economic data series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str search_type: Determines the type of search to perform. Options are 'full_text','series_id'
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'search_rank',
'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start',
'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str filter_variable: The attribute to filter results by. Options are 'frequency', 'units','seasonal_adjustment'
:arg str filter_value: The value of the filter_variable attribute to filter results by.
:arg str tag_names: Tag names used to match series. Separate with semicolon as in "income;bea"
:arg str exclude_tag_names: Tag names used to exclude series. Separate with semicolon as in "income;bea"
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/search?'
params['search_text'] = search_text
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end',
'limit','offset','order_by','sort_order','tag_names',
'tag_group_id','tag_search_text')
def search_tags(self,series_search_text=None,response_type=None,params=None):
"""
Function to request the FRED tags for a series search.
`<https://research.stlouisfed.org/docs/api/fred/series_search_tags.html>`_
:arg str series_search_text: The words to match against economic data series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea"
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str tag_search_text: The words to find matching tags with.
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/search/tags?'
params['series_search_text'] = series_search_text
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end',
'limit','offset','order_by','sort_order',
'tag_group_id','tag_search_text','exclude_tag_names')
def search_related_tags(self,series_search_text=None,tag_names=None,response_type=None,params=None):
"""
Function to request the related FRED tags for one or more FRED tags matching a series search.
`<https://research.stlouisfed.org/docs/api/fred/series_search_related_tags.html>`_
:arg str series_search_text: The words to match against economic data series. Required.
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str tag_search_text: The words to find matching tags with.
:arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea"
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/search/related_tags?'
params['series_search_text'], params['tag_names'] = series_search_text, tag_names
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
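A minimal usage sketch for the series client above. How the namespaced client is constructed depends on the surrounding library, so `series_client` is assumed to be an already-built ESeriesClient; the method names, keyword arguments, and option values are taken from the docstrings above, while 'GNPCA' is only an example series id:
def fetch_series_example(series_client):
    # Percent-change observations for an example series id, as a DataFrame.
    obs = series_client.observations(series_id='GNPCA', response_type='df',
                                     observation_start='2000-01-01',
                                     units='pch')
    # FRED tags attached to the same series, as JSON.
    tags = series_client.tags(series_id='GNPCA', response_type='json')
    return obs, tags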
| 61.399281
| 127
| 0.656629
|
from fred.utils import NamespacedClient, query_params
from fred.helpers import _get_request
class ESeriesClient(NamespacedClient):
"""
Class for working with FRED series
"""
@query_params('realtime_start','realtime_end')
def details(self,series_id=None,response_type=None,params=None):
"""
Function to request a series of economic data.
`<https://research.stlouisfed.org/docs/api/fred/release.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg bool ssl_verify: To verify HTTPs.
"""
path='/series?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end')
def categories(self,series_id=None,response_type=None,params=None):
"""
Function to request the categories for an economic data series.
`<https://research.stlouisfed.org/docs/api/fred/release.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg bool ssl_verify: To verify HTTPs.
"""
path='/series/categories?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end')
def release(self,series_id=None,response_type=None,params=None):
"""
Function to request the release for an economic data series.
`<https://research.stlouisfed.org/docs/api/fred/series_release.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
         'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg bool ssl_verify: To verify HTTPs.
"""
path='/series/release?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end',
'order_by','sort_order')
def tags(self,series_id=None,response_type=None,params=None):
"""
Function to request FRED tags for a particular series.
FRED tags are attributes assigned to series.
`<https://research.stlouisfed.org/docs/api/fred/series_tags.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/tags?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end','limit',
'offset','filter_value')
def updates(self,series_id=None,response_type=None,params=None):
"""
Function to request economic data series sorted by when observations
were updated on the FRED server (attribute last_updated). Results are
limited to series updated within the last two weeks.
`<https://research.stlouisfed.org/docs/api/fred/series_updates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str filter_value: Limit results by geographic type of economic data series. Options are 'macro',
'regional', and 'all'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/updates?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end','limit',
'offset','sort_order')
def vintage_dates(self,series_id=None,response_type=None,params=None):
"""
Function to request the dates in history when a series' data values were
revised or new data values were released. Vintage dates are the release dates
for a series excluding release dates when the data for the series did not change.
`<https://research.stlouisfed.org/docs/api/fred/series_vintagedates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results by vintage_date. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/vintagedates?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end','limit',
'offset','sort_order','observation_start','observation_end',
'units','frequency','aggregation_method','output_type',
'vintage_dates')
def observations(self,series_id=None,response_type=None,params=None):
"""
Function to request the observations or data values for an economic data series.
`<https://research.stlouisfed.org/docs/api/fred/series_observations.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 100000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results in ascending or descending observation_date order. Options are 'asc','desc'
:arg str observation_start: The start of the observation period. Format "YYYY-MM-DD"
:arg str observation_end: The end of the observation period. Format "YYYY-MM-DD"
:arg str units: A key that indicates a data value transformation. Options are 'lin', 'chg', 'ch1', 'pch',
'pc1', 'pca', 'cch', 'cca', 'log'
:arg str frequency: Indicates a lower frequency to aggregate values. Options are 'd', 'w',
'bw', 'm', 'q', 'sa', 'a', 'wef', 'weth', 'wew', 'wetu', 'wem',
'wesu', 'wesa', 'bwew', 'bwem'
:arg str aggregation_method: Indicates the aggregation method used for frequency aggregation. Options are 'avg',
'sum', 'eop'
:arg int output_type: Output type. Options are 1, 2, 3, 4
:arg str vintage_dates: Date(s) in history. Format "YYYY-MM-DD". Example for multiple dates "2000-01-01,2005-02-24,..."
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/observations?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('search_type','realtime_start','realtime_end',
'limit','offset','order_by','sort_order','filter_variable',
'filter_value','tag_names','exclude_tag_names')
def search(self,search_text=None,response_type=None,params=None):
"""
Function to request economic data series that match search text.
`<https://research.stlouisfed.org/docs/api/fred/series_search.html>`_
:arg str search_text: The words to match against economic data series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str search_type: Determines the type of search to perform. Options are 'full_text','series_id'
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'search_rank',
'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start',
'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str filter_variable: The attribute to filter results by. Options are 'frequency', 'units','seasonal_adjustment'
:arg str filter_value: The value of the filter_variable attribute to filter results by.
:arg str tag_names: Tag names used to match series. Separate with semicolon as in "income;bea"
:arg str exclude_tag_names: Tag names used to exclude series. Separate with semicolon as in "income;bea"
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/search?'
params['search_text'] = search_text
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end',
'limit','offset','order_by','sort_order','tag_names',
'tag_group_id','tag_search_text')
def search_tags(self,series_search_text=None,response_type=None,params=None):
"""
Function to request the FRED tags for a series search.
`<https://research.stlouisfed.org/docs/api/fred/series_search_tags.html>`_
:arg str series_search_text: The words to match against economic data series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea"
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str tag_search_text: The words to find matching tags with.
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/search/tags?'
params['series_search_text'] = series_search_text
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
@query_params('realtime_start','realtime_end',
'limit','offset','order_by','sort_order',
'tag_group_id','tag_search_text','exclude_tag_names')
def search_related_tags(self,series_search_text=None,tag_names=None,response_type=None,params=None):
"""
Function to request the related FRED tags for one or more FRED tags matching a series search.
`<https://research.stlouisfed.org/docs/api/fred/series_search_related_tags.html>`_
:arg str series_search_text: The words to match against economic data series. Required.
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea". Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str tag_search_text: The words to find matching tags with.
:arg str exclude_tag_names: Tag names to exclude. Separate with semicolon as in "income;bea"
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/search/related_tags?'
params['series_search_text'], params['tag_names'] = series_search_text, tag_names
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response
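# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of calling the endpoints above. How an ESeriesClient
# instance is obtained (api_key, url_root, response_type) is defined by the
# package's top-level client, which is not shown in this file, so the wiring
# below is an assumption rather than the documented API.
def _example_series_calls(series_client):
    # 'GNPCA' is a real FRED series id, used here purely as an example.
    meta = series_client.details(series_id='GNPCA', response_type='dict')
    obs = series_client.observations(series_id='GNPCA', response_type='df',
                                     observation_start='2000-01-01')
    tags = series_client.tags(series_id='GNPCA', response_type='json')
    return meta, obs, tags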
| 0
| 0
| 0
|
d1839a3279a5cf65bd5fa7efd4fde3026ed8d45c
| 9,871
|
py
|
Python
|
ke/images/python/cluster_tool.py
|
justasabc/kubernetes-ubuntu
|
afc670297a5becb2fcb4404c3ee1e02c99b5eaf4
|
[
"Apache-2.0"
] | 1
|
2020-10-18T01:34:39.000Z
|
2020-10-18T01:34:39.000Z
|
ke/images/python/cluster_tool.py
|
justasabc/kubernetes-ubuntu
|
afc670297a5becb2fcb4404c3ee1e02c99b5eaf4
|
[
"Apache-2.0"
] | null | null | null |
ke/images/python/cluster_tool.py
|
justasabc/kubernetes-ubuntu
|
afc670297a5becb2fcb4404c3ee1e02c99b5eaf4
|
[
"Apache-2.0"
] | null | null | null |
"""
Class Hierarchy
G{classtree: BaseTool}
Package tree
G{packagetree: cluster_tool}
Import Graph
G{importgraph: cluster_tool}
"""
#!/usr/bin/python
# -*- coding:utf-8 -*-
import subprocess
from json_generator import JsonGenerator
from container_client import ContainerClient
DOCKER_SERVER_URL = 'tcp://master:2375'
class BaseTool:
"""
base tool
"""
class KubernetesTool(BaseTool):
"""
kubernetes tool
"""
#=====================================================================
# create pod/service/replicationController/node/minion/event
#=====================================================================
#=====================================================================
# list pod/service/replicationController/node/minion/event
#=====================================================================
#=====================================================================
# delete pod/service/replicationController/node/minion/event
#=====================================================================
#=====================================================================
# get pod hostname
#=====================================================================
#=====================================================================
# resize replicationController
#=====================================================================
class IptablesTool(BaseTool):
"""
iptables tool
"""
#==========================================================
# nat add rules to PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
#==========================================================
# nat delete rules to PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
#==========================================================
# nat flush PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
#==========================================================
# nat list PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
if __name__=="__main__":
main()
| 34.757042
| 141
| 0.668727
|
"""
Class Hierarchy
G{classtree: BaseTool}
Package tree
G{packagetree: cluster_tool}
Import Graph
G{importgraph: cluster_tool}
"""
#!/usr/bin/python
# -*- coding:utf-8 -*-
import subprocess
from json_generator import JsonGenerator
from container_client import ContainerClient
DOCKER_SERVER_URL = 'tcp://master:2375'
class BaseTool:
"""
base tool
"""
def __init__(self,name):
self.name = name
""" @type: C{string} """
def execute_command(self,command_str):
#print "[BaseTool] {0}".format(command_str)
p = subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
value = ""
for line in p.stdout.readlines():
value += line
return_code = p.wait()
return value.rstrip()
class KubernetesTool(BaseTool):
"""
kubernetes tool
"""
def __init__(self):
#print "[KubernetesTool] init..."
BaseTool.__init__(self,"KubernetesTool")
self.container_client = ContainerClient(DOCKER_SERVER_URL)
""" @type: L{ContainerClient} """
#print "[KubernetesTool] OK"
def __create(self,type_name,config_file):
command_str = "kubecfg -c {0} create {1}".format(config_file,type_name)
return BaseTool.execute_command(self,command_str)
def __list(self,type_name):
command_str = "kubecfg list {0}".format(type_name)
return BaseTool.execute_command(self,command_str)
def __delete(self,type_name,type_id):
command_str = "kubecfg delete {0}/{1}".format(type_name,type_id)
return BaseTool.execute_command(self,command_str)
#=====================================================================
# create pod/service/replicationController/node/minion/event
#=====================================================================
def create_pod(self,config_file):
type_name = "pods"
return self.__create(type_name,config_file)
def create_service(self,config_file):
type_name = "services"
return self.__create(type_name,config_file)
def create_replication_controller(self,config_file):
type_name = "replicationControllers"
return self.__create(type_name,config_file)
#=====================================================================
# list pod/service/replicationController/node/minion/event
#=====================================================================
def list_pods(self):
type_name = "pods"
return self.__list(type_name)
def list_services(self):
type_name = "services"
return self.__list(type_name)
def list_replication_controller(self):
type_name = "replicationControllers"
return self.__list(type_name)
#=====================================================================
# delete pod/service/replicationController/node/minion/event
#=====================================================================
def delete_pod(self,type_id):
type_name = "pods"
return self.__delete(type_name,type_id)
def delete_service(self,type_id):
type_name = "services"
return self.__delete(type_name,type_id)
def delete_replication_controller(self,type_id):
type_name = "replicationControllers"
return self.__delete(type_name,type_id)
#=====================================================================
# get pod hostname
#=====================================================================
def get_pod_hostname(self,pod_id):
command_str = "kubecfg list pods | grep "+pod_id+ " | awk '{print $3;}' | cut -f1 -d/"
return BaseTool.execute_command(self,command_str)
def hostname_to_ip(self,hostname):
if hostname == "":
print "*"*50
print "[KubernetesTool] hostname is empty! "
print "[KubernetesTool] use master node instead! "
print "*"*50
hostname = "master"
command_str = "resolveip -s {0}".format(hostname)
return BaseTool.execute_command(self,command_str)
def get_pod_ip(self,pod_id):
hostname = self.get_pod_hostname(pod_id)
return self.hostname_to_ip(hostname)
def stats_container(self,container):
command_str = "docker stats {0}".format(container)
return BaseTool.execute_command(self,command_str)
def get_host_ip(self):
command_str = "/sbin/ifconfig $ETH0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
return BaseTool.execute_command(self,command_str)
def get_container_ip(self,container_name):
command_str = "docker inspect -f '{{ .NetworkSettings.IPAddress }}' {0}".format(container_name)
return BaseTool.execute_command(self,command_str)
def copy_region_xml_to_minions(self,minions):
# scp -r xml/* minion1:/volumes/var/www/region_load/
for minion in minions:
print "copying xml to {0}...".format(minion)
command_str = "scp -r xml/* {0}:/volumes/var/www/region_load/".format(minion)
BaseTool.execute_command(self,command_str)
def save_json_to_file(self,dict_data,file_path):
generator = JsonGenerator('generator')
generator.generate(dict_data,file_path)
#=====================================================================
# resize replicationController
#=====================================================================
def resize_replication_controller(self,controller_id,replicas):
command_str = "kubecfg resize {0} {1}".format(controller_id,replicas)
return BaseTool.execute_command(self,command_str)
class IptablesTool(BaseTool):
"""
iptables tool
"""
def __init__(self):
#print "[IptablesTool] init..."
BaseTool.__init__(self,"IptablesTool")
#print "[IptablesTool] OK"
#==========================================================
# nat add rules to PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_add_rule_to_prerouting_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
command_str = "iptables -t nat -A PREROUTING -p {0} --dport {1} -j DNAT --to-destination {2}:{3}".format(protocol,dst_port,dst_ip,dst_port)
return BaseTool.execute_command(self,command_str)
def nat_add_rule_to_postrouting_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
command_str = "iptables -t nat -A POSTROUTING -p {0} -d {1} --dport {2} -j SNAT --to-source {3}".format(protocol,dst_ip,dst_port,src_ip)
return BaseTool.execute_command(self,command_str)
def nat_add_rule_to_input_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
command_str = "ls"
return BaseTool.execute_command(self,command_str)
def nat_add_rule_to_output_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
command_str = "ls"
return BaseTool.execute_command(self,command_str)
#==========================================================
# nat delete rules to PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_delete_rule_from_prerouting_chain(self,rule_number):
command_str = "iptables -t nat -D PREROUTING {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
def nat_delete_rule_from_postrouting_chain(self,rule_number):
command_str = "iptables -t nat -D POSTROUTING {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
def nat_delete_rule_from_input_chain(self,rule_number):
command_str = "iptables -t nat -D INPUT {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
def nat_delete_rule_from_output_chain(self,rule_number):
command_str = "iptables -t nat -D OUTPUT {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
#==========================================================
# nat flush PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_flush_prerouting_chain(self):
command_str = "iptables -t nat -F PREROUTING"
return BaseTool.execute_command(self,command_str)
def nat_flush_postrouting_chain(self):
command_str = "iptables -t nat -F POSTROUTING"
return BaseTool.execute_command(self,command_str)
def nat_flush_input_chain(self):
command_str = "iptables -t nat -F INPUT"
return BaseTool.execute_command(self,command_str)
def nat_flush_output_chain(self):
command_str = "iptables -t nat -F OUTPUT"
return BaseTool.execute_command(self,command_str)
def nat_flush_all_chains(self):
self.nat_flush_prerouting_chain()
self.nat_flush_postrouting_chain()
self.nat_flush_input_chain()
self.nat_flush_output_chain()
#==========================================================
# nat list PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_list_prerouting_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L PREROUTING"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_postrouting_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L POSTROUTING"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_input_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L INPUT"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_output_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L OUTPUT"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_all_chains(self):
result = ""
result += (self.nat_list_prerouting_chain() + "\n")
result += (self.nat_list_postrouting_chain() + "\n")
result += (self.nat_list_input_chain() + "\n")
result += (self.nat_list_output_chain() + "\n")
return result.rstrip()
class ToolTesting(KubernetesTool,IptablesTool):
pass
def test():
cmd = IptablesTool()
cmd.nat_flush_prerouting_chain()
print cmd.nat_list_all_chains()
print "OK"
cmd = KubernetesTool()
hostname = cmd.get_pod_hostname("apache-pod")
print cmd.hostname_to_ip(hostname)
print "OK"
def main():
test()
if __name__=="__main__":
main()
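# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of driving the tools defined above. It assumes the host
# has the kubecfg, docker, resolveip and iptables binaries these classes
# shell out to; "apache-pod.json" and the port numbers are placeholders,
# not files or settings from this repository.
def example_usage():
    k8s = KubernetesTool()
    print k8s.create_pod("apache-pod.json")
    print k8s.list_pods()
    pod_ip = k8s.get_pod_ip("apache-pod")
    host_ip = k8s.get_host_ip()
    ipt = IptablesTool()
    # Forward TCP traffic arriving on port 8080 to the pod, then list rules.
    ipt.nat_add_rule_to_prerouting_chain("tcp", 8080, 8080, host_ip, pod_ip)
    print ipt.nat_list_prerouting_chain(with_line_numbers=True)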
| 6,544
| 32
| 1,090
|
82c4e1c9d79324cfb9f7ba26f995247d4d54ad9e
| 333
|
py
|
Python
|
utils.py
|
liliangbin/faceRecognition
|
077e070b42fb8aa8868c604863858a177c178ec7
|
[
"Apache-2.0"
] | 4
|
2019-06-30T13:04:30.000Z
|
2021-04-18T08:01:55.000Z
|
utils.py
|
liliangbin/faceRecognition
|
077e070b42fb8aa8868c604863858a177c178ec7
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
liliangbin/faceRecognition
|
077e070b42fb8aa8868c604863858a177c178ec7
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
| 27.75
| 57
| 0.708709
|
import matplotlib.pyplot as plt
def show_train_history(train_history, train, validation):
plt.plot(train_history.history[train])
plt.plot(train_history.history[validation])
plt.title('train history')
plt.ylabel(train)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
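# --- Illustrative usage sketch (not part of the original module) ---
# `history` below is the object returned by keras.Model.fit(). The metric
# key names differ between Keras versions ('acc' vs 'accuracy'), so this
# wrapper picks whichever pair is actually present.
def plot_history(history):
    acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
    show_train_history(history, acc_key, 'val_' + acc_key)
    show_train_history(history, 'loss', 'val_loss')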
| 277
| 0
| 23
|
4311beaaf96391f1dec77dcb15a6c9c8eec39f67
| 239
|
py
|
Python
|
cli/__init__.py
|
Polsaker/throat
|
39fd66efb7251f1607d9bf9e407e0cbbdfc10c57
|
[
"MIT"
] | 8
|
2019-05-27T19:34:25.000Z
|
2020-03-01T19:06:48.000Z
|
cli/__init__.py
|
Polsaker/throat
|
39fd66efb7251f1607d9bf9e407e0cbbdfc10c57
|
[
"MIT"
] | null | null | null |
cli/__init__.py
|
Polsaker/throat
|
39fd66efb7251f1607d9bf9e407e0cbbdfc10c57
|
[
"MIT"
] | 7
|
2019-05-29T17:12:40.000Z
|
2020-05-01T16:41:16.000Z
|
from .recount import recount
from .admin import admin
from .default import default
from .migration import migration
from .translations import translations
commands = [
migration,
recount,
admin,
default,
translations
]
| 18.384615
| 38
| 0.74477
|
from .recount import recount
from .admin import admin
from .default import default
from .migration import migration
from .translations import translations
commands = [
migration,
recount,
admin,
default,
translations
]
| 0
| 0
| 0
|
500d465798a7caedef8ae7ce212b2a7ab666165d
| 1,078
|
py
|
Python
|
madrona/common/assets.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 9
|
2015-03-09T11:04:21.000Z
|
2022-01-16T09:45:36.000Z
|
madrona/common/assets.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 1
|
2020-04-24T14:38:43.000Z
|
2020-04-24T14:38:43.000Z
|
madrona/common/assets.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 2
|
2016-12-06T15:31:35.000Z
|
2018-03-04T20:04:44.000Z
|
from elementtree import ElementTree as et
import os
ROOT_PATH = ''
def get_js_files():
"""Returns a list of all the javascript files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_js_test_files():
"""Returns a list of all the javascript test files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('test'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_css_files():
"""Returns a list of all css files listed in
media/css_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/css_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
| 30.8
| 65
| 0.646568
|
from elementtree import ElementTree as et
import os
ROOT_PATH = ''
def get_js_files():
"""Returns a list of all the javascript files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_js_test_files():
"""Returns a list of all the javascript test files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('test'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_css_files():
"""Returns a list of all css files listed in
media/css_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/css_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
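# --- Illustrative sketch (not part of the original module) ---
# The layout of media/js_includes.xml is inferred from the parsing code
# above (findall('file') / findall('test') and the 'path' attribute); the
# exact schema is an assumption. This snippet uses the stdlib ElementTree
# purely to demonstrate that inferred structure.
from xml.etree import ElementTree as stdlib_et

_SAMPLE_INCLUDES = """
<includes>
    <file path="media/js/app.js"/>
    <file path="media/js/map.js"/>
    <test path="media/js/tests/test_map.js"/>
</includes>
"""

def _demo_parse_includes():
    root = stdlib_et.fromstring(_SAMPLE_INCLUDES)
    files = [ROOT_PATH + f.get('path') for f in root.findall('file')]
    tests = [ROOT_PATH + t.get('path') for t in root.findall('test')]
    return files, tests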
| 0
| 0
| 0
|
48a887f207778a7c4e05b2e0a8a7e32643674841
| 1,018
|
py
|
Python
|
tests/conf/config.py
|
robert-werner/fastapi-crudrouter
|
4f924307b53e5ea1adaa509302800c060ee7d06a
|
[
"MIT"
] | null | null | null |
tests/conf/config.py
|
robert-werner/fastapi-crudrouter
|
4f924307b53e5ea1adaa509302800c060ee7d06a
|
[
"MIT"
] | null | null | null |
tests/conf/config.py
|
robert-werner/fastapi-crudrouter
|
4f924307b53e5ea1adaa509302800c060ee7d06a
|
[
"MIT"
] | null | null | null |
import os
import pathlib
ENV_FILE_PATH = pathlib.Path(__file__).parent / "dev.env"
assert ENV_FILE_PATH.exists()
| 27.513514
| 150
| 0.574656
|
import os
import pathlib
ENV_FILE_PATH = pathlib.Path(__file__).parent / "dev.env"
assert ENV_FILE_PATH.exists()
class BaseConfig:
POSTGRES_HOST = ""
POSTGRES_USER = ""
POSTGRES_PASSWORD = ""
POSTGRES_DB = ""
POSTGRES_PORT = ""
def __init__(self):
self._apply_dot_env()
self._apply_env_vars()
self.POSTGRES_URI = f"postgresql://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}@{self.POSTGRES_HOST}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
print(self.POSTGRES_URI)
def _apply_dot_env(self):
with open(ENV_FILE_PATH) as fp:
for line in fp.readlines():
line = line.strip(" \n")
if not line.startswith("#"):
k, v = line.split("=", 1)
if hasattr(self, k) and not getattr(self, k):
setattr(self, k, v)
def _apply_env_vars(self):
for k, v in os.environ.items():
if hasattr(self, k):
setattr(self, k, v)
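# --- Illustrative usage sketch (not part of the original module) ---
# dev.env fills in any field left empty on the class, and real environment
# variables are applied afterwards and win. The variable values shown here
# are examples only.
#
#   # dev.env (example contents)
#   POSTGRES_HOST=localhost
#   POSTGRES_USER=postgres
#   POSTGRES_PASSWORD=postgres
#   POSTGRES_DB=test
#   POSTGRES_PORT=5432
def _example_config_uri():
    cfg = BaseConfig()
    # e.g. postgresql://postgres:postgres@localhost:5432/test
    return cfg.POSTGRES_URI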
| 685
| 194
| 23
|
530a06fbf60cdea98dfb1c9085cf498b370520c5
| 3,958
|
py
|
Python
|
.2lanemdr/2lanemdr.py
|
hemidactylus/nbws1
|
282cc2f0d5c04f5fc818f3e411dfb5b549ea47f6
|
[
"Apache-2.0"
] | null | null | null |
.2lanemdr/2lanemdr.py
|
hemidactylus/nbws1
|
282cc2f0d5c04f5fc818f3e411dfb5b549ea47f6
|
[
"Apache-2.0"
] | null | null | null |
.2lanemdr/2lanemdr.py
|
hemidactylus/nbws1
|
282cc2f0d5c04f5fc818f3e411dfb5b549ea47f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import re
import os
import sys
import json
import subprocess
DEF_FILE = '.2lane.info'
DIRECTIVE_TEMPLATE = '<!-- 2L {body} -->'
TYPO_WARNING_FINDER = re.compile('\W2L\W', re.IGNORECASE)
MESSAGE_TEMPLATE = '** 2lanemdr {kind} on {filename}:{linenumber} "{message}"'
def parseDirective(line, wrcs):
"""
Return (kind, target):
('endif', None)
('if', <fn>)
('elif', <fn>)
(None, None)
"""
if line == DIRECTIVE_TEMPLATE.format(body='ENDIF'):
return ('endif', None)
else:
for fn in wrcs.keys():
if line == DIRECTIVE_TEMPLATE.format(body='IF %s' % fn):
return ('if', fn)
elif line == DIRECTIVE_TEMPLATE.format(body='ELIF %s' % fn):
return ('elif', fn)
#
return None, None
def mkFiles(src, prescr, warner, errorer):
"""
Return a list with the path to all files created
"""
inContents = [
li.replace('\n', '')
for li in open(src).readlines()
]
# open files
oFiles = {
fn: open(fp, 'w')
for fn, fp in prescr.items()
}
# cursor setting
writing = {
fn: True
for fn in oFiles.keys()
}
# process lines
for lineNumber, line in enumerate(inContents):
# directive or content line?
directive, dTarget = parseDirective(line, writing)
if directive is not None:
# validate and process
if directive == 'endif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ENDIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = True
elif directive == 'if':
if sum(int(c) for c in writing.values()) != len(writing):
errorer('Misplaced IF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
elif directive == 'elif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ELIF', lineNumber)
elif writing[dTarget]:
errorer('Repeated target in ELIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
else:
errorer('Unknown directive', lineNumber)
else:
#
if TYPO_WARNING_FINDER.search(line):
warner('check line', lineNumber)
# write serially on all active cursors
for fn, fh in oFiles.items():
if writing[fn]:
fh.write('%s\n' % line)
# close files
for fn, fh in oFiles.items():
fh.close()
return [fp for fp in prescr.values()]
if __name__ == '__main__':
if os.path.isfile(DEF_FILE):
defs = json.load(open(DEF_FILE))
files = defs.get('sources', {})
#
allCreatedFiles = []
#
for origF, dests in files.items():
createdFiles = mkFiles(origF, dests, warner=warner, errorer=errorer)
allCreatedFiles += createdFiles
# we git add the created files
subprocess.call(['git', 'add'] + allCreatedFiles)
| 29.984848
| 80
| 0.490904
|
#!/usr/bin/python
import re
import os
import sys
import json
import subprocess
DEF_FILE = '.2lane.info'
DIRECTIVE_TEMPLATE = '<!-- 2L {body} -->'
TYPO_WARNING_FINDER = re.compile('\W2L\W', re.IGNORECASE)
MESSAGE_TEMPLATE = '** 2lanemdr {kind} on {filename}:{linenumber} "{message}"'
def parseDirective(line, wrcs):
"""
Return (kind, target):
('endif', None)
('if', <fn>)
('elif', <fn>)
(None, None)
"""
if line == DIRECTIVE_TEMPLATE.format(body='ENDIF'):
return ('endif', None)
else:
for fn in wrcs.keys():
if line == DIRECTIVE_TEMPLATE.format(body='IF %s' % fn):
return ('if', fn)
elif line == DIRECTIVE_TEMPLATE.format(body='ELIF %s' % fn):
return ('elif', fn)
#
return None, None
def mkFiles(src, prescr, warner, errorer):
"""
Return a list with the path to all files created
"""
inContents = [
li.replace('\n', '')
for li in open(src).readlines()
]
# open files
oFiles = {
fn: open(fp, 'w')
for fn, fp in prescr.items()
}
# cursor setting
writing = {
fn: True
for fn in oFiles.keys()
}
# process lines
for lineNumber, line in enumerate(inContents):
# directive or content line?
directive, dTarget = parseDirective(line, writing)
if directive is not None:
# validate and process
if directive == 'endif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ENDIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = True
elif directive == 'if':
if sum(int(c) for c in writing.values()) != len(writing):
errorer('Misplaced IF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
elif directive == 'elif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ELIF', lineNumber)
elif writing[dTarget]:
errorer('Repeated target in ELIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
else:
errorer('Unknown directive', lineNumber)
else:
#
if TYPO_WARNING_FINDER.search(line):
warner('check line', lineNumber)
# write serially on all active cursors
for fn, fh in oFiles.items():
if writing[fn]:
fh.write('%s\n' % line)
# close files
for fn, fh in oFiles.items():
fh.close()
return [fp for fp in prescr.values()]
if __name__ == '__main__':
if os.path.isfile(DEF_FILE):
defs = json.load(open(DEF_FILE))
files = defs.get('sources', {})
#
allCreatedFiles = []
#
for origF, dests in files.items():
def warner(msg, intLineno):
wmsg = MESSAGE_TEMPLATE.format(
kind='WARNING',
filename=origF,
linenumber=intLineno+1,
message=msg,
)
print(wmsg)
def errorer(msg, intLineno):
emsg = MESSAGE_TEMPLATE.format(
kind='ERROR',
filename=origF,
linenumber=intLineno+1,
message=msg,
)
print(emsg)
sys.exit(1)
createdFiles = mkFiles(origF, dests, warner=warner, errorer=errorer)
allCreatedFiles += createdFiles
# we git add the created files
subprocess.call(['git', 'add'] + allCreatedFiles)
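# --- Illustrative sketch (not part of the original script) ---
# A self-contained demo of the directive format understood by
# parseDirective/mkFiles. The file names, lane names and text below are
# examples only, not taken from this repository.
def _demo_split(tmpdir):
    import os.path as _osp
    src = _osp.join(tmpdir, 'README.src.md')
    with open(src, 'w') as fh:
        fh.write('Common text for both lanes.\n')
        fh.write('<!-- 2L IF github -->\n')
        fh.write('GitHub-only text.\n')
        fh.write('<!-- 2L ELIF gitlab -->\n')
        fh.write('GitLab-only text.\n')
        fh.write('<!-- 2L ENDIF -->\n')
    prescr = {
        'github': _osp.join(tmpdir, 'README.md'),
        'gitlab': _osp.join(tmpdir, 'README.gitlab.md'),
    }
    quiet = lambda msg, lineno: None
    # Each output file receives the common text plus its own lane only.
    return mkFiles(src, prescr, warner=quiet, errorer=quiet)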
| 525
| 0
| 70
|
9903ff1dd556a8dc9bf96c9260ccd0029845f009
| 4,704
|
py
|
Python
|
utils.py
|
menpo/menpo-admin
|
41cb5ab9aa56c3df26e7bfbf43a56a0cbd5f9674
|
[
"BSD-3-Clause"
] | null | null | null |
utils.py
|
menpo/menpo-admin
|
41cb5ab9aa56c3df26e7bfbf43a56a0cbd5f9674
|
[
"BSD-3-Clause"
] | 1
|
2017-01-24T11:17:03.000Z
|
2017-01-24T11:17:03.000Z
|
utils.py
|
menpo/menpo-admin
|
41cb5ab9aa56c3df26e7bfbf43a56a0cbd5f9674
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import subprocess
from subprocess import CalledProcessError
from functools import partial
from collections import namedtuple
Project = namedtuple('Project', ['name', 'versions'])
# all projects using condaci along with the Python version they
# need. Note that for non-python projects we can choose any single
# python version.
PROJECTS = [Project(*x) for x in
[('menpo', (2, 34, 35)),
('menpodetect', (2, 34, 35)),
('menpofit', (2, 34, 35)),
('menpo3d', (2, 34, 35)),
('menpocli', (2, 34, 35)),
('menpowidgets', (2, 34, 35)),
('landmarkerio-server', (2,)),
('menpobench', (2,)),
('cyassimp', (2, 34, 35)),
('cyrasterize', (2, 34, 35)),
('cyvlfeat', (2, 34, 35)),
('cyffld2', (2, 34, 35)),
('cypico', (2, 34, 35)),
('conda-arrow', (2, 34, 35)),
('conda-boost', (2, 34, 35)),
('conda-cherrypy', (2, 34, 35)),
('conda-dlib', (2, 34, 35)),
('conda-opencv3', (2, 34, 35)),
('conda-ffmpeg', (2, 34, 35)),
('conda-glew', (2, 34, 35)),
('conda-glfw3', (2, 34, 35)),
('conda-freeimage', (2, 34, 35)),
('conda-imageio', (2, 34, 35)),
('conda-joblib', (2, 34, 35)),
('workerbee', (2, 34, 35)),
# Python 3 only
('lsfm', (35,)),
# Python 2 only
('conda-menpo-pyvrml97', (2,)),
('conda-pathlib', (2,)),
# We currently build mayavi (and all its deps)
# so we can be Python 3
('conda-vtk', (2, 34, 35)),
('conda-traits', (2, 34, 35)),
('conda-envisage', (2, 34, 35)),
('conda-pyface', (2, 34, 35)),
('conda-apptools', (2, 34, 35)),
('conda-traitsui', (2, 34, 35)),
('conda-mayavi', (2, 34, 35)),
# And we also need the latest ipywidgets...
('conda-ipywidgets', (2, 34, 35)),
('conda-widgetsnbextension', (2, 34, 35)),
# Non-Python projects
('vrml97', (2,)),
('conda-flann', (2,)),
('conda-eigen', (2,)),
('conda-enum', (2,)),
('conda-vlfeat', (35,)),
('conda-opencv', (35,))
]]
PROJECT_NAMES = [p.name for p in PROJECTS]
appveyor_op = partial(perform_operation_on_file, 'appveyor.yml')
travis_op = partial(perform_operation_on_file, '.travis.yml')
def copy_and_yield(fsrc, fdst, length=1024*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
yield
| 29.4
| 71
| 0.51892
|
import os
import subprocess
from subprocess import CalledProcessError
from functools import partial
from collections import namedtuple
Project = namedtuple('Project', ['name', 'versions'])
# all projects using condaci along with the Python version they
# need. Note that for non-python projects we can choose any single
# python version.
PROJECTS = [Project(*x) for x in
[('menpo', (2, 34, 35)),
('menpodetect', (2, 34, 35)),
('menpofit', (2, 34, 35)),
('menpo3d', (2, 34, 35)),
('menpocli', (2, 34, 35)),
('menpowidgets', (2, 34, 35)),
('landmarkerio-server', (2,)),
('menpobench', (2,)),
('cyassimp', (2, 34, 35)),
('cyrasterize', (2, 34, 35)),
('cyvlfeat', (2, 34, 35)),
('cyffld2', (2, 34, 35)),
('cypico', (2, 34, 35)),
('conda-arrow', (2, 34, 35)),
('conda-boost', (2, 34, 35)),
('conda-cherrypy', (2, 34, 35)),
('conda-dlib', (2, 34, 35)),
('conda-opencv3', (2, 34, 35)),
('conda-ffmpeg', (2, 34, 35)),
('conda-glew', (2, 34, 35)),
('conda-glfw3', (2, 34, 35)),
('conda-freeimage', (2, 34, 35)),
('conda-imageio', (2, 34, 35)),
('conda-joblib', (2, 34, 35)),
('workerbee', (2, 34, 35)),
# Python 3 only
('lsfm', (35,)),
# Python 2 only
('conda-menpo-pyvrml97', (2,)),
('conda-pathlib', (2,)),
# We currently build mayavi (and all its deps)
# so we can be Python 3
('conda-vtk', (2, 34, 35)),
('conda-traits', (2, 34, 35)),
('conda-envisage', (2, 34, 35)),
('conda-pyface', (2, 34, 35)),
('conda-apptools', (2, 34, 35)),
('conda-traitsui', (2, 34, 35)),
('conda-mayavi', (2, 34, 35)),
# And we also need the latest ipywidgets...
('conda-ipywidgets', (2, 34, 35)),
('conda-widgetsnbextension', (2, 34, 35)),
# Non-Python projects
('vrml97', (2,)),
('conda-flann', (2,)),
('conda-eigen', (2,)),
('conda-enum', (2,)),
('conda-vlfeat', (35,)),
('conda-opencv', (35,))
]]
PROJECT_NAMES = [p.name for p in PROJECTS]
def load_file(fpath):
with open(fpath, 'rt') as f:
text = f.read()
return text
def save_file(fpath, string):
with open(fpath, 'wt') as f:
f.write(string)
def repo_url(project_name):
return 'git@github.com:menpo/{}'.format(project_name)
def clone_repo(project_name):
repo_url = 'git@github.com:menpo/{}'.format(project_name)
print('cloning {}'.format(repo_url))
subprocess.check_output(['git', 'clone', repo_url])
def clone_all_repos(working_dir):
if not os.path.isdir(working_dir):
print('creating path at {}'.format(working_dir))
os.mkdir(working_dir)
for project in PROJECT_NAMES:
os.chdir(working_dir)
try:
clone_repo(project)
except CalledProcessError:
pass
def apply_to_all_projects(working_dir, f, clone=True):
if clone:
clone_all_repos(working_dir)
for project in PROJECT_NAMES:
repo_dir = os.path.join(working_dir, project)
print('processing {}...'.format(project))
f(repo_dir)
def perform_operation_on_file(filename, operation, repo_dir):
filepath = os.path.join(repo_dir, filename)
old_text = load_file(filepath)
new_text = operation(old_text)
if old_text != new_text:
save_file(filepath, new_text)
return True
else:
return False
def replace_str(old, new, text):
return text.replace(old, new)
appveyor_op = partial(perform_operation_on_file, 'appveyor.yml')
travis_op = partial(perform_operation_on_file, '.travis.yml')
def copy_and_yield(fsrc, fdst, length=1024*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
yield
def download_file(url, dest_path):
try:
from urllib2 import urlopen # Py2
except ImportError:
from urllib.request import urlopen # Py3
req = urlopen(url)
with open(str(dest_path), 'wb') as fp:
for _ in copy_and_yield(req, fp):
pass
req.close()
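# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the intended composition: clone every project repo
# and rewrite a string in each .travis.yml. The replaced strings and the
# working directory are placeholders, and a real run needs SSH access to
# the menpo GitHub organisation.
def _example_bump_condaci(working_dir='/tmp/menpo-repos'):
    bump = partial(replace_str,
                   'OLD_CONDACI_URL',   # placeholder for the string to replace
                   'NEW_CONDACI_URL')   # placeholder for its replacement
    apply_to_all_projects(working_dir, partial(travis_op, bump), clone=True)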
| 1,511
| 0
| 211
|
51009389239cc5dc8055739ff5e46bb49c3c734e
| 4,566
|
py
|
Python
|
scripts/bench/4_latencybreakdown.py
|
sirikata/sirikata
|
3a0d54a8c4778ad6e25ef031d461b2bc3e264860
|
[
"BSD-3-Clause"
] | 31
|
2015-01-28T17:01:10.000Z
|
2021-11-04T08:30:37.000Z
|
scripts/bench/4_latencybreakdown.py
|
pathorn/sirikata
|
5d366a822ef2fb57cd9f64cc4f6085c0a635fdfa
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/bench/4_latencybreakdown.py
|
pathorn/sirikata
|
5d366a822ef2fb57cd9f64cc4f6085c0a635fdfa
|
[
"BSD-3-Clause"
] | 9
|
2015-08-02T18:39:49.000Z
|
2019-10-11T10:32:30.000Z
|
#!/usr/bin/python
# flow_fairness.py
#
# Runs a simulation with objects continually messaging each other.
# The analysis then generates statistics about the actual rates
# achieved and the weights. The output can be used to generate
# fairness graphs.
import sys
import subprocess
import os.path
# FIXME It would be nice to have a better way of making this script able to find
# other modules in sibling packages
sys.path.insert(0, sys.path[0]+"/..")
import util.stdio
from cluster.config import ClusterConfig
from cluster.sim import ClusterSimSettings,ClusterSim
import flow_fairness
if __name__ == "__main__":
nss=16
nobjects = 1000#19000#326
packname = '1a_objects.pack'
numoh = 1
cc = ClusterConfig()
import math;
edgex=int(math.sqrt(nss))
edgey=int(nss/int(math.sqrt(nss)))
cs = ClusterSimSettings(cc, nss, (edgex,edgey), numoh)
cs.region_weight_options = '--flatness=8'
cs.debug = True
cs.valgrind = False
cs.profile = False
cs.oprofile = False
cs.loglevels["oh"]="insane";
cs.loc = 'standard'
cs.blocksize = 256
cs.tx_bandwidth = 50000000
cs.rx_bandwidth = 5000000
cs.oseg_cache_clean_group=25;
cs.oseg_cache_entry_lifetime= "10000s"
## Use pack across multiple ohs
#cs.num_random_objects = 0
#cs.num_pack_objects = nobjects / cs.num_oh
#cs.object_pack = packname
#cs.pack_dump = True
cs.num_random_objects = 0
cs.object_sl_file='sl.trace.'+str(edgex)+'x'+str(edgey);
cs.object_sl_center=(384,384,0);
cs.object_connect_phase = '20s'
cs.center=[cs.blocksize*edgex/2,cs.blocksize*edgey/2,0]
cs.zrange=(-10000,10000)
cs.object_static = 'static'
cs.object_query_frac = 0.0
cs.duration = '420s'
rates = sys.argv[1:]
nobjectlist=[250,500,750,1000,1250,1500,1750,2000];#+=
nobjectlist+=[2500,3000,3500,4000,4500]+range(5000,20000,1000)
nobjectlist.reverse()
#nobjectlist = [5000];
#nobjectlist=[19000]
caches=[256]*len(nobjectlist)
#caches+=[250]*len(nobjectlist)
#caches+=[750]*len(nobjectlist)
#caches+=[75]*len(nobjectlist)#[10,15,20,25,30,35,40]
#nobjectlist=nobjectlist*4#run with 4 caches
cs.oseg_cache_size=caches[0];
cs.oseg_cache_selector='cache_communication';
plan = FlowPairFairness(cc, cs, scheme='csfq', payload=1024)
oldoptions=plan.cs.scenario_options;
done={}
adder=""
print "SCENARIO OPTIONS ",plan.cs.scenario_options
for rate in rates:
for nobjectsindex in range(len(nobjectlist)):
cs.oseg_cache_size=caches[nobjectsindex];
nobjects=nobjectlist[nobjectsindex]
if nobjects in done:
adder+='c';
done={}
msgfile='messagetrace.'+str(nobjects);
global trmsgfile
cs.num_sl_objects=nobjects;
cs.message_trace_file=msgfile;
trace_location=cs.pack_dir+'/'+msgfile
trmsgfile=trace_location
print 'loading file '+cs.object_sl_file+' with trace '+msgfile
plan.run(rate)
plan.analysis()
nam='endtoend';
if len(rates)>1:
nam+='-'+str(rate);
nam+=adder
nam+='.'
nam+=str(nobjects)
os.rename(flow_fairness.get_latency_logfile_name(rate),nam);
done[nobjects]=True
plan.graph()
| 31.489655
| 112
| 0.622646
|
#!/usr/bin/python
# flow_fairness.py
#
# Runs a simulation with objects continually messaging each other.
# The analysis then generates statistics about the actual rates
# achieved and the weights. The output can be used to generate
# fairness graphs.
import sys
import subprocess
import os.path
# FIXME It would be nice to have a better way of making this script able to find
# other modules in sibling packages
sys.path.insert(0, sys.path[0]+"/..")
import util.stdio
from cluster.config import ClusterConfig
from cluster.sim import ClusterSimSettings,ClusterSim
import flow_fairness
class FlowPairFairness(flow_fairness.FlowFairness):
def _setup_cluster_sim(self, rate, io):
self.cs.scenario = 'loadpackettrace'
if self.local: localval = 'true'
else: localval = 'false'
self.cs.object_simple='false'
self.cs.scenario_options = ' '.join(
['--num-pings-per-second=' + str(rate),
'--num-objects-per-server=512',
'--ping-size=' + str(self.payload_size),
'--local=' + localval,
" --tracefile="+trmsgfile
]
)
self.cs.odp_flow_scheduler = self.scheme
if 'object' not in self.cs.traces['simoh']: self.cs.traces['simoh'].append('object')
if 'ping' not in self.cs.traces['simoh']: self.cs.traces['simoh'].append('ping')
if 'message' not in self.cs.traces['all']: self.cs.traces['all'].append('message')
#if 'oseg-cumulative' not in self.cs.traces['space']: self.cs.traces['space'].append('oseg-cumulative');
cluster_sim = ClusterSim(self.cc, self.cs, io=io)
return cluster_sim
if __name__ == "__main__":
nss=16
nobjects = 1000#19000#326
packname = '1a_objects.pack'
numoh = 1
cc = ClusterConfig()
import math;
edgex=int(math.sqrt(nss))
edgey=int(nss/int(math.sqrt(nss)))
cs = ClusterSimSettings(cc, nss, (edgex,edgey), numoh)
cs.region_weight_options = '--flatness=8'
cs.debug = True
cs.valgrind = False
cs.profile = False
cs.oprofile = False
cs.loglevels["oh"]="insane";
cs.loc = 'standard'
cs.blocksize = 256
cs.tx_bandwidth = 50000000
cs.rx_bandwidth = 5000000
cs.oseg_cache_clean_group=25;
cs.oseg_cache_entry_lifetime= "10000s"
## Use pack across multiple ohs
#cs.num_random_objects = 0
#cs.num_pack_objects = nobjects / cs.num_oh
#cs.object_pack = packname
#cs.pack_dump = True
cs.num_random_objects = 0
cs.object_sl_file='sl.trace.'+str(edgex)+'x'+str(edgey);
cs.object_sl_center=(384,384,0);
cs.object_connect_phase = '20s'
cs.center=[cs.blocksize*edgex/2,cs.blocksize*edgey/2,0]
cs.zrange=(-10000,10000)
cs.object_static = 'static'
cs.object_query_frac = 0.0
cs.duration = '420s'
rates = sys.argv[1:]
nobjectlist=[250,500,750,1000,1250,1500,1750,2000];#+=
nobjectlist+=[2500,3000,3500,4000,4500]+range(5000,20000,1000)
nobjectlist.reverse()
#nobjectlist = [5000];
#nobjectlist=[19000]
caches=[256]*len(nobjectlist)
#caches+=[250]*len(nobjectlist)
#caches+=[750]*len(nobjectlist)
#caches+=[75]*len(nobjectlist)#[10,15,20,25,30,35,40]
#nobjectlist=nobjectlist*4#run with 4 caches
cs.oseg_cache_size=caches[0];
cs.oseg_cache_selector='cache_communication';
plan = FlowPairFairness(cc, cs, scheme='csfq', payload=1024)
oldoptions=plan.cs.scenario_options;
done={}
adder=""
print "SCENARIO OPTIONS ",plan.cs.scenario_options
for rate in rates:
for nobjectsindex in range(len(nobjectlist)):
cs.oseg_cache_size=caches[nobjectsindex];
nobjects=nobjectlist[nobjectsindex]
if nobjects in done:
adder+='c';
done={}
msgfile='messagetrace.'+str(nobjects);
global trmsgfile
cs.num_sl_objects=nobjects;
cs.message_trace_file=msgfile;
trace_location=cs.pack_dir+'/'+msgfile
trmsgfile=trace_location
print 'loading file '+cs.object_sl_file+' with trace '+msgfile
plan.run(rate)
plan.analysis()
nam='endtoend';
if len(rates)>1:
nam+='-'+str(rate);
nam+=adder
nam+='.'
nam+=str(nobjects)
os.rename(flow_fairness.get_latency_logfile_name(rate),nam);
done[nobjects]=True
plan.graph()
| 1,015
| 30
| 48
|
733ac65472342a53c51bb203028ce20ee9757d52
| 3,537
|
py
|
Python
|
python/sparkdl/transformers/keras_applications.py
|
alonsoir/spark-deep-learning
|
3f668d9b4a0aa2ef6fe05df5bf5c1d705cd2530d
|
[
"Apache-2.0"
] | 54
|
2017-10-12T04:42:18.000Z
|
2021-08-24T08:47:03.000Z
|
python/sparkdl/transformers/keras_applications.py
|
alonsoir/spark-deep-learning
|
3f668d9b4a0aa2ef6fe05df5bf5c1d705cd2530d
|
[
"Apache-2.0"
] | null | null | null |
python/sparkdl/transformers/keras_applications.py
|
alonsoir/spark-deep-learning
|
3f668d9b4a0aa2ef6fe05df5bf5c1d705cd2530d
|
[
"Apache-2.0"
] | 17
|
2017-10-12T07:34:10.000Z
|
2020-03-12T12:25:25.000Z
|
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
import keras.backend as K
from keras.applications import inception_v3, xception
import tensorflow as tf
from sparkdl.transformers.utils import (imageInputPlaceholder, InceptionV3Constants)
"""
Essentially a factory function for getting the correct KerasApplicationModel class
for the network name.
"""
KERAS_APPLICATION_MODELS = {
"InceptionV3": InceptionV3Model,
"Xception": XceptionModel
}
| 31.300885
| 91
| 0.685327
|
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
import keras.backend as K
from keras.applications import inception_v3, xception
import tensorflow as tf
from sparkdl.transformers.utils import (imageInputPlaceholder, InceptionV3Constants)
"""
Essentially a factory function for getting the correct KerasApplicationModel class
for the network name.
"""
def getKerasApplicationModel(name):
try:
return KERAS_APPLICATION_MODELS[name]()
except KeyError:
raise ValueError("%s is not a supported model. Supported models: %s" %
(name, ', '.join(KERAS_APPLICATION_MODELS.keys())))
class KerasApplicationModel:
__metaclass__ = ABCMeta
def getModelData(self, featurize):
sess = tf.Session()
with sess.as_default():
K.set_learning_phase(0)
inputImage = imageInputPlaceholder(nChannels=3)
preprocessed = self.preprocess(inputImage)
model = self.model(preprocessed, featurize)
return dict(inputTensorName=inputImage.name,
outputTensorName=model.output.name,
session=sess,
inputTensorSize=self.inputShape(),
outputMode="vector")
@abstractmethod
def preprocess(self, inputImage):
pass
@abstractmethod
def model(self, preprocessed, featurize):
pass
@abstractmethod
def inputShape(self):
pass
def _testPreprocess(self, inputImage):
"""
For testing only. The preprocess function to be called before kerasModel.predict().
"""
return self.preprocess(inputImage)
@abstractmethod
def _testKerasModel(self, include_top):
"""
For testing only. The keras model object to compare to.
"""
pass
class InceptionV3Model(KerasApplicationModel):
def preprocess(self, inputImage):
return inception_v3.preprocess_input(inputImage)
def model(self, preprocessed, featurize):
return inception_v3.InceptionV3(input_tensor=preprocessed, weights="imagenet",
include_top=(not featurize))
def inputShape(self):
return InceptionV3Constants.INPUT_SHAPE
def _testKerasModel(self, include_top):
return inception_v3.InceptionV3(weights="imagenet", include_top=include_top)
class XceptionModel(KerasApplicationModel):
def preprocess(self, inputImage):
return xception.preprocess_input(inputImage)
def model(self, preprocessed, featurize):
return xception.Xception(input_tensor=preprocessed, weights="imagenet",
include_top=(not featurize))
def inputShape(self):
return (299, 299)
def _testKerasModel(self, include_top):
return xception.Xception(weights="imagenet", include_top=include_top)
KERAS_APPLICATION_MODELS = {
"InceptionV3": InceptionV3Model,
"Xception": XceptionModel
}
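# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the factory above. Building the graph pulls the
# ImageNet weights on first use, so this is a sketch rather than something
# to run at import time.
def _example_featurizer_graph():
    model = getKerasApplicationModel("InceptionV3")
    # featurize=True drops the classification top, yielding a feature vector.
    model_data = model.getModelData(featurize=True)
    return (model_data["inputTensorName"],
            model_data["outputTensorName"],
            model_data["inputTensorSize"])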
| 1,583
| 619
| 305
|
006928383270000981141a2d160f55e1a9bc214b
| 773
|
py
|
Python
|
src/pyscripts/xsbs/plugins.py
|
harryd/xsbs-minimal
|
aaceaeda1d3fe6cdd7182484989eaa74e9ae9518
|
[
"BSD-3-Clause"
] | 1
|
2018-05-22T13:42:47.000Z
|
2018-05-22T13:42:47.000Z
|
src/pyscripts/xsbs/plugins.py
|
harryd/xsbs-minimal
|
aaceaeda1d3fe6cdd7182484989eaa74e9ae9518
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyscripts/xsbs/plugins.py
|
harryd/xsbs-minimal
|
aaceaeda1d3fe6cdd7182484989eaa74e9ae9518
|
[
"BSD-3-Clause"
] | null | null | null |
from elixir import setup_all, create_all
import os, sys
# Initialize these before loading plugins
import xsbs.db
import xsbs.events
import xsbs.log
import xsbs.ban
import xsbs.users
import xsbs.server
import xsbs.game
import xsbs.teamcontrol
import xsbs.persistteam
import xsbs.demo
import xsbs.http
import xsbs.http.jsonapi
main()
| 21.472222
| 113
| 0.756792
|
from elixir import setup_all, create_all
import os, sys
# Initialize these before loading plugins
import xsbs.db
import xsbs.events
import xsbs.log
import xsbs.ban
import xsbs.users
import xsbs.server
import xsbs.game
import xsbs.teamcontrol
import xsbs.persistteam
import xsbs.demo
import xsbs.http
import xsbs.http.jsonapi
class PluginManager(object):
def __init__(self, plugins_path='plugins'):
self.plugins_path = plugins_path
self.plugin_modules = []
def loadPlugins(self):
files = os.listdir(self.plugins_path)
for file in files:
if file[0] != '.':
self.plugin_modules.append(__import__(os.path.basename(self.plugins_path) + '.' + os.path.splitext(file)[0]))
def main():
pm = PluginManager()
pm.loadPlugins()
setup_all()
create_all()
main()
| 339
| 7
| 92
|
67ea6b4220213acf076591a0da456dbff315ee05
| 8,959
|
py
|
Python
|
fuzzytorch/monitors.py
|
opimentel-github/fuzzy-torch
|
4f1e06e6fc445cdec23e9762ca20408feeb296e3
|
[
"MIT"
] | 1
|
2021-03-12T08:49:15.000Z
|
2021-03-12T08:49:15.000Z
|
fuzzytorch/monitors.py
|
opimentel-github/fuzzy-torch
|
4f1e06e6fc445cdec23e9762ca20408feeb296e3
|
[
"MIT"
] | null | null | null |
fuzzytorch/monitors.py
|
opimentel-github/fuzzy-torch
|
4f1e06e6fc445cdec23e9762ca20408feeb296e3
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from . import _C
import os
import torch.nn as nn
import numpy as np
from . import losses as ft_losses
from . import metrics as ft_metrics
from . import optimizers as ft_optimizers
from . import exceptions as ex
import fuzzytools.files as files
from fuzzytools.counters import Counter
from fuzzytools.datascience.xerror import XError
import pandas as pd
from fuzzytools.dataframes import DFBuilder
from copy import copy, deepcopy
###################################################################################################################################################
### repr
### history methods
### along training methods
### get statistics
### file methods
| 31.882562
| 147
| 0.724969
|
from __future__ import print_function
from __future__ import division
from . import _C
import os
import torch.nn as nn
import numpy as np
from . import losses as ft_losses
from . import metrics as ft_metrics
from . import optimizers as ft_optimizers
from . import exceptions as ex
import fuzzytools.files as files
from fuzzytools.counters import Counter
from fuzzytools.datascience.xerror import XError
import pandas as pd
from fuzzytools.dataframes import DFBuilder
from copy import copy, deepcopy
###################################################################################################################################################
class LossMonitor(object):
def __init__(self, loss, optimizer, metrics,
save_mode:str=_C.SM_NO_SAVE,
target_metric_crit:str=None,
k_counter_duration:int=_C.K_COUNTER_DURATION,
val_epoch_counter_duration:int=_C.VAL_EPOCH_COUNTER_DURATION,
earlystop_epoch_duration:int=_C.EARLYSTOP_EPOCH_DURATION,
**kwargs):
### CHECKS
assert isinstance(loss, ft_losses.FTLoss)
metrics = [metrics] if isinstance(metrics, ft_metrics.FTMetric) else metrics
assert isinstance(metrics, list) and all([isinstance(metric, ft_metrics.FTMetric) for metric in metrics])
assert len([metric.name for metric in metrics])==len(set([metric.name for metric in metrics]))
assert isinstance(optimizer, ft_optimizers.LossOptimizer)
self.loss = loss
self.optimizer = optimizer
self.metrics = metrics
self.save_mode = save_mode
self.target_metric_crit = metrics[0].name if target_metric_crit is None else target_metric_crit
self.counter_k = Counter({'k': k_counter_duration})
self.counter_epoch = Counter({'val_epoch':val_epoch_counter_duration, 'earlystop_epoch':earlystop_epoch_duration})
self.name = loss.name
self.best_epoch = np.infty
self.last_saved_filedir = None
self.reset()
def reset(self):
self.best_value = None
self.loss_df = DFBuilder()
self.opt_df = DFBuilder()
self.loss_df_epoch = DFBuilder()
self.metrics_df_epoch = DFBuilder()
self.counter_k.reset()
self.counter_epoch.reset()
### repr
def __repr__(self):
def get_metrics_repr():
return f' (target_metric_crit={self.target_metric_crit})' if self.save_mode in [_C.SM_ONLY_INF_METRIC, _C.SM_ONLY_SUP_METRIC] else ''
txt = ''
txt += f'[{self.name}]'+'\n'
txt += f' - opt-parameters={len(self.optimizer):,}[p] - device={self.optimizer.get_device()}'+'\n'
txt += f' - save-mode={self.save_mode}{get_metrics_repr()}'+'\n'
txt += f' - counter_k={self.counter_k} - counter_epoch={self.counter_epoch}'+'\n'
return txt[:-1]
def get_save_dict(self):
info = {
'save_mode':self.save_mode,
'target_metric_crit':self.target_metric_crit,
'counter_k':self.counter_k,
'counter_epoch':self.counter_epoch,
'best_epoch':self.best_epoch,
'last_saved_filedir':self.last_saved_filedir,
}
d = {
'info':info,
'loss_df':self.loss_df,
'opt_df':self.opt_df,
'loss_df_epoch':self.loss_df_epoch,
'metrics_df_epoch':self.metrics_df_epoch,
}
return d
def load_from_dict(self, _d):
d = deepcopy(_d)
info = d['info']
self.save_mode = info['save_mode']
self.target_metric_crit = info['target_metric_crit']
self.counter_k = info['counter_k']
self.counter_epoch = info['counter_epoch']
self.best_epoch = info['best_epoch']
self.last_saved_filedir = info['last_saved_filedir']
self.loss_df = d['loss_df']
self.opt_df = d['opt_df']
self.loss_df_epoch = d['loss_df_epoch']
self.metrics_df_epoch = d['metrics_df_epoch']
### history methods
def add_loss_history_k(self, loss,
dt=0,
):
if self.counter_k.check('k'):
assert isinstance(loss, ft_losses.BatchLoss)
d = loss.get_info()
#index = self.counter_k.get_global_count()
index = None
d.update({
'_dt':dt,
})
self.loss_df.append(index, d)
def add_opt_history_epoch(self):
d = self.optimizer.get_info()
#index = self.counter_epoch.get_global_count()
index = None
d.update({
'_k':self.counter_k.get_global_count(),
})
self.opt_df.append(index, d)
def add_loss_history_epoch(self, loss,
dt=0,
set_name=None,
):
if self.counter_epoch.check('val_epoch'):
assert isinstance(loss, ft_losses.BatchLoss)
d = loss.get_info()
#index = self.counter_epoch.get_global_count()
index = None
d.update({
'_dt':dt,
'_set':set_name,
})
self.loss_df_epoch.append(index, d)
def add_metric_history_epoch(self, metrics_dict,
dt=0,
set_name=None,
):
if self.counter_epoch.check('val_epoch'):
d = {}
for mn in metrics_dict.keys():
metric = metrics_dict[mn]
assert isinstance(metric, ft_metrics.BatchMetric)
d[mn] = metric.get_info()['_metric']
d.update({
'_dt':dt,
'_set':set_name,
})
#index = f'{self.counter_epoch.get_global_count()}.set_name'
index = None
self.metrics_df_epoch.append(index, d)
#print(self.metrics_df_epoch.get_df())
def get_metric_names(self):
return [m.name for m in self.metrics]
### along training methods
def k_update(self):
self.counter_k.update()
def epoch_update(self):
self.optimizer.update()
self.counter_epoch.update()
if self.counter_epoch.check('earlystop_epoch'):
raise ex.TrainingInterruptedError()
def set_last_saved_filedir(self, last_saved_filedir):
self.last_saved_filedir = last_saved_filedir
def needs_save(self):
return not self.save_mode==_C.SM_NO_SAVE
def train(self):
self.optimizer.train()
def eval(self):
self.optimizer.eval()
def needs_evaluation(self):
return self.counter_epoch.check('val_epoch')
def reset_early_stop(self):
self.counter_epoch.reset_cn('earlystop_epoch')
### get statistics
def get_best_epoch(self):
return self.best_epoch
def set_best_epoch(self, best_epoch):
self.best_epoch = best_epoch
def get_time_per_iteration(self):
loss_df = self.loss_df.get_df()
return XError([v for v in loss_df['_dt'].values])
def get_evaluation_set_names(self):
loss_df_epoch = self.loss_df_epoch.get_df()
return list(np.unique(loss_df_epoch['_set'].values))
def get_time_per_epoch_set(self, set_name):
loss_df_epoch = self.loss_df_epoch.get_df()
return XError([v for v in loss_df_epoch['_dt'][loss_df_epoch['_set'].isin([set_name])].values])
def get_time_per_epoch(self): # fixme only eval times
evaluation_set_names = self.get_evaluation_set_names()
return sum([self.get_time_per_epoch_set(set_name) for set_name in evaluation_set_names])
def get_total_time(self):
evaluation_set_names = self.get_evaluation_set_names()
loss_df = self.loss_df.get_df()
loss_df_epoch = self.loss_df_epoch.get_df()
total_time = 0
total_time += loss_df['_dt'].values.sum()
total_time += sum([loss_df_epoch['_dt'][loss_df_epoch['_set'].isin([set_name])].values.sum() for set_name in evaluation_set_names]) # fixme
return total_time
### file methods
def remove_filedir(self, filedir):
if filedir is None:
return
files.delete_filedir(filedir, verbose=0) # remove last best model
def check_save_condition(self, set_name):
if self.save_mode==_C.SM_NO_SAVE:
return False
elif self.save_mode==_C.SM_ALL:
return True
elif self.save_mode==_C.SM_ONLY_ALL:
self.remove_filedir(self.last_saved_filedir) # remove last best model
return True
elif self.save_mode==_C.SM_ONLY_INF_LOSS:
loss_df_epoch = self.loss_df_epoch.get_df()
loss_evolution = [np.inf]+[v for v in loss_df_epoch['_loss'][loss_df_epoch['_set'].isin([set_name])].values]
loss_history = loss_evolution[:-1] # history
actual_loss = loss_evolution[-1] # last one
if actual_loss<np.min(loss_history): # must save and delete
self.remove_filedir(self.last_saved_filedir) # remove last best model
self.best_value = actual_loss
return True
else:
return False
elif self.save_mode==_C.SM_ONLY_INF_METRIC:
metrics_df_epoch = self.metrics_df_epoch.get_df()
metric_evolution = [np.inf]+[v for v in metrics_df_epoch[self.target_metric_crit][metrics_df_epoch['_set'].isin([set_name])].values]
metric_history = metric_evolution[:-1] # history
actual_metric = metric_evolution[-1] # last one
if actual_metric<np.min(metric_history): # must save and delete
self.remove_filedir(self.last_saved_filedir) # remove last best model
self.best_value = actual_metric
return True
else:
return False
elif self.save_mode==_C.SM_ONLY_SUP_METRIC:
metrics_df_epoch = self.metrics_df_epoch.get_df()
metric_evolution = [-np.inf]+[v for v in metrics_df_epoch[self.target_metric_crit][metrics_df_epoch['_set'].isin([set_name])].values]
metric_history = metric_evolution[:-1] # history
actual_metric = metric_evolution[-1] # last one
if actual_metric>np.max(metric_history): # must save and delete
self.remove_filedir(self.last_saved_filedir) # remove last best model
self.best_value = actual_metric
return True
else:
return False
else:
raise Exception(f'save mode {self.save_mode} not supported')
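
check_save_condition prepends an infinity guard to the recorded history so the first evaluated epoch always counts as an improvement, and only then deletes the previously saved checkpoint. A minimal sketch of that comparison for the SM_ONLY_INF_LOSS case (the helper name below is made up for illustration):

import numpy as np

def should_save(past_losses, new_loss):
    # Guard with +inf so an empty history still lets the first epoch save.
    history = [np.inf] + list(past_losses)
    return new_loss < np.min(history)

print(should_save([], 0.90))            # True: the first evaluation is always saved
print(should_save([0.90, 0.70], 0.80))  # False: 0.80 does not improve on 0.70
print(should_save([0.90, 0.70], 0.50))  # True: new best, the old checkpoint gets removed
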
| 7,539
| 5
| 665
|
9fca07bdb9a6dbdf43649b5340b88d177ac4d6ab
| 7,256
|
py
|
Python
|
leyline/tree_drawer.py
|
bentheiii/leyline
|
e9850b6f30a0aaa453ee1fcbd22fe6bf4c49ce0b
|
[
"MIT"
] | null | null | null |
leyline/tree_drawer.py
|
bentheiii/leyline
|
e9850b6f30a0aaa453ee1fcbd22fe6bf4c49ce0b
|
[
"MIT"
] | null | null | null |
leyline/tree_drawer.py
|
bentheiii/leyline
|
e9850b6f30a0aaa453ee1fcbd22fe6bf4c49ce0b
|
[
"MIT"
] | null | null | null |
from _ast import AST, Return, Expr, Str, Call, Attribute, Name, Yield, Raise
from abc import ABC
from ast import parse, iter_child_nodes
from functools import reduce
import inspect
from textwrap import dedent
from typing import Type, List, Tuple, Collection, Optional
import re
from leyline import Node
from leyline.gviz import Digraph, GraphNode, GraphEdge
ColorName = Optional[str]
StyleName = Optional[str]
| 34.717703
| 114
| 0.570149
|
from _ast import AST, Return, Expr, Str, Call, Attribute, Name, Yield, Raise
from abc import ABC
from ast import parse, iter_child_nodes
from functools import reduce
import inspect
from textwrap import dedent
from typing import Type, List, Tuple, Collection, Optional
import re
from leyline import Node
from leyline.gviz import Digraph, GraphNode, GraphEdge
class NodeData:
def __init__(self, node: Node):
self.node = node
self.return_values: List[Tuple[AST, str]] = []
self.yield_values: List[Tuple[AST, str]] = []
self.next_nodes: List[Tuple[Node, str]] = []
self.raise_values: List[Tuple[AST, str]] = []
@classmethod
def from_node(cls, node: Node):
def extract_node_name(r_value: AST):
if isinstance(r_value, Call) \
and isinstance(r_value.func, Attribute) \
and isinstance(r_value.func.value, Name) \
and r_value.func.value.id == 'self':
return r_value.func.attr
return None
def get_of_type(ast: AST, t: Type[AST]):
prev = None
for c in iter_child_nodes(ast):
if isinstance(c, t):
label = ''
if isinstance(prev, Expr):
prev = prev.value
if isinstance(prev, Str):
label = prev.s
yield (c, label)
yield from get_of_type(c, t)
prev = c
ret = cls(node)
source = inspect.getsource(node)
source = dedent(source)
ast = parse(source)
for (r, label) in get_of_type(ast, Return):
targ_node_name = extract_node_name(r.value)
if targ_node_name is None:
is_return = True
else:
targ_node = getattr(node.owner, targ_node_name, None)
if not targ_node:
raise NameError(targ_node_name)
is_return = not isinstance(targ_node, Node)
if is_return:
ret.return_values.append((r.value, label))
else:
ret.next_nodes.append((targ_node, label))
for (y, label) in get_of_type(ast, Yield):
ret.yield_values.append((y.value, label))
for (r, label) in get_of_type(ast, Raise):
ret.raise_values.append((r.exc, label))
return ret
class GraphDrawer(ABC):
def node(self, data: NodeData) -> GraphNode:
return GraphNode(data.node.__name__)
def edge(self, origin: NodeData, goal: Node, labels: Collection[str]) -> GraphEdge:
return GraphEdge(origin.node.__name__, goal.__name__)
def _graph(self, ley: Type['Ley']) -> Digraph:
return Digraph()
def draw_ley(self, ley: Type['Ley']):
digraph = self._graph(ley)
node_stack = [ley.__start__]
known = {ley.__start__}
while node_stack:
node = node_stack.pop()
data = NodeData.from_node(node)
gnode = self.node(data)
digraph.nodes.append(gnode)
nexts = {}
for next_, label in data.next_nodes:
if next_ in nexts:
nexts[next_].append(label)
else:
nexts[next_] = [label]
for n, labels in nexts.items():
if n not in known:
node_stack.append(n)
known.add(n)
edge = self.edge(data, n, labels)
digraph.edges.append(edge)
return digraph.text()
ColorName = Optional[str]
StyleName = Optional[str]
class SimpleDrawer(GraphDrawer):
def __init__(self, start_title=None, title=...,
start_node_color: ColorName = 'green', dead_end_color: ColorName = 'red',
may_return_color: ColorName = 'yellow', may_raise_color: ColorName = 'lightblue',
may_yield_color: ColorName = 'greenyellow',
only_next_color: ColorName = 'firebrick', loop_style: StyleName = 'dashed'):
self.start_title = start_title
self.title = title
self.start_node_color = start_node_color
self.dead_end_color = dead_end_color
self.may_return_color = may_return_color
self.may_raise_color = may_raise_color
self.may_yield_color = may_yield_color
self.only_next_color = only_next_color
self.loop_style = loop_style
def node(self, data: NodeData) -> GraphNode:
ret = super().node(data)
if data.node.is_start():
ret.border_color = self.start_node_color
if self.start_title:
ret.additional_attributes['label'] = f'"{self.start_title}"'
if all(n == data.node for (n, _) in data.next_nodes):
ret.add_fill_color(self.dead_end_color)
elif data.return_values:
ret.add_fill_color(self.may_return_color)
if data.raise_values:
ret.add_fill_color(self.may_raise_color)
if data.yield_values:
ret.add_fill_color(self.may_yield_color)
if data.node.__doc__:
ret.additional_attributes['tooltip'] = f'"{data.node.__doc__.strip()}"'
return ret
num_pattern = re.compile(r'^(?P<prefix>.)*?(?P<num>[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?)(?P<postfix>.)*?$')
@classmethod
def combine_labels(cls, a: str, b: str):
if not b:
return a
if not a:
return b
a_num_match = cls.num_pattern.fullmatch(a)
b_num_match = cls.num_pattern.fullmatch(b)
if a_num_match and b_num_match \
and a_num_match['prefix'] == b_num_match['prefix'] \
and a_num_match['postfix'] == b_num_match['postfix']:
return a_num_match['prefix'] \
+ str(float(a_num_match['num']) + float(b_num_match['num'])) \
+ a_num_match['postfix']
return a + ', ' + b
def edge(self, origin: NodeData, goal: Node, labels: Collection[str]) -> GraphEdge:
ret = super().edge(origin, goal, labels)
label = reduce(self.combine_labels, labels)
if label:
ret.additional_attributes['label'] = '"' + label + '"'
if origin.node == goal:
ret.styles.append('dashed')
ret.additional_attributes['dir'] = 'back'
elif not origin.return_values \
and all((n in (goal, origin.node)) for (n, _) in origin.next_nodes):
ret.add_color(self.only_next_color)
return ret
def _graph(self, ley: Type['Ley']):
ret = super()._graph(ley)
if self.title:
title = self.title
if title is ...:
title = ley.__name__
ret.title = title
return ret
class GeneratorDrawer(SimpleDrawer):
def __init__(self, *args, wont_yield_style='dashed', **kwargs):
super().__init__(*args, may_yield_color=None, **kwargs)
self.wont_yield_style = wont_yield_style
def node(self, data: NodeData):
ret = super().node(data)
if not data.yield_values:
ret.border_style = self.wont_yield_style
return ret
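
NodeData.from_node recovers graph edges by parsing a node's source with the ast module and pairing each Return/Yield/Raise with the bare string expression that immediately precedes it, which acts as an edge label. A small self-contained sketch of that walk (the sample function and its label are made up; ast.Constant is used here instead of the deprecated Str node):

import ast
from textwrap import dedent

source = dedent('''
    def choose(x):
        if x > 0:
            "positive branch"
            return self.next_state()
        return None
''')

def returns_with_labels(node, prev=None):
    # Mirror of get_of_type(ast, Return): a string expression right before a
    # return becomes that return's label.
    for child in ast.iter_child_nodes(node):
        if isinstance(child, ast.Return):
            label = ''
            if isinstance(prev, ast.Expr) and isinstance(prev.value, ast.Constant) \
                    and isinstance(prev.value.value, str):
                label = prev.value.value
            yield child, label
        yield from returns_with_labels(child)
        prev = child

for ret, label in returns_with_labels(ast.parse(source)):
    print(ast.dump(ret.value), '->', repr(label))
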
| 6,228
| 359
| 252
|
3d55f92f4fbef80ed3856d2d699e6a8ff8285101
| 675
|
py
|
Python
|
zip_function.py
|
sm1216/python
|
ccb78875dcb95cc18bf8d2a66896dedbb0fe7b41
|
[
"MIT"
] | null | null | null |
zip_function.py
|
sm1216/python
|
ccb78875dcb95cc18bf8d2a66896dedbb0fe7b41
|
[
"MIT"
] | null | null | null |
zip_function.py
|
sm1216/python
|
ccb78875dcb95cc18bf8d2a66896dedbb0fe7b41
|
[
"MIT"
] | null | null | null |
list1 =[1,2,3]
list2 =["one","two","three"]
zipped = list(zip(list1,list2))
print(zipped)
print("####################")
unzipped =list(zip(*zipped))
print(unzipped)
print("####################")
for (l1, l2) in zip(list1,list2):
print(l1)
print(l2)
print("####################")
items = ["apples" ,"banana " , "orange"]
counts =[13,12,11]
prices =[20,30,40]
sentences =[]
for (item, count, price) in zip(items, counts, prices):
    item, count, price = str(item), str(count), str(price)
    sentence = "I bought " + count + " " + item + " at " + price + "."
    sentences.append(sentence)
print (sentences)
print("done")
| 21.09375
| 75
| 0.539259
|
list1 =[1,2,3]
list2 =["one","two","three"]
zipped = list(zip(list1,list2))
print(zipped)
print("####################")
unzipped =list(zip(*zipped))
print(unzipped)
print("####################")
for (l1, l2) in zip(list1,list2):
print(l1)
print(l2)
print("####################")
items = ["apples" ,"banana " , "orange"]
counts =[13,12,11]
prices =[20,30,40]
sentences =[]
for (item, count, price) in zip(items, counts, prices):
    item, count, price = str(item), str(count), str(price)
    sentence = "I bought " + count + " " + item + " at " + price + "."
    sentences.append(sentence)
print (sentences)
print("done")
| 0
| 0
| 0
|
3ba318c481e1408482608b403e889b6d462281f5
| 988
|
py
|
Python
|
examples/bokeh_interactive_units.py
|
Narsil/chempy
|
ac7217f45a8cfe3b11ca771f78f0a04c07708818
|
[
"BSD-2-Clause"
] | null | null | null |
examples/bokeh_interactive_units.py
|
Narsil/chempy
|
ac7217f45a8cfe3b11ca771f78f0a04c07708818
|
[
"BSD-2-Clause"
] | null | null | null |
examples/bokeh_interactive_units.py
|
Narsil/chempy
|
ac7217f45a8cfe3b11ca771f78f0a04c07708818
|
[
"BSD-2-Clause"
] | 1
|
2022-03-21T09:01:48.000Z
|
2022-03-21T09:01:48.000Z
|
"""
Interactive kinetics app with sliders (with units).
Start by running:
$ bokeh serve interactive.py
Add --show argument or navigate to:
http://localhost:5006/interactive
"""
from collections import defaultdict
import sys
from chempy.util.bkh import integration_with_sliders
from chempy.units import SI_base_registry, default_units as u
from bokeh_interactive import get_rsys
if __name__.startswith('bk_'):
from bokeh.io import curdoc
kf, kb = 3/u.molar/u.s, .3/u.s
curdoc().add_root(integration_with_sliders(
get_rsys(kf, kb), tend=3*u.s,
c0=defaultdict(lambda: 0*u.molar, {'Fe+3': .9*u.molar, 'SCN-': .7*u.molar}),
parameters={'kf': kf, 'kb': kb},
get_odesys_kw=dict(
unit_registry=SI_base_registry,
output_conc_unit=u.molar,
output_time_unit=u.second
)
))
elif __name__ == '__main__':
import warnings
warnings.warn("Run using 'bokeh serve %s'" % __file__)
sys.exit(1)
| 29.058824
| 84
| 0.672065
|
"""
Interactive kinetics app with sliders (with units).
Start by running:
$ bokeh serve interactive.py
Add --show argument or navigate to:
http://localhost:5006/interactive
"""
from collections import defaultdict
import sys
from chempy.util.bkh import integration_with_sliders
from chempy.units import SI_base_registry, default_units as u
from bokeh_interactive import get_rsys
if __name__.startswith('bk_'):
from bokeh.io import curdoc
kf, kb = 3/u.molar/u.s, .3/u.s
curdoc().add_root(integration_with_sliders(
get_rsys(kf, kb), tend=3*u.s,
c0=defaultdict(lambda: 0*u.molar, {'Fe+3': .9*u.molar, 'SCN-': .7*u.molar}),
parameters={'kf': kf, 'kb': kb},
get_odesys_kw=dict(
unit_registry=SI_base_registry,
output_conc_unit=u.molar,
output_time_unit=u.second
)
))
elif __name__ == '__main__':
import warnings
warnings.warn("Run using 'bokeh serve %s'" % __file__)
sys.exit(1)
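
The c0 argument above uses a defaultdict so any species not listed explicitly starts at zero concentration, and chempy's unit objects make that default carry units too. A minimal sketch of the pattern (the product species name 'FeSCN+2' is an assumption, since get_rsys is defined in the separate bokeh_interactive module):

from collections import defaultdict
from chempy.units import default_units as u

# Unlisted species fall back to 0 molar via the default factory.
c0 = defaultdict(lambda: 0*u.molar, {'Fe+3': .9*u.molar, 'SCN-': .7*u.molar})

print(c0['Fe+3'])     # explicitly given: 0.9 molar
print(c0['FeSCN+2'])  # assumed product species: falls back to 0 molar
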
| 0
| 0
| 0
|
fc603a336d6907b4650063f43483887c9a02976b
| 4,693
|
py
|
Python
|
renku/cli/_config.py
|
jirikuncar/renku-python
|
69df9ea1d5db3c63fd2ea3537c7e46d079360c8f
|
[
"Apache-2.0"
] | 2
|
2019-03-09T17:56:57.000Z
|
2019-07-03T15:20:22.000Z
|
renku/cli/_config.py
|
jirikuncar/renku-python
|
69df9ea1d5db3c63fd2ea3537c7e46d079360c8f
|
[
"Apache-2.0"
] | null | null | null |
renku/cli/_config.py
|
jirikuncar/renku-python
|
69df9ea1d5db3c63fd2ea3537c7e46d079360c8f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2017 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration utilities."""
import errno
import os
from functools import update_wrapper
import click
import yaml
from renku._compat import Path
from ._options import Endpoint
APP_NAME = 'Renku'
"""Application name for storing configuration."""
RENKU_HOME = '.renku'
"""Project directory name."""
# Register Endpoint serializer
yaml.add_representer(
Endpoint, lambda dumper, data: dumper.represent_str(str(data))
)
def default_config_dir():
"""Return default config directory."""
return click.get_app_dir(APP_NAME)
def config_path(path=None, final=False):
"""Return config path."""
if final and path:
return path
if path is None:
path = default_config_dir()
try:
os.makedirs(path)
except OSError as e: # pragma: no cover
if e.errno != errno.EEXIST:
raise
return os.path.join(path, 'config.yml')
def read_config(path=None, final=False):
"""Read Renku configuration."""
try:
with open(config_path(path, final=final), 'r') as configfile:
            return yaml.safe_load(configfile) or {}
except FileNotFoundError:
return {}
def write_config(config, path, final=False):
"""Write Renku configuration."""
with open(config_path(path, final=final), 'w+') as configfile:
yaml.dump(config, configfile, default_flow_style=False)
def config_load(ctx, param, value):
"""Print application config path."""
if ctx.obj is None:
ctx.obj = {}
ctx.obj['config_path'] = value
ctx.obj['config'] = read_config(value)
return value
def with_config(f):
"""Add config to function."""
# keep it.
@click.pass_context
return update_wrapper(new_func, f)
def print_app_config_path(ctx, param, value):
"""Print application config path."""
if not value or ctx.resilient_parsing:
return
click.echo(config_path(os.environ.get('RENKU_CONFIG')))
ctx.exit()
def create_project_config_path(
path, mode=0o777, parents=False, exist_ok=False
):
"""Create new project configuration folder."""
# FIXME check default directory mode
project_path = Path(path).absolute().joinpath(RENKU_HOME)
project_path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
return str(project_path)
def get_project_config_path(path=None):
"""Return project configuration folder if exist."""
project_path = Path(path or '.').absolute().joinpath(RENKU_HOME)
if project_path.exists() and project_path.is_dir():
return str(project_path)
def find_project_config_path(path=None):
"""Find project config path."""
path = Path(path) if path else Path.cwd()
abspath = path.absolute()
project_path = get_project_config_path(abspath)
if project_path:
return project_path
for parent in abspath.parents:
project_path = get_project_config_path(parent)
if project_path:
return project_path
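
read_config and write_config round-trip a plain dict through config.yml inside the directory that config_path resolves (creating it on demand). A minimal sketch, assuming the helpers above are in scope; the directory and key are throwaway examples, not real Renku settings:

import tempfile

cfg_dir = tempfile.mkdtemp()                 # throwaway config directory
config = read_config(cfg_dir)                # {} while config.yml does not exist yet
config['endpoint'] = 'https://example.org'   # illustrative key only
write_config(config, path=cfg_dir)
assert read_config(cfg_dir) == config        # round-trips through cfg_dir/config.yml
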
| 29.149068
| 75
| 0.680162
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration utilities."""
import errno
import os
from functools import update_wrapper
import click
import yaml
from renku._compat import Path
from ._options import Endpoint
APP_NAME = 'Renku'
"""Application name for storing configuration."""
RENKU_HOME = '.renku'
"""Project directory name."""
# Register Endpoint serializer
yaml.add_representer(
Endpoint, lambda dumper, data: dumper.represent_str(str(data))
)
def default_config_dir():
"""Return default config directory."""
return click.get_app_dir(APP_NAME)
def config_path(path=None, final=False):
"""Return config path."""
if final and path:
return path
if path is None:
path = default_config_dir()
try:
os.makedirs(path)
except OSError as e: # pragma: no cover
if e.errno != errno.EEXIST:
raise
return os.path.join(path, 'config.yml')
def read_config(path=None, final=False):
"""Read Renku configuration."""
try:
with open(config_path(path, final=final), 'r') as configfile:
            return yaml.safe_load(configfile) or {}
except FileNotFoundError:
return {}
def write_config(config, path, final=False):
"""Write Renku configuration."""
with open(config_path(path, final=final), 'w+') as configfile:
yaml.dump(config, configfile, default_flow_style=False)
def config_load(ctx, param, value):
"""Print application config path."""
if ctx.obj is None:
ctx.obj = {}
ctx.obj['config_path'] = value
ctx.obj['config'] = read_config(value)
return value
def with_config(f):
"""Add config to function."""
# keep it.
@click.pass_context
def new_func(ctx, *args, **kwargs):
# Invoked with custom config:
if 'config' in kwargs:
return ctx.invoke(f, *args, **kwargs)
if ctx.obj is None:
ctx.obj = {}
config = ctx.obj['config']
project_enabled = not ctx.obj.get('no_project', False)
project_config_path = get_project_config_path()
if project_enabled and project_config_path:
project_config = read_config(project_config_path)
config['project'] = project_config
result = ctx.invoke(f, config, *args, **kwargs)
project_config = config.pop('project', None)
if project_config:
if not project_config_path:
raise RuntimeError('Invalid config update')
write_config(project_config, path=project_config_path)
write_config(config, path=ctx.obj['config_path'])
if project_config is not None:
config['project'] = project_config
return result
return update_wrapper(new_func, f)
def print_app_config_path(ctx, param, value):
"""Print application config path."""
if not value or ctx.resilient_parsing:
return
click.echo(config_path(os.environ.get('RENKU_CONFIG')))
ctx.exit()
def create_project_config_path(
path, mode=0o777, parents=False, exist_ok=False
):
"""Create new project configuration folder."""
# FIXME check default directory mode
project_path = Path(path).absolute().joinpath(RENKU_HOME)
project_path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
return str(project_path)
def get_project_config_path(path=None):
"""Return project configuration folder if exist."""
project_path = Path(path or '.').absolute().joinpath(RENKU_HOME)
if project_path.exists() and project_path.is_dir():
return str(project_path)
def find_project_config_path(path=None):
"""Find project config path."""
path = Path(path) if path else Path.cwd()
abspath = path.absolute()
project_path = get_project_config_path(abspath)
if project_path:
return project_path
for parent in abspath.parents:
project_path = get_project_config_path(parent)
if project_path:
return project_path
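
with_config is meant to sit between click's option decorators and the command function: it injects the merged global/project configuration as the first argument and writes any changes back after the command returns. A sketch of one plausible wiring, assuming a group whose --config option uses config_load as its callback; the group, command, and option names are illustrative, not the real Renku CLI:

import click

@click.group()
@click.option('--config', envvar='RENKU_CONFIG', default=None,
              callback=config_load, expose_value=False,
              help='Location of the configuration directory.')
def cli():
    """Illustrative command group."""

@cli.command()
@click.argument('key')
@with_config
def show(config, key):
    """Echo one value from the merged configuration."""
    click.echo(config.get(key))

# invoked e.g. as:  <prog> show <some-key>
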
| 974
| 0
| 26
|
a8139ae5acfd5b3b05d97a8c89429570e88de8c3
| 2,016
|
py
|
Python
|
JaroEliCall/src/wrapped_interfaces/login_wrapped_ui.py
|
jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip
|
05143356fe91f745c286db8c3e2432714ab122e7
|
[
"MIT"
] | null | null | null |
JaroEliCall/src/wrapped_interfaces/login_wrapped_ui.py
|
jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip
|
05143356fe91f745c286db8c3e2432714ab122e7
|
[
"MIT"
] | null | null | null |
JaroEliCall/src/wrapped_interfaces/login_wrapped_ui.py
|
jaroslaw-wieczorek/Project_IP_Telephony_Python_Voip
|
05143356fe91f745c286db8c3e2432714ab122e7
|
[
"MIT"
] | 1
|
2018-03-20T21:22:40.000Z
|
2018-03-20T21:22:40.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 18:49:11 2018
@author: afar
"""
import os
import sys
import hashlib
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QStatusBar
# make the project root importable so the generated gui modules can be imported
lib_path = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
sys.path.append(lib_path)
from gui.login_ui import Ui_LoginInterfaceDialog
#from gui.testpic_ui import Ui_Dialog
from gui.resources import icons_wrapper_rc
| 25.518987
| 85
| 0.632937
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 18:49:11 2018
@author: afar
"""
import os
import sys
import hashlib
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QStatusBar
# make the project root importable so the generated gui modules can be imported
lib_path = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
sys.path.append(lib_path)
from gui.login_ui import Ui_LoginInterfaceDialog
#from gui.testpic_ui import Ui_Dialog
from gui.resources import icons_wrapper_rc
class LoginWrappedUI(QDialog, Ui_LoginInterfaceDialog):
def __init__(self):
super(LoginWrappedUI, self).__init__()
self.setupUi(self)
self.statusBar = QStatusBar()
self.verticalLayout.addWidget(self.statusBar)
def set_info_text(self, text):
self.label_info.setText(text)
def clear_info_text(self):
self.label_info.clear()
def hide_info_text(self):
self.label_info.hide()
def show_info_text(self):
self.label_info.show()
def set_login(self, login):
self.line_edit_login.setText(login)
def set_password(self, password):
self.line_edit_password.setText(password)
def get_login(self):
if self.line_edit_login.text() != '' and self.line_edit_login.text() != None:
return str(self.line_edit_login.text())
else:
return None
    def get_password(self):
        # Strip spaces and return the SHA-256 hex digest of the password;
        # avoid printing the plaintext password to the console.
        password = self.line_edit_password.text().replace(" ", "")
        return hashlib.sha256(password.encode()).hexdigest()
def set_push_button_login(self, funct):
self.push_button_login.clicked.connect(funct)
def set_push_button_register(self, funct):
self.push_button_register.clicked.connect(funct)
def nothing(self):
print("Do nothing!")
| 1,047
| 34
| 422
|