Dataset schema (column: type, value range; "nullable" marks columns that can be null):
hexsha: string, length 40
size: int64, 3 to 1.03M
ext: string, 10 distinct values
lang: string, 1 distinct value
max_stars_repo_path: string, length 3 to 972
max_stars_repo_name: string, length 6 to 130
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k, nullable
max_stars_repo_stars_event_min_datetime: string, length 24, nullable
max_stars_repo_stars_event_max_datetime: string, length 24, nullable
max_issues_repo_path: string, length 3 to 972
max_issues_repo_name: string, length 6 to 130
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 116k, nullable
max_issues_repo_issues_event_min_datetime: string, length 24, nullable
max_issues_repo_issues_event_max_datetime: string, length 24, nullable
max_forks_repo_path: string, length 3 to 972
max_forks_repo_name: string, length 6 to 130
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k, nullable
max_forks_repo_forks_event_min_datetime: string, length 24, nullable
max_forks_repo_forks_event_max_datetime: string, length 24, nullable
content: string, length 3 to 1.03M
avg_line_length: float64, 1.13 to 941k
max_line_length: int64, 2 to 941k
alphanum_fraction: float64, 0 to 1
3f9c6dc1e2e10c35b26d5b375a5efdcdb4d468b0
| 3,336
|
py
|
Python
|
rls/algorithms/single/sql.py
|
StepNeverStop/RLs
|
25cc97c96cbb19fe859c9387b7547cbada2c89f2
|
[
"Apache-2.0"
] | 371
|
2019-04-26T00:37:33.000Z
|
2022-03-31T07:33:12.000Z
|
rls/algorithms/single/sql.py
|
BlueFisher/RLs
|
25cc97c96cbb19fe859c9387b7547cbada2c89f2
|
[
"Apache-2.0"
] | 47
|
2019-07-21T11:51:57.000Z
|
2021-08-31T08:45:22.000Z
|
rls/algorithms/single/sql.py
|
BlueFisher/RLs
|
25cc97c96cbb19fe859c9387b7547cbada2c89f2
|
[
"Apache-2.0"
] | 102
|
2019-06-29T13:11:15.000Z
|
2022-03-28T13:51:04.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
import torch.distributions as td
from rls.algorithms.base.sarl_off_policy import SarlOffPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.nn.models import CriticQvalueAll
from rls.nn.modules.wrappers import TargetTwin
from rls.nn.utils import OPLR
from rls.utils.torch_utils import n_step_return
class SQL(SarlOffPolicy):
"""
Soft Q-Learning. ref: https://github.com/Bigpig4396/PyTorch-Soft-Q-Learning/blob/master/SoftQ.py
    NOTE: this is not the original implementation from the paper; no SVGD is used.
Reinforcement Learning with Deep Energy-Based Policies: https://arxiv.org/abs/1702.08165
"""
policy_mode = 'off-policy'
def __init__(self,
lr=5.0e-4,
alpha=2,
polyak=0.995,
network_settings=[32, 32],
**kwargs):
super().__init__(**kwargs)
        assert not self.is_continuous, 'SQL only supports discrete action spaces'
self.alpha = alpha
self.polyak = polyak
self.q_net = TargetTwin(CriticQvalueAll(self.obs_spec,
rep_net_params=self._rep_net_params,
output_shape=self.a_dim,
network_settings=network_settings),
self.polyak).to(self.device)
self.oplr = OPLR(self.q_net, lr, **self._oplr_params)
self._trainer_modules.update(model=self.q_net,
oplr=self.oplr)
@iton
def select_action(self, obs):
q_values = self.q_net(obs, rnncs=self.rnncs) # [B, A]
self.rnncs_ = self.q_net.get_rnncs()
        probs = ((q_values - self._get_v(q_values)) / self.alpha).exp()  # > 0  # [B, A]
        probs /= probs.sum(-1, keepdim=True)  # normalized Boltzmann probabilities  # [B, A]
        cate_dist = td.Categorical(probs=probs)
actions = cate_dist.sample() # [B,]
return actions, Data(action=actions)
def _get_v(self, q):
v = self.alpha * (q / self.alpha).exp().mean(-1, keepdim=True).log() # [B, 1] or [T, B, 1]
return v
@iton
def _train(self, BATCH):
q = self.q_net(BATCH.obs, begin_mask=BATCH.begin_mask) # [T, B, A]
q_next = self.q_net.t(BATCH.obs_, begin_mask=BATCH.begin_mask) # [T, B, A]
v_next = self._get_v(q_next) # [T, B, 1]
q_eval = (q * BATCH.action).sum(-1, keepdim=True) # [T, B, 1]
q_target = n_step_return(BATCH.reward,
self.gamma,
BATCH.done,
v_next,
BATCH.begin_mask).detach() # [T, B, 1]
td_error = q_target - q_eval # [T, B, 1]
q_loss = (td_error.square() * BATCH.get('isw', 1.0)).mean() # 1
self.oplr.optimize(q_loss)
return td_error, {
'LEARNING_RATE/lr': self.oplr.lr,
'LOSS/loss': q_loss,
'Statistics/q_max': q_eval.max(),
'Statistics/q_min': q_eval.min(),
'Statistics/q_mean': q_eval.mean()
}
def _after_train(self):
super()._after_train()
self.q_net.sync()
| 39.714286
| 105
| 0.539568
|
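The `_get_v` and `select_action` methods in the SQL file above implement the soft state value V(s) = alpha * log E_a[exp(Q(s, a) / alpha)] and a Boltzmann policy over the resulting advantages. A minimal NumPy sketch of those two formulas follows; the Q-values and temperature are made-up illustration values, not taken from the repository.

```python
# Minimal sketch of the soft value and Boltzmann policy used in the SQL file above.
# The Q-values and alpha below are illustrative only.
import numpy as np

def soft_value(q, alpha):
    # V(s) = alpha * log( mean_a exp(Q(s, a) / alpha) ), over the action axis
    return alpha * np.log(np.mean(np.exp(q / alpha), axis=-1, keepdims=True))

def boltzmann_policy(q, alpha):
    # pi(a|s) proportional to exp((Q(s, a) - V(s)) / alpha)
    weights = np.exp((q - soft_value(q, alpha)) / alpha)
    return weights / weights.sum(axis=-1, keepdims=True)

q_values = np.array([[1.0, 2.0, 0.5]])        # one state, three discrete actions
print(soft_value(q_values, alpha=2.0))        # soft state value, shape [1, 1]
print(boltzmann_policy(q_values, alpha=2.0))  # action probabilities, sum to 1
```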
639b9d983887bc032b974c1738d06b4a4c916d3f
| 383
|
py
|
Python
|
src/numdifftools/__init__.py
|
rparini/numdifftools
|
2c88878df732c9c6629febea56e7a91fd898398d
|
[
"BSD-3-Clause"
] | null | null | null |
src/numdifftools/__init__.py
|
rparini/numdifftools
|
2c88878df732c9c6629febea56e7a91fd898398d
|
[
"BSD-3-Clause"
] | 1
|
2018-03-27T19:12:14.000Z
|
2018-03-27T19:12:14.000Z
|
src/numdifftools/__init__.py
|
rparini/numdifftools
|
2c88878df732c9c6629febea56e7a91fd898398d
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import pkg_resources
from .info import __doc__
from .core import *
from . import extrapolation, limits, step_generators
from numpy.testing import Tester
try:
__version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
test = Tester(raise_warnings="release").test
| 25.533333
| 66
| 0.81201
|
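The `__init__.py` above resolves the installed package version through `pkg_resources`. On Python 3.8+ the same lookup is available from the standard library; a hedged equivalent sketch, assuming the distribution is named `numdifftools`:

```python
# Equivalent version lookup via the standard library instead of pkg_resources.
# "numdifftools" as the distribution name is an assumption based on the file path.
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("numdifftools")
except PackageNotFoundError:
    __version__ = "unknown"
```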
36c2a8e25d355eec8fee149d5352d78c7e740a90
| 868
|
py
|
Python
|
ComRISB/pyglib/pyglib/iface/test/test_ef_lda.py
|
comscope/comsuite
|
d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c
|
[
"BSD-3-Clause"
] | 18
|
2019-06-15T18:08:21.000Z
|
2022-01-30T05:01:29.000Z
|
ComRISB/pyglib/pyglib/iface/test/test_ef_lda.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | null | null | null |
ComRISB/pyglib/pyglib/iface/test/test_ef_lda.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | 11
|
2019-06-05T02:57:55.000Z
|
2021-12-29T02:54:25.000Z
|
import h5py
from mpi4py import MPI
from pyglib.iface.ifwannier import get_wannier_dat
from pyglib.estructure.fermi import get_fermi_level
from pyglib.estructure.gwannier import get_gmodel, mpiget_bndev
kpts, wfwannier_list, bnd_es = get_wannier_dat(path="../wannier")
with h5py.File("GPARAMBANDS.h5", "r") as f:
num_elec = f['/nelectron'][0]
ismear = f["/ismear"][0]
delta = f["/delta"][0]
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
print("num of electrons = {}".format(num_elec))
gmodel = get_gmodel()
kpts = gmodel.k_uniform_mesh((15, 15, 15))
nk = len(kpts)
wklist = [1./nk for k in kpts]
bnd_es, bnd_vs = mpiget_bndev(kpts, gmodel, mode="risb")
if rank == 0:
print(bnd_es[0][0])
efermi = get_fermi_level(bnd_es, wklist, num_elec,
delta=delta, ismear=ismear)
print("lda fermi level = {}".format(efermi))
| 28.933333
| 65
| 0.693548
|
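The test script above hands band energies, k-point weights, and the electron count to `get_fermi_level`. As an illustration of what such a search involves, a chemical potential can be bisected until the smeared occupation count matches the electron number. This is not the pyglib implementation; the Fermi-Dirac smearing form and the spin factor of 2 are assumptions.

```python
# Hedged sketch of a Fermi-level bisection over weighted band energies.
# Not the pyglib implementation; the smearing form and spin degeneracy are assumptions.
import numpy as np

def find_fermi(bnd_es, wklist, num_elec, delta=0.01, tol=1e-8):
    bnd_es = np.asarray(bnd_es, dtype=float)       # band energies, shape [nk, nbands]
    wk = np.asarray(wklist, dtype=float)[:, None]  # k-point weights, shape [nk, 1]

    def electron_count(mu):
        # Fermi-Dirac occupation written via tanh for numerical stability
        occ = 0.5 * (1.0 - np.tanh((bnd_es - mu) / (2.0 * delta)))
        return 2.0 * np.sum(wk * occ)              # factor 2 assumes spin degeneracy

    lo, hi = bnd_es.min() - 1.0, bnd_es.max() + 1.0
    while hi - lo > tol:
        mu = 0.5 * (lo + hi)
        if electron_count(mu) < num_elec:
            lo = mu
        else:
            hi = mu
    return 0.5 * (lo + hi)

# toy usage: 2 k-points, 3 bands each, equal weights, 2 electrons per cell
print(find_fermi([[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]], [0.5, 0.5], num_elec=2))
```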
673c97755cc7547fe565803b79461dfb158a55ef
| 10,233
|
py
|
Python
|
src/data_process.py
|
Du-Jia/model-getting-started
|
49a84e4cb0e7f283d287a2a6b1c86913ae6827f6
|
[
"Apache-2.0"
] | null | null | null |
src/data_process.py
|
Du-Jia/model-getting-started
|
49a84e4cb0e7f283d287a2a6b1c86913ae6827f6
|
[
"Apache-2.0"
] | null | null | null |
src/data_process.py
|
Du-Jia/model-getting-started
|
49a84e4cb0e7f283d287a2a6b1c86913ae6827f6
|
[
"Apache-2.0"
] | 1
|
2021-05-11T14:44:45.000Z
|
2021-05-11T14:44:45.000Z
|
"""data process tools"""
from __future__ import annotations
import csv
import os
import pickle
from typing import List, Dict
# from typing import Literal
import torch
from src.schema import InputExample
from src.schema import InputFeatures
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter=",", quotechar='"')
lines = []
for line in reader:
lines.append(line)
return lines
class AgNewsDataProcessor(DataProcessor):
"""
process the agnews
Args:
DataProcessor ([type]): [description]
"""
def get_labels(self):
return [1, 2, 3, 4]
def get_examples(self, file: str) -> List[InputExample]:
lines = self._read_tsv(file)
examples: List[InputExample] = []
for index, (label, title, description) in enumerate(lines[1:]):
example = InputExample(
guid=f'guid-{index}',
text_a=title,
text_b=description,
label=label
)
examples.append(example)
return examples
def get_train_examples(self, data_dir) -> List[InputExample]:
return self.get_examples(data_dir)
def get_dev_examples(self, data_dir) -> List[InputExample]:
return self.get_examples(data_dir)
def get_test_examples(self, data_dir):
return self.get_examples(data_dir)
class THCNewsDataProcessor(DataProcessor):
# def __init__(self, vocab: Dict[str, int]):
# self.vocab = {}
def get_labels(self):
return range(9)
def get_examples(self, file: str) -> List[InputExample]:
lines = self._read_txt(file, encoding='utf-8')
examples: List[InputExample] = []
for index, line in enumerate(lines):
label = line.split()[-1]
title = line[:-len(label)-1].strip()
            label = int(label)
assert 0 <= label <= 9
example = InputExample(
guid=f'guid-{index}',
text_a=title,
text_b=None,
label=label
)
examples.append(example)
return examples
def get_train_examples(self, data_dir: str) -> List[InputExample]:
return self.get_examples(data_dir)
def get_dev_examples(self, data_dir) -> List[InputExample]:
"""
get evaluation examples which is eval in training period
"""
return self.get_examples(data_dir)
def get_test_examples(self, data_dir):
return self.get_examples(data_dir)
def _read_txt(self, file, encoding='utf-8'):
with open(file, 'r', encoding=encoding) as f:
return f.readlines()
class FeaturesExtractor():
"""A base class for extracting standard input from examples"""
def __init__(self, vocab: Dict[str, int], examples: List[InputExample], language: str='zh') -> None:
"""
        @parameters:
            vocab: Dict[str, int], mapping from word string to word id
            examples: a list of InputExample objects
            language: str, either 'zh' (Chinese corpus) or 'en' (English corpus)
"""
self.vocab = vocab
self.examples = examples
self.language = language
def get_data_iterator(self, batch_size: int, max_len: int) -> DataIterator:
"""
        Get a DataIterator whose get_batch method yields data directly for
        training, evaluation, or testing of a model.
"""
features = self._get_features_from_examples()
data_iterator = DataIterator(features, batch_size, max_len, padding_fill=len(self.vocab)-1)
return data_iterator
def _get_features_from_examples(self) -> List[InputFeatures]:
"""Get a list of features from an list of example"""
features = []
examples = self.examples
for example in examples:
features.append(self._get_feature_from_example(example))
return features
def _get_feature_from_example(self, example: InputExample) -> InputFeatures:
"""
Get an InputFeatures object from example
@parameters:
example: InputExample
@return:
InputFeatures
"""
raise NotImplementedError()
class THCNewsFeaturesExtractor(FeaturesExtractor):
def _get_feature_from_example(self, example: InputExample) -> InputFeatures:
vocab = self.vocab
language = self.language
if language == 'zh':
input_ids = [vocab[char] if char in vocab.keys() else vocab['<UNK>']
for char in example.text_a]
elif language == 'en':
input_ids = [vocab[char] if char in vocab.keys() else vocab['<UNK>']
for char in example.text_a.split()]
else:
            raise ValueError("Invalid language code, please use 'zh' or 'en'")
label_id = example.label
feature = InputFeatures(
input_ids=input_ids,
attention_mask=None,
segment_ids=None,
label_id=label_id,
is_real_example=False
)
return feature
class DataIterator(object):
"""
    An iterator that yields batches from the dataset.
"""
def __init__(self, features: List[InputFeatures], batch_size: int,
max_len: int, padding_fill: int):
"""
@parameters:
            batch_size: int, the number of InputFeatures in each batch
            max_len: int, the maximum sequence length
            padding_fill: int, value used to pad sequences shorter than max_len;
                normally vocab_size - 1 is chosen as padding_fill
"""
self.features = features
self.batch_size = batch_size
self.max_len = max_len
self.padding_fill = padding_fill
def _get_inputs(self, max_len: int, do_test=False) -> (
torch.LongTensor, torch.LongTensor):
"""A generator returning input matrix for train, eval and test"""
"""
@parameters:
do_test: bool, If do_test is True, Y will be set as None
max_len: int, the max size of inputs text
"""
# set input matrix x, shape [features size, max sequence length]
x = torch.LongTensor(len(self.features), max_len)
# set input label matrix, shape [features size, 1]
y = torch.LongTensor(len(self.features), 1)
# if length of inputs is more than max_len, clip inputs, else pad them.
for index, feature in enumerate(self.features):
input_ids = feature.input_ids
if len(input_ids) > max_len:
input_ids = input_ids[:max_len]
else:
padding = [self.padding_fill for _ in range(max_len - len(input_ids))]
input_ids.extend(padding)
x[index] = torch.LongTensor(input_ids)
y[index] = feature.label_id
if do_test:
y = None
return x, y
    def get_batch(self, do_test=False) -> (
            torch.LongTensor, torch.LongTensor):
        """Return an iterator whose items have shape [batch_size, max_len]; the last batch may be smaller."""
        x, y = self._get_inputs(max_len=self.max_len, do_test=do_test)
        batch_size = self.batch_size
        batches = len(x) // batch_size
        if batches * batch_size < len(x):
            batches += 1
        # generate batch data
        for batch in range(batches):
            batch_x = x[batch*batch_size:(batch+1)*batch_size]
            batch_y = y[batch*batch_size:(batch+1)*batch_size] if y is not None else None
            yield batch_x, batch_y
if __name__ == '__main__':
# # test for thcnews
data_processor = THCNewsDataProcessor()
root = os.path.join(os.path.abspath('..'), 'data')
agnews = os.path.join(root, 'THCNews')
train_file = os.path.join(agnews, 'train.txt')
train_examples = data_processor.get_examples(train_file)
dev_file = os.path.join(agnews, 'dev.txt')
dev_examples = data_processor.get_examples(dev_file)
test_file = os.path.join(agnews, 'test.txt')
test_examples = data_processor.get_examples(test_file)
print(f'Trainset Length: {len(train_examples)}, Example: {train_examples[0]}')
print(f'Dev Length: {len(dev_examples)}, Example: {dev_examples[0]}')
print(f'Testset Length: {len(test_examples)}, Example: {test_examples[0]}')
vocab = pickle.load(open(r'D:\Project\NLP\model-getting-started\data\pretrained\zh\vocab.pkl', 'rb'))
    train_iterator = THCNewsFeaturesExtractor(vocab, train_examples).get_data_iterator(batch_size=512,
                                                                                        max_len=40)
batches = 0
for train_x, train_y in train_iterator.get_batch():
print(f'Batches: {batches}, X: {train_x.shape}, Y: {train_y.shape}')
batches += 1
| 36.41637
| 105
| 0.604319
|
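The `DataIterator._get_inputs` method above pads or clips every sequence to `max_len` with `padding_fill` (normally `vocab_size - 1`). A standalone sketch of that pad-or-clip step, with illustrative values only:

```python
# Standalone illustration of the pad-or-clip behaviour in DataIterator._get_inputs above.
# The ids, max_len and padding_fill values are made up for the example.
import torch

def pad_or_clip(input_ids, max_len, padding_fill):
    if len(input_ids) > max_len:
        return input_ids[:max_len]                                       # clip long sequences
    return input_ids + [padding_fill] * (max_len - len(input_ids))       # pad short ones

rows = [pad_or_clip(ids, max_len=5, padding_fill=99)
        for ids in ([1, 2, 3], [4, 5, 6, 7, 8, 9])]
x = torch.LongTensor(rows)
print(x)        # tensor([[ 1,  2,  3, 99, 99],
                #         [ 4,  5,  6,  7,  8]])
print(x.shape)  # torch.Size([2, 5])
```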
f68fff850628d0b4353aeec266fcf3f5aa0ef4c2
| 161
|
py
|
Python
|
abfahrt/testutils/test_simulator/__main__.py
|
Team-Zugig-zum-Erfolg/InformatiCup
|
788076ac38bf6d8f462465b7fb96db14d13bed30
|
[
"MIT"
] | 1
|
2022-01-30T14:30:02.000Z
|
2022-01-30T14:30:02.000Z
|
abfahrt/testutils/test_simulator/__main__.py
|
Team-Zugig-zum-Erfolg/InformatiCup
|
788076ac38bf6d8f462465b7fb96db14d13bed30
|
[
"MIT"
] | null | null | null |
abfahrt/testutils/test_simulator/__main__.py
|
Team-Zugig-zum-Erfolg/InformatiCup
|
788076ac38bf6d8f462465b7fb96db14d13bed30
|
[
"MIT"
] | null | null | null |
from abfahrt.testutils.test_simulator.test_simulator import test_simulator
if __name__ == '__main__':
ts = test_simulator("abfahrt/testfiles")
ts.run()
| 26.833333
| 74
| 0.763975
|
a9b291a64074284d6b52edda2902b1b509f69732
| 1,726
|
py
|
Python
|
create_tables.py
|
Karenzhang7717/postgres_data_modelling
|
6d81550269d1f0ed28e0f324717515c3f32a0456
|
[
"Apache-2.0"
] | null | null | null |
create_tables.py
|
Karenzhang7717/postgres_data_modelling
|
6d81550269d1f0ed28e0f324717515c3f32a0456
|
[
"Apache-2.0"
] | null | null | null |
create_tables.py
|
Karenzhang7717/postgres_data_modelling
|
6d81550269d1f0ed28e0f324717515c3f32a0456
|
[
"Apache-2.0"
] | null | null | null |
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
"""
- Creates and connects to the sparkifydb
- Returns the connection and cursor to sparkifydb
"""
# connect to default database
conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student password=student")
conn.set_session(autocommit=True)
cur = conn.cursor()
# create sparkify database with UTF8 encoding
# cur.execute("DROP DATABASE IF EXISTS sparkifydb")
# cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")
#
# # close connection to default database
# conn.close()
# connect to sparkify database
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
return cur, conn
def drop_tables(cur, conn):
"""
Drops each table using the queries in `drop_table_queries` list.
"""
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""
Creates each table using the queries in `create_table_queries` list.
"""
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
"""
- Drops (if exists) and Creates the sparkify database.
- Establishes connection with the sparkify database and gets
cursor to it.
- Drops all the tables.
- Creates all tables needed.
- Finally, closes the connection.
"""
cur, conn = create_database()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
| 24.309859
| 93
| 0.658749
|
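The `drop_tables` and `create_tables` helpers above simply iterate over two query lists imported from the project's `sql_queries` module. A hypothetical sketch of what such lists can look like; the table name and columns below are invented for illustration and are not taken from the project's `sql_queries.py`:

```python
# Hypothetical shape of the imported query lists; schema details are assumptions.
create_table_queries = [
    """
    CREATE TABLE IF NOT EXISTS songplays (
        songplay_id SERIAL PRIMARY KEY,
        start_time  TIMESTAMP,
        user_id     INT,
        level       VARCHAR
    );
    """,
]
drop_table_queries = [
    "DROP TABLE IF EXISTS songplays;",
]
```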
784c77505bbf5b47d3cae90b072a9d1b11f902f6
| 228
|
py
|
Python
|
test/wxhello.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | null | null | null |
test/wxhello.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 1
|
2019-10-22T21:28:31.000Z
|
2019-10-22T21:39:12.000Z
|
test/wxhello.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 2
|
2019-06-06T15:06:46.000Z
|
2020-07-20T02:03:22.000Z
|
import wx
app = wx.App()
window = wx.Frame(None, title = "wxPython Frame", size = (300,200))
panel = wx.Panel(window)
label = wx.StaticText(panel, label = "Hello World", pos = (100,50))
window.Show(True)
app.MainLoop()
| 22.8
| 68
| 0.653509
|
075ee27bc50650b498f73fa563272ec750442237
| 777
|
py
|
Python
|
src/challenges/7-8-20.py
|
rupol/Algorithms-Lecture
|
2114857af719511038c9cb85ba7e57a547a6e515
|
[
"MIT"
] | null | null | null |
src/challenges/7-8-20.py
|
rupol/Algorithms-Lecture
|
2114857af719511038c9cb85ba7e57a547a6e515
|
[
"MIT"
] | null | null | null |
src/challenges/7-8-20.py
|
rupol/Algorithms-Lecture
|
2114857af719511038c9cb85ba7e57a547a6e515
|
[
"MIT"
] | null | null | null |
# Add up and print the sum of the all of the minimum elements of each inner array:
# [[8, 4], [90, -1, 3], [9, 62], [-7, -1, -56, -6], [201], [76, 18]]
# The expected output is given by:
# 4 + -1 + 9 + -56 + 201 + 18 = 175
# You may use whatever programming language you'd like.
# Verbalize your thought process as much as possible before writing any code. Run through the UPER problem solving framework while going through your thought process.
def sum_mins(arr):
smallest = 0
for inner_array in arr:
# find min for each inner array
# sum all the smallests from the inner array and return
smallest += min(inner_array)
return smallest
my_list = [[8, 4], [90, -1, 3], [9, 62], [-7, -1, -56, -6], [201], [76, 18]]
print(sum_mins(my_list))
| 38.85
| 166
| 0.642214
|
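The loop above can also be written as a single generator expression over `min()`; the result for the given list is the same 175:

```python
# Equivalent one-liner for the challenge above.
def sum_mins_short(arr):
    return sum(min(inner) for inner in arr)

print(sum_mins_short([[8, 4], [90, -1, 3], [9, 62], [-7, -1, -56, -6], [201], [76, 18]]))  # 175
```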
92c9945d63e911b67f6425917b47adb5a1bf63fa
| 854
|
py
|
Python
|
mirage/generate/url_template.py
|
fossabot/django-mirage
|
814b3f2486af31f9dca42ef4bb0215655fe0aea6
|
[
"Apache-2.0"
] | 4
|
2019-05-16T09:26:21.000Z
|
2022-02-14T06:21:40.000Z
|
mirage/generate/url_template.py
|
fossabot/django-mirage
|
814b3f2486af31f9dca42ef4bb0215655fe0aea6
|
[
"Apache-2.0"
] | 77
|
2019-05-10T22:24:54.000Z
|
2021-07-02T03:11:01.000Z
|
mirage/generate/url_template.py
|
fossabot/django-mirage
|
814b3f2486af31f9dca42ef4bb0215655fe0aea6
|
[
"Apache-2.0"
] | 1
|
2018-12-08T09:57:54.000Z
|
2018-12-08T09:57:54.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2017-2018 Shota Shimazu.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import textwrap
def create_url(app):
return textwrap.dedent(
'''
from django.urls import path
urlpatterns = [
path(r'^url_letter/', """YOUR_VIEW_CLASS""".as_view(), name='INSERT NAME HERE'),
]
''').format(app=app).strip()
| 27.548387
| 84
| 0.716628
|
9617755217e037a5f5305d4400a7869af5dbd47f
| 3,683
|
py
|
Python
|
czsc/utils/ta.py
|
fnsoxt/czsc
|
ae908ca807251eefb1c23c1a3bfa20f36977ba4b
|
[
"Apache-2.0"
] | 206
|
2021-11-16T03:08:58.000Z
|
2022-03-30T04:21:47.000Z
|
czsc/utils/ta.py
|
fnsoxt/czsc
|
ae908ca807251eefb1c23c1a3bfa20f36977ba4b
|
[
"Apache-2.0"
] | 20
|
2021-11-18T09:26:02.000Z
|
2022-03-19T11:59:34.000Z
|
czsc/utils/ta.py
|
fnsoxt/czsc
|
ae908ca807251eefb1c23c1a3bfa20f36977ba4b
|
[
"Apache-2.0"
] | 124
|
2021-11-16T08:45:11.000Z
|
2022-03-30T08:55:27.000Z
|
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: zeng_bin8888@163.com
create_dt: 2022/1/24 15:01
describe: commonly used technical analysis indicators
"""
import numpy as np
def SMA(close: np.array, timeperiod=5):
"""简单移动平均
https://baike.baidu.com/item/%E7%A7%BB%E5%8A%A8%E5%B9%B3%E5%9D%87%E7%BA%BF/217887
:param close: np.array
收盘价序列
:param timeperiod: int
均线参数
:return: np.array
"""
res = []
for i in range(len(close)):
if i < timeperiod:
seq = close[0: i+1]
else:
seq = close[i - timeperiod + 1: i + 1]
res.append(seq.mean())
return np.array(res, dtype=np.double).round(4)
def EMA(close: np.array, timeperiod=5):
"""
https://baike.baidu.com/item/EMA/12646151
:param close: np.array
收盘价序列
:param timeperiod: int
均线参数
:return: np.array
"""
res = []
for i in range(len(close)):
if i < 1:
res.append(close[i])
else:
ema = (2 * close[i] + res[i-1] * (timeperiod-1)) / (timeperiod+1)
res.append(ema)
return np.array(res, dtype=np.double).round(4)
def MACD(close: np.array, fastperiod=12, slowperiod=26, signalperiod=9):
"""MACD 异同移动平均线
https://baike.baidu.com/item/MACD%E6%8C%87%E6%A0%87/6271283
:param close: np.array
收盘价序列
:param fastperiod: int
快周期,默认值 12
:param slowperiod: int
慢周期,默认值 26
:param signalperiod: int
信号周期,默认值 9
:return: (np.array, np.array, np.array)
diff, dea, macd
"""
ema12 = EMA(close, timeperiod=fastperiod)
ema26 = EMA(close, timeperiod=slowperiod)
diff = ema12 - ema26
dea = EMA(diff, timeperiod=signalperiod)
macd = (diff - dea) * 2
return diff.round(4), dea.round(4), macd.round(4)
def KDJ(close: np.array, high: np.array, low: np.array):
"""
:param close: 收盘价序列
:param high: 最高价序列
:param low: 最低价序列
:return:
"""
n = 9
hv = []
lv = []
for i in range(len(close)):
if i < n:
h_ = high[0: i+1]
l_ = low[0: i+1]
else:
h_ = high[i - n + 1: i + 1]
l_ = low[i - n + 1: i + 1]
hv.append(max(h_))
lv.append(min(l_))
hv = np.around(hv, decimals=2)
lv = np.around(lv, decimals=2)
rsv = np.where(hv == lv, 0, (close - lv) / (hv - lv) * 100)
k = []
d = []
j = []
for i in range(len(rsv)):
if i < n:
k_ = rsv[i]
d_ = k_
else:
k_ = (2 / 3) * k[i-1] + (1 / 3) * rsv[i]
d_ = (2 / 3) * d[i-1] + (1 / 3) * k_
k.append(k_)
d.append(d_)
j.append(3 * k_ - 2 * d_)
k = np.array(k, dtype=np.double)
d = np.array(d, dtype=np.double)
j = np.array(j, dtype=np.double)
return k.round(4), d.round(4), j.round(4)
def RSQ(close: [np.array, list]) -> float:
"""拟合优度 R Square
:param close: 收盘价序列
:return:
"""
x = list(range(len(close)))
y = np.array(close)
x_squred_sum = sum([x1 * x1 for x1 in x])
xy_product_sum = sum([x[i] * y[i] for i in range(len(x))])
num = len(x)
x_sum = sum(x)
y_sum = sum(y)
delta = float(num * x_squred_sum - x_sum * x_sum)
if delta == 0:
return 0
y_intercept = (1 / delta) * (x_squred_sum * y_sum - x_sum * xy_product_sum)
slope = (1 / delta) * (num * xy_product_sum - x_sum * y_sum)
y_mean = np.mean(y)
ss_tot = sum([(y1 - y_mean) * (y1 - y_mean) for y1 in y]) + 0.00001
ss_err = sum([(y[i] - slope * x[i] - y_intercept) * (y[i] - slope * x[i] - y_intercept) for i in range(len(x))])
rsq = 1 - ss_err / ss_tot
return round(rsq, 4)
| 25.054422
| 116
| 0.526201
|
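A small usage sketch for the indicator functions defined above, run on a synthetic price series. The import path follows the file location `czsc/utils/ta.py`; the prices are randomly generated and purely illustrative:

```python
# Usage sketch for the SMA / EMA / MACD / KDJ / RSQ helpers above; synthetic data only.
import numpy as np
from czsc.utils.ta import SMA, EMA, MACD, KDJ, RSQ

close = np.cumsum(np.random.default_rng(0).normal(0, 1, 100)) + 100.0
high, low = close + 0.5, close - 0.5

print(SMA(close, timeperiod=5)[-3:])   # 5-period simple moving average
print(EMA(close, timeperiod=5)[-3:])   # 5-period exponential moving average
diff, dea, macd = MACD(close)          # DIFF, DEA and the MACD histogram
print(macd[-3:])
k, d, j = KDJ(close, high, low)        # stochastic K, D and J series
print(j[-3:])
print(RSQ(close[-20:]))                # R-squared of a linear fit to the last 20 closes
```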
e726d46f373ee5e79001d3d50ba8e9553474d344
| 103
|
py
|
Python
|
fullstack_application/frontend/views.py
|
bhardwajRahul/fullstack-Django-React-Redux
|
10bebe1b130fe73c1550d5919020a43bc2ca9c02
|
[
"MIT"
] | 2
|
2020-05-30T03:25:07.000Z
|
2021-03-31T22:23:48.000Z
|
fullstack_application/frontend/views.py
|
bhardwajRahul/fullstack-Django-React-Redux
|
10bebe1b130fe73c1550d5919020a43bc2ca9c02
|
[
"MIT"
] | 8
|
2020-05-30T08:19:57.000Z
|
2021-09-22T19:12:16.000Z
|
fullstack_application/frontend/views.py
|
Combinativ/fullstack-Django-React-Redux
|
10bebe1b130fe73c1550d5919020a43bc2ca9c02
|
[
"MIT"
] | 2
|
2020-05-31T07:11:52.000Z
|
2020-10-19T15:20:50.000Z
|
from django.shortcuts import render
def index(request):
return render(request,'frontend/index.html')
| 20.6
| 45
| 0.796117
|
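The `index` view above just renders `frontend/index.html`. A hypothetical `urls.py` showing how such a view is typically wired up; the route and names are assumptions, not taken from the repository:

```python
# Hypothetical frontend/urls.py wiring for the index view above; pattern and names assumed.
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
]
```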
d2bc2ed82dbe3dd73db7c77893717ae49ea31719
| 28,933
|
py
|
Python
|
calwebb_spec2_pytests/auxiliary_code/compare_wcs_mos.py
|
jhunkeler/nirspec_pipe_testing_tool
|
04d247e905e6003f51834c8aff024c211b82eb8a
|
[
"BSD-3-Clause"
] | null | null | null |
calwebb_spec2_pytests/auxiliary_code/compare_wcs_mos.py
|
jhunkeler/nirspec_pipe_testing_tool
|
04d247e905e6003f51834c8aff024c211b82eb8a
|
[
"BSD-3-Clause"
] | null | null | null |
calwebb_spec2_pytests/auxiliary_code/compare_wcs_mos.py
|
jhunkeler/nirspec_pipe_testing_tool
|
04d247e905e6003f51834c8aff024c211b82eb8a
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import os
import subprocess
from collections import OrderedDict
from astropy.io import fits
from astropy import wcs
from jwst.assign_wcs import nirspec
from jwst import datamodels
from . import auxiliary_functions as auxfunc
"""
This script compares pipeline WCS info with ESA results for Multi-Object Spectroscopy (MOS) data.
"""
# HEADER
__author__ = "M. A. Pena-Guerrero"
__version__ = "2.1"
# HISTORY
# Nov 2017 - Version 1.0: initial version completed
# May 2018 - Version 2.0: Completely changed script to use the datamodel instead of the compute_world_coordinates
# script, and added new routines for plot making and statistics calculations.
# Aug 2018 - Version 2.1: Modified slit-y differences to be reported in absolute numbers rather than relative
def compare_wcs(infile_name, esa_files_path, msa_conf_name, show_figs=True, save_figs=False,
threshold_diff=1.0e-7, mode_used=None, debug=False):
"""
This function does the WCS comparison from the world coordinates calculated using the pipeline
data model with the ESA intermediary files.
Args:
infile_name: str, name of the output fits file from the assign_wcs step (with full path)
esa_files_path: str, full path of where to find all ESA intermediary products to make comparisons for the tests
msa_conf_name: str, full path where to find the shutter configuration file
show_figs: boolean, whether to show plots or not
save_figs: boolean, save the plots or not
threshold_diff: float, threshold difference between pipeline output and ESA file
mode_used: string, mode used in the PTT configuration file
debug: boolean, if true a series of print statements will show on-screen
Returns:
- plots, if told to save and/or show them.
- median_diff: Boolean, True if smaller or equal to threshold
- log_msgs: list, all print statements captured in this variable
"""
log_msgs = []
# get grating and filter info from the rate file header
if mode_used is not None and mode_used == "MOS_sim":
infile_name = infile_name.replace("assign_wcs", "extract_2d")
msg = 'wcs validation test infile_name= '+infile_name
print(msg)
log_msgs.append(msg)
det = fits.getval(infile_name, "DETECTOR", 0)
lamp = fits.getval(infile_name, "LAMP", 0)
grat = fits.getval(infile_name, "GRATING", 0)
filt = fits.getval(infile_name, "FILTER", 0)
msametfl = fits.getval(infile_name, "MSAMETFL", 0)
msg = "from assign_wcs file --> Detector: "+det+" Grating: "+grat+" Filter: "+filt+" Lamp: "+lamp
print(msg)
log_msgs.append(msg)
# check that shutter configuration file in header is the same as given in PTT_config file
if msametfl != os.path.basename(msa_conf_name):
msg = "* WARNING! MSA config file name given in PTT_config file does not match the MSAMETFL keyword in main header.\n"
print(msg)
log_msgs.append(msg)
# copy the MSA shutter configuration file into the pytest directory
try:
subprocess.run(["cp", msa_conf_name, "."])
except FileNotFoundError:
msg1 = " * PTT is not able to locate the MSA shutter configuration file. Please make sure that the msa_conf_name variable in"
msg2 = " the PTT_config.cfg file is pointing exactly to where the fits file exists (i.e. full path and name). "
msg3 = " -> The WCS test is now set to skip and no plots will be generated. "
print(msg1)
print(msg2)
print(msg3)
log_msgs.append(msg1)
log_msgs.append(msg2)
log_msgs.append(msg3)
FINAL_TEST_RESULT = "skip"
return FINAL_TEST_RESULT, log_msgs
# get shutter info from metadata
shutter_info = fits.getdata(msa_conf_name, extname="SHUTTER_INFO") # this is generally ext=2
pslit = shutter_info.field("slitlet_id")
quad = shutter_info.field("shutter_quadrant")
row = shutter_info.field("shutter_row")
col = shutter_info.field("shutter_column")
msg = 'Using this MSA shutter configuration file: '+msa_conf_name
print(msg)
log_msgs.append(msg)
# get the datamodel from the assign_wcs output file
if mode_used is None or mode_used != "MOS_sim":
img = datamodels.ImageModel(infile_name)
        # these commands only work for the assign_wcs output file
# loop over the slits
#slits_list = nirspec.get_open_slits(img) # this function returns all open slitlets as defined in msa meta file,
# however, some of them may not be projected on the detector, and those are later removed from the list of open
# slitlets. To get the open and projected on the detector slitlets we use the following:
slits_list = img.meta.wcs.get_transform('gwa', 'slit_frame').slits
#print ('Open slits: ', slits_list, '\n')
if debug:
print("Instrument Configuration")
print("Detector: {}".format(img.meta.instrument.detector))
print("GWA: {}".format(img.meta.instrument.grating))
print("Filter: {}".format(img.meta.instrument.filter))
print("Lamp: {}".format(img.meta.instrument.lamp_state))
print("GWA_XTILT: {}".format(img.meta.instrument.gwa_xtilt))
print("GWA_YTILT: {}".format(img.meta.instrument.gwa_ytilt))
print("GWA_TTILT: {}".format(img.meta.instrument.gwa_tilt))
elif mode_used == "MOS_sim":
# this command works for the extract_2d and flat_field output files
model = datamodels.MultiSlitModel(infile_name)
slits_list = model.slits
# list to determine if pytest is passed or not
total_test_result = OrderedDict()
# loop over the slices
for slit in slits_list:
name = slit.name
msg = "\nWorking with slit: "+str(name)
print(msg)
log_msgs.append(msg)
# get the right index in the list of open shutters
pslit_list = pslit.tolist()
slitlet_idx = pslit_list.index(int(name))
# Get the ESA trace
#raw_data_root_file = "NRSV96215001001P0000000002103_1_491_SE_2016-01-24T01h25m07.cts.fits" # testing only
_, raw_data_root_file = auxfunc.get_modeused_and_rawdatrt_PTT_cfg_file()
msg = "Using this raw data file to find the corresponding ESA file: "+raw_data_root_file
print(msg)
log_msgs.append(msg)
q, r, c = quad[slitlet_idx], row[slitlet_idx], col[slitlet_idx]
msg = "Pipeline shutter info: quadrant= "+str(q)+" row= "+str(r)+" col="+str(c)
print(msg)
log_msgs.append(msg)
specifics = [q, r, c]
esafile = auxfunc.get_esafile(esa_files_path, raw_data_root_file, "MOS", specifics)
#esafile = "/Users/pena/Documents/PyCharmProjects/nirspec/pipeline/src/sandbox/zzzz/Trace_MOS_3_319_013_V96215001001P0000000002103_41543_JLAB88.fits" # testing only
# skip the test if the esafile was not found
if "ESA file not found" in esafile:
msg1 = " * compare_wcs_mos.py is exiting because the corresponding ESA file was not found."
msg2 = " -> The WCS test is now set to skip and no plots will be generated. "
print(msg1)
print(msg2)
log_msgs.append(msg1)
log_msgs.append(msg2)
FINAL_TEST_RESULT = "skip"
return FINAL_TEST_RESULT, log_msgs
# Open the trace in the esafile
if len(esafile) == 2:
print(len(esafile[-1]))
if len(esafile[-1]) == 0:
esafile = esafile[0]
msg = "Using this ESA file: \n"+str(esafile)
print(msg)
log_msgs.append(msg)
with fits.open(esafile) as esahdulist:
print ("* ESA file contents ")
esahdulist.info()
esa_shutter_i = esahdulist[0].header['SHUTTERI']
esa_shutter_j = esahdulist[0].header['SHUTTERJ']
esa_quadrant = esahdulist[0].header['QUADRANT']
if debug:
msg = "ESA shutter info: quadrant="+esa_quadrant+" shutter_i="+esa_shutter_i+" shutter_j="+esa_shutter_j
print(msg)
log_msgs.append(msg)
# first check if ESA shutter info is the same as pipeline
msg = "For slitlet"+str(name)
print(msg)
log_msgs.append(msg)
if q == esa_quadrant:
msg = "\n -> Same quadrant for pipeline and ESA data: "+str(q)
print(msg)
log_msgs.append(msg)
else:
msg = "\n -> Missmatch of quadrant for pipeline and ESA data: "+str(q)+esa_quadrant
print(msg)
log_msgs.append(msg)
if r == esa_shutter_i:
msg = "\n -> Same row for pipeline and ESA data: "+str(r)
print(msg)
log_msgs.append(msg)
else:
msg = "\n -> Missmatch of row for pipeline and ESA data: "+str(r)+esa_shutter_i
print(msg)
log_msgs.append(msg)
if c == esa_shutter_j:
msg = "\n -> Same column for pipeline and ESA data: "+str(c)+"\n"
print(msg)
log_msgs.append(msg)
else:
msg = "\n -> Missmatch of column for pipeline and ESA data: "+str(c)+esa_shutter_j+"\n"
print(msg)
log_msgs.append(msg)
# Assign variables according to detector
skipv2v3test = True
if det == "NRS1":
try:
esa_flux = fits.getdata(esafile, "DATA1")
esa_wave = fits.getdata(esafile, "LAMBDA1")
esa_slity = fits.getdata(esafile, "SLITY1")
esa_msax = fits.getdata(esafile, "MSAX1")
esa_msay = fits.getdata(esafile, "MSAY1")
pyw = wcs.WCS(esahdulist['LAMBDA1'].header)
try:
esa_v2v3x = fits.getdata(esafile, "V2V3X1")
esa_v2v3y = fits.getdata(esafile, "V2V3Y1")
skipv2v3test = False
except KeyError:
msg = "Skipping tests for V2 and V3 because ESA file does not contain corresponding extensions."
print(msg)
log_msgs.append(msg)
except KeyError:
msg = "PTT did not find ESA extensions that match detector NRS1, skipping test for this slitlet..."
print(msg)
log_msgs.append(msg)
continue
if det == "NRS2":
try:
esa_flux = fits.getdata(esafile, "DATA2")
esa_wave = fits.getdata(esafile, "LAMBDA2")
esa_slity = fits.getdata(esafile, "SLITY2")
esa_msax = fits.getdata(esafile, "MSAX2")
esa_msay = fits.getdata(esafile, "MSAY2")
pyw = wcs.WCS(esahdulist['LAMBDA2'].header)
try:
esa_v2v3x = fits.getdata(esafile, "V2V3X2")
esa_v2v3y = fits.getdata(esafile, "V2V3Y2")
skipv2v3test = False
except KeyError:
msg = "Skipping tests for V2 and V3 because ESA file does not contain corresponding extensions."
print(msg)
log_msgs.append(msg)
except KeyError:
msg = "PTT did not find ESA extensions that match detector NRS2, skipping test for this slitlet..."
print(msg)
log_msgs.append(msg)
continue
# get the WCS object for this particular slit
if mode_used is None or mode_used != "MOS_sim":
try:
wcs_slice = nirspec.nrs_wcs_set_input(img, name)
            except ValueError:
msg = "* WARNING: Slitlet "+name+" was not found in the model. Skipping test for this slitlet."
print(msg)
log_msgs.append(msg)
continue
elif mode_used == "MOS_sim":
wcs_slice = model.slits[0].wcs
# if we want to print all available transforms, uncomment line below
#print(wcs_slice)
# The WCS object attribute bounding_box shows all valid inputs, i.e. the actual area of the data according
# to the slice. Inputs outside of the bounding_box return NaN values.
#bbox = wcs_slice.bounding_box
#print('wcs_slice.bounding_box: ', wcs_slice.bounding_box)
# In different observing modes the WCS may have different coordinate frames. To see available frames
# uncomment line below.
#print("Avalable frames: ", wcs_slice.available_frames)
if mode_used is None or mode_used != "MOS_sim":
if debug:
# To get specific pixel values use following syntax:
det2slit = wcs_slice.get_transform('detector', 'slit_frame')
slitx, slity, lam = det2slit(700, 1080)
print("slitx: " , slitx)
print("slity: " , slity)
print("lambda: " , lam)
if debug:
# The number of inputs and outputs in each frame can vary. This can be checked with:
print('Number on inputs: ', det2slit.n_inputs)
print('Number on outputs: ', det2slit.n_outputs)
# Create x, y indices using the Trace WCS
pipey, pipex = np.mgrid[:esa_wave.shape[0], : esa_wave.shape[1]]
esax, esay = pyw.all_pix2world(pipex, pipey, 0)
if det == "NRS2":
msg = "NRS2 needs a flip"
print(msg)
log_msgs.append(msg)
esax = 2049-esax
esay = 2049-esay
# Compute pipeline RA, DEC, and lambda
slitlet_test_result_list = []
pra, pdec, pwave = wcs_slice(esax-1, esay-1) # => RETURNS: RA, DEC, LAMBDA (lam *= 10**-6 to convert to microns)
pwave *= 10**-6
# calculate and print statistics for slit-y and x relative differences
slitlet_name = repr(r)+"_"+repr(c)
tested_quantity = "Wavelength Difference"
rel_diff_pwave_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_wave, pwave, tested_quantity)
rel_diff_pwave_img, notnan_rel_diff_pwave, notnan_rel_diff_pwave_stats, print_stats_strings = rel_diff_pwave_data
for msg in print_stats_strings:
log_msgs.append(msg)
result = auxfunc.does_median_pass_tes(notnan_rel_diff_pwave_stats[1], threshold_diff)
msg = 'Result for test of ' + tested_quantity + ': ' + result
print(msg)
log_msgs.append(msg)
slitlet_test_result_list.append({tested_quantity: result})
# get the transforms for pipeline slit-y
det2slit = wcs_slice.get_transform('detector', 'slit_frame')
slitx, slity, _ = det2slit(esax-1, esay-1)
tested_quantity = "Slit-Y Difference"
# calculate and print statistics for slit-y and x relative differences
rel_diff_pslity_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_slity, slity, tested_quantity, abs=False)
# calculate and print statistics for slit-y and x absolute differences
#rel_diff_pslity_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_slity, slity, tested_quantity, abs=True)
rel_diff_pslity_img, notnan_rel_diff_pslity, notnan_rel_diff_pslity_stats, print_stats_strings = rel_diff_pslity_data
for msg in print_stats_strings:
log_msgs.append(msg)
result = auxfunc.does_median_pass_tes(notnan_rel_diff_pslity_stats[1], threshold_diff)
msg = 'Result for test of ' + tested_quantity + ': ' + result
print(msg)
log_msgs.append(msg)
slitlet_test_result_list.append({tested_quantity: result})
# do the same for MSA x, y and V2, V3
detector2msa = wcs_slice.get_transform("detector", "msa_frame")
pmsax, pmsay, _ = detector2msa(esax-1, esay-1) # => RETURNS: msaX, msaY, LAMBDA (lam *= 10**-6 to convert to microns)
# MSA-x
tested_quantity = "MSA_X Difference"
reldiffpmsax_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_msax, pmsax, tested_quantity)
reldiffpmsax_img, notnan_reldiffpmsax, notnan_reldiffpmsax_stats, print_stats_strings = reldiffpmsax_data
for msg in print_stats_strings:
log_msgs.append(msg)
result = auxfunc.does_median_pass_tes(notnan_reldiffpmsax_stats[1], threshold_diff)
msg = 'Result for test of ' + tested_quantity + ': ' + result
print(msg)
log_msgs.append(msg)
slitlet_test_result_list.append({tested_quantity: result})
# MSA-y
tested_quantity = "MSA_Y Difference"
reldiffpmsay_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_msay, pmsay, tested_quantity)
reldiffpmsay_img, notnan_reldiffpmsay, notnan_reldiffpmsay_stats, print_stats_strings = reldiffpmsay_data
for msg in print_stats_strings:
log_msgs.append(msg)
result = auxfunc.does_median_pass_tes(notnan_reldiffpmsay_stats[1], threshold_diff)
msg = 'Result for test of ' + tested_quantity + ': ' + result
print(msg)
log_msgs.append(msg)
slitlet_test_result_list.append({tested_quantity: result})
# V2 and V3
if not skipv2v3test:
detector2v2v3 = wcs_slice.get_transform("detector", "v2v3")
pv2, pv3, _ = detector2v2v3(esax-1, esay-1) # => RETURNS: v2, v3, LAMBDA (lam *= 10**-6 to convert to microns)
tested_quantity = "V2 difference"
# converting to degrees to compare with ESA
reldiffpv2_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_v2v3x, pv2, tested_quantity)
if reldiffpv2_data[-2][0] > 0.0:
print("\nConverting pipeline results to degrees to compare with ESA")
pv2 = pv2/3600.
reldiffpv2_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_v2v3x, pv2, tested_quantity)
reldiffpv2_img, notnan_reldiffpv2, notnan_reldiffpv2_stats, print_stats_strings = reldiffpv2_data
for msg in print_stats_strings:
log_msgs.append(msg)
result = auxfunc.does_median_pass_tes(notnan_reldiffpv2_stats[1], threshold_diff)
msg = 'Result for test of '+tested_quantity+': '+result
print(msg)
log_msgs.append(msg)
slitlet_test_result_list.append({tested_quantity: result})
tested_quantity = "V3 difference"
# converting to degrees to compare with ESA
reldiffpv3_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_v2v3y, pv3, tested_quantity)
if reldiffpv3_data[-2][0] > 0.0:
print("\nConverting pipeline results to degrees to compare with ESA")
pv3 = pv3/3600.
reldiffpv3_data = auxfunc.get_reldiffarr_and_stats(threshold_diff, esa_slity, esa_v2v3y, pv3, tested_quantity)
reldiffpv3_img, notnan_reldiffpv3, notnan_reldiffpv3_stats, print_stats_strings = reldiffpv3_data
for msg in print_stats_strings:
log_msgs.append(msg)
result = auxfunc.does_median_pass_tes(notnan_reldiffpv3_stats[1], threshold_diff)
msg = 'Result for test of '+tested_quantity+': '+result
print(msg)
log_msgs.append(msg)
slitlet_test_result_list.append({tested_quantity: result})
total_test_result[slitlet_name] = slitlet_test_result_list
# PLOTS
if show_figs or save_figs:
# set the common variables
basenameinfile_name = os.path.basename(infile_name)
main_title = filt+" "+grat+" SLITLET="+slitlet_name+"\n"
bins = 15 # binning for the histograms, if None the function will automatically calculate them
# lolim_x, uplim_x, lolim_y, uplim_y
plt_origin = None
# Wavelength
title = main_title+r"Relative wavelength difference = $\Delta \lambda$"+"\n"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = r"Relative $\Delta \lambda$ = ($\lambda_{pipe} - \lambda_{ESA}) / \lambda_{ESA}$", "N"
info_hist = [xlabel, ylabel, bins, notnan_rel_diff_pwave_stats]
if notnan_rel_diff_pwave_stats[1] is np.nan:
msg = "Unable to create plot of relative wavelength difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = infile_name.replace(basenameinfile_name, slitlet_name+"_"+det+"_rel_wave_diffs.pdf")
auxfunc.plt_two_2Dimgandhist(rel_diff_pwave_img, notnan_rel_diff_pwave, info_img, info_hist,
plt_name=plt_name, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
# Slit-y
title = main_title+r"Relative slit position = $\Delta$slit_y"+"\n"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = r"Relative $\Delta$slit_y = (slit_y$_{pipe}$ - slit_y$_{ESA}$)/slit_y$_{ESA}$", "N"
info_hist = [xlabel, ylabel, bins, notnan_rel_diff_pslity_stats]
if notnan_rel_diff_pslity_stats[1] is np.nan:
msg = "Unable to create plot of relative slit-y difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = infile_name.replace(basenameinfile_name, slitlet_name+"_"+det+"_rel_slitY_diffs.pdf")
auxfunc.plt_two_2Dimgandhist(rel_diff_pslity_img, notnan_rel_diff_pslity, info_img, info_hist,
plt_name=plt_name, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
# MSA-x
title = main_title+r"Relative MSA-x Difference = $\Delta$MSA_x"+"\n"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = r"Relative $\Delta$MSA_x = (MSA_x$_{pipe}$ - MSA_x$_{ESA}$)/MSA_x$_{ESA}$", "N"
info_hist = [xlabel, ylabel, bins, notnan_reldiffpmsax_stats]
if notnan_reldiffpmsax_stats[1] is np.nan:
msg = "Unable to create plot of relative MSA-x difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = infile_name.replace(basenameinfile_name, slitlet_name+"_"+det+"_rel_MSAx_diffs.pdf")
auxfunc.plt_two_2Dimgandhist(reldiffpmsax_img, notnan_reldiffpmsax, info_img, info_hist,
plt_name=plt_name, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
# MSA-y
title = main_title+r"Relative MSA-y Difference = $\Delta$MSA_y"+"\n"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = r"Relative $\Delta$MSA_y = (MSA_y$_{pipe}$ - MSA_y$_{ESA}$)/MSA_y$_{ESA}$", "N"
info_hist = [xlabel, ylabel, bins, notnan_reldiffpmsay_stats]
if notnan_reldiffpmsay_stats[1] is np.nan:
msg = "Unable to create plot of relative MSA-y difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = infile_name.replace(basenameinfile_name, slitlet_name+"_"+det+"_rel_MSAy_diffs.pdf")
auxfunc.plt_two_2Dimgandhist(reldiffpmsay_img, notnan_reldiffpmsay, info_img, info_hist,
plt_name=plt_name, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
if not skipv2v3test:
# V2
title = main_title+r"Relative V2 Difference = $\Delta$V2"+"\n"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = r"Relative $\Delta$V2 = (V2$_{pipe}$ - V2$_{ESA}$)/V2$_{ESA}$", "N"
hist_data = notnan_reldiffpv2
info_hist = [xlabel, ylabel, bins, notnan_reldiffpv2_stats]
if notnan_reldiffpv2_stats[1] is np.nan:
msg = "Unable to create plot of relative V2 difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = infile_name.replace(basenameinfile_name, slitlet_name+"_"+det+"_rel_V2_diffs.pdf")
auxfunc.plt_two_2Dimgandhist(reldiffpv2_img, hist_data, info_img, info_hist,
plt_name=plt_name, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
# V3
title = main_title+r"Relative V3 Difference = $\Delta$V3"+"\n"
info_img = [title, "x (pixels)", "y (pixels)"]
xlabel, ylabel = r"Relative $\Delta$V3 = (V3$_{pipe}$ - V3$_{ESA}$)/V3$_{ESA}$", "N"
hist_data = notnan_reldiffpv3
info_hist = [xlabel, ylabel, bins, notnan_reldiffpv3_stats]
if notnan_reldiffpv3_stats[1] is np.nan:
msg = "Unable to create plot of relative V3 difference."
print(msg)
log_msgs.append(msg)
else:
plt_name = infile_name.replace(basenameinfile_name, slitlet_name+"_"+det+"_rel_V3_diffs.pdf")
auxfunc.plt_two_2Dimgandhist(reldiffpv3_img, hist_data, info_img, info_hist,
plt_name=plt_name, plt_origin=plt_origin, show_figs=show_figs, save_figs=save_figs)
else:
msg = "NO plots were made because show_figs and save_figs were both set to False. \n"
print(msg)
log_msgs.append(msg)
# remove the copy of the MSA shutter configuration file
subprocess.run(["rm", msametfl])
# If all tests passed then pytest will be marked as PASSED, else it will be FAILED
FINAL_TEST_RESULT = "FAILED"
for sl, testlist in total_test_result.items():
for tdict in testlist:
for t, tr in tdict.items():
if tr == "FAILED":
FINAL_TEST_RESULT = "FAILED"
msg = "\n * The test of "+t+" for slitlet "+sl+" FAILED."
print(msg)
log_msgs.append(msg)
else:
FINAL_TEST_RESULT = "PASSED"
msg = "\n * The test of "+t+" for slitlet "+sl+ " PASSED."
print(msg)
log_msgs.append(msg)
if FINAL_TEST_RESULT == "PASSED":
msg = "\n *** Final result for assign_wcs test will be reported as PASSED *** \n"
print(msg)
log_msgs.append(msg)
else:
msg = "\n *** Final result for assign_wcs test will be reported as FAILED *** \n"
print(msg)
log_msgs.append(msg)
return FINAL_TEST_RESULT, log_msgs
if __name__ == '__main__':
# This is a simple test of the code
pipeline_path = "/Users/pena/Documents/PyCharmProjects/nirspec/pipeline"
# input parameters that the script expects
working_dir = pipeline_path+"/src/sandbox/zzzz/first_run_MOSset/"
infile_name = working_dir+"jwtest1010001_01101_00001_NRS1_short_assign_wcs.fits"
msa_conf_name = working_dir+"V9621500100101_short_msa.fits"
#working_dir = pipeline_path+"/src/sandbox/MOS_G395M_test/"
#infile_name = working_dir+"g395m_nrs1_gain_scale_assign_wcs.fits"
#msa_conf_name = working_dir+"V9621500100101_msa.fits"
esa_files_path = "/grp/jwst/wit4/nirspec_vault/prelaunch_data/testing_sets/b7.1_pipeline_testing/test_data_suite/MOS_CV3/ESA_Int_products"
#working_dir = pipeline_path+"/src/sandbox/simulation_test/491_results/"
#infile_name = working_dir+"F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod_updatedHDR_assign_wcs.fits"
#msa_conf_name = working_dir+"jw95065006001_0_msa.fits"
#msa_conf_name = working_dir+"jw95065006001_0_singles_msa.fits"
#esa_files_path="/grp/jwst/wit4/nirspec_vault/prelaunch_data/testing_sets/b7.1_pipeline_testing/test_data_suite/simulations/ESA_Int_products"
# choose None or MOS_sim, only for MOS simulations
mode_used = "MOS"
#mode_used = "MOS_sim"
# Run the principal function of the script
result = compare_wcs(infile_name, esa_files_path=esa_files_path, msa_conf_name=msa_conf_name,
show_figs=False, save_figs=True, threshold_diff=1.0e-7, mode_used=mode_used, debug=False)
| 50.230903
| 173
| 0.619742
|
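Every quantity in `compare_wcs` above is judged the same way: build a relative-difference array between pipeline and ESA values, drop the NaNs, and pass when the median stays at or below the threshold (see the calls to `auxfunc.get_reldiffarr_and_stats` and `auxfunc.does_median_pass_tes`). A hedged, standalone sketch of that pattern, not the nirspec_pipe_testing_tool implementation:

```python
# Hedged sketch of the median relative-difference test used throughout compare_wcs above.
import numpy as np

def median_rel_diff_passes(pipe, esa, threshold=1.0e-7):
    pipe = np.asarray(pipe, dtype=float)
    esa = np.asarray(esa, dtype=float)
    with np.errstate(divide="ignore", invalid="ignore"):
        rel_diff = (pipe - esa) / esa                 # relative difference per pixel
    finite = rel_diff[np.isfinite(rel_diff)]          # ignore NaN/inf entries
    median = np.median(finite) if finite.size else np.nan
    passed = bool(np.isfinite(median) and abs(median) <= threshold)
    return median, passed

print(median_rel_diff_passes([1.0000001, 2.0, np.nan], [1.0, 2.0, 3.0]))
```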
41a0950a501af2dd2742997c05b201570aacb74a
| 350
|
py
|
Python
|
experiments/jacobi-2d/tmp_files/4888.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/jacobi-2d/tmp_files/4888.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/jacobi-2d/tmp_files/4888.py
|
LoopTilingBenchmark/benchmark
|
52a3d2e70216552a498fd91de02a2fa9cb62122c
|
[
"BSD-2-Clause"
] | null | null | null |
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-2d/tmp_files/4888.c')
procedure('kernel_jacobi_2d')
loop(0)
tile(0,2,16,2)
tile(0,4,16,4)
tile(1,2,16,2)
tile(1,4,16,4)
| 26.923077
| 118
| 0.765714
|
9fa5f8f83bd237a1edb8f4adcfd56d9e25b8172f
| 971
|
py
|
Python
|
setup.py
|
Nandan-unni/Nano
|
fa7c85c85cfa6cc31ff0d98679aafe5be179e8d1
|
[
"MIT"
] | 2
|
2021-01-31T17:27:50.000Z
|
2021-02-12T17:52:43.000Z
|
setup.py
|
Nandan-unni/Nano
|
fa7c85c85cfa6cc31ff0d98679aafe5be179e8d1
|
[
"MIT"
] | 4
|
2021-02-10T10:25:26.000Z
|
2021-02-12T15:36:08.000Z
|
setup.py
|
Nandan-unni/Nano
|
fa7c85c85cfa6cc31ff0d98679aafe5be179e8d1
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as README:
long_description = README.read()
setup(
name="nanoAPI",
version="0.5.0",
author="Nandanunni A S",
author_email="asnqln@gmail.com",
description="A nano web framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Nandan-unni/Nano",
packages=find_packages(),
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent"
],
python_requires='>=3.6',
install_requires=[
"gunicorn==20.0.4",
"colorama==0.4.4"
],
)
# python3 setup.py sdist bdist_wheel
# python3 -m twine upload --repository testpypi dist/*
# python3 -m twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
| 26.972222
| 81
| 0.647786
|
e22756cc722a109a0693f1e3349a4a59402225e8
| 506
|
py
|
Python
|
send.py
|
CarlColglazier/libre-rytm-sds
|
a76ba95ea1d4db847985ef6b51e53ca7e2a6c966
|
[
"BSD-3-Clause"
] | 3
|
2019-10-28T09:52:35.000Z
|
2020-09-13T15:26:49.000Z
|
send.py
|
CarlColglazier/libre-rytm-sds
|
a76ba95ea1d4db847985ef6b51e53ca7e2a6c966
|
[
"BSD-3-Clause"
] | null | null | null |
send.py
|
CarlColglazier/libre-rytm-sds
|
a76ba95ea1d4db847985ef6b51e53ca7e2a6c966
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import glob
import os
from pathlib import Path
from time import sleep
import subprocess
hw = sys.argv[1]
script_path=os.path.dirname(os.path.realpath(__file__))
bin_path = script_path + "/send-sds/send-sds"
for i, f in enumerate(glob.glob("*.sds")):
run = [f'{bin_path}', f'{hw}', "0", f'{i}', f'{f}']
runs = 5
while runs > 0:
result = subprocess.run(run, capture_output=True)
print(result.returncode)
if result.returncode == 217:
runs = runs - 1
else:
break
sleep(0.5)
| 20.24
| 55
| 0.673913
|
e777fdb08cf9fdf4548efc214b1f23164e2e8657
| 6,878
|
py
|
Python
|
train_ocr_crnn.py
|
nhh1501/E2E_MLT_VN
|
dabbc1f7e12b45fbaef965200217cca41793dbc3
|
[
"MIT"
] | null | null | null |
train_ocr_crnn.py
|
nhh1501/E2E_MLT_VN
|
dabbc1f7e12b45fbaef965200217cca41793dbc3
|
[
"MIT"
] | null | null | null |
train_ocr_crnn.py
|
nhh1501/E2E_MLT_VN
|
dabbc1f7e12b45fbaef965200217cca41793dbc3
|
[
"MIT"
] | null | null | null |
'''
Created on Sep 29, 2017
@author: Michal.Busta at gmail.com
'''
import numpy as np
import torch.nn.functional as F
import os
import torch
import net_utils
import argparse
import time
import ocr_gen
import torch.nn as nn
from models_crnn import ModelResNetSep_crnn
from ocr_test_utils import print_seq_ext
from utils import E2Ecollate,E2Edataset,alignCollate,ocrDataset
from torchvision import transforms
from net_eval import strLabelConverter,eval_ocr_crnn
import matplotlib.pyplot as plt
device = 'cuda'
f = open('codec.txt', 'r')
codec = f.readlines()[0]
f.close()
print(len(codec))
base_lr = 0.001
lr_decay = 0.99
momentum = 0.9
weight_decay = 0.0005
batch_per_epoch = 1000
disp_interval = 200
def main(opts):
train_loss = 0
train_loss_lr = 0
cnt = 1
cntt = 0
time_total = 0
now = time.time()
converter = strLabelConverter(codec)
model_name = 'E2E-MLT'
net = ModelResNetSep_crnn(attention=True, multi_scale=True, num_classes=400, fixed_height=opts.norm_height,
net='densenet', )
ctc_loss = nn.CTCLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=base_lr, weight_decay=weight_decay)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=0.5 ,patience=5,verbose=True)
scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.00007, max_lr=0.0003, step_size_up=3000,
cycle_momentum=False)
step_start = 0
if opts.cuda:
net.to(device)
ctc_loss.to(device)
if os.path.exists(opts.model):
print('loading model from %s' % args.model)
step_start, learning_rate = net_utils.load_net(args.model, net, optimizer)
else:
learning_rate = base_lr
for param_group in optimizer.param_groups:
param_group['lr'] = base_lr
learning_rate = param_group['lr']
print(param_group['lr'])
step_start = 0
net.train()
# data_generator = ocr_gen.get_batch(num_workers=opts.num_readers,
# batch_size=opts.batch_size,
# train_list=opts.train_list, in_train=True, norm_height=opts.norm_height, rgb = True)
data_dataset = ocrDataset(root=opts.train_list, norm_height=opts.norm_height , in_train=True)
data_generator1 = torch.utils.data.DataLoader(data_dataset, batch_size=opts.batch_size, shuffle=True,
collate_fn=alignCollate())
val_dataset = ocrDataset(root=opts.valid_list, norm_height=opts.norm_height , in_train=False)
val_generator1 = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False,
collate_fn=alignCollate())
for step in range(step_start, 300000):
# images, labels, label_length = next(data_generator)
# im_data = net_utils.np_to_variable(images, is_cuda=opts.cuda).permute(0, 3, 1, 2)
        try:
            images, label = next(dataloader_iterator)
        except (NameError, StopIteration):
            # (re)create the iterator on the first step or when the loader is exhausted
            dataloader_iterator = iter(data_generator1)
            images, label = next(dataloader_iterator)
labels, label_length = converter.encode(label)
im_data = images.to(device)
labels_pred = net.forward_ocr(im_data)
# backward
probs_sizes = torch.IntTensor( [(labels_pred.size()[0])] * (labels_pred.size()[1]) )
label_sizes = torch.IntTensor( torch.from_numpy(np.array(label_length)).int() )
labels = torch.IntTensor( torch.from_numpy(np.array(labels)).int() )
loss = ctc_loss(labels_pred, labels, probs_sizes, label_sizes) / im_data.size(0) # change 1.9.
optimizer.zero_grad()
loss.backward()
clipping_value = 1.0
torch.nn.utils.clip_grad_norm_(net.parameters(),clipping_value)
if not (torch.isnan(loss) or torch.isinf(loss)):
optimizer.step()
scheduler.step()
train_loss += loss.data.cpu().numpy() #net.bbox_loss.data.cpu().numpy()[0]
# train_loss += loss.data.cpu().numpy()[0] #net.bbox_loss.data.cpu().numpy()[0]
cnt += 1
if opts.debug:
      dbg = labels_pred.permute(1, 2, 0).data.cpu().numpy()
      ctc_f = dbg.swapaxes(1, 2)
      pred_labels = ctc_f.argmax(2)  # do not overwrite the CTC target tensor `labels`
      det_text, conf, dec_s, _ = print_seq_ext(pred_labels[0, :], codec)
print('{0} \t'.format(det_text))
if step % disp_interval == 0:
for param_group in optimizer.param_groups:
learning_rate = param_group['lr']
train_loss /= cnt
train_loss_lr += train_loss
cntt += 1
time_now = time.time() - now
time_total += time_now
now = time.time()
save_log = os.path.join(opts.save_path, 'loss_ocr.txt')
# f = open('content/drive/My_Drive/DATA_OCR/backup/ca ca/loss.txt','a')
f = open(save_log, 'a')
f.write(
'epoch %d[%d], loss_ctc: %.3f,time: %.2f s, lr: %.5f, cnt: %d\n' % (
step / batch_per_epoch, step, train_loss, time_now,learning_rate, cnt))
f.close()
print('epoch %d[%d], loss_ctc: %.3f,time: %.2f s, lr: %.5f, cnt: %d\n' % (
step / batch_per_epoch, step, train_loss, time_now,learning_rate, cnt))
train_loss = 0
cnt = 1
if step > step_start and (step % batch_per_epoch == 0):
for param_group in optimizer.param_groups:
learning_rate = param_group['lr']
# print(learning_rate)
save_name = os.path.join(opts.save_path, 'OCR_{}_{}.h5'.format(model_name, step))
state = {'step': step,
'learning_rate': learning_rate,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(state, save_name)
# scheduler.step(train_loss_lr / cntt)
# evaluate
CER, WER = eval_ocr_crnn(val_generator1, net)
# scheduler.step(CER)
      save_log = os.path.join(opts.save_path, 'loss_ocr.txt')  # defined here as well, in case no display step has run yet
      f = open(save_log, 'a')
      f.write('time epoch [%d]: %.2f s, loss_total: %.3f, CER = %f, WER = %f' % (step / batch_per_epoch, time_total, train_loss_lr / max(cntt, 1), CER, WER))
      f.close()
      print('time epoch [%d]: %.2f s, loss_total: %.3f, CER = %f, WER = %f \n' % (step / batch_per_epoch, time_total, train_loss_lr / max(cntt, 1), CER, WER))
print('save model: {}'.format(save_name))
net.train()
time_total = 0
cntt = 0
train_loss_lr = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-train_list', default='/content/data_MLT_crop/gt_vi.txt')
parser.add_argument('-valid_list', default='/content/data_MLT_crop/gt_vi_eval.txt')
parser.add_argument('-save_path', default='/content/drive/My Drive/DATA_OCR/ocr_lstm')
parser.add_argument('-model', default='E2E-MLT_69000.h5')
parser.add_argument('-debug', type=int, default=0)
parser.add_argument('-batch_size', type=int, default=8)
parser.add_argument('-num_readers', type=int, default=2)
parser.add_argument('-cuda', type=bool, default=True)
parser.add_argument('-norm_height', type=int, default=64)
args = parser.parse_args()
main(args)
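# --- Hedged usage sketch (not part of the original script) -------------------
# Assuming this file is saved as, e.g., train_ocr_crnn.py (the file name is an
# assumption) and that the crop lists, codec.txt and the save path exist, a
# typical invocation based on the argparse flags defined above would be:
#
#   python train_ocr_crnn.py -train_list /content/data_MLT_crop/gt_vi.txt \
#       -valid_list /content/data_MLT_crop/gt_vi_eval.txt \
#       -save_path ./ocr_lstm -model E2E-MLT_69000.h5 \
#       -batch_size 8 -norm_height 64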
| 35.822917 | 150 | 0.660076 |
eb23ffcaadf958e04cdbb2ea6e022c211d79ffc7 | 47,319 | py | Python | spyder/plugins/tours/widgets.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | ["MIT"] | 7,956 | 2015-02-17T01:19:09.000Z | 2022-03-31T21:52:15.000Z | spyder/plugins/tours/widgets.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | ["MIT"] | 16,326 | 2015-02-16T23:15:21.000Z | 2022-03-31T23:34:34.000Z | spyder/plugins/tours/widgets.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | ["MIT"] | 1,918 | 2015-02-20T19:26:26.000Z | 2022-03-31T19:03:25.000Z |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Spyder interactive tours"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from math import ceil
import sys
# Third party imports
from qtpy.QtCore import (QEasingCurve, QPoint, QPropertyAnimation, QRectF, Qt,
Signal)
from qtpy.QtGui import (QBrush, QColor, QIcon, QPainter, QPainterPath, QPen,
QPixmap, QRegion)
from qtpy.QtWidgets import (QAction, QApplication, QComboBox, QDialog,
QGraphicsOpacityEffect, QHBoxLayout, QLabel,
QLayout, QMainWindow, QMenu, QMessageBox,
QPushButton, QSpacerItem, QToolButton, QVBoxLayout,
QWidget)
# Local imports
from spyder import __docs_url__
from spyder.api.panel import Panel
from spyder.api.translations import get_translation
from spyder.config.base import _
from spyder.plugins.layout.layouts import DefaultLayouts
from spyder.py3compat import to_binary_string
from spyder.utils.icon_manager import ima
from spyder.utils.image_path_manager import get_image_path
from spyder.utils.palette import QStylePalette, SpyderPalette
from spyder.utils.qthelpers import add_actions, create_action
from spyder.utils.stylesheet import DialogStyle
MAIN_TOP_COLOR = MAIN_BG_COLOR = QColor(QStylePalette.COLOR_BACKGROUND_1)
# Localization
_ = get_translation('spyder')
MAC = sys.platform == 'darwin'
class FadingDialog(QDialog):
"""A general fade in/fade out QDialog with some builtin functions"""
sig_key_pressed = Signal()
def __init__(self, parent, opacity, duration, easing_curve):
super(FadingDialog, self).__init__(parent)
self.parent = parent
self.opacity_min = min(opacity)
self.opacity_max = max(opacity)
self.duration_fadein = duration[0]
self.duration_fadeout = duration[-1]
self.easing_curve_in = easing_curve[0]
self.easing_curve_out = easing_curve[-1]
self.effect = None
self.anim = None
self._fade_running = False
self._funcs_before_fade_in = []
self._funcs_after_fade_in = []
self._funcs_before_fade_out = []
self._funcs_after_fade_out = []
self.setModal(False)
def _run(self, funcs):
for func in funcs:
func()
def _run_before_fade_in(self):
self._run(self._funcs_before_fade_in)
def _run_after_fade_in(self):
self._run(self._funcs_after_fade_in)
def _run_before_fade_out(self):
self._run(self._funcs_before_fade_out)
def _run_after_fade_out(self):
self._run(self._funcs_after_fade_out)
def _set_fade_finished(self):
self._fade_running = False
def _fade_setup(self):
self._fade_running = True
self.effect = QGraphicsOpacityEffect(self)
self.setGraphicsEffect(self.effect)
self.anim = QPropertyAnimation(
self.effect, to_binary_string("opacity"))
# --- public api
def fade_in(self, on_finished_connect):
self._run_before_fade_in()
self._fade_setup()
self.show()
self.raise_()
self.anim.setEasingCurve(self.easing_curve_in)
self.anim.setStartValue(self.opacity_min)
self.anim.setEndValue(self.opacity_max)
self.anim.setDuration(self.duration_fadein)
self.anim.finished.connect(on_finished_connect)
self.anim.finished.connect(self._set_fade_finished)
self.anim.finished.connect(self._run_after_fade_in)
self.anim.start()
def fade_out(self, on_finished_connect):
self._run_before_fade_out()
self._fade_setup()
self.anim.setEasingCurve(self.easing_curve_out)
self.anim.setStartValue(self.opacity_max)
self.anim.setEndValue(self.opacity_min)
self.anim.setDuration(self.duration_fadeout)
self.anim.finished.connect(on_finished_connect)
self.anim.finished.connect(self._set_fade_finished)
self.anim.finished.connect(self._run_after_fade_out)
self.anim.start()
def is_fade_running(self):
return self._fade_running
def set_funcs_before_fade_in(self, funcs):
self._funcs_before_fade_in = funcs
def set_funcs_after_fade_in(self, funcs):
self._funcs_after_fade_in = funcs
def set_funcs_before_fade_out(self, funcs):
self._funcs_before_fade_out = funcs
def set_funcs_after_fade_out(self, funcs):
self._funcs_after_fade_out = funcs
class FadingCanvas(FadingDialog):
"""The black semi transparent canvas that covers the application"""
def __init__(self, parent, opacity, duration, easing_curve, color,
tour=None):
"""Create a black semi transparent canvas that covers the app."""
super(FadingCanvas, self).__init__(parent, opacity, duration,
easing_curve)
self.parent = parent
self.tour = tour
# Canvas color
self.color = color
# Decoration color
self.color_decoration = QColor(SpyderPalette.COLOR_ERROR_2)
# Width in pixels for decoration
self.stroke_decoration = 2
self.region_mask = None
self.region_subtract = None
self.region_decoration = None
self.widgets = None # The widget to uncover
self.decoration = None # The widget to draw decoration
self.interaction_on = False
self.path_current = None
self.path_subtract = None
self.path_full = None
self.path_decoration = None
# widget setup
self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setAttribute(Qt.WA_TransparentForMouseEvents)
self.setModal(False)
self.setFocusPolicy(Qt.NoFocus)
self.set_funcs_before_fade_in([self.update_canvas])
self.set_funcs_after_fade_out([lambda: self.update_widgets(None),
lambda: self.update_decoration(None)])
def set_interaction(self, value):
self.interaction_on = value
def update_canvas(self):
w, h = self.parent.size().width(), self.parent.size().height()
self.path_full = QPainterPath()
self.path_subtract = QPainterPath()
self.path_decoration = QPainterPath()
self.region_mask = QRegion(0, 0, w, h)
self.path_full.addRect(0, 0, w, h)
# Add the path
if self.widgets is not None:
for widget in self.widgets:
temp_path = QPainterPath()
# if widget is not found... find more general way to handle
if widget is not None:
widget.raise_()
widget.show()
geo = widget.frameGeometry()
width, height = geo.width(), geo.height()
point = widget.mapTo(self.parent, QPoint(0, 0))
x, y = point.x(), point.y()
temp_path.addRect(QRectF(x, y, width, height))
temp_region = QRegion(x, y, width, height)
if self.interaction_on:
self.region_mask = self.region_mask.subtracted(temp_region)
self.path_subtract = self.path_subtract.united(temp_path)
self.path_current = self.path_full.subtracted(self.path_subtract)
else:
self.path_current = self.path_full
if self.decoration is not None:
for widgets in self.decoration:
if isinstance(widgets, QWidget):
widgets = [widgets]
geoms = []
for widget in widgets:
widget.raise_()
widget.show()
geo = widget.frameGeometry()
width, height = geo.width(), geo.height()
point = widget.mapTo(self.parent, QPoint(0, 0))
x, y = point.x(), point.y()
geoms.append((x, y, width, height))
x = min([geom[0] for geom in geoms])
y = min([geom[1] for geom in geoms])
width = max([
geom[0] + geom[2] for geom in geoms]) - x
height = max([
geom[1] + geom[3] for geom in geoms]) - y
temp_path = QPainterPath()
temp_path.addRect(QRectF(x, y, width, height))
temp_region_1 = QRegion(x-1, y-1, width+2, height+2)
temp_region_2 = QRegion(x+1, y+1, width-2, height-2)
temp_region = temp_region_1.subtracted(temp_region_2)
if self.interaction_on:
self.region_mask = self.region_mask.united(temp_region)
self.path_decoration = self.path_decoration.united(temp_path)
else:
self.path_decoration.addRect(0, 0, 0, 0)
# Add a decoration stroke around widget
self.setMask(self.region_mask)
self.update()
self.repaint()
def update_widgets(self, widgets):
self.widgets = widgets
def update_decoration(self, widgets):
self.decoration = widgets
def paintEvent(self, event):
"""Override Qt method"""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
# Decoration
painter.fillPath(self.path_current, QBrush(self.color))
painter.strokePath(self.path_decoration, QPen(self.color_decoration,
self.stroke_decoration))
# decoration_fill = QColor(self.color_decoration)
# decoration_fill.setAlphaF(0.25)
# painter.fillPath(self.path_decoration, decoration_fill)
def reject(self):
"""Override Qt method"""
if not self.is_fade_running():
key = Qt.Key_Escape
self.key_pressed = key
self.sig_key_pressed.emit()
def mousePressEvent(self, event):
"""Override Qt method"""
pass
def focusInEvent(self, event):
"""Override Qt method."""
# To be used so tips do not appear outside spyder
if self.hasFocus():
self.tour.gain_focus()
def focusOutEvent(self, event):
"""Override Qt method."""
# To be used so tips do not appear outside spyder
if self.tour.step_current != 0:
self.tour.lost_focus()
class FadingTipBox(FadingDialog):
"""Dialog that contains the text for each frame in the tour."""
def __init__(self, parent, opacity, duration, easing_curve, tour=None,
color_top=None, color_back=None, combobox_background=None):
super(FadingTipBox, self).__init__(parent, opacity, duration,
easing_curve)
self.holder = self.anim # needed for qt to work
self.parent = parent
self.tour = tour
self.frames = None
self.offset_shadow = 0
self.fixed_width = 300
self.key_pressed = None
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint |
Qt.WindowStaysOnTopHint)
self.setModal(False)
# Widgets
def toolbutton(icon):
bt = QToolButton()
bt.setAutoRaise(True)
bt.setIcon(icon)
return bt
self.button_close = toolbutton(ima.icon("tour.close"))
self.button_home = toolbutton(ima.icon("tour.home"))
self.button_previous = toolbutton(ima.icon("tour.previous"))
self.button_end = toolbutton(ima.icon("tour.end"))
self.button_next = toolbutton(ima.icon("tour.next"))
self.button_run = QPushButton(_('Run code'))
self.button_disable = None
self.button_current = QToolButton()
self.label_image = QLabel()
self.label_title = QLabel()
self.combo_title = QComboBox()
self.label_current = QLabel()
self.label_content = QLabel()
self.label_content.setOpenExternalLinks(True)
self.label_content.setMinimumWidth(self.fixed_width)
self.label_content.setMaximumWidth(self.fixed_width)
self.label_current.setAlignment(Qt.AlignCenter)
self.label_content.setWordWrap(True)
self.widgets = [self.label_content, self.label_title,
self.label_current, self.combo_title,
self.button_close, self.button_run, self.button_next,
self.button_previous, self.button_end,
self.button_home, self.button_current]
arrow = get_image_path('hide')
self.color_top = color_top
self.color_back = color_back
self.combobox_background = combobox_background
self.stylesheet = '''QComboBox {{
padding-left: 5px;
background-color: {}
border-width: 0px;
border-radius: 0px;
min-height:20px;
max-height:20px;
}}
QComboBox::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top left;
border-width: 0px;
}}
QComboBox::down-arrow {{
image: url({});
}}
'''.format(self.combobox_background.name(), arrow)
# Windows fix, slashes should be always in unix-style
self.stylesheet = self.stylesheet.replace('\\', '/')
self.setFocusPolicy(Qt.StrongFocus)
for widget in self.widgets:
widget.setFocusPolicy(Qt.NoFocus)
widget.setStyleSheet(self.stylesheet)
layout_top = QHBoxLayout()
layout_top.addWidget(self.combo_title)
layout_top.addStretch()
layout_top.addWidget(self.button_close)
layout_top.addSpacerItem(QSpacerItem(self.offset_shadow,
self.offset_shadow))
layout_content = QHBoxLayout()
layout_content.addWidget(self.label_content)
layout_content.addWidget(self.label_image)
layout_content.addSpacerItem(QSpacerItem(5, 5))
layout_run = QHBoxLayout()
layout_run.addStretch()
layout_run.addWidget(self.button_run)
layout_run.addStretch()
layout_run.addSpacerItem(QSpacerItem(self.offset_shadow,
self.offset_shadow))
layout_navigation = QHBoxLayout()
layout_navigation.addWidget(self.button_home)
layout_navigation.addWidget(self.button_previous)
layout_navigation.addStretch()
layout_navigation.addWidget(self.label_current)
layout_navigation.addStretch()
layout_navigation.addWidget(self.button_next)
layout_navigation.addWidget(self.button_end)
layout_navigation.addSpacerItem(QSpacerItem(self.offset_shadow,
self.offset_shadow))
layout = QVBoxLayout()
layout.addLayout(layout_top)
layout.addStretch()
layout.addSpacerItem(QSpacerItem(15, 15))
layout.addLayout(layout_content)
layout.addLayout(layout_run)
layout.addStretch()
layout.addSpacerItem(QSpacerItem(15, 15))
layout.addLayout(layout_navigation)
layout.addSpacerItem(QSpacerItem(self.offset_shadow,
self.offset_shadow))
layout.setSizeConstraint(QLayout.SetFixedSize)
self.setLayout(layout)
self.set_funcs_before_fade_in([self._disable_widgets])
self.set_funcs_after_fade_in([self._enable_widgets, self.setFocus])
self.set_funcs_before_fade_out([self._disable_widgets])
self.setContextMenuPolicy(Qt.CustomContextMenu)
# signals and slots
# These are defined every time by the AnimatedTour Class
def _disable_widgets(self):
for widget in self.widgets:
widget.setDisabled(True)
def _enable_widgets(self):
self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint |
Qt.WindowStaysOnTopHint)
for widget in self.widgets:
widget.setDisabled(False)
if self.button_disable == 'previous':
self.button_previous.setDisabled(True)
self.button_home.setDisabled(True)
elif self.button_disable == 'next':
self.button_next.setDisabled(True)
self.button_end.setDisabled(True)
self.button_run.setDisabled(sys.platform == "darwin")
def set_data(self, title, content, current, image, run, frames=None,
step=None):
self.label_title.setText(title)
self.combo_title.clear()
self.combo_title.addItems(frames)
self.combo_title.setCurrentIndex(step)
# min_content_len = max([len(f) for f in frames])
# self.combo_title.setMinimumContentsLength(min_content_len)
# Fix and try to see how it looks with a combo box
self.label_current.setText(current)
self.button_current.setText(current)
self.label_content.setText(content)
self.image = image
if image is None:
self.label_image.setFixedHeight(1)
self.label_image.setFixedWidth(1)
else:
extension = image.split('.')[-1]
self.image = QPixmap(get_image_path(image), extension)
self.label_image.setPixmap(self.image)
self.label_image.setFixedSize(self.image.size())
if run is None:
self.button_run.setVisible(False)
else:
self.button_run.setVisible(True)
if sys.platform == "darwin":
self.button_run.setToolTip("Not available on macOS")
# Refresh layout
self.layout().activate()
def set_pos(self, x, y):
self.x = ceil(x)
self.y = ceil(y)
self.move(QPoint(self.x, self.y))
def build_paths(self):
geo = self.geometry()
radius = 0
shadow = self.offset_shadow
x0, y0 = geo.x(), geo.y()
width, height = geo.width() - shadow, geo.height() - shadow
left, top = 0, 0
right, bottom = width, height
self.round_rect_path = QPainterPath()
self.round_rect_path.moveTo(right, top + radius)
self.round_rect_path.arcTo(right-radius, top, radius, radius, 0.0,
90.0)
self.round_rect_path.lineTo(left+radius, top)
self.round_rect_path.arcTo(left, top, radius, radius, 90.0, 90.0)
self.round_rect_path.lineTo(left, bottom-radius)
self.round_rect_path.arcTo(left, bottom-radius, radius, radius, 180.0,
90.0)
self.round_rect_path.lineTo(right-radius, bottom)
self.round_rect_path.arcTo(right-radius, bottom-radius, radius, radius,
270.0, 90.0)
self.round_rect_path.closeSubpath()
# Top path
header = 36
offset = 2
left, top = offset, offset
right = width - (offset)
self.top_rect_path = QPainterPath()
self.top_rect_path.lineTo(right, top + radius)
self.top_rect_path.moveTo(right, top + radius)
self.top_rect_path.arcTo(right-radius, top, radius, radius, 0.0, 90.0)
self.top_rect_path.lineTo(left+radius, top)
self.top_rect_path.arcTo(left, top, radius, radius, 90.0, 90.0)
self.top_rect_path.lineTo(left, top + header)
self.top_rect_path.lineTo(right, top + header)
def paintEvent(self, event):
"""Override Qt method."""
self.build_paths()
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillPath(self.round_rect_path, self.color_back)
painter.fillPath(self.top_rect_path, self.color_top)
painter.strokePath(self.round_rect_path, QPen(Qt.gray, 1))
# TODO: Build the pointing arrow?
def keyReleaseEvent(self, event):
"""Override Qt method."""
key = event.key()
self.key_pressed = key
keys = [Qt.Key_Right, Qt.Key_Left, Qt.Key_Down, Qt.Key_Up,
Qt.Key_Escape, Qt.Key_PageUp, Qt.Key_PageDown,
Qt.Key_Home, Qt.Key_End, Qt.Key_Menu]
if key in keys:
if not self.is_fade_running():
self.sig_key_pressed.emit()
def mousePressEvent(self, event):
"""Override Qt method."""
# Raise the main application window on click
self.parent.raise_()
self.raise_()
if event.button() == Qt.RightButton:
pass
# clicked_widget = self.childAt(event.x(), event.y())
# if clicked_widget == self.label_current:
# self.context_menu_requested(event)
def focusOutEvent(self, event):
"""Override Qt method."""
# To be used so tips do not appear outside spyder
self.tour.lost_focus()
def context_menu_requested(self, event):
pos = QPoint(event.x(), event.y())
menu = QMenu(self)
actions = []
action_title = create_action(self, _('Go to step: '), icon=QIcon())
action_title.setDisabled(True)
actions.append(action_title)
# actions.append(create_action(self, _(': '), icon=QIcon()))
add_actions(menu, actions)
menu.popup(self.mapToGlobal(pos))
def reject(self):
"""Qt method to handle escape key event"""
if not self.is_fade_running():
key = Qt.Key_Escape
self.key_pressed = key
self.sig_key_pressed.emit()
class AnimatedTour(QWidget):
"""Widget to display an interactive tour."""
def __init__(self, parent):
QWidget.__init__(self, parent)
self.parent = parent
# Variables to adjust
self.duration_canvas = [666, 666]
self.duration_tips = [333, 333]
self.opacity_canvas = [0.0, 0.7]
self.opacity_tips = [0.0, 1.0]
self.color = Qt.black
self.easing_curve = [QEasingCurve.Linear]
self.current_step = 0
self.step_current = 0
self.steps = 0
self.canvas = None
self.tips = None
self.frames = None
self.spy_window = None
self.initial_fullscreen_state = None
self.widgets = None
self.dockwidgets = None
self.decoration = None
self.run = None
self.is_tour_set = False
self.is_running = False
# Widgets
self.canvas = FadingCanvas(self.parent, self.opacity_canvas,
self.duration_canvas, self.easing_curve,
self.color, tour=self)
self.tips = FadingTipBox(self.parent, self.opacity_tips,
self.duration_tips, self.easing_curve,
tour=self, color_top=MAIN_TOP_COLOR,
color_back=MAIN_BG_COLOR,
combobox_background=MAIN_TOP_COLOR)
# Widgets setup
# Needed to fix spyder-ide/spyder#2204.
self.setAttribute(Qt.WA_TransparentForMouseEvents)
# Signals and slots
self.tips.button_next.clicked.connect(self.next_step)
self.tips.button_previous.clicked.connect(self.previous_step)
self.tips.button_close.clicked.connect(self.close_tour)
self.tips.button_run.clicked.connect(self.run_code)
self.tips.button_home.clicked.connect(self.first_step)
self.tips.button_end.clicked.connect(self.last_step)
self.tips.button_run.clicked.connect(
lambda: self.tips.button_run.setDisabled(True))
self.tips.combo_title.currentIndexChanged.connect(self.go_to_step)
# Main window move or resize
self.parent.sig_resized.connect(self._resized)
self.parent.sig_moved.connect(self._moved)
# To capture the arrow keys that allow moving the tour
self.tips.sig_key_pressed.connect(self._key_pressed)
# To control the focus of tour
self.setting_data = False
self.hidden = False
def _resized(self, event):
if self.is_running:
geom = self.parent.geometry()
self.canvas.setFixedSize(geom.width(), geom.height())
self.canvas.update_canvas()
if self.is_tour_set:
self._set_data()
def _moved(self, event):
if self.is_running:
geom = self.parent.geometry()
self.canvas.move(geom.x(), geom.y())
if self.is_tour_set:
self._set_data()
def _close_canvas(self):
self.tips.hide()
self.canvas.fade_out(self.canvas.hide)
def _clear_canvas(self):
# TODO: Add option to also make it white... might be useful?
# Make canvas black before transitions
self.canvas.update_widgets(None)
self.canvas.update_decoration(None)
self.canvas.update_canvas()
def _move_step(self):
self._set_data()
# Show/raise the widget so it is located first!
widgets = self.dockwidgets
if widgets is not None:
widget = widgets[0]
if widget is not None:
widget.show()
widget.raise_()
self._locate_tip_box()
# Change in canvas only after fadein finishes, for visual aesthetics
self.tips.fade_in(self.canvas.update_canvas)
self.tips.raise_()
def _set_modal(self, value, widgets):
platform = sys.platform.lower()
if 'linux' in platform:
pass
elif 'win' in platform:
for widget in widgets:
widget.setModal(value)
widget.hide()
widget.show()
elif 'darwin' in platform:
pass
else:
pass
def _process_widgets(self, names, spy_window):
widgets = []
dockwidgets = []
for name in names:
try:
base = name.split('.')[0]
try:
temp = getattr(spy_window, name)
except AttributeError:
temp = None
# Check if it is the current editor
if 'get_current_editor()' in name:
temp = temp.get_current_editor()
temp = getattr(temp, name.split('.')[-1])
if temp is None:
raise
except AttributeError:
temp = eval(f"spy_window.{name}")
widgets.append(temp)
# Check if it is a dockwidget and make the widget a dockwidget
# If not return the same widget
temp = getattr(temp, 'dockwidget', temp)
dockwidgets.append(temp)
return widgets, dockwidgets
def _set_data(self):
"""Set data that is displayed in each step of the tour."""
self.setting_data = True
step, steps, frames = self.step_current, self.steps, self.frames
current = '{0}/{1}'.format(step + 1, steps)
frame = frames[step]
combobox_frames = [u"{0}. {1}".format(i+1, f['title'])
for i, f in enumerate(frames)]
title, content, image = '', '', None
widgets, dockwidgets, decoration = None, None, None
run = None
# Check if entry exists in dic and act accordingly
if 'title' in frame:
title = frame['title']
if 'content' in frame:
content = frame['content']
if 'widgets' in frame:
widget_names = frames[step]['widgets']
# Get the widgets based on their name
widgets, dockwidgets = self._process_widgets(widget_names,
self.spy_window)
self.widgets = widgets
self.dockwidgets = dockwidgets
if 'decoration' in frame:
widget_names = frames[step]['decoration']
deco, decoration = self._process_widgets(widget_names,
self.spy_window)
self.decoration = decoration
if 'image' in frame:
image = frames[step]['image']
if 'interact' in frame:
self.canvas.set_interaction(frame['interact'])
if frame['interact']:
self._set_modal(False, [self.tips])
else:
self._set_modal(True, [self.tips])
else:
self.canvas.set_interaction(False)
self._set_modal(True, [self.tips])
if 'run' in frame:
# Assume that the first widget is the console
run = frame['run']
self.run = run
self.tips.set_data(title, content, current, image, run,
frames=combobox_frames, step=step)
self._check_buttons()
# Make canvas black when starting a new place of decoration
self.canvas.update_widgets(dockwidgets)
self.canvas.update_decoration(decoration)
self.setting_data = False
def _locate_tip_box(self):
dockwidgets = self.dockwidgets
# Store the dimensions of the main window
geo = self.parent.frameGeometry()
x, y, width, height = geo.x(), geo.y(), geo.width(), geo.height()
self.width_main = width
self.height_main = height
self.x_main = x
self.y_main = y
delta = 20
offset = 10
# Here is the tricky part to define the best position for the
# tip widget
if dockwidgets is not None:
if dockwidgets[0] is not None:
geo = dockwidgets[0].geometry()
x, y, width, height = (geo.x(), geo.y(),
geo.width(), geo.height())
point = dockwidgets[0].mapToGlobal(QPoint(0, 0))
x_glob, y_glob = point.x(), point.y()
# Put tip to the opposite side of the pane
if x < self.tips.width():
x = x_glob + width + delta
y = y_glob + height/2 - self.tips.height()/2
else:
x = x_glob - self.tips.width() - delta
y = y_glob + height/2 - self.tips.height()/2
if (y + self.tips.height()) > (self.y_main + self.height_main):
y = (
y
- (y + self.tips.height() - (
self.y_main + self.height_main)) - offset
)
else:
# Center on parent
x = self.x_main + self.width_main/2 - self.tips.width()/2
y = self.y_main + self.height_main/2 - self.tips.height()/2
self.tips.set_pos(x, y)
def _check_buttons(self):
step, steps = self.step_current, self.steps
self.tips.button_disable = None
if step == 0:
self.tips.button_disable = 'previous'
if step == steps - 1:
self.tips.button_disable = 'next'
def _key_pressed(self):
key = self.tips.key_pressed
if ((key == Qt.Key_Right or key == Qt.Key_Down or
key == Qt.Key_PageDown) and self.step_current != self.steps - 1):
self.next_step()
elif ((key == Qt.Key_Left or key == Qt.Key_Up or
key == Qt.Key_PageUp) and self.step_current != 0):
self.previous_step()
elif key == Qt.Key_Escape:
self.close_tour()
elif key == Qt.Key_Home and self.step_current != 0:
self.first_step()
elif key == Qt.Key_End and self.step_current != self.steps - 1:
self.last_step()
elif key == Qt.Key_Menu:
pos = self.tips.label_current.pos()
self.tips.context_menu_requested(pos)
def _hiding(self):
self.hidden = True
self.tips.hide()
# --- public api
def run_code(self):
codelines = self.run
console = self.widgets[0]
for codeline in codelines:
console.execute_code(codeline)
def set_tour(self, index, frames, spy_window):
self.spy_window = spy_window
self.active_tour_index = index
self.last_frame_active = frames['last']
self.frames = frames['tour']
self.steps = len(self.frames)
self.is_tour_set = True
def _handle_fullscreen(self):
if (self.spy_window.isFullScreen() or
self.spy_window.layouts._fullscreen_flag):
if sys.platform == 'darwin':
self.spy_window.setUpdatesEnabled(True)
msg_title = _("Request")
msg = _("To run the tour, please press the green button on "
"the left of the Spyder window's title bar to take "
"it out of fullscreen mode.")
QMessageBox.information(self, msg_title, msg,
QMessageBox.Ok)
return True
if self.spy_window.layouts._fullscreen_flag:
self.spy_window.layouts.toggle_fullscreen()
else:
self.spy_window.setWindowState(
self.spy_window.windowState()
& (~ Qt.WindowFullScreen))
return False
def start_tour(self):
self.spy_window.setUpdatesEnabled(False)
if self._handle_fullscreen():
return
self.spy_window.layouts.save_current_window_settings(
'layout_current_temp/',
section="quick_layouts",
)
self.spy_window.layouts.quick_layout_switch(
DefaultLayouts.SpyderLayout)
geo = self.parent.geometry()
x, y, width, height = geo.x(), geo.y(), geo.width(), geo.height()
# self.parent_x = x
# self.parent_y = y
# self.parent_w = width
# self.parent_h = height
# FIXME: reset step to last used value
# Reset step to beginning
self.step_current = self.last_frame_active
# Adjust the canvas size to match the main window size
self.canvas.setFixedSize(width, height)
self.canvas.move(QPoint(x, y))
self.spy_window.setUpdatesEnabled(True)
self.canvas.fade_in(self._move_step)
self._clear_canvas()
self.is_running = True
def close_tour(self):
self.tips.fade_out(self._close_canvas)
self.spy_window.setUpdatesEnabled(False)
self.canvas.set_interaction(False)
self._set_modal(True, [self.tips])
self.canvas.hide()
try:
# set the last played frame by updating the available tours in
# parent. This info will be lost on restart.
self.parent.tours_available[self.active_tour_index]['last'] =\
self.step_current
except Exception:
pass
self.is_running = False
self.spy_window.layouts.quick_layout_switch('current_temp')
self.spy_window.setUpdatesEnabled(True)
def hide_tips(self):
"""Hide tips dialog when the main window loses focus."""
self._clear_canvas()
self.tips.fade_out(self._hiding)
def unhide_tips(self):
"""Unhide tips dialog when the main window loses focus."""
self._clear_canvas()
self._move_step()
self.hidden = False
def next_step(self):
self._clear_canvas()
self.step_current += 1
self.tips.fade_out(self._move_step)
def previous_step(self):
self._clear_canvas()
self.step_current -= 1
self.tips.fade_out(self._move_step)
def go_to_step(self, number, id_=None):
self._clear_canvas()
self.step_current = number
self.tips.fade_out(self._move_step)
def last_step(self):
self.go_to_step(self.steps - 1)
def first_step(self):
self.go_to_step(0)
def lost_focus(self):
"""Confirm if the tour loses focus and hides the tips."""
if (self.is_running and
not self.setting_data and not self.hidden):
if sys.platform == 'darwin':
if not self.tour_has_focus():
self.hide_tips()
if not self.any_has_focus():
self.close_tour()
else:
if not self.any_has_focus():
self.hide_tips()
def gain_focus(self):
"""Confirm if the tour regains focus and unhides the tips."""
if (self.is_running and self.any_has_focus() and
not self.setting_data and self.hidden):
self.unhide_tips()
def any_has_focus(self):
"""Returns True if tour or main window has focus."""
f = (self.hasFocus() or self.parent.hasFocus() or
self.tour_has_focus() or self.isActiveWindow())
return f
def tour_has_focus(self):
"""Returns true if tour or any of its components has focus."""
f = (self.tips.hasFocus() or self.canvas.hasFocus() or
self.tips.isActiveWindow())
return f
class OpenTourDialog(QDialog):
"""Initial widget with tour."""
def __init__(self, parent, tour_function):
super().__init__(parent)
if MAC:
flags = (self.windowFlags() | Qt.WindowStaysOnTopHint
& ~Qt.WindowContextHelpButtonHint)
else:
flags = self.windowFlags() & ~Qt.WindowContextHelpButtonHint
self.setWindowFlags(flags)
self.tour_function = tour_function
# Image
images_layout = QHBoxLayout()
icon_filename = 'tour-spyder-logo'
image_path = get_image_path(icon_filename)
image = QPixmap(image_path)
image_label = QLabel()
image_height = int(image.height() * DialogStyle.IconScaleFactor)
image_width = int(image.width() * DialogStyle.IconScaleFactor)
image = image.scaled(image_width, image_height, Qt.KeepAspectRatio,
Qt.SmoothTransformation)
image_label.setPixmap(image)
images_layout.addStretch()
images_layout.addWidget(image_label)
images_layout.addStretch()
if MAC:
images_layout.setContentsMargins(0, -5, 20, 0)
else:
images_layout.setContentsMargins(0, -8, 35, 0)
# Label
tour_label_title = QLabel(_("Welcome to Spyder!"))
tour_label_title.setStyleSheet(f"font-size: {DialogStyle.TitleFontSize}")
tour_label_title.setWordWrap(True)
tour_label = QLabel(
_("Check out our interactive tour to "
"explore some of Spyder's panes and features."))
tour_label.setStyleSheet(f"font-size: {DialogStyle.ContentFontSize}")
tour_label.setWordWrap(True)
tour_label.setFixedWidth(340)
# Buttons
buttons_layout = QHBoxLayout()
dialog_tour_color = QStylePalette.COLOR_BACKGROUND_2
start_tour_color = QStylePalette.COLOR_ACCENT_2
start_tour_hover = QStylePalette.COLOR_ACCENT_3
start_tour_pressed = QStylePalette.COLOR_ACCENT_4
dismiss_tour_color = QStylePalette.COLOR_BACKGROUND_4
dismiss_tour_hover = QStylePalette.COLOR_BACKGROUND_5
dismiss_tour_pressed = QStylePalette.COLOR_BACKGROUND_6
font_color = QStylePalette.COLOR_TEXT_1
self.launch_tour_button = QPushButton(_('Start tour'))
self.launch_tour_button.setStyleSheet((
"QPushButton {{ "
"background-color: {background_color};"
"border-color: {border_color};"
"font-size: {font_size};"
"color: {font_color};"
"padding: {padding}}}"
"QPushButton:hover:!pressed {{ "
"background-color: {color_hover}}}"
"QPushButton:pressed {{ "
"background-color: {color_pressed}}}"
).format(background_color=start_tour_color,
border_color=start_tour_color,
font_size=DialogStyle.ButtonsFontSize,
font_color=font_color,
padding=DialogStyle.ButtonsPadding,
color_hover=start_tour_hover,
color_pressed=start_tour_pressed))
self.launch_tour_button.setAutoDefault(False)
self.dismiss_button = QPushButton(_('Dismiss'))
self.dismiss_button.setStyleSheet((
"QPushButton {{ "
"background-color: {background_color};"
"border-color: {border_color};"
"font-size: {font_size};"
"color: {font_color};"
"padding: {padding}}}"
"QPushButton:hover:!pressed {{ "
"background-color: {color_hover}}}"
"QPushButton:pressed {{ "
"background-color: {color_pressed}}}"
).format(background_color=dismiss_tour_color,
border_color=dismiss_tour_color,
font_size=DialogStyle.ButtonsFontSize,
font_color=font_color,
padding=DialogStyle.ButtonsPadding,
color_hover=dismiss_tour_hover,
color_pressed=dismiss_tour_pressed))
self.dismiss_button.setAutoDefault(False)
buttons_layout.addStretch()
buttons_layout.addWidget(self.launch_tour_button)
if not MAC:
buttons_layout.addSpacing(10)
buttons_layout.addWidget(self.dismiss_button)
layout = QHBoxLayout()
layout.addLayout(images_layout)
label_layout = QVBoxLayout()
label_layout.addWidget(tour_label_title)
if not MAC:
label_layout.addSpacing(3)
label_layout.addWidget(tour_label)
else:
label_layout.addWidget(tour_label)
label_layout.addSpacing(10)
vertical_layout = QVBoxLayout()
if not MAC:
vertical_layout.addStretch()
vertical_layout.addLayout(label_layout)
vertical_layout.addSpacing(20)
vertical_layout.addLayout(buttons_layout)
vertical_layout.addStretch()
else:
vertical_layout.addLayout(label_layout)
vertical_layout.addLayout(buttons_layout)
general_layout = QHBoxLayout()
if not MAC:
general_layout.addStretch()
general_layout.addLayout(layout)
general_layout.addSpacing(1)
general_layout.addLayout(vertical_layout)
general_layout.addStretch()
else:
general_layout.addLayout(layout)
general_layout.addLayout(vertical_layout)
self.setLayout(general_layout)
self.launch_tour_button.clicked.connect(self._start_tour)
self.dismiss_button.clicked.connect(self.close)
self.setStyleSheet(f"background-color:{dialog_tour_color}")
self.setContentsMargins(18, 40, 18, 40)
if not MAC:
self.setFixedSize(640, 280)
def _start_tour(self):
self.close()
self.tour_function()
# ----------------------------------------------------------------------------
# Used for testing the functionality
# ----------------------------------------------------------------------------
class TourTestWindow(QMainWindow):
""" """
sig_resized = Signal("QResizeEvent")
sig_moved = Signal("QMoveEvent")
def __init__(self):
super(TourTestWindow, self).__init__()
self.setGeometry(300, 100, 400, 600)
self.setWindowTitle('Exploring QMainWindow')
self.exit = QAction('Exit', self)
self.exit.setStatusTip('Exit program')
# create the menu bar
menubar = self.menuBar()
file_ = menubar.addMenu('&File')
file_.addAction(self.exit)
# create the status bar
self.statusBar()
# QWidget or its instance needed for box layout
self.widget = QWidget(self)
self.button = QPushButton('test')
self.button1 = QPushButton('1')
self.button2 = QPushButton('2')
effect = QGraphicsOpacityEffect(self.button2)
self.button2.setGraphicsEffect(effect)
self.anim = QPropertyAnimation(effect, to_binary_string("opacity"))
self.anim.setStartValue(0.01)
self.anim.setEndValue(1.0)
self.anim.setDuration(500)
lay = QVBoxLayout()
lay.addWidget(self.button)
lay.addStretch()
lay.addWidget(self.button1)
lay.addWidget(self.button2)
self.widget.setLayout(lay)
self.setCentralWidget(self.widget)
self.button.clicked.connect(self.action1)
self.button1.clicked.connect(self.action2)
self.tour = AnimatedTour(self)
def action1(self):
frames = get_tour('test')
index = 0
dic = {'last': 0, 'tour': frames}
self.tour.set_tour(index, dic, self)
self.tour.start_tour()
def action2(self):
self.anim.start()
def resizeEvent(self, event):
"""Reimplement Qt method"""
QMainWindow.resizeEvent(self, event)
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
QMainWindow.moveEvent(self, event)
self.sig_moved.emit(event)
def local_test():
from spyder.utils.qthelpers import qapplication
app = QApplication([])
win = TourTestWindow()
win.show()
app.exec_()
if __name__ == '__main__':
local_test()
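# --- Hedged illustration (not part of the original module) -------------------
# AnimatedTour.set_tour() expects {'last': <int>, 'tour': [<frame dict>, ...]}
# and _set_data() reads the optional per-frame keys 'title', 'content',
# 'widgets', 'decoration', 'image', 'interact' and 'run'.  A minimal, purely
# illustrative tour definition (widget attribute names and texts are made up)
# could therefore look like:
#
#   example_frames = {
#       'last': 0,
#       'tour': [
#           {'title': 'Editor',
#            'content': 'This pane is where you edit your files.',
#            'widgets': ['editor'],      # attribute looked up on the main window
#            'interact': True},
#           {'title': 'Console',
#            'content': 'Code typed here is executed immediately.',
#            'widgets': ['ipyconsole'],
#            'run': ['1 + 1']},          # lines passed to run_code()
#       ],
#   }
#   tour = AnimatedTour(main_window)    # main_window: the Spyder main window
#   tour.set_tour(0, example_frames, main_window)
#   tour.start_tour()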
| 36.7669 | 82 | 0.577802 |
03bbe6bdb6dc170c5a0156d97f2f14be20bbe753 | 8,236 | py | Python | make_tfrecord.py | ndaidong/tf-ssd-mobilenet | 3d6082178b018d9e02c6044d562ef05ca2021899 | ["MIT"] | null | null | null | make_tfrecord.py | ndaidong/tf-ssd-mobilenet | 3d6082178b018d9e02c6044d562ef05ca2021899 | ["MIT"] | null | null | null | make_tfrecord.py | ndaidong/tf-ssd-mobilenet | 3d6082178b018d9e02c6044d562ef05ca2021899 | ["MIT"] | 1 | 2018-04-18T03:41:09.000Z | 2018-04-18T03:41:09.000Z |
#!/usr/bin/env python3
import glob
import argparse
import sys
import hashlib
import io
from os import path, mkdir, remove
from shutil import rmtree
from random import shuffle
from lxml import etree
from funcy import compose
from tqdm import tqdm
from PIL import Image
import tensorflow as tf
from tflib.object_detection.utils import dataset_util
from tflib.object_detection.utils import label_map_util
def get_default_label_map():
return 'configs/label_map.pbtxt'
def get_default_data_dir():
return 'temp/data'
def get_default_extract_count():
return 100
def get_default_split_ratio():
return 0.1
def create_example(entry, label_map_dict):
img_path = entry[0]
label_path = entry[1]
try:
with tf.gfile.GFile(label_path, 'r') as fid:
xml_str = bytes(bytearray(fid.read(), encoding='utf-8'))
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
with tf.gfile.GFile(img_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width, height = image.size
width = int(width)
height = int(height)
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
if 'object' in data:
for obj in data['object']:
difficult_obj.append(int(0))
_xmin = max(float(obj['bndbox']['xmin']), 0)
_ymin = max(float(obj['bndbox']['ymin']), 0)
_xmax = min(float(obj['bndbox']['xmax']), width)
_ymax = min(float(obj['bndbox']['ymax']), height)
xmin.append(_xmin / width)
ymin.append(_ymin / height)
xmax.append(_xmax / width)
ymax.append(_ymax / height)
class_name = obj['name']
classes_text.append(class_name.encode('utf8'))
classes.append(label_map_dict[class_name])
truncated.append(int(0))
poses.append('Unspecified'.encode('utf8'))
return tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')
),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')
),
'image/key/sha256': dataset_util.bytes_feature(
key.encode('utf8')
),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(
'jpeg'.encode('utf8')
),
'image/object/bbox/xmin': dataset_util.float_list_feature(
xmin
),
'image/object/bbox/xmax': dataset_util.float_list_feature(
xmax
),
'image/object/bbox/ymin': dataset_util.float_list_feature(
ymin
),
'image/object/bbox/ymax': dataset_util.float_list_feature(
ymax
),
'image/object/class/text': dataset_util.bytes_list_feature(
classes_text
),
'image/object/class/label': dataset_util.int64_list_feature(
classes
),
'image/object/difficult': dataset_util.int64_list_feature(
difficult_obj
),
'image/object/truncated': dataset_util.int64_list_feature(
truncated
),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
except ValueError as err:
print(img_path)
print(label_path)
print(err)
return None
def select(count):
def get_subset(arr):
shuffle(arr)
max_size = min(count, len(arr))
return arr[:max_size]
return get_subset
def handle(files):
arr = []
for i in range(len(files)):
imagesrc = str(files[i])
xml_file = imagesrc.replace('images/', 'labels/')
xml_file = xml_file.replace('.jpg', '.xml')
if path.isfile(xml_file):
arr.append([imagesrc, xml_file])
return arr
def check(d):
files = []
if path.isdir(d):
files = glob.glob(d + '/images/*.jpg')
return files
def load(d, count):
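    # check(): list <d>/images/*.jpg, handle(): pair each image with its
    # labels/*.xml annotation, select(count): shuffle and keep at most `count`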
return compose(select(count), handle, check)(d)
def process(entries, output_dir, label_map, split_ratio):
rat = float(split_ratio)
if rat >= 1 or rat < 0:
rat = get_default_split_ratio()
total = len(entries)
test_size = round(rat * total)
training_size = total - test_size
print('test/train/total {}/{}/{}'.format(test_size, training_size, total))
test_set = entries[:test_size]
training_set = entries[test_size:]
label_map_dict = label_map_util.get_label_map_dict(label_map)
print(label_map_dict)
tfwriter = tf.python_io.TFRecordWriter
print('Handling training set ({})'.format(training_size))
train_writer = tfwriter(output_dir + '/train.record')
for entry in tqdm(training_set):
try:
exp = create_example(entry, label_map_dict)
if exp is not None:
train_writer.write(exp.SerializeToString())
except ValueError as err:
print(err)
continue
train_writer.close()
print('Handling test set ({})'.format(test_size))
test_writer = tfwriter(output_dir + '/eval.record')
for entry in tqdm(test_set):
try:
exp = create_example(entry, label_map_dict)
if exp is not None:
test_writer.write(exp.SerializeToString())
except ValueError as err:
print(err)
continue
test_writer.close()
def preload(input_dir, extracting_count, output_dir, label_map, split_ratio):
if path.exists(output_dir):
rmtree(output_dir)
mkdir(output_dir)
files = load(input_dir, int(extracting_count))
total = len(files)
if total > 0:
print('Selected {} entries to process'.format(total))
return process(files, output_dir, label_map, split_ratio)
else:
print('No input label & image. Stopped!')
def start():
parser = argparse.ArgumentParser()
parser.add_argument(
'-d',
'--dir',
help='Path to dataset. Default "../vgg-faces-utils/output"'
)
parser.add_argument(
'-l',
'--labelmap',
help='Path to label map. Default "configs/label_map.pbtxt"'
)
parser.add_argument(
'-e',
'--extract',
help='How many items? Default 100'
)
parser.add_argument(
'-o',
'--output',
help='Path to output dir. Default "temp/data"'
)
parser.add_argument(
'-r',
'--ratio',
help='Ratio of Training/Test set. Default 0.1 (9 train/1 eval)'
)
args = parser.parse_args()
if not args.dir:
print('Please specify path to source dir')
else:
label_map = args.labelmap
if label_map is None:
label_map = get_default_label_map()
count = args.extract
if count is None:
count = get_default_extract_count()
odir = args.output
if odir is None:
odir = get_default_data_dir()
ratio = args.ratio
if ratio is None:
ratio = get_default_split_ratio()
entries = preload(
path.normpath(args.dir),
count,
odir,
label_map,
ratio
)
if __name__ == '__main__':
start()
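# --- Hedged usage sketch (not part of the original script) -------------------
# With a dataset laid out as <dir>/images/*.jpg plus matching Pascal-VOC style
# <dir>/labels/*.xml files, a typical run based on the flags defined in start()
# would be:
#
#   python make_tfrecord.py -d ../vgg-faces-utils/output \
#       -l configs/label_map.pbtxt -e 500 -o temp/data -r 0.1
#
# which writes temp/data/train.record and temp/data/eval.record.  The dataset
# path is only the example mentioned in the -d help text above.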
| 28.898246 | 78 | 0.567508 |
2a1e34f8713434f98e713ea1c4334ac404f9037c | 3,625 | py | Python | Input.py | JuanCab/ChromaStarPy | b4401a32c607e89375ffb6d8e6e4b3de3340ef24 | ["MIT"] | null | null | null | Input.py | JuanCab/ChromaStarPy | b4401a32c607e89375ffb6d8e6e4b3de3340ef24 | ["MIT"] | null | null | null | Input.py | JuanCab/ChromaStarPy | b4401a32c607e89375ffb6d8e6e4b3de3340ef24 | ["MIT"] | null | null | null |
#
#
#Custom filename tags to distinguish from other runs
project = "Project"
runVers = "Run"
#Default plot
#Select ONE only:
#makePlot = "none"
#makePlot = "structure"
#makePlot = "sed"
#makePlot = "spectrum"
#makePlot = "ldc"
#makePlot = "ft"
#makePlot = "tlaLine"
###The following two plot variables refer to the partial pressure output ("Report 6")
makePlot = "ppress"
plotSpec = "H"
#Spectrum synthesis mode
# - uses model in Restart.py with minimal structure calculation
#specSynMode = True
specSynMode = False
#Model atmosphere
teff = 6100.0 #, K
logg = 4.5 #, cgs
#teff = 5777.0 #, K
#logg = 4.44 #, cgs
log10ZScale = 0.0 # [A/H]
massStar = 1.0 #, solar masses
xiT = 1.0 #, km/s
logHeFe = 0.0 #, [He/Fe]
logCO = 0.0 #, [C/O]
logAlphaFe = 0.0 #, [alpha-elements/Fe]
#Spectrum synthesis
#Test
#TiO beta
#lambdaStart = 560.0 #, nm
#lambdaStop = 564.0 #, nm
#TiO gamma
#lambdaStart = 708.0 #, nm
#lambdaStop = 712.0 #, nm
#lambdaStart = 715.0 #, nm
#lambdaStop = 719.0 #, nm
#TiO gamma prime
#lambdaStart = 617.0 #, nm
#lambdaStop = 621.0 #, nm
#TiO epsilon
#lambdaStart = 839.0 #, nm
#lambdaStop = 843.0 #, nm
#TiO delta
#lambdaStart = 882.0 #, nm
#lambdaStop = 892.0 #, nm
#TiO phi
#lambdaStart = 1100.0 #, nm
#lambdaStop = 1110.0 #, nm
#CH A2Delta_X2Pi ("G-band" at 4314 A)
lambdaStart = 395.0 #, nm
lambdaStop = 400.0 #, nm
fileStem = project + "-"\
+ str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
+ "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
+ "-" + runVers
lineThresh = -3.0 #, min log(KapLine/kapCnt) for inclusion at all - actually, being used as "lineVoigt" for now
voigtThresh = -3.0 #, min log(KapLine/kapCnt) for treatment as Voigt - currently not used - all lines get Voigt
logGammaCol = 0.0
logKapFudge = 0.0
macroV = 1.0 #, km/s
rotV = 1.0 #, km/s
rotI = 90.0 #, degrees
RV = 0.0 #, km/s
vacAir = "vacuum"
sampling = "fine"
#Performance vs realism
nOuterIter = 12 #, no of outer Pgas(HSE) - EOS - kappa iterations
nInnerIter = 12 #, no of inner (ion fraction) - Pe iterations
ifMols = 1 #, whether to include TiO JOLA bands in synthesis
ifTiO = 0 #Soon to be deprecated
#Gaussian filter for limb darkening curve, fourier transform
diskLambda = 500.0 #, nm
diskSigma = 0.01 #, nm
#Two-level atom and spectral line
userLam0 = 589.592 #, nm
userA12 = 6.24 #, A_12 logarithmic abundance = log_10(N/H_H) = 12
userLogF = -0.495 #, log(f) oscillator strength // saturated line
userStage = 0 #, ionization stage of user species (0 (I) - 3 (IV))
userChiI1 = 5.139 #, ground state chi_I, eV
userChiI2 = 47.29 #, 1st ionized state chi_I, eV
userChiI3 = 71.62 #, 2nd ionized state chi_I, eV
userChiI4 = 98.94 #, 3rd ionized state chi_I, eV
userChiL = 0.0 #, lower atomic E-level, eV
userGw1 = 2 #, ground state stat. weight or partition fn (stage I) - unitless
userGw2 = 1 #, ground state stat. weight or partition fn (stage II) - unitless
userGw3 = 1 #, ground state stat. weight or partition fn (stage III) - unitless
userGw4 = 1 #, ground state stat. weight or partition fn (stage IV) - unitless
userGwL = 2 #, lower E-level stat. weight - unitless
userMass = 22.9 #, amu
userLogGammaCol = 1.0 #, log_10 Lorentzian broadening enhancement factor
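# Worked example (derived from the defaults above): with teff = 6100.0,
# logg = 4.5, log10ZScale = 0.0, lambdaStart = 395.0 and lambdaStop = 400.0,
# fileStem evaluates to "Project-6100.0-4.5-0.0-395.0-400.0-Run".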
| 32.657658 | 117 | 0.606069 |
a8147d7fb267e0842336f49dbc69560959499038 | 146 | py | Python | output/models/saxon_data/cta/cta0025_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/saxon_data/cta/cta0025_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/saxon_data/cta/cta0025_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | null | null | null |
from output.models.saxon_data.cta.cta0025_xsd.cta0025 import (
Doc,
Event,
When,
)
__all__ = [
"Doc",
"Event",
"When",
]
| 12.166667 | 62 | 0.575342 |
6e3546106e60f708dadbe72cb2162aff1ae23582 | 308 | py | Python | pinax/projects/basic_project/__init__.py | skabber/pinax | 6fdee6b7bbbb597074d45122badf3a6dd75e0b92 | ["MIT"] | 2 | 2015-12-27T23:07:51.000Z | 2016-05-09T08:57:28.000Z | pinax/projects/private_beta_project/__init__.py | SMiGL/pinax | d08b2655fe661566bd13c5c170b1a4cad9e67a1d | ["MIT"] | null | null | null | pinax/projects/private_beta_project/__init__.py | SMiGL/pinax | d08b2655fe661566bd13c5c170b1a4cad9e67a1d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
__about__ = """
This project comes with the bare minimum set of applications and templates to
get you started. It includes no extra tabs–only the profile and notices tabs are
included by default. From here you can add any extra functionality and
applications that you would like.
"""
| 38.5 | 80 | 0.762987 |
e9239bd4b40490f4163d3b428fa6c8af233c46a4 | 219 | py | Python | analyzer/staticinfo_exceptions.py | JayveeHe/senz.app.staticinfo.inferrence | 98748506adcbb28b074337e261fe79b2141f31a5 | ["MIT"] | null | null | null | analyzer/staticinfo_exceptions.py | JayveeHe/senz.app.staticinfo.inferrence | 98748506adcbb28b074337e261fe79b2141f31a5 | ["MIT"] | null | null | null | analyzer/staticinfo_exceptions.py | JayveeHe/senz.app.staticinfo.inferrence | 98748506adcbb28b074337e261fe79b2141f31a5 | ["MIT"] | null | null | null |
__author__ = 'Jayvee'
class MsgException(Exception):
def __init__(self, msg):
self.message = msg
def __str__(self):
return self.message
def __unicode__(self):
return self.message
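# --- Hedged usage sketch (not part of the original module) -------------------
# MsgException simply carries a human-readable message; a caller might use it
# like this (the trigger condition is made up):
#
#   if profile is None:
#       raise MsgException('empty profile, nothing to infer')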
| 16.846154 | 30 | 0.639269 |
58b6503a102c4277bb31bb1fc2a3b23a7b32857d | 7,698 | py | Python | nobos_commons/tools/decorators/cache_decorator.py | noboevbo/nobos_commons | 471e52e10fd2228c106777c72d8439e58b047003 | ["MIT"] | 2 | 2020-06-03T16:28:44.000Z | 2020-10-10T03:07:23.000Z | nobos_commons/tools/decorators/cache_decorator.py | noboevbo/nobos_commons | 471e52e10fd2228c106777c72d8439e58b047003 | ["MIT"] | null | null | null | nobos_commons/tools/decorators/cache_decorator.py | noboevbo/nobos_commons | 471e52e10fd2228c106777c72d8439e58b047003 | ["MIT"] | 4 | 2020-10-10T03:07:25.000Z | 2021-09-30T01:11:02.000Z |
import collections.abc
import hashlib
import inspect
import json
import os
import pickle
import re
import shutil
import sys
from typing import Callable, List, Any
from nobos_commons.data_structures.configs.cache_config import CacheConfig
from nobos_commons.tools.decorators.timing_decorator import stopwatch
from nobos_commons.tools.log_handler import logger
from nobos_commons.utils.file_helper import get_create_path
class __Cache(object):
__slots__ = ['func_cache_dir', 'func_cache_dir_path', 'func', 'cache_config', 'is_method', 'replacement_args', 'arg_names']
__cache_columns: List[str] = ['func_call_hash', 'func_name', 'func_code', 'func_args', 'func_result',
'date_created']
def __init__(self, func: Callable, cache_config: CacheConfig):
"""
Cache decorator which currently supports functions with base type, dictionary, list and lambda parameters
:param func:
"""
self.func = func
self.cache_config = cache_config
try:
self.func_cache_dir = self.get_function_hash()
except OSError as err:
print("OS Error, probably not possible to retrieve source code. Disable cache. Details: '{}'".format(err))
self.cache_config.cache_enabled = False
return
self.is_method = self.__is_method(self.func)
self.func_cache_dir_path = get_create_path(os.path.join(self.cache_config.cache_dir, self.func_cache_dir))
self.arg_names = list(inspect.signature(self.func).parameters.keys())
self.replacement_args = None
if self.func.__name__ in self.cache_config.str_args_replacement_dict.keys():
self.replacement_args = self.cache_config.str_args_replacement_dict[self.func.__name__]
def get_arg_replaced_cache_value(self, cache_file_path: str, *args):
old_args = list(args)
for replacement_arg in self.replacement_args:
if replacement_arg.arg_name in self.arg_names:
arg_index = self.arg_names.index(replacement_arg.arg_name)
else:
raise ValueError("replacement_arg {0} for function {1} is not in function signature".format(
replacement_arg.arg_name, self.func.__name__))
arg_value = args[arg_index]
old_args[arg_index] = replacement_arg.arg_replacement_func(arg_value)
old_args = tuple(old_args)
old_cache_file_path = self.__get_cache_file_path(*old_args)
old_cached_value = self.__get_cached_value(old_cache_file_path)
if old_cached_value is None:
return None
shutil.move(old_cache_file_path, cache_file_path)
logger.info('Moved cache file \'{}\' to \'{}\''.format(old_cache_file_path, cache_file_path))
return old_cached_value
def get_args_hash(self, *args) -> str:
arg_hashes: str = ''
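        # For bound methods args[0] is `self`; skip it so the instance object
        # itself does not become part of the cache key.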
start = 1 if self.is_method else 0
for i in range(start, len(args)):
arg = args[i]
            if isinstance(arg, collections.abc.Hashable):  # collections.Hashable was removed in Python 3.10
if isinstance(arg, str):
arg_hashes = "{}_{}".format(arg_hashes, hashlib.sha256(bytes(arg.encode('utf-8'))).hexdigest())
                elif callable(arg): # is a function, TODO: Maybe use the parameters + func_path as str and hash this
raise NotImplementedError(
"Type {} is not supported for caching because it is not hashable".format(type(arg)))
else: # TODO: OBJS?
arg_hashes = "{}_{}".format(arg_hashes, hashlib.sha256(bytes(arg)).hexdigest())
else:
if isinstance(arg, dict) or isinstance(arg, list):
arg_hashes = "{}_{}".format(arg_hashes, hashlib.sha256(
json.dumps(arg, sort_keys=True).encode('utf-8')).hexdigest())
else:
raise NotImplementedError(
"Type {} is not supported for caching because it is not hashable".format(type(arg)))
return hashlib.sha256(arg_hashes.encode("utf-8")).hexdigest()
def get_function_hash(self) -> str:
func_source = inspect.getsource(self.func)
        func_source = re.sub(r"@cache\(.*\)\n", '', func_source) # remove @cache decorator from func code
function_hashes = "{}_{}".format(self.func.__name__,
hashlib.sha256(func_source.encode('utf-8')).hexdigest())
return hashlib.sha256(bytes(function_hashes.encode('utf-8'))).hexdigest()
def __get_cache_file_path(self, *args) -> str:
arg_hash = self.get_args_hash(*args)
func_hash = self.get_function_hash()
func_call_hashes = "{}_{}".format(func_hash, arg_hash)
func_call_hash = hashlib.sha256(bytes(func_call_hashes.encode('utf-8'))).hexdigest()
return os.path.join(self.func_cache_dir_path, func_call_hash)
def __get_cached_value(self, cache_file_path: str) -> Any:
if (self.cache_config.reload_all or self.func.__name__ in self.cache_config.func_names_to_reload) and \
os.path.exists(cache_file_path):
logger.info('Deleted cache file \'{}\''.format(cache_file_path))
os.remove(cache_file_path)
if os.path.exists(cache_file_path):
try:
cache_value = pickle.load(open(cache_file_path, 'rb'))
logger.info('Loaded {} results from cache file \'{}\''.format(self.func.__name__, cache_file_path))
return cache_value
            except Exception:
                # unreadable or corrupted cache file: drop it and recompute
                os.remove(cache_file_path)
return None
@stopwatch
def __call__(self, *args, **kwargs):
"""
Checks if a func result is already cached, if yes it's returned if not the func will be executed and the
result will be saved to a cache file
:param func: The function which produces the result
:param cache_file_path: The cache file path
:param reload: If a cached file exists it should be updated
:return: The requested content, either retrieved from cache or calculated and saved to cache
"""
if not self.cache_config.cache_enabled:
logger.info('Cache disabled for {}'.format(self.func.__name__))
return self.func(*args, **kwargs)
if sys.version_info[0] == 2:
logger.info(
'Load {} from function, because caching is disabled or incompatible!'.format(self.func.__name__))
return self.func(*args, **kwargs) # pickle bug in Python2 for large datasets
cache_file_path = self.__get_cache_file_path(*args)
if self.replacement_args is not None:
cached_value = self.get_arg_replaced_cache_value(cache_file_path, *args)
else:
cached_value = self.__get_cached_value(cache_file_path)
if cached_value is not None:
return cached_value
result = self.func(*args, **kwargs) # **kwargs
pickle.dump(result, open(cache_file_path, 'wb'), protocol=4)
logger.info('Created cache file for {} in \'{}\''.format(self.func.__name__, cache_file_path))
return result
def __get__(self, obj, objtype):
"""Support instance methods."""
import functools
return functools.partial(self.__call__, obj)
@staticmethod
def __is_method(func):
spec = inspect.signature(func)
if len(spec.parameters) > 0:
if list(spec.parameters.keys())[0] == 'self':
return True
return False
def cache(cache_config: CacheConfig):
def wrapper(func):
return __Cache(func, cache_config)
return wrapper
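# Usage sketch (hedged): the CacheConfig constructor arguments below are illustrative
# placeholders only; only the attributes referenced above (e.g. cache_enabled) are known.
#
#     my_cache_config = CacheConfig(cache_enabled=True)   # hypothetical arguments
#
#     @cache(my_cache_config)
#     def load_dataset(path: str):
#         ...  # expensive computation
#
# The first call executes load_dataset and pickles its result under a key built from the
# function's source hash plus the hash of its arguments; later calls with the same
# arguments load the pickle instead of recomputing.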
| 45.821429
| 127
| 0.643154
|
4a2e53158e1a1c7f219fcd1dcbc35e955c922a1e
| 194
|
py
|
Python
|
processes/gather_exception.py
|
kinoreel/kino-gather
|
defc0d6b311651f985467b5bfcfdbf77d73c10ae
|
[
"MIT"
] | null | null | null |
processes/gather_exception.py
|
kinoreel/kino-gather
|
defc0d6b311651f985467b5bfcfdbf77d73c10ae
|
[
"MIT"
] | 3
|
2017-06-03T16:50:56.000Z
|
2017-10-01T09:24:37.000Z
|
processes/gather_exception.py
|
kinoreel/kino-gather
|
defc0d6b311651f985467b5bfcfdbf77d73c10ae
|
[
"MIT"
] | null | null | null |
class GatherException(Exception):
def __init__(self, imdb_id, message):
self.message = message
self.imdb_id = imdb_id
super(GatherException, self).__init__(message)
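# Example (hypothetical values): a gather step can report which IMDb title failed and why.
#     raise GatherException('tt0111161', 'failed to retrieve data from the API')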
| 27.714286
| 54
| 0.685567
|
a3d5e7e48e0b7fece416ee290ee2b05d34c4bfd1
| 916
|
py
|
Python
|
get_imgs.py
|
Zhang-Qi7/Cat-vs-Dog-on-web
|
7a6aff3ec7d3bec01294de0e86fd456a5bba65d2
|
[
"MIT"
] | 2
|
2020-08-25T02:47:51.000Z
|
2020-08-25T15:13:43.000Z
|
get_imgs.py
|
Zhang-Qi7/Cat-vs-Dog-on-web
|
7a6aff3ec7d3bec01294de0e86fd456a5bba65d2
|
[
"MIT"
] | 1
|
2020-08-25T10:23:47.000Z
|
2020-08-26T07:16:17.000Z
|
get_imgs.py
|
Zhang-Qi7/Cat-vs-Dog-on-web
|
7a6aff3ec7d3bec01294de0e86fd456a5bba65d2
|
[
"MIT"
] | 1
|
2020-08-25T09:09:06.000Z
|
2020-08-25T09:09:06.000Z
|
import os
import re
import requests
def download_baidu(word):
    url = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&ct=201326592&v=flip'
    pic_urls = re.findall('"objURL":"(.*?)",', requests.get(url).text, re.S)
    # Normalize the search term once so images land in ./train/dog or ./train/cat
    if word in ['dog', 'dogs', 'puppy', '狗']:
        word = 'dog'
    if word in ['cat', 'cats', '猫']:
        word = 'cat'
    os.makedirs('./train/' + word, exist_ok=True)
    i = 0
    for each in pic_urls:
        print(each)
        try:
            pic = requests.get(each, timeout=10)
        except requests.exceptions.RequestException:
            print('failed to download ' + each)
            continue
        path = './train/' + word + '/' + word + '_' + str(i) + '.jpg'
        with open(path, 'wb') as fp:
            fp.write(pic.content)
        i += 1
if __name__ == '__main__':
word = input("Input key word: ")
download_baidu(word)
| 26.941176
| 109
| 0.505459
|
64b5a2cb8929ad161a1ee1dc4b0d356c36e7cf8d
| 2,930
|
py
|
Python
|
distributed_dp/dme_utils.py
|
AbdulmoneamAli/federated
|
c54a9f5053d6316f81aa6f6d1eba61068927a33d
|
[
"Apache-2.0"
] | 1
|
2021-07-29T16:35:21.000Z
|
2021-07-29T16:35:21.000Z
|
distributed_dp/dme_utils.py
|
AbdulmoneamAli/federated
|
c54a9f5053d6316f81aa6f6d1eba61068927a33d
|
[
"Apache-2.0"
] | null | null | null |
distributed_dp/dme_utils.py
|
AbdulmoneamAli/federated
|
c54a9f5053d6316f81aa6f6d1eba61068927a33d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for distributed mean estimation."""
import numpy as np
import tensorflow as tf
from distributed_dp.modular_clipping_factory import modular_clip_by_value
def generate_client_data(d, n, l2_norm=1):
"""Sample `n` of `d`-dim vectors on the l2 ball with radius `l2_norm`.
Args:
d: The dimension of the client vector.
n: The number of clients.
l2_norm: The L2 norm of the sampled vector.
Returns:
A list of `n` np.array each with shape (d,).
"""
vectors = np.random.normal(size=(n, d))
unit_vectors = vectors / np.linalg.norm(vectors, axis=-1, keepdims=True)
scaled_vectors = unit_vectors * l2_norm
# Cast to float32 as TF implementations use float32.
return list(scaled_vectors.astype(np.float32))
def compute_dp_average(client_data, dp_query, is_compressed, bits):
"""Aggregate client data with DPQuery's interface and take average."""
global_state = dp_query.initial_global_state()
sample_params = dp_query.derive_sample_params(global_state)
client_template = tf.zeros_like(client_data[0])
sample_state = dp_query.initial_sample_state(client_template)
if is_compressed:
# Achieve compression via modular clipping. Upper bound is exclusive.
clip_lo, clip_hi = -(2**(bits - 1)), 2**(bits - 1)
# 1. Client pre-processing stage.
for x in client_data:
record = tf.convert_to_tensor(x)
prep_record = dp_query.preprocess_record(sample_params, record)
# Client applies modular clip on the preprocessed record.
prep_record = modular_clip_by_value(prep_record, clip_lo, clip_hi)
sample_state = dp_query.accumulate_preprocessed_record(
sample_state, prep_record)
# 2. Server applies modular clip on the aggregate.
sample_state = modular_clip_by_value(sample_state, clip_lo, clip_hi)
else:
for x in client_data:
record = tf.convert_to_tensor(x)
sample_state = dp_query.accumulate_record(
sample_params, sample_state, record=record)
# Apply server post-processing.
agg_result, _ = dp_query.get_noised_result(sample_state, global_state)
# The agg_result should have the same input type as client_data.
assert agg_result.shape == client_data[0].shape
assert agg_result.dtype == client_data[0].dtype
# Take the average on the aggregate.
return agg_result / len(client_data)
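# Minimal sketch: sample a few client vectors and check that they lie on the unit l2 ball.
# compute_dp_average additionally needs a DPQuery instance (e.g. from tensorflow_privacy),
# which is not constructed here.
if __name__ == '__main__':
  demo_vectors = generate_client_data(d=10, n=5, l2_norm=1.0)
  for v in demo_vectors:
    assert abs(np.linalg.norm(v) - 1.0) < 1e-4
  print('sampled', len(demo_vectors), 'unit-norm client vectors of dimension', demo_vectors[0].shape[0])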
| 37.088608
| 74
| 0.743686
|
c353c983370f88cd2d93c3297aa089ed064600b5
| 1,421
|
py
|
Python
|
specpogoda.py
|
SpecPogoda/PrzewidywaniePogody
|
6216da176d618e538bfbc8008e35ebdfd1d48713
|
[
"MIT"
] | null | null | null |
specpogoda.py
|
SpecPogoda/PrzewidywaniePogody
|
6216da176d618e538bfbc8008e35ebdfd1d48713
|
[
"MIT"
] | null | null | null |
specpogoda.py
|
SpecPogoda/PrzewidywaniePogody
|
6216da176d618e538bfbc8008e35ebdfd1d48713
|
[
"MIT"
] | null | null | null |
"""
https://colab.research.google.com/drive/1-OdCF3H_XB6XPItV68Lzy1I8fyTzzSz8
"""
import tensorflow_probability as tfp
import tensorflow as tf
import numpy as np
weather = [18.3, 21., 13.6, 9.8, 13.5, 12.99, 16., 16.5, 14.3]
weatherMean = np.mean(weather)
warmDay= []
coldDay= []
for i in weather:
if i >= weatherMean:
warmDay.append(i)
else:
coldDay.append(i)
wMean = np.mean(warmDay)
cMean = np.mean(coldDay)
wStanDev = np.std(warmDay)
cStanDev = np.std(coldDay)
wMean = np.float32(wMean)
cMean = np.float32(cMean)
wStanDev = np.float32(wStanDev)
cStanDev = np.float32(cStanDev)
tfd = tfp.distributions
if weather[len(weather)-1]>=wMean:
initial_distribution = tfd.Categorical(probs = [.2, .8])
else:
initial_distribution = tfd.Categorical(probs = [.8, .2])
transition_distribution = tfd.Categorical(probs = [[.7, .3], # transition probabilities after a cold day
                                                   [.2,.8]]) # after a warm day
observation_distribution = tfd.Normal(loc = [cMean, wMean], scale = [cStanDev, wStanDev])
model = tfd.HiddenMarkovModel(
initial_distribution=initial_distribution,
transition_distribution=transition_distribution,
observation_distribution=observation_distribution,
num_steps=3)
mean = model.mean()
# With TF2 eager execution the tensor can be evaluated directly; no tf.compat.v1.Session is needed.
print(mean.numpy())
| 27.862745
| 90
| 0.65095
|
f5fed3179bdec8fafbc8257b6f1de1bfd46c5fbb
| 383
|
py
|
Python
|
ws/Demos/Demo008_PolicyGradient_PPO_Discrete/lunarlander/test.py
|
dattaray-basab/RLGames
|
b12263fe7a4a246be02fc20ed20cfb9fda40d29b
|
[
"MIT"
] | null | null | null |
ws/Demos/Demo008_PolicyGradient_PPO_Discrete/lunarlander/test.py
|
dattaray-basab/RLGames
|
b12263fe7a4a246be02fc20ed20cfb9fda40d29b
|
[
"MIT"
] | null | null | null |
ws/Demos/Demo008_PolicyGradient_PPO_Discrete/lunarlander/test.py
|
dattaray-basab/RLGames
|
b12263fe7a4a246be02fc20ed20cfb9fda40d29b
|
[
"MIT"
] | null | null | null |
from ws.RLUtils.setup.agent_dispatcher import agent_dispatcher
def fn_execute():
agent_mgr = agent_dispatcher(__file__)
agent_mgr. \
fn_change_args(
{
'TEST_MODE': True,
}
). \
fn_run_train()
return agent_mgr.APP_INFO.ERROR_MESSAGE_
if __name__ == "__main__":
print(fn_execute())
| 20.157895
| 63
| 0.574413
|
8a71fed39007646ad139ad820b48c198051a41fb
| 37,473
|
py
|
Python
|
pantulipy/core.py
|
virtualfunction/pantulipy
|
e4f5008dfc0b72a0a30154d74e934359ca0bb60d
|
[
"Unlicense"
] | 13
|
2018-06-07T19:19:10.000Z
|
2021-12-19T22:26:49.000Z
|
pantulipy/core.py
|
kodiakcrypto/pantulipy
|
cceb042bc5725a1b1cb9c7245bedf06ca0238964
|
[
"Unlicense"
] | 2
|
2018-11-15T07:38:41.000Z
|
2020-04-12T00:20:47.000Z
|
pantulipy/core.py
|
kodiakcrypto/pantulipy
|
cceb042bc5725a1b1cb9c7245bedf06ca0238964
|
[
"Unlicense"
] | 7
|
2018-09-11T00:34:08.000Z
|
2021-06-05T08:47:35.000Z
|
# -*- coding:utf-8 -*-
import inspect as insp
import numpy as np
import pandas as pd
import tulipy
_OHLCV = ['open', 'high', 'low', 'close', 'volume']
_FUNCS = sorted([f for f in dir(tulipy) if f[0].islower() and 'lib' not in f])
_FUNCTIONS_REFERENCES = {fn: n for n, fn in enumerate(_FUNCS)}
# Added so you can loop through the rest by just inputting a dataframe
# These don't have useful default params we can put in.
_DEFAULTLESS_INDICATORS = ['decay', 'edecay', 'lag', 'volatility']
__all__ = ['ad', 'adosc', 'adx', 'adxr', 'ao', 'apo', 'aroon', 'aroonosc', 'atr', 'avgprice', 'bbands', 'bop', 'cci',
'cmo', 'crossany', 'crossover', 'cvi', 'decay', 'dema', 'di', 'dm', 'dpo', 'dx', 'edecay', 'ema', 'emv',
'fisher', 'fosc', 'hma', 'kama', 'kvo', 'lag', 'linreg', 'linregintercept', 'linregslope', 'macd',
'marketfi', 'mass', 'md', 'mfi', 'mom', 'msw', 'natr', 'nvi', 'obv', 'ppo', 'psar', 'pvi', 'qstick',
'roc', 'rocr', 'rsi', 'sma', 'stderr', 'stoch', 'tema', 'tr', 'trima', 'trix', 'tsf', 'typprice', 'ultosc',
'vhf', 'vidya', 'volatility', 'vosc', 'vwma', 'wad', 'wcprice', 'wilders', 'willr', 'wma', 'zlema']
_fx_column_names = {
'DI': ['PLUS', 'MINUS'],
'DM': ['PLUS', 'MINUS'],
'MSW': ['SINE', 'LEAD'],
'AROON': ['DOWN', 'UP'],
'BBANDS': ['LOWER', 'MIDDLE', 'UPPER'],
'FISHER': ['LINE', 'SIGNAL'],
'MACD': ['LINE', 'SIGNAL', 'HISTOGRAM'],
'STOCH': ['LINE', 'MA']
}
def _get_ohlcv_arrays(fn, ohlc):
sign = list(insp.signature(fn).parameters.keys())
params = ['close' if 'real' in p else p
for p in sign if p in _OHLCV or 'real' in p]
if isinstance(ohlc, pd.Series):
assert len(params) == 1, \
('{} requires pd.DataFrame with columns {}, not pd.Series'
.format(fn.__name__, params))
return np.asarray([ohlc.values])
else:
return ohlc[params].T.values
def _tup(fn, ohlc, *args, **kwargs):
"""
Calculate any function from "Tulipy" library from a OHLC Pandas DataFrame.
:param function fn: the "Tulipy" function to call
:param pd.DataFrame ohlc: a Pandas DataFrame type with open, high, low, close and or volume columns.
:param args: function positional params.
:param kwargs: function key pair params.
:return pd.Series: a Pandas Series with data result.
"""
fn_params = list(args) + list(kwargs.values())
fn_name = fn.__name__.upper()
data = fn(*_get_ohlcv_arrays(fn, ohlc), *fn_params)
if data is not None:
if type(data) == tuple:
data_tmp = pd.DataFrame()
i = 0
for arr in data:
num_rows = len(ohlc) - len(arr)
result = list((np.nan,) * num_rows) + arr.tolist()
                suffix = _fx_column_names[fn_name][i] if fn_name in _fx_column_names else str(i)  # str() so .lower() works for unnamed outputs
data_tmp = pd.concat([
data_tmp,
pd.Series(result,
index=ohlc.index,
name=f'{fn_name.lower()}_{suffix.lower()}').bfill()
], axis=1)
i += 1
data = data_tmp.copy()
else:
num_rows = len(ohlc) - len(data)
result = list((np.nan,) * num_rows) + data.tolist()
data = pd.Series(result, index=ohlc.index, name=fn_name.lower()).bfill()
return data
def ad(data):
"""
Accumulation/Distribution Line.
https://tulipindicators.org/ad
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'ad'), data)
def adosc(data, short_period=3, long_period=10):
"""
Accumulation/Distribution Oscillator:
The Accumulation/Distribution Oscillator is also known
as the Chaikin Oscillator, after its inventor.
https://tulipindicators.org/adosc
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'adosc'), data, short_period, long_period)
def adx(data, period=14):
"""
Average Directional Movement Index.
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'adx'), data, period)
def adxr(data, period=14):
"""
Average Directional Movement Rating.
https://tulipindicators.org/adxr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'adxr'), data, period)
def ao(data):
"""
Awesome Oscillator.
https://tulipindicators.org/ao
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'ao'), data)
def apo(data, short_period=20, long_period=26):
"""
Absolute Price Oscillator.
https://tulipindicators.org/apo
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'apo'), data, short_period, long_period)
def aroon(data, period=14):
"""
Aroon.
https://tulipindicators.org/aroon
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'aroon'), data, period)
def aroonosc(data, period=14):
"""
Aroon Oscillator.
https://tulipindicators.org/aroonosc
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'aroonosc'), data, period)
def atr(data, period=14):
"""
Average True Range:
Average True Range is a measure of volatility. It represents roughly how much you can expect a security to change in price on any given day. It is often used in position sizing formulas.
Average true range is calculated by applying Wilders Smoothing to True Range.
True range for each day is the greatest of:
Day's high minus day's low
The absolute value of the day's high minus the previous day's close
The absolute value of the day's low minus the previous day's close
https://tulipindicators.org/atr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'atr'), data, period)
def avgprice(data):
"""
Average Price:
The average price indicator calculates the mean of the open, high, low, and close of a bar.
https://tulipindicators.org/avgprice
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'avgprice'), data)
def bbands(data, period=20, stddev=2):
"""
Bollinger Bands:
The Bollinger Bands indicator calculates three results.
A middle band, which is a Simple Moving Average
Also an upper and lower band, which are spaced off the middle band
and calculated using standard deviations.
https://tulipindicators.org/bbands
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:param stddev: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'bbands'), data, period, stddev)
def bop(data):
"""
Balance Of Power:
Balance of Power compares the strength of buyers and sellers.
https://tulipindicators.org/bop
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'bop'), data)
def cci(data, period=20):
"""
Commodity Channel Index.
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'cci'), data, period)
def cmo(data, period=14):
"""
Chande Momentum Oscillator:
    The Chande Momentum Oscillator is a momentum indicator related to RSI:
    it compares the sum of recent gains to the sum of recent losses over
    the given period and scales the result to the range -100 to +100.
    https://tulipindicators.org/cmo
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'cmo'), data, period)
def crossany(data):
"""
Crossany:
Crossany is a simple function that indicates when two input arrays cross each other.
    When given two inputs, A and B, crossany returns 1 for the periods in which A and B cross in either direction.
https://tulipindicators.org/crossany
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'crossany'), data)
def crossover(data):
"""
Crossover:
Crossover is a simple function that indicates when two input arrays crossover each other.
When given two inputs, A and B, cross returns 1 for the periods that A crosses above B.
https://tulipindicators.org/crossover
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'crossover'), data)
def cvi(data, period=14):
"""
Chaikin's Volatility:
Chaikins Volatility quantifies volatility by comparing the high and low prices.
It uses the period but also passes it into the EMA it uses.
https://tulipindicators.org/cvi
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'cvi'), data, period)
def decay(data, period):
"""
Linear Decay:
Decay is a simple function used to propagate signals from the past into the future.
It is useful in conjunction with algorithm trading and machine learning functions.
https://tulipindicators.org/decay
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'decay'), data, period)
def dema(data, period=50):
"""
Double Exponential Moving Average.
https://tulipindicators.org/dema
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'dema'), data, period)
def di(data, period=14):
"""
Directional Indicator.
https://tulipindicators.org/di
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'di'), data, period)
def dm(data, period=14):
"""
Directional Movement.
https://tulipindicators.org/dm
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'dm'), data, period)
def dpo(data, period=100):
"""
Detrended Price Oscillator.
https://tulipindicators.org/dpo
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'dpo'), data, period)
def dx(data, period=14):
"""
Directional Movement Index.
https://tulipindicators.org/dx
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'dx'), data, period)
def edecay(data, period):
"""
Exponential Decay.
https://tulipindicators.org/edecay
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'edecay'), data, period)
def ema(data, period=100):
"""
Exponential Moving Average.
https://tulipindicators.org/ema
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'ema'), data, period)
def emv(data):
"""
Ease Of Movement.
https://tulipindicators.org/emv
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'emv'), data)
def fisher(data, period=10):
"""
Fisher Transform.
https://tulipindicators.org/fisher
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'fisher'), data, period)
def fosc(data, period=14):
"""
Forecast Oscillator.
https://tulipindicators.org/fosc
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'fosc'), data, period)
def hma(data, period=200):
"""
Hull Moving Average.
https://tulipindicators.org/hma
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'hma'), data, period)
def kama(data, period=10):
"""
Kaufman Adaptive Moving Average.
https://tulipindicators.org/kama
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'kama'), data, period)
def kvo(data, short_period=34, long_period=55):
"""
Klinger Volume Oscillator.
https://tulipindicators.org/kvo
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'kvo'), data, short_period, long_period)
def lag(data, period):
"""
Lag.
https://tulipindicators.org/lag
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'lag'), data, period)
def linreg(data, period=50):
"""
Linear Regression.
https://tulipindicators.org/linreg
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'linreg'), data, period)
def linregintercept(data, period=10):
"""
Linear Regression Intercept.
https://tulipindicators.org/linregintercept
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'linregintercept'), data, period)
def linregslope(data, period=50):
"""
Linear Regression Slope.
https://tulipindicators.org/linregslope
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'linregslope'), data, period)
def macd(data, short_period=12, long_period=26, signal_period=9):
"""
Moving Average Convergence/Divergence.
https://tulipindicators.org/macd
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:param signal_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'macd'), data, short_period, long_period, signal_period)
def marketfi(data):
"""
Market Facilitation Index.
https://tulipindicators.org/marketfi
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'marketfi'), data)
def mass(data, period=25):
"""
Mass Index.
https://tulipindicators.org/mass
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'mass'), data, period)
def md(data, period=14):
"""
Mean Deviation Over Period.
https://tulipindicators.org/md
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'md'), data, period)
def mfi(data, period=14):
"""
Money Flow Index.
https://tulipindicators.org/mfi
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'mfi'), data, period)
def mom(data, period=9):
"""
Momentum.
https://tulipindicators.org/mom
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'mom'), data, period)
def msw(data, period=25):
"""
Mesa Sine Wave.
https://tulipindicators.org/msw
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'msw'), data, period)
def natr(data, period=14):
"""
Normalized Average True Range.
https://tulipindicators.org/natr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'natr'), data, period)
def nvi(data):
"""
Negative Volume Index:
tries to show what smart investors are doing
by staying flat on up-volume days
and only changing on down-volume days.
https://tulipindicators.org/nvi
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'nvi'), data)
def obv(data):
"""
On Balance Volume.
https://tulipindicators.org/obv
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'obv'), data)
def ppo(data, short_period=12, long_period=26):
"""
Percentage Price Oscillator.
https://tulipindicators.org/ppo
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'ppo'), data, short_period, long_period)
def psar(data, acceleration_factor_step=0.02, acceleration_factor_maximum=0.21):
"""
Parabolic Sar:
lower factor_step = less sensitive SAR
lower factor_maximum = less sensitivity
https://school.stockcharts.com/doku.php?id=technical_indicators:parabolic_sar
https://tulipindicators.org/psar
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param acceleration_factor_step: TODO
:param acceleration_factor_maximum: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'psar'), data, acceleration_factor_step, acceleration_factor_maximum)
def pvi(data):
"""
Positive Volume Index:
Positive Volume Index is very similar to Negative Volume Index,
but changes on volume-up days instead.
https://tulipindicators.org/pvi
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'pvi'), data)
def qstick(data, period=200):
"""
Qstick:
Qstick can be used to quantify the ratio of recent up-bars to down-bars
https://tulipindicators.org/qstick
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'qstick'), data, period)
def roc(data, period=9):
"""
Rate Of Change:
The Rate of Change indicator calculates the change
between the current price and the price n bars ago.
https://tulipindicators.org/roc
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'roc'), data, period)
def rocr(data, period=9):
"""
Rate Of Change Ratio:
The Rate of Change Ratio indicator calculates the change
between the current price and the price n bars ago
https://tulipindicators.org/rocr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'rocr'), data, period)
def rsi(data, period=14):
"""
Relative Strength Index.
https://tulipindicators.org/rsi
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'rsi'), data, period)
def sma(data, period=200):
"""
Simple Moving Average.
https://tulipindicators.org/sma
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'sma'), data, period)
def stderr(data, period=50):
"""
Standard Error Over Period.
Standard Error, for a specified period, measures how far prices have deviated from a
Linear Regression Line for the same period. ... If all the closing prices equaled the
corresponding values of the Linear Regression Line, Standard Error would be zero.
https://tulipindicators.org/stderr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'stderr'), data, period)
def stoch(data, pct_k_period=14, pct_k_slowing_period=3, pct_d_period=3):
"""
Stochastic Oscillator.
https://tulipindicators.org/stoch
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
    :param pct_k_period: %K period
    :param pct_k_slowing_period: %K slowing period
    :param pct_d_period: %D period
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'stoch'), data, pct_k_period, pct_k_slowing_period, pct_d_period)
def tema(data, period=200):
"""
Triple Exponential Moving Average.
https://tulipindicators.org/tema
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'tema'), data, period)
def tr(data):
"""
True Range.
https://tulipindicators.org/tr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'tr'), data)
def trima(data, period=100):
"""
Triangular Moving Average:
The Triangular Moving Average is similar to the Simple Moving Average but instead
places more weight on middle portion of the smoothing period and less weight
on the newest and oldest bars in the period.
It takes one parameter, the period n.
Larger values for n will have a greater smoothing effect on the input data.
https://tulipindicators.org/trima
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'trima'), data, period)
def trix(data, period=14):
"""
Trix.
https://tulipindicators.org/trix
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'trix'), data, period)
def tsf(data, period=10):
"""
Time Series Forecast.
https://tulipindicators.org/tsf
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'tsf'), data, period)
def typprice(data):
"""
Typical Price.
https://tulipindicators.org/typprice
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'typprice'), data)
def ultosc(data, short_period=7, medium_period=14, long_period=28):
"""
Ultimate Oscillator.
https://tulipindicators.org/ultosc
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param medium_period: TODO
:param long_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'ultosc'), data, short_period, medium_period, long_period)
def vhf(data, period=50):
"""
Vertical Horizontal Filter:
Vertical Horizontal Filter (VHF) is a trending and ranging indicator authored by Adam White.
The VHF uses the highest close minus the lowest close divided by the sum of the absolute value
of the difference of the highest and lowest over a user defined time period.
https://tulipindicators.org/vhf
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'vhf'), data, period)
def vidya(data, short_period=14, long_period=34, alpha=0.2):
"""
Variable Index Dynamic Average:
The Variable Index Dynamic Average indicator modifies the Exponential Moving Average
by varying the smoothness based on recent volatility.
https://tulipindicators.org/vidya
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:param alpha: Smoothing factor
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'vidya'), data, short_period, long_period, alpha)
def volatility(data, period):
"""
Annualized Historical Volatility:
The Annualized Historical Volatility indicator calculates the volatility over a moving window.
https://tulipindicators.org/volatility
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'volatility'), data, period)
def vosc(data, short_period=14, long_period=28):
"""
Volume Oscillator.
https://tulipindicators.org/vosc
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param short_period: TODO
:param long_period: TODO
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'vosc'), data, short_period, long_period)
def vwma(data, period=100):
"""
Volume Weighted Moving Average:
The Volume Weighted Moving Average is similar to a Simple Moving Average,
but it weights each bar by its volume.
https://tulipindicators.org/vwma
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'vwma'), data, period)
def wad(data):
"""
Williams Accumulation/Distribution.
https://tulipindicators.org/wad
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'wad'), data)
def wcprice(data):
"""
Weighted Close Price.
https://tulipindicators.org/wcprice
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'wcprice'), data)
def wilders(data, period=50):
"""
Wilders Smoothing:
Larger values for period will have a greater smoothing effect on the input data
but will also create more lag.
https://tulipindicators.org/wilders
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'wilders'), data, period)
def willr(data, period=14):
"""
Williams %R.
https://tulipindicators.org/willr
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'willr'), data, period)
def wma(data, period=50):
"""
Weighted Moving Average:
The Weighted Moving Average is similar to the Simple Moving Average but instead
places more weight on more recent bars in the smoothing period
and less weight on the oldest bars in the period.
It takes one parameter, the period.
Larger values for period will have a greater smoothing effect on the input data.
It is calculated for each bar as the weighted arithmetic mean of the previous n bars.
For example, the weights for an n of 4 are: 4, 3, 2, 1.
    The weights w for an n of 7 are: 7, 6, 5, 4, 3, 2, 1.
So in that example, the most recent bar influences the average 7 times as much as the oldest bar.
https://tulipindicators.org/wma
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'wma'), data, period)
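# Worked example of the weighting above (illustration only): with period=4 and closes
# [1, 2, 3, 4] (oldest to newest), the weights are 1, 2, 3, 4 respectively, so
# WMA = (1*1 + 2*2 + 3*3 + 4*4) / (1 + 2 + 3 + 4) = 30 / 10 = 3.0.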
def zlema(data, period=200):
"""
Zero-Lag Exponential Moving Average.
https://tulipindicators.org/zlema
:param pd.DataFrame data: a DataFrame instance with data columns (open, high, low, close, volume).
:param int period: number of period used for indicators calcs.
:return pd.Series: indicator results as pandas Series instance.
"""
return _tup(getattr(tulipy, 'zlema'), data, period)
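# Usage sketch (assumes the tulipy C extension and pandas are installed): build a tiny
# OHLCV frame and compute one single-output and one multi-output indicator. Short
# periods are used so that 30 rows are enough.
if __name__ == '__main__':
    _idx = pd.date_range('2021-01-01', periods=30, freq='D')
    _close = pd.Series(np.linspace(100.0, 130.0, 30), index=_idx)
    _demo = pd.DataFrame({'open': _close - 1, 'high': _close + 2, 'low': _close - 2,
                          'close': _close, 'volume': 1000.0}, index=_idx)
    print(sma(_demo, period=5).tail())                # single output -> pd.Series
    print(bbands(_demo, period=5, stddev=2).tail())   # multiple outputs -> pd.DataFrame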
| 36.594727
| 194
| 0.679102
|
c65c6f27288f5250bd86d83e8c00b5883d180a0b
| 12,211
|
py
|
Python
|
experiments/lidc/train_classification.py
|
xiaosayin/RibFrac_npz_version
|
f094d60421c0609558799e24949840e233543415
|
[
"Apache-2.0"
] | null | null | null |
experiments/lidc/train_classification.py
|
xiaosayin/RibFrac_npz_version
|
f094d60421c0609558799e24949840e233543415
|
[
"Apache-2.0"
] | null | null | null |
experiments/lidc/train_classification.py
|
xiaosayin/RibFrac_npz_version
|
f094d60421c0609558799e24949840e233543415
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
import _init_paths
import fire
import time
import sys
import pandas as pd
import os
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torch.nn.functional as F
from tqdm import tqdm
from collections import OrderedDict
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from sklearn.metrics import roc_auc_score,confusion_matrix
from mylib.sync_batchnorm import DataParallelWithCallback
from lidc_dataset import LIDCTwoClassDataset
from mylib.utils import MultiAverageMeter, save_model, log_results, to_var, set_seed, \
to_device, initialize, categorical_to_one_hot, copy_file_backup, redirect_stdout, \
model_to_syncbn
from lidc_config import LIDCClassConfig as cfg
from lidc_config import LIDCEnv as env
from resnet import ClsResNet
from densenet import ClsDenseNet
from vgg import ClsVGG
from acsconv.converters import ACSConverter, Conv3dConverter, Conv2_5dConverter
from load_pretrained_weights_funcs import load_mednet_pretrained_weights, load_video_pretrained_weights
def main(save_path=cfg.save,
n_epochs= cfg.n_epochs,
seed=cfg.seed
):
# set seed
if seed is not None:
set_seed(cfg.seed)
cudnn.benchmark = True
# back up your code
os.makedirs(save_path)
copy_file_backup(save_path)
redirect_stdout(save_path)
confusion_path = os.path.join(sys.path[0], 'tmp', 'confusion_matrix')
if os.path.exists(confusion_path):
shutil.rmtree(confusion_path)
    os.makedirs(confusion_path)  # directory for saving confusion matrices
    # Datasets (crop_size set to 50)
train_set = LIDCTwoClassDataset(crop_size=50, move=5, data_path=env.data, train=True)
valid_set = None
test_set = LIDCTwoClassDataset(crop_size=50, move=5, data_path=env.data, train=False)
# Define model
model_dict = {'resnet18': ClsResNet, 'vgg16': ClsVGG, 'densenet121': ClsDenseNet}
model = model_dict[cfg.backbone](pretrained=cfg.pretrained, num_classes=4, backbone=cfg.backbone)
# convert to counterparts and load pretrained weights according to various convolution
if cfg.conv=='ACSConv':
model = model_to_syncbn(ACSConverter(model))
if cfg.conv=='Conv2_5d':
model = model_to_syncbn(Conv2_5dConverter(model))
if cfg.conv=='Conv3d':
if cfg.pretrained_3d == 'i3d':
model = model_to_syncbn(Conv3dConverter(model, i3d_repeat_axis=-3))
else:
model = model_to_syncbn(Conv3dConverter(model, i3d_repeat_axis=None))
if cfg.pretrained_3d == 'video':
model = load_video_pretrained_weights(model, env.video_resnet18_pretrain_path)
elif cfg.pretrained_3d == 'mednet':
model = load_mednet_pretrained_weights(model, env.mednet_resnet18_pretrain_path)
print(model)
torch.save(model.state_dict(), os.path.join(save_path, 'model.dat'))
# train and test the model
train(model=model, train_set=train_set, valid_set=valid_set, test_set=test_set, save=save_path, n_epochs=n_epochs)
print('Done!')
def train(model, train_set, test_set, save, valid_set, n_epochs):
'''
Main training function
'''
# Dataloaders
train_loader = DataLoader(train_set, batch_size=cfg.batch_size, shuffle=True,
pin_memory=(torch.cuda.is_available()), num_workers=cfg.num_workers)
test_loader = DataLoader(test_set, batch_size=cfg.batch_size, shuffle=False,
pin_memory=(torch.cuda.is_available()), num_workers=cfg.num_workers)
if valid_set is None:
valid_loader = None
else:
valid_loader = DataLoader(valid_set, batch_size=cfg.batch_size, shuffle=False,
pin_memory=(torch.cuda.is_available()), num_workers=cfg.num_workers)
# Model on cuda
model = to_device(model)
# Wrap model for multi-GPUs, if necessary
model_wrapper = model
if torch.cuda.is_available() and torch.cuda.device_count() > 1:
if cfg.use_syncbn:
print('Using sync-bn')
model_wrapper = DataParallelWithCallback(model).cuda()
else:
model_wrapper = torch.nn.DataParallel(model).cuda()
# optimizer and scheduler
optimizer = torch.optim.Adam(model_wrapper.parameters(), lr=cfg.lr)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=cfg.max_lr, epochs=cfg.n_epochs, steps_per_epoch=cfg.batch_size)  # NOTE: steps_per_epoch would normally be len(train_loader)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.milestones,
# gamma=cfg.gamma)
# Start logging
logs = ['loss', 'acc', 'acc0', 'acc1','acc2','acc3']
train_logs = ['train_'+log for log in logs]
test_logs = ['test_'+log for log in logs]+['test_auc',]
log_dict = OrderedDict.fromkeys(train_logs+test_logs, 0)
with open(os.path.join(save, 'logs.csv'), 'w') as f:
f.write('epoch,')
for key in log_dict.keys():
f.write(key+',')
f.write('\n')
with open(os.path.join(save, 'loss_logs.csv'), 'w') as f:
f.write('iter,train_loss,\n')
writer = SummaryWriter(log_dir=os.path.join(save, 'Tensorboard_Results'))
# train and test the model
best_auc = 0
global iteration
iteration = 0
for epoch in range(n_epochs):
os.makedirs(os.path.join(cfg.save, 'epoch_{}'.format(epoch)))
print('learning rate: ', scheduler.get_lr())
# train epoch
train_meters = train_epoch(
model=model_wrapper,
loader=train_loader,
optimizer=optimizer,
epoch=epoch,
n_epochs=n_epochs,
writer=writer
)
# test epoch
test_meters = test_epoch(
model=model_wrapper,
loader=test_loader,
epoch=epoch,
is_test=True,
writer = writer
)
scheduler.step()
# Log results
for i, key in enumerate(train_logs):
log_dict[key] = train_meters[i]
for i, key in enumerate(test_logs):
log_dict[key] = test_meters[i]
if len(test_meters) > len(test_logs):
log_dict['test_auc'] = test_meters[-1]
log_results(save, epoch, log_dict, writer=writer)
# save model checkpoint
if cfg.save_all:
torch.save(model.state_dict(), os.path.join(save, 'epoch_{}'.format(epoch)+'model.dat'))
if log_dict['test_auc'] > best_auc:
torch.save(model.state_dict(), os.path.join(save,'epoch_{}'.format(epoch), 'model.dat'))
best_auc = log_dict['test_auc']
print('New best auc: %.4f' % log_dict['test_auc'])
else:
print('Current best auc: %.4f' % best_auc)
# end
writer.close()
with open(os.path.join(save, 'logs.csv'), 'a') as f:
f.write(',,,,best auc,%0.5f\n' % (best_auc))
print('best auc: ', best_auc)
def train_epoch(model, loader, optimizer, epoch, n_epochs, print_freq=1, writer=None):
'''
One training epoch
'''
meters = MultiAverageMeter()
# Model on train mode
model.train()
global iteration
end = time.time()
pred_all_class_train = []
gt_classes_train = []
for batch_idx, (x, y) in enumerate(loader):
        # Create variables
x = to_var(x)
y = to_var(y)
# forward and backward
pred_logits = model(x)
loss = F.cross_entropy(pred_logits, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# calculate metrics
pred_class = pred_logits.max(-1)[1]
gt_classes_train.append(y.cpu())
pred_all_class_train.append(pred_class.cpu())
batch_size = y.size(0)
num_classes = pred_logits.size(1)
same = pred_class==y
acc = same.sum().item() / batch_size
accs = torch.zeros(num_classes)
for num_class in range(num_classes):
accs[num_class] = (same * (y==num_class)).sum().item() / ((y==num_class).sum().item()+1e-6)
# log
writer.add_scalar('train_loss_logs', loss.item(), iteration)
with open(os.path.join(cfg.save, 'loss_logs.csv'), 'a') as f:
f.write('%09d,%0.6f,\n'%((iteration + 1),loss.item(),))
iteration += 1
logs = [loss.item(), acc]+ \
[accs[i].item() for i in range(len(accs))]+ \
[time.time() - end]
meters.update(logs, batch_size)
end = time.time()
# print stats
print_freq = 2 // meters.val[-1] + 1
if batch_idx % print_freq == 0:
res = '\t'.join([
'Epoch: [%d/%d]' % (epoch + 1, n_epochs),
'Iter: [%d/%d]' % (batch_idx + 1, len(loader)),
'Time %.3f (%.3f)' % (meters.val[-1], meters.avg[-1]),
'Loss %.4f (%.4f)' % (meters.val[0], meters.avg[0]),
'ACC %.4f (%.4f)' % (meters.val[1], meters.avg[1]),
])
print(res)
gt_classes_train = torch.cat(gt_classes_train, 0).numpy() # ground truth
    pred_all_class_train = torch.cat(pred_all_class_train, 0).numpy()  # predicted classes
    epoch_dataframe_train = pd.DataFrame([gt_classes_train, pred_all_class_train], index=['gt_classes', 'pred_all_class']).T  # combine ground truth and predictions
epoch_dataframe_train.to_csv(os.path.join(sys.path[0], 'tmp', 'confusion_matrix', 'train_'+'epoch_{}_'.format(epoch) + 'confusion_matrix.csv'))
return meters.avg[:-1]
def test_epoch(model, loader, epoch, print_freq=1, is_test=True, writer=None):
'''
One test epoch
'''
meters = MultiAverageMeter()
# Model on eval mode
model.eval()
gt_classes = []
pred_all_probs = []
pred_all_class = []
end = time.time()
with torch.no_grad():
for batch_idx, (x, y) in enumerate(loader):
x = to_var(x)
y = to_var(y)
# forward
pred_logits = model(x)
loss = F.cross_entropy(pred_logits, y)
# calculate metrics
pred_class = pred_logits.max(-1)[1]
pred_probs = pred_logits.softmax(-1)
pred_all_probs.append(pred_probs.cpu())
pred_all_class.append(pred_class.cpu())
gt_classes.append(y.cpu())
batch_size = y.size(0)
num_classes = pred_logits.size(1)
same = pred_class==y
acc = same.sum().item() / batch_size
accs = torch.zeros(num_classes)
for num_class in range(num_classes):
accs[num_class] = (same * (y==num_class)).sum().item() / ((y==num_class).sum().item()+ 1e-6)
logs = [loss.item(), acc]+ \
[accs[i].item() for i in range(len(accs))]+ \
[time.time() - end]
meters.update(logs, batch_size)
end = time.time()
print_freq = 2 // meters.val[-1] + 1
if batch_idx % print_freq == 0:
res = '\t'.join([
'Test' if is_test else 'Valid',
'Iter: [%d/%d]' % (batch_idx + 1, len(loader)),
'Time %.3f (%.3f)' % (meters.val[-1], meters.avg[-1]),
'Loss %.4f (%.4f)' % (meters.val[0], meters.avg[0]),
'ACC %.4f (%.4f)' % (meters.val[1], meters.avg[1]),
])
print(res)
gt_classes = torch.cat(gt_classes, 0).numpy() # ground truth
        pred_all_class = torch.cat(pred_all_class, 0).numpy()  # predicted classes
        pred_all_probs = torch.cat(pred_all_probs, 0).numpy()  # per-class probability scores for the multi-class AUC
        epoch_dataframe = pd.DataFrame([gt_classes, pred_all_class], index=['gt_classes', 'pred_all_class']).T  # combine ground truth and predictions
epoch_dataframe.to_csv(os.path.join(sys.path[0], 'tmp', 'confusion_matrix', 'epoch_{}_'.format(epoch) + 'confusion_matrix.csv'))
auc = roc_auc_score(gt_classes, pred_all_probs,average = 'macro', multi_class = 'ovo')
print('auc:', auc)
return meters.avg[:-1]+[auc,]
if __name__ == '__main__':
fire.Fire(main)
| 39.263666
| 170
| 0.614692
|
48abafd7357e824d6afec6c2f307895ec7ac750d
| 41
|
py
|
Python
|
neuralnet/__init__.py
|
forumulator/BTP
|
34af5c349f658480549aaeb3a3c24f672fff2c96
|
[
"MIT"
] | null | null | null |
neuralnet/__init__.py
|
forumulator/BTP
|
34af5c349f658480549aaeb3a3c24f672fff2c96
|
[
"MIT"
] | null | null | null |
neuralnet/__init__.py
|
forumulator/BTP
|
34af5c349f658480549aaeb3a3c24f672fff2c96
|
[
"MIT"
] | null | null | null |
from neuralnet.run import NeuralNetRunner
| 41
| 41
| 0.902439
|
6878a5c5b5c2aadc39a734c589fca850df2fa056
| 3,884
|
py
|
Python
|
venv/Lib/site-packages/botocore/__init__.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/botocore/__init__.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/botocore/__init__.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.9.11'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
_partial_renames = {
'ipv-6': 'ipv6',
'ipv_6': 'ipv6',
}
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache,
partial_renames=_partial_renames):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
s2 = _number_cap_regex.sub(r'\1' + sep + r'\2', s1)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s2).lower()
# Do partial renames
for old, new in partial_renames.items():
if old in transformed:
transformed = transformed.replace(old, new)
_xform_cache[key] = transformed
return _xform_cache[key]
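
# A minimal usage sketch of ``xform_name`` (illustrative only): the operation
# names below are examples chosen for this sketch rather than drawn from a
# particular service model, but the expected outputs follow the rules above.
def _xform_name_examples():
    # Plain CamelCase is split on case boundaries and lowercased.
    assert xform_name('DescribeInstances') == 'describe_instances'
    # Pluralized acronyms (three or more capitals followed by 's') stay together.
    assert xform_name('ListWebACLs') == 'list_web_acls'
    # Partial renames normalize 'ipv_6' / 'ipv-6' to 'ipv6'.
    assert xform_name('AssignIpv6Addresses') == 'assign_ipv6_addresses'
    # Names that already contain the separator are returned unchanged.
    assert xform_name('already_transformed') == 'already_transformed'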
| 36.990476
| 78
| 0.689753
|
5812cdee61d0f267c6852548a0469fb6db5453a5
| 1,047
|
py
|
Python
|
epsagon/modules/aiohttp.py
|
clericeon/epsagon-python
|
387b785708d5b6ac0e8a9f8562c52f56d0825cdf
|
[
"MIT"
] | null | null | null |
epsagon/modules/aiohttp.py
|
clericeon/epsagon-python
|
387b785708d5b6ac0e8a9f8562c52f56d0825cdf
|
[
"MIT"
] | null | null | null |
epsagon/modules/aiohttp.py
|
clericeon/epsagon-python
|
387b785708d5b6ac0e8a9f8562c52f56d0825cdf
|
[
"MIT"
] | null | null | null |
"""
aiohttp patcher module.
"""
from __future__ import absolute_import
import wrapt
from ..wrappers.aiohttp import AiohttpMiddleware
from ..utils import print_debug, is_lambda_env
def _wrapper(wrapped, _instance, args, kwargs):
"""
Adds `AiohttpMiddleware` into aiohttp app.
:param wrapped: wrapt's wrapped
:param _instance: wrapt's instance
:param args: wrapt's args
:param kwargs: wrapt's kwargs
"""
    # Skip on a Lambda environment since it's not relevant and might be a duplicate
if is_lambda_env():
return wrapped(*args, **kwargs)
try:
if 'middlewares' not in kwargs:
kwargs['middlewares'] = []
kwargs['middlewares'].insert(0, AiohttpMiddleware)
except Exception: # pylint: disable=broad-except
print_debug('Could not add aiohttp wrapper')
return wrapped(*args, **kwargs)
def patch():
"""
Patch module.
:return: None
"""
wrapt.wrap_function_wrapper(
'aiohttp.web',
'Application.__init__',
_wrapper
)
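
# A minimal usage sketch (illustrative only), assuming aiohttp is installed:
# once ``patch()`` has run, every ``aiohttp.web.Application`` created afterwards
# gets ``AiohttpMiddleware`` inserted at the front of its middlewares by
# ``_wrapper`` above.
def _example_patch_usage():
    from aiohttp import web
    patch()                   # wraps Application.__init__
    app = web.Application()   # AiohttpMiddleware is prepended automatically
    return app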
| 24.348837
| 79
| 0.657116
|
c66e76825b6d77875c5b7190f29bae12d2c2b700
| 57
|
py
|
Python
|
chapter-04/exercise007.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
chapter-04/exercise007.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
chapter-04/exercise007.py
|
krastin/pp-cs3.0
|
502be9aac2d84215db176864e443c219e5e26591
|
[
"MIT"
] | null | null | null |
num = float(input("Please enter a number: "))
print(num)
| 19
| 45
| 0.684211
|
3e54ff10c2538d495a80fa9e9a0bd496f0a63988
| 88,031
|
py
|
Python
|
env/lib/python3.8/site-packages/numpy/lib/npyio.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
env/lib/python3.8/site-packages/numpy/lib/npyio.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
env/lib/python3.8/site-packages/numpy/lib/npyio.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
if sys.version_info[0] >= 3:
from collections.abc import Mapping
else:
from future_builtins import map
from collections import Mapping
@set_module('numpy')
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.loads is deprecated, use pickle.loads instead",
DeprecationWarning, stacklevel=2)
return pickle.loads(*args, **kwargs)
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return list(object.__getattribute__(self, '_obj').keys())
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
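
# A small usage sketch of ``zipfile_factory`` (illustrative only; the archive
# name is hypothetical). It accepts an open file object or a path-like value
# and always enables Zip64 for large archives.
def _example_zipfile_factory(path='example-archive.zip'):
    with zipfile_factory(path, mode='w') as zf:
        zf.writestr('note.txt', 'hello')
    return path
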
class NpzFile(Mapping):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
# Implement the Mapping ABC
def __iter__(self):
return iter(self.files)
def __len__(self):
return len(self.files)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = False
if key in self._files:
member = True
elif key in self.files:
member = True
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
if sys.version_info.major == 3:
# deprecate the python 2 dict apis that we supported by accident in
# python 3. We forgot to implement itervalues() at all in earlier
# versions of numpy, so no need to deprecated it here.
def iteritems(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iteritems is deprecated in python 3, to match the "
"removal of dict.itertems. Use .items() instead.",
DeprecationWarning, stacklevel=2)
return self.items()
def iterkeys(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iterkeys is deprecated in python 3, to match the "
"removal of dict.iterkeys. Use .keys() instead.",
DeprecationWarning, stacklevel=2)
return self.keys()
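
# A short sketch of how an NpzFile is typically consumed (illustrative only;
# the archive path is hypothetical). Members are loaded lazily on item or
# attribute access, and the context manager closes the underlying zip file.
def _example_npzfile_usage(path='example.npz'):
    with load(path) as npz:               # NpzFile for a .npz archive
        names = npz.files                 # member names without '.npy'
        first = npz[names[0]]             # lazy getitem access
        same = getattr(npz.f, names[0])   # equivalent attribute-style access
    return first, same
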
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
.. warning:: Loading files that contain object arrays uses the ``pickle``
module, which is not secure against erroneous or maliciously
constructed data. Consider passing ``allow_pickle=False`` to
load data that is known not to contain object arrays for the
safer handling of untrusted sources.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files in Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
own_fid = False
return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("Cannot load file containing pickled data "
"when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
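
# A minimal round-trip sketch of the format handling in ``load`` (illustrative
# only; the file names are hypothetical). A ``.npy`` file yields a single
# array, a ``.npz`` archive yields a lazily loaded NpzFile, and anything else
# is only unpickled when ``allow_pickle=True``.
def _example_load_roundtrip(tmpdir='.'):
    a = np.arange(6).reshape(2, 3)
    npy_path = os.path.join(tmpdir, 'demo.npy')
    npz_path = os.path.join(tmpdir, 'demo.npz')
    save(npy_path, a)
    savez(npz_path, a=a)
    single = load(npy_path)               # plain ndarray
    with load(npz_path) as archive:       # NpzFile, closed by the with-block
        stacked = archive['a']
    return single, stacked
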
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
return (arr,)
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the filename if it does not already
have one.
arr : array_like
Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Any data saved to the file is appended to the end of the file.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> with open('test.npy', 'wb') as f:
... np.save(f, np.array([1, 2]))
... np.save(f, np.array([1, 3]))
>>> with open('test.npy', 'rb') as f:
... a = np.load(f)
... b = np.load(f)
>>> print(a, b)
# [1 2] [1 3]
"""
own_fid = False
if hasattr(file, 'write'):
fid = file
else:
file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def _savez_dispatcher(file, *args, **kwds):
for a in args:
yield a
for v in kwds.values():
yield v
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
When saving dictionaries, the dictionary keys become filenames
inside the ZIP archive. Therefore, keys should be valid filenames.
E.g., avoid keys that begin with ``/`` or contain ``.``.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> sorted(npzfile.files)
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def _savez_compressed_dispatcher(file, *args, **kwds):
for a in args:
yield a
for v in kwds.values():
yield v
@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored filenames are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
:py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
if sys.version_info >= (3, 6):
# Since Python 3.6 it is possible to write directly to a ZIP file.
for key, val in namedict.items():
fname = key + '.npy'
val = np.asanyarray(val)
# always force zip64, gh-10776
with zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Stage arrays in a temporary file on disk, before writing to zip.
# Import deferred for startup time improvement
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
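
# A small sketch of how ``_savez`` names archive members (illustrative only;
# the path is hypothetical). Positional arrays become 'arr_0', 'arr_1', ...
# while keyword arrays keep their keyword names; reusing 'arr_<n>' as a
# keyword raises ValueError.
def _example_savez_naming(path='demo.npz'):
    x = np.arange(3)
    y = np.ones(3)
    savez(path, x, y=y)                   # members: 'arr_0' and 'y'
    with load(path) as archive:
        return sorted(archive.files)      # ['arr_0', 'y']
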
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # assign the lowercased value so '0X...' hex literals reach the fromhex branch
if '0x' in x:
return float.fromhex(x)
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
return asunicode
else:
return asstr
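
# A short sketch of the converter selection above (illustrative only): each
# dtype maps to a callable that parses a single text field.
def _example_getconv():
    assert _getconv(np.dtype(bool))('0') is False        # bool(int('0'))
    assert _getconv(np.dtype(int))('42') == 42           # integer parsing
    assert _getconv(np.dtype(float))('0x1p-1') == 0.5    # hex floats accepted
    assert _getconv(np.dtype(complex))('1+-2j') == 1 - 2j
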
# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
comment. None implies no comments. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will parse the
column string into the desired value. E.g., if column 0 is a date
string: ``converters = {0: datestr2num}``. Converters can also be
used to provide a default value for missing data (but see also
`genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines, including comments; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionchanged:: 1.11.0
When a single column has to be read it is possible to use
an integer instead of a tuple. E.g ``usecols = 3`` reads the
fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
The special value 'bytes' enables backward compatibility workarounds
        that ensure you receive byte arrays as results if possible and pass
'latin1' encoded strings to converters. Override this value to receive
unicode arrays and pass strings as input to converters. If set to None
the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
max_rows : int, optional
Read `max_rows` lines of content after `skiprows` lines. The default
is to read all the lines.
.. versionadded:: 1.16.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[0., 1.],
[2., 3.]])
>>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([(b'M', 21, 72.), (b'F', 35, 58.)],
dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([1., 3.])
>>> y
array([2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [comments]
comments = [_decode_line(x) for x in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile('|'.join(comments))
if delimiter is not None:
delimiter = _decode_line(delimiter)
user_converters = converters
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
fh = iter(fh)
fown = True
else:
fh = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
# input may be a python2 io stream
if encoding is not None:
fencoding = encoding
# we must assume local encoding
# TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
@recursive
def flatten_dtype_internal(self, dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = self(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
@recursive
def pack_items(self, items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(self(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter. """
line = _decode_line(line, encoding=encoding)
if comments is not None:
line = regex_comments.split(line, maxsplit=1)[0]
line = line.strip('\r\n')
if line:
return line.split(delimiter)
else:
return []
def read_data(chunk_size):
"""Parse each line, including the first.
The file read, `fh`, is a global defined above.
Parameters
----------
chunk_size : int
At most `chunk_size` lines are read at a time, with iteration
until all lines are read.
"""
X = []
line_iter = itertools.chain([first_line], fh)
line_iter = itertools.islice(line_iter, max_rows)
for i, line in enumerate(line_iter):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
if len(X) > chunk_size:
yield X
X = []
if X:
yield X
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype_internal(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
if byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
converters[i] = functools.partial(tobytes_first, conv=conv)
else:
converters[i] = conv
converters = [conv if conv is not bytes else
lambda x: x.encode(fencoding) for conv in converters]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
for x in read_data(_loadtxt_chunksize):
if X is None:
X = np.array(x, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
nshape[0] += len(x)
X.resize(nshape, refcheck=False)
X[pos:, ...] = x
finally:
if fown:
fh.close()
if X is None:
X = np.array([], dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
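
# A short usage sketch of ``loadtxt`` on an in-memory stream (illustrative
# only). It exercises comment stripping, ``usecols`` and a per-column
# converter; ``encoding=None`` hands unicode strings to the converter.
def _example_loadtxt():
    from io import StringIO
    text = StringIO(u"# id value flag\n1 2.5 yes\n2 3.5 no\n")
    data = loadtxt(
        text,
        usecols=(0, 1),                            # drop the 'flag' column
        converters={0: lambda s: float(s) * 10},   # rescale the id column
        encoding=None,
    )
    return data                                    # [[10., 2.5], [20., 3.5]]
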
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
return (X,)
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
* a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
* a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
* a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
encoding : {None, str}, optional
Encoding used to encode the outputfile. Does not apply to output
streams. If the encoding is something other than 'bytes' or 'latin1'
you will not be able to load the file in NumPy versions < 1.14. Default
is 'latin1'.
.. versionadded:: 1.14.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
        - For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<https://docs.python.org/library/string.html#format-specification-mini-language>`_,
Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
class WriteWrap(object):
"""Convert to unicode in py2 or to bytes on bytestream inputs.
"""
def __init__(self, fh, encoding):
self.fh = fh
self.encoding = encoding
self.do_write = self.first_write
def close(self):
self.fh.close()
def write(self, v):
self.do_write(v)
def write_bytes(self, v):
if isinstance(v, bytes):
self.fh.write(v)
else:
self.fh.write(v.encode(self.encoding))
def write_normal(self, v):
self.fh.write(asunicode(v))
def first_write(self, v):
try:
self.write_normal(v)
self.write = self.write_normal
except TypeError:
# input is probably a bytestream
self.write_bytes(v)
self.write = self.write_bytes
own_fh = False
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
# need to convert str to unicode for text io output
if sys.version_info[0] == 2:
fh = WriteWrap(fh, encoding or 'latin1')
elif hasattr(fname, 'write'):
# wrap to handle byte output streams
fh = WriteWrap(fname, encoding or 'latin1')
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 0 or X.ndim > 2:
raise ValueError(
"Expected 1D or 2D array, got %dD array instead" % X.ndim)
elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, basestring):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(comments + header + newline)
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
s = format % tuple(row2) + newline
fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
v = format % tuple(row) + newline
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
fh.write(v)
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(comments + footer + newline)
finally:
if own_fh:
fh.close()
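
# A short sketch of the ``fmt`` handling above (illustrative only). A single
# specifier is replicated once per column and joined with ``delimiter``;
# ``header`` is prefixed with ``comments``.
def _example_savetxt_fmt():
    from io import StringIO
    buf = StringIO()
    savetxt(buf, np.array([[1.0, 2.0], [3.0, 4.0]]),
            fmt='%.2f', delimiter=',', header='a,b')
    return buf.getvalue()    # '# a,b\n1.00,2.00\n3.00,4.00\n'
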
@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
Filename or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
.. versionadded:: 1.14.0
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
dtype=[('num', '<i8'), ('key', 'S3')])
>>> output['num']
array([1312, 1534, 444])
"""
own_fh = False
if not hasattr(file, "read"):
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
try:
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
content = file.read()
if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
regexp = asbytes(regexp)
elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
seq = regexp.findall(content)
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
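
# A small sketch of ``fromregex`` on an in-memory stream (illustrative only).
# Each regex group becomes one field of the resulting structured array.
def _example_fromregex():
    from io import StringIO
    stream = StringIO(u"cpu 1.5\nmem 2.0\n")
    return fromregex(stream, r"(\w+)\s+([0-9.]+)",
                     [('name', 'U3'), ('value', np.float64)])
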
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None,
deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. An underscore is appended to excluded names:
        for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply when `fname` is
a file object. The special value 'bytes' enables backward compatibility
workarounds that ensure that you receive byte arrays when possible
        and pass latin1 encoded strings to converters. Override this value to
receive unicode arrays and pass strings as input to converters. If set
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Using dtype = None
>>> _ = s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Specifying dtype and names
>>> _ = s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
An example with fixed-width columns
>>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, b'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
An example to show comments
>>> f = StringIO('''
... text,# of chars
... hello world,11
... numpy,5''')
>>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
dtype=[('f0', 'S12'), ('f1', 'S12')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if isinstance(fname, basestring):
fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fid_ctx = contextlib.closing(fid)
else:
fid = fname
fid_ctx = contextlib_nullcontext(fid)
fhd = iter(fid)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
with fid_ctx:
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
try:
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = ''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if comments is not None:
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([str(_.strip()) for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
if isinstance(user_missing_values, bytes):
user_missing_values = user_missing_values.decode('latin1')
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, basestring):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
if conv is bytes:
user_conv = asbytes
elif byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
user_conv = functools.partial(tobytes_first, conv=conv)
else:
user_conv = conv
converters[i].update(user_conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, user_conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
# miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
"Reading unicode strings without specifying the encoding "
"argument is deprecated. Set the encoding, use None for the "
"system default.",
np.VisibleDeprecationWarning, stacklevel=2)
def encode_unicode_cols(row_tup):
row = list(row_tup)
for i in strcolidx:
row[i] = row[i].encode('latin1')
return tuple(row)
try:
data = [encode_unicode_cols(r) for r in data]
except UnicodeEncodeError:
pass
else:
for i in strcolidx:
column_types[i] = np.bytes_
# Update string types to be the right length
sized_column_types = column_types[:]
for i, col_type in enumerate(column_types):
if np.issubdtype(col_type, np.character):
n_chars = max(len(row[i]) for row in data)
sized_column_types[i] = (col_type, n_chars)
if names is None:
# If the dtype is uniform (before sizing strings)
base = {
c_type
for c, c_type in zip(converters, column_types)
if c._checked}
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(sized_column_types)]
else:
ddtype = list(zip(names, sized_column_types))
mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names is not None:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if np.issubdtype(ttype, np.character):
ttype = (ttype, max(len(row[i]) for row in data))
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names is not None:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names, converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
.. deprecated:: 1.17
        `ndfromtxt` is a deprecated alias of `genfromtxt` which
overwrites the ``usemask`` argument with `False` even when
explicitly called as ``ndfromtxt(..., usemask=True)``.
Use `genfromtxt` instead.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
# Numpy 1.17
warnings.warn(
"np.ndfromtxt is a deprecated alias of np.genfromtxt, "
"prefer the latter.",
DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
.. deprecated:: 1.17
np.mafromtxt is a deprecated alias of `genfromtxt` which
overwrites the ``usemask`` argument with `True` even when
explicitly called as ``mafromtxt(..., usemask=False)``.
Use `genfromtxt` instead.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
# Numpy 1.17
warnings.warn(
"np.mafromtxt is a deprecated alias of np.genfromtxt, "
"prefer the latter.",
DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
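# Illustrative usage sketch (editorial addition, not part of the NumPy source):
# reading a small, made-up CSV with recfromcsv. Field names come from the
# header row and are lowercased by the default case_sensitive="lower" setting.
#   >>> from io import StringIO
#   >>> r = recfromcsv(StringIO(u"Name,Value\nfoo,1\nbar,2"), encoding=None)
#   >>> r.dtype.names
#   ('name', 'value')
#   >>> r['value'].tolist()
#   [1, 2]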
| 36.972281
| 95
| 0.571526
|
c8ae14678d57aaa23c0636e9b2440a92851d7583
| 4,428
|
py
|
Python
|
homeassistant/components/accuweather/__init__.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
homeassistant/components/accuweather/__init__.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/accuweather/__init__.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""The AccuWeather component."""
import asyncio
from datetime import timedelta
import logging
from accuweather import AccuWeather, ApiError, InvalidApiKeyError, RequestsExceededError
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from homeassistant.const import CONF_API_KEY
from homeassistant.core import Config, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_FORECAST,
CONF_FORECAST,
COORDINATOR,
DOMAIN,
UNDO_UPDATE_LISTENER,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "weather"]
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Set up configured AccuWeather."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass, config_entry) -> bool:
"""Set up AccuWeather as config entry."""
api_key = config_entry.data[CONF_API_KEY]
location_key = config_entry.unique_id
forecast = config_entry.options.get(CONF_FORECAST, False)
_LOGGER.debug("Using location_key: %s, get forecast: %s", location_key, forecast)
websession = async_get_clientsession(hass)
coordinator = AccuWeatherDataUpdateCoordinator(
hass, websession, api_key, location_key, forecast
)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
undo_listener = config_entry.add_update_listener(update_listener)
hass.data[DOMAIN][config_entry.entry_id] = {
COORDINATOR: coordinator,
UNDO_UPDATE_LISTENER: undo_listener,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in PLATFORMS
]
)
)
hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def update_listener(hass, config_entry):
"""Update listener."""
await hass.config_entries.async_reload(config_entry.entry_id)
class AccuWeatherDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching AccuWeather data API."""
def __init__(self, hass, session, api_key, location_key, forecast: bool):
"""Initialize."""
self.location_key = location_key
self.forecast = forecast
self.is_metric = hass.config.units.is_metric
self.accuweather = AccuWeather(api_key, session, location_key=self.location_key)
# Enabling the forecast download increases the number of requests per data
# update, we use 32 minutes for current condition only and 64 minutes for
# current condition and forecast as update interval to not exceed allowed number
# of requests. We have 50 requests allowed per day, so we use 45 and leave 5 as
# a reserve for restarting HA.
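        # Illustrative arithmetic (editorial note, not in the original source):
        #   current conditions only: 1440 min / 32 min * 1 request  = 45 requests/day
        #   with forecast enabled:   1440 min / 64 min * 2 requests = 45 requests/day
        # Both stay just under the 50-requests-per-day allowance mentioned above.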
update_interval = (
timedelta(minutes=64) if self.forecast else timedelta(minutes=32)
)
_LOGGER.debug("Data will be update every %s", update_interval)
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
async def _async_update_data(self):
"""Update data via library."""
try:
async with timeout(10):
current = await self.accuweather.async_get_current_conditions()
forecast = (
await self.accuweather.async_get_forecast(metric=self.is_metric)
if self.forecast
else {}
)
except (
ApiError,
ClientConnectorError,
InvalidApiKeyError,
RequestsExceededError,
) as error:
raise UpdateFailed(error) from error
_LOGGER.debug("Requests remaining: %s", self.accuweather.requests_remaining)
return {**current, **{ATTR_FORECAST: forecast}}
| 33.293233
| 88
| 0.696251
|
5d9e140e704cc64204329e6714b499c462049667
| 2,314
|
py
|
Python
|
src/pdc2/scripts/mafft_wrapper.py
|
jlanga/smsk_selection
|
08070c6d4a6fbd9320265e1e698c95ba80f81123
|
[
"MIT"
] | 4
|
2021-07-18T05:20:20.000Z
|
2022-01-03T10:22:33.000Z
|
src/pdc2/scripts/mafft_wrapper.py
|
jlanga/smsk_selection
|
08070c6d4a6fbd9320265e1e698c95ba80f81123
|
[
"MIT"
] | 1
|
2017-08-21T07:26:13.000Z
|
2018-11-08T13:59:48.000Z
|
src/pdc2/scripts/mafft_wrapper.py
|
jlanga/smsk_orthofinder
|
08070c6d4a6fbd9320265e1e698c95ba80f81123
|
[
"MIT"
] | 2
|
2021-07-18T05:20:26.000Z
|
2022-03-31T18:23:31.000Z
|
"""
Takes a directory of fasta files
If there are >= 1000 sequences in the direction, use --auto
For fasta files with less than 1000 sequences, use the slower but much
more accurate algorithm
Uncomment the com += "--anysymbol " line if there are "U" or any other unusual
charactors in the sequences
"""
import os,sys
import subprocess
from seq import read_fasta_file
def mafft(DIR,fasta,thread,seqtype):
if DIR[-1] != "/": DIR += "/"
alignment = fasta+".mafft.aln"
if os.path.exists(DIR+alignment) and os.stat(DIR+alignment).st_size>0:
return alignment
assert seqtype == "aa" or seqtype == "dna","Input data type: dna or aa"
seqlist = read_fasta_file(DIR+fasta)
seqcount = len(seqlist)
maxlen = 0
for s in seqlist:
maxlen = max(maxlen,len(s.seq))
    assert seqcount >= 2, "fewer than 2 sequences in "+DIR+fasta
if seqtype == "dna":
infasta = DIR+fasta
seq = "--nuc"
else:
infasta = DIR+fasta+".temp"
seq = "--amino"
with open(infasta,"w") as outfile:
for s in seqlist:
#remove U which is usually not in aa alphabet
s.seq = s.seq.replace("U","X")
s.seq = s.seq.replace("u","x")
#remove stop codon and seq after it
if "*" in s.seq:
s.seq = s.seq[:s.seq.find("*")]
outfile.write(s.get_fasta())
if seqcount >= 1000 or maxlen >= 10000:
alg = ["--auto"] #so that the run actually finishes!
else: alg = ["--genafpair","--maxiterate","1000"]
cmd = ["mafft"]+alg+[seq,"--thread",str(thread)]
#com += ["--anysymbol"] # when there are "U"s in aa sequences
cmd += [infasta]
print " ".join(cmd)
out = open(DIR+alignment, 'w')
p = subprocess.Popen(cmd,stderr=subprocess.PIPE,stdout=out)
out.close()
p.communicate()
assert p.returncode == 0,"Error mafft"
if seqtype == "aa": os.remove(DIR+fasta+".temp")
return alignment
def main(DIR,infile_ending,thread,seqtype):
if DIR[-1] != "/": DIR += "/"
filecount = 0
for i in os.listdir(DIR):
if i.endswith(infile_ending):
filecount += 1
mafft(DIR=DIR,fasta=i,thread=thread,seqtype=seqtype)
    assert filecount > 0, "No file ends with "+infile_ending+" found in "+DIR
if __name__ == "__main__":
if len(sys.argv) != 5:
print "usage: python mafft_wrapper.py DIR infile_ending thread dna/aa"
sys.exit()
DIR,infile_ending,thread,seqtype = sys.argv[1:]
main(DIR,infile_ending,thread,seqtype)
| 29.291139
| 78
| 0.668539
|
d8eff340c7e48deb7dc1cb09031160e715720f21
| 1,219
|
py
|
Python
|
manga_py/providers/zeroscans_com.py
|
Abijithkrishna/manga-py
|
03b142ecb944ef37a36e5095ffa580209021e3b0
|
[
"MIT"
] | 337
|
2019-08-27T16:14:50.000Z
|
2022-03-29T09:58:22.000Z
|
manga_py/providers/zeroscans_com.py
|
Abijithkrishna/manga-py
|
03b142ecb944ef37a36e5095ffa580209021e3b0
|
[
"MIT"
] | 225
|
2019-08-25T15:02:01.000Z
|
2022-03-31T06:36:09.000Z
|
manga_py/providers/zeroscans_com.py
|
Abijithkrishna/manga-py
|
03b142ecb944ef37a36e5095ffa580209021e3b0
|
[
"MIT"
] | 41
|
2019-10-04T13:28:02.000Z
|
2022-03-19T08:18:34.000Z
|
from manga_py.provider import Provider
from .helpers.std import Std
class ZeroScansCom(Provider, Std):
_key = '/comics/'
def get_chapter_index(self) -> str:
return self.re.search(
r'%s[^/]+/(\d+/\d+)' % self._key,
self.chapter
).group(1).replace('/', '-')
def get_content(self):
name = self._get_name(r'%s([^/]+)' % self._key)
return self.http_get('%s%s%s/' % (
self.domain,
self._key,
name
))
def get_manga_name(self) -> str:
return self._get_name(r'%s\d+-([^/]+)' % self._key)
def get_chapters(self):
return self._elements('.list .list-item a.text-color')
def get_files(self):
content = self.http_get(self.chapter)
raw_images = self.re.search(
r'chapterPages\s?=\s?(\[.+?\])',
content
).group(1)
images = self.json.loads(raw_images)
n = self.http().normalize_uri
return [n(i) for i in images]
def get_cover(self) -> str:
image = self._elements('.media img.media-content')
if len(image):
return self.parse_background(image)
return ''
main = ZeroScansCom
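# Editorial note (not part of the original provider): get_files() above assumes
# the chapter HTML embeds a JavaScript array such as
#   chapterPages = ["https://example.invalid/page1.jpg", "https://example.invalid/page2.jpg"]
# The URLs shown here are hypothetical placeholders.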
| 25.395833
| 62
| 0.545529
|
47ae4beca724e65a952dd74bc388fc9df9c0aeda
| 7,269
|
py
|
Python
|
docs/conf.py
|
crrobinson14/django-jsonit
|
3c4b10c844b4e45759bac28a5a6dd201f60c6c1d
|
[
"BSD-3-Clause"
] | 3
|
2015-01-22T19:02:55.000Z
|
2015-11-08T16:10:08.000Z
|
docs/conf.py
|
crrobinson14/django-jsonit
|
3c4b10c844b4e45759bac28a5a6dd201f60c6c1d
|
[
"BSD-3-Clause"
] | 1
|
2015-06-06T11:49:32.000Z
|
2015-06-06T11:49:32.000Z
|
docs/conf.py
|
crrobinson14/django-jsonit
|
3c4b10c844b4e45759bac28a5a6dd201f60c6c1d
|
[
"BSD-3-Clause"
] | 1
|
2020-05-12T16:41:54.000Z
|
2020-05-12T16:41:54.000Z
|
# -*- coding: utf-8 -*-
#
# django-jsonit documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 29 15:20:35 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from django.conf import global_settings
from django.core.management import setup_environ
setup_environ(global_settings, original_settings_path='django.conf.global_settings')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from jsonit import get_version
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-jsonit'
copyright = u'2011, Lincoln Loop'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version(major=True)
# The full version, including alpha/beta/rc tags.
release = get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-jsonitdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-jsonit.tex', u'django-jsonit Documentation',
u'Lincoln Loop', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-jsonit', u'django-jsonit Documentation',
[u'Lincoln Loop'], 1)
]
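# Editorial note (not part of the original configuration): with this conf.py in
# place, the HTML docs would typically be built with a command along these
# lines; the exact source and output paths are assumptions.
#   sphinx-build -b html docs docs/_build/html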
| 32.450893
| 84
| 0.724859
|
a76a8379223b78d04e7f6fca2c0afc196019ccbc
| 18,205
|
py
|
Python
|
Code/bst.py
|
colo6299/CS-1.3-Core-Data-Structures
|
2d2b5406029477d0dcfae07fb06cc4be4ff227cd
|
[
"MIT"
] | null | null | null |
Code/bst.py
|
colo6299/CS-1.3-Core-Data-Structures
|
2d2b5406029477d0dcfae07fb06cc4be4ff227cd
|
[
"MIT"
] | 5
|
2020-02-15T17:24:36.000Z
|
2020-03-09T22:12:56.000Z
|
Code/bst.py
|
colo6299/CS-1.3-Core-Data-Structures
|
2d2b5406029477d0dcfae07fb06cc4be4ff227cd
|
[
"MIT"
] | null | null | null |
from q import ArrayQ
from ihop import IHOP_Array as stack
class BinaryNode:
# NOTE: all of the big O stuff is on the tree class :)
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def __repr__(self):
"""Return a string representation of this binary tree node."""
return 'BinaryTreeNode({!r})'.format(self.data)
def search(self, term):
if self.data == term:
return self
else:
if term < self.data:
if self.left is not None:
l = self.left.search(term)
if l is not None:
return l
else:
if self.right is not None:
r = self.right.search(term)
if r is not None:
return r
def is_leaf(self):
if (self.left is None and self.right is None):
return True
return False
def is_branch(self):
return not self.is_leaf()
def height(self):
left = 0
right = 0
if self.is_leaf():
return 0
if self.left is not None:
left = self.left.height()
if self.right is not None:
right = self.right.height()
if left > right:
return left + 1
return right + 1
def insert(self, data):
if data < self.data:
if self.left is not None:
self.left.insert(data)
else:
self.left = BinaryNode(data)
else:
if self.right is not None:
self.right.insert(data)
else:
self.right = BinaryNode(data)
    def set_insert(self, data):
        if data < self.data:
            if self.left is not None:
                return self.left.set_insert(data)
            else:
                self.left = BinaryNode(data)
        elif data == self.data:
            # replace an existing equal item and signal that nothing new was added
            self.data = data
            return True
        else:
            if self.right is not None:
                return self.right.set_insert(data)
            else:
                self.right = BinaryNode(data)
def items_pre_order(self, tree):
tree.last_ordering.append(self.data)
if self.left is not None:
self.left.items_pre_order(tree)
if self.right is not None:
self.right.items_pre_order(tree)
def items_in_order(self, tree):
if self.left is not None:
self.left.items_in_order(tree)
tree.last_ordering.append(self.data)
if self.right is not None:
self.right.items_in_order(tree)
def items_post_order(self, tree):
if self.left is not None:
self.left.items_post_order(tree)
if self.right is not None:
self.right.items_post_order(tree)
tree.last_ordering.append(self.data)
def find_parent_node_recursive(self, term, parent=None):
"""Return the parent node of the node containing the given item
(or the parent node of where the given item would be if inserted)
in this tree, or None if this tree is empty or has only a root node.
Search is performed recursively starting from the given node
(give the root node to start recursion)."""
if self.data == term:
return parent
else:
if self.left is not None:
                l = self.left.find_parent_node_recursive(term, self)
if l is not None:
return l
if self.right is not None:
                r = self.right.find_parent_node_recursive(term, self)
if r is not None:
return r
def find_parent_node_recursive_tuple(self, term, parent=None):
"""Return the parent node of the node containing the given item
(or the parent node of where the given item would be if inserted)
in this tree, or None if this tree is empty or has only a root node.
Search is performed recursively starting from the given node
(give the root node to start recursion)."""
if self.data == term:
return (parent, self)
else:
if self.left is not None:
l = self.left.find_parent_node_recursive_tuple(term, self)
if l is not None:
return l
if self.right is not None:
r = self.right.find_parent_node_recursive_tuple(term, self)
if r is not None:
return r
def predecessor(self, parent=None):
if self.right is not None:
return self.right.predecessor(self)
return parent, self
class BinaryTree:
def __init__(self, items=None):
"""Initialize this binary search tree and insert the given items."""
self.root = None
self.size = 0
self.last_ordering = []
if items is not None:
for item in items:
self.insert(item)
def __repr__(self):
"""Return a string representation of this binary search tree."""
return 'BinarySearchTree({} nodes)'.format(self.size)
def is_empty(self):
""" O(1)
Return True if this binary search tree is empty (has no nodes)."""
return self.root is None
    def height(self):
        """
        O(n) time
        """
        if self.root is None:
            return 0
        return self.root.height()
def contains(self, term):
"""
O(log n) time
"""
if self.search(term) is not None:
return True
return False
def _find_parent_node_recursive(self, term):
"""O(log n)
Return the parent node of the node containing the given item
(or the parent node of where the given item would be if inserted)
in this tree, or None if this tree is empty or has only a root node.
Search is performed recursively starting from the given node
(give the root node to start recursion)."""
if self.is_empty() is not True:
return self.root.find_parent_node_recursive(term)
    def search(self, term):
        """
        O(log n) time
        """
        if self.root is None:
            return None
        node = self.root.search(term)
        if node is not None:
            return node.data
def insert(self, data):
"""
O(log n) time
"""
self.size += 1
if self.root is not None:
self.root.insert(data)
else:
self.root = BinaryNode(data)
    def set_insert(self, data):
        """
        O(log n) time
        """
        if self.root is not None:
            # set_insert returns True when an existing equal item was replaced;
            # only count newly created nodes toward the size
            if not self.root.set_insert(data):
                self.size += 1
        else:
            self.root = BinaryNode(data)
            self.size += 1
def delete(self, item):
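        """Remove the node containing the given item from this tree.
        Handles three cases: a node with two children is replaced by the
        in-order predecessor of its left subtree, a node with one child is
        spliced out, and a leaf is simply detached. Assumes the item is
        present in the tree."""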
parent_child_nodes = self.root.find_parent_node_recursive_tuple(item)
parent_node = parent_child_nodes[0]
delete_node = parent_child_nodes[1]
left_bool = False
if parent_node is not None:
if parent_node.left == delete_node:
left_bool = True
if delete_node.left is not None and delete_node.right is not None:
pred = delete_node.left.predecessor()
retnode = pred[1]
if pred[0] is not None:
pred[0].right = pred[1].left
pred[1].right = delete_node.right
pred[1].left = delete_node.left
else:
pred[1].right = delete_node.right
elif delete_node.left is not None:
retnode = delete_node.left
elif delete_node.right is not None:
retnode = delete_node.right
else:
retnode = None
if parent_node is None:
self.root = retnode
elif left_bool:
parent_node.left = retnode
else:
parent_node.right = retnode
def _find_node_recursive(self, item, node):
"""
O(log n) time
"""
return node.search(item)
def items_level_order(self):
"""O(n) time and space
Return a level-order list of all items in this binary search tree."""
items = []
if not self.is_empty():
# Traverse tree level-order from root, appending each node's item
self._traverse_level_order_iterative(self.root, items.append)
# Return level-order list of all items in tree
return items
    def _traverse_level_order_iterative(self, start_node, visit):
        """ O(n) time and space
        Traverse this binary tree with iterative level-order traversal (BFS).
        Start at the given node and visit each node with the given function."""
queue = ArrayQ()
queue.enqueue(start_node)
while queue.is_empty() is not True:
node = queue.dequeue()
visit(node.data)
if node.left is not None:
queue.enqueue(node.left)
if node.right is not None:
queue.enqueue(node.right)
    def items_pre_order(self):
        """
        O(n) time, O(log n) space
        Return a pre-order list of all items in this binary search tree."""
        self.last_ordering = []
        if self.root is not None:
            self.root.items_pre_order(self)
        return self.last_ordering
    def items_in_order(self):
        """
        O(n) time, O(log n) space
        Return an in-order list of all items in this binary search tree."""
        self.last_ordering = []
        if self.root is not None:
            self.root.items_in_order(self)
        return self.last_ordering
    def items_post_order(self):
        """
        O(n) time, O(log n) space
        Return a post-order list of all items in this binary search tree."""
        self.last_ordering = []
        if self.root is not None:
            self.root.items_post_order(self)
        return self.last_ordering
def items_in_order_iterative(self):
"""
O(n) time, O(log n) space
"""
if self.is_empty():
return []
appender_stack = stack([self.root])
visitor_stack = stack([self.root])
items = []
while appender_stack.is_empty() is False:
if visitor_stack.is_empty() is False:
node = visitor_stack.pop()
if node.left is not None:
appender_stack.push(node.left)
visitor_stack.push(node.left)
else:
node = appender_stack.pop()
items.append(node.data)
if node.right is not None:
appender_stack.push(node.right)
visitor_stack.push(node.right)
return items
    def items_pre_order_iterative(self):
        """
        O(n) time, O(log n) space
        Iterative pre-order traversal with a single stack: visit the node,
        then push right before left so the left subtree is processed first."""
        if self.is_empty():
            return []
        node_stack = stack([self.root])
        items = []
        while node_stack.is_empty() is False:
            node = node_stack.pop()
            items.append(node.data)
            if node.right is not None:
                node_stack.push(node.right)
            if node.left is not None:
                node_stack.push(node.left)
        return items
    def items_post_order_iterative(self):
        """
        O(n) time, O(log n) space
        Iterative post-order traversal: build a reversed (root, right, left)
        ordering with one stack, then reverse it to get left, right, root."""
        if self.is_empty():
            return []
        node_stack = stack([self.root])
        reversed_items = []
        while node_stack.is_empty() is False:
            node = node_stack.pop()
            reversed_items.append(node.data)
            if node.left is not None:
                node_stack.push(node.left)
            if node.right is not None:
                node_stack.push(node.right)
        reversed_items.reverse()
        return reversed_items
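# Illustrative usage sketch (editorial addition, not part of the original
# module). Assumes the ArrayQ and IHOP_Array helpers imported above behave as
# a standard FIFO queue and LIFO stack. Not executed at import time.
def _binary_tree_example():
    tree = BinaryTree([4, 2, 6, 1, 3, 5, 7])
    assert tree.items_in_order() == [1, 2, 3, 4, 5, 6, 7]
    assert tree.items_level_order() == [4, 2, 6, 1, 3, 5, 7]
    assert tree.contains(5)
    tree.delete(2)
    assert tree.items_in_order() == [1, 3, 4, 5, 6, 7]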
from hash_browns import Hashbrowns as Hashtable
from recomper import Combinator
from recomper import Permutator
import cProfile
import sys
import json
def dejumbler(jumble, meltionary):
melty_word = word_melter(jumble)
valid_list = []
if melty_word not in meltionary:
return valid_list
for valid_word in meltionary[melty_word]:
if is_letter_list_permutation(valid_word, jumble):
valid_list.append(valid_word)
return valid_list
def multi_dejumble(jumble_letters, word_lengths, meltionary):
retlist = []
for num in word_lengths:
retlist.append([])
_multi_word_dejumbler(list(jumble_letters), word_lengths, [], retlist, meltionary)
return retlist
# out_list should be a list of empty lists of length len(word_lengths), i.e. [2, 6] -> [ [], [] ];
# the result is a list of solution groups, indexed by how many word lengths remain unmatched
def _multi_word_dejumbler(jumble_letters, word_lengths, cur_words, out_list, d, _ndx=-1):
c = Combinator()
_ndx += 1
if _ndx == len(word_lengths):
return
for combo, compliment in c.combo_compliments(jumble_letters, word_lengths[_ndx]):
valid_words = dejumbler(combo, d)
if len(valid_words) == 0:
if len(cur_words) != 0:
let_list = list(cur_words)
longth = len(cur_words)
str_comp = ''.join(jumble_letters)
let_list.append(str_comp)
out_list[len(word_lengths) - longth].append(let_list)
for valid_word in valid_words:
new_words = list(cur_words)
new_words.append(valid_word)
_multi_word_dejumbler(compliment, word_lengths, new_words, out_list, d, _ndx)
def dictionary_melter():
f = open('big_dict.txt')
table = Hashtable()
index = -1
while True:
index += 1
word = f.readline()
word = word.strip().lower()
        if word == '':
f.close()
return table
melty = word_melter(word)
if melty in table:
table.get(melty).append(word)
else:
table.set(melty, [word])
def dictionary_melter_std():
"""
    I know using the python dict is kind of cheating, but I've
had enough of serialization ever since I got into Unity
"""
f = open('big_dict.txt')
table = {}
index = -1
while True:
index += 1
word = f.readline()
word = word.strip().lower()
        if word == '':
f.close()
return table
melty = word_melter(word)
if melty in table:
table[melty].append(word)
else:
table[melty] = [word]
def word_melter(word):
    letter_array = [None] * 27 # the last slot (index 26) is for dash (-)
for letter in word:
if letter == '-':
letter_array[26] = True
else:
letter_array[ord(letter) - 97] = True
retstring = ''
for index, letter in enumerate(letter_array):
if letter is True:
retstring += chr(index + 97)
return retstring
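# Editorial note (not part of the original file): word_melter collapses a word
# to its distinct letters in alphabetical order, e.g. word_melter("hello")
# returns "ehlo". Words sharing that key are candidate anagrams; dejumbler then
# filters them with is_letter_list_permutation.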
def is_letter_list_permutation(llist_1, llist_2):
if len(llist_1) != len(llist_2):
return False
fraction_l1 = [0] * 27
fraction_l2 = [0] * 27
for letter in llist_1:
fraction_l1[ord(letter) - 97] += 1
for letter in llist_2:
fraction_l2[ord(letter) - 97] += 1
if fraction_l1 == fraction_l2:
return True
else:
return False
def meltionary_writer(meltionary):
dd = json.dumps(meltionary)
f = open("meltionary.json","w")
f.write(dd)
f.close()
def meltionary_reader():
f = open("meltionary.json","r")
ds = json.load(f)
f.close()
return ds
def jumble_runner(meltionary):
primary_jumbles = [
['tefon', '--o-o'],
['sokik', 'oo-o-'],
['niumem', '----o-'],
['siconu', '---oo-']
]
answers = []
for jumble in primary_jumbles:
answers.append(dejumbler(jumble[0], meltionary))
secondary_word_lengths = [6, 2]
secondary_letters = ''
for ndx, jumble in enumerate(primary_jumbles):
for index, letter in enumerate(jumble[1]):
if letter == 'o':
secondary_letters += answers[ndx][0][index]
secondary_answers_list = multi_dejumble(secondary_letters, secondary_word_lengths, meltionary)
print()
print(answers)
print()
print(secondary_answers_list[1])
print()
return (answers, secondary_answers_list)
def trash():
"""
Just waiting for the garbageman
"""
"""
c = Combinator()
combos = c.combinations(list(secondary_letters), seondary_jumble[0])
possible_secondary_answers = []
for combo in combos:
compliment = c.compliment(combo, list(secondary_letters))
combo = word_melter(''.join(combo))
compliment = word_melter(''.join(compliment))
answer_1 = dejumbler(combo, meltionary)
answer_2 = dejumbler(compliment, meltionary)
if len(answer_1) is not 0 and len(answer_2) is not 0:
possible_secondary_answers.append((answer_1, answer_2))
probable_secondary_answers = []
for possible_answer in possible_secondary_answers:
probable_secondary_answers.append(possible_answer)
"""
if __name__ == "__main__":
#cProfile.run('dictionary_melter()')
d = dictionary_melter_std()
meltionary_writer(d)
d = meltionary_reader()
#meltionary_writer(d)
#cProfile.run('jumble_runner(d)')
jumble_runner(d)
#print(dejumbler('short', d)) # sardine
| 31.013629
| 106
| 0.561384
|
91df38f9659868f86dde86a8d85541eb873602ed
| 5,917
|
py
|
Python
|
mainGame.py
|
ProNilabh/PythonShootGame.
|
1bb9552d9165d842fc1fbffc3634ae1998159f2a
|
[
"DOC"
] | 318
|
2015-01-12T02:56:45.000Z
|
2022-03-22T20:30:12.000Z
|
mainGame.py
|
ProNilabh/PythonShootGame.
|
1bb9552d9165d842fc1fbffc3634ae1998159f2a
|
[
"DOC"
] | 4
|
2016-02-16T07:30:54.000Z
|
2020-02-15T11:20:10.000Z
|
mainGame.py
|
ProNilabh/PythonShootGame.
|
1bb9552d9165d842fc1fbffc3634ae1998159f2a
|
[
"DOC"
] | 303
|
2015-01-21T09:30:57.000Z
|
2022-03-26T21:04:43.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 11:05:00 2013
@author: Leo
"""
import pygame
from sys import exit
from pygame.locals import *
from gameRole import *
import random
# Initialize the game
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('飞机大战')
# Load the game audio
bullet_sound = pygame.mixer.Sound('resources/sound/bullet.wav')
enemy1_down_sound = pygame.mixer.Sound('resources/sound/enemy1_down.wav')
game_over_sound = pygame.mixer.Sound('resources/sound/game_over.wav')
bullet_sound.set_volume(0.3)
enemy1_down_sound.set_volume(0.3)
game_over_sound.set_volume(0.3)
pygame.mixer.music.load('resources/sound/game_music.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
# Load the background images
background = pygame.image.load('resources/image/background.png').convert()
game_over = pygame.image.load('resources/image/gameover.png')
filename = 'resources/image/shoot.png'
plane_img = pygame.image.load(filename)
# Set up player-related parameters
player_rect = []
player_rect.append(pygame.Rect(0, 99, 102, 126)) # player sprite image regions
player_rect.append(pygame.Rect(165, 360, 102, 126))
player_rect.append(pygame.Rect(165, 234, 102, 126)) # player explosion sprite image regions
player_rect.append(pygame.Rect(330, 624, 102, 126))
player_rect.append(pygame.Rect(330, 498, 102, 126))
player_rect.append(pygame.Rect(432, 624, 102, 126))
player_pos = [200, 600]
player = Player(plane_img, player_rect, player_pos)
# Define the surface parameters used by the bullet objects
bullet_rect = pygame.Rect(1004, 987, 9, 21)
bullet_img = plane_img.subsurface(bullet_rect)
# Define the surface parameters used by the enemy objects
enemy1_rect = pygame.Rect(534, 612, 57, 43)
enemy1_img = plane_img.subsurface(enemy1_rect)
enemy1_down_imgs = []
enemy1_down_imgs.append(plane_img.subsurface(pygame.Rect(267, 347, 57, 43)))
enemy1_down_imgs.append(plane_img.subsurface(pygame.Rect(873, 697, 57, 43)))
enemy1_down_imgs.append(plane_img.subsurface(pygame.Rect(267, 296, 57, 43)))
enemy1_down_imgs.append(plane_img.subsurface(pygame.Rect(930, 697, 57, 43)))
enemies1 = pygame.sprite.Group()
# Store destroyed enemy planes so the destruction sprite animation can be rendered
enemies_down = pygame.sprite.Group()
shoot_frequency = 0
enemy_frequency = 0
player_down_index = 16
score = 0
clock = pygame.time.Clock()
running = True
while running:
    # Cap the game's maximum frame rate (45 fps here)
clock.tick(45)
    # Control the firing rate and fire bullets
if not player.is_hit:
if shoot_frequency % 15 == 0:
bullet_sound.play()
player.shoot(bullet_img)
shoot_frequency += 1
if shoot_frequency >= 15:
shoot_frequency = 0
    # Spawn enemy planes
if enemy_frequency % 50 == 0:
enemy1_pos = [random.randint(0, SCREEN_WIDTH - enemy1_rect.width), 0]
enemy1 = Enemy(enemy1_img, enemy1_down_imgs, enemy1_pos)
enemies1.add(enemy1)
enemy_frequency += 1
if enemy_frequency >= 100:
enemy_frequency = 0
    # Move bullets and remove any that leave the window
for bullet in player.bullets:
bullet.move()
if bullet.rect.bottom < 0:
player.bullets.remove(bullet)
    # Move enemy planes and remove any that leave the window
for enemy in enemies1:
enemy.move()
        # Check whether the player has been hit
if pygame.sprite.collide_circle(enemy, player):
enemies_down.add(enemy)
enemies1.remove(enemy)
player.is_hit = True
game_over_sound.play()
break
if enemy.rect.top > SCREEN_HEIGHT:
enemies1.remove(enemy)
    # Add hit enemies to the destroyed-enemies group so the destruction animation can be rendered
enemies1_down = pygame.sprite.groupcollide(enemies1, player.bullets, 1, 1)
for enemy_down in enemies1_down:
enemies_down.add(enemy_down)
    # Draw the background
screen.fill(0)
screen.blit(background, (0, 0))
    # Draw the player plane
if not player.is_hit:
screen.blit(player.image[player.img_index], player.rect)
        # Cycle the image index to animate the plane
player.img_index = shoot_frequency // 8
else:
player.img_index = player_down_index // 8
screen.blit(player.image[player.img_index], player.rect)
player_down_index += 1
if player_down_index > 47:
running = False
    # Draw the destruction animations
for enemy_down in enemies_down:
if enemy_down.down_index == 0:
enemy1_down_sound.play()
if enemy_down.down_index > 7:
enemies_down.remove(enemy_down)
score += 1000
continue
screen.blit(enemy_down.down_imgs[enemy_down.down_index // 2], enemy_down.rect)
enemy_down.down_index += 1
    # Draw bullets and enemy planes
player.bullets.draw(screen)
enemies1.draw(screen)
    # Draw the score
score_font = pygame.font.Font(None, 36)
score_text = score_font.render(str(score), True, (128, 128, 128))
text_rect = score_text.get_rect()
text_rect.topleft = [10, 10]
screen.blit(score_text, text_rect)
    # Update the screen
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
    # Listen for keyboard events
key_pressed = pygame.key.get_pressed()
    # Ignore movement input once the player has been hit
if not player.is_hit:
if key_pressed[K_w] or key_pressed[K_UP]:
player.moveUp()
if key_pressed[K_s] or key_pressed[K_DOWN]:
player.moveDown()
if key_pressed[K_a] or key_pressed[K_LEFT]:
player.moveLeft()
if key_pressed[K_d] or key_pressed[K_RIGHT]:
player.moveRight()
font = pygame.font.Font(None, 48)
text = font.render('Score: '+ str(score), True, (255, 0, 0))
text_rect = text.get_rect()
text_rect.centerx = screen.get_rect().centerx
text_rect.centery = screen.get_rect().centery + 24
screen.blit(game_over, (0, 0))
screen.blit(text, text_rect)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit()
pygame.display.update()
| 29.883838
| 87
| 0.648471
|
f886732c96a6d91ddbe61c7da1f1b3464530a11c
| 1,098
|
py
|
Python
|
eyantra_provider/provider/urls.py
|
Andreaf2395/OpenID-Provider
|
cdedd42cc49e6f03e3b2570c03fb1f4a2c83be34
|
[
"MIT"
] | null | null | null |
eyantra_provider/provider/urls.py
|
Andreaf2395/OpenID-Provider
|
cdedd42cc49e6f03e3b2570c03fb1f4a2c83be34
|
[
"MIT"
] | 4
|
2021-03-19T08:17:59.000Z
|
2021-06-10T19:34:36.000Z
|
eyantra_provider/provider/urls.py
|
Andreaf2395/OpenID-Provider
|
cdedd42cc49e6f03e3b2570c03fb1f4a2c83be34
|
[
"MIT"
] | null | null | null |
"""oidc_provider URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('create_client/',views.create_client,name='create_client'),
path('oauth/authorize/',views.authorize,name='oauth_authorize'),
path('oauth/token/',views.issue_token,name='issue_token'),
path('oauth/revoke/',views.revoke_token,name='revoke_token'),
path('oauth/login/',views.login,name='oauth_login'),
]
| 40.666667
| 77
| 0.715847
|
38e836339819dab9ee46a414709ad2f93484da55
| 2,856
|
py
|
Python
|
art/art_T1049-3.py
|
blackbotinc/artic2-atomics
|
83f6ae1269c23f29b361a974455011df80c61945
|
[
"Apache-2.0"
] | null | null | null |
art/art_T1049-3.py
|
blackbotinc/artic2-atomics
|
83f6ae1269c23f29b361a974455011df80c61945
|
[
"Apache-2.0"
] | null | null | null |
art/art_T1049-3.py
|
blackbotinc/artic2-atomics
|
83f6ae1269c23f29b361a974455011df80c61945
|
[
"Apache-2.0"
] | 1
|
2022-03-31T11:07:56.000Z
|
2022-03-31T11:07:56.000Z
|
from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
def __init__(self):
self.name = 'Discovery/T1049-3'
self.controller_type = ''
self.external_id = 'T1049'
self.blackbot_id = 'T1049-3'
self.version = ''
self.language = 'boo'
self.description = self.get_description()
self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
self.references = ["System.Management.Automation"]
self.options = {
'OutString': {
'Description' : 'Appends Out-String to the PowerShellCode',
'Required' : False,
'Value' : True,
},
'BypassLogging': {
'Description' : 'Bypasses ScriptBlock and Techniques logging',
'Required' : False,
'Value' : True,
},
'BypassAmsi': {
'Description' : 'Bypasses AMSI',
'Required' : False,
'Value' : True,
}
}
def payload(self):
with open(get_path_in_package('core/wss/ttp/art/src/powershell.boo'), 'r') as ttp_src:
src = ttp_src.read()
pwsh_script = get_path_in_package('core/wss/ttp/art/pwsh_ttp/discovery/T1049-3')
with open(pwsh_script) as pwsh:
src = src.replace("POWERSHELL_SCRIPT", pwsh.read())
src = src.replace("OUT_STRING", str(self.options["OutString"]["Value"]).lower())
src = src.replace("BYPASS_LOGGING", str(self.options["BypassLogging"]["Value"]).lower())
src = src.replace("BYPASS_AMSI", str(self.options["BypassAmsi"]["Value"]).lower())
return src
def get_description(self):
path = get_path_in_package('core/wss/ttp/art/pwsh_ttp/discovery/T1049-3')
with open(path) as text:
head = [next(text) for l in range(4)]
technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
description = head[2].replace('#Description: ', '').strip('\n')
language = head[3].replace('#Language: ', '').strip('\n')
aux = ''
count = 1
for char in description:
if char == '&':
continue
aux += char
if count % 126 == 0:
aux += '\n'
count += 1
out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
return out
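# Illustrative sketch (editorial addition, not part of the original file):
# get_description() above expects the first four lines of the referenced
# pwsh_ttp script to look like the following hypothetical header; the names
# and text are made up.
#   #TechniqueName: System Network Connections Discovery
#   #AtomicTestName: example test name
#   #Description: example description text
#   #Language: powershell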
| 37.090909
| 104
| 0.507703
|
96a35873558e7e20ad49f9b61a0e2daf9d268f55
| 26,427
|
py
|
Python
|
test/test_comment_sidecar.py
|
avrona/comment-sidecar
|
9a148ddb9ffe89d55ca4a481852b87bbbae0d305
|
[
"MIT"
] | 39
|
2017-07-10T14:03:35.000Z
|
2021-12-26T02:52:05.000Z
|
test/test_comment_sidecar.py
|
avrona/comment-sidecar
|
9a148ddb9ffe89d55ca4a481852b87bbbae0d305
|
[
"MIT"
] | 3
|
2020-06-09T15:42:51.000Z
|
2021-02-24T20:37:26.000Z
|
test/test_comment_sidecar.py
|
avrona/comment-sidecar
|
9a148ddb9ffe89d55ca4a481852b87bbbae0d305
|
[
"MIT"
] | 8
|
2018-01-07T11:57:54.000Z
|
2021-08-08T07:53:07.000Z
|
#!/usr/bin/env python3
import re
import pytest
import requests
from mysql.connector import connect
from requests.models import Response
import unittest
import hashlib
import time
from path import Path
from assertpy import assert_that, fail
UNSUBSCRIBE_SUCCESS_MSG = "unsubscribed successfully"
UNSUBSCRIBE_ERROR_MSG = "Nothing has been updated. Either the comment doesn't exist or the unsubscribe token is invalid."
ADMIN_EMAIL = 'test@localhost.de'
DEFAULT_PATH = "/blogpost1/"
DEFAULT_SITE = "https://petersworld.com"
ERROR_MESSAGE_MISSING_SITE_PATH = "Please submit both query parameters 'site' and 'path'"
INVALID_QUERY_PARAMS_UNSUBSCRIBE = "Please submit both query parameters 'commentId' and 'unsubscribeToken'"
COMMENT_SIDECAR_URL = 'http://localhost/comment-sidecar.php'
UNSUBSCRIBE_URL = 'http://localhost/unsubscribe.php'
MAILHOG_BASE_URL = 'http://localhost:8025/api/'
MAILHOG_MESSAGES_URL = MAILHOG_BASE_URL + 'v2/messages'
MYSQLDB_CONNECTION = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': 'root', 'db': 'comment-sidecar'}
@pytest.fixture(scope="module", autouse=True)
def db():
# first, run `docker-compose up`
db = connect(**MYSQLDB_CONNECTION)
cur = db.cursor()
with get_file_path('sql/init.sql').open('r') as sql:
query = "".join(sql.readlines())
cur.execute(query)
return db
@pytest.fixture(scope="function", autouse=True)
def before_each():
db = connect(**MYSQLDB_CONNECTION)
cur = db.cursor()
cur.execute("TRUNCATE TABLE comments;")
cur.execute("TRUNCATE TABLE ip_addresses;")
set_rate_limit_threshold(seconds=0)
@pytest.mark.parametrize("queryParams", {'', 'site=&path=', 'site=domain.com', 'path=blogpost1'})
def test_GET_invalid_query_params(queryParams):
response = requests.get(f'{COMMENT_SIDECAR_URL}?{queryParams}')
assert_that(response.status_code).is_equal_to(400)
assert_that(response.json()["message"]).is_equal_to(ERROR_MESSAGE_MISSING_SITE_PATH)
def test_GET_empty_array_if_no_comments():
response = get_comments()
assert_that(response.text).is_equal_to('[]')
def test_POST_and_GET_comment():
post_payload = create_post_payload()
timestamp_before = int(time.time())
response = post_comment(post_payload)
timestamp_after = int(time.time())
assert_that(response.json()['id']).is_equal_to(1)
get_response = get_comments()
comments_json = get_response.json()
assert_that(comments_json).is_length(1)
returned_comment = comments_json[0]
assert_that(returned_comment)\
.contains_entry({'id': '1'})\
.contains_entry({'author': post_payload["author"]})\
.contains_entry({'content': post_payload["content"]})\
.does_not_contain_key('replies')
assert_timestamp_between(returned_comment["creationTimestamp"], start=timestamp_before, end=timestamp_after)
assert_absent_fields(returned_comment)
def test_POST_comments_and_replies_and_GET_reply_chain():
# for adhoc debugging: `http "localhost/comment-sidecar.php?site=peterworld%2Ecom&path=%2Fblogpost1%2F&XDEBUG_SESSION_START=IDEA_DEBUG"`
# root1
# - reply 1 to root
# - reply 2 to root
# - reply to reply 2
post_payload = create_post_payload()
post_payload['content'] = 'root'
response = post_comment(post_payload)
root_id = response.json()['id']
post_payload = create_post_payload()
post_payload['replyTo'] = root_id
post_payload['content'] = 'reply 1 to root'
post_comment(post_payload)
post_payload = create_post_payload()
post_payload['replyTo'] = root_id
post_payload['content'] = 'reply 2 to root'
response = post_comment(post_payload)
reply2_id = response.json()['id']
post_payload = create_post_payload()
post_payload['replyTo'] = reply2_id
post_payload['content'] = 'reply 3 to reply 2'
post_comment(post_payload)
get_response = get_comments()
# check root comments
returned_comments = get_response.json()
assert_that(returned_comments).is_length(1) # replies are nested so only one root comment
replies = returned_comments[0]['replies']
# check reply level 1
assert_that(replies).is_not_none().is_length(2)
assert_replies_contains(replies, {
'content': 'reply 1 to root', 'id': '2', 'author': 'Peter',
})
assert_replies_contains(replies, {
'content': 'reply 2 to root', 'id': '3', 'author': 'Peter',
})
for reply in replies:
assert_absent_fields(reply)
# check reply level 2
comment = get_comment_by_content(replies, 'reply 2 to root')
replies_to_reply = comment['replies']
assert_that(replies_to_reply).is_length(1)
assert_replies_contains(replies_to_reply, {
'content': 'reply 3 to reply 2', 'id': '4', 'author': 'Peter',
})
def test_POST_invalid_replyTo_ID():
post_payload = create_post_payload()
post_payload['replyTo'] = '989089'
response = post_comment(post_payload, assert_success=False)
assert_that(response.status_code).described_as("Invalid replyTo ID should be rejected.").is_equal_to(400)
assert_that(response.json()['message']).is_equal_to("The replyTo value '989089' refers to a not existing id.")
def test_POST_and_GET_comment_with_german_umlauts():
post_payload = create_post_payload()
post_payload['content'] = "äöüß - Deutsche Umlaute? Kein Problem für utf-8! ÖÄÜ"
post_payload['author'] = "öäüßÖÄÜ"
response = post_comment(post_payload)
assert_that(response.json()['id']).is_equal_to(1)
get_response = get_comments()
returned_comment = get_response.json()[0]
assert_that(returned_comment)\
.contains_entry({'author': post_payload["author"]})\
.contains_entry({'content': post_payload["content"]})
def test_OPTIONS_CORS_headers_valid_origin():
    # Before sending a POST, the browser sends an OPTIONS request as a preflight to check the CORS headers.
    # The backend only returns the required CORS headers if the Origin is set to an allowed domain
    # (see the illustrative sketch after this test).
post_payload = create_post_payload()
valid_origin = 'http://testdomain.com'
preflight_response = requests.options(url=COMMENT_SIDECAR_URL, json=post_payload, headers={'Origin': valid_origin})
assert_cors_headers_exists(preflight_response, valid_origin)
assert_that(preflight_response.text).is_empty()
assert_that(get_comments().json())\
.described_as("No comment should have been created after an OPTIONS request")\
.is_empty()
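# Illustrative sketch of the preflight exchange asserted above (editor's
# addition; the response header values are taken from assert_cors_headers_exists
# below, the rest of the exchange is assumed):
#
#   OPTIONS /comment-sidecar.php HTTP/1.1
#   Origin: http://testdomain.com
#
#   HTTP/1.1 200 OK
#   Access-Control-Allow-Origin: http://testdomain.com
#   Access-Control-Allow-Methods: GET, POST
#   Access-Control-Allow-Headers: Content-Type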
def test_OPTIONS_CORS_headers_invalid_origin():
post_payload = create_post_payload()
    invalid_origin = 'http://invalid.com'
    preflight_response = requests.options(url=COMMENT_SIDECAR_URL, json=post_payload, headers={'Origin': invalid_origin})
assert_cors_headers_doesnt_exists(preflight_response)
assert_that(preflight_response.text).is_empty()
assert_that(get_comments().json()) \
.described_as("No comment should have been created after an OPTIONS request") \
.is_empty()
def test_GET_CORS_headers_valid_origin():
    # For GETs, the browser sends the request immediately (without a preflight) but rejects the response if the CORS headers are not set.
    # The backend only returns the required CORS headers if the Origin is set to an allowed domain.
valid_origin = 'http://testdomain.com'
response = requests.get("{}?site={}&path={}".format(COMMENT_SIDECAR_URL, DEFAULT_SITE, DEFAULT_PATH), headers={'Origin': valid_origin})
assert_cors_headers_exists(response, valid_origin)
def test_GET_CORS_headers_invalid_origin():
    invalid_origin = 'http://invalid.com'
    response = requests.get("{}?site={}&path={}".format(COMMENT_SIDECAR_URL, DEFAULT_SITE, DEFAULT_PATH), headers={'Origin': invalid_origin})
assert_cors_headers_doesnt_exists(response)
def test_GET_different_paths():
path_with_two_comments = "/post1/"
path_with_one_comment = "/post2/"
post_payload = create_post_payload()
post_payload['path'] = path_with_two_comments
post_comment(post_payload)
post_comment(post_payload)
post_payload['path'] = path_with_one_comment
post_comment(post_payload)
response = get_comments(path=path_with_two_comments)
assert_that(response.json()).is_length(2)
response = get_comments(path=path_with_one_comment)
assert_that(response.json()).is_length(1)
def test_GET_different_sites():
site_with_two_comments = "mydomain2.com"
site_with_one_comment = "mydomain1.com"
post_payload = create_post_payload()
post_payload['site'] = site_with_two_comments
post_comment(post_payload)
post_comment(post_payload)
post_payload['site'] = site_with_one_comment
post_comment(post_payload)
response = get_comments(site=site_with_two_comments)
assert_that(response.json()).is_length(2)
response = get_comments(site=site_with_one_comment)
assert_that(response.json()).is_length(1)
def test_POST_without_optional_email():
post_payload = create_post_payload()
post_payload.pop('email')
response = post_comment(post_payload)
assert_that(response.json()['id']).is_equal_to(1)
@pytest.mark.parametrize("field", {'author', 'content', 'site', 'path'})
def test_POST_missing_fields(field):
post_comment_with_missing_field_and_assert_error(field)
@pytest.mark.parametrize("field", {'author', 'content', 'site', 'path'})
def test_POST_empty_fields(field):
post_comment_with_empty_field_and_assert_error(field)
@pytest.mark.parametrize("field", {'author', 'content', 'site', 'path'})
def test_POST_blank_fields(field):
post_comment_with_blank_field_and_assert_error(field)
def test_POST_to_long_fields():
post_comment_to_long_field_and_assert_error('author', 40)
post_comment_to_long_field_and_assert_error('email', 40)
post_comment_to_long_field_and_assert_error('site', 40)
post_comment_to_long_field_and_assert_error('path', 170)
def test_POST_spam_protection_set_url_is_spam():
post_payload = create_post_payload()
post_payload['url'] = 'http://only.spambots.will/populate/this/field/'
response = post_comment(post_payload, assert_success=False)
assert_that(response.status_code)\
        .described_as("POST payload with a URL field should be rejected. The URL is a hidden form field used only for spam protection.")\
.is_equal_to(400)
assert_that(response.json()['message']).is_empty()
def test_email_notification_after_successful_POST():
clear_mails()
post_payload = create_post_payload()
post_comment(post_payload)
json = requests.get(MAILHOG_MESSAGES_URL).json()
assert_that(json['total']).is_equal_to(1)
mail_content = json['items'][0]['Content']
mail_body = mail_content['Body']
assert_that(mail_body).contains(post_payload['site'])\
.contains(post_payload['path'])\
.contains(post_payload['content'])
headers = mail_content['Headers']
assert_that(headers['Content-Transfer-Encoding'][0]).is_equal_to('8bit')
assert_that(headers['Content-Type'][0]).is_equal_to('text/plain; charset=UTF-8')
assert_that(headers['Mime-Version'][0]).is_equal_to('1.0')
assert_that(headers['From'][0]).is_equal_to('{}<{}>'.format(post_payload['author'], post_payload['email']))
assert_that(headers['Subject'][0]).is_equal_to('Comment by {} on {}'.format(post_payload['author'], post_payload['path']))
assert_that(headers['To'][0]).is_equal_to(ADMIN_EMAIL)
def test_no_email_notification_after_invalid_POST():
clear_mails()
post_payload = create_post_payload()
post_payload.pop('author')
post_comment(post_payload, assert_success=False)
json = requests.get(MAILHOG_MESSAGES_URL).json()
assert_that(json['total']).is_equal_to(0)
def test_POST_spam_protection_empty_url_is_fine():
post_payload = create_post_payload()
post_payload['url'] = ""
response = post_comment(post_payload)
assert_that(response.json()['id']).is_equal_to(1)
def test_escaped_HTML_XSS_protection():
post_payload = create_post_payload()
post_payload['author'] = "<strong>Peter</strong>"
post_payload['content'] = '<script type="text/javascript">document.querySelector("aside#comment-sidecar h1").innerText = "XSS";</script>'
response = post_comment(post_payload)
assert_that(response.json()['id']).is_equal_to(1)
returned_json = get_comments().json()[0]
assert_that(returned_json)\
.contains_entry({'author': '<strong>Peter</strong>'})\
.contains_entry({'content': '<script type="text/javascript">document.querySelector("aside#comment-sidecar h1").innerText = "XSS";</script>'})
def test_subscription_mail_on_reply():
clear_mails()
path = "/commented-post/"
site = "https://mysupersite.de"
parent = create_post_payload()
parent["email"] = "root@root.com"
parent["path"] = path
parent["site"] = site
response = post_comment(parent)
parent_id = response.json()['id']
reply = create_post_payload()
reply["replyTo"] = parent_id
reply["path"] = path
reply["site"] = site
reply["content"] = "Root, I disagree!"
reply["email"] = "reply@reply.com!"
reply["author"] = "Replyer"
post_comment(reply)
json = requests.get(MAILHOG_MESSAGES_URL).json()
assert_that(json['total']).is_greater_than(1)
mail = find_mail_by_sender(items=json['items'], email_from=reply["author"])
if not mail:
fail("No notification mail was found! recipient/parent: {}. sender/reply author: {}".format(parent["email"], reply["author"]))
assert_that(mail["from"]).contains(reply["author"])\
.does_not_contain(reply["email"])
assert_that(mail).has_subject("Reply to your comment by {}".format(reply["author"]))\
.has_to(parent["email"])
unsubscribe_token = retrieve_unsubscribe_token_from_db(parent_id)
unsubscribe_link = "{}?commentId={}&unsubscribeToken={}".format(UNSUBSCRIBE_URL, parent_id, unsubscribe_token)
link_to_site = "{}{}#comment-sidecar".format(site, path)
assert_that(mail["body"]).contains(reply["content"])\
.contains(unsubscribe_link)\
.contains(link_to_site)\
.contains(reply["author"])\
.does_not_contain(reply["email"])
def test_subscription_no_mail_on_reply_if_no_parent_mail_defined():
clear_mails()
root_payload = create_post_payload()
root_payload.pop('email')
response = post_comment(root_payload)
root_id = response.json()['id']
reply_payload = create_post_payload()
reply_payload["replyTo"] = root_id
reply_payload["content"] = "Root, I disagree!"
reply_payload["email"] = "reply@reply.com!"
reply_payload["author"] = "Replyer"
post_comment(reply_payload)
json = requests.get(MAILHOG_MESSAGES_URL).json()
assert_no_mail_except_admin_mail(items=json['items'])
def test_subscription_no_mail_on_reply_if_unsubscribed():
clear_mails()
root_payload = create_post_payload()
root_payload["email"] = "root@root.com"
response = post_comment(root_payload)
root_id = response.json()['id']
unsubscribe_token = retrieve_unsubscribe_token_from_db(root_id)
unsubscribe(root_id, unsubscribe_token)
reply_payload = create_post_payload()
reply_payload["replyTo"] = root_id
reply_payload["content"] = "Root, I disagree!"
reply_payload["email"] = "reply@reply.com!"
reply_payload["author"] = "Replyer"
post_comment(reply_payload)
json = requests.get(MAILHOG_MESSAGES_URL).json()
assert_no_mail_except_admin_mail(items=json['items'])
def test_unsubscribe_missing_parameter():
unsubscribe_with_url_assert_error('{}'.format(UNSUBSCRIBE_URL))
unsubscribe_with_url_assert_error('{}?commentId={}'.format(UNSUBSCRIBE_URL, 1))
unsubscribe_with_url_assert_error('{}?unsubscribeToken={}'.format(UNSUBSCRIBE_URL, '12391023'))
def test_unsubscribe():
payload = create_post_payload()
response = post_comment(payload)
id = response.json()["id"]
assume_subscription_state_in_db(id, True)
unsubscribe_token = retrieve_unsubscribe_token_from_db(id)
response = unsubscribe(id, unsubscribe_token)
assert_that(response.text).contains(UNSUBSCRIBE_SUCCESS_MSG)
assume_subscription_state_in_db(id, False)
def test_unsubscribe_twice():
payload = create_post_payload()
response = post_comment(payload)
id = response.json()["id"]
assume_subscription_state_in_db(id, True)
unsubscribe_token = retrieve_unsubscribe_token_from_db(id)
unsubscribe(id, unsubscribe_token)
response = unsubscribe(id, unsubscribe_token)
assert_that(response.text).is_equal_to(UNSUBSCRIBE_ERROR_MSG)
assume_subscription_state_in_db(id, False)
def test_unsubscribe_wrong_token():
payload = create_post_payload()
response = post_comment(payload)
id = response.json()["id"]
assume_subscription_state_in_db(id, True)
invalid_unsubscribe_token = "1111jd"
response = unsubscribe(id, invalid_unsubscribe_token)
assert_that(response.text).is_equal_to(UNSUBSCRIBE_ERROR_MSG)
assume_subscription_state_in_db(id, True)
def test_unsubscribe_wrong_id():
payload = create_post_payload()
response = post_comment(payload)
id = response.json()["id"]
unsubscribe_token = retrieve_unsubscribe_token_from_db(id)
response = unsubscribe(123, unsubscribe_token)
assert_that(response.text).is_equal_to(UNSUBSCRIBE_ERROR_MSG)
def test_rate_limiting_second_request_is_rejected():
set_rate_limit_threshold(seconds=1)
post_comment(create_post_payload(), assert_success=True)
response2 = post_comment(create_post_payload(), assert_success=False)
assert_that(response2.status_code) \
.described_as("The rate limiting should prevent the second comment post") \
.is_equal_to(400)
assert_that(response2.json()['message']).contains("exceeded the maximal number of comments ")
def test_rate_limiting_second_request_is_okay_after_waiting_a_while():
set_rate_limit_threshold(seconds=1)
post_comment(create_post_payload(), assert_success=True)
time.sleep(2)
post_comment(create_post_payload(), assert_success=True)
def test_use_gzip_encoding():
assert_that(post_comment(create_post_payload()).headers['Content-Encoding']).is_equal_to("gzip")
assert_that(get_comments().headers['Content-Encoding']).is_equal_to("gzip")
# PRIVATE functions
regex = re.compile(r'const RATE_LIMIT_THRESHOLD_SECONDS = ".*";', re.IGNORECASE)
# It is a little hacky to touch the running PHP code in the source folder during the test,
# but it avoids adding complexity, performance overhead, and security risks to the production code.
# See the illustrative example after set_rate_limit_threshold() below.
def set_rate_limit_threshold(seconds):
new_config_string = None
config_path = get_file_path('src/config.php')
with config_path.open("r") as config_file:
config_string = config_file.read()
new_config_string = regex.sub(f'const RATE_LIMIT_THRESHOLD_SECONDS = "{seconds}";', config_string)
with config_path.open("w") as config_file:
config_file.write(new_config_string)
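# Example (editor's addition): with a zero threshold the config line reads
#   const RATE_LIMIT_THRESHOLD_SECONDS = "0";
# and set_rate_limit_threshold(seconds=1) rewrites it in place to
#   const RATE_LIMIT_THRESHOLD_SECONDS = "1";
# The concrete values are illustrative; only the line format is fixed by the regex above.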
def assume_subscription_state_in_db(comment_id, expected_subscription_state):
db = connect(**MYSQLDB_CONNECTION)
cur = db.cursor()
cur.execute("SELECT subscribed, email FROM comments WHERE id = {}".format(comment_id))
result = cur.fetchone()
subscribed = result[0]
email = result[1]
if expected_subscription_state:
assert_that(subscribed).described_as('subscribed state').is_equal_to(1)
assert_that(email).described_as('email').is_not_none()
else:
assert_that(subscribed).described_as('subscribed state').is_equal_to(0)
assert_that(email).described_as('email').is_none()
def retrieve_unsubscribe_token_from_db(comment_id):
db = connect(**MYSQLDB_CONNECTION)
cur = db.cursor()
cur.execute("SELECT unsubscribe_token FROM comments WHERE id = {}".format(comment_id))
return cur.fetchone()[0]
def unsubscribe(comment_id, unsubscribe_token):
response = requests.get(url='{}?commentId={}&unsubscribeToken={}&XDEBUG_SESSION_START=IDEA_DEBUG'.format(UNSUBSCRIBE_URL, comment_id, unsubscribe_token))
assert_that(response).has_status_code(200)
return response
def unsubscribe_with_url_assert_error(url):
response = requests.get(url)
assert_that(response).has_status_code(400)
assert_that(response.json()['message']).is_equal_to(INVALID_QUERY_PARAMS_UNSUBSCRIBE)
def assert_cors_headers_exists(preflight_response, expected_allowed_origin):
    assert_that(preflight_response.headers)\
        .contains_entry({'Access-Control-Allow-Origin': expected_allowed_origin})\
        .contains_entry({'Access-Control-Allow-Methods': 'GET, POST'})\
        .contains_entry({'Access-Control-Allow-Headers': 'Content-Type'})
def assert_cors_headers_doesnt_exists(preflight_response):
assert_that(preflight_response.headers)\
.does_not_contain_key('Access-Control-Allow-Origin')\
.does_not_contain_key('Access-Control-Allow-Methods')\
.does_not_contain_key('Access-Control-Allow-Headers')
def assert_replies_contains(replies, assumed_element):
replies_matching_assumed_element = [reply for reply in replies
if reply['content'] == assumed_element['content']
and reply['id'] == assumed_element['id']
and reply['author'] == assumed_element['author']
]
assert_that(replies_matching_assumed_element) \
.described_as("Element is not in the list (or more than once).\nassumed_element: {}\nall elements: {}\n".format(assumed_element, replies)) \
.is_length(1)
def assert_absent_fields(returned_comment):
assert_that(returned_comment).does_not_contain_key('email')\
.does_not_contain_key('path')\
.does_not_contain_key('site')\
.does_not_contain_key('replyTo')
def clear_mails():
response = requests.delete(MAILHOG_BASE_URL + 'v1/messages')
assert_that(response.status_code).described_as("Test setup failed: Couldn't delete mails in mailhog.")\
.is_equal_to(200)
def post_comment_with_missing_field_and_assert_error(missing_field: str):
post_payload = create_post_payload()
post_payload.pop(missing_field)
response = post_comment(post_payload, assert_success=False)
assert_that(response.status_code).is_equal_to(400)
assert_that(response.json()['message']).is_equal_to(missing_field + " is missing, empty or blank")
def post_comment_with_empty_field_and_assert_error(empty_field: str):
post_payload = create_post_payload()
post_payload[empty_field] = ""
response = post_comment(post_payload, assert_success=False)
assert_that(response.status_code).is_equal_to(400)
assert_that(response.json()['message']).is_equal_to(empty_field + " is missing, empty or blank")
def post_comment_with_blank_field_and_assert_error(blank_field: str):
post_payload = create_post_payload()
post_payload[blank_field] = " "
response = post_comment(post_payload, assert_success=False)
assert_that(response.status_code).is_equal_to(400)
assert_that(response.json()['message']).is_equal_to(blank_field + " is missing, empty or blank")
def post_comment_to_long_field_and_assert_error(field: str, max_length: int):
post_payload = create_post_payload()
post_payload[field] = "x" * (max_length + 1)
response = post_comment(post_payload, assert_success=False)
assert_that(response.json()['message']).is_equal_to(field + " value exceeds maximal length of " + str(max_length))
assert_that(response.status_code).is_equal_to(400)
# valid length (check it to avoid off-by-one-errors)
post_payload[field] = "x" * max_length
post_comment(post_payload)
def assert_timestamp_between(creation_timestamp: str, start: int, end: int):
timestamp = int(creation_timestamp)
assert_that(timestamp).is_greater_than_or_equal_to(start)\
.is_less_than_or_equal_to(end)
def post_comment(post_payload, assert_success: bool=True) -> Response:
response = requests.post(url=COMMENT_SIDECAR_URL, json=post_payload)
if assert_success:
assert_that(response.status_code) \
.described_as("Comment creation failed. Message: " + response.text) \
.is_equal_to(201)
return response
def get_comments(site: str = DEFAULT_SITE, path: str = DEFAULT_PATH, assert_success: bool=True) -> Response:
response = requests.get("{}?site={}&path={}".format(COMMENT_SIDECAR_URL, site, path))
if assert_success:
assert_that(response.status_code)\
.described_as("Getting comments failed. Message: " + response.text)\
.is_equal_to(200)
return response
def get_comment_by_content(replies, content):
comments = [comment for comment in replies if comment['content'] == content]
assert_that(comments)\
        .described_as("There should be exactly one comment with the content '{}'. Elements: {}".format(content, str(replies)))\
.is_length(1)
return comments[0]
def create_post_payload():
return {
"author": "Peter",
"content": "Peter's comment",
"email": "peter@petersworld.com",
"path": DEFAULT_PATH,
"site": DEFAULT_SITE
}
def find_mail_by_sender(items, email_from: str):
for item in items:
content = item['Content']
headers = content['Headers']
if email_from in headers['From'][0]:
return {
"from": headers['From'][0]
, "subject": headers['Subject'][0]
, "to": headers['To'][0]
, "body": content['Body']
}
return None
def assert_no_mail_except_admin_mail(items):
for item in items:
to = item['Content']['Headers']['To'][0]
if to != ADMIN_EMAIL:
actual_mail = "({}; {}; {}; {})".format(item['Content']['Body'], item['Content']['Headers']['From'][0], item['Content']['Headers']['Subject'][0], item['Content']['Headers']['To'][0])
fail("A mail was sent (despite the admin notification) but that shouldn't happen! " + actual_mail)
def get_file_path(path):
path = Path(path) # if invoked via make in project root
if path.exists():
return path
return Path(f'../{path}') # if invoked directly in the IDE
if __name__ == '__main__':
unittest.main()
| 42.900974
| 194
| 0.722784
|
40c68c13ec8f64e1c2bc0bb66cc69cae7ccd75d2
| 4,947
|
py
|
Python
|
gen_best_json.py
|
crwhite14/nasbench
|
ddd6f7840a7650f700565c8ec86e791a3cdecca4
|
[
"Apache-2.0"
] | 1
|
2020-07-06T10:40:13.000Z
|
2020-07-06T10:40:13.000Z
|
gen_best_json.py
|
crwhite14/nasbench
|
ddd6f7840a7650f700565c8ec86e791a3cdecca4
|
[
"Apache-2.0"
] | null | null | null |
gen_best_json.py
|
crwhite14/nasbench
|
ddd6f7840a7650f700565c8ec86e791a3cdecca4
|
[
"Apache-2.0"
] | 1
|
2021-04-02T00:01:20.000Z
|
2021-04-02T00:01:20.000Z
|
from nasbench import api
from random import randint
import json
import numpy as np
import os
from collections import OrderedDict
# Replace this string with the path to the downloaded nasbench.tfrecord before
# executing.
NASBENCH_TFRECORD = 'data/nasbench_only108.tfrecord'
INPUT = 'input'
OUTPUT = 'output'
CONV1X1 = 'conv1x1-bn-relu'
CONV3X3 = 'conv3x3-bn-relu'
MAXPOOL3X3 = 'maxpool3x3'
def gen_data_point(nasbench):
i = 0
epoch = 108
padding = [0, 0, 0, 0, 0, 0, 0]
best_val_acc = 0
best_test_acc = 0
tf_data = {}
for unique_hash in nasbench.hash_iterator():
fixed_metrics, computed_metrics = nasbench.get_metrics_from_hash(unique_hash)
ops_array = transform_operations(fixed_metrics['module_operations'])
ops_list = convert_tf_format(ops_array)
adj_array = fixed_metrics['module_adjacency'].tolist()
print('\nIterating over {} / {} unique models in the dataset.'.format(i, 423623))
test_acc_avg = 0.0
val_acc_avg = 0.0
training_time = 0.0
for repeat_index in range(len(computed_metrics[epoch])):
assert len(computed_metrics[epoch])==3, 'len(computed_metrics[epoch]) should be 3'
data_point = computed_metrics[epoch][repeat_index]
val_acc_avg += data_point['final_validation_accuracy']
test_acc_avg += data_point['final_test_accuracy']
training_time += data_point['final_training_time']
val_acc_avg = val_acc_avg/3.0
test_acc_avg = test_acc_avg/3.0
tf_data[unique_hash] = (adj_array, ops_list, val_acc_avg)
training_time_avg = training_time/3.0
adj_array = fixed_metrics['module_adjacency'].tolist()
params = fixed_metrics['trainable_parameters']
print('parameters size: {}'.format(params))
model_spec = api.ModelSpec(fixed_metrics['module_adjacency'], fixed_metrics['module_operations'])
data = nasbench.query(model_spec, epochs=108)
print('api training time: {}'.format(data['training_time']))
print('real training time: {}'.format(training_time_avg))
if len(adj_array) <= 6:
for row in range(len(adj_array)):
for _ in range(7-len(adj_array)):
adj_array[row].append(0)
for _ in range(7-len(adj_array)):
adj_array.append(padding)
if val_acc_avg > best_val_acc:
best_val_acc = val_acc_avg
if test_acc_avg > best_test_acc:
best_test_acc = test_acc_avg
print('best val. acc: {:.4f}, best test acc {:.4f}'.format(best_val_acc, best_test_acc))
i += 1
return tf_data
def transform_operations(ops):
transform_dict = {'input':0, 'conv1x1-bn-relu':1, 'conv3x3-bn-relu':2, 'maxpool3x3':3, 'output':4}
ops_array = np.zeros([len(ops),5], dtype='int8')
for row, op in enumerate(ops):
col = transform_dict[op]
ops_array[row, col] = 1
return ops_array
def convert_tf_format(ops_array):
arr = ops_array[1:len(ops_array)-1]
arr = arr[:, 1:-1]
arr = np.argmax(arr, axis=-1)
arr = np.insert(arr, 0, -1)
arr = np.insert(arr, len(arr), -2)
return arr.tolist()
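# Worked example (editor's addition, for illustration only):
#   ops = [INPUT, CONV1X1, CONV3X3, MAXPOOL3X3, OUTPUT]
#   transform_operations(ops) one-hot encodes each op over the five known labels;
#   convert_tf_format() then drops the input/output rows, takes the argmax over
#   the three remaining op columns and adds the sentinels, giving
#   [-1, 0, 1, 2, -2]  # -1=input, 0=conv1x1, 1=conv3x3, 2=maxpool3x3, -2=output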
def gen_json_file():
nasbench = api.NASBench(NASBENCH_TFRECORD)
tf_data = gen_data_point(nasbench)
return tf_data
if __name__ == '__main__':
nasbench = api.NASBench(NASBENCH_TFRECORD)
optimizer = 'bananas' # re, ls, bananas
path = '~/nasbench311/NASLib/nas101_0/cifar10/bananas/' + optimizer
seed_dirs = list(os.listdir(path))
seed_dirs = sorted(seed_dirs, key=lambda x: int(x))
topk = 10
shards = {}
for seed_dir in seed_dirs:
f_path = os.path.join(path, seed_dir)
with open(os.path.join(f_path, 'errors.json')) as f:
data = json.load(f)[1]
ind = np.argsort(data['latest_acc'])[-topk:]
for i in ind:
model_spec = api.ModelSpec(data['latest_arch'][i][0], data['latest_arch'][i][1])
hash = nasbench.get_hash(model_spec)
fixed_metrics, computed_metrics = nasbench.get_metrics_from_hash(hash)
val_acc_avg = 0.0
for repeat_index in range(len(computed_metrics[108])):
assert len(computed_metrics[108]) == 3, 'len(computed_metrics[epoch]) should be 3'
data_point = computed_metrics[108][repeat_index]
val_acc_avg += data_point['final_validation_accuracy']
val_acc_avg = val_acc_avg / 3.0
print("actual: {}, predicted: {}".format(val_acc_avg, data['latest_acc'][i]))
if hash not in shards:
shards[hash] = (data['latest_arch'][i][0], convert_tf_format(transform_operations(data['latest_arch'][i][1])))
print(len(shards))
with open('data/shard_best_{}.json'.format(optimizer), 'w') as outfile:
json.dump(shards, outfile)
| 38.053846
| 130
| 0.63776
|
dc608e4d2a76a9b592bf02f9b504a7146958169e
| 2,602
|
py
|
Python
|
core/api.py
|
ans2human/silver-octo-waddle
|
c3fd566d8f2932966eb3f35ed5a19099fad4b3cb
|
[
"MIT"
] | 484
|
2018-02-19T00:38:28.000Z
|
2022-03-26T16:13:51.000Z
|
core/api.py
|
30kaltekar/django-channels-chat
|
827559a7d309bfe07f77f108fb34b7cae4c55779
|
[
"MIT"
] | 17
|
2019-01-16T19:05:24.000Z
|
2022-02-08T15:22:48.000Z
|
core/api.py
|
30kaltekar/django-channels-chat
|
827559a7d309bfe07f77f108fb34b7cae4c55779
|
[
"MIT"
] | 159
|
2018-02-19T01:36:14.000Z
|
2022-03-26T12:03:33.000Z
|
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.authentication import SessionAuthentication
from chat import settings
from core.serializers import MessageModelSerializer, UserModelSerializer
from core.models import MessageModel
class CsrfExemptSessionAuthentication(SessionAuthentication):
"""
SessionAuthentication scheme used by DRF. DRF's SessionAuthentication uses
Django's session framework for authentication which requires CSRF to be
checked. In this case we are going to disable CSRF tokens for the API.
"""
def enforce_csrf(self, request):
return
class MessagePagination(PageNumberPagination):
"""
Limit message prefetch to one page.
"""
page_size = settings.MESSAGES_TO_LOAD
class MessageModelViewSet(ModelViewSet):
queryset = MessageModel.objects.all()
serializer_class = MessageModelSerializer
allowed_methods = ('GET', 'POST', 'HEAD', 'OPTIONS')
authentication_classes = (CsrfExemptSessionAuthentication,)
pagination_class = MessagePagination
def list(self, request, *args, **kwargs):
self.queryset = self.queryset.filter(Q(recipient=request.user) |
Q(user=request.user))
target = self.request.query_params.get('target', None)
if target is not None:
self.queryset = self.queryset.filter(
Q(recipient=request.user, user__username=target) |
Q(recipient__username=target, user=request.user))
return super(MessageModelViewSet, self).list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
msg = get_object_or_404(
self.queryset.filter(Q(recipient=request.user) |
Q(user=request.user),
Q(pk=kwargs['pk'])))
serializer = self.get_serializer(msg)
return Response(serializer.data)
class UserModelViewSet(ModelViewSet):
queryset = User.objects.all()
serializer_class = UserModelSerializer
allowed_methods = ('GET', 'HEAD', 'OPTIONS')
    pagination_class = None  # Return all users (no pagination)
def list(self, request, *args, **kwargs):
# Get all users except yourself
self.queryset = self.queryset.exclude(id=request.user.id)
return super(UserModelViewSet, self).list(request, *args, **kwargs)
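# Illustrative usage (editor's addition). Assuming the two viewsets are exposed
# through a DRF router at /api/v1/message/ and /api/v1/user/ (the URL routing is
# not part of this module), the filters above behave like:
#   GET /api/v1/message/              -> messages sent or received by the requester
#   GET /api/v1/message/?target=alice -> only the conversation between the requester and 'alice'
#   GET /api/v1/user/                 -> every user except the requester, unpaginated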
| 38.264706
| 78
| 0.697925
|
eb71f518d0e6e1212af1b1ff5fc1e9ca22733669
| 1,747
|
py
|
Python
|
setup.py
|
tohanss/repobee-feedback
|
76a2333b5bc359675c69a495d8050843f2394a5a
|
[
"MIT"
] | null | null | null |
setup.py
|
tohanss/repobee-feedback
|
76a2333b5bc359675c69a495d8050843f2394a5a
|
[
"MIT"
] | null | null | null |
setup.py
|
tohanss/repobee-feedback
|
76a2333b5bc359675c69a495d8050843f2394a5a
|
[
"MIT"
] | null | null | null |
import re
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as f:
readme = f.read()
# parse the version instead of importing it to avoid dependency-related crashes
with open("repobee_feedback/__version.py", mode="r", encoding="utf-8") as f:
line = f.readline()
__version__ = line.split("=")[1].strip(" '\"\n")
assert re.match(r"^\d+(\.\d+){2}(-(alpha|beta|rc)(\.\d+)?)?$", __version__)
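# Illustrative note (editor's addition): the parsing above expects the first line
# of repobee_feedback/__version.py to be a single assignment such as
#   __version__ = "0.1.0"
# (the value shown is made up); the regex then enforces MAJOR.MINOR.PATCH with an
# optional -alpha/-beta/-rc pre-release suffix.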
test_requirements = ["pytest", "pytest-cov", "repobee", "codecov"]
required = ["repobee>=3.0.0-beta.1"]
setup(
name="repobee-feedback",
version=__version__,
description="A plugin that adds the issue-feedback command to RepoBee",
long_description=readme,
long_description_content_type="text/markdown",
author="Simon Larsén",
author_email="slarse@kth.se",
url="https://github.com/slarse/repobee-feedback",
download_url="https://github.com/"
"slarse"
"/repobee-feedback"
"/archive/v{}.tar.gz".format(__version__),
license="MIT",
packages=find_packages(exclude=("tests", "docs")),
tests_require=test_requirements,
install_requires=required,
extras_require=dict(TEST=test_requirements),
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Education",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
],
)
| 35.653061
| 79
| 0.650258
|
737f9c05959cf8f64c320b6cb5cfb447c8dc570b
| 17,739
|
py
|
Python
|
google/cloud/dialogflow_v2beta1/types/audio_config.py
|
rogers140/python-dialogflow
|
d9ce91f8590947736560727624fbc0846601ce1c
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/dialogflow_v2beta1/types/audio_config.py
|
rogers140/python-dialogflow
|
d9ce91f8590947736560727624fbc0846601ce1c
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/dialogflow_v2beta1/types/audio_config.py
|
rogers140/python-dialogflow
|
d9ce91f8590947736560727624fbc0846601ce1c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.v2beta1",
manifest={
"AudioEncoding",
"SpeechModelVariant",
"SsmlVoiceGender",
"OutputAudioEncoding",
"TelephonyDtmf",
"SpeechContext",
"SpeechWordInfo",
"InputAudioConfig",
"VoiceSelectionParams",
"SynthesizeSpeechConfig",
"OutputAudioConfig",
"TelephonyDtmfEvents",
"SpeechToTextConfig",
},
)
class AudioEncoding(proto.Enum):
r"""Audio encoding of the audio content sent in the conversational query
request. Refer to the `Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics>`__
for more details.
"""
AUDIO_ENCODING_UNSPECIFIED = 0
AUDIO_ENCODING_LINEAR_16 = 1
AUDIO_ENCODING_FLAC = 2
AUDIO_ENCODING_MULAW = 3
AUDIO_ENCODING_AMR = 4
AUDIO_ENCODING_AMR_WB = 5
AUDIO_ENCODING_OGG_OPUS = 6
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7
class SpeechModelVariant(proto.Enum):
r"""Variant of the specified [Speech
model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to
use.
See the `Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/enhanced-models>`__
for which models have different variants. For example, the
"phone_call" model has both a standard and an enhanced variant. When
you use an enhanced model, you will generally receive higher quality
results than for a standard model.
"""
SPEECH_MODEL_VARIANT_UNSPECIFIED = 0
USE_BEST_AVAILABLE = 1
USE_STANDARD = 2
USE_ENHANCED = 3
class SsmlVoiceGender(proto.Enum):
r"""Gender of the voice as described in `SSML voice
element <https://www.w3.org/TR/speech-synthesis11/#edef_voice>`__.
"""
SSML_VOICE_GENDER_UNSPECIFIED = 0
SSML_VOICE_GENDER_MALE = 1
SSML_VOICE_GENDER_FEMALE = 2
SSML_VOICE_GENDER_NEUTRAL = 3
class OutputAudioEncoding(proto.Enum):
r"""Audio encoding of the output audio format in Text-To-Speech."""
OUTPUT_AUDIO_ENCODING_UNSPECIFIED = 0
OUTPUT_AUDIO_ENCODING_LINEAR_16 = 1
OUTPUT_AUDIO_ENCODING_MP3 = 2
OUTPUT_AUDIO_ENCODING_MP3_64_KBPS = 4
OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3
OUTPUT_AUDIO_ENCODING_MULAW = 5
class TelephonyDtmf(proto.Enum):
r"""`DTMF <https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling>`__
digit in Telephony Gateway.
"""
TELEPHONY_DTMF_UNSPECIFIED = 0
DTMF_ONE = 1
DTMF_TWO = 2
DTMF_THREE = 3
DTMF_FOUR = 4
DTMF_FIVE = 5
DTMF_SIX = 6
DTMF_SEVEN = 7
DTMF_EIGHT = 8
DTMF_NINE = 9
DTMF_ZERO = 10
DTMF_A = 11
DTMF_B = 12
DTMF_C = 13
DTMF_D = 14
DTMF_STAR = 15
DTMF_POUND = 16
class SpeechContext(proto.Message):
r"""Hints for the speech recognizer to help with recognition in a
specific conversation state.
Attributes:
phrases (Sequence[str]):
Optional. A list of strings containing words and phrases
that the speech recognizer should recognize with higher
likelihood.
This list can be used to:
- improve accuracy for words and phrases you expect the
user to say, e.g. typical commands for your Dialogflow
agent
- add additional words to the speech recognizer vocabulary
- ...
See the `Cloud Speech
documentation <https://cloud.google.com/speech-to-text/quotas>`__
for usage limits.
boost (float):
Optional. Boost for this context compared to other contexts:
- If the boost is positive, Dialogflow will increase the
probability that the phrases in this context are
recognized over similar sounding phrases.
- If the boost is unspecified or non-positive, Dialogflow
will not apply any boost.
Dialogflow recommends that you use boosts in the range (0,
20] and that you find a value that fits your use case with
binary search.
"""
phrases = proto.RepeatedField(proto.STRING, number=1,)
boost = proto.Field(proto.FLOAT, number=2,)
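# Illustrative usage (editor's addition, not part of the generated code):
#   context = SpeechContext(
#       phrases=["order number", "account balance"],  # arbitrary example phrases
#       boost=10.0,                                   # within the recommended (0, 20]
#   )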
class SpeechWordInfo(proto.Message):
r"""Information for a word recognized by the speech recognizer.
Attributes:
word (str):
The word this info is for.
start_offset (google.protobuf.duration_pb2.Duration):
Time offset relative to the beginning of the
audio that corresponds to the start of the
spoken word. This is an experimental feature and
the accuracy of the time offset can vary.
end_offset (google.protobuf.duration_pb2.Duration):
Time offset relative to the beginning of the
audio that corresponds to the end of the spoken
word. This is an experimental feature and the
accuracy of the time offset can vary.
confidence (float):
The Speech confidence between 0.0 and 1.0 for
this word. A higher number indicates an
estimated greater likelihood that the recognized
word is correct. The default of 0.0 is a
sentinel value indicating that confidence was
not set.
This field is not guaranteed to be fully stable
over time for the same audio input. Users should
also not rely on it to always be provided.
"""
word = proto.Field(proto.STRING, number=3,)
start_offset = proto.Field(proto.MESSAGE, number=1, message=duration_pb2.Duration,)
end_offset = proto.Field(proto.MESSAGE, number=2, message=duration_pb2.Duration,)
confidence = proto.Field(proto.FLOAT, number=4,)
class InputAudioConfig(proto.Message):
r"""Instructs the speech recognizer on how to process the audio
content.
Attributes:
audio_encoding (google.cloud.dialogflow_v2beta1.types.AudioEncoding):
Required. Audio encoding of the audio content
to process.
sample_rate_hertz (int):
Required. Sample rate (in Hertz) of the audio content sent
in the query. Refer to `Cloud Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics>`__
for more details.
language_code (str):
Required. The language of the supplied audio. Dialogflow
does not do translations. See `Language
Support <https://cloud.google.com/dialogflow/docs/reference/language>`__
for a list of the currently supported language codes. Note
that queries in the same session do not necessarily need to
specify the same language.
enable_word_info (bool):
If ``true``, Dialogflow returns
[SpeechWordInfo][google.cloud.dialogflow.v2beta1.SpeechWordInfo]
in
[StreamingRecognitionResult][google.cloud.dialogflow.v2beta1.StreamingRecognitionResult]
with information about the recognized speech words, e.g.
start and end time offsets. If false or unspecified, Speech
doesn't return any word-level information.
phrase_hints (Sequence[str]):
A list of strings containing words and phrases that the
speech recognizer should recognize with higher likelihood.
See `the Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/basics#phrase-hints>`__
for more details.
This field is deprecated. Please use `speech_contexts <>`__
instead. If you specify both `phrase_hints <>`__ and
`speech_contexts <>`__, Dialogflow will treat the
`phrase_hints <>`__ as a single additional
`SpeechContext <>`__.
speech_contexts (Sequence[google.cloud.dialogflow_v2beta1.types.SpeechContext]):
Context information to assist speech recognition.
See `the Cloud Speech
documentation <https://cloud.google.com/speech-to-text/docs/basics#phrase-hints>`__
for more details.
model (str):
Which Speech model to select for the given request. Select
the model best suited to your domain to get best results. If
a model is not explicitly specified, then we auto-select a
model based on the parameters in the InputAudioConfig. If
enhanced speech model is enabled for the agent and an
enhanced version of the specified model for the language
does not exist, then the speech is recognized using the
standard version of the specified model. Refer to `Cloud
Speech API
documentation <https://cloud.google.com/speech-to-text/docs/basics#select-model>`__
for more details.
model_variant (google.cloud.dialogflow_v2beta1.types.SpeechModelVariant):
Which variant of the [Speech
model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model]
to use.
single_utterance (bool):
If ``false`` (default), recognition does not cease until the
client closes the stream. If ``true``, the recognizer will
detect a single spoken utterance in input audio. Recognition
ceases when it detects the audio's voice has stopped or
paused. In this case, once a detected intent is received,
the client should close the stream and start a new request
with a new stream as needed. Note: This setting is relevant
only for streaming methods. Note: When specified,
InputAudioConfig.single_utterance takes precedence over
StreamingDetectIntentRequest.single_utterance.
disable_no_speech_recognized_event (bool):
Only used in
[Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
and
[Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent].
If ``false`` and recognition doesn't return any result,
trigger ``NO_SPEECH_RECOGNIZED`` event to Dialogflow agent.
"""
audio_encoding = proto.Field(proto.ENUM, number=1, enum="AudioEncoding",)
sample_rate_hertz = proto.Field(proto.INT32, number=2,)
language_code = proto.Field(proto.STRING, number=3,)
enable_word_info = proto.Field(proto.BOOL, number=13,)
phrase_hints = proto.RepeatedField(proto.STRING, number=4,)
speech_contexts = proto.RepeatedField(
proto.MESSAGE, number=11, message="SpeechContext",
)
model = proto.Field(proto.STRING, number=7,)
model_variant = proto.Field(proto.ENUM, number=10, enum="SpeechModelVariant",)
single_utterance = proto.Field(proto.BOOL, number=8,)
disable_no_speech_recognized_event = proto.Field(proto.BOOL, number=14,)
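# Illustrative usage (editor's addition): a minimal config for 16 kHz LINEAR16
# audio in US English; all values are examples only.
#   config = InputAudioConfig(
#       audio_encoding=AudioEncoding.AUDIO_ENCODING_LINEAR_16,
#       sample_rate_hertz=16000,
#       language_code="en-US",
#       model_variant=SpeechModelVariant.USE_ENHANCED,
#   )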
class VoiceSelectionParams(proto.Message):
r"""Description of which voice to use for speech synthesis.
Attributes:
name (str):
Optional. The name of the voice. If not set, the service
will choose a voice based on the other parameters such as
language_code and
[ssml_gender][google.cloud.dialogflow.v2beta1.VoiceSelectionParams.ssml_gender].
For the list of available voices, please refer to `Supported
voices and
languages <https://cloud.google.com/text-to-speech/docs/voices>`__.
ssml_gender (google.cloud.dialogflow_v2beta1.types.SsmlVoiceGender):
Optional. The preferred gender of the voice. If not set, the
service will choose a voice based on the other parameters
such as language_code and
[name][google.cloud.dialogflow.v2beta1.VoiceSelectionParams.name].
            Note that this is only a preference, not a requirement. If a
voice of the appropriate gender is not available, the
synthesizer should substitute a voice with a different
gender rather than failing the request.
"""
name = proto.Field(proto.STRING, number=1,)
ssml_gender = proto.Field(proto.ENUM, number=2, enum="SsmlVoiceGender",)
class SynthesizeSpeechConfig(proto.Message):
r"""Configuration of how speech should be synthesized.
Attributes:
speaking_rate (float):
Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0
is the normal native speed supported by the specific voice.
2.0 is twice as fast, and 0.5 is half as fast. If
unset(0.0), defaults to the native 1.0 speed. Any other
values < 0.25 or > 4.0 will return an error.
pitch (float):
Optional. Speaking pitch, in the range [-20.0, 20.0]. 20
means increase 20 semitones from the original pitch. -20
means decrease 20 semitones from the original pitch.
volume_gain_db (float):
Optional. Volume gain (in dB) of the normal native volume
supported by the specific voice, in the range [-96.0, 16.0].
If unset, or set to a value of 0.0 (dB), will play at normal
native signal amplitude. A value of -6.0 (dB) will play at
approximately half the amplitude of the normal native signal
amplitude. A value of +6.0 (dB) will play at approximately
twice the amplitude of the normal native signal amplitude.
We strongly recommend not to exceed +10 (dB) as there's
usually no effective increase in loudness for any value
greater than that.
effects_profile_id (Sequence[str]):
Optional. An identifier which selects 'audio
effects' profiles that are applied on (post
synthesized) text to speech. Effects are applied
on top of each other in the order they are
given.
voice (google.cloud.dialogflow_v2beta1.types.VoiceSelectionParams):
Optional. The desired voice of the
synthesized audio.
"""
speaking_rate = proto.Field(proto.DOUBLE, number=1,)
pitch = proto.Field(proto.DOUBLE, number=2,)
volume_gain_db = proto.Field(proto.DOUBLE, number=3,)
effects_profile_id = proto.RepeatedField(proto.STRING, number=5,)
voice = proto.Field(proto.MESSAGE, number=4, message="VoiceSelectionParams",)
class OutputAudioConfig(proto.Message):
r"""Instructs the speech synthesizer how to generate the output
audio content. If this audio config is supplied in a request, it
overrides all existing text-to-speech settings applied to the
agent.
Attributes:
audio_encoding (google.cloud.dialogflow_v2beta1.types.OutputAudioEncoding):
Required. Audio encoding of the synthesized
audio content.
sample_rate_hertz (int):
The synthesis sample rate (in hertz) for this
audio. If not provided, then the synthesizer
will use the default sample rate based on the
audio encoding. If this is different from the
voice's natural sample rate, then the
synthesizer will honor this request by
converting to the desired sample rate (which
might result in worse audio quality).
synthesize_speech_config (google.cloud.dialogflow_v2beta1.types.SynthesizeSpeechConfig):
Configuration of how speech should be
synthesized.
"""
audio_encoding = proto.Field(proto.ENUM, number=1, enum="OutputAudioEncoding",)
sample_rate_hertz = proto.Field(proto.INT32, number=2,)
synthesize_speech_config = proto.Field(
proto.MESSAGE, number=3, message="SynthesizeSpeechConfig",
)
class TelephonyDtmfEvents(proto.Message):
r"""A wrapper of repeated TelephonyDtmf digits.
Attributes:
dtmf_events (Sequence[google.cloud.dialogflow_v2beta1.types.TelephonyDtmf]):
A sequence of TelephonyDtmf digits.
"""
dtmf_events = proto.RepeatedField(proto.ENUM, number=1, enum="TelephonyDtmf",)
class SpeechToTextConfig(proto.Message):
r"""Configures speech transcription for
[ConversationProfile][google.cloud.dialogflow.v2beta1.ConversationProfile].
Attributes:
speech_model_variant (google.cloud.dialogflow_v2beta1.types.SpeechModelVariant):
Optional. The speech model used in speech to text.
``SPEECH_MODEL_VARIANT_UNSPECIFIED``, ``USE_BEST_AVAILABLE``
will be treated as ``USE_ENHANCED``. It can be overridden in
[AnalyzeContentRequest][google.cloud.dialogflow.v2beta1.AnalyzeContentRequest]
and
[StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest]
request.
"""
speech_model_variant = proto.Field(proto.ENUM, number=1, enum="SpeechModelVariant",)
__all__ = tuple(sorted(__protobuf__.manifest))
| 42.539568
| 121
| 0.671797
|
c5cf5fe2b4c21f10e306a9a3f119e125e3cd5822
| 1,261
|
py
|
Python
|
myCatkin/src/joystick_drivers/wiimote/src/wiimote/wiimoteExceptions.py
|
sbow/scratch
|
8ac5cd772c8f6c3def6d25ad0402c3f973af2fae
|
[
"MIT"
] | 3
|
2021-01-10T10:52:14.000Z
|
2021-12-31T10:19:25.000Z
|
src/joystick_drivers/wiimote/src/wiimote/wiimoteExceptions.py
|
EveVengerov/Gesture-Controlling-Drone
|
8fe38dbfdc496472e13e76bcdb55b471f51b42ea
|
[
"MIT"
] | null | null | null |
src/joystick_drivers/wiimote/src/wiimote/wiimoteExceptions.py
|
EveVengerov/Gesture-Controlling-Drone
|
8fe38dbfdc496472e13e76bcdb55b471f51b42ea
|
[
"MIT"
] | 1
|
2021-02-04T04:59:32.000Z
|
2021-02-04T04:59:32.000Z
|
# ###############################################################################
#
# File: wiimoteExceptions.py
# RCS: $Header: $
# Description: Exception Classes for Wiimote Controller
# Author: Andreas Paepcke
# Created: Thu Aug 13 09:01:17 2009
# Modified: Mon Aug 17 11:27:02 2009 (Andreas Paepcke) paepcke@anw.willowgarage.com
# Language: Python
# Package: N/A
# Status: Experimental (Do Not Distribute)
#
# ###############################################################################
class WiimoteError(Exception):
"""Mother of all Wiimote exceptions"""
errMsg = None
def __init__(self, theErrMsg):
self.errMsg = theErrMsg
def __str__(self):
return self.errMsg
class WiimoteNotFoundError(WiimoteError):
"""Tried to pair but failed."""
class WiimoteEnableError(WiimoteError):
"""Found wiimote, but couldn't enable it."""
class CallbackStackMultInstError(WiimoteError):
"""Code attempted to create a second callback stack instance."""
class ResumeNonPausedError(WiimoteError):
"""Code attempted to resume callbacks without first pausing."""
class CallbackStackEmptyError(WiimoteError):
    """Attempt to operate on an empty callback stack."""
| 27.413043
| 87
| 0.601903
|
47af7eb1f277ec6dec1a39b84afb2cf584ab144d
| 3,517
|
py
|
Python
|
clinical_concept_extraction/model.py
|
sdoerstling/clinical_concept_extraction
|
cf6dcde864856462c0e3f9d0c0fc133792b51f6c
|
[
"MIT"
] | null | null | null |
clinical_concept_extraction/model.py
|
sdoerstling/clinical_concept_extraction
|
cf6dcde864856462c0e3f9d0c0fc133792b51f6c
|
[
"MIT"
] | null | null | null |
clinical_concept_extraction/model.py
|
sdoerstling/clinical_concept_extraction
|
cf6dcde864856462c0e3f9d0c0fc133792b51f6c
|
[
"MIT"
] | null | null | null |
import os
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
flags.DEFINE_float('dropout_rate', 0.5, 'the dropout rate of the CNN or RNN')
flags.DEFINE_string('rnn_cell_type', 'lstm', 'Type of RNN cell used')
flags.DEFINE_integer('hidden_state', 256, 'Number of hidden state')
flags.DEFINE_integer('depth', 2, 'Depth of rnn models')
def bidirectional_rnn_func(x, l, train=True):
rnn_type = FLAGS.rnn_cell_type
if rnn_type.lower() == 'lstm':
rnn_func = tf.nn.rnn_cell.BasicLSTMCell
elif rnn_type.lower() == 'simplernn':
rnn_func = tf.nn.rnn_cell.BasicRNNCell
elif rnn_type.lower() == 'gru':
rnn_func = tf.nn.rnn_cell.GRUCell
else:
raise TypeError
all_fw_cells = []
all_bw_cells = []
for _ in range(FLAGS.depth):
fw_cell = rnn_func(num_units=FLAGS.hidden_state)
bw_cell = rnn_func(num_units=FLAGS.hidden_state)
        if train:
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, state_keep_prob=FLAGS.dropout_rate)
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, state_keep_prob=FLAGS.dropout_rate)
        all_fw_cells.append(fw_cell)
        all_bw_cells.append(bw_cell)
rnn_fw_cells = tf.nn.rnn_cell.MultiRNNCell(all_fw_cells)
rnn_bw_cells = tf.nn.rnn_cell.MultiRNNCell(all_bw_cells)
rnn_layer, _ = tf.nn.bidirectional_dynamic_rnn(
rnn_fw_cells, rnn_bw_cells, x, sequence_length=l, dtype=tf.float32)
rnn_output = tf.concat(rnn_layer, axis=-1)
return rnn_output
def bidirectional_lstm_func_freeze(x, l):
all_fw_cells = []
all_bw_cells = []
for _ in range(2):
fw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=256)
bw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=256)
all_fw_cells.append(fw_cell)
all_bw_cells.append(bw_cell)
rnn_fw_cells = tf.nn.rnn_cell.MultiRNNCell(all_fw_cells)
rnn_bw_cells = tf.nn.rnn_cell.MultiRNNCell(all_bw_cells)
rnn_layer, _ = tf.nn.bidirectional_dynamic_rnn(
rnn_fw_cells, rnn_bw_cells, x, sequence_length=l, dtype=tf.float32)
rnn_output = tf.concat(rnn_layer, axis=-1)
return rnn_output
def annotation_ensemble(x, l, scope='clinical_concept_extraction'):
with tf.variable_scope(scope):
l = tf.cast(l, tf.int32)
all_prediction = []
for model_id in range(10):
with tf.variable_scope('copy_' + str(model_id)):
weight = tf.get_variable('weight', [3, 1], tf.float32, tf.constant_initializer(1))
n_weight = tf.nn.softmax(weight, axis=0)
gamma = tf.get_variable('gamma', [], tf.float32, tf.constant_initializer(1))
token_embedding = tf.tensordot(x, n_weight, [[-1], [0]])
token_embedding = gamma * tf.squeeze(token_embedding, axis=-1)
lstm_output = bidirectional_lstm_func_freeze(token_embedding, l)
logits = tf.layers.dense(lstm_output, 7, activation=None)
transition = tf.get_variable('transitions', shape=[7, 7], dtype=tf.float32)
viterbi_sequence, viterbi_score = tf.contrib.crf.crf_decode(logits, transition, l)
prediction = tf.cast(viterbi_sequence, tf.int32)
all_prediction.append(prediction)
all_prediction = tf.stack(all_prediction, axis=-1)
return all_prediction,
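# Illustrative graph-construction sketch (editor's addition). The embedding
# width of 1024 is an assumption (ELMo-sized); only the trailing dimension of 3
# (the three layers mixed by `weight`) is fixed by the code above.
#   x = tf.placeholder(tf.float32, [None, None, 1024, 3])  # [batch, time, emb, layers]
#   l = tf.placeholder(tf.int32, [None])                    # sequence lengths
#   predictions, = annotation_ensemble(x, l)                # note the 1-tuple return value
#   # predictions: int32 tensor of shape [batch, time, 10], one tag column per ensemble copy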
| 35.887755
| 98
| 0.667899
|
2195686ee9c7f6b0a7d1121fa8efe33cf3d6cf31
| 4,982
|
py
|
Python
|
pandas/tests/groupby/test_bin_groupby.py
|
mtrbean/pandas
|
c0ff67a22df9c18da1172766e313732ed2ab6c30
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-02-18T00:47:14.000Z
|
2019-02-18T00:47:14.000Z
|
pandas/tests/groupby/test_bin_groupby.py
|
mtrbean/pandas
|
c0ff67a22df9c18da1172766e313732ed2ab6c30
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-08-18T16:00:45.000Z
|
2019-08-18T16:00:45.000Z
|
pandas/tests/groupby/test_bin_groupby.py
|
mtrbean/pandas
|
c0ff67a22df9c18da1172766e313732ed2ab6c30
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-11-17T10:02:40.000Z
|
2020-11-17T10:02:40.000Z
|
import numpy as np
from numpy import nan
import pytest
from pandas._libs import groupby, lib, reduction
from pandas.core.dtypes.common import ensure_int64
from pandas import Index, Series, isna
from pandas.core.groupby.ops import generate_bins_generic
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
def test_series_grouper():
obj = Series(np.random.randn(10))
dummy = obj[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
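    # Note (editor's addition): label -1 marks rows outside any group, so only
    # obj[3:6] (label 0) and obj[6:] (label 1) contribute to the means asserted below.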
grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
def test_series_bin_grouper():
obj = Series(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
class TestBinGroupers:
def setup_method(self, method):
self.obj = np.random.randn(10, 1)
self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
self.bins = np.array([3, 6], dtype=np.int64)
def test_generate_bins(self):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6, 9], dtype=np.int64)
for func in [lib.generate_bins_dt64, generate_bins_generic]:
bins = func(values, binner, closed="left")
assert (bins == np.array([2, 5, 6])).all()
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6, 6])).all()
for func in [lib.generate_bins_dt64, generate_bins_generic]:
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6], dtype=np.int64)
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6])).all()
msg = "Invalid length for values or for binner"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [], "right")
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values[:0], binner, "right")
msg = "Values falls before first bin"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [4], "right")
msg = "Values falls after last bin"
with pytest.raises(ValueError, match=msg):
generate_bins_generic(values, [-3, -1], "right")
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(len(out), dtype=np.int64)
labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = getattr(groupby, "group_ohlc_{dtype}".format(dtype=dtype))
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if isna(group).all():
return np.repeat(nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = nan
func(out, counts, obj[:, None], labels)
expected[0] = nan
assert_almost_equal(out, expected)
_check("float32")
_check("float64")
class TestMoments:
pass
class TestReducer:
def test_int_index(self):
arr = np.random.randn(100, 4)
result = reduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
result = reduction.compute_reduction(
arr, np.sum, axis=1, labels=Index(np.arange(100))
)
expected = arr.sum(1)
assert_almost_equal(result, expected)
dummy = Series(0.0, index=np.arange(100))
result = reduction.compute_reduction(
arr, np.sum, dummy=dummy, labels=Index(np.arange(4))
)
expected = arr.sum(0)
assert_almost_equal(result, expected)
dummy = Series(0.0, index=np.arange(4))
result = reduction.compute_reduction(
arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
)
expected = arr.sum(1)
assert_almost_equal(result, expected)
result = reduction.compute_reduction(
arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
)
assert_almost_equal(result, expected)
| 32.993377
| 85
| 0.613408
|
21cac8447a322467c292896d4a9a422c8ac6de7b
| 72
|
py
|
Python
|
setup.py
|
MathOnco/valis
|
1ec9a88f21930d82fa5e823276ff78ff8b4f3a5a
|
[
"MIT"
] | 4
|
2022-03-14T12:55:42.000Z
|
2022-03-17T10:06:29.000Z
|
setup.py
|
MathOnco/valis
|
1ec9a88f21930d82fa5e823276ff78ff8b4f3a5a
|
[
"MIT"
] | null | null | null |
setup.py
|
MathOnco/valis
|
1ec9a88f21930d82fa5e823276ff78ff8b4f3a5a
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
from setuptools import setup
setup()
| 18
| 32
| 0.666667
|
8a75a115eba7099b7a82ed4713f501a6da6ee15d
| 1,734
|
py
|
Python
|
doc/source/EXAMPLES/mu_labelsspatial.py
|
kapteyn-astro/kapteyn
|
f12332cfd567c7c0da40628dcfc7b297971ee636
|
[
"BSD-3-Clause"
] | 3
|
2016-04-28T08:55:33.000Z
|
2018-07-23T18:35:58.000Z
|
doc/source/EXAMPLES/mu_labelsspatial.py
|
kapteyn-astro/kapteyn
|
f12332cfd567c7c0da40628dcfc7b297971ee636
|
[
"BSD-3-Clause"
] | 2
|
2020-07-23T12:28:37.000Z
|
2021-07-13T18:26:06.000Z
|
doc/source/EXAMPLES/mu_labelsspatial.py
|
kapteyn-astro/kapteyn
|
f12332cfd567c7c0da40628dcfc7b297971ee636
|
[
"BSD-3-Clause"
] | 3
|
2017-05-03T14:01:08.000Z
|
2020-07-23T12:23:28.000Z
|
from kapteyn import maputils
from matplotlib import pylab as plt
header = { 'NAXIS' : 3,
'BUNIT' : 'w.u.',
'CDELT1' : -1.200000000000E-03,
'CDELT2' : 1.497160000000E-03, 'CDELT3' : 97647.745732,
'CRPIX1' : 5, 'CRPIX2' : 6, 'CRPIX3' : 32,
'CRVAL1' : 1.787792000000E+02, 'CRVAL2' : 5.365500000000E+01,
'CRVAL3' : 1378471216.4292786,
'CTYPE1' : 'RA---NCP', 'CTYPE2' : 'DEC--NCP', 'CTYPE3' : 'FREQ-OHEL',
'CUNIT1' : 'DEGREE', 'CUNIT2' : 'DEGREE', 'CUNIT3' : 'HZ',
'DRVAL3' : 1.050000000000E+03,
'DUNIT3' : 'KM/S',
'FREQ0' : 1.420405752e+9,
'INSTRUME' : 'WSRT',
'NAXIS1' : 100, 'NAXIS2' : 100, 'NAXIS3' : 64
}
fig = plt.figure(figsize=(7,7))
fig.suptitle("Axis labels for spatial maps", fontsize=14, color='r')
fig.subplots_adjust(left=0.18, bottom=0.10, right=0.90,
top=0.90, wspace=0.95, hspace=0.20)
frame = fig.add_subplot(2,2,1)
f = maputils.FITSimage(externalheader=header)
f.set_imageaxes(1,2)
annim = f.Annotatedimage(frame)
# Default labeling
grat = annim.Graticule()
frame = fig.add_subplot(2,2,2)
annim = f.Annotatedimage(frame)
# Plot labels with start position and increment
grat = annim.Graticule(startx='11h55m', deltax="15 hmssec", deltay="3 arcmin")
frame = fig.add_subplot(2,2,3)
annim = f.Annotatedimage(frame)
# Plot labels in string only
grat = annim.Graticule(startx='11h55m 11h54m30s')
grat.setp_tick(plotaxis="bottom", texsexa=False)
frame = fig.add_subplot(2,2,4)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(startx="178.75 deg", deltax="6 arcmin", unitsx="degree")
grat.setp_ticklabel(plotaxis="left", fmt="s")
maputils.showall()
| 36.125
| 80
| 0.630911
|
4a29261557a3c4928a0e1cc07159b183139e6a53
| 12,542
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-link/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-link",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-link/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-link",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
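# A brief, hedged usage note: as with any pyangbind binding, the (second, shadowing)
# unknown_tlv class can be instantiated directly; `state` is a read-only property over
# the auto-created container, and `_path()` returns the YANG path listed above, e.g.:
#
#   tlv = unknown_tlv()
#   tlv._path()   # ['network-instances', ..., 'tlvs', 'tlv', 'unknown-tlv']
#   tlv.state     # the `state.state` container (config: false)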
| 37.550898
| 375
| 0.593047
|
28db7498ebb18daf5a716b9191f39edfcf1144fa
| 67
|
py
|
Python
|
tests/components_tests/__init__.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | 2
|
2021-09-13T01:44:09.000Z
|
2021-12-11T11:56:49.000Z
|
tests/components_tests/__init__.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | 8
|
2020-11-13T18:37:30.000Z
|
2022-02-15T15:11:51.000Z
|
tests/components_tests/__init__.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | null | null | null |
from . import data_modules_tests
__all__ = ["data_modules_tests"]
| 16.75
| 32
| 0.791045
|
a2171b216369ecb933adefa23b5760cfffdc415c
| 3,312
|
py
|
Python
|
server/url.py
|
microsoft/OneLabeler
|
316175e98a1cba72d651567d9fac08fc6c7bdf4f
|
[
"MIT"
] | 8
|
2022-03-26T17:45:01.000Z
|
2022-03-30T14:12:20.000Z
|
server/url.py
|
microsoft/OneLabeler
|
316175e98a1cba72d651567d9fac08fc6c7bdf4f
|
[
"MIT"
] | null | null | null |
server/url.py
|
microsoft/OneLabeler
|
316175e98a1cba72d651567d9fac08fc6c7bdf4f
|
[
"MIT"
] | 1
|
2022-03-27T06:11:24.000Z
|
2022-03-27T06:11:24.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from tornado.web import RequestHandler
from handlers import (CompileHandler,
ModelTrainingHandler,
ImageProcessingHandler)
# Data object selection handlers:
from handlers.data_object_selection.cluster_centroids import Handler as DOSClusterCentroids
from handlers.data_object_selection.cluster import Handler as DOSCluster
from handlers.data_object_selection.dense_areas import Handler as DOSDenseAreas
from handlers.data_object_selection.entropy import Handler as DOSEntropy
from handlers.data_object_selection.least_confident import Handler as DOSLeastConfident
from handlers.data_object_selection.random import Handler as DOSRandom
from handlers.data_object_selection.smallest_margin import Handler as DOSSmallestMargin
# Default labeling handlers:
from handlers.default_labeling.model_prediction import Handler as DLModelPrediction
from handlers.default_labeling.null import Handler as DLNull
from handlers.default_labeling.pos_tagging import Handler as DLPosTagging
from handlers.default_labeling.random import Handler as DLRandom
# Feature extraction handlers:
from handlers.feature_extraction.image_bow import Handler as FEImageBow
from handlers.feature_extraction.image_lda import Handler as FEImageLda
from handlers.feature_extraction.image_svd import Handler as FEImageSvd
from handlers.feature_extraction.text_nmf import Handler as FETextNmf
# Projection handlers:
from handlers.projection.mds import Handler as ProjectionMDS
from handlers.projection.pca import Handler as ProjectionPCA
from handlers.projection.tsne import Handler as ProjectionTSNE
class RoundtripHandler(RequestHandler):
def post(self):
self.set_header('Access-Control-Allow-Origin', '*')
url = [
# request for compiled exe package
(r'/compile/(.*)', CompileHandler),
# request for image processing algorithms
(r'/imgproc/(.*)', ImageProcessingHandler),
# request for features computed with features extraction algorithms
(r'/features/image/BoW', FEImageBow),
(r'/features/image/LDA', FEImageLda),
(r'/features/image/SVD', FEImageSvd),
(r'/features/text/NMF', FETextNmf),
# request for selection computed with data object selection algorithms
(r'/selection/ClusterCentroids', DOSClusterCentroids),
(r'/selection/Cluster', DOSCluster),
(r'/selection/DenseAreas', DOSDenseAreas),
(r'/selection/Entropy', DOSEntropy),
(r'/selection/LeastConfident', DOSLeastConfident),
(r'/selection/Random', DOSRandom),
(r'/selection/SmallestMargin', DOSSmallestMargin),
# request for default labels computed with default labeling algorithms
(r'/defaultLabels/ModelPrediction', DLModelPrediction),
(r'/defaultLabels/Null', DLNull),
(r'/defaultLabels/PosTagging', DLPosTagging),
(r'/defaultLabels/Random', DLRandom),
# request for projection computed with dimension reduction algorithms
(r'/projection/MDS', ProjectionMDS),
(r'/projection/PCA', ProjectionPCA),
(r'/projection/TSNE', ProjectionTSNE),
# request for updated model computed with interim model training algorithms
(r'/modelUpdated/(.*)', ModelTrainingHandler),
# request for roundtrip testing
(r'/roundtrip', RoundtripHandler),
]
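# A minimal serving sketch: the `url` table above is a plain tornado routing spec,
# so it can be mounted on a tornado Application directly. The port number below is
# an illustrative assumption, not part of this module's configuration:
if __name__ == '__main__':
    import tornado.ioloop
    import tornado.web

    app = tornado.web.Application(url)
    app.listen(8005)
    tornado.ioloop.IOLoop.current().start()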
| 43.578947
| 91
| 0.780193
|
9287e6e71705c10f85d0c94440f66390e77140e7
| 28,857
|
py
|
Python
|
kopf/reactor/registries.py
|
tinyzimmer/kopf
|
74c42a2acdf2a72446d290fa1f27b53ec5d43218
|
[
"MIT"
] | null | null | null |
kopf/reactor/registries.py
|
tinyzimmer/kopf
|
74c42a2acdf2a72446d290fa1f27b53ec5d43218
|
[
"MIT"
] | null | null | null |
kopf/reactor/registries.py
|
tinyzimmer/kopf
|
74c42a2acdf2a72446d290fa1f27b53ec5d43218
|
[
"MIT"
] | null | null | null |
"""
A registry of the handlers, attached to the resources or events.
The global registry is populated by the `kopf.on` decorators, and is used
to register the resources being watched and handled, and to attach
the handlers to the specific causes (create/update/delete/field-change).
The simple registry is part of the global registry (for each individual
resource), and also used for the sub-handlers within a top-level handler.
Both are used in the `kopf.reactor.handling` to retrieve the list
of the handlers to be executed on each reaction cycle.
"""
import abc
import collections
import functools
import warnings
from types import FunctionType, MethodType
from typing import (Any, MutableMapping, Optional, Sequence, Iterable, Iterator,
List, Set, FrozenSet, Mapping, Callable, cast, Generic, TypeVar, Union,
Container)
from kopf.reactor import causation
from kopf.reactor import invocation
from kopf.structs import callbacks
from kopf.structs import dicts
from kopf.structs import diffs
from kopf.structs import filters
from kopf.structs import handlers
from kopf.structs import resources as resources_
from kopf.utilities import piggybacking
# We only type-check for known classes of handlers/callbacks, and ignore any custom subclasses.
CauseT = TypeVar('CauseT', bound=causation.BaseCause)
HandlerT = TypeVar('HandlerT', bound=handlers.BaseHandler)
ResourceHandlerT = TypeVar('ResourceHandlerT', bound=handlers.ResourceHandler)
HandlerFnT = TypeVar('HandlerFnT',
callbacks.ActivityFn,
callbacks.ResourceWatchingFn,
callbacks.ResourceSpawningFn,
callbacks.ResourceChangingFn,
Union[callbacks.ResourceWatchingFn, callbacks.ResourceChangingFn]) # DEPRECATED: for legacy_registries
class GenericRegistry(Generic[HandlerFnT, HandlerT]):
""" A generic base class of a simple registry (with no handler getters). """
_handlers: List[HandlerT]
def __init__(self) -> None:
super().__init__()
self._handlers = []
def __bool__(self) -> bool:
return bool(self._handlers)
def append(self, handler: HandlerT) -> None:
self._handlers.append(handler)
class ActivityRegistry(GenericRegistry[
callbacks.ActivityFn,
handlers.ActivityHandler]):
def register(
self,
fn: callbacks.ActivityFn,
*,
id: Optional[str] = None,
errors: Optional[handlers.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
activity: Optional[handlers.Activity] = None,
_fallback: bool = False,
) -> callbacks.ActivityFn:
warnings.warn("registry.register() is deprecated; "
"use @kopf.on... decorators with registry= kwarg.",
DeprecationWarning)
real_id = generate_id(fn=fn, id=id)
handler = handlers.ActivityHandler(
id=real_id, fn=fn, activity=activity,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
_fallback=_fallback,
)
self.append(handler)
return fn
def get_handlers(
self,
activity: handlers.Activity,
) -> Sequence[handlers.ActivityHandler]:
return list(_deduplicated(self.iter_handlers(activity=activity)))
def iter_handlers(
self,
activity: handlers.Activity,
) -> Iterator[handlers.ActivityHandler]:
found: bool = False
# Regular handlers go first.
for handler in self._handlers:
if handler.activity is None or handler.activity == activity and not handler._fallback:
yield handler
found = True
# Fallback handlers -- only if there were no matching regular handlers.
if not found:
for handler in self._handlers:
if handler.activity is None or handler.activity == activity and handler._fallback:
yield handler
class ResourceRegistry(
Generic[CauseT, HandlerFnT, ResourceHandlerT],
GenericRegistry[HandlerFnT, ResourceHandlerT]):
def get_handlers(
self,
cause: CauseT,
excluded: Container[handlers.HandlerId] = frozenset(),
) -> Sequence[ResourceHandlerT]:
return list(_deduplicated(self.iter_handlers(cause=cause, excluded=excluded)))
@abc.abstractmethod
def iter_handlers(
self,
cause: CauseT,
excluded: Container[handlers.HandlerId] = frozenset(),
) -> Iterator[ResourceHandlerT]:
raise NotImplementedError
def get_extra_fields(
self,
) -> Set[dicts.FieldPath]:
return set(self.iter_extra_fields())
def iter_extra_fields(
self,
) -> Iterator[dicts.FieldPath]:
for handler in self._handlers:
if handler.field:
yield handler.field
def requires_finalizer(
self,
cause: causation.ResourceCause,
excluded: Container[handlers.HandlerId] = frozenset(),
) -> bool:
"""
Check whether a finalizer should be added to the given resource or not.
"""
# check whether the body matches a deletion handler
for handler in self._handlers:
if handler.id not in excluded:
if handler.requires_finalizer and match(handler=handler, cause=cause):
return True
return False
class ResourceWatchingRegistry(ResourceRegistry[
causation.ResourceWatchingCause,
callbacks.ResourceWatchingFn,
handlers.ResourceWatchingHandler]):
def register(
self,
fn: callbacks.ResourceWatchingFn,
*,
id: Optional[str] = None,
errors: Optional[handlers.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
labels: Optional[filters.MetaFilter] = None,
annotations: Optional[filters.MetaFilter] = None,
when: Optional[callbacks.WhenFilterFn] = None,
) -> callbacks.ResourceWatchingFn:
warnings.warn("registry.register() is deprecated; "
"use @kopf.on... decorators with registry= kwarg.",
DeprecationWarning)
real_id = generate_id(fn=fn, id=id)
handler = handlers.ResourceWatchingHandler(
id=real_id, fn=fn,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
labels=labels, annotations=annotations, when=when,
)
self.append(handler)
return fn
def iter_handlers(
self,
cause: causation.ResourceWatchingCause,
excluded: Container[handlers.HandlerId] = frozenset(),
) -> Iterator[handlers.ResourceWatchingHandler]:
for handler in self._handlers:
if handler.id not in excluded:
if match(handler=handler, cause=cause, ignore_fields=True):
yield handler
class ResourceSpawningRegistry(ResourceRegistry[
causation.ResourceSpawningCause,
callbacks.ResourceSpawningFn,
handlers.ResourceSpawningHandler]):
@abc.abstractmethod
def iter_handlers(
self,
cause: causation.ResourceSpawningCause,
excluded: Container[handlers.HandlerId] = frozenset(),
) -> Iterator[handlers.ResourceSpawningHandler]:
for handler in self._handlers:
if handler.id not in excluded:
if match(handler=handler, cause=cause):
yield handler
class ResourceChangingRegistry(ResourceRegistry[
causation.ResourceChangingCause,
callbacks.ResourceChangingFn,
handlers.ResourceChangingHandler]):
def register(
self,
fn: callbacks.ResourceChangingFn,
*,
id: Optional[str] = None,
reason: Optional[handlers.Reason] = None,
event: Optional[str] = None, # deprecated, use `reason`
field: Optional[dicts.FieldSpec] = None,
errors: Optional[handlers.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
initial: Optional[bool] = None,
deleted: Optional[bool] = None,
requires_finalizer: bool = False,
labels: Optional[filters.MetaFilter] = None,
annotations: Optional[filters.MetaFilter] = None,
when: Optional[callbacks.WhenFilterFn] = None,
) -> callbacks.ResourceChangingFn:
warnings.warn("registry.register() is deprecated; "
"use @kopf.on... decorators with registry= kwarg.",
DeprecationWarning)
if reason is None and event is not None:
reason = handlers.Reason(event)
real_field = dicts.parse_field(field) or None # to not store tuple() as a no-field case.
real_id = generate_id(fn=fn, id=id, suffix=".".join(real_field or []))
handler = handlers.ResourceChangingHandler(
id=real_id, fn=fn, reason=reason, field=real_field,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
initial=initial, deleted=deleted, requires_finalizer=requires_finalizer,
labels=labels, annotations=annotations, when=when,
)
self.append(handler)
return fn
def iter_handlers(
self,
cause: causation.ResourceChangingCause,
excluded: Container[handlers.HandlerId] = frozenset(),
) -> Iterator[handlers.ResourceChangingHandler]:
for handler in self._handlers:
if handler.id not in excluded:
if handler.reason is None or handler.reason == cause.reason:
if handler.initial and not cause.initial:
pass # skip initial handlers in non-initial causes.
elif handler.initial and cause.deleted and not handler.deleted:
pass # skip initial handlers on deletion, unless explicitly marked as used.
elif match(handler=handler, cause=cause):
yield handler
class OperatorRegistry:
"""
A global registry is used for handling of multiple resources & activities.
It is usually populated by the ``@kopf.on...`` decorators, but can also
be explicitly created and used in the embedded operators.
"""
activity_handlers: ActivityRegistry
resource_watching_handlers: MutableMapping[resources_.Resource, ResourceWatchingRegistry]
resource_spawning_handlers: MutableMapping[resources_.Resource, ResourceSpawningRegistry]
resource_changing_handlers: MutableMapping[resources_.Resource, ResourceChangingRegistry]
def __init__(self) -> None:
super().__init__()
self.activity_handlers = ActivityRegistry()
self.resource_watching_handlers = collections.defaultdict(ResourceWatchingRegistry)
self.resource_spawning_handlers = collections.defaultdict(ResourceSpawningRegistry)
self.resource_changing_handlers = collections.defaultdict(ResourceChangingRegistry)
@property
def resources(self) -> FrozenSet[resources_.Resource]:
""" All known resources in the registry. """
return (frozenset(self.resource_watching_handlers) |
frozenset(self.resource_spawning_handlers) |
frozenset(self.resource_changing_handlers))
#
# Everything below is deprecated and will be removed in the next major release.
#
def register_activity_handler(
self,
fn: callbacks.ActivityFn,
*,
id: Optional[str] = None,
errors: Optional[handlers.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
activity: Optional[handlers.Activity] = None,
_fallback: bool = False,
) -> callbacks.ActivityFn:
warnings.warn("registry.register_activity_handler() is deprecated; "
"use @kopf.on... decorators with registry= kwarg.",
DeprecationWarning)
return self.activity_handlers.register(
fn=fn, id=id, activity=activity,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
_fallback=_fallback,
)
def register_resource_watching_handler(
self,
group: str,
version: str,
plural: str,
fn: callbacks.ResourceWatchingFn,
id: Optional[str] = None,
labels: Optional[filters.MetaFilter] = None,
annotations: Optional[filters.MetaFilter] = None,
when: Optional[callbacks.WhenFilterFn] = None,
) -> callbacks.ResourceWatchingFn:
"""
Register an additional handler function for low-level events.
"""
warnings.warn("registry.register_resource_watching_handler() is deprecated; "
"use @kopf.on... decorators with registry= kwarg.",
DeprecationWarning)
resource = resources_.Resource(group, version, plural)
return self.resource_watching_handlers[resource].register(
fn=fn, id=id,
labels=labels, annotations=annotations, when=when,
)
def register_resource_changing_handler(
self,
group: str,
version: str,
plural: str,
fn: callbacks.ResourceChangingFn,
id: Optional[str] = None,
reason: Optional[handlers.Reason] = None,
event: Optional[str] = None, # deprecated, use `reason`
field: Optional[dicts.FieldSpec] = None,
errors: Optional[handlers.ErrorsMode] = None,
timeout: Optional[float] = None,
retries: Optional[int] = None,
backoff: Optional[float] = None,
cooldown: Optional[float] = None, # deprecated, use `backoff`
initial: Optional[bool] = None,
deleted: Optional[bool] = None,
requires_finalizer: bool = False,
labels: Optional[filters.MetaFilter] = None,
annotations: Optional[filters.MetaFilter] = None,
when: Optional[callbacks.WhenFilterFn] = None,
) -> callbacks.ResourceChangingFn:
"""
Register an additional handler function for the specific resource and specific reason.
"""
warnings.warn("registry.register_resource_changing_handler() is deprecated; "
"use @kopf.on... decorators with registry= kwarg.",
DeprecationWarning)
resource = resources_.Resource(group, version, plural)
return self.resource_changing_handlers[resource].register(
reason=reason, event=event, field=field, fn=fn, id=id,
errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,
initial=initial, deleted=deleted, requires_finalizer=requires_finalizer,
labels=labels, annotations=annotations, when=when,
)
def has_activity_handlers(
self,
) -> bool:
warnings.warn("registry.has_activity_handlers() is deprecated; "
"use registry.activity_handlers directly.",
DeprecationWarning)
return bool(self.activity_handlers)
def has_resource_watching_handlers(
self,
resource: resources_.Resource,
) -> bool:
warnings.warn("registry.has_resource_watching_handlers() is deprecated; "
"use registry.resource_watching_handlers[resource] directly.",
DeprecationWarning)
return bool(self.resource_watching_handlers[resource])
def has_resource_changing_handlers(
self,
resource: resources_.Resource,
) -> bool:
warnings.warn("registry.has_resource_changing_handlers() is deprecated; "
"use registry.resource_changing_handlers[resource] directly.",
DeprecationWarning)
return bool(self.resource_changing_handlers[resource])
def get_activity_handlers(
self,
*,
activity: handlers.Activity,
) -> Sequence[handlers.ActivityHandler]:
warnings.warn("registry.get_activity_handlers() is deprecated; "
"use registry.activity_handlers.get_handlers().",
DeprecationWarning)
return self.activity_handlers.get_handlers(activity=activity)
def get_resource_watching_handlers(
self,
cause: causation.ResourceWatchingCause,
) -> Sequence[handlers.ResourceWatchingHandler]:
warnings.warn("registry.get_resource_watching_handlers() is deprecated; "
"use registry.resource_watching_handlers[resource].get_handlers().",
DeprecationWarning)
return self.resource_watching_handlers[cause.resource].get_handlers(cause=cause)
def get_resource_changing_handlers(
self,
cause: causation.ResourceChangingCause,
) -> Sequence[handlers.ResourceChangingHandler]:
warnings.warn("registry.get_resource_changing_handlers() is deprecated; "
"use registry.resource_changing_handlers[resource].get_handlers().",
DeprecationWarning)
return self.resource_changing_handlers[cause.resource].get_handlers(cause=cause)
def iter_activity_handlers(
self,
*,
activity: handlers.Activity,
) -> Iterator[handlers.ActivityHandler]:
warnings.warn("registry.iter_activity_handlers() is deprecated; "
"use registry.activity_handlers.iter_handlers().",
DeprecationWarning)
yield from self.activity_handlers.iter_handlers(activity=activity)
def iter_resource_watching_handlers(
self,
cause: causation.ResourceWatchingCause,
) -> Iterator[handlers.ResourceWatchingHandler]:
"""
Iterate all handlers for the low-level events.
"""
warnings.warn("registry.iter_resource_watching_handlers() is deprecated; "
"use registry.resource_watching_handlers[resource].iter_handlers().",
DeprecationWarning)
yield from self.resource_watching_handlers[cause.resource].iter_handlers(cause=cause)
def iter_resource_changing_handlers(
self,
cause: causation.ResourceChangingCause,
) -> Iterator[handlers.ResourceChangingHandler]:
"""
Iterate all handlers that match this cause/event, in the order they were registered (even if mixed).
"""
warnings.warn("registry.iter_resource_changing_handlers() is deprecated; "
"use registry.resource_changing_handlers[resource].iter_handlers().",
DeprecationWarning)
yield from self.resource_changing_handlers[cause.resource].iter_handlers(cause=cause)
def get_extra_fields(
self,
resource: resources_.Resource,
) -> Set[dicts.FieldPath]:
warnings.warn("registry.get_extra_fields() is deprecated; "
"use registry.resource_changing_handlers[resource].get_extra_fields().",
DeprecationWarning)
return self.resource_changing_handlers[resource].get_extra_fields()
def iter_extra_fields(
self,
resource: resources_.Resource,
) -> Iterator[dicts.FieldPath]:
warnings.warn("registry.iter_extra_fields() is deprecated; "
"use registry.resource_changing_handlers[resource].iter_extra_fields().",
DeprecationWarning)
yield from self.resource_changing_handlers[resource].iter_extra_fields()
def requires_finalizer(
self,
resource: resources_.Resource,
cause: causation.ResourceCause,
) -> bool:
"""
Check whether a finalizer should be added to the given resource or not.
"""
warnings.warn("registry.requires_finalizer() is deprecated; "
"use registry.resource_changing_handlers[resource].requires_finalizer().",
DeprecationWarning)
return self.resource_changing_handlers[resource].requires_finalizer(cause=cause)
class SmartOperatorRegistry(OperatorRegistry):
def __init__(self) -> None:
super().__init__()
try:
import pykube
except ImportError:
pass
else:
self.activity_handlers.append(handlers.ActivityHandler(
id=handlers.HandlerId('login_via_pykube'),
fn=cast(callbacks.ActivityFn, piggybacking.login_via_pykube),
activity=handlers.Activity.AUTHENTICATION,
errors=handlers.ErrorsMode.IGNORED,
timeout=None, retries=None, backoff=None, cooldown=None,
_fallback=True,
))
try:
import kubernetes
except ImportError:
pass
else:
self.activity_handlers.append(handlers.ActivityHandler(
id=handlers.HandlerId('login_via_client'),
fn=cast(callbacks.ActivityFn, piggybacking.login_via_client),
activity=handlers.Activity.AUTHENTICATION,
errors=handlers.ErrorsMode.IGNORED,
timeout=None, retries=None, backoff=None, cooldown=None,
_fallback=True,
))
def generate_id(
fn: Callable[..., Any],
id: Optional[str],
prefix: Optional[str] = None,
suffix: Optional[str] = None,
) -> handlers.HandlerId:
real_id: str
real_id = id if id is not None else get_callable_id(fn)
real_id = real_id if not suffix else f'{real_id}/{suffix}'
real_id = real_id if not prefix else f'{prefix}/{real_id}'
return cast(handlers.HandlerId, real_id)
def get_callable_id(c: Callable[..., Any]) -> str:
""" Get an reasonably good id of any commonly used callable. """
if c is None:
raise ValueError("Cannot build a persistent id of None.")
elif isinstance(c, functools.partial):
return get_callable_id(c.func)
elif hasattr(c, '__wrapped__'): # @functools.wraps()
return get_callable_id(getattr(c, '__wrapped__'))
elif isinstance(c, FunctionType) and c.__name__ == '<lambda>':
# The best we can do to keep the id stable across the process restarts,
# assuming at least no code changes. The code changes are not detectable.
line = c.__code__.co_firstlineno
path = c.__code__.co_filename
return f'lambda:{path}:{line}'
elif isinstance(c, (FunctionType, MethodType)):
return str(getattr(c, '__qualname__', getattr(c, '__name__', repr(c))))
else:
raise ValueError(f"Cannot get id of {c!r}.")
def _deduplicated(
handlers: Iterable[HandlerT],
) -> Iterator[HandlerT]:
"""
Yield the handlers deduplicated.
The same handler function should not be invoked more than once for one
single event/cause, even if it is registered with multiple decorators
(e.g. different filtering criteria or different but same-effect causes).
One of the ways how this could happen::
@kopf.on.create(...)
@kopf.on.resume(...)
def fn(**kwargs): pass
In normal cases, the function will be called either on resource creation
or on operator restart for the pre-existing (already handled) resources.
When a resource is created during the operator downtime, it is
both creation and resuming at the same time: the object is new (not yet
    handled) **AND** it is detected as pre-existing before operator start.
But `fn()` should be called only once for this cause.
"""
seen_ids: Set[int] = set()
for handler in handlers:
if id(handler.fn) in seen_ids:
pass
else:
seen_ids.add(id(handler.fn))
yield handler
def match(
handler: handlers.ResourceHandler,
cause: causation.ResourceCause,
ignore_fields: bool = False,
) -> bool:
# Kwargs are lazily evaluated on the first _actual_ use, and shared for all filters since then.
kwargs: MutableMapping[str, Any] = {}
return all([
_matches_field(handler, cause, ignore_fields),
_matches_labels(handler, cause, kwargs),
_matches_annotations(handler, cause, kwargs),
_matches_filter_callback(handler, cause, kwargs),
])
def _matches_field(
handler: handlers.ResourceHandler,
cause: causation.ResourceCause,
ignore_fields: bool = False,
) -> bool:
return (ignore_fields or
not isinstance(handler, handlers.ResourceChangingHandler) or
not handler.field or (
isinstance(cause, causation.ResourceChangingCause) and
bool(diffs.reduce(cause.diff, handler.field))
))
def _matches_labels(
handler: handlers.ResourceHandler,
cause: causation.ResourceCause,
kwargs: MutableMapping[str, Any],
) -> bool:
return (not handler.labels or
_matches_metadata(pattern=handler.labels,
content=cause.body.get('metadata', {}).get('labels', {}),
kwargs=kwargs, cause=cause))
def _matches_annotations(
handler: handlers.ResourceHandler,
cause: causation.ResourceCause,
kwargs: MutableMapping[str, Any],
) -> bool:
return (not handler.annotations or
_matches_metadata(pattern=handler.annotations,
content=cause.body.get('metadata', {}).get('annotations', {}),
kwargs=kwargs, cause=cause))
def _matches_metadata(
*,
pattern: filters.MetaFilter, # from the handler
content: Mapping[str, str], # from the body
kwargs: MutableMapping[str, Any],
cause: causation.ResourceCause,
) -> bool:
for key, value in pattern.items():
if value is filters.MetaFilterToken.ABSENT and key not in content:
continue
elif value is filters.MetaFilterToken.PRESENT and key in content:
continue
elif value is None and key in content: # deprecated; warned in @kopf.on
continue
elif callable(value):
if not kwargs:
kwargs.update(invocation.build_kwargs(cause=cause))
if value(content.get(key, None), **kwargs):
continue
else:
return False
elif key not in content:
return False
elif value != content[key]:
return False
else:
continue
return True
def _matches_filter_callback(
handler: handlers.ResourceHandler,
cause: causation.ResourceCause,
kwargs: MutableMapping[str, Any],
) -> bool:
if handler.when is None:
return True
if not kwargs:
kwargs.update(invocation.build_kwargs(cause=cause))
return handler.when(**kwargs)
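# A hedged illustration of the filter syntax consumed by the matchers above: the
# metadata filters accept exact values, the presence/absence tokens, or per-value
# callables, e.g. (label keys and values here are made up for illustration):
#
#   labels={
#       'app': 'myapp',                                        # exact value match
#       'tier': filters.MetaFilterToken.PRESENT,               # key must merely exist
#       'size': lambda value, **_: value not in (None, '0'),   # callable gets the value + kwargs
#   }
#
# match() combines this with the field diff, the annotations and the `when=`
# callback; all of them must agree for a handler to be selected.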
_default_registry: Optional[OperatorRegistry] = None
def get_default_registry() -> OperatorRegistry:
"""
Get the default registry to be used by the decorators and the reactor
unless the explicit registry is provided to them.
"""
global _default_registry
if _default_registry is None:
# TODO: Deprecated registry to ensure backward-compatibility until removal:
from kopf.toolkits.legacy_registries import SmartGlobalRegistry
_default_registry = SmartGlobalRegistry()
return _default_registry
def set_default_registry(registry: OperatorRegistry) -> None:
"""
Set the default registry to be used by the decorators and the reactor
unless the explicit registry is provided to them.
"""
global _default_registry
_default_registry = registry
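# A minimal sketch of swapping in a custom registry: an embedded operator can install
# its own registry so that any @kopf.on... decorators applied afterwards attach to it
# instead of the process-wide default:
if __name__ == '__main__':
    registry = SmartOperatorRegistry()
    set_default_registry(registry)
    assert get_default_registry() is registry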
| 39.802759
| 124
| 0.634231
|
d7d853d375f9d48e5632cdd4ea5ba622990a0ff0
| 16,379
|
py
|
Python
|
torch_utils/ops/upfirdn2d.py
|
maua-maua-maua/nvGAN
|
edea24c58646780c9fb8ea942e49708ce9d62421
|
[
"MIT"
] | null | null | null |
torch_utils/ops/upfirdn2d.py
|
maua-maua-maua/nvGAN
|
edea24c58646780c9fb8ea942e49708ce9d62421
|
[
"MIT"
] | null | null | null |
torch_utils/ops/upfirdn2d.py
|
maua-maua-maua/nvGAN
|
edea24c58646780c9fb8ea942e49708ce9d62421
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import numpy as np
import torch
from .. import custom_ops, misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_plugin = None
def _init():
global _plugin
if _plugin is None:
_plugin = custom_ops.get_plugin(
module_name='upfirdn2d_plugin',
sources=['upfirdn2d.cpp', 'upfirdn2d.cu'],
headers=['upfirdn2d.h'],
source_dir=os.path.dirname(__file__),
extra_cuda_cflags=['--use_fast_math'],
)
return True
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
Args:
f: Torch tensor, numpy array, or python list of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable),
`[]` (impulse), or
`None` (identity).
device: Result device (default: cpu).
normalize: Normalize the filter so that it retains the magnitude
for constant input signal (DC)? (default: True).
flip_filter: Flip the filter? (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
separable: Return a separable filter? (default: select automatically).
Returns:
Float32 tensor of the shape
`[filter_height, filter_width]` (non-separable) or
`[filter_taps]` (separable).
"""
# Validate.
if f is None:
f = 1
f = torch.as_tensor(f, dtype=torch.float32)
assert f.ndim in [0, 1, 2]
assert f.numel() > 0
if f.ndim == 0:
f = f[np.newaxis]
# Separable?
if separable is None:
separable = (f.ndim == 1 and f.numel() >= 8)
if f.ndim == 1 and not separable:
f = f.ger(f)
assert f.ndim == (1 if separable else 2)
# Apply normalize, flip, gain, and device.
if normalize:
f /= f.sum()
if flip_filter:
f = f.flip(list(range(f.ndim)))
f = f * (gain ** (f.ndim / 2))
f = f.to(device=device)
return f
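# A hedged usage note on the familiar [1, 3, 3, 1] binomial filter: with fewer than
# 8 taps the default is a non-separable outer product, and normalize=True makes the
# taps sum to 1, e.g.:
#
#   setup_filter([1, 3, 3, 1])                   # 4x4 tensor with entries k_i * k_j / 64
#   setup_filter([1, 3, 3, 1], separable=True)   # 1D taps [0.125, 0.375, 0.375, 0.125]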
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
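# A hedged shape note derived from the four steps above: the output spatial size
# works out to
#
#   out_h = (in_h * upy + pady0 + pady1 - fh) // downy + 1
#   out_w = (in_w * upx + padx0 + padx1 - fw) // downx + 1
#
# e.g. in_h = 8, up = 2, a 4-tap filter and the [2, 1] per-axis padding chosen by
# upsample2d() below give (16 + 3 - 4) // 1 + 1 = 16 rows.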
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
"""
# Validate arguments.
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
assert f.dtype == torch.float32 and not f.requires_grad
batch_size, num_channels, in_height, in_width = x.shape
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
# Check that upsampled buffer is not smaller than the filter.
upW = in_width * upx + padx0 + padx1
upH = in_height * upy + pady0 + pady1
assert upW >= f.shape[-1] and upH >= f.shape[0]
# Upsample by inserting zeros.
x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
# Pad or crop.
x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]
# Setup filter.
f = f * (gain ** (f.ndim / 2))
f = f.to(x.dtype)
if not flip_filter:
f = f.flip(list(range(f.ndim)))
# Convolve with the filter.
f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
if f.ndim == 4:
x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
else:
x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
# Downsample by throwing away pixels.
x = x[:, :, ::downy, ::downx]
return x
#----------------------------------------------------------------------------
_upfirdn2d_cuda_cache = dict()
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
# Parse arguments.
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
# Lookup from cache.
key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
# Forward op.
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f): # pylint: disable=arguments-differ
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
if f.ndim == 1 and f.shape[0] == 1:
f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1.
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0)
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain)
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [
fw - padx0 - 1,
iw * upx - ow * downx + padx0 - upx + 1,
fh - pady0 - 1,
ih * upy - oh * downy + pady0 - upy + 1,
]
dx = None
df = None
if ctx.needs_input_grad[0]:
dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
assert not ctx.needs_input_grad[1]
return dx, df
# Add to cache.
_upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Filter a batch of 2D images using the given 2D FIR filter.
By default, the result is padded so that its shape matches the input.
User-specified padding is applied on top of that, with negative values
indicating cropping. Pixels outside the image are assumed to be zero.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
padding: Padding with respect to the output. Can be a single number or a
list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
padx0, padx1, pady0, pady1 = _parse_padding(padding)
fw, fh = _get_filter_size(f)
p = [
padx0 + fw // 2,
padx1 + (fw - 1) // 2,
pady0 + fh // 2,
pady1 + (fh - 1) // 2,
]
return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Upsample a batch of 2D images using the given 2D FIR filter.
By default, the result is padded so that its shape is a multiple of the input.
User-specified padding is applied on top of that, with negative values
indicating cropping. Pixels outside the image are assumed to be zero.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the output. Can be a single number or a
list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
upx, upy = _parse_scaling(up)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
fw, fh = _get_filter_size(f)
p = [
padx0 + (fw + upx - 1) // 2,
padx1 + (fw - upx) // 2,
pady0 + (fh + upy - 1) // 2,
pady1 + (fh - upy) // 2,
]
return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Downsample a batch of 2D images using the given 2D FIR filter.
By default, the result is padded so that its shape is a fraction of the input.
User-specified padding is applied on top of that, with negative values
indicating cropping. Pixels outside the image are assumed to be zero.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the input. Can be a single number or a
list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
fw, fh = _get_filter_size(f)
p = [
padx0 + (fw - downx + 1) // 2,
padx1 + (fw - downx) // 2,
pady0 + (fh - downy + 1) // 2,
pady1 + (fh - downy) // 2,
]
return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
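#----------------------------------------------------------------------------

def _example_resample2d():
    """A minimal usage sketch: upsample by 2x with a separable 4-tap low-pass
    filter, then downsample back. Output shapes follow the scaling factors;
    the filter taps are only an example."""
    import torch
    x = torch.randn(1, 3, 32, 32)
    f = torch.tensor([1., 3., 3., 1.])
    f = f / f.sum()                                  # unit DC gain
    hi = upsample2d(x, f, up=2, impl='ref')          # -> [1, 3, 64, 64]
    lo = downsample2d(hi, f, down=2, impl='ref')     # -> [1, 3, 32, 32]
    assert hi.shape[-2:] == (64, 64) and lo.shape == x.shape
    return hi, lo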
#----------------------------------------------------------------------------
| 41.997436
| 120
| 0.574516
|
7965bd0b87d6fd669ab45392353033b7589d5c6b
| 7,470
|
py
|
Python
|
src/virtualenv/create/creator.py
|
jezdez/virtualenv
|
5c9b6246a4107e5e613a23c42b805d0373672400
|
[
"MIT"
] | 1
|
2022-02-21T02:01:37.000Z
|
2022-02-21T02:01:37.000Z
|
src/virtualenv/create/creator.py
|
jezdez/virtualenv
|
5c9b6246a4107e5e613a23c42b805d0373672400
|
[
"MIT"
] | null | null | null |
src/virtualenv/create/creator.py
|
jezdez/virtualenv
|
5c9b6246a4107e5e613a23c42b805d0373672400
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import os
import shutil
import sys
from abc import ABCMeta, abstractmethod
from argparse import ArgumentTypeError
from ast import literal_eval
from collections import OrderedDict
from stat import S_IWUSR
import six
from six import add_metaclass
from virtualenv.discovery.cached_py_info import LogCmd
from virtualenv.info import WIN_CPYTHON_2
from virtualenv.pyenv_cfg import PyEnvCfg
from virtualenv.util.path import Path
from virtualenv.util.subprocess import run_cmd
from virtualenv.util.zipapp import ensure_file_on_disk
from virtualenv.version import __version__
HERE = Path(__file__).absolute().parent
DEBUG_SCRIPT = HERE / "debug.py"
@add_metaclass(ABCMeta)
class Creator(object):
"""A class that given a python Interpreter creates a virtual environment"""
def __init__(self, options, interpreter):
"""Construct a new virtual environment creator.
:param options: the CLI options as parsed from :meth:`add_parser_arguments`
:param interpreter: the interpreter to create virtual environment from
"""
self.interpreter = interpreter
self._debug = None
self.dest = Path(options.dest)
self.clear = options.clear
self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
def __repr__(self):
return six.ensure_str(self.__unicode__())
def __unicode__(self):
return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args()))
def _args(self):
return [
("dest", six.ensure_text(str(self.dest))),
("clear", self.clear),
]
@classmethod
def can_create(cls, interpreter):
"""Determine if we can create a virtual environment.
:param interpreter: the interpreter in question
:return: ``None`` if we can't create; otherwise any other object, which will be forwarded to \
:meth:`add_parser_arguments`
"""
return True
@classmethod
def add_parser_arguments(cls, parser, interpreter, meta):
"""Add CLI arguments for the creator.
:param parser: the CLI parser
:param interpreter: the interpreter we're asked to create a virtual environment for
:param meta: value as returned by :meth:`can_create`
"""
parser.add_argument(
"dest", help="directory to create virtualenv at", type=cls.validate_dest, default="venv", nargs="?",
)
parser.add_argument(
"--clear",
dest="clear",
action="store_true",
help="remove the destination directory if exist before starting (will overwrite files otherwise)",
default=False,
)
@abstractmethod
def create(self):
"""Perform the virtual environment creation."""
raise NotImplementedError
@classmethod
def validate_dest(cls, raw_value):
"""No path separator in the path, valid chars and must be write-able"""
def non_write_able(dest, value):
common = Path(*os.path.commonprefix([value.parts, dest.parts]))
raise ArgumentTypeError(
"the destination {} is not write-able at {}".format(dest.relative_to(common), common)
)
# the file system must be able to encode
# note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
encoding = sys.getfilesystemencoding()
refused = OrderedDict()
kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
for char in six.ensure_text(raw_value):
try:
trip = char.encode(encoding, **kwargs).decode(encoding)
if trip == char:
continue
raise ValueError(trip)
except ValueError:
refused[char] = None
if refused:
raise ArgumentTypeError(
"the file system codec ({}) cannot handle characters {!r} within {!r}".format(
encoding, "".join(refused.keys()), raw_value
)
)
for char in (i for i in (os.pathsep, os.altsep) if i is not None):
if char in raw_value:
raise ArgumentTypeError(
"destination {!r} must not contain the path separator ({}) as this would break "
"the activation scripts".format(raw_value, char)
)
value = Path(raw_value)
if value.exists() and value.is_file():
raise ArgumentTypeError("the destination {} already exists and is a file".format(value))
if (3, 3) <= sys.version_info <= (3, 6):
# pre 3.6 resolve is always strict, i.e. the path must exist; sidestep by using an os.path operation
dest = Path(os.path.realpath(raw_value))
else:
dest = value.resolve()
value = dest
while dest:
if dest.exists():
if os.access(six.ensure_text(str(dest)), os.W_OK):
break
else:
non_write_able(dest, value)
base, _ = dest.parent, dest.name
if base == dest:
non_write_able(dest, value) # pragma: no cover
dest = base
return str(value)
def run(self):
if self.dest.exists() and self.clear:
logging.debug("delete %s", self.dest)
def onerror(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, S_IWUSR)
func(path)
else:
raise
shutil.rmtree(str(self.dest), onerror=onerror)  # ignore_errors would suppress onerror, so leave it unset
self.create()
self.set_pyenv_cfg()
def set_pyenv_cfg(self):
self.pyenv_cfg.content = OrderedDict()
self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix
self.pyenv_cfg["implementation"] = self.interpreter.implementation
self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
self.pyenv_cfg["virtualenv"] = __version__
@property
def debug(self):
"""
:return: debug information about the virtual environment (only valid after :meth:`create` has run)
"""
if self._debug is None and self.exe is not None:
self._debug = get_env_debug_info(self.exe, self.debug_script())
return self._debug
# noinspection PyMethodMayBeStatic
def debug_script(self):
return DEBUG_SCRIPT
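# Minimal sketch of a concrete creator: only ``create`` has to be implemented,
# while option parsing, ``--clear`` handling and the ``pyvenv.cfg`` bookkeeping
# in ``run`` are inherited from ``Creator``. The class below is hypothetical.
class _ExampleNullCreator(Creator):
    """Hypothetical creator that only materialises an empty destination directory."""

    def create(self):
        self.dest.mkdir(parents=True)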
def get_env_debug_info(env_exe, debug_script):
env = os.environ.copy()
env.pop(str("PYTHONPATH"), None)
with ensure_file_on_disk(debug_script) as debug_script:
cmd = [str(env_exe), str(debug_script)]
if WIN_CPYTHON_2:
cmd = [six.ensure_text(i) for i in cmd]
logging.debug(str("debug via %r"), LogCmd(cmd))
code, out, err = run_cmd(cmd)
# noinspection PyBroadException
try:
if code != 0:
result = literal_eval(out)
else:
result = json.loads(out)
if err:
result["err"] = err
except Exception as exception:
return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
if "sys" in result and "path" in result["sys"]:
del result["sys"]["path"][0]
return result
| 35.913462
| 113
| 0.610442
|
46eb3c5d97324a790dc225612496bb48e9d64b4f
| 1,136
|
py
|
Python
|
main1.py
|
17jrb17/E01b-Smiles
|
2f4a65c9907cc28e40cf17253fb19a22c7bc4e0a
|
[
"MIT"
] | null | null | null |
main1.py
|
17jrb17/E01b-Smiles
|
2f4a65c9907cc28e40cf17253fb19a22c7bc4e0a
|
[
"MIT"
] | null | null | null |
main1.py
|
17jrb17/E01b-Smiles
|
2f4a65c9907cc28e40cf17253fb19a22c7bc4e0a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import utils, open_color, arcade
utils.check_version((3,7))
# Open the window. Set the window title and dimensions (width and height)
arcade.open_window(800, 600, "Smiley Face Example")
arcade.set_background_color(open_color.white)
# Start the render process. This must be done before any drawing commands.
arcade.start_render()
# Draw the smiley face:
# (x,y,radius,color)
arcade.draw_circle_filled(400, 300, 100, open_color.yellow_3)
# (x,y,radius,color,border_thickness)
arcade.draw_circle_outline(400, 300, 100, open_color.black,4)
#(x,y,width,height,color)
arcade.draw_ellipse_filled(360,340,15,25,open_color.black)
arcade.draw_ellipse_filled(440,340,15,25,open_color.black)
arcade.draw_circle_filled(365,345,3,open_color.gray_2)
arcade.draw_circle_filled(445,345,3,open_color.gray_2)
#(x,y,width,height,color,start_degrees,end_degrees,border_thickness)
arcade.draw_arc_outline(400,280,60,50,open_color.black,190,350,4)
# Finish the render
# Nothing will be drawn without this.
# Must happen after all draw commands
arcade.finish_render()
# Keep the window up until someone closes it.
arcade.run()
| 30.702703
| 74
| 0.791373
|
658d77211e8a9102588343a4361271b22eb85e9c
| 903
|
py
|
Python
|
doc/conf.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | null | null | null |
doc/conf.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | null | null | null |
doc/conf.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | null | null | null |
import sys
import os
from urllib.request import urlopen
_conf_url = \
"https://raw.githubusercontent.com/inducer/sphinxconfig/main/sphinxconfig.py"
with urlopen(_conf_url) as _inf:
exec(compile(_inf.read(), _conf_url, "exec"), globals())
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_SETTINGS_MODULE"] = "relate.settings"
import django
django.setup()
intersphinx_mapping = {
"https://docs.python.org/3/": None,
"https://numpy.org/doc/stable/": None,
"django": (
"https://docs.djangoproject.com/en/dev/",
"https://docs.djangoproject.com/en/dev/_objects/",
),
"https://docs.sympy.org/latest": None,
# https://github.com/dulwich/dulwich/issues/913
# "https://www.dulwich.io/docs/": None,
"https://tiker.net/pub/dulwich-docs-stopgap/": None,
}
copyright = u"2014-21, Andreas Kloeckner"
version = "2021.1"
release = version
| 25.083333
| 85
| 0.673311
|
4f52ddcab581f20f2a261df602a888032bc94723
| 771
|
py
|
Python
|
pyPseudo/lexer/Token.py
|
johnyob/Pseudo
|
02e637e93872e786201538f719cd19676a03c170
|
[
"MIT"
] | 1
|
2019-03-10T21:25:47.000Z
|
2019-03-10T21:25:47.000Z
|
pyPseudo/lexer/Token.py
|
johnyob/Pseudo
|
02e637e93872e786201538f719cd19676a03c170
|
[
"MIT"
] | null | null | null |
pyPseudo/lexer/Token.py
|
johnyob/Pseudo
|
02e637e93872e786201538f719cd19676a03c170
|
[
"MIT"
] | null | null | null |
class Token:
def __init__(self, type, lexeme, literal, path, line):
self._type = type
self._lexeme = lexeme
self._literal = literal
self._path = path
self._line = line
def getType(self):
return self._type
def getLexeme(self):
return self._lexeme
def getLiteral(self):
return self._literal
def getPath(self):
return self._path
def getLine(self):
return self._line
def __str__(self):
return "Type: {0}, Lexeme: {1}, Literal: {2}.".format(
self._type, self._lexeme, self._literal
)
def __repr__(self):
return "Type: {0}, Lexeme: {1}, Literal: {2}.".format(
self._type, self._lexeme, self._literal
)
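# Minimal usage sketch; the token type, lexeme and source location below are
# made-up values, and the expected string mirrors __str__ above.
def _example_token():
    tok = Token("NUMBER", "42", 42, "example.pseudo", 1)
    return str(tok)   # "Type: NUMBER, Lexeme: 42, Literal: 42."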
| 23.363636
| 62
| 0.566796
|
dafb65ce6808a2837be117e62bb2f7c4e7da3e49
| 8,501
|
py
|
Python
|
lib/datasets/cityscapes.py
|
Zealoe/HRNet-Semantic-Segmentation
|
e5082879d6a46f1eb1127429e9948c80c0e15418
|
[
"MIT"
] | 2
|
2020-11-02T11:38:59.000Z
|
2021-03-23T09:54:14.000Z
|
lib/datasets/cityscapes.py
|
Zealoe/HRNet-Semantic-Segmentation
|
e5082879d6a46f1eb1127429e9948c80c0e15418
|
[
"MIT"
] | null | null | null |
lib/datasets/cityscapes.py
|
Zealoe/HRNet-Semantic-Segmentation
|
e5082879d6a46f1eb1127429e9948c80c0e15418
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
import os
import cv2
import numpy as np
from PIL import Image
import torch
from torch.nn import functional as F
from .base_dataset import BaseDataset
class Cityscapes(BaseDataset):
def __init__(self,
root,
list_path,
num_samples=None,
num_classes=19,
multi_scale=True,
flip=True,
ignore_label=-1,
base_size=2048,
crop_size=(512, 1024),
center_crop_test=False,
downsample_rate=1,
scale_factor=16,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]):
super(Cityscapes, self).__init__(ignore_label, base_size,
crop_size, downsample_rate, scale_factor, mean, std,)
self.root = root
self.list_path = list_path
self.num_classes = num_classes
self.class_weights = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345,
1.0166, 0.9969, 0.9754, 1.0489,
0.8786, 1.0023, 0.9539, 0.9843,
1.1116, 0.9037, 1.0865, 1.0955,
1.0865, 1.1529, 1.0507]).cuda()
self.multi_scale = multi_scale
self.flip = flip
self.center_crop_test = center_crop_test
self.img_list = [line.strip().split() for line in open(root+list_path)]
self.files = self.read_files()
if num_samples:
self.files = self.files[:num_samples]
self.label_mapping = {-1: ignore_label, 0: ignore_label,
1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label,
5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label,
10: ignore_label, 11: 2, 12: 3,
13: 4, 14: ignore_label, 15: ignore_label,
16: ignore_label, 17: 5, 18: ignore_label,
19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11,
25: 12, 26: 13, 27: 14, 28: 15,
29: ignore_label, 30: ignore_label,
31: 16, 32: 17, 33: 18}
def read_files(self):
files = []
if 'test' in self.list_path:
for item in self.img_list:
image_path = item
name = os.path.splitext(os.path.basename(image_path[0]))[0]
files.append({
"img": image_path[0],
"name": name,
})
else:
for item in self.img_list:
image_path, label_path = item
name = os.path.splitext(os.path.basename(label_path))[0]
files.append({
"img": image_path,
"label": label_path,
"name": name,
"weight": 1
})
return files
def convert_label(self, label, inverse=False):
temp = label.copy()
if inverse:
for v, k in self.label_mapping.items():
label[temp == k] = v
else:
for k, v in self.label_mapping.items():
label[temp == k] = v
return label
def __getitem__(self, index):
item = self.files[index]
name = item["name"]
image = cv2.imread(os.path.join(self.root,'cityscapes',item["img"]),
cv2.IMREAD_COLOR)
size = image.shape
if 'test' in self.list_path:
image = self.input_transform(image)
image = image.transpose((2, 0, 1))
return image.copy(), np.array(size), name
label = cv2.imread(os.path.join(self.root,'cityscapes',item["label"]),
cv2.IMREAD_GRAYSCALE)
label = self.convert_label(label)
image, label = self.gen_sample(image, label,
self.multi_scale, self.flip,
self.center_crop_test)
return image.copy(), label.copy(), np.array(size), name
def multi_scale_inference(self, model, image, scales=[1], flip=False):
batch, _, ori_height, ori_width = image.size()
assert batch == 1, "only supporting batchsize 1."
image = image.numpy()[0].transpose((1,2,0)).copy()
stride_h = np.int(self.crop_size[0] * 1.0)
stride_w = np.int(self.crop_size[1] * 1.0)
final_pred = torch.zeros([1, self.num_classes,
ori_height,ori_width]).cuda()
for scale in scales:
new_img = self.multi_scale_aug(image=image,
rand_scale=scale,
rand_crop=False)
height, width = new_img.shape[:-1]
if scale <= 1.0:
new_img = new_img.transpose((2, 0, 1))
new_img = np.expand_dims(new_img, axis=0)
new_img = torch.from_numpy(new_img)
preds = self.inference(model, new_img, flip)
preds = preds[:, :, 0:height, 0:width]
else:
new_h, new_w = new_img.shape[:-1]
rows = np.int(np.ceil(1.0 * (new_h -
self.crop_size[0]) / stride_h)) + 1
cols = np.int(np.ceil(1.0 * (new_w -
self.crop_size[1]) / stride_w)) + 1
preds = torch.zeros([1, self.num_classes,
new_h,new_w]).cuda()
count = torch.zeros([1,1, new_h, new_w]).cuda()
for r in range(rows):
for c in range(cols):
h0 = r * stride_h
w0 = c * stride_w
h1 = min(h0 + self.crop_size[0], new_h)
w1 = min(w0 + self.crop_size[1], new_w)
h0 = max(int(h1 - self.crop_size[0]), 0)
w0 = max(int(w1 - self.crop_size[1]), 0)
crop_img = new_img[h0:h1, w0:w1, :]
crop_img = crop_img.transpose((2, 0, 1))
crop_img = np.expand_dims(crop_img, axis=0)
crop_img = torch.from_numpy(crop_img)
pred = self.inference(model, crop_img, flip)
preds[:,:,h0:h1,w0:w1] += pred[:,:, 0:h1-h0, 0:w1-w0]
count[:,:,h0:h1,w0:w1] += 1
preds = preds / count
preds = preds[:,:,:height,:width]
preds = F.upsample(preds, (ori_height, ori_width),
mode='bilinear')
final_pred += preds
return final_pred
def get_palette(self, n):
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
def save_pred(self, preds, sv_path, name):
palette = self.get_palette(256)
preds = preds.cpu().numpy().copy()
preds = np.asarray(np.argmax(preds, axis=1), dtype=np.uint8)
for i in range(preds.shape[0]):
pred = self.convert_label(preds[i], inverse=True)
save_img = Image.fromarray(pred)
save_img.putpalette(palette)
save_img.save(os.path.join(sv_path, name[i]+'.png'))
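# Minimal sketch of the label conversion: ``convert_label`` only needs
# ``self.label_mapping``, so a stand-in object with a reduced (hypothetical)
# mapping is enough to show raw Cityscapes ids collapsing to train ids, with
# unmapped ids sent to the ignore label.
def _example_convert_label():
    import types
    ignore_label = 255
    stub = types.SimpleNamespace(label_mapping={0: ignore_label, 7: 0, 8: 1, 11: 2})
    raw = np.array([[7, 8], [11, 0]], dtype=np.uint8)
    train_ids = Cityscapes.convert_label(stub, raw)
    assert train_ids.tolist() == [[0, 1], [2, 255]]
    return train_ids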
| 41.468293
| 81
| 0.438184
|
7824ba1ab873fbb3d210465967ec3442310287be
| 21,360
|
py
|
Python
|
kaolin/ops/spc/spc.py
|
mcx/kaolin
|
abe006921b5d522ecd0f7c5e30abe760a4459dc7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kaolin/ops/spc/spc.py
|
mcx/kaolin
|
abe006921b5d522ecd0f7c5e30abe760a4459dc7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kaolin/ops/spc/spc.py
|
mcx/kaolin
|
abe006921b5d522ecd0f7c5e30abe760a4459dc7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021,22 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'feature_grids_to_spc',
'scan_octrees',
'generate_points',
'to_dense',
'unbatched_query',
'unbatched_make_dual',
'unbatched_make_trinkets',
'unbatched_get_level_points'
]
import math
from torch.autograd import Function
import torch
from kaolin import _C
from .uint8 import bits_to_uint8
from kaolin.rep import Spc
from .points import points_to_morton, points_to_corners
def scan_octrees(octrees, lengths):
r"""Scan batch of octrees tensor.
Scanning refers to processing the octrees to extract auxiliary information.
There are two steps. First, a list is formed
containing the number of set bits in each octree node/byte. Second, the exclusive
sum of this list is taken.
Args:
octrees (torch.ByteTensor):
Batched :ref:`packed<packed>` collection of octrees of shape :math:`(\text{num_node})`.
lengths (torch.IntTensor):
The number of bytes per octree, of shape :math:`(\text{batch_size})`.
Returns:
(int, torch.IntTensor, torch.IntTensor):
- max_level, an int containing the depth of the octrees.
- :ref:`pyramids<spc_pyramids>`, a tensor containing structural information about
the batch of structured point cloud hierarchies,
of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`.
See :ref:`the documentation <spc_pyramids>` for more details.
- :ref:`exsum<spc_exsum>`, a 1D tensor containing the exclusive sum of the bit
counts of each byte of the individual octrees within the batched input ``octrees`` tensor,
of size :math:`(\text{octree_num_bytes} + \text{batch_size})`.
See :ref:`the documentation <spc_exsum>` for more details.
.. note::
The returned tensor of exclusive sums is padded with an extra element for each
item in the batch.
"""
return _C.ops.spc.scan_octrees_cuda(octrees.contiguous(), lengths.contiguous())
def generate_points(octrees, pyramids, exsum):
r"""Generate the point data for a structured point cloud.
Decode batched octree into batch of structured point hierarchies,
and batch of book keeping pyramids.
Args:
octrees (torch.ByteTensor):
Batched (packed) collection of octrees of shape :math:`(\text{num_bytes})`.
pyramids (torch.IntTensor):
Batched tensor containing point hierarchy structural information
of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`
exsum (torch.IntTensor):
Batched tensor containing the exclusive sum of the bit
counts of individual octrees of shape :math:`(\text{octree_num_bytes} + \text{batch_size})`
Returns:
(torch.ShortTensor):
A tensor containing batched point hierarchies derived from a batch of octrees,
of shape :math:`(\text{num_points_at_all_levels}, 3)`.
See :ref:`the documentation<spc_points>` for more details.
"""
return _C.ops.spc.generate_points_cuda(octrees.contiguous(),
pyramids.contiguous(),
exsum.contiguous())
class ToDenseFunction(Function):
@staticmethod
def forward(ctx, point_hierarchies, level, pyramids, inputs):
inputs = inputs.contiguous()
pyramids = pyramids.contiguous()
point_hierarchies = point_hierarchies.contiguous()
ctx.save_for_backward(point_hierarchies, pyramids, inputs)
ctx.level = level
return _C.ops.spc.to_dense_forward(point_hierarchies, level, pyramids, inputs)
@staticmethod
def backward(ctx, grad_outputs):
grad_outputs = grad_outputs.contiguous()
point_hierarchies, pyramids, inputs = ctx.saved_tensors
d_inputs = _C.ops.spc.to_dense_backward(point_hierarchies, ctx.level, pyramids,
inputs, grad_outputs)
return None, None, None, d_inputs
def to_dense(point_hierarchies, pyramids, input, level=-1, **kwargs):
r"""Convert batched structured point cloud to a batched dense feature grids.
The size of the input should correspond to level :math:`l` within the
structured point cloud hierarchy. A dense voxel grid of size
:math:`(\text{batch_size}, 2^l, 2^l, 2^l, \text{input_channels})` is
returned where (for a particular batch):
.. math::
Y_{P_i} = X_i \quad\text{for}\; i \in 0,\ldots,|X|-1,
where :math:`P_i` is used as a 3D index for dense array :math:`Y`, and :math:`X_i` is the
input feature corresponding to point :math:`P_i`. Locations in :math:`Y` without a
correspondence in :math:`X` are set to zero.
Args:
point_hierarchies (torch.ShortTensor):
:ref:`Packed <packed>` collection of point hierarchies,
of shape :math:`(\text{num_points})`.
See :ref:`point_hierarchies <spc_points>` for a detailed description.
pyramids (torch.IntTensor):
Batched tensor containing point hierarchy structural information
of shape :math:`(\text{batch_size}, 2, \text{max_level}+2)`.
See :ref:`pyramids <spc_pyramids>` for a detailed description.
input (torch.FloatTensor):
Batched tensor of input feature data,
of shape :math:`(\text{num_inputs}, \text{feature_dim})`.
With :math:`\text{num_inputs}` corresponding to a number of points in the
batched point hierarchy at ``level``.
level (int):
The level at which the octree points are converted to feature grids.
Returns:
(torch.FloatTensor):
The feature grids, of shape
:math:`(\text{batch_size}, \text{feature_dim}, 2^\text{level}, 2^\text{level}, 2^\text{level})`.
"""
remaining_kwargs = kwargs.keys() - Spc.KEYS
if len(remaining_kwargs) > 0:
raise TypeError("to_dense got an unexpected keyword argument "
f"{list(remaining_kwargs)[0]}")
if level < 0:
max_level = pyramids.shape[2] - 2
level = max_level + 1 + level
return ToDenseFunction.apply(point_hierarchies, level, pyramids, input)
def feature_grids_to_spc(feature_grids, masks=None):
r"""Convert sparse feature grids to Structured Point Cloud.
Args:
feature_grids (torch.Tensor):
The sparse 3D feature grids, of shape
:math:`(\text{batch_size}, \text{feature_dim}, X, Y, Z)`
masks (optional, torch.BoolTensor):
The masks showing where the features are,
of shape :math:`(\text{batch_size}, X, Y, Z)`.
Default: a voxel is considered to hold a feature when its feature vector is not all zeros.
Returns:
(torch.ByteTensor, torch.IntTensor, torch.Tensor):
a tuple containing:
- The octree, of size :math:`(\text{num_nodes})`
- The lengths of each octree, of size :math:`(\text{batch_size})`
- The coalescent features, of the same dtype as ``feature_grids``,
of shape :math:`(\text{num_features}, \text{feature_dim})`.
"""
batch_size = feature_grids.shape[0]
feature_dim = feature_grids.shape[1]
x_dim = feature_grids.shape[2]
y_dim = feature_grids.shape[3]
z_dim = feature_grids.shape[4]
dtype = feature_grids.dtype
device = feature_grids.device
feature_grids = feature_grids.permute(0, 2, 3, 4, 1)
level = math.ceil(math.log2(max(x_dim, y_dim, z_dim)))
# We enforce a power of 2 size to make the subdivision easier
max_dim = 2 ** level
padded_feature_grids = torch.zeros(
(batch_size, max_dim, max_dim, max_dim, feature_dim),
device=device, dtype=dtype)
padded_feature_grids[:, :x_dim, :y_dim, :z_dim] = feature_grids
if masks is None:
masks = torch.any(padded_feature_grids != 0, dim=-1)
else:
assert masks.shape == feature_grids.shape[:-1]
padded_masks = torch.zeros(
(batch_size, max_dim, max_dim, max_dim),
device=device, dtype=torch.bool)
padded_masks[:, :x_dim, :y_dim, :z_dim] = masks
masks = padded_masks
bool2uint8_w = 2 ** torch.arange(8, device=device).reshape(1, 8)
octrees = []
coalescent_features = []
lengths = []
# TODO(cfujitsang): vectorize for speedup
for bs in range(batch_size):
octree = []
cur_mask = masks[bs:bs + 1]
cur_feature_grid = padded_feature_grids[bs:bs + 1]
cur_dim = max_dim
while cur_dim > 1:
cur_dim = cur_dim // 2
cur_mask = cur_mask.reshape(-1, 2, cur_dim, 2, cur_dim, 2, cur_dim)
cur_feature_grid = cur_feature_grid.reshape(
-1, 2, cur_dim, 2, cur_dim, 2, cur_dim, feature_dim)
cur_level_mask = torch.sum(cur_mask, dim=(2, 4, 6)) > 0
# indexing by masking follow naturally the morton order
cur_feature_grid = cur_feature_grid.permute(0, 1, 3, 5, 2, 4, 6, 7).reshape(
-1, 8, cur_dim, cur_dim, cur_dim, feature_dim)[cur_level_mask.reshape(-1, 8)]
cur_mask = cur_mask.permute(0, 1, 3, 5, 2, 4, 6).reshape(
-1, 8, cur_dim, cur_dim, cur_dim)[cur_level_mask.reshape(-1, 8)]
uint8_mask = bits_to_uint8(cur_level_mask.reshape(-1, 8))
octree.append(uint8_mask)
octree = torch.cat(octree, dim=0)
octrees.append(octree)
lengths.append(octree.shape[0])
coalescent_features.append(cur_feature_grid.reshape(-1, feature_dim))
octrees = torch.cat(octrees, dim=0)
lengths = torch.tensor(lengths, dtype=torch.int)
coalescent_features = torch.cat(coalescent_features, dim=0)
return octrees, lengths, coalescent_features
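# Minimal round-trip sketch (assumes a CUDA build of kaolin, since the
# scan/point kernels are CUDA-only): a single occupied voxel in an 8x8x8 grid
# yields a depth-3 octree whose packed features hold exactly that voxel's
# feature vector.
def _example_feature_grids_to_spc():
    grid = torch.zeros(1, 4, 8, 8, 8, device='cuda')  # [batch, feature_dim, X, Y, Z]
    grid[0, :, 0, 0, 0] = 1.0                         # one occupied voxel
    octrees, lengths, features = feature_grids_to_spc(grid)
    max_level, pyramids, exsum = scan_octrees(octrees, lengths)
    point_hierarchy = generate_points(octrees, pyramids, exsum)
    assert features.shape == (1, 4)                   # one leaf, feature_dim channels
    return point_hierarchy, pyramids, features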
def unbatched_query(octree, exsum, query_coords, level, with_parents=False):
r"""Query point indices from the octree.
Given a :ref:`point hierarchy<spc_points>` (implicitly encoded in ``octree``) and some coordinates,
this function will efficiently find the indices of the points in :ref:`point hierarchy<spc_points>`
corresponding to the coordinates. Returns -1 if the point does not exist.
Args:
octree (torch.ByteTensor): The octree, of shape :math:`(\text{num_bytes})`.
exsum (torch.IntTensor): The exclusive sum of the octree bytes,
of shape :math:`(\text{num_bytes} + 1)`.
See :ref:`spc_pyramids` for more details.
query_coords (torch.FloatTensor or torch.IntTensor):
A tensor of locations to sample of shape :math:`(\text{num_query}, 3)`. If the tensor is
a FloatTensor, assumes the coordinates are normalized in [-1, 1]. Otherwise if the tensor is
an IntTensor, assumes the coordinates are in the [0, 2^level] space.
level (int): The level of the octree to query from.
with_parents (bool): If True, will return an array of indices up to the specified level as opposed
to only a single level (default: False).
Returns:
pidx (torch.LongTensor): The indices into the point hierarchy of shape :math:`(\text{num_query})`.
If with_parents is True, then the shape will be :math:`(\text{num_query, level+1})`.
Examples:
>>> import kaolin
>>> points = torch.tensor([[3,2,0],[3,1,1],[3,3,3]], device='cuda', dtype=torch.short)
>>> octree = kaolin.ops.spc.unbatched_points_to_octree(points, 2)
>>> length = torch.tensor([len(octree)], dtype=torch.int32)
>>> _, _, prefix = kaolin.ops.spc.scan_octrees(octree, length)
>>> query_coords = torch.tensor([[3,2,0]], device='cuda', dtype=torch.short)
>>> kaolin.ops.spc.unbatched_query(octree, prefix, query_coords, 2, with_parents=False)
tensor([5], device='cuda:0')
>>> kaolin.ops.spc.unbatched_query(octree, prefix, query_coords, 2, with_parents=True)
tensor([[0, 2, 5]], device='cuda:0')
"""
if not query_coords.is_floating_point():
input_coords = (query_coords.float() / (2**level)) * 2.0 - 1.0
else:
input_coords = query_coords
if with_parents:
return _C.ops.spc.query_multiscale_cuda(octree.contiguous(), exsum.contiguous(),
input_coords.contiguous(), level).long()
else:
return _C.ops.spc.query_cuda(octree.contiguous(), exsum.contiguous(),
input_coords.contiguous(), level).long()
def unbatched_get_level_points(point_hierarchy, pyramid, level):
r"""Returns the point set for the given level from the point hierarchy.
Args:
point_hierarchy (torch.ShortTensor):
The point hierarchy of shape :math:`(\text{num_points}, 3)`.
See :ref:`point_hierarchies <spc_points>` for a detailed description.
pyramid (torch.LongTensor):
The pyramid of shape :math:`(2, \text{max_level}+2)`
See :ref:`pyramids <spc_pyramids>` for a detailed description.
level (int): The level of the point hierarchy to retrieve.
Returns:
(torch.ShortTensor): The pointset of shape :math:`(\text{num_points_on_level}, 3)`.
"""
return point_hierarchy[pyramid[1, level]:pyramid[1, level + 1]]
def unbatched_make_dual(point_hierarchy, pyramid):
r"""Creates the dual of the octree given the point hierarchy and pyramid.
Each node of the primary octree (represented as the :ref:`point_hierarchies <spc_points>`)
can be thought of as a voxel with 8 corners. The dual of the octree represents the corners
of the primary octree nodes as another tree of nodes with a hierarchy of points and a pyramid.
The mapping from the primary octree nodes to the nodes in the dual tree can be obtained through
trinkets which can be created from ``make_trinkets``.
Args:
point_hierarchy (torch.ShortTensor):
The point hierarchy of shape :math:`(\text{num_points}, 3)`.
See :ref:`point_hierarchies <spc_points>` for a detailed description.
pyramid (torch.LongTensor):
The pyramid of shape :math:`(2, \text{max_level}+2)`
See :ref:`pyramids <spc_pyramids>` for a detailed description.
Returns:
(torch.ShortTensor, torch.LongTensor):
- The point hierarchy of the dual octree of shape :math:`(\text{num_dual_points}, 3)`.
- The dual pyramid of shape :math:`(2, \text{max_level}+2)`
Examples:
>>> import kaolin
>>> points = torch.tensor([[0, 0, 0], [0, 0, 1], [0, 1, 0]], device='cuda', dtype=torch.int16)
>>> level = 1
>>> octree = kaolin.ops.spc.unbatched_points_to_octree(points, level)
>>> length = torch.tensor([len(octree)], dtype=torch.int32)
>>> _, pyramid, prefix = kaolin.ops.spc.scan_octrees(octree, length)
>>> point_hierarchy = kaolin.ops.spc.generate_points(octree, pyramid, prefix)
>>> point_hierarchy_dual, pyramid_dual = kaolin.ops.spc.unbatched_make_dual(point_hierarchy, pyramid[0])
>>> kaolin.ops.spc.unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, 0) # the corners of the root
tensor([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]], device='cuda:0', dtype=torch.int16)
>>> kaolin.ops.spc.unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, 1) # the corners of the 1st level
tensor([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
[0, 0, 2],
[0, 1, 2],
[1, 0, 2],
[1, 1, 2],
[0, 2, 0],
[0, 2, 1],
[1, 2, 0],
[1, 2, 1]], device='cuda:0', dtype=torch.int16)
"""
pyramid_dual = torch.zeros_like(pyramid)
point_hierarchy_dual = []
for i in range(pyramid.shape[1] - 1):
corners = points_to_corners(unbatched_get_level_points(point_hierarchy, pyramid, i)).reshape(-1, 3)
points_dual = torch.unique(corners, dim=0)
sort_idxes = points_to_morton(points_dual).argsort()
points_dual = points_dual[sort_idxes]
point_hierarchy_dual.append(points_dual)
pyramid_dual[0, i] = len(point_hierarchy_dual[i])
if i > 0:
pyramid_dual[1, i] += pyramid_dual[:, i - 1].sum()
pyramid_dual[1, pyramid.shape[1] - 1] += pyramid_dual[:, pyramid.shape[1] - 2].sum()
point_hierarchy_dual = torch.cat(point_hierarchy_dual, dim=0)
return point_hierarchy_dual, pyramid_dual
def unbatched_make_trinkets(point_hierarchy, pyramid, point_hierarchy_dual, pyramid_dual):
r"""Creates the trinkets for the dual octree.
The trinkets are indirection pointers (in practice, indices) from the nodes of the primary octree
to the nodes of the dual octree. The nodes of the dual octree represent the corners of the voxels
defined by the primary octree. The trinkets are useful for accessing values stored on the corners
(like for example a signed distance function) and interpolating them from the nodes of the primary
octree.
Args:
point_hierarchy (torch.ShortTensor): The point hierarchy of shape :math:`(\text{num_points}, 3)`.
pyramid (torch.LongTensor): The pyramid of shape :math:`(2, \text{max_level}+2)`
point_hierarchy_dual (torch.ShortTensor): The point hierarchy of the dual octree of shape
:math:`(\text{num_dual_points}, 3)`.
pyramid_dual (torch.LongTensor): The dual pyramid of shape :math:`(2, \text{max_level}+2)`
Returns:
(torch.IntTensor, torch.IntTensor):
- The trinkets of shape :math:`(\text{num_points}, 8)`.
- Indirection pointers to the parents of shape :math:`(\text{num_points})`.
"""
device = point_hierarchy.device
trinkets = []
parents = []
# At a high level... the goal of this algorithm is to create a table which maps from the primary
# octree of voxels to the dual octree of corners, while also keeping track of parents.
# It does so by constructing a lookup table which maps morton codes of the source octree corners
# to the index of the destination (dual), then using plain Python dicts to do the table lookups.
# It's a CPU-bound solution that would be much faster on the GPU, but it works well enough.
for lvl in range(pyramid_dual.shape[1] - 1):
# The source (primary octree) is sorted in morton order by construction
points = unbatched_get_level_points(point_hierarchy, pyramid, lvl)
corners = points_to_corners(points)
mt_src = points_to_morton(corners.reshape(-1, 3))
# The destination (dual octree) needs to be sorted too
points_dual = unbatched_get_level_points(point_hierarchy_dual, pyramid_dual, lvl)
mt_dest = points_to_morton(points_dual)
# Uses arange to associate from the morton codes to the point index. The point index is indexed from 0.
lut = {k: i for i, k in enumerate(mt_dest.cpu().numpy())}
if lvl == 0:
parents.append(torch.tensor([-1], device=device, dtype=torch.int))  # the root has no parent
else:
# Dividing by 2 will yield the morton code of the parent
pc = torch.floor(points / 2.0).short()
# Morton of the parents (point_hierarchy_index -> parent_morton)
mt_pc_parent = points_to_morton(pc)
# Morton of the children (point_hierarchy_index -> self_morton)
mt_pc_child = points_to_morton(points)
points_parents = unbatched_get_level_points(point_hierarchy, pyramid, lvl - 1)
# point_hierarchy_index (i-1) -> parent_morton
mt_parents = points_to_morton(points_parents)
# parent_morton -> point_hierarchy_index
plut = {k: i for i, k in enumerate(mt_parents.cpu().numpy())}
pc_idx = [plut[i] for i in mt_pc_parent.cpu().numpy()]
parents.append(torch.tensor(pc_idx, device=device, dtype=torch.int) +
pyramid[1, lvl - 1])
idx = [lut[i] for i in mt_src.cpu().numpy()]
trinkets.extend(idx)
# Trinkets are relative to the beginning of each pyramid base
trinkets = torch.tensor(trinkets, device=device, dtype=torch.int).reshape(-1, 8)
parents = torch.cat(parents, dim=0)
return trinkets, parents
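# Minimal sketch tying the dual octree and trinkets together (assumes CUDA and
# reuses ``kaolin.ops.spc.unbatched_points_to_octree`` from the public API, as
# in the doctests above; the two input points are arbitrary): trinkets give,
# for every voxel of the primary octree, the indices of its 8 corners in the
# dual hierarchy.
def _example_trinkets():
    import kaolin
    points = torch.tensor([[0, 0, 0], [0, 1, 1]], device='cuda', dtype=torch.int16)
    octree = kaolin.ops.spc.unbatched_points_to_octree(points, 1)
    lengths = torch.tensor([len(octree)], dtype=torch.int32)
    _, pyramid, exsum = kaolin.ops.spc.scan_octrees(octree, lengths)
    point_hierarchy = kaolin.ops.spc.generate_points(octree, pyramid, exsum)
    dual, pyramid_dual = unbatched_make_dual(point_hierarchy, pyramid[0])
    trinkets, parents = unbatched_make_trinkets(point_hierarchy, pyramid[0],
                                                dual, pyramid_dual)
    assert trinkets.shape[-1] == 8                    # 8 corner indices per voxel
    return trinkets, parents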
| 45.83691
| 123
| 0.63633
|
e64d4d809b549dc749581acd1377b80c3137299a
| 480
|
py
|
Python
|
CTFd/utils/security/signing.py
|
amanbansal2709/ctfd
|
941335a5e205ca818ce1758076858b628e4fa05b
|
[
"Apache-2.0"
] | null | null | null |
CTFd/utils/security/signing.py
|
amanbansal2709/ctfd
|
941335a5e205ca818ce1758076858b628e4fa05b
|
[
"Apache-2.0"
] | null | null | null |
CTFd/utils/security/signing.py
|
amanbansal2709/ctfd
|
941335a5e205ca818ce1758076858b628e4fa05b
|
[
"Apache-2.0"
] | 1
|
2021-12-23T14:11:15.000Z
|
2021-12-23T14:11:15.000Z
|
from flask import current_app
from itsdangerous.url_safe import URLSafeTimedSerializer
from itsdangerous.exc import BadTimeSignature, SignatureExpired, BadSignature # noqa: F401
def serialize(data):
secret = current_app.config['SECRET_KEY']
s = URLSafeTimedSerializer(secret)
return s.dumps(data)
def unserialize(data, max_age=432000):
secret = current_app.config['SECRET_KEY']
s = URLSafeTimedSerializer(secret)
return s.loads(data, max_age=max_age)
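# Minimal usage sketch (hypothetical Flask app; any app with SECRET_KEY set
# works). The 432000-second default max_age above is five days; the shorter
# value here is arbitrary.
def _example_roundtrip():
    from flask import Flask
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'        # placeholder secret
    with app.app_context():
        token = serialize({'user_id': 1})
        assert unserialize(token, max_age=3600) == {'user_id': 1}
    return token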
| 30
| 91
| 0.775
|
2ffd66ed59875c79c3bb3e39e1ffe23d6eaaf3e1
| 4,670
|
py
|
Python
|
parlai/tasks/vqa_v2/agents.py
|
lifelongeek/KBKAIST_Chatbot
|
4766e6ee61a10e3719b7608c5777430ddfd277f9
|
[
"BSD-3-Clause"
] | 2
|
2017-10-06T09:56:49.000Z
|
2017-10-06T09:57:03.000Z
|
parlai/tasks/vqa_v2/agents.py
|
gmkim90/KBKAIST_Chatbot
|
4766e6ee61a10e3719b7608c5777430ddfd277f9
|
[
"BSD-3-Clause"
] | null | null | null |
parlai/tasks/vqa_v2/agents.py
|
gmkim90/KBKAIST_Chatbot
|
4766e6ee61a10e3719b7608c5777430ddfd277f9
|
[
"BSD-3-Clause"
] | 2
|
2017-10-06T09:57:04.000Z
|
2018-11-08T13:45:47.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.agents import Teacher
from parlai.core.image_featurizers import ImageLoader
from .build import build, buildImage
import json
import random
import os
def _path(opt):
build(opt)
buildImage(opt)
dt = opt['datatype'].split(':')[0]
if dt == 'train':
ques_suffix = 'v2_OpenEnded_mscoco_train2014'
annotation_suffix = 'v2_mscoco_train2014'
img_suffix = os.path.join('train2014', 'COCO_train2014_')
elif dt == 'valid':
ques_suffix = 'v2_OpenEnded_mscoco_val2014'
annotation_suffix = 'v2_mscoco_val2014'
img_suffix = os.path.join('val2014', 'COCO_val2014_')
elif dt == 'test':
ques_suffix = 'v2_OpenEnded_mscoco_test2015'
annotation_suffix = 'None'
img_suffix = os.path.join('test2015', 'COCO_test2015_')
else:
raise RuntimeError('Not valid datatype.')
data_path = os.path.join(opt['datapath'], 'VQA-v2',
ques_suffix + '_questions.json')
annotation_path = os.path.join(opt['datapath'], 'VQA-v2',
annotation_suffix + '_annotations.json')
image_path = os.path.join(opt['datapath'], 'COCO-IMG', img_suffix)
return data_path, annotation_path, image_path
class OeTeacher(Teacher):
"""VQA v2.0 Open-Ended teacher, which loads the json VQA data and
implements its own `act` method for interacting with student agent.
agent.
"""
def __init__(self, opt, shared=None):
super().__init__(opt)
self.datatype = opt['datatype']
data_path, annotation_path, self.image_path = _path(opt)
if shared and 'ques' in shared:
self.ques = shared['ques']
if 'annotation' in shared:
self.annotation = shared['annotation']
else:
self._setup_data(data_path, annotation_path)
self.len = len(self.ques['questions'])
# for ordered data in batch mode (especially, for validation and
# testing), each teacher in the batch gets a start index and a step
# size so they all process disparate sets of the data
self.step_size = opt.get('batchsize', 1)
self.data_offset = opt.get('batchindex', 0)
self.image_loader = ImageLoader(opt)
self.reset()
def __len__(self):
return self.len
def reset(self):
# Reset the dialog so that it is at the start of the epoch,
# and all metrics are reset.
super().reset()
self.lastY = None
self.episode_idx = self.data_offset - self.step_size
def observe(self, observation):
"""Process observation for metrics."""
if self.lastY is not None:
self.metrics.update(observation, self.lastY)
self.lastY = None
return observation
def act(self):
if self.datatype == 'train':
self.episode_idx = random.randrange(self.len)
else:
self.episode_idx = (self.episode_idx + self.step_size) % len(self)
if self.episode_idx == len(self) - self.step_size:
self.epochDone = True
qa = self.ques['questions'][self.episode_idx]
question = qa['question']
image_id = qa['image_id']
img_path = self.image_path + '%012d.jpg' % (image_id)
action = {
'image': self.image_loader.load(img_path),
'text': question,
'episode_done': True
}
if not self.datatype.startswith('test'):
anno = self.annotation['annotations'][self.episode_idx]
self.lastY = [ans['answer'] for ans in anno['answers']]
if self.datatype.startswith('train'):
action['labels'] = self.lastY
return action
def share(self):
shared = super().share()
shared['ques'] = self.ques
if hasattr(self, 'annotation'):
shared['annotation'] = self.annotation
return shared
def _setup_data(self, data_path, annotation_path):
print('loading: ' + data_path)
with open(data_path) as data_file:
self.ques = json.load(data_file)
if self.datatype != 'test':
print('loading: ' + annotation_path)
with open(annotation_path) as data_file:
self.annotation = json.load(data_file)
class DefaultTeacher(OeTeacher):
pass
| 33.357143
| 78
| 0.618844
|
5c309e1d54a770dc1b8e90c70ab330f354215006
| 35,348
|
py
|
Python
|
src/sage/modules/vector_space_morphism.py
|
saraedum/sage-renamed
|
d2da67b14da2ad766a5906425d60d43a3b3e1270
|
[
"BSL-1.0"
] | 1
|
2016-11-04T16:31:48.000Z
|
2016-11-04T16:31:48.000Z
|
src/sage/modules/vector_space_morphism.py
|
rwst/sage
|
a9d274b9338e6ee24bf35ea8d25875507e51e455
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modules/vector_space_morphism.py
|
rwst/sage
|
a9d274b9338e6ee24bf35ea8d25875507e51e455
|
[
"BSL-1.0"
] | null | null | null |
r"""
Vector Space Morphisms (aka Linear Transformations)
AUTHOR:
- Rob Beezer: (2011-06-29)
A vector space morphism is a homomorphism between vector spaces, better known
as a linear transformation. These are a specialization of Sage's free module
homomorphisms. (A free module is like a vector space, but with scalars from a
ring that may not be a field.) So references to free modules in the
documentation or error messages should be understood as simply reflecting a
more general situation.
Creation
--------
The constructor :func:`linear_transformation` is designed to accept a
variety of inputs that can define a linear transformation. See the
documentation of the function for all the possibilities. Here we give two.
First a matrix representation. By default input matrices are understood
to act on vectors placed to left of the matrix. Optionally, an input
matrix can be described as acting on vectors placed to the right. ::
sage: A = matrix(QQ, [[-1, 2, 3], [4, 2, 0]])
sage: phi = linear_transformation(A)
sage: phi
Vector space morphism represented by the matrix:
[-1 2 3]
[ 4 2 0]
Domain: Vector space of dimension 2 over Rational Field
Codomain: Vector space of dimension 3 over Rational Field
sage: phi([2, -3])
(-14, -2, 6)
A symbolic function can be used to specify the "rule" for a
linear transformation, along with explicit descriptions of the
domain and codomain. ::
sage: F = Integers(13)
sage: D = F^3
sage: C = F^2
sage: x, y, z = var('x y z')
sage: f(x, y, z) = [2*x + 3*y + 5*z, x + z]
sage: rho = linear_transformation(D, C, f)
sage: f(1, 2, 3)
(23, 4)
sage: rho([1, 2, 3])
(10, 4)
A "vector space homspace" is the set of all linear transformations
between two vector spaces. Various input can be coerced into a
homspace to create a linear transformation. See
:mod:`sage.modules.vector_space_homspace` for more. ::
sage: D = QQ^4
sage: C = QQ^2
sage: hom_space = Hom(D, C)
sage: images = [[1, 3], [2, -1], [4, 0], [3, 7]]
sage: zeta = hom_space(images)
sage: zeta
Vector space morphism represented by the matrix:
[ 1 3]
[ 2 -1]
[ 4 0]
[ 3 7]
Domain: Vector space of dimension 4 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
A homomorphism may also be created via a method on the domain. ::
sage: F = QQ[sqrt(3)]
sage: a = F.gen(0)
sage: D = F^2
sage: C = F^2
sage: A = matrix(F, [[a, 1], [2*a, 2]])
sage: psi = D.hom(A, C)
sage: psi
Vector space morphism represented by the matrix:
[ sqrt3 1]
[2*sqrt3 2]
Domain: Vector space of dimension 2 over Number Field in sqrt3 with defining polynomial x^2 - 3
Codomain: Vector space of dimension 2 over Number Field in sqrt3 with defining polynomial x^2 - 3
sage: psi([1, 4])
(9*sqrt3, 9)
Properties
----------
Many natural properties of a linear transformation can be computed.
Some of these are more general methods of objects in the classes
:class:`sage.modules.free_module_morphism.FreeModuleMorphism` and
:class:`sage.modules.matrix_morphism.MatrixMorphism`.
Values are computed in a natural way; an inverse image of an
element can be computed with the ``lift()`` method, when the inverse
image actually exists. ::
sage: A = matrix(QQ, [[1,2], [2,4], [3,6]])
sage: phi = linear_transformation(A)
sage: phi([1,2,0])
(5, 10)
sage: phi.lift([10, 20])
(10, 0, 0)
sage: phi.lift([100, 100])
Traceback (most recent call last):
...
ValueError: element is not in the image
Images and pre-images can be computed as vector spaces. ::
sage: A = matrix(QQ, [[1,2], [2,4], [3,6]])
sage: phi = linear_transformation(A)
sage: phi.image()
Vector space of degree 2 and dimension 1 over Rational Field
Basis matrix:
[1 2]
sage: phi.inverse_image( (QQ^2).span([[1,2]]) )
Vector space of degree 3 and dimension 3 over Rational Field
Basis matrix:
[1 0 0]
[0 1 0]
[0 0 1]
sage: phi.inverse_image( (QQ^2).span([[1,1]]) )
Vector space of degree 3 and dimension 2 over Rational Field
Basis matrix:
[ 1 0 -1/3]
[ 0 1 -2/3]
Injectivity and surjectivity can be checked. ::
sage: A = matrix(QQ, [[1,2], [2,4], [3,6]])
sage: phi = linear_transformation(A)
sage: phi.is_injective()
False
sage: phi.is_surjective()
False
Restrictions and Representations
--------------------------------
It is possible to restrict the domain and codomain of a linear
transformation to make a new linear transformation. We will use
those commands to replace the domain and codomain by equal vector
spaces, but with alternate bases. The point here is that the
matrix representation used to represent linear transformations are
relative to the bases of both the domain and codomain. ::
sage: A = graphs.PetersenGraph().adjacency_matrix()
sage: V = QQ^10
sage: phi = linear_transformation(V, V, A)
sage: phi
Vector space morphism represented by the matrix:
[0 1 0 0 1 1 0 0 0 0]
[1 0 1 0 0 0 1 0 0 0]
[0 1 0 1 0 0 0 1 0 0]
[0 0 1 0 1 0 0 0 1 0]
[1 0 0 1 0 0 0 0 0 1]
[1 0 0 0 0 0 0 1 1 0]
[0 1 0 0 0 0 0 0 1 1]
[0 0 1 0 0 1 0 0 0 1]
[0 0 0 1 0 1 1 0 0 0]
[0 0 0 0 1 0 1 1 0 0]
Domain: Vector space of dimension 10 over Rational Field
Codomain: Vector space of dimension 10 over Rational Field
sage: B1 = [V.gen(i) + V.gen(i+1) for i in range(9)] + [V.gen(9)]
sage: B2 = [V.gen(0)] + [-V.gen(i-1) + V.gen(i) for i in range(1,10)]
sage: D = V.subspace_with_basis(B1)
sage: C = V.subspace_with_basis(B2)
sage: rho = phi.restrict_codomain(C)
sage: zeta = rho.restrict_domain(D)
sage: zeta
Vector space morphism represented by the matrix:
[6 5 4 3 3 2 1 0 0 0]
[6 5 4 3 2 2 2 1 0 0]
[6 6 5 4 3 2 2 2 1 0]
[6 5 5 4 3 2 2 2 2 1]
[6 4 4 4 3 3 3 3 2 1]
[6 5 4 4 4 4 4 4 3 1]
[6 6 5 4 4 4 3 3 3 2]
[6 6 6 5 4 4 2 1 1 1]
[6 6 6 6 5 4 3 1 0 0]
[3 3 3 3 3 2 2 1 0 0]
Domain: Vector space of degree 10 and dimension 10 over Rational Field
User basis matrix:
[1 1 0 0 0 0 0 0 0 0]
[0 1 1 0 0 0 0 0 0 0]
[0 0 1 1 0 0 0 0 0 0]
[0 0 0 1 1 0 0 0 0 0]
[0 0 0 0 1 1 0 0 0 0]
[0 0 0 0 0 1 1 0 0 0]
[0 0 0 0 0 0 1 1 0 0]
[0 0 0 0 0 0 0 1 1 0]
[0 0 0 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 0 1]
Codomain: Vector space of degree 10 and dimension 10 over Rational Field
User basis matrix:
[ 1 0 0 0 0 0 0 0 0 0]
[-1 1 0 0 0 0 0 0 0 0]
[ 0 -1 1 0 0 0 0 0 0 0]
[ 0 0 -1 1 0 0 0 0 0 0]
[ 0 0 0 -1 1 0 0 0 0 0]
[ 0 0 0 0 -1 1 0 0 0 0]
[ 0 0 0 0 0 -1 1 0 0 0]
[ 0 0 0 0 0 0 -1 1 0 0]
[ 0 0 0 0 0 0 0 -1 1 0]
[ 0 0 0 0 0 0 0 0 -1 1]
An endomorphism is a linear transformation with an equal domain and codomain,
and here each needs to have the same basis. We are using a
matrix that has well-behaved eigenvalues, as part of showing that these
do not change as the representation changes. ::
sage: A = graphs.PetersenGraph().adjacency_matrix()
sage: V = QQ^10
sage: phi = linear_transformation(V, V, A)
sage: phi.eigenvalues()
[3, -2, -2, -2, -2, 1, 1, 1, 1, 1]
sage: B1 = [V.gen(i) + V.gen(i+1) for i in range(9)] + [V.gen(9)]
sage: C = V.subspace_with_basis(B1)
sage: zeta = phi.restrict(C)
sage: zeta
Vector space morphism represented by the matrix:
[ 1 0 1 -1 2 -1 2 -2 2 -2]
[ 1 0 1 0 0 0 1 0 0 0]
[ 0 1 0 1 0 0 0 1 0 0]
[ 1 -1 2 -1 2 -2 2 -2 3 -2]
[ 2 -2 2 -1 1 -1 1 0 1 0]
[ 1 0 0 0 0 0 0 1 1 0]
[ 0 1 0 0 0 1 -1 1 0 2]
[ 0 0 1 0 0 2 -1 1 -1 2]
[ 0 0 0 1 0 1 1 0 0 0]
[ 0 0 0 0 1 -1 2 -1 1 -1]
Domain: Vector space of degree 10 and dimension 10 over Rational Field
User basis matrix:
[1 1 0 0 0 0 0 0 0 0]
[0 1 1 0 0 0 0 0 0 0]
[0 0 1 1 0 0 0 0 0 0]
[0 0 0 1 1 0 0 0 0 0]
[0 0 0 0 1 1 0 0 0 0]
[0 0 0 0 0 1 1 0 0 0]
[0 0 0 0 0 0 1 1 0 0]
[0 0 0 0 0 0 0 1 1 0]
[0 0 0 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 0 1]
Codomain: Vector space of degree 10 and dimension 10 over Rational Field
User basis matrix:
[1 1 0 0 0 0 0 0 0 0]
[0 1 1 0 0 0 0 0 0 0]
[0 0 1 1 0 0 0 0 0 0]
[0 0 0 1 1 0 0 0 0 0]
[0 0 0 0 1 1 0 0 0 0]
[0 0 0 0 0 1 1 0 0 0]
[0 0 0 0 0 0 1 1 0 0]
[0 0 0 0 0 0 0 1 1 0]
[0 0 0 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 0 1]
sage: zeta.eigenvalues()
[3, -2, -2, -2, -2, 1, 1, 1, 1, 1]
Equality
--------
Equality of linear transformations is a bit nuanced. The equality operator
``==`` tests if two linear transformations have equal matrix representations,
while we determine if two linear transformations are the same function with the
``.is_equal_function()`` method. Notice in this example that the function
never changes, just the representations. ::
sage: f = lambda x: vector(QQ, [x[1], x[0]+x[1], x[0]])
sage: H = Hom(QQ^2, QQ^3)
sage: phi = H(f)
sage: rho = linear_transformation(QQ^2, QQ^3, matrix(QQ,2, 3, [[0,1,1], [1,1,0]]))
sage: phi == rho
True
sage: U = (QQ^2).subspace_with_basis([[1, 2], [-3, 1]])
sage: V = (QQ^3).subspace_with_basis([[0, 1, 0], [2, 3, 1], [-1, 1, 6]])
sage: K = Hom(U, V)
sage: zeta = K(f)
sage: zeta == phi
False
sage: zeta.is_equal_function(phi)
True
sage: zeta.is_equal_function(rho)
True
TESTS::
sage: V = QQ^2
sage: H = Hom(V, V)
sage: f = H([V.1,-2*V.0])
sage: loads(dumps(f))
Vector space morphism represented by the matrix:
[ 0 1]
[-2 0]
Domain: Vector space of dimension 2 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
sage: loads(dumps(f)) == f
True
"""
from __future__ import absolute_import
####################################################################################
# Copyright (C) 2011 Rob Beezer <beezer@ups.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
####################################################################################
import sage.modules.matrix_morphism as matrix_morphism
import sage.modules.free_module_morphism as free_module_morphism
from . import vector_space_homspace
from sage.structure.element import is_Matrix
def linear_transformation(arg0, arg1=None, arg2=None, side='left'):
r"""
Create a linear transformation from a variety of possible inputs.
FORMATS:
In the following, ``D`` and ``C`` are vector spaces over
the same field that are the domain and codomain
(respectively) of the linear transformation.
``side`` is a keyword that is either 'left' or 'right'.
When a matrix is used to specify a linear transformation,
as in the first two call formats below, you may specify
if the function is given by matrix multiplication with
the vector on the left, or the vector on the right.
The default is 'left'. Internally representations are
always carried as the 'left' version, and the default
text representation is this version. However, the matrix
representation may be obtained as either version, no matter
how it is created.
- ``linear_transformation(A, side='left')``
Where ``A`` is a matrix. The domain and codomain are inferred
from the dimension of the matrix and the base ring of the matrix.
The base ring must be a field, or have its fraction field implemented
in Sage.
- ``linear_transformation(D, C, A, side='left')``
``A`` is a matrix that behaves as above. However, now the domain
and codomain are given explicitly. The matrix is checked for
compatibility with the domain and codomain. Additionally, the
domain and codomain may be supplied with alternate ("user") bases
and the matrix is interpreted as being a representation relative
to those bases.
- ``linear_transformation(D, C, f)``
``f`` is any function that can be applied to the basis elements of the
domain and that produces elements of the codomain. The linear
transformation returned is the unique linear transformation that
extends this mapping on the basis elements. ``f`` may come from a
function defined by a Python ``def`` statement, or may be defined as a
``lambda`` function.
Alternatively, ``f`` may be specified by a callable symbolic function,
see the examples below for a demonstration.
- ``linear_transformation(D, C, images)``
``images`` is a list, or tuple, of codomain elements, equal in number
to the size of the basis of the domain. Each basis element of the domain
is mapped to the corresponding element of the ``images`` list, and the
linear transformation returned is the unique linear transformation that
extends this mapping.
OUTPUT:
A linear transformation described by the input. This is a
"vector space morphism", an object of the class
:class:`sage.modules.vector_space_morphism`.
EXAMPLES:
We can define a linear transformation with just a matrix, understood to
act on a vector placed on one side or the other. The field for the
vector spaces used as domain and codomain is obtained from the base
ring of the matrix, possibly promoting to a fraction field. ::
sage: A = matrix(ZZ, [[1, -1, 4], [2, 0, 5]])
sage: phi = linear_transformation(A)
sage: phi
Vector space morphism represented by the matrix:
[ 1 -1 4]
[ 2 0 5]
Domain: Vector space of dimension 2 over Rational Field
Codomain: Vector space of dimension 3 over Rational Field
sage: phi([1/2, 5])
(21/2, -1/2, 27)
sage: B = matrix(Integers(7), [[1, 2, 1], [3, 5, 6]])
sage: rho = linear_transformation(B, side='right')
sage: rho
Vector space morphism represented by the matrix:
[1 3]
[2 5]
[1 6]
Domain: Vector space of dimension 3 over Ring of integers modulo 7
Codomain: Vector space of dimension 2 over Ring of integers modulo 7
sage: rho([2, 4, 6])
(2, 6)
We can define a linear transformation with a matrix, while explicitly
giving the domain and codomain. Matrix entries will be coerced into the
common field of scalars for the vector spaces. ::
sage: D = QQ^3
sage: C = QQ^2
sage: A = matrix([[1, 7], [2, -1], [0, 5]])
sage: A.parent()
Full MatrixSpace of 3 by 2 dense matrices over Integer Ring
sage: zeta = linear_transformation(D, C, A)
sage: zeta.matrix().parent()
Full MatrixSpace of 3 by 2 dense matrices over Rational Field
sage: zeta
Vector space morphism represented by the matrix:
[ 1 7]
[ 2 -1]
[ 0 5]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
Matrix representations are relative to the bases for the domain
and codomain. ::
sage: u = vector(QQ, [1, -1])
sage: v = vector(QQ, [2, 3])
sage: D = (QQ^2).subspace_with_basis([u, v])
sage: x = vector(QQ, [2, 1])
sage: y = vector(QQ, [-1, 4])
sage: C = (QQ^2).subspace_with_basis([x, y])
sage: A = matrix(QQ, [[2, 5], [3, 7]])
sage: psi = linear_transformation(D, C, A)
sage: psi
Vector space morphism represented by the matrix:
[2 5]
[3 7]
Domain: Vector space of degree 2 and dimension 2 over Rational Field
User basis matrix:
[ 1 -1]
[ 2 3]
Codomain: Vector space of degree 2 and dimension 2 over Rational Field
User basis matrix:
[ 2 1]
[-1 4]
sage: psi(u) == 2*x + 5*y
True
sage: psi(v) == 3*x + 7*y
True
Functions that act on the domain may be used to compute images of
the domain's basis elements, and this mapping can be extended to
a unique linear transformation. The function may be a Python
function (via ``def`` or ``lambda``) or a Sage symbolic function. ::
sage: def g(x):
....: return vector(QQ, [2*x[0]+x[2], 5*x[1]])
sage: phi = linear_transformation(QQ^3, QQ^2, g)
sage: phi
Vector space morphism represented by the matrix:
[2 0]
[0 5]
[1 0]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
sage: f = lambda x: vector(QQ, [2*x[0]+x[2], 5*x[1]])
sage: rho = linear_transformation(QQ^3, QQ^2, f)
sage: rho
Vector space morphism represented by the matrix:
[2 0]
[0 5]
[1 0]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
sage: x, y, z = var('x y z')
sage: h(x, y, z) = [2*x + z, 5*y]
sage: zeta = linear_transformation(QQ^3, QQ^2, h)
sage: zeta
Vector space morphism represented by the matrix:
[2 0]
[0 5]
[1 0]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
sage: phi == rho
True
sage: rho == zeta
True
We create a linear transformation relative to non-standard bases,
and capture its representation relative to standard bases. With this, we
can build functions that create the same linear transformation relative
to the nonstandard bases. ::
sage: u = vector(QQ, [1, -1])
sage: v = vector(QQ, [2, 3])
sage: D = (QQ^2).subspace_with_basis([u, v])
sage: x = vector(QQ, [2, 1])
sage: y = vector(QQ, [-1, 4])
sage: C = (QQ^2).subspace_with_basis([x, y])
sage: A = matrix(QQ, [[2, 5], [3, 7]])
sage: psi = linear_transformation(D, C, A)
sage: rho = psi.restrict_codomain(QQ^2).restrict_domain(QQ^2)
sage: rho.matrix()
[ -4/5 97/5]
[ 1/5 -13/5]
sage: f = lambda x: vector(QQ, [(-4/5)*x[0] + (1/5)*x[1], (97/5)*x[0] + (-13/5)*x[1]])
sage: psi = linear_transformation(D, C, f)
sage: psi.matrix()
[2 5]
[3 7]
sage: s, t = var('s t')
sage: h(s, t) = [(-4/5)*s + (1/5)*t, (97/5)*s + (-13/5)*t]
sage: zeta = linear_transformation(D, C, h)
sage: zeta.matrix()
[2 5]
[3 7]
Finally, we can give an explicit list of images for the basis
elements of the domain. ::
sage: x = polygen(QQ)
sage: F.<a> = NumberField(x^3+x+1)
sage: u = vector(F, [1, a, a^2])
sage: v = vector(F, [a, a^2, 2])
sage: w = u + v
sage: D = F^3
sage: C = F^3
sage: rho = linear_transformation(D, C, [u, v, w])
sage: rho.matrix()
[ 1 a a^2]
[ a a^2 2]
[ a + 1 a^2 + a a^2 + 2]
sage: C = (F^3).subspace_with_basis([u, v])
sage: D = (F^3).subspace_with_basis([u, v])
sage: psi = linear_transformation(C, D, [u+v, u-v])
sage: psi.matrix()
[ 1 1]
[ 1 -1]
TESTS:
We test some bad inputs. First, the wrong things in the wrong places. ::
sage: linear_transformation('junk')
Traceback (most recent call last):
...
TypeError: first argument must be a matrix or a vector space, not junk
sage: linear_transformation(QQ^2, QQ^3, 'stuff')
Traceback (most recent call last):
...
TypeError: third argument must be a matrix, function, or list of images, not stuff
sage: linear_transformation(QQ^2, 'garbage')
Traceback (most recent call last):
...
TypeError: if first argument is a vector space, then second argument must be a vector space, not garbage
sage: linear_transformation(QQ^2, Integers(7)^2)
Traceback (most recent call last):
...
TypeError: vector spaces must have the same field of scalars, not Rational Field and Ring of integers modulo 7
Matrices must be over a field (or a ring that can be promoted to a field),
and of the right size. ::
sage: linear_transformation(matrix(Integers(6), [[2, 3],[4, 5]]))
Traceback (most recent call last):
...
TypeError: matrix must have entries from a field, or a ring with a fraction field, not Ring of integers modulo 6
sage: A = matrix(QQ, 3, 4, range(12))
sage: linear_transformation(QQ^4, QQ^4, A)
Traceback (most recent call last):
...
TypeError: domain dimension is incompatible with matrix size
sage: linear_transformation(QQ^3, QQ^3, A, side='right')
Traceback (most recent call last):
...
TypeError: domain dimension is incompatible with matrix size
sage: linear_transformation(QQ^3, QQ^3, A)
Traceback (most recent call last):
...
TypeError: codomain dimension is incompatible with matrix size
sage: linear_transformation(QQ^4, QQ^4, A, side='right')
Traceback (most recent call last):
...
TypeError: codomain dimension is incompatible with matrix size
    Lists of images can have the wrong number of entries, or contain items
    that are not really elements of the codomain. ::
sage: linear_transformation(QQ^3, QQ^2, [vector(QQ, [1,2])])
Traceback (most recent call last):
...
ValueError: number of images should equal the size of the domain's basis (=3), not 1
sage: C = (QQ^2).subspace_with_basis([vector(QQ, [1,1])])
sage: linear_transformation(QQ^1, C, [vector(QQ, [1,2])])
Traceback (most recent call last):
...
ArithmeticError: some proposed image is not in the codomain, because
element [1, 2] is not in free module
Functions may not apply properly to domain elements,
or return values outside the codomain. ::
sage: f = lambda x: vector(QQ, [x[0], x[4]])
sage: linear_transformation(QQ^3, QQ^2, f)
Traceback (most recent call last):
...
ValueError: function cannot be applied properly to some basis element because
vector index out of range
sage: f = lambda x: vector(QQ, [x[0], x[1]])
sage: C = (QQ^2).span([vector(QQ, [1, 1])])
sage: linear_transformation(QQ^2, C, f)
Traceback (most recent call last):
...
ArithmeticError: some image of the function is not in the codomain, because
element [1, 0] is not in free module
A Sage symbolic function can come in a variety of forms that are
not representative of a linear transformation. ::
sage: x, y = var('x, y')
sage: f(x, y) = [y, x, y]
sage: linear_transformation(QQ^3, QQ^3, f)
Traceback (most recent call last):
...
ValueError: symbolic function has the wrong number of inputs for domain
sage: linear_transformation(QQ^2, QQ^2, f)
Traceback (most recent call last):
...
ValueError: symbolic function has the wrong number of outputs for codomain
sage: x, y = var('x y')
sage: f(x, y) = [y, x*y]
sage: linear_transformation(QQ^2, QQ^2, f)
Traceback (most recent call last):
...
ValueError: symbolic function must be linear in all the inputs:
unable to convert y to a rational
sage: x, y = var('x y')
sage: f(x, y) = [x, 2*y]
sage: C = (QQ^2).span([vector(QQ, [1, 1])])
sage: linear_transformation(QQ^2, C, f)
Traceback (most recent call last):
...
ArithmeticError: some image of the function is not in the codomain, because
element [1, 0] is not in free module
"""
from sage.matrix.constructor import matrix
from sage.modules.module import is_VectorSpace
from sage.modules.free_module import VectorSpace
from sage.categories.homset import Hom
from sage.symbolic.ring import SR
from sage.modules.vector_callable_symbolic_dense import Vector_callable_symbolic_dense
from inspect import isfunction
    if side not in ['left', 'right']:
raise ValueError("side must be 'left' or 'right', not {0}".format(side))
if not (is_Matrix(arg0) or is_VectorSpace(arg0)):
raise TypeError('first argument must be a matrix or a vector space, not {0}'.format(arg0))
if is_Matrix(arg0):
R = arg0.base_ring()
if not R.is_field():
try:
R = R.fraction_field()
except (NotImplementedError, TypeError):
msg = 'matrix must have entries from a field, or a ring with a fraction field, not {0}'
raise TypeError(msg.format(R))
if side == 'right':
arg0 = arg0.transpose()
side = 'left'
arg2 = arg0
arg0 = VectorSpace(R, arg2.nrows())
arg1 = VectorSpace(R, arg2.ncols())
elif is_VectorSpace(arg0):
if not is_VectorSpace(arg1):
msg = 'if first argument is a vector space, then second argument must be a vector space, not {0}'
raise TypeError(msg.format(arg1))
if arg0.base_ring() != arg1.base_ring():
msg = 'vector spaces must have the same field of scalars, not {0} and {1}'
raise TypeError(msg.format(arg0.base_ring(), arg1.base_ring()))
# Now arg0 = domain D, arg1 = codomain C, and
# both are vector spaces with common field of scalars
# use these to make a VectorSpaceHomSpace
# arg2 might be a matrix that began in arg0
D = arg0
C = arg1
H = Hom(D, C, category=None)
# Examine arg2 as the "rule" for the linear transformation
# Pass on matrices, Python functions and lists to homspace call
# Convert symbolic function here, to a matrix
if is_Matrix(arg2):
if side == 'right':
arg2 = arg2.transpose()
elif isinstance(arg2, (list, tuple)):
pass
elif isfunction(arg2):
pass
elif isinstance(arg2, Vector_callable_symbolic_dense):
args = arg2.parent().base_ring()._arguments
exprs = arg2.change_ring(SR)
m = len(args)
n = len(exprs)
if m != D.degree():
raise ValueError('symbolic function has the wrong number of inputs for domain')
if n != C.degree():
raise ValueError('symbolic function has the wrong number of outputs for codomain')
arg2 = [[e.coefficient(a) for e in exprs] for a in args]
try:
arg2 = matrix(D.base_ring(), m, n, arg2)
except TypeError as e:
msg = 'symbolic function must be linear in all the inputs:\n' + e.args[0]
raise ValueError(msg)
# have matrix with respect to standard bases, now consider user bases
images = [v*arg2 for v in D.basis()]
try:
arg2 = matrix([C.coordinates(C(a)) for a in images])
except (ArithmeticError, TypeError) as e:
msg = 'some image of the function is not in the codomain, because\n' + e.args[0]
raise ArithmeticError(msg)
else:
msg = 'third argument must be a matrix, function, or list of images, not {0}'
raise TypeError(msg.format(arg2))
# arg2 now compatible with homspace H call method
# __init__ will check matrix sizes versus domain/codomain dimensions
return H(arg2)
def is_VectorSpaceMorphism(x):
r"""
Returns ``True`` if ``x`` is a vector space morphism (a linear transformation).
INPUT:
``x`` - anything
OUTPUT:
    ``True`` only if ``x`` is an instance of a vector space morphism,
    which is also known as a linear transformation.
EXAMPLES::
sage: V = QQ^2; f = V.hom([V.1,-2*V.0])
sage: sage.modules.vector_space_morphism.is_VectorSpaceMorphism(f)
True
sage: sage.modules.vector_space_morphism.is_VectorSpaceMorphism('junk')
False
"""
return isinstance(x, VectorSpaceMorphism)
class VectorSpaceMorphism(free_module_morphism.FreeModuleMorphism):
def __init__(self, homspace, A):
r"""
Create a linear transformation, a morphism between vector spaces.
INPUT:
- ``homspace`` - a homspace (of vector spaces) to serve
as a parent for the linear transformation and a home for
the domain and codomain of the morphism
- ``A`` - a matrix representing the linear transformation,
which will act on vectors placed to the left of the matrix
EXAMPLES:
Nominally, we require a homspace to hold the domain
and codomain and a matrix representation of the morphism
(linear transformation). ::
sage: from sage.modules.vector_space_homspace import VectorSpaceHomspace
sage: from sage.modules.vector_space_morphism import VectorSpaceMorphism
sage: H = VectorSpaceHomspace(QQ^3, QQ^2)
sage: A = matrix(QQ, 3, 2, range(6))
sage: zeta = VectorSpaceMorphism(H, A)
sage: zeta
Vector space morphism represented by the matrix:
[0 1]
[2 3]
[4 5]
Domain: Vector space of dimension 3 over Rational Field
Codomain: Vector space of dimension 2 over Rational Field
See the constructor,
:func:`sage.modules.vector_space_morphism.linear_transformation`
for another way to create linear transformations.
The ``.hom()`` method of a vector space will create a vector
space morphism. ::
sage: V = QQ^3; W = V.subspace_with_basis([[1,2,3], [-1,2,5/3], [0,1,-1]])
sage: phi = V.hom(matrix(QQ, 3, range(9)), codomain=W) # indirect doctest
sage: type(phi)
<class 'sage.modules.vector_space_morphism.VectorSpaceMorphism'>
A matrix may be coerced into a vector space homspace to
create a vector space morphism. ::
sage: from sage.modules.vector_space_homspace import VectorSpaceHomspace
sage: H = VectorSpaceHomspace(QQ^3, QQ^2)
sage: A = matrix(QQ, 3, 2, range(6))
sage: rho = H(A) # indirect doctest
sage: type(rho)
<class 'sage.modules.vector_space_morphism.VectorSpaceMorphism'>
"""
if not vector_space_homspace.is_VectorSpaceHomspace(homspace):
raise TypeError('homspace must be a vector space hom space, not {0}'.format(homspace))
if isinstance(A, matrix_morphism.MatrixMorphism):
A = A.matrix()
if not is_Matrix(A):
msg = 'input must be a matrix representation or another matrix morphism, not {0}'
raise TypeError(msg.format(A))
# now have a vector space homspace, and a matrix, check compatibility
if homspace.domain().dimension() != A.nrows():
raise TypeError('domain dimension is incompatible with matrix size')
if homspace.codomain().dimension() != A.ncols():
raise TypeError('codomain dimension is incompatible with matrix size')
A = homspace._matrix_space()(A)
free_module_morphism.FreeModuleMorphism.__init__(self, homspace, A)
def is_invertible(self):
r"""
Determines if the vector space morphism has an inverse.
OUTPUT:
``True`` if the vector space morphism is invertible, otherwise
``False``.
EXAMPLES:
If the dimension of the domain does not match the dimension
of the codomain, then the morphism cannot be invertible. ::
sage: V = QQ^3
sage: U = V.subspace_with_basis([V.0 + V.1, 2*V.1 + 3*V.2])
sage: phi = V.hom([U.0, U.0 + U.1, U.0 - U.1], U)
sage: phi.is_invertible()
False
An invertible linear transformation. ::
sage: A = matrix(QQ, 3, [[-3, 5, -5], [4, -7, 7], [6, -8, 10]])
sage: A.determinant()
2
sage: H = Hom(QQ^3, QQ^3)
sage: rho = H(A)
sage: rho.is_invertible()
True
A non-invertible linear transformation, an endomorphism of
a vector space over a finite field. ::
sage: F.<a> = GF(11^2)
sage: A = matrix(F, [[6*a + 3, 8*a + 2, 10*a + 3],
....: [2*a + 7, 4*a + 3, 2*a + 3],
....: [9*a + 2, 10*a + 10, 3*a + 3]])
sage: A.nullity()
1
sage: E = End(F^3)
sage: zeta = E(A)
sage: zeta.is_invertible()
False
"""
# endomorphism or not, this is equivalent to invertibility of
# the matrix representation, so any test of this will suffice
m = self.matrix()
if not m.is_square():
return False
return m.rank() == m.ncols()
def _latex_(self):
r"""
A LaTeX representation of this vector space morphism.
EXAMPLES::
sage: H = Hom(QQ^3, QQ^2)
sage: f = H(matrix(3, 2, range(6)))
sage: f._latex_().split(' ')
['\\text{vector', 'space', 'morphism', 'from',
'}\n\\Bold{Q}^{3}\\text{', 'to', '}\n\\Bold{Q}^{2}\\text{',
'represented', 'by', 'the', 'matrix',
'}\n\\left(\\begin{array}{rr}\n0', '&', '1',
'\\\\\n2', '&', '3', '\\\\\n4', '&', '5\n\\end{array}\\right)']
"""
from sage.misc.latex import latex
s = ('\\text{vector space morphism from }\n', self.domain()._latex_(),
'\\text{ to }\n', self.codomain()._latex_(),
'\\text{ represented by the matrix }\n', self.matrix()._latex_())
return ''.join(s)
def _repr_(self):
r"""
A text representation of this vector space morphism.
EXAMPLES::
sage: H = Hom(QQ^3, QQ^2)
sage: f = H(matrix(3, 2, range(6)))
sage: f._repr_().split(' ')
['Vector', 'space', 'morphism', 'represented', 'by',
'the', 'matrix:\n[0', '1]\n[2', '3]\n[4', '5]\nDomain:',
'Vector', 'space', 'of', 'dimension', '3', 'over',
'Rational', 'Field\nCodomain:', 'Vector', 'space', 'of',
'dimension', '2', 'over', 'Rational', 'Field']
"""
m = self.matrix()
msg = ("Vector space morphism represented by the matrix:\n",
"{!r}\n",
"Domain: {}\n",
"Codomain: {}")
return ''.join(msg).format(m, self.domain(), self.codomain())
| avg_line_length: 36.782518 | max_line_length: 120 | alphanum_fraction: 0.592509 |
| hexsha: 24a454e24ab3529630e7740c14743aa06f0b813f | size: 6,918 | ext: py | lang: Python |
| max_stars/max_issues/max_forks repo fields (identical): path python/friesian/example/ncf/ncf_train.py | repo DirkFi/BigDL | head 7493209165c046116470b9a1e1c8f527915d6f1e | licenses ["Apache-2.0"] | counts/dates: null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import argparse
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Input, Embedding, Dense, Flatten, concatenate, multiply
from bigdl.dllib.feature.dataset import movielens
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.learn.tf2.estimator import Estimator
from bigdl.friesian.feature import FeatureTable
def build_model(num_users, num_items, class_num, layers=[20, 10], include_mf=True, mf_embed=20):
num_layer = len(layers)
user_input = Input(shape=(1,), dtype='int32', name='user_input')
item_input = Input(shape=(1,), dtype='int32', name='item_input')
mlp_embed_user = Embedding(input_dim=num_users, output_dim=int(layers[0] / 2),
input_length=1)(user_input)
mlp_embed_item = Embedding(input_dim=num_items, output_dim=int(layers[0] / 2),
input_length=1)(item_input)
user_latent = Flatten()(mlp_embed_user)
item_latent = Flatten()(mlp_embed_item)
mlp_latent = concatenate([user_latent, item_latent], axis=1)
for idx in range(1, num_layer):
layer = Dense(layers[idx], activation='relu',
name='layer%d' % idx)
mlp_latent = layer(mlp_latent)
if include_mf:
mf_embed_user = Embedding(input_dim=num_users,
output_dim=mf_embed,
input_length=1)(user_input)
        mf_embed_item = Embedding(input_dim=num_items,  # item embeddings are indexed by item ids (was num_users)
output_dim=mf_embed,
input_length=1)(item_input)
mf_user_flatten = Flatten()(mf_embed_user)
mf_item_flatten = Flatten()(mf_embed_item)
mf_latent = multiply([mf_user_flatten, mf_item_flatten])
concated_model = concatenate([mlp_latent, mf_latent], axis=1)
prediction = Dense(class_num, activation='softmax', name='prediction')(concated_model)
else:
prediction = Dense(class_num, activation='softmax', name='prediction')(mlp_latent)
model = tf.keras.Model([user_input, item_input], prediction)
return model
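# Illustrative sanity check (not part of the original script): the helper below builds a tiny
# NeuMF-style model (MLP branch plus the optional matrix-factorization branch above) with
# placeholder sizes (100 users, 200 items, 5 rating classes) and runs a forward pass on dummy
# ids to confirm the output is a softmax over the 5 classes. It is only a sketch and is never
# called by the training flow below.
def _sanity_check_build_model():
    import numpy as np
    demo = build_model(num_users=100, num_items=200, class_num=5)
    users = np.array([[1], [2], [3]], dtype="int32")     # batch of 3 user ids
    items = np.array([[10], [20], [30]], dtype="int32")  # batch of 3 item ids
    probs = demo.predict([users, items])
    assert probs.shape == (3, 5)  # one probability per rating class
    return probs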
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='NCF Training')
parser.add_argument('--cluster_mode', type=str, default="local",
help='The cluster mode, such as local, yarn, standalone or spark-submit.')
parser.add_argument('--master', type=str, default=None,
help='The master url, only used when cluster mode is standalone.')
parser.add_argument('--executor_cores', type=int, default=8,
help='The executor core number.')
parser.add_argument('--executor_memory', type=str, default="160g",
help='The executor memory.')
parser.add_argument('--num_executor', type=int, default=8,
help='The number of executor.')
parser.add_argument('--driver_cores', type=int, default=4,
help='The driver core number.')
parser.add_argument('--driver_memory', type=str, default="36g",
help='The driver memory.')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--epochs', default=5, type=int, help='train epoch')
parser.add_argument('--batch_size', default=8000, type=int, help='batch size')
parser.add_argument('--model_dir', default='snapshot', type=str,
help='snapshot directory name (default: snapshot)')
args = parser.parse_args()
if args.cluster_mode == "local":
sc = init_orca_context("local", init_ray_on_spark=True)
elif args.cluster_mode == "standalone":
sc = init_orca_context("standalone", master=args.master,
cores=args.executor_cores, num_nodes=args.num_executor,
memory=args.executor_memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
init_ray_on_spark=True)
elif args.cluster_mode == "yarn":
sc = init_orca_context("yarn-client", cores=args.executor_cores,
num_nodes=args.num_executor, memory=args.executor_memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
object_store_memory="10g",
init_ray_on_spark=True)
elif args.cluster_mode == "spark-submit":
sc = init_orca_context("spark-submit")
else:
raise ValueError(
"cluster_mode should be one of 'local', 'yarn', 'standalone' and 'spark-submit'"
", but got " + args.cluster_mode)
movielens_data = movielens.get_id_ratings("/tmp/movielens/")
pddf = pd.DataFrame(movielens_data, columns=["user", "item", "label"])
num_users, num_items = pddf["user"].max() + 1, pddf["item"].max() + 1
full = FeatureTable.from_pandas(pddf)\
.apply("label", "label", lambda x: x - 1, 'int')
train, test = full.random_split([0.8, 0.2], seed=1)
config = {"lr": 1e-3, "inter_op_parallelism": 4, "intra_op_parallelism": args.executor_cores}
def model_creator(config):
model = build_model(num_users, num_items, 5)
        model.summary()  # summary() prints directly; wrapping it in print() would also emit "None"
optimizer = tf.keras.optimizers.Adam(config["lr"])
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_crossentropy', 'accuracy'])
return model
steps_per_epoch = math.ceil(train.size() / args.batch_size)
val_steps = math.ceil(test.size() / args.batch_size)
estimator = Estimator.from_keras(model_creator=model_creator,
verbose=False,
config=config)
estimator.fit(train.df,
batch_size=args.batch_size,
epochs=args.epochs,
feature_cols=['user', 'item'],
label_cols=['label'],
steps_per_epoch=steps_per_epoch,
validation_data=test.df,
validation_steps=val_steps)
tf.saved_model.save(estimator.get_model(), args.model_dir)
stop_orca_context()
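# Example launch (illustrative; the values shown are the script's own defaults):
#
#   python ncf_train.py --cluster_mode local --epochs 5 --batch_size 8000
#
# The MovieLens ratings are fetched into /tmp/movielens/, split 80/20 into train/test, and the
# trained Keras model is exported with tf.saved_model.save() into --model_dir ("snapshot").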
| avg_line_length: 46.42953 | max_line_length: 98 | alphanum_fraction: 0.630818 |
| hexsha: 4cbf15b5e82e0334a02e7a2b33ef321f8a872fb0 | size: 3,572 | ext: py | lang: Python |
| max_stars/max_issues/max_forks repo fields (identical): path application/api/nurse.py | repo MickeyPa/soen344 | head a9632dc36d23a4eba5bb9c943c9ae75ecc4811bd | licenses ["MIT"] | stars/issues: null | forks: 2 (2019-06-04T20:09:14.000Z to 2019-06-04T20:15:32.000Z) |
'''
This file documents the api routes for nurse related events
'''
from flask import Flask, Blueprint, redirect, render_template, url_for, session, request, logging
from index import app
from application.services import NurseService, DoctorService, DoctorScheduleService
from application.util import *
from passlib.hash import sha256_crypt
from application.util import convertRequestDataToDict as toDict
import json
# This is a Blueprint object. We use this as the object to route certain urls
# In /index.py we import this object and attach it to the Flask object app
# This way all the routes attached to this object will be mapped to app as well.
nurse = Blueprint('nurse', __name__)
# list of possible requests
httpMethods = ['PUT', 'GET', 'POST', 'DELETE']
# Index
@nurse.route('/api/', methods=['GET','OPTIONS'])
def index():
return json.dumps({'success': True, 'status': 'OK', 'message': 'Success'})
@nurse.route('/api/nurse/', methods=['PUT'])
def newNurse():
data = request.data
data = data.decode('utf8').replace("'",'"')
data = json.loads(data)
print(data)
success = False
    # Create a nurse and find out whether it was successful or not
success = NurseService.createNurse(access_ID=data['access_ID'], fname=data['fname'], lname=data['lname'], password=data['password'])
if success:
message = "Nurse has been created"
else:
message = "Nurse already exists"
response = json.dumps({"success":success, "message":message})
return response
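# Example request (illustrative only; host, port and field values are placeholders):
#
#   curl -X PUT http://localhost:5000/api/nurse/ \
#        -d '{"access_ID": "nurse01", "fname": "Jane", "lname": "Doe", "password": "secret"}'
#
# The handler decodes the raw body, normalizes quotes, and answers with a JSON string
# containing "success" and "message" fields.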
@nurse.route('/api/nurse/authenticate/', methods=['POST'])
def userAuthenticate():
# convert request data to dictionary
data = toDict(request.data)
success = False
message = ""
status = "" # OK, DENIED, WARNING
response = {}
user = {}
    # check that the access ID exists and the credentials are valid
    success = NurseService.nurseExists(data['access_ID']) and \
        NurseService.authenticate(data['access_ID'], data['password'])
    # if the access ID exists & is authenticated, then get the nurse
if success:
user = NurseService.getNurse(data['access_ID'])
# convert datetimes to strings
message = "Nurse authenticated."
status = "OK"
response = json.dumps({'success': success, 'status': status, 'message': message,'user':user})
# else the user is not authenticated, request is denied
else:
message = "User not authenticated."
status = "DENIED"
response = json.dumps({'success': success, 'status': status, 'message': message,'user':user})
return response
@nurse.route('/api/nurse/doctorAvailability/', methods=['GET'])
def getDoctorAvailability():
    # read the request query parameters
data = request.args
success = False
message = ""
status = "" # OK, DENIED, WARNING
response = {}
user = {}
returned_values = {'timeslot': '', 'clinics': ''}
    # check that the permit number exists and the nurse's credentials are valid
success = DoctorService.doctorExists(data['permit_number']) and \
NurseService.verifyHash(data['access_ID'], data['password_hash'])
# if permit number exists, get the doctor's timeslots
if success:
returned_values = DoctorScheduleService.getAvailability(data['permit_number'], data['date'])
# convert datetimes to strings
message = "schedule found."
status = "OK"
        response = json.dumps({'success': success, 'status': status, 'message': message, 'user': user,
                               'schedule': returned_values['timeslot'], 'clinics': returned_values['clinics']})
# else the user is not authenticated, request is denied
else:
message = "User not authenticated or does not exist."
status = "DENIED"
response = json.dumps({'success': success, 'status': status, 'message': message,\
'schedule': returned_values['timeslot'], 'clinics': returned_values['clinics']})
return response
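# Example request (illustrative only; values are placeholders). The parameters are passed as
# query arguments because this route reads them from request.args:
#
#   curl "http://localhost:5000/api/nurse/doctorAvailability/?permit_number=1234567&access_ID=nurse01&password_hash=<hash>&date=2019-04-01"
#
# On success, the JSON response carries the doctor's timeslots and clinics for the given date.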
| avg_line_length: 33.074074 | max_line_length: 133 | alphanum_fraction: 0.716405 |
| hexsha: c5693c183b6e1a0044c109f7e1cf1f98743e2a1f | size: 1,618 | ext: py | lang: Python |
| max_stars/max_issues/max_forks repo fields (identical): path event_pubsub/handlers/event_producer_handlers.py | repo DhivakharVenkatachalam/snet-marketplace-service | head 6aee606bc9b00d418caeae26c64deae03792e0ce | licenses ["MIT"] | stars/issues: null | forks: 1 (2021-01-15T11:10:36.000Z to 2021-01-15T11:10:36.000Z) |
from common.logger import get_logger
from common.utils import handle_exception_with_slack_notification
from common.exception_handler import exception_handler
from event_pubsub.config import NETWORKS, NETWORK_ID, SLACK_HOOK, WS_PROVIDER
from event_pubsub.producers.blockchain_event_producer import MPEEventProducer, RFAIEventProducer, RegistryEventProducer, \
TokenStakeEventProducer
from event_pubsub.repository import Repository
registry_event_producer = RegistryEventProducer(WS_PROVIDER, Repository(NETWORKS))
mpe_event_producer = MPEEventProducer(WS_PROVIDER, Repository(NETWORKS))
rfai_event_producer = RFAIEventProducer(WS_PROVIDER, Repository(NETWORKS))
token_stake_event_producer = TokenStakeEventProducer(WS_PROVIDER, Repository(NETWORKS))
logger = get_logger(__name__)
@handle_exception_with_slack_notification(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def registry_event_producer_handler(event, context):
try:
registry_event_producer.produce_event(NETWORK_ID)
except Exception as e:
raise e
def mpe_event_producer_handler(event, context):
try:
mpe_event_producer.produce_event(NETWORK_ID)
except Exception as e:
raise e
def rfai_event_producer_handler(event, context):
try:
rfai_event_producer.produce_event(NETWORK_ID)
except Exception as e:
raise e
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger)
def token_stake_event_producer_handler(event, context):
try:
token_stake_event_producer.produce_event(NETWORK_ID)
except Exception as e:
raise e
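# Illustrative sketch (not part of the original module): the handlers follow the usual
# (event, context) Lambda-style signature and ignore both arguments, so they can be exercised
# locally with dummy values once NETWORKS, NETWORK_ID and WS_PROVIDER point at a test network:
#
#   registry_event_producer_handler(event={}, context=None)
#
# Each handler simply delegates to its producer's produce_event(NETWORK_ID).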
| avg_line_length: 34.425532 | max_line_length: 122 | alphanum_fraction: 0.811496 |
| hexsha: 0e4092e54c63059897455494b68b05cebcd5e62d | size: 482 | ext: py | lang: Python |
| max_stars: path data/scripts/templates/object/draft_schematic/furniture/bestine/shared_painting_bestine_rainbow_berry_bush.py | repo obi-two/GameServer | head 7d37024e2291a97d49522610cd8f1dbe5666afc2 | licenses ["MIT"] | stars: 20 (2015-02-23T15:11:56.000Z to 2022-03-18T20:56:48.000Z) |
| max_issues: same path | repo apathyboy/swganh | head 665128efe9154611dec4cb5efc61d246dd095984 | licenses ["MIT"] | issues: null |
| max_forks: same path | repo apathyboy/swganh | head 665128efe9154611dec4cb5efc61d246dd095984 | licenses ["MIT"] | forks: 20 (2015-04-04T16:35:59.000Z to 2022-03-24T14:54:37.000Z) |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/furniture/bestine/shared_painting_bestine_rainbow_berry_bush.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| avg_line_length: 28.352941 | max_line_length: 108 | alphanum_fraction: 0.744813 |
| hexsha: d63dd0d2f3cebbcf45849b4454e606c5428ff210 | size: 53,761 | ext: py | lang: Python |
| max_stars/max_issues/max_forks repo fields (identical): path thelper/train/utils.py | repo sfoucher/thelper | head 235406f3166f03ba6eb77f4a7289188aebc9b2c4 | licenses ["Apache-2.0"] | counts/dates: null |
"""Training/evaluation utilities module.
This module contains utilities and tools used to instantiate training sessions. It also contains
the prediction consumer interface used by metrics and loggers to receive iteration data during
training. See :mod:`thelper.optim.metrics` for more information on metrics.
"""
import json
import logging
import os
from typing import Any, AnyStr, Dict, List, Optional, Union # noqa: F401
import cv2 as cv
import numpy as np
import sklearn.metrics
import torch
import thelper.concepts
import thelper.ifaces
import thelper.typedefs # noqa: F401
import thelper.utils
from thelper.ifaces import ClassNamesHandler, FormatHandler, PredictionConsumer
from thelper.optim.eval import compute_bbox_iou
from thelper.tasks.detect import BoundingBox
logger = logging.getLogger(__name__)
class PredictionCallback(PredictionConsumer):
"""Callback function wrapper compatible with the consumer interface.
This interface is used to hide user-defined callbacks into the list of prediction consumers given
to trainer implementations. The callbacks must always be compatible with the list of arguments
defined by ``thelper.typedefs.IterCallbackParams``, but may also receive extra arguments defined in
advance and passed to the constructor of this class.
Attributes:
callback_func: user-defined function to call on every update from the trainer.
callback_kwargs: user-defined extra arguments to provide to the callback function.
"""
def __init__(self, callback_func, callback_kwargs=None):
# type: (thelper.typedefs.IterCallbackType, thelper.typedefs.IterCallbackParams) -> None
assert callback_kwargs is None or \
(isinstance(callback_kwargs, dict) and
not any([p in callback_kwargs for p in thelper.typedefs.IterCallbackParams])), \
"invalid callback kwargs (must be dict, and not contain overlap with default args)"
callback_func = thelper.utils.import_function(callback_func, params=callback_kwargs)
thelper.utils.check_func_signature(callback_func, thelper.typedefs.IterCallbackParams)
self.callback_func = callback_func
self.callback_kwargs = callback_kwargs
def __repr__(self):
"""Returns a generic print-friendly string containing info about this consumer."""
return self.__class__.__module__ + "." + self.__class__.__qualname__ + \
f"(callback_func={repr(self.callback_func)}, callback_kwargs={repr(self.callback_kwargs)})"
def update(self, *args, **kwargs):
"""Forwards the latest prediction data from the training session to the user callback."""
return self.callback_func(*args, **kwargs)
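# Illustrative sketch (not part of the original module): a user callback only needs to be
# compatible with ``thelper.typedefs.IterCallbackParams``, i.e. the same parameters that the
# ``update`` methods below receive (task, input, pred, target, sample, loss, iter_idx,
# max_iters, epoch_idx, max_epochs, output_path). For example, a simple loss printer:
#
#   def print_loss(task, input, pred, target, sample, loss, iter_idx, max_iters,
#                  epoch_idx, max_epochs, output_path, **kwargs):
#       print(f"epoch {epoch_idx}, iter {iter_idx + 1}/{max_iters}: loss={loss}")
#
#   callback = PredictionCallback(print_loss)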
@thelper.concepts.classification
class ClassifLogger(PredictionConsumer, ClassNamesHandler, FormatHandler):
"""Classification output logger.
This class provides a simple logging interface for accumulating and saving the predictions of a classifier.
By default, all predictions will be logged. However, a confidence threshold can be set to focus on "hard"
samples if necessary. It also optionally offers tensorboardX-compatible output images that can be saved
locally or posted to tensorboard for browser-based visualization.
Usage examples inside a session configuration file::
# ...
# lists all metrics to instantiate as a dictionary
"metrics": {
# ...
# this is the name of the example consumer; it is used for lookup/printing only
"logger": {
# this type is used to instantiate the confusion matrix report object
"type": "thelper.train.utils.ClassifLogger",
"params": {
# log the three 'best' predictions for each sample
"top_k": 3,
# keep updating a set of 10 samples for visualization via tensorboardX
"viz_count": 10
}
},
# ...
}
# ...
Attributes:
top_k: number of 'best' predictions to keep for each sample (along with the gt label).
conf_threshold: threshold used to eliminate uncertain predictions.
class_names: holds the list of class label names provided by the dataset parser. If it is not
provided when the constructor is called, it will be set by the trainer at runtime.
target_name: name of the targeted label (may be 'None' if all classes are used).
target_idx: index of the targeted label (may be 'None' if all classes are used).
viz_count: number of tensorboardX images to generate and update at each epoch.
report_count: number of samples to print in reports (use 'None' if all samples must be printed).
log_keys: list of metadata field keys to copy from samples into the log for each prediction.
force_softmax: specifies whether a softmax operation should be applied to the prediction scores
obtained from the trainer.
score: array used to store prediction scores for logging.
true: array used to store groundtruth labels for logging.
meta: array used to store metadata pulled from samples for logging.
format: output format of the produced log (supports: text, CSV)
"""
def __init__(self,
top_k=1, # type: int
conf_threshold=None, # type: Optional[thelper.typedefs.Number]
class_names=None, # type: Optional[List[AnyStr]]
target_name=None, # type: Optional[AnyStr]
viz_count=0, # type: int
report_count=None, # type: Optional[int]
log_keys=None, # type: Optional[List[AnyStr]]
force_softmax=True, # type: bool
format=None, # type: Optional[AnyStr]
): # type: (...) -> None
"""Receives the logging parameters & the optional class label names used to decorate the log."""
assert isinstance(top_k, int) and top_k > 0, "invalid top-k value"
assert conf_threshold is None or (isinstance(conf_threshold, (float, int)) and 0 < conf_threshold <= 1), \
"classification confidence threshold should be 'None' or float in ]0, 1]"
assert isinstance(viz_count, int) and viz_count >= 0, "invalid image count to visualize"
assert report_count is None or (isinstance(report_count, int) and report_count >= 0), \
"invalid report sample count"
assert log_keys is None or isinstance(log_keys, list), "invalid list of sample keys to log"
self.top_k = top_k
self.target_name = target_name
self.target_idx = None
self.conf_threshold = conf_threshold
self.viz_count = viz_count
self.report_count = report_count
self.log_keys = log_keys if log_keys is not None else []
self.force_softmax = force_softmax
self.score = None
self.true = None
self.meta = None
ClassNamesHandler.__init__(self, class_names)
FormatHandler.__init__(self, format)
def __repr__(self):
"""Returns a generic print-friendly string containing info about this consumer."""
return self.__class__.__module__ + "." + self.__class__.__qualname__ + \
f"(top_k={repr(self.top_k)}, conf_threshold={repr(self.conf_threshold)}, " + \
f"class_names={repr(self.class_names)}, target_name={repr(self.target_name)}, " + \
f"viz_count={repr(self.viz_count)}, report_count={repr(self.report_count)}, " + \
f"log_keys={repr(self.log_keys)}, force_softmax={repr(self.force_softmax)})"
@ClassNamesHandler.class_names.setter
def class_names(self, class_names):
"""Sets the class label names that must be predicted by the model."""
ClassNamesHandler.class_names.fset(self, class_names)
if self.target_name is not None and self.class_names is not None:
assert self.target_name in self.class_indices, \
f"could not find target name {repr(self.target_name)} in class names list"
self.target_idx = self.class_indices[self.target_name]
else:
self.target_idx = None
def update(self, # see `thelper.typedefs.IterCallbackParams` for more info
task, # type: thelper.tasks.utils.Task
input, # type: thelper.typedefs.InputType
pred, # type: thelper.typedefs.ClassificationPredictionType
target, # type: thelper.typedefs.ClassificationTargetType
sample, # type: thelper.typedefs.SampleType
loss, # type: Optional[float]
iter_idx, # type: int
max_iters, # type: int
epoch_idx, # type: int
max_epochs, # type: int
output_path, # type: AnyStr
**kwargs, # type: Any
): # type: (...) -> None
"""Receives the latest predictions and target values from the training session.
The exact signature of this function should match the one of the callbacks defined in
:class:`thelper.train.base.Trainer` and specified by ``thelper.typedefs.IterCallbackParams``.
"""
assert len(kwargs) == 0, "unexpected extra arguments present in update call"
assert isinstance(task, thelper.tasks.Classification), "classif logger only impl for classif tasks"
assert not task.multi_label, "classif logger only impl for non-multi-label classif tasks"
assert iter_idx is not None and max_iters is not None and iter_idx < max_iters, \
"bad iteration indices given to update function"
if self.score is None or self.score.size != max_iters:
self.score = np.asarray([None] * max_iters)
self.true = np.asarray([None] * max_iters)
self.meta = {key: np.asarray([None] * max_iters) for key in self.log_keys}
if task.class_names != self.class_names:
self.class_names = task.class_names
assert pred.dim() == 2, "current classif logger impl only supports 2D outputs (BxC)"
assert pred.shape[1] == len(self.class_names), "unexpected prediction class dimension size"
if target is None or target.numel() == 0:
self.true[iter_idx] = None
else:
assert target.dim() == 1, "gt should be batched (1D) tensor"
assert pred.shape[0] == target.shape[0], "prediction/gt tensors batch size mismatch"
self.true[iter_idx] = target.numpy()
if self.force_softmax:
with torch.no_grad():
pred = torch.nn.functional.softmax(pred, dim=1)
self.score[iter_idx] = pred.numpy()
for meta_key in self.log_keys:
assert meta_key in sample, f"could not extract sample field with key {repr(meta_key)}"
val = sample[meta_key]
assert isinstance(val, (list, np.ndarray, torch.Tensor)), f"field {repr(meta_key)} should be batched"
self.meta[meta_key][iter_idx] = val if isinstance(val, list) else val.tolist()
def render(self):
"""Returns an image of predicted outputs as a numpy-compatible RGBA image drawn by pyplot."""
if self.viz_count == 0:
return None
if self.score is None or self.true is None:
return None
raise NotImplementedError # TODO
def report_text(self):
# type: () -> Optional[AnyStr]
return self.report_csv()
def report_json(self):
# type: () -> Optional[AnyStr]
csv = self.report_csv()
if csv is None:
return None
csv = csv.splitlines()
header, data = csv[0], csv[1:]
headers = header.split(",")
json_entries = [{k: float(v) if "score" in k else str(v)
for k, v in zip(headers, line.split(","))} for line in data]
return json.dumps(json_entries, sort_keys=False, indent=4)
def report_csv(self):
# type: () -> Optional[AnyStr]
"""Returns the logged metadata of predicted samples.
The returned object is a print-friendly CSV string that can be consumed directly by tensorboardX. Note
that this string might be very long if the dataset is large (i.e. it will contain one line per sample).
"""
if isinstance(self.report_count, int) and self.report_count <= 0:
return None
if self.score is None or self.true is None:
return None
pack = list(zip(*[(*pack, )
for packs in zip(self.score, self.true, *self.meta.values())
for pack in zip(*packs)]))
logdata = {key: np.stack(val, axis=0) for key, val in zip(["pred", "target", *self.meta.keys()], pack)}
assert all([len(val) == len(logdata["target"]) for val in logdata.values()]), "messed up unpacking"
header = "target_name,target_score"
for k in range(self.top_k):
header += f",pred_{k + 1}_name,pred_{k + 1}_score"
for meta_key in self.log_keys:
header += f",{str(meta_key)}"
lines = []
for sample_idx in range(len(logdata["target"])):
gt_label_idx = int(logdata["target"][sample_idx])
pred_scores = logdata["pred"][sample_idx]
sorted_score_idxs = np.argsort(pred_scores)[::-1]
sorted_scores = pred_scores[sorted_score_idxs]
if self.conf_threshold is None or gt_label_idx is None or \
pred_scores[gt_label_idx] >= self.conf_threshold:
if gt_label_idx is not None:
entry = f"{self.class_names[gt_label_idx]},{pred_scores[gt_label_idx]:2.4f}"
else:
entry = f"<unknown>,{0.0:2.4f}"
for k in range(self.top_k):
entry += f",{self.class_names[sorted_score_idxs[k]]},{sorted_scores[k]:2.4f}"
for meta_key in self.log_keys:
entry += f",{str(logdata[meta_key][sample_idx])}"
lines.append(entry)
if isinstance(self.report_count, int) and len(lines) >= self.report_count:
break
return "\n".join([header, *lines])
def reset(self):
"""Toggles a reset of the internal state, emptying storage arrays."""
self.score = None
self.true = None
self.meta = None
@thelper.concepts.classification
class ClassifReport(PredictionConsumer, ClassNamesHandler, FormatHandler):
"""Classification report interface.
This class provides a simple interface to ``sklearn.metrics.classification_report`` so that all
count-based metrics can be reported at once under a string-based representation.
Usage example inside a session configuration file::
# ...
# lists all metrics to instantiate as a dictionary
"metrics": {
# ...
# this is the name of the example consumer; it is used for lookup/printing only
"report": {
# this type is used to instantiate the classification report object
"type": "thelper.train.utils.ClassifReport",
# we do not need to provide any parameters to the constructor, defaults are fine
"params": {
# optional parameter that will indicate output as JSON is desired, plain 'text' otherwise
"format": "json"
}
},
# ...
}
# ...
Attributes:
class_names: holds the list of class label names provided by the dataset parser. If it is not
provided when the constructor is called, it will be set by the trainer at runtime.
pred: queue used to store the top-1 (best) predicted class indices at each iteration.
format: output format of the produced log (supports: text, JSON)
"""
def __init__(self, class_names=None, sample_weight=None, digits=4, format=None):
"""Receives the optional class names and arguments passed to the report generator function.
Args:
class_names: holds the list of class label names provided by the dataset parser. If it is not
provided when the constructor is called, it will be set by the trainer at runtime.
sample_weight: sample weights, forwarded to ``sklearn.metrics.classification_report``.
digits: metrics output digit count, forwarded to ``sklearn.metrics.classification_report``.
format: output format of the produced log.
"""
self.class_names = None
self.sample_weight = sample_weight
self.digits = digits
self.pred = None
self.target = None
ClassNamesHandler.__init__(self, class_names)
FormatHandler.__init__(self, format)
def __repr__(self):
"""Returns a generic print-friendly string containing info about this consumer."""
return f"{self.__class__.__module__}.{self.__class__.__qualname__}" + \
f"(class_names={repr(self.class_names)}, sample_weight={repr(self.sample_weight)}, " + \
f"digits={repr(self.digits)})"
def update(self, # see `thelper.typedefs.IterCallbackParams` for more info
task, # type: thelper.tasks.utils.Task
input, # type: thelper.typedefs.InputType
pred, # type: thelper.typedefs.ClassificationPredictionType
target, # type: thelper.typedefs.ClassificationTargetType
sample, # type: thelper.typedefs.SampleType
loss, # type: Optional[float]
iter_idx, # type: int
max_iters, # type: int
epoch_idx, # type: int
max_epochs, # type: int
output_path, # type: AnyStr
**kwargs, # type: Any
): # type: (...) -> None
"""Receives the latest predictions and target values from the training session.
The exact signature of this function should match the one of the callbacks defined in
:class:`thelper.train.base.Trainer` and specified by ``thelper.typedefs.IterCallbackParams``.
"""
assert len(kwargs) == 0, "unexpected extra arguments present in update call"
assert isinstance(task, thelper.tasks.Classification), "classif report only impl for classif tasks"
assert not task.multi_label, "classif report only impl for non-multi-label classif tasks"
assert iter_idx is not None and max_iters is not None and iter_idx < max_iters, \
"bad iteration indices given to update function"
if self.pred is None or self.pred.size != max_iters:
self.pred = np.asarray([None] * max_iters)
self.target = np.asarray([None] * max_iters)
if task.class_names != self.class_names:
self.class_names = task.class_names
if target is None or target.numel() == 0:
# only accumulate results when groundtruth is available
self.pred[iter_idx] = None
self.target[iter_idx] = None
return
        assert pred.dim() == 2 and target.dim() == 1, "current classif report impl only supports batched 1D outputs"
assert pred.shape[0] == target.shape[0], "prediction/gt tensors batch size mismatch"
assert pred.shape[1] == len(self.class_names), "unexpected prediction class dimension size"
self.pred[iter_idx] = pred.topk(1, dim=1)[1].view(pred.shape[0]).tolist()
self.target[iter_idx] = target.view(target.shape[0]).tolist()
def gen_report(self, as_dict=False):
# type: (bool) -> Union[AnyStr, thelper.typedefs.JSON]
if self.pred is None or self.target is None:
return None
pred, target = zip(*[(pred, target) for preds, targets in zip(self.pred, self.target)
if targets is not None for pred, target in zip(preds, targets)])
y_true = np.asarray(target)
y_pred = np.asarray(pred)
_y_true = [self.class_names[classid] for classid in y_true]
_y_pred = [self.class_names[classid] if (0 <= classid < len(self.class_names)) else "<unset>"
for classid in y_pred]
return sklearn.metrics.classification_report(_y_true, _y_pred, sample_weight=self.sample_weight,
digits=self.digits, output_dict=as_dict)
def report_text(self):
# type: () -> Optional[AnyStr]
"""Returns the classification report as a multi-line print-friendly string."""
return f"\n{self.gen_report(as_dict=False)}"
def report_json(self):
# type: () -> Optional[AnyStr]
"""Returns the classification report as a JSON formatted string."""
return json.dumps(self.gen_report(as_dict=True), indent=4)
def reset(self):
"""Toggles a reset of the metric's internal state, emptying queues."""
self.pred = None
self.target = None
@thelper.concepts.detection
class DetectLogger(PredictionConsumer, ClassNamesHandler, FormatHandler):
"""Detection output logger.
This class provides a simple logging interface for accumulating and saving the bounding boxes of an
object detector. By default, all detections will be logged. However, a confidence threshold can be set
to focus on strong predictions if necessary.
.. todo::
It also optionally offers tensorboardX-compatible output images that can be saved
locally or posted to tensorboard for browser-based visualization.
Usage examples inside a session configuration file::
# ...
# lists all metrics to instantiate as a dictionary
"metrics": {
# ...
# this is the name of the example consumer; it is used for lookup/printing only
"logger": {
# this type is used to instantiate the confusion matrix report object
"type": "thelper.train.utils.DetectLogger",
"params": {
# (optional) log the three 'best' detections for each target
"top_k": 3
}
},
# ...
}
# ...
Attributes:
top_k: number of 'best' detections to keep for each target bbox (along with the target label).
If omitted, lists all bounding box predictions by the model after applying IoU and confidence thresholds.
conf_threshold: threshold used to eliminate uncertain predictions (if they support confidence).
If confidence is not supported by the model bbox predictions, this parameter is ignored.
iou_threshold: threshold used to eliminate predictions too far from target (regardless of confidence).
If omitted, will ignore only completely non-overlapping predicted bounding boxes (:math:`IoU=0`).
If no target bounding boxes are provided (prediction-only), this parameter is ignored.
class_names: holds the list of class label names provided by the dataset parser. If it is not
provided when the constructor is called, it will be set by the trainer at runtime.
target_name: name of the targeted label (may be 'None' if all classes are used).
target_idx: index of the targeted label (may be 'None' if all classes are used).
viz_count: number of tensorboardX images to generate and update at each epoch.
report_count: number of samples to print in reports (use 'None' if all samples must be printed).
log_keys: list of metadata field keys to copy from samples into the log for each prediction.
bbox: array used to store prediction bounding boxes for logging.
true: array used to store groundtruth labels for logging.
meta: array used to store metadata pulled from samples for logging.
format: output format of the produced log (supports: text, CSV, JSON)
"""
def __init__(self,
top_k=None, # type: Optional[int]
conf_threshold=None, # type: Optional[thelper.typedefs.Number]
iou_threshold=None, # type: Optional[thelper.typedefs.Number]
class_names=None, # type: Optional[List[AnyStr]]
target_name=None, # type: Optional[AnyStr]
viz_count=0, # type: int
report_count=None, # type: Optional[int]
log_keys=None, # type: Optional[List[AnyStr]]
format=None, # type: Optional[AnyStr]
): # type: (...) -> None
"""Receives the logging parameters & the optional class label names used to decorate the log."""
assert top_k is None or isinstance(top_k, int) and top_k > 0, "invalid top-k value"
assert conf_threshold is None or (isinstance(conf_threshold, (float, int)) and 0 <= conf_threshold <= 1), \
"detection confidence threshold should be 'None' or number in [0, 1]"
assert iou_threshold is None or (isinstance(iou_threshold, (int, float)) and 0 <= iou_threshold <= 1), \
"detection IoU threshold should be 'None' or number in [0, 1]"
assert isinstance(viz_count, int) and viz_count >= 0, "invalid image count to visualize"
assert report_count is None or (
isinstance(report_count, int) and report_count >= 0), "invalid report sample count"
assert log_keys is None or isinstance(log_keys, list), "invalid list of sample keys to log"
self.top_k = top_k
self.target_name = target_name
self.target_idx = None
self.conf_threshold = conf_threshold
self.iou_threshold = iou_threshold
self.viz_count = viz_count
self.report_count = report_count
self.log_keys = log_keys if log_keys is not None else []
self.bbox = None # type: Optional[thelper.typedefs.DetectionPredictionType]
self.true = None # type: Optional[thelper.typedefs.DetectionTargetType]
self.meta = None # type: Optional[Dict[AnyStr, List[Any]]]
ClassNamesHandler.__init__(self, class_names)
FormatHandler.__init__(self, format)
def __repr__(self):
"""Returns a generic print-friendly string containing info about this consumer."""
return f"{self.__class__.__module__}.{self.__class__.__qualname__}" + \
f"(top_k={repr(self.top_k)}, conf_threshold={repr(self.conf_threshold)}, " + \
f"class_names={repr(self.class_names)}, target_name={repr(self.target_name)}, " + \
f"viz_count={repr(self.viz_count)}, report_count={repr(self.report_count)}, " + \
f"log_keys={repr(self.log_keys)})"
@ClassNamesHandler.class_names.setter
def class_names(self, class_names):
"""Sets the class label names that must be predicted by the model."""
ClassNamesHandler.class_names.fset(self, class_names)
if self.target_name is not None and self.class_names is not None:
assert self.target_name in self.class_indices, \
f"could not find target name {repr(self.target_name)} in class names list"
self.target_idx = self.class_indices[self.target_name]
else:
self.target_idx = None
def update(self, # see `thelper.typedefs.IterCallbackParams` for more info
task, # type: thelper.tasks.utils.Task
input, # type: thelper.typedefs.InputType
pred, # type: thelper.typedefs.DetectionPredictionType
target, # type: thelper.typedefs.DetectionTargetType
sample, # type: thelper.typedefs.SampleType
loss, # type: Optional[float]
iter_idx, # type: int
max_iters, # type: int
epoch_idx, # type: int
max_epochs, # type: int
output_path, # type: AnyStr
**kwargs, # type: Any
): # type: (...) -> None
"""Receives the latest predictions and target values from the training session.
The exact signature of this function should match the one of the callbacks defined in
:class:`thelper.train.base.Trainer` and specified by ``thelper.typedefs.IterCallbackParams``.
"""
assert len(kwargs) == 0, "unexpected extra arguments present in update call"
assert isinstance(task, thelper.tasks.Detection), "detect report only impl for detection tasks"
assert iter_idx is not None and max_iters is not None and iter_idx < max_iters, \
"bad iteration indices given to update function"
if self.bbox is None or self.bbox.size != max_iters:
self.bbox = np.asarray([None] * max_iters)
self.true = np.asarray([None] * max_iters)
self.meta = {key: np.asarray([None] * max_iters) for key in self.log_keys}
if task.class_names != self.class_names:
self.class_names = task.class_names
if target is None or len(target) == 0 or all(len(t) == 0 for t in target):
target = [None] * len(pred) # simplify unpacking during report generation
else:
assert len(pred) == len(target), "prediction/target bounding boxes list batch size mismatch"
for gt in target:
assert all(isinstance(bbox, BoundingBox) for bbox in gt), \
"detect logger only supports 2D lists of bounding box targets"
for det in pred:
assert all(isinstance(bbox, BoundingBox) for bbox in det), \
"detect logger only supports 2D lists of bounding box predictions"
self.bbox[iter_idx] = pred
self.true[iter_idx] = target
for meta_key in self.log_keys:
assert meta_key in sample, f"could not extract sample field with key {repr(meta_key)}"
val = sample[meta_key]
assert isinstance(val, (list, np.ndarray, torch.Tensor)), f"field {repr(meta_key)} should be batched"
self.meta[meta_key][iter_idx] = val if isinstance(val, list) else val.tolist()
def render(self):
"""Returns an image of predicted outputs as a numpy-compatible RGBA image drawn by pyplot."""
if self.viz_count == 0:
return None
if self.bbox is None or self.true is None:
return None
raise NotImplementedError # TODO
def group_bbox(self,
target_bboxes, # type: List[Optional[BoundingBox]]
detect_bboxes, # type: List[BoundingBox]
): # type: (...) -> List[Dict[AnyStr, Union[BoundingBox, float, None]]]
"""Groups a sample's detected bounding boxes with target bounding boxes according to configuration parameters.
Returns a list of detections grouped by target(s) with following format::
[
{
"target": <associated-target-bbox>,
"detect": [
{
"bbox": <detection-bbox>,
"iou": <IoU(detect-bbox, target-bbox)>
},
...
]
},
...
]
The associated target bounding box and :math:`IoU` can be ``None`` if no target was provided
(ie: during inference). In this case, the returned list will have only a single element with all detections
associated to it. A single element list can also be returned if only one target was specified for this sample.
When multiple ground truth targets were provided, the returned list will have the same length and ordering as
these targets. The associated detected bounding boxes will depend on IoU between target/detection combinations.
All filtering thresholds specified as configuration parameter will be applied for the returned list. Detected
bounding boxes will also be sorted by highest confidence (if available) or by highest IoU as fallback.
"""
# remove low confidence and sort by highest
group_bboxes = sorted(
[bbox for bbox in (detect_bboxes if detect_bboxes else []) if isinstance(bbox, BoundingBox)],
key=lambda b: b.confidence if b.confidence is not None else 0, reverse=True)
if self.conf_threshold:
group_bboxes = [b for b in group_bboxes if b.confidence is not None and b.confidence >= self.conf_threshold]
sort_by_iou = all(bbox.confidence is None for bbox in group_bboxes)
# group according to target count
target_count = len(target_bboxes)
if target_count == 0:
group_bboxes = [{"target": None, "detect": [{"bbox": bbox, "iou": None} for bbox in group_bboxes]}]
elif target_count == 1:
sorted_detect = [{"bbox": bbox, "iou": compute_bbox_iou(bbox, target_bboxes[0])} for bbox in group_bboxes]
if sort_by_iou:
sorted_detect = list(sorted(sorted_detect, key=lambda d: d["iou"], reverse=True))
group_bboxes = [{"target": target_bboxes[0], "detect": sorted_detect}]
else:
# regroup by highest IoU
target_detects = [[] for _ in range(target_count)]
for det in group_bboxes:
# FIXME:
# should we do something different if all IoU = 0 (ie: false positive detection)
# for now, they will all be stored in the first target, but can be tracked with IoU = 0
det_target_iou = [compute_bbox_iou(det, t) for t in target_bboxes]
best_iou_idx = int(np.argmax(det_target_iou))
target_detects[best_iou_idx].append({"bbox": det, "iou": det_target_iou[best_iou_idx]})
group_bboxes = [{
"target": target_bboxes[i],
"detect": list(
sorted(target_detects[i], key=lambda d: d["iou"], reverse=True)
) if sort_by_iou else target_detects[i]
} for i in range(target_count)]
# apply filters on grouped results
if self.iou_threshold:
for grp in group_bboxes:
grp["detect"] = [d for d in grp["detect"] if d["iou"] is None or d["iou"] >= self.iou_threshold]
if self.top_k:
for grp in group_bboxes:
grp["detect"] = grp["detect"][:self.top_k]
return list(group_bboxes)
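# --- illustrative aside (not part of the original module) ---------------------
# A minimal, self-contained sketch of the "assign each detection to the target
# with the best IoU" grouping described in the docstring above. It deliberately
# avoids thelper's BoundingBox/compute_bbox_iou helpers and works on plain
# (x1, y1, x2, y2) tuples; `toy_iou` and `toy_group` are hypothetical names.
def toy_iou(a, b):
    # intersection-over-union of two axis-aligned boxes given as (x1, y1, x2, y2)
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.0

def toy_group(targets, detections):
    # one bucket per target; each detection lands in the bucket with the best IoU
    buckets = [{"target": t, "detect": []} for t in targets]
    for det in detections:
        ious = [toy_iou(det, t) for t in targets]
        best = max(range(len(targets)), key=lambda i: ious[i])
        buckets[best]["detect"].append({"bbox": det, "iou": ious[best]})
    return buckets

# two ground-truth boxes and three detections; the last detection overlaps neither
# target, so (as in the FIXME above) it ends up in the first bucket with IoU 0
print(toy_group([(0, 0, 10, 10), (20, 20, 30, 30)],
                [(1, 1, 9, 9), (19, 21, 31, 29), (50, 50, 60, 60)]))
# -------------------------------------------------------------------------------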
def gen_report(self):
# type: () -> Optional[List[Dict[AnyStr, Any]]]
"""Returns the logged metadata of predicted bounding boxes per sample target in a JSON-like structure.
For every target bounding box, the corresponding *best*-sorted detections are returned.
Sample metadata is appended to every corresponding sub-target if any were requested.
If ``report_count`` was specified, the returned report will be limited to that requested amount of targets.
.. seealso::
| :meth:`DetectLogger.group_bbox` for formatting, sorting and filtering details.
"""
if isinstance(self.report_count, int) and self.report_count <= 0:
return None
if self.bbox is None or self.true is None:
return None
# flatten batches/samples
pack = list(zip(*[(*pack,)
for packs in zip(self.bbox, self.true, *self.meta.values())
for pack in zip(*packs)]))
data = {key: val for key, val in zip(["detect", "target", *self.meta.keys()], pack)}
assert all([len(val) == len(data["target"]) for val in data.values()]), "messed up unpacking"
# flatten targets per batches/samples
all_targets = []
sample_count = len(data["target"])
for sample_idx in range(sample_count):
if isinstance(self.report_count, int) and len(all_targets) >= self.report_count:
logger.warning(f"report max count {self.report_count} reached at {len(all_targets)} targets "
f"(sample {sample_idx}/{sample_count} processed)")
break
sample_targets = data["target"][sample_idx]
sample_detects = data["detect"][sample_idx]
sample_report = self.group_bbox(sample_targets, sample_detects)
for target in sample_report:
for k in self.meta:
target[k] = self.meta[k][sample_idx]
target["target"] = {
"bbox": target["target"],
"class_name": self.class_names[target["target"].class_id]
}
all_targets.extend(sample_report)
# format everything nicely as json
for item in all_targets:
if isinstance(item["target"]["bbox"], BoundingBox):
item["target"].update(item["target"].pop("bbox").json())
item["target"]["class_name"] = self.class_names[item["target"]["class_id"]]
for det in item["detect"]:
if isinstance(det["bbox"], BoundingBox):
det.update(det.pop("bbox").json())
det["class_name"] = self.class_names[det["class_id"]]
return all_targets
def report_json(self):
# type: () -> Optional[AnyStr]
"""Returns the logged metadata of predicted bounding boxes as a JSON formatted string."""
report = self.gen_report()
if not report:
return None
return json.dumps(report, indent=4)
def report_text(self):
# type: () -> Optional[AnyStr]
return self.report_csv()
def report_csv(self):
# type: () -> Optional[AnyStr]
r"""Returns the logged metadata of predicted bounding boxes.
The returned object is a print-friendly CSV string.
Note that this string might be very long if the dataset is large or if the model tends to generate a lot of
detections. The string will contain at least :math:`N_{sample} \cdot N_{target}` lines and each line will have
up to :math:`N_{bbox}` detections, unless limited by configuration parameters.
"""
report = self.gen_report()
if not report:
return None
none_str = "unknown"
def patch_none(to_patch, number_format='2.4f'): # type: (Any, str) -> str
if to_patch is None:
return none_str
if isinstance(to_patch, float):
s = f"{{:{number_format}}}"
return s.format(to_patch)
return str(to_patch)
header = "sample,target_name,target_bbox"
for meta_key in self.log_keys:
header += f",{str(meta_key)}"
if self.top_k:
for k in range(self.top_k):
header += f",detect_{k + 1}_name,detect_{k + 1}_bbox,detect_{k + 1}_conf,detect_{k + 1}_iou"
else:
# unknown total number of detections (can vary per target)
header += ",detect_name[N],detect_bbox[N],detect_conf[N],detect_iou[N],(...)[N]"
lines = [""] * len(report)
for i, result in enumerate(report):
target = result["target"]
detect = result["detect"]
if not target:
entry = f"{none_str},{none_str},{none_str}"
else:
entry = f"{target['image_id']},{patch_none(target['class_name'])},{patch_none(target['bbox'])}"
for meta_key in self.log_keys:
entry += f",{str(target[meta_key])}"
for det in detect:
entry += f",{det['class_name']},{det['bbox']},{patch_none(det['confidence'])},{patch_none(det['iou'])}"
lines[i] = entry
return "\n".join([header, *lines])
def reset(self):
"""Toggles a reset of the internal state, emptying storage arrays."""
self.bbox = None
self.true = None
self.meta = None
@thelper.concepts.classification
@thelper.concepts.segmentation
class ConfusionMatrix(PredictionConsumer, ClassNamesHandler):
"""Confusion matrix report interface.
This class provides a simple interface to ``sklearn.metrics.confusion_matrix`` so that a full
confusion matrix can be easily reported under a string-based representation. It also offers a
tensorboardX-compatible output image that can be saved locally or posted to tensorboard for
browser-based visualization.
Usage example inside a session configuration file::
# ...
# lists all metrics to instantiate as a dictionary
"metrics": {
# ...
# this is the name of the example consumer; it is used for lookup/printing only
"confmat": {
# this type is used to instantiate the confusion matrix report object
"type": "thelper.train.utils.ConfusionMatrix",
# we do not need to provide any parameters to the constructor, defaults are fine
"params": {
# optional parameter that defines whether rendered confusion matrices should be normalized
"draw_normalized": true
}
},
# ...
}
# ...
Attributes:
matrix: report generator function, called at evaluation time to generate the output string.
class_names: holds the list of class label names provided by the dataset parser. If it is not
provided when the constructor is called, it will be set by the trainer at runtime.
draw_normalized: defines whether rendered confusion matrices should be normalized or not.
pred: queue used to store the top-1 (best) predicted class indices at each iteration.
"""
def __init__(self, class_names=None, draw_normalized=True):
"""Receives the optional class label names used to decorate the output string.
Args:
class_names: holds the list of class label names provided by the dataset parser. If it is not
provided when the constructor is called, it will be set by the trainer at runtime.
draw_normalized: defines whether rendered confusion matrices should be normalized or not.
"""
def gen_matrix(y_true, y_pred, _class_names):
_y_true = [_class_names[classid] for classid in y_true]
_y_pred = [_class_names[classid] if (0 <= classid < len(_class_names)) else "<unset>" for classid in y_pred]
return sklearn.metrics.confusion_matrix(_y_true, _y_pred, labels=_class_names)
self.matrix = gen_matrix
self.draw_normalized = draw_normalized
self.pred = None
self.target = None
ClassNamesHandler.__init__(self, class_names)
def __repr__(self):
"""Returns a generic print-friendly string containing info about this consumer."""
return self.__class__.__module__ + "." + self.__class__.__qualname__ + \
f"(class_names={repr(self.class_names)}, draw_normalized={repr(self.draw_normalized)})"
def update(self, # see `thelper.typedefs.IterCallbackParams` for more info
task, # type: thelper.tasks.utils.Task
input, # type: thelper.typedefs.InputType
pred, # type: thelper.typedefs.ClassificationPredictionType
target, # type: thelper.typedefs.ClassificationTargetType
sample, # type: thelper.typedefs.SampleType
loss, # type: Optional[float]
iter_idx, # type: int
max_iters, # type: int
epoch_idx, # type: int
max_epochs, # type: int
output_path, # type: AnyStr
**kwargs, # type: Any
): # type: (...) -> None
"""Receives the latest predictions and target values from the training session.
The exact signature of this function should match the one of the callbacks defined in
:class:`thelper.train.base.Trainer` and specified by ``thelper.typedefs.IterCallbackParams``.
"""
assert len(kwargs) == 0, "unexpected extra arguments present in update call"
assert isinstance(task, thelper.tasks.Classification), "confmat only impl for classif tasks"
assert not task.multi_label, "confmat only impl for non-multi-label classif tasks"
assert iter_idx is not None and max_iters is not None and iter_idx < max_iters, \
"bad iteration indices given to update function"
if self.pred is None or self.pred.size != max_iters:
self.pred = np.asarray([None] * max_iters)
self.target = np.asarray([None] * max_iters)
if task.class_names != self.class_names:
self.class_names = task.class_names
if target is None or target.numel() == 0:
# only accumulate results when groundtruth is available
self.pred[iter_idx] = None
self.target[iter_idx] = None
return
assert pred.dim() == 2 and target.dim() == 1, "current confmat impl only supports batched 1D outputs"
assert pred.shape[0] == target.shape[0], "prediction/gt tensors batch size mismatch"
assert pred.shape[1] == len(self.class_names), "unexpected prediction class dimension size"
self.pred[iter_idx] = pred.topk(1, dim=1)[1].view(pred.shape[0]).tolist()
self.target[iter_idx] = target.view(target.shape[0]).tolist()
def report(self):
"""Returns the confusion matrix as a multi-line print-friendly string."""
if self.pred is None or self.target is None:
return None
pred, target = zip(*[(pred, target) for preds, targets in zip(self.pred, self.target)
if targets is not None for pred, target in zip(preds, targets)])
confmat = self.matrix(np.asarray(target), np.asarray(pred), self.class_names)
return "\n" + thelper.utils.stringify_confmat(confmat, self.class_names)
def render(self):
"""Returns the confusion matrix as a numpy-compatible RGBA image drawn by pyplot."""
if self.pred is None or self.target is None:
return None
pred, target = zip(*[(pred, target) for preds, targets in zip(self.pred, self.target)
if targets is not None for pred, target in zip(preds, targets)])
confmat = self.matrix(np.asarray(target), np.asarray(pred), self.class_names)
try:
fig, ax = thelper.draw.draw_confmat(confmat, self.class_names, normalize=self.draw_normalized)
array = thelper.draw.fig2array(fig)
return array
except AttributeError as e:
logger.warning(f"failed to render confusion matrix; caught exception:\n{str(e)}")
# return None if rendering fails (probably due to matplotlib on display-less server)
return None
def reset(self):
"""Toggles a reset of the metric's internal state, emptying queues."""
self.pred = None
self.target = None
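# --- illustrative aside (not part of the original module) ---------------------
# A minimal sketch of what `report` does internally: the per-iteration prediction
# and target index lists are flattened, mapped to label names, and handed to
# sklearn.metrics.confusion_matrix. The values below are made up for the example.
import sklearn.metrics

class_names = ["cat", "dog", "bird"]
per_iter_preds = [[0, 1, 1], [2, 2]]    # top-1 class indices, one list per iteration
per_iter_targets = [[0, 1, 2], [2, 1]]  # ground-truth class indices

flat_pred = [class_names[p] for preds in per_iter_preds for p in preds]
flat_true = [class_names[t] for targets in per_iter_targets for t in targets]
confmat = sklearn.metrics.confusion_matrix(flat_true, flat_pred, labels=class_names)
print(confmat)  # rows follow `class_names` for the true class, columns for the prediction
# -------------------------------------------------------------------------------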
def create_consumers(config):
"""Instantiates and returns the prediction consumers defined in the configuration dictionary.
All arguments are expected to be handed in through the configuration via a dictionary named 'params'.
"""
assert isinstance(config, dict), "config should be provided as a dictionary"
consumers = {}
for name, consumer_config in config.items():
assert name != "loss", "metric name 'loss' is reserved"
assert isinstance(consumer_config, dict), "consumer config should be provided as a dictionary"
assert "type" in consumer_config and consumer_config["type"], "consumer config missing 'type' field"
consumer_type = thelper.utils.import_class(consumer_config["type"])
consumer_params = thelper.utils.get_key_def(["params", "parameters"], consumer_config, {})
try:
consumer = consumer_type(**consumer_params)
except Exception:
logger.error(f"failed to create consumer {consumer_config['type']} with params:\n\t{str(consumer_params)}")
raise
assert isinstance(consumer, PredictionConsumer), \
"invalid consumer type, must derive from PredictionConsumer interface"
consumers[name] = consumer
return consumers
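# --- illustrative aside (not part of the original module) ---------------------
# Sketch of the dynamic-instantiation pattern used above: resolve a dotted type
# name from the config, then call it with the config's params. The helper below
# is only a stand-in for thelper.utils.import_class (whose exact behavior may
# differ), and the config content is made up.
import importlib

def import_class_sketch(fullname):
    module_name, _, class_name = fullname.rpartition(".")
    return getattr(importlib.import_module(module_name), class_name)

demo_config = {"duration": {"type": "datetime.timedelta", "params": {"hours": 1, "minutes": 30}}}
for name, sub_cfg in demo_config.items():
    cls = import_class_sketch(sub_cfg["type"])
    obj = cls(**sub_cfg.get("params", {}))
    print(name, obj)  # -> duration 1:30:00
# -------------------------------------------------------------------------------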
def create_trainer(session_name, # type: AnyStr
save_dir, # type: AnyStr
config, # type: thelper.typedefs.ConfigDict
model, # type: thelper.typedefs.ModelType
task, # type: thelper.tasks.Task
loaders, # type: thelper.typedefs.MultiLoaderType
ckptdata=None # type: Optional[thelper.typedefs.CheckpointContentType]
): # type: (...) -> thelper.train.Trainer
"""Instantiates the trainer object based on the type contained in the config dictionary.
The trainer type is expected to be in the configuration dictionary's `trainer` field, under the `type` key. For more
information on the configuration, refer to :class:`thelper.train.base.Trainer`. The instantiated type must be
compatible with the constructor signature of :class:`thelper.train.base.Trainer`. The object's constructor will
be given the full config dictionary and the checkpoint data for resuming the session (if available).
If the trainer type is missing, it will be automatically deduced based on the task object.
Args:
session_name: name of the training session used for printing and to create internal tensorboardX directories.
save_dir: path to the session directory where logs and checkpoints will be saved.
config: full configuration dictionary that will be parsed for trainer parameters and saved in checkpoints.
model: model to train/evaluate; should be compatible with :class:`thelper.nn.utils.Module`.
task: global task interface defining the type of model and training goal for the session.
loaders: a tuple containing the training/validation/test data loaders (a loader can be ``None`` if empty).
ckptdata: raw checkpoint to parse data from when resuming a session (if ``None``, will start from scratch).
Returns:
The fully-constructed trainer object, ready to begin model training/evaluation.
.. seealso::
| :class:`thelper.train.base.Trainer`
"""
assert "trainer" in config and config["trainer"], "session configuration dictionary missing 'trainer' section"
trainer_config = config["trainer"]
if "type" not in trainer_config:
if isinstance(task, thelper.tasks.Classification):
trainer_type = thelper.train.ImageClassifTrainer
elif isinstance(task, thelper.tasks.Detection):
trainer_type = thelper.train.ObjDetectTrainer
elif isinstance(task, thelper.tasks.Regression):
trainer_type = thelper.train.RegressionTrainer
elif isinstance(task, thelper.tasks.Segmentation):
trainer_type = thelper.train.ImageSegmTrainer
else:
raise AssertionError(f"unknown trainer type required for task '{str(task)}'")
else:
trainer_type = thelper.utils.import_class(trainer_config["type"])
return trainer_type(session_name, save_dir, model, task, loaders, config, ckptdata=ckptdata)
# noinspection PyUnusedLocal
def _draw_wrapper(task, # type: thelper.tasks.utils.Task
input, # type: thelper.typedefs.InputType
pred, # type: thelper.typedefs.AnyPredictionType
target, # type: thelper.typedefs.AnyTargetType
sample, # type: thelper.typedefs.SampleType
loss, # type: Optional[float]
iter_idx, # type: int
max_iters, # type: int
epoch_idx, # type: int
max_epochs, # type: int
output_path, # type: AnyStr
# extra params added by display callback interface below
save, # type: bool
# all extra params will be forwarded to the display call
**kwargs, # type: Any
# see `thelper.typedefs.IterCallbackParams` for more info
): # type: (...) -> None
"""Wrapper to :func:`thelper.draw.draw` used as a callback entrypoint for trainers."""
res = thelper.draw.draw(task=task, input=input, pred=pred, target=target, **kwargs)
if save:
assert isinstance(res, tuple) and len(res) == 2, "unexpected redraw output (should be 2-elem tuple)"
if isinstance(res[1], np.ndarray):
assert "path" in sample and isinstance(sample["path"], list) and len(sample["path"]) == 1, \
"unexpected draw format (current implementation needs batch size = 1, and path metadata)"
os.makedirs(output_path, exist_ok=True)
filepath = os.path.join(output_path, os.path.basename(sample["path"][0]))
cv.imwrite(filepath, res[1])
else:
# we're displaying with matplotlib, and have no clue on how to save the output
raise NotImplementedError
| 53.440358
| 120
| 0.62523
|
555a3d5eb5e2004469fdc862e835d15907fcd406
| 4,047
|
py
|
Python
|
tests/python/source_file.py
|
rtajan/eirballoon
|
0eded8f86174a9e5ed38297fa26c7f5a53b5ea53
|
[
"MIT"
] | null | null | null |
tests/python/source_file.py
|
rtajan/eirballoon
|
0eded8f86174a9e5ed38297fa26c7f5a53b5ea53
|
[
"MIT"
] | null | null | null |
tests/python/source_file.py
|
rtajan/eirballoon
|
0eded8f86174a9e5ed38297fa26c7f5a53b5ea53
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, '../../build/lib')
sys.path.insert(0, '../../py_aff3ct/build/lib')
sys.path.insert(0, '../../src/python')
from py_aff3ct.module.py_module import Py_Module
import py_aff3ct
import numpy as np
import os
from bitstring import BitArray
class source_file(Py_Module):
def int2binseq(self,N,size): #int to numpy binary array of output size
bits = [int(c) for c in bin(N)[2:]]
out = np.concatenate([np.zeros(size-len(bits)),bits])
return out
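# --- illustrative aside (not part of the original class) ----------------------
# The fixed-width encoding above can also be written with numpy's binary_repr,
# which zero-pads to `width` digits directly (assuming the value fits in the
# requested number of bits); `demo_bits` is just a throwaway name:
demo_bits = np.array(list(np.binary_repr(5, width=8)), dtype=np.int32)
# demo_bits -> array([0, 0, 0, 0, 0, 1, 0, 1], dtype=int32)
# -------------------------------------------------------------------------------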
def update_attr(self): #update class attributes
self.PACKET_TYPE=0
self.PACKET_ID+=1
if self.PACKET_ID==self.F_SIZE: #last packet
self.PACKET_TYPE=2
self.INFO=list(self.int2binseq(self.PACKET_TYPE,2))+list(self.int2binseq(self.PACKET_ID,16))+list(self.int2binseq(self.F_ID,16))+list(self.int2binseq(self.F_TYPE,2))+list(self.int2binseq(self.F_SIZE,24))
return 0
def build_info_header(self): #updates info header
#set file type
if (".ts" in self.path):
self.F_TYPE=0
else:
self.F_TYPE=1
self.PACKET_ID = 0
#set packet type
self.PACKET_TYPE=1
#building the header [P_TYPE P_ID F_ID F_TYPE F_SIZE]
self.INFO=[]
self.INFO=list(self.int2binseq(self.PACKET_TYPE,2))+list(self.int2binseq(self.PACKET_ID,16))+list(self.int2binseq(self.F_ID,16))+list(self.int2binseq(self.F_TYPE,2))+list(self.int2binseq(self.F_SIZE,24))
return 0
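# --- illustrative aside (not part of the original class) ----------------------
# Decoding sketch for the 60-bit header built above, using the field widths
# [P_TYPE:2, P_ID:16, F_ID:16, F_TYPE:2, F_SIZE:24]; `split_header` is a
# hypothetical helper that is not used anywhere else in this file.
def split_header(info_bits):
    widths = [2, 16, 16, 2, 24]
    fields, start = [], 0
    for w in widths:
        chunk = info_bits[start:start + w]
        fields.append(int("".join(str(int(b)) for b in chunk), 2))
        start += w
    return fields  # [packet_type, packet_id, file_id, file_type, file_size]
# -------------------------------------------------------------------------------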
def generate(self,_out,nb,id,type):
if self.tmp > 0:
self.tmp+=1
if self.tmp == 500:
exit(0)
else:
pass
if self.frame_nb < 100:
_out[0,::2]=np.ones((1,(self.N+self.INFO_SIZE)//2),dtype=np.int32)
_out[0,1::2]=np.zeros((1,(self.N+self.INFO_SIZE)//2),dtype=np.int32)
elif self.frame_nb==100:
_out[0,::]=np.ones((1,self.N+self.INFO_SIZE),dtype=np.int32)
else:
_out[0,0:self.INFO_SIZE] = self.INFO
_out[0,self.INFO_SIZE:] = self.src['generate::U_K'][:]
self.src('generate').exec()
nb[:]=self.number_packet
id[:]=self.PACKET_ID
type[:]=self.F_TYPE
self.update_attr()
if self.PACKET_TYPE == 2:
self.tmp = 1
self.frame_nb+=1
return 1
def compute_packet_number(self):
binary_size = os.path.getsize(self.path) * 8
return np.ceil(binary_size//self.N)
def __init__(self, path, N,auto_reset=False):
Py_Module.__init__(self)
if path:
self.src = py_aff3ct.module.source.Source_user_binary(N, path, auto_reset=False)
self.path = path
self.N = N
self.frame_nb = 1
self.name = "source_file"
self.tmp=0
self.number_packet = self.compute_packet_number()
#infos
self.F_ID=0 #16 bits, file id (65536 file)
self.F_TYPE=0 #2 bits, file type (0: .ts file, 1: image of any type)
self.F_SIZE=int(self.number_packet)+1 #24 bits to encode the file size in packets (< 16M packets)
self.PACKET_ID=0 #16 bits, packet order for a .ts file, used to reorder packets on the receiving side
self.PACKET_TYPE=0 #2 bits to encode 3 frame types: 1 start // 0 mid // 2 end
#total 60 bits ########## [P_TYPE P_ID F_ID F_TYPE F_SIZE]
self.INFO_SIZE=60
self.build_info_header()
t_generate = self.create_task('generate')
_out = self.create_socket_out(t_generate, 'U_K', N+self.INFO_SIZE, np.int32)
nb = self.create_socket_out(t_generate, 'NB', 1, np.int32)
id = self.create_socket_out(t_generate, 'ID', 1, np.int32)
type = self.create_socket_out(t_generate,"type",1,np.int32)
self.create_codelet(t_generate, lambda slf, lsk,
fid: slf.generate(lsk[_out],lsk[nb],lsk[id],lsk[type]))
| 37.472222
| 211
| 0.595503
|
121ae563308c695a0a76fcf383eb6e6bb7f43011
| 3,386
|
py
|
Python
|
paddlex/cv/datasets/easydata_cls.py
|
Channingss/PaddleX
|
06fe9552472f0379ff1a16c339c9784c973b5a04
|
[
"Apache-2.0"
] | 3
|
2020-05-12T03:09:13.000Z
|
2020-06-18T02:50:34.000Z
|
paddlex/cv/datasets/easydata_cls.py
|
wyc880622/PaddleX
|
f001960b7359f3a88b7dd96e1f34500b90566ceb
|
[
"Apache-2.0"
] | null | null | null |
paddlex/cv/datasets/easydata_cls.py
|
wyc880622/PaddleX
|
f001960b7359f3a88b7dd96e1f34500b90566ceb
|
[
"Apache-2.0"
] | 1
|
2020-05-18T07:06:28.000Z
|
2020-05-18T07:06:28.000Z
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os.path as osp
import random
import copy
import json
import paddlex.utils.logging as logging
from .imagenet import ImageNet
from .dataset import is_pic
from .dataset import get_encoding
class EasyDataCls(ImageNet):
"""读取EasyDataCls格式的分类数据集,并对样本进行相应的处理。
Args:
data_dir (str): 数据集所在的目录路径。
file_list (str): 描述数据集图片文件和对应标注文件的文件路径(文本内每行路径为相对data_dir的相对路)。
label_list (str): 描述数据集包含的类别信息文件路径。
transforms (paddlex.cls.transforms): 数据集中每个样本的预处理/增强算子。
num_workers (int|str): 数据集中样本在预处理过程中的线程或进程数。默认为'auto'。当设为'auto'时,根据
系统的实际CPU核数设置`num_workers`: 如果CPU核数的一半大于8,则`num_workers`为8,否则为CPU核
数的一半。
buffer_size (int): 数据集中样本在预处理过程中队列的缓存长度,以样本数为单位。默认为100。
parallel_method (str): 数据集中样本在预处理过程中并行处理的方式,支持'thread'
线程和'process'进程两种方式。默认为'process'(Windows和Mac下会强制使用thread,该参数无效)。
shuffle (bool): 是否需要对数据集中样本打乱顺序。默认为False。
"""
def __init__(self,
data_dir,
file_list,
label_list,
transforms=None,
num_workers='auto',
buffer_size=100,
parallel_method='process',
shuffle=False):
super(ImageNet, self).__init__(
transforms=transforms,
num_workers=num_workers,
buffer_size=buffer_size,
parallel_method=parallel_method,
shuffle=shuffle)
self.file_list = list()
self.labels = list()
self._epoch = 0
with open(label_list, encoding=get_encoding(label_list)) as f:
for line in f:
item = line.strip()
self.labels.append(item)
logging.info("Starting to read file list from dataset...")
with open(file_list, encoding=get_encoding(file_list)) as f:
for line in f:
img_file, json_file = [osp.join(data_dir, x) \
for x in line.strip().split()[:2]]
if not is_pic(img_file):
continue
if not osp.isfile(json_file):
continue
if not osp.exists(img_file):
raise IOError(
'The image file {} does not exist!'.format(img_file))
with open(json_file, mode='r', \
encoding=get_encoding(json_file)) as j:
json_info = json.load(j)
label = json_info['labels'][0]['name']
self.file_list.append([img_file, self.labels.index(label)])
self.num_samples = len(self.file_list)
logging.info("{} samples in file {}".format(
len(self.file_list), file_list))
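# --- illustrative aside (not part of the original module) ---------------------
# Sketch of the inputs the loop above expects, inferred only from the parsing
# code: each file_list line holds an image path and a JSON annotation path
# (relative to data_dir), and the JSON stores the class name under
# labels[0].name. The file names below are made up.
#
#   file_list line : images/cat_001.jpg annotations/cat_001.json
#   annotation JSON: {"labels": [{"name": "cat"}]}
#
import json
example_annotation = '{"labels": [{"name": "cat"}]}'
print(json.loads(example_annotation)['labels'][0]['name'])  # -> cat
# -------------------------------------------------------------------------------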
| 39.372093
| 77
| 0.611636
|
b824f49d75035c8f806988afdbb7ef0d78682c9b
| 1,285
|
tac
|
Python
|
docker/buildbot_worker.tac
|
ScalABLE40/buildbot-ros
|
f1badded81fa8bdfcef5bcbe596fc96dcbcba557
|
[
"Apache-2.0"
] | 1
|
2020-01-23T13:42:51.000Z
|
2020-01-23T13:42:51.000Z
|
docker/buildbot_worker.tac
|
ScalABLE40/buildbot-ros
|
f1badded81fa8bdfcef5bcbe596fc96dcbcba557
|
[
"Apache-2.0"
] | 7
|
2020-01-09T10:10:15.000Z
|
2020-03-04T13:56:59.000Z
|
docker/buildbot_worker.tac
|
ScalABLE40/buildbot-ros
|
f1badded81fa8bdfcef5bcbe596fc96dcbcba557
|
[
"Apache-2.0"
] | null | null | null |
import fnmatch
import os
import sys
from twisted.application import service
from twisted.python.log import FileLogObserver
from twisted.python.log import ILogObserver
from buildbot_worker.bot import Worker
# setup worker
basedir = os.environ.get("BUILDBOT_BASEDIR",
os.path.abspath(os.path.dirname(__file__)))
application = service.Application('buildbot-worker')
application.setComponent(ILogObserver, FileLogObserver(sys.stdout).emit)
# and worker on the same process!
buildmaster_host = os.environ.get("BUILDMASTER", 'localhost')
port = int(os.environ.get("BUILDMASTER_PORT", 9989))
workername = os.environ.get("WORKERNAME", 'docker')
passwd = os.environ.get("WORKERPASS")
# delete the password from the environ so that it is not leaked in the log
blacklist = os.environ.get("WORKER_ENVIRONMENT_BLACKLIST", "WORKERPASS").split()
for name in list(os.environ.keys()):
for toremove in blacklist:
if fnmatch.fnmatch(name, toremove):
del os.environ[name]
keepalive = 600
umask = 0o22
maxdelay = 180
allow_shutdown = None
maxretries = 10
s = Worker(buildmaster_host, port, workername, passwd, basedir,
keepalive, umask=umask, maxdelay=maxdelay,
allow_shutdown=allow_shutdown, maxRetries=maxretries)
s.setServiceParent(application)
| 31.341463
| 80
| 0.761868
|
7a0759036732f18d179a2c94c1d0765e26d12ca6
| 1,896
|
py
|
Python
|
tensorflow/python/autograph/converters/list_comprehensions_test.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 74
|
2020-07-06T17:11:39.000Z
|
2022-01-28T06:31:28.000Z
|
tensorflow/python/autograph/converters/list_comprehensions_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/autograph/converters/list_comprehensions_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 12
|
2020-07-08T07:27:17.000Z
|
2021-12-27T08:54:27.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for list_comprehensions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import list_comprehensions
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class ListCompTest(converter_testing.TestCase):
def assertTransformedEquivalent(self, f, *inputs):
tr = self.transform(f, list_comprehensions)
self.assertEqual(f(*inputs), tr(*inputs))
def test_basic(self):
def f(l):
s = [e * e for e in l]
return s
self.assertTransformedEquivalent(f, [])
self.assertTransformedEquivalent(f, [1, 2, 3])
def test_multiple_generators(self):
def f(l):
s = [e * e for sublist in l for e in sublist] # pylint:disable=g-complex-comprehension
return s
self.assertTransformedEquivalent(f, [])
self.assertTransformedEquivalent(f, [[1], [2], [3]])
def test_cond(self):
def f(l):
s = [e * e for e in l if e > 1]
return s
self.assertTransformedEquivalent(f, [])
self.assertTransformedEquivalent(f, [1, 2, 3])
if __name__ == '__main__':
test.main()
| 30.580645
| 93
| 0.694093
|
196921d8902ae707653dadfa383687e15f16e224
| 4,928
|
py
|
Python
|
aiocoap/util/__init__.py
|
miri64/aiocoap
|
93e699280b644465213fc8ba29bae556475fb6fc
|
[
"MIT"
] | 229
|
2015-02-11T19:35:48.000Z
|
2022-03-22T08:20:14.000Z
|
aiocoap/util/__init__.py
|
miri64/aiocoap
|
93e699280b644465213fc8ba29bae556475fb6fc
|
[
"MIT"
] | 258
|
2015-01-30T13:29:36.000Z
|
2022-03-20T16:05:07.000Z
|
aiocoap/util/__init__.py
|
miri64/aiocoap
|
93e699280b644465213fc8ba29bae556475fb6fc
|
[
"MIT"
] | 124
|
2015-02-14T12:02:44.000Z
|
2022-03-16T12:15:51.000Z
|
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""Tools not directly related with CoAP that are needed to provide the API
These are only part of the stable API to the extent they are used by other APIs
-- for example, you can use the type constructor of :class:`ExtensibleEnumMeta`
when creating an :class:`aiocoap.numbers.optionnumbers.OptionNumber`, but don't
expect it to be usable in a stable way for own extensions.
Most functions are available in submodules; some of them may only have
components that are exclusively used internally and never part of the public
API even in the limited fashion stated above.
.. toctree::
:glob:
aiocoap.util.*
"""
import urllib.parse
class ExtensibleEnumMeta(type):
"""Metaclass for ExtensibleIntEnum, see there for detailed explanations"""
def __init__(self, name, bases, dict):
self._value2member_map_ = {}
for k, v in dict.items():
if k.startswith('_'):
continue
if callable(v):
continue
if isinstance(v, property):
continue
instance = self(v)
instance.name = k
setattr(self, k, instance)
type.__init__(self, name, bases, dict)
def __call__(self, value):
if isinstance(value, self):
return value
if value not in self._value2member_map_:
self._value2member_map_[value] = super(ExtensibleEnumMeta, self).__call__(value)
return self._value2member_map_[value]
class ExtensibleIntEnum(int, metaclass=ExtensibleEnumMeta):
"""Similar to Python's enum.IntEnum, this type can be used for named
numbers which are not comprehensively known, like CoAP option numbers."""
def __add__(self, delta):
return type(self)(int(self) + delta)
def __repr__(self):
return '<%s %d%s>'%(type(self).__name__, self, ' "%s"'%self.name if hasattr(self, "name") else "")
def __str__(self):
return self.name if hasattr(self, "name") else int.__str__(self)
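# --- illustrative aside (not part of the original module) ---------------------
# Minimal usage sketch of ExtensibleIntEnum; `_DemoOption` is a toy stand-in for
# an option-number-like enum, not aiocoap's real OptionNumber class.
class _DemoOption(ExtensibleIntEnum):
    URI_PATH = 11

assert _DemoOption.URI_PATH == 11 and _DemoOption.URI_PATH.name == "URI_PATH"
assert _DemoOption(99) is _DemoOption(99)      # unknown values are accepted and interned
assert str(_DemoOption.URI_PATH + 1) == "12"   # arithmetic stays within the enum type
# -------------------------------------------------------------------------------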
def hostportjoin(host, port=None):
"""Join a host and optionally port into a hostinfo-style host:port
string
>>> hostportjoin('example.com')
'example.com'
>>> hostportjoin('example.com', 1234)
'example.com:1234'
>>> hostportjoin('127.0.0.1', 1234)
'127.0.0.1:1234'
This is lax with respect to whether host is an IPv6 literal in brackets or
not, and accepts either form; IP-future literals that do not contain a
colon must be already presented in their bracketed form:
>>> hostportjoin('2001:db8::1')
'[2001:db8::1]'
>>> hostportjoin('2001:db8::1', 1234)
'[2001:db8::1]:1234'
>>> hostportjoin('[2001:db8::1]', 1234)
'[2001:db8::1]:1234'
"""
if ':' in host and not (host.startswith('[') and host.endswith(']')):
host = '[%s]'%host
if port is None:
hostinfo = host
else:
hostinfo = "%s:%d"%(host, port)
return hostinfo
def hostportsplit(hostport):
"""Like urllib.parse.splitport, but return port as int, and as None if not
given. Also, it allows giving IPv6 addresses like a netloc:
>>> hostportsplit('foo')
('foo', None)
>>> hostportsplit('foo:5683')
('foo', 5683)
>>> hostportsplit('[::1%eth0]:56830')
('::1%eth0', 56830)
"""
pseudoparsed = urllib.parse.SplitResult(None, hostport, None, None, None)
try:
return pseudoparsed.hostname, pseudoparsed.port
except ValueError:
if '[' not in hostport and hostport.count(':') > 1:
raise ValueError("Could not parse network location. "
"Beware that when IPv6 literals are expressed in URIs, they "
"need to be put in square brackets to distinguish them from "
"port numbers.")
raise
def quote_nonascii(s):
"""Like urllib.parse.quote, but explicitly only escaping non-ascii characters.
This function is deprecated due to its use of the irrelevant "being an ASCII
character" property (when instead RFC3986 productions like "unreserved"
should be used), and due for removal when aiocoap's URI processing is
overhauled the next time.
"""
return "".join(chr(c) if c <= 127 else "%%%02X" % c for c in s.encode('utf8'))
class Sentinel:
"""Class for sentinel that can only be compared for identity. No efforts
are taken to make these singletons; it is up to the users to always refer
to the same instance, which is typically defined on module level."""
def __init__(self, label):
self._label = label
def __repr__(self):
return '<%s>' % self._label
| 35.710145
| 106
| 0.654221
|
b84612b4ced42d92787d6a3e1fc3cf38bf608f4c
| 4,337
|
py
|
Python
|
linear-algebra-python/src/tests.py
|
niroshajayasundara/Python
|
ed113841165717d135a001307d06f4282ad16870
|
[
"MIT"
] | 5
|
2018-04-02T08:06:54.000Z
|
2018-08-02T03:01:27.000Z
|
linear_algebra_python/src/tests.py
|
tlhcelik/Python
|
3bab59ae06f79698ab579ba52d2bc4ea6402154c
|
[
"MIT"
] | 1
|
2019-02-03T07:45:29.000Z
|
2019-02-03T07:45:29.000Z
|
linear_algebra_python/src/tests.py
|
tlhcelik/Python
|
3bab59ae06f79698ab579ba52d2bc4ea6402154c
|
[
"MIT"
] | 8
|
2019-10-03T13:37:51.000Z
|
2019-10-25T20:33:10.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 15:40:07 2018
@author: Christian Bender
@license: MIT-license
This file contains the test-suite for the linear algebra library.
"""
import unittest
from lib import *
class Test(unittest.TestCase):
def test_component(self):
"""
test for method component
"""
x = Vector([1,2,3])
self.assertEqual(x.component(0),1)
self.assertEqual(x.component(2),3)
try:
y = Vector()
self.assertTrue(False)
except:
self.assertTrue(True)
def test_str(self):
"""
test for toString() method
"""
x = Vector([0,0,0,0,0,1])
self.assertEqual(x.__str__(),"(0,0,0,0,0,1)")
def test_size(self):
"""
test for size()-method
"""
x = Vector([1,2,3,4])
self.assertEqual(x.size(),4)
def test_euclidLength(self):
"""
test for the euclidean length
"""
x = Vector([1,2])
self.assertAlmostEqual(x.eulidLength(),2.236,3)
def test_add(self):
"""
test for + operator
"""
x = Vector([1,2,3])
y = Vector([1,1,1])
self.assertEqual((x+y).component(0),2)
self.assertEqual((x+y).component(1),3)
self.assertEqual((x+y).component(2),4)
def test_sub(self):
"""
test for - operator
"""
x = Vector([1,2,3])
y = Vector([1,1,1])
self.assertEqual((x-y).component(0),0)
self.assertEqual((x-y).component(1),1)
self.assertEqual((x-y).component(2),2)
def test_mul(self):
"""
test for * operator
"""
x = Vector([1,2,3])
a = Vector([2,-1,4]) # for test of dot-product
b = Vector([1,-2,-1])
self.assertEqual((x*3.0).__str__(),"(3.0,6.0,9.0)")
self.assertEqual((a*b),0)
def test_zeroVector(self):
"""
test for the global function zeroVector(...)
"""
self.assertTrue(zeroVector(10).__str__().count("0") == 10)
def test_unitBasisVector(self):
"""
test for the global function unitBasisVector(...)
"""
self.assertEqual(unitBasisVector(3,1).__str__(),"(0,1,0)")
def test_axpy(self):
"""
test for the global function axpy(...) (operation)
"""
x = Vector([1,2,3])
y = Vector([1,0,1])
self.assertEqual(axpy(2,x,y).__str__(),"(3,4,7)")
def test_copy(self):
"""
test for the copy()-method
"""
x = Vector([1,0,0,0,0,0])
y = x.copy()
self.assertEqual(x.__str__(),y.__str__())
def test_changeComponent(self):
"""
test for the changeComponent(...)-method
"""
x = Vector([1,0,0])
x.changeComponent(0,0)
x.changeComponent(1,1)
self.assertEqual(x.__str__(),"(0,1,0)")
def test_str_matrix(self):
A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n",A.__str__())
def test__mul__matrix(self):
A = Matrix([[1,2,3],[4,5,6],[7,8,9]],3,3)
x = Vector([1,2,3])
self.assertEqual("(14,32,50)",(A*x).__str__())
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n",(A*2).__str__())
def test_changeComponent_matrix(self):
A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
A.changeComponent(0,2,5)
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n",A.__str__())
def test_component_matrix(self):
A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
self.assertEqual(7,A.component(2,1),0.01)
def test__add__matrix(self):
A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
B = Matrix([[1,2,7],[2,4,5],[6,7,10]],3,3)
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n",(A+B).__str__())
def test__sub__matrix(self):
A = Matrix([[1,2,3],[2,4,5],[6,7,8]],3,3)
B = Matrix([[1,2,7],[2,4,5],[6,7,10]],3,3)
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n",(A-B).__str__())
def test_squareZeroMatrix(self):
self.assertEqual('|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|'
+'\n|0,0,0,0,0|\n',squareZeroMatrix(5).__str__())
if __name__ == "__main__":
unittest.main()
| 32.609023
| 78
| 0.509108
|
3c93989b8fef29b737de0d8334b0c9526e064848
| 493
|
py
|
Python
|
azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/version.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/version.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/version.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "0.5.2"
| 35.214286
| 76
| 0.51927
|
124792b6da390139e7284c552b075a180b039dfd
| 760
|
py
|
Python
|
test/test_baidu.py
|
xudongchou/WebAutoTest
|
f65e57e65fb00033d05e7ddaa0247c408abf4f8a
|
[
"MIT"
] | null | null | null |
test/test_baidu.py
|
xudongchou/WebAutoTest
|
f65e57e65fb00033d05e7ddaa0247c408abf4f8a
|
[
"MIT"
] | null | null | null |
test/test_baidu.py
|
xudongchou/WebAutoTest
|
f65e57e65fb00033d05e7ddaa0247c408abf4f8a
|
[
"MIT"
] | null | null | null |
"""
@project:code
@author:lenovo
@file:test_baidu.py
@ide:PyCharm
@time:2020/8/26 16:12
@month:August
"""
from uimethod.driverselect import SelectDriver
from uimethod.uiauto import WebUiMethod
from uimethod.helpers import Logger
import unittest
class Test_baidu(unittest.TestCase):
def test_1(self):
"""
Open Baidu and search for Douban
"""
driver = SelectDriver.mybrowser("firefox")
print(driver.name)
We = WebUiMethod
We.navigate(driver, "http://www.baidu.com")
We.wait(8)
We.webEdit(driver, "//*[@id='kw']", "豆瓣")
We.wait(3)
We.webButton(driver, "//*[@id='su']").click()
We.wait(10)
We.screen_shot(driver)
We.wait(8)
We.exit(driver)
print("关闭浏览器")
| 22.352941
| 53
| 0.6
|
993cbd00287348a84e55d7d8eaf05054c4d9f984
| 505
|
py
|
Python
|
Searching/LinearSearch.py
|
imkp1/Data-Structures-Algorithms
|
71375599d4a4a02fb53f64c498fb238595f504f9
|
[
"MIT"
] | 199
|
2017-10-10T18:12:45.000Z
|
2022-02-16T23:21:22.000Z
|
Searching/LinearSearch.py
|
imkp1/Data-Structures-Algorithms
|
71375599d4a4a02fb53f64c498fb238595f504f9
|
[
"MIT"
] | 120
|
2017-10-10T18:01:59.000Z
|
2021-10-01T09:53:07.000Z
|
Searching/LinearSearch.py
|
imkp1/Data-Structures-Algorithms
|
71375599d4a4a02fb53f64c498fb238595f504f9
|
[
"MIT"
] | 262
|
2017-10-10T18:12:46.000Z
|
2021-12-30T08:52:10.000Z
|
def linearSearch(list, targetValue):
for i in range(len(list)):
if (targetValue == list[i]):
return i
return "Not Found"
def main():
# Example
exampleList = [1, 2, 3, 4, 5, 6, 7, 8, 9]
exampleTarget = 5
result = linearSearch(exampleList, exampleTarget)
if (result == "Not Found"):
print("Your value", exampleTarget, "was", result)
else:
print("Your value", exampleTarget, "was found at list index", result)
main()
| 25.25
| 78
| 0.572277
|
7c74e603000f2e0d2c987732dc544e5b98d4657e
| 3,016
|
py
|
Python
|
baselines/her/rollout.py
|
knowledgetechnologyuhh/goal_conditioned_RL_baselines
|
915fc875fd8cc75accd0804d99373916756f726e
|
[
"MIT"
] | 15
|
2020-07-01T16:16:09.000Z
|
2021-12-20T21:56:33.000Z
|
baselines/her/rollout.py
|
knowledgetechnologyuhh/goal_conditioned_RL_baselines
|
915fc875fd8cc75accd0804d99373916756f726e
|
[
"MIT"
] | 14
|
2020-09-25T22:41:20.000Z
|
2022-03-12T00:38:44.000Z
|
baselines/her/rollout.py
|
knowledgetechnologyuhh/goal_conditioned_RL_baselines
|
915fc875fd8cc75accd0804d99373916756f726e
|
[
"MIT"
] | 2
|
2020-07-01T16:19:08.000Z
|
2020-11-28T10:45:59.000Z
|
import numpy as np
import time
from baselines.template.util import store_args, logger
from baselines.template.rollout import Rollout
from tqdm import tqdm
import sys
class RolloutWorker(Rollout):
@store_args
def __init__(self, make_env, policy, dims, logger, T, rollout_batch_size=1,
exploit=False, history_len=100, render=False, **kwargs):
"""Rollout worker generates experience by interacting with one or many environments.
Args:
make_env (function): a factory function that creates a new instance of the environment
when called
policy (object): the policy that is used to act
dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)
logger (object): the logger that is used by the rollout worker
rollout_batch_size (int): the number of parallel rollouts that should be used
exploit (boolean): whether or not to exploit, i.e. to act optimally according to the
current policy without any exploration
use_target_net (boolean): whether or not to use the target net for rollouts
compute_Q (boolean): whether or not to compute the Q values alongside the actions
noise_eps (float): scale of the additive Gaussian noise
random_eps (float): probability of selecting a completely random action
history_len (int): length of history for statistics smoothing
render (boolean): whether or not to render the rollouts
"""
Rollout.__init__(self, make_env, policy, dims, logger, T, rollout_batch_size=rollout_batch_size, history_len=history_len, render=render, **kwargs)
def generate_rollouts_update(self, n_episodes, n_train_batches):
dur_ro = 0
dur_train = 0
dur_start = time.time()
for cyc in tqdm(range(n_episodes), file=sys.__stdout__):
# logger.info("Performing ")
ro_start = time.time()
episode = self.generate_rollouts()
self.policy.store_episode(episode)
dur_ro += time.time() - ro_start
train_start = time.time()
for _ in range(n_train_batches):
self.policy.train()
self.policy.update_target_net()
dur_train += time.time() - train_start
dur_total = time.time() - dur_start
updated_policy = self.policy
time_durations = (dur_total, dur_ro, dur_train)
return updated_policy, time_durations
def current_mean_Q(self):
return np.mean(self.custom_histories[0])
def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.custom_histories:
logs += [('mean_Q', np.mean(self.custom_histories[0]))]
logs += [('episode', self.n_episodes)]
return logger(logs, prefix)
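# --- illustrative aside (not part of the original module) ---------------------
# Sketch of what a @store_args-style decorator can look like: every named
# constructor argument is stored as an attribute before __init__ runs. This is
# an assumption-laden stand-in; baselines.template.util.store_args may differ.
import functools
import inspect

def store_args_sketch(init):
    signature = inspect.signature(init)

    @functools.wraps(init)
    def wrapper(self, *args, **kwargs):
        bound = signature.bind(self, *args, **kwargs)
        bound.apply_defaults()
        for name, value in list(bound.arguments.items())[1:]:  # skip `self`
            setattr(self, name, value)
        return init(self, *args, **kwargs)
    return wrapper

class _DemoConfig:
    @store_args_sketch
    def __init__(self, rollout_batch_size=1, render=False, **kwargs):
        pass

cfg = _DemoConfig(render=True, history_len=50)
print(cfg.rollout_batch_size, cfg.render, cfg.kwargs)  # -> 1 True {'history_len': 50}
# -------------------------------------------------------------------------------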
| 44.352941
| 154
| 0.648541
|
80b6a47e5b90481ee33114fac96d16eb883f63e4
| 21,946
|
py
|
Python
|
utils/shared/summary_core.py
|
Imperas/force-riscv
|
c15bc18e4d70e6c2f50bad1e9176e13575de6081
|
[
"Apache-2.0"
] | null | null | null |
utils/shared/summary_core.py
|
Imperas/force-riscv
|
c15bc18e4d70e6c2f50bad1e9176e13575de6081
|
[
"Apache-2.0"
] | null | null | null |
utils/shared/summary_core.py
|
Imperas/force-riscv
|
c15bc18e4d70e6c2f50bad1e9176e13575de6081
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Threading Base
import re
from collections import defaultdict
from shared.path_utils import PathUtils
from shared.sys_utils import SysUtils
from shared.msg_utils import Msg
from shared.datetime_utils import DateTime
from shared.collections import HiThreadedProducerConsumerQueue
from shared.threads import HiThread, workers_done_event, summary_done_event
class SummaryLevel:
Silent = 0 # only show summary info, log all results to summary log
Fail = 1 # show only the fails,
Any = 2
class SummaryDetail:
Nothing = 0x00
ForceCmd = 0x01
ForceResult = 0x02
IssCmd = 0x04
IssResult = 0x08
AnyDetail = 0xFF
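# --- illustrative aside (not part of the original module) ---------------------
# How these flags are meant to be used: individual bits are OR'ed into a detail
# mask and later tested with a bitwise AND, exactly as SummaryItem does below.
_demo_flags = SummaryDetail.Nothing
_demo_flags |= SummaryDetail.ForceCmd
_demo_flags |= SummaryDetail.IssResult
assert (_demo_flags & SummaryDetail.ForceCmd) == SummaryDetail.ForceCmd
assert (_demo_flags & SummaryDetail.ForceResult) == SummaryDetail.Nothing
# -------------------------------------------------------------------------------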
# class SummaryThread( HiThread ): pass
class SummaryQueueItem( object ):
def __init__( self, arg_process_info ): # arg_frun_path, arg_parent_fctrl, arg_fctrl_item, arg_group ):
self.process_info = arg_process_info
class SummaryErrorQueueItem( object ):
def __init__( self, arg_error_info ):
self.error_info = arg_error_info
class SummaryErrorItem( object ):
def load( self, arg_error_qitem ):
self.extract_error( arg_error_qitem )
self.report()
def extract_error( self, arg_error_qitem ):
self.error_item = arg_error_qitem.error_info
def get_err_line( self ):
my_err_type = str( self.error_item["type"] ).replace( "<class '","" ).replace( "'>","" )
return "[ERROR] Type: %s, Error: %s, Path: \"%s\", Message: %s" % ( my_err_type
, str( self.error_item["error"] )
, str( self.error_item["path"] )
, str( self.error_item["message"] )
)
def report( self ):
pass
# self.prepare
class SummaryQueue( HiThreadedProducerConsumerQueue ):
def __init__( self ):
super().__init__(True)
class SummaryItem( object ):
def load( self, arg_queue_item ):
try:
# my_usr_lbl = Msg.set_label( "user", "SUMMARY_ITEM" )
self.load_process_info( arg_queue_item.process_info )
# Msg.lout( self, "user", "Summary Item(load_process_info)" )
self.load_task_info( )
# Msg.lout( self, "user", "Summary Item(load_task_info)" )
self.load_process_log()
# Msg.lout( self, "user", "Summary Item(load_process_log)" )
self.load_force_log()
# Msg.lout( self, "user", "Summary Item(load_force_log)" )
self.load_force_elog()
# Msg.lout( self, "user", "Summary Item(load_force_elog)" )
self.prepare()
self.report()
# Msg.set_label( "user", my_usr_lbl )
except Exception as arg_ex:
Msg.error_trace()
Msg.err( str( arg_ex ))
except:
Msg.error_trace()
def prepare( self ):
pass
def report( self ):
if self.default is None: self.default = 0
if self.secondary is None: self.secondary = 0
if self.total is None: self.total = 0
Msg.info( "Task Id: %s, Task Index: %d" % ( self.task_id, self.task_index ))
my_msg = "Process Log Contains "
if self.detail_flags & SummaryDetail.AnyDetail == SummaryDetail.Nothing : my_msg = "Process Log Not Found or is Empty"
else:
if self.detail_flags & SummaryDetail.ForceCmd == SummaryDetail.ForceCmd : my_msg += "ForceCommand, "
if self.detail_flags & SummaryDetail.ForceResult == SummaryDetail.ForceResult: my_msg += "ForceResult, "
if self.detail_flags & SummaryDetail.IssCmd == SummaryDetail.IssCmd : my_msg += "IssCommand, "
if self.detail_flags & SummaryDetail.IssResult == SummaryDetail.IssResult : my_msg += "IssResult, "
my_msg = my_msg[:-2]
# Msg.user( my_msg )
# Msg.info( "Originating Control File: %s" % ( self.parent_fctrl ))
# Msg.info( "Control File Item: %s" % ( str( self.fctrl_item )))
# Msg.info( "F-Run Control File: %s" % ( self.frun_path ))
Msg.dbg( "Force Ret Code: %s" % ( str( self.force_retcode )))
Msg.info( "[%s], Generate Command: %s" % ( SysUtils.ifthen( SysUtils.success( self.force_retcode ), "SUCCESS", "FAILED" ), str( self.force_cmd )))
Msg.info( "Instructions Generated - Default: %d, Secondary: %d, Total: %d" % ( self.default, self.secondary, self.total ))
if self.iss_cmd is not None:
if self.iss_success():
Msg.info( "[SUCCESS], ISS Command: %s" % ( str( self.iss_cmd )))
else:
Msg.info( "[FAILED], ISS Command: %s" % ( str( self.iss_cmd )))
#Msg.fout( self.iss_log, "user" )
Msg.blank()
def iss_success( self ):
return True
def has_generate( self ):
return self.force_cmd is not None
def has_simulate( self ):
return self.iss_cmd is not None
def load_process_info( self, arg_process_info ):
# update the user label and send iteration message to the screen if user is active
# my_usr_lbl = Msg.set_label( "user", "PROCESS-RESULT" )
# Msg.user( "Executing Iteration #%d of Test File: %s" % ( my_ndx + 1, arg_task_file ))
Msg.dbg( "self.process_result: (%s)" % ( str( arg_process_info.get("process-result", None ))))
# Msg.set_label( "user", my_usr_lbl )
self.process_cmd = arg_process_info.get("process-cmd" , None )
self.process_log = arg_process_info.get("process-log" , None )
self.process_result = arg_process_info.get("process-result", None )
self.frun_path = arg_process_info.get("frun-path" , None )
self.parent_fctrl = arg_process_info.get("parent-fctrl" , None )
self.fctrl_item = arg_process_info.get("fctrl-item" , None )
self.item_group = arg_process_info.get("item-group" , None )
self.fctrl_content = arg_process_info.get("content" , None )
self.detail_flags = SummaryDetail.Nothing
def load_task_info( self ):
self.task_id = None
self.task_index = None
self.work_dir = None
self.work_dir, my_tmp = PathUtils.split_path( self.frun_path )
my_tmp, my_index = PathUtils.split_dir( self.work_dir )
my_tmp, self.task_id = PathUtils.split_dir( my_tmp )
self.task_index = int( my_index )
def load_process_log( self ):
self.force_cmd = None
self.force_elog = None
self.force_log = None
self.force_retcode = None
self.force_stderr = None
self.force_stdout = None
self.force_level = SummaryLevel.Any
self.force_start = 0.00
self.force_end = 0.00
self.iss_cmd = None
self.iss_log = None
self.iss_retcode = None
self.max_instr = None
self.min_instr = None
# Msg.fout( self.process_log, "dbg" )
with open( self.process_log , "r" ) as my_flog:
try:
for my_line in my_flog:
Msg.dbg("Load: %s" % my_line)
self.load_process_line( my_line )
except Exception as arg_ex:
Msg.error_trace( arg_ex )
Msg.err( str( arg_ex ))
finally:
my_flog.close()
# here is the lowdown, if a generate line exists in the process log then a Result line must also exists,
if ( self.detail_flags & SummaryDetail.ForceCmd ) and not ( self.detail_flags & SummaryDetail.ForceResult ):
self.load_gen_result( { "force-retcode": self.process_result[0]
, "force-stdout" : self.process_result[2]
, "force-stderr" : self.process_result[2]
, "force_start" : self.process_result[3]
, "force_end" : self.process_result[4]
} )
elif ( self.detail_flags & SummaryDetail.IssCmd ) and not ( self.detail_flags & SummaryDetail.IssResult ):
self.load_iss_result( { "iss-retcode" : self.process_result[0]
, "iss-log" : None
} )
def load_force_log( self, arg_seed_only = False ):
self.default = None
self.secondary = None
self.total = None
self.seed = None
self.task_path = PathUtils.include_trailing_path_delimiter( self.work_dir )
my_glog = "%s%s" % ( self.task_path, self.force_log )
Msg.dbg( "Path: %s" % my_glog)
# Msg.user( "Opening Generator Log File: %s" % ( my_glog ))
with open( my_glog, "r" ) as my_flog:
try:
for my_line in my_flog:
if SysUtils.found( my_line.find( "Secondary Instructions Generated" )):
# my_secondary = my_line.replace( "[notice]Secondary Instructions Generated:", "" ).strip()
# self.secondary = int( my_secondary )
my_lpos = my_line.find( ':' )
my_count = int( my_line[my_lpos+2:].strip())
# Msg.user( "Secondary Instructions: %d" % ( my_count ))
self.secondary = my_count
elif SysUtils.found( my_line.find( "Default Instructions Generated" )):
# my_pos = my_line.find( ":" ) + 2
# my_default = my_line[ my_pos: ].strip()
# get the count for this instruction type
my_lpos = my_line.find( ':' )
my_count = int( my_line[my_lpos+2:].strip())
# Msg.user( "Default Instructions: %d" % ( my_count ))
self.default = my_count
# my_default = my_line.replace( "[notice]Default Instructions Generated:", "" ).strip()
# self.default = int( my_default )
if SysUtils.found( my_line.find( "Total Instructions Generated" )):
self.total = int( my_line.replace( "[notice]Total Instructions Generated: ", "" ).strip())
my_lpos = my_line.find( ':' )
my_count = int( my_line[my_lpos+2:].strip())
# Msg.user( "Total Instructions: %d" % ( my_count ))
self.total = my_count
if SysUtils.found( my_line.find( "Initial seed" )):
self.seed = my_line.replace( "[notice]", "" ).replace( "Initial seed = ", "" ).strip()
# Msg.dbg( "Seed: %s" % ( self.seed ))
# for simulation only the seed is needed
if arg_seed_only:
break
if not( self.seed is None or self.total is None or self.secondary is None or self.default is None ):
break
except Exception as arg_ex:
# NOTE: Determine the possible errors and handle accordingly, for now just keep processing
Msg.error_trace()
Msg.err( str( arg_ex ))
finally:
my_flog.close()
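# --- illustrative aside (not part of the original class) ----------------------
# The counter extraction above boils down to: find the colon, then parse the
# remainder as an int. Standalone check on a made-up log line:
#   line = "[notice]Total Instructions Generated: 10000"
#   int(line[line.find(':') + 2:].strip()) -> 10000
# -------------------------------------------------------------------------------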
def load_force_elog( self ):
self.force_msg = None
my_elog = "%s%s" % ( PathUtils.include_trailing_path_delimiter( self.work_dir ), self.force_elog )
# Msg.dbg( my_elog )
if SysUtils.failed( self.force_retcode ):
Msg.fout( my_elog, "dbg" )
with open( my_elog , "r" ) as my_flog:
try:
for my_line in my_flog:
if SysUtils.found( my_line.find( "[fail]" )):
self.force_msg = my_line.replace( "[fail]", "" ).strip()
# Msg.dbg( "Message: %s" % ( str( self.force_msg )))
break
finally:
my_flog.close()
def load_process_line( self, arg_line ):
if SysUtils.found( arg_line.find( "ForceCommand" )):
my_glb, my_loc = SysUtils.exec_content( arg_line )
self.load_gen_info( my_loc["ForceCommand"] )
self.detail_flags |= SummaryDetail.ForceCmd
elif SysUtils.found( arg_line.find( "ForceResult" )):
my_glb, my_loc = SysUtils.exec_content( arg_line )
self.load_gen_result( my_loc["ForceResult" ] )
self.detail_flags |= SummaryDetail.ForceResult
elif SysUtils.found( arg_line.find( "ISSCommand" )):
my_glb, my_loc = SysUtils.exec_content( arg_line )
self.load_iss_info( my_loc["ISSCommand"] )
self.detail_flags |= SummaryDetail.IssCmd
elif SysUtils.found( arg_line.find( "ISSResult" )):
my_glb, my_loc = SysUtils.exec_content( arg_line )
self.load_iss_result( my_loc["ISSResult" ] )
self.detail_flags |= SummaryDetail.IssResult
# load the force generate information
def load_gen_info( self, arg_dict ):
# Msg.lout( arg_dict, "user", "Generate Info Dictionary ... " )
self.force_cmd = arg_dict["force-command"]
self.force_log = arg_dict["force-log"]
self.force_elog= arg_dict["force-elog"]
self.max_instr = arg_dict["max-instr"]
self.min_instr = arg_dict["min-instr"]
# load the force generate results
def load_gen_result( self, arg_dict ):
# Msg.lout( arg_dict, "dbg", "Generate Results Dictionary ... " )
try:
self.force_retcode = int( str( arg_dict["force-retcode"] ).strip() )
except :
self.force_retcode = -1
Msg.err( "Generate Return Code in unrecognizable format" )
self.force_stdout = arg_dict["force-stdout" ]
self.force_stderr = arg_dict["force-stderr" ]
if SysUtils.failed( self.force_retcode ):
self.force_level = SummaryLevel.Fail
self.force_start = float( arg_dict.get("force-start", 0.00 ))
self.force_end = float( arg_dict.get("force-end" , 0.00 ))
# Msg.lout( self, "dbg", "General Summary Item ... " )
# load the iss execution information
def load_iss_info( self, arg_dict ):
# Msg.lout( arg_dict, "user", "ISS Info Dictionary ... " )
self.iss_cmd = arg_dict["iss-command"]
# load the iss execution results
def load_iss_result( self,arg_dict ):
# Msg.lout( arg_dict, "user", "ISS Results Dictionary ... " )
self.iss_log = arg_dict["iss-log"]
try:
self.iss_retcode = int( arg_dict["iss-retcode"] )
except :
self.iss_retcode = -1
Msg.err( "ISS Return Code in unrecognizable format" )
def commit( self ):
my_gen_cnt = 0
my_gen_ret = 0
my_sim_cnt = 0
my_sim_ret = 0
my_tgt_name = ""
if self.has_generate():
#Msg.user( "if self.has_generate(): True" )
my_gen_cnt = 1
my_gen_ret = self.commit_generate()
if self.has_simulate():
#Msg.user( "if self.has_simulate(): True" )
my_sim_cnt = 1
my_sim_ret = self.commit_simulate()
my_tgt_name = "%s%s" % ( self.task_path, SysUtils.ifthen( bool( my_sim_ret ), "PASS", "FAIL" ))
else:
my_tgt_name = "%s%s" % ( self.task_path, SysUtils.ifthen( bool( my_gen_ret ), "PASS", "FAIL" ))
my_src_name = "%s%s" % ( self.task_path, "STARTED" )
PathUtils.move( my_src_name, my_tgt_name )
return ( my_gen_cnt, my_gen_ret, my_sim_cnt, my_sim_ret )
class SummaryGroups( object ):
def __init__( self ):
self.groups = {}
self.queue = SummaryQueue()
# self.group_lookup = []
def update_groups( self, arg_group ):
if not arg_group in self.groups:
self.groups[ arg_group ] = []
# adds an item to a group list if the group does not exist the list is created
def add_item( self, arg_item ):
# Msg.dbg( "Item Group: %s"% ( arg_item.item_group ))
if not arg_item.item_group in self.groups:
self.groups[ arg_item.item_group ] = []
self.groups[ arg_item.item_group ].append( arg_item )
# Msg.dbg( "Group Count: %d, Group %s Membership Count: %d" % ( len( self.groups ), arg_item.item_group, len( self.groups[ arg_item.item_group ]) ))
# returns the item list associated with the group passed as argument
def group_items( self, arg_group ):
return self.groups[ arg_group ]
# return the list of groups
def task_groups( self ):
return self.groups
class SummaryThread( HiThread ):
def __init__( self, sq, summary):
self.summary_queue = sq
self.summary = summary
# We do not want the thread to launch until we've loaded all the properties
super().__init__(True)
def commit_item( self, arg_item ):
if not arg_item.task_id in self.summary.tasks:
self.summary.tasks[ arg_item.task_id ] = []
self.summary.task_lookup.append( arg_item.task_id )
self.summary.groups.update_groups( arg_item.item_group )
return 0
def run( self ):
# Block on process queue while we have threads running and stuff to do
# == Replaced ==>> while True:
# == Replaced ==>> # Pop off the top of the process queue (should block if the queue is empty)
# == Replaced ==>> try:
# == Replaced ==>> next_item = self.summary_queue.dequeue(0)
# == Replaced ==>> except TimeoutError:
# == Replaced ==>> if (workers_done_event.isSet()):
# == Replaced ==>> summary_done_event.Signal()
# == Replaced ==>> return
# == Replaced ==>> else:
# == Replaced ==>> self.HeartBeat()
# == Replaced ==>> continue
# == Replaced ==>> my_item = self.summary.create_summary_item()
# == Replaced ==>> my_item.load( next_item )
# == Replaced ==>> self.summary.commit_item( my_item )
# == Replaced ==>> next_item = None
try:
while True: #not workers_done_event.isSet():
# Pop off the top of the process queue (should block if the queue is empty)
try:
# my_item = self.summary.create_summary_item()
# my_qitem = self.summary_queue.dequeue(0)
# my_item.load( my_qitem )
# self.summary.commit_item( my_item )
# my_qitem = None
# my_qitem = self.summary_queue.dequeue(0)
my_qitem = self.summary_queue.dequeue(0)
if isinstance( my_qitem, SummaryErrorQueueItem):
Msg.user( str( my_qitem.error_info ), "SUMMARY_ERROR" )
my_eitem = SummaryErrorItem()
my_eitem.load( my_qitem )
self.summary.commit_error_item( my_eitem )
else:
my_item = self.summary.create_summary_item()
my_item.load( my_qitem )
self.summary.commit_item( my_item )
my_qitem = None
except TimeoutError as arg_ex:
# {{{TODO}}} Implement proper heartbeat
# Msg.dbg( str( arg_ex ) )
if (workers_done_event.isSet()):
break
else:
self.HeartBeat()
continue
except Exception as arg_ex:
Msg.error_trace()
Msg.err( str( arg_ex ))
raise
except:
Msg.error_trace()
raise
finally:
summary_done_event.Signal()
class Summary( object ):
def __init__( self, arg_summary_dir ):
self.summary_dir = arg_summary_dir
self.tasks = defaultdict(list)
self.errors = []
self.groups = SummaryGroups()
self.queue = SummaryQueue()
# Launch a Summary Thread
self.summary_thread = SummaryThread(self.queue, self)
    def lock( self ):
        return True
    def unlock( self ):
        return True
def commit_error_item( self, arg_error ):
self.errors.append( arg_error )
def process_errors( self, arg_ofile ):
if len( self.errors ) > 0:
arg_ofile.write( "\n" )
Msg.blank()
for my_eitem in self.errors:
my_err_line = str( my_eitem.get_err_line())
Msg.info( my_err_line )
arg_ofile.write( "%s\n" % ( my_err_line ))
# abstracts
def create_summary_item( self ): pass
def commit_item( self, arg_item ): pass
def process_summary( self, sum_level = SummaryLevel.Fail ): pass
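# --- Illustrative sketch only (not part of the original file) -----------------------
# The three methods above are abstract hooks; a concrete Summary is assumed to fill
# them in roughly like this. The item type returned by create_summary_item() and the
# reporting format below are assumptions made purely for illustration.
class _ExampleSummary(Summary):
    def create_summary_item(self):
        # a real subclass would return its own summary-item type here
        return SummaryErrorItem()
    def commit_item(self, arg_item):
        self.tasks[arg_item.task_id].append(arg_item)
        self.groups.add_item(arg_item)
        return 0
    def process_summary(self, sum_level=SummaryLevel.Fail):
        for my_group, my_items in self.groups.task_groups().items():
            Msg.info("Group %s: %d item(s)" % (my_group, len(my_items)))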
| 39.542342 | 157 | 0.558735 |
731666da3a7c850e4d35fa43ce3832fc3e183d5c | 2,578 | py | Python | setup.py | langdoc/pyannote-db-ikdp | d0f61cc0b03efc27a3a68917a2a7e62a096bea55 | ["MIT"] | 1 | 2017-07-15T09:51:02.000Z | 2017-07-15T09:51:02.000Z | setup.py | langdoc/pyannote-db-ikdp | d0f61cc0b03efc27a3a68917a2a7e62a096bea55 | ["MIT"] | null | null | null | setup.py | langdoc/pyannote-db-ikdp | d0f61cc0b03efc27a3a68917a2a7e62a096bea55 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
#
# Copyright (c) 2017 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr/
import versioneer
from setuptools import setup, find_packages
setup(
# replace "mydatabase" by the name of your database
name='pyannote.db.ikdp',
# replace "MyDatabase" by the name of your database
description="IKDP plugin for pyannote-database",
# replace with your information
author='Niko Partanen',
author_email='nikotapiopartanen@github.com',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(),
# replace "MyDatabase" by the new name of MyDatabase directory
package_data={
'ikdp': [
'data/*',
],
},
include_package_data=True,
install_requires=[
'pyannote.database >= 0.11.2',
'pyannote.parser >= 0.6.5',
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering"
],
# replace MyDatabase by the name of your database (using CamelCase)
entry_points="""
[pyannote.database.databases]
ikdp=ikdp:ikdp
"""
)
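# Illustrative sketch only (not part of the original setup.py): after installation,
# pyannote.database is expected to discover this plugin through the entry point group
# declared above. The lookup below uses setuptools' pkg_resources registry; the exact
# discovery mechanism used by pyannote.database itself may differ.
def _discover_ikdp_plugin():
    import pkg_resources
    for entry_point in pkg_resources.iter_entry_points("pyannote.database.databases"):
        if entry_point.name == "ikdp":
            return entry_point.load()  # -> the ikdp database class exposed by the package
    return None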
| 33.480519 | 79 | 0.688906 |
d0eba3db245b425a53e7f8b4e866da716890c02a | 828 | py | Python | setup.py | vasusen-code/mediafire-dl | 23102dc6bf90394bb5f5cd1d2509f5985ec4042c | ["MIT"] | 2 | 2022-01-17T13:08:27.000Z | 2022-01-20T05:33:34.000Z | setup.py | vasusen-code/mediafire-dl | 23102dc6bf90394bb5f5cd1d2509f5985ec4042c | ["MIT"] | null | null | null | setup.py | vasusen-code/mediafire-dl | 23102dc6bf90394bb5f5cd1d2509f5985ec4042c | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
setup(
name="mediafire-dl",
version="1",
description="Simple command-line script to download files from mediafire based on gdown",
url="https://github.com/vasusen-code/mediafire-dl",
author="Juvenal Yescas",
author_email="juvenal@mail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7"
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords="audo ai",
py_modules=['mediafire_dl'],
install_requires=[
"requests",
"tqdm",
],
entry_points={
"console_scripts": ["mediafire-dl=mediafire_dl:main"],
},
)
| 29.571429 | 93 | 0.614734 |
69466ab4cfe4606c533dc648cb050596a7e2a195 | 542 | py | Python | tools/genhooks/recipe_idbhooks.py | sfinktah/src | 7eea22820ff9915660aba20b726495cf6c618728 | ["BSD-3-Clause"] | null | null | null | tools/genhooks/recipe_idbhooks.py | sfinktah/src | 7eea22820ff9915660aba20b726495cf6c618728 | ["BSD-3-Clause"] | null | null | null | tools/genhooks/recipe_idbhooks.py | sfinktah/src | 7eea22820ff9915660aba20b726495cf6c618728 | ["BSD-3-Clause"] | null | null | null |
recipe = {
"enum_const_created" : {
"method_name" : "enum_member_created",
"add_params" : [
{ "name" : "id", "type" : "enum_t" },
{ "name" : "cid", "type" : "const_t" },
],
},
"enum_const_deleted" : {
"method_name" : "enum_member_deleted",
"add_params" : [
{ "name" : "id", "type" : "enum_t" },
{ "name" : "cid", "type" : "const_t" },
],
},
"dirtree_segm_moved" : {"ignore" : True},
}
default_rtype = "void"
| 25.809524 | 51 | 0.433579 |
97695a3538e1e86ae9bb42092d6267836a880567 | 1,151 | py | Python | google/ads/googleads/v10/services/services/conversion_goal_campaign_config_service/transports/__init__.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | ["Apache-2.0"] | null | null | null | google/ads/googleads/v10/services/services/conversion_goal_campaign_config_service/transports/__init__.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | ["Apache-2.0"] | null | null | null | google/ads/googleads/v10/services/services/conversion_goal_campaign_config_service/transports/__init__.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ConversionGoalCampaignConfigServiceTransport
from .grpc import ConversionGoalCampaignConfigServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConversionGoalCampaignConfigServiceTransport]]
_transport_registry["grpc"] = ConversionGoalCampaignConfigServiceGrpcTransport
__all__ = (
"ConversionGoalCampaignConfigServiceTransport",
"ConversionGoalCampaignConfigServiceGrpcTransport",
)
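# Illustrative sketch only (not part of the generated file): client code can resolve a
# transport implementation by name from the registry compiled above.
def _select_transport(name: str = "grpc") -> Type[ConversionGoalCampaignConfigServiceTransport]:
    # Raises KeyError for unknown names; only "grpc" is registered in this module.
    return _transport_registry[name]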
| 35.96875 | 78 | 0.791486 |
33e7efaea652a3e523e6994d7ff9941f20fcd059 | 118,652 | py | Python | src/transformers/pipelines.py | hhaoyan/transformers | 0911b6bd86b39d55ddeae42fbecef75a1244ea85 | ["Apache-2.0"] | null | null | null | src/transformers/pipelines.py | hhaoyan/transformers | 0911b6bd86b39d55ddeae42fbecef75a1244ea85 | ["Apache-2.0"] | null | null | null | src/transformers/pipelines.py | hhaoyan/transformers | 0911b6bd86b39d55ddeae42fbecef75a1244ea85 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import sys
import uuid
from abc import ABC, abstractmethod
from contextlib import contextmanager
from itertools import chain
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from uuid import UUID
import numpy as np
from .configuration_auto import AutoConfig
from .configuration_utils import PretrainedConfig
from .data import SquadExample, squad_convert_examples_to_features
from .file_utils import add_end_docstrings, is_tf_available, is_torch_available
from .modelcard import ModelCard
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import BatchEncoding, PaddingStrategy
from .utils import logging
if is_tf_available():
import tensorflow as tf
from .modeling_tf_auto import (
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
if is_torch_available():
import torch
from .modeling_auto import (
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
if TYPE_CHECKING:
from .modeling_tf_utils import TFPreTrainedModel
from .modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def get_framework(model):
"""
Select framework (TensorFlow or PyTorch) to use.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
the model name). If no specific model is provided, defaults to using PyTorch.
"""
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model = AutoModel.from_pretrained(model)
elif is_tf_available() and not is_torch_available():
model = TFAutoModel.from_pretrained(model)
else:
try:
model = AutoModel.from_pretrained(model)
except OSError:
model = TFAutoModel.from_pretrained(model)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework
def get_default_model(targeted_task: Dict, framework: Optional[str]) -> str:
"""
Select a default model to use for a given task. Defaults to pytorch if ambiguous.
Args:
        targeted_task (:obj:`Dict`):
            Dictionary representing the given task, that should contain default models
framework (:obj:`str`, None)
"pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.
Returns
:obj:`str` The model string representing the default model for this pipeline
"""
if is_torch_available() and not is_tf_available():
framework = "pt"
elif is_tf_available() and not is_torch_available():
framework = "tf"
default_models = targeted_task["default"]["model"]
if framework is None:
framework = "pt"
return default_models[framework]
class PipelineException(Exception):
"""
Raised by a :class:`~transformers.Pipeline` when handling __call__.
Args:
task (:obj:`str`): The task of the pipeline.
model (:obj:`str`): The model used by the pipeline.
reason (:obj:`str`): The error message to display.
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class DefaultArgumentHandler(ArgumentHandler):
"""
Default argument parser handling parameters for each :class:`~transformers.pipelines.Pipeline`.
"""
@staticmethod
def handle_kwargs(kwargs: Dict) -> List:
if len(kwargs) == 1:
output = list(kwargs.values())
else:
output = list(chain(kwargs.values()))
return DefaultArgumentHandler.handle_args(output)
@staticmethod
def handle_args(args: Sequence[Any]) -> List[str]:
# Only one argument, let's do case by case
if len(args) == 1:
if isinstance(args[0], str):
return [args[0]]
elif not isinstance(args[0], list):
return list(args)
else:
return args[0]
# Multiple arguments (x1, x2, ...)
elif len(args) > 1:
if all([isinstance(arg, str) for arg in args]):
return list(args)
# If not instance of list, then it should instance of iterable
elif isinstance(args, Iterable):
return list(chain.from_iterable(chain(args)))
else:
raise ValueError(
"Invalid input type {}. Pipeline supports Union[str, Iterable[str]]".format(type(args))
)
else:
return []
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ValueError("Pipeline cannot handle mixed args and kwargs")
if len(kwargs) > 0:
return DefaultArgumentHandler.handle_kwargs(kwargs)
else:
return DefaultArgumentHandler.handle_args(args)
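# Illustrative sketch only (not part of the original module): DefaultArgumentHandler
# normalizes the supported call signatures into a flat list of inputs, as exercised below.
def _example_default_argument_handler():
    handler = DefaultArgumentHandler()
    assert handler("single text") == ["single text"]
    assert handler(["first text", "second text"]) == ["first text", "second text"]
    assert handler(data=["first text", "second text"]) == ["first text", "second text"]
    return handler("single text")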
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing.
Supported data formats currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
:obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets
columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite: bool = False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
                raise OSError("{} doesn't exist on disk".format(self.input_path))
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: Union[dict, List[dict]]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
Returns:
:obj:`str`: Path where the data has been saved.
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
) -> "PipelineDataFormat":
"""
Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending
on :obj:`format`.
Args:
format: (:obj:`str`):
The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
output_path (:obj:`str`, `optional`):
Where to save the outgoing data.
input_path (:obj:`str`, `optional`):
Where to look for the input data.
column (:obj:`str`, `optional`):
The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
Returns:
:class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
"""
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
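# Illustrative sketch only (not part of the original module): `from_str` dispatches to
# the matching subclass. The file names below are hypothetical; a small CSV is created
# on the fly so the example is self-contained.
def _example_pipeline_data_format(tmp_dir="."):
    input_file = os.path.join(tmp_dir, "reviews.csv")       # hypothetical input file
    output_file = os.path.join(tmp_dir, "predictions.csv")  # hypothetical output file
    with open(input_file, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["text"])
        writer.writeheader()
        writer.writerow({"text": "great movie"})
    data_format = PipelineDataFormat.from_str(
        "csv", output_path=output_file, input_path=input_file, column="text", overwrite=True
    )
    return list(data_format)  # -> ["great movie"]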
class CsvPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using CSV data format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`List[dict]`): The data to store.
"""
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using JSON file format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
"""
Save the provided data object in a json file.
Args:
data (:obj:`dict`): The data to store.
"""
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
Read data from piped input to the python process.
    For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
"""
Print the data.
Args:
data (:obj:`dict`): The data to store.
"""
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
PIPELINE_INIT_ARGS = r"""
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
model is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU, a positive value will run
            the model on the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Flag indicating if the output of the pipeline should happen in a binary format (i.e., pickle) or as raw text.
"""
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations.
Pipeline workflow is defined as a sequence of the following operations:
Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument (see below).
    Some pipelines, like :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'`),
    output large tensor objects as nested lists. In order to avoid dumping such large structures as textual data we
    provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
pickle format.
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
):
if framework is None:
framework = get_framework(model)
self.task = task
self.model = model
self.tokenizer = tokenizer
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
self.binary_output = binary_output
self._args_parser = args_parser or DefaultArgumentHandler()
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
def save_pretrained(self, save_directory: str):
"""
Save the pipeline's model and tokenizer.
Args:
save_directory (:obj:`str`):
                A path to the directory where to save it. It will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
self.model.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
        Context Manager allowing tensor allocation on the user-specified device in a framework-agnostic way.
Returns:
Context manager
Examples::
# Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = pipe(...)
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.
Return:
:obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
"""
return {name: tensor.to(self.device) for name, tensor in inputs.items()}
def check_model_type(self, supported_models: Union[List[str], dict]):
"""
        Check if the model class is supported by the pipeline.
Args:
supported_models (:obj:`List[str]` or :obj:`dict`):
The list of models supported by the pipeline, or a dictionary with model class values.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models = [item[1].__name__ for item in supported_models.items()]
if self.model.__class__.__name__ not in supported_models:
raise PipelineException(
self.task,
self.model.base_model_prefix,
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
)
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
)
return inputs
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
return self._forward(inputs)
def _forward(self, inputs, return_tensors=False):
"""
Internal framework specific forward dispatching.
Args:
            inputs: dict holding all the keyword arguments required by the model forward method.
            return_tensors: Whether to return native framework (pt/tf) tensors rather than a numpy array.
Returns:
Numpy array
"""
# Encode for forward
with self.device_placement():
if self.framework == "tf":
# TODO trace model
predictions = self.model(inputs.data, training=False)[0]
else:
with torch.no_grad():
inputs = self.ensure_tensor_on_device(**inputs)
predictions = self.model(**inputs)[0].cpu()
if return_tensors:
return predictions
else:
return predictions.numpy()
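# Illustrative sketch only (not part of the original module): the scikit-learn style
# helpers above let a pipeline object slot into sklearn-like code. The pipeline instance
# below is assumed to come from the top-level `pipeline()` factory defined in this file.
def _example_sklearn_interface(pipe):
    # `transform` and `predict` both forward to __call__()
    return pipe.predict(["a first text", "a second text"])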
# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from :func:`~transformers.pipeline` using the task
identifier: :obj:`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
`huggingface.co/models <https://huggingface.co/models>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
model is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage the CPU, a positive value will run
            the model on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
args (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of :obj:`float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs).tolist()
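# Illustrative usage sketch only (not part of the original module). It assumes the
# top-level `pipeline()` factory that this file also defines; the default model for
# "feature-extraction" is downloaded on first use.
def _example_feature_extraction():
    from transformers import pipeline
    extractor = pipeline("feature-extraction")
    features = extractor("We are very happy to show you this library.")
    # `features` is a nested list of floats: [batch][token][hidden_size]
    return features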
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any :obj:`ModelWithLMHead`. This pipeline predicts the words that will follow a
specified text prompt.
This language generation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"text-generation"`.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling
objective, which includes the uni-directional models in the library (e.g. gpt2).
See the list of available community models on
`huggingface.co/models <https://huggingface.co/models?search=&filter=lm-head>`__.
"""
# Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
XL_PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
ALLOWED_MODELS = [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"ReformerModelWithLMHead",
"GPT2LMHeadModel",
"OpenAIGPTLMHeadModel",
"CTRLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
"TFGPT2LMHeadModel",
"TFOpenAIGPTLMHeadModel",
"TFCTRLLMHeadModel",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(self.ALLOWED_MODELS)
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
tokenizer_kwargs = {"add_space_before_punct_symbol": True}
else:
tokenizer_kwargs = {}
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
**tokenizer_kwargs,
)
return inputs
def __call__(
self,
*args,
return_tensors=False,
return_text=True,
clean_up_tokenization_spaces=False,
prefix=None,
**generate_kwargs
):
"""
Complete the prompt(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several prompts (or one list of prompts) to complete.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
prefix (:obj:`str`, `optional`):
Prefix added to prompt.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of list of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
- **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the generated text.
"""
text_inputs = self._args_parser(*args)
results = []
for prompt_text in text_inputs:
# Manage correct placement of the tensors
with self.device_placement():
prefix = prefix if prefix is not None else self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix = self.XL_PREFIX
if prefix:
prefix_inputs = self._parse_and_tokenize(prefix, padding=False, add_special_tokens=False)
# This impacts max_length and min_length argument that need adjusting.
prefix_length = prefix_inputs["input_ids"].shape[-1]
if generate_kwargs.get("max_length", None) is not None:
generate_kwargs["max_length"] += prefix_length
if generate_kwargs.get("min_length", None) is not None:
generate_kwargs["min_length"] += prefix_length
prefix = prefix or ""
inputs = self._parse_and_tokenize(prefix + prompt_text, padding=False, add_special_tokens=False)
# set input_ids to None to allow empty prompt
if inputs["input_ids"].shape[-1] == 0:
inputs["input_ids"] = None
inputs["attention_mask"] = None
if self.framework == "pt" and inputs["input_ids"] is not None:
inputs = self.ensure_tensor_on_device(**inputs)
input_ids = inputs["input_ids"]
# Ensure that batch size = 1 (batch generation not allowed for now)
assert (
input_ids is None or input_ids.shape[0] == 1
), "Batch generation is currently not supported. See https://github.com/huggingface/transformers/issues/3021 for more information."
output_sequences = self.model.generate(input_ids=input_ids, **generate_kwargs) # BS x SL
result = []
for generated_sequence in output_sequences:
if self.framework == "pt" and generated_sequence is not None:
generated_sequence = generated_sequence.cpu()
generated_sequence = generated_sequence.numpy().tolist()
record = {}
if return_tensors:
record["generated_token_ids"] = generated_sequence
if return_text:
# Decode text
text = self.tokenizer.decode(
generated_sequence,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
record["generated_text"] = prompt_text + text[prompt_length:]
result.append(record)
results += [result]
if len(results) == 1:
return results[0]
return results
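# Illustrative usage sketch only (not part of the original module). It assumes the
# top-level `pipeline()` factory defined in this file; the default "text-generation"
# model (gpt2 at the time of writing) is downloaded on first use.
def _example_text_generation():
    from transformers import pipeline
    generator = pipeline("text-generation")
    outputs = generator("Once upon a time,", max_length=30)
    # -> [{"generated_text": "Once upon a time, ..."}]
    return outputs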
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
return_all_scores (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to return all prediction scores or just the one of the predicted class.
""",
)
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using any :obj:`ModelForSequenceClassification`. See the
`sequence classification examples <../task_summary.html#sequence-classification>`__ for more information.
This text classification pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"sentiment-analysis"` (for classifying sequences according to positive or negative
sentiments).
If multiple classification labels are available (:obj:`model.config.num_labels >= 2`), the pipeline will run
a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result.
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=text-classification>`__.
"""
def __init__(self, return_all_scores: bool = False, **kwargs):
super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
)
self.return_all_scores = return_all_scores
def __call__(self, *args, **kwargs):
"""
Classify the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several texts (or one list of prompts) to classify.
Return:
A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the
following keys:
- **label** (:obj:`str`) -- The label predicted.
- **score** (:obj:`float`) -- The corresponding probability.
If ``self.return_all_scores=True``, one such dictionary is returned per label.
"""
outputs = super().__call__(*args, **kwargs)
if self.model.config.num_labels == 1:
scores = 1.0 / (1.0 + np.exp(-outputs))
else:
scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
if self.return_all_scores:
return [
[{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
for item in scores
]
else:
return [
{"label": self.model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores
]
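# Illustrative usage sketch only (not part of the original module). It assumes the
# top-level `pipeline()` factory defined in this file; the default sentiment model is
# downloaded on first use.
def _example_sentiment_analysis():
    from transformers import pipeline
    classifier = pipeline("sentiment-analysis")
    result = classifier("We are very happy to show you this library.")
    # -> [{"label": "POSITIVE", "score": 0.99...}] (exact scores depend on the model)
    return result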
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
"""
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
premise/hypothesis pair.
"""
def _parse_labels(self, labels):
if isinstance(labels, str):
labels = [label.strip() for label in labels.split(",")]
return labels
def __call__(self, sequences, labels, hypothesis_template):
if len(labels) == 0 or len(sequences) == 0:
raise ValueError("You must include at least one label and at least one sequence.")
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(hypothesis_template)
)
if isinstance(sequences, str):
sequences = [sequences]
labels = self._parse_labels(labels)
sequence_pairs = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
return sequence_pairs
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(Pipeline):
"""
NLI-based zero-shot classification pipeline using a :obj:`ModelForSequenceClassification` trained on NLI (natural
language inference) tasks.
Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
pair and passed to the pretrained model. Then, the logit for `entailment` is taken as the logit for the
candidate label being valid. Any NLI model can be used as long as the first output logit corresponds to
`contradiction` and the last to `entailment`.
This NLI pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"zero-shot-classification"`.
The models that this pipeline can use are models that have been fine-tuned on an NLI task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?search=nli>`__.
"""
def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
super().__init__(*args, args_parser=args_parser, **kwargs)
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize only_first so that hypothesis (label) is not truncated
"""
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
truncation="only_first",
)
return inputs
def __call__(self, sequences, candidate_labels, hypothesis_template="This example is {}.", multi_class=False):
"""
Classify the sequence(s) given as inputs.
Args:
sequences (:obj:`str` or :obj:`List[str]`):
The sequence(s) to classify, will be truncated if the model input is too large.
candidate_labels (:obj:`str` or :obj:`List[str]`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (:obj:`str`, `optional`, defaults to :obj:`"This example is {}."`):
The template used to turn each label into an NLI-style hypothesis. This template must include a {}
or similar syntax for the candidate label to be inserted into the template. For example, the default
template is :obj:`"This example is {}."` With the candidate label :obj:`"sports"`, this would be fed
into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The
default template works well in many cases, but it may be worthwhile to experiment with different
templates depending on the task setting.
multi_class (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized
such that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are
considered independent and probabilities are normalized for each candidate by doing a softmax of
the entailment score vs. the contradiction score.
Return:
A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **sequence** (:obj:`str`) -- The sequence for which this is the output.
- **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood.
- **scores** (:obj:`List[float]`) -- The probabilities for each of the labels.
"""
outputs = super().__call__(sequences, candidate_labels, hypothesis_template)
num_sequences = 1 if isinstance(sequences, str) else len(sequences)
candidate_labels = self._args_parser._parse_labels(candidate_labels)
reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))
if len(candidate_labels) == 1:
multi_class = True
if not multi_class:
# softmax the "entailment" logits over all candidate labels
entail_logits = reshaped_outputs[..., -1]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
else:
# softmax over the entailment vs. contradiction dim for each label independently
entail_contr_logits = reshaped_outputs[..., [0, -1]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
result = []
for iseq in range(num_sequences):
top_inds = list(reversed(scores[iseq].argsort()))
result.append(
{
"sequence": sequences if isinstance(sequences, str) else sequences[iseq],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[iseq][top_inds].tolist(),
}
)
if len(result) == 1:
return result[0]
return result
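# Illustrative usage sketch only (not part of the original module). It assumes the
# top-level `pipeline()` factory defined in this file; the default NLI model is
# downloaded on first use. The candidate labels below are arbitrary examples.
def _example_zero_shot_classification():
    from transformers import pipeline
    classifier = pipeline("zero-shot-classification")
    result = classifier(
        "One day I will see the world.",
        candidate_labels=["travel", "cooking", "dancing"],
    )
    # -> {"sequence": ..., "labels": [...sorted by score...], "scores": [...]}
    return result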
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
topk (:obj:`int`, defaults to 5): The number of predictions to return.
""",
)
class FillMaskPipeline(Pipeline):
"""
Masked language modeling prediction pipeline using any :obj:`ModelWithLMHead`. See the
`masked language modeling examples <../task_summary.html#masked-language-modeling>`__ for more information.
This mask filling pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"fill-mask"`.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=lm-head>`__.
.. note::
This pipeline only works for inputs with exactly one token masked.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
topk=5,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
self.topk = topk
def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
numel = np.prod(masked_index.shape)
if numel > 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
)
elif numel < 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
)
def __call__(self, *args, targets=None, **kwargs):
"""
Fill the masked token in the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several texts (or one list of prompts) with masked tokens.
targets (:obj:`str` or :obj:`List[str]`, `optional`):
When passed, the model will return the scores for the passed token or tokens rather than the top k
predictions in the entire vocabulary. If the provided targets are not in the model vocab, they will
be tokenized and the first resulting token will be used (with a warning).
Return:
A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the
following keys:
- **sequence** (:obj:`str`) -- The corresponding input with the mask token prediction.
- **score** (:obj:`float`) -- The corresponding probability.
- **token** (:obj:`int`) -- The predicted token id (to replace the masked one).
- **token** (:obj:`str`) -- The predicted token (to replace the masked one).
"""
inputs = self._parse_and_tokenize(*args, **kwargs)
outputs = self._forward(inputs, return_tensors=True)
results = []
batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
if targets is not None:
if len(targets) == 0 or len(targets[0]) == 0:
                raise ValueError("At least one target must be provided when `targets` is passed.")
if isinstance(targets, str):
targets = [targets]
targets_proc = []
for target in targets:
target_enc = self.tokenizer.tokenize(target)
if len(target_enc) > 1 or target_enc[0] == self.tokenizer.unk_token:
logger.warning(
"The specified target token `{}` does not exist in the model vocabulary. Replacing with `{}`.".format(
target, target_enc[0]
)
)
targets_proc.append(target_enc[0])
target_inds = np.array(self.tokenizer.convert_tokens_to_ids(targets_proc))
for i in range(batch_size):
input_ids = inputs["input_ids"][i]
result = []
if self.framework == "tf":
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index)
logits = outputs[i, masked_index.item(), :]
probs = tf.nn.softmax(logits)
if targets is None:
topk = tf.math.top_k(probs, k=self.topk)
values, predictions = topk.values.numpy(), topk.indices.numpy()
else:
values = tf.gather_nd(probs, tf.reshape(target_inds, (-1, 1)))
sort_inds = tf.reverse(tf.argsort(values), [0])
values = tf.gather_nd(values, tf.reshape(sort_inds, (-1, 1))).numpy()
predictions = target_inds[sort_inds.numpy()]
else:
masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index.numpy())
logits = outputs[i, masked_index.item(), :]
probs = logits.softmax(dim=0)
if targets is None:
values, predictions = probs.topk(self.topk)
else:
values = probs[..., target_inds]
sort_inds = list(reversed(values.argsort(dim=-1)))
values = values[..., sort_inds]
predictions = target_inds[sort_inds]
for v, p in zip(values.tolist(), predictions.tolist()):
tokens = input_ids.numpy()
tokens[masked_index] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
result.append(
{
"sequence": self.tokenizer.decode(tokens),
"score": v,
"token": p,
"token_str": self.tokenizer.convert_ids_to_tokens(p),
}
)
# Append
results += [result]
if len(results) == 1:
return results[0]
return results
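# Illustrative usage sketch only (not part of the original module). It assumes the
# top-level `pipeline()` factory defined in this file; the mask token is taken from the
# loaded tokenizer so the prompt works regardless of the default model.
def _example_fill_mask():
    from transformers import pipeline
    unmasker = pipeline("fill-mask")
    result = unmasker("Paris is the {} of France.".format(unmasker.tokenizer.mask_token))
    # -> top-k list of {"sequence", "score", "token", "token_str"} dictionaries
    return result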
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
ignore_labels (:obj:`List[str]`, defaults to :obj:`["O"]`):
A list of labels to ignore.
grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to group the tokens corresponding to the same entity together in the predictions or not.
""",
)
class TokenClassificationPipeline(Pipeline):
"""
Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the
`named entity recognition examples <../task_summary.html#named-entity-recognition>`__ for more information.
This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location
or miscellaneous).
The models that this pipeline can use are models that have been fine-tuned on a token classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=token-classification>`__.
"""
default_input_names = "sequences"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
ignore_labels=["O"],
task: str = "",
grouped_entities: bool = False,
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=binary_output,
task=task,
)
self.check_model_type(
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
)
self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
self.ignore_labels = ignore_labels
self.grouped_entities = grouped_entities
def __call__(self, *args, **kwargs):
"""
Classify each token of the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several texts (or one list of texts) for token classification.
Return:
A list or a list of list of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in
the corresponding input, or each entity if this pipeline was instantiated with
:obj:`grouped_entities=True`) with the following keys:
- **word** (:obj:`str`) -- The token/word classified.
- **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.
- **entity** (:obj:`str`) -- The entity predicted for that token/word.
- **index** (:obj:`int`, only present when ``self.grouped_entities=False``) -- The index of the
corresponding token in the sentence.
"""
inputs = self._args_parser(*args, **kwargs)
answers = []
for sentence in inputs:
# Manage correct placement of the tensors
with self.device_placement():
tokens = self.tokenizer(
sentence,
return_attention_mask=False,
return_tensors=self.framework,
truncation=True,
)
# Forward
if self.framework == "tf":
entities = self.model(tokens.data)[0][0].numpy()
input_ids = tokens["input_ids"].numpy()[0]
else:
with torch.no_grad():
tokens = self.ensure_tensor_on_device(**tokens)
entities = self.model(**tokens)[0][0].cpu().numpy()
input_ids = tokens["input_ids"].cpu().numpy()[0]
score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
labels_idx = score.argmax(axis=-1)
entities = []
# Filter to labels not in `self.ignore_labels`
filtered_labels_idx = [
(idx, label_idx)
for idx, label_idx in enumerate(labels_idx)
if self.model.config.id2label[label_idx] not in self.ignore_labels
]
for idx, label_idx in filtered_labels_idx:
entity = {
"word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
"score": score[idx][label_idx].item(),
"entity": self.model.config.id2label[label_idx],
"index": idx,
}
entities += [entity]
# Append grouped entities
if self.grouped_entities:
answers += [self.group_entities(entities)]
# Append ungrouped entities
else:
answers += [entities]
if len(answers) == 1:
return answers[0]
return answers
def group_sub_entities(self, entities: List[dict]) -> dict:
"""
Group together the adjacent tokens with the same entity predicted.
Args:
entities (:obj:`dict`): The entities predicted by the pipeline.
"""
# Get the first entity in the entity group
entity = entities[0]["entity"]
scores = np.mean([entity["score"] for entity in entities])
tokens = [entity["word"] for entity in entities]
entity_group = {
"entity_group": entity,
"score": np.mean(scores),
"word": self.tokenizer.convert_tokens_to_string(tokens),
}
return entity_group
def group_entities(self, entities: List[dict]) -> List[dict]:
"""
Find and group together the adjacent tokens with the same entity predicted.
Args:
entities (:obj:`dict`): The entities predicted by the pipeline.
"""
entity_groups = []
entity_group_disagg = []
if entities:
last_idx = entities[-1]["index"]
for entity in entities:
is_last_idx = entity["index"] == last_idx
if not entity_group_disagg:
entity_group_disagg += [entity]
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
continue
# If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
# The split is meant to account for the "B-" and "I-" prefixes
if (
entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
and entity["index"] == entity_group_disagg[-1]["index"] + 1
):
entity_group_disagg += [entity]
# Group the entities at the last entity
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
# If the current entity is different from the previous entity, aggregate the disaggregated entity group
else:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
entity_group_disagg = [entity]
# If it's the last entity, add it to the entity groups
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
return entity_groups
NerPipeline = TokenClassificationPipeline
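# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the token classification pipeline above is typically called and what
# ``grouped_entities=True`` does: adjacent tokens sharing the same B-/I- tag are merged
# into a single entity group by ``group_entities``. The example sentence and the output
# shown in the comment are assumptions; the checkpoint is the default registered for the
# "ner" task further below.
def _example_grouped_ner():
    ner = pipeline("ner", grouped_entities=True)
    # With grouping enabled, "New" and "York" come back as one record, e.g.
    # {"entity_group": "LOC", "score": ..., "word": "New York"}.
    return ner("Hugging Face is based in New York")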
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped
to internal :class:`~transformers.SquadExample`.
QuestionAnsweringArgumentHandler manages all the possible ways to create a :class:`~transformers.SquadExample` from
the command-line supplied arguments.
"""
def __call__(self, *args, **kwargs):
# Positional args: handled essentially the same way as X and data, so forward them to avoid duplicating logic
if args is not None and len(args) > 0:
if len(args) == 1:
kwargs["X"] = args[0]
else:
kwargs["X"] = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
if "X" in kwargs or "data" in kwargs:
inputs = kwargs["X"] if "X" in kwargs else kwargs["data"]
if isinstance(inputs, dict):
inputs = [inputs]
else:
# Copy to avoid overriding arguments
inputs = [i for i in inputs]
for i, item in enumerate(inputs):
if isinstance(item, dict):
if any(k not in item for k in ["question", "context"]):
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
inputs[i] = QuestionAnsweringPipeline.create_sample(**item)
elif not isinstance(item, SquadExample):
raise ValueError(
"{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(
"X" if "X" in kwargs else "data"
)
)
# Tabular input
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], str):
kwargs["question"] = [kwargs["question"]]
if isinstance(kwargs["context"], str):
kwargs["context"] = [kwargs["context"]]
inputs = [
QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(kwargs["question"], kwargs["context"])
]
else:
raise ValueError("Unknown arguments {}".format(kwargs))
if not isinstance(inputs, list):
inputs = [inputs]
return inputs
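# --- Illustrative sketch (not part of the original module) ---
# The handler above normalizes several calling conventions into a list of SquadExample.
# A minimal demonstration using made-up question/context strings; the three calls below
# are equivalent.
def _example_qa_argument_handler():
    handler = QuestionAnsweringArgumentHandler()
    question = "Where do I live?"
    context = "My name is Wolfgang and I live in Berlin."
    as_kwargs = handler(question=question, context=context)
    as_dict = handler({"question": question, "context": context})
    as_x = handler(X={"question": question, "context": context})
    return as_kwargs, as_dict, as_x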
@add_end_docstrings(PIPELINE_INIT_ARGS)
class QuestionAnsweringPipeline(Pipeline):
"""
Question Answering pipeline using any :obj:`ModelForQuestionAnswering`. See the
`question answering examples <../task_summary.html#question-answering>`__ for more information.
This question answering pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a question answering task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=question-answering>`__.
"""
default_input_names = "question,context"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
device: int = -1,
task: str = "",
**kwargs
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
task=task,
**kwargs,
)
self.check_model_type(
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
)
@staticmethod
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the :class:`~transformers.SquadExample` internally.
This helper method encapsulates all the logic for converting question(s) and context(s) to
:class:`~transformers.SquadExample`.
We currently support extractive question answering.
Arguments:
question (:obj:`str` or :obj:`List[str]`): The question(s) asked.
context (:obj:`str` or :obj:`List[str]`): The context(s) in which we will look for the answer.
Returns:
One or a list of :class:`~transformers.SquadExample`: The corresponding
:class:`~transformers.SquadExample` grouping question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
def __call__(self, *args, **kwargs):
"""
Answer the question(s) given as inputs by using the context(s).
Args:
args (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`):
One or several :class:`~transformers.SquadExample` containing the question and context.
X (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
One or several :class:`~transformers.SquadExample` containing the question and context
(will be treated the same way as if passed as the first positional argument).
data (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
One or several :class:`~transformers.SquadExample` containing the question and context
(will be treated the same way as if passed as the first positional argument).
question (:obj:`str` or :obj:`List[str]`):
One or several question(s) (must be used in conjunction with the :obj:`context` argument).
context (:obj:`str` or :obj:`List[str]`):
One or several context(s) associated with the question(s) (must be used in conjunction with the
:obj:`question` argument).
topk (:obj:`int`, `optional`, defaults to 1):
The number of answers to return (will be chosen by order of likelihood).
doc_stride (:obj:`int`, `optional`, defaults to 128):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
max_answer_len (:obj:`int`, `optional`, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (:obj:`int`, `optional`, defaults to 384):
The maximum length of the total sentence (context + question) after tokenization. The context will be
split in several chunks (using :obj:`doc_stride`) if needed.
max_question_len (:obj:`int`, `optional`, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not we accept impossible as an answer.
Return:
A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **score** (:obj:`float`) -- The probability associated to the answer.
- **start** (:obj:`int`) -- The start index of the answer (in the tokenized version of the input).
- **end** (:obj:`int`) -- The end index of the answer (in the tokenized version of the input).
- **answer** (:obj:`str`) -- The answer to the question.
"""
# Set defaults values
kwargs.setdefault("topk", 1)
kwargs.setdefault("doc_stride", 128)
kwargs.setdefault("max_answer_len", 15)
kwargs.setdefault("max_seq_len", 384)
kwargs.setdefault("max_question_len", 64)
kwargs.setdefault("handle_impossible_answer", False)
if kwargs["topk"] < 1:
raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))
if kwargs["max_answer_len"] < 1:
raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))
# Convert inputs to features
examples = self._args_parser(*args, **kwargs)
features_list = [
squad_convert_examples_to_features(
examples=[example],
tokenizer=self.tokenizer,
max_seq_length=kwargs["max_seq_len"],
doc_stride=kwargs["doc_stride"],
max_query_length=kwargs["max_question_len"],
padding_strategy=PaddingStrategy.MAX_LENGTH.value,
is_training=False,
tqdm_enabled=False,
)
for example in examples
]
all_answers = []
for features, example in zip(features_list, examples):
model_input_names = self.tokenizer.model_input_names + ["input_ids"]
fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}
# Manage tensor allocation on correct device
with self.device_placement():
if self.framework == "tf":
fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
start, end = self.model(fw_args)[:2]
start, end = start.numpy(), end.numpy()
else:
with torch.no_grad():
# Retrieve the score for the context tokens only (removing question tokens)
fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
start, end = self.model(**fw_args)[:2]
start, end = start.cpu().numpy(), end.cpu().numpy()
min_null_score = 1000000 # large and positive
answers = []
for (feature, start_, end_) in zip(features, start, end):
# Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask
# Generate mask
undesired_tokens_mask = undesired_tokens == 0.0
# Make sure non-context indexes in the tensor cannot contribute to the softmax
start_ = np.where(undesired_tokens_mask, -10000.0, start_)
end_ = np.where(undesired_tokens_mask, -10000.0, end_)
# Normalize logits and spans to retrieve the answer
start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))
if kwargs["handle_impossible_answer"]:
min_null_score = min(min_null_score, (start_[0] * end_[0]).item())
# Mask CLS
start_[0] = end_[0] = 0.0
starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
answers += [
{
"score": score.item(),
"start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(
example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
),
}
for s, e, score in zip(starts, ends, scores)
]
if kwargs["handle_impossible_answer"]:
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
all_answers += answers
if len(all_answers) == 1:
return all_answers[0]
return all_answers
def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
"""
Takes the output of any :obj:`ModelForQuestionAnswering` and generates probabilities for each span to be
the actual answer.
In addition, it filters out some unwanted/impossible cases, such as answers longer than max_answer_len or
answers whose end position comes before their start position.
The method supports returning the k best answers through the topk argument.
Args:
start (:obj:`np.ndarray`): Individual start probabilities for each token.
end (:obj:`np.ndarray`): Individual end probabilities for each token.
topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.
max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidates where end < start or where the span is longer than max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
return start, end, candidates[0, start, end]
def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
"""
When decoding from token probabilities, this method maps token indexes to the actual words in
the initial context.
Args:
text (:obj:`str`): The actual context to extract the answer from.
start (:obj:`int`): The answer starting token index.
end (:obj:`int`): The answer end token index.
Returns:
Dictionary like :obj:`{'answer': str, 'start': int, 'end': int}`
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
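# --- Illustrative sketch (not part of the original module) ---
# Mirrors the span scoring used in ``decode`` above, relying on the module-level numpy
# import: the outer product of start and end probabilities scores every (start, end)
# pair, ``np.triu`` drops spans that end before they start, and ``np.tril`` with
# ``max_answer_len - 1`` drops spans longer than ``max_answer_len``. The toy
# probabilities below are made up.
def _example_span_scoring():
    start = np.array([0.1, 0.7, 0.1, 0.1])  # P(token i is the answer start)
    end = np.array([0.1, 0.1, 0.6, 0.2])    # P(token j is the answer end)
    max_answer_len = 2
    outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 0))
    candidates = np.tril(np.triu(outer), max_answer_len - 1)
    s, e = np.unravel_index(np.argmax(candidates), candidates.shape)
    return s, e, candidates[s, e]  # -> (1, 2, 0.42): the best span covers tokens 1..2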
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Pipeline):
"""
Summarize news articles and other documents.
This summarizing pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"summarization"`.
The models that this pipeline can use are models that have been fine-tuned on a summarization task,
currently: '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=summarization>`__.
Usage::
# use bart in pytorch
summarizer = pipeline("summarization")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
# use t5 in tf
summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
"""
def __init__(self, *args, **kwargs):
kwargs.update(task="summarization")
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Summarize the text(s) given as inputs.
Args:
documents (`str` or :obj:`List[str]`):
One or several articles (or one list of articles) to summarize.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **summary_text** (:obj:`str`, present when ``return_text=True``) -- The summary of the corresponding
input.
- **summary_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the summary.
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
assert len(documents) > 0, "Please provide a document to summarize"
if self.framework == "tf" and "BartForConditionalGeneration" in self.model.__class__.__name__:
raise NotImplementedError(
"Tensorflow is not yet supported for Bart. Please consider using T5, e.g. `t5-base`"
)
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(documents[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
documents = ([prefix + document for document in documents[0]],)
padding = True
elif isinstance(documents[0], str):
documents = (prefix + documents[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} has the wrong format. It should be either of type `str` or type `list`".format(
documents[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*documents, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
min_length = generate_kwargs.get("min_length", self.model.config.min_length)
if input_length < min_length // 2:
logger.warning(
"Your min_length is set to {}, but you input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
min_length, input_length
)
)
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length < max_length:
logger.warning(
"Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
max_length, input_length
)
)
summaries = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
results = []
for summary in summaries:
record = {}
if return_tensors:
record["summary_token_ids"] = summary
if return_text:
record["summary_text"] = self.tokenizer.decode(
summary,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Pipeline):
"""
Translates from one language to another.
This translation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"translation_xx_to_yy"`.
The models that this pipeline can use are models that have been fine-tuned on a translation task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=translation>`__.
Usage::
en_fr_translator = pipeline("translation_en_to_fr")
en_fr_translator("How old are you?")
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Translate the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
Texts to be translated.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **translation_text** (:obj:`str`, present when ``return_text=True``) -- The translation.
- **translation_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the translation.
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
args = ([prefix + text for text in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise ValueError(
" `args[0]`: {} has the wrong format. It should be either of type `str` or type `list`".format(
args[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*args, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length > 0.9 * max_length:
logger.warning(
"Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
input_length, max_length
)
)
translations = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
results = []
for translation in translations:
record = {}
if return_tensors:
record["translation_token_ids"] = translation
if return_text:
record["translation_text"] = self.tokenizer.decode(
translation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
"""
Pipeline for text to text generation using seq2seq models.
This text2text generation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"text2text-generation"`.
The models that this pipeline can use are models that have been fine-tuned on a sequence-to-sequence task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=seq2seq>`__.
Usage::
text2text_generator = pipeline("text2text-generation")
text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Generate the output text(s) using text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
Input text for the encoder.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
- **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the generated text.
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
if isinstance(args[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
padding = True
elif isinstance(args[0], str):
padding = False
else:
raise ValueError(
" `args[0]`: {} has the wrong format. It should be either of type `str` or type `list`".format(
args[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*args, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
generations = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
results = []
for generation in generations:
record = {}
if return_tensors:
record["generated_token_ids"] = generation
if return_text:
record["generated_text"] = self.tokenizer.decode(
generation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
class Conversation:
"""
Utility class containing a conversation and its history. This class is meant to be used as an input to the
:class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility functions to manage the
addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input
before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created when
the class is instantiated, or by calling :obj:`conversational_pipeline.append_response("input")` after a conversation
turn.
Arguments:
text (:obj:`str`, `optional`):
The initial user input to start the conversation. If not provided, a user input needs to be provided
manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can
begin.
conversation_id (:obj:`uuid.UUID`, `optional`):
Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
conversation.
Usage::
conversation = Conversation("Going to the movies tonight - any suggestions?")
# Steps usually performed by the model when generating a response:
# 1. Mark the user input as processed (moved to the history)
conversation.mark_processed()
# 2. Append a model response
conversation.append_response("The Big Lebowski.")
conversation.add_user_input("Is it good?")
"""
def __init__(self, text: str = None, conversation_id: UUID = None):
if not conversation_id:
conversation_id = uuid.uuid4()
self.uuid: UUID = conversation_id
self.past_user_inputs: List[str] = []
self.generated_responses: List[str] = []
self.history: List[int] = []
self.new_user_input: Optional[str] = text
def add_user_input(self, text: str, overwrite: bool = False):
"""
Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`
field.
Args:
text (:obj:`str`): The user input for the next conversation round.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not existing and unprocessed user input should be overwritten when this function is called.
"""
if self.new_user_input:
if overwrite:
logger.warning(
'User input added while unprocessed input was pending: "{}" was overwritten with: "{}".'.format(
self.new_user_input, text
)
)
self.new_user_input = text
else:
logger.warning(
'User input added while unprocessed input was pending: "{}"; new input ignored: "{}". '
"Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
)
else:
self.new_user_input = text
def mark_processed(self):
"""
Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and
empties the :obj:`new_user_input` field.
"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
"""
Append a response to the list of generated responses.
Args:
response (:obj:`str`): The model generated response.
"""
self.generated_responses.append(response)
def set_history(self, history: List[int]):
"""
Updates the value of the history of the conversation. The history is represented by a list of :obj:`token_ids`.
The history is used by the model to generate responses based on the previous conversation turns.
Args:
history (:obj:`List[int]`): History of tokens provided and generated for this conversation.
"""
self.history = history
def __repr__(self):
"""
Generates a string representation of the conversation.
Return:
:obj:`str`:
Example:
Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114
user >> Going to the movies tonight - any suggestions?
bot >> The Big Lebowski
"""
output = "Conversation id: {} \n".format(self.uuid)
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
output += "user >> {} \n".format(user_input)
output += "bot >> {} \n".format(generated_response)
if self.new_user_input is not None:
output += "user >> {} \n".format(self.new_user_input)
return output
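# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the unprocessed-input guard documented in ``add_user_input``: while an
# input is pending, a new one is ignored (with a warning) unless ``overwrite=True``, and
# ``mark_processed`` moves the pending input to ``past_user_inputs``. The example
# strings are made up.
def _example_conversation_inputs():
    conversation = Conversation("Going to the movies tonight - any suggestions?")
    conversation.add_user_input("Is it an action movie?")                  # ignored: input still pending
    conversation.add_user_input("Is it an action movie?", overwrite=True)  # replaces the pending input
    conversation.mark_processed()                                          # pending input -> past_user_inputs
    conversation.add_user_input("What about a book instead?")              # accepted: nothing pending
    return conversation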
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
min_length_for_response (:obj:`int`, `optional`, defaults to 32):
The minimum length (in number of tokens) for a response.
""",
)
class ConversationalPipeline(Pipeline):
"""
Multi-turn conversational pipeline.
This conversational pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"conversational"`.
The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
currently: `'microsoft/DialoGPT-small'`, `'microsoft/DialoGPT-medium'`, `'microsoft/DialoGPT-large'`.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=conversational>`__.
Usage::
conversational_pipeline = pipeline("conversational")
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
conversational_pipeline([conversation_1, conversation_2])
conversation_1.add_user_input("Is it an action movie?")
conversation_2.add_user_input("What is the genre of this book?")
conversational_pipeline([conversation_1, conversation_2])
"""
def __init__(self, min_length_for_response=32, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.tokenizer.eos_token_id is not None, "ConversationalPipeline tokenizer should have an EOS token set"
if self.tokenizer.pad_token_id is not None:
self.pad_token_id = self.tokenizer.pad_token_id
else:
self.pad_token_id = self.tokenizer.eos_token_id
self.min_length_for_response = min_length_for_response
def __call__(
self,
conversations: Union[Conversation, List[Conversation]],
clean_up_tokenization_spaces=True,
**generate_kwargs
):
r"""
Generate responses for the conversation(s) given as inputs.
Args:
conversations (a :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`):
Conversations to generate responses for.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Returns:
:class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`: Conversation(s) with
updated generated responses for those containing a new user input.
"""
# Input validation
if isinstance(conversations, list):
for conversation in conversations:
assert isinstance(
conversation, Conversation
), "DialoguePipeline expects a Conversation or list of Conversations as an input"
if conversation.new_user_input is None:
raise ValueError(
"Conversation with UUID {} does not contain new user input to process. "
"Add user inputs with the conversation's `add_user_input` method".format(
conversation.uuid
)
)
assert (
self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
elif isinstance(conversations, Conversation):
conversations = [conversations]
else:
raise ValueError("DialoguePipeline expects a Conversation or list of Conversations as an input")
with self.device_placement():
inputs = self._parse_and_tokenize([conversation.new_user_input for conversation in conversations])
histories = [conversation.history for conversation in conversations]
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
inputs = self._concat_inputs_history(inputs, histories, max_length)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
if input_length > 0.9 * max_length:
logger.warning(
"Longest conversation length: {} is bigger than 0.9 * max_length: {}. "
"You might consider trimming the early phase of the conversation".format(input_length, max_length)
)
generated_responses = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
cleaned_history = self._clean_padding_history(generated_responses)
output = []
for conversation_index, conversation in enumerate(conversations):
conversation.mark_processed()
conversation.generated_responses.append(
self.tokenizer.decode(
cleaned_history[conversation_index][input_length:],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
conversation.set_history(cleaned_history[conversation_index])
output.append(conversation)
if len(output) == 1:
return output[0]
else:
return output
def _parse_and_tokenize(self, *args, **kwargs):
"""
Parse arguments and tokenize, adding an EOS token at the end of the user input
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer.batch_encode_plus(inputs, add_special_tokens=False, padding=False).get("input_ids", [])
for input in inputs:
input.append(self.tokenizer.eos_token_id)
return inputs
def _clean_padding_history(self, generated_tensor) -> List[List[int]]:
"""
Cleans the padding history. Padding may be generated in two places when multiple conversations are provided as
an input:
- at the end of the concatenated history and new user input, so that all inputs to the model have the same
length
- at the end of the generated response, as some responses will be longer than others
This method cleans up these padding tokens so that the history for each conversation is not impacted by the
batching process.
"""
outputs = []
for sequence in generated_tensor:
sequence_tokens = []
is_previous_pad = False
for token in sequence:
if token == self.pad_token_id:
if is_previous_pad:
continue
else:
is_previous_pad = True
else:
is_previous_pad = False
if self.framework == "pt":
sequence_tokens.append(token.item())
else:
sequence_tokens.append(int(token.numpy()))
outputs.append(sequence_tokens)
return outputs
def _concat_inputs_history(self, inputs: List[List[int]], histories: List[Optional[List[int]]], max_length: int):
"""
Builds an input prepended by the history for this conversation, allowing multi-turn conversation with context
"""
outputs = []
for new_input, history in zip(inputs, histories):
if history is not None:
new_input = history + new_input
if len(new_input) > max_length - self.min_length_for_response:
cutoff_eos_index = 0
while len(new_input) - cutoff_eos_index > max_length - self.min_length_for_response:
if cutoff_eos_index >= len(new_input):
break
cutoff_eos_index = new_input[cutoff_eos_index:].index(self.tokenizer.eos_token_id)
if cutoff_eos_index == 0 or cutoff_eos_index == len(new_input) - 1:
break
else:
new_input = new_input[cutoff_eos_index + 1 :]
outputs.append(new_input)
max_len = max([len(item) for item in outputs])
outputs = [output + [self.pad_token_id] * (max_len - len(output)) for output in outputs]
outputs = BatchEncoding(
{"input_ids": outputs, "attention_mask": [[1] * len(outputs)]},
tensor_type=self.framework,
)
return outputs
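# --- Illustrative sketch (not part of the original module) ---
# A simplified version of the trimming done in ``_concat_inputs_history`` above: when
# the history plus the new user input no longer fits in
# ``max_length - min_length_for_response`` tokens, whole turns are dropped from the
# front by cutting at EOS boundaries instead of in the middle of a turn. The token ids
# and the EOS id below are made up.
def _example_history_trimming():
    eos = 50256
    history = [11, 12, eos, 21, 22, 23, eos]  # two previous turns
    new_input = [31, 32, eos]                 # current user input
    max_length, min_length_for_response = 10, 4
    merged = history + new_input
    while len(merged) > max_length - min_length_for_response and eos in merged[:-1]:
        # drop everything up to and including the first EOS, i.e. one whole turn
        merged = merged[merged.index(eos) + 1:]
    return merged  # -> [31, 32, eos]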
# Register all the supported tasks here
SUPPORTED_TASKS = {
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": TFAutoModel if is_tf_available() else None,
"pt": AutoModel if is_torch_available() else None,
"default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
},
"sentiment-analysis": {
"impl": TextClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "distilbert-base-uncased-finetuned-sst-2-english",
"tf": "distilbert-base-uncased-finetuned-sst-2-english",
},
},
},
"ner": {
"impl": TokenClassificationPipeline,
"tf": TFAutoModelForTokenClassification if is_tf_available() else None,
"pt": AutoModelForTokenClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
"tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
},
},
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
"pt": AutoModelForQuestionAnswering if is_torch_available() else None,
"default": {
"model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
},
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForMaskedLM if is_torch_available() else None,
"default": {"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"}},
},
"summarization": {
"impl": SummarizationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "sshleifer/distilbart-cnn-12-6", "tf": "t5-small"}},
},
"translation_en_to_fr": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_de": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_ro": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"text2text-generation": {
"impl": Text2TextGenerationPipeline,
"tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"text-generation": {
"impl": TextGenerationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForCausalLM if is_torch_available() else None,
"default": {"model": {"pt": "gpt2", "tf": "gpt2"}},
},
"zero-shot-classification": {
"impl": ZeroShotClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
"config": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
"tokenizer": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
},
},
"conversational": {
"impl": ConversationalPipeline,
"tf": TFAutoModelForCausalLM if is_tf_available() else None,
"pt": AutoModelForCausalLM if is_torch_available() else None,
"default": {"model": {"pt": "microsoft/DialoGPT-medium", "tf": "microsoft/DialoGPT-medium"}},
},
}
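# --- Illustrative sketch (not part of the original module) ---
# Shows how the SUPPORTED_TASKS registry above is typically consulted: each entry maps a
# task name to its pipeline class, the framework-specific auto-model class, and a default
# checkpoint per framework. The helper below is an assumption, not an existing
# transformers API.
def _example_describe_task(task, framework="pt"):
    entry = SUPPORTED_TASKS[task]
    model_class = entry[framework]
    return {
        "pipeline_class": entry["impl"].__name__,
        "model_class": model_class.__name__ if model_class is not None else None,
        "default_checkpoint": entry["default"]["model"][framework],
    }
# e.g. _example_describe_task("summarization") ->
# {"pipeline_class": "SummarizationPipeline", "model_class": "AutoModelForSeq2SeqLM",
#  "default_checkpoint": "sshleifer/distilbart-cnn-12-6"}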
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a :class:`~transformers.Pipeline`.
Pipelines are made of:
- A :doc:`tokenizer <tokenizer>` in charge of mapping raw textual input to tokens.
- A :doc:`model <model>` to make predictions from the inputs.
- Some (optional) post processing for enhancing model's output.
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- :obj:`"feature-extraction"`: will return a :class:`~transformers.FeatureExtractionPipeline`.
- :obj:`"sentiment-analysis"`: will return a :class:`~transformers.TextClassificationPipeline`.
- :obj:`"ner"`: will return a :class:`~transformers.TokenClassificationPipeline`.
- :obj:`"question-answering"`: will return a :class:`~transformers.QuestionAnsweringPipeline`.
- :obj:`"fill-mask"`: will return a :class:`~transformers.FillMaskPipeline`.
- :obj:`"summarization"`: will return a :class:`~transformers.SummarizationPipeline`.
- :obj:`"translation_xx_to_yy"`: will return a :class:`~transformers.TranslationPipeline`.
- :obj:`"text-generation"`: will return a :class:`~transformers.TextGenerationPipeline`.
- :obj:`"conversation"`: will return a :class:`~transformers.ConversationalPipeline`.
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from :class:`~transformers.PreTrainedModel` (for PyTorch)
or :class:`~transformers.TFPreTrainedModel` (for TensorFlow).
If not provided, the default for the :obj:`task` will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If not provided, the default for the :obj:`task` will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If not provided, the default for the :obj:`task` will be loaded.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
model is provided.
kwargs:
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
:class:`~transformers.Pipeline`: A suitable pipeline for the task.
Examples::
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> pipeline('sentiment-analysis')
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
>>> # Named entity recognition pipeline, passing in a specific model and tokenizer
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> pipeline('ner', model=model, tokenizer=tokenizer)
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
targeted_task = SUPPORTED_TASKS[task]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
# At that point framework might still be undetermined
model = get_default_model(targeted_task, framework)
framework = framework or get_framework(model)
task_class, model_class = targeted_task["impl"], targeted_task[framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess which tokenizer is the right one here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
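# --- Illustrative sketch (not part of the original module) ---
# Summarizes the resolution logic of ``pipeline()`` above: when only a model identifier
# is given, the same identifier is reused for the tokenizer and the model card, and the
# tokenizer can also be passed as a (name, kwargs) tuple. The checkpoint name is the
# "ner" default registered above; the ``use_fast`` flag is an assumption about
# AutoTokenizer keyword arguments.
def _example_pipeline_factory():
    checkpoint = "dbmdz/bert-large-cased-finetuned-conll03-english"
    ner = pipeline("ner", model=checkpoint, tokenizer=(checkpoint, {"use_fast": False}))
    return ner("My name is Wolfgang and I live in Berlin")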
| 42.588658
| 183
| 0.610845
|
43a6003f411f1e57656ad93237ecc98e411f3d0d
| 5,103
|
py
|
Python
|
tools/swig/test/testFarray.py
|
deepyaman/numpy
|
b7e75442b03153c7d94fc99e8026d1f09ec17f7f
|
[
"BSD-3-Clause"
] | 5
|
2018-04-24T13:32:59.000Z
|
2019-07-09T07:31:58.000Z
|
tools/swig/test/testFarray.py
|
Kshitish555/numpy
|
12c6000601c7665e52502ab4a67a54d290498266
|
[
"BSD-3-Clause"
] | 169
|
2020-12-25T07:10:57.000Z
|
2022-03-29T22:12:31.000Z
|
tools/swig/test/testFarray.py
|
Kshitish555/numpy
|
12c6000601c7665e52502ab4a67a54d290498266
|
[
"BSD-3-Clause"
] | 4
|
2021-06-25T08:40:39.000Z
|
2021-08-08T09:52:42.000Z
|
#!/usr/bin/env python3
# System imports
from distutils.util import get_platform
import os
import sys
import unittest
# Import NumPy
import numpy as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
# Add the distutils-generated build directory to the python search path and then
# import the extension module
libDir = "lib.{}-{}.{}".format(get_platform(), *sys.version_info[:2])
sys.path.insert(0, os.path.join("build", libDir))
import Farray
######################################################################
class FarrayTestCase(unittest.TestCase):
def setUp(self):
self.nrows = 5
self.ncols = 4
self.array = Farray.Farray(self.nrows, self.ncols)
def testConstructor1(self):
"Test Farray size constructor"
self.assertTrue(isinstance(self.array, Farray.Farray))
def testConstructor2(self):
"Test Farray copy constructor"
for i in range(self.nrows):
for j in range(self.ncols):
self.array[i, j] = i + j
arrayCopy = Farray.Farray(self.array)
self.assertTrue(arrayCopy == self.array)
def testConstructorBad1(self):
"Test Farray size constructor, negative nrows"
self.assertRaises(ValueError, Farray.Farray, -4, 4)
def testConstructorBad2(self):
"Test Farray size constructor, negative ncols"
self.assertRaises(ValueError, Farray.Farray, 4, -4)
def testNrows(self):
"Test Farray nrows method"
self.assertTrue(self.array.nrows() == self.nrows)
def testNcols(self):
"Test Farray ncols method"
self.assertTrue(self.array.ncols() == self.ncols)
def testLen(self):
"Test Farray __len__ method"
self.assertTrue(len(self.array) == self.nrows*self.ncols)
def testSetGet(self):
"Test Farray __setitem__, __getitem__ methods"
m = self.nrows
n = self.ncols
for i in range(m):
for j in range(n):
self.array[i, j] = i*j
for i in range(m):
for j in range(n):
self.assertTrue(self.array[i, j] == i*j)
def testSetBad1(self):
"Test Farray __setitem__ method, negative row"
self.assertRaises(IndexError, self.array.__setitem__, (-1, 3), 0)
def testSetBad2(self):
"Test Farray __setitem__ method, negative col"
self.assertRaises(IndexError, self.array.__setitem__, (1, -3), 0)
def testSetBad3(self):
"Test Farray __setitem__ method, out-of-range row"
self.assertRaises(IndexError, self.array.__setitem__, (self.nrows+1, 0), 0)
def testSetBad4(self):
"Test Farray __setitem__ method, out-of-range col"
self.assertRaises(IndexError, self.array.__setitem__, (0, self.ncols+1), 0)
def testGetBad1(self):
"Test Farray __getitem__ method, negative row"
self.assertRaises(IndexError, self.array.__getitem__, (-1, 3))
def testGetBad2(self):
"Test Farray __getitem__ method, negative col"
self.assertRaises(IndexError, self.array.__getitem__, (1, -3))
def testGetBad3(self):
"Test Farray __getitem__ method, out-of-range row"
self.assertRaises(IndexError, self.array.__getitem__, (self.nrows+1, 0))
def testGetBad4(self):
"Test Farray __getitem__ method, out-of-range col"
self.assertRaises(IndexError, self.array.__getitem__, (0, self.ncols+1))
def testAsString(self):
"Test Farray asString method"
result = """\
[ [ 0, 1, 2, 3 ],
[ 1, 2, 3, 4 ],
[ 2, 3, 4, 5 ],
[ 3, 4, 5, 6 ],
[ 4, 5, 6, 7 ] ]
"""
for i in range(self.nrows):
for j in range(self.ncols):
self.array[i, j] = i+j
self.assertTrue(self.array.asString() == result)
def testStr(self):
"Test Farray __str__ method"
result = """\
[ [ 0, -1, -2, -3 ],
[ 1, 0, -1, -2 ],
[ 2, 1, 0, -1 ],
[ 3, 2, 1, 0 ],
[ 4, 3, 2, 1 ] ]
"""
for i in range(self.nrows):
for j in range(self.ncols):
self.array[i, j] = i-j
self.assertTrue(str(self.array) == result)
def testView(self):
"Test Farray view method"
for i in range(self.nrows):
for j in range(self.ncols):
self.array[i, j] = i+j
a = self.array.view()
self.assertTrue(isinstance(a, np.ndarray))
self.assertTrue(a.flags.f_contiguous)
for i in range(self.nrows):
for j in range(self.ncols):
self.assertTrue(a[i, j] == i+j)
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(FarrayTestCase))
# Execute the test suite
print("Testing Classes of Module Farray")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| 32.297468
| 83
| 0.59612
|
4e6fa6dc622d6b4bf0d2d48dd76c6b9995c59374
| 2,929
|
py
|
Python
|
pc/level_06.py
|
au9ustine/org.au9ustine.puzzles.pythonchallenge
|
12cb9092645283c17e0f7e6f922b3c798bb085cc
|
[
"MIT"
] | null | null | null |
pc/level_06.py
|
au9ustine/org.au9ustine.puzzles.pythonchallenge
|
12cb9092645283c17e0f7e6f922b3c798bb085cc
|
[
"MIT"
] | 6
|
2021-03-18T20:14:01.000Z
|
2022-03-11T23:12:47.000Z
|
pc/level_06.py
|
au9ustine/org.au9ustine.puzzles.pythonchallenge
|
12cb9092645283c17e0f7e6f922b3c798bb085cc
|
[
"MIT"
] | null | null | null |
import unittest
import requests
import logging
import re
import zipfile
import urllib
import os
import os.path
# Default level is WARNING; this suppresses the requests library's INFO logs
logging.basicConfig(format='%(message)s')
def solution():
url = "http://www.pythonchallenge.com/pc/def/channel.zip"
urllib.urlretrieve(url, "channel.zip")
zip_file = zipfile.ZipFile("channel.zip")
zip_file_comments = []
member_name = "90052.txt"
while True:
zip_info = zip_file.getinfo(member_name)
with zip_file.open(member_name) as member_stream:
number = re.findall(r'\d+$', member_stream.read())
zip_file_comments.append(zip_info.comment)
if number:
member_name = '%s.txt' % number[0]
else:
break
return ''.join(zip_file_comments)
class SolutionTest(unittest.TestCase):
def setUp(self):
self.prefix = "http://www.pythonchallenge.com/pc/def/"
self.suffix = ".html"
def tearDown(self):
zip_path = "channel.zip"
if os.path.exists(zip_path):
os.remove(zip_path)
def test_solution(self):
actual = solution()
# It would be identified by pep8, but this is ascii art, who cares!
expected = '''****************************************************************
****************************************************************
** **
** OO OO XX YYYY GG GG EEEEEE NN NN **
** OO OO XXXXXX YYYYYY GG GG EEEEEE NN NN **
** OO OO XXX XXX YYY YY GG GG EE NN NN **
** OOOOOOOO XX XX YY GGG EEEEE NNNN **
** OOOOOOOO XX XX YY GGG EEEEE NN **
** OO OO XXX XXX YYY YY GG GG EE NN **
** OO OO XXXXXX YYYYYY GG GG EEEEEE NN **
** OO OO XX YYYY GG GG EEEEEE NN **
** **
****************************************************************
**************************************************************
'''
self.assertEqual(actual, expected)
# Trick: "hockey" consists of the letters of "oxygen"
origin_url = ''.join([self.prefix, 'oxygen', self.suffix])
try:
r = requests.get(origin_url)
except:
raise
self.assertTrue(r.ok)
next_entry = [re.sub(r'(.*)URL=(.*)\.html\"\>', r'\2', line)
for line in r.iter_lines() if re.match(r'.*URL.*', line)]
r.close()
if len(next_entry) != 0:
r = requests.get(
''.join([self.prefix, next_entry[0], self.suffix]))
logging.warn('Level 07 is %s' % r.url)
else:
logging.warn('Level 07 is %s' % origin_url)
if __name__ == "__main__":
unittest.main(failfast=True)
| 35.289157
| 86
| 0.480027
|
5f68713d33fc7082057063e783f0167672a450bb
| 4,014
|
py
|
Python
|
python/deepFashion/get_category10_informations.py
|
luolongqiang/caffe-luolongqiang
|
5ee132e4451a538d97b62039a62a59a69dc43bb9
|
[
"BSD-2-Clause"
] | 2
|
2017-03-23T04:19:31.000Z
|
2019-07-05T02:31:04.000Z
|
python/deepFashion/get_category10_informations.py
|
luolongqiang/caffe-luolongqiang
|
5ee132e4451a538d97b62039a62a59a69dc43bb9
|
[
"BSD-2-Clause"
] | null | null | null |
python/deepFashion/get_category10_informations.py
|
luolongqiang/caffe-luolongqiang
|
5ee132e4451a538d97b62039a62a59a69dc43bb9
|
[
"BSD-2-Clause"
] | 1
|
2018-10-11T06:07:19.000Z
|
2018-10-11T06:07:19.000Z
|
import random
import numpy as np
from numpy import array
import matplotlib.pyplot as plt
from collections import Counter
from pandas import DataFrame
import os, sys, time, argparse
def get_category_label_num_dict(input_txt):
label_list = []
with open(input_txt, 'r') as fi:
for line in list(fi)[2:]:
label_list.append(line.strip().split()[-1])
fi.close()
label_num_dict = dict(Counter(label_list))
return label_num_dict
def get_label_partition_num(label_num_dict):
indx = array(label_num_dict.values())>10000
cond_label = array(label_num_dict.keys())[indx]
cond_sample_num = array(label_num_dict.values())[indx]
#cond_label_to_new = dict(zip(cond_label, range(len(cond_label))))
train_num = array(cond_sample_num*0.8, dtype = np.int)
val_num = array(cond_sample_num*0.1, dtype = np.int)
test_num = cond_sample_num - train_num - val_num
num_partition = zip(train_num, val_num, test_num)
label_partition_num = dict(zip(cond_label, num_partition))
return label_partition_num
def get_partition_datasets(label_num_dict, input_txt, output_dir):
label_partition_num = get_label_partition_num(label_num_dict)
labels = label_partition_num.keys()
label_partition_dict = {label:[] for label in labels}
with open(input_txt, 'r') as fi:
for line in list(fi)[2:]:
label = line.strip().split()[-1]
if label in labels:
label_partition_dict[label].append('data/deepFashion/'+line)
fi.close()
fo_train = open(output_dir+'/category_train_label.txt', 'w')
fo_val = open(output_dir+'/category_val_label.txt', 'w')
fo_test = open(output_dir+'/category_test_label.txt', 'w')
for label in labels:
random.shuffle(label_partition_dict[label])
train_num, val_num, test_num = label_partition_num[label]
train_lines = array(label_partition_dict[label])[:train_num]
val_lines = array(label_partition_dict[label])[train_num:train_num+val_num]
test_lines = array(label_partition_dict[label])[train_num+val_num:]
fo_train.writelines(train_lines)
fo_val.writelines(val_lines)
fo_test.writelines(test_lines)
fo_train.close()
fo_val.close()
fo_test.close()
def output_results(label_num_dict, output_csv):
label_set = array(label_num_dict.keys(), dtype = np.int)
sample_num = label_num_dict.values()
df = DataFrame({'label':label_set, 'sample_num':sample_num})
df = df[['label', 'sample_num']]
df.to_csv(output_csv, index = False)
plt.bar(label_set, sample_num, color = "blue")
plt.xlabel("label")
plt.ylabel("sample amount")
plt.title("sample amount of label")
plt.xlim(0, max(label_set)+2)
plt.ylim(0, max(sample_num) + 5)
plt.savefig(output_csv.replace('csv','jpg'), dpi=300)
plt.close()
def get_args():
parser = argparse.ArgumentParser(description='get category sample information')
parser.add_argument('-i', dest='input_txt',
help='list_category_img.txt', default=None, type=str)
parser.add_argument('-o', dest='output_csv',
help='category_results.csv', default=None, type=str)
parser.add_argument('-d', dest='output_dir',
help='output_partition_dir', default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
input_txt = args.input_txt
output_csv = args.output_csv
output_dir = args.output_dir
tic = time.clock()
print 'get label_num_dict, begin...'
label_num_dict = get_category_label_num_dict(input_txt)
print 'get label_num_dict, done'
print 'get partition_datasets, begin...'
get_partition_datasets(label_num_dict, input_txt, output_dir)
print 'get partition_datasets, done'
toc = time.clock()
print 'running time:{} seconds'.format(toc-tic)
output_results(label_num_dict, output_csv)
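A hypothetical invocation of the script above (the -d directory name is a placeholder; the -i and -o names are taken from the argparse help strings). Labels with more than 10000 samples are kept and split 80/10/10 into category_train_label.txt, category_val_label.txt and category_test_label.txt under the -d directory:

python get_category10_informations.py -i list_category_img.txt -o category_results.csv -d partition_dir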
| 38.228571
| 85
| 0.692078
|
aba8bddeec88a79b30bc619aad06f44222347ee4
| 718
|
py
|
Python
|
fake_testing.py
|
amangit007/Fake-News-Detector
|
e8cae21d66007bc07560471d63b875803efc2466
|
[
"MIT"
] | 4
|
2018-11-24T21:10:25.000Z
|
2018-12-07T17:37:51.000Z
|
fake_testing.py
|
amangit007/Fake-News-Detector
|
e8cae21d66007bc07560471d63b875803efc2466
|
[
"MIT"
] | 23
|
2018-11-27T18:01:30.000Z
|
2019-01-06T17:30:28.000Z
|
fake_testing.py
|
amangit007/Fake-News-Detector
|
e8cae21d66007bc07560471d63b875803efc2466
|
[
"MIT"
] | 11
|
2018-11-22T18:50:41.000Z
|
2018-12-12T13:51:28.000Z
|
import pandas as pd
import numpy as np
import sys
try:
news_clean_encoded = np.loadtxt('test_news_encoding.gz')
except:
print ("Please first encode the data!!")
sys.exit(0)
try:
model_file = open('clf_architecture.json', 'r')
except:
print ("Save the trained model before testing!!")
sys.exit(0)
from keras.models import model_from_json
loaded_model = model_file.read()
clf = model_from_json(loaded_model)
clf.load_weights('clf_weights.h5')
clf.compile(optimizer = 'adam', loss = 'binary_crossentropy')
x_test = news_clean_encoded
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
x_test = sc.fit_transform(x_test)
predictions = clf.predict(x_test)
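The script stops at raw probabilities. Assuming the saved network ends in a single sigmoid output (an assumption; the architecture itself is not shown here), a hard real/fake label per article can be obtained by thresholding:

predicted_labels = (predictions > 0.5).astype(int)  # assumption: sigmoid output, 0.5 cutoff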
| 23.933333
| 61
| 0.747911
|
a1df876fc1e1738b6e597dd8051db892757f9fe0
| 150
|
py
|
Python
|
tests/skl_datasets/BinaryClass_10/skl_dataset_BinaryClass_10_GaussianNB_6_code_gen.py
|
antoinecarme/sklearn_model_explanation
|
feeeb75576931f4a05a786d936babc0a664eef46
|
[
"BSD-3-Clause"
] | 13
|
2017-12-22T09:12:51.000Z
|
2021-07-26T06:31:54.000Z
|
tests/skl_datasets/BinaryClass_10/skl_dataset_BinaryClass_10_GaussianNB_6_code_gen.py
|
antoinecarme/sklearn_model_explanation
|
feeeb75576931f4a05a786d936babc0a664eef46
|
[
"BSD-3-Clause"
] | 11
|
2017-09-19T20:00:35.000Z
|
2018-02-05T07:59:06.000Z
|
tests/skl_datasets/BinaryClass_10/skl_dataset_BinaryClass_10_GaussianNB_6_code_gen.py
|
antoinecarme/sklearn_model_explanation
|
feeeb75576931f4a05a786d936babc0a664eef46
|
[
"BSD-3-Clause"
] | 4
|
2017-12-11T12:33:32.000Z
|
2020-02-09T15:08:56.000Z
|
from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest
skltest.test_class_dataset_and_model("BinaryClass_10" , "GaussianNB_6")
| 30
| 75
| 0.86
|
a2dd56622219d801d3c86d24d58b0915afa51ef1
| 635
|
py
|
Python
|
users/urls.py
|
B-Paluch/rentalP2021
|
5ec0075162150aefc1331f3468b511377eb38f28
|
[
"MIT"
] | null | null | null |
users/urls.py
|
B-Paluch/rentalP2021
|
5ec0075162150aefc1331f3468b511377eb38f28
|
[
"MIT"
] | null | null | null |
users/urls.py
|
B-Paluch/rentalP2021
|
5ec0075162150aefc1331f3468b511377eb38f28
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.login, name='login'),
path('logout', views.logout, name='logout'),
path('login', views.login, name='login'),
path('index', views.index, name='index'),
path('lenditems', views.lenditems, name='lenditems'),
path('additems', views.additems, name='additems'),
path('addmultiitems', views.multiadditems, name='addmultiitems'),
path('lentlist', views.AllItemListView.as_view(), name='lentlist'),
path('returnitems', views.returnitems, name='returnitems'),
path('<int:_id>/lenditem', views.lenditem, name='lenditem'),
]
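A minimal sketch of how this app-level URLconf might be mounted from the project-level urls.py (the project layout is an assumption, not shown in this repo):

from django.urls import include, path
urlpatterns = [
    path('', include('users.urls')),
]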
| 37.352941
| 71
| 0.675591
|
a5d8ddb0ee7838a4a3dcfa0774bc9fa57c4c3dde
| 23,293
|
py
|
Python
|
notebook/slim_people_classification/deployment/model_deploy.py
|
rucka/NeuralNetworkPlayground
|
b1c9398ee3b0831de4982fdfef34892faa04440d
|
[
"Apache-2.0"
] | null | null | null |
notebook/slim_people_classification/deployment/model_deploy.py
|
rucka/NeuralNetworkPlayground
|
b1c9398ee3b0831de4982fdfef34892faa04440d
|
[
"Apache-2.0"
] | null | null | null |
notebook/slim_people_classification/deployment/model_deploy.py
|
rucka/NeuralNetworkPlayground
|
b1c9398ee3b0831de4982fdfef34892faa04440d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
The DeployedModel namedtuple holds together the values needed to train multiple
clones:
* train_op: An operation that runs the optimizer training op and includes
all the update ops created by `model_fn`. Present only if an optimizer
was specified.
* summary_op: An operation that runs the summaries created by `model_fn`
and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim
__all__ = ['create_clones',
'deploy',
'optimize_clones',
'DeployedModel',
'DeploymentConfig',
'Clone',
]
# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple('Clone',
['outputs', # Whatever model_fn() returned.
'scope', # The scope used to create it.
'device', # The device used to create.
])
# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple('DeployedModel',
['train_op', # The `train_op`
'summary_op', # The `summary_op`
'total_loss', # The loss `Tensor`
'clones', # A list of `Clones` tuples.
])
# Default parameters for DeploymentConfig
_deployment_params = {'num_clones': 1,
'clone_on_cpu': False,
'replica_id': 0,
'num_replicas': 1,
'num_ps_tasks': 0,
'worker_job_name': 'worker',
'ps_job_name': 'ps'}
def create_clones(config, model_fn, args=None, kwargs=None):
"""Creates multiple clones according to config using a `model_fn`.
The returned values of `model_fn(*args, **kwargs)` are collected along with
the scope and device used to create it in a namedtuple
`Clone(outputs, scope, device)`
Note: it is assumed that any loss created by `model_fn` is collected in
the tf.GraphKeys.LOSSES collection.
To recover the losses, summaries or update_ops created by the clone use:
```python
losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
```
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
If `config` specifies deployment on multiple replicas then the default
tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A DeploymentConfig object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional list of keyword arguments to pass to `model_fn`.
Returns:
A list of namedtuples `Clone`.
"""
clones = []
args = args or []
kwargs = kwargs or {}
with slim.arg_scope([slim.model_variable, slim.variable],
device=config.variables_device()):
# Create clones.
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device(i)
with tf.device(clone_device):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
outputs = model_fn(*args, **kwargs)
clones.append(Clone(outputs, clone_scope, clone_device))
return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
"""Gather the loss for a single clone.
Args:
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
Returns:
A tensor for the total loss for the clone. Can be None.
"""
# The return value.
sum_loss = None
# Individual components of the loss that will need summaries.
clone_loss = None
regularization_loss = None
# Compute and aggregate losses on the clone device.
with tf.device(clone.device):
all_losses = []
clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
if clone_losses:
clone_loss = tf.add_n(clone_losses, name='clone_loss')
if num_clones > 1:
clone_loss = tf.div(clone_loss, 1.0 * num_clones,
name='scaled_clone_loss')
all_losses.append(clone_loss)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
all_losses.append(regularization_loss)
if all_losses:
sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
if clone_loss is not None:
tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
if regularization_loss is not None:
tf.summary.scalar('regularization_loss', regularization_loss)
return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
**kwargs):
"""Compute losses and gradients for a single clone.
Args:
optimizer: A tf.Optimizer object.
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
**kwargs: Dict of kwarg to pass to compute_gradients().
Returns:
A tuple (clone_loss, clone_grads_and_vars).
- clone_loss: A tensor for the total loss for the clone. Can be None.
- clone_grads_and_vars: List of (gradient, variable) for the clone.
Can be empty.
"""
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
return sum_loss, clone_grad
def optimize_clones(clones, optimizer,
regularization_losses=None,
**kwargs):
"""Compute clone losses and gradients for the given list of `Clones`.
Note: The regularization_losses are added to the first clone losses.
Args:
clones: List of `Clones` created by `create_clones()`.
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
**kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the clone losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss, clone_grad = _optimize_clone(
optimizer, clone, num_clones, regularization_losses, **kwargs)
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
# Only use regularization_losses for the first clone
regularization_losses = None
# Compute the total_loss summing all the clones_losses.
total_loss = tf.add_n(clones_losses, name='total_loss')
# Sum the gradients across clones.
grads_and_vars = _sum_clones_gradients(grads_and_vars)
return total_loss, grads_and_vars
def deploy(config,
model_fn,
args=None,
kwargs=None,
optimizer=None,
summarize_gradients=False):
"""Deploys a Slim-constructed model across multiple clones.
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
The optional argument `optimizer` is an `Optimizer` object. If not `None`,
the deployed model is configured for training with that optimizer.
If `config` specifies deployment on multiple replicas then the default
tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A `DeploymentConfig` object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional list of keyword arguments to pass to `model_fn`.
optimizer: Optional `Optimizer` object. If passed the model is deployed
for training with that optimizer.
summarize_gradients: Whether or not to add summaries to the gradients.
Returns:
A `DeployedModel` namedtuple.
"""
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Create Clones.
clones = create_clones(config, model_fn, args, kwargs)
first_clone = clones[0]
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
train_op = None
total_loss = None
with tf.device(config.optimizer_device()):
if optimizer:
# Place the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.get_or_create_global_step()
# Compute the gradients for the clones.
total_loss, clones_gradients = optimize_clones(clones, optimizer)
if clones_gradients:
if summarize_gradients:
# Add summaries to the gradients.
summaries |= set(_add_gradients_summaries(clones_gradients))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
train_op = control_flow_ops.with_dependencies([update_op], total_loss,
name='train_op')
else:
clones_losses = []
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss = _gather_clone_loss(clone, len(clones),
regularization_losses)
if clone_loss is not None:
clones_losses.append(clone_loss)
# Only use regularization_losses for the first clone
regularization_losses = None
if clones_losses:
total_loss = tf.add_n(clones_losses, name='total_loss')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone.scope))
if total_loss is not None:
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
if summaries:
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
else:
summary_op = None
return DeployedModel(train_op, summary_op, total_loss, clones)
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
def _add_gradients_summaries(grads_and_vars):
"""Add histogram summaries to gradients.
Note: The summaries are also added to the SUMMARIES collection.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The _list_ of the added summaries for grads_and_vars.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.summary.histogram(var.op.name + ':gradient',
grad_values))
summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
else:
tf.logging.info('Var %s has no gradient', var.op.name)
return summaries
class DeploymentConfig(object):
"""Configuration for deploying a model with `deploy()`.
You can pass an instance of this class to `deploy()` to specify exactly
how to deploy the model to build. If you do not pass one, an instance built
from the default deployment_hparams will be used.
"""
def __init__(self,
num_clones=1,
clone_on_cpu=False,
replica_id=0,
num_replicas=1,
num_ps_tasks=0,
worker_job_name='worker',
ps_job_name='ps'):
"""Create a DeploymentConfig.
The config describes how to deploy a model across multiple clones and
replicas. The model will be replicated `num_clones` times in each replica.
If `clone_on_cpu` is True, each clone will be placed on CPU.
If `num_replicas` is 1, the model is deployed via a single process. In that
case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.
If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
must specify TensorFlow devices for the `worker` and `ps` jobs and
`num_ps_tasks` must be positive.
Args:
num_clones: Number of model clones to deploy in each replica.
clone_on_cpu: If True clones would be placed on CPU.
replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
num_replicas: Number of replicas to use.
num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
worker_job_name: A name for the worker job.
ps_job_name: A name for the parameter server job.
Raises:
ValueError: If the arguments are invalid.
"""
if num_replicas > 1:
if num_ps_tasks < 1:
raise ValueError('When using replicas num_ps_tasks must be positive')
if num_replicas > 1 or num_ps_tasks > 0:
if not worker_job_name:
raise ValueError('Must specify worker_job_name when using replicas')
if not ps_job_name:
raise ValueError('Must specify ps_job_name when using parameter server')
if replica_id >= num_replicas:
raise ValueError('replica_id must be less than num_replicas')
self._num_clones = num_clones
self._clone_on_cpu = clone_on_cpu
self._replica_id = replica_id
self._num_replicas = num_replicas
self._num_ps_tasks = num_ps_tasks
self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''
@property
def num_clones(self):
return self._num_clones
@property
def clone_on_cpu(self):
return self._clone_on_cpu
@property
def replica_id(self):
return self._replica_id
@property
def num_replicas(self):
return self._num_replicas
@property
def num_ps_tasks(self):
return self._num_ps_tasks
@property
def ps_device(self):
return self._ps_device
@property
def worker_device(self):
return self._worker_device
def caching_device(self):
"""Returns the device to use for caching variables.
Variables are cached on the worker CPU when using replicas.
Returns:
A device string or None if the variables do not need to be cached.
"""
if self._num_ps_tasks > 0:
return lambda op: op.device
else:
return None
def clone_device(self, clone_index):
"""Device used to create the clone and all the ops inside the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A value suitable for `tf.device()`.
Raises:
ValueError: if `clone_index` is greater than or equal to the number of clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
if self._clone_on_cpu:
device += '/device:CPU:0'
else:
if self._num_clones > 1:
device += '/device:GPU:%d' % clone_index
return device
def clone_scope(self, clone_index):
"""Name scope to create the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A name_scope suitable for `tf.name_scope()`.
Raises:
ValueError: if `clone_index` is greater than or equal to the number of clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
scope = ''
if self._num_clones > 1:
scope = 'clone_%d' % clone_index
return scope
def optimizer_device(self):
"""Device to use with the optimizer.
Returns:
A value suitable for `tf.device()`.
"""
if self._num_ps_tasks > 0 or self._num_clones > 0:
return self._worker_device + '/device:CPU:0'
else:
return ''
def inputs_device(self):
"""Device to use to build the inputs.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
device += '/device:CPU:0'
return device
def variables_device(self):
"""Returns the device to use for variables created inside the clone.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._ps_device
device += '/device:CPU:0'
class _PSDeviceChooser(object):
"""Slim device chooser for variables when using PS."""
def __init__(self, device, tasks):
self._device = device
self._tasks = tasks
self._task = 0
def choose(self, op):
if op.device:
return op.device
node_def = op if isinstance(op, tf.NodeDef) else op.node_def
if node_def.op == 'Variable':
t = self._task
self._task = (self._task + 1) % self._tasks
d = '%s/task:%d' % (self._device, t)
return d
else:
return op.device
if not self._num_ps_tasks:
return device
else:
chooser = _PSDeviceChooser(device, self._num_ps_tasks)
return chooser.choose
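A minimal sketch (not part of the original file) of the device strings DeploymentConfig resolves for a single-replica run with two GPU clones; the expected values follow directly from clone_device(), clone_scope(), variables_device() and optimizer_device() above:

def _example_device_placement():
    config = DeploymentConfig(num_clones=2)
    assert config.clone_device(0) == '/device:GPU:0'
    assert config.clone_device(1) == '/device:GPU:1'
    assert config.clone_scope(1) == 'clone_1'
    assert config.variables_device() == '/device:CPU:0'
    assert config.optimizer_device() == '/device:CPU:0'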
| 35.13273
| 80
| 0.672777
|
eeb3d27a7a11411b477a6fdacf5b46793c5a4d34
| 14,283
|
py
|
Python
|
ffeatools/modules/FFEA_trajectory.py
|
zzalscv2/FFEA
|
da8a09dadb1b3978a3d230dc79d9b163d7889242
|
[
"Apache-2.0"
] | null | null | null |
ffeatools/modules/FFEA_trajectory.py
|
zzalscv2/FFEA
|
da8a09dadb1b3978a3d230dc79d9b163d7889242
|
[
"Apache-2.0"
] | null | null | null |
ffeatools/modules/FFEA_trajectory.py
|
zzalscv2/FFEA
|
da8a09dadb1b3978a3d230dc79d9b163d7889242
|
[
"Apache-2.0"
] | 1
|
2021-04-03T16:08:21.000Z
|
2021-04-03T16:08:21.000Z
|
#
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
from os import path
import numpy as np
import FFEA_frame, FFEA_pdb
import sys
class FFEA_trajectory:
def __init__(self, fname="", surf=None, load_all=1, frame_rate = 1, num_frames_to_read = 1000000, start = 0, onlyNodes = False):
self.reset()
# Return empty object if fname not initialised
if fname == "" or fname == None:
self.valid = True
sys.stdout.write("Empty trajectory object initialised.\n")
return
self.load(fname, load_all=load_all, surf=surf, frame_rate = frame_rate, num_frames_to_read = num_frames_to_read, start = start, onlyNodes = onlyNodes)
return
def load(self, fname, surf=None, load_all=1, frame_rate = 1, num_frames_to_read = 1000000, start = 0, onlyNodes = False):
print("Loading FFEA trajectory file...")
# Test file exists
if not path.exists(fname):
print("No trajectory found at that location")
# Clear everything for beginning
self.reset()
# Header first, for sure
self.load_header(fname)
# Then rest of trajectory.
if(load_all == 1):
all_frames = 0
while(True):
# Have we read enough frames?
#print all_frames, num_frames_to_read
if((all_frames - start) == num_frames_to_read):
print("\ndone! Successfully read " + str(self.num_frames) + " frame/s from '" + fname + "'.")
break
# Skip or load
if (all_frames - start) % frame_rate != 0 or all_frames < start:
if self.skip_frame() == 1:
print("\ndone! Successfully read " + str(self.num_frames) + " frame/s from '" + fname + "'.")
break
elif(self.load_frame(surf=surf, onlyNodes=onlyNodes) != 0):
print("\ndone! Successfully read " + str(self.num_frames) + " frame/s from '" + fname + "'.")
break
all_frames += 1
#if self.num_frames % 100 == 0:
# print "Frames parsed = ", str(all_frames)
sys.stdout.write("\r\tFrames read = %d, Frames skipped = %d" % (self.num_frames, all_frames - self.num_frames))
sys.stdout.flush()
self.valid = True
self.empty = False
def load_header(self, fname):
# Get a file object and store it
try:
self.traj = open(fname, "r")
except(IOError):
raise IOError("\tFailed to open '" + fname + "' for reading.")
# Now, read only the information from the top of the file
# Title
line = self.traj.readline().strip()
if line != "FFEA_trajectory_file":
raise IOError("\tExpected to read 'FFEA_trajectory_file' but read '" + line + "'. This may not be an FFEA trajectory file.")
self.traj.readline()
self.traj.readline()
# num_blobs
try:
line = self.traj.readline()
self.num_blobs = int(line.split()[3])
except(IndexError, ValueError):
raise IOError("\tExpected to read 'Number of Blobs %d' but read '" + line + "'.")
# num_conformations
try:
line = self.traj.readline()
sline = line.split()[3:]
self.num_conformations = [int(s) for s in sline]
except(IndexError, ValueError):
raise IOError("\tExpected to read 'Number of Conformations %d %d ....%d' but read '" + line + "'.")
# num_nodes
self.num_nodes = [[0 for i in range(self.num_conformations[j])] for j in range(self.num_blobs)]
for i in range(self.num_blobs):
try:
line = self.traj.readline()
sline = line.split()[2:]
for j in range(self.num_conformations[i]):
self.num_nodes[i][j] = int(sline[4 * j + 3])
except(IndexError, ValueError):
raise IOError("\tExpected to read 'Blob " + str(i) + ": Conformation 0 Nodes %d Conformation 1 Nodes %d....Conformation " + str(num_conformations[i] - 1) + " Nodes %d' but read '" + line + "'.")
# final whitespace until '*' and save the file pos
while(self.traj.readline().strip() != "*"):
pass
self.fpos = self.traj.tell()
# Finally, build the objects
self.blob = [[FFEA_traj_blob(self.num_nodes[i][j]) for j in range(self.num_conformations[i])] for i in range(self.num_blobs)]
# Manually set header data
def set_header(self, num_blobs, num_conformations, num_nodes):
self.num_blobs = num_blobs
self.num_conformations = num_conformations
self.num_nodes = num_nodes
# Still build the objects
self.blob = [[FFEA_traj_blob(self.num_nodes[i][j]) for j in range(self.num_conformations[i])] for i in range(self.num_blobs)]
# This function must be run as fast as possible! Error checking will be at a minimum. This function is standalone so it can be threaded
def load_frame(self, surf=None, onlyNodes=False):
# For each blob
eof = False
bindex = -1
for b in self.blob:
try:
# Get indices
bindex += 1
sline = self.traj.readline().split()
cindex = int(sline[3][0])
step = int(sline[5])
except(IndexError):
self.traj.seek(self.fpos)
return 1
# ye who enter here: do not 'fix' this! The script is not handling an
# exception poorly, it is asking for forgiveness, not permission.
# Because it's faster.
# Signed, someone who tried to 'fix' this.
except(ValueError):
# Don't need to reset though!
self.traj.seek(self.fpos)
print("Unable to read conformation index for blob " + str(bindex) + " at frame " + str(self.num_frames))
return 1
# Get a motion_state
b[cindex].motion_state = self.traj.readline().strip()
#if self.traj.readline().strip() == "DYNAMIC":
# b[cindex].motion_state = "DYNAMIC"
# frame = FFEA_frame.FFEA_frame()
# else:
# b[cindex].motion_state = "STATIC"
# frame = b[cindex].frame[0]
# continue
# Do different things depending on the motion state received
if b[cindex].motion_state == "STATIC":
# Nothing to read; frame cannot be read from trajectory
frame = None
else:
# Get a frame
frame = FFEA_frame.FFEA_frame()
frame.num_nodes = b[cindex].num_nodes
# Try to read stuff
if onlyNodes == True:
success = frame.load_from_traj_onlynodes_faster(self.traj)
else:
success = frame.load_from_traj_faster(self.traj)
# We are at eof, or halfway through a frame being written
if success == 1:
eof = True
break
frame.set_step(step)
# Load normals if necessary
#if surf != None:
# frame.calc_normals(surf[bindex][cindex])
# Append frame
b[cindex].frame.append(frame)
# Append None to all frames that aren't active
for c in range(self.num_conformations[bindex]):
if c != cindex:
b[c].frame.append(None)
if not eof:
# Gloss over kinetics stuff
self.traj.readline()
while(self.traj.readline().strip() != "*"):
pass
self.num_frames += 1
self.fpos = self.traj.tell()
return 0
else:
self.traj.seek(self.fpos)
def rescale(self, factor, frame_index):
for b in range(self.num_blobs):
for c in range(self.num_conformations[b]):
try:
self.blob[b][c].frame[frame_index].rescale(factor)
except:
continue
def translate(self, trans):
for b in range(self.num_blobs):
for c in range(self.num_conformations[b]):
for f in range(self.num_frames):
self.blob[b][c].frame[f].translate(trans)
def skip_frame(self):
num_asterisks = 0
while(True):
line = self.traj.readline()
try:
if line[0] == "*":
num_asterisks += 1
if num_asterisks == 2:
break
except:
return 1
return 0
def delete_frame(self, index=-1):
try:
for i in range(self.num_blobs):
for j in range(self.num_conformations[i]):
del self.blob[i][j].frame[index]
self.num_frames -= 1
except:
raise
def build_from_pdb(self, pdb, scale = 1e-10):
self.reset()
# Single blob single conf
self.set_header(1, [1], [[pdb.chain[0].num_atoms]])
# Get all frames in one go (they're the same core objects)
self.blob[0][0].num_nodes = pdb.chain[0].num_atoms
self.blob[0][0].frame = pdb.chain[0].frame
index = 0
for f in self.blob[0][0].frame:
f.pos *= scale
f.set_step(index)
index += 1
self.num_frames = pdb.chain[0].num_frames
self.valid = True
self.empty = False
# Manually set a single frame
def set_single_frame(self, node, surf = None, step = 0):
for i in range(self.num_blobs):
for j in range(self.num_conformations[i]):
# Reset all first
self.blob[i][j].reset()
# Add frame only for conf 0
if j == 0:
frame = FFEA_frame.FFEA_frame()
frame.set_step(step)
frame.pos = node[i].pos
if surf != None:
frame.calc_normals(surf[i])
self.blob[i][j].num_nodes = len(frame.pos)
self.blob[i][j].frame.append(frame)
else:
self.blob[i][j].frame.append(None)
self.num_frames = 1
def set_single_blob(self, bindex):
if bindex >= self.num_blobs:
print("\nBlob index '" + str(bindex) + "' greater than number of blobs, '" + str(traj.num_blobs))
raise IndexError
# Get rid of stuff we don't need and move everything to blob[0]
self.num_blobs = 1
self.num_conformations = [self.num_conformations[bindex]]
self.num_nodes = [self.num_nodes[bindex]]
self.blob = [self.blob[bindex]]
def reset(self):
self.traj = None
self.num_frames = 0
self.num_blobs = 0
self.num_conformations = []
self.num_nodes = []
self.blob = []
self.valid = False
self.empty = True
def write_to_file(self, fname, frames=None, frame_rate = 1):
print("Writing trajectory to file\n\tData will be written to %s\n" % (fname))
# Get a file object
fout = open(fname, "w")
# Write header info
self.write_header_to_file(fout)
# Write frames
if frames == None:
frames = [0,self.num_frames]
for i in range(frames[0], frames[1], frame_rate):
self.write_frame_to_file(fout, i)
fout.close()
def write_header_to_file(self, fout):
fout.write("FFEA_trajectory_file\n\nInitialisation:\nNumber of Blobs %d\nNumber of Conformations" % (self.num_blobs))
for i in self.num_conformations:
fout.write(" %d" % (i))
fout.write("\n")
for i in range(self.num_blobs):
fout.write("Blob %d:" % (i))
for j in range(self.num_conformations[i]):
fout.write(" Conformation %d Nodes %d" % (j, self.num_nodes[i][j]))
fout.write("\n")
fout.write("\n*\n")
def write_frame_to_file(self, fout, index):
if index >= self.num_frames:
return
# Traj data
cur_conf = []
bindex = -1
for b in self.blob:
bindex += 1
cindex = -1
for c in b:
cindex += 1
if c.motion_state == "STATIC":
fout.write("Blob %d, Conformation %d, step %d\nSTATIC\n" % (bindex, cindex, 0))
cur_conf.append(0)
break
else:
if c.frame[index] == None:
continue
cur_conf.append(cindex)
fout.write("Blob %d, Conformation %d, step %d\n" % (bindex, cindex, c.frame[index].step))
fout.write(c.motion_state + "\n")
c.frame[index].write_to_traj(fout)
# Kinetic Data
fout.write("*\nConformation Changes:\n")
bindex = 0
for b in self.blob:
next_conf = cur_conf[bindex]
cindex = -1
try:
for c in b:
cindex += 1
if c.frame[index + 1] != None:
next_conf = cindex
break
except:
next_conf = cur_conf[bindex]
fout.write("Blob %d: Conformation %d -> Conformation %d\n" % (bindex, cur_conf[bindex], next_conf))
bindex += 1
fout.write("*\n")
class FFEA_traj_blob:
def __init__(self, num_nodes=0):
self.reset()
self.num_nodes = num_nodes
# Manually set a frame
def set_frame(self, frame):
self.frame.append(frame)
def set_subblob(self, pin):
"""
Create a subblob from a pin object.
In: self, the pin object
Out: the ID of the generated subblob.
"""
if max(pin.index) >= self.num_nodes:
raise IndexError("Error. Pinned node index %d is larger than num_nodes, %d." % (max(pin.index), self.num_nodes))
self.subblob.append(pin.index)
self.num_subblobs += 1
return self.num_subblobs # let the user assign a nice friendly name to their subblob
def calc_centroid_trajectory(self, subblob_index = -1):
"""
Calculate the centroid (average position of all the nodes) of a
given blob. You can also specify the index of a sub-blob, and get
the centroid of that sub-blob.
In: self, subblob index
Out: a tuple (steps, centroids); centroids is a 2-d array, [x,y,z] wide,
with one row per frame, for the chosen blob/sub-blob.
"""
if subblob_index == -1:
indices = [i for i in range(self.num_nodes)]
else:
try:
indices = self.subblob[subblob_index]
except(IndexError):
raise IndexError("Error. Subblob index %d out of range (num_subblobs = %d)." % (subblob_index, self.num_subblobs))
# Build the trajectory
subblob_size = len(indices)
ctraj = []
step = []
for f in self.frame:
centroid = np.array([0.0,0.0,0.0])
for i in indices:
centroid += f.pos[i]
centroid /= subblob_size
ctraj.append(centroid)
step.append(f.step)
return np.array(step), np.array(ctraj)
def reset(self):
self.motion_state = "DYNAMIC"
self.num_nodes = 0
self.num_subblobs = 0
self.frame = []
self.subblob = []
# External functions
def get_num_frames(fname):
fin = open(fname, "r")
if fin.readline().strip() != "FFEA_trajectory_file":
print("\tExpected to read 'FFEA_trajectory_file' but read '" + line + "'. This may not be an FFEA trajectory file.")
self.reset()
return 1
num_asterisks = 0
for line in fin:
if "*" in line:
num_asterisks += 1
return (num_asterisks - 1) / 2
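A minimal usage sketch of the classes above (the trajectory file name is a placeholder): read every 10th frame and extract the centroid path of blob 0, conformation 0:

def _example_centroid_path():
    traj = FFEA_trajectory("system.ftj", frame_rate=10)
    steps, centroids = traj.blob[0][0].calc_centroid_trajectory()
    return steps, centroids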
| 26.747191
| 198
| 0.659315
|
b09826588ec2712467f42f2a16f33ee30c732cef
| 13,067
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_node_status.py
|
dineshsonachalam/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | 1
|
2021-02-25T04:36:18.000Z
|
2021-02-25T04:36:18.000Z
|
kubernetes_asyncio/client/models/v1_node_status.py
|
hubo1016/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_node_status.py
|
hubo1016/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1NodeStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'addresses': 'list[V1NodeAddress]',
'allocatable': 'dict(str, str)',
'capacity': 'dict(str, str)',
'conditions': 'list[V1NodeCondition]',
'config': 'V1NodeConfigStatus',
'daemon_endpoints': 'V1NodeDaemonEndpoints',
'images': 'list[V1ContainerImage]',
'node_info': 'V1NodeSystemInfo',
'phase': 'str',
'volumes_attached': 'list[V1AttachedVolume]',
'volumes_in_use': 'list[str]'
}
attribute_map = {
'addresses': 'addresses',
'allocatable': 'allocatable',
'capacity': 'capacity',
'conditions': 'conditions',
'config': 'config',
'daemon_endpoints': 'daemonEndpoints',
'images': 'images',
'node_info': 'nodeInfo',
'phase': 'phase',
'volumes_attached': 'volumesAttached',
'volumes_in_use': 'volumesInUse'
}
def __init__(self, addresses=None, allocatable=None, capacity=None, conditions=None, config=None, daemon_endpoints=None, images=None, node_info=None, phase=None, volumes_attached=None, volumes_in_use=None): # noqa: E501
"""V1NodeStatus - a model defined in Swagger""" # noqa: E501
self._addresses = None
self._allocatable = None
self._capacity = None
self._conditions = None
self._config = None
self._daemon_endpoints = None
self._images = None
self._node_info = None
self._phase = None
self._volumes_attached = None
self._volumes_in_use = None
self.discriminator = None
if addresses is not None:
self.addresses = addresses
if allocatable is not None:
self.allocatable = allocatable
if capacity is not None:
self.capacity = capacity
if conditions is not None:
self.conditions = conditions
if config is not None:
self.config = config
if daemon_endpoints is not None:
self.daemon_endpoints = daemon_endpoints
if images is not None:
self.images = images
if node_info is not None:
self.node_info = node_info
if phase is not None:
self.phase = phase
if volumes_attached is not None:
self.volumes_attached = volumes_attached
if volumes_in_use is not None:
self.volumes_in_use = volumes_in_use
@property
def addresses(self):
"""Gets the addresses of this V1NodeStatus. # noqa: E501
List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses # noqa: E501
:return: The addresses of this V1NodeStatus. # noqa: E501
:rtype: list[V1NodeAddress]
"""
return self._addresses
@addresses.setter
def addresses(self, addresses):
"""Sets the addresses of this V1NodeStatus.
List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses # noqa: E501
:param addresses: The addresses of this V1NodeStatus. # noqa: E501
:type: list[V1NodeAddress]
"""
self._addresses = addresses
@property
def allocatable(self):
"""Gets the allocatable of this V1NodeStatus. # noqa: E501
Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity. # noqa: E501
:return: The allocatable of this V1NodeStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._allocatable
@allocatable.setter
def allocatable(self, allocatable):
"""Sets the allocatable of this V1NodeStatus.
Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity. # noqa: E501
:param allocatable: The allocatable of this V1NodeStatus. # noqa: E501
:type: dict(str, str)
"""
self._allocatable = allocatable
@property
def capacity(self):
"""Gets the capacity of this V1NodeStatus. # noqa: E501
Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity # noqa: E501
:return: The capacity of this V1NodeStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""Sets the capacity of this V1NodeStatus.
Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity # noqa: E501
:param capacity: The capacity of this V1NodeStatus. # noqa: E501
:type: dict(str, str)
"""
self._capacity = capacity
@property
def conditions(self):
"""Gets the conditions of this V1NodeStatus. # noqa: E501
Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition # noqa: E501
:return: The conditions of this V1NodeStatus. # noqa: E501
:rtype: list[V1NodeCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1NodeStatus.
Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition # noqa: E501
:param conditions: The conditions of this V1NodeStatus. # noqa: E501
:type: list[V1NodeCondition]
"""
self._conditions = conditions
@property
def config(self):
"""Gets the config of this V1NodeStatus. # noqa: E501
Status of the config assigned to the node via the dynamic Kubelet config feature. # noqa: E501
:return: The config of this V1NodeStatus. # noqa: E501
:rtype: V1NodeConfigStatus
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this V1NodeStatus.
Status of the config assigned to the node via the dynamic Kubelet config feature. # noqa: E501
:param config: The config of this V1NodeStatus. # noqa: E501
:type: V1NodeConfigStatus
"""
self._config = config
@property
def daemon_endpoints(self):
"""Gets the daemon_endpoints of this V1NodeStatus. # noqa: E501
Endpoints of daemons running on the Node. # noqa: E501
:return: The daemon_endpoints of this V1NodeStatus. # noqa: E501
:rtype: V1NodeDaemonEndpoints
"""
return self._daemon_endpoints
@daemon_endpoints.setter
def daemon_endpoints(self, daemon_endpoints):
"""Sets the daemon_endpoints of this V1NodeStatus.
Endpoints of daemons running on the Node. # noqa: E501
:param daemon_endpoints: The daemon_endpoints of this V1NodeStatus. # noqa: E501
:type: V1NodeDaemonEndpoints
"""
self._daemon_endpoints = daemon_endpoints
@property
def images(self):
"""Gets the images of this V1NodeStatus. # noqa: E501
List of container images on this node # noqa: E501
:return: The images of this V1NodeStatus. # noqa: E501
:rtype: list[V1ContainerImage]
"""
return self._images
@images.setter
def images(self, images):
"""Sets the images of this V1NodeStatus.
List of container images on this node # noqa: E501
:param images: The images of this V1NodeStatus. # noqa: E501
:type: list[V1ContainerImage]
"""
self._images = images
@property
def node_info(self):
"""Gets the node_info of this V1NodeStatus. # noqa: E501
Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info # noqa: E501
:return: The node_info of this V1NodeStatus. # noqa: E501
:rtype: V1NodeSystemInfo
"""
return self._node_info
@node_info.setter
def node_info(self, node_info):
"""Sets the node_info of this V1NodeStatus.
Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info # noqa: E501
:param node_info: The node_info of this V1NodeStatus. # noqa: E501
:type: V1NodeSystemInfo
"""
self._node_info = node_info
@property
def phase(self):
"""Gets the phase of this V1NodeStatus. # noqa: E501
NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated. # noqa: E501
:return: The phase of this V1NodeStatus. # noqa: E501
:rtype: str
"""
return self._phase
@phase.setter
def phase(self, phase):
"""Sets the phase of this V1NodeStatus.
NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated. # noqa: E501
:param phase: The phase of this V1NodeStatus. # noqa: E501
:type: str
"""
self._phase = phase
@property
def volumes_attached(self):
"""Gets the volumes_attached of this V1NodeStatus. # noqa: E501
List of volumes that are attached to the node. # noqa: E501
:return: The volumes_attached of this V1NodeStatus. # noqa: E501
:rtype: list[V1AttachedVolume]
"""
return self._volumes_attached
@volumes_attached.setter
def volumes_attached(self, volumes_attached):
"""Sets the volumes_attached of this V1NodeStatus.
List of volumes that are attached to the node. # noqa: E501
:param volumes_attached: The volumes_attached of this V1NodeStatus. # noqa: E501
:type: list[V1AttachedVolume]
"""
self._volumes_attached = volumes_attached
@property
def volumes_in_use(self):
"""Gets the volumes_in_use of this V1NodeStatus. # noqa: E501
List of attachable volumes in use (mounted) by the node. # noqa: E501
:return: The volumes_in_use of this V1NodeStatus. # noqa: E501
:rtype: list[str]
"""
return self._volumes_in_use
@volumes_in_use.setter
def volumes_in_use(self, volumes_in_use):
"""Sets the volumes_in_use of this V1NodeStatus.
List of attachable volumes in use (mounted) by the node. # noqa: E501
:param volumes_in_use: The volumes_in_use of this V1NodeStatus. # noqa: E501
:type: list[str]
"""
self._volumes_in_use = volumes_in_use
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
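A minimal usage sketch (field values are illustrative, not from a real cluster) showing the generated constructor and the to_dict() round-trip:

def _example_usage():
    status = V1NodeStatus(phase="Running", volumes_in_use=["kubernetes.io/csi/pvc-1"])
    return status.to_dict()["phase"]  # "Running"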
| 32.997475
| 224
| 0.625622
|
97b96e6614964c688a78de62a6317f843343a32d
| 3,416
|
py
|
Python
|
PyRAI2MD/Dynamics/Propagators/tsh_helper.py
|
lopez-lab/PyRAI2MD
|
43e27fbc9bc5b6ab6a8f170791951f316fcd0964
|
[
"MIT"
] | 12
|
2021-10-20T23:41:34.000Z
|
2022-03-29T23:29:21.000Z
|
PyRAI2MD/Dynamics/Propagators/tsh_helper.py
|
lijingbai2009/PyRAI2MD
|
f672960aff4d9e25f4dff11683e9231b120a1b68
|
[
"MIT"
] | null | null | null |
PyRAI2MD/Dynamics/Propagators/tsh_helper.py
|
lijingbai2009/PyRAI2MD
|
f672960aff4d9e25f4dff11683e9231b120a1b68
|
[
"MIT"
] | 4
|
2021-02-06T04:27:01.000Z
|
2022-03-29T23:29:28.000Z
|
######################################################
#
# PyRAI2MD 2 module for trajectory surface hopping helper
#
# Author Jingbai Li
# Sep 7 2021
#
######################################################
import numpy as np
def AvoidSingularity(energy_i, energy_j, state_i, state_j):
""" Fixing singularity of state energy gap
Parameters: Type:
energy_i float energy of state i
energy_j float energy of state j
state_i int state i
state_j int state j
Return: Type:
diff float energy difference between i and j
"""
cutoff = 1e-16
gap = np.abs(energy_i - energy_j)
if state_i < state_j:
sign = -1.0
else:
sign = 1.0
if energy_i == energy_j:
diff = sign * cutoff
elif energy_i != energy_j and gap < cutoff:
diff = sign * cutoff
elif energy_i != energy_j and gap >= cutoff:
diff = sign * gap
return diff
def ReflectVelo(velo, nac, reflect):
""" Reflecting velocity at frustrated hopping
Parameters: Type:
velo ndarray nuclear velocity
nac ndarray nonadiabatic coupling
reflect int velocity reflection option
Return: Type:
velo ndarray nuclear velocity
"""
if reflect == 1:
velo = -velo
elif reflect == 2:
velo -= 2 * np.sum(velo * nac) / np.sum(nac * nac) * nac
return velo
def AdjustVelo(energy_old, energy_new, velo, mass, nac, adjust, reflect):
""" Adjusting velocity at surface hopping
Parameters: Type:
energy_old float energy of old state
energy_new float energy of new state
velo ndarray nuclear velocity
nac ndarray nonadiabatic coupling
adjust int velocity adjustment option
reflect int velocity reflection option
Return: Type:
velo ndarray nuclear velocity
frustrated int surface hopping decision
"""
kinetic = np.sum(0.5 * mass * velo**2)
frustrated = 0
if adjust == 0:
del_kinetic = energy_old - energy_new + kinetic
if del_kinetic >= 0:
f = 1.0
else:
velo = ReflectVelo(velo, nac, reflect)
frustrated = 1
elif adjust == 1:
del_kinetic = energy_old - energy_new + kinetic
if del_kinetic >= 0:
f = (del_kinetic / kinetic)**0.5
velo *= f
else:
velo = ReflectVelo(velo, nac, reflect)
frustrated = 1
elif adjust == 2:
a = np.sum(nac * nac / (2 * mass))
b = np.sum(velo * nac)
del_kinetic = energy_old - energy_new
del_kinetic = 4 * a * del_kinetic + b**2
if del_kinetic >= 0:
if b < 0:
f = (b + del_kinetic**0.5) / (2 * a)
else:
f = (b - del_kinetic**0.5) / (2 * a)
velo -= f * nac / mass
else:
velo = ReflectVelo(velo, nac, reflect)
frustrated = 1
return velo, frustrated
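A minimal sketch with made-up numbers: two atoms of unit mass attempt an upward hop across a 0.01 gap (in the same energy units as the inputs) while carrying only ~1e-4 of kinetic energy, so AdjustVelo with adjust=1 reports a frustrated hop and, with reflect=1, returns the reversed velocity:

def _example_frustrated_hop():
    mass = np.array([[1.0], [1.0]])
    velo = np.array([[0.01, 0.0, 0.0], [0.0, 0.01, 0.0]])
    nac = np.ones((2, 3))
    new_velo, frustrated = AdjustVelo(-0.50, -0.49, velo, mass, nac, adjust=1, reflect=1)
    return new_velo, frustrated  # frustrated == 1, new_velo == -velo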
| 29.196581
| 75
| 0.483021
|
feaf727a5a9fbe5116aa5027da6669d166d99a99
| 1,036
|
py
|
Python
|
NETISCE_local/bin/kmeans.py
|
laurenpmarazzi/Netisce
|
2f4550a326b16e579f0fc5948b674cfbc994b100
|
[
"MIT"
] | null | null | null |
NETISCE_local/bin/kmeans.py
|
laurenpmarazzi/Netisce
|
2f4550a326b16e579f0fc5948b674cfbc994b100
|
[
"MIT"
] | null | null | null |
NETISCE_local/bin/kmeans.py
|
laurenpmarazzi/Netisce
|
2f4550a326b16e579f0fc5948b674cfbc994b100
|
[
"MIT"
] | 1
|
2021-11-30T14:24:23.000Z
|
2021-11-30T14:24:23.000Z
|
#!/usr/bin/env python3
import pandas as pd
from scipy import stats
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.metrics import pairwise_distances
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import sys
import os
def main():
#####Input files and user specifications###########
k = int(sys.argv[1]) # k means k
datasets = sys.argv[2].split(',') #the input data file (logss, DAC, both, or discrete versions)
df=pd.DataFrame()
for i in datasets:
dfi=pd.read_csv(i, delim_whitespace=True,index_col = ["name"])
df=pd.concat([df,dfi],axis=0)
# perform kmeans
kmeans = MiniBatchKMeans(n_clusters=k,random_state=0,n_init=10000).fit(df)
labels = kmeans.labels_
df['clusters'] = labels
df2 = pd.DataFrame(index=df.index)
df2['clusters'] = labels
df2.index.name = 'name'
df2.to_csv('kmeans.txt', sep=' ')
main()
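A hypothetical invocation (file names are placeholders): cluster the concatenated, whitespace-delimited samples into k=3 groups and write one name/cluster row per sample to kmeans.txt:

python kmeans.py 3 logss_samples.txt,dac_samples.txt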
| 28
| 99
| 0.705598
|
2f2e901d7b95708385eca28f31826f2014e0bc61
| 4,355
|
py
|
Python
|
api/base.py
|
itdxer/pythonUSPS
|
74d75dc95ed010d3ffffc36e99b96609d4c16ef2
|
[
"MIT"
] | 1
|
2015-02-07T10:14:47.000Z
|
2015-02-07T10:14:47.000Z
|
api/base.py
|
itdxer/pythonUSPS
|
74d75dc95ed010d3ffffc36e99b96609d4c16ef2
|
[
"MIT"
] | null | null | null |
api/base.py
|
itdxer/pythonUSPS
|
74d75dc95ed010d3ffffc36e99b96609d4c16ef2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import urllib
import urllib2
from xml.etree import ElementTree
from xml.dom.minidom import *
from default.errors import USPSXMLError, XMLTagNameError
class USPSServiceSender(object):
service_types = ("ExpressMail",
"PriorityMail",
"Returns",
"International",
"OtherPackages",
)
def __init__(self, url):
self.url = url
@property
def root_tag(self):
return "%sRequest" % self.api_key
def send_request(self, list_of_xml_tags, user_id=None, password=None):
""" " (dict, str, str) -> (class xml.etree.ElementTree.Element)
Send request on USPS server and eturn object with XML tags
"""
xml = self.build_xml(list_of_xml_tags, user_id, password)
data = {'API': self.api_key, 'XML': xml}
print "XML=" + xml
response = urllib2.urlopen(self.url, urllib.urlencode(data))
response_type = ElementTree.parse(response).getroot()
if response_type.tag == 'Error':
raise USPSXMLError(response_type)
return response_type
def build_xml_level(self, list_of_xml_tags, document, root):
for iterator, xml_tag in enumerate(list_of_xml_tags):
if not xml_tag["name"] in self.xml_tag_names:
raise XMLTagNameError(xml_tag["name"])
xml_tag_name = xml_tag["name"]
this_xml_tag = document.createElement(xml_tag_name)
xml_from_tag_dict = list_of_xml_tags[iterator]
this_xml_tag_text = xml_from_tag_dict.get("text", None)
# add attribute to tag
if xml_from_tag_dict.get("attribute", None):
xml_tag_attribute = xml_from_tag_dict["attribute"]
tag_attr = document.createAttribute(xml_tag_attribute["name"])
tag_attr.value = xml_tag_attribute["value"]
this_xml_tag.setAttributeNode(tag_attr)
if xml_from_tag_dict.get("inner_tags", None):
this_xml_tag = self.build_xml_level(
xml_from_tag_dict["inner_tags"],
document, this_xml_tag
)
elif this_xml_tag_text != None:
this_xml_tag_text = document.createTextNode(this_xml_tag_text)
this_xml_tag.appendChild(this_xml_tag_text)
root.appendChild(this_xml_tag)
return root
def build_xml(self, list_of_xml_tags, user_id=None, password=None):
""" (dict, str, str) -> (str)
Get a dictionary with XML tags and their parameters
Return XML in string format
"""
document = Document()
root = document.createElement(self.root_tag)
if user_id is not None:
user_id_attr = document.createAttribute('USERID')
user_id_attr.value = user_id
root.setAttributeNode(user_id_attr)
if password is not None:
user_id_attr = document.createAttribute('PASSWORD')
user_id_attr.value = password
root.setAttributeNode(user_id_attr)
root = self.build_xml_level(list_of_xml_tags, document, root)
document.appendChild(root)
return document.toxml()
def get_response_information(self, xml_response):
""" (class xml.etree.ElementTree.Element) -> dict
Return dictionary with all XML response information
"""
response_info = {}
for xml_tag in xml_response.getchildren():
if xml_tag.tag == 'Error':
raise USPSXMLError(xml_tag)
if xml_tag.getchildren():
response_info[xml_tag.tag] = self.get_response_information(xml_tag)
elif xml_tag is not None:
response_info[xml_tag.tag] = xml_tag.text
return response_info
def is_service_type_correct(self, service_type):
""" (str) -> bool
Return True if correct name of service type
"""
return service_type in self.service_types
def get_package_size_type(self, size):
""" (int) -> str
Return string type from size
"""
return "REGULAR" if size <= 12 else "LARGE"
if __name__ == '__main__':
pass
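A minimal sketch (the subclass, API name, tag names and URL are placeholders invented for illustration) of the nested-dictionary format build_xml() consumes; no request is sent:

def _example_build_xml():
    class _ExampleService(USPSServiceSender):
        api_key = "RateV4"  # placeholder API name
        xml_tag_names = ("Revision", "Package", "Service")
    svc = _ExampleService("https://example.com/ShippingAPI")  # placeholder URL
    return svc.build_xml(
        [{"name": "Revision", "text": "2"},
         {"name": "Package",
          "attribute": {"name": "ID", "value": "1ST"},
          "inner_tags": [{"name": "Service", "text": "PRIORITY"}]}],
        user_id="XXXXXXXXXXXX")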
| 32.022059
| 83
| 0.601378
|