| Column | Type | Length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each data row below lists its fields in this column order, separated by `|` markers; missing values appear as `null`.
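The snippet below is a minimal sketch of how rows with this schema could be filtered, assuming the dump has been exported to a local Parquet file; the file name and the thresholds are placeholders, not part of this dataset.

import pandas as pd
# Load a local export of the rows shown below (the path is a placeholder).
df = pd.read_parquet("code_dump_sample.parquet")
# Keep small, well-documented Python files.
subset = df[(df["score_documentation"] > 0.3) & (df["size"] < 10_000)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())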
67bb468d4e8788f36e1783f576c1ab1f1ae90543
| 834
|
py
|
Python
|
leetcode/binary_search/search_for_a_range.py
|
phantomnat/python-learning
|
addc7ba5fc4fb8920cdd2891d4b2e79efd1a524a
|
[
"MIT"
] | null | null | null |
leetcode/binary_search/search_for_a_range.py
|
phantomnat/python-learning
|
addc7ba5fc4fb8920cdd2891d4b2e79efd1a524a
|
[
"MIT"
] | null | null | null |
leetcode/binary_search/search_for_a_range.py
|
phantomnat/python-learning
|
addc7ba5fc4fb8920cdd2891d4b2e79efd1a524a
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
l,r=0,len(nums)-1
ans = -1
while l<r:
m = (l+r)//2
if nums[m] < target:
l = m+1
else:
r = m
if nums[r] != target:
return [-1,-1]
ans = r
l,r=r,len(nums)-1
while l<r:
m = (l+r+1)//2
if nums[m] > target:
r = m-1
else:
l = m
return [ans, l]
s = Solution()
ans = [
s.searchRange([1],0),
s.searchRange([5,7,7,8,8,8,9,10],8),
s.searchRange([7,7,7,8,10],7),
s.searchRange([7,7,7,8,10,10,10,10],10),
s.searchRange([7,7,7,8,10],10),
s.searchRange([7,7,7,7,8,10],10),
]
for a in ans:
print(a)
| 23.828571
| 69
| 0.425659
| 535
| 0.641487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67bbf09857ef02050b6c12ecac3ac6f6bf74d30b
| 770
|
py
|
Python
|
pi/Cart/main.py
|
polycart/polycart
|
2c36921b126df237b109312a16dfb04f2b2ab20f
|
[
"Apache-2.0"
] | 3
|
2020-01-10T15:54:57.000Z
|
2020-03-14T13:04:14.000Z
|
pi/Cart/main.py
|
polycart/polycart
|
2c36921b126df237b109312a16dfb04f2b2ab20f
|
[
"Apache-2.0"
] | null | null | null |
pi/Cart/main.py
|
polycart/polycart
|
2c36921b126df237b109312a16dfb04f2b2ab20f
|
[
"Apache-2.0"
] | 1
|
2020-01-29T06:07:39.000Z
|
2020-01-29T06:07:39.000Z
|
#!/usr/bin/python3
import cartinit
from kivy.app import App
from kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition
from kivy.lang import Builder
from buttons import RoundedButton
cartinit.init()
# create ScreenManager as root, put all screens into
sm = ScreenManager()
sm.transition = SlideTransition()
screens = []
# load kv files
Builder.load_file('screens.kv')
class DefaultScreen(Screen):
# DefaultScreen, other screen should be subclass of DefaultScreen
pass
class MainScreen(DefaultScreen):
# main menu on startup
pass
class CartApp(App):
# main app
def build(self):
return sm
if __name__ == '__main__':
app = CartApp()
screens.append(MainScreen())
sm.switch_to(screens[-1])
app.run()
| 18.780488
| 73
| 0.720779
| 248
| 0.322078
| 0
| 0
| 0
| 0
| 0
| 0
| 204
| 0.264935
|
67bece9167131625c374de6477b0b045ebb3b193
| 160
|
py
|
Python
|
docs.bak/test.py
|
goujou/CompartmentalSystems
|
4724555c33f11395ddc32738e8dfed7349ee155f
|
[
"MIT"
] | null | null | null |
docs.bak/test.py
|
goujou/CompartmentalSystems
|
4724555c33f11395ddc32738e8dfed7349ee155f
|
[
"MIT"
] | null | null | null |
docs.bak/test.py
|
goujou/CompartmentalSystems
|
4724555c33f11395ddc32738e8dfed7349ee155f
|
[
"MIT"
] | null | null | null |
from CompartmentalSystems import smooth_reservoir_model
from CompartmentalSystems import smooth_model_run
from CompartmentalSystems import start_distributions
| 32
| 55
| 0.91875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67bee977fd10b6b9e05e382910c3fcfaf854728d
| 6,482
|
py
|
Python
|
src/functions_DJTB.py
|
QTGTech/DJTB-Generator
|
96c36516b4bede5fee7a538d79e1e7b380f9d31f
|
[
"Apache-2.0"
] | null | null | null |
src/functions_DJTB.py
|
QTGTech/DJTB-Generator
|
96c36516b4bede5fee7a538d79e1e7b380f9d31f
|
[
"Apache-2.0"
] | null | null | null |
src/functions_DJTB.py
|
QTGTech/DJTB-Generator
|
96c36516b4bede5fee7a538d79e1e7b380f9d31f
|
[
"Apache-2.0"
] | 1
|
2017-12-08T18:39:01.000Z
|
2017-12-08T18:39:01.000Z
|
import numpy as np
import re
"""
"""
OCC_LIMIT = 10
def load_and_parse(filepath, verbose=True, pad_to_tweets=False, tweet_length=280):
"""
    The name is fairly descriptive: loads the text file at 'filepath' and removes parsing artifacts.
    :param filepath: path to the file (a .txt containing the raw tweet text)
    :param verbose: whether to print the progress of the algorithm
    :param pad_to_tweets: forces every tweet to be exactly 'tweet_length' characters long
    :param tweet_length: length of the tweets when pad_to_tweets=True
    :return: charset: set of the unique characters used in the text (minus those removed because they occur too
             rarely).
             text: string containing the cleaned raw text.
"""
if verbose:
print("Starting Data parsing...\n")
    # Read and characterize the corpus
text = open(filepath, 'r').read().lower()
charset = list(set(text))
vocab_size = len(charset)
    # Remove some special characters that get in the way of what the machine can understand
re.sub(r"\n", ' ', text)
    # Detect characters that do not appear at least OCC_LIMIT times in the corpus
nb_occ_chars = np.zeros(len(charset))
for i in range(len(charset)):
for j in range(len(text)):
if text[j] == charset[i]:
nb_occ_chars[i] += 1
vocab_occ = dict(zip(charset, nb_occ_chars))
key_blacklist = []
for key in vocab_occ:
if vocab_occ[key] < OCC_LIMIT:
key_blacklist.append(key)
    # Removing the characters that are too rare in the corpus also takes special characters into account
    # and escapes them with a backslash so that they stay valid inside regular expressions
    unreadable_chars = ['|', '.', '*', '^', '$', '+', '?']
for k in key_blacklist:
if k in unreadable_chars:
readable_k = '\\' + k
else:
readable_k = k
text = re.sub(readable_k, '', text)
del vocab_occ[k]
print("Deleted following characters :\n", key_blacklist, "\n(Insufficient occurences in corpus)\n")
    # Remove 'http://www. ' fragments that lead nowhere and add a space before links that are missing one
text = re.sub('([0-9]|[a-z]|:|!)(http://|https://)', '\g<1> \g<2>', text)
text = re.sub('(http://www.|https://www.|http://)\n', '', text)
    # Collapse double and triple spaces
text = re.sub(' +', ' ', text)
if pad_to_tweets:
print("Padding tweets...")
iterator = 0
old_iterator = 0
text = text + '£'
while text[iterator] != '£':
if text[iterator] == '\n' and text[iterator + 1] != '£':
padding_string = " " * (tweet_length - (iterator - old_iterator))
text = text[:iterator] + padding_string + text[(iterator+1):]
old_iterator += tweet_length
iterator += len(padding_string)
iterator += 1
return charset, text
def format_data(charset, data, sequence_length, verbose_x=False):
"""
    :param sequence_length: length of the character sequences the text is cut into
    :param charset: set of all the characters used by the text
    :param data: raw text, pre-cleaned with load_and_parse
    :return: x, y: one-hot encoded input and target sequences, plus vocab_size and ix_to_char
"""
    # Dictionaries mapping each character to an integer and vice versa (needed for the network to understand them!)
ix_to_char = {ix: char for ix, char in enumerate(charset)}
char_to_ix = {char: ix for ix, char in enumerate(charset)}
vocab_size = len(charset)
    # Create the data matrices. The data is then cut into character sequences of length sequence_length.
    # The data matrix has 3 dimensions: a row corresponds to a sequence, a column to a
    # character within that sequence
    # The // avoids passing a float to range. Not the cleanest, but no problems so far
x = np.zeros((len(data) // sequence_length, sequence_length, vocab_size))
y = np.zeros((len(data) // sequence_length, sequence_length, vocab_size))
    # The bulk of the work: fill the matrix row by row.
for i in range(0, len(data) // sequence_length):
x_sequence = data[i * sequence_length:(i + 1) * sequence_length]
if verbose_x:
print(x_sequence)
x_sequence_ix = [char_to_ix[value] for value in x_sequence]
input_sequence = np.zeros((sequence_length, vocab_size))
for j in range(sequence_length):
input_sequence[j][x_sequence_ix[j]] = 1.
x[i] = input_sequence
y_sequence = data[i * sequence_length + 1:(i + 1) * sequence_length + 1]
y_sequence_ix = [char_to_ix[value] for value in y_sequence]
target_sequence = np.zeros((sequence_length, vocab_size))
for j in range(sequence_length) :
target_sequence[j][y_sequence_ix[j]] = 1.
y[i] = target_sequence
return x, y, vocab_size, ix_to_char
# Generate text using an existing model
def generate_text(model, length, vocab_size, ix_to_char, number=1, save_to_file=False, save_path="../data/generated/",
seed="6969"):
if number < 1:
return -1
text_table = []
for k in range(number):
print(k, '\n')
        # Provide a seed (in the form of a randomly chosen character)
ix = [np.random.randint(vocab_size)]
y_char = [ix_to_char[ix[-1]]]
x = np.zeros((1, length, vocab_size))
for i in range(length):
            # Append the predicted character to the sequence
x[0, i, :][ix[-1]] = 1
print(ix_to_char[ix[-1]], end = "")
ix = np.argmax(model.predict(x[:, :i + 1, :])[0], 1)
y_char.append(ix_to_char[ix[-1]])
text_table.append(''.join(y_char))
if save_to_file:
with open(save_path + seed + ".txt", "w") as generated_tweets:
for j in range(len(text_table)):
generated_tweets.write(text_table[j] + "\n")
return text_table
# --------------------------------TESTING------------------------------
if __name__ == "__main__":
chars, txt = load_and_parse("./data/tweets_small_raw.txt", pad_to_tweets=True)
x, y, v_s, tochar = format_data(chars, txt, 280)
| 38.583333
| 120
| 0.611848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,547
| 0.392329
|
67bfb2a09270657736e8e4b32cff8a3a6b09b92a
| 141
|
py
|
Python
|
src/tsp_c/__init__.py
|
kjudom/tsp-c
|
2ed4ba83ac14443533e6167edf20a4199e871657
|
[
"MIT"
] | null | null | null |
src/tsp_c/__init__.py
|
kjudom/tsp-c
|
2ed4ba83ac14443533e6167edf20a4199e871657
|
[
"MIT"
] | null | null | null |
src/tsp_c/__init__.py
|
kjudom/tsp-c
|
2ed4ba83ac14443533e6167edf20a4199e871657
|
[
"MIT"
] | null | null | null |
from . import _tsp_c
from .tsp_c import solve_greedy
from .tsp_c import solve_SA
from .tsp_c import set_param_SA
from .tsp_c import solve_PSO
| 28.2
| 31
| 0.829787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67bff67472f4b5e6324ab64de0cd6d6f2c3905b9
| 4,496
|
py
|
Python
|
biosimulators_test_suite/results/data_model.py
|
Ryannjordan/Biosimulators_test_suite
|
5f79f157ee8927df277b1967e9409ccfc6baf45f
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
biosimulators_test_suite/results/data_model.py
|
Ryannjordan/Biosimulators_test_suite
|
5f79f157ee8927df277b1967e9409ccfc6baf45f
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
biosimulators_test_suite/results/data_model.py
|
Ryannjordan/Biosimulators_test_suite
|
5f79f157ee8927df277b1967e9409ccfc6baf45f
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
""" Data model for results of test cases
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-01-01
:Copyright: 2021, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from .._version import __version__
from ..warnings import TestCaseWarning # noqa: F401
import enum
__all__ = [
'TestCaseResultType',
'TestCaseResult',
'TestResultsReport',
]
class TestCaseResultType(str, enum.Enum):
""" Type of test case result """
passed = 'passed'
failed = 'failed'
skipped = 'skipped'
class TestCaseResult(object):
""" A result of executing a test case
Attributes:
case (:obj:`TestCase`): test case
        type (:obj:`TestCaseResultType`): type
duration (:obj:`float`): execution duration in seconds
exception (:obj:`Exception`): exception
warnings (:obj:`list` of :obj:`TestCaseWarning`): warnings
skip_reason (:obj:`Exception`): Exception which explains reason for skip
log (:obj:`str`): log of execution
"""
def __init__(self, case=None, type=None, duration=None, exception=None, warnings=None, skip_reason=None, log=None):
"""
Args:
case (:obj:`TestCase`, optional): test case
            type (:obj:`TestCaseResultType`, optional): type
duration (:obj:`float`, optional): execution duration in seconds
exception (:obj:`Exception`, optional): exception
warnings (:obj:`list` of :obj:`TestCaseWarning`, optional): warnings
skip_reason (:obj:`Exception`, optional): Exception which explains reason for skip
log (:obj:`str`, optional): log of execution
"""
self.case = case
self.type = type
self.duration = duration
self.exception = exception
self.warnings = warnings or []
self.skip_reason = skip_reason
self.log = log
def to_dict(self):
""" Generate a dictionary representation e.g., for export to JSON
Returns:
:obj:`dict`: dictionary representation
"""
return {
'case': {
'id': self.case.id,
'description': self.case.description,
},
'resultType': self.type.value,
'duration': self.duration,
'exception': {
'category': self.exception.__class__.__name__,
'message': str(self.exception),
} if self.exception else None,
'warnings': [{'category': warning.category.__name__, 'message': str(warning.message)}
for warning in self.warnings],
'skipReason': {
'category': self.skip_reason.__class__.__name__,
'message': str(self.skip_reason),
} if self.skip_reason else None,
'log': self.log,
}
class TestResultsReport(object):
""" A report of the results of executing the test suite with a simulation tool
Attributes:
test_suite_version (:obj:`str`): version of the test suite which was executed
results (:obj:`list` of :obj:`TestCaseResult`): results of the test cases of the test suite
gh_issue (:obj:`int`): GitHub issue for which the test suite was executed
gh_action_run (:obj:`int`): GitHub action run in which the test suite was executed
"""
def __init__(self, test_suite_version=__version__, results=None, gh_issue=None, gh_action_run=None):
"""
Args:
test_suite_version (:obj:`str`, optional): version of the test suite which was executed
results (:obj:`list` of :obj:`TestCaseResult`, optional): results of the test cases of the test suite
gh_issue (:obj:`int`, optional): GitHub issue for which the test suite was executed
gh_action_run (:obj:`int`, optional): GitHub action run in which the test suite was executed
"""
self.test_suite_version = test_suite_version
self.results = results or []
self.gh_issue = gh_issue
self.gh_action_run = gh_action_run
def to_dict(self):
""" Generate a dictionary representation e.g., for export to JSON
Returns:
:obj:`dict`: dictionary representation
"""
return {
'testSuiteVersion': self.test_suite_version,
'results': [result.to_dict() for result in self.results],
'ghIssue': self.gh_issue,
'ghActionRun': self.gh_action_run,
}
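# A minimal usage sketch of the classes above; _ExampleCase is a hypothetical
# stand-in for a real TestCase object exposing an id and a description, and
# the issue/run numbers are illustrative.
if __name__ == "__main__":
    class _ExampleCase:
        id = "example_case"
        description = "An illustrative test case"
    result = TestCaseResult(case=_ExampleCase(), type=TestCaseResultType.passed, duration=1.5)
    report = TestResultsReport(results=[result], gh_issue=1, gh_action_run=2)
    print(report.to_dict())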
| 37.157025
| 119
| 0.61121
| 4,119
| 0.916148
| 0
| 0
| 0
| 0
| 0
| 0
| 2,670
| 0.593861
|
67c0cd97d0c8bd3cb2723928b3e6589de9cc3b73
| 8,834
|
py
|
Python
|
Projects/Project1/regan/regression.py
|
adelezaini/MachineLearning
|
dc3f34f5d509bed6a993705373c46be4da3f97db
|
[
"MIT"
] | null | null | null |
Projects/Project1/regan/regression.py
|
adelezaini/MachineLearning
|
dc3f34f5d509bed6a993705373c46be4da3f97db
|
[
"MIT"
] | 1
|
2021-10-03T15:16:07.000Z
|
2021-10-03T15:16:07.000Z
|
Projects/Project1/regan/regression.py
|
adelezaini/MachineLearning
|
dc3f34f5d509bed6a993705373c46be4da3f97db
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright © 2021 Fridtjof Gjengset, Adele Zaini, Gaute Holen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import numpy as np
from random import random, seed
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
# FrankeFunction: a two-variable function to create the dataset of our vanilla problem
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
# 3D plot of FrankeFunction
def Plot_FrankeFunction(x,y,z, title="Dataset"):
fig = plt.figure(figsize=(8, 7))
ax = fig.gca(projection="3d")
# Plot the surface.
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_zlabel(r"$z$")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.title(title)
plt.show()
# Create xyz dataset from the FrankeFunction with added normally distributed noise
def create_xyz_dataset(n,mu_N, sigma_N):
x = np.linspace(0,1,n)
y = np.linspace(0,1,n)
x,y = np.meshgrid(x,y)
z = FrankeFunction(x,y) +mu_N +sigma_N*np.random.randn(n,n)
return x,y,z
# Error analysis: MSE and R2 score
def R2(z_data, z_model):
return 1 - np.sum((z_data - z_model) ** 2) / np.sum((z_data - np.mean(z_data)) ** 2)
def MSE(z_data,z_model):
n = np.size(z_model)
return np.sum((z_data-z_model)**2)/n
# SVD theorem
def SVD(A):
U, S, VT = np.linalg.svd(A,full_matrices=True)
D = np.zeros((len(U),len(VT)))
print("shape D= ", np.shape(D))
print("Shape S= ",np.shape(S))
print("lenVT =",len(VT))
print("lenU =",len(U))
D = np.eye(len(U),len(VT))*S
"""
for i in range(0,VT.shape[0]): #was len(VT)
D[i,i]=S[i]
print("i=",i)"""
return U @ D @ VT
# SVD inversion
def SVDinv(A):
U, s, VT = np.linalg.svd(A)
# reciprocals of singular values of s
d = 1.0 / s
# create m x n D matrix
D = np.zeros(A.shape)
# populate D with n x n diagonal matrix
D[:A.shape[1], :A.shape[1]] = np.diag(d)
UT = np.transpose(U)
V = np.transpose(VT)
return np.matmul(V,np.matmul(D.T,UT))
# Design matrix for two independent variables x,y
def create_X(x, y, n):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
    l = int((n+1)*(n+2)/2) # Number of elements in beta, i.e. number of features for a polynomial of degree n (e.g. degree 2 gives [1, x, y, x^2, xy, y^2])
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
def scale_Xz(X_train, X_test, z_train, z_test, with_std=False):
scaler_X = StandardScaler(with_std=with_std) #with_std=False
scaler_X.fit(X_train)
X_train = scaler_X.transform(X_train)
X_test = scaler_X.transform(X_test)
scaler_z = StandardScaler(with_std=with_std) #with_std=False
z_train = np.squeeze(scaler_z.fit_transform(z_train.reshape(-1, 1))) #scaler_z.fit_transform(z_train) #
z_test = np.squeeze(scaler_z.transform(z_test.reshape(-1, 1))) #scaler_z.transform(z_test) #
return X_train, X_test, z_train, z_test
# Splitting and rescaling data (rescaling is optional)
# Default values: 20% of test data and the scaler is StandardScaler without std.dev.
def Split_and_Scale(X,z,test_size=0.2, scale=True, with_std=False):
#Splitting training and test data
X_train, X_test, z_train, z_test = train_test_split(X, z, test_size=test_size)
# Rescaling X and z (optional)
if scale:
X_train, X_test, z_train, z_test = scale_Xz(X_train, X_test, z_train, z_test, with_std=with_std)
return X_train, X_test, z_train, z_test
# OLS equation
def OLS_solver(X_train, X_test, z_train, z_test):
# Calculating Beta Ordinary Least Square Equation with matrix pseudoinverse
    # Alternatively to the NumPy pseudoinverse, it is possible to use the SVD theorem to evaluate the inverse of a matrix (even when it is singular). Just replace 'np.linalg.pinv' with 'SVDinv'.
ols_beta = np.linalg.pinv(X_train.T @ X_train) @ X_train.T @ z_train
z_tilde = X_train @ ols_beta # z_prediction of the train data
z_predict = X_test @ ols_beta # z_prediction of the test data
return ols_beta, z_tilde, z_predict
# Return the rolling mean of a vector and two values at one sigma from the rolling average
def Rolling_Mean(vector, windows=3):
vector_df = pd.DataFrame({'vector': vector})
# computing the rolling average
rolling_mean = vector_df.vector.rolling(windows).mean().to_numpy()
    # computing the values at one sigma from the rolling average
rolling_std = vector_df.vector.rolling(windows).std().to_numpy()
value_up = rolling_mean + rolling_std
value_down = rolling_mean - rolling_std
return rolling_mean, value_down, value_up
# Plot MSE in function of complexity of the model (rolling mean)
def plot_ols_complexity(x, y, z, maxdegree = 20, title="MSE as a function of model complexity"):
complexity = np.arange(0,maxdegree+1)
MSE_train_set = []
MSE_test_set = []
for degree in complexity:
X = create_X(x, y, degree)
X_train, X_test, z_train, z_test = Split_and_Scale(X,np.ravel(z)) #StardardScaler, test_size=0.2, scale=true
ols_beta, z_tilde,z_predict = OLS_solver(X_train, X_test, z_train, z_test)
MSE_train_set.append(MSE(z_train,z_tilde))
MSE_test_set.append(MSE(z_test,z_predict))
plt.figure( figsize = ( 10, 7))
MSE_train_mean, MSE_train_down, MSE_train_up = Rolling_Mean(MSE_train_set)
plt.plot(complexity, MSE_train_mean, label ="Train (rolling ave.)", color="purple")
plt.fill_between(complexity, MSE_train_down, MSE_train_up, alpha=0.2, color="purple")
MSE_test_mean, MSE_test_down, MSE_test_up = Rolling_Mean(MSE_test_set)
plt.plot(complexity, MSE_test_mean, label ="Test (rolling ave.)", color="orange")
plt.fill_between(complexity, MSE_test_down, MSE_test_up, alpha=0.2, color="orange")
plt.plot(complexity, MSE_train_set, '--', alpha=0.3, color="purple", label ="Train (actual values)")
plt.plot(complexity, MSE_test_set, '--', alpha=0.3, color="orange", label ="Test (actual values)")
plt.xlabel("Complexity")
plt.ylabel("MSE")
plt.xlim(complexity[~np.isnan(MSE_train_mean)][0]-1,complexity[-1]+1)
plt.title("Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –")
plt.legend()
plt.grid()
plt.show()
def ridge_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
ridge_beta = np.linalg.pinv(X_train.T @ X_train + lmd*np.eye(len(X_train.T))) @ X_train.T @ z_train #psudoinverse
z_model = X_train @ ridge_beta #calculates model
z_predict = X_test @ ridge_beta
#finds the lambda that gave the best MSE
#best_lamda = lambdas[np.where(MSE_values == np.min(MSE_values))[0]]
return ridge_beta, z_model, z_predict
def lasso_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
RegLasso = linear_model.Lasso(lmd)
_ = RegLasso.fit(X_train,z_train)
z_model = RegLasso.predict(X_train)
z_predict = RegLasso.predict(X_test)
return z_model, z_predict
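# A minimal usage sketch of the helpers above: fit a degree-5 polynomial to the
# noisy Franke function and print the train/test mean squared errors. The grid
# size and noise level are illustrative values.
if __name__ == "__main__":
    x, y, z = create_xyz_dataset(30, 0.0, 0.1)
    X = create_X(x, y, 5)
    X_train, X_test, z_train, z_test = Split_and_Scale(X, np.ravel(z))
    beta, z_tilde, z_predict = OLS_solver(X_train, X_test, z_train, z_test)
    print("Train MSE:", MSE(z_train, z_tilde), "Test MSE:", MSE(z_test, z_predict))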
| 38.745614
| 191
| 0.695381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,236
| 0.365774
|
67c210c665f75559fb74fd11831d3b0f31fccc08
| 3,521
|
py
|
Python
|
habittracker/commands/list-habits.py
|
anjakuchenbecker/oofpp_habits_project
|
5db8e46fedc7ce839008bf8a7f00eabfee2ba901
|
[
"MIT"
] | 2
|
2021-02-16T16:49:16.000Z
|
2021-05-13T13:22:02.000Z
|
habittracker/commands/list-habits.py
|
anjakuchenbecker/oofpp_habits_project
|
5db8e46fedc7ce839008bf8a7f00eabfee2ba901
|
[
"MIT"
] | null | null | null |
habittracker/commands/list-habits.py
|
anjakuchenbecker/oofpp_habits_project
|
5db8e46fedc7ce839008bf8a7f00eabfee2ba901
|
[
"MIT"
] | null | null | null |
import json
import shelve
import sys
import os
import click
from prettytable import PrettyTable
import app_config as conf
import analytics
def get_json_out(raw_text):
"""Convert input raw text and return JSON."""
return json.dumps(raw_text, indent=4, sort_keys=False)
def get_human_out(raw_text):
"""Convert input raw text and return human readable format (table style)."""
human_text = PrettyTable(["id", "name", "description", "periodicity", "created", "checkoffs"])
for item in raw_text:
human_text.add_row([item["id"], item["name"], item["description"], item["periodicity"], item["created"],
"\n".join(item["checkoffs"])])
return human_text
@click.command(short_help="Return a list of all currently tracked habits")
@click.option("-l", "--limit", default=0, type=int,
help="A limit on the number of objects to be returned, must be positive. Default is no limit.")
@click.option("-o", "--output", required=False, default="JSON",
type=click.Choice(["JSON", "HUMAN"], case_sensitive=True), help="Output format. Default JSON.")
def cli(limit, output):
"""Return a list of all currently tracked habits.
The habits are returned sorted by creation date, with the most recently created habit appearing first.
"""
try:
# Open habits database
habits_db = shelve.open(os.path.join(conf.data_dir, conf.db_name))
# Load habits
habits = [habit for habit in habits_db.items()]
# Close habits database
habits_db.close()
# Analyze
habit_list = analytics.list_habits(habits)
# Return habit
return_value = []
for item in habit_list:
return_value.append(item[1].to_custom_dict())
        # Reverse order so that the most recently created habit appears first
return_value = sorted(return_value, key=lambda k: k["created"], reverse=True)
# Apply limit if given
if limit > 0:
if output == "JSON":
click.echo(get_json_out(return_value[:limit]))
else:
click.echo(get_human_out(return_value[:limit]))
elif limit < 0:
raise ValueError(f"A negative limit (given {limit}) is not permitted")
else:
if output == "JSON":
click.echo(get_json_out(return_value))
else:
click.echo(get_human_out(return_value))
except ValueError as e:
        # Inform user: report the error (e.g. a negative limit) and exit the application
click.secho("################# ERROR #################", bg="red", fg="white", bold=True)
click.secho("! An error occurred !", bg="red", fg="white", bold=True)
click.secho(f"{type(e).__name__}: {e}", bg="red", fg="white", bold=True)
click.secho("########################################", bg="red", fg="white", bold=True)
sys.exit(1)
except Exception as e:
# Inform user: Return error if unexpected error occurred and exit application
click.secho("################# ERROR #################", bg="red", fg="white", bold=True)
click.secho("! An unexpected error occurred !", bg="red", fg="white", bold=True)
click.secho(f"{type(e).__name__}: {e}", bg="red", fg="white", bold=True)
click.secho("########################################", bg="red", fg="white", bold=True)
sys.exit(1)
| 44.0125
| 113
| 0.585345
| 0
| 0
| 0
| 0
| 2,780
| 0.789548
| 0
| 0
| 1,400
| 0.397614
|
67c2e5278bdfc21f2e207b4643b01e0663656b3d
| 4,065
|
py
|
Python
|
src/zhinst/toolkit/helpers/shf_waveform.py
|
MadSciSoCool/zhinst-toolkit
|
5ea884db03f53029552b7898dae310f22ce622ba
|
[
"MIT"
] | null | null | null |
src/zhinst/toolkit/helpers/shf_waveform.py
|
MadSciSoCool/zhinst-toolkit
|
5ea884db03f53029552b7898dae310f22ce622ba
|
[
"MIT"
] | null | null | null |
src/zhinst/toolkit/helpers/shf_waveform.py
|
MadSciSoCool/zhinst-toolkit
|
5ea884db03f53029552b7898dae310f22ce622ba
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import numpy as np
class SHFWaveform(object):
"""Implements a waveform for single channel.
    The 'data' attribute holds the waveform samples with the proper scaling,
    granularity and minimum length, i.e. the actual waveform array that can be sent to the instrument.
Arguments:
wave (array): list or numpy array for the waveform, will be
scaled to have a maximum amplitude of 1
delay (float): individual waveform delay in seconds with respect
to the time origin of the sequence, a positive value shifts
the start of the waveform forward in time (default: 0)
granularity (int): granularity that the number of samples are
aligned to (default: 4)
min_length (int): minimum waveform length that the number of
samples are rounded up to (default: 4)
align_start (bool): the waveform will be padded with zeros to
match the granularity, either before or after the samples
(default: True)
Properties:
        data (array): normalized waveform data to be uploaded to the
generator
delay (double): delay in seconds of the individual waveform
w.r.t. the sequence time origin
buffer_length (int): number of samples for the sequence code
buffer wave
"""
def __init__(self, wave, delay=0, granularity=4, min_length=4, align_start=True):
self._granularity = granularity
self._min_length = min_length
self._align_start = align_start
self._wave = wave
self._delay = delay
self._update()
def replace_data(self, wave, delay=0):
"""Replaces the data in the waveform."""
new_buffer_length = self._round_up(len(wave))
self._delay = delay
if new_buffer_length == self.buffer_length:
self._wave = wave
self._update()
else:
raise Exception("Waveform lengths don't match!")
@property
def data(self):
return self._data
@property
def delay(self):
return self._delay
@property
def buffer_length(self):
return self._buffer_length
def _update(self):
"""Update the buffer length and data attributes for new waveforms."""
self._buffer_length = self._round_up(len(self._wave))
self._data = self._adjust_scale(self._wave)
def _adjust_scale(self, wave):
"""Adjust the scaling of the waveform.
The data is actually sent as complex values in the range of (-1, 1).
"""
if len(wave) == 0:
wave = np.zeros(1)
n = len(wave)
n = min(n, self.buffer_length)
m = np.max(np.abs(wave))
data = np.zeros(self.buffer_length)
if self._align_start:
if len(wave) > n:
data[:n] = wave[:n] / m if m >= 1 else wave[:n]
else:
data[: len(wave)] = wave / m if m >= 1 else wave
else:
if len(wave) > n:
data[:n] = (
wave[len(wave) - n :] / m if m >= 1 else wave[len(wave) - n :]
)
else:
data[(self.buffer_length - len(wave)) :] = wave / m if m >= 1 else wave
complex_data = data.astype(complex)
return complex_data
def _round_up(self, waveform_length):
"""Adapt to the allowed granularity and minimum length of waveforms.
The length of the waveform is rounded up if it does not match
the waveform granularity and minimum waveform length specifications
of the instrument.
"""
length = max(waveform_length, self._min_length)
multiplier, rest = divmod(length, self._granularity)
if not rest:
return length
else:
return (multiplier + 1) * self._granularity
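# A minimal usage sketch of SHFWaveform: a 10-sample ramp is rounded up to the
# next multiple of the granularity (12 samples) and normalized to a maximum
# amplitude of 1 as complex data; the delay value is illustrative.
if __name__ == "__main__":
    wf = SHFWaveform(np.linspace(0.0, 2.0, 10), delay=10e-9)
    print(wf.buffer_length)                   # 12
    print(wf.data.dtype, abs(wf.data).max())  # complex128 1.0
    print(wf.delay)                           # 1e-08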
| 34.74359
| 87
| 0.60861
| 3,880
| 0.95449
| 0
| 0
| 185
| 0.04551
| 0
| 0
| 1,999
| 0.491759
|
67c3fb858e01fe9489719be010810d56f24cb176
| 3,905
|
py
|
Python
|
mongoadmin/auth/forms.py
|
hywhut/django-mongoadmin
|
7252f9724e4d556878a907914424745f5fdb0d42
|
[
"BSD-3-Clause"
] | null | null | null |
mongoadmin/auth/forms.py
|
hywhut/django-mongoadmin
|
7252f9724e4d556878a907914424745f5fdb0d42
|
[
"BSD-3-Clause"
] | null | null | null |
mongoadmin/auth/forms.py
|
hywhut/django-mongoadmin
|
7252f9724e4d556878a907914424745f5fdb0d42
|
[
"BSD-3-Clause"
] | 1
|
2020-05-10T13:57:36.000Z
|
2020-05-10T13:57:36.000Z
|
# from django.utils.translation import ugettext_lazy as _
# from django import forms
# from django.contrib.auth.forms import ReadOnlyPasswordHashField
#
# from mongoengine.django.auth import User
#
# from mongodbforms import DocumentForm
#
# class UserCreationForm(DocumentForm):
# """
# A form that creates a user, with no privileges, from the given username and
# password.
# """
# error_messages = {
# 'duplicate_username': _("A user with that username already exists."),
# 'password_mismatch': _("The two password fields didn't match."),
# }
# username = forms.RegexField(label=_("Username"), max_length=30,
# regex=r'^[\w.@+-]+$',
# help_text=_("Required. 30 characters or fewer. Letters, digits and "
# "@/./+/-/_ only."),
# error_messages={
# 'invalid': _("This value may contain only letters, numbers and "
# "@/./+/-/_ characters.")})
# password1 = forms.CharField(label=_("Password"),
# widget=forms.PasswordInput)
# password2 = forms.CharField(label=_("Password confirmation"),
# widget=forms.PasswordInput,
# help_text=_("Enter the same password as above, for verification."))
#
# class Meta:
# model = User
# fields = ("username",)
#
# def clean_username(self):
# # Since User.username is unique, this check is redundant,
# # but it sets a nicer error message than the ORM. See #13147.
# username = self.cleaned_data["username"]
# try:
# User.objects.get(username=username)
# except User.DoesNotExist:
# return username
# raise forms.ValidationError(
# self.error_messages['duplicate_username'],
# code='duplicate_username',
# )
#
# def clean_password2(self):
# password1 = self.cleaned_data.get("password1")
# password2 = self.cleaned_data.get("password2")
# if password1 and password2 and password1 != password2:
# raise forms.ValidationError(
# self.error_messages['password_mismatch'],
# code='password_mismatch',
# )
# return password2
#
# def save(self, commit=True):
# user = super(UserCreationForm, self).save(commit=False)
# self.instance = user.set_password(self.cleaned_data["password1"])
# return self.instance
#
#
# class UserChangeForm(DocumentForm):
# username = forms.RegexField(
# label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
# help_text=_("Required. 30 characters or fewer. Letters, digits and "
# "@/./+/-/_ only."),
# error_messages={
# 'invalid': _("This value may contain only letters, numbers and "
# "@/./+/-/_ characters.")})
# password = ReadOnlyPasswordHashField(label=_("Password"),
# help_text=_("Raw passwords are not stored, so there is no way to see "
# "this user's password, but you can change the password "
# "using <a href=\"password/\">this form</a>."))
#
# class Meta:
# model = User
#
# def __init__(self, *args, **kwargs):
# super(UserChangeForm, self).__init__(*args, **kwargs)
# f = self.fields.get('user_permissions', None)
# if f is not None:
# f.queryset = f.queryset.select_related('content_type')
#
# def clean_password(self):
# # Regardless of what the user provides, return the initial value.
# # This is done here, rather than on the field, because the
# # field does not have access to the initial value
# return self.initial["password"]
#
# def clean_email(self):
# email = self.cleaned_data.get("email")
# if email == '':
# return None
# return email
| 40.257732
| 81
| 0.589245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,809
| 0.975416
|
67c4dc33394c474c6cabe97b41d6b2b8fa22728a
| 2,554
|
py
|
Python
|
odin-libraries/python/odin_test.py
|
gspu/odin
|
a01d039e809eca257fa78d358fe72eb3ad2a09f2
|
[
"MIT"
] | 447
|
2020-05-21T11:22:16.000Z
|
2022-03-13T01:28:25.000Z
|
odin-libraries/python/odin_test.py
|
gspu/odin
|
a01d039e809eca257fa78d358fe72eb3ad2a09f2
|
[
"MIT"
] | 40
|
2020-05-21T13:17:57.000Z
|
2022-03-02T08:44:45.000Z
|
odin-libraries/python/odin_test.py
|
gspu/odin
|
a01d039e809eca257fa78d358fe72eb3ad2a09f2
|
[
"MIT"
] | 25
|
2020-05-28T21:23:13.000Z
|
2022-03-18T19:31:31.000Z
|
""" Runs tests for Ptyhon Odin SDK """
import unittest
from os import environ
import random
from pymongo import MongoClient
import pyodin as odin
class OdinSdkTest(unittest.TestCase):
""" Establish OdinSdkTest object """
def setUp(self):
client = MongoClient(environ.get('ODIN_MONGODB'))
mongodb = client['odin']
self.collection = mongodb['observability']
def tearDown(self):
self.collection.delete_many({"id" : "test_id"})
def test_condition_not_odin_env(self):
""" Run condition operation outside of Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
odin_test = odin.Odin(config="job.yml", path_type="relative")
cond = odin_test.condition(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(cond, True)
self.assertEqual(None, result)
def test_watch_not_odin_env(self):
""" Run watch operation outside of Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
odin_test = odin.Odin(config="job.yml", path_type="relative")
odin_test.watch(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(None, result)
def test_condition(self):
""" Run condition operation inside Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
# test True sets odin exc env to true and in turn enables logging everything to the DB
odin_test = odin.Odin(test=True, config="job.yml", path_type="relative")
cond = odin_test.condition(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(cond, True)
self.assertEqual(test_desc, result['description'])
def test_watch(self):
""" Run watch operation inside Odin Env """
random_int = random.randint(100000, 999999)
test_desc = 'test_desc' + str(random_int)
# test True sets odin exc env to true and in turn enables logging everything to the DB
odin_test = odin.Odin(test=True, config="job.yml", path_type="relative")
odin_test.watch(test_desc, True)
result = self.collection.find_one({"description" : test_desc})
self.assertEqual(test_desc, result['description'])
if __name__ == "__main__":
unittest.main() # run all tests
| 34.513514
| 94
| 0.664056
| 2,341
| 0.916601
| 0
| 0
| 0
| 0
| 0
| 0
| 705
| 0.276038
|
67c4e469d6bfee9cfc7c187e94df576f7ce20488
| 657
|
py
|
Python
|
artemis/general/test_dict_ops.py
|
peteroconnor-bc/artemis
|
ad2871fae7d986bf10580eec27aee5b7315adad5
|
[
"BSD-2-Clause-FreeBSD"
] | 235
|
2016-08-26T14:18:51.000Z
|
2022-03-13T10:54:39.000Z
|
artemis/general/test_dict_ops.py
|
peteroconnor-bc/artemis
|
ad2871fae7d986bf10580eec27aee5b7315adad5
|
[
"BSD-2-Clause-FreeBSD"
] | 112
|
2016-04-30T11:48:38.000Z
|
2021-01-12T20:17:32.000Z
|
artemis/general/test_dict_ops.py
|
peteroconnor-bc/artemis
|
ad2871fae7d986bf10580eec27aee5b7315adad5
|
[
"BSD-2-Clause-FreeBSD"
] | 31
|
2016-11-05T19:09:19.000Z
|
2021-09-13T07:35:40.000Z
|
from artemis.general.dict_ops import cross_dict_dicts, merge_dicts
__author__ = 'peter'
def test_cross_dict_dicts():
assert cross_dict_dicts({'a':{'aa': 1}, 'b':{'bb': 2}}, {'c': {'cc': 3}, 'd': {'dd': 4}}) == {
('a','c'):{'aa':1, 'cc':3},
('a','d'):{'aa':1, 'dd':4},
('b','c'):{'bb':2, 'cc':3},
('b','d'):{'bb':2, 'dd':4}
}
def test_dict_merge():
assert merge_dicts({'a': 1, 'b': 2, 'c': 3}, {'c': 4, 'd': 5}, {'d': 6, 'e': 7}) == {
'a': 1,
'b': 2,
'c': 4,
'd': 6,
'e': 7,
}
if __name__ == "__main__":
test_dict_merge()
test_cross_dict_dicts()
| 22.655172
| 98
| 0.427702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.208524
|
67c5e84b87b6ce3f11354746686bb279c5332a32
| 1,317
|
py
|
Python
|
plur/eval/cubert_swapped_operand_classification_eval.py
|
VHellendoorn/plur
|
63ea4b8dd44b43d26177fb23b0572e0b7c20f4cd
|
[
"Apache-2.0"
] | 52
|
2021-12-03T17:54:27.000Z
|
2022-03-30T13:38:16.000Z
|
plur/eval/cubert_swapped_operand_classification_eval.py
|
VHellendoorn/plur
|
63ea4b8dd44b43d26177fb23b0572e0b7c20f4cd
|
[
"Apache-2.0"
] | 2
|
2022-02-18T01:04:45.000Z
|
2022-03-31T17:20:25.000Z
|
plur/eval/cubert_swapped_operand_classification_eval.py
|
VHellendoorn/plur
|
63ea4b8dd44b43d26177fb23b0572e0b7c20f4cd
|
[
"Apache-2.0"
] | 6
|
2021-12-21T06:00:44.000Z
|
2022-03-30T21:10:46.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute class and mean-per-class accuracy for CuBERT SO."""
from plur.eval.cubert_classification_eval import CuBertClassificationEval
from plur.stage_1.cubert_swapped_operand_classification_dataset import CuBertSwappedOperandClassificationDataset
class CuBertSwappedOperandClassificationEval(CuBertClassificationEval):
"""Eval class for CuBERT SO dataset."""
def __init__(self,
prediction_file: str,
target_file: str,
top_n: int = 1) -> None:
"""As per superclass."""
assert top_n == 1
super().__init__(
prediction_file=prediction_file,
target_file=target_file,
all_classes=CuBertSwappedOperandClassificationDataset.ALL_CLASSES,
top_n=top_n)
| 38.735294
| 112
| 0.742597
| 488
| 0.370539
| 0
| 0
| 0
| 0
| 0
| 0
| 687
| 0.52164
|
67c77d71f1fdbcad027edc06ae60ed4f292fc007
| 908
|
py
|
Python
|
Dynamic Programming/Paint House II.py
|
ikaushikpal/DS-450-python
|
9466f77fb9db9e6a5bb3f20aa89ba6332f49e848
|
[
"MIT"
] | 3
|
2021-06-28T12:04:19.000Z
|
2021-09-07T07:23:41.000Z
|
Dynamic Programming/Paint House II.py
|
ikaushikpal/DS-450-python
|
9466f77fb9db9e6a5bb3f20aa89ba6332f49e848
|
[
"MIT"
] | null | null | null |
Dynamic Programming/Paint House II.py
|
ikaushikpal/DS-450-python
|
9466f77fb9db9e6a5bb3f20aa89ba6332f49e848
|
[
"MIT"
] | 1
|
2021-06-28T15:42:55.000Z
|
2021-06-28T15:42:55.000Z
|
class Solution:
def paintHouse(self, cost:list, houses:int, colors:int)->int:
if houses == 0: # no houses to paint
return 0
if colors == 0: # no colors to paint houses
return 0
dp = [[0]*colors for _ in range(houses)]
dp[0] = cost[0]
for i in range(1, houses):
MINCOST = 1000000007
for j in range(colors):
for k in range(colors):
if j != k:
MINCOST = min(MINCOST, dp[i-1][k])
dp[i][j] = cost[i][j] + MINCOST
        return min(dp[houses-1])
if __name__ == "__main__":
cost = [[1, 5, 7, 2, 1, 4],
[5, 8, 4, 3, 6, 1],
[3, 2, 9, 7, 2, 3],
[1, 2, 4, 9, 1, 7]]
n, k = len(cost), len(cost[0])
print(Solution().paintHouse(cost, n, k))
| 29.290323
| 66
| 0.4163
| 652
| 0.718062
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.064978
|
67c9536255b8a2a78151de4a15608734a1f092c8
| 6,445
|
py
|
Python
|
dufi/gui/balloontip/__init__.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
dufi/gui/balloontip/__init__.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
dufi/gui/balloontip/__init__.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import sys
import os
import threading
import warnings
import locale
import logging
import win32api
import win32con
import win32gui
import win32ts
PY2 = sys.version_info < (3,)
if PY2:
reload(sys)
sys.setdefaultencoding(locale.getpreferredencoding() or "utf-8")
NIN_BALLOONSHOW = win32con.WM_USER + 2
NIN_BALLOONHIDE = win32con.WM_USER + 3
NIN_BALLOONTIMEOUT = win32con.WM_USER + 4
NIN_BALLOONUSERCLICK = win32con.WM_USER + 5
WM_TRAY_EVENT = win32con.WM_USER + 20
win32gui.InitCommonControls()
class BalloonTooltip(object):
def __init__(self, title, message, icon_type, callback):
super(BalloonTooltip, self).__init__()
self.title = title
self.message = message
self.icon_type = icon_type
self.callback = callback
self._class_atom = None
self._hwnd = None
self._hinst = None
def show(self):
# Register the Window class.
wc = win32gui.WNDCLASS()
self._hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = (bytes if PY2 else str)("PythonTaskbar")
wc.lpfnWndProc = {win32con.WM_DESTROY: self._on_destroy,
WM_TRAY_EVENT: self._on_tray_event}
self._class_atom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self._hwnd = win32gui.CreateWindow(
self._class_atom, "Taskbar", style, 0, 0, win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT, 0, 0, self._hinst, None)
win32gui.UpdateWindow(self._hwnd)
win32ts.WTSRegisterSessionNotification(
self._hwnd, win32ts.NOTIFY_FOR_THIS_SESSION)
icon_path_name = self._find_icon()
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
try:
hicon = win32gui.LoadImage(
self._hinst, icon_path_name, win32con.IMAGE_ICON, 0, 0, icon_flags)
except Exception:
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
# http://docs.activestate.com/activepython/3.2/pywin32/PyNOTIFYICONDATA.html
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
nid = (self._hwnd, 0, flags, WM_TRAY_EVENT, hicon, "tooltip")
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
flags = {"error": win32gui.NIIF_ERROR,
"warn": win32gui.NIIF_WARNING,
"info": win32gui.NIIF_INFO}.get(self.icon_type, win32gui.NIIF_NONE)
nid = (self._hwnd, 0, win32gui.NIF_INFO, WM_TRAY_EVENT, hicon,
"Balloon tooltip", self.message, 200, self.title, flags)
win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, nid)
logging.debug("show(...) -> hwnd=%d", self._hwnd)
win32gui.PumpMessages()
def hide(self):
if not self._hwnd:
return
win32gui.PostMessage(self._hwnd, WM_TRAY_EVENT, 0, NIN_BALLOONHIDE)
def _find_icon(self):
getattr(sys, '_MEIPASS', None)
if getattr(sys, "frozen", False):
base_path = getattr(sys, '_MEIPASS', None)
if not base_path:
base_path = os.path.dirname(sys.executable)
else:
base_path = os.path.dirname(sys.argv[0])
return os.path.abspath(os.path.join(base_path, "balloontip.ico"))
def _on_destroy(self, hwnd, msg, wparam, lparam):
logging.debug("_on_destroy(hwnd=%d)", hwnd)
if self._hwnd != hwnd:
warnings.warn("_on_destroy called with invalid hwnd")
return
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, (hwnd, 0))
win32gui.PostMessage(hwnd, win32con.WM_QUIT, 0, 0)
self._hwnd = None
def _on_tray_event(self, hwnd, msg, wparam, lparam):
logging.debug("_on_tray_event(hwnd=%r, lparam=%s)", hwnd,
self._get_const_name(lparam))
if self._hwnd != hwnd:
warnings.warn("_on_tray_event called with invalid hwnd")
return
if lparam in (NIN_BALLOONHIDE, NIN_BALLOONTIMEOUT, NIN_BALLOONUSERCLICK,
win32con.WM_LBUTTONDOWN, win32con.WM_LBUTTONUP,
win32con.WM_LBUTTONDBLCLK, win32con.WM_RBUTTONDOWN,
win32con.WM_RBUTTONUP, win32con.WM_RBUTTONDBLCLK):
logging.debug("_on_tray_event(...) -> destroy window")
win32gui.DestroyWindow(hwnd)
logging.debug("_on_tray_event(...) -> unregister class")
win32gui.UnregisterClass(self._class_atom, self._hinst)
self._class_atom = None
self._hinst = None
if lparam == NIN_BALLOONUSERCLICK and callable(self.callback):
logging.debug("_on_tray_event(...) -> execute callback")
self.callback()
@staticmethod
def _get_const_name(value, _cache={512: "WM_MOUSEMOVE"}):
if value in _cache:
return _cache[value]
for var_name, var_value in globals().items():
if var_name.startswith("NIN_") and var_value == value:
_cache[value] = var_name
return var_name
for var_name in dir(win32con):
if var_name.startswith("WM_") and getattr(win32con, var_name) == value:
_cache[value] = var_name
return var_name
_cache[value] = str(value)
return _cache[value]
def balloon_tip(title, message, *, icon_type=None, callback=None, block=True):
wbt = BalloonTooltip(title, message, icon_type, callback)
if block:
wbt.show()
return
t = threading.Thread(target=wbt.show)
t.daemon = True
t.start()
def hide_balloon_tip():
wbt.hide()
t.join()
return t.is_alive, hide_balloon_tip
################################################################################
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
def _test_async():
import time
i = 0
f = lambda: False
while True:
if not f():
f, _ = balloon_tip("Example 3",
"Async balloontip: {}".format(i),
block=False)
i += 1
time.sleep(0.5)
_test_async()
| 31.286408
| 84
| 0.611482
| 4,885
| 0.757952
| 0
| 0
| 593
| 0.092009
| 0
| 0
| 716
| 0.111094
|
67caf9eed648abdd18c55cb059b56dcfdeff5272
| 7,893
|
py
|
Python
|
ProxyIP.py
|
plumefox/BiliTrend
|
449bade3cbaa92878fab866457f513aa81dcd567
|
[
"Apache-2.0"
] | 2
|
2019-05-11T18:05:34.000Z
|
2022-02-18T13:34:21.000Z
|
ProxyIP.py
|
plumefox/BiliTrend
|
449bade3cbaa92878fab866457f513aa81dcd567
|
[
"Apache-2.0"
] | null | null | null |
ProxyIP.py
|
plumefox/BiliTrend
|
449bade3cbaa92878fab866457f513aa81dcd567
|
[
"Apache-2.0"
] | null | null | null |
# * coding:utf-8 *
# Author : Lucy Cai
# Create Time : 2019/4/12
# IDE : PyCharm
# Copyright(C) 2019 Lucy Cai/plumefox (LucysTime@outlook.com)
# Github:https://github.com/plumefox/BiliTrend/
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/plumefox/BiliTrend/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================
from urllib import request
from lxml import etree
class ProxyIP():
def __init__(self):
self.headers = {
'Host': 'www.xicidaili.com',
'Referer': 'https://www.xicidaili.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 '
'Safari/537.36 Edge/16.16299'
}
self.targetUrl = "https://www.xicidaili.com/nn/"
self.content = None
# save the result in list
self.resultList = []
self.__setReusltDic()
def run(self):
self.__start()
# start the Spider
def __start(self):
self.setRequest()
self.response = request.urlopen(self.req)
res = self.response.read()
self.content = res.decode("utf-8")
# print(self.content)
self.__getHtml()
self.__getRankItemInformation()
# self.__createDict()
self.saveTomySql()
# set Request
def setRequest(self):
self.req = request.Request(self.targetUrl, headers=self.headers)
# set Headers
def setHeaders(self, headers):
self.headers = headers
def __getHtml(self):
self.html = etree.HTML(self.content)
def __setReusltDic(self,ip=None,port=None,type= None,protocol = None,
speed = None,connectTime = None,aliveTime = None):
self.resultItem = {
"country":'cn',
"ip":ip,
"port":port,
"type":type,
"protocol":protocol,
"speed":speed,
"connectTime":connectTime,
"aliveTime":aliveTime
}
def __getRankItemInformation(self):
try:
xPathUrl ='//table/tr'
self.ip = self.html.xpath(xPathUrl+'/td[2]/text()')
self.port = self.html.xpath(xPathUrl+'/td[3]/text()')
self.type = self.html.xpath(xPathUrl + '/td[5]/text()')
self.protocol = self.html.xpath(xPathUrl + '/td[6]/text()')
self.speed = self.html.xpath(xPathUrl + '/td[7]/div[@class = "bar"]/@title')
self.connectTime = self.html.xpath(xPathUrl + '/td[8]/div[@class = "bar"]/@title')
self.aliveTime = self.html.xpath(xPathUrl + '/td[9]/text()')
except Exception as e:
print(e)
def __createDict(self):
length = len(self.ip)
for i in range(0,length):
thisip = self.ip[i]
thisport = self.port[i]
thistype = self.type[i]
thisprotocol = self.protocol [i]
thisspeed = self.speed[i]
thisconnectTime = self.connectTime[i]
thisaliveTime = self.aliveTime[i]
self.__setReusltDic(thisip,thisport,thistype,thisprotocol,thisspeed,thisconnectTime,thisaliveTime)
self.resultList.append(self.resultItem)
print(len(self.resultList))
print(self.resultList)
def setProxyIP(self, protocal="http", ip="110.52.235.114", port="9999"):
self.proxyIP = {
protocal: ip + ":" + port
}
print(self.proxyIP)
def setHttpProxy(self, proxySwitch=False):
httpproxy_handler = request.ProxyHandler(self.proxyIP)
print(self.proxyIP)
# no proxy
nullproxy_handler = request.ProxyHandler({})
if proxySwitch:
print("switch open")
self.opener = request.build_opener(httpproxy_handler)
else:
self.opener = request.build_opener(nullproxy_handler)
# this test used before saving
def testProxyIP(self):
length = len(self.ip)
count = 0
headers = {
'Host': 'www.baidu.com',
'Referer': 'https://www.baidu.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 '
'Safari/537.36 Edge/16.16299'
}
for i in range(0,length):
self.setProxyIP(self.protocol[i],self.ip[i],self.port[i])
self.setHttpProxy(True)
self.setHeaders(headers=headers)
req = request.Request("http://www.baidu.com")
reponse = self.opener.open(req)
status = reponse.getcode()
if(status == 200):
self.__setReusltDic(self.ip[i], self.port[i], self.type[i], self.protocol[i], self.speed[i], self.connectTime[i], self.aliveTime[i])
self.resultList.append(self.resultItem)
count += 1
print(count)
def saveTomySql(self):
self.testProxyIP()
import mySQLConnect
this = mySQLConnect.MySqlConnection()
this.save_myself(self.resultList)
# this test used before using
def testOneIP(self,protocol,ip,port,url,headers):
self.setProxyIP(protocol,ip,port)
self.setHttpProxy(True)
self.setHeaders(headers=headers)
print(url)
req = request.Request(url)
reponse = self.opener.open(req)
status = reponse.getcode()
print(status)
if(status == 200):
return True
else:
return False
def readProxyIP(self):
# get ip from mysql and save to resultList
import mySQLConnect
this = mySQLConnect.MySqlConnection()
self.resultList = []
self.resultList = this.select_mysql()
def getProxyIP(self,testUrl,headers):
print("start")
flag = False
needProtocol = testUrl.split(':')[0].upper()
for i in range(0, len(self.resultList)):
temp_dict = self.resultList[i]
ip = temp_dict['ip']
port = temp_dict['port']
protocol = temp_dict['protocol']
            # Make sure the protocol is http or https, otherwise the proxy will not work
if(protocol != needProtocol):
continue
#if end of the mysql: return and tell the status
# test proxy ip
flag = self.testOneIP(protocol,ip,port,testUrl,headers)
print(flag)
if(flag):
proxyIPInformation = {
"ip": ip,
"port": port,
"protocol": protocol,
}
return proxyIPInformation
return None
if __name__ == '__main__':
headers = {
'Host': 'www.bilibili.com',
'Referer': 'https://www.bilibili.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 '
'Safari/537.36 Edge/16.16299'
}
url = 'https://www.bilibili.com/ranking/'
a = ProxyIP()
a.readProxyIP()
u = a.getProxyIP(url,headers)
print(u)
print("a")
| 33.444915
| 149
| 0.548714
| 6,534
| 0.825313
| 0
| 0
| 0
| 0
| 0
| 0
| 2,190
| 0.27662
|
67cc334615da33b43cc91dce1c8d5fcb9a162b36
| 29,914
|
py
|
Python
|
name_matching/test/test_name_matcher.py
|
DeNederlandscheBank/name_matching
|
366a376596403a1fd912cbf130062016b82306bf
|
[
"MIT"
] | null | null | null |
name_matching/test/test_name_matcher.py
|
DeNederlandscheBank/name_matching
|
366a376596403a1fd912cbf130062016b82306bf
|
[
"MIT"
] | null | null | null |
name_matching/test/test_name_matcher.py
|
DeNederlandscheBank/name_matching
|
366a376596403a1fd912cbf130062016b82306bf
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os.path as path
import abydos.distance as abd
import abydos.phonetic as abp
import pytest
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import name_matching.name_matcher as nm
@pytest.fixture
def name_match():
package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
data = pd.read_csv(path.join(package_dir, 'test','test_names.csv'))
name_matcher = nm.NameMatcher()
name_matcher.load_and_process_master_data(
'company_name', data, start_processing=False, transform=False)
return name_matcher
@pytest.fixture
def adjusted_name():
package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
return pd.read_csv(path.join(package_dir, 'test','adjusted_test_names.csv'))
@pytest.fixture
def words():
return ['fun', 'small', 'pool', 'fun', 'small', 'pool', 'sign',
'small', 'pool', 'sign', 'sign', 'small', 'pool', 'sign', 'paper',
'oppose', 'paper', 'oppose', 'brown', 'pig', 'fat', 'oppose', 'paper',
'oppose', 'brown', 'pig', 'fat', 'snail']
@pytest.mark.parametrize("method",
["",
None,
'no_method']
)
def test_make_distance_metrics_error(name_match, method):
with pytest.raises(TypeError):
name_match.set_distance_metrics([method])
@pytest.mark.parametrize("method, result",
[['indel', abd.Indel()],
['discounted_levenshtein', abd.DiscountedLevenshtein()],
['tichy', abd.Tichy()],
['cormodeL_z', abd.CormodeLZ()],
['iterative_sub_string', abd.IterativeSubString()],
['baulieu_xiii', abd.BaulieuXIII()],
['clement', abd.Clement()],
['dice_asymmetricI', abd.DiceAsymmetricI()],
['kuhns_iii', abd.KuhnsIII()],
['overlap', abd.Overlap()],
['pearson_ii', abd.PearsonII()],
['weighted_jaccard', abd.WeightedJaccard()],
['warrens_iv', abd.WarrensIV()],
['bag', abd.Bag()],
['rouge_l', abd.RougeL()],
['ratcliff_obershelp', abd.RatcliffObershelp()],
['ncd_bz2', abd.NCDbz2()],
['fuzzy_wuzzy_partial_string',
abd.FuzzyWuzzyPartialString()],
['fuzzy_wuzzy_token_sort', abd.FuzzyWuzzyTokenSort()],
['fuzzy_wuzzy_token_set', abd.FuzzyWuzzyTokenSet()],
['editex', abd.Editex()],
['typo', abd.Typo()],
['lig_3', abd.LIG3()],
['ssk', abd.SSK()],
['refined_soundex', abd.PhoneticDistance(transforms=abp.RefinedSoundex(
max_length=30), metric=abd.Levenshtein(), encode_alpha=True)],
['double_metaphone', abd.PhoneticDistance(transforms=abp.DoubleMetaphone(max_length=30), metric=abd.Levenshtein(), encode_alpha=True)]]
)
def test_make_distance_metrics(name_match, method, result):
name_match.set_distance_metrics([method])
assert type(name_match._distance_metrics.popitem()[1][0]) == type(result)
@pytest.mark.parametrize("kwargs_str, result_1, result_2, result_3, result_4",
[[{"ngrams": (4, 5)}, 0, False, (4, 5), 5000],
[{"low_memory": True}, 0, True, (2, 3), 5000],
[{"legal_suffixes": True}, 244, False, (2, 3), 5000],
[{"legal_suffixes": True, "number_of_rows": 8,
"ngrams": (1, 2, 3)}, 244, False, (1, 2, 3), 8],
])
def test_initialisation(kwargs_str, result_1, result_2, result_3, result_4):
name_match = nm.NameMatcher(**kwargs_str)
assert len(name_match._word_set) == result_1
assert name_match._low_memory == result_2
assert name_match._vec.ngram_range == result_3
assert name_match._number_of_rows == result_4
@pytest.mark.parametrize("occ, result_1, result_2, result_3, result_4, result_5",
[[1, '', '', '', '', ''],
[2, 'a-nd', 'Hndkiewicz,2Nicolas',
'Tashirian', 'Hpdson Sbns', 'Marquardt,'],
[3, 'Dickens a-nd', 'Hndkiewicz,2Nicolas',
'Runolfsson, Tashirian Will', 'Hpdson Sbns', 'Hermiston Marquardt,'],
])
def test_preprocess_reduce(name_match, adjusted_name, occ, result_1, result_2, result_3, result_4, result_5):
name_match._column_matching = 'company_name'
new_names = name_match._preprocess_reduce(
adjusted_name, occurence_count=occ)
assert new_names.loc[1866, 'company_name'] == result_1
assert new_names.loc[1423, 'company_name'] == result_2
assert new_names.loc[268, 'company_name'] == result_3
assert new_names.loc[859, 'company_name'] == result_4
assert new_names.loc[1918, 'company_name'] == result_5
@pytest.mark.parametrize("col, start_pro, transform",
[['company_name', False, False],
['no_name', False, False],
['company_name', True, False],
['company_name', True, True],
['company_name', True, True],
])
def test_load_and_process_master_data(adjusted_name, col, start_pro, transform):
name_matcher = nm.NameMatcher()
name_matcher.load_and_process_master_data(
column=col,
df_matching_data=adjusted_name,
start_processing=start_pro,
transform=transform)
assert name_matcher._column == col
pd.testing.assert_frame_equal(
name_matcher._df_matching_data, adjusted_name)
assert name_matcher._preprocessed == start_pro
if transform & start_pro:
assert type(name_matcher._n_grams_matching) == csc_matrix
@pytest.mark.parametrize("trans, common",
[[False, False],
[True, False],
[False, True],
[True, True],
])
def test_process_matching_data(name_match, trans, common):
name_match._postprocess_common_words = common
name_match._process_matching_data(transform=trans)
assert name_match._preprocessed
if trans:
assert type(name_match._n_grams_matching) == csc_matrix
else:
assert name_match._n_grams_matching is None
if common:
assert len(name_match._word_set) > 0
else:
assert len(name_match._word_set) == 0
@pytest.mark.parametrize("lower_case, punctuations, ascii, result_1, result_2, result_3",
[[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray', 'Ösinski-Schinner'],
[True, False, False, 'schumm plc',
'towne, johnston and murray', 'ösinski-schinner'],
[False, True, False, 'Schumm PLC',
'Towne Johnston and Murray', 'ÖsinskiSchinner'],
[False, False, True, 'Schumm PLC',
'Towne, Johnston and Murray', 'Osinski-Schinner'],
[False, True, True, 'Schumm PLC',
'Towne Johnston and Murray', 'OsinskiSchinner'],
[True, False, True, 'schumm plc',
'towne, johnston and murray', 'osinski-schinner'],
[True, True, False, 'schumm plc',
'towne johnston and murray', 'ösinskischinner'],
[True, True, True, 'schumm plc',
'towne johnston and murray', 'osinskischinner'],
])
def test_preprocess(name_match, lower_case, punctuations, ascii, result_1, result_2, result_3):
name_match._preprocess_lowercase = lower_case
name_match._preprocess_punctuations = punctuations
name_match._preprocess_ascii = ascii
new_df = name_match.preprocess(
name_match._df_matching_data, 'company_name')
assert new_df.loc[0, 'company_name'] == result_1
assert new_df.loc[2, 'company_name'] == result_2
assert new_df.loc[784, 'company_name'] == result_3
@pytest.mark.parametrize("low_memory, ngrams, result_1, result_2, result_3",
[[1, (5, 6), 0.02579, 0.00781, 0.01738],
[6, (2, 3), 0.009695, 0.01022, 0.01120],
[8, (1, 2), 0.027087, 0.02765, 0.02910],
[0, (5, 6), 0.02579, 0.00781, 0.01738],
[0, (2, 3), 0.009695, 0.01022, 0.01120],
[0, (1, 2), 0.027087, 0.02765, 0.02910],
])
def test_transform_data(name_match, low_memory, ngrams, result_1, result_2, result_3):
name_match._low_memory = low_memory
name_match._vec = TfidfVectorizer(
lowercase=False, analyzer="char", ngram_range=ngrams)
name_match._process_matching_data(transform=False)
name_match.transform_data()
assert name_match._n_grams_matching.data[10] == pytest.approx(
result_1, 0.001)
assert name_match._n_grams_matching.data[181] == pytest.approx(
result_2, 0.001)
assert name_match._n_grams_matching.data[1000] == pytest.approx(
result_3, 0.001)
@pytest.mark.parametrize("to_be_matched, possible_matches, metrics, result",
[('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein'], 5),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 7),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'bag'], 11),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',
'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein'], 4),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'overlap', 'bag'], 6),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',
'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'discounted_levenshtein'], 4),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons', 'Bank de Nederlandsche'], [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'iterative_sub_string'], 8),
('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 8)
])
def test_score_matches(to_be_matched, possible_matches, metrics, result):
name_match = nm.NameMatcher()
name_match.set_distance_metrics(metrics)
assert np.argmax(name_match._score_matches(
to_be_matched, possible_matches)) == result
@pytest.mark.parametrize("number_of_matches, match_score, metrics, result",
[(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1]]), ['weighted_jaccard'], [0]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
(3, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]), [
'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], [2, 1, 1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['tichy', 'overlap', 'bag'], [2, 1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
'overlap', 'bag'], [0, 2]),
(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'iterative_sub_string'], [1]),
(2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'bag'], [1, 0]),
(1, np.array([[0.3, 0.3, 0.8, 0.2, 0.2]]), [
'weighted_jaccard'], [0]),
(3, np.array([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
(2, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]), [
'weighted_jaccard', 'iterative_sub_string'], [0, 0]),
(1, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]), [
'overlap', 'iterative_sub_string'], [1]),
(1, np.array(
[[-0.5, -0.8, -0.3, -0.7, 0, 2]]), ['bag'], [0]),
(3, np.array([[10, 8, 7, 6, 12, 15, 14, 88]]), [
'weighted_jaccard'], [0]),
(2, np.array([[1, 0.3], [0.1, 0.4]]), [
'weighted_jaccard', 'discounted_levenshtein'], [0, 1])
])
def test_rate_matches(number_of_matches, match_score, metrics, result):
name_match = nm.NameMatcher()
name_match._number_of_matches = number_of_matches
name_match.set_distance_metrics(metrics)
ind = name_match._rate_matches(match_score)
print(ind)
assert len(ind) == np.min([number_of_matches, match_score.shape[0]])
assert list(ind) == result
def test_vectorise_data(name_match):
name_match._vectorise_data(transform=False)
assert len(name_match._vec.vocabulary_) > 0
@pytest.mark.parametrize("match, number_of_matches, word_set, score, result",
[(pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=['match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 94.553),
(pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[
'match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['komt', 'niet', 'voor']), 0, 69.713),
(pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 1, 0.4),
(pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 86.031),
])
def test_postprocess(name_match, match, number_of_matches, word_set, score, result):
name_match._number_of_matches = number_of_matches
name_match._word_set = word_set
new_match = name_match.postprocess(match)
assert new_match.loc[f'score_{score}'] == pytest.approx(result, 0.0001)
@pytest.mark.parametrize("indicator, punctuations, word_set, cut_off, result_1, result_2",
[('legal', False, set(), 0.01, 'plc.', 'bedrijf'),
('legal', True, set(), 0.01, 'plc', 'bedrijf'),
('legal', True, set(['bedrijf']),
0.01, 'bedrijf', 'Group'),
('common', True, set(), 0.01, 'Group', 'West'),
('common', True, set(), 0.3, 'and', 'Group'),
('common', True, set(['West']),
0.3, 'West', 'bedrijf'),
('someting', True, set(['key']), 0.01, 'key', 'val')
])
def test_make_no_scoring_words(name_match, indicator, punctuations, word_set, cut_off, result_1, result_2):
name_match._preprocess_punctuations = punctuations
new_word_set = name_match._make_no_scoring_words(
indicator, word_set, cut_off)
print(new_word_set)
assert new_word_set.issuperset(set([result_1]))
assert not new_word_set.issuperset(set([result_2]))
def test_search_for_possible_matches_error(adjusted_name):
name_matcher = nm.NameMatcher()
with pytest.raises(RuntimeError):
name_matcher._search_for_possible_matches(adjusted_name)
@pytest.mark.parametrize("top_n, low_memory, result_1, result_2",
[(10, 0, 1518, 144),
(50, 0, 1992, 9),
(100, 0, 1999, 6),
(1, 0, 44, 144),
(10, 8, 1518, 144),
(50, 8, 1992, 9),
(100, 8, 1999, 6),
(1, 8, 44, 144)
])
def test_search_for_possible_matches(name_match, adjusted_name, top_n, low_memory, result_1, result_2):
name_match._column_matching = 'company_name'
name_match._low_memory = low_memory
name_match._top_n = top_n
name_match._process_matching_data(True)
possible_match = name_match._search_for_possible_matches(adjusted_name)
assert possible_match.shape[1] == top_n
assert np.max(possible_match) < len(adjusted_name)
assert np.all(possible_match.astype(int) == possible_match)
assert np.max(possible_match[44, :]) == result_1
assert np.min(possible_match[144, :]) == result_2
@pytest.mark.parametrize("common_words, num_matches, possible_matches, matching_series, result_0, result_1",
[(True, 3, np.array([29, 343, 727, 855, 1702]), pd.Series(
['Company and Sons'], index=['company_name']), 36.03, 31.33),
(False, 2, np.array([29, 343, 727, ]), pd.Series(
['Company and Sons'], index=['company_name']), 71.28, 68.6),
(False, 2, np.array([29, 343]), pd.Series(
['Company and Sons'], index=['company_name']), 71.28, 68.6),
(False, 2, np.array([[29, 343], [0, 0]]), pd.Series(
['Company and Sons'], index=['company_name']), 71.28, 68.6),
(False, 2, np.array([29, 343, 727, 855, 1702]), pd.Series(
['Company and Sons'], index=['company_name']), 72.28, 71.28)
])
def test_fuzzy_matches(name_match, common_words, num_matches, possible_matches, matching_series, result_0, result_1):
name_match._column_matching = 'company_name'
name_match._number_of_matches = num_matches
name_match._postprocess_common_words = common_words
name_match._word_set = set(['Sons', 'and'])
match = name_match.fuzzy_matches(possible_matches, matching_series)
assert match['score_0'] == pytest.approx(result_0, 0.0001)
assert match['score_1'] == pytest.approx(result_1, 0.0001)
assert match['match_index_0'] in possible_matches
assert match['match_index_1'] in possible_matches
def test_do_name_matching_full(name_match, adjusted_name):
result = name_match.match_names(adjusted_name, 'company_name')
assert np.sum(result['match_index'] == result.index) == 1922
def test_do_name_matching_split(name_match, adjusted_name):
name_match._preprocess_split = True
result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
assert np.any(result['match_index'] == 44)
def test_do_name_matching_series(name_match, adjusted_name):
result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
assert np.any(result['match_index'] == 44)
def test_do_name_matching_error(adjusted_name):
name_match = nm.NameMatcher()
with pytest.raises(ValueError):
name_match.match_names(adjusted_name, 'company_name')
@pytest.mark.parametrize("verbose", [True, False])
def test_do_name_matching_print(capfd, name_match, adjusted_name, verbose):
name_match._verbose = verbose
name_match.match_names(adjusted_name.iloc[:5].copy(), 'company_name')
out, err = capfd.readouterr()
if verbose:
assert out.find('preprocessing') > -1
assert out.find('searching') > -1
assert out.find('possible') > -1
assert out.find('fuzzy') > -1
assert out.find('done') > -1
else:
assert out == ''
@pytest.mark.parametrize("word, occurence_count, result",
[['fun snail pool', 2, 'snail'],
['fun snail pool', 3, 'fun snail'],
['fun snail pool', 1, ''],
['fun small pool', 3, 'fun small pool'],
['fun snail', 3, 'fun snail'],
['fun small pool', 5, 'fun small pool']])
def test_select_top_words(word, words, occurence_count, result):
word_counts = pd.Series(words).value_counts()
name_match = nm.NameMatcher()
new_word = name_match._select_top_words(
word.split(), word_counts, occurence_count)
assert new_word == result
@pytest.mark.parametrize("match, num_of_matches, result",
[[{'match_name_1': 'fun', 'match_name_2': 'dog',
'match_name_0': 'cat'}, 3, ['cat', 'fun', 'dog']],
[{'match_name_1': 'fun', 'match_name_2': 'dog',
'match_name_0': 'cat'}, 2, ['cat', 'fun']],
[{'match_name_1': 'fun', 'match_name_0': 'cat'},
2, ['cat', 'fun']],
[{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 0, []]])
def test_get_alternative_names(match, num_of_matches, result):
name_match = nm.NameMatcher(number_of_matches=num_of_matches)
res = name_match._get_alternative_names(pd.Series(match))
assert res == result
@pytest.mark.parametrize("preprocess_punctuations, output, input, x",
[[True, '_blame_', {'test': ['fun...', 'done'], 'num':['_.blame._']}, 2],
[True, 'done', {'test': ['fun. . . ',
'done'], 'num':['_.blame._']}, 1],
[True, 'fun', {
'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
[False, 'fun. . .', {
'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
[False, 'fun. . .', {
'num': ['_.blame._'], 'test': ['fun. . . ', 'done']}, 1]
])
def test_preprocess_word_list(preprocess_punctuations, output, input, x):
name_match = nm.NameMatcher(punctuations=preprocess_punctuations)
res = name_match._preprocess_word_list(input)
print(res)
assert res[x] == output
@pytest.mark.parametrize("num_matches, match_score, match, result, y",
[[3, np.array([[1, 1, 1], [1, 1, 1], [0, 0, 0]]), pd.Series(dtype=float), 100, 0],
[2, np.array([[1, 1], [0.4, 0.4], [0, 0]]),
pd.Series(dtype=float), 40, 1],
[1, np.array([[1, 1], [1, 1], [0, 0]]),
pd.Series(dtype=float), 100, 0]
])
def test_adjust_scores(num_matches, match_score, match, result, y):
name_match = nm.NameMatcher(number_of_matches=num_matches)
match = name_match._adjust_scores(match_score, match)
assert match[y] == result
@pytest.mark.parametrize("string, stringlist, result_1, result_2, y",
[['know sign first', ['know', 'know sign', 'know sign first'], 'know first', 'know first', 2],
['know sign first', ['know', 'know sign',
'know sign first'], 'know first', 'know', 1],
['know sign first', ['know', 'know sign',
'know sign first'], 'know first', 'know', 0],
['know first', ['know', 'know', 'know'],
'know first', 'know', 1],
['pool sign small', ['sign small',
'small pool sign', 'small'], '', '', 0],
['pool sign small know', ['sign small',
'small pool sign', 'small'], 'know', '', 0],
['know pool sign small', ['sign small',
'small pool sign', 'small'], 'know', '', 0],
['pool sign small', ['sign small',
'small pool know sign', 'small'], '', 'know', 1],
])
def test_process_words(words, string, stringlist, result_1, result_2, y):
name_match = nm.NameMatcher()
name_match._word_set = set(words)
string, stringlist = name_match._process_words(string, stringlist)
assert string == result_1
assert stringlist[y] == result_2
@pytest.mark.parametrize("word_set, cut_off, result_1, result_2",
[[set(), 0, 1518, 'Group'],
[set(), 0, 1518, 'and'],
[set(), 0.1, 7, 'Group'],
[set(), 0.1, 7, 'LLC'],
[set(), 0.12, 6, 'LLC'],
[set(), 0.2, 1, 'and'],
[set(['apple']), 1, 1, 'apple'],
[set(['apple']), 0, 1519, 'apple'],
[set(['apple']), 0, 1519, 'Group']
])
def test_process_common_words(name_match, word_set, cut_off, result_1, result_2):
words = name_match._process_common_words(word_set, cut_off)
assert result_2 in words
assert len(words) == result_1
@pytest.mark.parametrize("word_set, preprocess, result_1, result_2",
[[set(), True, 244, 'company'],
[set(), True, 244, '3ao'],
[set(), True, 244, 'gmbh'],
[set(), False, 312, '& company'],
[set(), False, 312, '3ao'],
[set(), False, 312, 'g.m.b.h.'],
[set(['apple']), True, 245, 'apple'],
[set(['apple']), False, 313, 'apple'],
[set(['apple..']), True, 245, 'apple..'],
[set(['apple..']), False, 313, 'apple..']
])
def test_process_legal_words(word_set, preprocess, result_1, result_2):
name_match = nm.NameMatcher()
name_match._preprocess_punctuations = preprocess
words = name_match._process_legal_words(word_set)
assert result_2 in words
assert len(words) == result_1
| 55.499072
| 197
| 0.526576
| 0
| 0
| 0
| 0
| 28,443
| 0.950699
| 0
| 0
| 7,513
| 0.25112
|
67ccd647dc5505b2bf0b3f2efbfadce995daded7
| 645
|
py
|
Python
|
data/train/python/67ccd647dc5505b2bf0b3f2efbfadce995daded7create_new_default.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/67ccd647dc5505b2bf0b3f2efbfadce995daded7create_new_default.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/67ccd647dc5505b2bf0b3f2efbfadce995daded7create_new_default.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
'''
Created on Dec 21, 2014
@author: Ben
'''
def create_new_default(directory: str, dest: dict, param: dict):
'''
Creates new default parameter file based on parameter settings
'''
with open(directory, 'w') as new_default:
new_default.write(
'''TARGET DESTINATION = {}
SAVE DESTINATION = {}
SAVE DESTINATION2 = {}
SAVE STARTUP DEST1 = {}
SAVE STARTUP DEST2 = {}
SAVE TYPE DEST1 = {}
SAVE TYPE DEST2 = {}
'''.format(dest['target'], dest['save'], dest['save2'],
param["dest1_save_on_start"], param["dest2_save_on_start"],
param["save_dest1"], param["save_dest2"])
)
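# A minimal usage sketch (hypothetical paths and settings, not part of the original module):
# dest = {'target': 'D:/scans/in', 'save': 'D:/scans/out1', 'save2': 'D:/scans/out2'}
# param = {'dest1_save_on_start': True, 'dest2_save_on_start': False,
#          'save_dest1': 'tiff', 'save_dest2': 'raw'}
# create_new_default('default_parameters.txt', dest, param)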
| 23.888889
| 70
| 0.612403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 383
| 0.593798
|
67cdceeb2a0e37311849079ddc2d4d94bc900a6a
| 4,129
|
py
|
Python
|
analysis/SiPMPE_reader.py
|
akira-okumura/isee_sipm
|
dff98c82ed8ef950c450c83ad8951743e3799e94
|
[
"MIT"
] | 1
|
2019-07-08T02:43:12.000Z
|
2019-07-08T02:43:12.000Z
|
analysis/SiPMPE_reader.py
|
akira-okumura/ISEE_SiPM
|
dff98c82ed8ef950c450c83ad8951743e3799e94
|
[
"MIT"
] | null | null | null |
analysis/SiPMPE_reader.py
|
akira-okumura/ISEE_SiPM
|
dff98c82ed8ef950c450c83ad8951743e3799e94
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
import ROOT
import sys
class DistrReader:
def __init__(self, dataset):
self.stat_error = 0
self.sys_error = 0
self.plambda = 0
self.dataset = str(dataset)
self.hist = ROOT.TH1D('','', 100, -0.2, 0.2)
self.distr = ROOT.TH1D('','', 64, 0, 64)
self.CalcLambda()
def GetStatError(self):
return self.stat_error
def GetSysError(self):
return self.sys_error
def GetLambda(self):
return self.plambda
def Reset(self):
self.stat_error = 0
self.sys_error = 0
self.plambda = 0
self.dataset = ''
def CalcLambda(self):
for asic in range(4):
for channel in range(16):
hfile = ROOT.TFile("%s/hist_as%d_ch%d.root" %(self.dataset, asic, channel))
self.hNoise = hfile.Get('noise')
self.hSignal = hfile.Get('signal')
self.hNoise.SetDirectory(0)
self.hSignal.SetDirectory(0)
hfile.Close()
hist_s = self.hSignal.Clone()
hist_n = self.hNoise.Clone()
hist_s.GetXaxis().SetRangeUser(-40, 100) # 0pe position
p0 = hist_s.GetMaximumBin()
hist_s.GetXaxis().SetRangeUser(120, 250) # 1pe position
p1 = hist_s.GetMaximumBin()
thrsh = int((p0+p1)/1.9)
del hist_s
del hist_n
hist_s = self.hSignal
hist_n = self.hNoise
N0_s = hist_s.Integral(1, thrsh)
N0_su = hist_s.Integral(1, hist_s.FindBin(hist_s.GetXaxis().GetBinCenter(thrsh) + 30))
N0_sl = hist_s.Integral(1, hist_s.FindBin(hist_s.GetXaxis().GetBinCenter(thrsh) - 30))
N0_n = hist_n.Integral(1, thrsh)
N0_nu = hist_n.Integral(1, hist_n.FindBin(hist_n.GetXaxis().GetBinCenter(thrsh) + 30))
N0_nl = hist_n.Integral(1, hist_n.FindBin(hist_n.GetXaxis().GetBinCenter(thrsh) - 30))
N_s = hist_s.Integral() + hist_s.GetBinContent(hist_s.GetNbinsX() + 1)
N_n = hist_n.Integral() + hist_n.GetBinContent(hist_n.GetNbinsX() + 1)
P0_s = N0_s / N_s
P0_su = N0_su / N_s
P0_sl = N0_sl / N_s
P0_n = N0_n / N_n
P0_nu = N0_nu / N_n
P0_nl = N0_nl / N_n
err_s_stat = np.sqrt(N_s * (1 - P0_s) * P0_s) / N0_s
err_n_stat = np.sqrt(N_n * (1 - P0_n) * P0_n) / N0_n
err_s_sys = ROOT.TMath.Log(P0_sl) - ROOT.TMath.Log(P0_su)
err_n_sys = ROOT.TMath.Log(P0_nl) - ROOT.TMath.Log(P0_nu)
err_tot_sys = np.sqrt(np.power(err_s_sys, 2) + np.power(err_n_sys, 2))
err_tot_stat = np.sqrt(np.power(err_s_stat, 2) + np.power(err_n_stat, 2))
self.sys_error += np.power(err_tot_sys, 2)
self.stat_error += np.power(err_tot_stat, 2)
Plambda = - (ROOT.TMath.Log(P0_s) - ROOT.TMath.Log(P0_n))
self.plambda += Plambda
self.hist.Fill(Plambda)
self.distr.Fill(asic * 16 + channel, Plambda)
hist_s.Delete()
hist_n.Delete()
self.stat_error = np.sqrt(self.GetStatError())
self.sys_error = np.sqrt(self.GetSysError())
def GetLambdaHist(self):
return self.hist
def GetLambdaDistr(self):
return self.distr
# Example usage:
# PEd = DistrReader('/Volumes/Untitled/zenin/linearity_465/linearity_465_sipm/hists/3500_4_465')
#
# total = PEd.GetLambda()
# stat_err = PEd.GetStatError()
# sys_err = PEd.GetSysError()
#
# print('total lambda = %f \u00B1 %f stat \u00B1 %f sys'%(total, stat_err, sys_err))
# print('relative uncertainty = %f%% stat + %f%% sys'%(stat_err/total*100, sys_err/total*100))
#
# h = PEd.GetLambdaDistr().Clone()
# print(h.GetBinContent(9))
# h.Draw()
| 34.123967
| 102
| 0.534996
| 3,627
| 0.878421
| 0
| 0
| 0
| 0
| 0
| 0
| 509
| 0.123274
|
67cde7d5e3ff3451bd18f756ff702549907cc3a3
| 2,364
|
py
|
Python
|
bad_apps_blog/__init__.py
|
bkesk/bad-apps-blog
|
86df1e848cd17f17bce9bb06d6c1ac1f81b23b9e
|
[
"BSD-3-Clause"
] | null | null | null |
bad_apps_blog/__init__.py
|
bkesk/bad-apps-blog
|
86df1e848cd17f17bce9bb06d6c1ac1f81b23b9e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T00:30:57.000Z
|
2022-03-31T21:31:17.000Z
|
bad_apps_blog/__init__.py
|
bkesk/bad-apps-blog
|
86df1e848cd17f17bce9bb06d6c1ac1f81b23b9e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Bad Apps Blog
Author: Brandon Eskridge (a.k.a. 7UR7L3)
(Initial commit is based on the official Flask tutorial)
About: This app began as an (essentially) exact copy
of the official Flask tutorial (linked below). It is
intended as an opportunity to practice application
security, secure design, and secure coding techniques.
At the end of the Flask tutorial, the interested student
is challenged to implement several features. In order to
achive that goal, we will attempt to implement those features
while "pushing left" (security-wise) in the process.
Official Flask tutorial : https://flask.palletsprojects.com/en/2.0.x/tutorial/
"""
import os
import secrets
from flask import Flask
import logging
logging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
APP_VERSION = '0.0.1',
DB_VERSION = '0.0.1',
DATABASE=os.path.join(app.instance_path, 'bad_apps_blog.sqlite'),
CSRF_TOKEN_AGE = 3600 # seconds
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
        app.logger.info('loading configuration from config.py in instance folder')
else:
# load the test config if passed in
test_config['SECRET_KEY'] = secrets.token_hex(32)
test_config['CSRF_TOKEN_AGE'] = 2
app.config.from_mapping(test_config)
app.logger.info('generating test configuration')
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
app.logger.info('created instance folder')
except OSError as e:
app.logger.info('instance folder already exists')
# register the config generator with the current app instance
from . import gen_config
gen_config.init_app(app)
# register the DBs with the current app instance
from . import db
db.init_app(app)
# register the authorization blueprint
from . import auth
app.register_blueprint(auth.bp)
# register the blog blueprint
from . import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
return app
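# A minimal usage sketch from an external script (the config values below are placeholders,
# not settings taken from the original project):
# from bad_apps_blog import create_app
# app = create_app({'TESTING': True, 'DATABASE': '/tmp/bad_apps_blog_test.sqlite'})
# app.run(debug=False)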
| 29.55
| 112
| 0.706853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,280
| 0.541455
|
67ce55c048774bb454c705b23d4003d7370d1d13
| 204
|
py
|
Python
|
status/urls.py
|
Khryptooo/infra_api
|
15b69dea8e0ce1795525f96d9362722151b3c8f7
|
[
"BSD-2-Clause"
] | null | null | null |
status/urls.py
|
Khryptooo/infra_api
|
15b69dea8e0ce1795525f96d9362722151b3c8f7
|
[
"BSD-2-Clause"
] | null | null | null |
status/urls.py
|
Khryptooo/infra_api
|
15b69dea8e0ce1795525f96d9362722151b3c8f7
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf.urls import patterns, url
from status import views
urlpatterns = patterns('',
url(r'^ups$', views.ups_status, name='ups_status'),
url(r'^tor$', views.tor_status, name='tor_status'),
)
| 25.5
| 52
| 0.720588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.205882
|
67ce7c38eacf87bac8bd21b2a7cec718eeabebeb
| 9,100
|
py
|
Python
|
automation/auto_update_image_pr.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 23
|
2020-04-18T14:51:41.000Z
|
2022-03-31T19:59:40.000Z
|
automation/auto_update_image_pr.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 236
|
2020-04-22T08:59:27.000Z
|
2022-03-31T07:21:12.000Z
|
automation/auto_update_image_pr.py
|
WaqasAhmedLatif/cloud-native-edition
|
1e6002f27ea971c153df59373e30d4506e9932dc
|
[
"Apache-2.0"
] | 23
|
2020-04-19T15:25:59.000Z
|
2022-03-16T17:17:36.000Z
|
import os
import json
from common import update_json_file, get_logger, exec_cmd
from yamlparser import Parser
from pathlib import Path
logger = get_logger("update-image")
# Functions that work to update gluu_versions.json
def determine_final_official_and_dev_version(tag_list):
"""
    Determine the official version, i.e. 4.1.0, 4.2.2, etc., using the oxauth repo
@param tag_list:
@return:
"""
# Check for the highest major.minor.patch i.e 4.2.0 vs 4.2.2
dev_image = ""
patch_list = []
for tag in tag_list:
patch_list.append(int(tag[4:5]))
# Remove duplicates
patch_list = list(set(patch_list))
# Sort
patch_list.sort()
highest_major_minor_patch_number = str(patch_list[-1])
versions_list = []
for tag in tag_list:
if "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
dev_image = tag[0:5] + "_dev"
# Exclude any tag with the following
if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
versions_list.append(int(tag[6:8]))
    # A case where only a dev version of a new patch is available; then a lower stable patch should be checked,
    # i.e. there is no 4.3.0_01 but there is 4.2.2_dev.
if not versions_list:
highest_major_minor_patch_number = str(int(highest_major_minor_patch_number) - 1)
for tag in tag_list:
if not dev_image and "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
dev_image = tag[0:5] + "_dev"
# Exclude any tag with the following
if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
versions_list.append(int(tag[6:8]))
# Remove duplicates
versions_list = list(set(versions_list))
# Sort
versions_list.sort()
# Return highest patch
highest_major_minor_patch_image_patch = str(versions_list[-1])
if len(highest_major_minor_patch_image_patch) == 1:
highest_major_minor_patch_image_patch = "0" + highest_major_minor_patch_image_patch
highest_major_minor_patch_image = ""
for tag in tag_list:
if "dev" not in tag and highest_major_minor_patch_image_patch in tag \
and tag[4:5] == highest_major_minor_patch_number:
highest_major_minor_patch_image = tag
return highest_major_minor_patch_image, dev_image
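# A minimal illustration (hypothetical tag names following the same naming scheme):
# determine_final_official_and_dev_version(['4.2.2_01', '4.2.2_02', '4.2.2_dev'])
# would return ('4.2.2_02', '4.2.2_dev'): the highest stable patch tag plus its matching dev tag.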
def determine_major_version(all_repos_tags):
"""
    Determine the official major version, i.e. 4.1, 4.2, etc., using the oxauth repo
@param all_repos_tags:
@return:
"""
versions_list = []
for tag in all_repos_tags["oxauth"]:
# Exclude any tag with the following
if "dev" not in tag \
and "latest" not in tag \
and "secret" not in tag \
and "gluu-engine" not in tag:
versions_list.append(float(tag[0:3]))
# Remove duplicates
versions_list = list(set(versions_list))
# Sort
versions_list.sort()
# Return highest version
return versions_list[-1]
def get_docker_repo_tag(org, repo):
"""
Returns a dictionary of all available tags for a certain repo
:param org:
:param repo:
:return:
"""
logger.info("Getting docker tag for repository {}.".format(repo))
exec_get_repo_tag_curl_command = ["curl", "-s",
"https://hub.docker.com/v2/repositories/{}/{}/tags/?page_size=100".format(org,
repo)]
stdout, stderr, retcode = None, None, None
try:
stdout, stderr, retcode = exec_cmd(" ".join(exec_get_repo_tag_curl_command))
except (IndexError, Exception):
manual_curl_command = " ".join(exec_get_repo_tag_curl_command)
logger.error("Failed to curl\n{}".format(manual_curl_command))
all_tags = json.loads(stdout)["results"]
image_tags = []
for tag in all_tags:
image_tags.append(tag["name"])
image_tags_dict = dict()
image_tags_dict[repo] = image_tags
return image_tags_dict
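# A minimal usage sketch (the returned tag names are illustrative, not actual Docker Hub data):
# get_docker_repo_tag('gluufederation', 'oxauth')
# might return {'oxauth': ['4.2.3_02', '4.2.3_01', '4.2.3_dev', ...]}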
def filter_all_repo_dictionary_tags(all_repos_tags, major_official_version):
"""
Analyze the dictionary containing all repos and keeps only the list of tags and versions matching the major version
@param all_repos_tags:
@param major_official_version:
"""
filtered_all_repos_tags = dict()
for repo, tag_list in all_repos_tags.items():
temp_filtered_tag_list = []
for tag in tag_list:
if major_official_version == tag[0:3]:
temp_filtered_tag_list.append(tag)
filtered_all_repos_tags[repo] = temp_filtered_tag_list
return filtered_all_repos_tags
def analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version):
"""
Analyze filtered dictionary and return the final dict with only one official version and one dev version
@param filtered_all_repos_tags:
@param major_official_version:
"""
final_official_version_dict = dict()
final_dev_version_dict = dict()
    # Gluu's main values.yaml
gluu_values_file = Path("../pygluu/kubernetes/templates/helm/gluu/values.yaml").resolve()
gluu_values_file_parser = Parser(gluu_values_file, True)
dev_version = ""
def update_dicts_and_yamls(name, rep, tags_list, helm_name=None):
final_tag, final_dev_tag = determine_final_official_and_dev_version(tags_list)
final_official_version_dict[name + "_IMAGE_NAME"] = "gluufederation/" + rep
final_dev_version_dict[name + "_IMAGE_NAME"] = "gluufederation/" + rep
final_official_version_dict[name + "_IMAGE_TAG"], final_dev_version_dict[name + "_IMAGE_TAG"] \
= final_tag, final_dev_tag
if rep != "upgrade":
if helm_name:
gluu_values_file_parser[helm_name]["image"]["repository"] = "gluufederation/" + rep
gluu_values_file_parser[helm_name]["image"]["tag"] = final_tag
else:
gluu_values_file_parser[rep]["image"]["repository"] = "gluufederation/" + rep
gluu_values_file_parser[rep]["image"]["tag"] = final_tag
for repo, tag_list in filtered_all_repos_tags.items():
official_version, dev_version = determine_final_official_and_dev_version(tag_list)
if repo == "casa":
update_dicts_and_yamls("CASA", repo, tag_list)
elif repo == "oxd-server":
update_dicts_and_yamls("OXD", repo, tag_list)
elif repo == "fido2":
update_dicts_and_yamls("FIDO2", repo, tag_list)
elif repo == "scim":
update_dicts_and_yamls("SCIM", repo, tag_list)
elif repo == "config-init":
update_dicts_and_yamls("CONFIG", repo, tag_list, "config")
elif repo == "cr-rotate":
update_dicts_and_yamls("CACHE_REFRESH_ROTATE", repo, tag_list)
elif repo == "certmanager":
update_dicts_and_yamls("CERT_MANAGER", repo, tag_list, "oxauth-key-rotation")
elif repo == "opendj":
update_dicts_and_yamls("LDAP", repo, tag_list, "opendj")
elif repo == "jackrabbit":
update_dicts_and_yamls("JACKRABBIT", repo, tag_list)
elif repo == "oxauth":
update_dicts_and_yamls("OXAUTH", repo, tag_list)
elif repo == "oxpassport":
update_dicts_and_yamls("OXPASSPORT", repo, tag_list)
elif repo == "oxshibboleth":
update_dicts_and_yamls("OXSHIBBOLETH", repo, tag_list)
elif repo == "oxtrust":
update_dicts_and_yamls("OXTRUST", repo, tag_list)
elif repo == "persistence":
update_dicts_and_yamls("PERSISTENCE", repo, tag_list)
elif repo == "upgrade":
update_dicts_and_yamls("UPGRADE", repo, tag_list)
gluu_versions_dict = {major_official_version: final_official_version_dict,
dev_version: final_dev_version_dict}
gluu_values_file_parser.dump_it()
return gluu_versions_dict
def main():
all_repos_tags = dict()
org = os.environ.get("ORG_NAME", "gluufederation")
gluu_docker_repositories_names_used_in_cn = ["casa", "fido2", "scim", "config-init",
"cr-rotate", "certmanager", "opendj", "jackrabbit", "oxauth",
"oxd-server", "oxpassport", "oxshibboleth",
"oxtrust", "persistence", "upgrade"]
for repo in gluu_docker_repositories_names_used_in_cn:
all_repos_tags.update(get_docker_repo_tag(org, repo))
major_official_version = str(determine_major_version(all_repos_tags))
filtered_all_repos_tags = filter_all_repo_dictionary_tags(all_repos_tags, major_official_version)
final_gluu_versions_dict = analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version)
update_json_file(final_gluu_versions_dict, '../pygluu/kubernetes/templates/gluu_versions.json')
if __name__ == '__main__':
main()
| 42.325581
| 120
| 0.656703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,357
| 0.259011
|
67ce95b83726624dc137a006b385290c23c7bf1c
| 2,767
|
py
|
Python
|
es_reporting_tool/generate_report.py
|
yugendra/elasticsearch_reporting_tool
|
bdbb5ae95efdc7552d9dfe771ecf44432246d7bb
|
[
"Apache-2.0"
] | null | null | null |
es_reporting_tool/generate_report.py
|
yugendra/elasticsearch_reporting_tool
|
bdbb5ae95efdc7552d9dfe771ecf44432246d7bb
|
[
"Apache-2.0"
] | 4
|
2021-06-01T21:49:24.000Z
|
2022-01-13T00:39:06.000Z
|
es_reporting_tool/generate_report.py
|
yugendra/elasticsearch_reporting_tool
|
bdbb5ae95efdc7552d9dfe771ecf44432246d7bb
|
[
"Apache-2.0"
] | null | null | null |
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import A3
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table, TableStyle
from reportlab.lib.enums import TA_CENTER
import datetime
class CreateReport():
def __init__(self, title='SampleReport.pdf'):
self.title = title
self.doc = SimpleDocTemplate(self.title, pagesize=A3)
self.styles = getSampleStyleSheet()
self.reportHeaderStyle = self.styles['Heading1']
self.reportHeaderStyle.alignment = TA_CENTER
self.userHeaderStyle = self.styles['Heading2']
self.TableHeaderStyle = self.styles['Heading3']
self.TableHeaderStyle.alignment = TA_CENTER
self.normalStyle = self.styles['Normal']
self.normalStyle.wordWrap = 'CJK'
self.story = []
def wrap_text(self, data, style):
row = []
        for field in data:
            row.append(Paragraph(field, style))
return row
def add_report_header(self, data):
self.story.append(Paragraph(data, self.reportHeaderStyle))
def add_user_header(self, data):
self.story.append(Paragraph(data, self.userHeaderStyle))
def add_table_data(self, data, style='TData'):
if style == 'THeader':
style = self.TableHeaderStyle
else:
style = self.normalStyle
for i in range(len(data)):
if data[i][0] == "Time":
continue
for j in range(i+1, len(data)):
iDate = datetime.datetime.strptime(data[i][0], "%Y-%m-%d %H:%M:%S")
jDate = datetime.datetime.strptime(data[j][0], "%Y-%m-%d %H:%M:%S")
if iDate > jDate:
tmp = data[i]
data[i] = data[j]
data[j] = tmp
table_halign='LEFT'
data_align='LEFT'
data1 = []
for row in data:
data1.append(self.wrap_text(row, style))
table = Table(data1, hAlign=table_halign, colWidths=[1 * inch, 1.5 * inch, 3 * inch, 0.7 * inch, 3.5 * inch])
table.setStyle(TableStyle([
('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('ALIGN',(0, 0),(0,-1), data_align),
('INNERGRID', (0, 0), (-1, -1), 0.50, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
]))
self.story.append(table)
def create(self):
self.doc.build(self.story)
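# A minimal usage sketch (file name and table rows are made up for illustration):
# report = CreateReport('sample_report.pdf')
# report.add_report_header('Daily Status Report')
# report.add_user_header('user01')
# report.add_table_data([['Time', 'Host', 'Message', 'Level', 'Details'],
#                        ['2021-01-01 10:00:00', 'node-1', 'disk usage high', 'WARN', '91% used']])
# report.create()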
| 36.893333
| 118
| 0.550777
| 2,455
| 0.887243
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.073365
|
67cee025d3929b6dcb02f8283d7e7b80eb2a3619
| 2,958
|
py
|
Python
|
fe/functional.py
|
proteneer/timemachine
|
feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701
|
[
"Apache-2.0"
] | 91
|
2019-01-05T17:03:04.000Z
|
2022-03-11T09:08:46.000Z
|
fe/functional.py
|
proteneer/timemachine
|
feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701
|
[
"Apache-2.0"
] | 474
|
2019-01-07T14:33:15.000Z
|
2022-03-31T19:15:12.000Z
|
fe/functional.py
|
proteneer/timemachine
|
feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701
|
[
"Apache-2.0"
] | 12
|
2019-01-13T00:40:36.000Z
|
2022-01-14T10:23:54.000Z
|
from jax import config
config.update("jax_enable_x64", True)
from jax import custom_jvp, numpy as np
from timemachine.lib.potentials import SummedPotential
def _make_selection_mask(compute_du_dx=False, compute_du_dp=False, compute_du_dl=False, compute_u=False):
return (compute_du_dx, compute_du_dp, compute_du_dl, compute_u)
def wrap_impl(impl, pack=lambda x: x):
"""Construct a differentiable function U(x, params, box, lam) -> float
from a single unbound potential
"""
@custom_jvp
def U(coords, params, box, lam):
selection = _make_selection_mask(compute_u=True)
result_tuple = impl.execute_selective(coords, pack(params), box, lam, *selection)
return result_tuple[3]
def U_jvp_x(coords_dot, _, coords, params, box, lam):
selection = _make_selection_mask(compute_du_dx=True)
result_tuple = impl.execute_selective(coords, pack(params), box, lam, *selection)
return np.sum(coords_dot * result_tuple[0])
def U_jvp_params(params_dot, _, coords, params, box, lam):
selection = _make_selection_mask(compute_du_dp=True)
result_tuple = impl.execute_selective(coords, pack(params), box, lam, *selection)
return np.sum(pack(params_dot) * result_tuple[1])
def U_jvp_lam(lam_dot, _, coords, params, box, lam):
selection = _make_selection_mask(compute_du_dl=True)
result_tuple = impl.execute_selective(coords, pack(params), box, lam, *selection)
return np.sum(lam_dot * result_tuple[2])
U.defjvps(U_jvp_x, U_jvp_params, None, U_jvp_lam)
return U
def construct_differentiable_interface(unbound_potentials, precision=np.float32):
"""Construct a differentiable function U(x, params, box, lam) -> float
from a collection of unbound potentials
>>> U = construct_differentiable_interface(unbound_potentials)
>>> _ = grad(U, (0,1,3))(coords, sys_params, box, lam)
This implementation computes the sum of the component potentials in Python
"""
impls = [ubp.unbound_impl(precision) for ubp in unbound_potentials]
U_s = [wrap_impl(impl) for impl in impls]
def U(coords, params, box, lam):
return np.sum(np.array([U_i(coords, p_i, box, lam) for (U_i, p_i) in zip(U_s, params)]))
return U
def construct_differentiable_interface_fast(unbound_potentials, params, precision=np.float32):
"""Construct a differentiable function U(x, params, box, lam) -> float
from a collection of unbound potentials
>>> U = construct_differentiable_interface(unbound_potentials, params)
>>> _ = grad(U, (0,1,3))(coords, sys_params, box, lam)
This implementation computes the sum of the component potentials in C++ using the SummedPotential custom op
"""
impl = SummedPotential(unbound_potentials, params).unbound_impl(precision)
def pack(params):
return np.concatenate([ps.reshape(-1) for ps in params])
U = wrap_impl(impl, pack)
return U
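# A minimal usage sketch mirroring the docstrings above (coords, sys_params, box and lam are
# assumed to come from an existing free-energy setup; they are not defined in this module):
# from jax import grad
# U = construct_differentiable_interface_fast(unbound_potentials, sys_params)
# du_dx, du_dp, du_dl = grad(U, argnums=(0, 1, 3))(coords, sys_params, box, lam)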
| 36.975
| 111
| 0.713658
| 0
| 0
| 0
| 0
| 226
| 0.076403
| 0
| 0
| 829
| 0.280257
|
67cf0d02161a3633d1e7bda727c4a5909dae5bbc
| 996
|
py
|
Python
|
utilityfiles/race.py
|
IronicNinja/covid19api
|
f96a18c646379fe144db228eaa3c69d66125628d
|
[
"MIT"
] | 1
|
2020-09-16T05:18:54.000Z
|
2020-09-16T05:18:54.000Z
|
utilityfiles/race.py
|
IronicNinja/covid19api
|
f96a18c646379fe144db228eaa3c69d66125628d
|
[
"MIT"
] | null | null | null |
utilityfiles/race.py
|
IronicNinja/covid19api
|
f96a18c646379fe144db228eaa3c69d66125628d
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from datetime import date
import math
import openpyxl
import pandas as pd
fname = 'https://www.governing.com/gov-data/census/state-minority-population-data-estimates.html'
req = Request(fname, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req)
page_soup = soup(webpage, "html.parser")
containers = page_soup.findAll("table")
container = containers[1]
A = container.findAll("tr")
tmp_list = [[], [], [], [], []]
for x in range(1, 52):
if x == 9:
continue
B = A[x].findAll("td")
for c in range(1, 6):
s = str(B[c])
s1 = s.replace('<td>', '')
s2 = s1.replace('</td>', '')
s3 = s2.replace('%', '')
tmp_list[c-1].append(float(s3))
df = pd.read_excel('states_info.xlsx')
headers_list = ['hispanic', 'white', 'black', 'asian', 'american indian']
for pos in range(5):
df[headers_list[pos]] = tmp_list[pos]
df.to_excel('states_info.xlsx')
| 25.538462
| 97
| 0.63755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.248996
|
67d227f164d327f585654ba9c51b22b4d48f67c1
| 7,601
|
py
|
Python
|
prioListe/utils.py
|
FelixTheC/allSales
|
76d955b80bf9b5bb58bd53d8ee644249cf04e1a3
|
[
"Apache-2.0"
] | null | null | null |
prioListe/utils.py
|
FelixTheC/allSales
|
76d955b80bf9b5bb58bd53d8ee644249cf04e1a3
|
[
"Apache-2.0"
] | null | null | null |
prioListe/utils.py
|
FelixTheC/allSales
|
76d955b80bf9b5bb58bd53d8ee644249cf04e1a3
|
[
"Apache-2.0"
] | null | null | null |
from django.core.exceptions import FieldError
from staff.models import Staff
import re
def get_choices():
    # choices live in a separate function so they are easier to change
STATUS_CHOICES = (
('', ''),
('Test', 'Test'),
('Fertig', 'Fertig'),
('Löschen', 'Löschen'),
('Vertrieb', 'Vertrieb'),
('Produktion', 'Produktion'),
('Bearbeitung', 'Bearbeitung'),
)
return STATUS_CHOICES
STAFFCHOICESONE = set()
for staff in Staff.objects.all():
STAFFCHOICESONE.add((staff.initialies, staff.name))
STAFFCHOICESTWO = set()
STAFFCHOICESTWO.add(('', ''))
for staff in Staff.objects.all():
STAFFCHOICESTWO.add((staff.initialies, staff.name))
def check_for_update(queryset):
try:
for object in queryset:
time_in_weeks = (object.finished_until - object.created_at) / 7
object.time_in_weeks = time_in_weeks.days
object.save()
except:
pass
def check_form_and_db(form, queryset):
"""
    compare the data from the form (before it was saved) with the data from the current object
    and check if there are changes between them
:param form:
:param queryset:
:return: boolean update
"""
update = False
if queryset.box != form.instance.box:
update = True
elif queryset.customer != form.instance.customer:
update = True
elif queryset.hardware != form.instance.hardware:
update = True
elif queryset.created_at != form.instance.created_at:
update = True
elif queryset.status != form.instance.status:
update = False
elif queryset.finished_until != form.instance.finished_until:
update = True
elif queryset.optional_status != form.instance.optional_status:
update = False
elif queryset.finished_until != form.instance.finished_until:
update = True
elif queryset.staff != form.instance.staff:
update = True
elif queryset.time_in_weeks != int(form.instance.time_in_weeks):
update = True
elif queryset.remark != form.instance.remark:
update = True
elif queryset.production_remark != form.instance.production_remark:
update = False
return update
def update_time_in_weeks(date1, date2):
days = (date2 - date1).days
if days > 7:
return days / 7
else:
return days
COLORS = {
'Fertig': '#33cc00',
'Test': '#99ff99',
'Bearbeitung': '#ffff00',
'Produktion': '#ffffcc',
'Vertrieb': '#ff99ff',
'Löschen': '#ffffff'
}
def searching(model, search_string, *args, **kwargs):
'''
usage e.g.:
t = searching(ModelName, search_string, 'Foo', 'Bar', **kwargs)
tmp = ModelName.objects.none()
for i in t:
tmp = i | tmp #merge Querysets
    :param model: Django model object
    :param search_string: self-explanatory
    :param args: datatypes that should be excluded
    :param kwargs: can contain 'exclude' or 'exact' as key with a list of values containing the field name/-s
:return: list of querysets gte 1
'''
types = [field.get_internal_type() for field in model._meta.get_fields()]
names = [f.name for f in [field for field in model._meta.get_fields()]]
field_name_dict = dict(zip(names, types))
excat_fields = []
foreignKeyFields = None
special_filter = None
if kwargs:
try:
foreignKeyFields = kwargs['foreignKeyFields']
except KeyError:
pass
try:
special_filter = kwargs['filter']
except KeyError:
pass
try:
field_name_dict = remove_items_dict(field_name_dict, kwargs['exclude'])
except KeyError:
pass
try:
excat_fields = kwargs['exact']
except KeyError:
pass
    # to use the following in a function call, e.g.:
    # data = {'exclude': ['foo', ]}
    # searching(modelname, searchstring, kwargs=data)
try:
if 'exclude' in kwargs['kwargs']:
field_name_dict = remove_items_dict(field_name_dict, kwargs['kwargs']['exclude'])
elif 'exact' in kwargs:
excat_fields = kwargs['exact']
except KeyError:
pass
if args:
field_name_dict = remove_items_dict(field_name_dict, args)
if special_filter is not None:
tmp = model.objects.filter(**{special_filter[0]: special_filter[1]})
else:
tmp = model.objects.all()
liste = []
for key, value in field_name_dict.items():
if value != 'ForeignKey' and value != 'ManyToManyField':
if key in excat_fields:
filter = f'{key}__iexact'
if len(tmp.filter(**{filter: search_string})) > 0:
liste.append(tmp.filter(**{filter: search_string}))
else:
filter = f'{key}__icontains'
if len(tmp.filter(**{filter: search_string})) > 0:
liste.append(tmp.filter(**{filter: search_string}))
elif value == 'ManyToManyField' and key == 'customer_collar':
filter = f'{key}__serialno__icontains'
if len(tmp.filter(**{filter: search_string})) > 0:
liste.append(tmp.filter(**{filter: search_string}))
else:
filter = f'{key}__pk__iexact'
if len(tmp.filter(**{filter: search_string})) > 0:
liste.append(tmp.filter(**{filter: search_string}))
else:
if foreignKeyFields is not None:
for keyfield in foreignKeyFields:
filter = f'{key}__{keyfield}__icontains'
try:
if len(tmp.filter(**{filter: search_string})) > 0:
liste.append(tmp.filter(**{filter: search_string}))
except FieldError:
pass
else:
filter = f'{key}__name__icontains'
if len(tmp.filter(**{filter: search_string})) > 0:
liste.append(tmp.filter(**{filter: search_string}))
return liste
def remove_items_dict(dictionary, keys):
'''
Remove items from dictonary
:param dictionary:
:param keys:
:return:
'''
return {key: value for key, value in dictionary.items() if key not in keys and value not in keys}
def move_ids_from_remark_to_ids(text):
'''
    extract ids from the already existing production_remark into the new field ids
    :param text:
    :return: ids as a string separated by ;
'''
range_ids = re.findall(r'[0-9]*-[0-9]*', text)
tmp_string = '; '.join(range_ids)
tmp = re.sub(r'[0-9]*-[0-9]*', '', text)
id_list = list(filter(lambda x: len(x) > 4, filter(None, re.findall(r'[\d]*', tmp))))
new_string = '; '.join(id_list)
return f'{new_string}; {tmp_string}'
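# A minimal illustration (made-up remark text):
# move_ids_from_remark_to_ids('Rework 12345, 23456 and batch 30000-30010')
# returns '12345; 23456; 30000-30010'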
def filter_ids(obj, id):
'''
:param id:
:return:
'''
queryset = obj.objects.all().only('pk', 'ids')
for i in queryset:
if i.ids is not None:
if '-' in i.ids:
x = i.ids.split('; ')
x = list(filter(lambda x: '-' in x, x))
for ids in x:
if int(ids.split('-')[0]) > int(id) or int(id) < int(ids.split('-')[1]):
return i.pk
else:
if id in i.ids:
return i.pk
else:
return None
else:
if id in i.ids:
return i.pk
return None
| 32.482906
| 104
| 0.568215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,766
| 0.232246
|
67d23e8a7d069e05acd374ed761b417602e522e5
| 287
|
py
|
Python
|
app/pydantic_models/phone.py
|
matiasbavera/fastapi-tortoise-fk-example
|
b61b202e20604a03bb36291fc534935048f17187
|
[
"Apache-2.0"
] | null | null | null |
app/pydantic_models/phone.py
|
matiasbavera/fastapi-tortoise-fk-example
|
b61b202e20604a03bb36291fc534935048f17187
|
[
"Apache-2.0"
] | null | null | null |
app/pydantic_models/phone.py
|
matiasbavera/fastapi-tortoise-fk-example
|
b61b202e20604a03bb36291fc534935048f17187
|
[
"Apache-2.0"
] | null | null | null |
from pydantic import BaseModel
from app.orm_models.phone import Phone
from tortoise.contrib.pydantic import pydantic_model_creator
Phone_Pydantic = pydantic_model_creator(Phone, name="Phone")
PhoneIn_Pydantic = pydantic_model_creator(
Phone, name="PhoneIn", exclude_readonly=True)
| 31.888889
| 60
| 0.832753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.055749
|
67d27163450c56993ca54027a1f3ba12395df50b
| 6,403
|
py
|
Python
|
suls/mealymachine.py
|
TCatshoek/lstar
|
042b0ae3a0627db7a412c828f3752a9c30928ec1
|
[
"MIT"
] | 2
|
2019-10-15T11:28:12.000Z
|
2021-01-28T15:14:09.000Z
|
suls/mealymachine.py
|
TCatshoek/lstar
|
042b0ae3a0627db7a412c828f3752a9c30928ec1
|
[
"MIT"
] | null | null | null |
suls/mealymachine.py
|
TCatshoek/lstar
|
042b0ae3a0627db7a412c828f3752a9c30928ec1
|
[
"MIT"
] | null | null | null |
# Need this to fix types
from __future__ import annotations
import tempfile
import threading
from typing import Union, Iterable, Dict, Tuple
from suls.sul import SUL
from graphviz import Digraph
import random
from itertools import product
class MealyState:
def __init__(self, name: str, edges: Dict[str, Tuple[MealyState, str]] = None):
if edges is None:
edges = {}
self.name = name
self.edges = edges
def __str__(self):
return f'[MealyState: {self.name}, edges: {[f"{a}/{o}:{n.name}" for a, (n, o) in self.edges.items()]}]'
def add_edge(self, action: str, output: str, other_state: MealyState, override=False):
if override:
self.edges[action] = (other_state, output)
else:
if action not in self.edges.keys():
self.edges[action] = (other_state, output)
else:
raise Exception(f'{action} already defined in state {self.name}')
def next(self, action) -> Tuple[MealyState, str]:
if action in self.edges.keys():
return self.edges.get(action)
else:
raise Exception(f'Invalid action {action} from state {self.name}')
def next_state(self, action) -> MealyState:
if action in self.edges.keys():
return self.edges.get(action)[0]
else:
raise Exception(f'Invalid action {action} from state {self.name}')
# A statemachine can represent a system under learning
class MealyMachine(SUL):
def __init__(self, initial_state: MealyState):
self.initial_state = initial_state
self.state: MealyState = initial_state
def __str__(self):
states = self.get_states()
#Hacky backslash thing
tab = '\t'
nl = '\n'
return f'[MealyMachine: \n { nl.join([f"{tab}{str(state)}" for state in states]) } ' \
f'\n\n\t[Initial state: {self.initial_state.name}]' \
f'\n]'
# Performs a bfs to gather all reachable states
def get_states(self):
to_visit = [self.initial_state]
visited = []
while len(to_visit) > 0:
cur_state = to_visit.pop()
if cur_state not in visited:
visited.append(cur_state)
for action, (other_state, output) in cur_state.edges.items():
if other_state not in visited and other_state not in to_visit:
to_visit.append(other_state)
return visited
# Traverses all states and collects all possible actions (i.e. the alphabet of the language)
def get_alphabet(self):
states = self.get_states()
actions = set()
for state in states:
actions = actions.union(set(state.edges.keys()))
#print(actions)
return actions
# Runs the given inputs on the state machine
def process_input(self, inputs):
last_output = None
if not isinstance(inputs, Iterable):
inputs = [inputs]
for input in inputs:
try:
nextstate, output = self.state.next(input)
#print(f'({self.state.name}) ={input}=> ({nextstate.name})')
self.state = nextstate
last_output = output
except Exception as e:
#print(e)
return "invalid_input"
return last_output
def reset(self):
self.state = self.initial_state
def render_graph(self, filename=None, format='pdf', render_options=None):
def render(filename, render_options):
if filename is None:
filename = tempfile.mktemp('.gv')
if render_options is None:
render_options = {}
# Extract color options if present
node_color = {}
if 'node_attributes' in render_options:
for state, attributes in render_options['node_attributes'].items():
if 'color' in attributes:
node_color[state] = attributes['color']
g = Digraph('G', filename=filename)
g.attr(rankdir='LR')
# Collect nodes and edges
to_visit = [self.initial_state]
visited = []
# Hacky way to draw start arrow pointing to first node
g.attr('node', shape='none')
g.node('startz', label='', _attributes={'height': '0', 'width': '0'})
# Draw initial state
g.attr('node', shape='circle')
if self.initial_state in node_color:
g.node(self.initial_state.name, color=node_color[self.initial_state], style='filled')
else:
g.node(self.initial_state.name)
g.edge('startz', self.initial_state.name)
while len(to_visit) > 0:
cur_state = to_visit.pop()
visited.append(cur_state)
g.attr('node', shape='circle')
for action, (other_state, output) in cur_state.edges.items():
# Draw other states, but only once
if other_state not in visited and other_state not in to_visit:
to_visit.append(other_state)
if other_state in node_color:
g.node(other_state.name, color=node_color[other_state], style='filled')
else:
g.node(other_state.name)
# Draw edges too
ignore_self_edges = []
ignore_edges = []
if 'ignore_self_edges' in render_options:
ignore_self_edges = render_options['ignore_self_edges']
if 'ignore_edges' in render_options:
ignore_edges = render_options['ignore_edges']
if (not (any([output.startswith(x) for x in ignore_self_edges]) and other_state == cur_state)) \
and not(any([output.startswith(x) for x in ignore_edges])):
g.edge(cur_state.name, other_state.name, label=f'{action}/{output}')
            if format is not None:
g.render(format=format, view=True)
else:
g.save()
renderthread = threading.Thread(target=render, args=(filename, render_options))
renderthread.start()
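# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the API above fits together: build two MealyStates, wire them with
# add_edge(action, output, other_state), then drive a MealyMachine with
# process_input(). The state names, inputs and outputs below are made up.
if __name__ == '__main__':
    s0 = MealyState('s0')
    s1 = MealyState('s1')
    s0.add_edge('a', 'x', s1)  # input 'a' in s0 emits 'x' and moves to s1
    s1.add_edge('a', 'y', s0)  # input 'a' in s1 emits 'y' and moves back to s0
    s0.add_edge('b', 'z', s0)  # self-loops on 'b'
    s1.add_edge('b', 'z', s1)
    mm = MealyMachine(s0)
    print(mm.get_alphabet())                  # {'a', 'b'}
    print(mm.process_input(['a', 'a', 'b']))  # output of the last transition: 'z'
    mm.reset()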
| 34.240642
| 116
| 0.562236
| 6,099
| 0.952522
| 0
| 0
| 0
| 0
| 0
| 0
| 1,195
| 0.186631
|
67d2e3d4874353fb5ea93748eaef79e0a94659bb
| 636
|
py
|
Python
|
app/email.py
|
DXYyang/shenNeng_gasAnalysis
|
d94e2451d1938c090d1377dfbd487d0c6a649188
|
[
"MIT"
] | 1
|
2020-02-16T04:32:15.000Z
|
2020-02-16T04:32:15.000Z
|
app/email.py
|
DXYyang/shenNeng_gasAnalysis
|
d94e2451d1938c090d1377dfbd487d0c6a649188
|
[
"MIT"
] | null | null | null |
app/email.py
|
DXYyang/shenNeng_gasAnalysis
|
d94e2451d1938c090d1377dfbd487d0c6a649188
|
[
"MIT"
] | null | null | null |
from threading import Thread
from flask import current_app,render_template
from flask_mail import Message
from . import mail
def send_async_email(app,msg):
with app.app_context():
mail.send(msg)
def send_email(to,subject,template,**kwargs):
app=current_app._get_current_object()
msg=Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX']+' '+subject,
sender=app.config['FLASKY_MAIL_SENDER'],recipients=[to])
msg.body=render_template(template+'.txt',**kwargs)
msg.html=render_template(template+'.html',**kwargs)
thr=Thread(target=send_async_email,args=[app,msg])
thr.start()
return thr
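# --- Illustrative usage sketch (not part of the original module) ---
# send_email() renders <template>.txt and <template>.html with the given
# keyword arguments and sends the message from a background thread. The
# address, subject and template name below are hypothetical:
#
#     send_email('user@example.com', 'Gas reading alert',
#                'mail/alert', reading=reading)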
| 35.333333
| 72
| 0.72956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.100629
|
67d3514f1ace46de9127a9a4a21e892c7ad712e0
| 29,708
|
py
|
Python
|
MAIN_FIGURES.py
|
tortugar/Schott_etal_2022
|
5cccec4d59184397df39f0bae3544b9c8294ffe2
|
[
"MIT"
] | null | null | null |
MAIN_FIGURES.py
|
tortugar/Schott_etal_2022
|
5cccec4d59184397df39f0bae3544b9c8294ffe2
|
[
"MIT"
] | null | null | null |
MAIN_FIGURES.py
|
tortugar/Schott_etal_2022
|
5cccec4d59184397df39f0bae3544b9c8294ffe2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 10 18:30:46 2021
@author: fearthekraken
"""
import AS
import pwaves
import sleepy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
#%%
### FIGURE 1C - example EEGs for NREM, IS, and REM ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=721.5, tend=728.5, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # NREM EEG
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=780.0, tend=787.0, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # IS EEG
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=818.5, tend=825.5, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # REM EEG
#%%
### FIGURE 1E - example photometry recording ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', tstart=170, tend=2900, PLOT=['EEG', 'SP', 'EMG_AMP', 'HYPNO', 'DFF'], dff_nbin=1800,
eeg_nbin=130, fmax=25, vm=[50,1800], highres=False, pnorm=0, psmooth=[2,5], flatten_tnrem=4, ma_thr=0)
#%%
### FIGURE 1F - average DF/F signal in each brain state ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'crh_photometry.txt')[1]
df = AS.dff_activity(ppath, recordings, istate=[1,2,3,4], ma_thr=20, flatten_tnrem=4, ma_state=3)
#%%
### FIGURE 1G - example EEG theta burst & DF/F signal ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', tstart=2415, tend=2444, PLOT=['SP', 'DFF'], dff_nbin=450, fmax=20,
vm=[0,5], highres=True, recalc_highres=False, nsr_seg=2.5, perc_overlap=0.8, pnorm=1, psmooth=[4,4])
#%%
### FIGURE 1H - average spectral field during REM ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'crh_photometry.txt')[1]
pwaves.spectralfield_highres_mice(ppath, recordings, pre=4, post=4, istate=[1], theta=[1,10,100,1000,10000], pnorm=1,
psmooth=[6,1], fmax=25, nsr_seg=2, perc_overlap=0.8, recalc_highres=True)
#%%
### FIGURE 2B - recorded P-waveforms ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions'
# left - example LFP trace with P-waves
AS.plot_example(ppath, 'Fincher_040221n1', tstart=16112, tend=16119, PLOT=['LFP'], lfp_nbin=7, ylims=[(-0.4, 0.2)])
# right - average P-waveform
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
pwaves.avg_waveform(ppath, recordings, istate=[], win=[0.15,0.15], mode='pwaves', plaser=False, p_iso=0, pcluster=0, clus_event='waves')
#%%
### FIGURE 2C - average P-wave frequency in each brain state ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
istate = [1,2,3,4]; p_iso=0; pcluster=0
_,_,_,_ = pwaves.state_freq(ppath, recordings, istate, plotMode='03', ma_thr=20, flatten_tnrem=4, ma_state=3,
p_iso=p_iso, pcluster=pcluster, ylim2=[-0.3, 0.1])
#%%
### FIGURE 2D - time-normalized P-wave frequency across brain state transitions ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; vm=[0.2, 2.1] # NREM --> IS --> REM --> WAKE
_, mx_pwave, _ = pwaves.stateseq(ppath, recordings, sequence=sequence, nstates=nstates, state_thres=state_thres, ma_thr=20, ma_state=3,
flatten_tnrem=4, fmax=25, pnorm=1, vm=vm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', print_stats=False)
#%%
### FIGURE 2E - example theta burst & P-waves ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/dreadds_processed/'
AS.plot_example(ppath, 'Scrabble_072420n1', tstart=11318.6, tend=11323, PLOT=['SP','EEG','LFP'], eeg_nbin=1, lfp_nbin=6, fmax=20,
vm=[0,4.5], highres=True, recalc_highres=False, nsr_seg=1, perc_overlap=0.85, pnorm=1, psmooth=[4,5])
#%%
### FIGURE 2F - averaged spectral power surrounding P-waves ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
filename = 'sp_win3'
# top - averaged spectrogram
pwaves.avg_SP(ppath, recordings, istate=[1], win=[-3,3], mouse_avg='mouse', plaser=False, pnorm=2, psmooth=[2,2], fmax=25,
vm=[0.8,1.5], pload=filename, psave=filename)
# bottom - averaged high theta power
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], bands=[(8,15)], band_colors=['green'], win=[-3,3], mouse_avg='mouse',
plaser=False, pnorm=2, psmooth=0, ylim=[0.6,1.8], pload=filename, psave=filename)
#%%
### FIGURE 2H - example DF/F signal and P-waves ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'Fritz_032819n1', tstart=2991, tend=2996.75, PLOT=['DFF','LFP_THRES_ANNOT'], dff_nbin=50, lfp_nbin=10)
#%%
### FIGURE 2I - DF/F signal surrounding P-waves ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# top - diagrams of P-waveforms
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
p_iso=0.8; pcluster=0; clus_event='waves' # single P-waves
#p_iso=0; pcluster=0.1; clus_event='cluster start' # clustered P-waves
pwaves.avg_waveform(ppath, recordings, istate=[], win=[1,1], mode='pwaves', plaser=False, p_iso=p_iso,
pcluster=pcluster, clus_event=clus_event, wform_std=False)
# middle/bottom - heatmaps & average DF/F plots
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'pwaves_photometry.txt')[1]
# single P-waves
pzscore=[2,2,2]; p_iso=0.8; pcluster=0; ylim=[-0.4,1.0]; vm=[-1,1.5]
iso_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
psmooth=(8,15), ds=1000, sf=1000)[0]
# clustered P-waves
pzscore=[2,2,2]; p_iso=0; pcluster=0.5; ylim=[-0.4,1.0]; vm=[-1,1.5]
clus_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
psmooth=(4,15), ds=1000, sf=1000)[0]
# random points
pzscore=[2,2,2]; p_iso=0.8; pcluster=0; ylim=[-0.4,1.0]; vm=[-1,1.5]
jter_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
psmooth=(8,15), ds=1000, sf=1000, jitter=10)[0]
#%%
### FIGURE 3B - example open loop opto recording ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
AS.plot_example(ppath, 'Huey_082719n1', tstart=12300, tend=14000, PLOT=['LSR', 'SP', 'HYPNO'], fmax=25, vm=[50,1800], highres=False,
pnorm=0, psmooth=[2,2], flatten_tnrem=4, ma_thr=10)
#%%
### FIGURE 3C,D - percent time spent in each brain state surrounding laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_ol.txt')[1]
BS, t, df = AS.laser_brainstate(ppath, recordings, pre=400, post=520, flatten_tnrem=4, ma_state=3, ma_thr=20, edge=10, sf=0, ci='sem', ylim=[0,80])
#%%
### FIGURE 3E - averaged SPs and frequency band power surrounding laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_ol.txt')[1]
bands=[(0.5,4), (6,10), (11,15), (55,99)]; band_labels=['delta', 'theta', 'sigma', 'gamma']; band_colors=['firebrick', 'limegreen', 'cyan', 'purple']
AS.laser_triggered_eeg_avg(ppath, recordings, pre=400, post=520, fmax=100, laser_dur=120, pnorm=1, psmooth=3, harmcs=10, iplt_level=2,
vm=[0.6,1.4], sf=7, bands=bands, band_labels=band_labels, band_colors=band_colors, ci=95, ylim=[0.6,1.3])
#%%
### FIGURE 3G - example closed loop opto recording ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
AS.plot_example(ppath, 'Cinderella_022420n1', tstart=7100, tend=10100, PLOT=['LSR', 'SP', 'HYPNO'], fmax=25, vm=[0,1500],
highres=False, pnorm=0, psmooth=[2,3], flatten_tnrem=4, ma_thr=0)
#%%
### FIGURE 3H - closed-loop ChR2 graph ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3I - eYFP controls for ChR2 ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_yfp_chr2_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3J - closed-loop iC++ graph ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_ic_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3K - eYFP controls for iC++ ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_yfp_ic_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 4B - example spontaneous & laser-triggered P-wave ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
AS.plot_example(ppath, 'Huey_101719n1', tstart=5925, tend=5930, PLOT=['LSR', 'EEG', 'LFP'], eeg_nbin=5, lfp_nbin=10)
#%%
### FIGURE 4C,D,E - waveforms & spectral power surrounding P-waves/laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
# top - averaged waveforms surrounding P-waves & laser
filename = 'wf_win025'; wform_win = [0.25,0.25]; istate=[1]
pwaves.avg_waveform(ppath, recordings, istate, mode='pwaves', win=wform_win, mouse_avg='trials', # spontaneous & laser-triggered P-waves
plaser=True, post_stim=0.1, pload=filename, psave=filename, ylim=[-0.3,0.1])
pwaves.avg_waveform(ppath, recordings, istate, mode='lsr', win=wform_win, mouse_avg='trials', # successful & failed laser
plaser=True, post_stim=0.1, pload=filename, psave=filename, ylim=[-0.3,0.1])
# middle - averaged SPs surrounding P-waves & laser
filename = 'sp_win3'; win=[-3,3]; pnorm=2
pwaves.avg_SP(ppath, recordings, istate=[1], mode='pwaves', win=win, plaser=True, post_stim=0.1, # spontaneous & laser-triggered P-waves
mouse_avg='mouse', pnorm=pnorm, psmooth=[(8,8),(8,8)], vm=[(0.82,1.32),(0.8,1.45)],
fmax=25, recalc_highres=False, pload=filename, psave=filename)
pwaves.avg_SP(ppath, recordings, istate=[1], mode='lsr', win=win, plaser=True, post_stim=0.1, # successful & failed laser
mouse_avg='mouse', pnorm=pnorm, psmooth=[(8,8),(8,8)], vm=[(0.82,1.32),(0.6,1.8)],
fmax=25, recalc_highres=False, pload=filename, psave=filename)
# bottom - average high theta power surrounding P-waves & laser
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], mode='pwaves', win=win, plaser=True, # spontaneous & laser-triggered P-waves
post_stim=0.1, mouse_avg='mouse', bands=[(8,15)], band_colors=[('green')],
pnorm=pnorm, psmooth=0, fmax=25, pload=filename, psave=filename, ylim=[0.5,1.5])
# successful and failed laser
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], mode='lsr', win=win, plaser=True, # successful & failed laser
post_stim=0.1, mouse_avg='mouse', bands=[(8,15)], band_colors=[('green')],
pnorm=pnorm, psmooth=0, fmax=25, pload=filename, psave=filename, ylim=[0.5,1.5])
#%%
### FIGURE 4F - spectral profiles: null vs spon vs success lsr vs fail lsr ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'sp_win3'
spon_win=[-0.5, 0.5]; lsr_win=[0,1]; collect_win=[-3,3]; frange=[0, 20]; pnorm=2; null=True; null_win=0; null_match='lsr'
df = pwaves.sp_profiles(ppath, recordings, spon_win=spon_win, lsr_win=lsr_win, collect_win=collect_win, frange=frange,
null=null, null_win=null_win, null_match=null_match, plaser=True, post_stim=0.1, pnorm=pnorm,
psmooth=12, mouse_avg='mouse', ci='sem', pload=filename, psave=filename)
#%%
### FIGURE 4G - probability of laser success per brainstate ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'lsr_stats'
df = pwaves.get_lsr_stats(ppath, recordings, istate=[1,2,3,4], lsr_jitter=5, post_stim=0.1,
flatten_tnrem=4, ma_thr=20, ma_state=3, psave=filename)
_ = pwaves.lsr_state_success(df, istate=[1,2,3,4]) # true laser success
_ = pwaves.lsr_state_success(df, istate=[1], jstate=[1]) # true vs sham laser success
#%%
### FIGURE 4H - latencies of elicited P-waves to laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
df = pd.read_pickle('lsr_stats.pkl')
pwaves.lsr_pwave_latency(df, istate=1, jitter=True)
#%%
### FIGURE 4I - phase preferences of spontaneous & laser-triggered P-waves ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'lsr_phases'
pwaves.lsr_hilbert(ppath, recordings, istate=1, bp_filt=[6,12], min_state_dur=30, stat='perc', mode='pwaves',
mouse_avg='trials', bins=9, pload=filename, psave=filename)
#%%
### FIGURE 5B,C - example recordings of hm3dq + saline vs cno ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
AS.plot_example(ppath, 'Dahl_030321n1', tstart=3960, tend=5210, PLOT=['EEG', 'SP', 'HYPNO', 'EMG_AMP'], eeg_nbin=100, # saline
fmax=25, vm=[15,2200], psmooth=(1,2), flatten_tnrem=4, ma_thr=0, ylims=[[-0.6,0.6],'','',[0,300]])
AS.plot_example(ppath, 'Dahl_031021n1', tstart=3620, tend=4870, PLOT=['EEG', 'SP', 'HYPNO', 'EMG_AMP'], eeg_nbin=100, # CNO
fmax=25, vm=[15,2200], psmooth=(1,2), flatten_tnrem=4, ma_thr=0, ylims=[[-0.6,0.6],'','',[0,300]])
#%%
### FIGURE 5D - hm3dq percent time spent in REM ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5E - hm3dq mean REM duration ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='dur', plotMode='03',
group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5F - hm3dq mean REM frequency ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='freq', plotMode='03',
group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5G - hm3dq percent time spent in Wake/NREM/IS ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
for s in [2,3,4]:
pwaves.pairT_from_df(df.iloc[np.where(df['state']==s)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = ' + str(s) + ' ###')
#%%
### FIGURE 5H - hm3dq probability of IS-->REM transition ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='transition probability', plotMode='03',
group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df, 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5I - example P-waves during NREM-->IS-->REM transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
AS.plot_example(ppath, 'King_071020n1', ['HYPNO', 'EEG', 'LFP'], tstart=16097, tend=16172, ylims=['',(-0.6, 0.6), (-0.3, 0.15)]) # saline
AS.plot_example(ppath, 'King_071520n1', ['HYPNO', 'EEG', 'LFP'], tstart=5600, tend=5675, ylims=['',(-0.6, 0.6), (-0.3, 0.15)]) # CNO
#%%
### FIGURE 5J - hm3dq time-normalized P-wave frequency across brain state transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=True); e=e['0.25']
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; cvm=[0.3,2.5]; evm= [0.28,2.2] # NREM --> IS --> REM --> WAKE
mice,cmx,cspe = pwaves.stateseq(ppath, c, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # saline
vm=cvm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
mice,emx,espe = pwaves.stateseq(ppath, e, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # CNO
vm=evm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
# plot timecourses
pwaves.plot_activity_transitions([cmx, emx], [mice, mice], plot_id=['gray', 'blue'], group_labels=['saline', 'cno'],
xlim=nstates, xlabel='Time (normalized)', ylabel='P-waves/s', title='NREM-->tNREM-->REM-->Wake')
#%%
### FIGURE 5K - hm3dq average P-wave frequency in each brain state ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=True); e=e['0.25']
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
# top - mean P-wave frequency
mice, x, cf, cw = pwaves.state_freq(ppath, c, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # saline
mice, x, ef, ew = pwaves.state_freq(ppath, e, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # CNO
pwaves.plot_state_freq(x, [mice, mice], [cf, ef], [cw, ew], group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# bottom - change in P-wave frequency from saline to CNO
fdif = (ef-cf)
df = pd.DataFrame(columns=['Mouse','State','Change'])
for i,state in enumerate(x):
df = df.append(pd.DataFrame({'Mouse':mice, 'State':[state]*len(mice), 'Change':fdif[:,i]}))
plt.figure(); sns.barplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='lightblue', ci=68)
sns.swarmplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='black', size=9); plt.show()
# stats
for i,s in enumerate([1,2,3,4]):
p = stats.ttest_rel(cf[:,i], ef[:,i], nan_policy='omit')
print(f'saline vs cno, state={s} -- T={round(p.statistic,3)}, p-value={round(p.pvalue,5)}')
#%%
### FIGURE 5L - hm4di percent time spent in REM ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5M - hm4di mean REM duration ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='dur', plotMode='03',
group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5N - hm4di mean REM frequency ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='freq', plotMode='03',
group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5O - hm4di percent time spent in Wake/NREM/IS ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
for s in [2,3,4]:
pwaves.pairT_from_df(df.iloc[np.where(df['state']==s)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = ' + str(s) + ' ###')
#%%
### FIGURE 5P - hm4di probability of IS-->REM transition ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='transition probability', plotMode='03',
group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df, 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5Q - hm4di time-normalized P-wave frequency across brain state transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=True); e=e['5']
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; cvm=[0.3,2.5]; evm= [0.28,2.2] # NREM --> IS --> REM --> WAKE
mice,cmx,cspe = pwaves.stateseq(ppath, c, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # saline
vm=cvm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
mice,emx,espe = pwaves.stateseq(ppath, e, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # CNO
vm=evm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
# plot timecourses
pwaves.plot_activity_transitions([cmx, emx], [mice, mice], plot_id=['gray', 'red'], group_labels=['saline', 'cno'],
xlim=nstates, xlabel='Time (normalized)', ylabel='P-waves/s', title='NREM-->tNREM-->REM-->Wake')
#%%
### FIGURE 5R - hm4di average P-wave frequency in each brain state ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=True); e=e['5']
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
# top - mean P-wave frequency
mice, x, cf, cw = pwaves.state_freq(ppath, c, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # saline
mice, x, ef, ew = pwaves.state_freq(ppath, e, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # CNO
pwaves.plot_state_freq(x, [mice, mice], [cf, ef], [cw, ew], group_colors=['gray', 'red'], group_labels=['saline','cno'])
# bottom - change in P-wave frequency from saline to CNO
fdif = (ef-cf)
df = pd.DataFrame(columns=['Mouse','State','Change'])
for i,state in enumerate(x):
df = df.append(pd.DataFrame({'Mouse':mice, 'State':[state]*len(mice), 'Change':fdif[:,i]}))
plt.figure(); sns.barplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='salmon', ci=68)
sns.swarmplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='black', size=9); plt.show()
# stats
for i,s in enumerate([1,2,3,4]):
p = stats.ttest_rel(cf[:,i], ef[:,i], nan_policy='omit')
print(f'saline vs cno, state={s} -- T={round(p.statistic,3)}, p-value={round(p.pvalue,5)}')
| 60.752556
| 150
| 0.660832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,127
| 0.340885
|
67d3ce8adb8ddc67219cf049efed17f327e1aab1
| 42
|
py
|
Python
|
bitmovin/services/filters/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 44
|
2016-12-12T17:37:23.000Z
|
2021-03-03T09:48:48.000Z
|
bitmovin/services/filters/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 38
|
2017-01-09T14:45:45.000Z
|
2022-02-27T18:04:33.000Z
|
bitmovin/services/filters/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 27
|
2017-02-02T22:49:31.000Z
|
2019-11-21T07:04:57.000Z
|
from .filter_service import FilterService
| 21
| 41
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67d3edf3fcff0ea5f8066746c234cf386931fcea
| 4,177
|
py
|
Python
|
inspect_population.py
|
puzis/OverflowPrediction
|
01341df701e513025cb427d4cdf1db0868a5963b
|
[
"MIT"
] | 5
|
2019-11-19T11:53:23.000Z
|
2022-03-11T05:54:46.000Z
|
inspect_population.py
|
puzis/OverflowPrediction
|
01341df701e513025cb427d4cdf1db0868a5963b
|
[
"MIT"
] | 5
|
2020-05-29T23:53:14.000Z
|
2022-03-12T00:05:11.000Z
|
inspect_population.py
|
erap129/EEGNAS
|
1d9c94b106d40317146f7f09d79fad489f1059dc
|
[
"MIT"
] | 1
|
2021-12-17T14:25:04.000Z
|
2021-12-17T14:25:04.000Z
|
import pickle
from copy import deepcopy
from graphviz import Digraph
from torch.nn import Conv2d, MaxPool2d, ELU, Dropout, BatchNorm2d
import pandas as pd
from EEGNAS.model_generation.abstract_layers import IdentityLayer, ConvLayer, PoolingLayer, ActivationLayer
from EEGNAS.model_generation.custom_modules import IdentityModule
SHORT_NAMES = {Conv2d: 'C',
MaxPool2d: 'M',
ELU: 'E',
Dropout: 'D',
BatchNorm2d: 'B'}
def get_layer_stats(layer, delimiter):
if type(layer) == ELU or type(layer) == BatchNorm2d or type(layer) == Dropout:
return ''
elif type(layer) == Conv2d:
return f'{delimiter}f:{layer.out_channels},k:{layer.kernel_size[0]}'
elif type(layer) == MaxPool2d:
return f'{delimiter}k:{layer.kernel_size[0]},s:{layer.stride[0]}'
else:
return ''
def export_eegnas_table(models, filename):
model_series = []
for model_idx, model in enumerate(models):
layer_list = []
module_list = list(model._modules.values())[:-1]
module_list = [l for l in module_list if type(l) != IdentityModule]
for layer_idx, layer in enumerate(module_list):
layer_str = f'{SHORT_NAMES[type(layer)]}'
layer_str += get_layer_stats(layer, ' ')
layer_list.append(layer_str)
layer_series = pd.Series(layer_list)
layer_series.name = f'Model {model_idx}'
model_series.append(pd.Series(layer_list))
df = pd.DataFrame(model_series).transpose()
df.columns = [f'Model {i+1}' for i in range(len(models))]
df.to_csv(filename)
def plot_eegnas_model(model, f, subgraph_idx, nodes):
nodes = deepcopy(nodes)
multiplier = 1
module_list = list(model._modules.values())[:-1]
module_list = [l for l in module_list if type(l) != IdentityModule]
for layer_idx, layer in enumerate(module_list):
if type(layer) == BatchNorm2d or type(layer) == Dropout or type(layer) == ELU:
if layer_idx < len(module_list) - 1 and type(module_list[layer_idx + 1]) == type(layer):
multiplier += 1
continue
layer_str = f'{SHORT_NAMES[type(layer)]}'
layer_str += get_layer_stats(layer, ',')
layer_str = f'<<B>{layer_str}</B>>'
if multiplier > 1:
f.node(f'{subgraph_idx}_{layer_idx}', label=layer_str, xlabel=f'<<B>X {multiplier}</B>>')
else:
f.node(f'{subgraph_idx}_{layer_idx}', label=layer_str)
nodes.append(f'{subgraph_idx}_{layer_idx}')
if type(layer) == BatchNorm2d or type(layer) == Dropout or type(layer) == ELU:
if layer_idx < len(module_list) - 1 and type(module_list[layer_idx + 1]) != type(layer):
multiplier = 1
nodes.append('output')
for idx in range(len(nodes) - 1):
f.edge(nodes[idx], nodes[idx+1])
def create_ensemble_digraph(weighted_population, n_members):
f = Digraph('EEGNAS model', filename='EEGNAS_model.gv', graph_attr={'dpi':'300'}, format='png')
f.attr('node', shape='box')
f.node(f'input', label='<<B>Input: (Bsize, 240, 22)</B>>')
f.node(f'output', label='<<B>Output: (Bsize, 5, 22)</B>>')
nodes = ['input']
for i in range(n_members):
plot_eegnas_model(weighted_population[i]['finalized_model'], f, i, nodes)
f.render('test_eegnas_graphviz', view=False)
sum_path = "/home/user/Documents/eladr/netflowinsights/CDN_overflow_prediction/eegnas_models/195_10_input_height_240_normalized_handovers_all_inheritance_fold9_architectures_iteration_1.p"
per_path = '/home/user/Documents/eladr/netflowinsights/CDN_overflow_prediction/eegnas_models/197_10_input_height_240_normalized_per_handover_handovers_all_inheritance_fold9_architectures_iteration_1.p'
weighted_population_per = pickle.load(open(per_path, 'rb'))
weighted_population_sum = pickle.load(open(sum_path, 'rb'))
# export_eegnas_table([weighted_population_per[i]['finalized_model'] for i in range(5)], 'per_architectures.csv')
# export_eegnas_table([weighted_population_sum[i]['finalized_model'] for i in range(5)], 'sum_architectures.csv')
create_ensemble_digraph(weighted_population_per, 5)
| 44.913978
| 201
| 0.677041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,170
| 0.280105
|
67d91682b7361980dedb029fa4ec3aa3743a4f6d
| 3,910
|
py
|
Python
|
implementations/rest/bin/authhandlers.py
|
djsincla/SplunkModularInputsPythonFramework
|
1dd215214f3d2644cb358e41f4105fe40cff5393
|
[
"Apache-2.0"
] | 3
|
2020-08-31T00:59:26.000Z
|
2021-10-19T22:01:00.000Z
|
implementations/rest/bin/authhandlers.py
|
djsincla/SplunkModularInputsPythonFramework
|
1dd215214f3d2644cb358e41f4105fe40cff5393
|
[
"Apache-2.0"
] | null | null | null |
implementations/rest/bin/authhandlers.py
|
djsincla/SplunkModularInputsPythonFramework
|
1dd215214f3d2644cb358e41f4105fe40cff5393
|
[
"Apache-2.0"
] | null | null | null |
import requests
from requests.auth import AuthBase
import hmac
import base64
import hashlib
import urlparse
import urllib
#add your custom auth handler class to this module
class MyEncryptedCredentialsAuthHAndler(AuthBase):
def __init__(self,**args):
# setup any auth-related data here
#self.username = args['username']
#self.password = args['password']
pass
def __call__(self, r):
# modify and return the request
#r.headers['foouser'] = self.username
#r.headers['foopass'] = self.password
return r
#template
class MyCustomAuth(AuthBase):
def __init__(self,**args):
# setup any auth-related data here
#self.username = args['username']
#self.password = args['password']
pass
def __call__(self, r):
# modify and return the request
#r.headers['foouser'] = self.username
#r.headers['foopass'] = self.password
return r
class MyCustomOpsViewAuth(AuthBase):
def __init__(self,**args):
self.username = args['username']
self.password = args['password']
self.url = args['url']
pass
def __call__(self, r):
#issue a PUT request (not a get) to the url from self.url
payload = {'username': self.username,'password':self.password}
        auth_response = requests.put(self.url, params=payload, verify=False)
#get the auth token from the auth_response.
#I have no idea where this is in your response,look in your documentation ??
tokenstring = "mytoken"
headers = {'X-Opsview-Username': self.username,'X-Opsview-Token':tokenstring}
r.headers = headers
return r
class MyUnifyAuth(AuthBase):
def __init__(self,**args):
self.username = args['username']
self.password = args['password']
self.url = args['url']
pass
def __call__(self, r):
        login_url = '%s?username=%s&login=login&password=%s' % (self.url, self.username, self.password)
login_response = requests.get(login_url)
cookies = login_response.cookies
if cookies:
r.cookies = cookies
return r
#example of adding a client certificate
class MyAzureCertAuthHAndler(AuthBase):
def __init__(self,**args):
self.cert = args['certPath']
pass
def __call__(self, r):
r.cert = self.cert
return r
#example of adding a client certificate
class GoogleBigQueryCertAuthHandler(AuthBase):
def __init__(self,**args):
self.cert = args['certPath']
pass
def __call__(self, r):
r.cert = self.cert
return r
#cloudstack auth example
class CloudstackAuth(AuthBase):
def __init__(self,**args):
# setup any auth-related data here
self.apikey = args['apikey']
self.secretkey = args['secretkey']
pass
def __call__(self, r):
# modify and return the request
parsed = urlparse.urlparse(r.url)
url = parsed.geturl().split('?',1)[0]
url_params= urlparse.parse_qs(parsed.query)
#normalize the list value
for param in url_params:
url_params[param] = url_params[param][0]
url_params['apikey'] = self.apikey
keys = sorted(url_params.keys())
sig_params = []
for k in keys:
sig_params.append(k + '=' + urllib.quote_plus(url_params[k]).replace("+", "%20"))
query = '&'.join(sig_params)
signature = base64.b64encode(hmac.new(
self.secretkey,
msg=query.lower(),
digestmod=hashlib.sha1
).digest())
query += '&signature=' + urllib.quote_plus(signature)
r.url = url + '?' + query
return r
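# --- Illustrative usage sketch (not part of the original module) ---
# Every class above implements the requests AuthBase protocol: requests calls
# the handler with the prepared request and the handler returns it, with extra
# headers, cookies, a signature or a client certificate attached. The endpoint
# below is hypothetical:
#
#     auth = CloudstackAuth(apikey='APIKEY', secretkey='SECRETKEY')
#     response = requests.get('https://cloud.example.com/client/api'
#                             '?command=listVirtualMachines&response=json',
#                             auth=auth)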
| 29.179104
| 100
| 0.586701
| 3,584
| 0.916624
| 0
| 0
| 0
| 0
| 0
| 0
| 1,082
| 0.276726
|
67d9abf1948658a2c5e38ae12ec4d8b8adf3bd58
| 1,515
|
py
|
Python
|
sdk/core/azure-core/azure/core/pipeline/policies/authentication_async.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/azure/core/pipeline/policies/authentication_async.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
sdk/core/azure-core/azure/core/pipeline/policies/authentication_async.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.core.pipeline import PipelineRequest, PipelineResponse
from azure.core.pipeline.policies import AsyncHTTPPolicy
from azure.core.pipeline.policies.authentication import _BearerTokenCredentialPolicyBase
class AsyncBearerTokenCredentialPolicy(_BearerTokenCredentialPolicyBase, AsyncHTTPPolicy):
# pylint:disable=too-few-public-methods
"""Adds a bearer token Authorization header to requests.
:param credential: The credential.
:type credential: ~azure.core.credentials.TokenCredential
:param str scopes: Lets you specify the type of access needed.
"""
async def send(self, request: PipelineRequest) -> PipelineResponse:
"""Aync flavor that adds a bearer token Authorization header to request and sends request to next policy.
:param request: The pipeline request object to be modified.
:type request: ~azure.core.pipeline.PipelineRequest
:return: The pipeline response object
:rtype: ~azure.core.pipeline.PipelineResponse
"""
token = await self._credential.get_token(*self._scopes)
self._update_headers(request.http_request.headers, token)
return await self.next.send(request) # type: ignore
| 48.870968
| 113
| 0.681848
| 990
| 0.653465
| 0
| 0
| 0
| 0
| 612
| 0.40396
| 937
| 0.618482
|
67da024b54f0853f0965d1f566e700aad7c2a74c
| 152
|
py
|
Python
|
pbt/population/__init__.py
|
automl/HPO_for_RL
|
d82c7ddd6fe19834c088137570530f11761d9390
|
[
"Apache-2.0"
] | 9
|
2021-06-22T08:54:19.000Z
|
2022-03-28T09:10:59.000Z
|
pbt/population/__init__.py
|
automl/HPO_for_RL
|
d82c7ddd6fe19834c088137570530f11761d9390
|
[
"Apache-2.0"
] | null | null | null |
pbt/population/__init__.py
|
automl/HPO_for_RL
|
d82c7ddd6fe19834c088137570530f11761d9390
|
[
"Apache-2.0"
] | null | null | null |
from .trial import Trial, NoTrial
from .member import Member
from .population import Population
__all__ = ['Trial', 'NoTrial', 'Member', 'Population']
| 25.333333
| 54
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.236842
|
67da0e87556ec7b055d13f1258cbac356a9a64d2
| 7,003
|
py
|
Python
|
darth/process.py
|
OOXXXXOO/DARTH
|
bd899acc7a777157f393c7078b9deccbf6e7e461
|
[
"Apache-2.0"
] | 11
|
2020-06-30T03:57:41.000Z
|
2021-05-20T13:19:41.000Z
|
darth/process.py
|
ceresman/darth
|
038cd7cdc18771b73873bd5a8653c89655336448
|
[
"Apache-2.0"
] | 3
|
2021-09-08T02:14:52.000Z
|
2022-03-12T00:37:29.000Z
|
darth/process.py
|
ceresman/darth
|
038cd7cdc18771b73873bd5a8653c89655336448
|
[
"Apache-2.0"
] | 6
|
2020-07-01T06:11:43.000Z
|
2020-09-11T05:57:41.000Z
|
import multiprocessing
from tqdm import tqdm
import os
import gdal
from .downloader import downloader
from .obsclient import bucket
from .vector import Vector
def Process(
VectorDataSource,
WgsCord,
Class_key,
DataSourcesType='Google China',
DataSetName="DataSet",
Remote_dataset_root="DataSets/",
Thread_count=2,
Nodata=0,
Merge=False,
Keep_local=True,
Over_write=True,
Upload=False,
**args
):
"""
Step I:
Init Downlaoder,Bucket,Vector
Step II:
Init default vector layer
Init area , imagery level of mission
Step III:
Download
Merge(Optional)
Rasterize
Step IV:
Upload to Bucket
Last Step:
If don't save temp dataset ,clean the cache
args:
for obs server:
ak : access_key_id,
sk : secret_access_key,
server : server
bn : bucketname
"""
print("\033[1;32# ---------------------------------------------------------------------------- #\033[0m")
print("\033[1;32# DARTH #\033[0m")
print("\033[1;32# ---------------------------------------------------------------------------- #\033[0m")
print("# ===== Bucket para preview\033[1;32 %s\033[0m"%args)
print("\n\n\n# ---------------------------------------------------------------------------- #")
print("# ---------------------------------- Step I ---------------------------------- #")
print("# ---------------------------------------------------------------------------- #")
Download=downloader(DataSourcesType,thread_count=Thread_count)
    remote_metaname=Remote_dataset_root+DataSetName+"/.meta"
    if Upload:
        Bucket=bucket(
            access_key_id=args["ak"],
            secret_access_key=args["sk"],
            server=args["server"],
            bucketName=args["bn"]
        )
        if not Over_write:
            Bucket.check(remote_metaname)
    Vec=Vector(VectorDataSource)
print("\n\n\n# ---------------------------------------------------------------------------- #")
print("# ---------------------------------- Step II --------------------------------- #")
print("# ---------------------------------------------------------------------------- #")
Vec.getDefaultLayerbyName(Class_key)
Download.add_cord(*WgsCord)
Vec.crop_default_layer_by_rect(Download.mercator_cord)
print("\n\n\n# ---------------------------------------------------------------------------- #")
print("# --------------------------------- Step III --------------------------------- #")
print("# ---------------------------------------------------------------------------- #")
image_dir=os.path.join(DataSetName,'images/')
targets_dir=os.path.join(DataSetName,'targets/')
print("# ===== imagery dir :\033[1;32%s\033[0m"%image_dir)
print("# ===== targets dir :\033[1;32%s\033[0m"%targets_dir)
if not os.path.exists("./"+DataSetName):
os.makedirs(image_dir)
os.makedirs(targets_dir)
local_metaname=DataSetName+"/.meta"
with open(local_metaname,"w") as meta:
if Upload:
meta.write(
"Bucket Meta:\n"+str(Bucket.getBucketMetadata())
)
meta.write(
"Vector object Meta:\n"+str(Vec.meta)
)
meta.close()
if Upload:
bucket_imagery_root=os.path.join(Remote_dataset_root,image_dir)
bucket_targets_root=os.path.join(Remote_dataset_root,targets_dir)
bucket_description_root=os.path.join(Remote_dataset_root,DataSetName+"/")
print("# ===== Bucket imagery root :\033[1;32%s\033[0m",bucket_imagery_root)
print("# ===== Bucket Targets root :\033[1;32%s\033[0m",bucket_targets_root)
print("# ===== Bucket Description root :\033[1;32%s\033[0m",bucket_description_root)
Bucket.cd("DataSets")
Bucket.ls()
print("\033[5;36# ===== Start Downloading.....\033[0m")
Download.download(output_path=image_dir)
tiles=[i["path"] for i in Download.result]
Vec.generate(tiles,output_path=targets_dir)
if Upload:
print("\n\n\n# ---------------------------------------------------------------------------- #")
print("# ---------------------------------- Step IV --------------------------------- #")
print("# ---------------------------------------------------------------------------- #")
print("# ===== Upload dataset meta\033[1;32%s\033[0m"%remote_metaname)
Bucket.upload(
remote_path=remote_metaname,
local_path=local_metaname
)
        ## Saving index json file
        remote_json_path=os.path.join(bucket_description_root,Download.json_path.split('/')[-1])
        print("# ===== Upload dataset description\033[1;32m%s\033[0m"%remote_json_path)
if not Over_write:
Bucket.check(remote_json_path)
Bucket.upload(
remote_path=remote_json_path,
local_path=Download.json_path
)
print("# ===== upload imagry to bucket.....")
for tile in tqdm(tiles):
file_name=tile.split('/')[-1]
remote_tiles=os.path.join(bucket_imagery_root,file_name)
if not Over_write:
Bucket.check(remote_tiles)
Bucket.upload(
remote_path=remote_tiles,
local_path=tile
)
print("# ===== upload target to bucket.....")
for target in tqdm(Vec.labellist):
file_name=target.split('/')[-1]
remote_target=os.path.join(bucket_targets_root,file_name)
if not Over_write:
Bucket.check(remote_target)
Bucket.upload(
remote_path=remote_target,
local_path=target
)
print("# ===== uploaded bucket:")
Bucket.ls()
if not Keep_local:
print("# ------------------------------- Clear-cache ------------------------------- #")
cmd="rm -rf "+DataSetName
os.system(cmd)
print("# -------------------------------- Clear-Done ------------------------------- #")
print("# ---------------------------------------------------------------------------- #")
print("# DataSet process done #")
print("# ---------------------------------------------------------------------------- #")
def main():
vecfile="/workspace/data/osm-2017-07-03-v3.6.1-china_beijing.mbtiles"
macfile='/Users/tanwenxuan/workspace/Data/osm-2017-07-03-v3.6.1-china_beijing.mbtiles'
tqfile="/workspace/osm-2017-07-03-v3.6.1-china_beijing.mbtiles"
    Process(tqfile,Keep_local=False,Over_write=True)
if __name__ == '__main__':
main()
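# --- Illustrative usage sketch (not part of the original module) ---
# main() above only exercises the local path; with Upload=True the bucket
# credentials documented in the Process() docstring are passed as keyword
# arguments. Every value below is a placeholder, and the exact layout of
# WgsCord depends on downloader.add_cord():
#
#     Process("osm-china_beijing.mbtiles",
#             (116.2, 39.8, 116.5, 40.0, 15),   # area of interest + zoom (assumed layout)
#             "landuse",                        # Class_key to rasterize
#             DataSetName="BeijingDemo", Upload=True,
#             ak="ACCESS_KEY", sk="SECRET_KEY",
#             server="https://obs.example.com", bn="my-bucket")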
| 31.977169
| 109
| 0.456519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,157
| 0.450807
|
67dbe149e9deb1f839afee4ecf248d5698ff9007
| 1,016
|
py
|
Python
|
setup.py
|
Willd14469/cj8-patient-panthers
|
b977091c19cd0e7299f91ebd94ce25c086661fd7
|
[
"MIT"
] | 1
|
2021-10-04T09:42:58.000Z
|
2021-10-04T09:42:58.000Z
|
setup.py
|
Willd14469/cj8-patient-panthers
|
b977091c19cd0e7299f91ebd94ce25c086661fd7
|
[
"MIT"
] | 5
|
2021-07-17T13:24:42.000Z
|
2021-07-17T13:35:32.000Z
|
setup.py
|
Willd14469/cj8-patient-panthers
|
b977091c19cd0e7299f91ebd94ce25c086661fd7
|
[
"MIT"
] | null | null | null |
import sys
from setuptools import setup
required_packages = ["boombox", "Pillow", "PyYAML", "rich"]
win_packages = ["keyboard"]
unix_packages = ["pynput"]
WIN = "win32"
LINUX = "linux"
MACOS = "darwin"
if sys.platform == WIN:
required_packages += win_packages
elif sys.platform in (LINUX, MACOS):
required_packages += unix_packages
setup(
name="pantheras_box",
version="0.1.0",
packages=[
"pantheras_box",
"pantheras_box.story",
"pantheras_box.sounds",
"pantheras_box.backend",
"pantheras_box.frontend",
"pantheras_box.keyboard_handlers",
],
url="",
license="MIT",
author="Patient Panthers",
author_email="",
description="Pantheras box TUI game.",
install_requires=required_packages,
entry_points={
"console_scripts": [
"pantheras-box = pantheras_box.run:run_game",
],
},
package_data={"": ["**/*.txt", "**/*.yaml", "**/*.png", "**/*.wav"]},
include_package_data=True,
)
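# Illustrative note (not part of the original file): after `pip install .`,
# the entry_points block above exposes the game as the `pantheras-box`
# console command.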
| 23.627907
| 73
| 0.616142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 387
| 0.380906
|
67dc3420f8889bf1e85452c17cc2bb0c45148c0c
| 2,609
|
py
|
Python
|
lunch_handler.py
|
wimo7083/Wheel-Of-Lunch-Slack-Bot
|
7bcb8cc6a4ccd1b6034a9e3a60b470a1934962ef
|
[
"MIT"
] | 1
|
2018-03-27T04:01:19.000Z
|
2018-03-27T04:01:19.000Z
|
lunch_handler.py
|
wimo7083/Wheel-Of-Lunch-Slack-Bot
|
7bcb8cc6a4ccd1b6034a9e3a60b470a1934962ef
|
[
"MIT"
] | 2
|
2018-04-22T22:25:44.000Z
|
2018-05-26T03:10:08.000Z
|
lunch_handler.py
|
wimo7083/Wheel-Of-Lunch-Slack-Bot
|
7bcb8cc6a4ccd1b6034a9e3a60b470a1934962ef
|
[
"MIT"
] | null | null | null |
from zipcodes import is_valid
from random import randint
from all_lunch_locs import call_lunch_api
default_max = 30
default_range = 20
def random_zip():
# because what matters is good food, not close food.
random_zip = 0
# because strings are required for this module
while not is_valid(str(random_zip)):
range_start = 10 ** (4)
range_end = (10 ** 5) - 1
random_zip = randint(range_start, range_end)
return str(random_zip)
def within_lunch_range(input_number):
return int(input_number) <= default_max
def set_values_with_default(loc=None, range=default_range):
    if loc is None:  # pick a fresh random zip per call, not once at definition time
        loc = random_zip()
    return {'location': loc, 'range': range}
def two_params(first_param, second_param):
if is_valid(first_param) and within_lunch_range(second_param):
return set_values_with_default(first_param, second_param)
else:
return set_values_with_default()
def split_params(param_text):
if not param_text: # no params, default random zip code, 20 miles
return set_values_with_default()
params = param_text.split()
if len(params) == 2: # two values
return two_params(params[0], params[1])
if len(params) == 1 and is_valid(params[0]): # one value
return set_values_with_default(loc=params[0])
else:
return set_values_with_default()
def select_random_location(lunch_response):
number_locs = len(lunch_response['businesses'])
selected_loc = randint(0, number_locs - 1)
return lunch_response['businesses'][selected_loc]
def build_response_text(loc_dict):
return f'The Wheel of Lunch has selected {loc_dict["name"]} at {" ".join(loc_dict["location"]["display_address"])}'
def create_lunch_event(request):
param_dict = split_params(request.get('text'))
response = call_lunch_api(location=param_dict['location'], range=param_dict['range'])
location = select_random_location(response.json())
return build_response_text(location)
if __name__ == '__main__':
# format of the json
# CombinedMultiDict([ImmutableMultiDict([]), ImmutableMultiDict(
# [('token', 'workspace token'), ('team_id', 'team_id'), ('team_domain', 'some_string_name'),
# ('channel_id', 'some_channel_id'), ('channel_name', 'some_channel_name'), ('user_id', 'user_id_requested'), ('user_name', 'user_name_requested'),
# ('command', '/lunch'), ('text', '80233'), #<---- args
# ('response_url', 'response url'),
# ('trigger_id', 'slash trigger command')])])
print(create_lunch_event({'text': '80020 20'}))
print(create_lunch_event({'text': '20'}))
| 31.817073
| 156
| 0.690303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 858
| 0.328862
|
67e03d999e85af82b3115a02553d48dddb7a3aa2
| 1,414
|
py
|
Python
|
py-insta/__init__.py
|
ItsTrakos/Py-insta
|
483725f13b7c7eab0261b461c7ec507d1109a9f4
|
[
"Unlicense"
] | null | null | null |
py-insta/__init__.py
|
ItsTrakos/Py-insta
|
483725f13b7c7eab0261b461c7ec507d1109a9f4
|
[
"Unlicense"
] | null | null | null |
py-insta/__init__.py
|
ItsTrakos/Py-insta
|
483725f13b7c7eab0261b461c7ec507d1109a9f4
|
[
"Unlicense"
] | null | null | null |
"""
# -*- coding: utf-8 -*-
__author__ = "Trakos"
__email__ = "mhdeiimhdeiika@gmail.com"
__version__ = 1.0.0"
__copyright__ = "Copyright (c) 2019 -2021 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
Description:
py-Insta Is A Python Library
Scrape Instagram Data
And Print It Or You Can Define It Into A Variable...
"""
__version__ = 1.0
import requests
from bs4 import BeautifulSoup
__url__ = "https://www.instagram.com/{}/"
def Insta(username):
try:
response = requests.get(__url__.format(username.replace('@','')),timeout=5) # InCase Someone Types @UserName
if '404' in str(response): # If The Username Is Invalid
data = 'No Such Username'
return data
else:
soup = BeautifulSoup(response.text, "html.parser")
meta = soup.find("meta", property="og:description")
try:
s = meta.attrs['content'].split(' ')
data = {
'Followers': s[0],
'Following': s[2],
'Posts': s[4],
'Name': s[13]
}
return data
            except (AttributeError, IndexError, KeyError):
                return 'No Such Username'
    except (requests.ConnectionError, requests.Timeout):
        return 'No Internet Connection'
| 32.883721
| 117
| 0.562942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.062942
|
67e244309b1b3c160456702586e33422cb197d21
| 1,182
|
py
|
Python
|
pyopenproject/business/services/command/membership/create.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 5
|
2021-02-25T15:54:28.000Z
|
2021-04-22T15:43:36.000Z
|
pyopenproject/business/services/command/membership/create.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 7
|
2021-03-15T16:26:23.000Z
|
2022-03-16T13:45:18.000Z
|
pyopenproject/business/services/command/membership/create.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 6
|
2021-06-18T18:59:11.000Z
|
2022-03-27T04:58:52.000Z
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.post_request import PostRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.membership.membership_command import MembershipCommand
from pyopenproject.model import membership as mem
class Create(MembershipCommand):
def __init__(self, connection, membership):
"""Constructor for class Create, from MembershipCommand
:param connection: The connection data
:param membership: The membership to create
"""
super().__init__(connection)
self.membership = membership
def execute(self):
try:
json_obj = PostRequest(connection=self.connection,
headers={"Content-Type": "application/json"},
context=f"{self.CONTEXT}",
json=self.membership.__dict__).execute()
return mem.Membership(json_obj)
except RequestError as re:
raise BusinessError("Error creating membership") from re
| 43.777778
| 99
| 0.685279
| 797
| 0.674281
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.204738
|
67e2f36fcb3cfb98bcd8a0637b9a6793dd11a7cc
| 5,783
|
py
|
Python
|
lottery/branch/singular_values.py
|
NogaBar/open_lth
|
09bcea21e69708549ecff2659690162a6c45f9ca
|
[
"MIT"
] | null | null | null |
lottery/branch/singular_values.py
|
NogaBar/open_lth
|
09bcea21e69708549ecff2659690162a6c45f9ca
|
[
"MIT"
] | null | null | null |
lottery/branch/singular_values.py
|
NogaBar/open_lth
|
09bcea21e69708549ecff2659690162a6c45f9ca
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank
from platforms.platform import get_platform
from foundations import paths
import json
import os
import datasets.registry
import copy
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import tqdm
import seaborn as sns
import pandas as pd
import numpy as np
from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence
sns.set_style("whitegrid")
class Branch(base.Branch):
def branch_function(self, seed: int, erank_path: str = '', coherence_path: str = '',
frobenius_path: str = '', min_singular_path: str = '', nuclear_path: str = '',
normalized: bool = False, batch_average: int = 1):
# Randomize the mask.
orig_mask = Mask.load(self.level_root)
best_mask = Mask()
start_step = self.lottery_desc.str_to_step('0ep')
# Use level 0 model for dense pre-pruned model
if not get_platform().is_primary_process: return
base_model = models.registry.load(self.level_root.replace(f'level_{self.level}', 'level_0'), start_step,
self.lottery_desc.model_hparams)
orig_model = PrunedModel(base_model, Mask.ones_like(base_model))
model_graduate = copy.deepcopy(orig_model)
model = copy.deepcopy(orig_model)
lth_model = PrunedModel(copy.deepcopy(base_model), orig_mask)
# Randomize while keeping the same layerwise proportions as the original mask.
prunable_tensors = set(orig_model.prunable_layer_names) - set(orig_model.prunable_conv_names)
tensors = {k[6:]: v.clone() for k, v in orig_model.state_dict().items() if k[6:] in prunable_tensors}
train_loader = datasets.registry.get(self.lottery_desc.dataset_hparams, train=True)
input = []
offset = 1 if batch_average > 1 else 0
for b in range(batch_average):
input.append(list(train_loader)[b+offset][0])
singular_values = []
eranks_values = []
# lth_features = lth_model.intermediate(input)
# _, s, _ = torch.svd(lth_features[-1], compute_uv=False)
# if normalized:
# s = s / s[0]
# singular_values.append(s)
eranks = np.load(os.path.join(self.level_root, '../', erank_path), allow_pickle=True)
coherence = np.load(os.path.join(self.level_root, '../', coherence_path), allow_pickle=True)
frobenius = np.load(os.path.join(self.level_root, '../', frobenius_path), allow_pickle=True)
min_singular = np.load(os.path.join(self.level_root, '../', min_singular_path), allow_pickle=True)
nuclear = np.load(os.path.join(self.level_root, '../', nuclear_path), allow_pickle=True)
erank_seeds = []
coherence_seeds = []
frobenius_seeds = []
min_singular_seeds = []
nuclear_seeds = []
for layer in range(eranks.shape[0]):
erank_seeds.append(np.argmax(eranks[layer, :]))
coherence_seeds.append(np.argmax(coherence[layer, :]))
frobenius_seeds.append(np.argmax(frobenius[layer, :]))
min_singular_seeds.append(np.argmax(min_singular[layer, :]))
nuclear_seeds.append(np.argmax(nuclear[layer, :]))
# Assign all masks to model
for b in range(batch_average):
lth_features = lth_model.intermediate(input[b])
_, s, _ = torch.svd(lth_features[-1], compute_uv=False)
if normalized:
s = s / s[0]
eranks_values.append(erank(lth_features[-1]))
singular_values.append(s)
for seeds in [erank_seeds, coherence_seeds, frobenius_seeds, min_singular_seeds, nuclear_seeds, [seed] * len(erank_seeds)]:
curr_mask = Mask()
for i, (name, param) in enumerate(tensors.items()):
curr_mask[name] = shuffle_tensor(orig_mask[name], int(seed + seeds[i])).int()
model_graduate.register_buffer(PrunedModel.to_mask_name(name), curr_mask[name].float())
features = model_graduate.intermediate(input[b])
_, s, _ = torch.svd(features[-1], compute_uv=False)
if normalized:
s = s / s[0]
eranks_values.append(erank(features[-1]))
singular_values.append(s)
model_graduate = copy.deepcopy(orig_model)
# features = lth_model(in)
types = ['lth', 'erank', 'mutual coherence', 'frobenius', 'min singular', 'nuclear', 'random']
data = pd.concat([pd.DataFrame(
{'svd_value': list(singular_values[i].detach().numpy()), 'type': [types[i % len(types)]] * len(singular_values[i]),
'svd_index': list(range(len(singular_values[i])))}) for i in range(len(types) * batch_average)], ignore_index=True)
#
f = sns.lineplot(data=data.loc[data['type'] != 'nuclear'], x='svd_index', y='svd_value', hue='type', markers=True, dashes=False, style="type")
f.set(yscale='log')
f.get_figure().savefig(os.path.join(self.branch_root, 'svd_plot.pdf'))
@staticmethod
def description():
return "Plot singular values."
@staticmethod
def name():
return 'singular_values'
| 45.896825
| 187
| 0.649144
| 4,845
| 0.8378
| 0
| 0
| 137
| 0.02369
| 0
| 0
| 829
| 0.143351
|
67e342235525736d0490c23bf879ad0c51964c88
| 6,400
|
py
|
Python
|
parser.py
|
Saevon/DMP-Career-Share
|
e3486080d1e17b93b6676bdf59e0dc89c524c9f6
|
[
"MIT"
] | null | null | null |
parser.py
|
Saevon/DMP-Career-Share
|
e3486080d1e17b93b6676bdf59e0dc89c524c9f6
|
[
"MIT"
] | null | null | null |
parser.py
|
Saevon/DMP-Career-Share
|
e3486080d1e17b93b6676bdf59e0dc89c524c9f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from collections import OrderedDict
from decimal import Decimal
from parser_data import InlineList, DuplicationList
from state import State, StateMachine
from type_check import is_int, is_float, is_sci_notation
from format import format
from error import DMPException
class ParserStateMachine(StateMachine):
def __init__(self, options):
self.data = OrderedDict()
initial = NeutralState(self.data)
initial.parent = initial
super(ParserStateMachine, self).__init__(initial, options)
def get_data(self):
return self.data
def preprocess(self, val):
return val.strip()
class DataState(State):
def __init__(self, data):
super(DataState, self).__init__()
self.data = data
class NeutralState(DataState):
def run(self, line):
if '=' in line:
key, val = [val.strip() for val in line.split('=')]
old_data = self.data.get(key, None)
if old_data is None:
# First time we got said data, just add it in
self.data[key] = self.read_data(val)
elif isinstance(old_data, DuplicationList):
# The stored data is a list, append to it
self.data[key].append(val)
else:
# We got the same key? Turn the stored data into a list
old_val = self.data[key]
self.data[key] = DuplicationList()
self.data[key].append(old_val)
self.data[key].append(val)
return self.finish_state()
else:
self.debug('= DICT =')
return self.rerun_with_state(
DictState(self.data).set_parent(self.parent)
)
def read_data(self, val):
if ',' in val:
space_formatted = ', ' in val
val = [subval.strip() for subval in val.split(',')]
val = [self.read_data(subval) for subval in val]
val = InlineList(val)
val.space_formatted = space_formatted
elif val == 'True':
val = True
elif val == 'False':
val = False
elif is_sci_notation(val):
val = Decimal(val)
elif is_int(val):
val = Decimal(val)
elif is_float(val):
val = Decimal(val)
return val
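# Rough examples of how NeutralState.read_data coerces values (assuming is_int,
# is_float and is_sci_notation behave as their names suggest):
#     read_data("True")     -> True
#     read_data("3.5")      -> Decimal("3.5")
#     read_data("1, 2, 3")  -> InlineList([Decimal("1"), Decimal("2"), Decimal("3")])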
class DictState(DataState):
def __init__(self, data):
super(DictState, self).__init__(data)
self.val = OrderedDict()
self.run = self.state_name
def state_name(self, val):
self.debug('= NAME = ')
self.name = val
self.run = self.state_open
def state_open(self, val):
self.debug('= OPEN = ')
if val != '{':
raise State.Error("Expected dict open brace")
self.depth += 1
self.run = self.state_data
def state_data(self, val):
if val == '}':
self.debug('= CLOSED = ')
if not self.data.get(self.name, False):
self.data[self.name] = DuplicationList()
self.data.get(self.name).append(self.val)
self.depth -= 1
return self.finish_state()
else:
self.debug('= DATA = ')
return self.rerun_with_state(
NeutralState(self.val).set_parent(self)
)
class PostProcessor(object):
'''
Module for post processing
'''
PROCESSORS = {}
def register_processor(mapping, name):
def wrapper(func):
mapping[name] = func
return func
return wrapper
@classmethod
def run(Class, data):
return Class().process(data)
def process(self, data):
'''
Does special post-processing based on a file schema
'''
        # Full save files nest scenarios under a GAME node, while standalone
        # scenario files expose "name" at the top level.
        if "GAME" in data.keys():
            scenarios = data["GAME"][0]["SCENARIO"]
            for scenario in scenarios:
                if "name" in scenario.keys():
                    self.process_scenario(scenario)
elif "name" in data.keys():
self.process_scenario(data)
return data
def process_scenario(self, scenario):
processor = self.PROCESSORS.get(scenario["name"], False)
if processor:
processor(self, scenario)
@register_processor(PROCESSORS, "ResearchAndDevelopment")
def process_rnd(self, scenario):
# We know for sure that each tech has a list of parts
# but the list is a duplication list (therefore sometimes parses as a single item)
for tech in scenario.get("Tech", {}):
if "part" in tech.keys() and not isinstance(tech["part"], list):
tech["part"] = DuplicationList([tech["part"]])
def load(fp, options=None):
config = {
# 'verbose': True,
}
if options is not None:
config.update(options)
machine = ParserStateMachine(config)
try:
machine.runAll(fp)
except State.Error as err:
raise DMPException.wraps(err)
return PostProcessor.run(machine.get_data())
def dump(data, options=None):
config = {
# 'verbose': True,
}
if options is not None:
config.update(options)
lines = []
    for key, val in data.items():
lines += format(key, val)
# Adds Trailing newline
lines.append('')
return '\n'.join(lines)
def _test(infile, outfile):
with open(infile, 'r') as fp:
data = load(fp)
with open(infile, 'r') as fp:
raw = fp.read()
# print json.dumps(data, indent=4)
out = dump(data)
with open(outfile, 'w') as fp:
fp.write(out)
import subprocess
subprocess.call(['diff', infile, outfile])
subprocess.call(['rm', outfile])
if __name__ == "__main__":
ALL_DATA = [
"ContractSystem.txt",
"Funding.txt",
"PCScenario.txt",
"ProgressTracking.txt",
"Reputation.txt",
"ResearchAndDevelopment.txt",
"ResourceScenario.txt",
"ScenarioDestructibles.txt",
"ScenarioNewGameIntro.txt",
"ScenarioUpgradeableFacilities.txt",
"StrategySystem.txt",
"VesselRecovery.txt",
]
outfile = './tmp.txt'
import os.path
for filename in ALL_DATA:
infile = os.path.join('../Universe/Scenarios/Saevon/', filename)
_test(infile, outfile)
| 26.122449
| 90
| 0.569688
| 4,440
| 0.69375
| 0
| 0
| 508
| 0.079375
| 0
| 0
| 1,089
| 0.170156
|
67e41af80998f84e9f552dffe5a9fc7f2b6c4124
| 1,795
|
py
|
Python
|
scripts/redact_cli_py/redact/io/blob_reader.py
|
jhapran/OCR-Form-Tools
|
77e80227f7285c419f72b12edbbc8c316b973874
|
[
"MIT"
] | 412
|
2020-03-02T21:43:17.000Z
|
2022-03-24T17:20:33.000Z
|
scripts/redact_cli_py/redact/io/blob_reader.py
|
jhapran/OCR-Form-Tools
|
77e80227f7285c419f72b12edbbc8c316b973874
|
[
"MIT"
] | 388
|
2020-03-05T14:08:31.000Z
|
2022-03-25T19:07:05.000Z
|
scripts/redact_cli_py/redact/io/blob_reader.py
|
jhapran/OCR-Form-Tools
|
77e80227f7285c419f72b12edbbc8c316b973874
|
[
"MIT"
] | 150
|
2020-03-03T17:29:11.000Z
|
2022-03-16T23:55:27.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project
# root for license information.
from typing import List
from pathlib import Path
from azure.storage.blob import ContainerClient
from redact.types.file_bundle import FileBundle
class BlobReader():
def __init__(self, container_url: str, prefix: str):
self.container_client = ContainerClient.from_container_url(
container_url)
self.prefix = prefix
def download_bundles(self, to: str) -> List[FileBundle]:
blobs = self.container_client.list_blobs(name_starts_with=self.prefix)
all_file_name_list = [Path(blob.name).name for blob in blobs]
file_bundles = FileBundle.from_names(all_file_name_list)
for bundle in file_bundles:
image_blob_path = self.prefix + bundle.image_file_name
fott_blob_path = self.prefix + bundle.fott_file_name
ocr_blob_path = self.prefix + bundle.ocr_file_name
image_path = Path(to, bundle.image_file_name)
fott_path = Path(to, bundle.fott_file_name)
ocr_path = Path(to, bundle.ocr_file_name)
with open(image_path, 'wb') as image_file, \
open(fott_path, 'wb') as fott_file, \
open(ocr_path, 'wb') as ocr_file:
image_file.write(
self.container_client.
download_blob(image_blob_path).readall())
fott_file.write(
self.container_client.
download_blob(fott_blob_path).readall())
ocr_file.write(
self.container_client.
download_blob(ocr_blob_path).readall())
return file_bundles
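# Minimal usage sketch; the container URL (with SAS token) and prefix are
# placeholders:
#     reader = BlobReader("https://<account>.blob.core.windows.net/<container>?<sas>", "train/")
#     bundles = reader.download_bundles(to="local_folder")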
| 37.395833
| 78
| 0.640111
| 1,488
| 0.828969
| 0
| 0
| 0
| 0
| 0
| 0
| 166
| 0.092479
|
67e4a190f4b21b618d8a69e714cec31032c3687f
| 8,111
|
py
|
Python
|
layers/util/mapping_functions.py
|
meder411/spherical-package
|
73d51a25da5891d12e4c04d8ad2e6f1854ffa121
|
[
"BSD-3-Clause"
] | 8
|
2020-06-13T19:49:06.000Z
|
2022-02-24T07:16:02.000Z
|
layers/util/mapping_functions.py
|
meder411/spherical-package
|
73d51a25da5891d12e4c04d8ad2e6f1854ffa121
|
[
"BSD-3-Clause"
] | 4
|
2020-07-03T08:44:13.000Z
|
2021-09-17T12:18:57.000Z
|
layers/util/mapping_functions.py
|
meder411/spherical-package
|
73d51a25da5891d12e4c04d8ad2e6f1854ffa121
|
[
"BSD-3-Clause"
] | 3
|
2020-06-10T23:30:20.000Z
|
2020-12-29T13:50:01.000Z
|
import torch
import math
from .grids import *
from .conversions import *
# =============================================================================
# Equirectangular mapping functions
# =============================================================================
#
# Note that there is no concept of padding for spherical images because there
# are no image boundaries.
#
def equirectangular_kernel(shape, kernel_size, dilation=1):
"""
Returns a kernel sampling grid with angular spacing according to the provided shape (and associated computed angular resolution) of an equirectangular image
shape: (H, W)
kernel_size: (kh, kw)
"""
# For convenience
kh, kw = kernel_size
# Get equirectangular grid resolution
res_lon, res_lat = get_equirectangular_grid_resolution(shape)
# Build the kernel according to the angular resolution of the equirectangular image
dlon = torch.zeros(kernel_size)
dlat = torch.zeros(kernel_size)
for i in range(kh):
cur_i = i - (kh // 2)
for j in range(kw):
cur_j = j - (kw // 2)
dlon[i, j] = cur_j * dilation * res_lon
# Flip sign is because +Y is down
dlat[i, j] = cur_i * dilation * -res_lat
# Returns the kernel differentials as kh x kw
return dlon, dlat
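# A rough worked example of the kernel above (assuming
# get_equirectangular_grid_resolution returns (2*pi/W, pi/H)): for shape
# (4, 16) and a 3x3 kernel, dlon varies across a row as (-pi/8, 0, +pi/8)
# and dlat varies down a column as (+pi/4, 0, -pi/4), the sign flip coming
# from +Y pointing down.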
def grid_projection_map(shape, kernel_size, stride=1, dilation=1):
# For convenience
H, W = shape
kh, kw = kernel_size
# Get lat/lon mesh grid and resolution
lon, lat = spherical_meshgrid(shape)
# Get the kernel differentials
dlon, dlat = equirectangular_kernel(shape, kernel_size, dilation)
# Equalize views
lat = lat.view(H, W, 1)
lon = lon.view(H, W, 1)
dlon = dlon.view(1, 1, kh * kw)
dlat = dlat.view(1, 1, kh * kw)
# Compute the "projection"
map_lat = lat + dlat
map_lon = lon + dlon
# Convert the spherical coordinates to pixel coordinates
# H x W x KH*KW x 2
map_pixels = convert_spherical_to_image(
torch.stack((map_lon, map_lat), -1), shape)
# Adjust the stride of the map accordingly
map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
# Return the pixel sampling map
# H x W x KH*KW x 2
return map_pixels
def inverse_gnomonic_projection_map(shape, kernel_size, stride=1, dilation=1):
# For convenience
H, W = shape
kh, kw = kernel_size
# Get lat/lon mesh grid and resolution
lon, lat = spherical_meshgrid(shape)
# Get the kernel differentials
dlon, dlat = equirectangular_kernel(shape, kernel_size, dilation)
# Equalize views
lat = lat.view(H, W, 1)
lon = lon.view(H, W, 1)
dlon = dlon.view(1, 1, kh * kw)
dlat = dlat.view(1, 1, kh * kw)
# Compute the inverse gnomonic projection of each tangent grid (the kernel) back onto sphere at each pixel of the equirectangular image.
rho = (dlon**2 + dlat**2).sqrt()
nu = rho.atan()
map_lat = (nu.cos() * lat.sin() + dlat * nu.sin() * lat.cos() / rho).asin()
map_lon = lon + torch.atan2(
dlon * nu.sin(),
rho * lat.cos() * nu.cos() - dlat * lat.sin() * nu.sin())
# Handle the (0,0) case
map_lat[..., [kh * kw // 2]] = lat
map_lon[..., [kh * kw // 2]] = lon
# Compensate for longitudinal wrap around
map_lon = ((map_lon + math.pi) % (2 * math.pi)) - math.pi
# Convert the spherical coordinates to pixel coordinates
# H x W x KH*KW x 2
map_pixels = convert_spherical_to_image(
torch.stack((map_lon, map_lat), -1), shape)
# Adjust the stride of the map accordingly
map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
# Return the pixel sampling map
# H x W x KH*KW x 2
return map_pixels
def inverse_equirectangular_projection_map(shape,
kernel_size,
stride=1,
dilation=1):
# For convenience
H, W = shape
kh, kw = kernel_size
# Get lat/lon mesh grid and resolution
lon, lat = spherical_meshgrid(shape)
# Get the kernel differentials
dlon, dlat = equirectangular_kernel(shape, kernel_size, dilation)
# Equalize views
lat = lat.view(H, W, 1)
lon = lon.view(H, W, 1)
dlon = dlon.view(1, 1, kh * kw)
dlat = dlat.view(1, 1, kh * kw)
# Compute the inverse equirectangular projection of each tangent grid (the kernel) back onto sphere at each pixel of the equirectangular image.
# Compute the projection back onto sphere
map_lat = lat + dlat
map_lon = lon + dlon / map_lat.cos()
# Compensate for longitudinal wrap around
map_lon = ((map_lon + math.pi) % (2 * math.pi)) - math.pi
# Convert the spherical coordinates to pixel coordinates
# H x W x KH*KW x 2
map_pixels = convert_spherical_to_image(
torch.stack((map_lon, map_lat), -1), shape)
# Adjust the stride of the map accordingly
map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
# Return the pixel sampling map
# H x W x KH*KW x 2
return map_pixels
# =============================================================================
# Cube map mapping functions
# =============================================================================
def cube_kernel(cube_dim, kernel_size, dilation=1):
"""
Returns a kernel sampling grid with angular spacing according to the provided cube dimension (and associated computed angular resolution) of a cube map
cube_dim: length of side of square face of cube map
kernel_size: (kh, kw)
"""
# For convenience
kh, kw = kernel_size
cube_res = 1 / cube_dim
# Build the kernel according to the angular resolution of the cube face
dx = torch.zeros(kernel_size)
dy = torch.zeros(kernel_size)
for i in range(kh):
cur_i = i - (kh // 2)
for j in range(kw):
cur_j = j - (kw // 2)
dx[i, j] = cur_j * dilation * cube_res
# Flip sign is because +Y is down
dy[i, j] = cur_i * dilation * -cube_res
# Returns the kernel differentials as kh x kw
return dx, dy
def inverse_cube_face_projection_map(cube_dim,
kernel_size,
stride=1,
dilation=1,
polar=False):
"""
Creates a sampling map which models each face of the cube as an gnomonic projection (equatorial aspect) of the sphere. Warps the kernel according to the inverse gnomonic projection for the face.
"""
# For convenience
kh, kw = kernel_size
# Get a meshgrid of a cube face in terms of spherical coordinates
face_lon, face_lat = cube_face_spherical_meshgrid(cube_dim, polar)
# Get the kernel differentials
dx, dy = cube_kernel(cube_dim, kernel_size, dilation)
# Equalize views
face_lat = face_lat.view(cube_dim, cube_dim, 1)
face_lon = face_lon.view(cube_dim, cube_dim, 1)
dx = dx.view(1, 1, kh * kw)
dy = dy.view(1, 1, kh * kw)
# Compute the inverse gnomonic projection of each tangent grid (the kernel) back onto sphere at each pixel of the cube face
rho = (dx**2 + dy**2).sqrt()
nu = rho.atan()
map_lat = (nu.cos() * face_lat.sin() +
dy * nu.sin() * face_lat.cos() / rho).asin()
map_lon = face_lon + torch.atan2(
dx * nu.sin(),
rho * face_lat.cos() * nu.cos() - dy * face_lat.sin() * nu.sin())
# Handle the (0,0) case
map_lat[..., [kh * kw // 2]] = face_lat
map_lon[..., [kh * kw // 2]] = face_lon
# Create the sample map in terms of spherical coordinates
map_face = torch.stack((map_lon, map_lat), -1)
# Convert the cube coordinates on the sphere to pixels in the cube map
map_pixels = convert_spherical_to_cube_face(map_face, cube_dim)
# Adjust the stride of the map accordingly
map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
# Return the pixel sampling map
    # cube_dim x cube_dim x KH*KW x 2
return map_pixels
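# Minimal usage sketch (sizes are arbitrary; the result follows the
# H x W x KH*KW x 2 layout noted in the comments above):
#     sample_map = inverse_gnomonic_projection_map((64, 128), (3, 3))
#     # expected: a 64 x 128 x 9 x 2 tensor of pixel coordinates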
| 33.378601
| 198
| 0.601159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,307
| 0.407718
|
67e4a6a4b62a36140c3ec2606810cde8cf6567ae
| 8,164
|
py
|
Python
|
src/lambda_router/routers.py
|
jpaidoussi/lambda-router
|
c7909e6667f2fc837f34f54ccffcc409e33cebb6
|
[
"BSD-3-Clause"
] | null | null | null |
src/lambda_router/routers.py
|
jpaidoussi/lambda-router
|
c7909e6667f2fc837f34f54ccffcc409e33cebb6
|
[
"BSD-3-Clause"
] | null | null | null |
src/lambda_router/routers.py
|
jpaidoussi/lambda-router
|
c7909e6667f2fc837f34f54ccffcc409e33cebb6
|
[
"BSD-3-Clause"
] | 1
|
2021-03-05T06:50:26.000Z
|
2021-03-05T06:50:26.000Z
|
import json
from typing import Any, Callable, Dict, Optional
import attr
from .interfaces import Event, Router
@attr.s(kw_only=True)
class SingleRoute(Router):
"""
Routes to a single defined route without any conditions.
:param route: The single defined route. Only set via ``add_route``.
"""
route: Optional[Callable] = attr.ib(init=False, default=None)
def add_route(self, *, fn: Callable) -> None:
"""
Adds the single route.
:param fn: The callable to route to.
:type fn: callable
:raises ValueError: Raised when a single route has already been defined.
"""
if self.route is not None:
raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")
self.route = fn
def get_route(self, *, event: Optional[Event]) -> Callable:
"""
Returns the defined route
:raises ValueError: Raised if no route is defined.
:rtype: callable
"""
if self.route is None:
raise ValueError("No route defined.")
return self.route
def dispatch(self, *, event: Event) -> Any:
"""
Gets the configured route and invokes the callable.
:param event: The event to pass to the callable route.
"""
route = self.get_route(event=event)
return route(event=event)
@attr.s(kw_only=True)
class EventField(Router):
"""
Routes on a the value of the specified top-level ``key`` in the
given ``Event.raw`` dict.
:param key: The name of the top-level key to look for when routing.
:param routes: The routes mapping. Only set via ``add_route``
"""
key: str = attr.ib(kw_only=True)
routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)
def add_route(self, *, fn: Callable, key: str) -> None:
"""
Adds the route with the given key.
:param fn: The callable to route to.
:type fn: callable
:param key: The key to associate the route with.
        :type key: str
"""
self.routes[key] = fn
def get_route(self, *, event: Event) -> Callable:
"""
Returns the matching route for the value of the ``key`` in the
given event.
:raises ValueError: Raised if no route is defined or routing key is
not present in the event.
:rtype: callable
"""
field_value: str = event.raw.get(self.key, None)
if field_value is None:
raise ValueError(f"Routing key ({self.key}) not present in the event.")
try:
return self.routes[field_value]
except KeyError:
raise ValueError(f"No route configured for given field ({field_value}).")
def dispatch(self, *, event: Event) -> Any:
"""
Gets the configured route and invokes the callable.
:param event: The event to pass to the callable route.
"""
route = self.get_route(event=event)
return route(event=event)
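# Minimal routing sketch, assuming an Event implementation whose ``raw`` dict
# holds the routing key; the handler and event object below are placeholders:
#     router = EventField(key="action")
#     router.add_route(fn=lambda event: "created", key="create")
#     router.dispatch(event=event_with_raw_action_create)  # -> "created"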
@attr.s(kw_only=True)
class SQSMessage:
meta: Dict[str, Any] = attr.ib(factory=dict)
body: Dict[str, Any] = attr.ib(factory=dict)
key: str = attr.ib()
event: Event = attr.ib()
@classmethod
def from_raw_sqs_message(cls, *, raw_message: Dict[str, Any], key_name: str, event: Event):
meta = {}
attributes = raw_message.pop("attributes", None)
if attributes:
meta.update(attributes)
        body = raw_message.pop("body", "")
        message_attributes = raw_message.pop("messageAttributes", None)
        key = None
        if message_attributes:
            key_attribute = message_attributes.get(key_name, None)
if key_attribute is not None:
key = key_attribute["stringValue"]
for k, value in raw_message.items():
meta[k] = value
# Attempt to decode json body.
body = json.loads(body)
return cls(meta=meta, body=body, key=key, event=event)
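    # Rough shape of the raw SQS record expected above (values are placeholders;
    # keys other than "body" and "messageAttributes" end up in ``meta``, and
    # "body" must be a JSON string because it goes through json.loads):
    #     {"messageId": "...",
    #      "attributes": {"ApproximateReceiveCount": "1"},
    #      "messageAttributes": {"<key_name>": {"stringValue": "some-route"}},
    #      "body": "{\"hello\": 1}"}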
@attr.s(kw_only=True)
class SQSMessageField(Router):
"""
Processes all message records in a given ``Event``, routing each based on
on the configured key.
:param key: The name of the message-level key to look for when routing.
:param routes: The routes mapping. Only set via ``add_route``
"""
key: str = attr.ib(kw_only=True)
routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)
def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=self.key, event=event)
def add_route(self, *, fn: Callable, key: str) -> None:
"""
Adds the route with the given key.
:param fn: The callable to route to.
:type fn: callable
:param key: The key to associate the route with.
        :type key: str
"""
self.routes[key] = fn
def get_route(self, *, message: SQSMessage) -> Callable:
"""
Returns the matching route for the value of the ``key`` in the
given message.
:raises ValueError: Raised if no route is defined or routing key is
not present in the message.
:rtype: callable
"""
field_value: str = message.key
if field_value is None:
raise ValueError(f"Routing key ({self.key}) not present in the message.")
try:
return self.routes[field_value]
except KeyError:
raise ValueError(f"No route configured for given field ({field_value}).")
def dispatch(self, *, event: Event) -> Any:
"""
Iterates over all the message records in the given Event and executes the
applicable callable as determined by the configured routes.
:param event: The event to parse for messages.
"""
messages = event.raw.get("Records", None)
if messages is None:
raise ValueError("No messages present in Event.")
for raw_message in messages:
message = self._get_message(raw_message, event=event)
route = self.get_route(message=message)
# Process each message now.
route(message=message)
# SQS Lambdas don't return a value.
return None
@attr.s(kw_only=True)
class GenericSQSMessage(Router):
"""
Routes to a single defined route without any conditions.
:param route: The single defined route. Only set via ``add_route``.
"""
route: Optional[Callable] = attr.ib(init=False, default=None)
def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=None, event=event)
def add_route(self, *, fn: Callable) -> None:
"""
Adds the single route.
:param fn: The callable to route to.
:type fn: callable
:raises ValueError: Raised when a single route has already been defined.
"""
if self.route is not None:
raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")
self.route = fn
def get_route(self, *, message: SQSMessage) -> Callable:
"""
Returns the defined route
:raises ValueError: Raised if no route is defined.
:rtype: callable
"""
if self.route is None:
raise ValueError("No route defined.")
return self.route
def dispatch(self, *, event: Event) -> Any:
"""
Gets the configured route and invokes the callable.
:param event: The event to pass to the callable route.
"""
messages = event.raw.get("Records", None)
if messages is None:
raise ValueError("No messages present in Event.")
for raw_message in messages:
message = self._get_message(raw_message, event=event)
route = self.get_route(message=message)
# Process each message now.
route(message=message)
# SQS Lambdas don't return a value.
return None
| 32.268775
| 114
| 0.614037
| 7,925
| 0.970725
| 0
| 0
| 8,035
| 0.984199
| 0
| 0
| 3,731
| 0.457006
|
67e5a6a6c74d4339ea14061f1806e706d149cac0
| 6,026
|
py
|
Python
|
Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py
|
473867143/Prometheus
|
df1e1b0d861490223ac8b94d8cc4796537172292
|
[
"BSD-3-Clause"
] | 1,217
|
2020-07-02T13:15:18.000Z
|
2022-03-31T06:17:44.000Z
|
Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py
|
473867143/Prometheus
|
df1e1b0d861490223ac8b94d8cc4796537172292
|
[
"BSD-3-Clause"
] | 167
|
2020-07-12T15:35:43.000Z
|
2022-03-31T11:57:40.000Z
|
Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py
|
473867143/Prometheus
|
df1e1b0d861490223ac8b94d8cc4796537172292
|
[
"BSD-3-Clause"
] | 270
|
2020-07-02T13:28:00.000Z
|
2022-03-28T05:43:08.000Z
|
"""autogenerated by genpy from multi_map_server/VerticalOccupancyGridList.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class VerticalOccupancyGridList(genpy.Message):
_md5sum = "7ef85cc95b82747f51eb01a16bd7c795"
_type = "multi_map_server/VerticalOccupancyGridList"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 x
float32 y
int32[] upper
int32[] lower
int32[] mass
"""
__slots__ = ['x','y','upper','lower','mass']
_slot_types = ['float32','float32','int32[]','int32[]','int32[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
x,y,upper,lower,mass
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(VerticalOccupancyGridList, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.upper is None:
self.upper = []
if self.lower is None:
self.lower = []
if self.mass is None:
self.mass = []
else:
self.x = 0.
self.y = 0.
self.upper = []
self.lower = []
self.mass = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.x, _x.y))
length = len(self.upper)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.upper))
length = len(self.lower)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.lower))
length = len(self.mass)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.mass))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.upper = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.lower = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.mass = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.x, _x.y))
length = len(self.upper)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.upper.tostring())
length = len(self.lower)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.lower.tostring())
length = len(self.mass)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.mass.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.x, _x.y,) = _struct_2f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.upper = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.lower = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.mass = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_2f = struct.Struct("<2f")
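# Round-trip sketch (the io.BytesIO buffer is an assumption of this example,
# not something the generated module provides):
#     from io import BytesIO
#     msg = VerticalOccupancyGridList(x=1.0, y=2.0, upper=[3], lower=[1], mass=[7])
#     buff = BytesIO()
#     msg.serialize(buff)
#     copy = VerticalOccupancyGridList().deserialize(buff.getvalue())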
| 32.397849
| 123
| 0.623963
| 5,771
| 0.957683
| 0
| 0
| 0
| 0
| 0
| 0
| 1,778
| 0.295055
|
67e63c84e17221da6f00d66f3c8761be24cd93e2
| 2,718
|
py
|
Python
|
examples/plot_benchmark.py
|
MrNuggelz/glvq
|
1eba279a07fd7abe2ee18ccdba27fba22755f877
|
[
"BSD-3-Clause"
] | 27
|
2018-04-11T06:46:07.000Z
|
2022-03-24T06:15:31.000Z
|
examples/plot_benchmark.py
|
MrNuggelz/glvq
|
1eba279a07fd7abe2ee18ccdba27fba22755f877
|
[
"BSD-3-Clause"
] | 11
|
2018-04-13T02:04:06.000Z
|
2021-09-26T21:32:50.000Z
|
examples/plot_benchmark.py
|
MrNuggelz/glvq
|
1eba279a07fd7abe2ee18ccdba27fba22755f877
|
[
"BSD-3-Clause"
] | 17
|
2018-04-05T13:46:06.000Z
|
2022-03-24T06:15:35.000Z
|
"""
==============
GLVQ Benchmark
==============
This example shows the differences between the 4 different GLVQ implementations and LMNN.
The Image Segmentation dataset is used for training and test. Each plot shows the projection
and classification from each implementation. Because Glvq can't project the data on its own
a PCA is used.
"""
from __future__ import with_statement
import numpy as np
import matplotlib.pyplot as plt
from metric_learn import LMNN
from sklearn.decomposition import PCA
from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel
from sklearn_lvq.utils import _to_tango_colors, _tango_color
print(__doc__)
def plot(data, target, target_p, prototype, prototype_label, p):
p.scatter(data[:, 0], data[:, 1], c=_to_tango_colors(target, 0), alpha=0.5)
p.scatter(data[:, 0], data[:, 1], c=_to_tango_colors(target_p, 0),
marker='.')
p.scatter(prototype[:, 0], prototype[:, 1],
c=_tango_color('aluminium', 5), marker='D')
try:
p.scatter(prototype[:, 0], prototype[:, 1], s=60,
c=_to_tango_colors(prototype_label, 0), marker='.')
except:
p.scatter(prototype[:, 0], prototype[:, 1], s=60,
c=_tango_color(prototype_label), marker='.')
p.axis('equal')
y = []
x = []
with open('segmentation.data') as f:
for line in f:
v = line.split(',')
y.append(v[0])
x.append(v[1:])
x = np.asarray(x, dtype='float64')
y = np.asarray(y)
lmnn = LMNN(k=5, learn_rate=1e-6)
lmnn.fit(x, y)
x_t = lmnn.transform(x)
p1 = plt.subplot(231)
p1.scatter(x_t[:, 0], x_t[:, 1], c=_to_tango_colors(y, 0))
p1.axis('equal')
p1.set_title('LMNN')
# GLVQ
glvq = GlvqModel()
glvq.fit(x, y)
p2 = plt.subplot(232)
p2.set_title('GLVQ')
plot(PCA().fit_transform(x), y, glvq.predict(x), glvq.w_, glvq.c_w_, p2)
# GRLVQ
grlvq = GrlvqModel()
grlvq.fit(x, y)
p3 = plt.subplot(233)
p3.set_title('GRLVQ')
plot(grlvq.project(x, 2),
y, grlvq.predict(x), grlvq.project(grlvq.w_, 2),
grlvq.c_w_, p3)
# GMLVQ
gmlvq = GmlvqModel()
gmlvq.fit(x, y)
p4 = plt.subplot(234)
p4.set_title('GMLVQ')
plot(gmlvq.project(x, 2),
y, gmlvq.predict(x), gmlvq.project(gmlvq.w_, 2),
gmlvq.c_w_, p4)
# LGMLVQ
lgmlvq = LgmlvqModel()
lgmlvq.fit(x, y)
p5 = plt.subplot(235)
elem_set = list(set(lgmlvq.c_w_))
p5.set_title('LGMLVQ 1')
plot(lgmlvq.project(x, 1, 2, True),
y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[1]]), 1, 2),
elem_set.index(lgmlvq.c_w_[1]), p5)
p6 = plt.subplot(236)
p6.set_title('LGMLVQ 2')
plot(lgmlvq.project(x, 6, 2, True),
y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[6]]), 6, 2),
elem_set.index(lgmlvq.c_w_[6]), p6)
plt.show()
| 27.734694
| 92
| 0.654893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 485
| 0.17844
|
67e6967f9057bb9fe14cc5543b93fd2036edcf8d
| 2,662
|
py
|
Python
|
8/star2.py
|
nfitzen/advent-of-code-2020
|
774b7db35aaf31b0e72a569b3441343d50f4d079
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
8/star2.py
|
nfitzen/advent-of-code-2020
|
774b7db35aaf31b0e72a569b3441343d50f4d079
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
8/star2.py
|
nfitzen/advent-of-code-2020
|
774b7db35aaf31b0e72a569b3441343d50f4d079
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2020 Nathaniel Fitzenrider <https://github.com/nfitzen>
#
# SPDX-License-Identifier: CC0-1.0
# Jesus Christ this was overengineered to Hell and back.
from typing import List, Tuple, Union
with open('input.txt') as f:
instructions = f.readlines()
class Console():
def __init__(self):
self.instructions = {'nop', 'acc', 'jmp'}
self.accumulator = 0
self.lastOp = 'nop'
self.lastArg = '+0'
self.position = 0
self.lastPosition = 0
self.status = 0
def process(self, instructions: List[Union[Tuple[str, int], str]]) -> int:
'''Returns the accumulator value at the end.'''
self.status = 0
if type(instructions[0]) == str:
instructions = self.compile(instructions)
visitedPos = set()
while self.position < len(instructions):
self.lastPosition = self.position
ins = self.parse(instructions[self.position])
if ins[0] == 'acc':
self.acc(ins[1])
elif ins[0] == 'jmp':
self.jmp(ins[1])
elif ins[0] == 'nop':
self.nop(ins[1])
self.position += 1
if self.position in visitedPos:
self.status = 1
break
visitedPos.add(self.position)
return (self.accumulator, self.status)
def compile(self, instructions: list) -> List[Tuple[str, int]]:
return [self.parse(i) if type(i) == str else i for i in instructions]
def parse(self, instruction: str) -> Tuple[str, int]:
if type(instruction) == tuple:
return instruction
op = instruction[0:3]
arg = int(instruction[4:])
if op not in self.instructions:
op = 'nop'
arg = 0
return (op, arg)
def acc(self, arg: int):
self.accumulator += arg
return self.accumulator
def jmp(self, arg: int) -> int:
'''Returns last position'''
self.lastPosition = self.position
self.position += arg - 1
return self.position
def nop(self, arg: int):
return arg
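# Hand-traced example of the interpreter above: with the three instructions
# below, execution revisits position 1, so the loop guard trips and
# Console().process(['nop +0', 'acc +5', 'jmp -2']) returns (5, 1).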
# It's not a universal solution; it only works for jmp.
# I just got lucky.
console = Console()
instructions = console.compile(instructions)
positions = {i[0] if i[1][0] == 'jmp' else None for i in enumerate(console.compile(instructions))}
positions -= {None}
for pos in positions:
console.__init__()
tmpInstruct = instructions.copy()
tmpInstruct[pos] = ('nop', tmpInstruct[pos][1])
acc, status = console.process(tmpInstruct)
if status == 0:
print(acc)
| 31.690476
| 98
| 0.583396
| 1,872
| 0.703231
| 0
| 0
| 0
| 0
| 0
| 0
| 407
| 0.152893
|
67e77f21e80bffc6d63b3d609643ba3804770c10
| 1,010
|
py
|
Python
|
projects/20151163/api/api.py
|
universe3306/WebStudio2019
|
f6827875c449e762bae21e0d4d4fc76187626930
|
[
"MIT"
] | 14
|
2019-03-06T10:32:40.000Z
|
2021-11-18T01:44:28.000Z
|
projects/20151163/api/api.py
|
universe3306/WebStudio2019
|
f6827875c449e762bae21e0d4d4fc76187626930
|
[
"MIT"
] | 35
|
2019-03-13T07:04:02.000Z
|
2019-10-08T06:26:45.000Z
|
projects/20151163/api/api.py
|
universe3306/WebStudio2019
|
f6827875c449e762bae21e0d4d4fc76187626930
|
[
"MIT"
] | 22
|
2019-03-11T11:00:24.000Z
|
2019-09-14T06:53:30.000Z
|
from flask import Flask, request, jsonify
from flask_restful import Api, Resource
from flask_cors import CORS
import json, os
from models import db, User
from UserList import UserList
from PicturesList import Picture, PicturesList, Uploader
basedir = os.path.dirname(os.path.abspath(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
app = Flask(__name__)
app.config.update({
'SQLALCHEMY_TRACK_MODIFICATIONS': True,
"SQLALCHEMY_DATABASE_URI": SQLALCHEMY_DATABASE_URI,
})
cors = CORS(app)
api = Api(app)
db.init_app(app)
def serializer(l):
ret = []
for row in l:
ret.append(json.loads(row.serialize()))
return json.dumps(ret)
api.add_resource(UserList, '/api/users')
api.add_resource(PicturesList, '/api/pictures')
api.add_resource(Picture, '/api/pictures/<name>')
api.add_resource(Uploader, '/api/pictures/new')
if __name__ == '__main__':
with app.app_context():
db.create_all()
app.run(host='0.0.0.0', port=5000, debug=True)
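# Once running, the resources registered above are served locally, e.g.
#     GET http://localhost:5000/api/users
#     GET http://localhost:5000/api/pictures
# (host and port taken from the app.run call above).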
| 26.578947
| 72
| 0.725743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.162376
|
67e793c1f1db4accdabd37b5f3ae0c798f19a953
| 40,518
|
py
|
Python
|
app.py
|
sharonytlau/dash-loan-calculator
|
b789d30953c8836cc5e861f36a66e73aace24e2c
|
[
"Apache-2.0"
] | 1
|
2021-10-30T14:41:15.000Z
|
2021-10-30T14:41:15.000Z
|
app.py
|
sharonytlau/dash-loan-calculator
|
b789d30953c8836cc5e861f36a66e73aace24e2c
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
sharonytlau/dash-loan-calculator
|
b789d30953c8836cc5e861f36a66e73aace24e2c
|
[
"Apache-2.0"
] | null | null | null |
# Ying Tung Lau - sharonlau@brandeis.edu
# Jiaying Yan - jiayingyan@brandeis.edu
# <editor-fold desc="import modules">
import pandas as pd
import numpy as np
import json
import os
import re
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import State, Input, Output
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
from algorithms.Helper import *
from algorithms.LoanImpacts import *
# </editor-fold>
# <editor-fold desc="dash app">
external_stylesheets = [dbc.themes.BOOTSTRAP,
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True
# <editor-fold desc="app-components">
def individiual_contribution_input(index_loan, index_person, style={'display': 'none'}):
id_contribution_input = {'type': 'contribution', 'index': '-'.join([str(index_loan), index_person])}
id_individual_contribution = {'type': 'individual-contribution', 'index': '-'.join([str(index_loan), index_person])}
individual_contribution = dbc.FormGroup(
[
dbc.Label("Contributor " + index_person, html_for=id_individual_contribution, className='m-0 d-none'),
dbc.InputGroup([
dbc.InputGroupAddon(index_person, addon_type="prepend"),
dbc.Input(id=id_contribution_input, type="number", min=0, step=0.01, max=1e15,
className="border-0", placeholder='0.00'),
dbc.FormFeedback(valid=False)
], className='border-bottom individual-formgroup')
], id=id_individual_contribution, style=style, className='input-form-2')
return individual_contribution
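# For example, individiual_contribution_input(2, 'B') builds a hidden FormGroup
# whose Input carries the dict id {'type': 'contribution', 'index': '2-B'},
# which the callbacks below reference explicitly.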
def loan_contribution_input(index_loan, style={'display': 'none'}):
id_loan_contribution = {'type': 'loan-contribution', 'index': str(index_loan)}
loan_contribution = html.Div([
individiual_contribution_input(index_loan, 'A', {'display': 'block'}),
individiual_contribution_input(index_loan, 'B'),
individiual_contribution_input(index_loan, 'C')
], style=style, id=id_loan_contribution)
return loan_contribution
def individual_loan_input(index, style={'display': 'none'}):
id_principal = {'type': 'principal', 'index': str(index)}
id_rate = {'type': 'rate', 'index': str(index)}
id_payment = {'type': 'payment', 'index': str(index)}
id_extra = {'type': 'extra', 'index': str(index)}
id_group = {'type': 'individual-loan-input', 'index': str(index)}
loan_header = html.H5('LOAN {}'.format(index), className='card-loan-title')
principal = dbc.FormGroup(
[
dbc.Label("Principal", html_for=id_principal, className='m-0'),
dbc.InputGroup([
dbc.InputGroupAddon("$", addon_type="prepend"),
dbc.Input(id=id_principal, type="number", min=0.01, step=0.01, max=1e15,
pattern="re.compile(r'^[1-9]+\d*(\.\d{1,2})?$')", className="border-0"),
dbc.FormFeedback(valid=False)
], className='individual-formgroup border-bottom'),
], className='input-form'
)
rate = dbc.FormGroup(
[
dbc.Label("Interest rate per year", html_for=id_rate, className='m-0'),
dbc.InputGroup([
dbc.Input(id=id_rate, type="number", min=0.01, step=0.01, max=1e15, className="border-0"),
dbc.InputGroupAddon("%", addon_type="prepend"),
dbc.FormFeedback(valid=False)
], className='border-bottom individual-formgroup')
], className='input-form'
)
payment = dbc.FormGroup(
[
dbc.Label("Monthly payment", html_for=id_payment, className='m-0'),
dbc.InputGroup([
dbc.InputGroupAddon("$", addon_type="prepend"),
dbc.Input(id=id_payment, type="number", min=0.01, step=0.01, max=1e15, className="border-0"),
dbc.FormFeedback(valid=False)
], className='border-bottom individual-formgroup')
], className='input-form'
)
extra = dbc.FormGroup(
[
dbc.Label("Extra payment", html_for=id_extra, className='m-0'),
dbc.InputGroup([
dbc.InputGroupAddon("$", addon_type="prepend"),
dbc.Input(id=id_extra, type="number", min=0.0, step=0.01, max=1e15, className="border-0",
placeholder='0.00'),
dbc.FormFeedback(valid=False)
], className='border-bottom individual-formgroup')
], className='input-form-2'
)
contributions = loan_contribution_input(index)
individual_form = html.Div(
[loan_header,
dbc.Form([
principal,
rate,
payment,
extra,
contributions
])
]
, id=id_group, style=style, className='individual-loan w-100')
return individual_form
loan_input_card = dbc.Card(
[
dbc.CardHeader(
[
html.Div(
[
html.H1('LOAN SPECS'),
],
className='w-fit d-flex align-items-center text-nowrap'),
html.Div(
[
html.Div(
[
"Loan Number",
html.Div(
[
dbc.Button('-', color='light', id='decrease-loan',
className='symbol-style offset-2',
n_clicks=0),
dbc.Button('+', color='light', id='increase-loan',
className='symbol-style mr-1',
n_clicks=0),
], className='increment-btn'),
], className='number-widget pl-3'),
html.Div(
[
'Contribution Number',
dbc.Button('+', color='light', id='contribution-button',
className='symbol-style mr-1 increment-btn',
n_clicks=0, ),
], className='number-widget'),
]
, className="d-flex flex-column align-items-end"),
],
className='d-inline-flex justify-content-between'),
dbc.CardBody(
[
individual_loan_input(1, {'display': 'block'}),
individual_loan_input(2),
individual_loan_input(3),
], id="loan-card", className='input-card-body'),
], className='input-card'
)
# </editor-fold>
# <editor-fold desc="app-callbacks">
# %% alter input panel
@app.callback(
[
Output('loan-number', 'data'),
Output({'type': 'individual-loan-input', 'index': '2'}, 'style'),
Output({'type': 'individual-loan-input', 'index': '3'}, 'style'),
Output({'type': 'loan-contribution', 'index': '1'}, "style"),
Output({'type': 'loan-contribution', 'index': '2'}, 'style'),
Output({'type': 'loan-contribution', 'index': '3'}, 'style'),
Output({'type': 'individual-contribution', 'index': '1-B'}, 'style'),
Output({'type': 'individual-contribution', 'index': '2-B'}, 'style'),
Output({'type': 'individual-contribution', 'index': '3-B'}, 'style'),
Output({'type': 'individual-contribution', 'index': '1-C'}, 'style'),
Output({'type': 'individual-contribution', 'index': '2-C'}, 'style'),
Output({'type': 'individual-contribution', 'index': '3-C'}, 'style'),
Output({'type': 'principal', 'index': '1'}, 'value'),
Output({'type': 'principal', 'index': '2'}, 'value'),
Output({'type': 'principal', 'index': '3'}, 'value'),
Output({'type': 'rate', 'index': '1'}, 'value'),
Output({'type': 'rate', 'index': '2'}, 'value'),
Output({'type': 'rate', 'index': '3'}, 'value'),
Output({'type': 'payment', 'index': '1'}, 'value'),
Output({'type': 'payment', 'index': '2'}, 'value'),
Output({'type': 'payment', 'index': '3'}, 'value'),
Output({'type': 'extra', 'index': '1'}, 'value'),
Output({'type': 'extra', 'index': '2'}, 'value'),
Output({'type': 'extra', 'index': '3'}, 'value'),
Output({'type': 'contribution', 'index': '1-A'}, 'value'),
Output({'type': 'contribution', 'index': '1-B'}, 'value'),
Output({'type': 'contribution', 'index': '1-C'}, 'value'),
Output({'type': 'contribution', 'index': '2-A'}, 'value'),
Output({'type': 'contribution', 'index': '2-B'}, 'value'),
Output({'type': 'contribution', 'index': '2-C'}, 'value'),
Output({'type': 'contribution', 'index': '3-A'}, 'value'),
Output({'type': 'contribution', 'index': '3-B'}, 'value'),
Output({'type': 'contribution', 'index': '3-C'}, 'value'),
],
[
Input("contribution-button", 'n_clicks'),
Input("decrease-loan", 'n_clicks'),
Input("increase-loan", 'n_clicks'),
Input('reset-button', 'n_clicks')
],
[State('loan-number', 'data'),
State({'type': 'principal', 'index': '1'}, 'value'),
State({'type': 'principal', 'index': '2'}, 'value'),
State({'type': 'principal', 'index': '3'}, 'value'),
State({'type': 'rate', 'index': '1'}, 'value'),
State({'type': 'rate', 'index': '2'}, 'value'),
State({'type': 'rate', 'index': '3'}, 'value'),
State({'type': 'payment', 'index': '1'}, 'value'),
State({'type': 'payment', 'index': '2'}, 'value'),
State({'type': 'payment', 'index': '3'}, 'value'),
State({'type': 'extra', 'index': '1'}, 'value'),
State({'type': 'extra', 'index': '2'}, 'value'),
State({'type': 'extra', 'index': '3'}, 'value'),
State({'type': 'contribution', 'index': '1-A'}, 'value'),
State({'type': 'contribution', 'index': '1-B'}, 'value'),
State({'type': 'contribution', 'index': '1-C'}, 'value'),
State({'type': 'contribution', 'index': '2-A'}, 'value'),
State({'type': 'contribution', 'index': '2-B'}, 'value'),
State({'type': 'contribution', 'index': '2-C'}, 'value'),
State({'type': 'contribution', 'index': '3-A'}, 'value'),
State({'type': 'contribution', 'index': '3-B'}, 'value'),
State({'type': 'contribution', 'index': '3-C'}, 'value')]
)
def loan_num(n, back, nxt, reset_n, last_history, principal1, principal2, principal3, rate1, rate2, rate3, payment1,
payment2, payment3,
extra1, extra2, extra3, contribution1a, contribution1b, contribution1c, contribution2a, contribution2b,
contribution2c, contribution3a,
contribution3b, contribution3c):
vis = {'display': 'block'}
invis = {'display': 'none'}
button_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
reset_n = 0
if button_id == "reset-button":
last_history["num"] = 1
return (last_history,) + tuple({"display": "none"} for i in range(11)) + tuple(None for i in range(21))
else:
try:
if back > last_history["back"]:
last_history["back"] = back
last_history['num'] = max(1, last_history['num'] - 1)
elif nxt > last_history["next"]:
last_history["next"] = nxt
last_history['num'] = min(3, last_history['num'] + 1)
loan_2 = invis
loan_3 = invis
contribute_1 = invis
contribute_2 = invis
contribute_3 = invis
contribute_b = invis
contribute_c = invis
if n >= 2:
contribute_b = vis
if n >= 3:
contribute_c = vis
if n:
contribute_1 = vis
if last_history['num'] >= 2:
loan_2 = vis
if n:
contribute_2 = vis
if last_history['num'] == 3:
loan_3 = vis
if n:
contribute_3 = vis
return last_history, loan_2, loan_3, contribute_1, contribute_2, contribute_3, contribute_b, contribute_b, \
contribute_b, contribute_c, contribute_c, contribute_c, principal1, principal2, principal3, rate1, rate2, rate3, \
payment1, payment2, payment3, extra1, extra2, extra3, contribution1a, contribution1b, contribution1c, \
contribution2a, contribution2b, contribution2c, contribution3a, contribution3b, contribution3c
# if last_history store is None
except:
last_history = {"num": 1, "back": 0, "next": 0}
return (last_history,) + tuple(invis for _ in range(11)) + (
principal1, principal2, principal3, rate1, rate2, rate3, payment1, payment2, payment3, extra1, extra2,
extra3, contribution1a, contribution1b, contribution1c, contribution2a, contribution2b, contribution2c,
contribution3a, contribution3b, contribution3c)
# %%
# %% store input loan data
@app.callback(
[
Output('apply-alert', 'children'),
Output('apply-alert', 'is_open'),
Output('apply-alert', 'className'),
Output('go-row-2', 'style'),
Output('row-2', 'style'),
Output('row-3', 'style'),
Output("apply-store", 'data'),
Output({'type': 'principal', 'index': '1'}, 'invalid'),
Output({'type': 'rate', 'index': '1'}, 'invalid'),
Output({'type': 'payment', 'index': '1'}, 'invalid'),
Output({'type': 'principal', 'index': '2'}, 'invalid'),
Output({'type': 'rate', 'index': '2'}, 'invalid'),
Output({'type': 'payment', 'index': '2'}, 'invalid'),
Output({'type': 'principal', 'index': '3'}, 'invalid'),
Output({'type': 'rate', 'index': '3'}, 'invalid'),
Output({'type': 'payment', 'index': '3'}, 'invalid'),
Output({'type': 'extra', 'index': '1'}, 'invalid'),
Output({'type': 'extra', 'index': '2'}, 'invalid'),
Output({'type': 'extra', 'index': '3'}, 'invalid'),
Output({'type': 'contribution', 'index': '1-A'}, 'invalid'),
Output({'type': 'contribution', 'index': '1-B'}, 'invalid'),
Output({'type': 'contribution', 'index': '1-C'}, 'invalid'),
Output({'type': 'contribution', 'index': '2-A'}, 'invalid'),
Output({'type': 'contribution', 'index': '2-B'}, 'invalid'),
Output({'type': 'contribution', 'index': '2-C'}, 'invalid'),
Output({'type': 'contribution', 'index': '3-A'}, 'invalid'),
Output({'type': 'contribution', 'index': '3-B'}, 'invalid'),
Output({'type': 'contribution', 'index': '3-C'}, 'invalid'),
],
[Input('apply-button', 'n_clicks')],
[
State('loan-number', 'data'),
State({'type': 'principal', 'index': '1'}, 'value'),
State({'type': 'principal', 'index': '2'}, 'value'),
State({'type': 'principal', 'index': '3'}, 'value'),
State({'type': 'rate', 'index': '1'}, 'value'),
State({'type': 'rate', 'index': '2'}, 'value'),
State({'type': 'rate', 'index': '3'}, 'value'),
State({'type': 'payment', 'index': '1'}, 'value'),
State({'type': 'payment', 'index': '2'}, 'value'),
State({'type': 'payment', 'index': '3'}, 'value'),
State({'type': 'extra', 'index': '1'}, 'value'),
State({'type': 'extra', 'index': '2'}, 'value'),
State({'type': 'extra', 'index': '3'}, 'value'),
State({'type': 'contribution', 'index': '1-A'}, 'value'),
State({'type': 'contribution', 'index': '1-B'}, 'value'),
State({'type': 'contribution', 'index': '1-C'}, 'value'),
State({'type': 'contribution', 'index': '2-A'}, 'value'),
State({'type': 'contribution', 'index': '2-B'}, 'value'),
State({'type': 'contribution', 'index': '2-C'}, 'value'),
State({'type': 'contribution', 'index': '3-A'}, 'value'),
State({'type': 'contribution', 'index': '3-B'}, 'value'),
State({'type': 'contribution', 'index': '3-C'}, 'value'),
State({'type': 'extra', 'index': '1'}, 'invalid'),
State({'type': 'extra', 'index': '2'}, 'invalid'),
State({'type': 'extra', 'index': '3'}, 'invalid'),
State({'type': 'contribution', 'index': '1-A'}, 'invalid'),
State({'type': 'contribution', 'index': '1-B'}, 'invalid'),
State({'type': 'contribution', 'index': '1-C'}, 'invalid'),
State({'type': 'contribution', 'index': '2-A'}, 'invalid'),
State({'type': 'contribution', 'index': '2-B'}, 'invalid'),
State({'type': 'contribution', 'index': '2-C'}, 'invalid'),
State({'type': 'contribution', 'index': '3-A'}, 'invalid'),
State({'type': 'contribution', 'index': '3-B'}, 'invalid'),
State({'type': 'contribution', 'index': '3-C'}, 'invalid'),
],
prevent_initial_call=True)
def on_click(n_clicks, loan_number, principal1, principal2, principal3, rate1, rate2, rate3, payment1,
payment2, payment3, extra1, extra2, extra3, contribution1a, contribution1b, contribution1c, contribution2a,
contribution2b, contribution2c, contribution3a, contribution3b, contribution3c, inval1, inval2, inval3,
inval4, inval5, inval6, inval7, inval8, inval9, inval10, inval11, inval12):
# reset
if n_clicks == 0:
invis = {'display': 'none'}
return ("", False, "", invis, invis, invis, [],) + tuple(False for i in range(21))
# check values and store
else:
# input value if valid else none
def num(value):
try:
value = float(value)
if 0.0 < value <= 1e15 and re.compile(r'^[1-9]+\d*(\.\d{1,2})?$').match(str(value)):
return value
else:
return None
except:
return None
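        # Added note (not part of the original code): the regex above only accepts
        # positive amounts with at most two decimal places, e.g. '1200.50' passes
        # while '0.5' and '12.345' are rejected and treated as invalid (None).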
        # mutable flag: the nested extra() helper flips this to -1 when an optional amount is invalid
        invalid_flag = [1]
def extra(value):
try:
value = float(value)
if 0.0 <= value <= 1e15 and (value == 0.0 or re.compile(r'^[1-9]+\d*(\.\d{1,2})?$').match(str(value))):
return value
else:
# print(value)
invalid_flag[0] = -1
return None
except:
return None
# initialize loan data
loan1, loan2, loan3 = (
{'principal': '', 'rate': '', 'payment': '', 'extra': '', 'contribution': {'A': '', 'B': '', 'C': ''}}
for i in range(3))
loan1['principal'], loan1['rate'], loan1['payment'], loan2['principal'], loan2['rate'], loan2['payment'], loan3[
'principal'], loan3['rate'], loan3['payment'] = \
(num(_) for _ in [
principal1, rate1, payment1,
principal2, rate2, payment2,
principal3, rate3, payment3])
loan1['extra'], loan1['contribution']['A'], loan1['contribution']['B'], loan1['contribution']['C'], \
loan2['extra'], loan2['contribution']['A'], loan2['contribution']['B'], loan2['contribution']['C'], \
loan3['extra'], loan3['contribution']['A'], loan3['contribution']['B'], loan3['contribution']['C'] = \
(extra(_) for _ in [
extra1, contribution1a, contribution1b, contribution1c,
extra2, contribution2a, contribution2b, contribution2c,
extra3, contribution3a, contribution3b, contribution3c])
# extra = 0 if None
loan1['extra'], loan1['contribution']['A'], loan1['contribution']['B'], loan1['contribution']['C'], \
loan2['extra'], loan2['contribution']['A'], loan2['contribution']['B'], loan2['contribution']['C'], \
loan3['extra'], loan3['contribution']['A'], loan3['contribution']['B'], loan3['contribution']['C'] = \
(_ or 0 for _ in
[loan1['extra'], loan1['contribution']['A'], loan1['contribution']['B'], loan1['contribution']['C'],
loan2['extra'], loan2['contribution']['A'], loan2['contribution']['B'], loan2['contribution']['C'],
loan3['extra'], loan3['contribution']['A'],
loan3['contribution']['B'], loan3['contribution']['C']])
# delete contributor if all extra is 0
def extra_key_del(extra_key, *args):
if all(_['contribution'][extra_key] == 0 for _ in args):
for loan in [loan1, loan2, loan3]:
del loan['contribution'][extra_key]
for _ in ['A', 'B', 'C']:
extra_key_del(_, loan1, loan2, loan3)
# update flags for input validation and data
flags = [not bool(num(_)) for _ in
[principal1, rate1, payment1, principal2, rate2, payment2, principal3, rate3, payment3]]
loan_num = loan_number['num']
if loan_num == 2:
loan3 = None
flags[6:9] = (False for i in range(3))
if loan_num == 1:
loan2 = None
loan3 = None
flags[3:9] = (False for i in range(6))
# store data, data = [] if there is invalid input loan
def is_invalid(loan):
return not all([loan['principal'], loan['rate'], loan['payment']])
anchor_style = {'display': 'none'}
row_display = {'display': 'none'}
if invalid_flag[0] == -1 or is_invalid(loan1) or (loan_num >= 2 and is_invalid(loan2)) or (
loan_num == 3 and is_invalid(loan3)):
data = []
alert_message = 'Please provide valid loan specs'
alert_class = 'd-flex apply-alert alert-danger'
else:
data = [loan for loan in [loan1, loan2, loan3] if loan]
for loan in data:
if loan['payment'] <= loan['principal'] * loan['rate'] / 1200.0:
data = []
alert_class = 'd-flex apply-alert alert-danger'
alert_message = 'Oops! Monthly payment must be greater than interest'
break
else:
anchor_style = {'display': 'block'}
row_display = {'display': 'flex'}
alert_class = 'd-flex apply-alert alert-success'
alert_message = 'See your loan schedules below'
print('loan number is:', loan_num)
print('stored loan data:', data)
return (alert_message, True, alert_class, anchor_style, row_display, row_display, data,) + tuple(flags) + (
inval1, inval2, inval3, inval4, inval5, inval6, inval7, inval8, inval9,
inval10, inval11, inval12)
# %%
# %% Reset input
@app.callback(
[Output("contribution-button", 'n_clicks'),
Output('apply-button', 'n_clicks')],
[Input('reset-button', 'n_clicks')],
prevent_initial_call=True)
def reset(n):
if n:
return 0, 0
# %%
# %% Show checklist
@app.callback([Output('contribution_checklist', 'options'),
Output('contribution_checklist', 'value')],
[Input('apply-store', 'modified_timestamp')],
[State('apply-store', 'data')],
prevent_initial_call=True)
def update_checklist(modified_timestamp, loans_data):
# print(modified_timestamp)
# print(loans_data)
loans = loans_data
if_contribution = any([sum(i.values()) for i in [loan['contribution'] for loan in loans]])
# Get checklist if having contribution
if if_contribution:
contribution = [i['contribution'] for i in loans]
checklist_options = [{'label': member, 'value': member} for member in contribution[0].keys()]
checklist_value = list(contribution[0].keys())
else:
contribution = None
checklist_options = []
checklist_value = []
return checklist_options, checklist_value
# %% Show schedule figure
# Define functions used by the schedule figure
def get_Bar_principal(index, df_schedule):
palette = [dict(color='rgba(163, 201, 199, 1)', line=dict(color='rgba(163, 201, 199, 1)')),
dict(color='rgba(163, 201, 199, 0.7)', line=dict(color='rgba(163, 201, 199, 0.7)')),
dict(color='rgba(163, 201, 199, 0.4)', line=dict(color='rgba(163, 201, 199, 0.4)')),
]
fig = go.Bar(name='Loan{} Principal'.format(index + 1),
x=df_schedule['Payment Number'],
y=df_schedule['Applied Principal'],
marker=palette[index],
legendgroup=index,
)
return fig
def get_Bar_interest(index, df_schedule):
palette = [dict(color='rgba(236, 197, 76, 1)', line=dict(color='rgba(236, 197, 76, 1)')),
dict(color='rgba(236, 197, 76, 0.7)', line=dict(color='rgba(236, 197, 76, 0.7)')),
dict(color='rgba(236, 197, 76, 0.4)', line=dict(color='rgba(236, 197, 76, 0.4)')),
]
fig = go.Bar(name='Loan{} Interest'.format(index + 1),
x=df_schedule['Payment Number'],
y=df_schedule['Applied Interest'],
marker=palette[index],
legendgroup=index,
)
return fig
@app.callback([Output('schedule', 'figure'),
Output('impact_banner', 'children'),
Output('store_df_impact', 'data'),
],
[Input('contribution_checklist', 'value')],
[State('apply-store', 'data')],
prevent_initial_call=True)
def update_schedule_figure(checklist_value, loans_data):
# print(checklist_value)
loans = loans_data
principal = [i['principal'] for i in loans]
rate = [i['rate'] for i in loans]
payment = [i['payment'] for i in loans]
extra_payment = [i['extra'] for i in loans]
if_contribution = any([sum(i.values()) for i in [loan['contribution'] for loan in loans]])
if if_contribution:
contribution = [i['contribution'] for i in loans]
else:
contribution = None
# Compute contribution impact if any
if contribution != None:
loan_impacts = LoanImpacts(principal=principal, rate=rate, payment=payment,
extra_payment=extra_payment, contributions=contribution)
df_impact = loan_impacts.compute_impacts()
store_df_impact = df_impact.to_json()
else:
store_df_impact = ''
    # Get an impact banner according to checklist_value
# for i in range(len(principal)):
if contribution != None:
if len(checklist_value) != 0:
checklist_value.sort()
if len(checklist_value) == len(contribution[0]):
                impact_banner = 'With all the contributions, you only need to pay ${} interest in total. The loan term is {}.'.format(
*df_impact[df_impact['Index'] == 'ALL'].iloc[0][['InterestPaid', 'Duration']])
else:
unchecked_list = [i for i in list(contribution[0].keys()) if i not in checklist_value]
impact_banner = 'Without the contribution of {}'.format(' and '.join(unchecked_list)) + \
', you need to pay ${} more interest in total. The loan term will be extended by {}.'.format(
*df_impact[df_impact['Index'] == ' and '.join(checklist_value)].iloc[0][
['MIInterest', 'MIDuration']])
else:
impact_banner = 'Without any contribution, you need to pay ${} more interest in total. The loan term will be extended by {}.'.format(
*df_impact[df_impact['Index'] == 'None'].iloc[0][['MIInterest', 'MIDuration']])
else:
impact_banner = None
# Compute the portfolio schedule according to checklist_value
loan_portfolio = LoanPortfolio()
for i in range(len(principal)):
if contribution != None:
if len(checklist_value) != 0:
loan = Loan(principal=principal[i], rate=rate[i],
payment=payment[i], extra_payment=extra_payment[i] + sum(
[contribution[i][member] for member in checklist_value]))
else:
loan = Loan(principal=principal[i], rate=rate[i],
payment=payment[i], extra_payment=extra_payment[i])
else:
loan = Loan(principal=principal[i], rate=rate[i],
payment=payment[i], extra_payment=extra_payment[i])
loan.check_loan_parameters()
loan.compute_schedule()
loan_portfolio.add_loan(loan)
loan_portfolio.aggregate()
df_schedules = [Helper.schedule_as_df(loan) for loan in loan_portfolio.loans]
# Draw schedule plot
fig = go.Figure(
data=[get_Bar_principal(index, df_schedule.round(2)) for index, df_schedule in enumerate(df_schedules)] + \
[get_Bar_interest(index, df_schedule.round(2)) for index, df_schedule in enumerate(df_schedules)]
)
fig.update_layout( # margin={"t": 0, "r": 0.4, "b": 0, "l": 0}, #################
margin=dict(l=0, r=0, b=0, t=30),
barmode='stack',
bargap=0,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
xaxis=dict(title="<b>Schedule</b>", showgrid=False), # Time to loan termination
yaxis=dict(title="<b>USD</b>", showgrid=False),
legend=dict(xanchor='left', x=0 if len(df_schedules) == 3 else 0, y=-0.25, orientation='h'),
hovermode='x unified',
hoverlabel=dict(
bgcolor='rgba(255, 255, 255, 0.9)',
namelength=-1
),
)
return fig, impact_banner, store_df_impact
# %% Show contribution
def get_contribution_fig(df_impact):
fig = go.Figure()
trace_interest = go.Bar(
name="Total Interest Paid",
x=df_impact['Index'],
y=df_impact['InterestPaid'],
yaxis='y',
offsetgroup=1,
marker=dict(color='rgba(236, 197, 76, 1)')
)
trace_duration = go.Bar(
name="Loan Term",
x=df_impact['Index'],
y=df_impact['Duration'],
yaxis='y2',
offsetgroup=2,
marker=dict(color='rgba(163, 161, 161, 1)')
)
fig.add_trace(trace_interest)
fig.add_trace(trace_duration)
fig['layout'].update(
margin=dict(l=0, r=0, b=0, t=30),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
legend=dict(xanchor='left', x=0, y=-0.25, orientation='h'), # , bordercolor = 'Black', borderwidth = 1
xaxis=dict(title="<b>Contributor</b>"),
yaxis=dict(title="<b>Total Interest Paid</b>",
range=[0.5 * max(df_impact['InterestPaid']), 1.1 * max(df_impact['InterestPaid'])], showgrid=False),
yaxis2=dict(title="<b>Loan Term</b>", anchor='x', overlaying='y', side='right', showgrid=False),
)
return fig
@app.callback([Output('contribution', 'figure'),
Output('graph-switch-btn', 'style')],
[Input('store_df_impact', 'modified_timestamp')],
[State('store_df_impact', 'data')],
prevent_initial_call=True)
def contribution_figure(modified_timestamp, store_df_impact):
if store_df_impact != '':
df_impact = pd.DataFrame(json.loads(store_df_impact))
df_impact = df_impact[['Index', 'InterestPaid', 'Duration']]
df_impact = df_impact[df_impact['Index'].str.contains('and') == False]
df_impact = df_impact.sort_values('InterestPaid')
fig = get_contribution_fig(df_impact)
style = {'display': 'block'}
else:
fig = go.Figure()
style = {'display': 'none'}
return fig, style
@app.callback([Output('contribution', 'style'),
Output('graph-schedule', 'style')],
[Input('graph-switch-btn', 'n_clicks')],
[State('graph-schedule', 'style'),
State('contribution', 'style')],
prevent_initial_call=True)
def figure_switch(n_clicks, schedule_style, contribution_style):
if n_clicks == 1:
return {'display': 'flex', 'animation': 'appear 0.5s ease'}, {'display': 'none'}
if n_clicks:
if schedule_style == {'display': 'none'}:
schedule_style = {'display': 'flex'}
else:
schedule_style = {'display': 'none'}
if contribution_style == {'display': 'none'}:
contribution_style = {'display': 'flex'}
else:
contribution_style = {'display': 'none'}
return contribution_style, schedule_style
# %% Schedule Table
@app.callback(
[
Output('dropdown_schedule', 'options'),
Output('dropdown_schedule', 'value')
],
[Input('apply-store', 'modified_timestamp')],
[State('apply-store', 'data')],
prevent_initial_call=True)
def choose_loan_to_show_schedule(modified_timestamp, loans_data):
options = [{'label': 'loan{}'.format(i + 1), 'value': 'loan{}'.format(i + 1)} for i in range(len(loans_data))] + \
[{'label': 'portfolio', 'value': 'portfolio'}]
value = 'portfolio'
return options, value
@app.callback([Output('table_schedule', 'columns'),
Output('table_schedule', 'data')],
[Input('apply-store', 'modified_timestamp'),
Input('dropdown_schedule', 'value')],
[State('apply-store', 'data')],
prevent_initial_call=True)
def schedule_table(modified_timestamp, dropdown_value, loans_data):
columns = ['Payment Number', 'Begin Principal', 'Payment', 'Extra Payment',
'Applied Principal', 'Applied Interest', 'End Principal']
columns = [{"name": i, "id": i} for i in columns]
loans = LoanPortfolio()
loans_schedule = {}
for index, loan_data in enumerate(loans_data):
loan = Loan(principal=loan_data['principal'], rate=loan_data['rate'], payment=loan_data['payment'],
extra_payment=loan_data['extra'] + sum(loan_data['contribution'].values()))
loan.compute_schedule()
loans.add_loan(loan)
loans_schedule['loan{}'.format(index + 1)] = Helper.schedule_as_df(loan)
loans.aggregate()
loans_schedule['portfolio'] = Helper.schedule_as_df(loans)
selected_schedule = loans_schedule[dropdown_value].round(2)
selected_schedule = selected_schedule.to_dict('records')
return columns, selected_schedule
# %%
# </editor-fold>
# <editor-fold desc="app-layout">
app.layout = html.Div(
[
dcc.Store(id="apply-store"),
dcc.Store(id='loan-number'),
dcc.Store(id='store_df_impact'),
dbc.Alert(id='apply-alert', is_open=False, duration=4000, className='apply-alert'),
dbc.Row(
[
html.P('💰', className='bar-title title-icon'),
html.Div([
html.P('MULTI-LOAN CALCULATOR', className='bar-title'),
html.P('\u00a0\u00a0\u00a0- by Jiaying Yan, Ying Tung Lau', className='bar-author'),
], className='d-flex flex-column align-items-end'),
dbc.Tooltip(
'Need help on loan terminology? Click to see web article on loan amortization by Investopedia.',
target='info-button', placement='right'),
html.A([dbc.Button(html.I(className="fa fa-question"), className='info-button', color='dark',
outline=True, id='info-button')],
href='https://www.investopedia.com/terms/a/amortization_schedule.asp', target='_blank',
rel="noopener noreferrer", className='info-button-wrapper'),
],
className='bar'),
dbc.Row([
loan_input_card,
html.Div(
[
html.H1('Multi-loan', className='display-1 m-0 text-nowrap'),
html.H1('Calculator', className='display-1 text-nowrap mb-3'),
html.P(
'Our smart tool helps you manage multiple loans with ease, allowing calculation for '
'up to three loans and three contributions.',
className='pb-0 pt-3 m-0'),
html.P('Enter your loan specs on the left and click submit right now to see your loan schedules!',
className='pt-0 pb-2 m-0'),
html.Div([
dbc.Button("SUBMIT", color="primary", outline=True, id='apply-button', n_clicks=0,
className='apply-button'),
dbc.Button('Reset', color='danger', outline=True, id='reset-button', className='reset-button',
n_clicks=0)
], className="apply-btn-group"),
],
className='app-title'),
html.A(html.I(className="fa fa-chevron-down"), href='#row-2-target', style={'display': 'none'},
className='go-row-2', id='go-row-2')
], className='app-row-1'),
dbc.Row(
[
html.A(id='row-2-target', className='anchor-target'),
html.A(html.I(className="fa fa-chevron-up"), href='#top', className='return-to-top'),
html.Div(
[
html.H6('Amortization Schedule and Contribution Impact', className='display-4 row-2-title'),
html.P(
"See the interactive chart for amortization schedule of your loan portfolio. "),
html.P(
'Receiving contributions for repaying loans? Check or uncheck the contributor boxes to see changes'
                            ' of your loan schedules under different combinations of contributions, and compare the impact'
' on total interest and loan term among contributors.'),
dbc.Button([html.Span('Switch Chart\u00a0'), html.Span(html.I(className="fa fa-caret-right"))],
id='graph-switch-btn', className='switch-btn', n_clicks=0, color='dark',
outline=True)
], className='row-2-text'),
html.Div([
html.Div(
[
html.Div(id='impact_banner', className='impact_banner'),
dbc.Checklist(id='contribution_checklist'),
dcc.Graph(id='schedule', figure=go.Figure(), className='graph-schedule')
], style={'display': 'flex'}, id='graph-schedule', className='graph-schedule-wrapper'
),
dcc.Graph(id='contribution', figure=go.Figure(), className='graph-contribution', style={'display': 'none'}),
], className='graph-container')
],
className='app-row-2', id='row-2', style={'display': 'none'}),
dbc.Row(
[
html.A(id='row-3-target', className='anchor-target'),
html.A(html.I(className="fa fa-chevron-up"), href='#top', className='return-to-top'),
html.H6('Amortization Table', className='display-4 row-3-title'),
html.Div(
[
dcc.RadioItems(id='dropdown_schedule'),
html.Div(dash_table.DataTable(
id='table_schedule',
style_table={'overflowY': 'auto'},
style_cell={'textOverflow': 'ellipsis', },
                        style_header={'backgroundColor': 'white', 'fontWeight': 'bold'},
style_as_list_view=True,
), className="table-wrapper"),
], className='schedule-table-group'),
],
className='app-row-3', id='row-3', style={'display': 'none'}),
], className='app-body'
)
app.run_server(debug=False, use_reloader=False)
# </editor-fold>
# </editor-fold>
| 45.88675
| 145
| 0.542722
| 0
| 0
| 0
| 0
| 24,972
| 0.616273
| 0
| 0
| 12,960
| 0.319834
|
67e7da06bf5b0c480be1e68da30d3dd8280232f5
| 2,888
|
py
|
Python
|
examples/advanced-topics/IIR-FIR/delay_channels.py
|
qua-platform/qua-libs
|
805a3b1a69980b939b370b3ba09434bc26dc45ec
|
[
"BSD-3-Clause"
] | 21
|
2021-05-21T08:23:34.000Z
|
2022-03-25T11:30:55.000Z
|
examples/advanced-topics/IIR-FIR/delay_channels.py
|
qua-platform/qua-libs
|
805a3b1a69980b939b370b3ba09434bc26dc45ec
|
[
"BSD-3-Clause"
] | 9
|
2021-05-13T19:56:00.000Z
|
2021-12-21T05:11:04.000Z
|
examples/advanced-topics/IIR-FIR/delay_channels.py
|
qua-platform/qua-libs
|
805a3b1a69980b939b370b3ba09434bc26dc45ec
|
[
"BSD-3-Clause"
] | 2
|
2021-06-21T10:56:40.000Z
|
2021-12-19T14:21:33.000Z
|
import scipy.signal as sig
import numpy as np
from qm.qua import *
import matplotlib.pyplot as plt
import warnings
from qm.QuantumMachinesManager import (
SimulationConfig,
QuantumMachinesManager,
LoopbackInterface,
)
ntaps = 40
delays = [0, 22, 22.25, 22.35]
def delay_gaussian(delay, ntaps):
def get_coefficents(delay, ntaps):
n_extra = 5
full_coeff = np.sinc(
np.linspace(0 - n_extra, ntaps + n_extra, ntaps + 1 + 2 * n_extra)[0:-1]
- delay
)
extra_coeff = np.abs(
np.concatenate((full_coeff[:n_extra], full_coeff[-n_extra:]))
)
if np.any(extra_coeff > 0.02): # Contribution is more than 2%
warnings.warn("Contribution from missing coefficients is not negligible.")
coeff = full_coeff[n_extra:-n_extra]
return coeff
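    # Added sanity check (illustrative, not in the original example): an integer
    # delay d in [0, ntaps) makes the sinc kernel collapse to a one-hot tap vector,
    # e.g. np.allclose(get_coefficents(3, ntaps), np.eye(ntaps)[3]) holds, while a
    # fractional delay such as 22.25 spreads energy over neighbouring taps.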
qmm = QuantumMachinesManager()
with program() as filter_delay:
play("gaussian", "flux1")
pulse_len = 60
feedforward_filter = get_coefficents(delay, ntaps)
print("feedforward taps:", feedforward_filter)
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": +0.0, "filter": {"feedforward": feedforward_filter}},
},
},
},
"elements": {
"flux1": {
"singleInput": {"port": ("con1", 1)},
"intermediate_frequency": 10,
"operations": {
"gaussian": "gaussian_pulse",
},
},
},
"pulses": {
"gaussian_pulse": {
"operation": "control",
"length": pulse_len,
"waveforms": {"single": "gaussian_wf"},
},
},
"waveforms": {
"gaussian_wf": {
"type": "arbitrary",
"samples": 0.25 * sig.gaussian(pulse_len, 5),
},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"xWeights": {
"cosine": [1.0] * (pulse_len // 4),
"sine": [0.0] * (pulse_len // 4),
},
"yWeights": {
"cosine": [0.0] * (pulse_len // 4),
"sine": [1.0] * (pulse_len // 4),
},
},
}
job = qmm.simulate(
config,
filter_delay,
SimulationConfig(duration=150, include_analog_waveforms=True),
)
job.result_handles.wait_for_all_values()
job.get_simulated_samples().con1.plot()
for delay in delays:
delay_gaussian(delay, ntaps)
plt.legend(delays)
plt.axis([270, 340, -0.01, 0.26])
| 28.594059
| 88
| 0.480956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 550
| 0.190443
|
67e7dfe8a3a11d78c472c0f64358e33daa1e6979
| 1,696
|
py
|
Python
|
listener.py
|
chrismarget/ios-icmp-channel
|
b2a09f1c345816f525a3f7aed6a562631b0fc7e6
|
[
"Apache-2.0"
] | 1
|
2018-01-30T01:53:20.000Z
|
2018-01-30T01:53:20.000Z
|
listener.py
|
chrismarget/ios-icmp-channel
|
b2a09f1c345816f525a3f7aed6a562631b0fc7e6
|
[
"Apache-2.0"
] | null | null | null |
listener.py
|
chrismarget/ios-icmp-channel
|
b2a09f1c345816f525a3f7aed6a562631b0fc7e6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
class message(object):
def add(self, idx, b):
self.message[idx] = b
if (b == '\x04') and self.is_complete():
self.print_message()
def get_eom_idx(self):
for i in sorted(self.message.keys()):
if self.message[i] == '\x04':
return i
return False
def is_complete(self):
eom_idx = self.get_eom_idx()
if not eom_idx:
return False
received = self.message.keys()
for i in range(0,eom_idx):
if not (i in received):
return False
return True
def print_message(self):
print self.sender + "\t" + self.get_message()
def get_message(self):
out = ''
eom_idx = self.get_eom_idx()
for i in range(0,eom_idx):
out+=self.message[i]
return out
def __init__(self, sender, idx, b):
self.sender = sender
self.message = {}
self.add(idx, b)
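# Added note (inferred from the code below, not in the original): each incoming
# ICMP packet contributes one byte of text; the last byte of the datagram is the
# message byte and the byte just before it is its position in the message, with a
# 0x04 (EOT) byte marking end-of-message and triggering the print.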
def open_icmp_sniffer():
import socket, sys
import struct
try:
s = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
except socket.error, msg:
print 'Socket create failed: '+str(msg[0])+' Message ' + msg[1]
sys.exit()
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
s.bind(('', 0))
return s
s = open_icmp_sniffer()
messages = {}
while True:
p = s.recvfrom(65565)
sender = p[1][0]
sequence = ord(p[0][-2])
payload = p[0][-1]
if sender not in messages.keys():
messages[sender] = message(sender, sequence, payload)
else:
messages[sender].add(sequence, payload)
| 27.803279
| 71
| 0.56191
| 967
| 0.570165
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.044811
|
67e8afbf9560d8370d86399ad38f91aac9488a9d
| 478
|
py
|
Python
|
Integer to Roman.py
|
HalShaw/Leetcode
|
27c52aac5a8ecc5b5f02e54096a001920661b4bb
|
[
"MIT"
] | 1
|
2016-12-22T04:09:25.000Z
|
2016-12-22T04:09:25.000Z
|
Integer to Roman.py
|
HalShaw/Leetcode
|
27c52aac5a8ecc5b5f02e54096a001920661b4bb
|
[
"MIT"
] | null | null | null |
Integer to Roman.py
|
HalShaw/Leetcode
|
27c52aac5a8ecc5b5f02e54096a001920661b4bb
|
[
"MIT"
] | null | null | null |
class Solution(object):
def intToRoman(self, num):
"""
        Convert an integer to its Roman numeral representation.
:type num: int
:rtype: str
"""
dic = ["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]
        nums = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]  # two parallel arrays, from largest to smallest
res = ""
        for st, n in zip(dic, nums):  # walk both arrays together
            res += st * int(num / n)  # append as many copies of this numeral as fit into num
            num %= n  # keep the remainder and continue with the next value
return res
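# Illustrative usage (not part of the original file):
#   Solution().intToRoman(1994)  # -> 'MCMXCIV'
#   Solution().intToRoman(58)    # -> 'LVIII'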
| 34.142857
| 79
| 0.451883
| 580
| 0.986395
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.491497
|
67e9dd76bdad3ed45018c88774b6229ebe78a253
| 12,780
|
py
|
Python
|
hapiclient/util.py
|
hbatta/client-python
|
1c1d32fce9e84bc1a4938ae7adc30cef8d682aa4
|
[
"BSD-3-Clause"
] | null | null | null |
hapiclient/util.py
|
hbatta/client-python
|
1c1d32fce9e84bc1a4938ae7adc30cef8d682aa4
|
[
"BSD-3-Clause"
] | null | null | null |
hapiclient/util.py
|
hbatta/client-python
|
1c1d32fce9e84bc1a4938ae7adc30cef8d682aa4
|
[
"BSD-3-Clause"
] | null | null | null |
def setopts(defaults, given):
"""Override default keyword dictionary options.
kwargs = setopts(defaults, kwargs)
A warning is shown if kwargs contains a key not found in default.
"""
# Override defaults
for key, value in given.items():
if type(given[key]) == dict:
setopts(defaults[key], given[key])
continue
if key in defaults:
defaults[key] = value
else:
warning('Ignoring invalid keyword option "%s".' % key)
return defaults
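# Illustrative example (not from the package docs): known keys in `given` override
# the defaults, unknown keys are dropped with a warning, e.g.
#   setopts({'logging': False, 'cachedir': '/tmp/hapi'}, {'logging': True})
#   -> {'logging': True, 'cachedir': '/tmp/hapi'}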
def log_test():
log("Test 1", {"logging": True})
log("Test 2", {"logging": False})
def log(msg, opts):
"""Print message to console or file."""
import os
import sys
if not 'logging' in opts:
opts = opts.copy()
opts['logging'] = False
pre = sys._getframe(1).f_code.co_name + '(): '
if isinstance(opts['logging'], bool) and opts['logging']:
if pythonshell() == 'jupyter-notebook':
# Don't show full path information.
msg = msg.replace(opts['cachedir'] + os.path.sep, '')
msg = msg.replace(opts['cachedir'], '')
print(pre + msg)
elif hasattr(opts['logging'], 'write'):
opts['logging'].write(pre + msg + "\n")
opts['logging'].flush()
else:
pass # TODO: error
def jsonparse(res, url):
"""Try/catch of json.loads() function with short error message."""
from json import loads
try:
return loads(res.read().decode('utf-8'))
except:
error('Could not parse JSON from %s' % url)
def pythonshell():
"""Determine python shell
pythonshell() returns
'shell' if started python on command line using "python"
'ipython' if started ipython on command line using "ipython"
'ipython-notebook' if running in Spyder or started with "ipython qtconsole"
'jupyter-notebook' if running in a Jupyter notebook started using executable
named jupyter-notebook
On Windows, jupyter-notebook cannot be detected and ipython-notebook
will be returned.
See also https://stackoverflow.com/a/37661854
"""
import os
env = os.environ
program = ''
if '_' in env:
program = os.path.basename(env['_'])
shell = 'shell'
try:
shell_name = get_ipython().__class__.__name__
if shell_name == 'TerminalInteractiveShell':
shell = 'ipython'
elif shell_name == 'ZMQInteractiveShell':
if 'jupyter-notebook' in program:
shell = 'jupyter-notebook'
else:
shell = 'ipython-notebook'
# Not needed, but could be used
#if 'spyder' in sys.modules:
# shell = 'spyder-notebook'
except:
pass
return shell
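# Illustrative: pythonshell() returns 'ipython' in a terminal IPython session and
# 'jupyter-notebook' inside a notebook launched via the jupyter-notebook executable.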
def warning_test():
"""For testing warning function."""
# Should show warnings in order and only HAPIWarning {1,2} should
# have a different format
from warnings import warn
warn('Normal warning 1')
warn('Normal warning 2')
warning('HAPI Warning 1')
warning('HAPI Warning 2')
warn('Normal warning 3')
warn('Normal warning 4')
def warning(*args):
"""Display a short warning message.
warning(message) raises a warning of type HAPIWarning and displays
"Warning: " + message. Use for warnings when a full stack trace is not
needed.
"""
import warnings
from os import path
from sys import stderr
from inspect import stack
message = args[0]
if len(args) > 1:
fname = args[1]
else:
fname = stack()[1][1]
#line = stack()[1][2]
fname = path.basename(fname)
# Custom warning format function
def _warning(message, category=UserWarning, filename='', lineno=-1, file=None, line=''):
if category.__name__ == "HAPIWarning":
stderr.write("\x1b[31mWarning in " + fname + "\x1b[0m: " + str(message) + "\n")
else:
# Use default showwarning function.
showwarning_default(message, category=UserWarning,
filename='', lineno=-1,
file=None, line='')
stderr.flush()
# Reset showwarning function to default
warnings.showwarning = showwarning_default
class HAPIWarning(Warning):
pass
# Copy default showwarning function
showwarning_default = warnings.showwarning
# Use custom warning function instead of default
warnings.showwarning = _warning
# Raise warning
warnings.warn(message, HAPIWarning)
class HAPIError(Exception):
pass
def error(msg, debug=False):
"""Display a short error message.
error(message) raises an error of type HAPIError and displays
"Error: " + message. Use for errors when a full stack trace is not needed.
If debug=True, full stack trace is shown.
"""
import sys
from inspect import stack
from os import path
    # NOTE: full tracebacks are force-disabled here, so the debug parameter above is effectively ignored.
    debug = False
if pythonshell() != 'shell':
try:
from IPython.core.interactiveshell import InteractiveShell
except:
pass
sys.stdout.flush()
fname = stack()[1][1]
fname = path.basename(fname)
#line = stack()[1][2]
def exception_handler_ipython(self, exc_tuple=None,
filename=None, tb_offset=None,
exception_only=False,
running_compiled_code=False):
#import traceback
exception = sys.exc_info()
if not debug and exception[0].__name__ == "HAPIError":
sys.stderr.write("\033[0;31mHAPIError:\033[0m " + str(exception[1]))
else:
# Use default
showtraceback_default(self, exc_tuple=None,
filename=None, tb_offset=None,
exception_only=False,
running_compiled_code=False)
sys.stderr.flush()
# Reset back to default
InteractiveShell.showtraceback = showtraceback_default
def exception_handler(exception_type, exception, traceback):
if not debug and exception_type.__name__ == "HAPIError":
print("\033[0;31mHAPIError:\033[0m %s" % exception)
else:
# Use default.
sys.__excepthook__(exception_type, exception, traceback)
sys.stderr.flush()
# Reset back to default
sys.excepthook = sys.__excepthook__
if pythonshell() == 'shell':
sys.excepthook = exception_handler
else:
try:
# Copy default function
showtraceback_default = InteractiveShell.showtraceback
# TODO: Use set_custom_exc
# https://ipython.readthedocs.io/en/stable/api/generated/IPython.core.interactiveshell.html
InteractiveShell.showtraceback = exception_handler_ipython
except:
# IPython over-rides this, so this does nothing in IPython shell.
# https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook
# Don't need to copy default function as it is provided as sys.__excepthook__.
sys.excepthook = exception_handler
raise HAPIError(msg)
def head(url):
"""HTTP HEAD request on URL."""
import urllib3
http = urllib3.PoolManager()
    res = http.request('HEAD', url, retries=2)
    if res.status != 200:
        raise Exception('Head request failed on ' + url)
    return res.headers
def urlopen(url):
"""Wrapper to request.get() in urllib3"""
import sys
from json import load
# https://stackoverflow.com/a/2020083
def get_full_class_name(obj):
module = obj.__class__.__module__
if module is None or module == str.__class__.__module__:
return obj.__class__.__name__
return module + '.' + obj.__class__.__name__
import urllib3
c = " If problem persists, a contact email for the server may be listed "
c = c + "at http://hapi-server.org/servers/"
try:
http = urllib3.PoolManager()
res = http.request('GET', url, preload_content=False, retries=2)
if res.status != 200:
try:
jres = load(res)
if 'status' in jres:
if 'message' in jres['status']:
error('\n%s\n %s\n' % (url, jres['status']['message']))
error("Problem with " + url + \
". Server responded with non-200 HTTP status (" \
+ str(res.status) + \
") and invalid HAPI JSON error message in response body." + c)
except:
error("Problem with " + url + \
". Server responded with non-200 HTTP status (" + \
str(res.status) + \
") and no HAPI JSON error message in response body." + c)
except urllib3.exceptions.NewConnectionError:
error('Connection error for : ' + url + c)
except urllib3.exceptions.ConnectTimeoutError:
error('Connection timeout for: ' + url + c)
except urllib3.exceptions.MaxRetryError:
error('Failed to connect to: ' + url + c)
except urllib3.exceptions.ReadTimeoutError:
error('Read timeout for: ' + url + c)
except urllib3.exceptions.LocationParseError:
error('Could not parse URL: ' + url)
except urllib3.exceptions.LocationValueError:
error('Invalid URL: ' + url)
except urllib3.exceptions.HTTPError as e:
error('Exception ' + get_full_class_name(e) + " for: " + url)
except Exception as e:
error(type(sys.exc_info()[1]).__name__ + ': ' \
+ str(e) + ' for URL: ' + url)
return res
def urlretrieve(url, fname, check_last_modified=False, **kwargs):
"""Download URL to file
urlretrieve(url, fname, check_last_modified=False, **kwargs)
    If check_last_modified=True, `fname` is found, the URL returns a Last-Modified
    header, and `fname`'s timestamp is after the Last-Modified timestamp, the URL
    is not downloaded.
"""
import shutil
from os import path, utime, makedirs
from time import mktime, strptime
if check_last_modified:
if modified(url, fname, **kwargs):
log('Downloading ' + url + ' to ' + fname, kwargs)
res = urlretrieve(url, fname, check_last_modified=False)
if "Last-Modified" in res.headers:
                # Change access and modified time to match that on the server.
# TODO: Won't need if using file.head in modified().
urlLastModified = mktime(strptime(res.headers["Last-Modified"],
"%a, %d %b %Y %H:%M:%S GMT"))
utime(fname, (urlLastModified, urlLastModified))
else:
log('Local version of ' + fname + ' is up-to-date; using it.', kwargs)
dirname = path.dirname(fname)
if not path.exists(dirname):
makedirs(dirname)
with open(fname, 'wb') as out:
res = urlopen(url)
shutil.copyfileobj(res, out)
return res
def modified(url, fname, **kwargs):
"""Check if timestamp on file is later than Last-Modifed in HEAD request"""
from os import stat, path
from time import mktime, strptime
debug = False
if not path.exists(fname):
return True
# HEAD request on url
log('Making head request on ' + url, kwargs)
headers = head(url)
# TODO: Write headers to file.head
if debug:
print("Header:\n--\n")
print(headers)
print("--")
# TODO: Get this from file.head if found
fileLastModified = stat(fname).st_mtime
if "Last-Modified" in headers:
urlLastModified = mktime(strptime(headers["Last-Modified"],
"%a, %d %b %Y %H:%M:%S GMT"))
if debug:
print("File Last Modified = %s" % fileLastModified)
print("URL Last Modified = %s" % urlLastModified)
if urlLastModified > fileLastModified:
return True
return False
else:
if debug:
print("No Last-Modified header. Will re-download")
# TODO: Read file.head and compare etag
return True
def urlquote(url):
"""Python 2/3 urlquote compatability function.
If Python 3, returns
urllib.parse.quote(url)
If Python 2, returns
urllib.quote(url)
"""
import sys
if sys.version_info[0] == 2:
from urllib import quote
return quote(url)
import urllib.parse
return urllib.parse.quote(url)
| 30.356295
| 103
| 0.590141
| 76
| 0.005947
| 0
| 0
| 0
| 0
| 0
| 0
| 4,642
| 0.363224
|
67ea232a964b415b5c48734cb2b31e366146e901
| 269
|
py
|
Python
|
docs/examples/combine-configs/convert.py
|
Mbompr/fromconfig
|
eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27
|
[
"Apache-2.0"
] | 19
|
2021-03-18T16:48:03.000Z
|
2022-03-02T13:09:21.000Z
|
docs/examples/combine-configs/convert.py
|
Mbompr/fromconfig
|
eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27
|
[
"Apache-2.0"
] | 3
|
2021-04-23T23:03:29.000Z
|
2021-05-11T14:09:16.000Z
|
docs/examples/combine-configs/convert.py
|
Mbompr/fromconfig
|
eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27
|
[
"Apache-2.0"
] | 3
|
2021-04-19T22:05:34.000Z
|
2022-02-21T11:32:16.000Z
|
"""Convert file format."""
import fire
import fromconfig
def convert(path_input, path_output):
"""Convert input into output with load and dump."""
fromconfig.dump(fromconfig.load(path_input), path_output)
if __name__ == "__main__":
fire.Fire(convert)
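# Example invocation (illustrative; the file names are placeholders and the
# format-by-extension behaviour is an assumption about fromconfig.load/dump):
#   python convert.py config.yaml config.json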
| 17.933333
| 61
| 0.717472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.32342
|
67eb8e7c17780b803858f13f5e39eadc802e465d
| 11,257
|
py
|
Python
|
pyfibot/modules/module_rss.py
|
aapa/pyfibot
|
a8a4330d060b05f0ce63cbcfc6915afb8141955f
|
[
"BSD-3-Clause"
] | null | null | null |
pyfibot/modules/module_rss.py
|
aapa/pyfibot
|
a8a4330d060b05f0ce63cbcfc6915afb8141955f
|
[
"BSD-3-Clause"
] | null | null | null |
pyfibot/modules/module_rss.py
|
aapa/pyfibot
|
a8a4330d060b05f0ce63cbcfc6915afb8141955f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals, print_function, division
import feedparser
import dataset
from twisted.internet.reactor import callLater
from threading import Thread
import twisted.internet.error
import logging
logger = logging.getLogger('module_rss')
DATABASE = None
updater = None
botref = None
config = {}
def init(bot, testing=False):
''' Initialize updater '''
global DATABASE
global config
global botref
global updater
global logger
if testing:
DATABASE = dataset.connect('sqlite:///:memory:')
else:
DATABASE = dataset.connect('sqlite:///databases/rss.db')
logger.info('RSS module initialized')
botref = bot
config = bot.config.get('rss', {})
finalize()
# As there's no signal if this is a rehash or restart
# update feeds in 30 seconds
updater = callLater(30, update_feeds)
def finalize():
''' Finalize updater (rehash etc) so we don't leave an updater running '''
global updater
global logger
logger.info('RSS module finalized')
if updater:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = None
def get_feeds(**kwargs):
''' Get feeds from database '''
return [
Feed(f['network'], f['channel'], f['id'])
for f in list(DATABASE['feeds'].find(**kwargs))
]
def find_feed(network, channel, **kwargs):
''' Find specific feed from database '''
f = DATABASE['feeds'].find_one(network=network, channel=channel, **kwargs)
if not f:
return
return Feed(f['network'], f['channel'], f['id'])
def add_feed(network, channel, url):
''' Add feed to database '''
f = Feed(network=network, channel=channel, url=url)
return (f.initialized, f.read())
def remove_feed(network, channel, id):
''' Remove feed from database '''
f = find_feed(network=network, channel=channel, id=int(id))
if not f:
return
DATABASE['feeds'].delete(id=f.id)
DATABASE['items_%i' % (f.id)].drop()
return f
def update_feeds(cancel=True, **kwargs):
# from time import sleep
''' Update all feeds in the DB '''
global config
global updater
global logger
logger.info('Updating RSS feeds started')
for f in get_feeds(**kwargs):
Thread(target=f.update).start()
# If we get a cancel, cancel the existing updater
# and start a new one
# NOTE: Not sure if needed, as atm cancel isn't used in any command...
if cancel:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = callLater(5 * 60, update_feeds)
def command_rss(bot, user, channel, args):
commands = ['list', 'add', 'remove', 'latest', 'update']
args = args.split()
if not args or args[0] not in commands:
return bot.say(channel, 'rss: valid arguments are [%s]' % (', '.join(commands)))
command = args[0]
network = bot.network.alias
# Get latest feed item from database
# Not needed? mainly for debugging
# Possibly useful for checking if feed still exists?
if command == 'latest':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss latest <id from list>"')
feed = find_feed(network=network, channel=channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
item = feed.get_latest()
if not item:
return bot.say(channel, 'no items in feed')
return bot.say(channel, feed.get_item_str(item))
# List all feeds for current network && channel
if command == 'list':
feeds = get_feeds(network=network, channel=channel)
if not feeds:
return bot.say(channel, 'no feeds set up')
for f in feeds:
bot.say(channel, '%02i: %s <%s>' % (f.id, f.name, f.url))
return
# Rest of the commands are only for admins
if not bot.factory.isAdmin(user):
return bot.say(channel, 'only "latest" and "list" available for non-admins')
# Add new feed for channel
if command == 'add':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss add url"')
init, items = add_feed(network, channel, url=args[1])
if not init:
return bot.say(channel, 'feed already added')
return bot.say(channel, 'feed added with %i items' % len(items))
# remove feed from channel
if command == 'remove':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss remove <id from list>"')
feed = remove_feed(network, channel, id=args[1])
if not feed:
return bot.say(channel, 'feed not found, no action taken')
return bot.say(channel, 'feed "%s" <%s> removed' % (feed.name, feed.url))
# If there's no args, update all feeds (even for other networks)
# If arg exists, try to update the feed...
if command == 'update':
if len(args) < 2:
bot.say(channel, 'feeds updating')
update_feeds()
return
feed = find_feed(network, channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
feed.update()
return
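# Illustrative chat usage (the '.rss' prefix is an assumption about the bot's command prefix):
#   .rss add https://example.com/feed.xml   -> add a feed for the current channel
#   .rss list                               -> list feed ids, names and URLs
#   .rss remove 2                           -> remove feed with id 2 (admins only)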
class Feed(object):
''' Feed object to simplify feed handling '''
def __init__(self, network, channel, id=None, url=None):
# Not sure if (this complex) init is needed...
self.id = id
self.network = network
self.channel = channel
self.url = url
if url:
self.url = url
self.initialized = False
# load feed details from database
self._get_feed_from_db()
def __repr__(self):
return '(%s, %s, %s)' % (self.url, self.channel, self.network)
def __unicode__(self):
return '%i - %s' % (self.id, self.url)
def __init_feed(self):
''' Initialize databases for feed '''
DATABASE['feeds'].insert({
'network': self.network,
'channel': self.channel,
'url': self.url,
'name': '',
})
# Update feed to match the created
feed = self._get_feed_from_db()
# Initialize item-database for feed
self.__save_item({
'title': 'PLACEHOLDER',
'link': 'https://github.com/lepinkainen/pyfibot/',
'printed': True,
})
self.initialized = True
return feed
def __get_items_tbl(self):
''' Get table for feeds items '''
return DATABASE[('items_%i' % (self.id))]
def __parse_feed(self):
''' Parse items from feed '''
f = feedparser.parse(self.url)
if self.initialized:
self.update_feed_info({'name': f['channel']['title']})
items = [{
'title': i['title'],
'link': i['link'],
} for i in f['items']]
return (f, items)
def __save_item(self, item, table=None):
''' Save item to feeds database '''
if table is None:
table = self.__get_items_tbl()
# If override is set or the item cannot be found, it's a new one
if not table.find_one(title=item['title'], link=item['link']):
# If printed isn't set, set it to the value in self.initialized (True, if initializing, else False)
# This is to prevent flooding when adding a new feed...
if 'printed' not in item:
item['printed'] = self.initialized
table.insert(item)
def __mark_printed(self, item, table=None):
''' Mark item as printed '''
if table is None:
table = self.__get_items_tbl()
table.update({'id': item['id'], 'printed': True}, ['id'])
def _get_feed_from_db(self):
''' Get self from database '''
feed = None
if self.url and not self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, url=self.url)
if self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, id=self.id)
if not feed:
feed = self.__init_feed()
self.id = feed['id']
self.network = feed['network']
self.channel = feed['channel']
self.url = feed['url']
# TODO: Name could just be the domain part of url?
self.name = feed['name']
return feed
def get_item_str(self, item):
return '[%s] %s <%s>' % (''.join([c for c in self.name][0:18]), item['title'], item['link'])
def get_latest(self):
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(order_by='id'))]
if not items:
return
return items[-1]
def update_feed_info(self, data):
''' Update feed information '''
data['id'] = self.id
if 'url' in data:
self.url = data['url']
DATABASE['feeds'].update(data, ['id'])
# Update self to match new...
self._get_feed_from_db()
def read(self):
''' Read new items from feed '''
f, items = self.__parse_feed()
# Get table -reference to speed up stuff...
tbl = self.__get_items_tbl()
# Save items in DB, saving takes care of duplicate checks
for i in reversed(items):
self.__save_item(i, tbl)
# Set initialized to False, as we have read everything...
self.initialized = False
return items
def get_new_items(self, mark_printed=False):
''' Get all items which are not marked as printed, if mark_printed is set, update printed also. '''
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(printed=False))]
if mark_printed:
for i in items:
self.__mark_printed(i, tbl)
return items
def update(self):
global logger
global botref
# If botref isn't defined, bot isn't running, no need to run
# (used for tests?)
if not botref:
return
# Read all items for feed
logger.debug('Feed "%s" updating' % (self.name))
self.read()
# Get number of unprinted items (and don't mark as printed)
items = self.get_new_items(False)
if len(items) == 0:
logger.debug('Feed "%s" containes no new items, doing nothing.' % (self.name))
return
logger.debug('Feed "%s" updated with %i new items' % (self.name, len(items)))
# If bot instance isn't found, don't print anything
bot_instance = botref.find_bot_for_network(self.network)
if not bot_instance:
logger.error('Bot instance for "%s" not found, not printing' % (self.name))
return
logger.debug('Printing new items for "%s"' % (self.name))
# Get all new (not printed) items and print them
items = self.get_new_items(True)
for i in items:
bot_instance.say(self.channel, self.get_item_str(i))
if __name__ == '__main__':
f = Feed('ircnet', '#pyfibot', 'http://feeds.feedburner.com/ampparit-kaikki?format=xml')
f.read()
for i in f.get_new_items(True):
print(i)
| 32.819242
| 111
| 0.587634
| 5,762
| 0.511859
| 0
| 0
| 0
| 0
| 0
| 0
| 3,474
| 0.308608
|
67ec5c96d81577346cea04b4409e2275d4e56466
| 15,335
|
py
|
Python
|
main.py
|
omidsakhi/progressive_introvae
|
8f052ca7202196fe214ea238afe60e806660d6d4
|
[
"MIT"
] | 5
|
2018-10-19T03:30:27.000Z
|
2019-03-25T06:01:27.000Z
|
main.py
|
omidsakhi/progressive_introvae
|
8f052ca7202196fe214ea238afe60e806660d6d4
|
[
"MIT"
] | 1
|
2019-03-27T08:39:55.000Z
|
2019-03-27T08:39:55.000Z
|
main.py
|
omidsakhi/progressive_introvae
|
8f052ca7202196fe214ea238afe60e806660d6d4
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, ops, utils
# Standard Imports
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
from PIL import Image
import input_pipelines
import models
from tensorflow.contrib.tpu.python.tpu import tpu_config # pylint: disable=E0611
from tensorflow.contrib.tpu.python.tpu import tpu_estimator # pylint: disable=E0611
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer # pylint: disable=E0611
from tensorflow.python.estimator import estimator # pylint: disable=E0611
FLAGS = flags.FLAGS
global dataset
dataset = input_pipelines
USE_TPU = False
DRY_RUN = False
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default='omid-sakhi',
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('data_dir', 'gs://os_celeba/dataset' if USE_TPU else 'C:/Projects/datasets/tfr-celeba128',
'Bucket/Folder that contains the data tfrecord files')
flags.DEFINE_string(
'model_dir', 'gs://os_celeba/output1' if USE_TPU else './output', 'Output model directory')
flags.DEFINE_integer('noise_dim', 256,
'Number of dimensions for the noise vector')
flags.DEFINE_integer('batch_size', 128 if USE_TPU else 32,
'Batch size for both generator and discriminator')
flags.DEFINE_integer('start_resolution', 8,
                     'Starting resolution')
flags.DEFINE_integer('end_resolution', 128,
                     'Ending resolution')
flags.DEFINE_integer('resolution_steps', 10000 if not DRY_RUN else 60,
                     'Resolution steps')
flags.DEFINE_integer('num_shards', 8, 'Number of TPU chips')
flags.DEFINE_integer('train_steps', 500000, 'Number of training steps')
flags.DEFINE_integer('train_steps_per_eval', 5000 if USE_TPU else (200 if not DRY_RUN else 20) ,
'Steps per eval and image generation')
flags.DEFINE_integer('iterations_per_loop', 500 if USE_TPU else (50 if not DRY_RUN else 5) ,
'Steps per interior TPU loop. Should be less than'
' --train_steps_per_eval')
flags.DEFINE_float('learning_rate', 0.001, 'LR for both D and G')
flags.DEFINE_boolean('eval_loss', False,
'Evaluate discriminator and generator loss during eval')
flags.DEFINE_boolean('use_tpu', True if USE_TPU else False,
'Use TPU for training')
flags.DEFINE_integer('num_eval_images', 100,
'Number of images for evaluation')
def lerp_update_ops(resolution, value):
name = str(resolution) + 'x' + str(resolution)
gt = tf.get_default_graph().get_tensor_by_name('Decoder/'+name+'_t:0')
assert(gt is not None)
dt = tf.get_default_graph().get_tensor_by_name('Encoder/'+name+'_t:0')
assert(dt is not None)
return [tf.assign(gt, value), tf.assign(dt, value)]
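# Added note (inferred from the code): the '<res>x<res>_t' variables created inside
# the Decoder/Encoder scopes act as progressive-growing fade-in weights; the train
# branch of model_fn assigns min(resolution_step / resolution_steps, 1.0) to them on
# every step, so a newly added resolution block is blended in linearly.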
def model_fn(features, labels, mode, params):
del labels
resolution = params['resolution']
if mode == tf.estimator.ModeKeys.PREDICT:
###########
# PREDICT #
###########
random_noise = features['random_noise']
predictions = {
'generated_images': models.dec(random_noise, FLAGS.start_resolution, resolution)
}
if FLAGS.use_tpu:
return tpu_estimator.TPUEstimatorSpec(mode=mode, predictions=predictions)
else:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
def fLreg(mean, std):
return tf.reduce_mean(tf.reduce_sum(1.0 + tf.log(tf.square(std) + 1e-8) - tf.square(mean) - tf.square(std), axis=1)) * (10.0 ** (-np.log2(resolution)))
def fLae(x1,x2):
return tf.reduce_mean(tf.squared_difference(x1,x2))
def ng(x):
return tf.stop_gradient(x)
resolution_step = utils.get_or_create_resolution_step()
fadein_rate = tf.minimum(tf.cast(resolution_step, tf.float32) / float(FLAGS.resolution_steps), 1.0)
batch_size = params['batch_size'] # pylint: disable=unused-variable
X = features['real_images']
Zmean, Zstd = models.enc(X, FLAGS.start_resolution, resolution)
Z = ops.sample(Zmean, Zstd)
Zp = features['random_noise_1']
Xr = models.dec(Z, FLAGS.start_resolution, resolution)
Xp = models.dec(Zp, FLAGS.start_resolution, resolution)
Lae = tf.reduce_mean(fLae(Xr,X))
Zr = models.enc(ng(Xr), FLAGS.start_resolution, resolution)
Zpp = models.enc(ng(Xp), FLAGS.start_resolution, resolution)
m = 90
enc_zr = tf.nn.relu(m - fLreg(Zr[0],Zr[1]))
enc_zpp = tf.nn.relu(m - fLreg(Zpp[0], Zpp[1]))
enc_loss = fLreg(Zmean, Zstd) + (enc_zr + enc_zpp) + Lae
Zr = models.enc(Xr, FLAGS.start_resolution, resolution)
Zpp = models.enc(Xp, FLAGS.start_resolution, resolution)
rec_zr = fLreg(Zr[0],Zr[1])
rec_zpp = fLreg(Zpp[0], Zpp[1])
dec_loss = (rec_zr + rec_zpp) + Lae
with tf.variable_scope('Penalties'):
tf.summary.scalar('enc_loss', tf.reduce_mean(enc_loss))
tf.summary.scalar('dec_loss', tf.reduce_mean(dec_loss))
tf.summary.scalar('mean', tf.reduce_mean(Zmean))
tf.summary.scalar('std', tf.reduce_mean(Zstd))
tf.summary.scalar('lae', tf.reduce_mean(Lae))
tf.summary.scalar('rec_zr', tf.reduce_mean(rec_zr))
tf.summary.scalar('rec_zpp', tf.reduce_mean(rec_zpp))
tf.summary.scalar('enc_zr', tf.reduce_mean(enc_zr))
tf.summary.scalar('enc_zpp', tf.reduce_mean(enc_zpp))
if mode == tf.estimator.ModeKeys.TRAIN or mode == 'RESOLUTION_CHANGE':
#########
# TRAIN #
#########
e_optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate, beta1=0.9, beta2=0.999)
d_optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate, beta1=0.9, beta2=0.999)
if FLAGS.use_tpu:
e_optimizer = tpu_optimizer.CrossShardOptimizer(e_optimizer)
d_optimizer = tpu_optimizer.CrossShardOptimizer(d_optimizer)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
e_step = e_optimizer.minimize(
enc_loss,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='Encoder'))
d_step = d_optimizer.minimize(
dec_loss,
var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope='Decoder'))
with tf.control_dependencies([e_step, d_step]):
increment_global_step = tf.assign_add(
tf.train.get_or_create_global_step(), 1)
increment_resolution_step = tf.assign_add(
utils.get_or_create_resolution_step(), 1)
if resolution>=FLAGS.start_resolution * 2:
with tf.control_dependencies([increment_global_step, increment_resolution_step]):
lerp_ops = lerp_update_ops(resolution, fadein_rate)
joint_op = tf.group([d_step, e_step, lerp_ops[0], lerp_ops[1], increment_global_step, increment_resolution_step])
else:
joint_op = tf.group([d_step, e_step, increment_global_step, increment_resolution_step])
if mode == 'RESOLUTION_CHANGE':
return [d_optimizer, e_optimizer]
else:
if FLAGS.use_tpu:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=dec_loss + enc_loss,
train_op=joint_op)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
loss=dec_loss + enc_loss,
train_op=joint_op)
elif mode == tf.estimator.ModeKeys.EVAL:
########
# EVAL #
########
if FLAGS.use_tpu:
def _eval_metric_fn(e_loss, d_loss):
# When using TPUs, this function is run on a different machine than the
# rest of the model_fn and should not capture any Tensors defined there
return {
'enc_loss': tf.metrics.mean(e_loss),
'dec_loss': tf.metrics.mean(d_loss)}
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
            loss=tf.reduce_mean(enc_loss + dec_loss),
eval_metrics=(_eval_metric_fn, [enc_loss, dec_loss]))
else:
return tf.estimator.EstimatorSpec(
mode=mode,
loss=tf.reduce_mean(enc_loss + dec_loss),
eval_metric_ops={
'enc_loss': tf.metrics.mean(enc_loss),
'dec_loss': tf.metrics.mean(dec_loss)
})
raise ValueError('Invalid mode provided to model_fn')
def noise_input_fn(params):
np.random.seed(0)
noise_dataset = tf.data.Dataset.from_tensors(tf.constant(
np.random.randn(params['batch_size'], FLAGS.noise_dim), dtype=tf.float32))
noise = noise_dataset.make_one_shot_iterator().get_next()
return {'random_noise': noise}, None
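# Note (descriptive, added): the fixed np.random.seed(0) keeps the prediction noise
# identical across evaluations, so generated image grids are comparable between checkpoints.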
def get_estimator(model_dir, resolution):
tpu_cluster_resolver = None
if FLAGS.use_tpu:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
config = tpu_config.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
tpu_config=tpu_config.TPUConfig(
num_shards=FLAGS.num_shards,
iterations_per_loop=FLAGS.iterations_per_loop))
est = tpu_estimator.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
config=config,
params={"data_dir": FLAGS.data_dir, "resolution": resolution},
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size)
local_est = tpu_estimator.TPUEstimator(
model_fn=model_fn,
use_tpu=False,
config=config,
params={"data_dir": FLAGS.data_dir, "resolution": resolution},
predict_batch_size=FLAGS.num_eval_images)
else:
est = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=model_dir,
params={"data_dir": FLAGS.data_dir, "batch_size": FLAGS.batch_size, "resolution": resolution})
local_est = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=model_dir,
params={"data_dir": FLAGS.data_dir, "batch_size": FLAGS.num_eval_images, "resolution": resolution})
return est, local_est
def change_resolution(resolution):
batch_size = 1
graph = tf.Graph()
store_dir = os.path.join(FLAGS.model_dir, 'resolution_' + str(resolution))
restore_dir = os.path.join(FLAGS.model_dir, 'resolution_' + str(resolution // 2))
tf.gfile.MakeDirs(store_dir)
ckpt_file = store_dir + '/model.ckp'
with graph.as_default(): # pylint: disable=E1129
train_input = dataset.TrainInputFunction(FLAGS.noise_dim, resolution, 'NHWC')
params = {'data_dir' : FLAGS.data_dir, 'batch_size' : batch_size , "resolution": resolution}
features, labels = train_input(params)
optimizers = model_fn(features, labels, 'RESOLUTION_CHANGE', params)
global_step = tf.train.get_or_create_global_step()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
utils.restore(sess, restore_dir)
utils.reset_resolution_step()
for opt in optimizers:
sess.run(tf.variables_initializer(opt.variables()))
saver = tf.train.Saver(name='main_saver')
saver.save(sess, ckpt_file, global_step = global_step)
def main(argv):
del argv
tf.gfile.MakeDirs(os.path.join(FLAGS.model_dir))
resolution = FLAGS.end_resolution
initial_checkpoint = None
while initial_checkpoint is None and resolution != 1:
model_dir = os.path.join(FLAGS.model_dir, 'resolution_' + str(resolution))
initial_checkpoint = tf.train.latest_checkpoint(model_dir)
resolution = resolution // 2
if initial_checkpoint is None or resolution == 1:
resolution = FLAGS.start_resolution
model_dir = os.path.join(FLAGS.model_dir, 'resolution_' + str(resolution))
else:
resolution *= 2
model_dir = os.path.join(FLAGS.model_dir, 'resolution_' + str(resolution))
est, local_est = get_estimator(model_dir, resolution)
current_step = estimator._load_global_step_from_checkpoint_dir(
model_dir) # pylint: disable=protected-access,line-too-long
tf.logging.info('Starting training for %d steps, current step: %d' %
(FLAGS.train_steps, current_step))
while current_step < FLAGS.train_steps:
if current_step != 0 and current_step % FLAGS.resolution_steps == 0 and resolution != FLAGS.end_resolution:
resolution *= 2
tf.logging.info('Change of resolution from %d to %d' % (resolution // 2, resolution))
model_dir = os.path.join(FLAGS.model_dir, 'resolution_' + str(resolution))
change_resolution(resolution)
est, local_est = get_estimator(model_dir, resolution)
next_checkpoint = min(current_step + FLAGS.train_steps_per_eval,
FLAGS.train_steps)
est.train(input_fn=dataset.TrainInputFunction(FLAGS.noise_dim, resolution, 'NHWC'),
max_steps=next_checkpoint)
current_step = next_checkpoint
tf.logging.info('Finished training step %d' % current_step)
if FLAGS.eval_loss:
metrics = est.evaluate(input_fn=dataset.TrainInputFunction(FLAGS.noise_dim, resolution, 'NHWC'),
steps=FLAGS.num_eval_images // FLAGS.batch_size)
tf.logging.info('Finished evaluating')
tf.logging.info(metrics)
generated_iter = local_est.predict(input_fn=noise_input_fn)
images = [p['generated_images'][:, :, :] for p in generated_iter]
filename = os.path.join(FLAGS.model_dir, '%s-%s.png' % (
str(current_step).zfill(5), 'x' + str(resolution)))
utils.write_images(images, filename, 'NHWC')
tf.logging.info('Finished generating images')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
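# Illustrative note (not part of the original script): the loop in main() above
# implements progressive growing. With hypothetical flag values
# start_resolution=8, end_resolution=128, resolution_steps=10000 and
# train_steps_per_eval=1000, the resolution doubles each time the global step
# reaches a multiple of resolution_steps:
#   steps     0 -  9999  -> train at   8x8
#   steps 10000 - 19999  -> train at  16x16
#   steps 20000 - 29999  -> train at  32x32
#   ... and so on up to 128x128, after which the resolution stays fixed.
# Each doubling calls change_resolution(), which restores the weights saved for
# the previous resolution and re-saves them as the starting checkpoint in the
# new resolution's model directory.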
| 44.708455
| 159
| 0.637105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,639
| 0.17209
|
67ecb4f05375d9a4dfbfec0d8b5a28b3678e0e4e
| 172
|
py
|
Python
|
docs/examples/timer.py
|
vlcinsky/nameko
|
88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d
|
[
"Apache-2.0"
] | 3,425
|
2016-11-10T17:12:42.000Z
|
2022-03-31T19:07:49.000Z
|
docs/examples/timer.py
|
vlcinsky/nameko
|
88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d
|
[
"Apache-2.0"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
docs/examples/timer.py
|
vlcinsky/nameko
|
88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d
|
[
"Apache-2.0"
] | 420
|
2016-11-17T05:46:42.000Z
|
2022-03-23T12:36:06.000Z
|
from nameko.timer import timer
class Service:
name ="service"
@timer(interval=1)
def ping(self):
# method executed every second
print("pong")
| 17.2
| 38
| 0.627907
| 139
| 0.80814
| 0
| 0
| 99
| 0.575581
| 0
| 0
| 45
| 0.261628
|
67ed812b563acfcc4e10ecbff190182561180c0d
| 752
|
py
|
Python
|
app/controllers/config/system/slack.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 152
|
2020-12-07T13:26:53.000Z
|
2022-03-23T02:00:04.000Z
|
app/controllers/config/system/slack.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 16
|
2020-12-07T17:04:36.000Z
|
2022-03-10T11:12:52.000Z
|
app/controllers/config/system/slack.py
|
grepleria/SnitchDNS
|
24f98b01fd5fca9aa2c660d6ee15742f2e44915c
|
[
"MIT"
] | 36
|
2020-12-09T13:04:40.000Z
|
2022-03-12T18:14:36.000Z
|
from .. import bp
from flask import request, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
@bp.route('/slack', methods=['GET'])
@login_required
@admin_required
def slack():
return render_template('config/system/slack.html')
@bp.route('/slack/save', methods=['POST'])
@login_required
@admin_required
def slack_save():
provider = Provider()
settings = provider.settings()
slack_enabled = True if int(request.form.get('slack_enabled', 0)) == 1 else False
settings.save('slack_enabled', slack_enabled)
flash('Settings saved', 'success')
return redirect(url_for('config.slack'))
| 26.857143
| 85
| 0.743351
| 0
| 0
| 0
| 0
| 512
| 0.680851
| 0
| 0
| 127
| 0.168883
|
67edef8325e323ad0e7a7ee375973574e5b9dbb3
| 845
|
py
|
Python
|
setup.py
|
7AM7/Arabic-dialects-segmenter-with-flask
|
a69e060fa25a5905864dae7d500c4f46436e0c40
|
[
"MIT"
] | 1
|
2021-07-07T06:54:43.000Z
|
2021-07-07T06:54:43.000Z
|
setup.py
|
7AM7/Arabic-dialects-segmenter-with-flask
|
a69e060fa25a5905864dae7d500c4f46436e0c40
|
[
"MIT"
] | null | null | null |
setup.py
|
7AM7/Arabic-dialects-segmenter-with-flask
|
a69e060fa25a5905864dae7d500c4f46436e0c40
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='FarasaPy3',
version='3.0.0',
packages=find_packages(exclude=['tests*']),
license='MIT',
description='Farasa (which means “insight” in Arabic), is a fast and accurate text processing toolkit for Arabic text.',
long_description=long_description,
long_description_content_type="text/markdown",
    install_requires=['requests'],  # note: json is part of the Python standard library, not an installable dependency
url='https://github.com/ahmed451/SummerInternship2020-PyPIFarasa/tree/master/7AM7',
author='AM7',
author_email='ahmed.moorsy798@gmail.com',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
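# Typical local usage (illustrative): from the directory containing this
# setup.py, build and install the package with
#   pip install .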
| 32.5
| 124
| 0.673373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 417
| 0.491166
|
67eef460ddcba049717ee205dce3da7ab1a62a5b
| 45,026
|
py
|
Python
|
oldversion/crystIT_v0.1.py
|
GKieslich/crystIT
|
2632b544b3ec0f4893f84aa6bb73f03a7f3c0890
|
[
"MIT"
] | 4
|
2020-10-14T04:35:40.000Z
|
2022-03-31T08:11:40.000Z
|
oldversion/crystIT_v0.1.py
|
GKieslich/crystIT
|
2632b544b3ec0f4893f84aa6bb73f03a7f3c0890
|
[
"MIT"
] | null | null | null |
oldversion/crystIT_v0.1.py
|
GKieslich/crystIT
|
2632b544b3ec0f4893f84aa6bb73f03a7f3c0890
|
[
"MIT"
] | null | null | null |
import ase
from ase.spacegroup import crystal
from ase.units import kB,mol,kJ
import spglib
import pyxtal
from pyxtal.symmetry import Group
import numpy # arrays
import math # log
import os.path # isfile, isdir
import copy # copy dictionary
import glob # iterate through dir
import time # for batch processing
import io # creating file from string
import multiprocessing # for batch mode
import warnings # control warning output
import traceback # detailed error messages
warningCache = ''  # accumulates warnings and errors in batch mode (see customWarnings below)
# Default Settings
symmetryTolerance = 5e-3 # distance tolerance in cartesian coordinates to find crystal symmetry
occupancy = False # show menu to correct occupancy values
maxThreads = multiprocessing.cpu_count() # maximum no of parallel threads
decimalSeparator = '.'
entropyOptions = False # calculation of entropy values from Krivovichev (2016)
recursive = False # subdirectory scanning in batch mode
# except for userMenu() these settings are usually forwarded through function parameters, as nested functions sometimes do not realize that global variables have been changed
# Program Information
programName = 'crystIT'
paper = 'Kaußler, Kieslich (2020): unpublished'
versionNumber = '0.1'
releaseDate = '2020-09-22'
authors = 'Clemens Kaußler and Gregor Kieslich'
institution = 'Technical University of Munich'
def getComplexity(structure, pathToCif, verbose, entropy, sym):
"""
calculates complexity of crystal structure based on an ASE Atoms object (including tags, storing CIF data-block)
Parameters:
arg1 (Atoms): ASE Atoms object, including CIF data tags (store_tags = True)
arg2 (string): path to CIF
arg3 (bool): output results to console (True) or suppress console output and return result array (False)
arg4 (bool): entropy options
arg5 (float): symmetry tolerance value in cartesian coordinates
Returns: if (arg3 == False): array will be returned; most important variables given below:
if (arg4 == True): values in {brackets} are returned additionally
array:
warningCache, errors and warnings
chemical_formula, chemical formula composed from CIF-entry, ignoring dummy entries
aSG, spacegroup assumed by spglib
SG, spacegroup given in CIF
atomsPerUnitCell, number of atoms per crystallographic unit cell (vacancies do not count as atoms)
atomsPerPrimitiveUnitCell, number of atoms per primitive unit cell (vacancies do not count as atoms)
positionsPerPrimitiveUnitCell, number of positions per primitive unit cell, corresponding to the sum over the crystallographic orbits' multiplicities
uniqueSpecies, number of unique species, defined by combination of element (vacancies count as elements too) and crystallographic orbit
aritySum, number of coordinational degrees of freedom per reduced unit cell
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density, {S_comb_max_molar, Delta_S_comb_molar,} combinatorial information, as defined by S. Krivovichev in 2014 (corresponding to I_G, I_G,max, I_G,norm, I_G,total, rho_inf, S_cfg_max, Delta S), but extended by partial occupancies
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density, {S_coor_max_molar, Delta_S_coor_molar,} coordinational information, as defined by W. Hornfeck in 2020, NOTE: sum over unique Wyckoff positions
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density, {S_conf_max_molar, Delta_S_conf_molar} configurational information, as defined by W. Hornfeck in 2020
"""
if not verbose:
global warningCache
# direct input of ASE Atoms object into spglib is deprecated!
cell = (
structure.get_cell(),
structure.get_scaled_positions(),
structure.get_atomic_numbers()
)
# find reduced unit cell
primitiveCell = spglib.find_primitive(cell, symprec = sym)
# get symmetry from reduced unit cell
primitiveDataset = spglib.get_symmetry_dataset(primitiveCell, symprec = sym)
primitiveCrystallographicOrbits = primitiveDataset['crystallographic_orbits']
primitiveWyckoff = primitiveDataset['wyckoffs']
# compare spacegroup set in CIF (SG) with assumed spacegroup (aSG)
cifTags = structure.info.copy()
try:
iSG = cifTags['_symmetry_space_group_name_h-m']
except:
try:
iSG = cifTags['_space_group_name_h-m_alt']
except:
iSG = 'not set'
try:
iSGNo = str(cifTags['_symmetry_int_tables_number'])
except:
try:
iSGNo = str(cifTags['_space_group_it_number'])
except:
iSGNo = 'not set'
SG = iSG + ' (' + iSGNo + ')'
aSG = spglib.get_spacegroup(cell, symprec = sym)
groupnumber = aSG[aSG.index('(')+1:aSG.index(')')]
if not iSGNo == 'not set' and not iSGNo == groupnumber:
if verbose:
print(f'Wrong space group detected by spglib: {groupnumber} vs. {iSGNo} given in CIF. Try to alter the symmetry tolerance value. Continuing with fingers crossed.')
else:
warningCache += f'Wrong space group detected by spglib: {groupnumber} vs. {iSGNo} given in CIF. Try to alter the symmetry tolerance value. Continuing with fingers crossed. '
# gather some more info about publication (batch documentation)
try:
journal = str(cifTags['_journal_name_full']).replace('\n', ' ').replace(';', ',')
except:
journal = ''
try:
year = str(cifTags['_journal_year'])
except:
year = ''
try:
doi = str(cifTags['_journal_paper_doi']).replace(';', '')
except:
doi = ''
# compose matrix of wyckoff letters, multiplicities and arities for all crystallographic orbits
g = Group(int(groupnumber))
iCrystallographicOrbits = {}
equivalenceClassNumber = 0
for x in numpy.unique(primitiveCrystallographicOrbits):
iCrystallographicOrbits[equivalenceClassNumber, 0] = numpy.count_nonzero(primitiveCrystallographicOrbits == x) # 0 - multiplicity (in context of red uc)
wyckoffLetter = primitiveWyckoff[list(primitiveCrystallographicOrbits).index(x)]
iCrystallographicOrbits[equivalenceClassNumber, 1] = wyckoffLetter #1 - wyckoff letter
iCrystallographicOrbits[equivalenceClassNumber, 2] = getArity(g[wyckoffLetter]) #2 - arity
equivalenceClassNumber += 1
arityArray = []
for x in numpy.unique(primitiveWyckoff):
arityArray.append(getArity(g[str(x)]))
# identify duplicate atoms (same x,y,z coordinates = same cryst orbit) from structure in order to condense occupancyDict for all entries with identical coordinates!
try:
atomSiteTypeSymbol = []
for entry in cifTags['_atom_site_type_symbol']:
if len(entry) > 1 and entry[1].islower():
atomSiteTypeSymbol.append(entry[0:2])
else:
atomSiteTypeSymbol.append(entry[0])
except:
# sometimes _atom_site_type_symbol isn't set, usually when there are no fractional occupancies to consider -> extract atom species from _atom_site_label
atomSiteTypeSymbol = []
for entry in cifTags['_atom_site_label']:
if len(entry) > 1 and entry[1].islower():
atomSiteTypeSymbol.append(entry[0:2])
else:
atomSiteTypeSymbol.append(entry[0])
duplicateArray = []
identPos = []
for x in range(0, len(atomSiteTypeSymbol)):
XYZInfo = [
cifTags['_atom_site_fract_x'][x],
cifTags['_atom_site_fract_y'][x],
cifTags['_atom_site_fract_z'][x]
]
# check whether coordinates of current atom are already contained in identPos
for y in range(0, len(identPos)):
if numpy.allclose(XYZInfo, identPos[y], atol = sym):
duplicateArray.append([x, y])
break
identPos.append(XYZInfo)
discrepancy = len(atomSiteTypeSymbol) - equivalenceClassNumber - len(duplicateArray)
if discrepancy > 0:
# same crystallographic orbit has probably been reached with different coordinates (e.g. GITWIQ)
# ==> construct all symmetrically equivalent positions & compare with priors. Requires significantly more computing power, therefore only executed in second step...
duplicateArray = []
symEquivPos = []
for x in range(0, len(atomSiteTypeSymbol)):
duplicate = False
XYZInfo = [
cifTags['_atom_site_fract_x'][x],
cifTags['_atom_site_fract_y'][x],
cifTags['_atom_site_fract_z'][x]
]
# check whether coordinates of current atom are already contained in symEquivPos
for y in range(0, len(symEquivPos)):
for pos in symEquivPos[y]:
if numpy.allclose(XYZInfo, pos, atol = sym):
duplicateArray.append([x, y])
duplicate = True
break
if duplicate:
break
if not duplicate:
# generate all symmetrically equivalent positions
offset = len(duplicateArray) # if duplicates were identified, x has to be reduced
wyckoffLetter = iCrystallographicOrbits[x-offset, 1]
arity = iCrystallographicOrbits[x-offset, 2]
# using partially parametrized positions ==> find out which wyckoff instance is present and isolate actual (x,y,z)
if arity > 0:
lineNo = -1
for line in str(g[wyckoffLetter]).split('\n'):
if lineNo == -1:
lineNo += 1
continue
elements = line.split(',')
matches = 0
for y in range(0, 3):
if(
'x' not in elements[y]
and 'y' not in elements[y]
and 'z' not in elements[y]
and XYZInfo[y] == eval(elements[y])
):
matches += 1
if matches == (3 - arity):
correctedXYZInfo = [0, 0, 0]
for z in range (0, 3):
if 'x' in elements[z]:
correctedXYZInfo[0] = correctCoordinates(elements[z], 'x', XYZInfo[z])
elif 'y' in elements[z]:
correctedXYZInfo[1] = correctCoordinates(elements[z], 'y', XYZInfo[z])
elif 'z' in elements[z]:
correctedXYZInfo[2] = correctCoordinates(elements[z], 'z', XYZInfo[z])
XYZInfo = correctedXYZInfo
break
lineNo += 1
symEquivPos.append(
pyxtal.operations.filtered_coords(
pyxtal.operations.apply_ops(XYZInfo, g[wyckoffLetter])
)
)
else:
symEquivPos.append([])
discrepancy = len(atomSiteTypeSymbol) - equivalenceClassNumber - len(duplicateArray)
if discrepancy == 0:
# compose own occupancyDict, as too many errors may occur while correcting the one given by ASE (structure.info['occupancy'])
try:
siteOccupancy = cifTags['_atom_site_occupancy']
except:
siteOccupancy = []
for i in range(0, len(atomSiteTypeSymbol)):
siteOccupancy.append(1)
occupancyDict = {}
offset = 0
for i in range(0, equivalenceClassNumber):
# ignore duplicates
for entry in duplicateArray:
if entry[0] == (i+offset):
offset += 1
# add value
occupancyDict[i] = {}
occupancyDict[i][atomSiteTypeSymbol[i + offset]] = siteOccupancy[i + offset]
# add all duplicates
for entry in duplicateArray:
if entry[1] == (i + offset):
try:
occupancyDict[i][atomSiteTypeSymbol[entry[0]]] += siteOccupancy[entry[0]]
except:
occupancyDict[i][atomSiteTypeSymbol[entry[0]]] = siteOccupancy[entry[0]]
# double check for too high occupancy value at current crystallographic orbit
occupancySum = 0
for element in occupancyDict[i]:
occupancySum += occupancyDict[i][element]
if occupancySum > 1:
if verbose:
print(f'Warning: Occupancy sum {occupancySum} at Wyckoff {iCrystallographicOrbits[i, 0]}{iCrystallographicOrbits[i, 1]}, crystallographic orbit #{i}: {occupancyDict[i]}.')
else:
warningCache += f'Warning: Occupancy sum {occupancySum} at Wyckoff {iCrystallographicOrbits[i, 0]}{iCrystallographicOrbits[i, 1]}, crystallographic orbit #{i}: {occupancyDict[i]}. '
elif verbose:
print(f'Error: discrepancy of {discrepancy} positions between crystallographic orbits calculated by spglib and given CIF-entries. Wrong space group detected? Try to adjust symmetry tolerance!')
return
else:
warningCache += f'Error: discrepancy of {discrepancy} positions between crystallographic orbits calculated by spglib and given CIF-entries. Wrong space group detected? Try to adjust symmetry tolerance! '
return [warningCache, pathToCif]
# allow corrections if occupancy options are enabled
if occupancy:
if '[' in pathToCif or verbose == False:
print('\n\n'+pathToCif)
occupancyDict = correctOccupancy(occupancyDict, iCrystallographicOrbits)
# determine number of atoms in primitive unit cell and thereby compose sum formula
# w/ occupancy (find gcd of crystal orbit multiplicities, consider occupancy)
wyckoffSum = 0.0
chemicalFormulaDict = {}
numbers = []
for i in range(0, equivalenceClassNumber):
numbers.append(iCrystallographicOrbits[i, 0])
divisor = gcd(numbers)
if divisor < 0:
divisor = 1
counter = 0
for x in occupancyDict:
multiplicity = iCrystallographicOrbits[counter, 0]
for element in occupancyDict[x]:
try:
chemicalFormulaDict[element] += occupancyDict[x][element] * multiplicity / divisor
except:
chemicalFormulaDict[element] = occupancyDict[x][element] * multiplicity / divisor
wyckoffSum += occupancyDict[x][element] * multiplicity
counter += 1
# sometimes gcd of multiplicities does not yield empirical formula (e.g. Cu2P6O18Li2 / MnN10C18H28)
# better safe than sorry: try to reduce formula a second time
# (multiplicity approach still implemented because fractional occupancies often complicate computation of the gcd)
numbers = []
for element in chemicalFormulaDict:
# suppose: a) lacking precision
if abs(chemicalFormulaDict[element] - round(chemicalFormulaDict[element])) < 0.1:
numbers.append(round(chemicalFormulaDict[element]))
# or b) more severe defects
else:
numbers.append(math.ceil(chemicalFormulaDict[element]))
if not numbers:
divisor = 1
else:
divisor = gcd(numbers)
if divisor < 0:
divisor = 1
# compose assumed chemical formula
chemical_formula = ''
for element in sorted(chemicalFormulaDict):
stoichiometry = chemicalFormulaDict[element] / divisor
if stoichiometry == 1:
stoichiometry = ''
elif stoichiometry % 1 == 0:
stoichiometry = str(int(stoichiometry))
else:
stoichiometry = str(stoichiometry)
chemical_formula = chemical_formula + element + stoichiometry
atomsPerPrimitiveUnitCell = wyckoffSum
atomsPerUnitCell = wyckoffSum * len(structure) / len(primitiveCrystallographicOrbits)
positionsPerPrimitiveUnitCell = 0 # sum over multiplicities of all crystallographic orbits
for x in range(0, equivalenceClassNumber):
positionsPerPrimitiveUnitCell += iCrystallographicOrbits[x,0]
aritySum = 0 # sum over arities of unique, occupied wyckoff positions (different crystallographic orbits with same wyckoff letter are NOT counted multiple times!)
for x in arityArray:
aritySum += x
# calculate information contents
I_comb = I_coor = I_conf = 0.0
uniqueSpecies = 0
if aritySum > 0:
# the coordinational sum is formed over unique wyckoff positions
for x in arityArray:
probability = x / aritySum
if probability > 0:
I_coor -= probability * math.log(probability, 2)
# the configurational sum over wyckoff positions and crystallographic orbits
probability = x / (aritySum + positionsPerPrimitiveUnitCell)
if probability > 0:
I_conf -= probability * math.log(probability, 2)
for x in range(0, equivalenceClassNumber):
# the combinatorial sum is formed over each element in a crystallographic orbit individually (in other words: over unique species)
# vacancies count as elements too -> probability according to positionsPerPrimitiveUnitCell
occupancySum = 0
multiplicity = iCrystallographicOrbits[x, 0]
for element in occupancyDict[x]:
occupancyValue = occupancyDict[x][element]
occupancySum += occupancyDict[x][element]
probability = multiplicity * occupancyValue / positionsPerPrimitiveUnitCell
if probability > 0:
I_comb -= probability * math.log(probability, 2)
uniqueSpecies += 1
elif verbose:
print(f'Probability <= 0 was skipped: {element} at pos. {x}')
else:
warningCache += f'Probability <= 0 was skipped: {element} at pos. {x} '
probability = multiplicity * occupancyValue / (aritySum + positionsPerPrimitiveUnitCell)
if probability > 0:
I_conf -= probability * math.log(probability, 2)
if occupancySum < 1:
probability = multiplicity * (1 - occupancySum) / positionsPerPrimitiveUnitCell
I_comb -= probability * math.log(probability, 2)
uniqueSpecies += 1
probability = multiplicity * (1 - occupancySum) / (aritySum + positionsPerPrimitiveUnitCell)
I_conf -= probability * math.log(probability, 2)
I_comb_tot = positionsPerPrimitiveUnitCell * I_comb
I_coor_tot = aritySum * I_coor
I_conf_tot = (aritySum + positionsPerPrimitiveUnitCell) * I_conf
# maximum combinatorial information content based on number of unique species which are defined by a combination of crystallographic orbit and element (vacancies obviously count too).
# otherwise: I_comb > I_comb_max for alloys (in general: cases w/ all occupancies < 1)
I_comb_max = math.log(uniqueSpecies, 2)
if aritySum > 0:
I_coor_max = math.log(aritySum, 2)
else:
I_coor_max = 0
I_conf_max = math.log(uniqueSpecies + aritySum, 2)
if I_comb_max != 0:
I_comb_norm = I_comb / I_comb_max
else:
I_comb_norm = 0
if I_coor_max != 0:
I_coor_norm = I_coor / I_coor_max
else:
I_coor_norm = 0
if I_conf_max != 0:
I_conf_norm = I_conf / I_conf_max
else:
I_conf_norm = 0
# correct cell volume to primitive cell volume
perVolume = atomsPerUnitCell / (atomsPerPrimitiveUnitCell * structure.cell.volume)
I_comb_density = perVolume * I_comb_tot
I_coor_density = perVolume * I_coor_tot
I_conf_density = perVolume * I_conf_tot
if entropy:
gasConstantR = mol * kB / (kJ / 1000)
conversionFactor = math.log(2, math.e)
# error for stirling-approximation of ln(N!) < 1% for N >= 90
if positionsPerPrimitiveUnitCell >= 90:
S_comb_max_molar = gasConstantR * positionsPerPrimitiveUnitCell * (math.log(positionsPerPrimitiveUnitCell, math.e) - 1)
else:
S_comb_max_molar = gasConstantR * math.log(math.factorial(positionsPerPrimitiveUnitCell), math.e)
if aritySum >= 90:
S_coor_max_molar = gasConstantR * aritySum * (math.log(aritySum, math.e) - 1)
else:
S_coor_max_molar = gasConstantR * math.log(math.factorial(aritySum), math.e)
if (positionsPerPrimitiveUnitCell + aritySum) >= 90:
S_conf_max_molar = gasConstantR * (positionsPerPrimitiveUnitCell + aritySum) * (math.log((positionsPerPrimitiveUnitCell + aritySum), math.e) - 1)
else:
S_conf_max_molar = gasConstantR * math.log(math.factorial(positionsPerPrimitiveUnitCell + aritySum), math.e)
Delta_S_comb_molar = gasConstantR * I_comb * conversionFactor
Delta_S_coor_molar = gasConstantR * I_coor * conversionFactor
Delta_S_conf_molar = gasConstantR * I_conf * conversionFactor
if verbose:
print(f'\n\n------------ {pathToCif} ------------')
print(f'assumed formula\t {chemical_formula}')
print(f'assumed SG\t {aSG}')
print(f'SG from CIF\t {SG}')
print(
'lattice [A] \t a: {:.2f}, b: {:.2f}, c: {:.2f}'.format(
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2]
).replace('.', decimalSeparator)
)
print(
'angles [°] \t b,c: {:.2f}, a,c: {:.2f}, a,b: {:.2f}'.format(
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5]
).replace('.', decimalSeparator)
)
print('---')
print('{:.6f} \t atoms / unit cell'.format(atomsPerUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t atoms / reduced unit cell'.format(atomsPerPrimitiveUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t positions / reduced unit cell'.format(positionsPerPrimitiveUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t unique species'.format(uniqueSpecies).replace('.', decimalSeparator))
print('{:.6f} \t coordinational degrees of freedom'.format(aritySum).replace('.', decimalSeparator))
print('--- combinatorial (extended Krivovichev) ---')
print('{:.6f} \t I_comb \t\t [bit / position]'.format(I_comb).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_max \t\t [bit / position]'.format(I_comb_max).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_norm \t\t [-]'.format(I_comb_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_tot \t\t [bit / reduced unit cell]'.format(I_comb_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_dens \t\t [bit / A^3]'.format(I_comb_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_comb_max_molar \t [J / (mol * K)]'.format(S_comb_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_comb_molar \t [J / (mol * K)]'.format(Delta_S_comb_molar).replace('.', decimalSeparator))
print('--- coordinational (Hornfeck) ---')
print('{:.6f} \t I_coor \t\t [bit / freedom]'.format(I_coor).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_max \t\t [bit / freedom]'.format(I_coor_max).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_norm \t\t [-]'.format(I_coor_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_tot \t\t [bit / reduced unit cell]'.format(I_coor_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_dens \t\t [bit / A^3]'.format(I_coor_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_coor_max_molar \t [J / (mol * K)]'.format(S_coor_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_coor_molar \t [J / (mol * K)]'.format(Delta_S_coor_molar).replace('.', decimalSeparator))
print('--- configurational (extended Hornfeck) ---')
print('{:.6f} \t I_conf \t\t [bit / (position + freedom)]'.format(I_conf).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_max \t\t [bit / (position + freedom)]'.format(I_conf_max).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_norm \t\t [-]'.format(I_conf_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_tot \t\t [bit / reduced unit cell]'.format(I_conf_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_dens \t\t [bit / A^3]'.format(I_conf_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_conf_max_molar \t [J / (mol * K)]'.format(S_conf_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_conf_molar \t [J / (mol * K)]'.format(Delta_S_conf_molar).replace('.', decimalSeparator))
return
elif entropy:
returnArray = [
warningCache,
pathToCif,
doi, journal, year,
chemical_formula,
aSG,
SG,
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2],
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5],
atomsPerUnitCell,
atomsPerPrimitiveUnitCell,
positionsPerPrimitiveUnitCell,
uniqueSpecies,
aritySum,
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density, S_comb_max_molar, Delta_S_comb_molar,
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density, S_coor_max_molar, Delta_S_coor_molar,
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density, S_conf_max_molar, Delta_S_conf_molar
]
else:
returnArray = [
warningCache,
pathToCif,
doi, journal, year,
chemical_formula,
aSG,
SG,
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2],
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5],
atomsPerUnitCell,
atomsPerPrimitiveUnitCell,
positionsPerPrimitiveUnitCell,
uniqueSpecies,
aritySum,
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density,
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density,
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density
]
return returnArray
def correctCoordinates(coordinateDescription, parameter, coordinate):
"""
extracts x/y/z parameter of a wyckoff position's individual coordinates. e.g. the z-coordinate of a wyckoff position 4c in SG 24 might be defined as (-z+1/2) = 0.3 --> returns (z) = 0.2
Parameters
arg1 (string) parametrized description of the coordinate e.g. '-z+1/2'
arg2 (string) 'x', 'y' or 'z' as parameter to isolate from arg1 (coordinateDescription) e.g. 'z'
arg3 (float) fractional coordinate on x/y/z axis e.g. 0.3
Returns
float fractional coordinate, corresponding to the isolated parameter (x, y or z) e.g. 0.2
"""
if coordinateDescription.split(parameter)[0] == '-':
factor = -1
else:
factor = +1
if coordinateDescription.split(parameter)[1] != '':
summand = eval(coordinateDescription.split(parameter)[1])
else:
summand = 0
return (factor * (coordinate - summand)) % 1
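# Quick illustrative check (mirrors the docstring example above, not part of
# the original module):
#   correctCoordinates('-z+1/2', 'z', 0.3)  ->  (-1 * (0.3 - 0.5)) % 1 = 0.2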
def getArity(pyxtalWyckoff):
"""
calculates the arity of a given wyckoff position
Parameters
arg1 (Wyckoff_position) pyxtal Wyckoff_position class object
Returns
int arity
"""
firstSymmOp = str(pyxtalWyckoff).splitlines()[1] # line 0 contains general description: 'wyckoff pos nA in SG xx with site symmetry xx'
arity = 0
if 'x' in firstSymmOp:
arity += 1
if 'y' in firstSymmOp:
arity += 1
if 'z' in firstSymmOp:
arity += 1
return arity
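# Illustrative examples (not part of the original module): a general position
# whose first symmetry operation reads 'x, y, z' has arity 3, a position such
# as 'x, x, 1/2' has arity 1 (only 'x' occurs), and a fully fixed special
# position like '0, 0, 0' has arity 0.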
def correctOccupancy(occupancyDict, iCrystallographicOrbits):
"""
a menu that allows for on-the-fly editing of occupancy values
Parameters
arg1 (dictionary) dictionary, containing {Element1 : occupancy1, Element2 : occupancy2} for every crystallographic orbit
arg2 (array) array, containing the multiplicities [x, 0], wyckoff letters [x, 1] and arities [x, 2] of every crystallographic orbit
Returns
dictionary updated occupancyDict
"""
corrOccupancyDict = copy.deepcopy(occupancyDict)
while True:
print('\n\nEnter a number on the left to correct the species\' occupancy. \'c\' to continue with current values. \'d\' to discard changes.')
print('#\t Element \t Wyckoff \t arity \t original \t current')
positions = []
for x in corrOccupancyDict:
for element in corrOccupancyDict[x]:
positions.append([x,element])
print(f'{len(positions) - 1} \t {element} \t\t {iCrystallographicOrbits[x, 0]}{iCrystallographicOrbits[x, 1]} \t\t {iCrystallographicOrbits[x, 2]} \t {occupancyDict[x][element]} \t\t {corrOccupancyDict[x][element]}')
print('')
userInput = input()
if userInput == 'c':
return corrOccupancyDict
elif userInput == 'd':
return occupancyDict
elif RepresentsInt(userInput) and 0 <= int(userInput) < len(positions):
x = positions[int(userInput)][0]
element = positions[int(userInput)][1]
print(f'\n\nInput the new stoichiometry for {element} at Wyckoff {iCrystallographicOrbits[x, 0]}{iCrystallographicOrbits[x, 1]} with \'.\' as decimal separator. Currently: {corrOccupancyDict[x][element]}')
userInput2 = input()
if RepresentsFloat(userInput2) and 0 < float(userInput2) <= 1:
corrOccupancyDict[x][element] = float(userInput2)
else:
print(f'\n\nPlease only insert occupancy values 0 < x <= 1')
continue
else:
print(f'\n\nPlease only enter integer numbers in the range of 0 to {len(positions) - 1}')
continue
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def RepresentsFloat(s):
try:
float(s)
return True
except ValueError:
return False
def gcd(numbers):
"""
calculates the greatest common divisor of a given array of integers
"""
divisor = numbers[0]
while True:
try:
for number in numbers:
rest = number % divisor
if not rest:
pass
else:
raise
break
except:
pass
divisor -= 1
return divisor
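# Illustrative check (not part of the original module):
#   gcd([8, 12, 20]) -> 4; divisors 8, 7, 6 and 5 are rejected in turn, and 4
#   is the largest value dividing every entry without remainder.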
def customWarnings(message, category, filename, lineno, file, random):
"""
redirects warnings into the global variable warningCache (batch mode)
"""
global warningCache
warningCache += str(message) + ', in: ' + str(filename) + ', line: ' + str(lineno) + ' '
def processFile(pathToCif, verbose, entropy, symprec):
"""
open CIF from given path, perform corrections that enhance ASE-compatibility and facilitate calculations in getComplexity()
let ASE parse the file and forward the data blocks in form of Atoms objects to getComplexity()
Parameters:
arg1 (string) path to valid CIF
arg2 (Boolean) verbosity: (True) --> output to console <=> (False) --> output to .csv-file in respective folder
arg3 (Boolean) entropy options
arg4 (float) symmetry tolerance in cartesian coordinates
Returns:
returns the return values of getComplexity() as an array
"""
# redirect warnings for batch mode
if not verbose:
resultArray = []
global warningCache
warnings.showwarning = customWarnings
# get contents from CIF-file and thereby correct spacegroups that are written with brackets (ASE will throw errors)
# crystal water is often denominated as "Wat", ASE hates that, replace "Wat" with "O" as hydrogen atoms are missing anyway
# ignore dummy atoms completely as they will cause problems and should not contribute to any information content
# filter fractional coordinates with the modulo operator (they should be between 0 and 1!), thereby discarding uncertainty values
input = open(pathToCif)
output = ''
xPos = yPos = zPos = counter = -1
for line in input:
low = line.lower()
if line[0] == '#':
continue
elif '_' in line:
if (
'_symmetry_space_group_name_h-m' in low
or '_space_group_name_h-m_alt' in low
):
output += line.replace('(', '').replace(')', '')
elif 'loop_' in low:
output += line
xPos = yPos = zPos = counter = -1
elif '_atom_site_fract_x' in low:
output += line
xPos = counter
elif '_atom_site_fract_y' in low:
output += line
yPos = counter
elif '_atom_site_fract_z' in low:
output += line
zPos = counter
else:
output += line
counter += 1
elif xPos >= 0 and yPos >=0 and zPos >= 0:
if 'dum' in low:
continue
segments = line.split()
if len(segments) > max([xPos, yPos, zPos]):
if '(' in segments[xPos]:
segments[xPos] = segments[xPos][0:segments[xPos].find('(')]
if '(' in segments[yPos]:
segments[yPos] = segments[yPos][0:segments[yPos].find('(')]
if '(' in segments[zPos]:
segments[zPos] = segments[zPos][0:segments[zPos].find('(')]
if RepresentsFloat(segments[xPos]):
segments[xPos] = str(float(segments[xPos]) % 1)
if RepresentsFloat(segments[yPos]):
segments[yPos] = str(float(segments[yPos]) % 1)
if RepresentsFloat(segments[zPos]):
segments[zPos] = str(float(segments[zPos]) % 1)
for segment in segments:
output += ' '
output += segment.replace('Wat', 'O')
output += '\n'
else:
output += line.replace('Wat', 'O')
else:
output += line
cifFile = io.StringIO(output)
#let ase read adjusted CIF-file
try:
structureList = ase.io.read(cifFile, format = 'cif', index = ':', store_tags = True, reader = 'ase') #, fractional_occupancies = True
except Exception as e:
errorMessage = 'File is either empty or corrupt. ' + traceback.format_exc().replace('\n', ' ')
if verbose:
print(errorMessage)
return
else:
errorMessage += warningCache
warningCache = ''
resultArray.append([errorMessage, pathToCif])
return resultArray
# iterate through entries in CIF-file
index = 0
for structure in structureList:
outputPath = pathToCif
if len(structureList) > 1:
outputPath = outputPath + ' [' + str(index) + ']'
try:
if verbose:
getComplexity(structure, outputPath, verbose, entropy, symprec)
else:
resultArray.append(getComplexity(structure, outputPath, verbose, entropy, symprec))
except Exception as e:
errorMessage = 'Error: ' + traceback.format_exc().replace('\n', ' ')
if verbose:
print(errorMessage)
else:
warningCache += errorMessage
resultArray.append([warningCache, outputPath])
warningCache = ''
index += 1
if not verbose:
return resultArray
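# Illustrative usage (hypothetical file name, not part of the original module):
# verbose mode prints the analysis to the console,
#   processFile('quartz.cif', True, False, 5e-3)
# while batch mode returns one result row per data block in the CIF,
#   rows = processFile('quartz.cif', False, False, 5e-3)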
def processDirectory(dir, recursive, entropy, symprec):
"""
iterates through all .cif-files in a given directory with multithreading and compiles results into .csv-file
Parameters:
arg1 (string): path to directory
arg2 (Boolean): iterate through subdirs as well?
arg3 (Boolean): entropy options
arg4 (float): symmetry tolerance in cartesian coordinates
Returns: results as .csv-file into dir
"""
start = time.time()
if not dir[-1] == '/' and not dir[-1] == '\\':
dir += '\\'
if recursive:
extension = '**/*.cif'
else:
extension = '*.cif'
resultArray = []
fileList = glob.glob(dir + extension, recursive = recursive)
numFiles = len(fileList)
if numFiles == 0:
print(f'{dir} does not contain .cif-files')
return
if numFiles > maxThreads:
numProcesses = maxThreads
else:
numProcesses = numFiles
pool = multiprocessing.Pool(processes = numProcesses)
for file in fileList:
resultArray.append(pool.apply_async(processFile, args = (file, False, entropy, symprec)))
output = ''
numEntries = 0
for fileResult in resultArray:
for cifResult in fileResult.get():
counter = 0
numEntries += 1
for string in cifResult:
if counter > 7:
if decimalSeparator == ',':
output += '{:.6f}; '.format(string).replace('.', ',')
else:
output += '{:.6f}; '.format(string)
else:
output += string + '; '
counter += 1
output += '\n '
if entropyOptions:
header = 'Errors; Path; DOI; Journal; Year; Assumed Formula; assumed SG; SG from CIF; a [A]; b [A]; c [A]; b,c [°]; a,c [°]; a,b [°]; atoms / uc; atoms / reduc; pos / reduc; unique species; coor freedom (aritySum); I_comb; I_comb_max; I_comb_norm; I_comb_tot; I_comb_density; S_comb_max_molar; Delta_S_comb_molar; I_coor; I_coor_max; I_coor_norm; I_coor_tot; I_coor_density; S_coor_max_molar; Delta_S_coor_molar; I_conf; I_conf_max; I_conf_norm; I_conf_tot; I_conf_density; S_conf_max_molar; Delta_S_conf_molar; \n '
else:
header = 'Errors; Path; DOI; Journal; Year; Assumed Formula; assumed SG; SG from CIF; a [A]; b [A]; c [A]; b,c [°]; a,c [°]; a,b [°]; atoms / uc; atoms / reduc; pos / reduc; unique species; coor freedom (aritySum); I_comb; I_comb_max; I_comb_norm; I_comb_tot; I_comb_density; I_coor; I_coor_max; I_coor_norm; I_coor_tot; I_coor_density; I_conf; I_conf_max; I_conf_norm; I_conf_tot; I_conf_density; \n '
finish = time.time()
outputFile = dir + f'batch_{int(finish)}.csv'
f = open(outputFile, 'w', encoding = 'utf-8')
f.write(header + output)
f.close()
timer = '{:.3f}'.format(finish - start)
print(f'\n\nProcessed {numFiles} files ({numEntries} entries) in {timer} s. Results written into {outputFile}')
def userMenu():
global symmetryTolerance
global occupancy
global maxThreads
global decimalSeparator
global entropyOptions
global recursive
print(
f'Welcome to {programName} -- A Crystal Structure Complexity Analyzer Based on Information Theory\n'
+ f'Version {versionNumber}, release date: {releaseDate}\n'
+ f'Written by {authors} ({institution})\n'
+ f'Please cite the following paper if {programName} is utilized in your work:\n'
+ f'\t {paper}'
)
while True:
print(f'\n\nInput path of .cif file or directory for complexity analysis. \'s\' for settings. \'e\' to exit.')
userInput = input().replace('\"', '')
if userInput == 'exit' or userInput == 'e':
break
elif userInput == 's':
while True:
print(
f'\n\nInput float as symmetry tolerance 0 < x < 1\t (currently {symmetryTolerance}).'
+ f'\nInput int as maximum number of threads\t\t (currently {maxThreads})'
+ f'\n\'d\' to toggle between decimal separators\t (currently \'{decimalSeparator}\').'
+ f'\n\'o\' to toggle occupancy editing options\t\t (currently {occupancy}).'
+ f'\n\'r\' to toggle recursive subdir scan\t\t (currently {recursive}). '
+ f'\n\'s\' to toggle entropy calculation\t\t (currently {entropyOptions}).'
+ '\n\'e\' exit to main menu:'
)
userInput = input()
if userInput == 'o':
occupancy = not occupancy
elif userInput == 'r':
recursive = not recursive
elif userInput == 's':
entropyOptions = not entropyOptions
elif userInput == 'd':
if decimalSeparator == '.':
decimalSeparator = ','
else:
decimalSeparator = '.'
elif userInput == 'e' or userInput == 'exit':
break
elif RepresentsFloat(userInput) and 0 < float(userInput) < 1:
symmetryTolerance = float(userInput)
elif RepresentsInt(userInput) and int(userInput) > 0:
maxThreads = int(userInput)
else:
print('\n\nInvalid input')
continue
continue
elif os.path.isdir(userInput):
processDirectory(userInput, recursive, entropyOptions, symmetryTolerance)
continue
elif '.' in userInput:
extension = userInput.split('.')[-1]
if extension != 'cif':
userInput += '.cif'
else:
userInput += '.cif'
if os.path.isfile(userInput):
processFile(userInput, True, entropyOptions, symmetryTolerance)
else:
print('\n\nInvalid path')
continue
if __name__ == '__main__':
userMenu()
| 47.296218
| 525
| 0.58042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16,636
| 0.369402
|
67ef29d1d4ce47e0f4c946159c2b8e5e9239317e
| 2,166
|
py
|
Python
|
bin-opcodes-vec/top50opcodes.py
|
laurencejbelliott/Ensemble_DL_Ransomware_Detector
|
0cae02c2425e787a810513537a47897f3a42e5b5
|
[
"MIT"
] | 18
|
2019-04-10T21:16:45.000Z
|
2021-11-03T00:22:14.000Z
|
bin-opcodes-vec/top50opcodes.py
|
laurencejbelliott/Ensemble_DL_Ransomware_Detector
|
0cae02c2425e787a810513537a47897f3a42e5b5
|
[
"MIT"
] | null | null | null |
bin-opcodes-vec/top50opcodes.py
|
laurencejbelliott/Ensemble_DL_Ransomware_Detector
|
0cae02c2425e787a810513537a47897f3a42e5b5
|
[
"MIT"
] | 9
|
2019-06-29T18:09:24.000Z
|
2021-11-10T22:15:13.000Z
|
__author__ = "Laurence Elliott - 16600748"
from capstone import *
import pefile, os
# samplePaths = ["testSamples/" + sample for sample in os.listdir("testSamples")]
samplePaths = ["../bin-utf8-vec/benignSamples/" + sample for sample in os.listdir("../bin-utf8-vec/benignSamples")] + \
["../bin-utf8-vec/malwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/malwareSamples")] + \
["../bin-utf8-vec/ransomwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/ransomwareSamples")]
opcodeSet = set()
opCodeDicts = []
opCodeFreqs = {}
nSamples = len(samplePaths)
count = 1
for sample in samplePaths:
try:
pe = pefile.PE(sample, fast_load=True)
entryPoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
data = pe.get_memory_mapped_image()[entryPoint:]
cs = Cs(CS_ARCH_X86, CS_MODE_32)
opcodes = []
for i in cs.disasm(data, 0x1000):
opcodes.append(i.mnemonic)
opcodeDict = {}
total = len(opcodes)
opcodeSet = set(list(opcodeSet) + opcodes)
for opcode in opcodeSet:
            freq = 0  # count occurrences of this opcode in the current sample, starting from zero
for op in opcodes:
if opcode == op:
freq += 1
try:
opCodeFreqs[opcode] += freq
except:
opCodeFreqs[opcode] = freq
opcodeDict[opcode] = round((freq / total) * 100, 2)
opCodeDicts.append(opcodeDict)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
except Exception as e:
print(e)
# for opcode in opcodeSet:
# print(opcode, str(opcodeDict[opcode]) + "%")
# for opcodeDict in opCodeDicts:
# freqSorted = sorted(opcodeDict, key=opcodeDict.get)[-1:0:-1]
# print(opcodeDict[freqSorted[0]], opcodeDict[freqSorted[1]], opcodeDict[freqSorted[2]], freqSorted)
opCodeFreqsSorted = sorted(opCodeFreqs, key=opCodeFreqs.get)[-1:0:-1]
with open("top50opcodes.csv", "w") as f:
f.write("opcode, frequency\n")
for opcode in opCodeFreqsSorted[:50]:
f.write(str(opcode) + ", " + str(opCodeFreqs[opcode]) + "\n")
print(opcode, opCodeFreqs[opcode])
| 31.391304
| 119
| 0.612188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 647
| 0.298707
|
67f01ead8301ab0d013d90c2874dceeac2e0f7b9
| 233
|
py
|
Python
|
chat/messaging/apps.py
|
VsevolodOkhrimenko/enchad
|
eca2790b374d336dfc5e109657d25ab0616196ee
|
[
"MIT"
] | null | null | null |
chat/messaging/apps.py
|
VsevolodOkhrimenko/enchad
|
eca2790b374d336dfc5e109657d25ab0616196ee
|
[
"MIT"
] | null | null | null |
chat/messaging/apps.py
|
VsevolodOkhrimenko/enchad
|
eca2790b374d336dfc5e109657d25ab0616196ee
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MessagingConfig(AppConfig):
name = 'chat.messaging'
def ready(self):
try:
import chat.messaging.signals # noqa F401
except ImportError:
pass
| 19.416667
| 54
| 0.622318
| 196
| 0.841202
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.11588
|
67f15b64983f5eafc8f2961a8adfe37568e44cb9
| 2,051
|
py
|
Python
|
tests/test_keepalived2.py
|
khosrow/lvsm
|
516ee1422f736d016ccc198e54f5f019102504a6
|
[
"MIT"
] | 15
|
2015-03-18T21:45:24.000Z
|
2021-02-22T09:41:30.000Z
|
tests/test_keepalived2.py
|
khosrow/lvsm
|
516ee1422f736d016ccc198e54f5f019102504a6
|
[
"MIT"
] | 12
|
2016-01-15T19:32:36.000Z
|
2016-10-27T14:21:14.000Z
|
tests/test_keepalived2.py
|
khosrow/lvsm
|
516ee1422f736d016ccc198e54f5f019102504a6
|
[
"MIT"
] | 8
|
2015-03-20T00:24:56.000Z
|
2021-11-19T06:21:19.000Z
|
import unittest
import os
import sys
import StringIO
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lvsm')))
from lvsm.modules import keepalived
class Keepalived(unittest.TestCase):
"""Tests for the functionality of the keepalived module"""
def setUp(self):
args = {'keepalived-mib': 'KEEPALIVED-MIB',
'snmp_community': 'private',
'snmp_host': 'localhost',
'snmp_user': '',
'snmp_password': '',
'cache_dir': path + '/cache'
}
self.director = keepalived.Keepalived(path + '/scripts/ipvsadm3',
path + '/etc/keepalived.conf',
restart_cmd='',
nodes='',
args=args)
def test_show(self):
self.maxDiff = None
# Testing show on non-standard ports
expected_result = ['',
'Layer 4 Load balancing',
'======================',
'TCP 192.0.2.2:8888 rr ',
' -> 192.0.2.200:8888 Masq 1 0 0 ',
' -> 192.0.2.201:8888 Masq 1 0 0 ',
'',
'UDP 192.0.2.2:domain rr ',
' -> 192.0.2.202:domain Masq 1 0 0 ',
' -> 192.0.2.203:domain Masq 1 0 0 ',
'',
'']
self.assertEqual(self.director.show(numeric=False, color=False), expected_result)
if __name__ == "__main__":
unittest.main()
| 42.729167
| 113
| 0.376889
| 1,770
| 0.862994
| 0
| 0
| 0
| 0
| 0
| 0
| 788
| 0.384203
|
67f2b9d79410dba976d86159718de46c71935384
| 1,416
|
py
|
Python
|
faeAuditor/auditGroupResults/urlsCSV.py
|
opena11y/fae-auditor
|
ea9099b37b77ddc30092b0cdd962647c92b143a7
|
[
"Apache-2.0"
] | 2
|
2018-02-28T19:03:28.000Z
|
2021-09-30T13:40:23.000Z
|
faeAuditor/auditGroupResults/urlsCSV.py
|
opena11y/fae-auditor
|
ea9099b37b77ddc30092b0cdd962647c92b143a7
|
[
"Apache-2.0"
] | 6
|
2020-02-11T21:53:58.000Z
|
2022-02-10T07:57:58.000Z
|
faeAuditor/auditGroupResults/urlsCSV.py
|
opena11y/fae-auditor
|
ea9099b37b77ddc30092b0cdd962647c92b143a7
|
[
"Apache-2.0"
] | 1
|
2019-12-05T06:05:20.000Z
|
2019-12-05T06:05:20.000Z
|
"""
Copyright 2014-2018 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: auditResults/urlsCSV.py
Author: Jon Gunderson
"""
# reports/urls.py
from __future__ import absolute_import
from django.conf.urls import url
from .viewsCSV import GroupResultsViewCSV
from .viewsCSV import GroupResultsAuditGroupViewCSV
from .viewsCSV import GroupRuleGroupResultsViewCSV
urlpatterns = [
url(r'^all/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/$',
GroupResultsViewCSV,
name='group_results_csv'),
url(r'^all/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/g/(?P<audit_group_slug>[\w-]+)/$',
GroupResultsAuditGroupViewCSV,
name='group_results_audit_group_csv'),
# Rule grouping result views
url(r'^some/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/rg/(?P<rule_group_slug>[\w-]+)/$',
GroupRuleGroupResultsViewCSV,
name='group_rule_group_results_csv')
]
| 29.5
| 100
| 0.735169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 995
| 0.702684
|
67f2d1af7b93140433f3b44d8d6f9fbf50549676
| 912
|
py
|
Python
|
microcosm_caching/base.py
|
globality-corp/microcosm-caching
|
9e4ddb60d95e1344bf97f69248d1f7ac36a92cc8
|
[
"Apache-2.0"
] | 1
|
2019-08-29T16:47:18.000Z
|
2019-08-29T16:47:18.000Z
|
microcosm_caching/base.py
|
globality-corp/microcosm-caching
|
9e4ddb60d95e1344bf97f69248d1f7ac36a92cc8
|
[
"Apache-2.0"
] | 2
|
2019-10-29T19:25:16.000Z
|
2019-11-12T00:00:04.000Z
|
microcosm_caching/base.py
|
globality-corp/microcosm-caching
|
9e4ddb60d95e1344bf97f69248d1f7ac36a92cc8
|
[
"Apache-2.0"
] | null | null | null |
"""
Cache abstractions for use with API resources.
"""
from abc import ABC, abstractmethod
class CacheBase(ABC):
"""
A simple key-value cache interface.
"""
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value, ttl=None):
"""
Set a key, value pair to the cache.
Optional ttl (time-to-live) value should be in seconds.
"""
pass
@abstractmethod
def set_many(self, values, ttl=None):
"""
Set key/value pairs in the cache
Optional ttl (time-to-live) value should be in seconds.
"""
pass
@abstractmethod
def add(self, key, value, ttl=None):
"""
Add a key, value pair to the cache, skipping the set if
the key has already been set
Optional ttl (time-to-live) value should be in seconds.
"""
pass
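# Minimal illustrative sketch (not part of this module): a dict-backed,
# in-process implementation of the interface above. It ignores ttl, which a
# real backend (e.g. memcached) would honour.
class _ExampleDictCache(CacheBase):
    def __init__(self):
        self._store = {}
    def get(self, key):
        return self._store.get(key)
    def set(self, key, value, ttl=None):
        self._store[key] = value
    def set_many(self, values, ttl=None):
        self._store.update(values)
    def add(self, key, value, ttl=None):
        self._store.setdefault(key, value)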
| 19.404255
| 63
| 0.574561
| 817
| 0.895833
| 0
| 0
| 716
| 0.785088
| 0
| 0
| 536
| 0.587719
|
67f2fda918bbde7a4b1b415f81dab3ffab386200
| 876
|
py
|
Python
|
randomizer.py
|
shane1027/PollDaddySlurp
|
6cc17156f38427379d095277681dbe1a68baa49d
|
[
"MIT"
] | null | null | null |
randomizer.py
|
shane1027/PollDaddySlurp
|
6cc17156f38427379d095277681dbe1a68baa49d
|
[
"MIT"
] | null | null | null |
randomizer.py
|
shane1027/PollDaddySlurp
|
6cc17156f38427379d095277681dbe1a68baa49d
|
[
"MIT"
] | 1
|
2019-10-10T15:19:33.000Z
|
2019-10-10T15:19:33.000Z
|
#!/usr/bin/env python2.7
import time
from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
if __name__ == '__main__':
start = time.time()
req_proxy = RequestProxy()
print "Initialization took: {0} sec".format((time.time() - start))
print "Size : ", len(req_proxy.get_proxy_list())
print " ALL = ", req_proxy.get_proxy_list()
test_url = 'http://ipv4.icanhazip.com'
while True:
start = time.time()
request = req_proxy.generate_proxied_request(test_url)
print "Proxied Request Took: {0} sec => Status: {1}".format((time.time() - start), request.__str__())
if request is not None:
print "\t Response: ip={0}".format(u''.join(request.text).encode('utf-8'))
print "Proxy List Size: ", len(req_proxy.get_proxy_list())
print"-> Going to sleep.."
time.sleep(1)
| 35.04
| 109
| 0.643836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 226
| 0.257991
|
67f38cc9e41435b2a8a8c22aa5a456b1d76fb88e
| 555
|
py
|
Python
|
examples/nni_data_augmentation/basenet/data.py
|
petuum/tuun
|
8eec472dbf0e5e695449b0fa2d98985469fd5b30
|
[
"Apache-2.0"
] | 33
|
2020-08-30T16:22:35.000Z
|
2022-02-26T13:48:32.000Z
|
examples/nni_data_augmentation/basenet/data.py
|
petuum/tuun
|
8eec472dbf0e5e695449b0fa2d98985469fd5b30
|
[
"Apache-2.0"
] | 2
|
2021-01-18T19:46:43.000Z
|
2021-03-24T09:59:14.000Z
|
examples/nni_data_augmentation/basenet/data.py
|
petuum/tuun
|
8eec472dbf0e5e695449b0fa2d98985469fd5b30
|
[
"Apache-2.0"
] | 2
|
2020-08-25T17:02:15.000Z
|
2021-04-21T16:40:44.000Z
|
#!/usr/bin/env python
"""
data.py
"""
import itertools
def loopy_wrapper(gen):
while True:
for x in gen:
yield x
class ZipDataloader:
def __init__(self, dataloaders):
self.dataloaders = dataloaders
self._len = len(dataloaders[0])
def __len__(self):
return self._len
def __iter__(self):
counter = 0
iters = [loopy_wrapper(d) for d in self.dataloaders]
while counter < len(self):
yield tuple(zip(*[next(it) for it in iters]))
counter += 1
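# Illustrative usage sketch (not part of the original module): stand-in
# "dataloaders" are plain lists of (input, target) batches; real usage would
# pass DataLoader-like iterables instead.
def _example_zip_dataloader():
    loader_a = [('a1', 0), ('a2', 1)]
    loader_b = [('b1', 10), ('b2', 11), ('b3', 12)]
    for inputs, targets in ZipDataloader([loader_a, loader_b]):
        # first iteration yields inputs=('a1', 'b1') and targets=(0, 10)
        print(inputs, targets)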
| 18.5
| 60
| 0.578378
| 409
| 0.736937
| 299
| 0.538739
| 0
| 0
| 0
| 0
| 38
| 0.068468
|
67f3afbe3c2036ebfbec72e16288761010482211
| 1,180
|
py
|
Python
|
tools_box/_selling/report/sales_representative_scorecard/sales_representative_scorecard.py
|
maisonarmani/Tools_Box
|
4f8cc3a0deac1be50a3ac80758a10608faf58454
|
[
"MIT"
] | null | null | null |
tools_box/_selling/report/sales_representative_scorecard/sales_representative_scorecard.py
|
maisonarmani/Tools_Box
|
4f8cc3a0deac1be50a3ac80758a10608faf58454
|
[
"MIT"
] | null | null | null |
tools_box/_selling/report/sales_representative_scorecard/sales_representative_scorecard.py
|
maisonarmani/Tools_Box
|
4f8cc3a0deac1be50a3ac80758a10608faf58454
|
[
"MIT"
] | 1
|
2022-01-30T12:15:41.000Z
|
2022-01-30T12:15:41.000Z
|
# Copyright (c) 2013, masonarmani38@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns, data = ["Sales Person: Link/Sales Person200", "Item:Link/Item:200","Item Name:Data:200","Qty:Float:200","Amount:Currency:200"], []
item=""
customer=""
territory=""
if filters.get("item"):
item = """ and soi.item_code = '{}' """.format(filters.get("item"))
if filters.get("sales"):
customer = """ and st.sales_person = '{}' """.format(filters.get("sales"))
if filters.get("territory"):
territory = """ and so.territory = '{}' """.format(filters.get("territory"))
data = frappe.db.sql("""select st.sales_person, soi.item_code,soi.item_name,sum(soi.qty),sum(soi.amount) from `tabSales Invoice Item` soi
join `tabSales Invoice` so on soi.parent=so.name
join `tabCustomer` c on c.name = so.customer
join `tabSales Team` st on c.name = st.parent
where so.docstatus=1 and (so.posting_date between '{}' and '{}') {} {} {} group by soi.item_code""".format(filters.get("from"),filters.get("to"),item,customer,territory),as_list=1 )
return columns, data
| 39.333333
| 183
| 0.691525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 751
| 0.636441
|
67f3bbd2cd29eb37f8dc56a77c4074bc640a2a29
| 484
|
py
|
Python
|
Google-IT-Automation-with-Python-Professional-Certificate/3-Introduction-to-Git-and-Github/Week-1/disk_usage.py
|
fengjings/Coursera
|
54098a9732faa4b37afe69d196e27805b1ac73aa
|
[
"MIT"
] | null | null | null |
Google-IT-Automation-with-Python-Professional-Certificate/3-Introduction-to-Git-and-Github/Week-1/disk_usage.py
|
fengjings/Coursera
|
54098a9732faa4b37afe69d196e27805b1ac73aa
|
[
"MIT"
] | null | null | null |
Google-IT-Automation-with-Python-Professional-Certificate/3-Introduction-to-Git-and-Github/Week-1/disk_usage.py
|
fengjings/Coursera
|
54098a9732faa4b37afe69d196e27805b1ac73aa
|
[
"MIT"
] | 1
|
2021-06-09T08:59:48.000Z
|
2021-06-09T08:59:48.000Z
|
import shutil
import sys
def check_disk_usage(disk, min_absolute, min_percent):
'''return true if there is enough free disk space, else false'''
du = shutil.disk_usage(disk)
percent_free= 100*du.free/du.total
gigabytes_free = du.free/2**30
if percent_free<min_percent or gigabytes_free < min_absolute:
return False
return True
if not check_disk_usage('/', 2*2**30, 10):
    print('error not enough space')
    sys.exit(1)  # a bare `return` is invalid at module level; exit with an error code instead
print('everything ok')
sys.exit(0)
| 26.888889
| 68
| 0.708678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.219008
|
67f441ca489816b005f268005b6753cf7c38a180
| 1,796
|
py
|
Python
|
src/utils/tests/test_www.py
|
nuuuwan/utils
|
d5085d9bddd1ffc79544241b43aaa8269c5806f0
|
[
"MIT"
] | null | null | null |
src/utils/tests/test_www.py
|
nuuuwan/utils
|
d5085d9bddd1ffc79544241b43aaa8269c5806f0
|
[
"MIT"
] | 1
|
2021-07-06T11:16:58.000Z
|
2021-07-06T11:16:58.000Z
|
src/utils/tests/test_www.py
|
nuuuwan/utils
|
d5085d9bddd1ffc79544241b43aaa8269c5806f0
|
[
"MIT"
] | null | null | null |
"""Test."""
import os
import unittest
import pytest
from utils import www
TEST_JSON_URL = os.path.join(
'https://raw.githubusercontent.com',
'nuuuwan/misc-sl-data/master',
'sl_power_station_info.json',
)
TEST_TSV_URL = os.path.join(
'https://raw.githubusercontent.com',
'nuuuwan/gig-data/master',
'province.tsv',
)
TEST_INVALID_URL = 'http://www.29df.c'
TEST_IMAGE_LINK = 'https://www.python.org/static/img/python-logo@2x.png'
class testWWW(unittest.TestCase):
"""Test."""
@pytest.mark.slow
def test_read(self):
"""Test."""
data = www.read(TEST_JSON_URL)
self.assertIn('Station', data)
data_selenium = www.read(TEST_JSON_URL, use_selenium=True)
self.assertIn(data, data_selenium)
def test_read_json(self):
"""Test."""
data = www.read_json(TEST_JSON_URL)
self.assertIn('Station', data[0])
def test_read_tsv(self):
"""Test."""
data = www.read_tsv(TEST_TSV_URL)
self.assertEqual(len(data), 9)
self.assertEqual(data[0]['province_id'], 'LK-1')
def test_invalid_url(self):
"""Test."""
data = www.read_json(TEST_INVALID_URL)
self.assertEqual(data, None)
def test_download_binary(self):
"""Test."""
file_name = '/tmp/utils.test_www.file.png'
www.download_binary(
TEST_IMAGE_LINK,
file_name,
)
@pytest.mark.slow
def test_exists(self):
"""Test."""
self.assertTrue(www.exists('https://www.python.org/'))
self.assertFalse(www.exists('https://www.python123.org/'))
def test_get_all_urls(self):
"""Test."""
self.assertGreater(
len(www.get_all_urls('https://www.python.org/')),
50,
)
| 24.60274
| 72
| 0.604677
| 1,336
| 0.743875
| 0
| 0
| 444
| 0.247216
| 0
| 0
| 483
| 0.268931
|
67f6677df6c93e2d632b899ab9dc98b595479ae0
| 19,511
|
py
|
Python
|
src/qrl/core/State.py
|
scottdonaldau/QRL
|
fb78c1cdf227330ace46f590a36cc6a52c7af3fe
|
[
"MIT"
] | 1
|
2020-07-12T23:40:48.000Z
|
2020-07-12T23:40:48.000Z
|
src/qrl/core/State.py
|
scottdonaldau/QRL
|
fb78c1cdf227330ace46f590a36cc6a52c7af3fe
|
[
"MIT"
] | null | null | null |
src/qrl/core/State.py
|
scottdonaldau/QRL
|
fb78c1cdf227330ace46f590a36cc6a52c7af3fe
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from typing import Optional
from statistics import median
import functools
from google.protobuf.json_format import MessageToJson, Parse
from pyqrllib.pyqrllib import bin2hstr
from pyqryptonight.pyqryptonight import UInt256ToString
from qrl.core import config
from qrl.core.BlockMetadata import BlockMetadata
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.Block import Block
from qrl.core.misc import logger, db
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.TransferTokenTransaction import TransferTokenTransaction
from qrl.core.txs.TokenTransaction import TokenTransaction
from qrl.core.txs.CoinBase import CoinBase
from qrl.core.TokenMetadata import TokenMetadata
from qrl.core.AddressState import AddressState
from qrl.core.LastTransactions import LastTransactions
from qrl.core.TransactionMetadata import TransactionMetadata
from qrl.generated import qrl_pb2, qrlstateinfo_pb2
class State:
# FIXME: Rename to PersistentState
# FIXME: Move blockchain caching/storage over here
# FIXME: Improve key generation
def __init__(self):
self._db = db.DB() # generate db object here
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._db is not None:
if self._db.db is not None:
del self._db.db
del self._db
self._db = None
@property
def batch(self):
return self._db.get_batch()
@property
def total_coin_supply(self):
try:
return int.from_bytes(self._db.get_raw(b'total_coin_supply'), byteorder='big', signed=False)
except KeyError:
return 0
def get_block_size_limit(self, block: Block):
# NOTE: Miner /
block_size_list = []
for _ in range(0, 10):
block = self.get_block(block.prev_headerhash)
if not block:
return None
block_size_list.append(block.size)
if block.block_number == 0:
break
return max(config.dev.block_min_size_limit, config.dev.size_multiplier * median(block_size_list))
def put_block(self, block: Block, batch):
self._db.put_raw(block.headerhash, block.serialize(), batch)
def get_block(self, header_hash: bytes) -> Optional[Block]:
try:
data = self._db.get_raw(header_hash)
return Block.deserialize(data)
except KeyError:
logger.debug('[get_block] Block header_hash %s not found', bin2hstr(header_hash).encode())
except Exception as e:
logger.error('[get_block] %s', e)
return None
def put_block_metadata(self, headerhash: bytes, block_metadata: BlockMetadata, batch):
self._db.put_raw(b'metadata_' + headerhash, block_metadata.serialize(), batch)
def get_block_metadata(self, header_hash: bytes) -> Optional[BlockMetadata]:
try:
data = self._db.get_raw(b'metadata_' + header_hash)
return BlockMetadata.deserialize(data)
except KeyError:
logger.debug('[get_block_metadata] Block header_hash %s not found',
b'metadata_' + bin2hstr(header_hash).encode())
except Exception as e:
logger.error('[get_block_metadata] %s', e)
return None
def remove_blocknumber_mapping(self, block_number, batch):
self._db.delete(str(block_number).encode(), batch)
def put_block_number_mapping(self, block_number: int, block_number_mapping, batch):
self._db.put_raw(str(block_number).encode(), MessageToJson(block_number_mapping, sort_keys=True).encode(), batch)
def get_block_number_mapping(self, block_number: int) -> Optional[qrl_pb2.BlockNumberMapping]:
try:
data = self._db.get_raw(str(block_number).encode())
block_number_mapping = qrl_pb2.BlockNumberMapping()
return Parse(data, block_number_mapping)
except KeyError:
logger.debug('[get_block_number_mapping] Block #%s not found', block_number)
except Exception as e:
logger.error('[get_block_number_mapping] %s', e)
return None
def get_block_by_number(self, block_number: int) -> Optional[Block]:
block_number_mapping = self.get_block_number_mapping(block_number)
if not block_number_mapping:
return None
return self.get_block(block_number_mapping.headerhash)
@staticmethod
def prepare_address_list(block) -> set:
addresses = set()
for proto_tx in block.transactions:
tx = Transaction.from_pbdata(proto_tx)
tx.set_affected_address(addresses)
for genesis_balance in GenesisBlock().genesis_balance:
bytes_addr = genesis_balance.address
if bytes_addr not in addresses:
addresses.add(bytes_addr)
return addresses
def put_addresses_state(self, addresses_state: dict, batch=None):
"""
:param addresses_state:
:param batch:
:return:
"""
for address in addresses_state:
address_state = addresses_state[address]
data = address_state.pbdata.SerializeToString()
self._db.put_raw(address_state.address, data, batch)
def get_state_mainchain(self, addresses_set: set):
addresses_state = dict()
for address in addresses_set:
addresses_state[address] = self.get_address_state(address)
return addresses_state
def get_mainchain_height(self) -> int:
try:
return int.from_bytes(self._db.get_raw(b'blockheight'), byteorder='big', signed=False)
except KeyError:
pass
except Exception as e:
logger.error('get_blockheight Exception %s', e)
return -1
@property
def last_block(self):
block_number = self.get_mainchain_height()
return self.get_block_by_number(block_number)
def update_mainchain_height(self, height, batch):
self._db.put_raw(b'blockheight', height.to_bytes(8, byteorder='big', signed=False), batch)
def _remove_last_tx(self, block, batch):
if len(block.transactions) == 0:
return
try:
last_txn = LastTransactions.deserialize(self._db.get_raw(b'last_txn'))
except: # noqa
return
for protobuf_txn in block.transactions:
txn = Transaction.from_pbdata(protobuf_txn)
i = 0
while i < len(last_txn.tx_metadata):
tx = Transaction.from_pbdata(last_txn.tx_metadata[i].transaction)
if txn.txhash == tx.txhash:
del last_txn.tx_metadata[i]
break
i += 1
self._db.put_raw(b'last_txn', last_txn.serialize(), batch)
def _update_last_tx(self, block, batch):
if len(block.transactions) == 0:
return
last_txn = LastTransactions()
try:
last_txn = LastTransactions.deserialize(self._db.get_raw(b'last_txn'))
except: # noqa
pass
for protobuf_txn in block.transactions[-20:]:
txn = Transaction.from_pbdata(protobuf_txn)
if isinstance(txn, CoinBase):
continue
last_txn.add(txn, block.block_number, block.timestamp)
self._db.put_raw(b'last_txn', last_txn.serialize(), batch)
def get_last_txs(self):
try:
last_txn = LastTransactions.deserialize(self._db.get_raw(b'last_txn'))
except: # noqa
return []
txs = []
for tx_metadata in last_txn.tx_metadata:
data = tx_metadata.transaction
tx = Transaction.from_pbdata(data)
txs.append(tx)
return txs
#########################################
#########################################
#########################################
#########################################
#########################################
def get_token_metadata(self, token_txhash: bytes):
try:
data = self._db.get_raw(b'token_' + token_txhash)
return TokenMetadata.deserialize(data)
except KeyError:
pass
except Exception as e:
logger.error('[get_token_metadata] %s', e)
return None
def update_token_metadata(self, transfer_token: TransferTokenTransaction):
token_metadata = self.get_token_metadata(transfer_token.token_txhash)
token_metadata.update([transfer_token.txhash])
self._db.put_raw(b'token_' + transfer_token.token_txhash,
token_metadata.serialize())
def create_token_metadata(self, token: TokenTransaction):
token_metadata = TokenMetadata.create(token_txhash=token.txhash, transfer_token_txhashes=[token.txhash])
self._db.put_raw(b'token_' + token.txhash,
token_metadata.serialize())
def remove_transfer_token_metadata(self, transfer_token: TransferTokenTransaction):
token_metadata = self.get_token_metadata(transfer_token.token_txhash)
token_metadata.remove(transfer_token.txhash)
self._db.put_raw(b'token_' + transfer_token.token_txhash,
token_metadata.serialize())
def remove_token_metadata(self, token: TokenTransaction):
self._db.delete(b'token_' + token.txhash)
#########################################
#########################################
#########################################
#########################################
#########################################
def get_txn_count(self, addr):
try:
return int.from_bytes(self._db.get_raw(b'txn_count_' + addr), byteorder='big', signed=False)
except KeyError:
pass
except Exception as e:
# FIXME: Review
logger.error('Exception in get_txn_count')
logger.exception(e)
return 0
#########################################
#########################################
#########################################
#########################################
#########################################
def rollback_tx_metadata(self, block, batch):
fee_reward = 0
for protobuf_txn in block.transactions:
txn = Transaction.from_pbdata(protobuf_txn)
fee_reward += txn.fee
self.remove_tx_metadata(txn, batch)
# FIXME: Being updated without batch, need to fix,
if isinstance(txn, TransferTokenTransaction):
self.remove_transfer_token_metadata(txn)
elif isinstance(txn, TokenTransaction):
self.remove_token_metadata(txn)
self._decrease_txn_count(self.get_txn_count(txn.addr_from),
txn.addr_from)
txn = Transaction.from_pbdata(block.transactions[0]) # Coinbase Transaction
self._update_total_coin_supply(fee_reward - txn.amount)
self._remove_last_tx(block, batch)
def update_tx_metadata(self, block, batch):
fee_reward = 0
# TODO (cyyber): Move To State Cache, instead of writing directly
for protobuf_txn in block.transactions:
txn = Transaction.from_pbdata(protobuf_txn)
fee_reward += txn.fee
self.put_tx_metadata(txn,
block.block_number,
block.timestamp,
batch)
# FIXME: Being updated without batch, need to fix,
if isinstance(txn, TransferTokenTransaction):
self.update_token_metadata(txn)
elif isinstance(txn, TokenTransaction):
self.create_token_metadata(txn)
self._increase_txn_count(self.get_txn_count(txn.addr_from),
txn.addr_from)
txn = Transaction.from_pbdata(block.transactions[0]) # Coinbase Transaction
self._update_total_coin_supply(txn.amount - fee_reward)
self._update_last_tx(block, batch)
def remove_tx_metadata(self, txn, batch):
try:
self._db.delete(txn.txhash, batch)
except Exception:
pass
def put_tx_metadata(self, txn: Transaction, block_number: int, timestamp: int, batch):
try:
tm = TransactionMetadata.create(tx=txn,
block_number=block_number,
timestamp=timestamp)
self._db.put_raw(txn.txhash,
tm.serialize(),
batch)
except Exception:
pass
def get_tx_metadata(self, txhash: bytes):
try:
tx_metadata = TransactionMetadata.deserialize(self._db.get_raw(txhash))
except Exception:
return None
data, block_number = tx_metadata.transaction, tx_metadata.block_number
return Transaction.from_pbdata(data), block_number
#########################################
#########################################
#########################################
#########################################
#########################################
def _increase_txn_count(self, last_count: int, addr: bytes):
# FIXME: This should be transactional
self._db.put_raw(b'txn_count_' + addr, (last_count + 1).to_bytes(8, byteorder='big', signed=False))
def _decrease_txn_count(self, last_count: int, addr: bytes):
# FIXME: This should be transactional
if last_count == 0:
raise ValueError('Cannot decrease transaction count last_count: %s, addr %s' % (last_count, bin2hstr(addr)))
self._db.put_raw(b'txn_count_' + addr, (last_count - 1).to_bytes(8, byteorder='big', signed=False))
def get_address_state(self, address: bytes) -> AddressState:
try:
data = self._db.get_raw(address)
pbdata = qrl_pb2.AddressState()
pbdata.ParseFromString(bytes(data))
address_state = AddressState(pbdata)
return address_state
except KeyError:
return AddressState.get_default(address)
def get_all_address_state(self) -> list:
addresses_state = []
try:
for address in self._db.get_db_keys(False):
if AddressState.address_is_valid(address) or address == config.dev.coinbase_address:
addresses_state.append(self.get_address_state(address).pbdata)
return addresses_state
except Exception as e:
logger.error("Exception in get_addresses_state %s", e)
return []
def get_address_balance(self, addr: bytes) -> int:
return self.get_address_state(addr).balance
def get_address_nonce(self, addr: bytes) -> int:
return self.get_address_state(addr).nonce
def get_address_is_used(self, address: bytes) -> bool:
# FIXME: Probably obsolete
try:
return self.get_address_state(address) is not None
except KeyError:
return False
except Exception as e:
# FIXME: Review
logger.error('Exception in address_used')
logger.exception(e)
raise
def _return_all_addresses(self):
addresses = []
for key, data in self._db.RangeIter(b'Q', b'Qz'):
pbdata = qrl_pb2.AddressState()
pbdata.ParseFromString(bytes(data))
address_state = AddressState(pbdata)
addresses.append(address_state)
return addresses
def write_batch(self, batch):
self._db.write_batch(batch)
#########################################
#########################################
#########################################
#########################################
#########################################
def _update_total_coin_supply(self, balance):
self._db.put_raw(b'total_coin_supply', (self.total_coin_supply + balance).to_bytes(8, byteorder='big', signed=False))
def get_measurement(self, block_timestamp, parent_headerhash, parent_metadata: BlockMetadata):
count_headerhashes = len(parent_metadata.last_N_headerhashes)
if count_headerhashes == 0:
return config.dev.mining_setpoint_blocktime
elif count_headerhashes == 1:
nth_block = self.get_block(parent_headerhash)
count_headerhashes += 1
else:
nth_block = self.get_block(parent_metadata.last_N_headerhashes[1])
nth_block_timestamp = nth_block.timestamp
if count_headerhashes < config.dev.N_measurement:
nth_block_timestamp -= config.dev.mining_setpoint_blocktime
return (block_timestamp - nth_block_timestamp) // count_headerhashes
def _delete(self, key, batch):
self._db.delete(key, batch)
def put_fork_state(self, fork_state: qrlstateinfo_pb2.ForkState, batch=None):
self._db.put_raw(b'fork_state', fork_state.SerializeToString(), batch)
def get_fork_state(self) -> Optional[qrlstateinfo_pb2.ForkState]:
try:
data = self._db.get_raw(b'fork_state')
fork_state = qrlstateinfo_pb2.ForkState()
fork_state.ParseFromString(bytes(data))
return fork_state
except KeyError:
return None
except Exception as e:
logger.error('Exception in get_fork_state')
logger.exception(e)
raise
def delete_fork_state(self, batch=None):
self._db.delete(b'fork_state', batch)
@functools.lru_cache(maxsize=config.dev.block_timeseries_size + 50)
def get_block_datapoint(self, headerhash):
block = self.get_block(headerhash)
if block is None:
return None
block_metadata = self.get_block_metadata(headerhash)
prev_block_metadata = self.get_block_metadata(block.prev_headerhash)
prev_block = self.get_block(block.prev_headerhash)
data_point = qrl_pb2.BlockDataPoint()
data_point.number = block.block_number
data_point.header_hash = headerhash
if prev_block is not None:
data_point.header_hash_prev = prev_block.headerhash
data_point.timestamp = block.timestamp
data_point.time_last = 0
data_point.time_movavg = 0
data_point.difficulty = UInt256ToString(block_metadata.block_difficulty)
if prev_block is not None:
data_point.time_last = block.timestamp - prev_block.timestamp
if prev_block.block_number == 0:
data_point.time_last = config.dev.mining_setpoint_blocktime
movavg = self.get_measurement(block.timestamp,
block.prev_headerhash,
prev_block_metadata)
data_point.time_movavg = movavg
try:
# FIXME: need to consider average difficulty here
data_point.hash_power = int(data_point.difficulty) * (config.dev.mining_setpoint_blocktime / movavg)
except ZeroDivisionError:
data_point.hash_power = 0
return data_point
| 38.559289
| 125
| 0.602788
| 18,439
| 0.945057
| 0
| 0
| 2,471
| 0.126647
| 0
| 0
| 2,599
| 0.133207
|
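A hedged sketch of driving the context-manager protocol defined by State above, assuming the qrl package is installed and a node database exists at the configured path:

# Hypothetical usage sketch; assumes a configured QRL node database.
from qrl.core.State import State

with State() as state:                      # __enter__/__exit__ manage the DB handle
    height = state.get_mainchain_height()   # returns -1 when no height is stored
    print('mainchain height:', height)
    if height >= 0:
        block = state.get_block_by_number(height)
        if block is not None:
            print('tip block number:', block.block_number)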
67f6729eb5c33b2e9485a361bcba852adc1d1e4b
| 2,670
|
py
|
Python
|
data/make_stterror_data/main.py
|
gcunhase/StackedDeBERT
|
82777114fd99cafc6e2a3d760e774f007c563245
|
[
"MIT"
] | 32
|
2020-01-03T09:53:03.000Z
|
2021-09-07T07:23:26.000Z
|
data/make_stterror_data/main.py
|
gcunhase/StackedDeBERT
|
82777114fd99cafc6e2a3d760e774f007c563245
|
[
"MIT"
] | null | null | null |
data/make_stterror_data/main.py
|
gcunhase/StackedDeBERT
|
82777114fd99cafc6e2a3d760e774f007c563245
|
[
"MIT"
] | 6
|
2020-01-21T06:50:21.000Z
|
2021-01-22T08:04:00.000Z
|
import os.path
from timeit import default_timer as timer
import data.make_stterror_data.utils as utils
from data.make_stterror_data.handler import HandlerIntent
from data.make_stterror_data.parser import snips_parser
__author__ = "Gwena Cunha"
""" Main module for Snips
text -> TTS -> STT -> wrong text
"""
def main():
# 1. Settings
args = snips_parser()
audio_file_dir = args.data_dir # "data/intent_snips/"
audios_relative_dir = args.audios_dir # "results_tts_audios/"
recovered_texts_relative_dir = args.recovered_texts_dir # "results_stt_recovered_texts/"
scores_dir = args.scores_dir # "results_bleu_score/"
text_filename = args.filename # "test.tsv"
tts_type_arr = args.tts_types # ["gtts", "macsay"]
stt_type_arr = args.stt_types # ["witai"]
audio_type = ".wav"
textHandler = HandlerIntent(audio_file_dir, text_filename) # Initialize TextHandler
# 2. TTS from single file
audios_dir = os.path.join(utils.project_dir_name(), audio_file_dir, audios_relative_dir)
utils.ensure_dir(audios_dir)
for tts_type in tts_type_arr:
text_results_dir = "{}/{}/".format(audios_relative_dir, tts_type)
textHandler.text2audio(audio_files_dir=text_results_dir, audio_type=audio_type, tts_type=tts_type)
# 3. Apply STT to directory and get audio referring to that line
recovered_texts_dir = os.path.join(utils.project_dir_name(), audio_file_dir, recovered_texts_relative_dir)
utils.ensure_dir(recovered_texts_dir)
for tts_type in tts_type_arr:
text_results_dir = "{}/{}/".format(audios_relative_dir, tts_type)
for stt_type in stt_type_arr:
textHandler.audio2text(audio_files_dir=text_results_dir, audio_type=audio_type,
stt_type=stt_type, recovered_texts_dir=recovered_texts_relative_dir,
stt_out_text_filename="{}_{}_{}.tsv".format(text_filename.split('.tsv')[0], tts_type, stt_type))
# 4. BLEU scores
for tts_type in tts_type_arr:
for stt_type in stt_type_arr:
stt_out_text_filename = "{}_{}_{}.tsv".format(text_filename.split('.tsv')[0], tts_type, stt_type)
scores_filename = "{}_{}_{}.txt".format(text_filename.split('.tsv')[0], tts_type, stt_type)
textHandler.bleu_score(recovered_texts_dir=recovered_texts_relative_dir,
stt_out_text_filename=stt_out_text_filename, scores_dir=scores_dir,
scores_filename=scores_filename)
if __name__ == '__main__':
time = timer()
main()
print("Program ran for %.2f minutes" % ((timer()-time)/60))
| 45.254237
| 131
| 0.695506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 486
| 0.182022
|
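The pipeline above derives one output file per (TTS, STT) pair from the base filename; a small self-contained sketch of that naming scheme, with hypothetical values:

# Standalone sketch of the "<name>_<tts>_<stt>.tsv" naming used above.
from itertools import product

text_filename = 'test.tsv'                  # hypothetical base file
tts_type_arr = ['gtts', 'macsay']
stt_type_arr = ['witai']

for tts_type, stt_type in product(tts_type_arr, stt_type_arr):
    stt_out = '{}_{}_{}.tsv'.format(text_filename.split('.tsv')[0], tts_type, stt_type)
    print(stt_out)                          # test_gtts_witai.tsv, test_macsay_witai.tsv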
67f6d526ab4ecec5625261ee10602db862d65a55
| 5,591
|
py
|
Python
|
src/tk_live_model_test.py
|
KarlWithK/gesture
|
d60204684c1e3868177e76b62d74d899d39d287d
|
[
"MIT"
] | null | null | null |
src/tk_live_model_test.py
|
KarlWithK/gesture
|
d60204684c1e3868177e76b62d74d899d39d287d
|
[
"MIT"
] | null | null | null |
src/tk_live_model_test.py
|
KarlWithK/gesture
|
d60204684c1e3868177e76b62d74d899d39d287d
|
[
"MIT"
] | 2
|
2021-09-01T01:06:23.000Z
|
2021-09-06T00:18:54.000Z
|
import tkinter as tk
from PIL import Image, ImageTk
from cv2 import cv2
import numpy as np
import mediapipe as mp
from keyboard import press_and_release as press
from json import load
from data_preprocessor import DataGenerator
from gestures import GESTURES
import tensorflow as tf
TARGET_FRAMERATE: int = 20
GESTURE_LENGTH: int = 20
TFLITE_MODEL_PATH: str = "saved_models/MODEL-2021-06-02-16-12-10.tflite"
VIDEO_WIDTH = 1920
VIDEO_HEIGHT = 1080
keys = load(open("keybinds.json", "r"))
for key in keys:
if key in GESTURES:
GESTURES[key]['keybind'] = keys[key]
class LiveModelTester(tk.Tk):
"""
Main Window
"""
def __init__(self, *args, **kwargs):
# TKinter setup
tk.Tk.__init__(self, *args, **kwargs)
self.wm_title("Gesture Recognition Tester")
# MediaPipe setup
self.mpHands = mp.solutions.hands.Hands(
min_detection_confidence=0.6, min_tracking_confidence=0.75, max_num_hands=1
)
# OpenCV setup
self.cap = cv2.VideoCapture(0)
self.cap.set(cv2.CAP_PROP_FPS, 60)
# self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, VIDEO_WIDTH)
# self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, VIDEO_HEIGHT)
# OpenCV current frame
self.image = None
# Video Stream Frame
self.videoFrame = tk.Frame(self, width=800, height=800)
self.videoFrame.grid(row=0, column=0, padx=10, pady=10)
self.videoLabel = tk.Label(self.videoFrame)
self.videoLabel.grid(row=0, column=0)
self.predictionLabel = tk.Label(self, text="")
self.predictionLabel.grid(row=1, column=0)
# Toggle keyboard input
self.keyboardToggle = tk.BooleanVar()
self.useKeyboardToggle = tk.Checkbutton(
self, text="Send Keypresses", onvalue=True, offvalue=False, variable=self.keyboardToggle)
self.useKeyboardToggle.grid(row=1, column=1, padx=5)
self.frameCache = []
self.skipFrames = 0
self.interpreter = tf.lite.Interpreter(TFLITE_MODEL_PATH)
self.interpreter.allocate_tensors()
# Start event loop
self.appLoop()
def appLoop(self) -> None:
"""
Event loop
"""
success, hand = self.fetchHand()
if success and self.skipFrames <= 0:
self.frameCache.append(hand)
if len(self.frameCache) > GESTURE_LENGTH:
self.frameCache.pop(0)
self.updatePrediction()
self.skipFrames -= 1
img = Image.fromarray(cv2.cvtColor(self.image, cv2.COLOR_BGR2RGBA))
imgtk = ImageTk.PhotoImage(image=img)
self.videoLabel.imgtk = imgtk
self.videoLabel.configure(image=imgtk)
self.videoLabel.after(int(1000 / TARGET_FRAMERATE), self.appLoop)
def updatePrediction(self):
if len(self.frameCache) != GESTURE_LENGTH:
return
sample = np.array(
DataGenerator.center_sample(np.array(self.frameCache))[None, :],
dtype="float32",
)
self.interpreter.set_tensor(
self.interpreter.get_input_details()[0]["index"], sample
)
self.interpreter.invoke()
prediction = self.interpreter.get_tensor(
self.interpreter.get_output_details()[0]["index"]
)
gestureLabel = str(list(GESTURES)[np.argmax(prediction)])
gestureCertainty = round(np.max(prediction) * 100, 2)
predictionString = "{} {}%".format(gestureLabel, str(gestureCertainty))
self.predictionLabel.config(text=predictionString)
if self.keyboardToggle.get() and gestureCertainty > 96 and "keybind" in GESTURES[gestureLabel]:
press(GESTURES[gestureLabel]['keybind'])
# print(gestureLabel)
# empty framecache
self.frameCache = []
self.skipFrames = 10
def fetchHand(self, draw_hand=True) -> tuple:
"""
Returns a tuple of (success, hand), where hand is
a Hand is an array of shape (21,3)
Also sets self.image property to a frame
with the hand drawn on it.
"""
success, self.image = self.cap.read()
if not success:
return (False, None)
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
self.image = cv2.cvtColor(cv2.flip(self.image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
self.image.flags.writeable = False
results = self.mpHands.process(self.image)
# Draw the hand annotations on the image.
self.image.flags.writeable = True
self.image = cv2.cvtColor(self.image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
hand = np.array(
[(i.x, i.y, i.z)
for i in hand_landmarks.ListFields()[0][1]]
)
if draw_hand:
mp.solutions.drawing_utils.draw_landmarks(
self.image,
hand_landmarks,
mp.solutions.hands.HAND_CONNECTIONS,
)
return (True, hand)
return (False, None)
if __name__ == "__main__":
app = LiveModelTester()
app.mainloop()
| 34.512346
| 104
| 0.597746
| 4,899
| 0.87623
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.17403
|
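updatePrediction above is built around the standard tf.lite.Interpreter round-trip; the same round-trip in isolation, with a placeholder model path and a dummy input shaped from the model's own input details:

# Hypothetical sketch; MODEL_PATH is a placeholder for any float32-input .tflite file.
import numpy as np
import tensorflow as tf

MODEL_PATH = 'saved_models/example.tflite'

interpreter = tf.lite.Interpreter(MODEL_PATH)
interpreter.allocate_tensors()

input_detail = interpreter.get_input_details()[0]
dummy = np.zeros(input_detail['shape'], dtype='float32')   # stand-in sample

interpreter.set_tensor(input_detail['index'], dummy)
interpreter.invoke()
prediction = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
print(prediction.shape)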
67f86eeb953024e2463d4d73c584b0e83d0b4555
| 12,761
|
py
|
Python
|
wykop/api/client.py
|
selfisekai/wykop-sdk-reborn
|
7f17c5b2a3d282b5aaf72475a0f58ba66d5c5c5d
|
[
"MIT"
] | null | null | null |
wykop/api/client.py
|
selfisekai/wykop-sdk-reborn
|
7f17c5b2a3d282b5aaf72475a0f58ba66d5c5c5d
|
[
"MIT"
] | null | null | null |
wykop/api/client.py
|
selfisekai/wykop-sdk-reborn
|
7f17c5b2a3d282b5aaf72475a0f58ba66d5c5c5d
|
[
"MIT"
] | null | null | null |
import logging
from typing import Dict, List
from wykop.api.api_const import PAGE_NAMED_ARG, BODY_NAMED_ARG, FILE_POST_NAME
from wykop.core.credentials import Credentials
from wykop.core.requestor import Requestor
log = logging.getLogger(__name__)
class WykopAPI:
"""Wykop API version 2."""
def __init__(self, appkey, secretkey, account_key=None,
output='', response_format='json'):
self.requestor = Requestor(
credentials=Credentials(appkey, secretkey, account_key),
output=output,
response_format=response_format
)
def request(self, rtype, rmethod=None,
named_params=None, api_params=None, post_params=None, file_params=None):
return self.requestor.request(rtype, rmethod=rmethod,
named_params=named_params,
api_params=api_params,
post_params=post_params,
file_params=file_params)
def authenticate(self, account_key=None):
self.requestor.authenticate(account_key)
# entries
def entries_stream(self, page=1, first_id=None):
named_params = self \
.__with_page(page) \
.update(dict(firstId=first_id))
return self.request('Entries', 'Stream', named_params=named_params)
def entries_hot(self, page=1, period=12):
assert period in [6, 12, 24]
named_params = self \
.__with_page(page) \
.update(dict(period=period))
return self.request('Entries', 'Hot',
named_params=named_params)
def entries_active(self, page=1):
return self.request('Entries', 'Active',
named_params=self.__with_page(page))
def entries_observed(self, page=1):
return self.request('Entries', 'Observed',
named_params=self.__with_page(page))
def entry(self, entry_id):
return self.request('Entries', 'Entry',
api_params=self.__api_param(entry_id))
def entry_add(self, body: str, file=None, file_url: str = None, is_adult_media: bool = False):
return self.request('Entries', 'Add',
post_params=self.content_post_params(body, file_url, is_adult_media),
file_params=self.__with_file(file))
def entry_edit(self, entry_id: str, body: str, file=None, file_url: str = None, is_adult_media: bool = False):
return self.request('Entries', 'Edit',
post_params=self.content_post_params(body, file_url, is_adult_media),
api_params=self.__api_param(entry_id),
file_params=self.__with_file(file))
def entry_vote_up(self, entry_id: str):
return self.request('Entries', 'VoteUp',
api_params=self.__api_param(entry_id))
def entry_vote_remove(self, entry_id: str):
return self.request('Entries', 'VoteRemove',
api_params=self.__api_param(entry_id))
def entry_upvoters(self, entry_id: str):
return self.request('Entries', 'Upvoters',
api_params=self.__api_param(entry_id))
def entry_delete(self, entry_id: str):
return self.request('Entries', 'Delete',
api_params=self.__api_param(entry_id))
def entry_favorite_toggle(self, entry_id: str):
return self.request('Entries', 'Favorite',
api_params=self.__api_param(entry_id))
def entry_survey_vote(self, entry_id: str, answer_id: str):
return self.request('Entries', 'SurveyVote',
api_params=[entry_id, answer_id])
# comments
def entry_comment(self, comment_id: str):
return self.request('Entries', 'Comment',
api_params=self.__api_param(comment_id))
def entry_comment_add(self, entry_id: str, body: str, file=None, file_url: str = None,
is_adult_media: bool = False):
return self.request('Entries', 'CommentAdd',
post_params=self.content_post_params(body, file_url, is_adult_media),
api_params=self.__api_param(entry_id),
file_params=self.__with_file(file))
def entry_comment_edit(self, comment_id: str, body: str, file=None, file_url: str = None,
is_adult_media: bool = False):
return self.request('Entries', 'CommentEdit',
post_params=self.content_post_params(body, file_url, is_adult_media),
api_params=self.__api_param(comment_id),
file_params=self.__with_file(file))
def entry_comment_delete(self, comment_id: str):
return self.request('Entries', 'CommentDelete',
api_params=self.__api_param(comment_id))
def entry_comment_vote_up(self, comment_id: str):
return self.request('Entries', 'CommentVoteUp',
api_params=self.__api_param(comment_id))
def entry_comment_vote_remote(self, comment_id: str):
return self.request('Entries', 'CommentVoteRemove',
api_params=self.__api_param(comment_id))
def entry_comment_observed(self, page: int = 1):
return self.request('Entries', 'ObservedComments',
named_params=self.__with_page(page))
def entry_comment_favorite_toggle(self, entry_id: str):
return self.request('Entries', 'CommentFavorite',
api_params=self.__api_param(entry_id))
# links
def links_promoted(self, page=1):
return self.request('links', 'promoted',
named_params=self.__with_page(page))
# mywykop
# profiles
def observe_profile(self, username):
named_params = {
'observe': username,
}
return self.request('profiles', named_params=named_params)
def unobserve_profile(self, username):
named_params = {
'unobserve': username,
}
return self.request('profiles', named_params=named_params)
def block_profile(self, username):
named_params = {
'block': username,
}
return self.request('profiles', named_params=named_params)
def unblock_profile(self, username):
named_params = {
'unblock': username,
}
return self.request('profiles', named_params=named_params)
# hits
def hits_popular(self):
return self.request('hits', 'popular')
# pm
def conversations_list(self):
return self.request('pm', 'conversationsList')
def conversation(self, receiver: str):
return self.request('pm', 'Conversation',
api_params=self.__api_param(receiver))
def send_message(self, receiver: str, message: str):
return self.request('pm', 'SendMessage',
post_params=self.__with_body(message),
api_params=self.__api_param(receiver))
def delete_conversation(self, receiver: str):
return self.request('pm', 'DeleteConversation',
api_params=self.__api_param(receiver))
# notifications
def notifications_direct(self, page=1):
return self.request('notifications',
named_params=self.__with_page(page))
def notifications_direct_count(self):
return self.request('notifications', 'Count')
def notifications_hashtags_notifications(self, page=1):
return self.request('notifications', 'hashtags',
named_params=self.__with_page(page))
def notifications_hashtags_count(self):
return self.request('notifications', 'hashtagscount')
def notifications_all(self, page=1):
return self.request('notifications', 'total',
named_params=self.__with_page(page))
def notifications_all_count(self):
return self.request('notifications', 'totalcount')
def notification_mark_all_as_read(self):
return self.request('Notifications', 'ReadAllNotifications')
def notifications_mark_all_direct_as_read(self):
return self.request('Notifications', 'ReadDirectedNotifications')
def notifications_mark_all_hashtag_as_read(self):
return self.request('Notifications', 'ReadHashTagsNotifications')
def notification_mark_as_read(self, notification_id):
return self.request('Notifications', 'MarkAsRead',
api_params=self.__api_param(notification_id))
# search
def search_links(self, page=1, query=None, when=None, votes=None, from_date=None, to_date=None, what=None,
sort=None):
assert len(query) > 2 if query else True
assert when in ["all", "today", "yesterday", "week", "month", "range"] if when else True
assert what in ["all", "promoted", "archived", "duplicates"] if when else True
assert sort in ["best", "diggs", "comments", "new"] if when else True
post_params = {
'q': query,
'when': when,
'votes': votes,
'from': from_date,
'to': to_date,
'what': what,
'sort': sort
}
return self.request('Search', 'Links',
post_params=post_params,
named_params=self.__with_page(page))
def search_entries(self, page=1, query=None, when=None, votes=None, from_date=None, to_date=None):
assert len(query) > 2 if query else True
assert when in ["all", "today", "yesterday", "week", "month", "range"] if when else True
post_params = {
'q': query,
'when': when,
'votes': votes,
'from': from_date,
'to': to_date
}
return self.request('Search', 'Entries',
post_params=post_params,
named_params=self.__with_page(page))
def search_profiles(self, query):
assert len(query) > 2 if query else True
post_params = {
'q': query,
}
return self.request('Search', 'Profiles',
post_params=post_params)
# tags
def tag(self, tag, page=1):
return self.request('Tags', 'Index',
named_params=dict(page=page),
api_params=self.__api_param(tag))
def tag_links(self, tag, page=1):
return self.request('Tags', 'Links',
named_params=self.__with_page(page),
api_params=self.__api_param(tag))
def tag_entries(self, tag, page=1):
return self.request('Tags', 'Entries',
named_params=self.__with_page(page),
api_params=self.__api_param(tag))
def observe_tag(self, tag):
return self.request('Tags', 'Observe',
api_params=self.__api_param(tag))
def unobserve_tag(self, tag):
return self.request('Tags', 'Unobserve',
api_params=self.__api_param(tag))
def enable_tags_notifications(self, tag):
return self.request('Tags', 'Notify',
api_params=self.__api_param(tag))
def disable_tags_notifications(self, tag):
return self.request('Tags', 'Dontnotify',
api_params=self.__api_param(tag))
def block_tag(self, tag):
return self.request('Tags', 'Block',
api_params=self.__api_param(tag))
def unblock_tag(self, tag):
return self.request('Tags', 'Unblock',
api_params=self.__api_param(tag))
@staticmethod
def __api_param(param: str) -> List[str]:
return [str(param)]
@staticmethod
def __with_page(page: int) -> Dict[str, int]:
return {PAGE_NAMED_ARG: page}
@staticmethod
def __with_body(body: str) -> Dict[str, str]:
return {BODY_NAMED_ARG: body}
@staticmethod
def __with_file(file: str) -> Dict[str, str]:
return {FILE_POST_NAME: file} if file else None
@staticmethod
def content_post_params(body: str, file_url: str, is_adult_media: bool):
post_params = {
'adultmedia': is_adult_media,
'body': body,
'embed': file_url
}
return post_params
| 37.754438
| 114
| 0.587728
| 12,507
| 0.980096
| 0
| 0
| 657
| 0.051485
| 0
| 0
| 1,439
| 0.112765
|
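A hedged sketch of driving the client above; the appkey/secretkey values are placeholders and require a registered Wykop application:

# Hypothetical usage sketch; placeholder credentials.
from wykop.api.client import WykopAPI

api = WykopAPI(appkey='APP_KEY', secretkey='SECRET_KEY')

promoted = api.links_promoted(page=1)        # front-page links
hot = api.entries_hot(page=1, period=12)     # period must be 6, 12 or 24
tagged = api.tag_entries('python', page=1)   # microblog entries under a tag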
67f9a1f6ffa0fc0bfe7226b1e9ede9e0f2fe3d7a
| 1,461
|
py
|
Python
|
brainbox/tests/test_singlecell.py
|
SebastianBruijns/ibllib
|
49f2091b7a53430c00c339b862dfc1a53aab008b
|
[
"MIT"
] | null | null | null |
brainbox/tests/test_singlecell.py
|
SebastianBruijns/ibllib
|
49f2091b7a53430c00c339b862dfc1a53aab008b
|
[
"MIT"
] | null | null | null |
brainbox/tests/test_singlecell.py
|
SebastianBruijns/ibllib
|
49f2091b7a53430c00c339b862dfc1a53aab008b
|
[
"MIT"
] | null | null | null |
from brainbox.singlecell import acorr, calculate_peths
import unittest
import numpy as np
class TestPopulation(unittest.TestCase):
def test_acorr_0(self):
spike_times = np.array([0, 10, 10, 20])
bin_size = 1
winsize_bins = 2 * 3 + 1
c_expected = np.zeros(7, dtype=np.int32)
c_expected[3] = 1
c = acorr(spike_times, bin_size=bin_size, window_size=winsize_bins)
self.assertTrue(np.allclose(c, c_expected))
class TestPeths(unittest.TestCase):
def test_peths_synthetic(self):
n_spikes = 20000
n_clusters = 20
n_events = 200
record_length = 1654
cluster_sel = [1, 2, 3, 6, 15, 16]
np.random.seed(seed=42)
spike_times = np.sort(np.random.rand(n_spikes, ) * record_length)
spike_clusters = np.random.randint(0, n_clusters, n_spikes)
event_times = np.sort(np.random.rand(n_events, ) * record_length)
peth, fr = calculate_peths(spike_times, spike_clusters, cluster_ids=cluster_sel,
align_times=event_times)
self.assertTrue(peth.means.shape[0] == len(cluster_sel))
self.assertTrue(np.all(peth.means.shape == peth.stds.shape))
self.assertTrue(np.all(fr.shape == (n_events, len(cluster_sel), 28)))
self.assertTrue(peth.tscale.size == 28)
def test_firing_rate():
pass
if __name__ == "__main__":
np.random.seed(0)
unittest.main(exit=False)
| 31.085106
| 88
| 0.644764
| 1,249
| 0.854894
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.006845
|
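A small synthetic call in the same spirit as TestPeths above; the data is random, so only the returned shapes are meaningful:

# Synthetic sketch mirroring the shapes pinned down by the tests above.
import numpy as np
from brainbox.singlecell import acorr, calculate_peths

np.random.seed(0)
spike_times = np.sort(np.random.rand(5000) * 1000.0)
spike_clusters = np.random.randint(0, 10, spike_times.size)
event_times = np.sort(np.random.rand(50) * 1000.0)

peth, fr = calculate_peths(spike_times, spike_clusters,
                           cluster_ids=[1, 2, 3], align_times=event_times)
print(peth.means.shape, fr.shape)   # (n_clusters, n_bins), (n_events, n_clusters, n_bins)

print(acorr(np.array([0, 10, 10, 20]), bin_size=1, window_size=7))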
67f9b6a00e2c9b6075dbb4dc4f6b1acedc0ffc2d
| 11,958
|
py
|
Python
|
test/test_base_metric.py
|
Spraitazz/metric-learn
|
137880d9c6ce9a2b81a8af24c07d80e528f657cd
|
[
"MIT"
] | 547
|
2019-08-01T23:21:30.000Z
|
2022-03-31T10:23:04.000Z
|
test/test_base_metric.py
|
Spraitazz/metric-learn
|
137880d9c6ce9a2b81a8af24c07d80e528f657cd
|
[
"MIT"
] | 104
|
2019-08-02T10:15:53.000Z
|
2022-03-29T20:33:55.000Z
|
test/test_base_metric.py
|
Spraitazz/metric-learn
|
137880d9c6ce9a2b81a8af24c07d80e528f657cd
|
[
"MIT"
] | 69
|
2019-08-12T16:22:57.000Z
|
2022-03-10T15:10:02.000Z
|
import pytest
import re
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from test.test_utils import ids_metric_learners, metric_learners, remove_y
from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22
def remove_spaces(s):
return re.sub(r'\s+', '', s)
def sk_repr_kwargs(def_kwargs, nndef_kwargs):
"""Given the non-default arguments, and the default
keywords arguments, build the string that will appear
in the __repr__ of the estimator, depending on the
version of scikit-learn.
"""
if SKLEARN_AT_LEAST_0_22:
def_kwargs = {}
def_kwargs.update(nndef_kwargs)
args_str = ",".join(f"{key}={repr(value)}"
for key, value in def_kwargs.items())
return args_str
class TestStringRepr(unittest.TestCase):
def test_covariance(self):
def_kwargs = {'preprocessor': None}
nndef_kwargs = {}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.Covariance())),
remove_spaces(f"Covariance({merged_kwargs})"))
def test_lmnn(self):
def_kwargs = {'convergence_tol': 0.001, 'init': 'auto', 'k': 3,
'learn_rate': 1e-07, 'max_iter': 1000, 'min_iter': 50,
'n_components': None, 'preprocessor': None,
'random_state': None, 'regularization': 0.5,
'verbose': False}
nndef_kwargs = {'convergence_tol': 0.01, 'k': 6}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LMNN(convergence_tol=0.01, k=6))),
remove_spaces(f"LMNN({merged_kwargs})"))
def test_nca(self):
def_kwargs = {'init': 'auto', 'max_iter': 100, 'n_components': None,
'preprocessor': None, 'random_state': None, 'tol': None,
'verbose': False}
nndef_kwargs = {'max_iter': 42}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.NCA(max_iter=42))),
remove_spaces(f"NCA({merged_kwargs})"))
def test_lfda(self):
def_kwargs = {'embedding_type': 'weighted', 'k': None,
'n_components': None, 'preprocessor': None}
nndef_kwargs = {'k': 2}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LFDA(k=2))),
remove_spaces(f"LFDA({merged_kwargs})"))
def test_itml(self):
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'preprocessor': None,
'prior': 'identity', 'random_state': None, 'verbose': False}
nndef_kwargs = {'gamma': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))),
remove_spaces(f"ITML({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'verbose': False}
nndef_kwargs = {'num_constraints': 7}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.ITML_Supervised(num_constraints=7))),
remove_spaces(f"ITML_Supervised({merged_kwargs})"))
def test_lsml(self):
def_kwargs = {'max_iter': 1000, 'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False}
nndef_kwargs = {'tol': 0.1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.LSML(tol=0.1))),
remove_spaces(f"LSML({merged_kwargs})"))
def_kwargs = {'max_iter': 1000, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'tol': 0.001, 'verbose': False,
'weights': None}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.LSML_Supervised(verbose=True))),
remove_spaces(f"LSML_Supervised({merged_kwargs})"))
def test_sdml(self):
def_kwargs = {'balance_param': 0.5, 'preprocessor': None,
'prior': 'identity', 'random_state': None,
'sparsity_param': 0.01, 'verbose': False}
nndef_kwargs = {'verbose': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.SDML(verbose=True))),
remove_spaces(f"SDML({merged_kwargs})"))
def_kwargs = {'balance_param': 0.5, 'num_constraints': None,
'preprocessor': None, 'prior': 'identity',
'random_state': None, 'sparsity_param': 0.01,
'verbose': False}
nndef_kwargs = {'sparsity_param': 0.5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.SDML_Supervised(sparsity_param=0.5))),
remove_spaces(f"SDML_Supervised({merged_kwargs})"))
def test_rca(self):
def_kwargs = {'n_components': None, 'preprocessor': None}
nndef_kwargs = {'n_components': 3}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.RCA(n_components=3))),
remove_spaces(f"RCA({merged_kwargs})"))
def_kwargs = {'chunk_size': 2, 'n_components': None, 'num_chunks': 100,
'preprocessor': None, 'random_state': None}
nndef_kwargs = {'num_chunks': 5}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.RCA_Supervised(num_chunks=5))),
remove_spaces(f"RCA_Supervised({merged_kwargs})"))
def test_mlkr(self):
def_kwargs = {'init': 'auto', 'max_iter': 1000,
'n_components': None, 'preprocessor': None,
'random_state': None, 'tol': None, 'verbose': False}
nndef_kwargs = {'max_iter': 777}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MLKR(max_iter=777))),
remove_spaces(f"MLKR({merged_kwargs})"))
def test_mmc(self):
def_kwargs = {'convergence_threshold': 0.001, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'preprocessor': None,
'random_state': None, 'verbose': False}
nndef_kwargs = {'diagonal': True}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=True))),
remove_spaces(f"MMC({merged_kwargs})"))
def_kwargs = {'convergence_threshold': 1e-06, 'diagonal': False,
'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
'max_proj': 10000, 'num_constraints': None,
'preprocessor': None, 'random_state': None,
'verbose': False}
nndef_kwargs = {'max_iter': 1}
merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
self.assertEqual(
remove_spaces(str(metric_learn.MMC_Supervised(max_iter=1))),
remove_spaces(f"MMC_Supervised({merged_kwargs})"))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_is_independent_from_metric_learner(estimator,
build_dataset):
"""Tests that the get_metric method returns a function that is independent
from the original metric learner"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
# we fit the metric learner on it and then we compute the metric on some
# points
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
score = metric(X[0], X[1])
# then we refit the estimator on another dataset
model.fit(*remove_y(model, np.sin(input_data), labels))
# we recompute the distance between the two points: it should be the same
score_bis = metric(X[0], X[1])
assert score_bis == score
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
"""Tests that the metric returned by get_metric raises errors similar to
the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_raises = [(X[0].tolist() + [5.2], X[1]), # vectors with
# different dimensions
(X[0:4], X[1:5]), # 2D vectors
(X[0].tolist() + [5.2], X[1] + [7.2])]
# vectors of same dimension but incompatible with what the metric learner
# was trained on
for u, v in list_test_get_metric_raises:
with pytest.raises(ValueError):
metric(u, v)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_works_does_not_raise(estimator, build_dataset):
"""Tests that the metric returned by get_metric does not raise errors (or
warnings) similarly to the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(*remove_y(model, input_data, labels))
metric = model.get_metric()
list_test_get_metric_doesnt_raise = [(X[0], X[1]),
(X[0].tolist(), X[1].tolist()),
(X[0][None], X[1][None])]
for u, v in list_test_get_metric_doesnt_raise:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
# Test that the scalar case works
model.components_ = np.array([3.1])
metric = model.get_metric()
for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_n_components(estimator, build_dataset):
"""Check that estimators that have a n_components parameters can use it
and that it actually works as expected"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
if hasattr(model, 'n_components'):
set_random_state(model)
model.set_params(n_components=None)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1], X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] - 1)
model.fit(*remove_y(model, input_data, labels))
assert model.components_.shape == (X.shape[1] - 1, X.shape[1])
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=X.shape[1] + 1)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
model = clone(estimator)
set_random_state(model)
model.set_params(n_components=0)
with pytest.raises(ValueError) as expected_err:
model.fit(*remove_y(model, input_data, labels))
assert (str(expected_err.value) ==
'Invalid n_components, must be in [1, {}]'.format(X.shape[1]))
if __name__ == '__main__':
unittest.main()
| 42.860215
| 79
| 0.647516
| 6,700
| 0.560294
| 0
| 0
| 4,409
| 0.368707
| 0
| 0
| 3,149
| 0.263338
|
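A minimal concrete instance of the fit → get_metric → distance pattern that the tests above rely on; the iris data is just a convenient stand-in:

# Usage sketch of the get_metric() pattern exercised above.
from sklearn.datasets import load_iris
from metric_learn import NCA

X, y = load_iris(return_X_y=True)
model = NCA(max_iter=50, random_state=0)
model.fit(X, y)

metric = model.get_metric()     # plain callable, independent of the estimator
print(metric(X[0], X[1]))       # learned distance between two samples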
67fa9c3bff783bccc4fb93e62dd21fe1343fce47
| 881
|
py
|
Python
|
examples/geomopt/20-callback.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 501
|
2018-12-06T23:48:17.000Z
|
2022-03-31T11:53:18.000Z
|
examples/geomopt/20-callback.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 710
|
2018-11-26T22:04:52.000Z
|
2022-03-30T03:53:12.000Z
|
examples/geomopt/20-callback.py
|
QuESt-Calculator/pyscf
|
0ed03633b699505c7278f1eb501342667d0aa910
|
[
"Apache-2.0"
] | 273
|
2018-11-26T10:10:24.000Z
|
2022-03-30T12:25:28.000Z
|
#!/usr/bin/env python
'''
Use a callback function to monitor each step of the geometry optimization.
'''
from pyscf import gto, scf
from pyscf.geomopt import berny_solver
from pyscf.geomopt import geometric_solver
mol = gto.M(atom='''
C 0.000000 0.000000 -0.542500
O 0.000000 0.000000 0.677500
H 0.000000 0.9353074360871938 -1.082500
H 0.000000 -0.9353074360871938 -1.082500
''',
basis='3-21g')
mf = scf.RHF(mol)
# Run analyze function in callback
def cb(envs):
mf = envs['g_scanner'].base
mf.analyze(verbose=4)
#
# Method 1: Pass callback to optimize function
#
geometric_solver.optimize(mf, callback=cb)
berny_solver.optimize(mf, callback=cb)
#
# Method 2: Add callback to geometry optimizer
#
opt = mf.nuc_grad_method().as_scanner().optimizer()
opt.callback = cb
opt.kernel()
| 22.589744
| 68
| 0.659478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 472
| 0.535755
|
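A hedged variation of the callback above that also counts optimization steps; only the 'g_scanner' key already used in cb() is assumed:

# Sketch of a counting callback; reuses only the envs access shown above.
counter = {'steps': 0}

def counting_cb(envs):
    counter['steps'] += 1
    mf_step = envs['g_scanner'].base     # same access pattern as cb()
    mf_step.analyze(verbose=4)

# geometric_solver.optimize(mf, callback=counting_cb)
# print('geometry steps:', counter['steps'])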
67fa9dc096cb1ead50c5acc747b6ed866a1988a5
| 8,251
|
py
|
Python
|
Q1_final_project_v2.py
|
wolhandlerdeb/clustering
|
d84b0ff91d20b8dbf45e235fc8204f8cedf1ecc5
|
[
"MIT"
] | null | null | null |
Q1_final_project_v2.py
|
wolhandlerdeb/clustering
|
d84b0ff91d20b8dbf45e235fc8204f8cedf1ecc5
|
[
"MIT"
] | null | null | null |
Q1_final_project_v2.py
|
wolhandlerdeb/clustering
|
d84b0ff91d20b8dbf45e235fc8204f8cedf1ecc5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import scipy as sc
from scipy.stats import randint, norm, multivariate_normal, ortho_group
from scipy import linalg
from scipy.linalg import subspace_angles, orth
from scipy.optimize import fmin
import math
from statistics import mean
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import itertools as it
import matplotlib.pyplot as plt
from cluster.selfrepresentation import ElasticNetSubspaceClustering
import time
# functions for simulate data
def first_simulation(p, dim, k):
b = [orth(np.random.rand(p, dim)) for i in range(k + 1)]
return b
def find_theta_max(b, t, k):
theta_max = []
for i in range(1, k + 1):
for j in range(1, i):
theta_max.append(subspace_angles(b[i], b[j]).max())
max_avg_theta = mean(theta_max)
theta = max_avg_theta * t
return theta
def second_simulation(p, k, dim, theta, b):
def find_a_for_theta(a, b=b, k=k, theta=theta):
temp_theta = []
for i in range(1, k + 1):
for j in range(1, i):
temp_theta.append(subspace_angles(b[0] * (1 - a) + b[i] * a, b[0] * (1 - a) + b[j] * a).max())
return mean(temp_theta) - theta
a = sc.optimize.bisect(find_a_for_theta, 0, 1)
B = [b[0] * (1 - a) + b[i] * a for i in range(1, k + 1)]
return B
def third_simulation(n, p, dim, B, k, theta):
z = np.random.randint(0, k, n)
w = np.random.multivariate_normal(mean=np.zeros(dim), cov=np.diag(np.ones(dim)), size=n)
X = np.zeros((n, p))
for i in range(n):
X[i,] = np.random.multivariate_normal(mean=np.array(np.dot(np.array(w[i, :]), B[z[i]].T)).flatten(),
cov=np.diag(np.ones(p))) # sigma value is missing
return n, p, dim, theta, X, z, B
# data simulation
def final_data_simulation(k):
nn = [2 ** j for j in range(3, 11)]
pp = [2 ** j for j in range(4, 8)]
dd = [2 ** -j for j in range(1, 5)]
tt = [10 ** -j for j in range(0, 3)]
df = pd.DataFrame(columns=['n', 'p', 'dim', 'theta', 'X', 'z', 'B'])
for p in pp:
for d in dd:
dim = int(d * p)
b = first_simulation(p=p, dim=dim, k=k)
for t in tt:
theta = find_theta_max(b=b, t=t, k=k)
for n in nn:
B = second_simulation(p=p, k=k, dim=dim, theta=theta, b=b)
row = pd.Series(list(third_simulation(n=n, p=p, dim=dim, B=B, k=k, theta=theta)[0:7]),
["n", "p", "dim", "theta", "X", "z", "B"])
df = df.append([row], ignore_index=True)
return df
df = final_data_simulation(4)
X = df['X'][31]
z = df['z'][31]
z
dim = 4
p = 16
k = 4
kmeans = KMeans(n_clusters=k)
kmeans
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
# for i in range(k) :
i = 1
df_new = temp_df[temp_df['cluster'] == i].drop(['cluster'], axis=1)
cluster_kmean = KMeans(n_clusters=k).fit_predict(X)
data = {'cluster1': z, 'cluster2': cluster_kmean}
clusters = pd.DataFrame(data, index=range(len(z)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
c = [i for i in range(k)]
for l, p in enumerate(all_per):
dic = dict(zip(c, p))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
accuracy_rate_all_per.max(), len(cluster_kmean)
per = all_per[2]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
clusters.groupby(['cluster2', 'premut_cluster']).size()
# find kmeans clusters and subspaces
def pca_subspace(df, i, dim):
df_new = df[df['cluster'] == i].drop(['cluster'], axis=1)
pca_components_number = len(df_new) - 1 if len(df_new) < dim else dim # handling with low n (lower than dim)
pca = PCA(n_components=pca_components_number)
pca.fit_transform(df_new)
B_kmeans = pca.components_
return B_kmeans.T
def find_kmeans_subspace(X, k, dim):
kmeans = KMeans(n_clusters=k)
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
B_kmean = [pca_subspace(temp_df, i, dim) for i in range(k)]
return B_kmean
def find_ensc_subspace(X, k, dim):
temp_df = pd.DataFrame(X)
temp_df['cluster'] = ElasticNetSubspaceClustering(n_clusters=k, algorithm='lasso_lars', gamma=50).fit(X.T)
B_ensc = [pca_subspace(temp_df, i, dim) for i in range(k)]
return B_ensc
# Recovery Performance
def performance_measure1(k, B1, B2):
all_per = list(it.permutations(range(k)))
sum_cos_angles_all_per = np.zeros(len(all_per))
for l, val in enumerate(all_per):
for i in range(k):
if B2[val[i]].shape[1] > 0: # handling with empty clusters
sum_cos_angles_all_per[l] += (math.cos(
subspace_angles(B1[i], B2[val[i]]).max())) ** 2 # use min or max????????????????
cost_subspace = sum_cos_angles_all_per.max()
return cost_subspace
# WHAT ARE WE DOING WITH EMPTY CLUSTERS
def performance_measure2(k, cluster1, cluster2):
data = {'cluster1': cluster1, 'cluster2': cluster2}
clusters = pd.DataFrame(data, index=range(len(cluster1)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
for l, per in enumerate(all_per):
c = [i for i in range(k)]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
cost_cluster = (accuracy_rate_all_per.max()) / len(cluster1)
return cost_cluster
def all_process(k):
df = final_data_simulation(k)
df['B_kmean'] = df.apply(lambda x: find_kmeans_subspace(x['X'], k, x['dim']), axis=1)
df['cluster_kmean'] = df.apply(lambda x: KMeans(n_clusters=k).fit_predict(x['X']),
axis=1) # try to return the clusters in "find_kmeans_subspace"
# df['B_ensc'] = df.apply(lambda x: find_ensc_subspace(x['X'], k, x['dim']), axis=1)
# df['cluster_ensc']=df.apply(lambda x: ElasticNetSubspaceClustering(n_clusters=k,algorithm='lasso_lars',gamma=50).fit(x['X'].T), axis=1)
return df
measure1_kmean = pd.DataFrame()
measure2_kmean = pd.DataFrame()
k = 4
for iter in range(2):
df = all_process(k)
measure1_kmean.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_kmean']), axis=1), True)
measure2_kmean.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_kmean']), axis=1),
True)
# measure1_ensc.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_ensc']), axis=1), True)
# measure2_ensc.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_ensc']), axis=1), True)
df['measure1_kmean'] = measure1_kmean.apply(lambda x: mean(x), axis=1)
df['measure2_kmean'] = measure2_kmean.apply(lambda x: mean(x), axis=1)
# df['measure1_ensc'] = measure1_ensc.apply(lambda x: mean(x), axis=1)
# df['measure2_ensc'] = measure2_ensc.apply(lambda x: mean(x), axis=1)
df['theta_degree'] = df.apply(lambda x: math.degrees(x['theta']), axis=1)
# ploting
def plotting_performance_measure(df, measure):
pp = [2 ** j for j in range(4, 8)]
dd = [2 ** -j for j in range(1, 5)]
plt.title("PERFORMANCE MEASURE1 - KMEANS")
i = 1
for p in pp:
for d in dd:
dim = int(d * p)
sns_df = df[(df['p'] == p) & (df['dim'] == dim)]
sns_df = sns_df.pivot("theta_degree", "n", measure)
plt.subplot(4, 4, i)
ax = sns.heatmap(sns_df)
plt.title('p= {p} ,dim= {dim} '.format(p=p, dim=dim))
i += 1
plotting_performance_measure(df, "measure1_kmean")
plotting_performance_measure(df, "measure2_kmean")
plotting_performance_measure(df, "measure1_ensc")
plotting_performance_measure(df, "measure2_ensc")
| 37.848624
| 141
| 0.630105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,538
| 0.186402
|
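performance_measure2 above scores a clustering by its best label permutation; the same idea in a tiny self-contained form with toy labels:

# Standalone sketch of best-permutation clustering accuracy.
import itertools as it
import numpy as np

true_z = np.array([0, 0, 1, 1, 2, 2])
pred_z = np.array([1, 1, 2, 2, 0, 0])       # same partition, relabelled
k = 3

best = 0.0
for per in it.permutations(range(k)):
    mapping = dict(zip(range(k), per))
    remapped = np.array([mapping[c] for c in pred_z])
    best = max(best, float(np.mean(remapped == true_z)))
print(best)                                  # 1.0 -> identical up to relabelling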
67facec68d3d68647d57845cc972fe7ead4b3012
| 793
|
py
|
Python
|
lnbits/extensions/usermanager/models.py
|
blackcoffeexbt/lnbits-legend
|
a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64
|
[
"MIT"
] | 76
|
2021-11-02T22:19:59.000Z
|
2022-03-30T18:01:33.000Z
|
lnbits/extensions/usermanager/models.py
|
blackcoffeexbt/lnbits-legend
|
a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64
|
[
"MIT"
] | 100
|
2021-11-04T16:33:28.000Z
|
2022-03-30T15:03:52.000Z
|
lnbits/extensions/usermanager/models.py
|
blackcoffeexbt/lnbits-legend
|
a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64
|
[
"MIT"
] | 57
|
2021-11-08T06:43:59.000Z
|
2022-03-31T08:53:16.000Z
|
from sqlite3 import Row
from fastapi.param_functions import Query
from pydantic import BaseModel
from typing import Optional
class CreateUserData(BaseModel):
user_name: str = Query(...)
wallet_name: str = Query(...)
admin_id: str = Query(...)
email: str = Query("")
password: str = Query("")
class CreateUserWallet(BaseModel):
user_id: str = Query(...)
wallet_name: str = Query(...)
admin_id: str = Query(...)
class Users(BaseModel):
id: str
name: str
admin: str
email: Optional[str] = None
password: Optional[str] = None
class Wallets(BaseModel):
id: str
admin: str
name: str
user: str
adminkey: str
inkey: str
@classmethod
def from_row(cls, row: Row) -> "Wallets":
return cls(**dict(row))
| 19.341463
| 45
| 0.630517
| 655
| 0.825977
| 0
| 0
| 90
| 0.113493
| 0
| 0
| 13
| 0.016393
|
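A hedged sketch of constructing the models above directly, with placeholder values; assumes lnbits and its pydantic/FastAPI dependencies are importable:

# Hypothetical usage sketch; placeholder values only.
from lnbits.extensions.usermanager.models import CreateUserData, Wallets

payload = CreateUserData(user_name='alice', wallet_name='main',
                         admin_id='admin123', email='', password='')
print(payload.dict())

row = {'id': 'w1', 'admin': 'admin123', 'name': 'main',
       'user': 'u1', 'adminkey': 'ak', 'inkey': 'ik'}
wallet = Wallets(**row)          # inside lnbits this comes from Wallets.from_row(row)
print(wallet.inkey)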
67fbc8dcaaaab886066c2cc01da3a3bc0ee4a485
| 3,215
|
py
|
Python
|
Operator.py
|
zijieli-Jlee/FGN
|
f707ed31687ea355ab62a1eaf43b5756a6ed883e
|
[
"MIT"
] | 2
|
2022-02-28T07:36:47.000Z
|
2022-03-10T04:45:57.000Z
|
Operator.py
|
BaratiLab/FGN
|
04729eaebfa8395a7d2ebb275761f98dc0342933
|
[
"MIT"
] | null | null | null |
Operator.py
|
BaratiLab/FGN
|
04729eaebfa8395a7d2ebb275761f98dc0342933
|
[
"MIT"
] | null | null | null |
import numba as nb
import numpy as np
import torch
from torch.autograd import Function
from Constants import MPS_KERNEL as w
from Constants import BASE_RADIUS, ND_RAIUS, GRAD_RADIUS, LAP_RADIUS
class DivOp(Function):
"""Compute the divergence of a given physics value.
Implement in terms of pytorch autograd function because we need to minimize the
compressibility during training"""
@staticmethod
def forward(ctx, val, Adj_arr, N0):
if not isinstance(val, torch.Tensor):
val = torch.from_numpy(val)
A = Adj_arr.clone() * (3. / N0)
val.require_grad = True
div_val = torch.zeros((val.size(0), 1), dtype=torch.float32)
ctx.save_for_backward(A)
for dim in range(3):
sliced_val = val[:, dim].view(-1, 1)
div_val += torch.sparse.mm(A[dim], sliced_val).view(-1, 1)
return div_val
@staticmethod
def backward(ctx, grad_input):
grad_input.double()
A, = ctx.saved_tensors
grad_output = []
for dim in range(3):
grad_output += [torch.sparse.mm(
A[dim], grad_input).view(-1, 1)]
grad_output = torch.stack(grad_output).squeeze().view(-1, 3)
return grad_output, None, None
class LapOp(Function):
@staticmethod
def forward(ctx, val, Adj_arr, N0, lam):
if not isinstance(val, torch.Tensor):
val = torch.from_numpy(val)
A = Adj_arr * (2. * 3.)/(N0 * lam)
out = torch.sparse.mm(A, val)
ctx.save_for_backward(A)
return out
@staticmethod
def backward(ctx, grad_input):
grad_input.double()
A, = ctx.saved_tensors
grad_output = torch.sparse.mm(A, grad_input)
return grad_output, None, None, None, None
Divergence = DivOp.apply
Laplacian = LapOp.apply
class GradientOp(object):
@staticmethod
def forward(val, val_min, A, A_diag, N0, to_numpy=True):
if not isinstance(val, torch.Tensor):
val = torch.from_numpy(val)
# val.require_grad = True
val = val.float().view(-1, 1)
val_min = val_min.view(-1, 1)
grad_val = torch.zeros((val.size(0), 3), dtype=torch.float32)
# ctx.save_for_backward(A)
for dim in range(3):
grad_val[:, dim] = (3. / N0) * (torch.sparse.mm(A[dim], val) - torch.sparse.mm(A_diag[dim], val_min)).view(-1,)
if to_numpy:
return grad_val.detach().numpy()
else:
return grad_val
class CollisionOp(object):
@staticmethod
def forward(vel, Adj_arr, coef_rest):
if not isinstance(vel, torch.Tensor):
vel = torch.from_numpy(vel)
fdt = torch.zeros_like(vel)
fdt -= torch.sparse.mm(Adj_arr, vel)
fdt *= (coef_rest + 1.0) / 2.0
correction = torch.sparse.mm(Adj_arr, fdt)
return correction
class SumOp(object):
@staticmethod
def forward(Adj_arr, device='cpu', to_numpy=True):
A = Adj_arr.clone()
I = torch.ones((A.size(0), 1), dtype=torch.float32).to(device)
out = torch.sparse.mm(A, I)
if to_numpy:
return out.cpu().numpy()
else:
return out
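A small, hedged usage sketch for the operators above (not from the original repository): the adjacency matrix, particle count and MPS constants below are made-up values chosen only to show the expected shapes; `Laplacian` and `SumOp` refer to the definitions above.

import torch

# Toy neighbourhood matrix for 3 particles (illustrative values only).
rows = torch.tensor([0, 0, 1, 2])
cols = torch.tensor([1, 2, 0, 0])
vals = torch.tensor([1.0, 0.5, 1.0, 0.5])
Adj = torch.sparse_coo_tensor(torch.stack([rows, cols]), vals, size=(3, 3))

field = torch.randn(3, 1)   # a scalar field sampled at the particles
N0, lam = 6.0, 0.2          # assumed MPS normalisation constants

lap = Laplacian(field, Adj, N0, lam)   # (3, 1) Laplacian estimate
neighbour_sum = SumOp.forward(Adj)     # (3, 1) row sums, returned as numpy
print(lap.shape, neighbour_sum.shape)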
| 31.213592
| 123
| 0.601244
| 2,953
| 0.918507
| 0
| 0
| 2,614
| 0.813064
| 0
| 0
| 238
| 0.074028
|
67fc163e324d1273cf478cbfac97cd26f437a946
| 5,274
|
py
|
Python
|
pythia/LinearRegression.py
|
MaudBoucherit/Pythia
|
0076d8008350c3a323e28c400b26628be34302e6
|
[
"MIT"
] | null | null | null |
pythia/LinearRegression.py
|
MaudBoucherit/Pythia
|
0076d8008350c3a323e28c400b26628be34302e6
|
[
"MIT"
] | 4
|
2018-02-09T01:16:14.000Z
|
2018-03-04T07:48:49.000Z
|
pythia/LinearRegression.py
|
MaudBoucherit/Pythia
|
0076d8008350c3a323e28c400b26628be34302e6
|
[
"MIT"
] | 3
|
2018-02-08T22:52:27.000Z
|
2018-02-08T22:53:05.000Z
|
# LinearRegression.py
# March 2018
#
# This script builds a Linear regression class to analyse data.
# It supports a continuous response and several continuous features.
# The class has a constructor building and fitting the model, and
# a plotting method for residuals.
#
# Dependencies:
#
# Usage:
# from pythia.LinearRegression import LinearRegression
# lm = LinearRegression(X,y)
# print(lm.weights)
# lm.plot_residuals()
## Imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
import pandas as pd
import numpy as np
import numpy.random as random
## The LinearRegression class
class LinearRegression:
"""
LinearRegression is a class performing a linear regression on a data frame
containing continuous features.
Its attributes are the coefficients estimates, the fitted values
and the residuals from fitting a linear regression of y on X.
Args:
        X: a pandas.dataframe containing continuous features (the response is not included)
y: a pandas.Series of same length containing the response
Attributes:
weights: a pandas.Series, the estimated coefficients
fitted: a pandas.Series, the fitted values
residuals: a pandas.Series, the residuals
"""
def __init__(self, X, y):
# Check the type of the features and select the numeric ones
X_mat = X.select_dtypes(include=[np.number], exclude=None)
if X_mat.shape[1] == 0:
raise NameError("You need at least one continuous features")
try:
for var in X_mat.columns:
assert np.all(X_mat[[var]].notnull())
except AssertionError:
raise NameError("Some of your numeric features contain missing values. Please deal with them (remove, impute...) before using this function.")
else:
# Add an intercept column and convert the data frame in a matrix
n = X_mat.shape[0]
X_mat['intercept'] = pd.Series(np.ones(n), index=X_mat.index)
names = X_mat.columns
X_mat = X_mat.as_matrix()
d = X_mat.shape[1]
            y = np.array(y).reshape((n, 1))
# Set hyperparameters
alpha = 0.001
n_iter = 1000000
# The gradient of the squared error
def ols_grad(w):
return np.dot(np.transpose(X_mat), np.dot(X_mat, w) - y)
# A norm function for Frobenius
def norm(x):
return np.sum(np.abs(x))
# Update the weights using gradient method
weights = np.zeros(d).reshape((d,1))
i = 0
grad = ols_grad(weights)
while i < n_iter and norm(grad) > 1e-7:
grad = ols_grad(weights)
weights = weights - alpha*grad
i += 1
temp = {}
for i in range(len(weights)):
temp[names[i]] = weights[i,0]
self.weights = temp
# Calculate the fitted values
self.fitted = np.dot(X_mat, weights)
# Calculate the residuals
self.residuals = y - self.fitted
def plot_residuals(self):
"""
        This method produces diagnostic plots for the fitted linear regression.
It supports a continuous response and several continuous features.
Args:
A LinearRegression object containing
weights: the estimates of the parameters of the linear regression
fitted: the fitted values
residuals: the residuals.
Returns:
Residuals vs Fitted Plot
Normal Q-Q Plot
Fitted vs True Value Plot(s)
"""
assert len(self.residuals) > 0, "There are no residuals"
assert len(self.fitted) > 0, "There are no fitted values"
assert len(self.residuals) == len(self.fitted), "The number of residuals and fitted values do not match"
# Get fitted values and residuals
residuals = self.residuals
fitted = self.fitted
residuals = residuals.flatten()
fitted = fitted.flatten()
# Fitted vs Residuals
plt.figure(figsize=(10,6))
plt.scatter(fitted, residuals, color='grey')
plt.axhline(y = 0, linewidth = 1, color = 'red')
plt.xlabel('Fitted Values')
plt.ylabel('Residuals')
plt.title('Residuals vs. Fitted Values')
resfit = plt.show()
# Normal QQ Plot
res = np.asarray(residuals)
res.sort()
# Generate normal distribution
ndist = random.normal(loc = 0, scale = 1, size = len(res))
ndist.sort()
# Fit Normal Trendline.
fit = np.polyfit(ndist, res, 1)
fit = fit.tolist()
func = np.poly1d(fit)
trendline_y = func(ndist)
plt.figure(figsize=(10,6))
plt.scatter(ndist, res, color = 'grey')
plt.plot(ndist, trendline_y, color = 'red')
plt.title("Normal QQ Plot")
plt.xlabel("Theoretical quantiles")
plt.ylabel("Expreimental quantiles")
qqplot = plt.show()
return (resfit,qqplot)
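For reference, the constructor above is plain gradient descent on the least-squares objective; the standalone sketch below reproduces that update rule on synthetic data (the array shapes, learning rate and noise level are arbitrary choices, not taken from the package).

import numpy as np

rng = np.random.RandomState(0)
X = np.hstack([rng.rand(10, 2), np.ones((10, 1))])   # two features plus an intercept column
true_w = np.array([[2.0], [-1.0], [0.5]])
y = X @ true_w + 0.01 * rng.randn(10, 1)

w = np.zeros((3, 1))
alpha = 0.001
for _ in range(100000):
    grad = X.T @ (X @ w - y)           # gradient of 0.5 * ||Xw - y||^2
    if np.sum(np.abs(grad)) < 1e-7:    # same stopping rule as the class
        break
    w -= alpha * grad
print(w.ravel())                        # close to true_w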
| 32.555556
| 154
| 0.603527
| 4,544
| 0.861585
| 0
| 0
| 0
| 0
| 0
| 0
| 2,464
| 0.467198
|
67fc89d1bcce49307c043c31ae573dd5205a3395
| 289
|
py
|
Python
|
src/renault_api/exceptions.py
|
slater0013/renault-api
|
13c784b6af09331368341c93888f1eb32c46cb19
|
[
"MIT"
] | 44
|
2020-11-01T15:52:33.000Z
|
2022-03-31T04:40:03.000Z
|
src/renault_api/exceptions.py
|
slater0013/renault-api
|
13c784b6af09331368341c93888f1eb32c46cb19
|
[
"MIT"
] | 334
|
2020-11-01T13:00:01.000Z
|
2022-03-31T17:17:40.000Z
|
src/renault_api/exceptions.py
|
slater0013/renault-api
|
13c784b6af09331368341c93888f1eb32c46cb19
|
[
"MIT"
] | 22
|
2020-11-20T08:26:26.000Z
|
2022-03-11T18:58:31.000Z
|
"""Exceptions for Renault API."""
class RenaultException(Exception):  # noqa: N818
    """Base exception for Renault API errors."""

    pass


class NotAuthenticatedException(RenaultException):  # noqa: N818
    """You are not authenticated, or authentication has expired."""

    pass
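A short, hedged example of how calling code might use this hierarchy (the `account` object and its `get_vehicles()` call are hypothetical placeholders, not part of this module):

from renault_api.exceptions import NotAuthenticatedException, RenaultException

def fetch_vehicles(account):
    try:
        return account.get_vehicles()          # hypothetical API call
    except NotAuthenticatedException:
        print("Session expired, please authenticate again")
        raise
    except RenaultException as exc:            # catches any other library error
        print(f"Renault API error: {exc}")
        raise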
| 20.642857
| 67
| 0.702422
| 249
| 0.861592
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.567474
|
67fce63714fc2695753fbce893969560aebb15c1
| 203
|
py
|
Python
|
algorithms/Grayscale.py
|
AadityaMunjal/image-processing-algorithms
|
ff7bba1a4bb3dce930f9481f92a29277084e33d9
|
[
"MIT"
] | 2
|
2021-03-09T03:54:10.000Z
|
2021-03-22T21:35:29.000Z
|
algorithms/Grayscale.py
|
AadityaMunjal/image-processing-algorithms
|
ff7bba1a4bb3dce930f9481f92a29277084e33d9
|
[
"MIT"
] | 1
|
2022-01-20T03:06:27.000Z
|
2022-01-22T12:04:16.000Z
|
algorithms/Grayscale.py
|
AadityaMunjal/image-processing-algorithms
|
ff7bba1a4bb3dce930f9481f92a29277084e33d9
|
[
"MIT"
] | null | null | null |
def grayscale(image):
    for row in range(image.shape[0]):
        for col in range(image.shape[1]):
            avg = sum(image[row][col][i] for i in range(3)) // 3
            image[row][col] = [avg for _ in range(3)]
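A minimal usage sketch (the pixel values are made up; `grayscale` above modifies the array in place):

import numpy as np

img = np.array([[[10, 20, 30], [0, 0, 0]],
                [[255, 255, 255], [90, 120, 150]]], dtype=np.uint8)
grayscale(img)
print(img[0, 0])   # -> [20 20 20], the per-pixel channel average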
| 33.833333
| 58
| 0.600985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67fd6116ebb01570250dd4cf9fbbcabbf9f0ae67
| 5,945
|
py
|
Python
|
analysis/playing_with_pykalman.py
|
rafaelvalero/covid_forecast
|
4e009ade5481f4e3bd48fd8048ca7d293d5d19b4
|
[
"MIT"
] | 3
|
2020-03-20T14:23:51.000Z
|
2020-03-29T18:55:12.000Z
|
analysis/playing_with_pykalman.py
|
rafaelvalero/covid_forecast
|
4e009ade5481f4e3bd48fd8048ca7d293d5d19b4
|
[
"MIT"
] | 2
|
2020-03-21T14:07:17.000Z
|
2020-03-22T07:38:11.000Z
|
analysis/playing_with_pykalman.py
|
rafaelvalero/covid_forecast
|
4e009ade5481f4e3bd48fd8048ca7d293d5d19b4
|
[
"MIT"
] | 1
|
2020-05-12T14:37:28.000Z
|
2020-05-12T14:37:28.000Z
|
'''
=============================
EM for Linear-Gaussian Models
=============================
This example shows how one may use the EM algorithm to estimate model
parameters with a Kalman Filter.
The EM algorithm is a meta-algorithm for learning parameters in probabilistic
models. The algorithm works by first fixing the parameters and finding a closed
form distribution over the unobserved variables, then finds new parameters that
maximize the expected likelihood of the observed variables (where the
expectation is taken over the unobserved ones). Due to convexity arguments, we
are guaranteed that each iteration of the algorithm will increase the
likelihood of the observed data and that it will eventually reach a local
optimum.
The EM algorithm is applied to the Linear-Gaussian system (that is, the model
assumed by the Kalman Filter) by first using the Kalman Smoother to calculate
the distribution over all unobserved variables (in this case, the hidden target
states), then closed-form update equations are used to update the model
parameters.
The first figure plotted contains 4 sets of lines. The first, labeled `true`,
represents the true, unobserved state of the system. The second, labeled
`blind`, represents the predicted state of the system if no measurements are
incorporated. The third, labeled `filtered`, are the state estimates given
measurements up to and including the current time step. Finally, the fourth,
labeled `smoothed`, are the state estimates using all observations for all time
steps. The latter three estimates use parameters learned via 10 iterations of
the EM algorithm.
The second figure contains a single line representing the likelihood of the
observed data as a function of the EM Algorithm iteration.
'''
from pykalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import time
measurements = np.asarray([(399,293),(403,299),(409,308),(416,315),(418,318),(420,323),(429,326),(423,328),(429,334),(431,337),(433,342),(434,352),(434,349),(433,350),(431,350),(430,349),(428,347),(427,345),(425,341),(429,338),(431,328),(410,313),(406,306),(402,299),(397,291),(391,294),(376,270),(372,272),(351,248),(336,244),(327,236),(307,220)])
initial_state_mean = [measurements[0, 0],
0,
measurements[0, 1],
0]
transition_matrix = [[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 1]]
observation_matrix = [[1, 0, 0, 0],
[0, 0, 1, 0]]
kf1 = KalmanFilter(transition_matrices = transition_matrix,
observation_matrices = observation_matrix,
initial_state_mean = initial_state_mean)
kf1 = kf1.em(measurements, n_iter=5)
(smoothed_state_means, smoothed_state_covariances) = kf1.smooth(measurements)
plt.figure(1)
times = range(measurements.shape[0])
plt.plot(times, measurements[:, 0], 'bo',
times, measurements[:, 1], 'ro',
times, smoothed_state_means[:, 0], 'b--',
times, smoothed_state_means[:, 2], 'r--',)
plt.show()
| 49.541667
| 348
| 0.697056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,526
| 0.593103
|
67fd71b159a22e60b64a07348a0a3e35c2a3b7e5
| 382
|
py
|
Python
|
phyutil/__init__.py
|
frib-high-level-controls/phyhlc
|
6486607e3aa0212054a12e9f2ad1a3ef15542f48
|
[
"BSD-3-Clause"
] | 1
|
2018-03-22T15:18:54.000Z
|
2018-03-22T15:18:54.000Z
|
phyutil/__init__.py
|
frib-high-level-controls/phyhlc
|
6486607e3aa0212054a12e9f2ad1a3ef15542f48
|
[
"BSD-3-Clause"
] | null | null | null |
phyutil/__init__.py
|
frib-high-level-controls/phyhlc
|
6486607e3aa0212054a12e9f2ad1a3ef15542f48
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: UTF-8
"""Physics Applications Utility"""
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "Dylan Maxwell"
__version__ = "0.0.1"
import logging
import phylib
import machine
from machine import *
from phylib.libCore import *
# configure the root logger
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
| 21.222222
| 79
| 0.740838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 204
| 0.534031
|
67fdbf96ac87d3b403bf853041d7bc6c394c1dfd
| 1,902
|
py
|
Python
|
pydyn/explicit_blocks.py
|
chhokrad/PYPOWER-Dynamics
|
e6e42fc6975828a51cd01c42a81d7a45844f323f
|
[
"BSD-3-Clause"
] | null | null | null |
pydyn/explicit_blocks.py
|
chhokrad/PYPOWER-Dynamics
|
e6e42fc6975828a51cd01c42a81d7a45844f323f
|
[
"BSD-3-Clause"
] | null | null | null |
pydyn/explicit_blocks.py
|
chhokrad/PYPOWER-Dynamics
|
e6e42fc6975828a51cd01c42a81d7a45844f323f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-13T14:34:41.000Z
|
2021-09-13T14:34:41.000Z
|
#!python3
#
# Copyright (C) 2014-2015 Julius Susanto. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Functions for standard blocks (solves a step)
"""
import numpy as np
# Gain block
# yo = p * yi
# p is a scalar gain coefficient
def gain_block(yi, p):
yo = p * yi
return yo
# Divide block
# yo = yi / p
# p is a scalar divisor
def div_block(yi, p):
    if p != 0:
        yo = yi / p
    else:
        print('Error: division by zero, ignoring division operation')
        yo = yi
    return yo
# Integrator block
# K / sT
# p = [K, T]
def int_block(h, x0, yi, p):
f = yi * p[0] / p[1]
x1 = x0 + h * f
yo = x1
return yo, x1, f
# Lag block
# K / (1 + sT)
# p = [K, T]
def lag_block(h, x0, yi, p):
f = (yi - x0) / p[1]
x1 = x0 + h * f
yo = p[0] * x1
return yo, x1, f
# Lead-Lag block
# (1 + sTa) / (1 + sTb)
# p = [Ta, Tb]
def leadlag_block(h, x0, yi, p):
f = (yi - x0) / p[1]
x1 = x0 + h * f
yo = x1 + p[0] * (yi - x0) / p[1]
return yo, x1, f
# Limiter block
# yo = min_lim, if yi < min_lim
# yo = max_lim, if yi > max_lim
# yo = yi, min_lim <= yi <= max_lim
# p = [min_lim, max_lim]
def lim_block(yi, p):
min_lim = p[0]
max_lim = p[1]
if yi < min_lim:
yo = min_lim
elif yi > max_lim:
yo = max_lim
else:
yo = yi
return yo
# Multiplication block
# yo = yi1 * yi2 * ... * yin
# yi = [yi1, yi2, ... yin]
def mult_block(yi):
yo = np.prod(yi)
return yo
# Summation block
# yo = yi1 + yi2 + ... + yin
# yi = [yi1, yi2, ... yin]
def sum_block(yi):
yo = sum(yi)
return yo
# Washout block
# s / (1 + sT)
# p is the time constant T
def wout_block(h, x0, yi, p):
f = (yi - x0) / p
x1 = x0 + h * f
yo = (yi - x1) / p
return yo, x1, f
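A brief sketch of how these one-step solvers are typically iterated in a simulation loop (the step size, gain and time constant below are arbitrary, and the block is driven by a constant unit input):

# Drive a first-order lag K / (1 + sT) with a constant input of 1.0.
h = 0.01          # integration step (s)
K, T = 1.0, 0.5   # gain and time constant
x = 0.0           # internal state
for _ in range(500):
    yo, x, f = lag_block(h, x, 1.0, [K, T])
print(round(yo, 3))   # approaches the steady-state value K * 1.0 = 1.0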
| 15.463415
| 69
| 0.532072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 880
| 0.462671
|
67ff40cfd4c8a6b2e69d26c388ef6020f73b4c94
| 2,151
|
py
|
Python
|
river/migrations/0012_auto_20191113_1550.py
|
xuziheng1002/django-river
|
7c7f23aa4790e451019c3e2b4d29f35852de17e6
|
[
"BSD-3-Clause"
] | null | null | null |
river/migrations/0012_auto_20191113_1550.py
|
xuziheng1002/django-river
|
7c7f23aa4790e451019c3e2b4d29f35852de17e6
|
[
"BSD-3-Clause"
] | null | null | null |
river/migrations/0012_auto_20191113_1550.py
|
xuziheng1002/django-river
|
7c7f23aa4790e451019c3e2b4d29f35852de17e6
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-11-13 21:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('river', '0011_auto_20191110_1411'),
]
operations = [
migrations.AlterField(
model_name='onapprovedhook',
name='transition_approval',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='on_approved_hooks', to='river.TransitionApproval',
verbose_name='Transition Approval'),
),
migrations.AlterField(
model_name='onapprovedhook',
name='transition_approval_meta',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='on_approved_hooks', to='river.TransitionApprovalMeta', verbose_name='Transition Approval Meta'),
),
migrations.AlterField(
model_name='ontransithook',
name='transition',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='on_transit_hooks', to='river.Transition', verbose_name='Transition'),
),
migrations.AlterField(
model_name='ontransithook',
name='transition_meta',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='on_transit_hooks', to='river.TransitionMeta', verbose_name='Transition Meta'),
),
migrations.AlterField(
model_name='workflow',
name='content_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType', verbose_name='Content Type'),
),
migrations.AlterField(
model_name='workflow',
name='initial_state',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='workflow_this_set_as_initial_state', to='river.State', verbose_name='Initial State'),
),
]
| 45.765957
| 191
| 0.664807
| 1,959
| 0.910739
| 0
| 0
| 0
| 0
| 0
| 0
| 641
| 0.298001
|
db00271e05f78081485f6f0bf77fff9b5da0dd36
| 929
|
py
|
Python
|
nesta/packages/examples/tests/test_example_package.py
|
anniyanvr/nesta
|
4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3
|
[
"MIT"
] | 13
|
2019-06-18T16:53:53.000Z
|
2021-03-04T10:58:52.000Z
|
nesta/packages/examples/tests/test_example_package.py
|
nestauk/old_nesta_daps
|
4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3
|
[
"MIT"
] | 208
|
2018-08-10T13:15:40.000Z
|
2021-07-21T10:16:07.000Z
|
nesta/packages/examples/tests/test_example_package.py
|
nestauk/old_nesta_daps
|
4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3
|
[
"MIT"
] | 8
|
2018-09-20T15:19:23.000Z
|
2020-12-15T17:41:34.000Z
|
from collections import namedtuple
import pytest
from nesta.packages.examples.example_package import some_func
@pytest.fixture
def mocked_row():
    def _mocked_row(*, id, name):
        Row = namedtuple('Row', ['id', 'name'])
        return Row(id=id, name=name)
    return _mocked_row


class TestSomeFunc:
    def test_some_func_returns_true_when_start_string_in_name(self, mocked_row):
        mocked_row = mocked_row(id=1, name='cat')
        assert some_func('cat', mocked_row) == {'my_id': 1, 'data': True}

    def test_some_func_returns_false_when_start_string_not_in_name(self, mocked_row):
        mocked_row = mocked_row(id=2, name='cat')
        assert some_func('dog', mocked_row) == {'my_id': 2, 'data': False}

    def test_some_func_returns_false_when_name_is_none(self, mocked_row):
        mocked_row = mocked_row(id=3, name=None)
        assert some_func('cat', mocked_row) == {'my_id': 3, 'data': False}
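For reference, the contract pinned down by these tests can be read off directly; the reconstruction below is an inference from the assertions, not the package's actual implementation of `some_func`:

def some_func(start_string, row):
    """Return the row id and whether `start_string` occurs in `row.name`."""
    return {'my_id': row.id, 'data': bool(row.name and start_string in row.name)}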
| 33.178571
| 85
| 0.697524
| 635
| 0.683531
| 0
| 0
| 175
| 0.188375
| 0
| 0
| 79
| 0.085038
|
db0097f13bc0f850f8b50c6cc9087132aa46c5fd
| 6,408
|
py
|
Python
|
test/test_misc.py
|
mhthies/smarthomeconnect
|
d93d1038145285af66769ebf10589c1088b323ed
|
[
"Apache-2.0"
] | 5
|
2021-07-02T21:48:45.000Z
|
2021-12-12T21:55:42.000Z
|
test/test_misc.py
|
mhthies/smarthomeconnect
|
d93d1038145285af66769ebf10589c1088b323ed
|
[
"Apache-2.0"
] | 49
|
2020-09-18T20:05:55.000Z
|
2022-03-05T19:51:33.000Z
|
test/test_misc.py
|
mhthies/smarthomeconnect
|
d93d1038145285af66769ebf10589c1088b323ed
|
[
"Apache-2.0"
] | 1
|
2021-12-10T14:50:43.000Z
|
2021-12-10T14:50:43.000Z
|
import asyncio
import unittest
import unittest.mock
import shc.misc
from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable
class MiscTests(unittest.TestCase):
@async_test
async def test_two_way_pipe(self) -> None:
pipe = shc.misc.TwoWayPipe(float)
pub_left = ExampleSubscribable(float)
pub_right = ExampleSubscribable(float)
sub_left = ExampleWritable(float)
sub_right = ExampleWritable(float)
pipe.connect_left(pub_left)
sub_left.connect(pipe)
pipe.connect_right(pub_right)
pipe.connect_right(sub_right)
await pub_left.publish(42.0, [self])
sub_right._write.assert_called_once_with(42.0, [self, pub_left, pipe.right])
sub_left._write.assert_not_called()
sub_right._write.reset_mock()
await pub_right.publish(36.0, [self])
sub_left._write.assert_called_once_with(36.0, [self, pub_right, pipe.left])
sub_right._write.assert_not_called()
@async_test
async def test_two_way_pipe_concurrent_update(self) -> None:
var1 = shc.Variable(int)
pipe = shc.misc.TwoWayPipe(int).connect_left(var1)
var2 = shc.Variable(int).connect(pipe.right)
await asyncio.gather(var1.write(42, []), var2.write(56, []))
self.assertEqual(await var1.read(), await var2.read())
@async_test
async def test_breakable_subscription_simple(self) -> None:
pub = ExampleSubscribable(float)
control = ExampleReadable(bool, True)
sub = ExampleWritable(float)
sub.connect(shc.misc.BreakableSubscription(pub, control))
await pub.publish(42.0, [self])
sub._write.assert_called_once_with(42.0, [self, pub, unittest.mock.ANY])
sub._write.reset_mock()
control.read.side_effect = (False,)
await pub.publish(36.0, [self])
sub._write.assert_not_called()
sub._write.reset_mock()
control.read.side_effect = (True,)
await pub.publish(56.0, [self])
sub._write.assert_called_once_with(56, unittest.mock.ANY)
@async_test
async def test_breakable_subscription_readsubscribable(self) -> None:
pub = shc.Variable(float)
control = shc.Variable(bool, initial_value=False)
sub = ExampleWritable(float)
sub.connect(shc.misc.BreakableSubscription(pub, control))
# pub is uninitialized, so we should not receive anything, when control changes to True
await control.write(True, [self])
await asyncio.sleep(0.01)
sub._write.assert_not_called()
await pub.write(42.0, [self])
await asyncio.sleep(0.01)
sub._write.assert_called_once_with(42.0, [self, pub, unittest.mock.ANY])
sub._write.reset_mock()
await control.write(False, [self])
await pub.write(56.0, [self])
await asyncio.sleep(0.01)
sub._write.assert_not_called()
await control.write(True, [self])
await asyncio.sleep(0.01)
sub._write.assert_called_once_with(56.0, [self, control, unittest.mock.ANY])
@async_test
async def test_hysteresis(self) -> None:
pub = ExampleSubscribable(float)
hystersis = shc.misc.Hysteresis(pub, 42.0, 56.0)
sub = ExampleWritable(bool).connect(hystersis)
# Check initial value
self.assertEqual(False, await hystersis.read())
# Check climbing value
await pub.publish(41.0, [self])
await pub.publish(43.5, [self])
await pub.publish(44.5, [self])
self.assertEqual(False, await hystersis.read())
sub._write.assert_not_called()
await pub.publish(57.4, [self])
sub._write.assert_called_once_with(True, [self, pub, hystersis])
self.assertEqual(True, await hystersis.read())
sub._write.reset_mock()
await pub.publish(58, [self])
sub._write.assert_not_called()
self.assertEqual(True, await hystersis.read())
# Check descending value
await pub.publish(44.5, [self])
self.assertEqual(True, await hystersis.read())
sub._write.assert_not_called()
await pub.publish(41.4, [self])
sub._write.assert_called_once_with(False, [self, pub, hystersis])
self.assertEqual(False, await hystersis.read())
sub._write.reset_mock()
await pub.publish(40.0, [self])
sub._write.assert_not_called()
self.assertEqual(False, await hystersis.read())
# Check jumps
await pub.publish(57.4, [self])
sub._write.assert_called_once_with(True, [self, pub, hystersis])
self.assertEqual(True, await hystersis.read())
sub._write.reset_mock()
await pub.publish(41.4, [self])
sub._write.assert_called_once_with(False, [self, pub, hystersis])
self.assertEqual(False, await hystersis.read())
@async_test
async def test_fade_step_adapter(self) -> None:
subscribable1 = ExampleSubscribable(shc.datatypes.FadeStep)
variable1 = shc.Variable(shc.datatypes.RangeFloat1)\
.connect(shc.misc.FadeStepAdapter(subscribable1))
with self.assertLogs() as logs:
await subscribable1.publish(shc.datatypes.FadeStep(0.5), [self])
await asyncio.sleep(0.05)
self.assertIn("Cannot apply FadeStep", logs.records[0].msg) # type: ignore
await variable1.write(shc.datatypes.RangeFloat1(0.5), [self])
await asyncio.sleep(0.05)
await subscribable1.publish(shc.datatypes.FadeStep(0.25), [self])
await asyncio.sleep(0.05)
self.assertEqual(shc.datatypes.RangeFloat1(0.75), await variable1.read())
await subscribable1.publish(shc.datatypes.FadeStep(0.5), [self])
await asyncio.sleep(0.05)
self.assertEqual(shc.datatypes.RangeFloat1(1.0), await variable1.read())
@async_test
async def test_convert_subscription(self) -> None:
pub = ExampleSubscribable(shc.datatypes.RangeUInt8)
sub = ExampleWritable(shc.datatypes.RangeFloat1)
sub.connect(shc.misc.ConvertSubscription(pub, shc.datatypes.RangeFloat1))
await pub.publish(shc.datatypes.RangeUInt8(255), [self])
sub._write.assert_called_once_with(shc.datatypes.RangeFloat1(1.0), [self, pub, unittest.mock.ANY])
self.assertIsInstance(sub._write.call_args[0][0], shc.datatypes.RangeFloat1)
| 37.473684
| 106
| 0.666042
| 6,245
| 0.974563
| 0
| 0
| 6,168
| 0.962547
| 6,056
| 0.945069
| 204
| 0.031835
|
db00bdc9b4970c171632e8c7e85bbb5706127395
| 27,709
|
py
|
Python
|
pysatSpaceWeather/instruments/sw_f107.py
|
JonathonMSmith/pysatSpaceWeather
|
b403a14bd9a37dd010e97be6e5da15c54a87b888
|
[
"BSD-3-Clause"
] | 3
|
2021-02-02T05:33:46.000Z
|
2022-01-20T16:54:35.000Z
|
pysatSpaceWeather/instruments/sw_f107.py
|
JonathonMSmith/pysatSpaceWeather
|
b403a14bd9a37dd010e97be6e5da15c54a87b888
|
[
"BSD-3-Clause"
] | 48
|
2020-08-13T22:05:06.000Z
|
2022-01-21T22:48:14.000Z
|
pysatSpaceWeather/instruments/sw_f107.py
|
JonathonMSmith/pysatSpaceWeather
|
b403a14bd9a37dd010e97be6e5da15c54a87b888
|
[
"BSD-3-Clause"
] | 3
|
2021-02-02T05:33:54.000Z
|
2021-08-19T17:14:24.000Z
|
# -*- coding: utf-8 -*-
"""Supports F10.7 index values. Downloads data from LASP and the SWPC.
Properties
----------
platform
'sw'
name
'f107'
tag
- 'historic' LASP F10.7 data (downloads by month, loads by day)
- 'prelim' Preliminary SWPC daily solar indices
- 'daily' Daily SWPC solar indices (contains last 30 days)
- 'forecast' Grab forecast data from SWPC (next 3 days)
- '45day' 45-Day Forecast data from the Air Force
Example
-------
Download and load all of the historic F10.7 data. Note that the record does not
extend to the current date; it stops at the most recent date for which
post-processing has been successfully completed.
::
f107 = pysat.Instrument('sw', 'f107', tag='historic')
f107.download(start=f107.lasp_stime, stop=f107.today(), freq='MS')
f107.load(date=f107.lasp_stime, end_date=f107.today())
Note
----
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
f107 = pysat.Instrument('sw', 'f107', tag='forecast')
f107.download()
f107.load(date=f107.tomorrow())
Warnings
--------
The 'forecast' F10.7 data loads three days at a time. Loading multiple files,
loading multiple days, the data padding feature, and the multi_file_day feature
available from the pysat.Instrument object are not appropriate for 'forecast'
data.
Like 'forecast', the '45day' forecast loads a specific period of time (45 days)
and subsequent files contain overlapping data. Thus, loading multiple files,
loading multiple days, the data padding feature, and the multi_file_day feature
available from the pysat.Instrument object are not appropriate for '45day' data.
"""
import datetime as dt
import ftplib
import json
import numpy as np
import os
import requests
import sys
import warnings
import pandas as pds
import pysat
from pysatSpaceWeather.instruments.methods import f107 as mm_f107
from pysatSpaceWeather.instruments.methods.ace import load_csv_data
from pysatSpaceWeather.instruments.methods import general
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'sw'
name = 'f107'
tags = {'historic': 'Daily LASP value of F10.7',
'prelim': 'Preliminary SWPC daily solar indices',
'daily': 'Daily SWPC solar indices (contains last 30 days)',
'forecast': 'SWPC Forecast F107 data next (3 days)',
'45day': 'Air Force 45-day Forecast'}
# Dict keyed by inst_id that lists supported tags for each inst_id
inst_ids = {'': [tag for tag in tags.keys()]}
# Dict keyed by inst_id that lists supported tags and a good day of test data
# Generate today's date to support loading forecast data
now = dt.datetime.utcnow()
today = dt.datetime(now.year, now.month, now.day)
tomorrow = today + pds.DateOffset(days=1)
# The LASP archive start day is also important
lasp_stime = dt.datetime(1947, 2, 14)
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'historic': dt.datetime(2009, 1, 1),
'prelim': dt.datetime(2009, 1, 1),
'daily': tomorrow,
'forecast': tomorrow,
'45day': tomorrow}}
# Other tags assumed to be True
_test_download_travis = {'': {'prelim': False}}
# ----------------------------------------------------------------------------
# Instrument methods
preprocess = general.preprocess
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
self.acknowledgements = mm_f107.acknowledgements(self.name, self.tag)
self.references = mm_f107.references(self.name, self.tag)
logger.info(self.acknowledgements)
# Define the historic F10.7 starting time
if self.tag == 'historic':
self.lasp_stime = lasp_stime
return
def clean(self):
""" Cleaning function for Space Weather indices
Note
----
F10.7 doesn't require cleaning
"""
return
# ----------------------------------------------------------------------------
# Instrument functions
def load(fnames, tag=None, inst_id=None):
"""Load F10.7 index files
Parameters
----------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
inst_id : str or NoneType
satellite id or None (default=None)
Returns
-------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user.
"""
# Get the desired file dates and file names from the daily indexed list
file_dates = list()
if tag in ['historic', 'prelim']:
unique_files = list()
for fname in fnames:
file_dates.append(dt.datetime.strptime(fname[-10:], '%Y-%m-%d'))
if fname[0:-11] not in unique_files:
unique_files.append(fname[0:-11])
fnames = unique_files
# Load the CSV data files
data = load_csv_data(fnames, read_csv_kwargs={"index_col": 0,
"parse_dates": True})
# If there is a date range, downselect here
if len(file_dates) > 0:
idx, = np.where((data.index >= min(file_dates))
& (data.index < max(file_dates) + dt.timedelta(days=1)))
data = data.iloc[idx, :]
# Initialize the metadata
meta = pysat.Meta()
meta['f107'] = {meta.labels.units: 'SFU',
meta.labels.name: 'F10.7 cm solar index',
meta.labels.notes: '',
meta.labels.desc:
'F10.7 cm radio flux in Solar Flux Units (SFU)',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
if tag == '45day':
meta['ap'] = {meta.labels.units: '',
meta.labels.name: 'Daily Ap index',
meta.labels.notes: '',
meta.labels.desc: 'Daily average of 3-h ap indices',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 400}
elif tag == 'daily' or tag == 'prelim':
meta['ssn'] = {meta.labels.units: '',
meta.labels.name: 'Sunspot Number',
meta.labels.notes: '',
meta.labels.desc: 'SESC Sunspot Number',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['ss_area'] = {meta.labels.units: '10$^-6$ Solar Hemisphere',
meta.labels.name: 'Sunspot Area',
meta.labels.notes: '',
meta.labels.desc:
''.join(['Sunspot Area in Millionths of the ',
'Visible Hemisphere']),
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: 1.0e6}
meta['new_reg'] = {meta.labels.units: '',
meta.labels.name: 'New Regions',
meta.labels.notes: '',
meta.labels.desc: 'New active solar regions',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['smf'] = {meta.labels.units: 'G',
meta.labels.name: 'Solar Mean Field',
meta.labels.notes: '',
                       meta.labels.desc: 'Stanford Solar Mean Field',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['goes_bgd_flux'] = {meta.labels.units: 'W/m^2',
meta.labels.name: 'X-ray Background Flux',
meta.labels.notes: '',
meta.labels.desc:
'GOES15 X-ray Background Flux',
meta.labels.fill_val: '*',
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
meta['c_flare'] = {meta.labels.units: '',
meta.labels.name: 'C X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'C-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['m_flare'] = {meta.labels.units: '',
meta.labels.name: 'M X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'M-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['x_flare'] = {meta.labels.units: '',
meta.labels.name: 'X X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'X-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o1_flare'] = {meta.labels.units: '',
meta.labels.name: '1 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '1-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o2_flare'] = {meta.labels.units: '',
meta.labels.name: '2 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '2-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o3_flare'] = {meta.labels.units: '',
meta.labels.name: '3 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '3-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
return data, meta
def list_files(tag=None, inst_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for F10.7 data
Parameters
----------
tag : string or NoneType
Denotes type of file to load.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
-------
out_files : pysat._files.Files
A class containing the verified available files
Note
----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == 'historic':
# Files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'f107_monthly_{year:04d}-{month:02d}.txt'
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
out_files.loc[out_files.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out_files.iloc[-1]
out_files = out_files.asfreq('D', 'pad')
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag == 'prelim':
# Files are by year (and quarter)
if format_str is None:
format_str = ''.join(['f107_prelim_{year:04d}_{month:02d}',
'_v{version:01d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
# Set each file's valid length at a 1-day resolution
orig_files = out_files.sort_index().copy()
new_files = list()
for orig in orig_files.iteritems():
# Version determines each file's valid length
version = int(orig[1].split("_v")[1][0])
doff = pds.DateOffset(years=1) if version == 2 \
else pds.DateOffset(months=3)
istart = orig[0]
iend = istart + doff - pds.DateOffset(days=1)
# Ensure the end time does not extend past the number of
# possible days included based on the file's download time
fname = os.path.join(data_path, orig[1])
dend = dt.datetime.utcfromtimestamp(os.path.getctime(fname))
dend = dend - pds.DateOffset(days=1)
if dend < iend:
iend = dend
# Pad the original file index
out_files.loc[iend] = orig[1]
out_files = out_files.sort_index()
# Save the files at a daily cadence over the desired period
new_files.append(out_files.loc[istart:
iend].asfreq('D', 'pad'))
# Add the newly indexed files to the file output
out_files = pds.concat(new_files, sort=True)
out_files = out_files.dropna()
out_files = out_files.sort_index()
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag in ['daily', 'forecast', '45day']:
format_str = ''.join(['f107_', tag,
'_{year:04d}-{month:02d}-{day:02d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# Pad list of files data to include most recent file under tomorrow
if not out_files.empty:
pds_off = pds.DateOffset(days=1)
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
else:
raise ValueError(' '.join(('Unrecognized tag name for Space',
'Weather Index F107:', tag)))
else:
raise ValueError(' '.join(('A data_path must be passed to the loading',
'routine for F107')))
return out_files
def download(date_array, tag, inst_id, data_path, update_files=False):
"""Routine to download F107 index data
Parameters
-----------
date_array : list-like
Sequence of dates to download date for.
tag : string or NoneType
Denotes type of file to load.
inst_id : string or NoneType
Specifies the satellite ID for a constellation.
data_path : string or NoneType
Path to data directory.
update_files : bool
Re-download data for files that already exist if True (default=False)
Note
----
Called by pysat. Not intended for direct use by user.
Warnings
--------
Only able to download current forecast data, not archived forecasts.
"""
# download standard F107 data
if tag == 'historic':
# Test the date array, updating it if necessary
if date_array.freq != 'MS':
warnings.warn(''.join(['Historic F10.7 downloads should be invoked',
" with the `freq='MS'` option."]))
date_array = pysat.utils.time.create_date_range(
dt.datetime(date_array[0].year, date_array[0].month, 1),
date_array[-1], freq='MS')
# Download from LASP, by month
for dl_date in date_array:
# Create the name to which the local file will be saved
str_date = dl_date.strftime('%Y-%m')
data_file = os.path.join(data_path,
'f107_monthly_{:s}.txt'.format(str_date))
if update_files or not os.path.isfile(data_file):
# Set the download webpage
dstr = ''.join(['http://lasp.colorado.edu/lisird/latis/dap/',
'noaa_radio_flux.json?time%3E=',
dl_date.strftime('%Y-%m-%d'),
'T00:00:00.000Z&time%3C=',
(dl_date + pds.DateOffset(months=1)
- pds.DateOffset(days=1)).strftime('%Y-%m-%d'),
'T00:00:00.000Z'])
# The data is returned as a JSON file
req = requests.get(dstr)
# Process the JSON file
raw_dict = json.loads(req.text)['noaa_radio_flux']
data = pds.DataFrame.from_dict(raw_dict['samples'])
if data.empty:
warnings.warn("no data for {:}".format(dl_date),
UserWarning)
else:
# The file format changed over time
try:
# This is the new data format
times = [dt.datetime.strptime(time, '%Y%m%d')
for time in data.pop('time')]
except ValueError:
# Accepts old file formats
times = [dt.datetime.strptime(time, '%Y %m %d')
for time in data.pop('time')]
data.index = times
# Replace fill value with NaNs
idx, = np.where(data['f107'] == -99999.0)
data.iloc[idx, :] = np.nan
# Create a local CSV file
data.to_csv(data_file, header=True)
elif tag == 'prelim':
ftp = ftplib.FTP('ftp.swpc.noaa.gov') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/indices/old_indices')
bad_fname = list()
# Get the local files, to ensure that the version 1 files are
# downloaded again if more data has been added
local_files = list_files(tag, inst_id, data_path)
# To avoid downloading multiple files, cycle dates based on file length
dl_date = date_array[0]
while dl_date <= date_array[-1]:
# The file name changes, depending on how recent the requested
# data is
qnum = (dl_date.month - 1) // 3 + 1 # Integer floor division
qmonth = (qnum - 1) * 3 + 1
quar = 'Q{:d}_'.format(qnum)
fnames = ['{:04d}{:s}DSD.txt'.format(dl_date.year, ss)
for ss in ['_', quar]]
versions = ["01_v2", "{:02d}_v1".format(qmonth)]
vend = [dt.datetime(dl_date.year, 12, 31),
dt.datetime(dl_date.year, qmonth, 1)
+ pds.DateOffset(months=3) - pds.DateOffset(days=1)]
downloaded = False
rewritten = False
# Attempt the download(s)
for iname, fname in enumerate(fnames):
# Test to see if we already tried this filename
if fname in bad_fname:
continue
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
ofile = '_'.join(['f107', 'prelim',
'{:04d}'.format(dl_date.year),
'{:s}.txt'.format(versions[iname])])
outfile = os.path.join(data_path, ofile)
if os.path.isfile(outfile):
downloaded = True
# Check the date to see if this should be rewritten
checkfile = os.path.split(outfile)[-1]
has_file = local_files == checkfile
if np.any(has_file):
if has_file[has_file].index[-1] < vend[iname]:
# This file will be updated again, but only attempt
# to do so if enough time has passed from the
# last time it was downloaded
yesterday = today - pds.DateOffset(days=1)
if has_file[has_file].index[-1] < yesterday:
rewritten = True
else:
# The file does not exist, if it can be downloaded, it
# should be 'rewritten'
rewritten = True
# Attempt to download if the file does not exist or if the
# file has been updated
if rewritten or not downloaded:
try:
sys.stdout.flush()
ftp.retrbinary('RETR ' + fname,
open(saved_fname, 'wb').write)
downloaded = True
logger.info(' '.join(('Downloaded file for ',
dl_date.strftime('%x'))))
except ftplib.error_perm as exception:
# Could not fetch, so cannot rewrite
rewritten = False
# Test for an error
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise RuntimeError(exception)
else:
# file isn't actually there, try the next name
os.remove(saved_fname)
# Save this so we don't try again
# Because there are two possible filenames for
# each time, it's ok if one isn't there. We just
# don't want to keep looking for it.
bad_fname.append(fname)
# If the first file worked, don't try again
if downloaded:
break
if not downloaded:
logger.info(' '.join(('File not available for',
dl_date.strftime('%x'))))
elif rewritten:
with open(saved_fname, 'r') as fprelim:
lines = fprelim.read()
mm_f107.rewrite_daily_file(dl_date.year, outfile, lines)
os.remove(saved_fname)
# Cycle to the next date
dl_date = vend[iname] + pds.DateOffset(days=1)
# Close connection after downloading all dates
ftp.close()
elif tag == 'daily':
logger.info('This routine can only download the latest 30 day file')
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/daily-solar-indices.txt'
req = requests.get(furl)
# Save the output
data_file = 'f107_daily_{:s}.txt'.format(today.strftime('%Y-%m-%d'))
outfile = os.path.join(data_path, data_file)
mm_f107.rewrite_daily_file(today.year, outfile, req.text)
elif tag == 'forecast':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = ''.join(('https://services.swpc.noaa.gov/text/',
'3-day-solar-geomag-predictions.txt'))
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get starting date of the forecasts
raw_data = req.text.split(':Prediction_dates:')[-1]
forecast_date = dt.datetime.strptime(raw_data[3:14], '%Y %b %d')
# Set the times for output data
times = pds.date_range(forecast_date, periods=3, freq='1D')
# String data is the forecast value for the next three days
raw_data = req.text.split('10cm_flux:')[-1]
raw_data = raw_data.split('\n')[1]
val1 = int(raw_data[24:27])
val2 = int(raw_data[38:41])
val3 = int(raw_data[52:])
# Put data into nicer DataFrame
data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
# Write out as a file
data_file = 'f107_forecast_{:s}.txt'.format(
dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
elif tag == '45day':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/45-day-ap-forecast.txt'
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get to the forecast data
raw_data = req.text.split('45-DAY AP FORECAST')[-1]
# Grab AP part
raw_ap = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[0]
raw_ap = raw_ap.split('\n')[1:-1]
# Get the F107
raw_f107 = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[-1]
raw_f107 = raw_f107.split('\n')[1:-4]
# Parse the AP data
ap_times, ap = mm_f107.parse_45day_block(raw_ap)
# Parse the F10.7 data
f107_times, f107 = mm_f107.parse_45day_block(raw_f107)
# Collect into DataFrame
data = pds.DataFrame(f107, index=f107_times, columns=['f107'])
data['ap'] = ap
# Write out as a file
data_file = 'f107_45day_{:s}.txt'.format(dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
return
| 40.688693
| 80
| 0.521383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,619
| 0.383233
|
db031f4543bacf2c603d4a3ccb452d553dc3e0d6
| 486
|
py
|
Python
|
user/migrations/0004_auto_20200813_1948.py
|
VladimirZubavlenko/ikaf42-app
|
240e012675e4347370289554f34d9c60c8b6f35d
|
[
"MIT"
] | null | null | null |
user/migrations/0004_auto_20200813_1948.py
|
VladimirZubavlenko/ikaf42-app
|
240e012675e4347370289554f34d9c60c8b6f35d
|
[
"MIT"
] | null | null | null |
user/migrations/0004_auto_20200813_1948.py
|
VladimirZubavlenko/ikaf42-app
|
240e012675e4347370289554f34d9c60c8b6f35d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-08-13 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20200813_1943'),
]
operations = [
migrations.AlterField(
model_name='user',
name='emailConfirmToken',
field=models.TextField(default='-CBGbHSkumN38RqAx2UPSak73vs1Tklm2_-xoY1V', max_length=30, verbose_name='Токен подтверждения почты'),
),
]
| 25.578947
| 144
| 0.652263
| 416
| 0.817289
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.383104
|
db03fc21b23af129e340ee65486e184e179cf632
| 1,394
|
py
|
Python
|
vfoot/graphics/__init__.py
|
filipecn/vfoot
|
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
|
[
"MIT"
] | null | null | null |
vfoot/graphics/__init__.py
|
filipecn/vfoot
|
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
|
[
"MIT"
] | null | null | null |
vfoot/graphics/__init__.py
|
filipecn/vfoot
|
3059f5bb471b6bdf92a18a7cdb6b33a2c8852046
|
[
"MIT"
] | null | null | null |
import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
def app(render):
imgui.create_context()
window = impl_glfw_init()
impl = GlfwRenderer(window)
while not glfw.window_should_close(window):
glfw.poll_events()
impl.process_inputs()
gl.glClearColor(.2, .5, .2, 0.6)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
imgui.new_frame()
render()
imgui.render()
impl.render(imgui.get_draw_data())
glfw.swap_buffers(window)
impl.shutdown()
glfw.terminate()
def impl_glfw_init():
width, height = 1280, 720
window_name = "minimal ImGui/GLFW3 example"
if not glfw.init():
print("Could not initialize OpenGL context")
exit(1)
# OS X supports only forward-compatible core profiles from 3.2
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)
# Create a windowed mode window and its OpenGL context
window = glfw.create_window(
int(width), int(height), window_name, None, None
)
glfw.make_context_current(window)
if not window:
glfw.terminate()
print("Could not initialize Window")
exit(1)
return window
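A minimal way to drive the loop above (the window label and text are arbitrary; this assumes it runs in the same module so that `imgui` and `app` are in scope):

def my_gui():
    imgui.begin("Demo")
    imgui.text("Hello from vfoot.graphics")
    imgui.end()

if __name__ == "__main__":
    app(my_gui)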
| 27.333333
| 67
| 0.677188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.151363
|
db04b4c5b6cb46accefdb0e93dbb064e76e6bb44
| 1,472
|
py
|
Python
|
master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/util/_locale.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 4
|
2018-09-07T15:35:24.000Z
|
2019-03-27T09:48:12.000Z
|
master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/util/_locale.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 371
|
2020-03-04T21:51:56.000Z
|
2022-03-31T20:59:11.000Z
|
master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/util/_locale.py
|
AlexRogalskiy/DevArtifacts
|
931aabb8cbf27656151c54856eb2ea7d1153203a
|
[
"MIT"
] | 3
|
2019-06-18T19:57:17.000Z
|
2020-11-06T03:55:08.000Z
|
from __future__ import absolute_import
import locale
import os
from rabbitvcs.util.log import Log
import rabbitvcs.util.settings
import rabbitvcs.util.helper
log = Log("rabbitvcs.util.locale")
def initialize_locale():
try:
settings = rabbitvcs.util.settings.SettingsManager()
sane_default = locale.getdefaultlocale(['LANG', 'LANGUAGE'])
# Just try to set the default locale for the user
locale.setlocale(locale.LC_ALL, sane_default)
# Now, if the user has set a default, try to apply that
user_default = settings.get("general", "language")
if user_default:
locale.setlocale(locale.LC_ALL, (user_default, sane_default[1]))
except locale.Error:
# If the user's environment does not specify an encoding, Python will
# pick a default which might not be available. It seems to pick
# ISO8859-1 (latin1), but UTF8 is a better idea on GNU/Linux.
log.warning("Could not set default locale (LANG: %s)" % os.environ.get("LANG"))
(loc, enc) = sane_default
# We should only try this if we have a region to set as well.
if loc and enc != "UTF8":
try:
locale.setlocale(locale.LC_ALL, (loc, "UTF8"))
log.warning("Manually set encoding to UTF-8")
except locale.Error:
# Nope, no UTF8 either.
log.warning("Could not set user's locale to UTF-8")
| 36.8
| 87
| 0.63587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 568
| 0.38587
|
db05538cc85061ce7b28bead1b966a843722b5be
| 7,378
|
py
|
Python
|
vectorize_enriched_api.py
|
mfejzer/tracking_buggy_files
|
161095f315a94709ef74ab4bb6696889537aaa6a
|
[
"MIT"
] | 3
|
2019-08-06T05:29:53.000Z
|
2021-05-23T08:23:10.000Z
|
vectorize_enriched_api.py
|
mfejzer/tracking_buggy_files
|
161095f315a94709ef74ab4bb6696889537aaa6a
|
[
"MIT"
] | 5
|
2020-04-23T18:29:06.000Z
|
2021-12-09T21:21:57.000Z
|
vectorize_enriched_api.py
|
mfejzer/tracking_buggy_files
|
161095f315a94709ef74ab4bb6696889537aaa6a
|
[
"MIT"
] | 1
|
2021-05-23T08:23:12.000Z
|
2021-05-23T08:23:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Usage: %(scriptName) <bug_report_file> <data_prefix>
"""
import json
from timeit import default_timer
import datetime
import numpy as np
import pickle
import sys
from multiprocessing import Pool
from operator import itemgetter
from scipy import sparse
from sklearn.feature_extraction.text import TfidfTransformer
from tqdm import tqdm
from unqlite import UnQLite
from date_utils import convert_commit_date
def main():
print("Start", datetime.datetime.now().isoformat())
before = default_timer()
bug_report_file_path = sys.argv[1]
print("bug report file path", bug_report_file_path)
data_prefix = sys.argv[2]
print("data prefix", data_prefix)
fixes_list = extract_fixes_list(bug_report_file_path)
vectorize_enriched_api(fixes_list, data_prefix)
after = default_timer()
total = after - before
print("End", datetime.datetime.now().isoformat())
print("total time", total)
def load_bug_reports(bug_report_file_path):
"""load bug report file (the one generated from xml)"""
with open(bug_report_file_path) as bug_report_file:
bug_reports = json.load(bug_report_file)
return bug_reports
def sort_bug_reports_by_commit_date(bug_reports):
commit_dates = []
for index, commit in enumerate(tqdm(bug_reports)):
sha = bug_reports[commit]['commit']['metadata']['sha'].replace('commit ','').strip()
commit_date = convert_commit_date(bug_reports[commit]['commit']['metadata']['date'].replace('Date:','').strip())
commit_dates.append((sha, commit_date))
sorted_commit_dates = sorted(commit_dates, key=itemgetter(1))
sorted_commits = [commit_date[0] for commit_date in sorted_commit_dates]
return sorted_commits
def extract_fixes_list(bug_report_file_path):
bug_reports = load_bug_reports(bug_report_file_path)
return sort_bug_reports_by_commit_date(bug_reports)
def find_supertype_shas(types, class_name_lookup, variable_sha):
if variable_sha not in types:
return []
# variable_type = types[variable_sha]
variable_type = pickle.loads(types[variable_sha])
shas = []
for name in variable_type['superclassNames']:
if name in class_name_lookup:
shas.append(class_name_lookup[name])
for name in variable_type['interfaceNames']:
if name in class_name_lookup:
shas.append(class_name_lookup[name])
return shas
def find_types_shas(types, class_name_lookup, sha):
result = []
to_check = [sha]
while to_check:
current_sha = to_check.pop(0)
if current_sha not in result:
result.append(current_sha)
supertypes = find_supertype_shas(types, class_name_lookup, current_sha)
to_check.extend(supertypes)
return result
def get_indexes(asts, shas):
indexes = []
for sha in shas:
# indexes.append(asts[sha]['source'])
source_index = pickle.loads(asts[sha])['source']
indexes.append(source_index)
return indexes
def add_types_source_to_bug_report_data(data, data_prefix, class_name_lookup, ast_sha):
asts = UnQLite(data_prefix+"_ast_index_collection_index_db", flags = 0x00000100 | 0x00000001)
types = UnQLite(data_prefix+"_ast_types_collection_index_db", flags = 0x00000100 | 0x00000001)
# current_type = types[ast_sha]
# print "searching", ast_sha
current_type = pickle.loads(types[ast_sha])
# print "found", ast_sha
# print current_type['methodVariableTypes']
# exit(0)
types_per_method = current_type['methodVariableTypes']
cl = data.shape[1]
current_index = 0
start = current_index
enriched_apis = []
for method_types in types_per_method:
method_type_shas = []
for method_type in method_types:
if method_type in class_name_lookup:
method_type_shas.append(class_name_lookup[method_type])
supertypes_shas_per_type = [set(find_types_shas(types, class_name_lookup, s)) for s in method_type_shas]
indexes = []
for supertypes in supertypes_shas_per_type:
indexes.extend(get_indexes(asts, supertypes))
if indexes == []:
method_enriched_api = sparse.coo_matrix(np.zeros(cl).reshape(1,cl))
else:
method_enriched_api = sparse.coo_matrix(np.sum((data[indexes,:]), axis = 0))
enriched_apis.append(method_enriched_api)
if enriched_apis == []:
class_enriched_api = sparse.coo_matrix(np.zeros(cl).reshape(1,cl))
else:
class_enriched_api = sparse.coo_matrix(np.sum(enriched_apis, axis = 0))
enriched_apis.append(class_enriched_api)
current_index += len(enriched_apis)
asts.close()
types.close()
lookup = {}
lookup['enrichedApiStart'] = start
lookup['enrichedApiEnd'] = current_index - 1
enriched_apis_matrix = sparse.vstack(enriched_apis)
return (enriched_apis_matrix, lookup, ast_sha)
def vectorize_enriched_api(bug_report_fixing_commits, data_prefix):
work = []
for fixing_commit in bug_report_fixing_commits:
work.append((data_prefix, fixing_commit))
pool = Pool(12, maxtasksperchild=1)
r = list(tqdm(pool.imap(_f, work), total=len(work)))
print("r", len(r))
def _f(args):
return extract_enriched_api(args[0], args[1])
def extract_enriched_api(data_prefix, bug_report_full_sha):
data = sparse.load_npz(data_prefix+'_raw_count_data.npz')
bug_report_files_collection_db = UnQLite(data_prefix+"_bug_report_files_collection_db", flags = 0x00000100 | 0x00000001)
current_files = pickle.loads(bug_report_files_collection_db[bug_report_full_sha])
bug_report_files_collection_db.close()
bug_report_id = bug_report_full_sha[0:7]
shas = current_files['shas']
class_name_lookup = current_files['class_name_to_sha']
bug_report_data = []
bug_report_lookup = {}
n_rows = 0
for ast_sha in shas:
ast_data, lookup, current_ast_sha = add_types_source_to_bug_report_data(data, data_prefix, class_name_lookup, ast_sha)
current_index = n_rows
bug_report_data.append(ast_data)
for k in lookup:
lookup[k] += current_index
bug_report_lookup[current_ast_sha] = lookup
n_rows += ast_data.shape[0]
bug_report_row = get_bug_report(data_prefix, data, bug_report_id)
bug_report_data.append(bug_report_row)
bug_report_data_matrix = sparse.vstack(bug_report_data)
sparse.save_npz(data_prefix+'_'+bug_report_id+'_partial_enriched_api', bug_report_data_matrix)
with open(data_prefix+'_'+bug_report_id+'_partial_enriched_api_index_lookup', 'w') as outfile:
json.dump(bug_report_lookup, outfile)
transformer = TfidfTransformer()
tf_idf_data = transformer.fit_transform(bug_report_data_matrix)
sparse.save_npz(data_prefix+'_'+bug_report_id+'_tfidf_enriched_api', tf_idf_data)
# print "bug_report_id", bug_report_id
return bug_report_id
def get_bug_report(data_prefix, vectorized_data, bug_report_id):
bug_report_index_collection = UnQLite(data_prefix+"_bug_report_index_collection_index_db")
bug_report = pickle.loads(bug_report_index_collection[bug_report_id])
bug_report_index_collection.close()
index = bug_report['report']
return vectorized_data[index, :]
if __name__ == '__main__':
main()
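The core transformation in extract_enriched_api is stacking per-class raw count rows and re-weighting them with TF-IDF. A minimal, self-contained sketch of that scipy/scikit-learn pattern, using toy count rows rather than the script's real matrices and databases:

import numpy as np
from scipy import sparse
from sklearn.feature_extraction.text import TfidfTransformer

# Toy raw term-count rows standing in for the per-class "enriched API" vectors.
rows = [
    sparse.coo_matrix(np.array([[2, 0, 1, 0]])),
    sparse.coo_matrix(np.array([[0, 3, 1, 1]])),
    sparse.coo_matrix(np.array([[1, 1, 0, 2]])),
]
count_matrix = sparse.vstack(rows)                             # same stacking step as the partial_enriched_api output
tfidf_matrix = TfidfTransformer().fit_transform(count_matrix)  # same re-weighting as the tfidf_enriched_api output
print(tfidf_matrix.toarray().round(3))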
| 33.234234
| 126
| 0.719301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 951
| 0.128897
|
db063dcff6ca568e771df05b7ae7f650c6cd2aea
| 4,270
|
py
|
Python
|
interpreter.py
|
bendmorris/beaver
|
4db3e1690145dee89d30144f3632396313218214
|
[
"MIT"
] | 2
|
2018-10-06T08:35:41.000Z
|
2019-04-03T21:15:02.000Z
|
interpreter.py
|
bendmorris/beaver
|
4db3e1690145dee89d30144f3632396313218214
|
[
"MIT"
] | null | null | null |
interpreter.py
|
bendmorris/beaver
|
4db3e1690145dee89d30144f3632396313218214
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
from lib.graph import Graph
from lib.types import BeaverException, Uri
from lib.command import OutCommand
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from __init__ import __version__
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--version', help='print version and exit', action='version', version=__version__)
arg_parser.add_argument('-t', '--test', help='run unit tests and exit', action='store_true')
arg_parser.add_argument('file', nargs='*', help='file to be interpreted')
arg_parser.add_argument('-i', '--interactive', help='enter interactive mode after interpreting file', action='store_true')
arg_parser.add_argument('-e', '--eval', help='string to be evaluated')
arg_parser.add_argument('-v', '--verbose', help='print each triple statement as evaluated', action='store_true')
arg_parser.add_argument('-d', '--draw', help='output an image of the resulting graph to the given image file; image type is inferred from file extension')
arg_parser.add_argument('-o', '--out', help='serialize the resulting graph to the given output file (using Turtle)', nargs='?', const=True, default=None)
args = arg_parser.parse_args()
#print args.__dict__
if args.test:
import tests
tests.run_tests(verbose=args.verbose)
sys.exit()
if not sys.stdin.isatty():
# read and evaluate piped input
if args.eval is None: args.eval = ''
args.eval = sys.stdin.read() + args.eval
interactive = (not args.file and not args.eval) or (args.interactive and sys.stdin.isatty())
def run():
if interactive: print '''Beaver %s''' % __version__
graph = Graph(verbose=args.verbose)
for input_file in args.file:
try:
graph.parse(filename=input_file)
except KeyboardInterrupt:
print "KeyboardInterrupt"
sys.exit()
except Exception as e:
print e
sys.exit()
if args.eval:
try:
graph.parse(text=args.eval)
except KeyboardInterrupt:
print "KeyboardInterrupt"
sys.exit()
except Exception as e:
print e
sys.exit()
if interactive:
import readline
exit = False
while not exit:
graph.verbose = args.verbose
try:
next_line = raw_input('>> ').strip()
if not next_line: continue
if next_line[0] == '-' and next_line.split(' ')[0] in arg_parser._option_string_actions:
command = next_line.split(' ')[0]
action = arg_parser._option_string_actions[command].dest
if len(next_line.split(' ')) > 1:
arg = ' '.join(next_line.split(' ')[1:])
try: arg = eval(arg)
except: pass
else:
arg = not getattr(args, action)
try:
setattr(args, action, arg)
except:
print 'Illegal argument: %s %s' % (command, arg)
elif next_line in ('exit', 'quit'):
exit = True
else:
stmts = graph.parse(text=next_line)
if stmts == 0:
raise BeaverException('Failed to parse line: %s' % next_line)
except EOFError:
print
exit = True
except KeyboardInterrupt:
print
continue
except Exception as e:
print e
continue
if args.out:
if args.out is True:
filename = None
else:
filename = args.out
            if not (filename.startswith('<') and filename.endswith('>')):
filename = '<%s>' % os.path.abspath(filename)
filename = Uri(filename)
graph.execute(OutCommand(filename))
if args.draw:
graph.draw(args.draw)
if __name__ == '__main__': run()
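The -o handling above wraps a bare output path in angle brackets before turning it into a Uri. A standalone sketch of that normalization, written as my own Python 3 illustration rather than beaver's API:

import os

def normalize_uri_filename(filename):
    # Hypothetical helper: wrap bare paths in <...>; leave already-wrapped names alone.
    if not (filename.startswith('<') and filename.endswith('>')):
        filename = '<%s>' % os.path.abspath(filename)
    return filename

print(normalize_uri_filename('graph.ttl'))    # -> '<' + absolute path + '>'
print(normalize_uri_filename('<graph.ttl>'))  # already wrapped, returned unchanged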
| 32.846154
| 154
| 0.544028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 735
| 0.172131
|
db0693e026c74e759573c7252d4aff5ef90ae5ad
| 242
|
py
|
Python
|
euler/28.py
|
DevStarSJ/algorithmExercise
|
66b42c54cdd594ff3f229613fd83446f8c1f9153
|
[
"MIT"
] | null | null | null |
euler/28.py
|
DevStarSJ/algorithmExercise
|
66b42c54cdd594ff3f229613fd83446f8c1f9153
|
[
"MIT"
] | null | null | null |
euler/28.py
|
DevStarSJ/algorithmExercise
|
66b42c54cdd594ff3f229613fd83446f8c1f9153
|
[
"MIT"
] | null | null | null |
def get_cross_sum(n):
    # Sum of both diagonals of a (2n-1) x (2n-1) number spiral (Project Euler 28).
    start = 1   # centre of the spiral
    total = 1
    for i in range(1, n):
        step = i * 2                      # gap between corners on ring i
        start = start + step              # first corner of ring i
        total += start * 4 + step * 6     # corners are start, start+step, start+2*step, start+3*step
        start = start + step * 3          # last corner of ring i, ready for the next ring
    return total
print(get_cross_sum(501))
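A quick sanity check (my addition, run in the same module): the diagonals of a 3x3 spiral sum to 1+3+5+7+9 = 25 and those of a 5x5 spiral to 25+13+17+21+25 = 101, which the function reproduces for n=2 and n=3.

assert get_cross_sum(1) == 1     # 1x1 "spiral" is just the centre
assert get_cross_sum(2) == 25    # 3x3 spiral: 1+3+5+7+9
assert get_cross_sum(3) == 101   # 5x5 spiral adds corners 13, 17, 21, 25
print("diagonal sums check out")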
| 18.615385
| 37
| 0.516529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
db06e9490bbc299985803b6daf8dbca9d83d6fc3
| 1,509
|
py
|
Python
|
titan/react_view_pkg/router/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
titan/react_view_pkg/router/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
titan/react_view_pkg/router/resources.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
import typing as T
from dataclasses import dataclass, field
from moonleap import Resource
from titan.react_pkg.component import Component
class Router(Component):
pass
@dataclass
class RouterConfig(Resource):
component: Component
url: str
params: T.List[str] = field(default_factory=list)
wraps: bool = False
side_effects: T.List[T.Any] = field(default_factory=list)
def reduce_router_configs(router_configs, base_route=None):
    # base_route is optional: the recursive call below does not supply it.
result = []
for router_config in router_configs:
child_components = getattr(router_config.component.typ, "child_components", [])
for child_component in child_components:
# The last router config always corresponds to the child component itself.
            # Any preceding router configs supply dependencies
# (e.g. state providers, load effects, etc)
supporting_router_configs = child_component.typ.create_router_configs(
named_component=child_component
)[:-1]
if not supporting_router_configs:
continue
preceeding_router_configs = reduce_router_configs(supporting_router_configs)
result = concat_router_configs(preceeding_router_configs, result)
result.extend(router_configs)
return result
def concat_router_configs(first, second):
first_components = [x.component for x in first]
second_filtered = [x for x in second if x.component not in first_components]
return first + second_filtered
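concat_router_configs keeps the first list intact and drops entries from the second list whose component already appears in the first. A standalone sketch of that de-duplication with stand-in objects (plain strings instead of real Component resources, and a hypothetical FakeRouterConfig in place of RouterConfig):

from dataclasses import dataclass

@dataclass
class FakeRouterConfig:
    component: str   # stand-in for the real Component resource
    url: str

def concat_demo(first, second):
    # Mirrors concat_router_configs: second entries lose to first entries with the same component.
    first_components = [x.component for x in first]
    return first + [x for x in second if x.component not in first_components]

a = [FakeRouterConfig("Login", "/login"), FakeRouterConfig("Home", "/")]
b = [FakeRouterConfig("Home", "/home-alt"), FakeRouterConfig("About", "/about")]
print([c.url for c in concat_demo(a, b)])    # ['/login', '/', '/about']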
| 32.106383
| 88
| 0.713718
| 240
| 0.159046
| 0
| 0
| 218
| 0.144467
| 0
| 0
| 186
| 0.12326
|
db077393470e53a796d0d72580ad3f3064dd2bda
| 2,119
|
py
|
Python
|
lab-taxi/agent.py
|
JunShern/deep-reinforcement-learning
|
4c99d8e3b5c6df0ec7985a33611a16a791eb0041
|
[
"MIT"
] | null | null | null |
lab-taxi/agent.py
|
JunShern/deep-reinforcement-learning
|
4c99d8e3b5c6df0ec7985a33611a16a791eb0041
|
[
"MIT"
] | null | null | null |
lab-taxi/agent.py
|
JunShern/deep-reinforcement-learning
|
4c99d8e3b5c6df0ec7985a33611a16a791eb0041
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import defaultdict
class Agent:
def __init__(self, nA=6):
""" Initialize agent.
Params
======
- nA: number of actions available to the agent
"""
self.nA = nA
self.actions = list(range(nA))
self.Q = defaultdict(lambda: np.zeros(self.nA))
self.alpha = 0.01
self.epsilon = 1
self.epsilon_decay = 0.99999
self.epsilon_min = 0.001
self.gamma = 1
print("alpha", self.alpha, "e_decay", self.epsilon_decay, "e_min", self.epsilon_min, "gamma", self.gamma)
def select_action(self, state):
""" Given the state, select an action.
Params
======
- state: the current state of the environment
Returns
=======
- action: an integer, compatible with the task's action space
"""
# Follow epsilon-greedy policy
greedy_choice = np.argmax(self.Q[state])
random_choice = np.random.choice(self.actions)
epsilon_greedy_choice = np.random.choice(
[greedy_choice, random_choice],
p = [1-self.epsilon, self.epsilon]
)
return epsilon_greedy_choice
def step(self, state, action, reward, next_state, done):
""" Update the agent's knowledge, using the most recently sampled tuple.
Params
======
- state: the previous state of the environment
- action: the agent's previous choice of action
- reward: last reward received
- next_state: the current state of the environment
- done: whether the episode is complete (True or False)
"""
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
        # Expected SARSA target: expected action value of next_state under the epsilon-greedy policy
next_G = 0
if not done:
next_G = self.epsilon * sum([self.Q[next_state][action] for action in self.actions]) / self.nA + (1 - self.epsilon) * max(self.Q[next_state])
# Update Q
self.Q[state][action] += self.alpha * ((reward + self.gamma * next_G) - self.Q[state][action])
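The target computed in step is the Expected SARSA target under the agent's epsilon-greedy policy. A minimal usage sketch with a made-up transition (state indices and reward are arbitrary, not Taxi-v3 output):

agent = Agent(nA=6)
state, next_state = 0, 1
action = agent.select_action(state)                    # epsilon-greedy over Q[state]
agent.step(state, action, -1, next_state, done=False)
print(agent.Q[state][action])                          # moved toward reward + gamma * expected next value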
| 33.634921
| 153
| 0.591789
| 2,063
| 0.973572
| 0
| 0
| 0
| 0
| 0
| 0
| 842
| 0.397357
|
db07a7ea8e4f0634af5cfc5dde1a21fb51caf3b5
| 11,271
|
py
|
Python
|
visicom_reverse_geocoding.py
|
zimirrr/visicom_reverse_geocoding
|
3da913f80e934f8352bcc8abe9d24ba54bbc482a
|
[
"MIT"
] | null | null | null |
visicom_reverse_geocoding.py
|
zimirrr/visicom_reverse_geocoding
|
3da913f80e934f8352bcc8abe9d24ba54bbc482a
|
[
"MIT"
] | null | null | null |
visicom_reverse_geocoding.py
|
zimirrr/visicom_reverse_geocoding
|
3da913f80e934f8352bcc8abe9d24ba54bbc482a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
VisicomReverseGeocoder
A QGIS plugin
plugin for reverse geocoding from visicom api
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-09-21
git sha : $Format:%H$
copyright : (C) 2018 by zimirrr
email : zimirrr@mail.ru
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from qgis.gui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .settings_dialog import Config
from .utils import pointToWGS84
from .visicom_api_parser import *
import os.path
import requests
class VisicomReverseGeocoder:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'VisicomReverseGeocoder_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Visicom reverse geocoding')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'VisicomReverseGeocoder')
self.toolbar.setObjectName(u'VisicomReverseGeocoder')
# settings from ini file
self.settings = self.config_read_from_ini(['AUTH_KEY','URL','LANG','CATEGORIES'])
# memory layer for results
self.layer = None
# progressbar when geocoding
self.bar = QProgressBar()
self.bar.setRange(0, 0)
# canvas and point tool
self.canvas = self.iface.mapCanvas()
self.mapPointTool = QgsMapToolEmitPoint(self.canvas)
self.mapPointTool.canvasClicked.connect(self.reverse_geocoding)
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('VisicomReverseGeocoder', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToWebMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = os.path.join(self.plugin_dir, 'icons', 'geocode.png')
self.add_action(
icon_path,
text=self.tr(u'Visicom reverse geocoding'),
callback=self.run,
parent=self.iface.mainWindow())
icon_path = os.path.join(self.plugin_dir, 'icons', 'settings.png')
self.add_action(
icon_path,
text=self.tr(u'Settings'),
callback=self.show_settings,
parent=self.iface.mainWindow())
icon_path = os.path.join(self.plugin_dir, 'icons', 'about.png')
self.add_action(
icon_path,
text=self.tr(u'About'),
callback=self.show_about,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginWebMenu(
self.tr(u'&Visicom reverse geocoding'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def run(self):
if self.settings['AUTH_KEY'] == '':
self.iface.messageBar().pushMessage("Error", "You need to get Visicom API key, see Settings", level=Qgis.Critical)
else:
self.canvas.setMapTool(self.mapPointTool)
def show_about(self):
infoString = """<b>Visicom reverse geocoding</b><br><br>
        If the plugin doesn't return a result, the demo key has expired.<br>
        You need to get your own authorization key <a href=https://api.visicom.ua/docs/terms/key>here</a>.<br><br>
<a href=https://api.visicom.ua>Read more about Visicom API</a>"""
QMessageBox.information(self.iface.mainWindow(), "About", infoString)
def show_settings(self):
if not bool(self.settings):
self.settings = self.config_read_from_ini(['AUTH_KEY','URL','LANG','CATEGORIES'])
dlg = Config(self)
dlg.visicomKey.insert(self.settings['AUTH_KEY'])
dlg.show()
dlg.adjustSize()
result = dlg.exec_()
if result == 1:
self.settings['AUTH_KEY'] = dlg.visicomKey.text()
self.config_write_to_ini(self.settings)
def config_read_from_ini(self, settings_list):
"""returns dictionary with keys from settings_list"""
qgs = QSettings(f'{self.plugin_dir}/{os.path.basename(__file__)[:-3]}.ini', QSettings.IniFormat)
res = {}
for item in settings_list:
res[item] = qgs.value(item)
return res
def config_write_to_ini(self, settings_dict):
"""writes dictionary into ini file"""
qgs = QSettings(f'{self.plugin_dir}/{os.path.basename(__file__)[:-3]}.ini', QSettings.IniFormat)
for k, v in settings_dict.items():
qgs.setValue(k, v)
def create_memory_layer(self):
try:
_ = self.layer.id()
except:
self.layer = None
if self.layer is None:
uri = 'Point?crs=epsg:4326&field=full_string:string(255)&index=yes'
self.layer = QgsVectorLayer(uri, 'visicom_geocoded', 'memory')
QgsProject.instance().addMapLayer(self.layer)
def reverse_geocoding(self, point):
"""function that is called when mapTool emits click """
# add progress bar
self.iface.mainWindow().statusBar().addWidget(self.bar)
self.bar.show()
# if mapCanvas crs not wgs84
crs = self.canvas.mapSettings().destinationCrs()
point_wgs84 = pointToWGS84(point, crs)
coords = f'{point_wgs84.x():.6f},{point_wgs84.y():.6f}'
cfg = self.settings
send_params = {
'key' : cfg['AUTH_KEY'],
'near' : coords,
'radius' : 5
}
categories = cfg['CATEGORIES']
url = f'{cfg["URL"]}/{cfg["LANG"]}/search/{categories}.json'
r = requests.get(url, params=send_params)
if r.status_code == 200:
resp = r.json()
if resp['type'] == 'FeatureCollection':
allfeatures = parse_featureCollection(resp['features'])
elif resp['type'] == 'Feature':
allfeatures = parse_featureCollection((resp,))
result = geocoded_object(allfeatures)
self.create_memory_layer()
newfeature = QgsFeature(self.layer.fields())
newfeature.setGeometry(QgsGeometry.fromPointXY(point_wgs84) )
newfeature.setAttribute('full_string', result['full_string'])
self.layer.startEditing()
self.layer.addFeature(newfeature)
self.layer.commitChanges()
else:
QgsMessageLog.logMessage(
f'Response status_code is {r.status_code}',
'Visicom reverse geocoding'
)
self.iface.mainWindow().statusBar().removeWidget(self.bar)
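Outside QGIS, the request that reverse_geocoding issues is an ordinary GET with key, near and radius parameters. A hedged sketch of how that URL is assembled (base URL, language, key and category are placeholders here; the plugin reads the real values from its .ini file):

import requests

cfg = {
    "URL": "https://example.invalid/visicom-api",  # placeholder base URL
    "LANG": "uk",                                  # placeholder language code
    "AUTH_KEY": "YOUR_API_KEY",                    # placeholder key
    "CATEGORIES": "adr_address",                   # placeholder category filter
}
lon, lat = 30.5234, 50.4501                        # example WGS84 point
params = {"key": cfg["AUTH_KEY"], "near": f"{lon:.6f},{lat:.6f}", "radius": 5}
url = f'{cfg["URL"]}/{cfg["LANG"]}/search/{cfg["CATEGORIES"]}.json'
prepared = requests.Request("GET", url, params=params).prepare()
print(prepared.url)                                # full query string the plugin would send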
| 35.332288
| 126
| 0.58167
| 9,662
| 0.857244
| 0
| 0
| 0
| 0
| 0
| 0
| 5,321
| 0.472097
|