Dataset schema (one row per source file; nullable columns may be empty):
- hexsha: string (length 40)
- size: int64 (5 to 2.06M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string (length 3 to 248)
- max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string (length 5 to 125)
- max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable); max_issues_count: int64 (1 to 67k, nullable); max_forks_count: int64 (1 to 105k, nullable)
- max_stars_repo_stars_event_min/max_datetime, max_issues_repo_issues_event_min/max_datetime, max_forks_repo_forks_event_min/max_datetime: string (length 24, nullable)
- content: string (length 5 to 2.06M)
- avg_line_length: float64 (1 to 1.02M); max_line_length: int64 (3 to 1.03M); alphanum_fraction: float64 (0 to 1)
- count_classes: int64 (0 to 1.6M); score_classes: float64 (0 to 1)
- count_generators: int64 (0 to 651k); score_generators: float64 (0 to 1)
- count_decorators: int64 (0 to 990k); score_decorators: float64 (0 to 1)
- count_async_functions: int64 (0 to 235k); score_async_functions: float64 (0 to 1)
- count_documentation: int64 (0 to 1.04M); score_documentation: float64 (0 to 1)
hexsha: a57fff444e34ab3085f258b8aa57323a8f86efde | size: 1,683 | ext: py | lang: Python
max_stars: path Exercicios/Exercicio070.py | repo RicardoMart922/estudo_Python | head cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | licenses ["MIT"] | stars null | events null
max_issues: path Exercicios/Exercicio070.py | repo RicardoMart922/estudo_Python | head cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | licenses ["MIT"] | issues null | events null
max_forks: path Exercicios/Exercicio070.py | repo RicardoMart922/estudo_Python | head cb595c2a5e5aee568b6afa71b3ed9dd9cb7eef72 | licenses ["MIT"] | forks null | events null
content:
# Create a program that reads the age and sex of several people. After each person is registered, the program should ask whether the user wants to continue. At the end, show:
# A) How many people are over 18 years old.
# B) How many men were registered.
# C) How many women are under 20 years old.
maisdezoito = 0
qtdmulheres = 0
qtdhomens = 0
idade = 0
opcao = ''
sexo = ''
print('-= Informe a idade e o sexo para o cadastro =-')
while True:
idade = int(input('Idade: '))
if idade > 18:
maisdezoito += 1
while True:
sexo = str(input('Sexo [M/F]: ')).upper()
if sexo == 'M' or sexo == 'F':
if sexo == 'M':
qtdhomens += 1
if sexo == 'F' and idade < 20:
qtdmulheres += 1
break
while True:
opcao = str(input('Quer continuar [S/N]: ')).upper()
if opcao == 'S' or opcao == 'N':
break
if opcao == 'N':
break
if maisdezoito == 0:
print('Nenhuma pessoa com mais de 18 anos foi cadastrada.')
elif maisdezoito == 1:
    print('Foi cadastrada uma pessoa com mais de 18 anos.')
else:
    print(f'Foram cadastradas {maisdezoito} pessoas com mais de 18 anos.')
if qtdhomens == 0:
print('Nenhum homem foi cadastrado.')
elif qtdhomens == 1:
print('Apenas um homem foi cadastrado.')
else:
print(f'A quantidade de homens cadastrados foi {qtdhomens}.')
if qtdmulheres == 0:
print('Nenhuma mulher com menos de 20 anos foi cadastrada.')
elif qtdmulheres == 1:
print('Apenas uma mulher com menos de 20 anos foi cadastrada.')
else:
print(f'A quantidade de mulheres com menos de 20 anos que foram cadastradas foi {qtdmulheres}.')
avg_line_length: 35.0625 | max_line_length: 172 | alphanum_fraction: 0.62448 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 893 | score_documentation: 0.529342

hexsha: a583106bd0bb53ab734f77ad352678e3fedf5e53 | size: 3,050 | ext: py | lang: Python
max_stars: path tests/test_entry.py | repo anaulin/tasks.py | head aa05b4194ff6b01061e6842520752da515e625d6 | licenses ["MIT"] | stars null | events null
max_issues: path tests/test_entry.py | repo anaulin/tasks.py | head aa05b4194ff6b01061e6842520752da515e625d6 | licenses ["MIT"] | issues 2 | events 2020-06-30T20:05:59.000Z to 2020-08-01T03:42:20.000Z
max_forks: path tests/test_entry.py | repo anaulin/tasks.py | head aa05b4194ff6b01061e6842520752da515e625d6 | licenses ["MIT"] | forks null | events null
content:
import filecmp
import shutil
import tempfile
import os
from .context import entry
TEST_ENTRY = os.path.join(os.path.dirname(__file__), "test_entry.md")
TEST_ENTRY_CONTENT = """
Some content.
## A section in the content
Content that looks like frontmatter:
```
+++
but this is
not really frontmatter
+++
```
More content.
"""
def test_get_toml_and_content():
(toml, content) = entry.get_toml_and_content(TEST_ENTRY)
assert toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["books", "stuff"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4}
}
assert content == TEST_ENTRY_CONTENT
def test_get_toml():
toml = entry.get_toml(TEST_ENTRY)
assert toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["books", "stuff"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4}
}
def test_get_url():
url = entry.get_url("../foo/bar/this-is-the-slug.md")
assert url == "https://anaulin.org/blog/this-is-the-slug/"
url = entry.get_url("this-is-another-slug.md")
assert url == "https://anaulin.org/blog/this-is-another-slug/"
def test_add_to_toml():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.add_to_toml(temp.name, {'new_key': 'new_value'})
new_toml = entry.get_toml(temp.name)
assert new_toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["books", "stuff"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4},
'new_key': 'new_value'
}
def test_add_to_toml_list():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.add_to_toml(temp.name, {'tags': ['new_tag']})
new_toml = entry.get_toml(temp.name)
assert new_toml == {
'title': "Book Notes: The Sorcerer of the Wildeeps",
'tags': ["new_tag"],
'book': {'title': 'The Sorcerer of the Wildeeps', 'rating': 4}
}
def test_write_toml():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.write_toml(temp.name, {'new_key': 'new_value'})
(new_toml, new_content) = entry.get_toml_and_content(temp.name)
(_, old_content) = entry.get_toml_and_content(TEST_ENTRY)
assert new_toml == {'new_key': 'new_value'}
assert new_content == old_content
def test_add_syndication_url():
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(TEST_ENTRY, temp.name)
entry.add_syndication_url(temp.name, "new_url")
assert entry.get_toml(temp.name)["syndication_urls"] == ["new_url"]
entry.add_syndication_url(temp.name, "another_url")
assert entry.get_toml(temp.name)["syndication_urls"] == [
"new_url", "another_url"]
def test_to_slug():
assert entry.to_slug("Some Title: With #1 and Stuff!!") == "some-title-with-1-and-stuff"
avg_line_length: 30.19802 | max_line_length: 92 | alphanum_fraction: 0.635082 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,040 | score_documentation: 0.340984

hexsha: a583ce21b151702ce7c45ced989d01eb53545764 | size: 1,833 | ext: py | lang: Python
max_stars: path plotapp/controllers/window_controller.py | repo maldata/matplotlib_qtquick_playground | head f7da94093315d8f540124d5037406d004574dede | licenses ["MIT"] | stars null | events null
max_issues: path plotapp/controllers/window_controller.py | repo maldata/matplotlib_qtquick_playground | head f7da94093315d8f540124d5037406d004574dede | licenses ["MIT"] | issues null | events null
max_forks: path plotapp/controllers/window_controller.py | repo maldata/matplotlib_qtquick_playground | head f7da94093315d8f540124d5037406d004574dede | licenses ["MIT"] | forks null | events null
content:
import random
from PyQt5.QtCore import pyqtSignal, pyqtProperty, pyqtSlot, QObject
class WindowController(QObject):
label_changed = pyqtSignal()
def __init__(self, app):
super().__init__()
self._app = app
self._qml_engine = None
self._label = 'whatever'
self._figure = None
self._ax = None
self._data = ([], [])
def startup(self, qml_engine):
print('Main controller startup')
self._qml_engine = qml_engine
main_window = self._qml_engine.rootObjects()[0]
main_window.show()
# TODO: If we have other screens, we'd probably do this there. findChild() can be called on
# any QML object that was loaded with the QMLEngine.load() method.
self._figure = main_window.findChild(QObject, "figure").getFigure()
self._ax = self._figure.add_subplot(111)
@pyqtSlot()
def shutdown(self):
print("Shutting down.")
self._app.quit()
@pyqtProperty(str, notify=label_changed)
def label(self):
return self._label
@pyqtSlot()
def generate_data(self):
x = [random.random() for i in range(10)]
x.sort()
y = [random.random() for i in range(10)]
self._data = (x, y)
self._ax.clear()
self._ax.plot(x, y)
self._figure.canvas.draw_idle()
@pyqtSlot()
def append_data(self):
x = self._data[0]
y = self._data[1]
x_offset = max(x) + 0.1
new_x = [random.random() + x_offset for i in range(10)]
new_x.sort()
new_y = [random.random() for i in range(10)]
x = x + new_x
y = y + new_y
self._data = (x, y)
self._ax.clear()
self._ax.plot(x, y)
self._figure.canvas.draw_idle()
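The controller above only becomes useful once a QML engine hands it a root window containing a "figure" child. Below is a minimal launch sketch, not part of the dataset row: the main.qml filename, the "controller" context-property name, and the module path (mirroring plotapp/controllers/window_controller.py) are assumptions.

```python
import sys

from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtQml import QQmlApplicationEngine

# module path mirrors the repo layout above; adjust to your project
from plotapp.controllers.window_controller import WindowController

app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
controller = WindowController(app)

# expose the controller to QML, then load a QML file whose root window
# contains an object named "figure" that provides getFigure()
engine.rootContext().setContextProperty("controller", controller)
engine.load(QUrl("main.qml"))
controller.startup(engine)

sys.exit(app.exec_())
```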
avg_line_length: 25.816901 | max_line_length: 99 | alphanum_fraction: 0.569013 | count_classes: 1,747 | score_classes: 0.953082 | count_generators: 0 | score_generators: 0 | count_decorators: 928 | score_decorators: 0.506274 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 216 | score_documentation: 0.11784

hexsha: a5852febf93eb6f982e8fd189b72f16bda399d56 | size: 337 | ext: py | lang: Python
max_stars: path training/train.py | repo gert-janwille/Eleonora | head a979dcd9b41231ea3abc9a57d842c680314ac9ca | licenses ["MIT"] | stars 1 | events 2017-11-19T10:57:38.000Z to 2017-11-19T10:57:38.000Z
max_issues: path training/train.py | repo gert-janwille/Eleonora | head a979dcd9b41231ea3abc9a57d842c680314ac9ca | licenses ["MIT"] | issues 6 | events 2017-11-15T16:04:09.000Z to 2018-01-18T17:12:18.000Z
max_forks: path training/train.py | repo gert-janwille/Eleonora | head a979dcd9b41231ea3abc9a57d842c680314ac9ca | licenses ["MIT"] | forks null | events null
content:
from training.emotional_training import emotional_training
from training.facial_training import facial_training
def train():
print('\n0: Emotional Training')
print('1: Facial Training\n')
choose = int(input("Type Number > "))
if choose == 0:
emotional_training()
if choose == 1:
facial_training()
avg_line_length: 22.466667 | max_line_length: 58 | alphanum_fraction: 0.682493 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 63 | score_documentation: 0.186944

hexsha: a585ab12f199b6ce2a2bd25bb26ea5865e4f682d | size: 9,190 | ext: py | lang: Python
max_stars: path nnaps/mesa/compress_mesa.py | repo vosjo/nnaps | head bc4aac715b511c5df897ef24fb953ad7265927ea | licenses ["MIT"] | stars 4 | events 2020-09-24T12:55:58.000Z to 2021-05-19T14:46:10.000Z
max_issues: path nnaps/mesa/compress_mesa.py | repo vosjo/nnaps | head bc4aac715b511c5df897ef24fb953ad7265927ea | licenses ["MIT"] | issues 4 | events 2021-06-02T09:28:35.000Z to 2021-06-04T08:32:24.000Z
max_forks: path nnaps/mesa/compress_mesa.py | repo vosjo/nnaps | head bc4aac715b511c5df897ef24fb953ad7265927ea | licenses ["MIT"] | forks 3 | events 2020-10-05T13:18:27.000Z to 2021-06-02T09:29:11.000Z
content:
import os
from pathlib import Path
import numpy as np
# repack_fields is necessary since np 1.16 as selecting columns from a recarray returns an array with padding
# that is difficult to work with afterwards.
from numpy.lib import recfunctions as rf
from nnaps.mesa import fileio
from nnaps import __version__
def read_mesa_header(model):
"""
process the MESA history files header.
This will require more work in the future to also deal with correct type conversions. Now everything is considered
a string. This is fine as the header is ignored by the rest of nnaps.
todo: implement converting of header values to the correct data types.
:param model: list of lists
:return: numpy array containing strings with the header info.
"""
res = []
for line in model:
new_line = [l.replace('\"', '') for l in line]
res.append(new_line)
return np.array(res, str).T
def read_mesa_output(filename=None, only_first=False):
"""
Read star.log and .data files from MESA.
This returns a record array with the global and local parameters (the latter
can also be a summary of the evolutionary track instead of a profile if
you've given a 'star.log' file.
The stellar profiles are given from surface to center.
    Function written by Pieter DeGroote
:param filename: name of the log file
:type filename: str
:param only_first: read only the first model (or global parameters)
:type only_first: bool
:return: list of models in the data file (typically global parameters, local parameters)
:rtype: list of rec arrays
"""
models = []
new_model = False
header = None
# -- open the file and read the data
with open(filename, 'r') as ff:
# -- skip first 5 lines when difference file
if os.path.splitext(filename)[1] == '.diff':
for i in range(5):
line = ff.readline()
models.append([])
new_model = True
while 1:
line = ff.readline()
if not line:
break # break at end-of-file
line = line.strip().split()
if not line:
continue
# -- begin a new model
if all([iline == str(irange) for iline, irange in zip(line, range(1, len(line) + 1))]):
# -- wrap up previous model
if len(models):
try:
model = np.array(models[-1], float).T
except:
model = read_mesa_header(models[-1])
models[-1] = np.rec.fromarrays(model, names=header)
if only_first: break
models.append([])
new_model = True
continue
# -- next line is the header of the data, remember it
if new_model:
header = line
new_model = False
continue
models[-1].append(line)
if len(models) > 1:
try:
model = np.array(models[-1], float).T
except:
indices = []
for i, l in enumerate(models[-1]):
if len(l) != len(models[-1][0]):
indices.append(i)
for i in reversed(indices):
del models[-1][i]
print("Found and fixed errors on following lines: ", indices)
model = np.array(models[-1], float).T
models[-1] = np.rec.fromarrays(model, names=header)
return models
def get_end_log_file(logfile):
if os.path.isfile(logfile):
# case for models ran locally
ifile = open(logfile)
lines = ifile.readlines()
ifile.close()
return lines[-30:-1]
else:
return []
def convert2hdf5(modellist, star_columns=None, binary_columns=None, profile_columns=None,
add_stopping_condition=True, skip_existing=True,
star1_history_file='LOGS/history1.data', star2_history_file='LOGS/history2.data',
binary_history_file='LOGS/binary_history.data', log_file='log.txt',
profile_files=None, profiles_path='', profile_pattern='*.profile',
input_path_kw='path', input_path_prefix='', output_path=None, verbose=False):
if not os.path.isdir(output_path):
os.mkdir(output_path)
for i, model in modellist.iterrows():
print(input_path_prefix, model[input_path_kw])
if not os.path.isdir(Path(input_path_prefix, model[input_path_kw])):
continue
if skip_existing and os.path.isfile(Path(output_path, model[input_path_kw]).with_suffix('.h5')):
if verbose:
print(i, model[input_path_kw], ': exists, skipping')
continue
if verbose:
print(i, model[input_path_kw], ': processing')
# store all columns of the input file in the hdf5 file
data = {}
extra_info = {}
for col in model.index:
extra_info[col] = model[col]
# obtain the termination code and store if requested
termination_code = 'uk'
if add_stopping_condition:
lines = get_end_log_file(Path(input_path_prefix, model[input_path_kw], log_file))
for line in lines:
if 'termination code' in line:
termination_code = line.split()[-1]
extra_info['termination_code'] = termination_code
# store the nnaps-version in the output data.
extra_info['nnaps-version'] = __version__
data['extra_info'] = extra_info
# check if all history files that are requested are available and can be read. If there is an error,
# skip to the next model
history = {}
if star1_history_file is not None:
try:
d1 = read_mesa_output(Path(input_path_prefix, model[input_path_kw], star1_history_file))[1]
if star_columns is not None:
d1 = rf.repack_fields(d1[star_columns])
history['star1'] = d1
except Exception as e:
if verbose:
print("Error in reading star1: ", e)
continue
if star2_history_file is not None:
try:
d2 = read_mesa_output(Path(input_path_prefix, model[input_path_kw], star2_history_file))[1]
if star_columns is not None:
d2 = rf.repack_fields(d2[star_columns])
history['star2'] = d2
except Exception as e:
if verbose:
print("Error in reading star2: ", e)
continue
if binary_history_file is not None:
try:
d3 = read_mesa_output(Path(input_path_prefix, model[input_path_kw], binary_history_file))[1]
if star_columns is not None:
d3 = rf.repack_fields(d3[binary_columns])
history['binary'] = d3
except Exception as e:
if verbose:
print("Error in reading binary: ", e)
continue
data['history'] = history
# check if profiles exists and store them is requested. Also make a profile lookup table (legend)
profiles = {}
profile_legend = []
profile_name_length = 0 # store longest profile name to create recarray of profile_legend
if profile_files is not None:
if profile_files == 'all':
profile_paths = Path(input_path_prefix, model[input_path_kw], profiles_path).glob(profile_pattern)
else:
profile_paths = [Path(input_path_prefix, model[input_path_kw], profiles_path, p) for p in profile_files]
for filepath in profile_paths:
if not filepath.is_file():
continue
profile_name = filepath.stem
header, profile_data = read_mesa_output(filename=filepath, only_first=False)
if profile_columns is not None:
profile_data = rf.repack_fields(profile_data[profile_columns])
profiles[profile_name] = profile_data
if len(profile_name) > profile_name_length:
profile_name_length = len(profile_name)
profile_legend.append((header['model_number'], profile_name))
if len(profiles.keys()) >= 1:
data['profiles'] = profiles
profile_legend = np.array(profile_legend, dtype=[('model_number', 'f8'),
('profile_name', 'a'+str(profile_name_length))])
data['profile_legend'] = profile_legend
# rather annoying way to assure that Path doesn't cut of part of the folder name when adding the .h5 suffix
# if not this will happen: M1.080_M0.502_P192.67_Z0.01129 -> M1.080_M0.502_P192.67_Z0.h5
output_file = Path(output_path, model[input_path_kw])
output_file = output_file.with_suffix(output_file.suffix + '.h5')
fileio.write2hdf5(data, output_file, update=False)
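A hedged usage sketch for convert2hdf5, not part of the file above: it passes a pandas DataFrame because the function iterates rows with .iterrows() and reads the 'path' column; the run directory name reuses the example quoted in the comment above, and the column lists and folder names are placeholders.

```python
import pandas as pd

from nnaps.mesa.compress_mesa import convert2hdf5

# one row per MESA run; the directory name is the example mentioned in the code above
modellist = pd.DataFrame({'path': ['M1.080_M0.502_P192.67_Z0.01129']})

convert2hdf5(
    modellist,
    star_columns=['model_number', 'age'],            # assumed history columns
    binary_columns=['model_number', 'period_days'],  # assumed history columns
    input_path_prefix='runs',                        # placeholder folder holding the runs
    output_path='compressed',                        # one .h5 file per run is written here
    verbose=True,
)
```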
avg_line_length: 38.291667 | max_line_length: 120 | alphanum_fraction: 0.586507 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,538 | score_documentation: 0.27617

hexsha: a5880384a51a2b5216de1db68e0632fb623a8bfc | size: 1,022 | ext: py | lang: Python
max_stars: path src/_deblaze.py | repo MenkeTechnologies/zsh-more-completions | head c0d4716b695ea9bf3d0e870bc2ced5354db3c031 | licenses ["MIT"] | stars 25 | events 2018-07-29T01:49:23.000Z to 2022-01-19T19:21:23.000Z
max_issues: path src/_deblaze.py | repo MenkeTechnologies/zsh-more-completions | head c0d4716b695ea9bf3d0e870bc2ced5354db3c031 | licenses ["MIT"] | issues null | events null
max_forks: path src/_deblaze.py | repo MenkeTechnologies/zsh-more-completions | head c0d4716b695ea9bf3d0e870bc2ced5354db3c031 | licenses ["MIT"] | forks null | events null
content:
#compdef deblaze.py
local arguments
arguments=(
'--version[show programs version number and exit]'
'(- * :)'{-h,--help}'[show this help message and exit]'
{-u,--url}'[URL for AMF Gateway]'
{-s,--service}'[remote service to call]'
{-m,--method}'[method to call]'
  {-p,--params}'[parameters to send pipe separated]'
{-f,--fullauto}'[URL to SWF - Download SWF, find remoting services]'
'--fuzz[fuzz parameter values]'
{-c,--creds}'[username and password for service in u:p format]'
{-b,--cookie}'[send cookies with request]'
{-A,--user-agent}'[user-Agent string to send to the server]'
{-1,--bruteService}'[file to load services for brute forcing (mutually]'
{-2,--bruteMethod}'[file to load methods for brute forcing (mutually]'
{-d,--debug}'[enable pyamf/AMF debugging]'
{-v,--verbose}'[print http request/response]'
{-r,--report}'[generate HTML report]'
{-n,--nobanner}'[do not display banner]'
{-q,--quiet}'[do not display messages]'
'*:filename:_files'
)
_arguments -s $arguments
avg_line_length: 36.5 | max_line_length: 74 | alphanum_fraction: 0.662427 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 685 | score_documentation: 0.670254

hexsha: a58a9d34b89b4bc4bc0e0b2929228a0dbbb74a83 | size: 1,379 | ext: py | lang: Python
max_stars: path jakso_ml/training_data/white_balancer.py | repo JaksoSoftware/jakso-ml | head 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | licenses ["MIT"] | stars null | events null
max_issues: path jakso_ml/training_data/white_balancer.py | repo JaksoSoftware/jakso-ml | head 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | licenses ["MIT"] | issues 3 | events 2020-09-25T18:40:52.000Z to 2021-08-25T14:44:30.000Z
max_forks: path jakso_ml/training_data/white_balancer.py | repo JaksoSoftware/jakso-ml | head 5720ea557ca2fcf9ae16e329c198acd8e31258c4 | licenses ["MIT"] | forks null | events null
content:
import random, copy
import cv2 as cv
import numpy as np
from scipy import interpolate
from .augmenter import Augmenter
class WhiteBalancer(Augmenter):
'''
Augmenter that randomly changes the white balance of the SampleImages.
'''
def __init__(
self,
min_red_rand,
max_red_rand,
min_blue_rand,
max_blue_rand,
**kwargs
):
super().__init__(**kwargs)
self.min_red_rand = min_red_rand
self.max_red_rand = max_red_rand
self.min_blue_rand = min_blue_rand
self.max_blue_rand = max_blue_rand
def augment(self, sample):
sample_copy = copy.deepcopy(sample)
b, g, r = cv.split(sample_copy.image)
rand_b = 128 * random.uniform(1 + self.min_blue_rand, 1 + self.max_blue_rand)
rand_r = 0
if rand_b < 1:
rand_r = 128 * random.uniform(1, 1 + self.max_red_rand)
else:
rand_r = 128 * random.uniform(1 + self.min_red_rand, 1)
lut_b = self._create_lut(rand_b)
lut_r = self._create_lut(rand_r)
b = cv.LUT(b, lut_b)
r = cv.LUT(r, lut_r)
sample_copy.image = cv.merge((b, g, r))
return sample_copy
def _create_lut(self, center):
tck = interpolate.splrep([0, 128, 256], [0, center, 256], k = 2)
lut = np.rint(interpolate.splev(range(256), tck, der = 0))
lut = np.where(lut > 255, 255, lut)
lut = np.where(lut < 0, 0, lut)
lut = np.uint8(lut)
return lut
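A small usage sketch, with two assumptions flagged here and in the comments: the Augmenter base class accepts no extra required keyword arguments, and a sample only needs an .image attribute holding a BGR uint8 array, which is all augment() above touches. The file name and shift ranges are illustrative.

```python
import cv2 as cv
from types import SimpleNamespace

from jakso_ml.training_data.white_balancer import WhiteBalancer

# stand-in for a SampleImage: augment() only reads and writes sample.image
sample = SimpleNamespace(image=cv.imread('example.jpg'))

balancer = WhiteBalancer(
    min_red_rand=-0.2, max_red_rand=0.2,   # illustrative shift ranges
    min_blue_rand=-0.2, max_blue_rand=0.2,
)
shifted = balancer.augment(sample)
cv.imwrite('example_shifted.jpg', shifted.image)
```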
avg_line_length: 25.072727 | max_line_length: 81 | alphanum_fraction: 0.658448 | count_classes: 1,258 | score_classes: 0.912255 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 82 | score_documentation: 0.059463

hexsha: a58ab462ad7e52132f563d3dc36462f69902b7de | size: 824 | ext: py | lang: Python
max_stars: path app/set_game/deck.py | repo mmurch/set-game | head 8fd1303ab2a4d628547fd7ebca572cf04087cbdb | licenses ["MIT"] | stars null | events null
max_issues: path app/set_game/deck.py | repo mmurch/set-game | head 8fd1303ab2a4d628547fd7ebca572cf04087cbdb | licenses ["MIT"] | issues 5 | events 2021-03-10T04:32:22.000Z to 2022-02-26T22:25:52.000Z
max_forks: path app/set_game/deck.py | repo mmurch/set-game | head 8fd1303ab2a4d628547fd7ebca572cf04087cbdb | licenses ["MIT"] | forks null | events null
content:
from .card import Card
from .features import Number, Color, Shape, Style
from math import floor
class Deck():
def __init__(self):
return
    @staticmethod
    def get_card_by_id(id):
        # a staticmethod takes no `self`; call the other helpers on the class itself
        # note: get_shape() below exists but is not passed to Card here
        if id < 1 or id > 81:
            raise ValueError
        return Card(
            Deck.get_number(id),
            Deck.get_color(id),
            Deck.get_style(id)
        )
@staticmethod
def get_color(id):
return Color(floor((id - 1) % 9 / 3))
@staticmethod
def get_number(id):
return Number((id - 1) % 3)
@staticmethod
def get_shape(id):
return Shape(floor((id - 1) % 27 / 9))
@staticmethod
def get_style(id):
if id <= 27:
return Style.FILLED
elif id <= 54:
return Style.SHADED
return Style.EMPTY
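A worked example of the id-to-feature arithmetic used by the helpers above, as a standalone sketch with plain integers standing in for the Number/Color/Shape/Style enum values:

```python
from math import floor

card_id = 30
number_idx = (card_id - 1) % 3               # 29 % 3        -> 2
color_idx = floor((card_id - 1) % 9 / 3)     # floor(2 / 3)  -> 0
shape_idx = floor((card_id - 1) % 27 / 9)    # floor(2 / 9)  -> 0
style = 'FILLED' if card_id <= 27 else 'SHADED' if card_id <= 54 else 'EMPTY'

print(number_idx, color_idx, shape_idx, style)  # 2 0 0 SHADED
```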
avg_line_length: 20.6 | max_line_length: 49 | alphanum_fraction: 0.54733 | count_classes: 725 | score_classes: 0.879854 | count_generators: 0 | score_generators: 0 | count_decorators: 642 | score_decorators: 0.779126 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: a58be826db80a8cc6c893e8f64d3265192b6d0a2 | size: 27,777 | ext: py | lang: Python
max_stars: path tests/test_utils.py | repo grantsrb/langpractice | head 59cf8f53b85fa8b4d639ffc6e175ec22c0d2362c | licenses ["MIT"] | stars null | events null
max_issues: path tests/test_utils.py | repo grantsrb/langpractice | head 59cf8f53b85fa8b4d639ffc6e175ec22c0d2362c | licenses ["MIT"] | issues null | events null
max_forks: path tests/test_utils.py | repo grantsrb/langpractice | head 59cf8f53b85fa8b4d639ffc6e175ec22c0d2362c | licenses ["MIT"] | forks null | events null
content:
from langpractice.utils.utils import *
import unittest
import torch.nn.functional as F
class TestUtils(unittest.TestCase):
def test_zipfian1(self):
n_loops = 5000
low = 1
high = 10
order = 1
counts = {i:0 for i in range(low, high+1)}
tot = 0
for i in range(n_loops):
samp = zipfian(low, high, order)
counts[samp] += 1
tot += 1
targ_probs = {k:1/(k**order) for k in counts.keys()}
s = np.sum(list(targ_probs.values()))
targ_probs = {k:v/s for k,v in targ_probs.items()}
for k,v in counts.items():
prob = v/tot
diff = prob-targ_probs[k]
self.assertTrue(np.abs(diff) < 0.03)
def test_zipfian2(self):
n_loops = 5000
low = 1
high = 10
order = 2
counts = {i:0 for i in range(low, high+1)}
tot = 0
for i in range(n_loops):
samp = zipfian(low, high, order)
counts[samp] += 1
tot += 1
targ_probs = {k:1/(k**order) for k in counts.keys()}
s = np.sum(list(targ_probs.values()))
targ_probs = {k:v/s for k,v in targ_probs.items()}
for k,v in counts.items():
prob = v/tot
diff = prob-targ_probs[k]
self.assertTrue(np.abs(diff) < 0.03)
def test_piraha_labels(self):
weights = {
3: torch.FloatTensor([.55, .45]),
4: torch.FloatTensor([.4, .6]),
5: torch.FloatTensor([.4, .6]),
6: torch.FloatTensor([.4, .6]),
7: torch.FloatTensor([.45, .55]),
8: torch.FloatTensor([.3, .7]),
9: torch.FloatTensor([.3, .7]),
10: torch.FloatTensor([.3, .7]),
}
n_items = torch.randint(0,11, (100,))
avgs = torch.zeros_like(n_items)
n_loops = 5000
for i in range(n_loops):
labels = torch.zeros_like(n_items)
labels = get_piraha_labels(labels,n_items)
avgs = avgs + labels
avgs = avgs/n_loops
for k in weights.keys():
targ = weights[k][0]*2 + weights[k][1]*3
avg = avgs[n_items==k]
if len(avg) > 0:
avg = avg.mean()
diff = float(avg-targ)
self.assertTrue(np.abs(diff)<0.01)
def test_duplicate_labels(self):
n_items = torch.randint(0,11, (100,))
avgs = torch.zeros_like(n_items)
n_loops = 5000
for i in range(n_loops):
labels = torch.zeros_like(n_items)
labels = get_duplicate_labels(labels,n_items, 22)
if i < 20:
for n,l in zip(n_items,labels):
self.assertTrue((n*2)==l or (n*2+1)==l)
avgs = avgs + labels
avgs = avgs/n_loops
for i in range(torch.max(n_items)):
avg = avgs[n_items==i]
if len(avg) > 0:
avg = avg.mean()
targ = ((i*2)+.5)
diff = targ-avg
self.assertTrue(np.abs(diff)<0.01)
def test_get_lang_labels_english(self):
max_label = 10
use_count_words = 1
n_samps = 100
n_items = torch.randint(0,max_label+10, (n_samps,))
n_targs = n_items.clone()
labels = get_lang_labels(
n_items,
n_targs,
max_label,
use_count_words
)
labels = labels.cpu().detach().numpy()
n_items = n_items.cpu().detach().numpy()
idx = n_items<max_label
self.assertTrue(np.array_equal(labels[idx],n_items[idx]))
self.assertTrue(
np.array_equal(
labels[~idx],
np.ones_like(labels[~idx])*max_label
)
)
def test_get_lang_labels_comparison(self):
max_label = 10
use_count_words = 0
n_samps = 100
n_items = torch.randint(0,max_label+10, (n_samps,))
n_targs = torch.randint(0,max_label+10, (n_samps,))
labels = get_lang_labels(
n_items,
n_targs,
max_label,
use_count_words
)
labels = labels.cpu().detach().numpy()
n_items = n_items.cpu().detach().numpy()
n_targs = n_targs.cpu().detach().numpy()
idx = n_items<n_targs
goal = np.zeros_like(labels[idx])
self.assertTrue(np.array_equal(labels[idx],goal))
idx = n_items==n_targs
goal = np.ones_like(labels[idx])
self.assertTrue(np.array_equal(labels[idx],goal))
idx = n_items>n_targs
goal = np.ones_like(labels[idx])*2
self.assertTrue(np.array_equal(labels[idx],goal))
def test_calc_accs(self):
logits = torch.FloatTensor([[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]])
# 0 correct
targs = torch.LongTensor([[ 0, 1, 0, 0, ]])
accs = calc_accs(logits, targs,prepender="test")
self.assertEqual(0, accs["test_acc"])
# 1 correct
targs = torch.LongTensor([[ 0, 0, 0, 0, ]])
accs = calc_accs(logits, targs,prepender="test")
self.assertEqual(1/4, accs["test_acc"])
# all correct
targs = torch.LongTensor([[ 3,0,1,2 ]])
accs = calc_accs(logits, targs,prepender="test")
self.assertEqual(1, accs["test_acc"])
def test_calc_losses(self):
logits = torch.FloatTensor([[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]])
loss_fxn = F.cross_entropy
# 0 correct
targs = torch.LongTensor([[ 0, 1, 0, 0, ]])
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1]),
targs.reshape(-1),
reduction="none"
).mean().item()
losses = calc_losses(
logits,
targs,
loss_fxn=loss_fxn,
prepender="test"
)
self.assertEqual(targ_loss, losses["test_loss"])
# 1 correct
targs = torch.LongTensor([[ 0, 0, 0, 0, ]])
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1]),
targs.reshape(-1),
reduction="none"
).mean().item()
losses = calc_losses(
logits,
targs,
loss_fxn=loss_fxn,
prepender="test"
)
self.assertEqual(targ_loss, losses["test_loss"])
# all correct
targs = torch.LongTensor([[ 3,0,1,2 ]])
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1]),
targs.reshape(-1),
reduction="none"
).mean().item()
losses = calc_losses(
logits,
targs,
loss_fxn=loss_fxn,
prepender="test"
)
self.assertEqual(targ_loss, losses["test_loss"])
def test_calc_accs_categories(self):
logits = torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
])
categories = torch.LongTensor([
[ 1, 1, 3, 0, ],
[ 0, 3, 0, 3, ],
])
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ]
])
accs = calc_accs(logits, targs, categories, prepender="test")
self.assertEqual(0, accs["test_accctg_0"])
self.assertEqual(0, accs["test_accctg_1"])
self.assertEqual(0, accs["test_accctg_3"])
# 1 correct 1
targs = torch.LongTensor([
[ 0, 0, 0, 0, ],
[ 0, 1, 0, 0, ]
])
accs = calc_accs(logits, targs, categories,prepender="test")
self.assertEqual(0, accs["test_accctg_0"])
self.assertEqual(1/2, accs["test_accctg_1"])
self.assertEqual(0, accs["test_accctg_3"])
# all correct 0
targs = torch.LongTensor([
[ 0, 0, 0, 2, ],
[ 3, 1, 1, 0, ]
])
accs = calc_accs(logits, targs, categories,prepender="test")
self.assertEqual(1, accs["test_accctg_0"])
self.assertEqual(1/2, accs["test_accctg_1"])
self.assertEqual(0, accs["test_accctg_3"])
def test_calc_losses_categories(self):
logits = torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
])
categories = torch.LongTensor([
[ 1, 1, 3, 0, ],
[ 0, 3, 0, 3, ],
])
loss_fxn = F.cross_entropy
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ]
])
losses = calc_losses(logits, targs, categories, prepender="test")
idxs = categories.reshape(-1)==0
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_0"])
idxs = categories.reshape(-1)==1
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_1"])
idxs = categories.reshape(-1)==3
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_3"])
# 1 correct 1
targs = torch.LongTensor([
[ 0, 0, 0, 0, ],
[ 0, 1, 0, 0, ]
])
losses = calc_losses(logits, targs, categories,prepender="test")
idxs = categories.reshape(-1)==0
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_0"])
idxs = categories.reshape(-1)==1
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_1"])
idxs = categories.reshape(-1)==3
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_3"])
# all correct 0
targs = torch.LongTensor([
[ 0, 0, 0, 2, ],
[ 3, 1, 1, 0, ]
])
losses = calc_losses(logits, targs, categories,prepender="test")
idxs = categories.reshape(-1)==0
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_0"])
idxs = categories.reshape(-1)==1
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_1"])
idxs = categories.reshape(-1)==3
targ_loss = loss_fxn(
logits.reshape(-1,logits.shape[-1])[idxs],
targs.reshape(-1)[idxs],
).item()
self.assertEqual(targ_loss, losses["test_lossctg_3"])
def test_avg_over_dicts(self):
vals = np.arange(10)
dicts = [ {"foo": i, "poo": i*i} for i in vals ]
avgs = avg_over_dicts(dicts)
self.assertEqual(np.mean(vals), avgs["foo"])
self.assertEqual(np.mean(vals**2), avgs["poo"])
def test_calc_lang_loss_and_accs(self):
loss_fxn = torch.nn.CrossEntropyLoss()
langs = (torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]
]).cuda(),)
drops = torch.LongTensor([
[ 1,1,1,1 ],
[ 1,1,1,1 ],
])
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ],
])
targ_loss = loss_fxn(langs[0].reshape(-1,4), targs.cuda().reshape(-1))
targ_accs = calc_accs(langs[0].cpu(), targs, targs, prepender="test_lang")
loss, losses, accs = calc_lang_loss_and_accs(
langs,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn=loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
targ_losses = calc_losses(langs[0].cpu(), targs, targs, prepender="test_lang")
for k in targ_losses.keys():
self.assertEqual(targ_losses[k], losses[k])
# 3 correct
targs = torch.LongTensor([
[ 3, 1, 1, 0, ],
[ 0, 0, 0, 0, ],
])
targ_loss = loss_fxn(langs[0].reshape(-1,4), targs.cuda().reshape(-1))
targ_accs = calc_accs(langs[0].cpu(), targs, targs, prepender="test_lang")
loss, losses, accs = calc_lang_loss_and_accs(
langs,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn=loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
targ_losses = calc_losses(langs[0].cpu(), targs, targs, prepender="test_lang")
for k in targ_losses.keys():
self.assertAlmostEqual(targ_losses[k], losses[k], places=4)
def test_calc_lang_loss_and_accs_drops(self):
loss_fxn = torch.nn.CrossEntropyLoss()
langs = (torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]
]).cuda(),)
drops = torch.LongTensor([
[ 0,1,0,1 ],
[ 1,0,1,0 ],
])
dropped_lang = torch.FloatTensor([
[
[4,1,2,3],
[-100,-5,100,0],
],
[
[1,2,3,4],
[-1,2,-3,-4],
]
]).cuda()
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ],
])
dropped_targs = torch.LongTensor([
[ 1, 0, ],
[ 0, 0, ],
])
targ_loss = loss_fxn(
dropped_lang.reshape(-1,4),
dropped_targs.cuda().reshape(-1)
)
targ_accs = calc_accs(
dropped_lang.cpu(),
dropped_targs,
dropped_targs,
prepender="test_lang"
)
targ_losses = calc_losses(
dropped_lang.cpu(),
dropped_targs,
dropped_targs,
prepender="test_lang"
)
loss, losses, accs = calc_lang_loss_and_accs(
langs,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
for k in targ_losses.keys():
self.assertEqual(targ_losses[k], losses[k])
# 3 correct
targs = torch.LongTensor([
[ 3, 0, 1, 2, ],
[ 0, 0, 0, 0, ],
])
dropped_targs = torch.LongTensor([
[ 0, 2, ],
[ 0, 0, ],
])
targ_loss = loss_fxn(
dropped_lang.reshape(-1,4),
dropped_targs.cuda().reshape(-1)
)
targ_accs = calc_accs(
dropped_lang.cpu(),
dropped_targs,
dropped_targs,
prepender="test_lang"
)
targ_losses = calc_losses(
dropped_lang.cpu(),
dropped_targs,
dropped_targs,
prepender="test_lang"
)
loss, losses, accs = calc_lang_loss_and_accs(
langs,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
for k in targ_losses.keys():
self.assertAlmostEqual(targ_losses[k], losses[k], places=4)
def test_calc_actn_loss_and_accs(self):
loss_fxn = torch.nn.CrossEntropyLoss()
actns = torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]
]).cuda()
n_targs = torch.LongTensor([
[ 0, 1, 2, 3 ],
[ 4, 3, 2, 1 ],
])
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ],
])
targ_loss = loss_fxn(actns.reshape(-1,4), targs.cuda().reshape(-1))
targ_accs = calc_accs(
actns.cpu(),
targs,
n_targs,
prepender="test_actn"
)
loss, accs = calc_actn_loss_and_accs(
actns,
targs.reshape(-1),
n_targs.reshape(-1),
loss_fxn,
prepender="test"
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
# 3 correct
targs = torch.LongTensor([
[ 3, 1, 1, 0, ],
[ 0, 0, 0, 0, ],
])
targ_loss = loss_fxn(actns.reshape(-1,4), targs.cuda().reshape(-1))
targ_accs = calc_accs(
actns.cpu().reshape(-1,4),
targs.reshape(-1),
n_targs.reshape(-1),
prepender="test_actn"
)
loss, accs = calc_actn_loss_and_accs(
actns,
targs.reshape(-1),
n_targs.reshape(-1),
loss_fxn,
prepender="test"
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
def test_get_loss_and_accs_phase0(self):
phase = 0
loss_fxn = torch.nn.CrossEntropyLoss()
preds = (torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]
]).cuda(),)
drops = torch.LongTensor([
[ 0,1,0,1 ],
[ 1,0,1,0 ],
])
n_targs = torch.LongTensor([
[ 0, 1, 2, 3 ],
[ 4, 3, 2, 1 ],
])
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ],
])
targ_loss, _, targ_accs = calc_lang_loss_and_accs(
preds,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
loss, losses, accs = get_loss_and_accs(
phase=phase,
actn_preds=preds,
lang_preds=preds,
actn_targs=targs.reshape(-1),
lang_targs=targs.reshape(-1),
drops=drops.reshape(-1),
n_targs=n_targs.reshape(-1),
n_items=targs.reshape(-1),
prepender="test",
loss_fxn=loss_fxn,
lang_p=0.5
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
# 3 correct
targs = torch.LongTensor([
[ 3, 1, 1, 0, ],
[ 0, 0, 0, 0, ],
])
targ_loss, _, targ_accs = calc_lang_loss_and_accs(
preds,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
loss, losses, accs = get_loss_and_accs(
phase=phase,
actn_preds=preds,
lang_preds=preds,
actn_targs=targs.reshape(-1),
lang_targs=targs.reshape(-1),
drops=drops.reshape(-1),
n_targs=n_targs.reshape(-1),
n_items=targs.reshape(-1),
prepender="test",
loss_fxn=loss_fxn,
lang_p=0.5
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
def test_get_loss_and_accs_phase1(self):
phase = 1
loss_fxn = torch.nn.CrossEntropyLoss()
preds = torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]
]).cuda()
drops = torch.LongTensor([
[ 0,1,0,1 ],
[ 1,0,1,0 ],
])
n_targs = torch.LongTensor([
[ 0, 1, 2, 3 ],
[ 4, 3, 2, 1 ],
])
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ],
])
targ_loss, targ_accs = calc_actn_loss_and_accs(
preds,
targs.reshape(-1),
n_targs.reshape(-1),
loss_fxn,
prepender="test"
)
loss, losses, accs = get_loss_and_accs(
phase=phase,
actn_preds=preds,
lang_preds=preds,
actn_targs=targs.reshape(-1),
lang_targs=targs.reshape(-1),
drops=drops.reshape(-1),
n_targs=n_targs.reshape(-1),
n_items=targs.reshape(-1),
prepender="test",
loss_fxn=loss_fxn,
lang_p=0.5
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
# 3 correct
targs = torch.LongTensor([
[ 3, 1, 1, 0, ],
[ 0, 0, 0, 0, ],
])
targ_loss, targ_accs = calc_actn_loss_and_accs(
preds,
targs.reshape(-1),
n_targs.reshape(-1),
loss_fxn,
prepender="test"
)
loss, losses, accs = get_loss_and_accs(
phase=phase,
actn_preds=preds,
lang_preds=preds,
actn_targs=targs.reshape(-1),
lang_targs=targs.reshape(-1),
drops=drops.reshape(-1),
n_targs=n_targs.reshape(-1),
n_items=targs.reshape(-1),
prepender="test",
loss_fxn=loss_fxn,
lang_p=0.5
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
def test_get_loss_and_accs_phase2(self):
phase = 2
loss_fxn = torch.nn.CrossEntropyLoss()
actns = torch.FloatTensor([
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
],
[
[1,2,3,4],
[4,1,2,3],
[-1,2,-3,-4],
[-100,-5,100,0],
]
]).cuda()
langs = (actns.clone(),)
drops = torch.LongTensor([
[ 0,1,0,1 ],
[ 1,0,1,0 ],
])
n_targs = torch.LongTensor([
[ 0, 1, 2, 3 ],
[ 4, 3, 2, 1 ],
])
# 0 correct
targs = torch.LongTensor([
[ 0, 1, 0, 0, ],
[ 0, 1, 0, 0, ],
])
lang_targ_loss, _, lang_targ_accs = calc_lang_loss_and_accs(
langs,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
actn_targ_loss, actn_targ_accs = calc_actn_loss_and_accs(
actns,
targs.reshape(-1),
n_targs.reshape(-1),
loss_fxn,
prepender="test"
)
targ_loss = 0.5*lang_targ_loss + 0.5*actn_targ_loss
targ_accs = {**lang_targ_accs, **actn_targ_accs}
loss, losses, accs = get_loss_and_accs(
phase=phase,
actn_preds=actns,
lang_preds=langs,
actn_targs=targs.reshape(-1),
lang_targs=targs.reshape(-1),
drops=drops.reshape(-1),
n_targs=n_targs.reshape(-1),
n_items=targs.reshape(-1),
prepender="test",
loss_fxn=loss_fxn,
lang_p=0.5
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
# 3 correct
targs = torch.LongTensor([
[ 3, 1, 1, 0, ],
[ 0, 0, 0, 0, ],
])
lang_targ_loss, _, lang_targ_accs = calc_lang_loss_and_accs(
langs,
targs.reshape(-1),
drops.reshape(-1),
loss_fxn,
categories=targs.reshape(-1),
prepender="test"
)
actn_targ_loss, actn_targ_accs = calc_actn_loss_and_accs(
actns,
targs.reshape(-1),
n_targs.reshape(-1),
loss_fxn,
prepender="test"
)
targ_loss = 0.5*lang_targ_loss + 0.5*actn_targ_loss
targ_accs = {**lang_targ_accs, **actn_targ_accs}
loss, losses, accs = get_loss_and_accs(
phase=phase,
actn_preds=actns,
lang_preds=langs,
actn_targs=targs.reshape(-1),
lang_targs=targs.reshape(-1),
drops=drops.reshape(-1),
n_targs=n_targs.reshape(-1),
n_items=targs.reshape(-1),
prepender="test",
loss_fxn=loss_fxn,
lang_p=0.5
)
self.assertEqual(float(loss), float(targ_loss))
for k in targ_accs.keys():
self.assertEqual(targ_accs[k], accs[k])
if __name__=="__main__":
unittest.main()
avg_line_length: 31.89093 | max_line_length: 86 | alphanum_fraction: 0.464161 | count_classes: 27,637 | score_classes: 0.99496 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 972 | score_documentation: 0.034993

hexsha: a58e0065829efa585d05c036b442a368f95ae6a9 | size: 1,626 | ext: py | lang: Python
max_stars: path src/entities/git_repo.py | repo wnjustdoit/devops-py | head 54dd722a577c4b3ecda45aa85c067130fd292ab9 | licenses ["Apache-2.0"] | stars null | events null
max_issues: path src/entities/git_repo.py | repo wnjustdoit/devops-py | head 54dd722a577c4b3ecda45aa85c067130fd292ab9 | licenses ["Apache-2.0"] | issues 6 | events 2021-04-08T20:46:56.000Z to 2022-01-13T01:52:06.000Z
max_forks: path src/entities/git_repo.py | repo wnjustdoit/devops-py | head 54dd722a577c4b3ecda45aa85c067130fd292ab9 | licenses ["Apache-2.0"] | forks null | events null
content:
#!/usr/bin/env python3
from .entity import Entity, EntitySchema, Base
from sqlalchemy import Column, Integer, String, Sequence
from marshmallow import Schema, fields, post_load
class GitRepo(Entity, Base):
__tablename__ = 'git_repo'
id = Column(Integer, Sequence('git_repo_id_seq'), primary_key=True)
description = Column(String(128), nullable=True)
ssh_url_to_repo = Column(String(128), nullable=False)
http_url_to_repo = Column(String(128), nullable=False)
web_url = Column(String(128), nullable=False)
name = Column(String(64), nullable=False)
name_with_namespace = Column(String(64), nullable=False)
path = Column(String(64), nullable=False)
path_with_namespace = Column(String(64), nullable=False)
def __init__(self, description, ssh_url_to_repo, http_url_to_repo, web_url, name, name_with_namespace, path,
path_with_namespace, id=None, created_by=None):
Entity.__init__(self, created_by)
self.id = id
self.description = description
self.ssh_url_to_repo = ssh_url_to_repo
self.http_url_to_repo = http_url_to_repo
self.web_url = web_url
self.name = name
self.name_with_namespace = name_with_namespace
self.path = path
self.path_with_namespace = path_with_namespace
class GitRepoSchema(EntitySchema):
id = fields.Number()
description = ssh_url_to_repo = http_url_to_repo = web_url = name = name_with_namespace = path = path_with_namespace = fields.Str(
missing=None)
@post_load
def make_git_repo(self, data, **kwargs):
return GitRepo(**data)
avg_line_length: 37.813953 | max_line_length: 134 | alphanum_fraction: 0.710947 | count_classes: 1,442 | score_classes: 0.886839 | count_generators: 0 | score_generators: 0 | count_decorators: 86 | score_decorators: 0.052891 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 49 | score_documentation: 0.030135

hexsha: a590274916afd797594033b1e72a778f82d65211 | size: 4,415 | ext: py | lang: Python
max_stars: path src/algorithms/tcn_utils/tcn_model.py | repo pengkangzaia/mvts-ano-eval | head 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | licenses ["MIT"] | stars 24 | events 2021-09-04T08:51:55.000Z to 2022-03-30T16:45:54.000Z
max_issues: path src/algorithms/tcn_utils/tcn_model.py | repo pengkangzaia/mvts-ano-eval | head 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | licenses ["MIT"] | issues 3 | events 2021-10-12T02:34:34.000Z to 2022-03-18T10:37:35.000Z
max_forks: path src/algorithms/tcn_utils/tcn_model.py | repo pengkangzaia/mvts-ano-eval | head 976ffa2f151c8f91ce007e9a455bb4f97f89f2c9 | licenses ["MIT"] | forks 15 | events 2021-09-18T03:41:02.000Z to 2022-03-21T09:03:01.000Z
content:
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
"""TCN adapted from https://github.com/locuslab/TCN"""
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class pad1d(nn.Module):
def __init__(self, pad_size):
super(pad1d, self).__init__()
self.pad_size = pad_size
def forward(self, x):
return torch.cat([x, x[:, :, -self.pad_size:]], dim = 2).contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding,
dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalBlockTranspose(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding,
dropout=0.2):
super(TemporalBlockTranspose, self).__init__()
self.conv1 = weight_norm(nn.ConvTranspose1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.pad1 = pad1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.ConvTranspose1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding,
dilation=dilation))
self.pad2 = pad1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.dropout1, self.relu1, self.pad1, self.conv1,
self.dropout2, self.relu2, self.pad2, self.conv2)
self.downsample = nn.ConvTranspose1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
super(TemporalConvNet, self).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i-1]
out_channels = num_channels[i]
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size, dropout=dropout)]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x)
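A quick forward-pass sketch with illustrative shapes: because each TemporalBlock pads causally and then removes the overhang with Chomp1d, the time dimension is preserved, and the channel dimension ends up as the last entry of num_channels. The module path is only assumed to mirror the repo layout above.

```python
import torch

# module path mirrors the repo layout above; adjust to your project
from src.algorithms.tcn_utils.tcn_model import TemporalConvNet

# 8 sequences, 12 input channels, 100 time steps; three levels of 32 channels each
tcn = TemporalConvNet(num_inputs=12, num_channels=[32, 32, 32], kernel_size=3, dropout=0.1)
x = torch.randn(8, 12, 100)    # (batch, channels, time)
y = tcn(x)
print(y.shape)                 # torch.Size([8, 32, 100])
```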
avg_line_length: 39.070796 | max_line_length: 110 | alphanum_fraction: 0.59479 | count_classes: 4,271 | score_classes: 0.967384 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 54 | score_documentation: 0.012231

hexsha: a591a1103146cfd95f29ba55d7e7f556a915a79a | size: 1,868 | ext: py | lang: Python
max_stars: path static/file/2021-04-10/index.py | repo yuguo97/nest-node | head a3d6cb99005403691779c44a488e3b22f5479538 | licenses ["MIT"] | stars null | events null
max_issues: path static/file/2021-04-10/index.py | repo yuguo97/nest-node | head a3d6cb99005403691779c44a488e3b22f5479538 | licenses ["MIT"] | issues null | events null
max_forks: path static/file/2021-04-10/index.py | repo yuguo97/nest-node | head a3d6cb99005403691779c44a488e3b22f5479538 | licenses ["MIT"] | forks null | events null
content:
'''
Author: your name
Date: 2021-04-08 17:14:41
LastEditTime: 2021-04-09 09:13:28
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \github\test\index.py
'''
#!user/bin/env python3
# -*- coding: utf-8 -*-
import psutil
cpu_info = {'user': 0, 'system': 0, 'idle': 0, 'percent': 0}
memory_info = {'total': 0, 'available': 0,
'percent': 0, 'used': 0, 'free': 0}
disk_id = []
disk_total = []
disk_used = []
disk_free = []
disk_percent = []
# get cpu information
def get_cpu_info():
cpu_times = psutil.cpu_times()
cpu_info['user'] = cpu_times.user
cpu_info['system'] = cpu_times.system
cpu_info['idle'] = cpu_times.idle
cpu_info['percent'] = psutil.cpu_percent(interval=2)
# get memory information
def get_memory_info():
mem_info = psutil.virtual_memory()
memory_info['total'] = mem_info.total
memory_info['available'] = mem_info.available
memory_info['percent'] = mem_info.percent
memory_info['used'] = mem_info.used
memory_info['free'] = mem_info.free
def get_disk_info():
for id in psutil.disk_partitions():
if 'cdrom' in id.opts or id.fstype == '':
continue
disk_name = id.device.split(':')
s = disk_name[0]
disk_id.append(s)
disk_info = psutil.disk_usage(id.device)
disk_total.append(disk_info.total)
disk_used.append(disk_info.used)
disk_free.append(disk_info.free)
disk_percent.append(disk_info.percent)
if __name__ == '__main__':
get_cpu_info()
cpu_status = cpu_info['percent']
print('cpu usage is:%s%%' % cpu_status)
get_memory_info()
mem_status = memory_info['percent']
print('memory usage is:%s%%' % mem_status)
get_disk_info()
for i in range(len(disk_id)):
        print('%s disk usage is: %s%%' % (disk_id[i], disk_percent[i]))
avg_line_length: 26.685714 | max_line_length: 75 | alphanum_fraction: 0.646681 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 517 | score_documentation: 0.276767

hexsha: a5924218bd91ec5cd3a910146334e0e5acd39d37 | size: 1,592 | ext: py | lang: Python
max_stars: path SS/p202.py | repo MTandHJ/leetcode | head f3832ed255d259cb881666ec8bd3de090d34e883 | licenses ["MIT"] | stars null | events null
max_issues: path SS/p202.py | repo MTandHJ/leetcode | head f3832ed255d259cb881666ec8bd3de090d34e883 | licenses ["MIT"] | issues null | events null
max_forks: path SS/p202.py | repo MTandHJ/leetcode | head f3832ed255d259cb881666ec8bd3de090d34e883 | licenses ["MIT"] | forks null | events null
content:
"""
编写一个算法来判断一个数 n 是不是快乐数。
「快乐数」定义为:
对于一个正整数,每一次将该数替换为它每个位置上的数字的平方和。
然后重复这个过程直到这个数变为 1,也可能是 无限循环 但始终变不到 1。
如果 可以变为 1,那么这个数就是快乐数。
如果 n 是快乐数就返回 true ;不是,则返回 false 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/happy-number
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
def isHappy(self, n: int) -> bool:
        # first split the number into its individual digits
LIMIT = 1000
nums = list(map(int, list(str(n))))
cnt = 0
# res = n
res = self.square_sum(nums)
while cnt < LIMIT:
if res == 1:
return True
else:
nums = list(map(int, list(str(res))))
res = self.square_sum(nums)
cnt += 1
return False
def square_sum(self, nums:List[int]) -> int:
def my_pow(x):
return x ** 2
return sum(list(map(my_pow, nums)))
# hash-set approach
class Solution:
def isHappy(self, n: int) -> bool:
        # keep a set of the digit-square sums seen so far
res_sum = set()
        # helper that returns the sum of the squared digits of n
def getNext(n: int) -> int:
res_sum = 0
            # peel off one digit at a time while n > 0
while n > 0:
n, digit = divmod(n, 10)
res_sum += digit ** 2
return res_sum
        # update n and check it:
        # if the new value has been seen in res_sum before and is not 1, we have entered a cycle,
        # and that cycle can never escape to 1
while n != 1:
n = getNext(n)
if n in res_sum:
return False
res_sum.add(n)
return True
# for test
if __name__ == "__main__":
ins = Solution()
n = 19
print(ins.isHappy(n))
avg_line_length: 21.808219 | max_line_length: 53 | alphanum_fraction: 0.523241 | count_classes: 1,364 | score_classes: 0.6437 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 955 | score_documentation: 0.450684

hexsha: a5964514746ca9cd43f5272151dd592b02ad5040 | size: 2,309 | ext: py | lang: Python
max_stars: path UI/UIObject.py | repo R2D2Hud/CharlieOSX | head 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | licenses ["MIT"] | stars 12 | events 2020-04-11T13:10:14.000Z to 2022-03-24T09:12:54.000Z
max_issues: path UI/UIObject.py | repo R2D2Hud/CharlieOSX | head 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | licenses ["MIT"] | issues 14 | events 2020-01-24T14:07:45.000Z to 2020-12-20T19:14:04.000Z
max_forks: path UI/UIObject.py | repo R2D2Hud/CharlieOSX | head 37c4edb0b31eda8082acd8e31afc3dc85fd75abe | licenses ["MIT"] | forks 11 | events 2020-06-19T20:12:43.000Z to 2021-04-25T05:02:20.000Z
content:
from profileHelper import ProfileHelper
from pybricks.hubs import EV3Brick
from pybricks.parameters import Button, Color
from pybricks.media.ev3dev import Image, ImageFile, Font, SoundFile
from UI.tools import Box  # needed by the `bounds: Box` annotation below
class UIObject:
def __init__(self, name: str, brick: EV3Brick, bounds: Box, contentType, content, padding=(0, 0, False), font=Font(family='arial', size=11), visible=True):
# self.logger = logger
self.name = name
self.brick = brick
self.bounds = bounds
self.padding = padding
self.contentType = contentType
self.content = content
self.font = font
self.visibility = visible
self.radius = 0
self.selected = False
def getName(self):
return self.name
def setVisibility(self, visibility: bool):
self.visibility = visibility
def getVisibility(self):
return self.visibility
def update(self):
pass
def draw(self, selected=False):
if self.padding[2]:
x = self.padding[0]
y = self.padding[1]
else:
x = self.bounds.x + self.padding[0]
y = self.bounds.y + self.padding[1]
if self.visibility:
if self.contentType == 'img':
if self.selected:
self.radius = 5
else:
self.radius = 0
self.brick.screen.draw_image(x, y, self.content, transparent=Color.RED)
elif self.contentType == 'textBox':
self.brick.screen.set_font(self.font)
self.brick.screen.draw_box(x, y, x + self.bounds.width, y + self.bounds.height, r=2, fill=True, color=Color.WHITE)
self.brick.screen.draw_box(x, y, x + self.bounds.width, y + self.bounds.height, r=2, fill=False if not selected else True, color=Color.BLACK)
self.brick.screen.draw_text(self.bounds.x + 1, self.bounds.y + 1, self.content, text_color=Color.BLACK if not selected else Color.WHITE)
else:
if self.contentType == 'textBox':
self.brick.screen.draw_box(x, y, x + self.bounds.width, y + self.bounds.height, r=2, fill=True, color=Color.WHITE)
    def setClickAction(self, action):  # action: a zero-argument callable
self.clickAction = action
def click(self):
self.clickAction()
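A construction sketch, heavily hedged: the Box(x, y, width, height) signature is only inferred from the attributes draw() reads, the UI.tools import location mirrors the import at the top of the file, and the logger/ProfileHelper wiring of the real project is omitted.

```python
from pybricks.hubs import EV3Brick

from UI.tools import Box            # assumed to provide Box(x, y, width, height)
from UI.UIObject import UIObject

brick = EV3Brick()
label = UIObject(
    'greeting',
    brick,
    Box(10, 10, 80, 16),            # assumed Box(x, y, width, height)
    'textBox',
    'Hello',
    padding=(2, 2, False),
)
label.setClickAction(lambda: print('clicked'))
label.draw()
```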
avg_line_length: 37.241935 | max_line_length: 159 | alphanum_fraction: 0.603725 | count_classes: 2,124 | score_classes: 0.919879 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 78 | score_documentation: 0.033781

hexsha: a59648f6d46920ef327bbe7ce9659f9fe533785d | size: 9,558 | ext: py | lang: Python
max_stars: path factory.py | repo rosinality/vision-transformers-pytorch | head b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | licenses ["MIT"] | stars 77 | events 2021-04-03T06:44:19.000Z to 2021-07-07T07:05:01.000Z
max_issues: path factory.py | repo rosinality/vision-transformers-pytorch | head b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | licenses ["MIT"] | issues 1 | events 2021-04-08T06:59:41.000Z to 2021-04-08T11:20:32.000Z
max_forks: path factory.py | repo rosinality/vision-transformers-pytorch | head b884b5da79900c96e4ce17fbb575cf1c5cb3cd5f | licenses ["MIT"] | forks 6 | events 2021-04-15T13:36:37.000Z to 2022-02-03T12:32:20.000Z
content:
import os
from types import SimpleNamespace
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
import numpy as np
from tensorfn import distributed as dist, nsml, get_logger
try:
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import fn, types, pipeline_def
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
except ImportError:
pass
from autoaugment import RandAugment
from dataset import LMDBDataset
from mix_dataset import MixDataset
from transforms import RandomErasing
def wd_skip_fn(skip_type):
def check_wd_skip_fn(name, param):
if skip_type == "nfnet":
return "bias" in name or "gain" in name
elif skip_type == "resnet":
return "bias" in name or "bn" in name or param.ndim == 1
elif skip_type == "vit":
return "bias" in name or "cls" in name or "norm" in name or param.ndim == 1
elif skip_type == "dino":
return "bias" in name or param.ndim == 1
return check_wd_skip_fn
def make_optimizer(train_conf, parameters):
lr = train_conf.base_lr * train_conf.dataloader.batch_size / 256
return train_conf.optimizer.make(parameters, lr=lr)
def make_scheduler(train_conf, optimizer, epoch_len):
warmup = train_conf.scheduler.warmup * epoch_len
n_iter = epoch_len * train_conf.epoch
lr = train_conf.base_lr * train_conf.dataloader.batch_size / 256
if train_conf.scheduler.type == "exp_epoch":
return train_conf.scheduler.make(
optimizer, epoch_len, lr=lr, max_iter=train_conf.epoch, warmup=warmup
)
else:
return train_conf.scheduler.make(optimizer, lr=lr, n_iter=n_iter, warmup=warmup)
def repeated_sampler(sampler):
epoch = 0
while True:
for i in sampler:
yield i
epoch += 1
sampler.set_epoch(epoch)
class ExternalSource:
def __init__(self, dataset, batch_size, shuffle, distributed):
self.dataset = dataset
self.batch_size = batch_size
self.sampler = dist.data_sampler(dataset, shuffle=True, distributed=distributed)
def __iter__(self):
self.generator = repeated_sampler(self.sampler)
return self
def __next__(self):
images, labels = [], []
for _ in range(self.batch_size):
img, label = self.dataset[next(self.generator)]
images.append(np.frombuffer(img, dtype=np.uint8))
labels.append(label)
return images, torch.tensor(labels, dtype=torch.int64)
# @pipeline_def
def dali_pipeline(source, image_size, training, cpu=False):
images, labels = fn.external_source(source=source, num_outputs=2)
if cpu:
device = "cpu"
images = fn.decoders.image(images, device=device)
else:
device = "gpu"
images = fn.decoders.image(
images,
device="mixed",
device_memory_padding=211025920,
host_memory_padding=140544512,
)
if training:
images = fn.random_resized_crop(
images,
device=device,
size=image_size,
interp_type=types.DALIInterpType.INTERP_CUBIC,
)
coin = fn.random.coin_flip(0.5)
images = fn.flip(images, horizontal=coin)
else:
pass
return images, labels
class DALIWrapper:
def __init__(self, pipeline):
self.dataloader = DALIClassificationIterator(pipeline)
def __iter__(self):
self.iterator = iter(self.dataloader)
return self
    def __next__(self):
        data = next(self.iterator)
        image = data[0]["data"]
        label = data[0]["label"]
        return image, label
def make_dali_dataloader(
path, train_size, valid_size, train_set, valid_set, batch, distributed, n_worker
):
pass
def make_augment_dataset(path, train_transform, valid_transform):
train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")
train_set = LMDBDataset(train_dir, train_transform)
valid_set = LMDBDataset(valid_dir, valid_transform)
return train_set, valid_set
def make_dataset(
path, train_size, valid_size, randaug_params, mix_params, erasing, verbose=True
):
train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
transform_list = [
transforms.RandomResizedCrop(train_size, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
RandAugment(**randaug_params),
transforms.ToTensor(),
normalize,
]
if erasing > 0:
transform_list += [
RandomErasing(
erasing, mode="pixel", max_count=1, num_splits=0, device="cpu"
)
]
if mix_params["mix_before_aug"]:
preprocess = transform_list[:2]
postprocess = transform_list[2:]
else:
preprocess = transform_list
postprocess = []
if verbose:
logger = get_logger()
log = f"""Transforms
Transform before Mixes:
{preprocess}
Mixes: mixup={mix_params["mixup"]}, cutmix={mix_params["cutmix"]}"""
if mix_params["mix_before_aug"]:
log += f"""
Transform after Mixes:
{postprocess}"""
logger.info(log)
train_preprocess = transforms.Compose(preprocess)
train_postprocess = transforms.Compose(postprocess)
train_set = LMDBDataset(train_dir, train_preprocess)
train_set = MixDataset(
train_set, train_postprocess, mix_params["mixup"], mix_params["cutmix"]
)
valid_preprocess = transforms.Compose(
[
transforms.Resize(valid_size + 32, interpolation=Image.BICUBIC),
transforms.CenterCrop(valid_size),
transforms.ToTensor(),
normalize,
]
)
valid_set = LMDBDataset(valid_dir, valid_preprocess)
return train_set, valid_set
def make_dataset_cuda(path, train_size, valid_size, randaug_params, mixup, cutmix):
train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_preprocess = transforms.Compose(
[
transforms.RandomResizedCrop(train_size, interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
]
)
train_postprocess = transforms.Compose(
[RandAugment(**randaug_params), transforms.ToTensor(), normalize]
)
train_set = LMDBDataset(train_dir, train_preprocess)
train_set = MixDataset(train_set, train_postprocess, mixup, cutmix)
valid_preprocess = transforms.Compose(
[
transforms.Resize(valid_size + 32, interpolation=Image.BICUBIC),
transforms.CenterCrop(valid_size),
transforms.ToTensor(),
normalize,
]
)
valid_set = LMDBDataset(valid_dir, valid_preprocess)
return train_set, valid_set
def make_dataloader(train_set, valid_set, batch, distributed, n_worker):
batch_size = batch // dist.get_world_size()
train_sampler = dist.data_sampler(train_set, shuffle=True, distributed=distributed)
train_loader = DataLoader(
train_set, batch_size=batch_size, sampler=train_sampler, num_workers=n_worker
)
valid_loader = DataLoader(
valid_set,
batch_size=batch_size,
sampler=dist.data_sampler(valid_set, shuffle=False, distributed=distributed),
num_workers=n_worker,
)
return train_loader, valid_loader, train_sampler
def lerp(start, end, stage, max_stage):
return start + (end - start) * (stage / (max_stage - 1))
def progressive_adaptive_regularization(
stage,
max_stage,
train_sizes,
valid_sizes,
randaug_layers,
randaug_magnitudes,
mixups,
cutmixes,
dropouts,
drop_paths,
verbose=True,
):
train_size = int(lerp(*train_sizes, stage, max_stage))
valid_size = int(lerp(*valid_sizes, stage, max_stage))
randaug_layer = int(lerp(*randaug_layers, stage, max_stage))
randaug_magnitude = lerp(*randaug_magnitudes, stage, max_stage)
mixup = lerp(*mixups, stage, max_stage)
cutmix = lerp(*cutmixes, stage, max_stage)
dropout = lerp(*dropouts, stage, max_stage)
drop_path = lerp(*drop_paths, stage, max_stage)
if verbose:
logger = get_logger()
log = f"""Progressive Training with Adaptive Regularization
Stage: {stage + 1} / {max_stage}
Image Size: train={train_size}, valid={valid_size}
RandAugment: n_augment={randaug_layer}, magnitude={randaug_magnitude}
Mixup: {mixup}, Cutmix: {cutmix}, Dropout={dropout}, DropPath={drop_path}"""
logger.info(log)
return SimpleNamespace(
train_size=train_size,
valid_size=valid_size,
randaug_layer=randaug_layer,
randaug_magnitude=randaug_magnitude,
mixup=mixup,
cutmix=cutmix,
dropout=dropout,
drop_path=drop_path,
)
| 29.319018
| 89
| 0.643022
| 1,028
| 0.107554
| 168
| 0.017577
| 0
| 0
| 0
| 0
| 716
| 0.074911
|
a5965f266f95ad0e2605b8928b40d8635af8fdc1
| 2,990
|
py
|
Python
|
scripts/binarize-phrase-table.py
|
grgau/GroundHog
|
35fac1b80bdcc6b7516cb82fe2ecd19dbcfa248a
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/binarize-phrase-table.py
|
grgau/GroundHog
|
35fac1b80bdcc6b7516cb82fe2ecd19dbcfa248a
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/binarize-phrase-table.py
|
grgau/GroundHog
|
35fac1b80bdcc6b7516cb82fe2ecd19dbcfa248a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Converts moses phrase table file to HDF5 files
# Written by Bart van Merrienboer (University of Montreal)
import argparse
import cPickle
import gzip
import sys
import tables
import numpy
parser = argparse.ArgumentParser()
parser.add_argument("input",
type=argparse.FileType('r'),
help="The phrase table to be processed")
parser.add_argument("source_output",
type=argparse.FileType('w'),
help="The source output file")
parser.add_argument("target_output",
type=argparse.FileType('w'),
help="The target output file")
parser.add_argument("source_dictionary",
type=argparse.FileType('r'),
help="A pickled dictionary with words and IDs as keys and "
"values respectively")
parser.add_argument("target_dictionary",
type=argparse.FileType('r'),
help="A pickled dictionary with words and IDs as keys and "
"values respectively")
parser.add_argument("--labels",
type=int, default=15000,
help="Set the maximum word index")
args = parser.parse_args()
class Index(tables.IsDescription):
pos = tables.UInt32Col()
length = tables.UInt32Col()
files = [args.source_output, args.target_output]
vlarrays = []
indices = []
for i, f in enumerate(files):
files[i] = tables.open_file(f.name, f.mode)
vlarrays.append(files[i].createEArray(files[i].root, 'phrases',
tables.Int32Atom(),shape=(0,)))
indices.append(files[i].createTable("/", 'indices', Index, "a table of indices and lengths"))
sfile = gzip.open(args.input.name, args.input.mode)
source_table = cPickle.load(args.source_dictionary)
target_table = cPickle.load(args.target_dictionary)
tables = [source_table, target_table]
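# note: this rebinds the name `tables` (previously the PyTables module) to the two word-to-id
# dictionaries; the module itself is not used past this point, so the shadowing is harmless here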
count = 0
counts = numpy.zeros(2).astype('int32')
freqs_sum = 0
for line in sfile:
fields = line.strip().split('|||')
for field_index in [0, 1]:
words = fields[field_index].strip().split(' ')
word_indices = [tables[field_index].get(word, 1) for word in words]
if args.labels > 0:
word_indices = [word_index if word_index < args.labels else 1
for word_index in word_indices]
vlarrays[field_index].append(numpy.array(word_indices))
pos = counts[field_index]
length = len(word_indices)
ind = indices[field_index].row
ind['pos'] = pos
ind['length'] = length
ind.append()
counts[field_index] += len(word_indices)
count += 1
if count % 100000 == 0:
print count,
[i.flush() for i in indices]
sys.stdout.flush()
elif count % 10000 == 0:
print '.',
sys.stdout.flush()
for f in indices:
f.flush()
for f in files:
f.close()
sfile.close()
print 'processed', count, 'phrase pairs'
| 30.510204
| 97
| 0.614716
| 95
| 0.031773
| 0
| 0
| 0
| 0
| 0
| 0
| 596
| 0.199331
|
a596a50f47d0ab9d4cfb1eb2e63d7c4e56340474
| 1,137
|
py
|
Python
|
Easy/1207.UniqueNumberofOccurrences.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 39
|
2020-07-04T11:15:13.000Z
|
2022-02-04T22:33:42.000Z
|
Easy/1207.UniqueNumberofOccurrences.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 1
|
2020-07-15T11:53:37.000Z
|
2020-07-15T11:53:37.000Z
|
Easy/1207.UniqueNumberofOccurrences.py
|
YuriSpiridonov/LeetCode
|
2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781
|
[
"MIT"
] | 20
|
2020-07-14T19:12:53.000Z
|
2022-03-02T06:28:17.000Z
|
"""
Given an array of integers arr, write a function that returns true if and
only if the number of occurrences of each value in the array is unique.
Example:
Input: arr = [1,2,2,1,1,3]
Output: true
Explanation: The value 1 has 3 occurrences, 2 has 2 and 3 has 1. No two
values have the same number of occurrences.
Example:
Input: arr = [1,2]
Output: false
Example:
Input: arr = [-3,0,1,-3,1,1,1,-3,10,0]
Output: true
Constraints:
- 1 <= arr.length <= 1000
- -1000 <= arr[i] <= 1000
"""
#Difficulty: Easy
#63 / 63 test cases passed.
#Runtime: 48 ms
#Memory Usage: 13.8 MB
#Runtime: 48 ms, faster than 39.33% of Python3 online submissions for Unique Number of Occurrences.
#Memory Usage: 13.8 MB, less than 92.46% of Python3 online submissions for Unique Number of Occurrences.
from typing import List
class Solution:
def uniqueOccurrences(self, arr: List[int]) -> bool:
digits = {}
for d in arr:
if d not in digits:
digits[d] = 0
digits[d] += 1
return len(digits.keys()) == len(set(digits.values()))
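#A compact alternative sketch (not part of the original submission) using collections.Counter:
#    from collections import Counter
#    def unique_occurrences(arr):
#        counts = Counter(arr)
#        return len(set(counts.values())) == len(counts)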
| 29.153846
| 104
| 0.60774
| 266
| 0.233949
| 0
| 0
| 0
| 0
| 0
| 0
| 861
| 0.757256
|
a598b26fe309d9bc4db6c62f8d0ba413c791f7b0
| 9,360
|
py
|
Python
|
Playground3/src/playground/network/devices/pnms/PNMSDevice.py
|
kandarpck/networksecurity2018
|
dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626
|
[
"MIT"
] | 3
|
2018-10-25T16:03:53.000Z
|
2019-06-13T15:24:41.000Z
|
Playground3/src/playground/network/devices/pnms/PNMSDevice.py
|
kandarpck/networksecurity2018
|
dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626
|
[
"MIT"
] | null | null | null |
Playground3/src/playground/network/devices/pnms/PNMSDevice.py
|
kandarpck/networksecurity2018
|
dafe2ee8d39bd9596b1ce3fbc8b50ca645bcd626
|
[
"MIT"
] | null | null | null |
from playground.common.os import isPidAlive
from playground.common import CustomConstant as Constant
from .NetworkManager import NetworkManager, ConnectionDeviceAPI, RoutesDeviceAPI
import os, signal, time
class PNMSDeviceLoader(type):
"""
This metaclass for PNMS device types auto loads concrete device types
into the system.
"""
@classmethod
def loadPnmsDefinitions(cls, newClass):
if newClass.REGISTER_DEVICE_TYPE_NAME:
if newClass.REGISTER_DEVICE_TYPE_NAME in NetworkManager.REGISTERED_DEVICE_TYPES:
raise Exception("Duplicate Device Type Registration")
NetworkManager.REGISTERED_DEVICE_TYPES[newClass.REGISTER_DEVICE_TYPE_NAME] = newClass
for deviceType in newClass.CanConnectTo:
if not issubclass(deviceType, PNMSDevice):
raise Exception("Connect rules requires a subclass of device type. Got {}".format(deviceType))
rule = (newClass, deviceType)
if not ConnectionDeviceAPI.ConnectionPermitted(newClass, deviceType):
ConnectionDeviceAPI.PERMITTED_CONNECTION_TYPES.append(rule)
if newClass.CanRoute:
if not RoutesDeviceAPI.PermitsRouting(newClass):
RoutesDeviceAPI.PERMITTED_ROUTING_TYPES.append(newClass)
def __new__(cls, name, parents, dict):
definitionCls = super().__new__(cls, name, parents, dict)
cls.loadPnmsDefinitions(definitionCls)
return definitionCls
class PNMSDevice(metaclass=PNMSDeviceLoader):
CONFIG_TRUE = "true"
CONFIG_FALSE = "false"
CONFIG_OPTION_AUTO = "auto_enable"
"""
Sub classes that need access to the Connection section or
Routing section need to override these values
"""
CanConnectTo = []
CanRoute = False
STATUS_DISABLED = Constant(strValue="Disabled", boolValue=False)
STATUS_WAITING_FOR_DEPENDENCIES = Constant(strValue="Waiting", boolValue=False)
STATUS_ABNORMAL_SHUTDOWN = Constant(strValue="Abnormal Shutdown", boolValue=False)
STATUS_ENABLED = Constant(strValue="Enabled", boolValue=True)
REGISTER_DEVICE_TYPE_NAME = None # All abstract classes should leave this none. All concrete classes must specify.
def __init__(self, deviceName):
self._pnms = None
self._config = None
self._name = deviceName
self._deviceDependencies = set([])
# the status is the current status
self._enableStatus = self.STATUS_DISABLED
# the toggle is if there has been a request to go from one state to the other
self._enableToggle = False
def _cleanupFiles(self):
if not self._enableStatus:
runFiles = self._getDeviceRunFiles()
for file in runFiles:
if os.path.exists(file):
os.unlink(file)
def _reloadRuntimeData(self):
pass
def installToNetwork(self, pnms, mySection):
self._pnms = pnms
self._config = mySection
self._reloadRuntimeData()
# call self.enabled to correctly set enableStatus
# cannot call in constructor, requires self._pnms
self._runEnableStatusStateMachine()
def networkManager(self):
return self._pnms
def _sanitizeVerb(self, verb):
return verb.strip().lower()
def name(self):
return self._name
def dependenciesEnabled(self):
for device in self._deviceDependencies:
if not device.enabled(): return False
return True
def isAutoEnabled(self):
return self._config.get(self.CONFIG_OPTION_AUTO, self.CONFIG_FALSE) == self.CONFIG_TRUE
def pnmsAlert(self, device, alert, alertArgs):
if device in self._deviceDependencies:
if alert == device.enabled:
self._runEnableStatusStateMachine()
def initialize(self, args):
pass
def destroy(self):
pass
def enable(self):
if not self.enabled():
self._enableToggle = True
self._runEnableStatusStateMachine()
def disable(self):
if self.enabled():
self._enableToggle = True
self._runEnableStatusStateMachine()
def enabled(self):
self._cleanupFiles()
return self._enableStatus
def getPid(self):
statusFile, pidFile, lockFile = self._getDeviceRunFiles()
if os.path.exists(pidFile):
with open(pidFile) as f:
return int(f.read().strip())
return None
def config(self, verb, args):
pass
def query(self, verb, args):
return None
def _getDeviceRunFiles(self):
statusFile = os.path.join(self._pnms.location(), "device_{}.status".format(self.name()))
pidFile = os.path.join(self._pnms.location(), "device_{}.pid".format(self.name()))
lockFile = os.path.join(self._pnms.location(), "device_{}.pid.lock".format(self.name()))
return statusFile, pidFile, lockFile
def _running(self):
for requiredFile in self._getDeviceRunFiles():
if not os.path.exists(requiredFile):
return False
pid = self.getPid()
return pid and isPidAlive(pid)
def _runEnableStatusStateMachine(self):
newStatus = self._enableStatus
# TODO: I wrote this function in a 'haze' thinkin the manager keeps running.
# but, of course, it shuts down after run. There's going to be
# no callback. Well, I'm leaving this code in. Because, it may
# be that in the future I have a call-back system that works.
# but for now, let's try to activate everything.
if not self._enableStatus and self._enableToggle:
for device in self._deviceDependencies:
if not device.enabled():
device.enable()
if self._enableStatus in [self.STATUS_DISABLED, self.STATUS_ABNORMAL_SHUTDOWN]:
if self._running():
# We might have gotten here because of a restart
# or a toggle.
if self.dependenciesEnabled():
newStatus = self.STATUS_ENABLED
else:
# oops. A dependency has shut down.
# Assume this device was supposed to be enabled.
self._shutdown()
newStatus = self.STATUS_WAITING_FOR_DEPENDENCIES
elif self._enableToggle:
if self.dependenciesEnabled():
self._launch()
if self._running():
newStatus = self.STATUS_ENABLED
else:
newStatus = self.STATUS_ABNORMAL_SHUTDOWN
else:
newStatus = self.STATUS_DISABLED
elif self._enableStatus == self.STATUS_WAITING_FOR_DEPENDENCIES:
if self._enableToggle:
# we were trying to turn on, were waiting for deps, but now stop
newStatus = self.STATUS_DISABLED
elif self.dependenciesEnabled():
self._launch()
if self._running():
newStatus = self.STATUS_ENABLED
else:
newStatus = self.STATUS_ABNORMAL_SHUTDOWN
else:
newStatus = self.STATUS_WAITING_FOR_DEPENDENCIES
elif self._enableStatus == self.STATUS_ENABLED:
if self._enableToggle:
self._shutdown()
newStatus = self.STATUS_DISABLED
elif not self._running():
newStatus = self.STATUS_DISABLED
elif not self.dependenciesEnabled():
self._shutdown()
newStatus = self.STATUS_WAITING_FOR_DEPENDENCIES
else:
newStatus = self.STATUS_ENABLED
alert = (self._enableStatus != newStatus)
self._enableStatus = newStatus
self._enableToggle = False
self._pnms.postAlert(self.enable, self._enableStatus)
def _shutdown(self, timeout=5):
pid = self.getPid()
if pid:
os.kill(pid, signal.SIGTERM)
sleepCount = timeout
while isPidAlive(pid) and sleepCount > 0:
time.sleep(1)
sleepCount = sleepCount-1
if isPidAlive(pid):
raise Exception("Could not shut down device {}. (pid={})".format(self.name(), pid))
for file in self._getDeviceRunFiles():
if os.path.exists(file):
os.unlink(file)
def _launch(self, timeout=30):
pass
def _waitUntilRunning(self, timeout=30):
sleepCount = timeout
while not self._running() and sleepCount > 0:
time.sleep(1)
sleepCount = sleepCount - 1
return self._running()
| 39.327731
| 119
| 0.583761
| 9,130
| 0.975427
| 0
| 0
| 957
| 0.102244
| 0
| 0
| 1,319
| 0.140919
|
a5991177aa084d283fe154f4a7a56db6da664557
| 162
|
py
|
Python
|
testing/tests/constants_enums/constants_enums.py
|
gigabackup/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 60
|
2018-09-26T15:46:00.000Z
|
2021-10-10T02:37:14.000Z
|
testing/tests/constants_enums/constants_enums.py
|
gigabackup/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 1,706
|
2018-09-26T16:11:22.000Z
|
2021-08-20T13:37:59.000Z
|
testing/tests/constants_enums/constants_enums.py
|
griffinmilsap/gigantum-client
|
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
|
[
"MIT"
] | 11
|
2019-03-14T13:23:51.000Z
|
2022-01-25T01:29:16.000Z
|
import enum
"""Declare all enumerations used in test."""
class ProjectConstants(enum.Enum):
"""All constants for project test."""
SUCCESS = 'success'
| 16.2
| 44
| 0.685185
| 100
| 0.617284
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.555556
|
a59a37e3de5885e67c006743f177528505c3b6da
| 3,315
|
py
|
Python
|
core/eval.py
|
lmkoch/subgroup-shift-detection
|
31971704dc4a768db5e082e6e37a504f4e245224
|
[
"MIT"
] | null | null | null |
core/eval.py
|
lmkoch/subgroup-shift-detection
|
31971704dc4a768db5e082e6e37a504f4e245224
|
[
"MIT"
] | null | null | null |
core/eval.py
|
lmkoch/subgroup-shift-detection
|
31971704dc4a768db5e082e6e37a504f4e245224
|
[
"MIT"
] | 1
|
2022-01-26T09:54:41.000Z
|
2022-01-26T09:54:41.000Z
|
import os
import pandas as pd
import numpy as np
from core.dataset import dataset_fn
from core.model import model_fn, get_classification_model
from core.mmdd import trainer_object_fn
from core.muks import muks
def stderr_proportion(p, n):
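    # standard error of a binomial proportion p estimated from n samples: sqrt(p * (1 - p) / n)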
return np.sqrt(p * (1-p) / n)
def eval(exp_dir, exp_name, params, seed, split, sample_sizes=[10, 30, 50, 100, 500],
num_reps=100, num_permutations=1000):
"""Analysis of test power vs sample size for both MMD-D and MUKS
Args:
exp_dir ([type]): exp base directory
exp_name ([type]): experiment name (hashed config)
params (Dict): [description]
seed (int): random seed
        split (str): fold to evaluate, e.g. 'validation' or 'test'
sample_sizes (list, optional): Defaults to [10, 30, 50, 100, 500].
num_reps (int, optional): for calculation rejection rates. Defaults to 100.
num_permutations (int, optional): for MMD-D permutation test. Defaults to 1000.
"""
log_dir = os.path.join(exp_dir, exp_name)
out_csv = os.path.join(log_dir, f'{split}_consistency_analysis.csv')
df = pd.DataFrame(columns=['sample_size','power', 'power_stderr',
'type_1err', 'type_1err_stderr', 'method'])
for batch_size in sample_sizes:
params['dataset']['dl']['batch_size'] = batch_size
dataloader = dataset_fn(seed=seed, params_dict=params['dataset'])
# MMD-D
model = model_fn(seed=seed, params=params['model'])
trainer = trainer_object_fn(model=model, dataloaders=dataloader, seed=seed,
log_dir=log_dir, **params['trainer'])
res = trainer.performance_measures(dataloader[split]['p'], dataloader[split]['q'], num_batches=num_reps,
num_permutations=num_permutations)
res_mmd = {'exp_hash': exp_name,
'sample_size': batch_size,
'power': res['reject_rate'],
'power_stderr': stderr_proportion(res['reject_rate'], batch_size),
'type_1err': res['type_1_err'] ,
'type_1err_stderr': stderr_proportion(res['type_1_err'] , batch_size),
'method': 'mmd'}
# MUKS
model = get_classification_model(params['model'])
reject_rate, type_1_err = muks(dataloader[split]['p'], dataloader[split]['q'], num_reps, model)
res_rabanser = {'exp_hash': exp_name,
'sample_size': batch_size,
'power': reject_rate,
'power_stderr': stderr_proportion(reject_rate, batch_size),
'type_1err': type_1_err,
'type_1err_stderr': stderr_proportion(type_1_err, batch_size),
'method': 'rabanser'}
print('---------------------------------')
print(f'sample size: {batch_size}')
print(f'mmd: {res_mmd}')
print(f'rabanser: {res_rabanser}')
df = df.append(pd.DataFrame(res_mmd, index=['']), ignore_index=True)
df = df.append(pd.DataFrame(res_rabanser, index=['']), ignore_index=True)
df.to_csv(out_csv)
| 41.4375
| 112
| 0.574962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,095
| 0.330317
|
a59a527b87a6e3d50b3ac6e6acea7185a59af36b
| 1,423
|
py
|
Python
|
handlers/product_handlers.py
|
group-project-carbon-accounting/server
|
93155868a0988c04fe79d30ef565c652d2c8f5de
|
[
"MIT"
] | null | null | null |
handlers/product_handlers.py
|
group-project-carbon-accounting/server
|
93155868a0988c04fe79d30ef565c652d2c8f5de
|
[
"MIT"
] | null | null | null |
handlers/product_handlers.py
|
group-project-carbon-accounting/server
|
93155868a0988c04fe79d30ef565c652d2c8f5de
|
[
"MIT"
] | null | null | null |
import tornado.web
import json
from handlers.async_fetch import async_fetch, GET, POST
class ProductAddHandler(tornado.web.RequestHandler):
async def post(self):
request_data = json.loads(self.request.body)
data = {
'prod_id': request_data['product_id'],
'comp_id': request_data['company_id'],
'carbon_cost': request_data['carbon_cost_offset']
}
response = await async_fetch('/product/add', method=POST, data=data)
self.write(json.dumps({'success': (response['status'] == 'success')}))
class ProductUpdateHandler(tornado.web.RequestHandler):
async def post(self):
request_data = json.loads(self.request.body)
data = {
'prod_id': request_data['product_id'],
'comp_id': request_data['company_id'],
'carbon_cost': request_data['carbon_cost_offset']
}
response = await async_fetch('/product/update', method=POST, data=data)
self.write(json.dumps({'success': (response['status'] == 'success')}))
class ProductGetHandler(tornado.web.RequestHandler):
async def get(self, company_id, product_id):
response_data = await async_fetch('/product/get/' + company_id + '/' + product_id, method=GET)
self.write(json.dumps({'generic': (response_data['comp_id'] is None),
'carbon_cost_offset': response_data['carbon_cost']}))
| 43.121212
| 102
| 0.645819
| 1,329
| 0.933942
| 0
| 0
| 0
| 0
| 1,155
| 0.811665
| 302
| 0.212228
|
a59ac366b9f4a35b896bc07199abf2aebd42714c
| 3,144
|
py
|
Python
|
Python/lab8 [2, 5, 7, 12, 17]/tz17.py
|
da-foxbite/KSU121
|
133637abb4f465aeecb845e6735ba383a2fdd689
|
[
"MIT"
] | 3
|
2019-09-23T06:06:30.000Z
|
2020-02-24T10:22:26.000Z
|
Python/lab8 [2, 5, 7, 12, 17]/tz17.py
|
da-foxbite/KSU141
|
133637abb4f465aeecb845e6735ba383a2fdd689
|
[
"MIT"
] | null | null | null |
Python/lab8 [2, 5, 7, 12, 17]/tz17.py
|
da-foxbite/KSU141
|
133637abb4f465aeecb845e6735ba383a2fdd689
|
[
"MIT"
] | 1
|
2020-10-26T11:00:22.000Z
|
2020-10-26T11:00:22.000Z
|
# 141, Суптеля Владислав
# Date: 09.04.20
# 17. Customer class: Last name, First name, Patronymic, Address, Credit card number, Bank account number; constructor;
# Methods: set attribute values, get attribute values, output the information. Create an array of objects of this class.
# Print the list of customers in alphabetical order and the list of customers whose credit card number is within a given range.
import names
from faker import Faker
fake = Faker()
import string
import random
def getRanNum(size, chars=string.digits):
return ''.join(random.choice(chars) for _ in range(size))
class Customer:
def __init__(self, firstName: str, secondName: str, middleName: str, address: str, creditCard: str, accNum: str):
self.firstName = firstName
self.secondName = secondName
self.middleName = middleName
self.address = address
self.creditCard = creditCard
self.accNum = accNum
    def __str__(self):
        return f"""
        Full name: {self.firstName} {self.secondName} {self.middleName}
        Address: {self.address}
        Credit card number: {self.creditCard}
        Bank account number: {self.accNum}
        """
def __getattr__(self, name: str):
if name == 'fullName':
return self.firstName + ' ' + self.secondName + ' ' + self.middleName
#set
def setFirstName(self, firstName: str):
self.firstName = firstName
    def setSecondName(self, secondName: str):
        self.secondName = secondName
    def setMiddleName(self, middleName: str):
        self.middleName = middleName
    def setAddress(self, address: str):
        self.address = address
    def setCreditCard(self, creditCard: str):
        self.creditCard = creditCard
    def setAccNum(self, accNum: str):
        self.accNum = accNum
#get
def getName(self):
return self.firstName
def getSecondName(self):
return self.secondName
def getMiddleName(self):
return self.middleName
def getAddress(self):
return self.address
def getCreditCard(self):
return self.creditCard
def getBackNum(self):
return self.accNum
def fixPrintout(l):
    print('\033[0;37;40m Sorted list of customers: ')
print('\n'.join(map(str, l)), end='\n')
customers = []
for i in range(0, 5):
customers.append(Customer(
names.get_first_name(), names.get_first_name(), names.get_first_name(), fake.address(),
getRanNum(16), getRanNum(8)))
# print("Информация о покупателе: ", customers[i])
customers.sort(key=lambda customer: customer.fullName)
fixPrintout(customers)
def CardNumCheck(customer: Customer, maxNum: str):
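    # digit-by-digit comparison of two equal-length numeric strings: returns the customer when their
    # card number is strictly greater than maxNum, otherwise False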
for i in range(0, len(customer.creditCard)):
if int(customer.creditCard[i]) > int(maxNum[i]):
return customer
if int(customer.creditCard[i]) < int(maxNum[i]):
return False
return False
maxNum = getRanNum(16)
#print(maxNum)
print('\033[0;37;49m List of customers whose card number is within the given range: ')
for i in range(0, 5):
if CardNumCheck(customers[i], maxNum) == False:
print('-')
pass
else:
print(customers[i])
| 33.094737
| 130
| 0.682252
| 1,608
| 0.441758
| 0
| 0
| 0
| 0
| 0
| 0
| 1,360
| 0.373626
|
a59c22cef1a85002b71aba681bd1b6e2ffee762e
| 7,344
|
py
|
Python
|
absolv/tests/test_models.py
|
SimonBoothroyd/absolv
|
dedb2b6eb567ec1b627dbe50f36f68e0c32931c4
|
[
"MIT"
] | null | null | null |
absolv/tests/test_models.py
|
SimonBoothroyd/absolv
|
dedb2b6eb567ec1b627dbe50f36f68e0c32931c4
|
[
"MIT"
] | 30
|
2021-11-02T12:47:24.000Z
|
2022-03-01T22:00:39.000Z
|
absolv/tests/test_models.py
|
SimonBoothroyd/absolv
|
dedb2b6eb567ec1b627dbe50f36f68e0c32931c4
|
[
"MIT"
] | null | null | null |
import numpy
import pytest
from openmm import unit
from pydantic import ValidationError
from absolv.models import (
DeltaG,
EquilibriumProtocol,
MinimizationProtocol,
SimulationProtocol,
State,
SwitchingProtocol,
System,
TransferFreeEnergyResult,
)
from absolv.tests import is_close
class TestSystem:
def test_n_solute_molecules(self):
system = System(solutes={"CO": 2, "CCO": 3}, solvent_a={"O": 1}, solvent_b=None)
assert system.n_solute_molecules == 5
@pytest.mark.parametrize("solvent_a, n_expected", [({"O": 3}, 3), (None, 0)])
def test_n_solvent_molecules_a(self, solvent_a, n_expected):
system = System(
solutes={
"CO": 1,
},
solvent_a=solvent_a,
solvent_b={"O": 5},
)
assert system.n_solvent_molecules_a == n_expected
@pytest.mark.parametrize("solvent_b, n_expected", [({"O": 5}, 5), (None, 0)])
def test_n_solvent_molecules_b(self, solvent_b, n_expected):
system = System(
solutes={
"CO": 1,
},
solvent_a={"O": 3},
solvent_b=solvent_b,
)
assert system.n_solvent_molecules_b == n_expected
def test_validate_solutes(self):
with pytest.raises(
ValidationError, match="at least one solute must be specified"
):
System(solutes={}, solvent_a=None, solvent_b=None)
system = System(solutes={"C": 1}, solvent_a=None, solvent_b=None)
assert system.solutes == {"C": 1}
def test_validate_solvent_a(self):
with pytest.raises(
ValidationError, match="specified when `solvent_a` is not none"
):
System(solutes={"C": 1}, solvent_a={}, solvent_b=None)
system = System(solutes={"C": 1}, solvent_a={"O": 2}, solvent_b=None)
assert system.solvent_a == {"O": 2}
def test_validate_solvent_b(self):
with pytest.raises(
ValidationError, match="specified when `solvent_b` is not none"
):
System(solutes={"C": 1}, solvent_a=None, solvent_b={})
system = System(solutes={"C": 1}, solvent_a=None, solvent_b={"O": 2})
assert system.solvent_b == {"O": 2}
def test_to_components(self):
system = System(
solutes={"CO": 1, "CCO": 2}, solvent_a={"O": 3}, solvent_b={"OCO": 4}
)
components_a, components_b = system.to_components()
assert components_a == [("CO", 1), ("CCO", 2), ("O", 3)]
assert components_b == [("CO", 1), ("CCO", 2), ("OCO", 4)]
class TestState:
def test_unit_validation(self):
state = State(
temperature=298.0 * unit.kelvin, pressure=101.325 * unit.kilopascals
)
assert is_close(state.temperature, 298.0)
assert is_close(state.pressure, 1.0)
class TestMinimizationProtocol:
def test_unit_validation(self):
protocol = MinimizationProtocol(
tolerance=1.0 * unit.kilojoule_per_mole / unit.angstrom
)
assert is_close(protocol.tolerance, 10.0)
class TestSimulationProtocol:
def test_unit_validation(self):
protocol = SimulationProtocol(
n_steps_per_iteration=1,
n_iterations=1,
timestep=0.002 * unit.picoseconds,
thermostat_friction=0.003 / unit.femtoseconds,
)
assert is_close(protocol.timestep, 2.0)
assert is_close(protocol.thermostat_friction, 3.0)
class TestEquilibriumProtocol:
def test_n_states(self):
protocol = EquilibriumProtocol(
lambda_sterics=[1.0, 0.5, 0.0], lambda_electrostatics=[1.0, 1.0, 1.0]
)
assert protocol.n_states == 3
@pytest.mark.parametrize(
"lambda_sterics, lambda_electrostatics",
[([1.0, 0.5, 0.0], [1.0, 1.0]), ([1.0, 0.5], [1.0, 1.0, 1.0])],
)
def test_validate_lambda_lengths(self, lambda_sterics, lambda_electrostatics):
with pytest.raises(ValidationError, match="lambda lists must be the same"):
EquilibriumProtocol(
lambda_sterics=lambda_sterics,
lambda_electrostatics=lambda_electrostatics,
)
class TestSwitchingProtocol:
def test_unit_validation(self):
protocol = SwitchingProtocol(
n_electrostatic_steps=6250,
n_steps_per_electrostatic_step=1,
n_steric_steps=18750,
n_steps_per_steric_step=1,
timestep=0.002 * unit.picoseconds,
thermostat_friction=0.003 / unit.femtoseconds,
)
assert is_close(protocol.timestep, 2.0)
assert is_close(protocol.thermostat_friction, 3.0)
class TestDeltaG:
def test_add(self):
value_a = DeltaG(value=1.0, std_error=2.0)
value_b = DeltaG(value=3.0, std_error=4.0)
result = value_a + value_b
assert is_close(result.value, 4.0)
assert is_close(result.std_error, numpy.sqrt(20))
def test_sub(self):
value_a = DeltaG(value=1.0, std_error=2.0)
value_b = DeltaG(value=3.0, std_error=4.0)
result = value_b - value_a
assert is_close(result.value, 2.0)
assert is_close(result.std_error, numpy.sqrt(20))
class TestTransferFreeEnergyResult:
@pytest.fixture()
def free_energy_result(self, argon_eq_schema):
return TransferFreeEnergyResult(
input_schema=argon_eq_schema,
delta_g_solvent_a=DeltaG(value=1.0, std_error=2.0),
delta_g_solvent_b=DeltaG(value=3.0, std_error=4.0),
)
def test_delta_g_from_a_to_b(self, free_energy_result):
delta_g = free_energy_result.delta_g_from_a_to_b
assert is_close(delta_g.value, -2.0)
assert is_close(delta_g.std_error, numpy.sqrt(20))
def test_delta_g_from_b_to_a(self, free_energy_result):
delta_g = free_energy_result.delta_g_from_b_to_a
assert is_close(delta_g.value, 2.0)
assert is_close(delta_g.std_error, numpy.sqrt(20))
def test_boltzmann_temperature(self, free_energy_result):
value = free_energy_result._boltzmann_temperature
assert is_close(value, 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R)
def test_delta_g_from_a_to_b_with_units(self, free_energy_result):
value, std_error = free_energy_result.delta_g_from_a_to_b_with_units
assert is_close(value, -2.0 * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R)
assert is_close(
std_error, numpy.sqrt(20) * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R
)
def test_delta_g_from_b_to_a_with_units(self, free_energy_result):
value, std_error = free_energy_result.delta_g_from_b_to_a_with_units
assert is_close(value, 2.0 * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R)
assert is_close(
std_error, numpy.sqrt(20) * 85.5 * unit.kelvin * unit.MOLAR_GAS_CONSTANT_R
)
def test_str(self, free_energy_result):
assert (
str(free_energy_result)
== "ΔG a->b=-0.340 kcal/mol ΔG a->b std=0.760 kcal/mol"
)
def test_repr(self, free_energy_result):
assert repr(free_energy_result) == (
"TransferFreeEnergyResult(ΔG a->b=-0.340 kcal/mol ΔG a->b std=0.760 kcal/mol)"
)
| 30.473029
| 90
| 0.631672
| 7,008
| 0.953729
| 0
| 0
| 1,490
| 0.202776
| 0
| 0
| 474
| 0.064507
|
a59f046e4edcd4dce70590e6b4351f5262990e72
| 868
|
py
|
Python
|
archiv/tables.py
|
acdh-oeaw/gtrans
|
6f56b1d09de0cad503273bf8a01cd81e25220524
|
[
"MIT"
] | 1
|
2020-03-15T16:14:02.000Z
|
2020-03-15T16:14:02.000Z
|
archiv/tables.py
|
acdh-oeaw/gtrans
|
6f56b1d09de0cad503273bf8a01cd81e25220524
|
[
"MIT"
] | 14
|
2018-11-09T08:34:23.000Z
|
2022-02-10T08:15:53.000Z
|
archiv/tables.py
|
acdh-oeaw/gtrans
|
6f56b1d09de0cad503273bf8a01cd81e25220524
|
[
"MIT"
] | null | null | null |
import django_tables2 as tables
from django_tables2.utils import A
from entities.models import *
from archiv.models import *
class ArchResourceTable(tables.Table):
id = tables.LinkColumn(
'archiv:archresource_detail',
args=[A('pk')], verbose_name='ID'
)
title = tables.LinkColumn(
'archiv:archresource_detail',
args=[A('pk')], verbose_name='Titel'
)
mentioned_person = tables.ManyToManyColumn()
mentioned_inst = tables.ManyToManyColumn()
mentioned_place = tables.ManyToManyColumn()
creator_person = tables.ManyToManyColumn()
creator_inst = tables.ManyToManyColumn()
subject_norm = tables.ManyToManyColumn()
creators = tables.ManyToManyColumn()
class Meta:
model = ArchResource
sequence = ('id', 'title',)
attrs = {"class": "table table-responsive table-hover"}
| 31
| 63
| 0.687788
| 740
| 0.852535
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.148618
|
a5a01c24d79e75ecbeea7e8b127b09c3ad1d05e0
| 376
|
py
|
Python
|
accounts/migrations/0005_auto_20200227_0418.py
|
inclusive-design/coop-map-directory-index
|
b215ea95677dc90fafe60eaa494a4fd6af0431fb
|
[
"BSD-3-Clause"
] | 1
|
2020-01-28T16:16:49.000Z
|
2020-01-28T16:16:49.000Z
|
accounts/migrations/0005_auto_20200227_0418.py
|
inclusive-design/coop-map-directory-index
|
b215ea95677dc90fafe60eaa494a4fd6af0431fb
|
[
"BSD-3-Clause"
] | 114
|
2020-02-12T20:22:07.000Z
|
2021-09-22T18:29:50.000Z
|
accounts/migrations/0005_auto_20200227_0418.py
|
inclusive-design/coop-map-directory-index
|
b215ea95677dc90fafe60eaa494a4fd6af0431fb
|
[
"BSD-3-Clause"
] | 4
|
2020-04-21T21:09:25.000Z
|
2021-01-08T14:18:58.000Z
|
# Generated by Django 3.0.3 on 2020-02-27 04:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20200226_2329'),
]
operations = [
migrations.AlterModelOptions(
name='usersocialnetwork',
options={'verbose_name': "User's Social Network"},
),
]
| 20.888889
| 62
| 0.619681
| 291
| 0.773936
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.367021
|
a5a08838db67fdc32c63308d4dd034cb11ff2a45
| 3,745
|
py
|
Python
|
src/FSG/WordEmbedding.py
|
handsomebrothers/Callback2Vec
|
370adbcfcc229d385ba9c8c581489b703a39ca85
|
[
"MIT"
] | null | null | null |
src/FSG/WordEmbedding.py
|
handsomebrothers/Callback2Vec
|
370adbcfcc229d385ba9c8c581489b703a39ca85
|
[
"MIT"
] | null | null | null |
src/FSG/WordEmbedding.py
|
handsomebrothers/Callback2Vec
|
370adbcfcc229d385ba9c8c581489b703a39ca85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import multiprocessing
from gensim.models import Word2Vec
import csv
def embedding_sentences(sentences, embedding_size = 64, window = 3, min_count = 0, file_to_load = None, file_to_save = None):
'''
    embedding_size : word embedding dimension
    window : context window size
    min_count : words with a frequency lower than min_count are dropped
'''
if file_to_load is not None:
w2vModel = Word2Vec.load(file_to_load) # load model
else:
w2vModel = Word2Vec(sentences, size = embedding_size, window = window, min_count = min_count, workers = multiprocessing.cpu_count(),seed=200)
if file_to_save is not None:
w2vModel.save(file_to_save) # Save Model
return w2vModel
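# Minimal usage sketch (the corpus and values below are illustrative, not from this project):
#   sentences = [['on', 'click', 'listener'], ['on', 'create', 'view']]
#   model = embedding_sentences(sentences, embedding_size=32, window=3, min_count=0)
#   vec = model['click']  # 32-dimensional vector for a single token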
# This function is used to represent a sentence as a vector (corresponding to representing a method as a vector)
def get_method_vector(sentence,w2vModel):
sentence_vector=[]
for word in sentence:
sentence_vector.append(w2vModel[word])#Word vectors for adding each word
return sentence_vector
# This function is used to represent a word as a vector (corresponding to a word in method)
def get_word_vector(word,w2vModel):
return w2vModel[word]
# This function is used to get the vector of a text (corresponding to the word vector of class or apk)
def get_apk_class_vector(document,w2vModel):
all_vectors = []
embeddingDim = w2vModel.vector_size
    # embedding dimension
embeddingUnknown = [0 for i in range(embeddingDim)]
for sentence in document:
this_vector = []
for word in sentence:
if word in w2vModel.wv.vocab:
this_vector.append(w2vModel[word])
else:
this_vector.append(embeddingUnknown)
all_vectors.append(this_vector)
return all_vectors
# This function is used to obtain the similarity between two sentences,
# with the help of python's own function to calculate the similarity.
def get_two_sentence_simility(sentence1,sentence2,w2vModel):
sim = w2vModel.n_similarity(sentence1, sentence2)
return sim
# Used to build corpus
def bulid_word2vec_model():#Used to build word 2vec model
model = embedding_sentences(get_corpus_(), embedding_size=32,
min_count=0,
file_to_save='D:\\APK_科研\\word2vec\\apk_trained_word2vec.model')
return model
# Used to get the model that has been created
def get_already_word2vec_model(file_to_load):
model = Word2Vec.load(file_to_load)
return model
# Used for acquiring corpus
def get_corpus():
all_data=[]
data_readers=csv.reader(open('D:/new_amd_callback_data1.csv'))
for reader in data_readers:
if len(reader)>1:
# print(reader)
all_data.append(reader)
amd_data_readers=csv.reader(open('D:/new_callback_data1.csv'))
for amd_reader in amd_data_readers:
if len(amd_reader)>1:
# print(amd_reader)
all_data.append(amd_reader)
print('over')
return all_data
def get_corpus_():
all_data = []
data_readers = csv.reader(open('D:/new_amd_callback_data.csv'))
for reader in data_readers:
if len(reader) > 1:
# print(reader)
all_data.append(reader)
amd_data_readers = csv.reader(open('D:/new_amd_callback_data1.csv'))
for amd_reader in amd_data_readers:
if len(amd_reader) > 1:
# print(amd_reader)
all_data.append(amd_reader)
amd_data_readers_=csv.reader(open('D:/new_callback_data.csv'))
for amd_reader_ in amd_data_readers_:
if len(amd_reader_)>1:
all_data.append(amd_reader_)
print('over')
return all_data
if __name__ == "__main__":
bulid_word2vec_model()
| 40.706522
| 149
| 0.687316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,108
| 0.294916
|
a5a1b481c21e6820b7064b6612f4c7a3b1370fc4
| 10,914
|
py
|
Python
|
hearthstone/player.py
|
dianarvp/stone_ground_hearth_battles
|
450e70eaef21b543be579a6d696676fb148a99b0
|
[
"Apache-2.0"
] | null | null | null |
hearthstone/player.py
|
dianarvp/stone_ground_hearth_battles
|
450e70eaef21b543be579a6d696676fb148a99b0
|
[
"Apache-2.0"
] | null | null | null |
hearthstone/player.py
|
dianarvp/stone_ground_hearth_battles
|
450e70eaef21b543be579a6d696676fb148a99b0
|
[
"Apache-2.0"
] | null | null | null |
import itertools
import typing
from collections import defaultdict
from typing import Optional, List, Callable, Type
from hearthstone.cards import MonsterCard, CardEvent, Card
from hearthstone.events import BuyPhaseContext, EVENTS
from hearthstone.hero import EmptyHero
from hearthstone.monster_types import MONSTER_TYPES
from hearthstone.triple_reward_card import TripleRewardCard
if typing.TYPE_CHECKING:
from hearthstone.tavern import Tavern
from hearthstone.hero import Hero
from hearthstone.randomizer import Randomizer
class BuyPhaseEvent:
pass
StoreIndex = typing.NewType("StoreIndex", int)
HandIndex = typing.NewType("HandIndex", int)
BoardIndex = typing.NewType("BoardIndex", int)
class Player:
def __init__(self, tavern: 'Tavern', name: str, hero_options: List['Hero']):
self.name = name
self.tavern = tavern
self.hero = None
self.hero_options = hero_options
self.health = None
self.tavern_tier = 1
self.coins = 0
self.triple_rewards = []
self.discovered_cards: List[MonsterCard] = []
self.maximum_board_size = 7
self.maximum_hand_size = 10
self.refresh_store_cost = 1
self._tavern_upgrade_costs = (0, 5, 7, 8, 9, 10)
self.tavern_upgrade_cost = 5
self.hand: List[MonsterCard] = []
self.in_play: List[MonsterCard] = []
self.store: List[MonsterCard] = []
self.frozen = False
self.counted_cards = defaultdict(lambda: 0)
@staticmethod
def new_player_with_hero(tavern: 'Tavern', name: str, hero: Optional['Hero'] = None) -> 'Player':
if hero is None:
hero = EmptyHero()
player = Player(tavern, name, [hero])
player.choose_hero(hero)
return player
@property
def coin_income_rate(self):
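        # coin income scales with the turn counter and is capped at 10 per turn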
return min(self.tavern.turn_count + 3, 10)
def player_main_step(self):
self.draw()
# player can:
# rearrange monsters
# summon monsters
# buy from the store
# freeze the store
# refresh the store
# sell monsters
# set fight ready
def apply_turn_start_income(self):
self.coins = self.coin_income_rate
def decrease_tavern_upgrade_cost(self):
self.tavern_upgrade_cost = max(0, self.tavern_upgrade_cost - 1)
def upgrade_tavern(self):
assert self.validate_upgrade_tavern()
self.coins -= self.tavern_upgrade_cost
self.tavern_tier += 1
if self.tavern_tier < self.max_tier():
self.tavern_upgrade_cost = self._tavern_upgrade_costs[self.tavern_tier]
def validate_upgrade_tavern(self) -> bool:
if self.tavern_tier >= self.max_tier():
return False
if self.coins < self.tavern_upgrade_cost:
return False
return True
def summon_from_hand(self, index: HandIndex, targets: Optional[List[BoardIndex]] = None):
# TODO: add (optional?) destination index parameter for Defender of Argus
# TODO: make sure that the ordering of monster in hand and monster.battlecry are correct
# TODO: Jarett can monster be event target
if targets is None:
targets = []
assert self.validate_summon_from_hand(index, targets)
card = self.hand.pop(index)
self.in_play.append(card)
if card.golden:
self.triple_rewards.append(TripleRewardCard(min(self.tavern_tier + 1, 6)))
if card.magnetic:
self.check_magnetic(card)
target_cards = [self.in_play[target] for target in targets]
self.broadcast_buy_phase_event(CardEvent(card, EVENTS.SUMMON_BUY, target_cards))
def validate_summon_from_hand(self, index: HandIndex, targets: Optional[List[BoardIndex]] = None) -> bool:
if targets is None:
targets = []
# TODO: Jack num_battlecry_targets should only accept 0,1,2
if index not in range(len(self.hand)):
return False
card = self.hand[index]
if not self.room_on_board():
return False
valid_targets = [target_index for target_index, target_card in enumerate(self.in_play) if
card.validate_battlecry_target(target_card)]
num_possible_targets = min(len(valid_targets), card.num_battlecry_targets)
if len(targets) != num_possible_targets:
return False
if len(set(targets)) != len(targets):
return False
for target in targets:
if target not in valid_targets:
return False
return True
def play_triple_rewards(self):
if not self.triple_rewards:
return
discover_tier = self.triple_rewards.pop(-1).level
self.draw_discover(lambda card: card.tier == discover_tier)
def validate_triple_rewards(self) -> bool:
return bool(self.triple_rewards)
def draw_discover(self, predicate: Callable[[Card], bool]):
discoverables = [card for card in self.tavern.deck.all_cards() if predicate(card)]
for _ in range(3):
self.discovered_cards.append(self.tavern.randomizer.select_discover_card(discoverables))
discoverables.remove(self.discovered_cards[-1])
self.tavern.deck.remove_card(self.discovered_cards[-1])
def select_discover(self, card: Card):
assert (card in self.discovered_cards)
assert (isinstance(card, MonsterCard)) # TODO: discover other card types
self.discovered_cards.remove(card)
self.hand.append(card)
self.tavern.deck.return_cards(itertools.chain.from_iterable([card.dissolve() for card in self.discovered_cards]))
self.discovered_cards = []
self.check_golden(type(card))
def summon_from_void(self, monster: MonsterCard):
if self.room_on_board():
self.in_play.append(monster)
self.check_golden(type(monster))
self.broadcast_buy_phase_event(CardEvent(monster, EVENTS.SUMMON_BUY))
def room_on_board(self):
return len(self.in_play) < self.maximum_board_size
def draw(self):
if self.frozen:
self.frozen = False
else:
self.return_cards()
number_of_cards = 3 + self.tavern_tier // 2 - len(self.store)
self.store.extend([self.tavern.deck.draw(self) for _ in range(number_of_cards)])
def purchase(self, index: StoreIndex):
# check if the index is valid
assert self.validate_purchase(index)
card = self.store.pop(index)
self.coins -= card.coin_cost
self.hand.append(card)
event = CardEvent(card, EVENTS.BUY)
self.broadcast_buy_phase_event(event)
self.check_golden(type(card))
def validate_purchase(self, index: StoreIndex) -> bool:
if index not in range(len(self.store)):
return False
if self.coins < self.store[index].coin_cost:
return False
if not self.room_in_hand():
return False
return True
def check_golden(self, check_card: Type[MonsterCard]):
cards = [card for card in self.in_play + self.hand if isinstance(card, check_card) and not card.golden]
assert len(cards) <= 3, f"fnord{cards}"
if len(cards) == 3:
for card in cards:
if card in self.in_play:
self.in_play.remove(card)
if card in self.hand:
self.hand.remove(card)
golden_card = check_card()
golden_card.golden_transformation(cards)
self.hand.append(golden_card)
def check_magnetic(self, card):
# TODO: decide if magnetic should be implemented using targets
index = self.in_play.index(card)
assert card.magnetic
if index + 1 in range(len(self.in_play)) and self.in_play[index + 1].monster_type in (MONSTER_TYPES.MECH, MONSTER_TYPES.ALL):
mech = self.in_play[index + 1]
self.in_play.remove(card)
mech.magnetic_transformation(card)
def reroll_store(self):
assert self.validate_reroll()
self.coins -= self.refresh_store_cost
self.return_cards()
self.draw()
def validate_reroll(self) -> bool:
return self.coins >= self.refresh_store_cost
def return_cards(self):
self.tavern.deck.return_cards(itertools.chain.from_iterable([card.dissolve() for card in self.store]))
self.store = []
def freeze(self):
self.frozen = True
def _sell_minion(self, location: List[MonsterCard], index: int):
assert self._validate_sell_minion(location, index)
self.broadcast_buy_phase_event(CardEvent(location[index], EVENTS.SELL))
card = location.pop(index)
self.coins += card.redeem_rate
self.tavern.deck.return_cards(card.dissolve())
def sell_hand_minion(self, index: HandIndex):
return self._sell_minion(self.hand, index)
def sell_board_minion(self, index: BoardIndex):
return self._sell_minion(self.in_play, index)
@staticmethod
def _validate_sell_minion(location: List[MonsterCard], index: int) -> bool:
return index in range(len(location))
def validate_sell_hand_minion(self, index: HandIndex) -> bool:
return self._validate_sell_minion(self.hand, index)
def validate_sell_board_minion(self, index: BoardIndex) -> bool:
return self._validate_sell_minion(self.in_play, index)
def hero_power(self):
self.hero.hero_power(BuyPhaseContext(self, self.tavern.randomizer))
def validate_hero_power(self) -> bool:
return self.hero.hero_power_valid(BuyPhaseContext(self, self.tavern.randomizer))
def broadcast_buy_phase_event(self, event: CardEvent, randomizer: Optional['Randomizer'] = None):
self.hero.handle_event(event, BuyPhaseContext(self, randomizer or self.tavern.randomizer))
for card in self.in_play.copy():
card.handle_event(event, BuyPhaseContext(self, randomizer or self.tavern.randomizer))
for card in self.hand.copy():
card.handle_event_in_hand(event, BuyPhaseContext(self, randomizer or self.tavern.randomizer))
def hand_size(self):
return len(self.hand) + len(self.triple_rewards)
def room_in_hand(self):
return self.hand_size() < self.maximum_hand_size
def max_tier(self):
return len(self._tavern_upgrade_costs)
def choose_hero(self, hero: 'Hero'):
assert(self.validate_choose_hero(hero))
self.hero = hero
self.hero_options = []
self.health = self.hero.starting_health()
self._tavern_upgrade_costs = self.hero.tavern_upgrade_costs()
self.tavern_upgrade_cost = self.hero.tavern_upgrade_costs()[1]
def validate_choose_hero(self, hero: 'Hero'):
return self.hero is None and hero in self.hero_options
| 38.702128
| 133
| 0.660711
| 10,229
| 0.937237
| 0
| 0
| 502
| 0.045996
| 0
| 0
| 647
| 0.059282
|
a5a2a13b3d7e2462a415df9e5bf700f91ae466fd
| 12,743
|
py
|
Python
|
PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 6
|
2021-09-29T15:46:55.000Z
|
2021-12-14T18:39:51.000Z
|
PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | null | null | null |
PyStationB/libraries/ABEX/abex/optimizers/zoom_optimizer.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 3
|
2021-09-27T10:35:20.000Z
|
2021-10-02T17:53:07.000Z
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""A submodule implementing "zooming in" (Biological) optimization strategy.
This optimization strategy has a single hyperparameter :math:`s`, called the *shrinking factor*.
It consists of of the following steps:
1. The optimization space is a hypercuboid
.. math::
C = [a_1, b_1] \\times [a_2, b_2] \\times \\cdots \\times [a_n, b_n].
2. Find the optimum :math:`x=(x_1, x_2, \\dots, x_n)` among the already collected samples.
3. Construct a new hypercuboid :math:`D` centered at :math:`x`. If this is the :math:`N`th optimization step, the
volume of :math:`D` is given by
.. math::
\\mathrm{vol}\\, D = s^N \\cdot \\mathrm{vol}\\, C
Step :math:`N` is either provided in the configuration file or is estimated as ``n_samples/batch_size``.
4. If :math:`D` is not a subset of :math:`C`, we translate it by a vector.
5. To suggest a new batch we sample the hypercuboid :math:`D`. Many different sampling methods are available, see
:ref:`abex.sample_designs` for this. For example, we can construct a grid, sample in a random way or use Latin
or Sobol sampling.
"""
from pathlib import Path
from typing import List, Tuple
import abex.optimizers.optimizer_base as base
import numpy as np
import pandas as pd
from abex import space_designs as designs
from abex.dataset import Dataset
from abex.settings import OptimizationStrategy, ZoomOptSettings
from emukit.core import ContinuousParameter, ParameterSpace
Interval = Tuple[float, float] # Endpoints of an interval
Hypercuboid = List[Interval] # Optimization space is represented by a rectangular box
class ZoomOptimizer(base.OptimizerBase):
strategy_name = OptimizationStrategy.ZOOM.value
def run(self) -> Tuple[Path, pd.DataFrame]:
"""
Optimizes function using "zooming in" strategy -- around observed maximum a new "shrunk" space is selected. We
sample this space (e.g. using grid sampling or random sampling) to suggest new observations.
Note:
This method should not work well with very noisy functions or functions having a non-unique maximum. A more
robust alternative (as Bayes optimization) should be preferred. On the other hand, this method is much
faster to compute.
Returns:
path to the CSV with locations of new samples to be collected
data frame with locations of new samples to be collected
Raises:
ValueError, if batch size is less than 1
"""
# Construct the data set
dataset: Dataset = self.construct_dataset()
assert (
self.config.zoomopt is not None
), "You need to set the 'zoomopt' field in the config to use Zoom optimizer."
batch_transformed_space: np.ndarray = _suggest_samples(dataset=dataset, settings=self.config.zoomopt)
# Transform the batch back to original space
batch_original_space: pd.DataFrame = self.suggestions_to_original_space(
dataset=dataset, new_samples=batch_transformed_space
)
# Save the batch to the disk and return it
batch_original_space.to_csv(self.config.experiment_batch_path, index=False)
# Save the inferred optimum
optimum = evaluate_optimum(dataset)
optimum.to_csv(self.config.results_dir / "optima.csv", index=False)
return self.config.experiment_batch_path, batch_original_space
def evaluate_optimum(dataset: Dataset) -> pd.DataFrame:
"""
Return the optimum as inferred by the Zoom Opt. algorithm. The inferred optimum is taken as the location
of the observed sample with highest observed objective.
Args:
dataset (dataset.Dataset): Dataset with the data observed so-far.
Returns:
pd.DataFrame: A DataFrame with a single row: the inputs at the inferred optimum
"""
# Get the index of data point with highest observed objective
optimum_idx = dataset.pretransform_df[dataset.pretransform_output_name].argmax()
# Get the inputs of the data point with highest observed objective
optimum_loc = dataset.pretransform_df[dataset.pretransform_input_names].iloc[[optimum_idx]]
return optimum_loc
def _suggest_samples(dataset: Dataset, settings: ZoomOptSettings) -> np.ndarray:
"""Suggests a new batch of samples.
Currently this method doesn't allow categorical inputs.
Returns:
a batch of suggestions. Shape (batch_size, n_inputs).
Raises:
ValueError, if batch size is less than 1
NotImplementedError, if any categorical inputs are present
"""
if settings.batch < 1:
raise ValueError(f"Use batch size at least 1. (Was {settings.batch}).") # pragma: no cover
continuous_dict, categorical_dict = dataset.parameter_space
# If any categorical variable is present, we raise an exception. In theory they should be represented by one-hot
# encodings, but I'm not sure how to retrieve the bounds of this space and do optimization within it (the
# best way is probably to optimize it in an unconstrained space and map it to one-hot vectors using softmax).
# Moreover, in BayesOpt there is iteration over contexts.
if categorical_dict:
raise NotImplementedError("This method doesn't work with categorical inputs right now.") # pragma: no cover
# It seems that continuous_dict.values() contains pandas series instead of tuples, so we need to map over it
# to retrieve the parameter space
original_space: Hypercuboid = [(a, b) for a, b in continuous_dict.values()]
# Find the location of the optimum. We will shrink the space around it
optimum: np.ndarray = _get_optimum_location(dataset)
# Estimate how many optimization iterations were performed.
step_number: int = settings.n_step or _estimate_step_number(
n_points=len(dataset.output_array), batch_size=settings.batch
)
# Convert to per-batch shrinking factor if a per-iteration shrinking factor supplied
per_batch_shrinking_factor = (
settings.shrinking_factor ** settings.batch if settings.shrink_per_iter else settings.shrinking_factor
)
# Calculate by what factor each dimension of the hypercube should be shrunk
shrinking_factor_per_dim: float = _calculate_shrinking_factor(
initial_shrinking_factor=per_batch_shrinking_factor, step_number=step_number, n_dim=len(original_space)
)
# Shrink the space
new_space: Hypercuboid = [
shrink_interval(
shrinking_factor=shrinking_factor_per_dim, interval=interval, shrinking_anchor=optimum_coordinate
)
for interval, optimum_coordinate in zip(original_space, optimum)
]
# The shrunk space may be out of the original bounds (e.g. if the maximum was close to the boundary).
# Translate it.
new_space = _move_to_original_bounds(new_space=new_space, original_space=original_space)
# Sample the new space to get a batch of new suggestions.
parameter_space = ParameterSpace([ContinuousParameter(f"x{i}", low, upp) for i, (low, upp) in enumerate(new_space)])
return designs.suggest_samples(
parameter_space=parameter_space, design_type=settings.design, point_count=settings.batch
)
def _estimate_step_number(n_points: int, batch_size: int) -> int:
"""Estimates which step this is (or rather how many steps were collected previously, basing on the ratio
of number of points collected and the batch size).
Note that this method is provisional and may be replaced with a parameter in the config.
Raises:
ValueError if ``n_points`` or ``batch_size`` is less than 1
"""
if min(n_points, batch_size) < 1:
raise ValueError(
f"Both n_points={n_points} and batch_size={batch_size} must be at least 1."
) # pragma: no cover
return n_points // batch_size
def _calculate_shrinking_factor(initial_shrinking_factor: float, step_number: int, n_dim: int) -> float:
"""The length of each in interval bounding the parameter space needs to be multiplied by this number.
Args:
initial_shrinking_factor: in each step the total volume is shrunk by this amount
step_number: optimization step -- if we collected only an initial batch, this step is 1
n_dim: number of dimensions
Example:
Assume that ``initial_shrinking_factor=0.5`` and ``step_number=1``. This means that the total volume should
be multiplied by :math:`1/2`. Hence, if there are :math:`N` dimensions (``n_dim``), the length of each
bounding interval should be multiplied by :math:`1/2^{1/N}`.
However, if ``step_number=3``, each dimension should be shrunk three times, i.e. we need to multiply it by
:math:`1/2^{3/N}`.
Returns:
the shrinking factor for each dimension
"""
assert 0 < initial_shrinking_factor < 1, (
f"Shrinking factor must be between 0 and 1. " f"(Was {initial_shrinking_factor})."
)
assert step_number >= 1 and n_dim >= 1, (
f"Step number and number of dimensions must be greater than 0. "
f"(Where step_number={step_number}, n_dim={n_dim})."
)
return initial_shrinking_factor ** (step_number / n_dim)
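# Worked illustration (hypothetical numbers) of the formula above:
#   _calculate_shrinking_factor(0.5, step_number=1, n_dim=2) == 0.5 ** 0.5, i.e. about 0.707
#   _calculate_shrinking_factor(0.5, step_number=2, n_dim=2) == 0.5 ** 1.0 == 0.5
# so after two steps in two dimensions each interval is halved, which multiplies the
# total volume by 1/4 -- consistent with shrinking the volume by 1/2 per step.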
def _get_optimum_location(dataset: Dataset) -> np.ndarray:
"""Returns the position (in the transformed space) of the maximum. Shape (n_inputs,)."""
# Retrieve the observations
X, Y = dataset.inputs_array, dataset.output_array
# Return the location of the maximum
best_index = int(np.argmax(Y))
return X[best_index, :]
def shrink_interval(shrinking_factor: float, interval: Interval, shrinking_anchor: float) -> Interval:
"""Shrinks a one-dimensional interval around the ``shrinking_anchor``. The new interval
is centered around the optimum.
Note:
the shrunk interval may not be contained in the initial one. (E.g. if the shrinking anchor is near the
boundary).
Args:
shrinking_factor: by this amount the length interval is multiplied. Expected to be between 0 and 1
interval: endpoints of the interval
shrinking_anchor: point around which the interval will be shrunk
Returns:
endpoints of the shrunk interval
"""
neighborhood = shrinking_factor * (interval[1] - interval[0])
return shrinking_anchor - neighborhood / 2, shrinking_anchor + neighborhood / 2
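# Worked illustration (hypothetical numbers):
#   shrink_interval(shrinking_factor=0.5, interval=(0.0, 10.0), shrinking_anchor=9.0)
#   returns (6.5, 11.5): half the original length, centered on the anchor.
# Note that this result sticks out of the original [0, 10] bounds; callers are expected
# to translate it back, e.g. via shift_to_within_parameter_bounds below.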
def _validate_interval(interval: Interval) -> None:
"""Validates whether an interval is non-empty.
Note:
one-point interval :math:`[a, a]` is allowed
Raises:
ValueError: if the end of the interval is less than its origin
"""
origin, end = interval
if end < origin:
raise ValueError(f"Interval [{origin}, {end}] is not a proper one.") # pragma: no cover
def interval_length(interval: Interval) -> float:
"""Returns interval length."""
_validate_interval(interval)
return interval[1] - interval[0]
def shift_to_within_parameter_bounds(new_interval: Interval, old_interval: Interval) -> Interval:
"""Translates ``new_interval`` to ``old_interval``, without changing its volume.
Raises:
ValueError: if translation is not possible.
"""
if interval_length(new_interval) > interval_length(old_interval):
raise ValueError( # pragma: no cover
f"Translation is not possible. New interval {new_interval} is longer "
f"than the original one {old_interval}."
)
new_min, new_max = new_interval
old_min, old_max = old_interval
if old_min <= new_min and new_max <= old_max: # In this case we don't need to translate the interval
return new_interval
else:
if new_min < old_min: # Figure out the direction of the translation
translation = old_min - new_min
else:
translation = old_max - new_max
return new_min + translation, new_max + translation
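# Worked illustration (hypothetical numbers), continuing the shrink_interval example:
#   shift_to_within_parameter_bounds(new_interval=(6.5, 11.5), old_interval=(0.0, 10.0))
#   returns (5.0, 10.0): the interval is translated left by 1.5 and its length 5.0 is preserved.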
def _move_to_original_bounds(new_space: Hypercuboid, original_space: Hypercuboid) -> Hypercuboid:
"""Translates ``new_space`` to be a subset of the ``original_space``, without affecting its volume."""
moved_bounds: Hypercuboid = []
for new_interval, old_interval in zip(new_space, original_space):
moved_bounds.append(shift_to_within_parameter_bounds(new_interval=new_interval, old_interval=old_interval))
return moved_bounds
| 41.106452
| 120
| 0.697167
| 1,815
| 0.142431
| 0
| 0
| 0
| 0
| 0
| 0
| 7,464
| 0.585733
|
a5a44f9a6a387924ac0536e279f50da03dd8ba3f
| 1,146
|
py
|
Python
|
Labs/lab4/l4e3.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
Labs/lab4/l4e3.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
Labs/lab4/l4e3.py
|
felixchiasson/ITI1520
|
4208904bf7576433313524ebd1c1bdb9f49277f2
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
###############################################################################
# File Name : l4e3.py
# Created By : Félix Chiasson (7138723)
# Creation Date : [2015-10-06 11:43]
# Last Modified : [2015-10-06 11:56]
# Description : Asks user to guess randomly generated number
###############################################################################
from random import randint
def devine(r):
    correct = False
    essai = 0
    print("Let's play a game! Devinez un nombre entre 1 et 10.")
    while not correct:
        reponse = int(input("Quel est le nombre? "))
        if reponse == r:
            essai = essai + 1
            print("Bravo! Vous avez réussi après", essai, "essai(s)")
            correct = True
        elif 1 <= reponse <= 10:
            if reponse > r:
                print("Plus bas!")
            if reponse < r:
                print("Plus haut!")
            essai = essai + 1
        else:
            print("Veuillez entrer un chiffre entre 1 et 10!")
r = randint(1, 10)
devine(r)
| 35.8125
| 79
| 0.447644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 638
| 0.555265
|
a5a4a070bcfd5efb385e2904922ea624312e4682
| 2,984
|
py
|
Python
|
python/datamongo/text/dmo/text_query_windower.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datamongo/text/dmo/text_query_windower.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datamongo/text/dmo/text_query_windower.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import string
import pandas as pd
from pandas import DataFrame
from base import BaseObject
class TextQueryWindower(BaseObject):
""" Window Text Query Results
"""
__exclude = set(string.punctuation)
def __init__(self,
query_results: dict,
is_debug: bool = False):
"""
Created:
craig.trim@ibm.com
16-Oct-2019
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1122#issuecomment-15340437
        :param query_results:
            the text query results
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._query_results = query_results
def _to_text(self):
"""
Purpose:
Transform Query results into pure text
:return:
return a list of text results only
"""
values = set()
for cnum in self._query_results:
[values.add(d['value']) for d in self._query_results[cnum]]
return sorted(values)
def _tokens(self,
term: str,
input_text: str) -> list:
input_text = input_text.lower().replace('\t', ' ')
input_text = ''.join(ch for ch in input_text if ch not in self.__exclude)
tokens = input_text.split(' ')
tokens = [x.strip() for x in tokens if x and len(x.strip())]
tokens = [x.lower() for x in tokens]
if ' ' not in term: # return unigrams
return tokens
if term.count(' ') == 1: # return bigrams
s = set()
for i in range(0, len(tokens)):
if i + 1 < len(tokens):
s.add(f"{tokens[i]} {tokens[i + 1]}")
return sorted(s)
raise NotImplementedError
def process(self,
term: str,
window_size: int = 5) -> DataFrame:
"""
:param term:
:param window_size:
:return:
"""
master = []
term = term.lower().strip()
for input_text in self._to_text():
tokens = self._tokens(term, input_text)
n = tokens.index(term)
def pos_x():
if n - window_size >= 0:
return n - window_size
return 0
def pos_y():
if n + window_size < len(tokens):
return n + window_size
return len(tokens)
x = pos_x()
y = pos_y()
def l_context():
return ' '.join(tokens[x:n]).strip()
def r_context():
return ' '.join(tokens[n + 1:y]).strip()
master.append({
"A": l_context(),
"B": tokens[n],
"C": r_context()})
return pd.DataFrame(master).sort_values(
by=['A'], ascending=False)
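# Hypothetical usage sketch; the query_results shape below is an assumption inferred
# from _to_text() (a dict mapping case numbers to lists of {"value": <text>} records):
#   windower = TextQueryWindower(
#       query_results={"doc-1": [{"value": "The quick brown fox jumps over the lazy dog"}]},
#       is_debug=False)
#   df = windower.process(term="brown", window_size=2)
#   # each row holds the left context ("A"), the matched term ("B") and the right context ("C")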
| 25.947826
| 103
| 0.499665
| 2,840
| 0.951743
| 0
| 0
| 0
| 0
| 0
| 0
| 703
| 0.23559
|
a5a5088a8ab15596ca84187c9c0e0627828850f9
| 683
|
py
|
Python
|
CondTools/L1Trigger/python/L1ConfigTSCKeys_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
CondTools/L1Trigger/python/L1ConfigTSCKeys_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
CondTools/L1Trigger/python/L1ConfigTSCKeys_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from L1TriggerConfig.CSCTFConfigProducers.CSCTFObjectKeysOnline_cfi import *
from L1TriggerConfig.DTTrackFinder.L1DTTFTSCObjectKeysOnline_cfi import *
from L1TriggerConfig.RPCTriggerConfig.L1RPCObjectKeysOnline_cfi import *
from L1TriggerConfig.GMTConfigProducers.L1MuGMTParametersKeysOnlineProd_cfi import *
from L1TriggerConfig.L1ScalesProducers.L1MuTriggerScaleKeysOnlineProd_cfi import *
L1MuTriggerScaleKeysOnlineProd.subsystemLabel = 'GMTScales'
from L1TriggerConfig.RCTConfigProducers.L1RCTObjectKeysOnline_cfi import *
from L1TriggerConfig.GctConfigProducers.L1GctTSCObjectKeysOnline_cfi import *
from L1TriggerConfig.L1GtConfigProducers.l1GtTscObjectKeysOnline_cfi import *
| 68.3
| 84
| 0.90776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.016105
|
a5a553d43dc2a036ccb015ad21d1dcf2af2ae50c
| 640
|
py
|
Python
|
hackerrank/interview_prep/making_anagrams.py
|
luojxxx/CodingPractice
|
bac357aaddbda8e6e73a49c36f2eefd4304b336d
|
[
"MIT"
] | null | null | null |
hackerrank/interview_prep/making_anagrams.py
|
luojxxx/CodingPractice
|
bac357aaddbda8e6e73a49c36f2eefd4304b336d
|
[
"MIT"
] | null | null | null |
hackerrank/interview_prep/making_anagrams.py
|
luojxxx/CodingPractice
|
bac357aaddbda8e6e73a49c36f2eefd4304b336d
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/ctci-making-anagrams
from collections import Counter
def number_needed(a, b):
aCounts = Counter(a)
bCounts = Counter(b)
aSet = set(aCounts)
bSet = set(bCounts)
similar = aSet.intersection(bSet)
differences = aSet.symmetric_difference(bSet)
matchingKeysDiff = sum([ abs(aCounts[key] - bCounts[key]) for key in similar ])
differentKeysDiff = 0
for key in differences:
if key in aCounts:
differentKeysDiff += aCounts[key]
if key in bCounts:
differentKeysDiff += bCounts[key]
return matchingKeysDiff + differentKeysDiff
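# Hypothetical usage example with the classic HackerRank sample: "cde" and "abc"
# share only "c", so 4 characters must be removed in total.
if __name__ == '__main__':
    print(number_needed('cde', 'abc')) # 4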
| 29.090909
| 83
| 0.678125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.09375
|
a5a5adab4d37dc9f239bb54f261403d5485bdb40
| 803
|
py
|
Python
|
DongbinNa/19/pt4.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
DongbinNa/19/pt4.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
DongbinNa/19/pt4.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
n = int(input())
numbers = list(map(int, input().split()))
add, sub, mul, div = map(int, input().split())
def dfs(now, i):
global max_num, min_num, add, sub, mul, div
if i == n:
max_num = max(max_num, now)
min_num = min(min_num, now)
else:
if add > 0:
add -= 1
dfs(now + numbers[i], i + 1)
add += 1
if sub > 0:
sub -= 1
dfs(now - numbers[i], i + 1)
sub += 1
if mul > 0:
mul -= 1
dfs(now * numbers[i], i + 1)
mul += 1
if div > 0:
div -= 1
dfs(int(now / numbers[i]), i + 1)
div += 1
min_num = 1e9
max_num = -1e9
dfs(numbers[0], 1)
print(max_num)
print(min_num)
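# Illustrative input/output sketch (hypothetical values): with the input
#   2
#   5 6
#   0 0 1 0
# only one multiplication is available, so the DFS evaluates 5 * 6 and both
# printed results are 30.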
| 22.305556
| 48
| 0.414695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a5a7f71a8d3d53892df66d8802c0d53865e70be7
| 497
|
py
|
Python
|
app/store/migrations/0003_auto_20201127_1957.py
|
Yuehan-Wang/Marvas
|
d868a152865b9e8308db8d98642016a67b78f31d
|
[
"MIT"
] | null | null | null |
app/store/migrations/0003_auto_20201127_1957.py
|
Yuehan-Wang/Marvas
|
d868a152865b9e8308db8d98642016a67b78f31d
|
[
"MIT"
] | null | null | null |
app/store/migrations/0003_auto_20201127_1957.py
|
Yuehan-Wang/Marvas
|
d868a152865b9e8308db8d98642016a67b78f31d
|
[
"MIT"
] | 3
|
2022-01-22T16:14:13.000Z
|
2022-01-23T18:25:06.000Z
|
# Generated by Django 2.2 on 2020-11-27 13:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0002_auto_20201127_1945'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.RenameField(
model_name='category',
old_name='images',
new_name='image',
),
]
| 21.608696
| 58
| 0.573441
| 414
| 0.832998
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.291751
|
a5a81b703f6ebb1da895acb3224ef4edc9e40b99
| 19,141
|
py
|
Python
|
Graded/G3/slam/EKFSLAM.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Graded/G3/slam/EKFSLAM.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Graded/G3/slam/EKFSLAM.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
from typing import Tuple
import numpy as np
from numpy import ndarray
from dataclasses import dataclass, field
from scipy.linalg import block_diag
import scipy.linalg as la
from utils import rotmat2d
from JCBB import JCBB
import utils
import solution
@dataclass
class EKFSLAM:
Q: ndarray
R: ndarray
do_asso: bool
alphas: 'ndarray[2]' = field(default=np.array([0.001, 0.0001]))
sensor_offset: 'ndarray[2]' = field(default=np.zeros(2))
def f(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Add the odometry u to the robot state x.
Parameters
----------
x : np.ndarray, shape=(3,)
the robot state
u : np.ndarray, shape=(3,)
the odometry
Returns
-------
np.ndarray, shape = (3,)
the predicted state
"""
psikm1 = x[2]
xk = x[0] + u[0]*np.cos(psikm1) - u[1]*np.sin(psikm1)
yk = x[1] + u[0]*np.sin(psikm1) + u[1]*np.cos(psikm1)
psik = psikm1 + u[2]
xpred = np.array([xk, yk, psik])
return xpred
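    # Illustrative sanity check (hypothetical values): with the robot at the origin
    # heading along +y (psi = pi/2), the odometry u = [1, 0, 0] moves it one unit
    # forward, i.e. f([0, 0, pi/2], [1, 0, 0]) is approximately [0, 1, pi/2].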
def Fx(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate the Jacobian of f with respect to x.
Parameters
----------
x : np.ndarray, shape=(3,)
the robot state
u : np.ndarray, shape=(3,)
the odometry
Returns
-------
np.ndarray
The Jacobian of f wrt. x.
"""
#Fx = solution.EKFSLAM.EKFSLAM.Fx(self, x, u)
#return Fx
psi = x[2]
Fx = np.array([[1, 0, -u[0]*np.sin(psi) - u[1]*np.cos(psi)],
[0, 1, u[0]*np.cos(psi) - u[1]*np.sin(psi)],
[0, 0, 1]])
return Fx
def Fu(self, x: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Calculate the Jacobian of f with respect to u.
Parameters
----------
x : np.ndarray, shape=(3,)
the robot state
u : np.ndarray, shape=(3,)
the odometry
Returns
-------
np.ndarray
The Jacobian of f wrt. u.
"""
#Fu = solution.EKFSLAM.EKFSLAM.Fu(self, x, u)
#return Fu
psi = x[2]
Fu = np.array([[np.cos(psi), -np.sin(psi), 0],
[np.sin(psi), np.cos(psi), 0],
[0, 0, 1]])
return Fu
def predict(
self, eta: np.ndarray, P: np.ndarray, z_odo: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Predict the robot state using the zOdo as odometry the corresponding state&map covariance.
Parameters
----------
eta : np.ndarray, shape=(3 + 2*#landmarks,)
the robot state and map concatenated
P : np.ndarray, shape=(3 + 2*#landmarks,)*2
the covariance of eta
z_odo : np.ndarray, shape=(3,)
the measured odometry
Returns
-------
Tuple[np.ndarray, np.ndarray], shapes= (3 + 2*#landmarks,), (3 + 2*#landmarks,)*2
predicted mean and covariance of eta.
"""
#etapred, P = solution.EKFSLAM.EKFSLAM.predict(self, eta, P, z_odo)
#return etapred, P
        # check input matrix
assert np.allclose(P, P.T), "EKFSLAM.predict: not symmetric P input"
assert np.all(
np.linalg.eigvals(P) >= 0
), "EKFSLAM.predict: non-positive eigen values in P input"
assert (
eta.shape * 2 == P.shape
), "EKFSLAM.predict: input eta and P shape do not match"
etapred = np.empty_like(eta)
x = eta[:3]
etapred[:3] = self.f(x, z_odo)
etapred[3:] = eta[3:]
Fx = self.Fx(x, z_odo)
Fu = self.Fu(x, z_odo)
# evaluate covariance prediction in place to save computation
# only robot state changes, so only rows and colums of robot state needs changing
# cov matrix layout:
# [[P_xx, P_xm],
# [P_mx, P_mm]]
P[:3, :3] = Fx@P[:3, :3]@Fx.T + Fu@self.Q@Fu.T
P[:3, 3:] = Fx@P[:3, 3:]
P[3:, :3] = P[:3, 3:].T
assert np.allclose(P, P.T), "EKFSLAM.predict: not symmetric P"
assert np.all(
np.linalg.eigvals(P) > 0
), "EKFSLAM.predict: non-positive eigen values"
assert (
etapred.shape * 2 == P.shape
), "EKFSLAM.predict: calculated shapes does not match"
return etapred, P
def h(self, eta: np.ndarray) -> np.ndarray:
"""Predict all the landmark positions in sensor frame.
Parameters
----------
eta : np.ndarray, shape=(3 + 2 * #landmarks,)
The robot state and landmarks stacked.
Returns
-------
np.ndarray, shape=(2 * #landmarks,)
The landmarks in the sensor frame.
"""
#zpred = solution.EKFSLAM.EKFSLAM.h(self, eta)
#return zpred
# extract states and map
x = eta[0:3]
# reshape map (2, #landmarks), m[:, j] is the jth landmark
m = eta[3:].reshape((-1, 2)).T
Rot = rotmat2d(-x[2])
# relative position of landmark to sensor on robot in world frame
delta_m = (m.T - eta[0:2]).T
# predicted measurements in cartesian coordinates, beware sensor offset for VP
        zpredcart = Rot @ delta_m - self.sensor_offset[:, None]  # None as index adds an axis with size 1 at that position.
zpred_r = la.norm(zpredcart, 2, axis=0) # ranges
zpred_theta = np.arctan2(zpredcart[1,:], zpredcart[0,:]) # bearings
        zpred = np.vstack((zpred_r, zpred_theta))  # the two arrays above stacked on top of each other vertically
# stack measurements along one dimension, [range1 bearing1 range2 bearing2 ...]
zpred = zpred.T.ravel()
assert (
zpred.ndim == 1 and zpred.shape[0] == eta.shape[0] - 3
), "SLAM.h: Wrong shape on zpred"
return zpred
def h_jac(self, eta: np.ndarray) -> np.ndarray:
"""Calculate the jacobian of h.
Parameters
----------
eta : np.ndarray, shape=(3 + 2 * #landmarks,)
The robot state and landmarks stacked.
Returns
-------
np.ndarray, shape=(2 * #landmarks, 3 + 2 * #landmarks)
the jacobian of h wrt. eta.
"""
# H = solution.EKFSLAM.EKFSLAM.h_jac(self, eta)
# return H
# extract states and map
x = eta[0:3]
# reshape map (2, #landmarks), m[j] is the jth landmark
m = eta[3:].reshape((-1, 2)).T
numM = m.shape[1]
Rot = rotmat2d(x[2])
# relative position of landmark to robot in world frame. m - rho that appears in (11.15) and (11.16)
delta_m = (m.T - eta[0:2]).T
        # (2, #measurements), each measured position in cartesian coordinates
zc = delta_m - Rot @ self.sensor_offset[:, None]
zr = la.norm(zc, 2, axis=0) # ranges
Rpihalf = rotmat2d(np.pi / 2)
# In what follows you can be clever and avoid making this for all the landmarks you _know_
# you will not detect (the maximum range should be available from the data).
# But keep it simple to begin with.
# Allocate H and set submatrices as memory views into H
# You may or may not want to do this like this
# see eq (11.15), (11.16), (11.17)
H = np.zeros((2 * numM, 3 + 2 * numM))
Hx = H[:, :3] # slice view, setting elements of Hx will set H as well
Hm = H[:, 3:] # slice view, setting elements of Hm will set H as well
# proposed way is to go through landmarks one by one
# preallocate and update this for some speed gain if looping
jac_z_cb = -np.eye(2, 3)
for i in range(numM): # But this whole loop can be vectorized
            ind = 2 * i  # starting position of the ith landmark into H
# the inds slice for the ith landmark into H
inds = slice(ind, ind + 2)
jac_z_cb[:,2] = -Rpihalf@delta_m[:,i]
jac_x_range = zc[:,i].T / zr[i]
jac_x_bearing = zc[:,i].T @ Rpihalf.T / zr[i]**2
Hx[ind,:] = jac_x_range @ jac_z_cb
Hx[ind+1,:] = jac_x_bearing @ jac_z_cb
Hm[ind,inds] = jac_x_range
Hm[ind+1,inds] = jac_x_bearing
# You can set some assertions here to make sure that some of the structure in H is correct
# Don't mind if I don't :)
return H
def add_landmarks(
self, eta: np.ndarray, P: np.ndarray, z: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate new landmarks, their covariances and add them to the state.
Parameters
----------
eta : np.ndarray, shape=(3 + 2*#landmarks,)
the robot state and map concatenated
P : np.ndarray, shape=(3 + 2*#landmarks,)*2
the covariance of eta
z : np.ndarray, shape(2 * #newlandmarks,)
A set of measurements to create landmarks for
Returns
-------
Tuple[np.ndarray, np.ndarray], shapes=(3 + 2*(#landmarks + #newlandmarks,), (3 + 2*(#landmarks + #newlandmarks,)*2
eta with new landmarks appended, and its covariance
"""
# etaadded, Padded = solution.EKFSLAM.EKFSLAM.add_landmarks(
# self, eta, P, z)
# return etaadded, Padded
n = P.shape[0]
assert z.ndim == 1, "SLAM.add_landmarks: z must be a 1d array"
numLmk = z.shape[0] // 2
lmnew = np.empty_like(z)
Gx = np.empty((numLmk * 2, 3))
Rall = np.zeros((numLmk * 2, numLmk * 2))
I2 = np.eye(2) # Preallocate, used for Gx
Rnb = rotmat2d(eta[2])
sensor_offset_world = Rnb @ self.sensor_offset + eta[:2]
sensor_offset_world_der = rotmat2d(
eta[2] + np.pi / 2) @ self.sensor_offset # Used in Gx
for j in range(numLmk):
ind = 2 * j
inds = slice(ind, ind + 2)
zj = z[inds]
ang = zj[1] + eta[2]
rot = rotmat2d(ang) # rotmat in Gz
# calculate position of new landmark in world frame
lmnew[inds] = Rnb @ (zj[0] * np.array([np.cos(zj[1]), np.sin(zj[1])])) + sensor_offset_world
Gx[inds, :2] = I2
Gx[inds, 2] = zj[0] * np.array([-np.sin(ang), np.cos(ang)]) + sensor_offset_world_der
Gz = rot @ np.diag([1, zj[0]])
# Gz * R * Gz^T, transform measurement covariance from polar to cartesian coordinates
Rall[inds, inds] = Gz @ self.R @ Gz.T
assert len(lmnew) % 2 == 0, "SLAM.add_landmark: lmnew not even length"
etaadded = np.append(eta, lmnew) # append new landmarks to state vector
# block diagonal of P_new, see problem text in 1g) in graded assignment 3
Padded = block_diag(P, Gx@P[:3,:3]@Gx.T + Rall)
Padded[:n, n:] = P[:, :3]@Gx.T # top right corner of Padded
        Padded[n:, :n] = Padded[:n, n:].T  # bottom left corner of Padded
assert (
etaadded.shape * 2 == Padded.shape
), "EKFSLAM.add_landmarks: calculated eta and P has wrong shape"
assert np.allclose(
Padded, Padded.T
), "EKFSLAM.add_landmarks: Padded not symmetric"
assert np.all(
np.linalg.eigvals(Padded) >= 0
), "EKFSLAM.add_landmarks: Padded not PSD"
return etaadded, Padded
def associate(
self, z: np.ndarray, zpred: np.ndarray, H: np.ndarray, S: np.ndarray,
): # -> Tuple[*((np.ndarray,) * 5)]:
"""Associate landmarks and measurements, and extract correct matrices for these.
Parameters
----------
z : np.ndarray,
The measurements all in one vector
zpred : np.ndarray
Predicted measurements in one vector
H : np.ndarray
The measurement Jacobian matrix related to zpred
S : np.ndarray
The innovation covariance related to zpred
Returns
-------
Tuple[*((np.ndarray,) * 5)]
The extracted measurements, the corresponding zpred, H, S and the associations.
Note
----
See the associations are calculated using JCBB. See this function for documentation
of the returned association and the association procedure.
"""
if self.do_asso:
# Associate
a = JCBB(z, zpred, S, self.alphas[0], self.alphas[1])
# Extract associated measurements
zinds = np.empty_like(z, dtype=bool)
zinds[::2] = a > -1 # -1 means no association
zinds[1::2] = zinds[::2]
zass = z[zinds]
# extract and rearange predicted measurements and cov
zbarinds = np.empty_like(zass, dtype=int)
zbarinds[::2] = 2 * a[a > -1]
zbarinds[1::2] = 2 * a[a > -1] + 1
zpredass = zpred[zbarinds]
Sass = S[zbarinds][:, zbarinds]
Hass = H[zbarinds]
assert zpredass.shape == zass.shape
assert Sass.shape == zpredass.shape * 2
assert Hass.shape[0] == zpredass.shape[0]
return zass, zpredass, Hass, Sass, a
else:
            # should one do something here?
pass
def update(
self, eta: np.ndarray, P: np.ndarray, z: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray]:
"""Update eta and P with z, associating landmarks and adding new ones.
Parameters
----------
eta : np.ndarray
[description]
P : np.ndarray
[description]
z : np.ndarray, shape=(#detections, 2)
[description]
Returns
-------
Tuple[np.ndarray, np.ndarray, float, np.ndarray]
[description]
"""
# etaupd, Pupd, NIS, a = solution.EKFSLAM.EKFSLAM.update(self, eta, P, z)
#return etaupd, Pupd, NIS, a
numLmk = (eta.size - 3) // 2
        assert (len(eta) - 3) % 2 == 0, "EKFSLAM.update: landmark length not even"
if numLmk > 0:
# Prediction and innovation covariance
zpred = self.h(eta)
H = self.h_jac(eta)
# Here you can use simply np.kron (a bit slow) to form the big (very big in VP after a while) R,
# or be smart with indexing and broadcasting (3d indexing into 2d mat) realizing you are adding the same R on all diagonals
S = H@P@H.T + np.kron(np.eye(numLmk), self.R)
assert (
S.shape == zpred.shape * 2
), "EKFSLAM.update: wrong shape on either S or zpred"
z = z.ravel() # 2D -> flat
# Perform data association
za, zpred, Ha, Sa, a = self.associate(z, zpred, H, S)
# No association could be made, so skip update
if za.shape[0] == 0:
etaupd = eta
Pupd = P
NIS = 1 # TODO: beware this one when analysing consistency.
else:
# Create the associated innovation
v = za.ravel() - zpred # za: 2D -> flat
v[1::2] = utils.wrapToPi(v[1::2])
# Kalman mean update
S_cho_factors = la.cho_factor(Sa) # Optional, used in places for S^-1, see scipy.linalg.cho_factor and scipy.linalg.cho_solve
Sa_inv = la.cho_solve(S_cho_factors, np.eye(Sa.shape[0]))
W = P@Ha.T@Sa_inv
etaupd = eta + W@v
# Kalman cov update: use Joseph form for stability
jo = -W @ Ha
# same as adding Identity mat
jo[np.diag_indices(jo.shape[0])] += 1
Pupd = jo@P@jo.T + W@np.kron(np.eye(int(len(zpred)/2)), self.R)@W.T
# calculate NIS, can use S_cho_factors
NIS = v.T@Sa_inv@v
# When tested, remove for speed
assert np.allclose(
Pupd, Pupd.T), "EKFSLAM.update: Pupd not symmetric"
assert np.all(
np.linalg.eigvals(Pupd) > 0
), "EKFSLAM.update: Pupd not positive definite"
else: # All measurements are new landmarks,
a = np.full(z.shape[0], -1)
z = z.flatten()
NIS = 1 # TODO: beware this one when analysing consistency.
etaupd = eta
Pupd = P
# Create new landmarks if any is available
if self.do_asso:
is_new_lmk = a == -1
if np.any(is_new_lmk):
z_new_inds = np.empty_like(z, dtype=bool)
z_new_inds[::2] = is_new_lmk
z_new_inds[1::2] = is_new_lmk
z_new = z[z_new_inds]
etaupd, Pupd = self.add_landmarks(etaupd, Pupd, z_new)
assert np.allclose(
Pupd, Pupd.T), "EKFSLAM.update: Pupd must be symmetric"
assert np.all(np.linalg.eigvals(Pupd) >=
0), "EKFSLAM.update: Pupd must be PSD"
return etaupd, Pupd, NIS, a
@classmethod
def NEESes(cls, x: np.ndarray, P: np.ndarray, x_gt: np.ndarray,) -> np.ndarray:
"""Calculates the total NEES and the NEES for the substates
Args:
x (np.ndarray): The estimate
P (np.ndarray): The state covariance
x_gt (np.ndarray): The ground truth
Raises:
AssertionError: If any input is of the wrong shape, and if debug mode is on, certain numeric properties
Returns:
np.ndarray: NEES for [all, position, heading], shape (3,)
"""
assert x.shape == (3,), f"EKFSLAM.NEES: x shape incorrect {x.shape}"
assert P.shape == (3, 3), f"EKFSLAM.NEES: P shape incorrect {P.shape}"
assert x_gt.shape == (
3,), f"EKFSLAM.NEES: x_gt shape incorrect {x_gt.shape}"
d_x = x - x_gt
d_x[2] = utils.wrapToPi(d_x[2])
assert (
-np.pi <= d_x[2] <= np.pi
), "EKFSLAM.NEES: error heading must be between (-pi, pi)"
d_p = d_x[0:2]
P_p = P[0:2, 0:2]
assert d_p.shape == (2,), "EKFSLAM.NEES: d_p must be 2 long"
d_heading = d_x[2] # Note: scalar
assert np.ndim(
d_heading) == 0, "EKFSLAM.NEES: d_heading must be scalar"
P_heading = P[2, 2] # Note: scalar
assert np.ndim(
P_heading) == 0, "EKFSLAM.NEES: P_heading must be scalar"
# NB: Needs to handle both vectors and scalars! Additionally, must handle division by zero
NEES_all = d_x @ (np.linalg.solve(P, d_x))
NEES_pos = d_p @ (np.linalg.solve(P_p, d_p))
try:
NEES_heading = d_heading ** 2 / P_heading
except ZeroDivisionError:
NEES_heading = 1.0 # TODO: beware
NEESes = np.array([NEES_all, NEES_pos, NEES_heading])
NEESes[np.isnan(NEESes)] = 1.0 # We may divide by zero, # TODO: beware
assert np.all(NEESes >= 0), "ESKF.NEES: one or more negative NEESes"
return NEESes
| 35.77757
| 141
| 0.539575
| 18,876
| 0.986155
| 0
| 0
| 18,887
| 0.98673
| 0
| 0
| 9,389
| 0.490518
|
a5a924ddb3332cd660e8de578d9b220740f27184
| 3,185
|
py
|
Python
|
pykob/audio.py
|
Greg-R/PyKOB
|
fd3c7ca352f900bd14bb10dc71d567221a8af8cf
|
[
"MIT"
] | 3
|
2020-06-29T19:59:39.000Z
|
2021-02-08T19:56:32.000Z
|
pykob/audio.py
|
Greg-R/PyKOB
|
fd3c7ca352f900bd14bb10dc71d567221a8af8cf
|
[
"MIT"
] | 197
|
2020-04-30T08:08:52.000Z
|
2021-03-22T19:10:20.000Z
|
pykob/audio.py
|
MorseKOB/pykob-4
|
bf86917e4e06ce9590f414ace0eacbde08416137
|
[
"MIT"
] | 2
|
2021-04-17T01:05:24.000Z
|
2021-11-03T16:43:53.000Z
|
"""
MIT License
Copyright (c) 2020 PyKOB - MorseKOB in Python
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
audio module
Provides audio for simulated sounder.
"""
import wave
from pathlib import Path
from pykob import log
try:
import pyaudio
ok = True
except:
log.log('PyAudio not installed.')
ok = False
BUFFERSIZE = 16
nFrames = [0, 0]
frames = [None, None]
nullFrames = None
iFrame = [0, 0]
sound = 0
if ok:
pa = pyaudio.PyAudio()
# Resource folder
root_folder = Path(__file__).parent
resource_folder = root_folder / "resources"
# Audio files
audio_files = ['clack48.wav', 'click48.wav']
for i in range(len(audio_files)):
fn = resource_folder / audio_files[i]
# print("Load audio file:", fn)
f = wave.open(str(fn), mode='rb')
nChannels = f.getnchannels()
sampleWidth = f.getsampwidth()
sampleFormat = pa.get_format_from_width(sampleWidth)
frameWidth = nChannels * sampleWidth
frameRate = f.getframerate()
nFrames[i] = f.getnframes()
frames[i] = f.readframes(nFrames[i])
iFrame[i] = nFrames[i]
f.close()
nullFrames = bytes(frameWidth*BUFFERSIZE)
def play(snd):
global sound
sound = snd
iFrame[sound] = 0
def callback(in_data, frame_count, time_info, status_flags):
if frame_count != BUFFERSIZE:
log.err('Unexpected frame count request from PyAudio:', frame_count)
if iFrame[sound] + frame_count < nFrames[sound]:
startByte = iFrame[sound] * frameWidth
endByte = (iFrame[sound] + frame_count) * frameWidth
outData = frames[sound][startByte:endByte]
iFrame[sound] += frame_count
return (outData, pyaudio.paContinue)
else:
return(nullFrames, pyaudio.paContinue)
if ok:
apiInfo = pa.get_default_host_api_info()
apiName = apiInfo['name']
devIdx = apiInfo['defaultOutputDevice']
devInfo = pa.get_device_info_by_index(devIdx)
devName = devInfo['name']
strm = pa.open(rate=frameRate, channels=nChannels, format=sampleFormat,
output=True, output_device_index=devIdx, frames_per_buffer=BUFFERSIZE,
stream_callback=callback)
| 32.5
| 82
| 0.706122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,354
| 0.425118
|
a5a96f07f26b02ec492974bd34c7406e72ba2e22
| 3,333
|
py
|
Python
|
main.py
|
DaKidReturns/WikipediaScrapper
|
288b0bc3e882ff4ccb45dbdc021eabbc25cc19d0
|
[
"MIT"
] | null | null | null |
main.py
|
DaKidReturns/WikipediaScrapper
|
288b0bc3e882ff4ccb45dbdc021eabbc25cc19d0
|
[
"MIT"
] | null | null | null |
main.py
|
DaKidReturns/WikipediaScrapper
|
288b0bc3e882ff4ccb45dbdc021eabbc25cc19d0
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup as bs4
from docx import Document as doc
from docx.shared import Cm
import sys
if len(sys.argv) != 3:
print("The format should be \n./main.py <url> <output_file_name>")
else:
url = sys.argv[1]
doc_name = sys.argv[2]
document = doc()
page = requests.get(url)
if(page.status_code == requests.codes.ok):
soup = bs4(page.content,'html.parser')
headings = soup.find_all("h1",class_="firstHeading")
document.add_heading(headings[0].text)
details = soup.find("div",id="bodyContent")
main_soup = bs4(details.prettify(),'html.parser')
        # Extract (remove) the table element; proper handling of tables is to be implemented in the future
table = main_soup.find('table').extract()
#isEmpty is the lambda function that checks if a list is empty
isEmpty = lambda x: True if(x == []) else False
#tableElem = ('table','td','tr')
for x in details.children:
if x != '\n' and x !='' and x != ' ':
if(not isEmpty(list(x.children))):
for i in list(x.children):
# print(i.string)
if i.string == None:
#print(len(list(i.children)))
for j in i.children:
#print(j.name)
if j.string == None:
#print(j.attrs)
if(j.name == 'table' or j.name == 'ol' or j.name == 'ul'):
#print(j.attrs)
continue
#j = j.next_sibling.next_sibling
#search and purge references
if list(j.descendants) != []:
#print(list(j.descendants))
for a in j.descendants:
if a.string == None:
attr = a.attrs.keys()
#print(a.attrs)
if 'class' in attr:
if 'mw-references-wrap' in a.attrs['class']:
#print(a.text)
a.decompose()
break
#if 'href' in attr:
#if '#References' in a.attrs['href']:
#a.decompose()
#print the elements
document.add_paragraph(j.text)
#print(j.prettify())
#print('\n')
if doc_name.endswith('.doc') or doc_name.endswith('.docx'):
document.save(doc_name)
else:
document.save(doc_name+'.doc')
| 42.189873
| 96
| 0.370237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 685
| 0.205521
|
a5a9f77ca2671875a0d1fe9de7b77aefb68618a3
| 583
|
py
|
Python
|
math/count_digits.py
|
ethyl2/code_challenges
|
3c9ccca1782f92728e60a515a7ca797f6d470e81
|
[
"MIT"
] | null | null | null |
math/count_digits.py
|
ethyl2/code_challenges
|
3c9ccca1782f92728e60a515a7ca797f6d470e81
|
[
"MIT"
] | null | null | null |
math/count_digits.py
|
ethyl2/code_challenges
|
3c9ccca1782f92728e60a515a7ca797f6d470e81
|
[
"MIT"
] | null | null | null |
"""
https://www.codewars.com/kata/566fc12495810954b1000030/train/python
Given a positive int n and a digit d (< 10),
square all ints from 0 to n, and return the number of times d is used in the squared results.
"""
def nb_dig(n, d):
'''
results = ''
for i in range(n+1):
results += str(i * i)
return results.count(str(d))
'''
return ''.join([str(i * i) for i in range(n + 1)]).count(str(d))
print(nb_dig(10, 1)) # 4
print(nb_dig(5750, 0)) # 4700
print(nb_dig(11011, 2)) # 9481
print(nb_dig(12224, 8)) # 7733
print(nb_dig(11549, 1)) # 11905
| 23.32
| 89
| 0.61578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 362
| 0.620926
|
a5ac9cd651f965f113812d5a35b9a777736d390b
| 3,492
|
py
|
Python
|
{{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py
|
EMMC-ASBL/oteapi-plugin-template
|
31a772a4fb9be6eafabfa206fe6e7a23516bf188
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py
|
EMMC-ASBL/oteapi-plugin-template
|
31a772a4fb9be6eafabfa206fe6e7a23516bf188
|
[
"MIT"
] | 35
|
2022-01-17T10:23:01.000Z
|
2022-03-11T19:41:36.000Z
|
{{ cookiecutter.project_slug }}/{{ cookiecutter.package_name }}/strategies/resource.py
|
EMMC-ASBL/oteapi-plugin-template
|
31a772a4fb9be6eafabfa206fe6e7a23516bf188
|
[
"MIT"
] | 2
|
2022-01-20T06:45:27.000Z
|
2022-02-09T15:59:21.000Z
|
"""Demo resource strategy class."""
# pylint: disable=no-self-use,unused-argument
from typing import TYPE_CHECKING, Optional
from oteapi.models import AttrDict, DataCacheConfig, ResourceConfig, SessionUpdate
from oteapi.plugins import create_strategy
from pydantic import Field
from pydantic.dataclasses import dataclass
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Dict
class DemoConfig(AttrDict):
"""Strategy-specific Configuration Data Model."""
datacache_config: Optional[DataCacheConfig] = Field(
None,
description="Configuration for the data cache.",
)
class DemoResourceConfig(ResourceConfig):
"""Demo resource strategy config."""
# Require the resource to be a REST API with JSON responses that uses the
# DemoJSONDataParseStrategy strategy.
mediaType: str = Field(
"application/jsonDEMO",
const=True,
description=ResourceConfig.__fields__["mediaType"].field_info.description,
)
accessService: str = Field(
"DEMO-access-service",
const=True,
description=ResourceConfig.__fields__["accessService"].field_info.description,
)
configuration: DemoConfig = Field(
DemoConfig(),
description="Demo resource strategy-specific configuration.",
)
class SessionUpdateDemoResource(SessionUpdate):
"""Class for returning values from Demo Resource strategy."""
output: dict = Field(
...,
description=(
"The output from downloading the response from the given `accessUrl`."
),
)
@dataclass
class DemoResourceStrategy:
"""Resource Strategy.
**Registers strategies**:
- `("accessService", "DEMO-access-service")`
"""
resource_config: DemoResourceConfig
def initialize(self, session: "Optional[Dict[str, Any]]" = None) -> SessionUpdate:
"""Initialize strategy.
This method will be called through the `/initialize` endpoint of the OTEAPI
Services.
Parameters:
session: A session-specific dictionary context.
Returns:
An update model of key/value-pairs to be stored in the
session-specific context from services.
"""
return SessionUpdate()
def get(
self, session: "Optional[Dict[str, Any]]" = None
) -> SessionUpdateDemoResource:
"""Execute the strategy.
This method will be called through the strategy-specific endpoint of the
OTEAPI Services.
Parameters:
session: A session-specific dictionary context.
Returns:
An update model of key/value-pairs to be stored in the
session-specific context from services.
"""
# Example of the plugin using a parse strategy to (fetch) and parse the data
session = session if session else {}
parse_config = self.resource_config.copy()
if not parse_config.downloadUrl:
parse_config.downloadUrl = self.resource_config.accessUrl
session.update(create_strategy("parse", parse_config).initialize(session))
session.update(create_strategy("parse", parse_config).get(session))
if "content" not in session:
raise ValueError(
f"Expected the parse strategy for {self.resource_config.mediaType!r} "
"to return a session with a 'content' key."
)
return SessionUpdateDemoResource(output=session["content"])
| 29.846154
| 86
| 0.665521
| 3,075
| 0.880584
| 0
| 0
| 1,909
| 0.546678
| 0
| 0
| 1,679
| 0.480813
|
a5ad0bf99db5282a28fe82ac56a8026546459cf4
| 1,480
|
py
|
Python
|
unittests/TestSets.py
|
vtbassmatt/Scrython
|
49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976
|
[
"MIT"
] | null | null | null |
unittests/TestSets.py
|
vtbassmatt/Scrython
|
49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976
|
[
"MIT"
] | null | null | null |
unittests/TestSets.py
|
vtbassmatt/Scrython
|
49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976
|
[
"MIT"
] | null | null | null |
# This workaround makes sure that we can import from the parent dir
import sys
sys.path.append('..')
from scrython.sets import Code
import unittest
import time
promo_khans = Code('PKTK')
khans = Code('KTK')
class TestSets(unittest.TestCase):
def test_object(self):
self.assertIsInstance(khans.object(), str)
def test_code(self):
self.assertIsInstance(khans.code(), str)
def test_mtgo_code(self):
self.assertIsInstance(khans.mtgo_code(), str)
def test_name(self):
self.assertIsInstance(khans.name(), str)
def test_set_type(self):
self.assertIsInstance(khans.set_type(), str)
def test_released_at(self):
self.assertIsInstance(khans.released_at(), str)
def test_block_code(self):
self.assertIsInstance(khans.block_code(), str)
def test_block(self):
self.assertIsInstance(khans.block(), str)
def test_parent_set_code(self):
self.assertIsInstance(promo_khans.parent_set_code(), str)
def test_card_count(self):
self.assertIsInstance(khans.card_count(), int)
def test_digital(self):
self.assertIsInstance(khans.digital(), bool)
def test_foil_only(self):
self.assertIsInstance(khans.foil_only(), bool)
def test_icon_svg_uri(self):
self.assertIsInstance(khans.icon_svg_uri(), str)
def test_search_uri(self):
self.assertIsInstance(khans.search_uri(), str)
if __name__ == '__main__':
unittest.main()
| 25.084746
| 67
| 0.691892
| 1,220
| 0.824324
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.062162
|
a5ad538fb112ec421c158be3cf3243f38640e710
| 194
|
py
|
Python
|
GUI/check_email.py
|
BrendanCheong/BT2102-OSHES-Group16
|
2b62772e6c654b8d4e76f09df6473ac88912df28
|
[
"MIT"
] | 5
|
2021-09-11T15:07:34.000Z
|
2021-09-11T15:16:04.000Z
|
GUI/check_email.py
|
BrendanCheong/Online-Smart-Home-Ecommerce-System
|
2b62772e6c654b8d4e76f09df6473ac88912df28
|
[
"MIT"
] | 1
|
2021-09-18T10:33:00.000Z
|
2021-09-18T10:34:01.000Z
|
GUI/check_email.py
|
BrendanCheong/BT2102-OSHES-Group16
|
2b62772e6c654b8d4e76f09df6473ac88912df28
|
[
"MIT"
] | null | null | null |
import re
def check(email):
    regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
    return bool(re.fullmatch(regex, email))
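# Hypothetical usage example:
if __name__ == '__main__':
    print(check('user@example.com')) # True
    print(check('not-an-email')) # False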
| 19.4
| 67
| 0.489691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.278351
|
a5aea13c60563cdbc4bc77d66b48baaf6efb6ec5
| 1,587
|
py
|
Python
|
SimpleEmailer.py
|
dschoonwinkel/InverterMQTT
|
75f13900f584d9905a02488eff7bd1dd3e53e73a
|
[
"Apache-2.0"
] | null | null | null |
SimpleEmailer.py
|
dschoonwinkel/InverterMQTT
|
75f13900f584d9905a02488eff7bd1dd3e53e73a
|
[
"Apache-2.0"
] | null | null | null |
SimpleEmailer.py
|
dschoonwinkel/InverterMQTT
|
75f13900f584d9905a02488eff7bd1dd3e53e73a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import smtplib
import time
import configparser
config = configparser.ConfigParser()
config.read('/home/pi/Development/Python/InverterMQTT/emailcredentials.conf')
email = config['credentials']['email']
password = config['credentials']['password']
to_email = config['credentials']['to_email']
#
# Based on tutorial: https://www.bc-robotics.com/tutorials/sending-email-using-python-raspberry-pi/
#Email Variables
SMTP_SERVER = 'smtp.gmail.com' #Email Server (don't change!)
SMTP_PORT = 587 #Server Port (don't change!)
GMAIL_USERNAME = email #change this to match your gmail account
GMAIL_PASSWORD = password #change this to match your gmail password
class Emailer:
def sendmail(self, subject, content, recipient=to_email):
#Create Headers
headers = ["From: " + GMAIL_USERNAME, "Subject: " + subject, "To: " + recipient,
"MIME-Version: 1.0", "Content-Type: text/html"]
headers = "\r\n".join(headers)
#Connect to Gmail Server
session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
session.ehlo()
session.starttls()
session.ehlo()
#Login to Gmail
session.login(GMAIL_USERNAME, GMAIL_PASSWORD)
#Send Email & Exit
session.sendmail(GMAIL_USERNAME, recipient, headers + "\r\n\r\n" + content)
        session.quit()
def main():
sender = Emailer()
emailSubject = "Hello World!"
emailContent = "This is a test of my emailer class on Linux"
sender.sendmail(emailSubject, emailContent)
if __name__ == '__main__':
main()
| 29.943396
| 99
| 0.674228
| 682
| 0.429742
| 0
| 0
| 0
| 0
| 0
| 0
| 649
| 0.408948
|
a5b066bc7defe004716762bdcddd92dae0d3fd15
| 876
|
py
|
Python
|
BaseKnowledge/file/file.py
|
Kose-i/python_test
|
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
|
[
"Unlicense"
] | null | null | null |
BaseKnowledge/file/file.py
|
Kose-i/python_test
|
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
|
[
"Unlicense"
] | null | null | null |
BaseKnowledge/file/file.py
|
Kose-i/python_test
|
d7b031aa33d699aeb9fe196fe0a6d216aa006f0d
|
[
"Unlicense"
] | null | null | null |
#! /usr/bin/env python3
def func1():
f = open("test.txt", 'w')
f.write("This is test")
f.close()
def func2():
with open("test.txt", 'r') as f:
print(f.read())
import codecs
def func3():
f = codecs.open("test.txt", 'w', 'utf-8', 'ignore')
f.write("test func3")
f.close()
import os.path
def func4():
path = "tmp/tmp-1/tmp.txt"
print(os.path.split(path))
import shutil
def func5():
shutil.copyfile("test.txt", "test2.txt")
import glob
def func6():
print(glob.glob('*'))
import tempfile
def func7():
tmpfd, tmpname = tempfile.mkstemp(dir='.')
print(tmpname)
f = os.fdopen(tmpfd, 'w+b')
f.close()
if __name__=='__main__':
print("\nfunc1()")
func1()
print("\nfunc2()")
func2()
print("\nfunc3()")
func3()
print("\nfunc4()")
func4()
print("\nfunc5()")
func5()
print("\nfunc6()")
func6()
print("\nfunc7()")
func7()
| 16.528302
| 53
| 0.592466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.275114
|
a5b2bd395585d35f2949dc453f6442697664d6bf
| 202
|
py
|
Python
|
types/msg.py
|
UltiRequiem/professional-phython-platzi
|
0bf8f97b172d0799d6906193090ef69beb1c8b4b
|
[
"MIT"
] | 4
|
2021-08-02T21:34:46.000Z
|
2021-09-24T03:26:35.000Z
|
types/msg.py
|
UltiRequiem/professional-phython-platzi
|
0bf8f97b172d0799d6906193090ef69beb1c8b4b
|
[
"MIT"
] | null | null | null |
types/msg.py
|
UltiRequiem/professional-phython-platzi
|
0bf8f97b172d0799d6906193090ef69beb1c8b4b
|
[
"MIT"
] | 4
|
2021-08-02T21:34:47.000Z
|
2021-08-11T03:21:37.000Z
|
def run(msg: str) -> None:
"""
    Print the message received as a parameter.
"""
print(msg)
if __name__ == "__main__":
message: str = "Zero commands Python to be typed!"
run(message)
| 18.363636
| 54
| 0.60396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 99
| 0.490099
|
a5b4efb9c597491e24e7c42cb5dac380b74e6e91
| 702
|
py
|
Python
|
apps/billing/tasks.py
|
banyanbbt/banyan_data
|
4ce87dc1c49920d587a472b70842fcf5b3d9a3d2
|
[
"MIT"
] | 2
|
2018-09-08T05:16:39.000Z
|
2018-09-10T02:50:31.000Z
|
apps/billing/tasks.py
|
banyanbbt/banyan_data
|
4ce87dc1c49920d587a472b70842fcf5b3d9a3d2
|
[
"MIT"
] | null | null | null |
apps/billing/tasks.py
|
banyanbbt/banyan_data
|
4ce87dc1c49920d587a472b70842fcf5b3d9a3d2
|
[
"MIT"
] | null | null | null |
import logging
from config.celery_configs import app
from lib.sms import client as sms_client
from lib.blockchain.pandora import Pandora
from apps.user.models import UserProfile
logger = logging.getLogger(__name__)
@app.task
def sync_monthly_billing():
logger.info("start sync_monthly_billing")
accounts = UserProfile.company_accounts()
for account in accounts:
Pandora.monthly_bill(account)
logger.info("end sync_monthly_billing")
@app.task
def sync_weekly_billing():
logger.info("start sync_weekly_billing")
accounts = UserProfile.company_accounts()
for account in accounts:
Pandora.weekly_bill(account)
logger.info("end sync_weekly_billing")
| 23.4
| 45
| 0.763533
| 0
| 0
| 0
| 0
| 476
| 0.678063
| 0
| 0
| 106
| 0.150997
|
a5b6d5ce0ce97c7ff9249912738d183eb9ca560c
| 449
|
py
|
Python
|
LBP51.py
|
Anandgowda18/LogicBasedPrograms
|
25baa9fbf19cd45229c87e099877e97281b0e76b
|
[
"MIT"
] | null | null | null |
LBP51.py
|
Anandgowda18/LogicBasedPrograms
|
25baa9fbf19cd45229c87e099877e97281b0e76b
|
[
"MIT"
] | null | null | null |
LBP51.py
|
Anandgowda18/LogicBasedPrograms
|
25baa9fbf19cd45229c87e099877e97281b0e76b
|
[
"MIT"
] | null | null | null |
'''Given a valid IP address, return a defanged version of that IP address. A defanged IP address replaces every period '.' with "[.]".
Input Format
A string
Constraints
non-empty String
Output Format
replacement String
Sample Input 0
1.1.1.1
Sample Output 0
1[.]1[.]1[.]1
Sample Input 1
255.100.50.0
Sample Output 1
255[.]100[.]50[.]0
Sample Input 2
1.2.3.4
Sample Output 2
1[.]2[.]3[.]4'''
#solution
print(input().replace('.','[.]'))
| 12.472222
| 134
| 0.67706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 420
| 0.935412
|
a5b824b421e3455471988b500baaf9d0bcd0357a
| 4,981
|
py
|
Python
|
website/urls.py
|
pomo-mondreganto/CTForces-old
|
86758192f800108ff109f07fe155d5a98b4a3e14
|
[
"MIT"
] | null | null | null |
website/urls.py
|
pomo-mondreganto/CTForces-old
|
86758192f800108ff109f07fe155d5a98b4a3e14
|
[
"MIT"
] | 6
|
2021-10-01T14:18:34.000Z
|
2021-10-01T14:19:17.000Z
|
website/urls.py
|
pomo-mondreganto/CTForces-old
|
86758192f800108ff109f07fe155d5a98b4a3e14
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.urls import path, re_path
from django.views.static import serve
from .views import *
urlpatterns = [
re_path('^$', MainView.as_view(), name='main_view'),
path('page/<int:page>/', MainView.as_view(), name='main_view_with_page'),
re_path('^signup/$', UserRegistrationView.as_view(), name='signup'),
re_path('^signin/$', UserLoginView.as_view(), name='signin'),
re_path('^logout/$', logout_user, name='logout'),
path('user/<str:username>/', UserInformationView.as_view(), name='user_info'),
re_path('^settings/general/$', SettingsGeneralView.as_view(), name='settings_general_view'),
re_path('^settings/social/$', SettingsSocialView.as_view(), name='settings_social_view'),
re_path('^friends/$', FriendsView.as_view(), name='friends_view'),
path('friends/page/<int:page>/', FriendsView.as_view(), name='friends_view_with_page'),
re_path('^search_users/$', search_users, name='user_search'),
path('user/<str:username>/blog/', UserBlogView.as_view(), name='user_blog_view'),
path('user/<str:username>/blog/page/<int:page>/', UserBlogView.as_view(), name='user_blog_view_with_page'),
path('user/<str:username>/tasks/', UserTasksView.as_view(), name='user_tasks_view'),
path('user/<str:username>/tasks/page/<int:page>/', UserTasksView.as_view(), name='user_tasks_view_with_page'),
path('user/<str:username>/contests/', UserContestListView.as_view(), name='user_contests_view'),
path('user/<str:username>/contests/page/<int:page>/', UserContestListView.as_view(),
name='user_contests_view_with_page'),
path('user/<str:username>/solved_tasks/', UserSolvedTasksView.as_view(),
name='user_solved_tasks_view'),
path('user/<str:username>/solved_tasks/page/<int:page>/', UserSolvedTasksView.as_view(),
name='user_solved_tasks_view_with_page'),
path('top_users/', UserTopView.as_view(), name='users_top_view'),
path('top_users/page/<int:page>/', UserTopView.as_view(), name='users_top_view_with_page'),
path('top_rating_users/', UserRatingTopView.as_view(), name='users_rating_top_view'),
path('top_rating_users/page/<int:page>/', UserRatingTopView.as_view(), name='users_rating_top_view_with_page'),
path('top_rating_users_by_group/', UserByGroupRatingTopView.as_view(), name='users_by_group_rating_top_view'),
path('top_rating_users_by_group/page/<int:page>/', UserByGroupRatingTopView.as_view(),
name='users_by_group_rating_top_view_with_page'),
re_path('^add_post/$', PostCreationView.as_view(), name='post_creation_view'),
path('post/<int:post_id>/', PostView.as_view(), name='post_view'),
re_path('^leave_comment/$', leave_comment, name='leave_comment'),
re_path('^media/(?P<path>.*)$', serve, {
'document_root': settings.MEDIA_ROOT,
}),
path('task/<int:task_id>/', TaskView.as_view(), name='task_view'),
path('task/<int:task_id>/edit/', TaskEditView.as_view(), name='task_edit_view'),
path('task/<int:task_id>/submit/', submit_task, name='task_submit'),
path('task/<int:task_id>/solved/', TaskSolvedView.as_view(), name='task_solved_view'),
path('task/<int:task_id>/solved/page/<int:page>/', TaskSolvedView.as_view(), name='task_solved_view_with_page'),
re_path('^create_task/$', TaskCreationView.as_view(), name='task_creation_view'),
re_path('^tasks/$', TasksArchiveView.as_view(), name='task_archive_view'),
path('tasks/page/<int:page>/', TasksArchiveView.as_view(), name='task_archive_view_with_page'),
re_path('^confirm_email/$', account_confirmation, name='confirm_account'),
re_path('^resend_email/$', EmailResendView.as_view(), name='resend_email_view'),
re_path('^password_reset_email/$', PasswordResetEmailView.as_view(), name='password_reset_email'),
re_path('^reset_password/$', PasswordResetPasswordView.as_view(), name='password_reset_password'),
re_path('^search_tags/$', search_tags, name='search_tags'),
re_path('^get_task/$', get_task, name='get_task_by_id'),
re_path('^create_contest/$', ContestCreationView.as_view(), name='create_contest'),
path('contests/', ContestsMainListView.as_view(), name='contests_main_list_view'),
path('contests/page/<int:page>/', ContestsMainListView.as_view(), name='contests_main_list_view_with_page'),
path('contest/<int:contest_id>/', ContestMainView.as_view(), name='contest_view'),
path('contest/<int:contest_id>/register/', register_for_contest, name='register_for_contest'),
path('contest/<int:contest_id>/scoreboard/', ContestScoreboardView.as_view(), name='contest_scoreboard_view'),
path('contest/<int:contest_id>/task/<int:task_id>/', ContestTaskView.as_view(), name='contest_task_view'),
path('contest/<int:contest_id>/task/<int:task_id>/submit/', submit_contest_flag, name='contest_task_submit'),
re_path('^test', test_view, name='test_view'),
re_path('^debug', debug_view, name='debug_view'),
]
| 54.736264
| 116
| 0.718932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,352
| 0.472194
|
a5b8284d0679076f983319f40b4e3ceca65a28c5
| 1,372
|
py
|
Python
|
part2.py
|
Tiziana-I/project-covid-mask-classifier
|
e1619172656f8de92e8faae5dcb7437686f7ca5e
|
[
"MIT"
] | null | null | null |
part2.py
|
Tiziana-I/project-covid-mask-classifier
|
e1619172656f8de92e8faae5dcb7437686f7ca5e
|
[
"MIT"
] | null | null | null |
part2.py
|
Tiziana-I/project-covid-mask-classifier
|
e1619172656f8de92e8faae5dcb7437686f7ca5e
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os
cap = cv2.VideoCapture(0)
#model=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_frontalface_default.xml"))
smile=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_smile.xml"))
#eye=cv2.CascadeClassifier(os.path.join("haar-cascade-files","haarcascade_eye.xml"))
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Face detector
#cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
#roi = frame[y:y+h,x:x+w]
#faces = model.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
faces = smile.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
#faces = eye.detectMultiScale(frame,scaleFactor=1.5,minNeighbors=3,flags=cv2.CASCADE_DO_ROUGH_SEARCH | cv2.CASCADE_SCALE_IMAGE)
print(faces)
for x,y,w,h in faces:
print(x,y,w,h)
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2) # blue BGR
frame = cv2.putText(frame,"Ciao", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0) , 2, cv2.LINE_AA)
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 38.111111
| 133
| 0.707726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 696
| 0.507289
|
a5b83e7cc19ace3ba764ad74920296c856b01e5f
| 375
|
py
|
Python
|
spikes/function_signatures.py
|
insequor/webapp
|
73990bd74afd6d0f794c447e1bcc5d557ee2ed31
|
[
"MIT"
] | 1
|
2020-08-07T12:16:49.000Z
|
2020-08-07T12:16:49.000Z
|
spikes/function_signatures.py
|
insequor/webapp
|
73990bd74afd6d0f794c447e1bcc5d557ee2ed31
|
[
"MIT"
] | 1
|
2021-10-30T10:21:34.000Z
|
2021-10-30T10:21:34.000Z
|
spikes/function_signatures.py
|
insequor/webapp
|
73990bd74afd6d0f794c447e1bcc5d557ee2ed31
|
[
"MIT"
] | null | null | null |
from inspect import signature
def testFunction(a, b=None):
pass
class TestClass:
def testMethod(me):
pass
if __name__ == '__main__':
#sig = signature(testFunction)
sig = signature(TestClass.testMethod)
for key in sig.parameters:
param = sig.parameters[key]
print(key, param, dir(param))
print(' ', param.kind)
| 20.833333
| 41
| 0.624
| 53
| 0.141333
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.12
|
a5b8565cb66fcfd69f346054d3bf2453f6824c71
| 1,371
|
py
|
Python
|
docs/commands.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | 6
|
2017-11-05T02:44:10.000Z
|
2021-07-14T19:10:56.000Z
|
docs/commands.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | null | null | null |
docs/commands.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | 1
|
2017-01-31T23:10:09.000Z
|
2017-01-31T23:10:09.000Z
|
import os
import sys
from argparse import _SubParsersAction
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
from vee.commands.main import get_parser
def get_sub_action(parser):
for action in parser._actions:
if isinstance(action, _SubParsersAction):
return action
parser = get_parser()
usage = parser.format_usage().replace('usage:', '')
print('''
top-level
---------
.. _cli_vee:
``vee``
~~~~~~~
::
''')
for line in parser.format_help().splitlines():
print(' ' + line)
subaction = get_sub_action(parser)
for group_name, funcs in parser._func_groups:
did_header = False
visible = set(ca.dest for ca in subaction._choices_actions)
for name, func in funcs:
if not name in visible:
continue
if not did_header:
print('.. _cli_%s:' % group_name.replace(' ', '_'))
print()
print(group_name)
print('-' * len(group_name))
print()
did_header = True
subparser = subaction._name_parser_map[name]
print('.. _cli_vee_%s:' % name)
print()
print('``vee %s``' % name)
print('~' * (8 + len(name)))
print()
print('::')
print()
for line in subparser.format_help().splitlines():
print(' ' + line)
print()
| 18.527027
| 68
| 0.56674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.110868
|
a5b88dea17e5a8c345a0188b0209c92393ef06ec
| 551
|
py
|
Python
|
main.py
|
SciFiTy10/talkLikeSnoop
|
1a3408dfa244669a0d723737c62da93feb7d9ba8
|
[
"MIT"
] | 1
|
2022-01-07T10:27:14.000Z
|
2022-01-07T10:27:14.000Z
|
main.py
|
SciFiTy10/talkLikeSnoop
|
1a3408dfa244669a0d723737c62da93feb7d9ba8
|
[
"MIT"
] | null | null | null |
main.py
|
SciFiTy10/talkLikeSnoop
|
1a3408dfa244669a0d723737c62da93feb7d9ba8
|
[
"MIT"
] | null | null | null |
#imports
from routing_methods import on_launch, intent_router
##############################
# Program Entry
##############################
#lambda_handler (this is like main())
def lambda_handler(event, context):
#event is a python dictionary
#LaunchRequest is an object that means the user made a request to a skill, but didn't specify the intent
if event['request']['type'] == "LaunchRequest":
return on_launch(event, context)
elif event['request']['type'] == "IntentRequest":
return intent_router(event, context)
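# --- Illustrative local test (added sketch, not part of the original handler) ---
# Exercises the routing above with a hand-built LaunchRequest payload; a real
# Alexa event carries many more fields, and the response shape depends on
# routing_methods.on_launch.
if __name__ == "__main__":
    fake_launch_event = {'request': {'type': "LaunchRequest"}}
    print(lambda_handler(fake_launch_event, context=None))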
| 34.4375
| 108
| 0.638838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 313
| 0.568058
|
a5bc2b0b89e7e05fdfc86ac8ee4661e2d1a71f8f
| 13,303
|
py
|
Python
|
thrift/clients.py
|
fabiobatalha/processing
|
f3ad99e161de2befc7908168bfd7843f988c379d
|
[
"BSD-2-Clause"
] | null | null | null |
thrift/clients.py
|
fabiobatalha/processing
|
f3ad99e161de2befc7908168bfd7843f988c379d
|
[
"BSD-2-Clause"
] | null | null | null |
thrift/clients.py
|
fabiobatalha/processing
|
f3ad99e161de2befc7908168bfd7843f988c379d
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
import os
import thriftpy
import json
import logging
from thriftpy.rpc import make_client
from xylose.scielodocument import Article, Journal
LIMIT = 1000
logger = logging.getLogger(__name__)
ratchet_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/ratchet.thrift')
articlemeta_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/articlemeta.thrift')
citedby_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/citedby.thrift')
accessstats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/access_stats.thrift')
publication_stats_thrift = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/publication_stats.thrift')
class ServerError(Exception):
def __init__(self, message=None):
        self.message = message or 'thriftclient: ServerError'
def __str__(self):
return repr(self.message)
class AccessStats(object):
def __init__(self, address, port):
"""
        Thrift client for the Access Stats service.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
accessstats_thrift.AccessStats,
self._address,
self._port
)
return client
def _compute_access_lifetime(self, query_result):
data = []
for publication_year in query_result['aggregations']['publication_year']['buckets']:
for access_year in publication_year['access_year']['buckets']:
data.append([
publication_year['key'],
access_year['key'],
int(access_year['access_html']['value']),
int(access_year['access_abstract']['value']),
int(access_year['access_pdf']['value']),
int(access_year['access_epdf']['value']),
int(access_year['access_total']['value'])
])
return sorted(data)
def access_lifetime(self, issn, collection, raw=False):
body = {
"query": {
"bool": {
"must": [{
"match": {
"collection": collection
}
},
{
"match": {
"issn": issn
}
}
]
}
},
"size": 0,
"aggs": {
"publication_year": {
"terms": {
"field": "publication_year",
"size": 0,
"order": {
"access_total": "desc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
},
"access_year": {
"terms": {
"field": "access_year",
"size": 0,
"order": {
"access_total": "desc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
},
"access_abstract": {
"sum": {
"field": "access_abstract"
}
},
"access_epdf": {
"sum": {
"field": "access_epdf"
}
},
"access_html": {
"sum": {
"field": "access_html"
}
},
"access_pdf": {
"sum": {
"field": "access_pdf"
}
}
}
}
}
}
}
}
query_parameters = [
accessstats_thrift.kwargs('size', '0')
]
query_result = json.loads(self.client.search(json.dumps(body), query_parameters))
computed = self._compute_access_lifetime(query_result)
return query_result if raw else computed
class PublicationStats(object):
def __init__(self, address, port):
"""
        Thrift client for the PublicationStats service.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
publication_stats_thrift.PublicationStats,
self._address,
self._port
)
return client
def _compute_first_included_document_by_journal(self, query_result):
if len(query_result.get('hits', {'hits': []}).get('hits', [])) == 0:
return None
return query_result['hits']['hits'][0].get('_source', None)
def first_included_document_by_journal(self, issn, collection):
body = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
},
{
"match": {
"issn": issn
}
}
]
}
}
}
},
"sort": [
{
"publication_date": {
"order": "asc"
}
}
]
}
query_parameters = [
publication_stats_thrift.kwargs('size', '1')
]
query_result = json.loads(self.client.search('article', json.dumps(body), query_parameters))
return self._compute_first_included_document_by_journal(query_result)
def _compute_last_included_document_by_journal(self, query_result):
if len(query_result.get('hits', {'hits': []}).get('hits', [])) == 0:
return None
return query_result['hits']['hits'][0].get('_source', None)
def last_included_document_by_journal(self, issn, collection, metaonly=False):
body = {
"query": {
"filtered": {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
},
{
"match": {
"issn": issn
}
}
]
}
},
"filter": {
"exists": {
"field": "publication_date"
}
}
}
},
"sort": [
{
"publication_date": {
"order": "desc"
}
}
]
}
query_parameters = [
publication_stats_thrift.kwargs('size', '1')
]
query_result = json.loads(self.client.search('article', json.dumps(body), query_parameters))
return self._compute_last_included_document_by_journal(query_result)
class Citedby(object):
def __init__(self, address, port):
"""
        Thrift client for the Citedby service.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
citedby_thrift.Citedby,
self._address,
self._port
)
return client
def citedby_pid(self, code, metaonly=False):
data = self.client.citedby_pid(code, metaonly)
return data
class Ratchet(object):
def __init__(self, address, port):
"""
        Thrift client for the Ratchet service.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
ratchet_thrift.RatchetStats,
self._address,
self._port
)
return client
def document(self, code):
data = self.client.general(code=code)
return data
class ArticleMeta(object):
def __init__(self, address, port):
"""
        Thrift client for the Articlemeta service.
"""
self._address = address
self._port = port
@property
def client(self):
client = make_client(
articlemeta_thrift.ArticleMeta,
self._address,
self._port
)
return client
def journals(self, collection=None, issn=None):
offset = 0
while True:
identifiers = self.client.get_journal_identifiers(collection=collection, issn=issn, limit=LIMIT, offset=offset)
if len(identifiers) == 0:
                return  # end of pagination; PEP 479 disallows raising StopIteration inside a generator
for identifier in identifiers:
journal = self.client.get_journal(
code=identifier.code[0], collection=identifier.collection)
jjournal = json.loads(journal)
xjournal = Journal(jjournal)
logger.info('Journal loaded: %s_%s' % ( identifier.collection, identifier.code))
yield xjournal
            offset += LIMIT
def exists_article(self, code, collection):
try:
return self.client.exists_article(
code,
collection
)
except:
msg = 'Error checking if document exists: %s_%s' % (collection, code)
raise ServerError(msg)
def set_doaj_id(self, code, collection, doaj_id):
try:
article = self.client.set_doaj_id(
code,
collection,
doaj_id
)
except:
            msg = 'Error setting doaj id for document: %s_%s' % (collection, code)
raise ServerError(msg)
def document(self, code, collection, replace_journal_metadata=True, fmt='xylose'):
try:
article = self.client.get_article(
code=code,
collection=collection,
replace_journal_metadata=True,
fmt=fmt
)
except:
msg = 'Error retrieving document: %s_%s' % (collection, code)
raise ServerError(msg)
jarticle = None
try:
jarticle = json.loads(article)
except:
            msg = 'Failed to load JSON when retrieving document: %s_%s' % (collection, code)
raise ServerError(msg)
if not jarticle:
logger.warning('Document not found for : %s_%s' % ( collection, code))
return None
if fmt == 'xylose':
xarticle = Article(jarticle)
logger.info('Document loaded: %s_%s' % ( collection, code))
return xarticle
else:
logger.info('Document loaded: %s_%s' % ( collection, code))
return article
def documents(self, collection=None, issn=None, from_date=None,
until_date=None, fmt='xylose'):
offset = 0
while True:
identifiers = self.client.get_article_identifiers(
collection=collection, issn=issn, from_date=from_date,
until_date=until_date, limit=LIMIT, offset=offset)
if len(identifiers) == 0:
                return  # end of pagination; PEP 479 disallows raising StopIteration inside a generator
for identifier in identifiers:
document = self.document(
code=identifier.code,
collection=identifier.collection,
replace_journal_metadata=True,
fmt=fmt
)
yield document
            offset += LIMIT
def collections(self):
        return [i for i in self.client.get_collection_identifiers()]
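# --- Illustrative usage (added sketch, not part of the original module) ---
# Shows how the ArticleMeta thrift client above could be driven; the host, port
# and collection acronym are placeholders, not values taken from this code.
if __name__ == '__main__':
    am = ArticleMeta('localhost', 11621)  # placeholder thrift endpoint
    for xjournal in am.journals(collection='scl'):  # 'scl' is an assumed collection code
        print(xjournal)
        break  # only the first journal is needed for this sketch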
| 29.496674
| 123
| 0.42622
| 12,563
| 0.944373
| 1,470
| 0.110501
| 940
| 0.070661
| 0
| 0
| 1,751
| 0.131624
|
a5be28a44a12bd589d156a3a7d0bbad6c6678d9a
| 6,705
|
py
|
Python
|
src/pypsr.py
|
wagglefoot/TVAE
|
74f8c5413d3c0d8607af50ddb0d96c4c2d477261
|
[
"MIT"
] | 22
|
2015-03-14T04:23:00.000Z
|
2022-03-24T03:29:22.000Z
|
src/pypsr.py
|
wagglefoot/TVAE
|
74f8c5413d3c0d8607af50ddb0d96c4c2d477261
|
[
"MIT"
] | null | null | null |
src/pypsr.py
|
wagglefoot/TVAE
|
74f8c5413d3c0d8607af50ddb0d96c4c2d477261
|
[
"MIT"
] | 15
|
2015-02-04T13:09:27.000Z
|
2022-03-24T03:29:24.000Z
|
from operator import sub
import numpy as np
from sklearn import metrics
from sklearn.neighbors import NearestNeighbors
from toolz import curry
def global_false_nearest_neighbors(x, lag, min_dims=1, max_dims=10, **cutoffs):
"""
Across a range of embedding dimensions $d$, embeds $x(t)$ with lag $\tau$, finds all nearest neighbors,
    and computes the percentage of neighbors that remain neighbors when an additional dimension is unfolded.
See [1] for more information.
Parameters
----------
x : array-like
        Original signal $x(t)$.
lag : int
Time lag $\tau$ in units of the sampling time $h$ of $x(t)$.
min_dims : int, optional
The smallest embedding dimension $d$ to test.
max_dims : int, optional
The largest embedding dimension $d$ to test.
relative_distance_cutoff : float, optional
The cutoff for determining neighborliness,
in distance increase relative to the original distance between neighboring points.
The default, 15, is suggested in [1] (p. 41).
relative_radius_cutoff : float, optional
The cutoff for determining neighborliness,
in distance increase relative to the radius of the attractor.
The default, 2, is suggested in [1] (p. 42).
Returns
-------
dims : ndarray
The tested dimensions $d$.
gfnn : ndarray
The percentage of nearest neighbors that are false neighbors at each dimension.
See Also
--------
reconstruct
References
----------
[1] Arbanel, H. D. (1996). *Analysis of Observed Chaotic Data* (pp. 40-43). New York: Springer.
"""
x = _vector(x)
dimensions = np.arange(min_dims, max_dims + 1)
false_neighbor_pcts = np.array([_gfnn(x, lag, n_dims, **cutoffs) for n_dims in dimensions])
return dimensions, false_neighbor_pcts
def _gfnn(x, lag, n_dims, **cutoffs):
# Global false nearest neighbors at a particular dimension.
# Returns percent of all nearest neighbors that are still neighbors when the next dimension is unfolded.
# Neighbors that can't be embedded due to lack of data are not counted in the denominator.
offset = lag*n_dims
is_true_neighbor = _is_true_neighbor(x, _radius(x), offset)
return np.mean([
not is_true_neighbor(indices, distance, **cutoffs)
for indices, distance in _nearest_neighbors(reconstruct(x, lag, n_dims))
if (indices + offset < x.size).all()
])
def _radius(x):
# Per Arbanel (p. 42):
# "the nominal 'radius' of the attractor defined as the RMS value of the data about its mean."
return np.sqrt(((x - x.mean())**2).mean())
@curry
def _is_true_neighbor(
x, attractor_radius, offset, indices, distance,
relative_distance_cutoff=15,
relative_radius_cutoff=2
):
distance_increase = np.abs(sub(*x[indices + offset]))
return (distance_increase / distance < relative_distance_cutoff and
distance_increase / attractor_radius < relative_radius_cutoff)
def _nearest_neighbors(y):
"""
Wrapper for sklearn.neighbors.NearestNeighbors.
Yields the indices of the neighboring points, and the distance between them.
"""
distances, indices = NearestNeighbors(n_neighbors=2, algorithm='kd_tree').fit(y).kneighbors(y)
for distance, index in zip(distances, indices):
yield index, distance[1]
def reconstruct(x, lag, n_dims):
"""Phase-space reconstruction.
Given a signal $x(t)$, dimensionality $d$, and lag $\tau$, return the reconstructed signal
\[
\mathbf{y}(t) = [x(t), x(t + \tau), \ldots, x(t + (d - 1)\tau)].
\]
Parameters
----------
x : array-like
Original signal $x(t)$.
lag : int
Time lag $\tau$ in units of the sampling time $h$ of $x(t)$.
n_dims : int
Embedding dimension $d$.
Returns
-------
ndarray
$\mathbf{y}(t)$ as an array with $d$ columns.
"""
x = _vector(x)
if lag * (n_dims - 1) >= x.shape[0] // 2:
raise ValueError('longest lag cannot be longer than half the length of x(t)')
lags = lag * np.arange(n_dims)
    return np.vstack([x[shift:shift - lags[-1] or None] for shift in lags]).transpose()
def ami(x, y=None, n_bins=10):
"""Calculate the average mutual information between $x(t)$ and $y(t)$.
Parameters
----------
x : array-like
y : array-like, optional
$x(t)$ and $y(t)$.
If only `x` is passed, it must have two columns;
the first column defines $x(t)$ and the second $y(t)$.
n_bins : int
The number of bins to use when computing the joint histogram.
Returns
-------
scalar
Average mutual information between $x(t)$ and $y(t)$, in nats (natural log equivalent of bits).
See Also
--------
lagged_ami
References
----------
Arbanel, H. D. (1996). *Analysis of Observed Chaotic Data* (p. 28). New York: Springer.
"""
x, y = _vector_pair(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('timeseries must have the same length')
return metrics.mutual_info_score(None, None, contingency=np.histogram2d(x, y, bins=n_bins)[0])
def lagged_ami(x, min_lag=0, max_lag=None, lag_step=1, n_bins=10):
"""Calculate the average mutual information between $x(t)$ and $x(t + \tau)$, at multiple values of $\tau$.
Parameters
----------
x : array-like
$x(t)$.
min_lag : int, optional
The shortest lag to evaluate, in units of the sampling period $h$ of $x(t)$.
max_lag : int, optional
The longest lag to evaluate, in units of $h$.
lag_step : int, optional
The step between lags to evaluate, in units of $h$.
n_bins : int
The number of bins to use when computing the joint histogram in order to calculate mutual information.
See |ami|.
Returns
-------
lags : ndarray
The evaluated lags $\tau_i$, in units of $h$.
amis : ndarray
The average mutual information between $x(t)$ and $x(t + \tau_i)$.
See Also
--------
ami
"""
if max_lag is None:
max_lag = x.shape[0]//2
lags = np.arange(min_lag, max_lag, lag_step)
amis = [ami(reconstruct(x, lag, 2), n_bins=n_bins) for lag in lags]
return lags, np.array(amis)
def _vector_pair(a, b):
a = np.squeeze(a)
if b is None:
if a.ndim != 2 or a.shape[1] != 2:
            raise ValueError('with one input, array must be 2D with two columns')
a, b = a[:, 0], a[:, 1]
return a, np.squeeze(b)
def _vector(x):
x = np.squeeze(x)
if x.ndim != 1:
raise ValueError('x(t) must be a 1-dimensional signal')
return x
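# --- Illustrative usage (added sketch, not part of the original module) ---
# Typical workflow on a synthetic signal: pick a lag from the lagged AMI curve,
# check the embedding dimension with global false nearest neighbors, then
# reconstruct the phase space. All parameter values here are examples.
if __name__ == '__main__':
    t = np.linspace(0, 20 * np.pi, 2000)
    x = np.sin(t) + 0.05 * np.random.randn(t.size)  # noisy sine wave as x(t)
    lag_values, amis = lagged_ami(x, min_lag=0, max_lag=100)
    lag = int(lag_values[np.argmin(amis)])          # crude lag choice for the sketch
    dims, gfnn = global_false_nearest_neighbors(x, lag, min_dims=1, max_dims=5)
    y = reconstruct(x, lag, n_dims=2)
    print(lag, dims, gfnn, y.shape)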
| 31.186047
| 113
| 0.631022
| 0
| 0
| 360
| 0.053691
| 363
| 0.054139
| 0
| 0
| 4,197
| 0.625951
|
a5bef664ecd325ec7c754416c8cb289908db04d1
| 2,026
|
py
|
Python
|
tests/test_fetching_info_from_websites.py
|
antoniodimariano/websites_metrics_collector
|
5113a680612b126005ac7f9f52ed35d26b806ea0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fetching_info_from_websites.py
|
antoniodimariano/websites_metrics_collector
|
5113a680612b126005ac7f9f52ed35d26b806ea0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fetching_info_from_websites.py
|
antoniodimariano/websites_metrics_collector
|
5113a680612b126005ac7f9f52ed35d26b806ea0
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from unittest import IsolatedAsyncioTestCase
from websites_metrics_collector.communication import webpages_fetcher
class Test(IsolatedAsyncioTestCase):
"""
This Class tests the fetch_list_of_urls() async method used to fetch URLs
"""
async def test_a_fetch_with_valid_list_of_url(self):
urls_to_fetch = [('http://motoguzzi.com', ['twitter', 'Antonio']), ('http://ducati.com', ['twitter', 'url']),
('http://ferrari.com', ['twitter', 'url'])]
ret = await webpages_fetcher.fetch_list_of_urls(list_of_urls=urls_to_fetch)
self.assertIsInstance(ret,list)
self.assertEqual(len(ret),3)
self.assertEqual(ret[0].url,'http://motoguzzi.com')
self.assertEqual(ret[0].http_status,200)
self.assertIsInstance(ret[0].elapsed_time,float)
self.assertIsInstance(ret[0].pattern_verified,bool)
async def test_b_fetch_with_valid_list_of_url(self):
urls_to_fetch = [('http://motoguzzi.com', ['twitter', 'Antonio'])]
ret = await webpages_fetcher.fetch_list_of_urls(list_of_urls=urls_to_fetch)
self.assertIsInstance(ret,list)
self.assertEqual(len(ret),1)
self.assertEqual(ret[0].url,'http://motoguzzi.com')
self.assertEqual(ret[0].http_status,200)
self.assertIsInstance(ret[0].elapsed_time,float)
self.assertIsInstance(ret[0].pattern_verified,bool)
@unittest.skip
async def test_c_fetch_with_valid_list_of_url(self):
urls_to_fetch = [('http://sjsjsjjsjsjsjsj.com', ['twitter', 'Antonio'])]
ret = await webpages_fetcher.fetch_list_of_urls(list_of_urls=urls_to_fetch)
self.assertIsInstance(ret,list)
self.assertEqual(len(ret),1)
self.assertEqual(ret[0].url,'http://sjsjsjjsjsjsjsj.com')
self.assertEqual(ret[0].http_status,403)
self.assertIsInstance(ret[0].elapsed_time,float)
self.assertIsInstance(ret[0].pattern_verified,bool)
self.assertEqual(ret[0].pattern_verified,False)
| 44.043478
| 117
| 0.695953
| 1,892
| 0.93386
| 0
| 0
| 602
| 0.297137
| 1,725
| 0.851431
| 354
| 0.174729
|
a5bef6fa512a2ff46684cc9ce0bb82ae7685d3ba
| 773
|
py
|
Python
|
planegeometry/structures/tests/random_segments.py
|
ufkapano/planegeometry
|
fa9309a4e867acedd635665f32d7f59a8eeaf2e3
|
[
"BSD-3-Clause"
] | null | null | null |
planegeometry/structures/tests/random_segments.py
|
ufkapano/planegeometry
|
fa9309a4e867acedd635665f32d7f59a8eeaf2e3
|
[
"BSD-3-Clause"
] | null | null | null |
planegeometry/structures/tests/random_segments.py
|
ufkapano/planegeometry
|
fa9309a4e867acedd635665f32d7f59a8eeaf2e3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import random
import Gnuplot # Python 2 only
from planegeometry.structures.points import Point
from planegeometry.structures.segments import Segment
gnu = Gnuplot.Gnuplot(persist=1)
visible = True
for i in range(10):
segment = Segment(random.random(), random.random(),
random.random(), random.random())
gnu(segment.gnu(visible))
# Render the plot to a PDF file.
gnu('set terminal pdf enhanced')
gnu('set output "random_segments.pdf"')
gnu('set grid')
gnu('unset key')
gnu('set size square')
#gnu('unset border')
#gnu('unset tics')
gnu('set xlabel "x"')
gnu('set ylabel "y"')
gnu('set title "Random segments"')
gnu('set xrange [{}:{}]'.format(0, 1))
gnu('set yrange [{}:{}]'.format(0, 1))
gnu.plot('NaN title ""')
gnu('unset output')
# EOF
| 23.424242
| 55
| 0.684347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 324
| 0.419146
|
3c0172a4b6c39d5c3838a7e6ee2dd86d14d618b0
| 77
|
py
|
Python
|
proxy/admin.py
|
jokajak/infinity_tracker
|
21f83925d9899dc25bc58b198426f329a549b0e0
|
[
"Apache-2.0"
] | 1
|
2021-01-21T08:44:21.000Z
|
2021-01-21T08:44:21.000Z
|
proxy/admin.py
|
jokajak/infinity_tracker
|
21f83925d9899dc25bc58b198426f329a549b0e0
|
[
"Apache-2.0"
] | 126
|
2020-08-03T22:07:38.000Z
|
2022-03-28T22:25:59.000Z
|
proxy/admin.py
|
jokajak/infinity_tracker
|
21f83925d9899dc25bc58b198426f329a549b0e0
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin # NOQA: F401
# Register your models here.
| 19.25
| 46
| 0.753247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.519481
|
3c01c3ac689a157ca3b1ed4911d58fd47e935434
| 1,050
|
py
|
Python
|
local/make_fbank.py
|
coolEphemeroptera/AESRC2020
|
b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de
|
[
"Apache-2.0"
] | 35
|
2020-09-26T13:40:16.000Z
|
2022-03-22T19:42:20.000Z
|
local/make_fbank.py
|
coolEphemeroptera/ARNet
|
b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de
|
[
"Apache-2.0"
] | 4
|
2021-04-10T13:05:52.000Z
|
2022-03-14T03:22:32.000Z
|
local/make_fbank.py
|
coolEphemeroptera/ARNet
|
b64cdeeaaf74e8c1a741930b3a47dc8dcadca8de
|
[
"Apache-2.0"
] | 7
|
2020-09-26T15:52:45.000Z
|
2021-06-11T05:05:23.000Z
|
import python_speech_features as psf
import soundfile as sf
# import scipy.io.wavfile as wav
import pickle as pkl
import sys
import os
import re
# Convert a Linux-style path to a Windows-style path
def path_lin2win(path):
pattern = "/[a-z]/"
position = re.findall(pattern,path)[0][1].upper()
return re.sub(pattern,"%s:/"%position,path)
# Save data to a pickle file
def save(data,path):
f = open(path,"wb")
pkl.dump(data,f)
f.close()
def path2utt(path):
return path.split('/')[-1].split('.')[0]
def fbank(path):
# path = path_lin2win(path) # windows path
y,sr = sf.read(path)
mel = psf.fbank(y,samplerate=sr,nfilt=80)[0]
return mel
if __name__ == "__main__":
audio_file = sys.argv[1]
# audio_file = r"E:/LIBRISPEECH/LibriSpeech/dev/dev-clean/1272/128104/1272-128104-0000.flac"
out_file = sys.argv[2]
dir = os.path.dirname(out_file)
    if not os.path.isdir(dir): os.mkdir(dir)
mel = fbank(audio_file)
save(mel,out_file)
print(path2utt(out_file),mel.shape[0])
exit()
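# Example invocation (added note; paths are placeholders):
#   python make_fbank.py /path/to/utterance.flac /path/to/fbank/utterance.pkl
# Prints "<utt-id> <num-frames>" and writes the 80-channel fbank matrix as a pickle.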
| 23.863636
| 97
| 0.631429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 251
| 0.23546
|
3c0299abc0c111e544b5842dcd9b42f82f6088c5
| 1,344
|
py
|
Python
|
tests/__init__.py
|
jun-kai-xin/douban
|
989a797de467f5a9a8b77a05fa8242bebf657a51
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
jun-kai-xin/douban
|
989a797de467f5a9a8b77a05fa8242bebf657a51
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
jun-kai-xin/douban
|
989a797de467f5a9a8b77a05fa8242bebf657a51
|
[
"MIT"
] | null | null | null |
def fake_response_from_file(file_name, url=None, meta=None):
import os
import codecs
from scrapy.http import HtmlResponse, Request
if not url:
url = 'http://www.example.com'
    _meta = {'mid': 1291844, 'login': False}  # required meta fields; any placeholder values will do
if meta:
meta.update(_meta)
else:
meta = _meta
request = Request(url=url, meta=meta)
if not file_name[0] == '/':
responses_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(responses_dir, file_name)
else:
file_path = file_name
with codecs.open(file_path, 'r', 'utf-8') as f:
file_content = f.read()
response = HtmlResponse(url=url,
encoding='utf-8',
request=request,
body=file_content)
return response
def fake_response_from_url(url, headers=None, meta=None):
import requests
from scrapy.http import HtmlResponse, Request
resp = requests.get(url, headers=headers)
    _meta = {'mid': 1291844, 'login': False}  # required meta fields; any placeholder values will do
if meta:
meta.update(_meta)
else:
meta = _meta
return HtmlResponse(url=url, status=resp.status_code, body=resp.text,
encoding='utf-8', request=Request(url=url, meta=meta))
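# --- Illustrative self-check (added sketch, not part of the original helpers) ---
# Builds a fake response from a live page; requires network access, and the URL is
# only an example (movie id 1291844 matches the _meta used above).
if __name__ == '__main__':
    resp = fake_response_from_url('https://movie.douban.com/subject/1291844/')
    print(resp.status, resp.url)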
| 28.595745
| 78
| 0.590774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 163
| 0.116429
|
3c02f34d8d7c7f266cdc6308a85575de226c48f6
| 2,703
|
py
|
Python
|
src/tests/test_pyning/test_combinationdict.py
|
essennell/pyning
|
c28d8fae99ab6cb4394960b72565a4915aee7adc
|
[
"MIT"
] | null | null | null |
src/tests/test_pyning/test_combinationdict.py
|
essennell/pyning
|
c28d8fae99ab6cb4394960b72565a4915aee7adc
|
[
"MIT"
] | 3
|
2020-03-24T16:25:58.000Z
|
2021-06-01T22:57:53.000Z
|
src/tests/test_pyning/test_combinationdict.py
|
essennell/pyning
|
c28d8fae99ab6cb4394960b72565a4915aee7adc
|
[
"MIT"
] | null | null | null |
from pyning.combinationdict import CombinationDict
import pytest
def test_key_at_root_is_located():
items = CombinationDict( '/', { 'a': 10 } )
assert items[ 'a' ] == 10
def test_key_nested_1_level_is_located():
items = CombinationDict( '/', { 'a': { 'b': 10 } } )
assert items[ 'a/b' ] == 10
def test_escaped_separator_is_used_as_direct_key():
items = CombinationDict( '.', { 'a': { 'b\\.c': { 'd': 10 } } } )
assert items[ 'a.b\\.c.d' ] == 10
def test_nested_value_can_be_updated():
items = CombinationDict( '.', { 'a': { 'b': { 'c': 10 } } } )
items[ 'a.b.c' ] = 100
assert items[ 'a' ][ 'b' ][ 'c' ] == 100
def test_item_value_can_be_a_list():
items = CombinationDict( '.', { 'a': [ 1, 2 ] } )
assert items[ 'a' ][ 0 ] == 1
def test_nested_item_can_be_a_list():
items = CombinationDict( '.', { 'a': { 'b': [ 1, 2 ] } } )
assert items[ 'a.b' ][ 0 ] == 1
def test_nested_dict_can_be_updated():
items = CombinationDict( '.', { 'a': { 'b': 10, 'c': 20 } } )
items.update( { 'a': { 'b': 100 } } )
assert items[ 'a.b' ] == 100
assert items[ 'a.c' ] == 20
def test_nested_dict_can_be_updated_from_tuple():
items = CombinationDict( '.', { 'a': { 'b': 10 } } )
items[ 'a' ].update( [ ( 'c', 100 ), ( 'e', 1 ) ] )
assert items[ 'a.c' ] == 100
assert items[ 'a.e' ] == 1
def test_update_respects_nesting_notation():
items = CombinationDict( '.', { 'a': { 'b': 10 } } )
items.update( { 'a.b': 100 } )
assert items[ 'a' ][ 'b' ] == items[ 'a.b' ]
assert 'a.b' not in set( items.keys() )
def test_separator_for_nesting_can_be_escaped():
items = CombinationDict( '.', { 'a': { 'b': 10 } } )
items[ r'a\.b' ] = 100
assert items[ 'a.b' ] == 10
def test_attribute_is_found_if_set():
items = CombinationDict( '.', { 'a': 10 } )
assert items.a == 10
def test_nested_attribute_is_found_with_same_syntax():
items = CombinationDict( '/', { 'a': { 'x': { 'y': 'hello' } } } )
assert items.a.x.y == 'hello'
def test_attribute_name_can_contain_spaces():
items = CombinationDict( '.', { 'a b': 'hello' } )
assert items[ 'a b' ] == 'hello'
def test_attribute_name_can_contain_other_chars():
items = CombinationDict( '.', { 'a_b': 'hello' } )
assert items.a_b == 'hello'
def test_calling_get_method_raises_no_exceptions():
items = CombinationDict( '.', { } )
assert items.get( 'a' ) == None
def test_can_convert_to_a_real_dict_of_nested_dicts():
items = CombinationDict( '.', { 'a': '10', 'b': { 'c': 100 } } )
assert isinstance( items, dict )
assert isinstance( items[ 'b' ], dict )
if __name__ == '__main__':
pytest.main()
| 27.865979
| 70
| 0.574547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 324
| 0.119867
|
3c045b5de4e55fe90b3f8563b224a0193ac2dff7
| 6,917
|
py
|
Python
|
stockBOT/Discord/fc_info.py
|
Chenct-jonathan/LokiHub
|
7193589151e88f4e66aee6457926e565d0023fa1
|
[
"MIT"
] | 17
|
2020-11-25T07:40:18.000Z
|
2022-03-07T03:29:18.000Z
|
stockBOT/Discord/fc_info.py
|
Chenct-jonathan/LokiHub
|
7193589151e88f4e66aee6457926e565d0023fa1
|
[
"MIT"
] | 8
|
2020-12-18T13:23:59.000Z
|
2021-10-03T21:41:50.000Z
|
stockBOT/Discord/fc_info.py
|
Chenct-jonathan/LokiHub
|
7193589151e88f4e66aee6457926e565d0023fa1
|
[
"MIT"
] | 43
|
2020-12-02T09:03:57.000Z
|
2021-12-23T03:30:25.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import requests
from requests import post
from requests import codes
def information(symbol):
URL = "https://goodinfo.tw/StockInfo/StockDetail.asp?STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html =BeautifulSoup(r.content, "html.parser")
result_infoDICT = {}
table = html.findAll("table")[40]
table_row_name=table.findAll("tr")[1]
td_name = table_row_name.findAll("td")[1]
name = td_name.text
result_infoDICT["name"] = name
table_row_industry=table.findAll("tr")[2]
td_industry=table_row_industry.findAll("td")[1]
industry=td_industry.text
result_infoDICT["industry"] = industry
table_row_value=table.findAll("tr")[4]
td_value = table_row_value.findAll("td")[3]
value = td_value.text
result_infoDICT["value"] = value
table_row_business=table.findAll("tr")[14]
td_business = table_row_business.findAll("td")[0]
business = td_business.text
result_infoDICT["business"] = business
return result_infoDICT
def growth(symbol):
URL = "https://goodinfo.tw/StockInfo/StockFinDetail.asp?RPT_CAT=XX_M_QUAR_ACC&STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html =BeautifulSoup(r.content, "html.parser")
result_growthDICT = {}
table = html.findAll("table")[16]
table_row_quarter=table.findAll("tr")[0]
th_quarter = table_row_quarter.findAll("th")[1]
quarter = th_quarter.text
result_growthDICT["quarter"] = quarter
table_row_revenue=table.findAll("tr")[14]
td_revenue = table_row_revenue.findAll("td")[1]
revenue_YOY = td_revenue.text
result_growthDICT["revenue_YOY"] = revenue_YOY
table_row_gross_profit = table.findAll("tr")[15]
td_gross_profit = table_row_gross_profit.findAll("td")[1]
gross_profit_YOY = td_gross_profit.text
result_growthDICT["gross_profit_YOY"] = gross_profit_YOY
table_row_operating_income=table.findAll("tr")[16]
td_operating_income = table_row_operating_income.findAll("td")[1]
operating_income_YOY = td_operating_income.text
result_growthDICT["operating_income_YOY"] = operating_income_YOY
table_row_NIBT=table.findAll("tr")[17]
td_NIBT = table_row_NIBT.findAll("td")[1]
NIBT_YOY = td_NIBT.text
result_growthDICT["NIBT_YOY"] = NIBT_YOY
table_row_NI=table.findAll("tr")[18]
td_NI = table_row_NI.findAll("td")[1]
NI_YOY = td_NI.text
result_growthDICT["NI_YOY"] = NI_YOY
table_row_EPS=table.findAll("tr")[20]
td_EPS = table_row_EPS.findAll("td")[1]
EPS_YOY = td_EPS.text
result_growthDICT["EPS_YOY"] = EPS_YOY
table_row_total_assets_growth=table.findAll("tr")[50]
td_total_assets_growth = table_row_total_assets_growth.findAll("td")[1]
total_assets_growth = td_total_assets_growth.text
result_growthDICT["total_assets_growth"] = total_assets_growth
return result_growthDICT
def profitability(symbol):
URL = "https://goodinfo.tw/StockInfo/StockFinDetail.asp?RPT_CAT=XX_M_QUAR_ACC&STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html =BeautifulSoup(r.content, "html.parser")
result_profitabilityDICT = {}
table = html.findAll("table")[16]
table_row_quarter=table.findAll("tr")[0]
th_quarter = table_row_quarter.findAll("th")[1]
quarter = th_quarter.text
result_profitabilityDICT["quarter"] = quarter
table_row_GPM=table.findAll("tr")[1]
td_GPM = table_row_GPM.findAll("td")[1]
GPM = td_GPM.text
result_profitabilityDICT["GPM"] = GPM
table_row_OPM=table.findAll("tr")[2]
td_OPM = table_row_OPM.findAll("td")[1]
OPM = td_OPM.text
result_profitabilityDICT["OPM"] = OPM
table_row_PTPM=table.findAll("tr")[3]
td_PTPM = table_row_PTPM.findAll("td")[1]
PTPM = td_PTPM.text
result_profitabilityDICT["PTPM"] = PTPM
table_row_NPM=table.findAll("tr")[4]
td_NPM = table_row_NPM.findAll("td")[1]
NPM = td_NPM.text
result_profitabilityDICT["NPM"] = NPM
table_row_EPS=table.findAll("tr")[7]
td_EPS = table_row_EPS.findAll("td")[1]
EPS = td_EPS.text
result_profitabilityDICT["EPS"] = EPS
table_row_NASPS=table.findAll("tr")[8]
td_NASPS = table_row_NASPS.findAll("td")[1]
NASPS = td_NASPS.text
result_profitabilityDICT["NASPS"] = NASPS
table_row_ROW=table.findAll("tr")[9]
td_ROE = table_row_ROW.findAll("td")[1]
ROE = td_ROE.text
result_profitabilityDICT["ROE"] = ROE
table_row_ROA=table.findAll("tr")[11]
td_ROA = table_row_ROA.findAll("td")[1]
ROA = td_ROA.text
result_profitabilityDICT["ROA"] = ROA
return result_profitabilityDICT
def safety(symbol):
URL = "https://goodinfo.tw/StockInfo/StockFinDetail.asp?RPT_CAT=XX_M_QUAR_ACC&STOCK_ID="+ symbol
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
r = requests.post(url=URL,headers=headers)
html =BeautifulSoup(r.content, "html.parser")
result_safetyDICT = {}
table = html.findAll("table")[16]
table_row_quarter=table.findAll("tr")[75]
th_quarter = table_row_quarter.findAll("td")[1]
quarter = th_quarter.text
result_safetyDICT["quarter"] = quarter
table_row_CR=table.findAll("tr")[76]
td_CR = table_row_CR.findAll("td")[1]
CR = td_CR.text
result_safetyDICT["CR"] = CR
table_row_QR=table.findAll("tr")[77]
td_QR = table_row_QR.findAll("td")[1]
QR = td_QR.text
result_safetyDICT["QR"] = QR
table_row_current_ratio=table.findAll("tr")[78]
td_current_ratio = table_row_current_ratio.findAll("td")[1]
current_ratio = td_current_ratio.text
result_safetyDICT["current_ratio"] = current_ratio
table_row_ICR=table.findAll("tr")[79]
td_ICR = table_row_ICR.findAll("td")[1]
ICR = td_ICR.text
result_safetyDICT["ICR"] = ICR
table_row_OCFR=table.findAll("tr")[80]
td_OCFR = table_row_OCFR.findAll("td")[1]
OCFR = td_OCFR.text
result_safetyDICT["OCFR"] = OCFR
table_row_DR=table.findAll("tr")[56]
td_DR = table_row_DR.findAll("td")[1]
DR = td_DR.text
result_safetyDICT["DR"] = DR
return result_safetyDICT
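# --- Illustrative usage (added sketch, not part of the original module) ---
# '2330' is an example Taiwan stock symbol; network access to goodinfo.tw is required
# and the scraped table indices above may break if the site layout changes.
if __name__ == "__main__":
    print(information('2330'))
    print(growth('2330'))
    print(profitability('2330'))
    print(safety('2330'))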
| 32.474178
| 134
| 0.675293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,357
| 0.196183
|
3c062192bd225720274ca7e3b61333f806b3a7b1
| 6,781
|
py
|
Python
|
tests/constants.py
|
phihos/Python-OpenVPN-LDAP-Auth
|
87dd986f49555d0fb50ad8d991cf02092a9d55dc
|
[
"MIT"
] | 1
|
2021-12-17T14:54:36.000Z
|
2021-12-17T14:54:36.000Z
|
tests/constants.py
|
phihos/python-openvpn-ldap-auth
|
87dd986f49555d0fb50ad8d991cf02092a9d55dc
|
[
"MIT"
] | null | null | null |
tests/constants.py
|
phihos/python-openvpn-ldap-auth
|
87dd986f49555d0fb50ad8d991cf02092a9d55dc
|
[
"MIT"
] | null | null | null |
import os
import shutil
from datetime import datetime
# INPUT PARAMS
LDAP_URL = os.environ['TEST_LDAP_URL']
LDAP_BASE_DN = os.environ['TEST_LDAP_BASE_DN']
LDAP_ADMIN_DN = os.environ['TEST_LDAP_ADMIN_DN']
LDAP_ADMIN_PASSWORD = os.environ['TEST_LDAP_ADMIN_PASSWORD']
LDAP_BIND_TIMEOUT = os.environ.get('TEST_LDAP_BIND_TIMEOUT', 5)
OPENVPN_SERVER_START_TIMEOUT = os.environ.get('TEST_OPENVPN_SERVER_START_TIMEOUT', 5)
OPENVPN_CLIENT_CONNECT_TIMEOUT = os.environ.get('TEST_OPENVPN_CLIENT_CONNECT_TIMEOUT', 2)
TEST_TIMEOUT = os.environ.get('TEST_TIMEOUT', 10)
TEST_PROMPT_DEFAULT_TIMEOUT = os.environ.get('TEST_PROMPT_DEFAULT_TIMEOUT', 3)
OPENVPN_BINARY = os.environ.get('TEST_OPENVPN_BINARY', shutil.which('openvpn'))
PYTHON_VERSION = os.environ.get('python_version', 'please set "python_version" in the env vars')
OPENVPN_VERSION = os.environ.get('openvpn_version', 'please set "openvpn_version" in the env vars')
# PATHS
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
AUTH_SCRIPT_PATH = shutil.which('openvpn-ldap-auth')
AUTH_SCRIPT_PATH_PYINSTALLER = shutil.which('openvpn-ldap-auth-pyinstaller')
BENCHMARK_DIR = os.path.join(
SCRIPT_DIR, os.pardir, 'benchmark',
f"python{PYTHON_VERSION}-openvpn{OPENVPN_VERSION}-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
)
# CONSTANTS: SERVER SETUP
OPENVPN_SERVER_PORT = 1194
OPENVPN_SERVER_DH_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'dh2048.pem'))
OPENVPN_SERVER_CA_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'ca.crt'))
OPENVPN_SERVER_CERT_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'server.crt'))
OPENVPN_SERVER_KEY_FILE = os.path.realpath(os.path.join(SCRIPT_DIR, 'resources', 'server', 'server.key'))
OPENVPN_SERVER_CHALLENGE_RESPONSE_PROMPT = 'Enter challenge response'
OPENVPN_SERVER_LDAP_CONFIG_PATH = '/etc/openvpn/ldap.yaml'
OPENVPN_SERVER_LDAP_C_CONFIG_PATH = '/etc/openvpn/ldap.conf'
# CONSTANTS: CMD ARGS
OPENVPN_SERVER_ARGS = ['--mode', 'server', '--server', '10.5.99.0', '255.255.255.0', '--dev', 'tun', '--port',
str(OPENVPN_SERVER_PORT), '--verb', '4', '--keepalive', '10', '120',
'--verify-client-cert', 'none', '--tls-server', '--dh',
OPENVPN_SERVER_DH_FILE, '--ca', OPENVPN_SERVER_CA_FILE, '--cert',
OPENVPN_SERVER_CERT_FILE, '--key', OPENVPN_SERVER_KEY_FILE, '--script-security', '3', '--user',
'root', '--group', 'root', '--duplicate-cn', '--max-clients', '1000', '--status',
'openvpn-status.log', '--topology', 'subnet']
OPENVPN_SERVER_ARGS_VIA_FILE = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify', AUTH_SCRIPT_PATH,
'via-file']
OPENVPN_SERVER_ARGS_VIA_ENV = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify', AUTH_SCRIPT_PATH,
'via-env']
OPENVPN_SERVER_ARGS_VIA_FILE_PYINSTALLER = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify',
AUTH_SCRIPT_PATH_PYINSTALLER,
'via-file']
OPENVPN_SERVER_ARGS_VIA_ENV_PYINSTALLER = OPENVPN_SERVER_ARGS + ['--auth-user-pass-verify',
AUTH_SCRIPT_PATH_PYINSTALLER,
'via-env']
OPENVPN_SERVER_ARGS_C_PLUGIN = OPENVPN_SERVER_ARGS + ['--plugin', '/usr/lib/openvpn/openvpn-auth-ldap.so',
OPENVPN_SERVER_LDAP_C_CONFIG_PATH, 'login',
'--username-as-common-name']
OPENVPN_CLIENT_ARGS = (
'--client', '--dev', 'tun', '--verb', '5', '--proto', 'udp', '--remote', '127.0.0.1',
str(OPENVPN_SERVER_PORT),
'--nobind', '--ifconfig-noexec', '--route-noexec', '--route-nopull', '--ca', OPENVPN_SERVER_CA_FILE,
'--auth-user-pass', '--explicit-exit-notify', '1', '--keepalive', '10', '120',
)
OPENVPN_CLIENT_ARGS_WITH_CHALLENGE = OPENVPN_CLIENT_ARGS + ('--static-challenge',
OPENVPN_SERVER_CHALLENGE_RESPONSE_PROMPT, '1')
OPENVPN_CLIENT_ARGS_WITHOUT_CHALLENGE = OPENVPN_CLIENT_ARGS
# CONSTANTS: ldap.yaml CONFIGS
CONFIG_BASE = {
'ldap': {
'url': LDAP_URL,
'bind_dn': LDAP_ADMIN_DN,
'password': LDAP_ADMIN_PASSWORD,
},
'authorization': {
'base_dn': LDAP_BASE_DN,
'search_filter': '(uid={})'
}
}
CONFIG_CHALLENGE_RESPONSE_APPEND = {**CONFIG_BASE, **{
'authorization': {
'base_dn': LDAP_BASE_DN,
'static_challenge': 'append',
}
}}
CONFIG_CHALLENGE_RESPONSE_PREPEND = {**CONFIG_BASE, **{
'authorization': {
'base_dn': LDAP_BASE_DN,
'static_challenge': 'prepend',
}
}}
CONFIG_CHALLENGE_RESPONSE_IGNORE = {**CONFIG_BASE, **{
'authorization': {
'base_dn': LDAP_BASE_DN,
'static_challenge': 'ignore',
}
}}
CONFIG_C = f"""<LDAP>
URL "{LDAP_URL}"
BindDN {LDAP_ADMIN_DN}
Password {LDAP_ADMIN_PASSWORD}
Timeout 15
TLSEnable no
FollowReferrals yes
</LDAP>
<Authorization>
BaseDN "{LDAP_BASE_DN}"
SearchFilter "(uid=%u)"
RequireGroup false
<Group>
BaseDN "{LDAP_BASE_DN}"
SearchFilter "(|(cn=developers)(cn=artists))"
MemberAttribute member
</Group>
</Authorization>
"""
# CONSTANTS: TEST CREDENTIALS
TEST_USERNAME = 'testuser'
TEST_USER_DN_TEMPLATE = "uid={},{}"
TEST_USER_DN = TEST_USER_DN_TEMPLATE.format(TEST_USERNAME, LDAP_BASE_DN)
TEST_USER_PASSWORD = 'testpass'
TEST_USER_WRONG_PASSWORD = 'wrong_password'
# CONSTANTS: EXPECTED OPENVPN LOG FRAGMENTS
OPENVPN_LOG_SERVER_INIT_COMPLETE = 'Initialization Sequence Completed'
OPENVPN_LOG_CLIENT_INIT_COMPLETE = 'Initialization Sequence Completed'
OPENVPN_LOG_AUTH_SUCCEEDED_SERVER = 'authentication succeeded for username'
OPENVPN_LOG_AUTH_SUCCEEDED_CLIENT = 'Initialization Sequence Completed'
OPENVPN_LOG_AUTH_FAILED_SERVER = 'verification failed for peer'
OPENVPN_LOG_AUTH_FAILED_CLIENT = 'AUTH_FAILED'
# CONSTANTS: BENCHMARK CSV
BENCHMARK_CSV_HEADER_LABEL = 'label'
BENCHMARK_CSV_HEADER_PYTHON = 'python_version'
BENCHMARK_CSV_HEADER_OPENVPN = 'openvpn_version'
BENCHMARK_CSV_HEADER_LOGINS = 'concurrent_logins'
BENCHMARK_CSV_HEADER_MIN = 'min'
BENCHMARK_CSV_HEADER_MAX = 'max'
BENCHMARK_CSV_HEADER_AVG = 'avg'
BENCHMARK_CSV_HEADERS = (BENCHMARK_CSV_HEADER_LABEL, BENCHMARK_CSV_HEADER_PYTHON, BENCHMARK_CSV_HEADER_OPENVPN,
BENCHMARK_CSV_HEADER_LOGINS, BENCHMARK_CSV_HEADER_MIN, BENCHMARK_CSV_HEADER_MAX,
BENCHMARK_CSV_HEADER_AVG)
| 46.765517
| 118
| 0.668191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,612
| 0.385194
|
3c06dc2f7a1273c76e68bacba57d4a3e26a88d66
| 1,377
|
py
|
Python
|
http_utils/recs/top_popular_recommendation_handler.py
|
drayvs/grouple-recsys-production
|
5141bacd5dc64e023059292faff5bfdefefd9f23
|
[
"MIT"
] | null | null | null |
http_utils/recs/top_popular_recommendation_handler.py
|
drayvs/grouple-recsys-production
|
5141bacd5dc64e023059292faff5bfdefefd9f23
|
[
"MIT"
] | null | null | null |
http_utils/recs/top_popular_recommendation_handler.py
|
drayvs/grouple-recsys-production
|
5141bacd5dc64e023059292faff5bfdefefd9f23
|
[
"MIT"
] | null | null | null |
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
from webargs import fields
from webargs.tornadoparser import use_args
from loguru import logger
from http_utils.base import BaseHandler, MAX_THREADS
class TopPopularRecommendationHandler(BaseHandler):
executor = ThreadPoolExecutor(MAX_THREADS)
def initialize(self, **kwargs):
self.loader = kwargs['loader']
super().initialize(**kwargs)
def get_top_popular(self, n):
return self.loader.top_popular[:n]
@run_on_executor()
@use_args({'n_recs': fields.Int(required=False, missing=20)}, location='querystring')
@logger.catch
def get(self, reqargs):
# Returns n top popular items. default n=20
n_recs = reqargs['n_recs']
logger.info(f'topPopular n_recs={n_recs}')
model, mapper = self.get_model_and_mapper()
if mapper is None:
return self.write({'error': 'Model is not ready yet'})
make_item = lambda idx, score: {'itemId': idx, 'siteId': self.config.site_id, 'score': score,
'item_id': idx, 'site_id': self.config.site_id}
items = [make_item(item, None) for item in self.get_top_popular(n_recs)]
return self.write({'isTopPop': 1, 'items': items, 'args': reqargs,
'is_top_pop': 1})
| 37.216216
| 102
| 0.658678
| 1,128
| 0.819172
| 0
| 0
| 833
| 0.604938
| 0
| 0
| 216
| 0.156863
|
3c07a5241ac429798f7ed558bc1d6c02e0ff5253
| 662
|
py
|
Python
|
NucleicAcids/dssrBlock3.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
NucleicAcids/dssrBlock3.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
NucleicAcids/dssrBlock3.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: DSSR block representation for a multi-state example after loading the dssr_block.py script by Thomas Holder. The x3dna-dssr executable needs to be in the PATH. Edit the path to Thomas Holder's block script.
# Source: Generated while helping Miranda Adams at U of Saint Louis.
"""
cmd.do('reinitialize;')
cmd.do('run ${1:"/Users/blaine/.pymol/startup/dssr_block.py"};')
cmd.do('fetch ${2:2n2d}, async=0;')
cmd.do('dssr_block ${2:2n2d}, 0;')
cmd.do('set all_states;')
"""
cmd.do('reinitialize;')
cmd.do('run "/Users/blaine/.pymol/startup/dssr_block.py";')
cmd.do('fetch 2n2d, async=0;')
cmd.do('dssr_block 2n2d, 0;')
cmd.do('set all_states;')
| 38.941176
| 222
| 0.712991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 612
| 0.924471
|
3c091171ce7d459ab7bdf55ac4292ac21cd0a68c
| 12,007
|
py
|
Python
|
custom_components/climate/gree.py
|
ardeus-ua/gree-python-api
|
ecfbdef34ff99fc0822f70be17cdeb6c625fd276
|
[
"MIT"
] | 1
|
2018-12-10T17:32:48.000Z
|
2018-12-10T17:32:48.000Z
|
custom_components/climate/gree.py
|
ardeus-ua/gree-python-api
|
ecfbdef34ff99fc0822f70be17cdeb6c625fd276
|
[
"MIT"
] | null | null | null |
custom_components/climate/gree.py
|
ardeus-ua/gree-python-api
|
ecfbdef34ff99fc0822f70be17cdeb6c625fd276
|
[
"MIT"
] | 1
|
2020-08-11T14:51:04.000Z
|
2020-08-11T14:51:04.000Z
|
import asyncio
import logging
import binascii
import socket
import os.path
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (DOMAIN, ClimateDevice, PLATFORM_SCHEMA, STATE_IDLE, STATE_HEAT, STATE_COOL, STATE_AUTO, STATE_DRY,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, SUPPORT_FAN_MODE, SUPPORT_SWING_MODE)
from homeassistant.const import (ATTR_UNIT_OF_MEASUREMENT, ATTR_TEMPERATURE, CONF_NAME, CONF_HOST, CONF_MAC, CONF_TIMEOUT, CONF_CUSTOMIZE)
from homeassistant.helpers.event import (async_track_state_change)
from homeassistant.core import callback
from homeassistant.helpers.restore_state import RestoreEntity
from configparser import ConfigParser
from base64 import b64encode, b64decode
REQUIREMENTS = ['gree==0.3.2']
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE
CONF_UNIQUE_KEY = 'unique_key'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
CONF_TARGET_TEMP = 'target_temp'
CONF_TEMP_SENSOR = 'temp_sensor'
CONF_OPERATIONS = 'operations'
CONF_FAN_MODES = 'fan_modes'
CONF_SWING_LIST = 'swing_list'
CONF_DEFAULT_OPERATION = 'default_operation'
CONF_DEFAULT_FAN_MODE = 'default_fan_mode'
CONF_DEFAULT_SWING_MODE = 'default_swing_mode'
CONF_DEFAULT_OPERATION_FROM_IDLE = 'default_operation_from_idle'
STATE_FAN = 'fan'
STATE_OFF = 'off'
DEFAULT_NAME = 'GREE AC Climate'
DEFAULT_TIMEOUT = 10
DEFAULT_RETRY = 3
DEFAULT_MIN_TEMP = 16
DEFAULT_MAX_TEMP = 30
DEFAULT_TARGET_TEMP = 20
DEFAULT_OPERATION_LIST = [STATE_OFF, STATE_AUTO, STATE_COOL, STATE_DRY, STATE_FAN, STATE_HEAT]
OPERATION_LIST_MAP = {
STATE_AUTO: 0,
STATE_COOL: 1,
STATE_DRY: 2,
STATE_FAN: 3,
STATE_HEAT: 4,
}
DEFAULT_FAN_MODE_LIST = ['auto', 'low', 'medium-low', 'medium', 'medium-high', 'high']
FAN_MODE_MAP = {
'auto': 0,
'low': 1,
'medium-low': 2,
'medium': 3,
'medium-high': 4,
'high': 5
}
DEFAULT_SWING_LIST = ['default', 'swing-full-range', 'fixed-up', 'fixed-middle', 'fixed-down', 'swing-up', 'swing-middle', 'swing-down']
SWING_MAP = {
'default': 0,
'swing-full-range': 1,
'fixed-up': 2,
'fixed-middle': 4,
'fixed-down': 6,
'swing-up': 11,
'swing-middle': 9,
'swing-down': 7
}
DEFAULT_OPERATION = 'idle'
DEFAULT_FAN_MODE = 'auto'
DEFAULT_SWING_MODE = 'default'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): cv.string,
vol.Required(CONF_UNIQUE_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): cv.positive_int,
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): cv.positive_int,
vol.Optional(CONF_TARGET_TEMP, default=DEFAULT_TARGET_TEMP): cv.positive_int,
vol.Optional(CONF_TEMP_SENSOR): cv.entity_id,
vol.Optional(CONF_DEFAULT_OPERATION, default=DEFAULT_OPERATION): cv.string,
vol.Optional(CONF_DEFAULT_FAN_MODE, default=DEFAULT_FAN_MODE): cv.string,
vol.Optional(CONF_DEFAULT_SWING_MODE, default=DEFAULT_SWING_MODE): cv.string,
vol.Optional(CONF_DEFAULT_OPERATION_FROM_IDLE): cv.string
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the GREE platform."""
name = config.get(CONF_NAME)
ip_addr = config.get(CONF_HOST)
mac_addr = config.get(CONF_MAC)
unique_key = config.get(CONF_UNIQUE_KEY).encode()
min_temp = config.get(CONF_MIN_TEMP)
max_temp = config.get(CONF_MAX_TEMP)
target_temp = config.get(CONF_TARGET_TEMP)
temp_sensor_entity_id = config.get(CONF_TEMP_SENSOR)
operation_list = DEFAULT_OPERATION_LIST
swing_list = DEFAULT_SWING_LIST
fan_list = DEFAULT_FAN_MODE_LIST
default_operation = config.get(CONF_DEFAULT_OPERATION)
default_fan_mode = config.get(CONF_DEFAULT_FAN_MODE)
default_swing_mode = config.get(CONF_DEFAULT_SWING_MODE)
default_operation_from_idle = config.get(CONF_DEFAULT_OPERATION_FROM_IDLE)
import gree
gree_device = gree.GreeDevice(mac_addr, unique_key, ip_addr)
try:
gree_device.update_status()
except socket.timeout:
_LOGGER.error("Failed to connect to Gree Device")
async_add_devices([
GreeClimate(hass, name, gree_device, min_temp, max_temp, target_temp, temp_sensor_entity_id, operation_list, fan_list, swing_list, default_operation, default_fan_mode, default_swing_mode, default_operation_from_idle)
])
ATTR_VALUE = 'value'
DEFAULT_VALUE = True
def gree_set_health(call):
value = call.data.get(ATTR_VALUE, DEFAULT_VALUE)
gree_device.send_command(health_mode=bool(value))
hass.services.async_register(DOMAIN, 'gree_set_health', gree_set_health)
class GreeClimate(ClimateDevice, RestoreEntity):
def __init__(self, hass, name, gree_device, min_temp, max_temp, target_temp, temp_sensor_entity_id, operation_list, fan_list, swing_list, default_operation, default_fan_mode, default_swing_mode, default_operation_from_idle):
"""Initialize the Gree Climate device."""
self.hass = hass
self._name = name
self._min_temp = min_temp
self._max_temp = max_temp
self._target_temperature = target_temp
self._target_temperature_step = 1
self._unit_of_measurement = hass.config.units.temperature_unit
self._current_temperature = 0
self._temp_sensor_entity_id = temp_sensor_entity_id
self._current_operation = default_operation
self._current_fan_mode = default_fan_mode
self._current_swing_mode = default_swing_mode
self._operation_list = operation_list
self._fan_list = fan_list
self._swing_list = swing_list
self._default_operation_from_idle = default_operation_from_idle
self._gree_device = gree_device
if temp_sensor_entity_id:
async_track_state_change(
hass, temp_sensor_entity_id, self._async_temp_sensor_changed)
sensor_state = hass.states.get(temp_sensor_entity_id)
if sensor_state:
self._async_update_current_temp(sensor_state)
def send_command(self):
power = True
mode = None
operation = self._current_operation.lower()
if operation == 'off':
power = False
else:
mode = OPERATION_LIST_MAP[operation]
fan_speed = FAN_MODE_MAP[self._current_fan_mode.lower()]
temperature = self._target_temperature
swing = SWING_MAP[self._current_swing_mode.lower()]
for retry in range(DEFAULT_RETRY):
try:
self._gree_device.send_command(power_on=power, temperature=temperature, fan_speed=fan_speed, mode=mode, swing=swing)
except (socket.timeout, ValueError):
try:
self._gree_device.update_status()
except socket.timeout:
if retry == DEFAULT_RETRY-1:
_LOGGER.error("Failed to send command to Gree Device")
@asyncio.coroutine
def _async_temp_sensor_changed(self, entity_id, old_state, new_state):
"""Handle temperature changes."""
if new_state is None:
return
self._async_update_current_temp(new_state)
yield from self.async_update_ha_state()
@callback
def _async_update_current_temp(self, state):
"""Update thermostat with latest state from sensor."""
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
try:
_state = state.state
if self.represents_float(_state):
self._current_temperature = self.hass.config.units.temperature(
float(_state), unit)
except ValueError as ex:
_LOGGER.error('Unable to update from sensor: %s', ex)
def represents_float(self, s):
try:
float(s)
return True
except ValueError:
return False
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def min_temp(self):
"""Return the polling state."""
return self._min_temp
@property
def max_temp(self):
"""Return the polling state."""
return self._max_temp
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._target_temperature_step
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def operation_list(self):
"""Return the list of available operation modes."""
return self._operation_list
@property
def swing_list(self):
"""Return the list of available swing modes."""
return self._swing_list
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def current_swing_mode(self):
"""Return current swing mode."""
return self._current_swing_mode
@property
def fan_list(self):
"""Return the list of available fan modes."""
return self._fan_list
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
if not (self._current_operation.lower() == 'off' or self._current_operation.lower() == 'idle'):
self.send_command()
elif self._default_operation_from_idle is not None:
self.set_operation_mode(self._default_operation_from_idle)
self.schedule_update_ha_state()
def set_fan_mode(self, fan):
"""Set new target temperature."""
self._current_fan_mode = fan
if not (self._current_operation.lower() == 'off' or self._current_operation.lower() == 'idle'):
self.send_command()
self.schedule_update_ha_state()
def set_operation_mode(self, operation_mode):
"""Set new target temperature."""
self._current_operation = operation_mode
self.send_command()
self.schedule_update_ha_state()
def set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
self._current_swing_mode = swing_mode
self.send_command()
self.schedule_update_ha_state()
@asyncio.coroutine
def async_added_to_hass(self):
        state = yield from self.async_get_last_state()
if state is not None:
self._target_temperature = state.attributes['temperature']
self._current_operation = state.attributes['operation_mode']
self._current_fan_mode = state.attributes['fan_mode']
self._current_swing_mode = state.attributes['swing_mode']
| 34.404011
| 228
| 0.68077
| 7,080
| 0.589656
| 678
| 0.056467
| 4,686
| 0.390272
| 0
| 0
| 1,653
| 0.13767
|
3c09d1eafa4175a7dae038754ad5b4a09e871bc9
| 6,492
|
py
|
Python
|
overhang/dnastorage_utils/system/header.py
|
dna-storage/DINOS
|
65f4142e80d646d7eefa3fc16d747d21ec43fbbe
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
overhang/dnastorage_utils/system/header.py
|
dna-storage/DINOS
|
65f4142e80d646d7eefa3fc16d747d21ec43fbbe
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
overhang/dnastorage_utils/system/header.py
|
dna-storage/DINOS
|
65f4142e80d646d7eefa3fc16d747d21ec43fbbe
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from dnastorage.codec.base_conversion import convertIntToBytes,convertBytesToInt
from dnastorage.arch.builder import *
import editdistance as ed
#from dnastorage.primer.primer_util import edit_distance
from io import BytesIO
from dnastorage.util.packetizedfile import *
import math
import struct
from dnastorage.system.formats import *
### Designed to fit on a single strand for most use cases
###
### Every header strand begins with special sequence that can't be used at the beginning of indices: ATCGATGC
###
### 1. 'ATCGATGC' [1]
### 2. short index - usually 0, 0-255, at most 16 strands [1]
### 3. major version (0-255) [1]
### 4. minor version (0-255) [1]
### 5. num bytes for size [1]
### 6. size [x]
### 7 num bytes for original filename [2]
### 8. null terminated string
### 9. encoding style [2]
### 10. length of remaining record (2 bytes) [2]
### 11. remaining record byte encoded [?]
### Pad to final width using arbitrary sequence
system_version = { 'major': 0, 'minor':1 }
magic_header = 'ATCGATGC' #'CCATCCAT'
def encode_primer_diff(o,n):
hdr = []
baseVal = { 'A': 0, 'C': 1, 'G':2, 'T':3 }
assert len(o)==len(n)
for i,(oo,nn) in enumerate(zip(o,n)):
if oo != nn:
hdr += [ (baseVal[nn] << 6) | (i&0x3F) ]
if len(hdr) == 0:
return [0]
hdr = [len(hdr)] + hdr
return hdr
def decode_primer_diff(data,oprimer):
sz = data[0]
if sz==0:
return oprimer,1
baseVal = [ 'A', 'C', 'G', 'T' ]
nprimer = [ _ for _ in oprimer ]
for i in range(sz):
val = data[1+i]
base = baseVal[(val&0xC0)>>6]
pos = val&0x3F
nprimer[pos] = base
return "".join(nprimer),sz+1
def encode_size_and_value(val):
data = []
if val==0:
data += [1, 0]
else:
data += convertIntToBytes(int(math.ceil(math.log(val+1,2)/8.0)),1)
data += convertIntToBytes(int(val),int(math.ceil(math.log(val+1,2)/8.0)))
return data
def decode_size_and_value(data,pos):
#print "decode_size_and_value: ",pos, len(data)
size_bytes = data[pos]
val = convertBytesToInt(data[pos+1:pos+1+size_bytes])
return val,size_bytes+1
def encode_file_header_comments(filename,format_id,size,other_data,primer5,primer3):
comment = "% dnastorage version {}.{}\n".format(system_version['major'],system_version['minor'])
comment += "% {} \n".format(size)
if len(filename) > 0:
comment += "% {} \n".format(filename)
else:
comment += "% No filename recorded.\n"
comment += "% 5-{} 3-{}\n".format(primer5,primer3)
comment += "% Id-{} Description-{} \n".format(format_id,file_system_format_description(format_id))
comment += "% {} bytes of additional data \n".format(len(other_data))
return comment
def encode_file_header(filename,format_id,size,other_data,primer5,primer3,fsmd_abbrev='FSMD'):
data = [ system_version['major'], system_version['minor'] ]
data += convertIntToBytes(int(math.ceil(math.log(size,2)/8.0)),1)
data += convertIntToBytes(size,int(math.ceil(math.log(size,2)/8.0)))
data += convertIntToBytes(len(filename)+1,2)
data += [ ord(_) for _ in filename ] + [0]
data += convertIntToBytes(format_id,2)
data += convertIntToBytes(len(other_data),2)
data += other_data
#data += [0]*(80-len(data))
data = "".join([chr(x) for x in data])
#print "size of file header: ",len(data)
pf = ReadPacketizedFilestream(BytesIO(data))
enc_func = file_system_encoder_by_abbrev(fsmd_abbrev)
enc = enc_func(pf,primer5+magic_header,primer3)
strands = []
for e in enc:
if type(e) is list:
for s in e:
strands.append(s)
else:
strands.append(e)
return strands
def pick_nonheader_strands(strands,primer5):
others = []
picks = []
for s in strands:
if s.startswith(primer5):
if s.startswith(primer5+magic_header):
pass
else:
others.append(s)
else:
others.append(s)
return others
def pick_header_strands(strands,primer5):
picks = []
others = []
for s in strands:
if s.find(primer5+magic_header)!=-1:
picks.append(s)
elif s.find(primer5)!=-1:
plen= s.find(primer5)+len(primer5)
possible_hdr = s[plen:plen+len(magic_header)]
if ed.eval(possible_hdr,magic_header) < 2:
#ss = s[:]
#ss[plen:plen+len(magic_header)] = magic_header
picks.append(s)
else:
others.append(s)
return picks,others
def decode_file_header(strands,primer5,primer3,fsmd_abbrev='FSMD'):
picks,_ = pick_header_strands(strands,primer5)
#print picks
b = BytesIO()
fid = file_system_formatid_by_abbrev(fsmd_abbrev)
packetsize = file_system_format_packetsize(fid)
pf = WritePacketizedFilestream(b,packetsize,packetsize)
dec_func = file_system_decoder_by_abbrev(fsmd_abbrev)
dec = dec_func(pf,primer5+magic_header,primer3)
for s in picks:
#tmp = dec.decode_from_phys_to_strand(s)
#print len(tmp),tmp
dec.decode(s)
dec.write()
assert dec.complete
data = [ ord(x) for x in b.getvalue() ]
#print data
assert data[0] == system_version['major']
assert data[1] == system_version['minor']
header = {}
header['version'] = [ data[0], data[1] ]
size_bytes = data[2]
pos = 3
header['size'] = convertBytesToInt(data[pos:pos+size_bytes])
pos += size_bytes
size_filename = convertBytesToInt(data[pos:pos+2])
pos+=2
header['filename'] = "".join([chr(x) for x in data[pos:pos+size_filename]])
pos += size_filename
header['formatid'] = convertBytesToInt(data[pos:pos+2])
pos += 2
size_other_data = convertBytesToInt(data[pos:pos+2])
pos += 2
header['other_data'] = [ x for x in data[pos:pos+size_other_data] ]
#print "size_other_data={}".format(size_other_data)
#print "len(other_data)={}".format(len(header['other_data']))
return header
if __name__ == "__main__":
strands = encode_file_header("",0xA,2,[1,2,3,4],"A"*19+"G","T"*19+"G")
for s in strands:
print "{}: strand={}".format(len(s), s)
print decode_file_header(strands,"A"*19+"G","T"*19+"G")
| 31.211538
| 109
| 0.608595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,397
| 0.215188
|
3c0c8d1fb6b9a95e3b3506596eae5b34be7226ac
| 2,386
|
py
|
Python
|
numba/containers/typedtuple.py
|
liuzhenhai/numba
|
855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf
|
[
"BSD-2-Clause"
] | 1
|
2015-01-29T06:52:36.000Z
|
2015-01-29T06:52:36.000Z
|
numba/containers/typedtuple.py
|
shiquanwang/numba
|
a41c85fdd7d6abf8ea1ebe9116939ddc2217193b
|
[
"BSD-2-Clause"
] | null | null | null |
numba/containers/typedtuple.py
|
shiquanwang/numba
|
a41c85fdd7d6abf8ea1ebe9116939ddc2217193b
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from functools import partial
import numba as nb
from numba.containers import orderedcontainer
import numpy as np
INITIAL_BUFSIZE = 5
def notimplemented(msg):
raise NotImplementedError("'%s' method of type 'typedtuple'" % msg)
_tuple_cache = {}
#-----------------------------------------------------------------------
# Runtime Constructor
#-----------------------------------------------------------------------
def typedtuple(item_type, iterable=None, _tuple_cache=_tuple_cache):
"""
>>> typedtuple(nb.int_)
()
>>> ttuple = typedtuple(nb.int_, range(10))
>>> ttuple
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
>>> ttuple[5]
5L
>>> typedtuple(nb.float_, range(10))
(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
"""
typedtuple_ctor = compile_typedtuple(item_type)
return typedtuple_ctor(iterable)
#-----------------------------------------------------------------------
# Typedlist implementation
#-----------------------------------------------------------------------
def compile_typedtuple(item_type, _tuple_cache=_tuple_cache):
if item_type in _tuple_cache:
return _tuple_cache[item_type]
dtype = item_type.get_dtype()
methods = orderedcontainer.container_methods(item_type, notimplemented)
@nb.jit(warn=False)
class typedtuple(object):
@nb.void(nb.object_)
def __init__(self, iterable):
self.size = 0
# TODO: Use length hint of iterable for initial buffer size
self.buf = np.empty(INITIAL_BUFSIZE, dtype=dtype)
if iterable != None:
self.__extend(iterable)
__getitem__ = methods['getitem']
__append = methods['append']
index = methods['index']
count = methods['count']
@nb.void(nb.object_)
def __extend(self, iterable):
for obj in iterable:
self.__append(obj)
@nb.Py_ssize_t()
def __len__(self):
return self.size
@nb.c_string_type()
def __repr__(self):
buf = ", ".join([str(self.buf[i]) for i in range(self.size)])
return "(" + buf + ")"
_tuple_cache[item_type] = typedtuple
return typedtuple
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26.808989
| 75
| 0.544007
| 858
| 0.359598
| 0
| 0
| 882
| 0.369656
| 0
| 0
| 768
| 0.321878
|
3c0cdb9dded53f14973b9af474148c0b7d6c7d6f
| 1,353
|
py
|
Python
|
pythondata_cpu_minerva/__init__.py
|
litex-hub/litex-data-cpu-minerva
|
3896ce15f5d6420f7797b1f95249f948533bf542
|
[
"BSD-2-Clause"
] | null | null | null |
pythondata_cpu_minerva/__init__.py
|
litex-hub/litex-data-cpu-minerva
|
3896ce15f5d6420f7797b1f95249f948533bf542
|
[
"BSD-2-Clause"
] | null | null | null |
pythondata_cpu_minerva/__init__.py
|
litex-hub/litex-data-cpu-minerva
|
3896ce15f5d6420f7797b1f95249f948533bf542
|
[
"BSD-2-Clause"
] | null | null | null |
import os.path
__dir__ = os.path.split(os.path.abspath(os.path.realpath(__file__)))[0]
data_location = os.path.join(__dir__, "sources")
src = "https://github.com/lambdaconcept/minerva"
# Module version
version_str = "0.0.post260"
version_tuple = (0, 0, 260)
try:
from packaging.version import Version as V
pversion = V("0.0.post260")
except ImportError:
pass
# Data version info
data_version_str = "0.0.post120"
data_version_tuple = (0, 0, 120)
try:
from packaging.version import Version as V
pdata_version = V("0.0.post120")
except ImportError:
pass
data_git_hash = "08251daae42ec8cfc54fb82865a5942727186192"
data_git_describe = "v0.0-120-g08251da"
data_git_msg = """\
commit 08251daae42ec8cfc54fb82865a5942727186192
Author: Jean-François Nguyen <jf@jfng.fr>
Date: Tue Apr 5 15:33:21 2022 +0200
stage: fix commit 6c3294b9.
"""
# Tool version info
tool_version_str = "0.0.post140"
tool_version_tuple = (0, 0, 140)
try:
from packaging.version import Version as V
ptool_version = V("0.0.post140")
except ImportError:
pass
def data_file(f):
"""Get absolute path for file inside pythondata_cpu_minerva."""
fn = os.path.join(data_location, f)
fn = os.path.abspath(fn)
if not os.path.exists(fn):
raise IOError("File {f} doesn't exist in pythondata_cpu_minerva".format(f))
return fn
| 26.529412
| 83
| 0.719882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 528
| 0.389956
|
3c0d77712915106228bf8f6e63542f7a42d1d3f1
| 1,602
|
py
|
Python
|
config.py
|
jasonyanglu/fedavgpy
|
cefbe5854f02d3df1197d849872286439c86e949
|
[
"MIT"
] | 1
|
2022-03-18T15:27:29.000Z
|
2022-03-18T15:27:29.000Z
|
config.py
|
jasonyanglu/fedavgpy
|
cefbe5854f02d3df1197d849872286439c86e949
|
[
"MIT"
] | null | null | null |
config.py
|
jasonyanglu/fedavgpy
|
cefbe5854f02d3df1197d849872286439c86e949
|
[
"MIT"
] | null | null | null |
# GLOBAL PARAMETERS
DATASETS = ['sent140', 'nist', 'shakespeare',
'mnist', 'synthetic', 'cifar10']
TRAINERS = {'fedavg': 'FedAvgTrainer',
'fedavg4': 'FedAvg4Trainer',
'fedavg5': 'FedAvg5Trainer',
'fedavg9': 'FedAvg9Trainer',
'fedavg_imba': 'FedAvgTrainerImba',}
OPTIMIZERS = TRAINERS.keys()
class ModelConfig(object):
def __init__(self):
pass
def __call__(self, dataset, model):
dataset = dataset.split('_')[0]
if dataset == 'mnist' or dataset == 'nist':
if model == 'logistic' or model == '2nn':
return {'input_shape': 784, 'num_class': 10}
else:
return {'input_shape': (1, 28, 28), 'num_class': 10}
elif dataset == 'cifar10':
return {'input_shape': (3, 32, 32), 'num_class': 10}
elif dataset == 'sent140':
sent140 = {'bag_dnn': {'num_class': 2},
'stacked_lstm': {'seq_len': 25, 'num_class': 2, 'num_hidden': 100},
'stacked_lstm_no_embeddings': {'seq_len': 25, 'num_class': 2, 'num_hidden': 100}
}
return sent140[model]
elif dataset == 'shakespeare':
shakespeare = {'stacked_lstm': {'seq_len': 80, 'emb_dim': 80, 'num_hidden': 256}
}
return shakespeare[model]
elif dataset == 'synthetic':
return {'input_shape': 60, 'num_class': 10}
else:
raise ValueError('Not support dataset {}!'.format(dataset))
MODEL_PARAMS = ModelConfig()
| 38.142857
| 103
| 0.529963
| 1,217
| 0.759675
| 0
| 0
| 0
| 0
| 0
| 0
| 568
| 0.354557
|
3c0dac01937088c28952c4c1e01fa4a3c19fcaa9
| 3,266
|
py
|
Python
|
Gan/gan.py
|
caiyueliang/CarClassification
|
a8d8051085c4e66ed3ed67e56360a515c9762cd5
|
[
"Apache-2.0"
] | null | null | null |
Gan/gan.py
|
caiyueliang/CarClassification
|
a8d8051085c4e66ed3ed67e56360a515c9762cd5
|
[
"Apache-2.0"
] | null | null | null |
Gan/gan.py
|
caiyueliang/CarClassification
|
a8d8051085c4e66ed3ed67e56360a515c9762cd5
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from argparse import ArgumentParser
import os
import model_train
from torchvision import models
def parse_argvs():
parser = ArgumentParser(description='GAN')
parser.add_argument('--data_path', type=str, help='data_path', default='./data')
parser.add_argument('--num_workers', type=int, help='num_workers', default=1)
parser.add_argument('--img_size', type=int, help='image_size', default=96)
parser.add_argument('--batch_size', type=int, help='batch_size', default=32)
parser.add_argument('--max_epoch', type=int, help='max_epoch', default=1000)
parser.add_argument('--lr1', type=float, help='learning rate 1', default=2e-4)
parser.add_argument('--lr2', type=float, help='learning rate 2', default=2e-4)
parser.add_argument('--beta1', type=float, help='Adam beta1', default=0.5)
parser.add_argument('--use_gpu', type=bool, help='use_gpu', default=True)
parser.add_argument('--nz', type=int, help='noise_channel', default=100)
parser.add_argument('--ngf', type=int, help='generator feature map size', default=64)
parser.add_argument('--ndf', type=int, help='discriminator feature map size', default=64)
parser.add_argument('--save_path', type=str, help='image save path', default='./images')
parser.add_argument('--vis', type=bool, help='visualize with visdom', default=True)
parser.add_argument("--env", type=str, help="visdom env name", default='GAN')
parser.add_argument('--plot_every', type=int, help='plot once every 20 batches', default=20)
parser.add_argument('--debug_file', type=str, help='enter debug mode if this path exists', default='./tmp/debug_gan')
parser.add_argument('--d_every', type=int, help='train the discriminator every 1 batch', default=1)
parser.add_argument('--g_every', type=int, help='train the generator every 5 batches', default=1)
parser.add_argument('--decay_every', type=int, help='save the model every 10 epochs', default=10)
parser.add_argument('--netd_path', type=str, help='model_path', default='./checkpoints/netd.pth')
parser.add_argument('--best_netd_path', type=str, help='model_path', default='./checkpoints/netd_best.pth')
parser.add_argument('--netg_path', type=str, help='model_path', default='./checkpoints/netg.pth')
parser.add_argument('--best_netg_path', type=str, help='model_path', default='./checkpoints/netg_best.pth')
parser.add_argument('--gen_img', type=str, help='gen_img', default='result.png')
parser.add_argument('--gen_num', type=int, help='keep the best 64 of the 512 generated images', default=64)
parser.add_argument('--gen_search_num', type=int, help='gen_search_num', default=512)
parser.add_argument('--gen_mean', type=int, help='noise mean', default=0)
parser.add_argument('--gen_std', type=int, help='noise variance', default=1)
input_args = parser.parse_args()
print(input_args)
return input_args
if __name__ == '__main__':
args = parse_argvs()
# train_path = args.train_path
# test_path = args.test_path
# output_model_path = args.output_model_path
# num_classes = args.classes_num
# batch_size = args.batch_size
# img_size = args.img_size
# lr = args.lr
# model = models.resnet18(num_classes=num_classes)
# model = models.squeezenet1_1(num_classes=num_classes)
model_train = model_train.ModuleTrain(opt=args)
model_train.train()
# model_train.test(show_img=True)
| 48.029412
| 111
| 0.709124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,400
| 0.410076
|
3c1079153ceb5f7b4146c5df6cbab9e874e7d7f4
| 854
|
py
|
Python
|
Modulo 2/ex068.py
|
Werberty/Curso-em-Video-Python3
|
24c0299edd635fb9c2db2ecbaf8532d292f92d49
|
[
"MIT"
] | 1
|
2022-03-06T11:37:47.000Z
|
2022-03-06T11:37:47.000Z
|
Modulo 2/ex068.py
|
Werberty/Curso-em-Video-Python3
|
24c0299edd635fb9c2db2ecbaf8532d292f92d49
|
[
"MIT"
] | null | null | null |
Modulo 2/ex068.py
|
Werberty/Curso-em-Video-Python3
|
24c0299edd635fb9c2db2ecbaf8532d292f92d49
|
[
"MIT"
] | null | null | null |
from random import randint
print('-=-'*10)
print('JOGO DO PAR OU IMPAR')
cont = 0
while True:
print('-=-' * 10)
n = int(input('Digite um valor: '))
op = str(input('Par ou impar? [P/I] ')).upper().strip()[0]
ia = randint(0, 10)
res = n + ia
print('-'*30)
print(f'Você jogou {n} e o computador {ia}. Total de {res} ', end='')
if res % 2 == 0:
print('DEU PAR')
print('-' * 30)
if op == 'P':
print('Você VENCEU!\nVamos jogar novamente...')
cont += 1
else:
break
elif res % 2 != 0:
print('DEU IMPAR')
print('-' * 30)
if op == 'I':
print('Você VENCEU!\nVamos jogar novamente...')
cont += 1
else:
break
print('Você PERDEU!')
print('-=-' * 10)
print(f'GAME OVER! Você venceu {cont} vez.')
| 25.878788
| 73
| 0.480094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.355064
|
3c10cbd008220b779ffa61252edc4ab7bdc901a1
| 5,506
|
py
|
Python
|
server/inbox/views.py
|
amy-xiang/CMPUT404_PROJECT
|
cbcea0cd164d6377ede397e934f960505e8f347a
|
[
"W3C-20150513"
] | 1
|
2021-04-06T22:35:53.000Z
|
2021-04-06T22:35:53.000Z
|
server/inbox/views.py
|
amy-xiang/CMPUT404_PROJECT
|
cbcea0cd164d6377ede397e934f960505e8f347a
|
[
"W3C-20150513"
] | null | null | null |
server/inbox/views.py
|
amy-xiang/CMPUT404_PROJECT
|
cbcea0cd164d6377ede397e934f960505e8f347a
|
[
"W3C-20150513"
] | null | null | null |
from django.core.exceptions import ValidationError
from django.shortcuts import render, get_object_or_404
from django.db import IntegrityError
from rest_framework import authentication, generics, permissions, status
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from posts.serializers import PostSerializer
from author.serializers import AuthorProfileSerializer
from main.models import Author
from nodes.models import Node
from main import utils
from posts.models import Post
from likes.models import Like
from .models import Inbox
from .serializers import InboxSerializer
from urllib.parse import urlparse
import requests
import json
# api/author/{AUTHOR_ID}/inbox/
class InboxView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = InboxSerializer
authenticate_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_inbox(self):
request_author_id = self.kwargs['author_id']
if self.request.user.id != request_author_id:
raise PermissionDenied(
detail={'error': ['You do not have permission to access this inbox.']})
if not self.request.user.adminApproval:
raise PermissionDenied(
detail={'error': ['User has not been approved by admin.']})
return get_object_or_404(Inbox, author=Author.objects
.get(id=self.request.user.id))
# GET: get Inbox of an user
def get(self, request, *args, **kwargs):
inbox = self.get_inbox()
serializer = InboxSerializer(inbox, context={'request': request})
return Response(serializer.data)
# POST: send a Post, Like or Follow to Inbox
def post(self, request, *args, **kwargs):
request_author_id = self.kwargs['author_id']
inbox_type = request.data.get('type')
if inbox_type is not None: inbox_type = inbox_type.lower()
host_name = request.get_host()
if inbox_type == 'post':
post_id = request.data.get('id')
try:
Inbox.objects.get(author=request_author_id).send_to_inbox(request.data)
except Inbox.DoesNotExist as e:
return Response({'error':'Author not found! Please check author_id in URL.'},
status=status.HTTP_404_NOT_FOUND)
return Response({'data':f'Shared Post {post_id} with Author '
f'{request_author_id} on {host_name}.'},
status=status.HTTP_200_OK)
elif inbox_type == 'like':
id_url = request.data.get('object')
parsed_uri = urlparse(id_url)
object_host = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
# Sending a LIKE from (us or remote server) to us
if (object_host == utils.HOST):
try:
Inbox.objects.get(author=request_author_id).send_to_inbox(request.data)
except Inbox.DoesNotExist as e:
return Response({'error':'Author not found! Please check author_id in URL.'},
status=status.HTTP_404_NOT_FOUND)
# Sending a LIKE from us to remote server
else:
try:
remote_server = Node.objects.get(remote_server_url=object_host)
except Node.DoesNotExist:
return Response({'error':'Could not find remote server user'}, status=status.HTTP_404_NOT_FOUND)
r = requests.post(
f"{object_host}api/author/{request_author_id}/inbox/",
json=request.data,
auth=(remote_server.konnection_username, remote_server.konnection_password))
if r.status_code < 200 or r.status_code >= 300:
return Response({'error':'Could not complete the request to the remote server'},
status=r.status_code)
# Gather information for the Like object creation
try:
object_type = Like.LIKE_COMMENT if ('comments' in id_url) else Like.LIKE_POST
if (id_url.endswith('/')):
object_id = id_url.split('/')[-2]
else:
object_id = id_url.split('/')[-1]
like_author_id = request.data.get('author')['id'].split('/')[-1]
Like.objects.create(
author=request.data.get('author'), author_id=like_author_id,
object=id_url, object_type=object_type, object_id=object_id
)
except IntegrityError:
return Response({'data':f'You have already sent a like to {object_type} {id_url} on {host_name}.'},
status=status.HTTP_200_OK)
return Response({'data':f'Sent like to {object_type} {id_url} on {host_name}.'},
status=status.HTTP_200_OK)
else:
return Response({'error':'Invalid type, only \'post\', \'like\''},
status=status.HTTP_400_BAD_REQUEST)
# DELETE: Clear the inbox
def delete(self, request, *args, **kwargs):
inbox = self.get_inbox()
length = len(inbox.items)
inbox.items.clear()
inbox.save()
return Response({'data':f'Deleted {length} messages.'}, status=status.HTTP_200_OK)
| 44.403226
| 116
| 0.606793
| 4,780
| 0.868144
| 0
| 0
| 0
| 0
| 0
| 0
| 1,064
| 0.193244
|
3c119513513dbce82555731b084d2de00dc48dc8
| 1,873
|
py
|
Python
|
black_list_all.py
|
philipempl/mail_watch
|
802df3146c462aeb670a4a973e428976d90abf06
|
[
"Apache-2.0"
] | null | null | null |
black_list_all.py
|
philipempl/mail_watch
|
802df3146c462aeb670a4a973e428976d90abf06
|
[
"Apache-2.0"
] | 1
|
2019-12-11T08:49:51.000Z
|
2019-12-11T08:49:51.000Z
|
black_list_all.py
|
philipempl/mail_watch
|
802df3146c462aeb670a4a973e428976d90abf06
|
[
"Apache-2.0"
] | null | null | null |
import imaplib, base64, os, email, re, configparser
import tkinter as tk
from tkinter import messagebox
from datetime import datetime
from email import generator
from dateutil.parser import parse
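# The functions below use `config`, `black_list` and `printProgressBar` without defining
# them; the definitions here are assumed, minimal stand-ins so the script can run.
config = configparser.ConfigParser()
config.read('config.ini')  # hypothetical INI file providing the SERVER, ADDRESS and MAIL_DIRS sections
black_list = "blackList.txt"
def printProgressBar(iteration, total, prefix='', suffix='', length=50, fill='#'):
    # Minimal text progress bar (assumed helper, not part of the original source).
    filled = int(length * iteration // total)
    bar = fill * filled + '-' * (length - filled)
    print('\r%s |%s| %d/%d %s' % (prefix, bar, iteration, total, suffix), end='\r')
    if iteration == total:
        print()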
def init():
mail = imaplib.IMAP4_SSL(config['SERVER']['Host'],config['SERVER']['Port'])
pwd = str(input("PWD: "))
print(pwd)
mail.login(str(config['ADDRESS']['Email']),pwd )
for dir in config['MAIL_DIRS']:
dir = config['MAIL_DIRS'][dir]
print('\n ########################## ' + dir + ' ##################################\n')
mail.select(dir)
type, data = mail.search(None, 'ALL')
mail_ids = data[0]
id_list = mail_ids.split()
readAllMails(id_list, mail)
def readAllMails(id_list, mail):
counter = 0
l = len(id_list)
for num in id_list:
typ, data = mail.fetch(num, '(RFC822)' )
raw_email = data[0][1]
# converts byte literal to string removing b''
try:
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
# get sender from mail
except:
continue
sender_name = ''
sender_email = ''
sender_array = email_message['from'].split('<')
if(len(sender_array) > 1):
sender_email = (sender_array[1][:-1]).lower()
sender_name = re.sub(r"[^a-zA-Z0-9]+", ' ',sender_array[0]).strip()
else:
sender_email = (sender_array[0]).lower()
counter = counter + 1
printProgressBar(counter, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
if(isInBlackList(sender_email) == False):
addToBlackList(sender_email)
def isInBlackList(sender):
with open(black_list) as blackList:
if sender in blackList.read():
return True
else:
return False
def addToBlackList(sender):
hs = open("blackList.txt","a")
hs.write(sender + "\n")
hs.close()
init()
| 28.378788
| 99
| 0.620929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 317
| 0.169247
|
3c11fb38e2dcb32d635011cf74ded4f173fac7e7
| 539
|
py
|
Python
|
chpt6/Pentagonal_numbers.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | null | null | null |
chpt6/Pentagonal_numbers.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-21T09:39:00.000Z
|
2018-05-27T15:59:15.000Z
|
chpt6/Pentagonal_numbers.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-19T14:59:56.000Z
|
2018-05-19T15:25:48.000Z
|
#
# This program is a function that displays the first 100 pentagonal numbers with 10 numbers on each line.
# A pentagonal number is defined as n(3n - 1)/2 for n = 1, 2, c , and so on.
# So, the first few numbers are 1, 5, 12, 22, ....
def get_pentagonal_number(n):
pentagonal_number = round(n * (3 * n - 1) / 2)
print(format(pentagonal_number, '5d'), end=' ')
def main():
count = 0
for i in range(1, 101):
if count % 10 == 0:
print()
get_pentagonal_number(i)
count += 1
main()
| 23.434783
| 105
| 0.595547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.447124
|
3c129d467e7a619b95bbc8aa752a9a6e384e5ae6
| 4,075
|
py
|
Python
|
iraclis/_1databases.py
|
nespinoza/Iraclis
|
3b5dd8d6bc073f6d2c24ad14341020694255bf65
|
[
"CC-BY-4.0"
] | null | null | null |
iraclis/_1databases.py
|
nespinoza/Iraclis
|
3b5dd8d6bc073f6d2c24ad14341020694255bf65
|
[
"CC-BY-4.0"
] | null | null | null |
iraclis/_1databases.py
|
nespinoza/Iraclis
|
3b5dd8d6bc073f6d2c24ad14341020694255bf65
|
[
"CC-BY-4.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ._0errors import *
from ._0imports import *
class Database:
def __init__(self, database_name, vital=False, date_to_update='daily', force_update=False, ask_size=None):
package_name = 'iraclis'
info_file_name = '_0database.pickle'
directory_name = 'database'
last_update_file_name = 'database_last_update.txt'
info_file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), info_file_name)
package_path = os.path.join(os.path.expanduser('~'), '.{0}'.format(package_name))
if not os.path.isdir(package_path):
os.mkdir(package_path)
directory_path = os.path.join(package_path, '{0}_{1}'.format(database_name, directory_name))
last_update_file_path = os.path.join(package_path, '{0}_{1}'.format(database_name, last_update_file_name))
if date_to_update == 'daily':
date_to_update = int(time.strftime('%y%m%d'))
else:
date_to_update = int(date_to_update)
if os.path.isdir(directory_path):
if force_update or len(glob.glob(os.path.join(directory_path, '*'))) == 0:
shutil.rmtree(directory_path)
os.mkdir(directory_path)
update = True
else:
if not os.path.isfile(last_update_file_path):
update = True
elif int(open(last_update_file_path).readlines()[0]) < date_to_update:
update = True
else:
update = False
else:
os.mkdir(directory_path)
update = True
if update and ask_size:
if input('Downloading {0} database (up to {1})... proceed with download now? (y/n): '.format(
database_name, ask_size)) == 'y':
update = True
else:
update = False
if update:
# noinspection PyBroadException
try:
print('\nDownloading {0} database...'.format(database_name))
dbx_files = pickle.load(open(info_file_path, 'rb'))
dbx_files = dbx_files['{0}_{1}'.format(database_name, directory_name)]
for i in glob.glob(os.path.join(directory_path, '*')):
if os.path.split(i)[1] not in dbx_files:
os.remove(i)
for i in dbx_files:
if not os.path.isfile(os.path.join(package_path, dbx_files[i]['local_path'])):
print(i)
urlretrieve(dbx_files[i]['link'], os.path.join(package_path, dbx_files[i]['local_path']))
if database_name == 'clablimb':
xx = pickle.load(open(glob.glob(os.path.join(directory_path, '*'))[0], 'rb'))
for i in xx:
w = open(os.path.join(directory_path, i), 'w')
w.write(xx[i])
w.close()
w = open(last_update_file_path, 'w')
w.write(time.strftime('%y%m%d'))
w.close()
except Exception as inst:
print('\nDownloading {0} database failed. A download will be attempted next time.'.format(
database_name))
print('Error:', sys.exc_info()[0])
print(inst.args)
pass
if (not os.path.isdir(directory_path) or
len(glob.glob(os.path.join(directory_path, '*'))) == 0):
if vital:
raise IraclisLibraryError('{0} database not available.'.format(database_name))
else:
print('\n{0} features cannot be used.'.format(database_name))
self.path = False
else:
self.path = directory_path
class Databases:
def __init__(self):
self.wfc3 = Database('wfc3', vital=True, date_to_update='181212').path
databases = Databases()
| 38.084112
| 114
| 0.553374
| 3,884
| 0.953129
| 0
| 0
| 0
| 0
| 0
| 0
| 496
| 0.121718
|
3c134e04d61928fa6fcc6871ade77a7efb97baf0
| 1,029
|
py
|
Python
|
Level2/Ex_5.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | 2
|
2019-03-09T20:31:06.000Z
|
2020-06-19T12:15:13.000Z
|
Level2/Ex_5.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | null | null | null |
Level2/Ex_5.py
|
zac11/Python_Excerices
|
775739e2639be1f82cc3690c854b9ea0ece05042
|
[
"Apache-2.0"
] | 1
|
2018-08-11T18:36:49.000Z
|
2018-08-11T18:36:49.000Z
|
"""
Write a program that accepts a sequence of whitespace separated words as input and prints the words after removing all
duplicate words and sorting them alphanumerically.
Suppose the following input is supplied to the program:
hello world and practice makes perfect and hello world again
Then, the output should be:
again and hello makes perfect practice world
"""
string_input = input()
words =[word for word in string_input.split(" ")]
print(" ".join(sorted(list(set(words)))))
"""
Let's break it down now
print(set(words))
This will print a set of the words, with all the unique values
print(list(set(words)))
Create a list out of the values of words
print(sorted(list(set(words))))
This will sort the list
print(" ".join(sorted(list(set(words)))))
This is join the sorted list items with a whitespace
For this input :
I like to yawn and I also like to make a music and a car
Now output will be :
I a also and car like make music to yawn
Notice that the uppercase I is sorted at first position
"""
| 19.415094
| 118
| 0.74344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 914
| 0.888241
|
3c1675a2a9274be019b322c8830f740dbd48fb14
| 6,063
|
py
|
Python
|
alfworld/agents/utils/traj_process.py
|
roy860328/VSGM
|
3ec19f9cf1401cecf45527687936b8fe4167f672
|
[
"MIT"
] | 6
|
2021-05-22T15:33:42.000Z
|
2022-01-12T03:34:39.000Z
|
alfworld/agents/utils/traj_process.py
|
roy860328/VSGM
|
3ec19f9cf1401cecf45527687936b8fe4167f672
|
[
"MIT"
] | 1
|
2021-06-19T10:04:13.000Z
|
2021-06-20T03:37:23.000Z
|
alfworld/agents/utils/traj_process.py
|
roy860328/VSGM
|
3ec19f9cf1401cecf45527687936b8fe4167f672
|
[
"MIT"
] | null | null | null |
import os
import cv2
import json
import numpy as np
import h5py
from PIL import Image
TASK_TYPES = {1: "pick_and_place_simple",
2: "look_at_obj_in_light",
3: "pick_clean_then_place_in_recep",
4: "pick_heat_then_place_in_recep",
5: "pick_cool_then_place_in_recep",
6: "pick_two_obj_and_place"}
def save_trajectory(envs, store_states, task_desc_strings, expert_actions, still_running_masks):
print("=== SAVE BATCH ===")
TRAIN_DATA = "TRAIN_DATA.json"
for i, thor in enumerate(envs):
save_data_path = thor.env.save_frames_path
print("=== save one episode len ===", len(expert_actions))
print("=== save path ===", save_data_path)
data = {
"task_desc_string": [],
"expert_action": [],
"sgg_meta_data": [],
"rgb_image": [],
}
img_name = 0
for store_state, task_desc_string, expert_action, still_running_mask in \
zip(store_states, task_desc_strings, expert_actions, still_running_masks):
if int(still_running_mask[i]) == 0:
break
_task_desc_string = task_desc_string[i]
_expert_action = expert_action[i]
rgb_image = store_state[i]["rgb_image"]
img_path = os.path.join(save_data_path, '%09d.png' % img_name)
cv2.imwrite(img_path, rgb_image)
data["task_desc_string"].append(_task_desc_string)
data["expert_action"].append(_expert_action)
data["rgb_image"].append(img_path)
data["sgg_meta_data"].append(store_state[i]["sgg_meta_data"])
img_name += 1
with open(os.path.join(save_data_path, TRAIN_DATA), 'w') as f:
json.dump(data, f)
def save_exploration_trajectory(envs, exploration_frames, sgg_meta_datas):
print("=== SAVE EXPLORATION BATCH ===")
TRAIN_DATA = "TRAIN_DATA.json"
for i, thor in enumerate(envs):
save_data_path = thor.env.save_frames_path
print("=== save exploration one episode len ===", len(sgg_meta_datas[i]))
print("=== save exploration path ===", save_data_path)
data = {
"exploration_img": [],
"exploration_sgg_meta_data": [],
}
img_name = 0
for exploration_frame, sgg_meta_data, in zip(exploration_frames[i], sgg_meta_datas[i]):
img_path = os.path.join(save_data_path, 'exploration_img%09d.png' % img_name)
cv2.imwrite(img_path, exploration_frame)
data["exploration_img"].append(img_path)
data["exploration_sgg_meta_data"].append(sgg_meta_data)
img_name += 1
with open(os.path.join(save_data_path, TRAIN_DATA), 'r') as f:
ori_data = json.load(f)
with open(os.path.join(save_data_path, TRAIN_DATA), 'w') as f:
data = {**ori_data, **data}
json.dump(data, f)
def get_traj_train_data(tasks_paths, save_frames_path):
# [store_states, task_desc_strings, expert_actions]
transition_caches = []
for task_path in tasks_paths:
transition_cache = [None, None, None]
traj_root = os.path.dirname(task_path)
task_path = os.path.join(save_frames_path, traj_root.replace('../', ''))
with open(task_path + '/TRAIN_DATA.json', 'r') as f:
data = json.load(f)
# store store_states
store_states = []
rgb_array = load_img_with_h5(data["rgb_image"], task_path)
for img, sgg_meta_data in zip(rgb_array, data["sgg_meta_data"]):
store_state = {
"rgb_image": img,
"sgg_meta_data": sgg_meta_data,
}
store_states.append(store_state)
# len(store_state) == 39
transition_cache[0] = store_states
# len(seq_task_desc_strings) == 39
transition_cache[1] = [[task_desc_string] for task_desc_string in data["task_desc_string"]]
# len(seq_target_strings) == 39
transition_cache[2] = [[expert_action] for expert_action in data["expert_action"]]
transition_caches.append(transition_cache)
# import pdb; pdb.set_trace()
return transition_caches
def get_exploration_traj_train_data(tasks_paths, save_frames_path):
# [store_states, task_desc_strings, expert_actions]
exploration_transition_caches = []
for task_path in tasks_paths:
transition_cache = [None, None, None]
traj_root = os.path.dirname(task_path)
task_path = os.path.join(save_frames_path, traj_root.replace('../', ''))
with open(task_path + '/TRAIN_DATA.json', 'r') as f:
data = json.load(f)
# store store_states
store_states = []
rgb_array = load_img_with_h5(data["exploration_img"], task_path, pt_name="exploration_img.pt")
for img, sgg_meta_data in zip(rgb_array, data["exploration_sgg_meta_data"]):
store_state = {
"exploration_img": img,
"exploration_sgg_meta_data": sgg_meta_data,
}
store_states.append(store_state)
# len(store_state) == 39
transition_cache[0] = store_states
exploration_transition_caches.append(transition_cache)
# import pdb; pdb.set_trace()
return exploration_transition_caches
def load_img_with_h5(rgb_img_names, img_dir_path, pt_name="img.pt"):
img_h5 = os.path.join(img_dir_path, pt_name)
if not os.path.isfile(img_h5):
rgb_array = []
for rgb_img_name in rgb_img_names:
rgb_img_name = rgb_img_name.rsplit("/", 1)[-1]
rgb_img_path = os.path.join(img_dir_path, rgb_img_name)
rgb_img = Image.open(rgb_img_path).convert("RGB")
rgb_img = np.array(rgb_img)
rgb_array.append(rgb_img)
hf = h5py.File(img_h5, 'w')
hf.create_dataset('rgb_array', data=rgb_array)
hf.close()
print("Save img data to {}".format(img_h5))
hf = h5py.File(img_h5, 'r')
rgb_array = hf['rgb_array'][:]
return rgb_array
| 41.527397
| 102
| 0.628072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,274
| 0.210127
|
3c17265b394405d74fda0b7ba580609c53a824f6
| 846
|
py
|
Python
|
log.py
|
bsha3l173/NetDiagBot
|
c76d00a34ae4587942010b2370dd0ac35a83bcdd
|
[
"Unlicense"
] | null | null | null |
log.py
|
bsha3l173/NetDiagBot
|
c76d00a34ae4587942010b2370dd0ac35a83bcdd
|
[
"Unlicense"
] | null | null | null |
log.py
|
bsha3l173/NetDiagBot
|
c76d00a34ae4587942010b2370dd0ac35a83bcdd
|
[
"Unlicense"
] | null | null | null |
__author__ = 'bsha3l173'
import logging
import datetime
from conf import LOG_FILENAME
class Log():
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def log_d(self, message, text):
last_name = ''
first_name = ''
user_name = ''
if not message.from_user.first_name is None:
first_name = message.from_user.first_name.encode('utf-8') + ' '
if not message.from_user.last_name is None:
last_name = message.from_user.last_name.encode('utf-8') + ' '
if not message.from_user.username is None:
user_name = '(' + message.from_user.username.encode('utf-8') + ')'
name = last_name + first_name + user_name
logging.debug(str(datetime.datetime.now()) + ' ' + str(message.chat.id) + ' ' + name + ': ' + text)
| 33.84
| 108
| 0.611111
| 748
| 0.884161
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.070922
|
3c1927e4c80951e764d207f99cb77de8d5e6eb00
| 1,850
|
py
|
Python
|
selenium-browser.py
|
steflayanto/international-google-search
|
05cc773b158fe11202fdf39fb515b398a08b7e3c
|
[
"MIT"
] | null | null | null |
selenium-browser.py
|
steflayanto/international-google-search
|
05cc773b158fe11202fdf39fb515b398a08b7e3c
|
[
"MIT"
] | null | null | null |
selenium-browser.py
|
steflayanto/international-google-search
|
05cc773b158fe11202fdf39fb515b398a08b7e3c
|
[
"MIT"
] | null | null | null |
import os, time, pyautogui
import selenium
from selenium import webdriver
from location_reference import country_map
# STATIC SETTINGS
DPI = 125 # Scaling factor of texts and apps in display settings
screen_dims = [x / (DPI/100) for x in pyautogui.size()]
code_map = country_map()
print("International Google Search")
print("Supported Countries: USA, UK, Japan, Canada, Germany, Italy, France, Australia, Brasil, India, Korea, Pakistan")
query = input("Please input Search Query: ")
text = " "
codes = []
while text is not "" and len(codes) != 3:
text = input("Input Country. Input nothing to start search: ").lower()
if text not in code_map.keys():
print("\tERROR: Country not recognized")
continue
codes.append(code_map[text])
print("Starting Search")
# Using Chrome Incognito to access web
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
drivers = []
for i in range(3):
drivers.append(webdriver.Chrome(chrome_options=chrome_options))
drivers[i].set_window_position(i * screen_dims[0] / 3, 0)
assert len(codes) == len(drivers)
for i, driver in enumerate(drivers):
# Open the website
code = codes[i]
driver.get('https://www.google.com/ncr')
time.sleep(0.5)
driver.get('https://www.google.com/?gl=' + code)
# print(screen_dims)
# print(driver.get_window_size())
driver.set_window_size(screen_dims[0] / 3, screen_dims[1])
# print(driver.get_window_size())
element = driver.find_element_by_name("q")
element.send_keys(query)
element.submit()
# for i in range(3):
# drivers[i].set_window_position(i * screen_dims[0] / 3, 0)
# driver.manage().window().setPosition(0,0)
# Get Search Box
# element = driver.find_element_by_name("q")
# element.send_keys("Hotels")
# element.submit()
input("Press enter to exit")
| 28.90625
| 120
| 0.702162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 814
| 0.44
|
3c1ce045f39d2d470a259001626bc914b8162303
| 29
|
py
|
Python
|
homeassistant/components/thomson/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/thomson/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/thomson/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The thomson component."""
| 14.5
| 28
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.965517
|
3c1d0a50a97a1bf750da3e79140c45303971c672
| 2,027
|
py
|
Python
|
registration/admin.py
|
allenallen/interedregistration
|
d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9
|
[
"MIT"
] | null | null | null |
registration/admin.py
|
allenallen/interedregistration
|
d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9
|
[
"MIT"
] | 6
|
2020-02-11T23:05:13.000Z
|
2021-06-10T20:43:51.000Z
|
registration/admin.py
|
allenallen/interedregistration
|
d6b93bfc33d7bb9bfbabdcdb27b685f3a6be3ea9
|
[
"MIT"
] | null | null | null |
import csv
from django.contrib import admin
from django.http import HttpResponse
from .models import Student, SchoolList, Event, ShsTrack, SchoolOfficial
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
field_names = [field.name for field in meta.fields]
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename={}.csv'.format(meta)
writer = csv.writer(response)
writer.writerow(field_names)
for obj in queryset:
writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export Selected"
@admin.register(SchoolOfficial)
class SchoolOfficialAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = (
'id', 'last_name', 'first_name', 'school', 'designation', 'course_taken', 'email', 'date_of_birth', 'mobile',
'gender', 'date_registered', 'registered_event')
list_filter = ('registered_event', 'school',)
actions = ['export_as_csv']
@admin.register(Student)
class StudentAdmin(admin.ModelAdmin, ExportCsvMixin):
list_display = (
'id', 'last_name', 'first_name', 'school', 'grade_level', 'shs_track', 'projected_course', 'email',
'date_of_birth', 'mobile',
'gender', 'date_registered', 'registered_event')
actions = ['export_as_csv']
list_filter = ('registered_event', 'school',)
change_list_template = 'change_list.html'
search_fields = ('first_name', 'last_name', 'email')
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
list_display = ('name', 'start_date', 'end_date')
fieldsets = (
(None, {
'fields': ('name', 'logo', 'event_registration_url')
}),
('Event Date', {
'fields': ('start_date', 'end_date')
}),
)
readonly_fields = ('event_registration_url',)
admin.site.register(SchoolList)
admin.site.register(ShsTrack)
| 30.712121
| 117
| 0.665022
| 1,715
| 0.846078
| 0
| 0
| 1,222
| 0.602861
| 0
| 0
| 630
| 0.310804
|
3c1e8f234365a8d2c0de799db1420fb70afb127b
| 1,251
|
py
|
Python
|
python/src/aoc/year2016/day5.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | 1
|
2021-02-16T21:30:04.000Z
|
2021-02-16T21:30:04.000Z
|
python/src/aoc/year2016/day5.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | null | null | null |
python/src/aoc/year2016/day5.py
|
ocirne/adventofcode
|
ea9b5f1b48a04284521e85c96b420ed54adf55f0
|
[
"Unlicense"
] | null | null | null |
import hashlib
from itertools import islice
from aoc.util import load_input
def search(door_id, is_part1=False, is_part2=False):
i = 0
while True:
md5_hash = hashlib.md5((door_id + str(i)).encode()).hexdigest()
if md5_hash.startswith("00000"):
if is_part1:
yield md5_hash[5]
if is_part2:
pos, char = md5_hash[5:7]
if pos.isnumeric() and 0 <= int(pos) <= 7:
yield int(pos), md5_hash[6]
i += 1
def part1(lines):
"""
>>> part1(['abc'])
'18f47a30'
"""
door_id = lines[0].strip()
return "".join(islice(search(door_id, is_part1=True), 8))
def part2(lines, be_extra_proud=True):
"""
>>> part2(['abc'], False)
'05ace8e3'
"""
result = 8 * [" "]
count = 0
for position, character in search(lines[0].strip(), is_part2=True):
if result[position] == " ":
result[position] = character
count += 1
if count == 8:
return "".join(result)
if be_extra_proud:
print("".join(result))
if __name__ == "__main__":
data = load_input(__file__, 2016, "5")
print(part1(data))
print(part2(data))
| 24.529412
| 71
| 0.529976
| 0
| 0
| 439
| 0.350919
| 0
| 0
| 0
| 0
| 137
| 0.109512
|
3c1f8c82eeba6453a646f8492c4afe649539ab25
| 2,324
|
py
|
Python
|
arraycircles.py
|
BastiHz/arraycircles
|
cf2e8ac48b099570d6b351ae84dc060263ee4e3d
|
[
"MIT"
] | null | null | null |
arraycircles.py
|
BastiHz/arraycircles
|
cf2e8ac48b099570d6b351ae84dc060263ee4e3d
|
[
"MIT"
] | null | null | null |
arraycircles.py
|
BastiHz/arraycircles
|
cf2e8ac48b099570d6b351ae84dc060263ee4e3d
|
[
"MIT"
] | null | null | null |
import math
import random
import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
import numpy as np
import pygame as pg
WINDOW_SIZE = (800, 600)
FPS = 60
pg.init()
window = pg.display.set_mode(WINDOW_SIZE)
clock = pg.time.Clock()
font = pg.font.SysFont("monospace", 20)
def make_circle_array(diameter, hue):
circle = np.zeros((diameter, diameter, 3), int)
center = (diameter - 1) / 2
radius = diameter / 2
color = pg.Color("white")
color.set_length(3)
color.hsva = hue, 100, 100, 100
# Can't use 2d arrays because there seems to be a bug with
# pygame.surfarray.make_surface() not handling 2d arrays properly.
# color = (color.r << 16) + (color.g << 8) + color.b
# TODO: This could be vectorized using numpy.hypot()
# TODO: I only need to do this for a quadrant and then mirror the result around.
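# A possible vectorized form of the loop below (a sketch for the TODO above, left as a
# comment so the original loop remains the code that actually runs):
#   yy, xx = np.ogrid[:diameter, :diameter]
#   mask = np.hypot(xx - center, yy - center) <= radius
#   circle[mask] = color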
for x in range(diameter):
for y in range(diameter):
dx = x - center
dy = y - center
dist = math.hypot(dx, dy)
if dist <= radius:
circle[x, y] = color
return circle
hues = (0, 120, 240)
angles = [math.radians(i) for i in (0, 120, 240)]
window_center_x = WINDOW_SIZE[0] // 2
window_center_y = WINDOW_SIZE[1] // 2
distance_from_center = 75
circle_surfs = [None, None, None]
circle_rects = [None, None, None]
for i in range(3):
circle = make_circle_array(200, hues[i])
circle_surf = pg.surfarray.make_surface(circle)
circle_surfs[i] = circle_surf
circle_rect = circle_surf.get_rect()
circle_rect.center = [
window_center_x + math.sin(angles[i]) * distance_from_center,
window_center_y - math.cos(angles[i]) * distance_from_center
]
circle_rects[i] = circle_rect
running = True
while running:
clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
elif event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
window.fill(pg.Color("black"))
fps_text = font.render(f"{clock.get_fps():.0f}", False, pg.Color("white"))
window.blit(fps_text, (0, 0))
for i in range(3):
window.blit(
circle_surfs[i],
circle_rects[i],
special_flags=pg.BLEND_RGB_ADD
)
pg.display.flip()
| 27.023256
| 84
| 0.623924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 395
| 0.169966
|
3c1fbd1f77839d16929ae16aa95f7765710bb079
| 1,268
|
py
|
Python
|
choosy/star.py
|
creiht/choosy
|
08c18f1480e542ee122b86a0b47a30c8e5b4017e
|
[
"BSD-3-Clause"
] | null | null | null |
choosy/star.py
|
creiht/choosy
|
08c18f1480e542ee122b86a0b47a30c8e5b4017e
|
[
"BSD-3-Clause"
] | null | null | null |
choosy/star.py
|
creiht/choosy
|
08c18f1480e542ee122b86a0b47a30c8e5b4017e
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import (
abort, Blueprint, current_app, flash, g, redirect, render_template, request,
url_for
)
import giphy_client
from werkzeug.exceptions import abort
from choosy.auth import login_required
from choosy import db
bp = Blueprint("star", __name__)
@bp.route("/stars")
@login_required
def index():
gifs = []
error = None
more = True
try:
offset = int(request.args.get("offset", "0"))
except ValueError:
error = "Invalid offset"
if offset < 0:
offset = 0
gif_ids = db.get_starred_gifs(g.user["id"], 7, offset)
if len(gif_ids) < 7:
# There are no more items to load
more = False
else:
# We only want the first 6
gif_ids = gif_ids[:-1]
for gif_id in gif_ids:
try:
giphy = giphy_client.DefaultApi()
giphy_key = current_app.config["GIPHY_KEY"]
# TODO: do this async
resp = giphy.gifs_gif_id_get(giphy_key, gif_id)
gifs.append(resp.data)
except Exception as e:
current_app.logger.error("Error loading gif from giphy: %s" % e)
return abort(500)
return render_template("star/index.html",
gifs=gifs, offset=offset, more=more)
| 26.978723
| 80
| 0.605678
| 0
| 0
| 0
| 0
| 997
| 0.786278
| 0
| 0
| 187
| 0.147476
|
3c1ff1fa706a7ee54f33c5565b4c5b7b1c4bf065
| 7,700
|
py
|
Python
|
src/1-3_autocorrect.py
|
BernhardSchiffer/1-dynamic-programming
|
81d89e6d579a329058a40b0e6c85b45c97db083a
|
[
"MIT"
] | null | null | null |
src/1-3_autocorrect.py
|
BernhardSchiffer/1-dynamic-programming
|
81d89e6d579a329058a40b0e6c85b45c97db083a
|
[
"MIT"
] | null | null | null |
src/1-3_autocorrect.py
|
BernhardSchiffer/1-dynamic-programming
|
81d89e6d579a329058a40b0e6c85b45c97db083a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# %%
# Assignment Pt. 1: Edit Distances
import numpy as np
from bs4 import BeautifulSoup
import math
vocabulary_file = open('../res/count_1w.txt', 'r')
lines = vocabulary_file.readlines()
vocabulary = dict()
word_count = 0
# Strips the newline character
for line in lines:
line = line.strip()
w = line.split('\t')
word = {'word': w[0], 'count': w[1]}
word_count = word_count + int(w[1])
vocabulary[word['word']] = word
print(len(vocabulary))
print(list(vocabulary.values())[0:5])
gem_doppel = [
("GCGTATGAGGCTAACGC", "GCTATGCGGCTATACGC"),
("kühler schrank", "schüler krank"),
("the longest", "longest day"),
("nicht ausgeloggt", "licht ausgenockt"),
("gurken schaben", "schurkengaben")
]
# %%
def hamming(s1: str, s2: str) -> int:
distance = 0
# pad strings to equal length
if(len(s2) > len(s1)):
s1 = s1.ljust(len(s2), ' ')
else:
s2 = s2.ljust(len(s1), ' ')
# calculate differences in characters
for c1, c2 in zip(s1,s2):
if(c1 != c2):
distance = distance + 1
return distance
assert hamming('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC') == 10
assert hamming('kühler schrank', 'schüler krank') == 13
assert hamming('the longest', 'longest day') == 11
assert hamming('nicht ausgeloggt', 'licht ausgenockt') == 4
assert hamming('gurken schaben', 'schurkengaben') == 14
# %%
def levenshtein(s1: str, s2: str) -> (int, str):
get_values = lambda v: [vv[0] for vv in v]
operations = list()
distances = np.zeros((len(s1)+1, len(s2)+1))
distances[0,:] = [*range(0,len(s2)+1)]
distances[:,0] = [*range(0,len(s1)+1)]
operations.append(['i'*int(i) for i in distances[0,:]])
for row in distances[1:,:]:
operations.append(['d'*int(i) for i in row])
for cidx in range(1,np.shape(distances)[0]):
for ridx in range(1,np.shape(distances)[1]):
c1 = s1[cidx-1]
c2 = s2[ridx-1]
deletion = (distances[cidx-1,ridx] + 1, operations[cidx-1][ridx] + 'd')
insertion = (distances[cidx,ridx-1] + 1, operations[cidx][ridx-1] + 'i')
if(c1 != c2):
substitution = (distances[cidx-1,ridx-1] + 1, operations[cidx-1][ridx-1] + 's')
else:
substitution = (distances[cidx-1,ridx-1] + 0, operations[cidx-1][ridx-1] + 'm')
x = [deletion, insertion, substitution]
minimum = min(get_values(x))
minidx = get_values(x).index(minimum)
distances[cidx,ridx] = minimum
operations[cidx][ridx] = x[minidx][1]
distance = int(distances[-1,-1])
operations = operations[-1][-1]
return (distance, operations)
assert levenshtein('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC') == (3, 'mmdmmmmsmmmmmimmmm')
assert levenshtein('kühler schrank', 'schüler krank') == (6, 'ssmimmmmsddmmmm')
assert levenshtein('the longest', 'longest day') == (8, 'ddddmmmmmmmiiii')
assert levenshtein('nicht ausgeloggt', 'licht ausgenockt') == (4, 'smmmmmmmmmmsmssm')
assert levenshtein('gurken schaben', 'schurkengaben') == (7, 'siimmmmmsdddmmmm')
# %%
# Assignment Pt. 2: Auto-Correct
def suggest(w: str, dist, max_cand=5) -> list:
"""
w: word in question
dist: edit distance to use
max_cand: maximum of number of suggestions
returns a list of tuples (word, dist, score) sorted by score and distance"""
if w in vocabulary:
Pw = math.log(int(vocabulary[w]['count'])/word_count)
return [(w, 0, Pw)]
suggestions = list()
for word in list(vocabulary.values())[:]:
distance, _ = dist(w, word['word'])
Pw = math.log(int(word['count'])/word_count)
suggestions.append((word['word'], distance, 0.5* math.log(1/distance) + Pw))
suggestions.sort(key=lambda s: s[1])
return suggestions[:max_cand]
examples = [
"pirates", # in-voc
"pirutes", # pirates?
"continoisly", # continuosly?
]
for w in examples[:]:
print(w, suggest(w, levenshtein, max_cand=3))
# sample result; your scores may vary!
# pirates [('pirates', 0, -11.408058827802126)]
# pirutes [('pirates', 1, -11.408058827802126), ('minutes', 2, -8.717825438953103), ('viruses', 2, -11.111468702571859)]
# continoisly [('continously', 1, -15.735337826575178), ('continuously', 2, -11.560071979871001), ('continuosly', 2, -17.009283000138204)]
# %%
# Assignment Pt. 3: Needleman-Wunsch
# reading content
file = open("../res/de.xml", "r")
contents = file.read()
# parsing
soup = BeautifulSoup(contents, 'xml')
# get characters
keys = soup.find_all('char')
keyboard = {}
# display content
for key in keys:
k = {'value': key.string}
# get key of character
parent = key.parent
k['left'] = parent['left']
k['top'] = parent['top']
k['width'] = parent['width']
k['height'] = parent['height']
k['fingerIndex'] = parent['fingerIndex']
keyboard[k['value']] = k
# get special keys
specialKeys = soup.find_all('specialKey')
for key in specialKeys:
if key['type'] == 'space':
keyboard[' '] = {
'value': ' ',
'left': key['left'],
'top': key['top'],
'width': key['width'],
'height': key['height']
}
def keyboardsim(s1: str, s2: str) -> float:
key1 = keyboard[s1]
key2 = keyboard[s2]
key1_pos = (int(key1['left']), int(key1['top']))
key2_pos = (int(key2['left']), int(key2['top']))
return math.dist(key1_pos, key2_pos)
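# Note (added for clarity): the similarity used here is simply the Euclidean distance
# between the stored key positions, so keyboardsim('a', 'a') == 0.0 and neighbouring keys
# yield small values; nw() below subtracts it as the substitution cost.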
def nw(s1: str, s2: str, d: float = 0, sim = keyboardsim) -> float:
get_values = lambda v: [vv[0] for vv in v]
operations = list()
scores = np.zeros((len(s1)+1, len(s2)+1))
scores[0,:] = [i*-1 for i in [*range(0,len(s2)+1)]]
scores[:,0] = [i*-1 for i in [*range(0,len(s1)+1)]]
operations.append(['-'*int(-i) for i in scores[0,:]])
for row in scores[1:,:]:
operations.append(['-'*int(-i) for i in row])
for cidx in range(1,np.shape(scores)[0]):
for ridx in range(1,np.shape(scores)[1]):
c1 = s1[cidx-1]
c2 = s2[ridx-1]
deletion = (scores[cidx-1,ridx] - 1, operations[cidx-1][ridx] + '-')
insertion = (scores[cidx,ridx-1] - 1, operations[cidx][ridx-1] + '-')
if(c1 != c2):
cost = sim(c1, c2)
substitution = (scores[cidx-1,ridx-1] - cost, operations[cidx-1][ridx-1] + '-')
else:
substitution = (scores[cidx-1,ridx-1] + 1, operations[cidx-1][ridx-1] + '+')
x = [deletion, insertion, substitution]
maximum = max(get_values(x))
minidx = get_values(x).index(maximum)
scores[cidx,ridx] = maximum
operations[cidx][ridx] = x[minidx][1]
score = int(scores[-1,-1])
operations = operations[-1][-1]
return (score, operations)
#return score
assert nw('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC', sim=lambda x,y: 1) == (12, '++-++++-+++++-++++')
assert nw('kühler schrank', 'schüler krank', sim=lambda x,y: 1) == (3, '--+-++++---++++')
assert nw('the longest', 'longest day', sim=lambda x,y: 1) == (-1, '----+++++++----')
assert nw('nicht ausgeloggt', 'licht ausgenockt', sim=lambda x,y: 1) == (8, '-++++++++++-+--+')
assert nw('gurken schaben', 'schurkengaben', sim=lambda x,y: 1) == (2, '---+++++----++++')
# How does your suggest function behave with nw and a keyboard-aware similarity?
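# A hedged sketch (not part of the original code): suggest() expects a callable that
# returns (distance, ops) where smaller is better, while nw() returns an alignment score
# where larger is better, so one way to combine them is to invert the score, e.g.
#   def nw_dist(a, b):
#       score, ops = nw(a, b)
#       return max(1, len(a) + len(b) - score), ops
#   print(suggest('pirutes', nw_dist, max_cand=3))
# (slow: this aligns against every vocabulary entry).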
print(nw('GCGTATGAGGCTAACGC', 'GCTATGCGGCTATACGC'))
print(nw('kühler schrank', 'schüler krank'))
print(nw('the longest', 'longest day'))
print(nw('nicht ausgeloggt', 'licht ausgenockt'))
print(nw('gurken schaben', 'schurkengaben'))
# %%
| 32.352941
| 138
| 0.587662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,377
| 0.308301
|
3c212a108eea23aed5b72646850bf521126d934b
| 251
|
py
|
Python
|
krb5ticket/errors.py
|
degagne/python-krb5ticket
|
1113e0b51e8eac36f6c85cce10e86e2c82ca4828
|
[
"MIT"
] | 2
|
2021-12-09T05:41:34.000Z
|
2022-03-18T18:23:24.000Z
|
krb5ticket/errors.py
|
degagne/python-krb5ticket
|
1113e0b51e8eac36f6c85cce10e86e2c82ca4828
|
[
"MIT"
] | null | null | null |
krb5ticket/errors.py
|
degagne/python-krb5ticket
|
1113e0b51e8eac36f6c85cce10e86e2c82ca4828
|
[
"MIT"
] | null | null | null |
class KeytabFileNotExists(RuntimeError):
"""
Raised when a Kerberos keytab file doesn't exist.
"""
pass
class KtutilCommandNotFound(RuntimeError):
"""
Raised when ``ktutil`` command-line interface not found.
"""
pass
| 19.307692
| 60
| 0.661355
| 247
| 0.984064
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.545817
|
3c21c614e14a12fda17173ca64af48d998a556ab
| 2,451
|
py
|
Python
|
recipes/Python/577691_Validate_ACNs_AustraliCompany/recipe-577691.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023
|
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/577691_Validate_ACNs_AustraliCompany/recipe-577691.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32
|
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/577691_Validate_ACNs_AustraliCompany/recipe-577691.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780
|
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
def isacn(obj):
"""isacn(string or int) -> True|False
Validate an ACN (Australian Company Number).
http://www.asic.gov.au/asic/asic.nsf/byheadline/Australian+Company+Number+(ACN)+Check+Digit
Accepts an int, or a string of digits including any leading zeroes.
Digits may be optionally separated with spaces. Any other input raises
TypeError or ValueError.
Return True if the argument is a valid ACN, otherwise False.
>>> isacn('004 085 616')
True
>>> isacn('005 085 616')
False
"""
if isinstance(obj, int):
if not 0 <= obj < 10**9:
raise ValueError('int out of range for an ACN')
obj = '%09d' % obj
assert len(obj) == 9
if not isinstance(obj, str):
raise TypeError('expected a str or int but got %s' % type(obj))
obj = obj.replace(' ', '')
if len(obj) != 9:
raise ValueError('ACN must have exactly 9 digits')
if not obj.isdigit():
raise ValueError('non-digit found in ACN')
digits = [int(c) for c in obj]
weights = [8, 7, 6, 5, 4, 3, 2, 1]
assert len(digits) == 9 and len(weights) == 8
chksum = 10 - sum(d*w for d,w in zip(digits, weights)) % 10
if chksum == 10:
chksum = 0
return chksum == digits[-1]
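# Worked example: for ACN 000 000 019 the first eight digits are 0 0 0 0 0 0 0 1, the
# weighted sum with weights 8..1 is 0*8 + ... + 0*2 + 1*1 = 1, and 10 - (1 % 10) = 9,
# which matches the ninth digit, so the ACN is valid.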
if __name__ == '__main__':
# Check the list of valid ACNs from the ASIC website.
ACNs = '''
000 000 019 * 000 250 000 * 000 500 005 * 000 750 005
001 000 004 * 001 250 004 * 001 500 009 * 001 749 999
001 999 999 * 002 249 998 * 002 499 998 * 002 749 993
002 999 993 * 003 249 992 * 003 499 992 * 003 749 988
003 999 988 * 004 249 987 * 004 499 987 * 004 749 982
004 999 982 * 005 249 981 * 005 499 981 * 005 749 986
005 999 977 * 006 249 976 * 006 499 976 * 006 749 980
006 999 980 * 007 249 989 * 007 499 989 * 007 749 975
007 999 975 * 008 249 974 * 008 499 974 * 008 749 979
008 999 979 * 009 249 969 * 009 499 969 * 009 749 964
009 999 964 * 010 249 966 * 010 499 966 * 010 749 961
'''.replace('*', '\n').split('\n')
ACNs = [s for s in ACNs if s and not s.isspace()]
for s in ACNs:
n = int(s.replace(' ', ''))
if not (isacn(s) and isacn(n) and not isacn(n+1)):
print('test failed for ACN: %s' % s.strip())
break
else:
print('all ACNs tested okay')
| 38.904762
| 95
| 0.565075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,533
| 0.625459
|
3c2312e967df908333d00837244d79e34fe4f564
| 2,845
|
py
|
Python
|
scripts/code_standards/code_standards.py
|
dolphingarlic/sketch-frontend
|
e646b7d51405e8a693f45472aa3cc6991a6f38af
|
[
"X11"
] | 1
|
2020-12-06T03:40:53.000Z
|
2020-12-06T03:40:53.000Z
|
scripts/code_standards/code_standards.py
|
dolphingarlic/sketch-frontend
|
e646b7d51405e8a693f45472aa3cc6991a6f38af
|
[
"X11"
] | null | null | null |
scripts/code_standards/code_standards.py
|
dolphingarlic/sketch-frontend
|
e646b7d51405e8a693f45472aa3cc6991a6f38af
|
[
"X11"
] | null | null | null |
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
from __future__ import print_function
import optparse
import path_resolv
from path_resolv import Path
def check_file(f, show_info, override_ignores):
text = f.read()
if ("@code standards ignore file" in text) and (not override_ignores):
return
if "\r" in text:
raise Exception("FATAL - dos endlines in %s" %(f))
for i, line in enumerate(text.split("\n")):
def warn(text):
print("%30s %30s :%03d" %("WARNING - " + text, f, i))
def info(text):
if show_info:
print("%30s %30s :%03d" %("INFO - " + text, f, i))
if "\t" in line:
warn("tabs present")
# for now, ignore Eclipse blank comment lines
if line.endswith(" ") and line.strip() != "*":
warn("trailing whitespace")
# the following can be ignored
if "@code standards ignore" in line and not override_ignores:
continue
# spaces don't show up as much for variable indent
relevant_line = line.lstrip('/').strip()
if float(len(line)) * 0.7 + float(len(relevant_line)) * 0.3 > 90:
warn("long line")
# the following only apply to uncommented code
if line.lstrip().startswith("//"):
continue
# the following do not apply to this file
if f.endswith("build_util/code_standards.py"):
continue
if "System.exit" in line:
warn("raw system exit")
if "DebugOut.assertSlow" in line:
info("debug assert slow call")
def warn(text):
print("%30s %30s" %("WARNING - " + text, f))
if f.endswith(".java") and not "http://creativecommons.org/licenses/BSD/" in text:
warn("no license")
def main(srcdir, file_extensions, **kwargs):
assert type(file_extensions) == list
for root, dirs, files in Path(srcdir).walk():
for f in files:
f = Path(root, f)
if f.splitext()[-1][1:] in file_extensions:
check_file(f, **kwargs)
if __name__ == "__main__":
cmdopts = optparse.OptionParser(usage="%prog [options]")
cmdopts.add_option("--srcdir", default=Path("."),
help="source directory to look through")
cmdopts.add_option("--file_extensions", default="java,scala,py,sh",
help="comma-sepated list of file extensions")
cmdopts.add_option("--show_info", action="store_true",
help="show info for command")
cmdopts.add_option("--override_ignores", action="store_true",
help="ignore \"@code standards ignore [file]\"")
options, args = cmdopts.parse_args()
options.file_extensions = options.file_extensions.split(",")
if not options.show_info:
print("use --show_info to show more notices")
main(**options.__dict__)
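A sample invocation inferred from the optparse options above (the source directory is a placeholder):
python code_standards.py --srcdir path/to/src --file_extensions java,py --show_info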
| 34.695122
| 86
| 0.59754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 968
| 0.340246
|
3c25269f1d545577e247a812c7d95d25ce72bbfe
| 2,368
|
py
|
Python
|
grease/scanner.py
|
JorgeRubio96/grease-lang
|
94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9
|
[
"MIT"
] | null | null | null |
grease/scanner.py
|
JorgeRubio96/grease-lang
|
94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9
|
[
"MIT"
] | null | null | null |
grease/scanner.py
|
JorgeRubio96/grease-lang
|
94a7cf9f01339ae2aac2c1fa1fefb623c32fffc9
|
[
"MIT"
] | 1
|
2018-10-09T22:57:34.000Z
|
2018-10-09T22:57:34.000Z
|
import ply.lex as lex
from grease.core.indents import Indents
reserved = {
'var': 'VAR',
'if': 'IF',
'else': 'ELSE',
'scan': 'SCAN',
'print': 'PRINT',
'and': 'AND',
'or': 'OR',
'Bool': 'BOOL',
'Int': 'INT',
'Float': 'FLOAT',
'Char': 'CHAR',
'fn': 'FN',
'interface': 'INTERFACE',
'import': 'IMPORT',
'struct':'STRUCT',
'while':'WHILE',
'alias':'ALIAS',
'as':'AS',
'gt': 'GT',
'ge': 'GE',
'lt': 'LT',
'le': 'LE',
'eq': 'EQ',
'not':'NOT',
'from': 'FROM',
'return': 'RETURN',
'true': 'TRUE',
'false': 'FALSE'
}
tokens = [
'ID', 'CONST_INT', 'CONST_REAL', 'CONST_STR', 'CONST_CHAR',
'ARROW', 'SEMICOLON', 'COLON', 'COMMA', 'DOT', 'EQUALS', 'NEW_LINE',
'OPEN_BRACK','CLOSE_BRACK', 'OPEN_PAREN', 'CLOSE_PAREN', 'PLUS', 'MINUS',
'TIMES', 'DIVIDE', 'AMP', 'INDENT', 'DEDENT'
] + list(reserved.values())
t_DOT = r'\.'
t_SEMICOLON = r'\;'
t_COLON = r'\:'
t_COMMA = r'\,'
t_OPEN_BRACK = r'\['
t_CLOSE_BRACK = r'\]'
t_EQUALS = r'\='
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_PLUS = r'\+'
t_MINUS = r'\-'
t_TIMES = r'\*'
t_DIVIDE = r'\/'
t_AMP = r'\&'
t_ARROW = r'\-\>'
t_ignore = ' '
def t_ignore_SINGLE_COMMENT(t):
r'\#.*\n'
t.lexer.lineno += 1
def t_ignore_MULTI_COMMENT(t):
r'\/\*[\s\S]*\*\/\s*'
t.lexer.lineno += t.value.count('\n')
def t_ID(t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
t.type = reserved.get(t.value, 'ID')
    # 'true'/'false' are reserved as the TRUE/FALSE tokens, so the original check
    # against 'CONST_BOOL' could never match; convert their values to booleans here.
    if t.type in ('TRUE', 'FALSE'):
        t.value = t.type == 'TRUE'
return t
def t_CONST_REAL(t):
r'[0-9]+\.[0-9]+'
t.value = float(t.value)
return t
def t_CONST_INT(t):
r'[0-9]+'
t.value = int(t.value)
return t
def t_CONST_STR(t):
r'\".+\"'
t.value = t.value[1:-1]
return t
def t_CONST_CHAR(t):
r'\'.+\''
t.value = t.value[1:-1]
return t
def t_NEW_LINE(t):
r'\n\s*[\t ]*'
t.lexer.lineno += t.value.count('\n')
t.value = len(t.value) - 1 - t.value.rfind('\n')
return t
def first_word(s):
whites = [' ', '\t', '\n']
low = 0
for l in s:
if l in whites:
break
low += 1
return s[0:low]
def t_error(t):
print("Unexpected \"{}\" at line {}".format(first_word(t.value), t.lexer.lineno))
grease_lexer = Indents(lex.lex())
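A small driving sketch, assuming the Indents wrapper (not shown here) keeps ply's usual input()/token() interface:
grease_lexer.input("var x = 1 + 2\n")
token = grease_lexer.token()
while token is not None:
    print(token.type, token.value)
    token = grease_lexer.token()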
| 19.89916
| 85
| 0.505912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 796
| 0.336149
|
3c2804fa00492d199e8c3aefe6c666e804514568
| 768
|
py
|
Python
|
patan/utils.py
|
tttlh/patan
|
d3e5cfec085e21f963204b5c07a85cf1f029560c
|
[
"MIT"
] | null | null | null |
patan/utils.py
|
tttlh/patan
|
d3e5cfec085e21f963204b5c07a85cf1f029560c
|
[
"MIT"
] | null | null | null |
patan/utils.py
|
tttlh/patan
|
d3e5cfec085e21f963204b5c07a85cf1f029560c
|
[
"MIT"
] | 1
|
2021-03-01T08:35:34.000Z
|
2021-03-01T08:35:34.000Z
|
# _*_ coding: utf-8 _*_
from importlib import import_module
def is_iterable(var):
return hasattr(var, '__iter__')
def to_iterable(var):
if var is None:
return []
elif is_iterable(var):
return var
else:
return [var]
def load_class_by_name(qualified_name):
last_dot = qualified_name.rindex('.')
module, name = qualified_name[:last_dot], qualified_name[last_dot + 1:]
mod = import_module(module)
obj = getattr(mod, name)
return obj
def get_obj_by_class(cls, settings, *args, **kwargs):
if settings is None:
raise ValueError('settings not found')
if hasattr(cls, 'from_settings'):
return cls.from_settings(settings, *args, **kwargs)
else:
return cls(*args, **kwargs)
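Usage sketch for the helpers above; json.JSONDecoder is just an example of a fully qualified class name.
assert to_iterable(None) == []
assert to_iterable(3) == [3]
decoder_cls = load_class_by_name("json.JSONDecoder")   # resolved via importlib
decoder = get_obj_by_class(decoder_cls, settings={})   # no from_settings, so plain cls() is used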
| 22.588235
| 75
| 0.653646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.092448
|
3c2968143388eec54e35192431494447d2c82d24
| 3,673
|
py
|
Python
|
tests/test_assert_immediate.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_assert_immediate.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_assert_immediate.py
|
makaimann/fault
|
8c805415f398e64971d18fbd3014bc0b59fb38b8
|
[
"BSD-3-Clause"
] | null | null | null |
import tempfile
import pytest
import fault as f
import magma as m
from fault.verilator_utils import verilator_version
@pytest.mark.parametrize('success_msg', [None, "OK"])
@pytest.mark.parametrize('failure_msg', [None, "FAILED"])
@pytest.mark.parametrize('severity', ["error", "fatal", "warning"])
@pytest.mark.parametrize('on', [None, f.posedge])
@pytest.mark.parametrize('name', [None, "my_assert"])
def test_immediate_assert(capsys, failure_msg, success_msg, severity, on,
name):
if verilator_version() < 4.0:
pytest.skip("Untested with earlier verilator versions")
if failure_msg is not None and severity == "fatal":
# Use integer exit code
failure_msg = 1
class Foo(m.Circuit):
io = m.IO(
I0=m.In(m.Bit),
I1=m.In(m.Bit)
) + m.ClockIO()
io.CLK.unused()
f.assert_immediate(~(io.I0 & io.I1),
success_msg=success_msg,
failure_msg=failure_msg,
severity=severity,
on=on if on is None else on(io.CLK),
name=name)
tester = f.Tester(Foo, Foo.CLK)
tester.circuit.I0 = 1
tester.circuit.I1 = 1
tester.step(2)
try:
with tempfile.TemporaryDirectory() as dir_:
tester.compile_and_run("verilator", magma_opts={"inline": True},
flags=['--assert'], directory=dir_,
disp_type="realtime")
except AssertionError:
assert failure_msg is None or severity in ["error", "fatal"]
else:
# warning doesn't trigger exit code/failure (but only if there's a
# failure_msg, otherwise severity is ignored)
assert severity == "warning"
out, _ = capsys.readouterr()
if failure_msg is not None:
if severity == "warning":
msg = "%Warning:"
else:
msg = "%Error:"
msg += " Foo.v:29: Assertion failed in TOP.Foo"
if name is not None:
msg += f".{name}"
if severity == "error":
msg += f": {failure_msg}"
assert msg in out
tester.clear()
tester.circuit.I0 = 0
tester.circuit.I1 = 1
tester.step(2)
with tempfile.TemporaryDirectory() as dir_:
tester.compile_and_run("verilator",
magma_opts={"inline": True,
"verilator_compat": True},
flags=['--assert'], directory=dir_,
disp_type="realtime")
out, _ = capsys.readouterr()
if success_msg is not None:
assert success_msg in out
def test_immediate_assert_tuple_msg(capsys):
if verilator_version() < 4.0:
pytest.skip("Untested with earlier verilator versions")
class Foo(m.Circuit):
io = m.IO(
I0=m.In(m.Bit),
I1=m.In(m.Bit)
)
f.assert_immediate(
io.I0 == io.I1,
failure_msg=("io.I0 -> %x != %x <- io.I1", io.I0, io.I1)
)
tester = f.Tester(Foo)
tester.circuit.I0 = 1
tester.circuit.I1 = 0
tester.eval()
with pytest.raises(AssertionError):
with tempfile.TemporaryDirectory() as dir_:
tester.compile_and_run("verilator", magma_opts={"inline": True},
flags=['--assert'], directory=dir_,
disp_type="realtime")
out, _ = capsys.readouterr()
msg = ("%Error: Foo.v:13: Assertion failed in TOP.Foo: io.I0 -> 1 != 0 <-"
" io.I1")
assert msg in out, out
| 34.980952
| 78
| 0.54288
| 680
| 0.185135
| 0
| 0
| 2,605
| 0.70923
| 0
| 0
| 682
| 0.185679
|
3c2af43cd6a571a35fff3b7b22af4c58d6015098
| 3,098
|
py
|
Python
|
cs673backend/api/authentication.py
|
MicobyteMichael/CS673ProjectBackend
|
87b28c62f29630059e1906c8bf7383d814880bd0
|
[
"Apache-2.0"
] | null | null | null |
cs673backend/api/authentication.py
|
MicobyteMichael/CS673ProjectBackend
|
87b28c62f29630059e1906c8bf7383d814880bd0
|
[
"Apache-2.0"
] | null | null | null |
cs673backend/api/authentication.py
|
MicobyteMichael/CS673ProjectBackend
|
87b28c62f29630059e1906c8bf7383d814880bd0
|
[
"Apache-2.0"
] | null | null | null |
from flask import session
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from bcrypt import gensalt, hashpw
from hashlib import sha256
from hmac import new as hash_mac
from os import environ
PEPPER = environ["PEPPER"].encode("utf-8")
def hash(password, salt):
return hashpw(hash_mac(PEPPER, password.encode("utf-8"), sha256).hexdigest().encode("utf-8"), salt).decode("utf-8")
def start(flaskapp, db, api, UserAccount):
class Login(Resource):
def __init__(self):
self.parser = RequestParser()
self.parser.add_argument("username", type = str, required = True)
self.parser.add_argument("password", type = str, required = True)
def post(self):
args = self.parser.parse_args(strict = True)
user = UserAccount.query.filter_by(username = args["username"]).first()
success = False
if user is not None:
pass_hash = hash(args["password"], user.salt.encode("utf-8"))
if pass_hash == user.passhash:
session["user"] = user.username
session["userid"] = user.id
success = True
return { "authenticated": success }
class Register(Resource):
def __init__(self):
self.parser = RequestParser()
self.parser.add_argument("username", type = str, required = True)
self.parser.add_argument("password", type = str, required = True)
self.parser.add_argument("email", type = str, required = True)
self.parser.add_argument("phone", type = str, required = True)
def post(self):
args = self.parser.parse_args(strict = True)
new_args = { k:v for k, v in args.items() if k != "password" }
for k, v in new_args.items():
user = UserAccount.query.filter_by(**{k : v}).first()
if user is not None:
return { "created": False, "reason": "duplicate " + k }
salt = gensalt()
new_args["salt"] = salt.decode("utf-8")
new_args["passhash"] = hash(args["password"], salt)
user = UserAccount(**new_args)
db.session.add(user)
db.session.commit()
session["user"] = user.username
session["userid"] = user.id
return { "created": True }
class ResetPassword(Resource):
def __init__(self):
self.parser = RequestParser()
self.parser.add_argument("username", type = str, required = True)
self.parser.add_argument("newPass", type = str, required = True)
self.parser.add_argument("email", type = str, required = True)
self.parser.add_argument("phone", type = str, required = True)
def post(self):
args = self.parser.parse_args(strict = True)
new_args = { k:v for k, v in args.items() if k != "newPass" }
user = UserAccount.query.filter_by(**new_args).first()
if user is not None:
user.passhash = hash(args["newPass"], user.salt.encode("utf-8"))
db.session.commit()
session["user"] = user.username
session["userid"] = user.id
return { "reset": user is not None }
api.add_resource(Login, "/login")
api.add_resource(Register, "/register")
api.add_resource(ResetPassword, "/resetpass")
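A hedged wiring sketch: the Flask/SQLAlchemy/Api setup and the UserAccount columns below are assumptions inferred from how start() uses them (id, username, email, phone, salt, passhash); the PEPPER environment variable must also be set before this module is imported.
from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"                      # required for flask sessions
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.db"
db = SQLAlchemy(app)
api = Api(app)
class UserAccount(db.Model):                                 # columns inferred from start()
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True)
    email = db.Column(db.String(128), unique=True)
    phone = db.Column(db.String(32), unique=True)
    salt = db.Column(db.String(64))
    passhash = db.Column(db.String(128))
start(app, db, api, UserAccount)                             # registers /login, /register, /resetpass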
| 34.422222
| 117
| 0.65042
| 2,467
| 0.79632
| 0
| 0
| 0
| 0
| 0
| 0
| 351
| 0.113299
|
3c2d0e8fef55c7fd0b954db4e7dcf85c4711c86c
| 4,606
|
py
|
Python
|
sunpy/sun/tests/test_sun.py
|
PritishC/sunpy
|
76a7b5994566674d85eada7dcec54bf0f120269a
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/sun/tests/test_sun.py
|
PritishC/sunpy
|
76a7b5994566674d85eada7dcec54bf0f120269a
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/sun/tests/test_sun.py
|
PritishC/sunpy
|
76a7b5994566674d85eada7dcec54bf0f120269a
|
[
"BSD-2-Clause"
] | null | null | null |
from astropy.coordinates import Angle
from astropy.time import Time
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from sunpy.sun import sun
def test_true_longitude():
# Validate against a published value from the Astronomical Almanac (1992)
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.true_longitude(t), Angle('199d54m26.17s'), atol=0.1*u.arcsec)
def test_apparent_longitude():
# Validate against a published value from the Astronomical Almanac (1992)
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.apparent_longitude(t), Angle('199d54m21.56s'), atol=0.1*u.arcsec)
def test_true_latitude():
# Validate against a published value from the Astronomical Almanac (1992)
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.true_latitude(t), Angle('0.72s'), atol=0.05*u.arcsec)
def test_apparent_latitude():
# Validate against a published value from the Astronomical Almanac (1992)
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.apparent_latitude(t), Angle('0.72s'), atol=0.05*u.arcsec)
def test_solar_cycle_number():
assert_quantity_allclose(sun.solar_cycle_number("2012/11/11"), 5, atol=1e-1)
assert_quantity_allclose(sun.solar_cycle_number("2011/2/22"), 4, atol=1e-1)
assert_quantity_allclose(sun.solar_cycle_number("2034/1/15"), 27, atol=1e-1)
def test_solar_semidiameter_angular_size():
assert_quantity_allclose(sun.solar_semidiameter_angular_size("2012/11/11"), 968.871294 * u.arcsec, atol=1e-3 * u.arcsec)
assert_quantity_allclose(sun.solar_semidiameter_angular_size("2043/03/01"), 968.326347 * u.arcsec, atol=1e-3 * u.arcsec)
assert_quantity_allclose(sun.solar_semidiameter_angular_size("2001/07/21"), 944.039007 * u.arcsec, atol=1e-3 * u.arcsec)
def test_mean_obliquity_of_ecliptic():
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.mean_obliquity_of_ecliptic(t), 84384.8*u.arcsec, atol=0.1*u.arcsec)
def test_true_rightascension():
assert_quantity_allclose(sun.true_rightascension("2012/11/11"), 226.548*u.deg, atol=1e-3*u.deg)
assert_quantity_allclose(sun.true_rightascension("2142/02/03"), 316.466*u.deg, atol=1e-3*u.deg)
assert_quantity_allclose(sun.true_rightascension("2013/12/11"), 258.150*u.deg, atol=1e-3*u.deg)
def test_true_rightascension_J2000():
# Validate against JPL HORIZONS output
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.true_rightascension(t, equinox_of_date=False),
Angle('13h13m53.65s'), atol=0.01*u.arcsec)
def test_true_declination():
assert_quantity_allclose(sun.true_declination("2012/11/11"), -17.470*u.deg, atol=1e-3*u.deg)
assert_quantity_allclose(sun.true_declination("2245/12/01"), -21.717*u.deg, atol=1e-3*u.deg)
assert_quantity_allclose(sun.true_declination("2014/05/27"), 21.245*u.deg, atol=1e-3*u.deg)
def test_true_declination_J2000():
# Validate against JPL HORIZONS output
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.true_declination(t, equinox_of_date=False),
Angle('-7d49m20.8s'), atol=0.05*u.arcsec)
def test_true_obliquity_of_ecliptic():
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.true_obliquity_of_ecliptic(t), 84384.5*u.arcsec, atol=0.1*u.arcsec)
def test_apparent_rightascension():
# Validate against a published value from the Astronomical Almanac (1992)
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.apparent_rightascension(t), Angle('13h13m30.749s'),
atol=0.01*u.arcsec)
def test_apparent_rightascension_J2000():
# Regression-only test
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.apparent_rightascension(t, equinox_of_date=False),
Angle('13h13m52.37s'), atol=0.01*u.arcsec)
def test_apparent_declination():
# Validate against a published value from the Astronomical Almanac (1992)
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.apparent_declination(t), Angle('-7d47m01.74s'), atol=0.05*u.arcsec)
def test_apparent_declination_J2000():
# Regression-only test
t = Time('1992-10-13', scale='tdb')
assert_quantity_allclose(sun.apparent_declination(t, equinox_of_date=False),
Angle('-7d49m13.09s'), atol=0.05*u.arcsec)
def test_print_params():
# Test only for any issues with printing; accuracy is covered by other tests
sun.print_params()
| 41.495495
| 124
| 0.721884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,108
| 0.240556
|
3c2db6513413d924898e189ce93d55aaff3a377a
| 1,031
|
py
|
Python
|
components/collector/src/source_collectors/file_source_collectors/pyupio_safety.py
|
Gamer1120/quality-time
|
f3a0d6f75cd6055d78995d37feae72bc3e837e4b
|
[
"Apache-2.0"
] | 1
|
2021-02-22T07:53:36.000Z
|
2021-02-22T07:53:36.000Z
|
components/collector/src/source_collectors/file_source_collectors/pyupio_safety.py
|
Gamer1120/quality-time
|
f3a0d6f75cd6055d78995d37feae72bc3e837e4b
|
[
"Apache-2.0"
] | 338
|
2020-10-29T04:28:09.000Z
|
2022-02-22T04:09:33.000Z
|
components/collector/src/source_collectors/file_source_collectors/pyupio_safety.py
|
dicksnel/quality-time
|
4c04f8852aa97175f2bca2b5c5391b3e09b657af
|
[
"Apache-2.0"
] | 1
|
2022-01-06T04:07:03.000Z
|
2022-01-06T04:07:03.000Z
|
"""Pyup.io Safety metrics collector."""
from typing import Final
from base_collectors import JSONFileSourceCollector
from source_model import Entity, SourceMeasurement, SourceResponses
class PyupioSafetySecurityWarnings(JSONFileSourceCollector):
"""Pyup.io Safety collector for security warnings."""
PACKAGE: Final[int] = 0
AFFECTED: Final[int] = 1
INSTALLED: Final[int] = 2
VULNERABILITY: Final[int] = 3
KEY: Final[int] = 4
async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement:
"""Return a list of warnings."""
entities = []
for response in responses:
entities.extend(
[Entity(
key=warning[self.KEY], package=warning[self.PACKAGE], installed=warning[self.INSTALLED],
affected=warning[self.AFFECTED], vulnerability=warning[self.VULNERABILITY])
for warning in await response.json(content_type=None)])
return SourceMeasurement(entities=entities)
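For reference, the index constants above imply each Safety warning is a 5-element array; a representative row (values illustrative) and the Entity fields it maps to:
# [PACKAGE, AFFECTED, INSTALLED, VULNERABILITY, KEY] per the class constants
warning = ["django", "<2.2.24", "2.2.0", "Potential directory traversal ...", "38749"]
# -> Entity(key="38749", package="django", installed="2.2.0",
#           affected="<2.2.24", vulnerability="Potential directory traversal ...")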
| 36.821429
| 108
| 0.682832
| 841
| 0.815713
| 0
| 0
| 0
| 0
| 571
| 0.553831
| 124
| 0.120272
|
3c312cb7c5567e3a8e860f6d1634192c56119a38
| 2,580
|
py
|
Python
|
jaf/main.py
|
milano-slesarik/jaf
|
97c0a579f4ece70dbfb583d72aa35380f7a82f8d
|
[
"MIT"
] | null | null | null |
jaf/main.py
|
milano-slesarik/jaf
|
97c0a579f4ece70dbfb583d72aa35380f7a82f8d
|
[
"MIT"
] | null | null | null |
jaf/main.py
|
milano-slesarik/jaf
|
97c0a579f4ece70dbfb583d72aa35380f7a82f8d
|
[
"MIT"
] | null | null | null |
import json
import os
import typing
from io import IOBase
from jaf.encoders import JAFJSONEncoder
class JsonArrayFileWriterNotOpenError(Exception):
pass
class JsonArrayFileWriter:
    MODE__APPEND_OR_CREATE = 'ac'
    MODE__REWRITE_OR_CREATE = 'rc'
    MODE__APPEND = 'a'  # referenced in open() below but not implemented yet
def __init__(self, filepath: str, mode=MODE__REWRITE_OR_CREATE, indent: typing.Optional[int] = None,
json_encoder=JAFJSONEncoder):
self.filepath: str = filepath
self.mode = mode
self.indent: int = indent
self.lines: int = 0
self.json_encoder = json_encoder
self.file: typing.Optional[IOBase] = None
def __enter__(self) -> 'JsonArrayFileWriter':
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
def open(self) -> None:
if self.mode == self.MODE__REWRITE_OR_CREATE:
self.file = open(self.filepath, 'w')
self.file.write('[')
elif self.mode == self.MODE__APPEND_OR_CREATE:
if os.path.exists(self.filepath):
with open(self.filepath) as f:
jsn = json.load(f) # loads whole JSON into the memory
os.rename(self.filepath, self.filepath + '.bak')
else:
jsn = []
self.file = open(self.filepath, 'w')
self.file.write('[')
for entry in jsn:
self.write(entry)
elif self.mode == self.MODE__APPEND:
raise NotImplementedError
else:
raise NotImplementedError(f"Unknown write mode \"{self.mode}\"")
def write(self, dct: dict) -> None:
if getattr(self, 'file', None) is None:
raise JsonArrayFileWriterNotOpenError(
"JsonArrayFileWriter needs to be opened by calling `.open()` or used within a context manager `with JsonArrayFileWriter(<FILEPATH>,**kwargs) as writer:`")
jsn = json.dumps(dct, indent=self.indent, cls=self.json_encoder)
if self.lines:
self.file.write(f',')
self.write_newline()
self.file.write(jsn)
self.lines += 1
def write_dict(self, dct: dict) -> None:
self.write(dct)
def write_newline(self):
self.file.write(os.linesep)
def close(self) -> None:
self.file.write('\n')
self.file.write(']')
self.file.close()
with JsonArrayFileWriter('output.json', mode=JsonArrayFileWriter.MODE__APPEND_OR_CREATE, indent=4) as j:
d = {1: 2, 2: 3, 3: 4, 4: 6}
for i in range(1000000):
j.write(d)
| 31.084337
| 170
| 0.601163
| 2,287
| 0.886434
| 0
| 0
| 0
| 0
| 0
| 0
| 301
| 0.116667
|
3c325c2ee5bdb7ac85221911bcf0265edefa9de5
| 91
|
py
|
Python
|
8_kyu/Removing_Elements.py
|
UlrichBerntien/Codewars-Katas
|
bbd025e67aa352d313564d3862db19fffa39f552
|
[
"MIT"
] | null | null | null |
8_kyu/Removing_Elements.py
|
UlrichBerntien/Codewars-Katas
|
bbd025e67aa352d313564d3862db19fffa39f552
|
[
"MIT"
] | null | null | null |
8_kyu/Removing_Elements.py
|
UlrichBerntien/Codewars-Katas
|
bbd025e67aa352d313564d3862db19fffa39f552
|
[
"MIT"
] | null | null | null |
def remove_every_other(my_list):
return [my_list[it] for it in range(0,len(my_list),2)]
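Example calls; the same result can also be written as the step slice my_list[::2].
assert remove_every_other(['Keep', 'Drop', 'Keep', 'Drop', 'Keep']) == ['Keep', 'Keep', 'Keep']
assert remove_every_other([]) == []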
| 45.5
| 58
| 0.736264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3c3406ddfc224f8162dd8e58c6d1818f19d5fb3c
| 812
|
py
|
Python
|
BluePlug/fork.py
|
liufeng3486/BluePlug
|
c7c5c769ed35c71ebc542d34848d6bf309abd051
|
[
"MIT"
] | 1
|
2019-01-27T04:08:05.000Z
|
2019-01-27T04:08:05.000Z
|
BluePlug/fork.py
|
liufeng3486/BluePlug
|
c7c5c769ed35c71ebc542d34848d6bf309abd051
|
[
"MIT"
] | 5
|
2021-03-18T21:35:20.000Z
|
2022-01-13T00:58:18.000Z
|
BluePlug/fork.py
|
liufeng3486/BluePlug
|
c7c5c769ed35c71ebc542d34848d6bf309abd051
|
[
"MIT"
] | null | null | null |
from aip import AipOcr
BAIDU_APP_ID='14490756'
BAIDU_API_KEY = 'Z7ZhXtleolXMRYYGZ59CGvRl'
BAIDU_SECRET_KEY = 'zbHgDUGmRnBfn6XOBmpS5fnr9yKer8C6'
client= AipOcr(BAIDU_APP_ID, BAIDU_API_KEY, BAIDU_SECRET_KEY)
options = {}
options["recognize_granularity"] = "big"
options["language_type"] = "CHN_ENG"
options["detect_direction"] = "true"
options["detect_language"] = "true"
options["vertexes_location"] = "true"
options["probability"] = "true"
def getimagestream(path):
with open(path, 'rb') as f:
return f.read()
def getcharactor(path):
obj = client.general(getimagestream(path))
if obj.get('error_code'):
return obj
res = []
for r in obj['words_result']:
res.append(r['words'])
return res
if __name__ == '__main__':
r = getcharactor('5.png')
print(r)
| 24.606061
| 62
| 0.69335
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 267
| 0.328818
|
3c34f86c770e6ffff7025e5fd4715854fbee0f6d
| 1,233
|
py
|
Python
|
test/test_model.py
|
karlsimsBBC/feed-me
|
e2bc87aef4740c2899b332f1b4036c169b108b79
|
[
"MIT"
] | null | null | null |
test/test_model.py
|
karlsimsBBC/feed-me
|
e2bc87aef4740c2899b332f1b4036c169b108b79
|
[
"MIT"
] | 2
|
2020-02-28T16:52:05.000Z
|
2020-02-28T16:52:11.000Z
|
test/test_model.py
|
karlsimsBBC/feed-me
|
e2bc87aef4740c2899b332f1b4036c169b108b79
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import mock
from unittest.mock import Mock
from unittest.mock import mock_open
from contextlib import contextmanager
class TestDocumentDB(unittest.TestCase):
def test_reads_articles(self):
db = DocumentDB()
expected = [
{'article_idx': 0},
{'article_idx': 1}
]
self.assertEquals(db.read('articles'), expected)
def test_writes_atricle(self):
db = DocumentDB()
self.assertEquals(db.read('articles'), [])
db.write({'article_idx': 0})
expected = [
{'article_idx': 0}
]
actual = db.read('articles')
self.assertEquals(db.read('articles'), expected)
def test_skips_write_when_article_exists(self):
db = DocumentDB()
self.assertEquals(db.read('articles'), [])
db.write({'article_idx': 0})
db.write({'article_idx': 0})
expected = [
{'article_idx': 0}
]
self.assertEquals(db.read('articles'), expected)
@contextmanager
def mock_files(data):
f = mock_open(read_data=data)
with mock.patch("__builtin__.open", f) as fs:
yield fs
MOCK_DATA_A = ''
MOCK_DATA_B = '{"article_idx": 0}\n{"article_idx": 1}\n"
| 27.4
| 56
| 0.596918
| 895
| 0.725872
| 122
| 0.098946
| 138
| 0.111922
| 0
| 0
| 212
| 0.171938
|
3c36a55c48b2843a0df149d905928f2eb9279e29
| 4,596
|
py
|
Python
|
GuessGame.py
|
VedantKhairnar/Guess-Game
|
a959d03cbfea539a63e451e5c65f7cd9790d1b7f
|
[
"MIT"
] | null | null | null |
GuessGame.py
|
VedantKhairnar/Guess-Game
|
a959d03cbfea539a63e451e5c65f7cd9790d1b7f
|
[
"MIT"
] | null | null | null |
GuessGame.py
|
VedantKhairnar/Guess-Game
|
a959d03cbfea539a63e451e5c65f7cd9790d1b7f
|
[
"MIT"
] | 1
|
2020-06-05T12:42:39.000Z
|
2020-06-05T12:42:39.000Z
|
from tkinter import *
import random
from tkinter import messagebox
class GuessGame:
def protocolhandler(self):
if messagebox.askyesno("Exit", "Really Wanna stop Guessing?"):
if messagebox.askyesno("Exit", "Are you sure?"):
self.root.destroy()
def result(self):
print (" You have ran out of guesses :( i was thinking of the number: ",self.n)
lose = Label(self.root, text=" You have run out of chances :(\nand I was thinking of the number: "+str(self.n),bg='black',fg='cyan',font=5)
lose.place(x = 140,y = 500)
def check(self):
print("Checking the number provided...")
self.flag = 0
self.turn += 1
if self.flag == 0 and self.turn == 10:
self.result()
return
print("Entered number is "+ str(self.m.get()))
if self.m.get()<1 or self.m.get()>100:
print("Invalid number..")
self.invalid = Label(self.root, text="Invalid number entered.. ",bg='black',fg='cyan',font=5)
self.invalid.place(x = 140,y = 503)
elif self.m.get()==self.n:
print("Bravos,You guessed it right!!! in " +str(self.turn)+" turns")
self.flag=1
self.win = Label(self.root, text="Bravos,You guessed it right!!! in " +str(self.turn)+" turns",bg='black',fg='cyan',font=5)
self.win.place(x=130,y=503)
elif self.m.get()<self.n:
print ("Too low! You have ",10-self.turn, "guesses left!")
self.less = Label(self.root, text="Too low! You have "+str(10-self.turn)+ " guesses left!",bg='black',fg='cyan',font=5)
self.less.place(x=135,y=503)
elif self.m.get()>self.n:
print ("Too high! You have ",10-self.turn, "guesses left!")
self.more = Label(self.root, text="Too high! You have "+str(10-self.turn)+ " guesses left!",bg='black',fg='cyan',font=5)
self.more.place(x=135,y=503)
else:
print("There's some problem!!!")
self.root.destroy()
def __init__(self):
self.root = Tk()
self.root.geometry('800x600')
self.root.config(bg='black')
self.root.title('Guess Game')
self.m = IntVar()
self.status = ""
self.flag = 0
self.turn=0
self.n = random.randint(1,101)
# self.root.protocol("WM_DELETE_WINDOW", self.protocolhandler)
photo = PhotoImage(file="pythonlogoneonf.png")
label = Label(self.root, image=photo,border=0)
label.place(x=300, y=300)
self.win = Label(self.root, text="Bravos,You guessed it right!!! in " +str(self.turn)+" turns",bg='black',fg='cyan')
self.more = Label(self.root, text="Too high! You have "+str(10-self.turn)+ "guesses left!",bg='black',fg='cyan')
self.less = Label(self.root, text="Too low! You have "+str(10-self.turn)+ "guesses left!",bg='black',fg='cyan')
self.invalid = Label(self.root, text="Invalid number entered.. ",bg='black',fg='cyan')
status = Label(self.root,text = "Status: ",bg='black',fg='cyan')
status.config(font=("magneto", 20))
status.place(x=17,y=495)
title_g = Label(self.root, text="G",bg='black',fg='cyan')
# title_g.config(font=("mexicanero", 50))
title_g.config(font=("prometheus", 80))
title_g.place(x=250,y=70)
title_1 = Label(self.root, text="uess",fg='cyan',bg='black')
title_1.config(font=("prometheus", 38))
title_1.place(x=350,y=70)
title_2 = Label(self.root, text="ame",fg='cyan',bg='black')
title_2.config(font=("prometheus", 38))
title_2.place(x=370,y=125)
instructions = Label(self.root, text="Instruction: I am thinking of a number from 1-100..\nGuess it with the directions I'll provide.\nYou have 10 chances in total\nGood Luck\n:)",bg='black',fg='cyan')
instructions.config(font=("calibri", 13))
instructions.place(x=220,y=350)
guess = Label(self.root, text="Enter Your Guess here:",bg='black',fg='cyan')
guess.config(font=("fragmentcore", 13))
guess.place(x=23,y=290)
self.entry = Entry(self.root,textvariable=self.m,bg='black',fg='cyan')
self.entry.place(x=205,y=293)
button_push = Button(self.root, text="Check",bd=4,bg='black',fg='cyan', command=self.check)
button_push.place(x=350,y=285)
self.root.mainloop()
s = GuessGame()
| 42.555556
| 210
| 0.570061
| 4,501
| 0.97933
| 0
| 0
| 0
| 0
| 0
| 0
| 1,370
| 0.298085
|
3c39dc3a117517ba44438eb56f648a0feefd8459
| 2,051
|
py
|
Python
|
kanban.py
|
vtashlikovich/jira-task-analysis
|
34690406243fe0b4c5f1400c5bca872923856571
|
[
"MIT"
] | null | null | null |
kanban.py
|
vtashlikovich/jira-task-analysis
|
34690406243fe0b4c5f1400c5bca872923856571
|
[
"MIT"
] | null | null | null |
kanban.py
|
vtashlikovich/jira-task-analysis
|
34690406243fe0b4c5f1400c5bca872923856571
|
[
"MIT"
] | null | null | null |
import configparser
import sys
from jiraparser import JiraJSONParser, TokenAuth
import requests
from requests.auth import AuthBase
""" Getting a list of issues connected to a board id (defined by configuration) and printing analysis information """
# read config
config = configparser.ConfigParser()
config.read("config.ini")
# prepare parameters
jSQLString = JiraJSONParser.formJQLQuery(
projectId=config["default"]["issueKey"],
filter=int(config["default"]["filterId"]),
taskTypes=["Story"],
)
authToken = config["default"]["authentication-token"]
jiraBaseAPIURL = config["default"]["jiraURL"] + "/rest/api/2/issue/"
boardAPIURL = config["default"]["jiraURL"] + "/rest/api/2/search?jql=" + jSQLString
# fetch board issues
resp = requests.get(
boardAPIURL, auth=TokenAuth(authToken), params={"Content-Type": "application/json"}
)
if resp.status_code != 200:
raise Exception("Board information has not been fetched")
result = resp.json()
print("max {:d} out of {:d}".format(result["maxResults"], result["total"]))
# TODO: replace with full list when needed
narrowedList = result["issues"][:5]
for task in narrowedList:
# fetch issue info
issueParser = JiraJSONParser(authToken, jiraBaseAPIURL)
issueParser.parseIssueJson(task)
print(
"Issue: "
+ task["key"]
+ ", type: "
+ issueParser.issueTypeName
+ ", status: "
+ issueParser.issueStatus
)
# if there are subtasks - fetch them one by one
if issueParser.issueHasSubtasks:
issueParser.getAndParseSubtasks(False)
if len(issueParser.subtasksWOEstimation) > 0:
print("Sub-tasks not estimated: " + ",".join(issueParser.subtasksWOEstimation))
# print progress in 1 line
progressInfoLine = issueParser.getCompactProgressInfo()
if len(progressInfoLine) > 0:
            print(progressInfoLine)
# warn if there is no estimation for task/bug
elif issueParser.issueTypeName.lower() != "story":
print("No estimation")
print("")
| 31.075758
| 117
| 0.694783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 728
| 0.354949
|
3c3a5c531bfcc3cf9b1021a5ea94cb71ba7d11b0
| 1,268
|
py
|
Python
|
duckling/test/test_api.py
|
handsomezebra/zoo
|
db9ef7f9daffd34ca859d5a4d76d947e00a768b8
|
[
"MIT"
] | 1
|
2020-03-08T07:46:14.000Z
|
2020-03-08T07:46:14.000Z
|
duckling/test/test_api.py
|
handsomezebra/zoo
|
db9ef7f9daffd34ca859d5a4d76d947e00a768b8
|
[
"MIT"
] | null | null | null |
duckling/test/test_api.py
|
handsomezebra/zoo
|
db9ef7f9daffd34ca859d5a4d76d947e00a768b8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import requests
import logging
import csv
url = "http://localhost:10000/parse"
def get_result(text, lang, dims, latent=None, reftime=None, tz=None):
data = {
"text": text,
"lang": lang,
"dims": json.dumps(dims)
}
if reftime is not None:
data["reftime"] = reftime
if tz is not None:
data["tz"] = tz
if latent is not None:
data["latent"] = latent
response = None
try:
response = requests.post(url, data=data)
response.raise_for_status()
except requests.exceptions.RequestException as e:
logging.warning("Service %s requests exception: %s", url, e)
if response is None:
logging.warning("Failed to call service")
return None
elif response.status_code != 200:
logging.warning("Invalid response code %d from service", response.status_code)
return None
else:
return response.json()
def test_time_en():
reftime = "1559920354000" # 6/7/2019 8:12:34 AM
time_zone = "America/Los_Angeles"
result = get_result("tomorrow at eight", "en", ["time"], reftime=reftime)
assert result is not None and result[0]["value"]["value"] == "2019-06-08T08:00:00.000-07:00"
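A further call against the same endpoint follows the same pattern; "amount-of-money" is a standard duckling dimension, but the exact payload shape is not asserted here since it depends on the running service.
def test_amount_of_money_en():
    # Illustrative only: checks that the service answered, not the exact value shape.
    result = get_result("42 dollars", "en", ["amount-of-money"])
    assert result is not None and len(result) > 0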
| 23.924528
| 96
| 0.621451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 321
| 0.253155
|