hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
353bb0db8607a6842f81b42d90314c743ed67327 | 936 | py | Python | PackageGen.py | Qudix/Buffout4 | 4ac65dca4854dfcea860cd6f3e2f9b7cf35701c0 | [
"MIT"
] | null | null | null | PackageGen.py | Qudix/Buffout4 | 4ac65dca4854dfcea860cd6f3e2f9b7cf35701c0 | [
"MIT"
] | null | null | null | PackageGen.py | Qudix/Buffout4 | 4ac65dca4854dfcea860cd6f3e2f9b7cf35701c0 | [
"MIT"
] | null | null | null | import os
import zipfile
import zlib
def make_rel_archive(a_parent, a_name):
    """Package the release artifacts of plugin ``a_name`` into release/<a_name>.zip.

    Parameters
    ----------
    a_parent : str
        Directory (with trailing slash) containing the "F4SE/Plugins" tree.
    a_name : str
        Base name of the plugin; its .dll, .toml, and _preload.txt files
        are archived under their path relative to ``a_parent``.
    """
    # Use a context manager so the archive is flushed and closed even on
    # error; the original left the ZipFile open until interpreter exit.
    with zipfile.ZipFile("release/" + a_name + ".zip", "w", zipfile.ZIP_DEFLATED) as archive:
        def do_write(a_relative):
            # Store each file under its parent-relative path inside the zip.
            archive.write(a_parent + a_relative, a_relative)
        do_write("F4SE/Plugins/" + a_name + ".dll")
        do_write("F4SE/Plugins/" + a_name + ".toml")
        do_write("F4SE/Plugins/" + a_name + "_preload.txt")
def make_dbg_archive(a_parent, a_name):
    """Package the debug symbols of plugin ``a_name`` into release/<a_name>_pdb.zip.

    Parameters
    ----------
    a_parent : str
        Directory (with trailing slash) containing the "F4SE/Plugins" tree.
    a_name : str
        Base name of the plugin; its .pdb file is stored at the zip root.
    """
    # Context manager guarantees the archive is closed; the original
    # relied on interpreter shutdown to flush it.
    with zipfile.ZipFile("release/" + a_name + "_pdb" + ".zip", "w", zipfile.ZIP_DEFLATED) as archive:
        archive.write(a_parent + "F4SE/Plugins/" + a_name + ".pdb", a_name + ".pdb")
def main():
    """Build the release and debug-symbol archives for this project.

    Reads the game install location from the "Fallout4Path" environment
    variable and takes the project/plugin name from the name of the
    directory containing this script.
    """
    # Work relative to the directory containing this script so the
    # "release" output folder lands next to it.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # makedirs(exist_ok=True) replaces the try/except FileExistsError dance.
    os.makedirs("release", exist_ok=True)
    parent = os.environ["Fallout4Path"] + "/Data/"
    # The enclosing directory name doubles as the plugin/project name.
    project = os.path.split(os.getcwd())[1].strip(os.sep)
    make_rel_archive(parent, project)
    make_dbg_archive(parent, project)


if __name__ == "__main__":
    main()
| 29.25 | 92 | 0.701923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.196581 |
353cbfa860da0589e9ef54e5529ecb9742d22ebd | 8,246 | py | Python | scripts/extract-traj.py | KeithLabPitt/scripts | a6631736ea2fbcd5ef85d93d793602f746f7277d | [
"MIT"
] | null | null | null | scripts/extract-traj.py | KeithLabPitt/scripts | a6631736ea2fbcd5ef85d93d793602f746f7277d | [
"MIT"
] | null | null | null | scripts/extract-traj.py | KeithLabPitt/scripts | a6631736ea2fbcd5ef85d93d793602f746f7277d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020, Alex M. Maldonado
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import argparse
import numpy as np
# Mapping from element symbol to atomic number (H=1 ... Uuh=116).
_element_to_z = {
    'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9,
    'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16,
    'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V': 23,
    'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': 30,
    'Ga': 31,'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, 'Rb': 37,
    'Sr': 38, 'Y': 39, 'Zr': 40, 'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44,
    'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49, 'Sn': 50, 'Sb': 51,
    'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57, 'Ce': 58,
    'Pr': 59, 'Nd': 60, 'Pm': 61, 'Sm': 62, 'Eu': 63, 'Gd': 64, 'Tb': 65,
    'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70, 'Lu': 71, 'Hf': 72,
    'Ta': 73, 'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79,
    'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83, 'Po': 84, 'At': 85, 'Rn': 86,
    'Fr': 87, 'Ra': 88, 'Ac': 89, 'Th': 90, 'Pa': 91, 'U': 92, 'Np': 93,
    'Pu': 94, 'Am': 95, 'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, 'Fm': 100,
    'Md': 101, 'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105, 'Sg': 106,
    'Bh': 107, 'Hs': 108, 'Mt': 109, 'Ds': 110, 'Rg': 111, 'Cn': 112,
    'Uuq': 114, 'Uuh': 116,
}
# Inverse mapping: atomic number -> element symbol.
_z_to_element = {v: k for k, v in _element_to_z.items()}
def _string_coords(z, R):
    """Puts atomic coordinates into a Python string. Typically used for
    writing to an input file.

    Parameters
    ----------
    z : :obj:`numpy.ndarray`
        A (n,) numpy array containing all ``n`` elements labeled by their
        atomic number.
    R : :obj:`numpy.ndarray`
        Contains atomic positions in a (n, 3) numpy array where the x, y, and z
        Cartesian coordinates in Angstroms are given for the n atoms.

    Returns
    -------
    :obj:`str`
        XYZ atomic coordinates as a string, one "element x y z" line per atom.
    """
    atom_coords_string = ''
    atom_index = 0
    while atom_index < len(z):
        # Look up the element symbol for this atom's atomic number.
        atom_element = str(_z_to_element[z[atom_index]])
        # np.array2string renders "[x y z]"; the [1:-1] slice strips the
        # enclosing brackets, leaving just the coordinate triplet.
        coords_string = np.array2string(
            R[atom_index],
            suppress_small=True, separator=' ',
            formatter={'float_kind':'{:0.9f}'.format}
        )[1:-1] + '\n'
        # NOTE(review): the ' -' -> '-' replacement appears meant to tighten
        # spacing before negative values; confirm it cannot merge adjacent
        # numbers for the separator actually in use.
        atom_coords_string += (atom_element + ' ' \
            + coords_string).replace(' -', '-')
        atom_index += 1
    return atom_coords_string
def _atomlabel_to_z(atom_labels):
    """Convert atom labels (e.g., O1, H1, H2) to their atomic number.

    Parameters
    ----------
    atom_labels : :obj:`numpy.ndarray`
        Strings of atom labels with one dimension.

    Returns
    -------
    :obj:`numpy.ndarray`
        Atomic numbers of atom labels.
    """
    if atom_labels.ndim != 1:
        raise ValueError('Array must have only one dimension.')
    # Drop the trailing index digits from each label and look up the
    # remaining element symbol in the symbol-to-Z table.
    atomic_numbers = [
        _element_to_z[''.join(ch for ch in label if not ch.isdigit())]
        for label in atom_labels
    ]
    return np.array(atomic_numbers)
def get_traj(data):
    """
    Not all npz files will store the coordinates or atom specifications the
    same way. This function will combine the right data to get a full trajectory
    based on the package. If the "store-traj" script is modified to change the
    labels of data, this function will NOT work.

    Parameters
    ----------
    data : :obj:`dict`
        Dictionary of :obj:`numpy.ndarray` data from trajectory.

    Returns
    -------
    :obj:`numpy.ndarray`
        Atomic numbers of all atoms in system.
    :obj:`numpy.ndarray`
        Atomic Cartesian coordinates in the same order as the atomic numbers.
    """
    z = np.array([])
    R = None
    # Only GAMESS trajectories are handled; any other package falls through
    # and returns the empty defaults (empty z, R is None).
    if data['package'][()].lower() == 'gamess':
        # QM atoms: atomic numbers are stored directly in 'z_qm'.
        if data['n_qm'][()] != 0:
            z = np.concatenate((z, data['z_qm'].flatten()))
            if R is None:
                R = data['R_qm']
        # Fragment atoms: stored as labels (e.g. 'O1'), converted to Z here.
        # NOTE(review): unlike 'n_qm' above, 'n_frag' is compared without the
        # [()] scalar extraction -- confirm both comparisons behave the same
        # for the stored 0-d arrays.
        if data['n_frag'] != 0:
            z_frag = _atomlabel_to_z(data['z_frag'])
            z = np.concatenate((z, z_frag.flatten()))
            if R is None:
                R = data['R_frag']
            else:
                # Append fragment coordinates after the QM atoms along the
                # atom axis (axis 1 of the (step, atom, 3) array).
                R = np.concatenate((R, data['R_frag']), axis=1)
        if data['n_mm'] != 0:
            raise ValueError('MM atoms are not supported.')
    return z, R
def _write_traj(z, R, t, t_unit, E, E_unit, filename, save_dir):
    """Write an XYZ trajectory file with time and energy comment lines.

    Parameters
    ----------
    z : :obj:`numpy.ndarray`
        Atomic numbers of all atoms in system.
    R : :obj:`numpy.ndarray`
        Atomic Cartesian coordinates of the trajectory.
    t : :obj:`numpy.ndarray`
        Simulation time.
    t_unit : :obj:`str`
        Units of time.
    E : :obj:`numpy.ndarray`
        Total energies of the snapshots.
    E_unit : :obj:`str`
        Units of energy.
    filename : :obj:`str`
        Base name (without extension) used for the output file,
        ``<save_dir><filename>-traj.xyz``.
    save_dir : :obj:`str`
        Directory where the trajectory file is written.
    """
    if save_dir[-1] != '/':
        save_dir += '/'
    atom_num = z.shape[0]
    write_lines = []
    # Each snapshot is an XYZ frame: atom count, comment line with time and
    # energy, then one coordinate line per atom.
    for i in range(R.shape[0]):
        write_lines.append(str(atom_num) + '\n')
        write_lines.append(f'time = {t[i]} {t_unit}; E_total = {E[i]} {E_unit}\n')
        write_lines.append(_string_coords(z, R[i]))
    # Bug fix: the output file is named after the ``filename`` argument; it
    # previously wrote a hard-coded "(unknown)" placeholder and ignored the
    # parameter entirely.
    with open(f'{save_dir}{filename}-traj.xyz', 'w') as f:
        f.writelines(write_lines)
def export_traj(z, R, data, filename, save_dir):
    """Handles writing and selecting timing and energies data.

    Parameters
    ----------
    z : :obj:`numpy.ndarray`
        Atomic numbers of all atoms in system.
    R : :obj:`numpy.ndarray`
        Atomic Cartesian coordinates of the trajectory.
    data : :obj:`dict`
        Dictionary of :obj:`numpy.ndarray` data from trajectory. Used to get
        energies and times for comments.
    filename : :obj:`str`
        Name of the npz file.
    save_dir : :obj:`str`
        Directory to save the trajectory.
    """
    # Guard clause: only GAMESS trajectories carry the keys read below.
    if data['package'][()].lower() != 'gamess':
        return
    energies = data['E_total']
    energy_unit = str(data['unit_E'][()])
    times = data['time']
    time_unit = str(data['unit_time'][()])
    _write_traj(z, R, times, time_unit, energies, energy_unit, filename, save_dir)
def main():
    """Command-line entry point: parse arguments and dispatch extraction."""
    arg_parser = argparse.ArgumentParser(
        description=(
            'Extract data from a trajectory npz file.\n\n'
            'Data options are:\n'
            '* traj\n'
            ' * Will save a XYZ trajectory from the npz with energies as '
            'comments'
        ),
        formatter_class=argparse.RawTextHelpFormatter
    )
    arg_parser.add_argument(
        'npz_traj', metavar='npz traj', type=str, nargs='?',
        help='Path to NumPy npz file with stored trajectory data.'
    )
    arg_parser.add_argument(
        'traj_data', metavar='data', type=str, nargs='?',
        help='What data do you want to extract?'
    )
    parsed = arg_parser.parse_args()

    traj_path = parsed.npz_traj
    # Base name of the npz file: everything before the first dot.
    traj_name = traj_path.split('/')[-1].split('.')[0]
    if not os.path.exists(traj_path):
        raise ValueError(f'{traj_path} does not exist.')
    traj_data = dict(np.load(traj_path))
    if parsed.traj_data == 'traj':
        z, R = get_traj(traj_data)
        export_traj(z, R, traj_data, traj_name, '.')


if __name__ == "__main__":
    main()
| 35.69697 | 82 | 0.576643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,601 | 0.557967 |
353d49fb2aa341e74610eef072c9aede1a4b9a62 | 3,553 | py | Python | examples/M4_competition/evaluate_prophet.py | LeoTafti/darts | 210605fafb730de564e3d723ab3919ed94da42b9 | [
"Apache-2.0"
] | 1 | 2021-07-15T11:12:05.000Z | 2021-07-15T11:12:05.000Z | examples/M4_competition/evaluate_prophet.py | LeoTafti/darts | 210605fafb730de564e3d723ab3919ed94da42b9 | [
"Apache-2.0"
] | 1 | 2021-02-02T11:22:36.000Z | 2021-02-02T11:22:36.000Z | examples/M4_competition/evaluate_prophet.py | LeoTafti/darts | 210605fafb730de564e3d723ab3919ed94da42b9 | [
"Apache-2.0"
] | null | null | null | """Evaluating Prophet model on M4 timeseries
"""
from darts.models import Prophet
from darts.utils.statistics import check_seasonality
from darts.utils import _build_tqdm_iterator
import numpy as np
import pandas as pd
import pickle as pkl
from M4_metrics import owa_m4, mase_m4, smape_m4
if __name__ == "__main__":
    # The six M4 sampling frequencies; the loop below walks them in reverse
    # (Hourly first).
    data_categories = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']
    # M4-info.csv maps each series id (M4id) to its metadata, e.g. Frequency.
    info_dataset = pd.read_csv('dataset/M4-info.csv', delimiter=',').set_index('M4id')

    for cat in data_categories[::-1]:
        # Load TimeSeries from M4
        ts_train = pkl.load(open("dataset/train_"+cat+".pkl", "rb"))
        ts_test = pkl.load(open("dataset/test_"+cat+".pkl", "rb"))

        # Test models on all time series
        mase_all = []
        smape_all = []
        # Seasonal period for this category, read from the first series id
        # (e.g. "H1" for Hourly).
        m = int(info_dataset.Frequency[cat[0]+"1"])
        for train, test in _build_tqdm_iterator(zip(ts_train, ts_test), verbose=True):
            train_des = train
            seasonOut = 1
            # NOTE(review): m is mutated inside this per-series loop (set to 1
            # when no seasonality is detected, and re-read from info_dataset
            # further below), so later series in the same category can see the
            # mutated value -- confirm this is intended.
            if m > 1:
                if check_seasonality(train, m=int(m), max_lag=2*m):
                    pass
                else:
                    m = 1
            try:
                # Enable only the seasonality component matching the sampling
                # frequency of the current category.
                prophet_args = {
                    'daily_seasonality': False,
                    'weekly_seasonality': False,
                    'yearly_seasonality': False,
                    'frequency': None,
                    'changepoint_range': 0.95,
                }
                if cat == 'Daily':
                    prophet_args['daily_seasonality'] = True
                elif cat == 'Hourly':
                    prophet_args['daily_seasonality'] = True
                elif cat == 'Weekly':
                    prophet_args['weekly_seasonality'] = True
                elif cat == 'Monthly':
                    prophet_args['yearly_seasonality'] = True
                elif cat == 'Quarterly':
                    prophet_args['yearly_seasonality'] = True
                elif cat == 'Yearly':
                    prophet_args['yearly_seasonality'] = True

                prophet = Prophet(**prophet_args)
                # Level-shift heuristic: if the largest one-step jump exceeds
                # half the series' total range, fit only on the data after
                # the jump.
                derivate = np.diff(train.univariate_values(), n=1)
                jump = derivate.max()/(train.max().max() - train.min().min())
                try:
                    if jump <= 0.5:
                        prophet.fit(train)
                    else:
                        prophet.fit(train.drop_before(train.time_index()[np.argmax(derivate)+1]))
                except ValueError as e:
                    # NOTE(review): re-raising unchanged makes this inner
                    # try/except a no-op -- possibly a leftover debug hook.
                    raise e
                forecast_prophet = prophet.predict(len(test))
                # Restore the category frequency for the MASE computation.
                m = info_dataset.Frequency[cat[0]+"1"]
                mase_all.append(np.vstack([
                    mase_m4(train, test, forecast_prophet, m=m),
                ]))
                smape_all.append(np.vstack([
                    smape_m4(test, forecast_prophet),
                ]))
            except Exception as e:
                # Abort the whole category on the first failing series.
                print(e)
                break
        # Persist per-series scores and report category-level averages.
        pkl.dump(mase_all, open("prophet_mase_"+cat+".pkl", "wb"))
        pkl.dump(smape_all, open("prophet_smape_"+cat+".pkl", "wb"))
        print("MASE; Prophet: {}".format(*tuple(np.nanmean(np.stack(mase_all), axis=(0, 2)))))
        print("sMAPE; Prophet: {}".format(*tuple(np.nanmean(np.stack(smape_all), axis=(0, 2)))))
        print("OWA: ", owa_m4(cat, np.nanmean(np.stack(smape_all), axis=(0, 2)),
                              np.nanmean(np.stack(mase_all), axis=(0, 2))))
| 41.313953 | 97 | 0.504363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.171404 |
353d634a1acddc64d1cd7c68b17cf48d708bb09b | 1,100 | py | Python | preprocess/step4/count.py | moore3930/dependency-based-w2v | 03edc3e89c4af939a1eae50abc395e5dc53f886c | [
"MIT"
] | 11 | 2017-12-28T12:00:37.000Z | 2022-01-18T05:45:12.000Z | preprocess/step4/count.py | njirene/dependency-based-w2v | 0b55147e32a856759f7922e1690771c316c5b8e7 | [
"MIT"
] | 3 | 2018-07-29T13:42:53.000Z | 2018-11-22T07:33:07.000Z | preprocess/step4/count.py | njirene/dependency-based-w2v | 0b55147e32a856759f7922e1690771c316c5b8e7 | [
"MIT"
] | 2 | 2019-07-27T05:10:22.000Z | 2019-10-03T11:33:37.000Z | import codecs
import sys
import argparse
# Count, for each integer id appearing in the input file, how many times it
# occurs, and write the counts (sorted by id) one per line to weightcn.txt.
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--file', type=str, default=None)
parser.add_argument('--quantity', type=int, default=5785)
args = parser.parse_args()

# Pre-fill counts for ids 1 .. quantity-1 so ids never seen in the input
# still appear (with a count of 0) in the output.
counts = {j: 0 for j in range(1, args.quantity)}

# Count occurrences. The output file is opened only after the input has been
# fully processed, so a read failure no longer leaves an empty weightcn.txt.
with codecs.open(args.file, 'r', 'utf-8') as f:
    for raw_line in f:
        line = raw_line.replace('\n', '')
        if len(line) == 0:
            continue
        tokens = line.split(' ')
        # Every second token starting from index 2 holds a comma-separated
        # list of ids; the first token and odd-indexed tokens are skipped.
        for idx, token in enumerate(tokens):
            if idx != 0 and idx % 2 == 0:
                for item in token.split(','):
                    if len(item) == 0:
                        continue
                    key = int(item)
                    # ids >= quantity are added on first sight.
                    counts[key] = counts.get(key, 0) + 1

with codecs.open('weightcn.txt', 'w', 'utf-8') as output:
    # Emit counts in ascending id order, one count per line.
    for key in sorted(counts):
        output.write(str(counts[key]))
        output.write('\n')
| 27.5 | 69 | 0.516364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.084545 |
353e4f8031534b9754c44cd582947dcb265760b5 | 16,619 | py | Python | pyannote/audio/applications/pyannote_audio.py | kan-cloud/pyannote-audio | 5bcc57782849ecfc746e0404e480c262410b38cd | [
"MIT"
] | 1 | 2020-12-01T07:03:01.000Z | 2020-12-01T07:03:01.000Z | pyannote/audio/applications/pyannote_audio.py | kan-cloud/pyannote-audio | 5bcc57782849ecfc746e0404e480c262410b38cd | [
"MIT"
] | null | null | null | pyannote/audio/applications/pyannote_audio.py | kan-cloud/pyannote-audio | 5bcc57782849ecfc746e0404e480c262410b38cd | [
"MIT"
] | 5 | 2020-06-06T23:56:06.000Z | 2022-02-21T10:43:10.000Z | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2019-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
"""
Neural building blocks for speaker diarization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usage:
pyannote-audio (sad | scd | ovl | emb | dom) train [--cpu | --gpu] [options] <root> <protocol>
pyannote-audio (sad | scd | ovl | emb | dom) validate [--cpu | --gpu] [options] <train> <protocol>
pyannote-audio (sad | scd | ovl | emb | dom) apply [--cpu | --gpu] [options] <validate> <protocol>
pyannote-audio -h | --help
pyannote-audio --version
This command line tool can be used to train, validate, and apply neural networks
for the following blocks of a speaker diarization pipeline:
* (sad) speech activity detection consists in detecting speech regions in
an audio recording.
* (scd) speaker change detection consists in detecting timestamps of
speaker change point.
* (ovl) overlapped speech detection consists in detection regions with two
or more simultaneous speakers.
* (emb) speaker embedding consists in projecting audio chunk into a
(usually high-dimensional) vector space where same speaker
embeddings are close to each other, and different speaker embeddings
are not.
* (dom) domain classification consists in predicting the domain of an
audio recording
Running a complete speech activity detection experiment on the provided
"debug" dataset would go like this:
* Run experiment on this pyannote.database protocol
$ export DATABASE=Debug.SpeakerDiarization.Debug
* This directory will contain experiments artifacts:
$ mkdir my_experiment && cd my_experiment
* A unique configuration file describes the experiment hyper-parameters
(see "Configuration file" below for details):
$ edit config.yml
* This will train the model on the training set:
$ pyannote-audio sad train ${PWD} ${DATABASE}
* Training artifacts (including model weights) are stored in a sub-directory
whose name makes it clear which dataset and subset (train, by default)
were used for training the model.
$ cd train/${DATABASE}.train
* This will validate the model on the development set:
$ pyannote-audio sad validate ${PWD} ${DATABASE}
* Validation artifacts (including the selection of the best epoch) are
stored in a sub-directory named after the dataset and subset (development,
by default) used for validating the model.
$ cd validate/${DATABASE}.development
* This will apply the best model (according to the validation step) to the
test set:
$ pyannote-audio sad apply ${PWD} ${DATABASE}
* Inference artifacts are stored in a sub-directory whose name makes it
clear which epoch has been used (e.g. apply/0125). Artifacts include:
* raw output of the best model (one numpy array per file than can be
loaded with pyannote.audio.features.Precomputed API and handled with
pyannote.core.SlidingWindowFeature API)
* (depending on the task) a file "${DATABASE}.test.rttm" containing the
post-processing of raw output.
* (depending on the task) a file "${DATABASE}.test.eval" containing the
evaluation result computed with pyannote.metrics.
pyannote.database support
~~~~~~~~~~~~~~~~~~~~~~~~~
PYANNOTE_DATABASE_CONFIG=
Configuration file <root>/config.yml
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Reproducible research is facilitated by the systematic use of configuration
files stored in <root>/config.yml in YAML format.
.......................... <root>/config.yml ..........................
task:
name:
params:
feature_extraction:
name:
params:
data_augmentation:
name:
params:
architecture:
name:
params:
scheduler:
name:
params:
preprocessors:
callbacks:
...................................................................
File <root>/config.yml is mandatory, unless option --pretrained is used.
When fine-tuning a model with option --pretrained=<model>, one can omit it
and the original <model> configuration file is used instead. If (a possibly
partial) <root>/config.yml file is provided anyway, it is used to override
<model> configuration file.
Tensorboard support
~~~~~~~~~~~~~~~~~~~
A bunch of metrics are logged during training and validation (e.g. loss,
learning rate, computation time, validation metric). They can be visualized
using tensorboard:
$ tensorboard --logdir=<root>
Common options
~~~~~~~~~~~~~~
<root> Experiment root directory. Should contain config.yml
configuration file, unless --pretrained option is
used (for which config.yml is optional).
<protocol> Name of protocol to use for training, validation, or
inference. Have a look at pyannote.database
documentation for instructions on how to define a
protocol with your own dataset:
https://github.com/pyannote/pyannote-database#custom-protocols
<train> Path to <root> sub-directory containing training
artifacts (e.g. <root>/train/<protocol>.train)
<validate> Path to <train> sub-directory containing validation
artifacts (e.g. <train>/validate/<protocol>.development)
In case option --pretrained=<model> is used, the
output of the pretrained model is dumped into the
<validate> directory.
--subset=<subset> Subset to use for training (resp. validation,
inference). Defaults to "train" (resp. "development",
"test") for strict enforcement of machine learning
good practices.
--gpu Run on GPU. When multiple GPUs are available, use
CUDA_VISIBLE_DEVICES environment variable to force
using a specific one. Defaults to using CPU if no GPU
is available.
--cpu Run on CPU. Defaults to using GPU when available.
--debug Run using PyTorch's anomaly detection. This will throw
an error if a NaN value is produced, and the stacktrace
will point to the origin of it. This option can
considerably slow execution.
--from=<epoch> Start training (resp. validating) at epoch <epoch>.
Use --from=last to start from last available epoch at
launch time. Not used for inference [default: 0].
--to=<epoch> End training (resp. validating) at epoch <epoch>.
Use --end=last to validate until last available epoch
at launch time. Not used for inference [default: 100].
--batch=<size> Set batch size used for validation and inference.
Has no effect when training as this parameter should
be defined in the configuration file [default: 32].
--step=<ratio> Ratio of audio chunk duration used as step between
two consecutive audio chunks [default: 0.25]
--parallel=<n_jobs> Use at most that many threads for generating training
samples or validating files. Defaults to using all
CPUs but one.
Speaker embedding
~~~~~~~~~~~~~~~~~
--duration=<duration> Use audio chunks with that duration. Defaults to the
fixed duration used during training, when available.
--metric=<metric> Use this metric (e.g. "cosine" or "euclidean") to
compare embeddings. Defaults to the metric defined in
<root>/config.yml configuration file.
Pretrained model options
~~~~~~~~~~~~~~~~~~~~~~~~
--pretrained=<model> Warm start training with pre-trained model. Can be
either a path to an existing checkpoint (e.g.
<train>/weights/0050.pt) or the name of a model
available in torch.hub.list('pyannote/pyannote.audio')
This option can also be used to apply a pretrained
model. See description of <validate> for more details.
Validation options
~~~~~~~~~~~~~~~~~~
--every=<epoch> Validate model every <epoch> epochs [default: 1].
--evergreen Prioritize validation of most recent epoch.
For speech activity and overlapped speech detection, validation consists in
looking for the value of the detection threshold that maximizes the f-score
of recall and precision.
For speaker change detection, validation consists in looking for the value of
the peak detection threshold that maximizes the f-score of purity and
coverage:
--diarization Use diarization purity and coverage instead of
(default) segmentation purity and coverage.
For speaker embedding and verification protocols, validation runs the actual
speaker verification experiment (representing each recording by its average
embedding) and reports equal error rate.
For speaker embedding and diarization protocols, validation runs a speaker
diarization pipeline based on oracle segmentation and "pool-linkage"
agglomerative clustering of speech turns (represented by their average
embedding), and looks for the threshold that maximizes the f-score of purity
and coverage.
"""
import sys
import warnings
from docopt import docopt
from pathlib import Path
import multiprocessing
import torch
from .base import apply_pretrained
from .speech_detection import SpeechActivityDetection
from .change_detection import SpeakerChangeDetection
from .overlap_detection import OverlapDetection
from .speaker_embedding import SpeakerEmbedding
from .domain_classification import DomainClassification
def main():
# TODO: update version automatically
arg = docopt(__doc__, version="pyannote-audio 2.0")
params = {}
if arg["sad"]:
Application = SpeechActivityDetection
elif arg["scd"]:
Application = SpeakerChangeDetection
elif arg["ovl"]:
Application = OverlapDetection
elif arg["emb"]:
Application = SpeakerEmbedding
elif arg["dom"]:
Application = DomainClassification
device = "cuda" if torch.cuda.is_available() else "cpu"
if arg["--gpu"] and device == "cpu":
msg = "No GPU is available. Using CPU instead."
warnings.warn(msg)
if arg["--cpu"] and device == "cuda":
device = "cpu"
params["device"] = torch.device(device)
protocol = arg["<protocol>"]
subset = arg["--subset"]
if arg["--debug"]:
msg = "Debug mode is enabled, this option might slow execution considerably."
warnings.warn(msg, RuntimeWarning)
torch.autograd.set_detect_anomaly(True)
n_jobs = arg["--parallel"]
if n_jobs is None:
n_jobs = max(1, multiprocessing.cpu_count() - 1)
params["n_jobs"] = int(n_jobs)
if arg["train"]:
params["subset"] = "train" if subset is None else subset
# start training at this epoch (defaults to 0, but 'last' is supported)
warm_start = arg["--from"]
if warm_start != "last":
warm_start = int(warm_start)
# or start from pretrained model
pretrained = arg["--pretrained"]
pretrained_config_yml = None
if pretrained is not None:
# start from an existing model checkpoint
# (from a different experiment)
if Path(pretrained).exists():
warm_start = Path(pretrained)
else:
try:
warm_start = torch.hub.load(
# TODO. change to 'pyannote/pyannote-audio'
# after 2.0 release
"pyannote/pyannote-audio:develop",
pretrained,
).weights_pt_
except Exception as e:
msg = (
f'Could not load "{warm_start}" model from torch.hub.'
f"The following exception was raised:\n\n{e}\n\n"
)
sys.exit(msg)
pretrained_config_yml = warm_start.parents[3] / "config.yml"
params["warm_start"] = warm_start
# stop training at this epoch (defaults to never stop)
params["epochs"] = int(arg["--to"])
root_dir = Path(arg["<root>"]).expanduser().resolve(strict=True)
app = Application(
root_dir, training=True, pretrained_config_yml=pretrained_config_yml
)
app.train(protocol, **params)
if arg["validate"]:
train_dir = Path(arg["<train>"]).expanduser().resolve(strict=True)
app = Application.from_train_dir(train_dir, training=False)
params["subset"] = "development" if subset is None else subset
start = arg["--from"]
if start != "last":
start = int(start)
params["start"] = start
end = arg["--to"]
if end != "last":
end = int(end)
params["end"] = end
params["every"] = int(arg["--every"])
params["chronological"] = not arg["--evergreen"]
params["batch_size"] = int(arg["--batch"])
params["diarization"] = arg["--diarization"]
duration = arg["--duration"]
if duration is None:
duration = getattr(app.task_, "duration", None)
if duration is None:
msg = (
"Task has no 'duration' defined. "
"Use '--duration' option to provide one."
)
raise ValueError(msg)
else:
duration = float(duration)
params["duration"] = duration
params["step"] = float(arg["--step"])
if arg["emb"]:
metric = arg["--metric"]
if metric is None:
metric = getattr(app.task_, "metric", None)
if metric is None:
msg = (
"Approach has no 'metric' defined. "
"Use '--metric' option to provide one."
)
raise ValueError(msg)
params["metric"] = metric
# FIXME: parallel is broken in pyannote.metrics
params["n_jobs"] = 1
app.validate(protocol, **params)
if arg["apply"]:
validate_dir = Path(arg["<validate>"]).expanduser().resolve(strict=True)
params["subset"] = "test" if subset is None else subset
params["batch_size"] = int(arg["--batch"])
duration = arg["--duration"]
if duration is not None:
duration = float(duration)
params["duration"] = duration
params["step"] = float(arg["--step"])
params["Pipeline"] = getattr(Application, "Pipeline", None)
params["pretrained"] = arg["--pretrained"]
apply_pretrained(validate_dir, protocol, **params)
| 37.599548 | 103 | 0.609363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,408 | 0.74657 |
353e538978effe724514576a6a13f995049887e9 | 1,352 | py | Python | convert_data_to_tfrecords.py | richiesui/Deep-Association-Learning | 70157b79f6ffb73aa7115e2a5a641c0cc09dd550 | [
"MIT"
] | 70 | 2018-08-23T01:54:37.000Z | 2021-09-11T12:54:35.000Z | convert_data_to_tfrecords.py | richiesui/Deep-Association-Learning | 70157b79f6ffb73aa7115e2a5a641c0cc09dd550 | [
"MIT"
] | 1 | 2019-01-20T14:58:46.000Z | 2019-01-21T08:12:36.000Z | convert_data_to_tfrecords.py | yanbeic/Deep-Association-Learning | 70157b79f6ffb73aa7115e2a5a641c0cc09dd550 | [
"MIT"
] | 32 | 2018-08-24T09:32:38.000Z | 2022-02-05T05:59:50.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
from datasets import convert_data
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('data_type', None,
'The type of the dataset to convert, need to be either "train" or "test".')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the image files are saved.')
tf.app.flags.DEFINE_string('output_dir', None,
'The directory where the output TFRecords are saved.')
tf.app.flags.DEFINE_string('filename', None,
'The txt file where the list all image files to be converted.')
tf.app.flags.DEFINE_integer('num_tfrecords', 1,
'Number of tfrecords to convert.')
def main(_):
# check if dir exits and make it
directory = FLAGS.output_dir
if not os.path.exists(directory):
os.makedirs(directory)
# start convert data to tfrecords
convert_data.run(dataset_dir=FLAGS.dataset_dir,
output_dir=FLAGS.output_dir,
filename=FLAGS.filename,
data_type=FLAGS.data_type,
num_tfrecords=FLAGS.num_tfrecords)
if __name__ == '__main__':
tf.app.run()
| 34.666667 | 102 | 0.638314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.300296 |
353e59f2bd97a83046f221148d98e2324a9d87dd | 1,933 | py | Python | vispy/geometry/tests/test_generation.py | chongxi/vispy | 3683ea1f58e43b4aa1b32a3e69656bead8a31e99 | [
"BSD-3-Clause"
] | 3 | 2018-05-09T17:55:53.000Z | 2019-07-22T09:14:41.000Z | vispy/geometry/tests/test_generation.py | chongxi/vispy | 3683ea1f58e43b4aa1b32a3e69656bead8a31e99 | [
"BSD-3-Clause"
] | 9 | 2017-04-07T01:44:15.000Z | 2018-12-16T20:47:08.000Z | graphViz/vispy/geometry/tests/test_generation.py | onecklam/ethereum-graphviz | 6993accf0cb85e23013bf7ae6b04145724a6dbd2 | [
"Apache-2.0"
] | 1 | 2021-09-15T08:52:26.000Z | 2021-09-15T08:52:26.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from vispy.testing import run_tests_if_main
from vispy.geometry import (create_box, create_cube, create_cylinder,
create_sphere, create_plane)
def test_box():
    """Box mesh: filled and outline indices reference every vertex."""
    verts, faces, edges = create_box()
    expected = np.arange(len(verts))
    assert_array_equal(expected, np.unique(faces))
    assert_array_equal(expected, np.unique(edges))
def test_cube():
    """Cube mesh: filled and outline indices reference every vertex."""
    verts, faces, edges = create_cube()
    expected = np.arange(len(verts))
    assert_array_equal(expected, np.unique(faces))
    assert_array_equal(expected, np.unique(edges))
def test_sphere():
    """Sphere mesh: every vertex lies at the requested radius, for each method."""
    for kwargs in (dict(rows=10, cols=20, radius=10, method='latitude'),
                   dict(subdivisions=5, radius=10, method='ico'),
                   dict(rows=20, cols=20, depth=20, radius=10, method='cube')):
        md = create_sphere(**kwargs)
        radii = np.sqrt((md.get_vertices() ** 2).sum(axis=1))
        assert_allclose(radii, np.ones_like(radii) * 10)
def test_cylinder():
    """Cylinder mesh: xy-distance of every vertex equals the radius."""
    mesh = create_cylinder(10, 20, radius=[10, 10])
    xy_radii = np.sqrt((mesh.get_vertices()[:, :2] ** 2).sum(axis=1))
    assert_allclose(xy_radii, np.ones_like(xy_radii) * 10)
def test_plane():
    """Plane mesh: filled and outline indices reference every vertex."""
    verts, faces, edges = create_plane()
    expected = np.arange(len(verts))
    assert_array_equal(expected, np.unique(faces))
    assert_array_equal(expected, np.unique(edges))
run_tests_if_main()
| 35.796296 | 76 | 0.689602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.148991 |
353f5e1b21c175277d0414b42c4543aa99713967 | 3,990 | py | Python | bico/utils/BICONode.py | gallmerci/bico | b87cee155f64d40118817e888fd783084f04b013 | [
"MIT"
] | 39 | 2017-01-02T01:02:58.000Z | 2022-02-22T13:58:14.000Z | bico/utils/BICONode.py | gallmerci/bico | b87cee155f64d40118817e888fd783084f04b013 | [
"MIT"
] | 2 | 2017-01-06T18:40:42.000Z | 2017-01-12T21:49:46.000Z | bico/utils/BICONode.py | gallmerci/bico | b87cee155f64d40118817e888fd783084f04b013 | [
"MIT"
] | 10 | 2017-01-02T17:52:02.000Z | 2022-02-22T14:07:24.000Z | import logging
import numpy as np
from bico.geometry.point import Point
from bico.nearest_neighbor.base import NearestNeighbor
from bico.utils.ClusteringFeature import ClusteringFeature
from datetime import datetime
from typing import Callable, TextIO, List
logger = logging.getLogger(__name__)
class BICONode:
    """One node of the BICO clustering-feature tree.

    Each node owns a clustering feature (``self.cf``), a nearest-neighbour
    engine used to route incoming points to child nodes, and the list of
    child nodes (``point_to_biconode``).  Level 0 is the root; its own CF
    is unused (points are only aggregated from level 1 downwards).
    """
    def __init__(self, level: int, dim: int, proj: int, bico: 'BICO',
                 projection_func: Callable[[int, int, float], NearestNeighbor]):
        self.level = level
        self.dim = dim
        self.proj = proj
        # Child nodes, indexed by (metadata - 1) from the NN engine.
        self.point_to_biconode = []
        self.projection_func = projection_func
        # NN engine with a search radius depending on the tree level.
        self.nn_engine = projection_func(dim, proj, bico.get_radius(self.level))
        self.num_cfs = 0
        self.bico = bico
        self.cf = ClusteringFeature(Point(np.zeros(dim)), Point(np.zeros(dim)), 0, 0)
    def insert_point(self, point_cf: ClusteringFeature) -> int:
        """Insert a point's CF into this subtree.

        Returns 1 when a new child node was created, 0 otherwise
        (the point was merged into an existing CF).
        """
        if self.bico.verbose:
            logger.debug("Insert point: {}".format(point_cf))
        # check whether geometry fits into CF
        if self.level > 0:
            if self.cf.size == 0:
                # Empty CF: adopt the point as this node's reference.
                self.cf += point_cf
                self.cf.ref = point_cf.ref
            else:
                # Tentatively merge and accept only if the k-means cost
                # stays below the level-dependent threshold.
                test = self.cf + point_cf
                cost = test.kmeans_cost(self.cf.ref)
                if self.bico.verbose:
                    logger.debug("Cost: " + str(cost) + ", Thresh: " + str(self.bico.get_threshold(self.level)))
                if cost < self.bico.get_threshold(self.level):
                    self.cf = test
                    return 0
        # search nearest neighbor and insert geometry there or open new BICONode
        candidates = []
        if self.num_cfs > 0:
            if self.bico.track_time:
                tstart = datetime.now()
            candidates = self.nn_engine.get_candidates(point_cf.ref.p)
            # candidates = self.ann_engine.neighbours(point_cf.ref.p)
            if self.bico.track_time:
                # Accumulate per-level NN query time.
                tend = datetime.now()
                if len(self.bico.time) < self.level + 1:
                    self.bico.time.append(tend - tstart)
                else:
                    self.bico.time[self.level] += tend - tstart
        if len(candidates) == 0:
            if self.bico.verbose:
                logger.debug("No nearest neighbor found.")
            # Open a new child node seeded with this point's CF; the child
            # index (1-based) is stored as metadata in the NN engine.
            self.num_cfs += 1
            self.nn_engine.insert_candidate(point=point_cf.ref.p, metadata=self.num_cfs)
            # self.ann_engine.store_vector(point_cf.ref.p, data=self.num_cfs)
            new_node = BICONode(self.level + 1, self.dim, self.proj, self.bico, self.projection_func)
            # new_node.cf = ClusteringFeature(geometry, geometry, geometry*geometry, 1)
            new_node.cf = point_cf
            # debug
            if len(self.point_to_biconode) != self.num_cfs - 1:
                logger.error("Something is wrong: {} != {}".format(len(self.point_to_biconode), self.num_cfs - 1))
            self.point_to_biconode.append(new_node)
            return 1
        else:
            if self.bico.verbose:
                logger.debug(str(len(candidates)) + " nearest neighbor found!")
                logger.debug(candidates)
            # Recurse into the child that holds the nearest candidate.
            nearest = candidates[0]
            node = nearest.data # contains the index
            # sanity check
            if len(self.point_to_biconode) < node - 2:
                logger.error("Something is wrong: {} > {}".format(len(self.point_to_biconode), node - 2))
            return self.point_to_biconode[node - 1].insert_point(point_cf)
    def output_cf(self, f: TextIO) -> None:
        """Write this subtree's clustering features to `f`, one per line."""
        if self.level > 0:
            f.write(str(self.cf) + "\n")
        for node in self.point_to_biconode:
            node.output_cf(f)
    def get_cf(self) -> List[np.ndarray]:
        """Return all CFs in this subtree as [size, center...] arrays."""
        cur = []
        if self.level > 0:
            # Prepend the CF size to its center coordinates.
            cur.append(np.insert(self.cf.center().p, 0, self.cf.size))
        for node in self.point_to_biconode:
            cur = cur + node.get_cf()
        return cur
| 42.903226 | 114 | 0.582206 | 3,691 | 0.925063 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.127318 |
354187da39692ce916dbf65d6fc9ecbdceb76905 | 239 | py | Python | pythonScript.py | qinenergy/QinBox | 9047d1fc6b7f1796820f13aabc1d7fe20cf8f34d | [
"MIT"
] | null | null | null | pythonScript.py | qinenergy/QinBox | 9047d1fc6b7f1796820f13aabc1d7fe20cf8f34d | [
"MIT"
] | null | null | null | pythonScript.py | qinenergy/QinBox | 9047d1fc6b7f1796820f13aabc1d7fe20cf8f34d | [
"MIT"
] | null | null | null | """
Lite install NLTK resource
"""
import nltk
dler = nltk.downloader.Downloader()
dler._update_index()
dler._status_cache['panlex_lite'] = 'installed' # Trick the index to treat panlex_lite as it's already installed.
dler.download('all')
| 26.555556 | 113 | 0.761506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.535565 |
3542ab8dbb20886a361a27ff5789ccca3ada3441 | 1,224 | py | Python | py2450/misc.py | sveinrou/py2450 | 195a649efd282b15b60ee0e3184d096afce5e311 | [
"MIT"
] | null | null | null | py2450/misc.py | sveinrou/py2450 | 195a649efd282b15b60ee0e3184d096afce5e311 | [
"MIT"
] | null | null | null | py2450/misc.py | sveinrou/py2450 | 195a649efd282b15b60ee0e3184d096afce5e311 | [
"MIT"
] | null | null | null | import pyvisa
def find_instruments():
    """Scan VISA resources and bucket them by interface type.

    Returns a dict with keys 'usb', 'gpib' and 'serial', each mapping to
    the list of resource strings containing the matching VISA tag.
    """
    resources = pyvisa.ResourceManager().list_resources()
    tags = {'usb': 'USB', 'gpib': 'GPIB', 'serial': 'ASRL'}
    return {name: [res for res in resources if tag in res]
            for name, tag in tags.items()}
def play_star_wars(smu):
    """Play the Star Wars main theme on the instrument's beeper.

    :param smu: instrument handle (presumably a pyvisa session to a
        Keithley-style SMU — TODO confirm); only its ``write`` method is
        used, sending TSP ``beeper.beep(duration, frequency)`` commands.
    """
    # Approximate note frequencies in Hz (middle octave and octave 5);
    # ``pause`` (0 Hz) is a rest.
    c, d, e, f, g, a, b, c5, d5, e5, f5, g5, pause = 262, 294, 330, 349, 392, 440, 494, 523, 587, 659, 689, 783, 0
    # Global tempo scale: each tuple's first element is a fraction of this.
    TAB_DURATION = 1
    # Melody as (relative duration, frequency) pairs.
    star_wars = (
        (1, c),
        (1, g),
        (1/6, f),
        (1/6, e),
        (1/6, d),
        (1, c5),
        (1/2, g),
        (1/6, f),
        (1/6, e),
        (1/6, d),
        (1, c5),
        (1/2, g),
        (1/6, f),
        (1/6, e),
        (1/6, f),
        (2, d),
        (1/6, c),
        (1/16, pause),
        (1/6, c),
        (1/16, pause),
        (1, c),
        (1, g),
        (1/6, f),
        (1/6, e),
        (1/6, d),
        (1, c5),
        (1/2, g),
        (1/6, f),
        (1/6, e),
        (1/6, d),
        (1, c5),
        (1/2, g),
        (1/6, f),
        (1/6, e),
        (1/6, f),
        (2, d),
    )
    for duration, freq in star_wars:
        smu.write(f'beeper.beep({duration*TAB_DURATION}, {freq})')
| 22.666667 | 114 | 0.377451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.06781 |
3543867a3c8172578aa9304ce724339a82b0436c | 5,406 | py | Python | LinkedList/test XOR.py | ikaushikpal/DS-450-python | 9466f77fb9db9e6a5bb3f20aa89ba6332f49e848 | [
"MIT"
] | 3 | 2021-06-28T12:04:19.000Z | 2021-09-07T07:23:41.000Z | LinkedList/test XOR.py | SupriyoDam/DS-450-python | 5dc21ce61b3279e9bd9d6ef3ad236667227ca283 | [
"MIT"
] | null | null | null | LinkedList/test XOR.py | SupriyoDam/DS-450-python | 5dc21ce61b3279e9bd9d6ef3ad236667227ca283 | [
"MIT"
] | 1 | 2021-06-28T15:42:55.000Z | 2021-06-28T15:42:55.000Z | # import required module
import ctypes
# create node class
class Node:
    """One element of the XOR linked list.

    ``npx`` stores the XOR of the ids of the previous and next nodes;
    it is 0 while the node has no neighbours.
    """
    def __init__(self, value):
        self.value = value
        self.npx = 0
# create linked list class
class XorLinkedList:
    """Memory-efficient doubly linked list using XOR-ed node addresses.

    Every node keeps a single ``npx`` field equal to
    ``id(previous) ^ id(next)``; ``ctypes`` converts an id back into the
    node object while traversing.  The private ``__nodes`` list keeps a
    strong reference to every inserted node so live nodes are never
    garbage collected (which would invalidate the stored ids).
    """
    def __init__(self):
        self.head = None
        self.tail = None
        # Strong references to all nodes ever inserted.  NOTE: deleted
        # nodes are intentionally kept as well (matches the original
        # behaviour); dropping them would only be a memory optimization.
        self.__nodes = []
    def InsertAtStart(self, value):
        """Insert `value` as the new first element."""
        node = Node(value)
        if self.head is None:  # empty list: the node is head and tail
            self.head = node
            self.tail = node
        else:
            self.head.npx = id(node) ^ self.head.npx
            node.npx = id(self.head)
            self.head = node
        self.__nodes.append(node)
    def InsertAtEnd(self, value):
        """Insert `value` as the new last element."""
        node = Node(value)
        if self.head is None:  # empty list: the node is head and tail
            self.head = node
            self.tail = node
        else:
            self.tail.npx = id(node) ^ self.tail.npx
            node.npx = id(self.tail)
            self.tail = node
        self.__nodes.append(node)
    def DeleteAtStart(self):
        """Remove the first node and return its value.

        Returns the string ``"List is empty !"`` when there is nothing to
        delete (kept for backwards compatibility).  Bug fix: the original
        returned None for lists of one or two elements; the deleted value
        is now returned in every case.
        """
        if self.isEmpty():
            return "List is empty !"
        res = self.head.value
        if self.head == self.tail:  # exactly one node
            self.head = self.tail = None
        elif (0 ^ self.head.npx) == id(self.tail):  # exactly two nodes
            self.head = self.tail
            self.head.npx = self.tail.npx = 0
        else:  # three or more nodes
            second = self.__type_cast(0 ^ self.head.npx)
            third_id = id(self.head) ^ second.npx
            self.head = second
            self.head.npx = 0 ^ third_id
        return res
    def DeleteAtEnd(self):
        """Remove the last node and return its value.

        Returns the string ``"List is empty !"`` when there is nothing to
        delete.  Bug fix: the original returned None for lists of one or
        two elements; the deleted value is now returned in every case.
        """
        if self.isEmpty():
            return "List is empty !"
        res = self.tail.value
        if self.head == self.tail:  # exactly one node
            self.head = self.tail = None
        elif self.__type_cast(0 ^ self.head.npx) == self.tail:  # two nodes
            self.tail = self.head
            self.head.npx = self.tail.npx = 0
        else:  # three or more nodes: walk to the tail, tracking ids
            prev_id = 0
            node = self.head
            next_id = 1
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
            # `node` is now the tail, `prev_id` the id of its predecessor.
            before_prev_id = self.__type_cast(prev_id).npx ^ id(node)
            new_tail = self.__type_cast(prev_id)
            new_tail.npx = before_prev_id ^ 0
            self.tail = new_tail
        return res
    def Print(self):
        """Print all values front to back.

        Values are printed rather than returned to avoid building an
        intermediate list.
        """
        if self.head != None:
            prev_id = 0
            node = self.head
            next_id = 1
            print(node.value, end=' ')
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
                    print(node.value, end=' ')
                else:
                    return
        else:
            print("List is empty !")
    def ReversePrint(self):
        """Print all values back to front (same traversal, starting at tail)."""
        if self.head != None:
            prev_id = 0
            node = self.tail
            next_id = 1
            print(node.value, end=' ')
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
                    print(node.value, end=' ')
                else:
                    return
        else:
            print("List is empty !")
    def Length(self):
        """Return the number of nodes (0 for an empty list)."""
        if not self.isEmpty():
            prev_id = 0
            node = self.head
            next_id = 1
            count = 1
            while next_id:
                next_id = prev_id ^ node.npx
                if next_id:
                    prev_id = id(node)
                    node = self.__type_cast(next_id)
                    count += 1
                else:
                    return count
        else:
            return 0
    def PrintByIndex(self, index):
        """Return the value stored at `index` (0-based).

        Returns an error string when `index` is out of range (message kept
        identical to the original for compatibility).
        """
        prev_id = 0
        node = self.head
        for i in range(index):
            next_id = prev_id ^ node.npx
            if next_id:
                prev_id = id(node)
                node = self.__type_cast(next_id)
            else:
                return "Value dosn't found index out of range."
        return node.value
    def isEmpty(self):
        """Return True when the list holds no nodes."""
        if self.head is None:
            return True
        return False
    def __type_cast(self, id):
        """Turn a node id back into the node object it refers to."""
        return ctypes.cast(id, ctypes.py_object).value
# Driver Code
# Demo: build a list, insert a few values and traverse it.  The remaining
# commented-out calls exercise the rest of the API.
# create object
obj = XorLinkedList()
# insert nodes
obj.InsertAtEnd(2)
obj.InsertAtEnd(3)
obj.InsertAtEnd(4)
obj.InsertAtEnd(0)
obj.InsertAtEnd(6)
obj.InsertAtEnd(55)
# display length
# print("\nLength:", obj.Length())
# traverse
print("\nTraverse linked list:")
obj.Print()
# print("\nTraverse in reverse order:")
# obj.ReversePrint()
# # display data values by index
# print('\nNodes:')
# for i in range(obj.Length()):
#     print("Data value at index", i, 'is', obj.PrintByIndex(i))
# # removing nodes
# print("\nDelete Last Node: ", obj.DeleteAtEnd())
# print("\nDelete First Node: ", obj.DeleteAtStart())
# # new length
# print("\nUpdated length:", obj.Length())
# # display data values by index
# print('\nNodes:')
# for i in range(obj.Length()):
#     print("Data value at index", i, 'is', obj.PrintByIndex(i))
# # traverse
# print("\nTraverse linked list:")
# obj.Print()
# print("\nTraverse in reverse order:")
# obj.ReversePrint()
| 24.026667 | 81 | 0.63744 | 4,304 | 0.796152 | 0 | 0 | 0 | 0 | 0 | 0 | 2,056 | 0.380318 |
3544ad58108f77d2815d664fdbba023d8958f9ac | 354 | py | Python | opticmedian/utils/file_reader.py | QuicqDev/OpticRescue | f857e3b770f43958f1d687f164ea896aa5390482 | [
"MIT"
] | 2 | 2021-08-09T01:35:06.000Z | 2021-08-09T01:37:05.000Z | opticmedian/utils/file_reader.py | QuicqDev/OpticRescue | f857e3b770f43958f1d687f164ea896aa5390482 | [
"MIT"
] | 8 | 2021-08-07T15:27:48.000Z | 2021-09-05T18:24:29.000Z | opticmedian/utils/file_reader.py | ASH1998/OpticRescue | f857e3b770f43958f1d687f164ea896aa5390482 | [
"MIT"
] | 1 | 2021-08-09T01:37:23.000Z | 2021-08-09T01:37:23.000Z | """
class to read files in specific ways
"""
import glob
import random
class Filer:
    """Utility for collecting files that match a glob pattern."""
    def __init__(self, file_path):
        # `file_path` is a glob pattern, e.g. "data/*.png".
        self.path = file_path
    def get_random_iter(self):
        """Return ``(matching_files, count)`` for the stored pattern.

        Note: despite the name, no shuffling is performed; files come
        back in ``glob.glob`` order.
        """
        # Single directory scan replaces the original double traversal
        # (iglob for counting followed by glob for listing).
        matches = glob.glob(self.path)
        return matches, len(matches)
| 13.615385 | 50 | 0.677966 | 278 | 0.785311 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.29096 |
35459007a7ac1087649915a526d0359cf8ca1114 | 6,335 | py | Python | pypiv/velofilter.py | jr7/pypiv | e43f33a905434f57cc7a7d448a63cdab10f4a8e6 | [
"BSD-3-Clause"
] | 7 | 2017-04-01T11:13:58.000Z | 2020-06-11T19:42:11.000Z | pypiv/velofilter.py | jruebsam/pypiv | e43f33a905434f57cc7a7d448a63cdab10f4a8e6 | [
"BSD-3-Clause"
] | 2 | 2019-11-18T17:42:11.000Z | 2019-11-26T14:55:41.000Z | pypiv/velofilter.py | jruebsam/pypiv | e43f33a905434f57cc7a7d448a63cdab10f4a8e6 | [
"BSD-3-Clause"
] | 2 | 2017-03-21T10:37:06.000Z | 2020-06-22T13:29:57.000Z | import numpy as np
from scipy.stats import linregress as li
from math import exp
def calc_factor(field,stepsize=0.01):
    """
    Compute the cumulative (integrated) velocity binning.

    For every threshold ``alpha`` (incremented by ``stepsize``) the
    fraction of non-NaN samples lying in ``(0, alpha]`` (positive half)
    and in ``(-alpha, 0]`` (negative half) is recorded.

    :param field: 1D field whose values are binned
    :param stepsize: step size for the velocity threshold
    :return (positive,negative): tuple of numpy arrays holding
        ``[alpha, fraction]`` rows for the positive and negative half
    """
    # Total number of valid (non-NaN) samples; denominator of every bin.
    total = float(np.count_nonzero(~np.isnan(field)))
    positive = []
    alpha = 0.
    # Positive half: fraction of samples with 0 < v <= alpha.
    while alpha <= np.max(field) + stepsize:
        inside = np.count_nonzero((field > 0.) & (field <= alpha))
        positive.append([alpha, inside / total])
        alpha += stepsize
    negative = []
    alpha = 0.
    # Negative half: fraction of samples with -alpha < v <= 0.
    while alpha <= np.abs(np.min(field)) + stepsize:
        inside = np.count_nonzero((field > -alpha) & (field <= 0.))
        negative.append([-1. * alpha, inside / total])
        alpha += stepsize
    return (np.array(positive), np.array(negative))
def calc_derivative(field,stepsize=0.01):
    """
    Compute the (non-cumulative) velocity binning.

    This is mathematically the derivative of :func:`calc_factor`: each
    entry holds the fraction of non-NaN samples falling into one bin of
    width ``stepsize`` — ``(alpha, alpha + stepsize]`` on the positive
    half and ``(-(alpha + stepsize), -alpha]`` on the negative half.

    :param field: 1D field whose values are binned
    :param stepsize: bin width / step size for the velocity
    :return (positive,negative): tuple of numpy arrays holding
        ``[alpha, fraction]`` rows for the positive and negative half
    """
    # Total number of valid (non-NaN) samples; denominator of every bin.
    total = float(np.count_nonzero(~np.isnan(field)))
    positive = []
    alpha = 0.
    while alpha <= np.max(field) + stepsize:
        inside = np.count_nonzero((field > alpha) & (field <= alpha + stepsize))
        positive.append([alpha, inside / total])
        alpha += stepsize
    negative = []
    alpha = 0.
    while alpha <= np.abs(np.min(field)) + stepsize:
        inside = np.count_nonzero((field > -(alpha + stepsize)) & (field <= -alpha))
        negative.append([-1. * alpha, inside / total])
        alpha += stepsize
    return (np.array(positive), np.array(negative))
def filter(piv,tfactor=3.,dalpha=.01):
    """
    Function for calculating the cutoff values.
    :param object piv: PIV class object
        This is supposed to be an object from a Direct or adaptive Class
        it is needed to get the velocities
    :param double tfactor: Factor for cutoff in the velocity binning
        The default value is set to 3 which works for many cases
    :param double dalpha: value for differential velocity
        The default is set to .01 which work for many cases
        if the velocities vary over a larger ranger use a larger value

    Side effect: outlier entries of ``piv.u`` and ``piv.v`` are set to
    NaN in place; nothing is returned.
    """
    #: pre sampling
    # Fractions of non-positive/positive samples per component.
    # NOTE(review): 'numberup' holds the fraction of u<=0 yet is later
    # used to scale the POSITIVE cutoff — looks swapped; confirm intent.
    numberup = np.count_nonzero(piv.u<=0.)/np.float(np.count_nonzero(piv.u))
    numberun = np.count_nonzero(piv.u>0.)/np.float(np.count_nonzero(piv.u))
    numbervp = np.count_nonzero(piv.v<=0.)/np.float(np.count_nonzero(piv.v))
    numbervn = np.count_nonzero(piv.v>0.)/np.float(np.count_nonzero(piv.v))
    upos = numberup
    uneg = numberun
    vpos = numbervp
    vneg = numbervn
    #: get alpha dependency
    # Cumulative binning (integral) per component and sign.
    up_alpha, un_alpha = calc_factor(piv.u,dalpha)
    vp_alpha, vn_alpha = calc_factor(piv.v,dalpha)
    #: calculate derivative directly from data
    # Per-bin (differential) binning; only the fraction column is used.
    dup_alpha1, dun_alpha1 = calc_derivative(piv.u,dalpha)
    dvp_alpha1, dvn_alpha1 = calc_derivative(piv.v,dalpha)
    dup_alpha = dup_alpha1[:,1]
    dun_alpha = dun_alpha1[:,1]
    dvp_alpha = dvp_alpha1[:,1]
    dvn_alpha = dvn_alpha1[:,1]
    #get boundaries
    # Threshold: mean of the first five bins damped by exp(tfactor).
    boundup = np.sum(dup_alpha[0:5])/5./np.exp(tfactor)
    boundun = np.sum(dun_alpha[0:5])/5./np.exp(tfactor)
    boundvp = np.sum(dvp_alpha[0:5])/5./np.exp(tfactor)
    boundvn = np.sum(dvn_alpha[0:5])/5./np.exp(tfactor)
    #get indices and exponential
    # Power-law fit (log-log polyfit) of the cumulative curve up to the
    # first bins below the boundary; the cutoff is where the fit crosses 1.
    if upos != 0.:
        indexup = np.where(dup_alpha<boundup)
        cut_up = np.int(np.sum(indexup[0][0:5])/5.)
        nup = np.polyfit(np.log( up_alpha[1:cut_up,0]),np.log(up_alpha[1:cut_up,1]),1)
        upos = exp(-nup[1]/nup[0])
    if uneg != 0.:
        indexun = np.where(dun_alpha<boundun)
        cut_un = np.int(np.sum(indexun[0][0:5])/5.)
        nun = np.polyfit(np.log(-un_alpha[1:cut_un,0]),np.log(un_alpha[1:cut_un,1]),1)
        uneg = -exp(-nun[1]/nun[0])
    if vpos != 0.:
        indexvp = np.where(dvp_alpha<boundvp)
        cut_vp = np.int(np.sum(indexvp[0][0:5])/5.)
        nvp = np.polyfit(np.log( vp_alpha[1:cut_vp,0]),np.log(vp_alpha[1:cut_vp,1]),1)
        vpos = exp(-nvp[1]/nvp[0])
    if vneg != 0.:
        indexvn = np.where(dvn_alpha<boundvn)
        cut_vn = np.int(np.sum(indexvn[0][0:5])/5.)
        nvn = np.polyfit(np.log(-vn_alpha[1:cut_vn,0]),np.log(vn_alpha[1:cut_vn,1]),1)
        vneg = -exp(-nvn[1]/nvn[0])
    #filter + clamping
    # Clamp cutoffs to the observed velocity range.
    if upos > np.max(piv.u):
        upos = np.max(piv.u)
    if uneg < np.min(piv.u):
        uneg = np.min(piv.u)
    if vpos > np.max(piv.v):
        vpos = np.max(piv.v)
    if vneg < np.min(piv.v):
        vneg = np.min(piv.v)
    #equalizing the cutoff
    # Rescale each cutoff by the sample fraction on that side.
    upos *= (0.5+numberup)
    uneg *= (0.5+numberun)
    vpos *= (0.5+numbervp)
    vneg *= (0.5+numbervn)
    #making the mask
    # Everything outside [neg, pos] is flagged as outlier and set to NaN.
    masku = (piv.u<uneg) | (piv.u>upos)
    maskv = (piv.v<vneg) | (piv.v>vpos)
    piv.u[masku] = np.nan
    piv.v[maskv] = np.nan
| 35.391061 | 100 | 0.63015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,881 | 0.296922 |
354591beed6d1616f9cdab23d48fdf510319c266 | 4,347 | py | Python | lib/floppy.py | FlorianPoot/Floppy | 1b62e5ffce7c45ab82ced3b17172a37431b5c395 | [
"MIT"
] | null | null | null | lib/floppy.py | FlorianPoot/Floppy | 1b62e5ffce7c45ab82ced3b17172a37431b5c395 | [
"MIT"
] | null | null | null | lib/floppy.py | FlorianPoot/Floppy | 1b62e5ffce7c45ab82ced3b17172a37431b5c395 | [
"MIT"
] | null | null | null | from machine import Pin, UART
from grip import Grip
import time
class Floppy:
    """Driver for a GRBL-controlled 3-axis robot arm over UART (MicroPython).

    Talks G-code/jog commands to a GRBL controller, tracks the commanded
    position in software, and enforces per-axis travel limits.
    """
    # Per-axis travel limits in GRBL units (X, Y, Z).
    AXIS_POS_LIMIT = (0, 5, 5)
    AXIS_NEG_LIMIT = (-7.5, 0, -5)
    def __init__(self):
        # region Attributes
        self._speed = 20
        self._buffer = 0
        self._pos_tracker = [0.0, 0.0, 0.0]
        # endregion
        self.error_led = Pin(21, Pin.OUT)
        self.rst_grbl = Pin(13, Pin.OUT)
        self.rst_grbl(1)
        # Blink the error LED three times as a power-on indicator.
        for i in range(3):
            self.error_led(1)
            time.sleep(0.5)
            self.error_led(0)
            time.sleep(0.5)
        self._uart = UART(1, 115200)
        self._uart.init(115200, tx=17, rx=16)
        self.grip = Grip(pin=4)
        # Initialization
        self.reset_grbl()
        self.read() # Flush
    def reset_grbl(self) -> None:
        """Pulse the GRBL hardware reset line and wait for the restart."""
        self.rst_grbl(0)
        time.sleep_us(5)
        self.rst_grbl(1)
        time.sleep(2) # Wait for grbl to initialize
    def get_state(self) -> str:
        """Return the GRBL machine state (e.g. "Idle") from a '?' status query."""
        self.read() # Flush
        self._uart.write(b"?\n")
        time.sleep_ms(100)
        # Status report looks like "<State|MPos:x,y,z|...>"; take the state.
        data = self._uart.readline().decode().split("|")[0][1:]
        self.read() # Flush
        return data
    def get_position(self) -> tuple:
        """Return the machine position reported by GRBL as a float tuple."""
        self.read() # Flush
        self._uart.write(b"?\n")
        time.sleep_ms(100)
        data = self._uart.readline().decode()
        data = data.replace("\n", "").replace("\r", "")
        # Parse the "MPos:x,y,z" field of the status report.
        data = [float(x) for x in data.split("|")[1].split(":")[1].split(",")]
        # data = [sum(x) for x in zip(data, self._offset)]
        self.read() # Flush
        return tuple(data)
    def move_to(self, joint: tuple, relative=False, jog=False) -> None:
        """Send a move command for up to three axes.

        :param joint: (x, y, z) targets; None skips that axis.
        :param relative: use relative (G91) instead of absolute (G90) mode.
        :param jog: send a "$J=" jog command instead of a G1 move.
        Raises when more than 10 commands are queued or (for non-jog
        moves) the target leaves the software travel limits.
        """
        # 10 per revolution.
        if self.get_state() == "Idle":
            self._buffer = 0
        if self._buffer >= 10:
            self.error_led(1)
            raise Exception("Buffer overflow, a maximum of 10 commands can be sent simultaneously. Abort")
        else:
            if jog:
                cmd = "$J="
            else:
                cmd = "G1"
            if relative:
                cmd += "G91"
                # Compute the absolute target from the tracked position.
                new_pos = list()
                for j, n in zip(joint, self._pos_tracker):
                    if j is not None:
                        new_pos.append(sum((j, n)))
            else:
                cmd += "G90"
                new_pos = joint
            if not jog:
                # Enforce software limits before committing the move.
                for pos, neg_limit, pos_limit in zip(new_pos, self.AXIS_NEG_LIMIT, self.AXIS_POS_LIMIT):
                    if pos is not None:
                        if pos > pos_limit or pos < neg_limit:
                            self.error_led(1)
                            raise ValueError("Trying to move outside limits.")
                self._pos_tracker = new_pos
            # NOTE(review): the loop variable shadows the `joint` parameter.
            for axis, joint in zip(("X", "Y", "Z"), joint):
                if joint is not None:
                    cmd += axis + str(joint)
            cmd += "F" + str(self.speed) + "\n"
            self._uart.write(cmd.encode())
            self.read() # Flush
            self._buffer += 1
    def cancel_jog(self):
        """Abort an in-progress jog (GRBL jog-cancel byte 0x85)."""
        self._uart.write(b"\x85")
        self.read() # Flush
    def wait_until_idle(self) -> None:
        """Poll the machine state every 200 ms until GRBL reports "Idle"."""
        time.sleep(0.2)
        msg = self.get_state()
        while msg != "Idle":
            time.sleep(0.2)
            msg = self.get_state()
    def read(self) -> None:
        """Drain the UART buffer; raise if GRBL reported an error."""
        msg = self._uart.read()
        if msg is None:
            return
        if "error" in msg.decode():
            self.error_led(1)
            raise Exception("GRBL respond with error. Abort")
    def disable_motors(self, force=False) -> None:
        """Put GRBL to sleep ($SLP); refuses unless at home or forced."""
        if force or self.get_position() == (0.0, 0.0, 0.0):
            # NOTE(review): written as str while other writes use bytes —
            # confirm MicroPython UART.write accepts str here.
            self._uart.write("$SLP\n")
        else:
            self.error_led(1)
            raise Exception("Could not disable motors while not at home position. Abort")
    def send_command(self, command: str) -> None:
        """Send an arbitrary GRBL command and print the raw response."""
        self._uart.write(command + "\n")
        time.sleep_ms(100)
        print(self._uart.read().decode())
    @property
    def speed(self) -> int:
        # Feed rate appended to every move command (F parameter).
        return self._speed
    @speed.setter
    def speed(self, value: int) -> None:
        if 0 < value <= 500:
            self._speed = value
        else:
            self.error_led(1)
            raise ValueError("Speed must be between 1 and 500")
# Module-level singleton: importing this module initializes the hardware.
floppy = Floppy()
| 25.273256 | 106 | 0.501035 | 4,259 | 0.979756 | 0 | 0 | 287 | 0.066023 | 0 | 0 | 537 | 0.123533 |
35468d2fd3afa4235fa24ef45b8d2a4fc954d35d | 891 | py | Python | WeatherStationSensorsReader/controllers/wind_measurement_controller.py | weather-station-project/weather-station-sensors-reader | cda7902ee382248b41d14b9a2c0543817decbb4a | [
"MIT"
] | null | null | null | WeatherStationSensorsReader/controllers/wind_measurement_controller.py | weather-station-project/weather-station-sensors-reader | cda7902ee382248b41d14b9a2c0543817decbb4a | [
"MIT"
] | null | null | null | WeatherStationSensorsReader/controllers/wind_measurement_controller.py | weather-station-project/weather-station-sensors-reader | cda7902ee382248b41d14b9a2c0543817decbb4a | [
"MIT"
] | null | null | null | from controllers.controller import Controller
from dao.wind_measurement_dao import WindMeasurementDao
from sensors.wind_measurement_sensor import WindMeasurementSensor
class WindMeasurementController(Controller):
    """ Represents the controller with the wind measurement sensor and DAO """
    def __init__(self, anemometer_port_number, server, database, user, password):
        # Wire the anemometer-backed sensor to the DAO that persists wind
        # measurements into the configured database.
        super(WindMeasurementController, self).__init__(sensor=WindMeasurementSensor(anemometer_port_number=anemometer_port_number),
                                                        dao=WindMeasurementDao(server=server,
                                                                               database=database,
                                                                               user=user,
                                                                               password=password))
| 59.4 | 132 | 0.560045 | 720 | 0.808081 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.083053 |
3546d98d4dfa60c74b37af1f19933322143ce069 | 750 | py | Python | Alignment/CommonAlignmentProducer/python/ALCARECOTkAlMinBias_Output_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Alignment/CommonAlignmentProducer/python/ALCARECOTkAlMinBias_Output_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Alignment/CommonAlignmentProducer/python/ALCARECOTkAlMinBias_Output_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# AlCaReco for track based alignment using MinBias events
# Event content for the TkAlMinBias ALCARECO stream, without a leading
# "drop *" (products from upstream steps are kept as well).
OutALCARECOTkAlMinBias_noDrop = cms.PSet(
    # Only store events accepted by the TkAlMinBias selection path.
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('pathALCARECOTkAlMinBias')
    ),
    outputCommands = cms.untracked.vstring(
        'keep *_ALCARECOTkAlMinBias_*_*',
        'keep L1AcceptBunchCrossings_*_*_*',
        'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
        'keep *_TriggerResults_*_*',
        'keep DcsStatuss_scalersRawToDigi_*_*',
        'keep *_offlinePrimaryVertices_*_*',
        'keep *_offlineBeamSpot_*_*')
)
import copy
# Standard variant: identical content, but starting from "drop *" so that
# ONLY the products listed above are written out.
OutALCARECOTkAlMinBias = copy.deepcopy(OutALCARECOTkAlMinBias_noDrop)
OutALCARECOTkAlMinBias.outputCommands.insert(0, "drop *")
| 35.714286 | 69 | 0.729333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.442667 |
3547f355d42e86cac54a2fa19887cfc08f3e0eb5 | 984 | py | Python | dashboard/migrations/0005_usercacherefreshtime.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | dashboard/migrations/0005_usercacherefreshtime.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | dashboard/migrations/0005_usercacherefreshtime.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-11-04 21:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration creating the UserCacheRefreshTime model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('dashboard', '0004_switch_jsonfield'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserCacheRefreshTime',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Nullable timestamps — presumably the last cache refresh
                # per data kind; null means never refreshed (confirm in app).
                ('enrollment', models.DateTimeField(null=True)),
                ('certificate', models.DateTimeField(null=True)),
                ('current_grade', models.DateTimeField(null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 33.931034 | 118 | 0.644309 | 760 | 0.772358 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.183943 |
35481960fa84aa0d7d74f4ec46b633eb060d28fb | 1,282 | py | Python | easy/836-Rectangle Overlap.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | 2 | 2020-05-08T02:17:17.000Z | 2020-05-17T04:55:56.000Z | easy/836-Rectangle Overlap.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | easy/836-Rectangle Overlap.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | """
https://leetcode.com/problems/rectangle-overlap/
A rectangle is represented as a list [x1, y1, x2, y2], where (x1, y1) are the coordinates of its bottom-left corner, and (x2, y2) are the coordinates of its top-right corner.
Two rectangles overlap if the area of their intersection is positive. To be clear, two rectangles that only touch at the corner or edges do not overlap.
Given two (axis-aligned) rectangles, return whether they overlap.
Example 1:
Input: rec1 = [0,0,2,2], rec2 = [1,1,3,3]
Output: true
Example 2:
Input: rec1 = [0,0,1,1], rec2 = [1,0,2,1]
Output: false
Notes:
Both rectangles rec1 and rec2 are lists of 4 integers.
All coordinates in rectangles will be between -10^9 and 10^9.
"""
# time complexity: O(1), space complexity: O(1)
# The two solutions are both interesting.
class Solution:
    def isRectangleOverlap(self, rec1: List[int], rec2: List[int]) -> bool:
        """Return True iff the two axis-aligned rectangles share positive area.

        Each rectangle is [x1, y1, x2, y2] with (x1, y1) bottom-left and
        (x2, y2) top-right.  Touching edges or corners do not overlap.
        """
        def overlaps_1d(lo_a, hi_a, lo_b, hi_b):
            # Strict inequality: a shared boundary has zero-width intersection.
            return min(hi_a, hi_b) > max(lo_a, lo_b)
        x_overlap = overlaps_1d(rec1[0], rec1[2], rec2[0], rec2[2])
        y_overlap = overlaps_1d(rec1[1], rec1[3], rec2[1], rec2[3])
        return x_overlap and y_overlap
35481fe584384c36a89d37bc77fef6fadfaeafc5 | 69 | py | Python | src/vaccinebot_token.py | DPS0340/vaccine-dispenser | d43f2313de26a5e0a6d15621ffda3d9c505a664c | [
"MIT"
] | 2 | 2021-08-07T17:28:49.000Z | 2021-08-08T05:40:06.000Z | src/vaccinebot_token.py | DPS0340/vaccine-dispenser | d43f2313de26a5e0a6d15621ffda3d9c505a664c | [
"MIT"
] | null | null | null | src/vaccinebot_token.py | DPS0340/vaccine-dispenser | d43f2313de26a5e0a6d15621ffda3d9c505a664c | [
"MIT"
] | null | null | null | import os
def get_token():
    """Return the bot token from the VACCINEBOT_TOKEN environment variable.

    Raises KeyError when the variable is not set.
    """
    return os.environ["VACCINEBOT_TOKEN"]
35489b42960c3642fb09f7d87e33db297be3e58b | 2,651 | py | Python | fluiddb/scripts/testing.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | 3 | 2021-05-10T14:41:30.000Z | 2021-12-16T05:53:30.000Z | fluiddb/scripts/testing.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | null | null | null | fluiddb/scripts/testing.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | 2 | 2018-01-24T09:03:21.000Z | 2021-06-25T08:34:54.000Z | import logging
from fluiddb.data.store import getMainStore
from fluiddb.exceptions import FeatureError
from fluiddb.model.namespace import NamespaceAPI
from fluiddb.model.tag import TagAPI
from fluiddb.model.user import UserAPI, getUser
# Fixture paths created by prepareForTesting and removed again by
# removeTestingData.  Namespaces are listed parent-first.
TESTING_DATA = {
    u'users': [
        u'testuser1',
        u'testuser2'],
    u'namespaces': [
        u'fluiddb/testing',
        u'fluiddb/testing/testing',
        u'testuser1/testing',
        u'testuser1/testing/testing',
        u'testuser2/testing',
        u'testuser2/testing/testing'],
    u'tags': [
        u'fluiddb/testing/test1',
        u'fluiddb/testing/test2',
        u'testuser1/testing/test1',
        u'testuser1/testing/test2',
        u'testuser2/testing/test1',
        u'testuser2/testing/test2']
    }
def prepareForTesting():
    """
    Create a set of L{User}s, L{Namespace}s and L{Tag}s for testing purposes.
    """
    superuser = getUser(u'fluiddb')
    description = u'Used for testing purposes.'
    logging.info('Creating testing users.')
    userDetails = [(name, 'secret', u'Test user', u'test@example.com')
                   for name in TESTING_DATA[u'users']]
    UserAPI().create(userDetails)
    logging.info('Creating testing namespaces.')
    NamespaceAPI(superuser).create(
        [(path, description) for path in TESTING_DATA[u'namespaces']])
    logging.info('Creating testing tags.')
    TagAPI(superuser).create(
        [(path, description) for path in TESTING_DATA[u'tags']])
    getMainStore().commit()
def removeTestingData():
    """
    Delete L{User}s, L{Namespace}s and L{Tag}s used for testing purposes.

    Deletion order matters: tags first, then namespaces (children before
    parents), then the users' private namespaces, and finally the users.
    """
    admin = getUser(u'fluiddb')
    logging.info('Deleting testing tags.')
    result = TagAPI(admin).get(TESTING_DATA[u'tags'])
    if result:
        TagAPI(admin).delete(result.keys())
    logging.info('Deleting testing namespaces.')
    result = NamespaceAPI(admin).get(TESTING_DATA[u'namespaces'])
    # we must delete namespaces one by one, otherwise we'll get NotEmptyError.
    # Reverse-sorted order deletes child namespaces before their parents.
    for path in sorted(result.keys(), reverse=True):
        NamespaceAPI(admin).delete([path])
    logging.info('Deleting testing users.')
    result = UserAPI().get(TESTING_DATA[u'users'])
    if result:
        for username in result:
            path = '%s/private' % username
            try:
                NamespaceAPI(admin).delete([path])
            except FeatureError:
                # FIXME This is a bit crap, but it's faster than checking to
                # see if the namespace exists before attempting to delete it.
                continue
    if result:
        UserAPI().delete(result.keys())
    getMainStore().commit()
| 33.987179 | 78 | 0.629951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.39834 |
3548f9938f3a5d5177badd45e2cdec5c57b3903f | 6,752 | py | Python | prediction/main_ml.py | Anukriti12/OptumStratethon2.0 | b66dba07735bfa47d99e9907eb8bccdd3b77075c | [
"MIT"
] | 1 | 2021-03-04T05:49:01.000Z | 2021-03-04T05:49:01.000Z | prediction/main_ml.py | ankitpriyarup/optum_carewheel | f6c66f293b2980501e8bca8ab7e26ebd3b26cdd1 | [
"Apache-2.0"
] | null | null | null | prediction/main_ml.py | ankitpriyarup/optum_carewheel | f6c66f293b2980501e8bca8ab7e26ebd3b26cdd1 | [
"Apache-2.0"
] | 1 | 2020-09-13T11:50:49.000Z | 2020-09-13T11:50:49.000Z | import urllib3
import pandas as pd
import numpy as np
import zipfile
import copy
import pickle
import os
from esig import tosig
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
from os import listdir
from os.path import isfile, join
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, confusion_matrix
def get_inputs():
    """Download the PhysioNet 2012 set-a archive and load it per patient.

    Returns a dict mapping patient id -> raw pandas DataFrame of events.
    Side effects: writes data/input.zip and extracts it under data/set-a.
    """
    url = "https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/set-a.zip"
    http = urllib3.PoolManager()
    r = http.request('GET', url, preload_content=False)
    # Stream the archive to disk (r.read() with no size reads to EOF, so
    # this loop runs at most twice; kept as in the original).
    with open('data/input.zip', 'wb') as out:
        while True:
            data = r.read()
            if not data:
                break
            out.write(data)
    r.release_conn()
    zip_ref = zipfile.ZipFile("data/input.zip", 'r')
    zip_ref.extractall("data/")
    zip_ref.close()
    data = {}
    list_files = [f for f in listdir(
        "data/set-a") if isfile(join("data/set-a", f))]
    for f in list_files:
        df = pd.read_csv(join("data/set-a", f))
        # Patient id is stored in the third column of the first row.
        patient_id = int(df.values[0, 2])
        data[patient_id] = df
    return data
def get_outputs():
    """Download set-a outcomes and return a dict: patient id -> last column.

    The last column of Outcomes-a.txt is used as the label for each patient.
    """
    url = "https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/Outcomes-a.txt"
    data_df = pd.read_csv(url)
    data = {}
    for patient in data_df.values:
        patient_id = int(patient[0])
        data[patient_id] = patient[-1]
    return data
def download():
    """Fetch inputs and outcomes, aligned by patient id.

    Returns (X, Y) where X[i] is the raw DataFrame for a patient and Y[i]
    is that patient's outcome label.
    """
    inputs = get_inputs()
    outcomes = get_outputs()
    X, Y = [], []
    for patient_id in inputs:
        X.append(inputs[patient_id])
        Y.append(outcomes[patient_id])
    print("Data for %s patients downloaded." % len(X))
    return X, Y
def split(X, Y, proportion=0.75):
    """Split the parallel sequences X and Y into train/test partitions.

    The first int(len(X) * proportion) samples form the training set and
    the remainder the testing set.  Returns (X_train, Y_train, X_test, Y_test).
    """
    cut = int(len(X) * proportion)
    print("Dataset split in a training set of %s and testing set of %s patients." % (
        cut, len(X) - cut))
    X_train, Y_train = X[:cut], Y[:cut]
    X_test, Y_test = X[cut:], Y[cut:]
    return X_train, Y_train, X_test, Y_test
def features_point(x):
    """Flatten one (static, path) sample into a single feature vector.

    The vector is: static features, then column-wise maxima, column-wise
    minima, and the last observation of the path.
    """
    static, path = x
    summary = [
        static,
        np.max(path, axis=0),
        np.min(path, axis=0),
        path[-1],
    ]
    return np.concatenate(summary)
def extract(X):
    """Return the feature vector of every (static, path) sample in X."""
    return [features_point(sample) for sample in X]
def lead_lag(mylist):
    """Lead-lag transform: pair each point with its lagged copy.

    The lead component repeats the first point, the lag component repeats
    the last point, and both are stacked side by side (axis 1).
    """
    lead = np.concatenate([mylist[:1], mylist])
    lag = np.concatenate([mylist, mylist[-1:]])
    return np.concatenate([lead, lag], axis=1)
def add_time(mylist, init_time=0., total_time=1.):
    """Prepend an evenly spaced time coordinate to every point of *mylist*.

    Times run from init_time to init_time + total_time over len(mylist)
    points.  Requires len(mylist) >= 2 (divides by len(mylist) - 1).
    """
    rows = []
    for position, point in enumerate(mylist):
        timestamp = init_time + position * total_time / (len(mylist) - 1)
        rows.append([timestamp] + list(point))
    return np.array(rows)
def home_and_pen_off(mylist):
    """Append a pen-state channel and a return-to-origin point.

    Every original point gets a trailing 1. ('pen down'); the last point is
    repeated with the channel set to 0. ('pen off'), and a final all-zero
    point sends the pen home.
    """
    rows = []
    for point in mylist:
        rows.append(list(point) + [1.])
    pen_off = list(rows[-1])
    pen_off[-1] = 0.
    rows.append(pen_off)
    rows.append([0] * len(pen_off))
    return np.array(rows)
def refocus(path, centre):
    """Prepend the reversed *centre* points to *path* along axis 0."""
    reversed_centre = centre[::-1]
    return np.concatenate((reversed_centre, path), axis=0)
def train(features, Y):
    """Fit a RandomForestClassifier on the feature matrix and labels."""
    model = RandomForestClassifier()
    model.fit(features, Y)
    return model
def normalise_point(x):
    """Halve the time channel (column 0) of one sample's path, in place.

    Returns [static, path]; the path array itself is mutated.
    """
    static_part, path = x
    path[:, 0] /= 2.
    return [static_part, path]
def normalise(X):
    """Apply normalise_point to every (static, path) sample in X."""
    return [normalise_point(sample) for sample in X]
def evaluate(classifier, features, Y):
    """Print the challenge score: min(sensitivity, precision).

    Class-1 probabilities are thresholded at 0.3 to obtain hard labels
    before building the confusion matrix.
    """
    THRESHOLD = .3
    probabilities = classifier.predict_proba(features)[:, 1]
    predictions = []
    for probability in probabilities:
        predictions.append(1. if probability > THRESHOLD else 0.)
    cm = confusion_matrix(Y, predictions)
    sensitivity = cm[1, 1] / float(cm[1, 1] + cm[1, 0])
    precision = cm[1, 1] / float(cm[1, 1] + cm[0, 1])
    print("Score of predictions: %s" % min(sensitivity, precision))
def to_path(df, dynamic_variables):
    """Turn a patient's event DataFrame into a piecewise-constant path.

    Each row of the result is [time_in_days, var1, var2, ...] where the
    variable columns follow the order of *dynamic_variables*.  Values carry
    forward (last observation) until a new measurement arrives.  When several
    events share a timestamp, only the last one is kept.
    """
    dim = len(dynamic_variables) + 1
    # Start at time 0 with all variables at 0.
    path = [[0.]*dim]
    for event in df.values:
        # event layout observed here: [0]=HH:MM time, [1]=parameter, [2]=value.
        if event[1] in dynamic_variables:
            # Copy the previous state and overwrite just the measured variable.
            new_value = copy.deepcopy(path[-1])
            idx = 1 + dynamic_variables.index(event[1])
            new_value[idx] = event[2]
            hour, min = event[0].split(":")
            # Convert HH:MM to a fraction of a day.
            days = (float(hour) + float(min) / 60.)/24.
            new_value[0] = days
            path.append(new_value)
    path = np.array(path)
    # Deduplicate timestamps, keeping the last observation at each time.
    unique_times = np.unique(path[:, 0])
    idx = []
    for time in unique_times:
        last_idx = np.where(path[:, 0] == time)[0][-1]
        idx.append(last_idx)
    path = path[idx]
    return path
def static_features(df, static_variables):
    """Values of the rows whose Parameter is one of *static_variables*."""
    mask = df["Parameter"].isin(static_variables)
    return df.loc[mask, "Value"].values
def reformat(X, static_variables, dynamic_variables):
    """Replace each raw DataFrame in X, in place, with a [static, path] pair.

    *static* holds the static variable values; *path* is the time-indexed
    path of the dynamic variables.  X itself is mutated and returned.
    """
    for index, frame in enumerate(X):
        path = to_path(frame, dynamic_variables=dynamic_variables)
        statics = static_features(frame, static_variables=static_variables)
        X[index] = [statics, path]
    return X
def st2si(order, stream):
    """Signature of *stream* truncated at *order*.

    Order > 1 delegates to esig's tosig; order 1 is the leading 1 followed
    by the total increment; anything lower is just [1].
    """
    if order > 1:
        return tosig.stream2sig(stream, order)
    if order == 1:
        # Level-1 signature: constant 1, then last point minus first point.
        return np.concatenate((np.array([1]), stream[-1] - stream[0]), axis=0)
    return np.array([1])
def compute(X, order=2):
    """Compute the order-*order* signature of every stream in X in parallel.

    Returns a numpy array of signatures, or an empty list if any worker
    raised.  NOTE(review): the Pool is never closed/joined — worker
    processes may outlive this call; confirm whether that is acceptable.
    """
    func = partial(st2si, order)
    pool = Pool()
    n_samples = len(X)
    signatures = []
    try:
        # imap keeps ordering; tqdm only adds a progress bar.
        signatures = np.array(list(tqdm(pool.imap(func, X), total=n_samples)))
    except Exception as e:
        print('Failed to compute signatures: ' + repr(e))
        signatures = []
    return signatures
def predict(classifier, url):
    """Download one patient record from *url*, preprocess it with the same
    pipeline used for training, and print the classifier's prediction.

    Parameters
    ----------
    classifier : fitted classifier exposing a ``predict`` method.
    url : str
        Location of a raw patient record (PhysioNet text format).

    Side effects: writes the download to data/test_input.txt.
    """
    # Stream the remote file to disk.
    http = urllib3.PoolManager()
    r = http.request('GET', url, preload_content=False)
    with open('data/test_input.txt', 'wb') as out:
        while True:
            data = r.read()
            if not data:
                break
            out.write(data)
    r.release_conn()
    # Index the record by patient id (third column of the first row).
    data = {}
    df = pd.read_csv("data/test_input.txt")
    patient_id = int(df.values[0, 2])
    data[patient_id] = df
    X = []
    for patient_id in data:
        X.append(data[patient_id])
    # Same preprocessing chain as training.
    X = reformat(X, static_variables=["Age", "Gender"], dynamic_variables=[
                 "Creatinine", "Glucose"])
    X = normalise(X)
    X = extract(X)
    # [0] means in-house dead [1] means in-house alive
    # BUG FIX: the original concatenated str + ndarray, which raises
    # TypeError; format the prediction array explicitly instead.
    print('Predicted result: {}'.format(classifier.predict(X)))
if __name__ == "__main__":
    # DOWNLOAD & REFORMAT EVENT DATA, TRANSFORM TIME DEPENDENT VARIABLES
    X, Y = download()
    X = reformat(X, static_variables=["Age", "Gender"], dynamic_variables=[
                 "Creatinine", "Glucose"])
    # NORMALISE & EXTRACT FEATURES
    X = normalise(X)
    features = extract(X)
    # TRAIN THE MODEL BY SPLITING
    features_train, Y_train, features_test, Y_test = split(
        features, Y, proportion=0.75)
    classifier = train(features_train, Y_train)
    # Predict a single held-out patient record fetched from PhysioNet.
    predict(classifier, 'https://storage.googleapis.com/challenge-2012-1.0.0.physionet.org/set-a/132539.txt')
    # EVALUATE PERFORMANCE
    evaluate(classifier, features_test, Y_test)
| 26.687747 | 109 | 0.618928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 862 | 0.127666 |
3549ef84efc1cdf33ac92611c87fc798eee128a4 | 7,254 | py | Python | NeuralNetworkRef/create_pics.py | cmt-qo/cm-flakes | c11f37b50b088cf5c876ef8a6161b7d8d775e99b | [
"MIT"
] | 6 | 2019-11-04T07:04:24.000Z | 2021-02-10T21:35:00.000Z | NeuralNetworkRef/create_pics.py | cmt-qo/cm-flakes | c11f37b50b088cf5c876ef8a6161b7d8d775e99b | [
"MIT"
] | null | null | null | NeuralNetworkRef/create_pics.py | cmt-qo/cm-flakes | c11f37b50b088cf5c876ef8a6161b7d8d775e99b | [
"MIT"
] | 2 | 2020-08-07T09:29:41.000Z | 2021-02-10T21:35:05.000Z | #-------------------------------------------------------------------------------
# Filename: create_pics.py
# Description: creates square pictures out of a picture which is mostly empty
# for training a neural network later.
# The parameters to fool around with include:
# factor: scaled down image for faster image processing
# sq_size: size of square that is used to construct the standard-deviation map
# cutoff: cutoff for standard deviation
# Authors: Mark H Fischer, Eliska Greplova
#-------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from os import listdir, path, makedirs
import argparse
import sys
# class MyParser(argparse.ArgumentParser):
# def error(self, message):
# sys.stderr.write('error: %s\n' % message)
# self.print_help()
# sys.exit(2)
def pics(from_path='raw_data',to_path='preproc_data'):
    """Cut large microscope images into dim x dim candidate-flake squares.

    Candidate regions are located by a standard-deviation map computed on a
    downscaled copy of each image.  Squares matching a coordinate listed in
    the image's companion .csv file are saved as 'good' (suffix *A, with five
    transposed augmentations); everything else is saved with suffix _B.
    Prints summary counts of total / good / missed flakes.
    """
    # parser = MyParser()
    # parser.add_argument('input_folder', nargs='+')
    # parser.add_argument('output_folder', nargs='+')
    # args = parser.parse_args()
    # from_path = args.input_folder[0]
    if not from_path[-1]=='/':
        from_path+=('/')
    # to_path = args.output_folder[0]
    if not to_path[-1]=='/':
        to_path+=('/')
    #check whether input path exists
    if not path.exists(from_path):
        raise IOError("input directory {0} does not exist, exiting script".format(from_path))
    #possible image file extensions.
    exts = ['.jpg', '.png', '.tif', '.bmp']
    # input file dimensions
    xdim = 1330 #2560
    ydim = 884 #1920
    # output file dimensions
    dim = 80 #256
    export_ext = '.png' #extension files will be saved
    #first, find all the image file in the directory
    files = listdir(from_path)
    filenames = []
    extensions = []
    for f in files:
        name, ext = path.splitext(from_path+f)
        if ext in exts:
            filenames.append(name)
            extensions.append(ext)
    print("found {0} image files in folder {1}".format(len(filenames), from_path))
    total_flakes = 0
    good_flakes = 0
    missed_flakes = 0
    #start the actual work of cutting the pictures into smaller pictures
    for i, filename in enumerate(filenames):
        print("starting with new image file: {0}{1}".format(filename,
                                                            extensions[i]))
        #first, check for the .csv file with the coordinates of good flakes
        good_ones = []
        try:
            with open(filename+".csv") as f:
                content = f.read().splitlines()
            for line in content:
                good_ones.append(line.split(','))
        except IOError:
            print("Warning: Couldn't find file {0}.csv, assume there's no good flakes".format(filename))
        # open image
        full_im = Image.open(filename+extensions[i])
        Lx = full_im.size[0] #x dimension of picture
        Ly = full_im.size[1] #y dimension of picture
        # we want to work on pictures of equal size, so if they are not the right
        # size, we rescale them.
        scalex = 1.
        scaley = 1.
        if not Lx == xdim:
            # scale factors later map the .csv coordinates onto the resized image
            scalex = float(xdim) / Lx
            scaley = float(ydim) / Ly
            full_im = full_im.resize((xdim, ydim))
            print("picture is too big, resizing to ({0}, {1})".format(xdim, ydim))
        #to speed up the whole work, we resize the image for the first step
        factor = 8
        lx = int(xdim/factor) # resized x dimension
        ly = int(ydim/factor) # resized y dimension
        small_im = full_im.resize((lx, ly))
        sq_size = dim//factor # size of square in resized image
        cutoff = 5 #was 2.75 # cutoff for standard deviation
        #calculate the standard deviation of the black and white images
        # (convert('L') returns a BW image)
        stds = np.zeros((lx-sq_size, ly-sq_size))
        for k in range(lx-sq_size):
            for l in range(ly-sq_size):
                tmp_im = small_im.crop((k, l, k+sq_size, l+sq_size))
                stds[k,l] = np.std(list(tmp_im.convert('L').getdata()))
        # Flatten the std map and visit windows from highest std downwards.
        Lstds = np.reshape(stds, (lx-sq_size)*(ly-sq_size))
        sorted_stds = np.argsort(Lstds)
        centers = []
        for j in reversed(sorted_stds):
            if Lstds[j]< cutoff: break
            # NOTE(review): sq_size/2 is a float in Python 3, so ix/iy become
            # floats here; PIL coerces crop coordinates, but confirm intended.
            ix = int(j/(ly-sq_size))+sq_size/2
            iy = j%(ly-sq_size)+sq_size/2
            # skip windows overlapping an already-accepted center
            included = False
            for c in centers:
                if (abs(c[0]-ix) < sq_size) and (abs(c[1]-iy)<sq_size):
                    included = True
                    continue
            if included: continue
            # clamp the center so the square stays inside the image
            ix = min(max(sq_size, ix), lx-sq_size)
            iy = min(max(sq_size, iy), ly-sq_size)
            centers.append((ix, iy))
        print("identified {0} potential candidates in image {1}".format(len(centers), filename))
        total_flakes += len(centers)
        # crop the full-resolution square around every accepted center
        squares = []
        coordinates = []
        for c in centers:
            ix = c[0]*factor
            iy = c[1]*factor
            coordinates.append([ix, iy])
            x0 = ix - factor*sq_size
            x1 = ix + factor*sq_size
            y0 = iy - factor*sq_size
            y1 = iy + factor*sq_size
            squares.append(full_im.crop((x0, y0, x1, y1)))
        if not path.exists(to_path):
            print("{0} does not exist yet, creating it".format(to_path))
            makedirs(to_path)
        found = np.zeros(len(good_ones)) # to make sure we found all good ones
        for k in range(len(squares)):
            x = coordinates[k][0]
            y = coordinates[k][1]
            bad = True
            name = filename.split('/')[-1]
            for j, good in enumerate(good_ones):
                g0 = scalex*float(good[0])
                g1 = scaley*float(good[1])
                if (abs(g0-x) < factor*sq_size) and (abs(g1-y)<factor*sq_size):
                    # good flake: save the square plus its five PIL transposes
                    this_file = to_path+name+"_" + str(coordinates[k][0])\
                                + "_" + str(coordinates[k][1])+"_0A"+ export_ext
                    squares[k].resize((dim, dim)).save(this_file)
                    for t in range(5):
                        this_file = to_path+name + "_" + str(coordinates[k][0]) + \
                            "_" + str(coordinates[k][1])+"_{0}A".format(t+1)+ export_ext
                        squares[k].transpose(t).resize((dim, dim)).save(this_file)
                    found[j]=1
                    bad = False
                    good_flakes += 1
            if not bad: continue
            # not matched to any good coordinate: save as a 'bad' example
            this_file = to_path + name +"_" + str(coordinates[k][0]) + "_" + \
                str(coordinates[k][1])+"_B" + export_ext
            squares[k].resize((dim, dim)).save(this_file)
        if np.sum(found)<len(good_ones):
            missed_flakes += len(good_ones) - np.sum(found)
            print("Warning: We have missed a good one in {0}".format(filename))
            print("(should have found {0}, found {1}instead".format( \
                len(good_ones), np.sum(found)))
        print("")
    print("total flakes found: {0}".format(total_flakes))
    print("of which are good : {0}".format(good_flakes))
    print("good flakes missed: {0}".format(int(missed_flakes)))
| 38.178947 | 104 | 0.553763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,372 | 0.326992 |
354a546386259b039381e2eef2517c0b0f0d4c22 | 150 | py | Python | pyomt5/api/__init__.py | paulorodriguesxv/pyomt5 | 9287395f9f72b049c945e625e3b75c491ae50407 | [
"MIT"
] | 8 | 2019-09-06T02:44:04.000Z | 2021-07-08T04:10:11.000Z | pyomt5/api/__init__.py | dausech/pyomt5 | 691dbf7b9732728425e57a7b9055d971838c5c4d | [
"MIT"
] | null | null | null | pyomt5/api/__init__.py | dausech/pyomt5 | 691dbf7b9732728425e57a7b9055d971838c5c4d | [
"MIT"
] | 2 | 2019-09-10T16:41:16.000Z | 2020-10-14T13:49:33.000Z | from .metatradercom import (MetatraderCom, ConnectionTimeoutError,
DataNotFoundError)
from .timeframe import MT5TimeFrame
| 37.5 | 66 | 0.706667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
101e55aaa1d141b7b14f9e29fcdf1c97452cd043 | 2,600 | py | Python | driverapp/models.py | gabyxbinnaeah/Bus-Booking | 51d2a521f890986e4e7e17775708cec3cd71d2b4 | [
"MIT"
] | null | null | null | driverapp/models.py | gabyxbinnaeah/Bus-Booking | 51d2a521f890986e4e7e17775708cec3cd71d2b4 | [
"MIT"
] | null | null | null | driverapp/models.py | gabyxbinnaeah/Bus-Booking | 51d2a521f890986e4e7e17775708cec3cd71d2b4 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Driver(models.Model):
    """Account and contact details for a bus driver."""
    name = models.CharField(max_length=30)
    # NOTE(review): password is stored as plain text here — presumably it
    # should be hashed (e.g. via Django's auth machinery); confirm.
    password = models.CharField(max_length=30)
    email = models.EmailField()
    Contact = models.CharField(max_length=10)
    def save_driver(self):
        # Persist this driver row.
        self.save()
    def delete_driver(self):
        # Remove this driver row.
        self.delete()
    @classmethod
    def update_driver(self):
        # NOTE(review): decorated @classmethod but first parameter is named
        # 'self', and get_or_create() is called with no lookup arguments, so
        # this returns a (Driver, created) tuple from an empty filter —
        # verify intended behaviour.
        driver=Driver.objects.get_or_create()
        return driver
    def _str_(self):
        # NOTE(review): single underscores — Django expects __str__; as
        # written this is never used for string conversion.
        return self.email
class Bus(models.Model):
    """A scheduled bus trip plus (duplicated) driver contact details."""
    bus_name = models.CharField(max_length=30)
    source= models.CharField(max_length=30)
    destination = models.CharField(max_length=30)
    # nos: number of seats; rem: remaining seats (stored as text).
    nos = models.IntegerField(default=0)
    rem = models.CharField(null=True, max_length=5,blank=True)
    fare = models.CharField(null=True, max_length=6)
    date = models.DateField()
    time = models.TimeField()
    # NOTE(review): these duplicate Driver's fields on each bus row —
    # presumably a ForeignKey to Driver was intended; confirm.
    name = models.CharField(max_length=30,null=True)
    password = models.CharField(max_length=30,null=True)
    email = models.EmailField(null=True)
    Contact = models.CharField(max_length=10,null=True)
    @classmethod
    def search_buses(cls, source, destination):
        # Case-insensitive substring match on both endpoints of the trip.
        return cls.objects.filter(source__icontains=source , destination__icontains=destination).all()
    def bus_details(cls):
        # NOTE(review): takes 'cls' but is an instance method (no decorator).
        bus_details_list=cls.objects.all()
        return bus_details_list
    def save_bus(self):
        self.save()
    def delete_bus(self):
        self.delete()
    @classmethod
    def update_bus(self):
        # NOTE(review): same @classmethod/'self'/empty get_or_create pattern
        # as Driver.update_driver — verify intent.
        bus=Bus.objects.get_or_create()
        return bus
    def _str_(self):
        # NOTE(review): likely meant __str__ (double underscores).
        return self.bus_name
class Book(models.Model):
    """A seat booking linking a Driver ('userid') and a Bus ('busid')."""
    BOOKED = 'B'
    CANCELLED = 'C'
    # (db value, human-readable label) pairs for the status field.
    TICKET_STATUSES = ((BOOKED, 'Booked'),
                       (CANCELLED, 'Cancelled'),)
    email = models.EmailField()
    name = models.CharField(max_length=30)
    userid =models.ForeignKey(Driver,null=True,on_delete=models.CASCADE)
    busid=models.ForeignKey(Bus, null=True,on_delete=models.CASCADE, related_name='bus_id')
    source = models.CharField(max_length=30)
    destination = models.CharField(max_length=30,null=True ,blank=True)
    seat_no = models.CharField(max_length=30,null=True)
    fare = models.CharField(null=True, max_length=6)
    date = models.DateField()
    time = models.TimeField()
    status = models.CharField(choices=TICKET_STATUSES, default=BOOKED, max_length=2)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    def _str_(self):
        # NOTE(review): likely meant __str__ (double underscores).
        return self.email
| 29.545455 | 102 | 0.682308 | 2,472 | 0.950769 | 0 | 0 | 369 | 0.141923 | 0 | 0 | 59 | 0.022692 |
101ea49bb93a3f7ed00d033876886b5bfdafa971 | 1,870 | py | Python | tests/helper.py | MSLNZ/MSL-IO | 0b7a1f6ddacc936a98f134fd67f209840a500030 | [
"MIT"
] | 6 | 2021-06-27T00:26:09.000Z | 2022-02-11T06:04:23.000Z | tests/helper.py | MSLNZ/MSL-IO | 0b7a1f6ddacc936a98f134fd67f209840a500030 | [
"MIT"
] | null | null | null | tests/helper.py | MSLNZ/MSL-IO | 0b7a1f6ddacc936a98f134fd67f209840a500030 | [
"MIT"
] | 1 | 2018-03-01T03:11:00.000Z | 2018-03-01T03:11:00.000Z | """
Helper functions for the tests
"""
import os
import numpy as np
from msl.io import read
def read_sample(filename, **kwargs):
    """Read a file in the 'samples' directory.

    Parameters
    ----------
    filename : str
        The name of the file in the samples/ directory.
    **kwargs
        Forwarded to :func:`msl.io.read`.

    Returns
    -------
    A root object.
    """
    samples_dir = os.path.join(os.path.dirname(__file__), 'samples')
    return read(os.path.join(samples_dir, filename), **kwargs)
def metadata_equal(m1, m2):
    """Assert that two Metadata objects hold the same keys and values."""
    assert len(m1) == len(m2)
    for key, value1 in m1.items():
        value2 = m2[key]
        if not isinstance(value1, (list, tuple, np.ndarray)):
            assert value1 == value2, '{} != {}'.format(value1, value2)
        else:
            assert np.array_equal(value1, value2), '{}\n{}'.format(value1, value2)
    return True
def datasets_equal(d1, d2):
    """Assert that two Dataset objects have equal names, data and metadata."""
    assert d1.name == d2.name, '{} != {}'.format(d1.name, d2.name)
    data_message = '{}\n{}'.format(d1.data, d2.data)
    assert np.array_equal(d1.data, d2.data), data_message
    assert metadata_equal(d1.metadata, d2.metadata)
    return True
def roots_equal(r1, r2):
    """Assert that two Root objects have equal metadata, groups and datasets."""
    assert metadata_equal(r1.metadata, r2.metadata)
    groups1 = sorted(r1.groups(), key=lambda g: g.name)
    groups2 = sorted(r2.groups(), key=lambda g: g.name)
    assert len(groups1) == len(groups2)
    for g1, g2 in zip(groups1, groups2):
        assert g1.name == g2.name, '{} != {}'.format(g1.name, g2.name)
        assert metadata_equal(g1.metadata, g2.metadata)
    datasets1 = sorted(r1.datasets(), key=lambda d: d.name)
    datasets2 = sorted(r2.datasets(), key=lambda d: d.name)
    assert len(datasets1) == len(datasets2)
    for d1, d2 in zip(datasets1, datasets2):
        assert datasets_equal(d1, d2)
    return True
| 27.5 | 87 | 0.617647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.231551 |
101f9a1a4f2942de9319c207e8d75011f9f74070 | 1,019 | py | Python | imagepy/tools/Draw/floodfill_tol.py | adines/imagepy | d7cdf3273d25e06046626ef2ef9200b1846ea49a | [
"BSD-4-Clause"
] | 1 | 2019-02-22T03:09:24.000Z | 2019-02-22T03:09:24.000Z | imagepy/tools/Draw/floodfill_tol.py | adines/imagepy | d7cdf3273d25e06046626ef2ef9200b1846ea49a | [
"BSD-4-Clause"
] | null | null | null | imagepy/tools/Draw/floodfill_tol.py | adines/imagepy | d7cdf3273d25e06046626ef2ef9200b1846ea49a | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 19 17:35:09 2016
@author: yxl
"""
from imagepy.core.engine import Tool
import numpy as np
from imagepy.core.manager import ColorManager
from imagepy.core.draw.fill import floodfill
class Plugin(Tool):
    """ImagePy canvas tool: flood-fill from the clicked pixel with the front color."""
    title = 'Flood Fill'
    # Tool parameters: 'tor' = tolerance, 'con' = pixel connectivity.
    para = {'tor':10, 'con':'8-connect'}
    # Parameter UI spec consumed by ImagePy ('torlorance' label kept as-is).
    view = [(int, 'tor', (0,1000), 0, 'torlorance', 'value'),
            (list, 'con', ['4-connect', '8-connect'], str, 'fill', 'pix')]
    def mouse_down(self, ips, x, y, btn, **key):
        # Snapshot first so the fill can be undone.
        ips.snapshot()
        msk = floodfill(ips.img, x, y, self.para['tor'], self.para['con']=='8-connect')
        #plt.imshow(msk)
        #plt.show()
        color = ColorManager.get_front()
        # single-channel images take a scalar color (mean of the front color)
        if ips.get_nchannels()==1:color = np.mean(color)
        ips.img[msk] = color
        ips.update()
    def mouse_up(self, ips, x, y, btn, **key):
        pass
    def mouse_move(self, ips, x, y, btn, **key):
        pass
    def mouse_wheel(self, ips, x, y, d, **key):
        pass
| 26.815789 | 87 | 0.557409 | 786 | 0.771344 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.218842 |
10204734fd1c1eef0317965dd5d5dbfb049ba8d9 | 953 | py | Python | authors/apps/articles/signals.py | andela/ah-backend-summer | f842a3e02f8418f123dc5de36809ad67557b1c1d | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:45:24.000Z | 2019-03-11T12:45:24.000Z | authors/apps/articles/signals.py | andela/ah-backend-summer | f842a3e02f8418f123dc5de36809ad67557b1c1d | [
"BSD-3-Clause"
] | 53 | 2019-01-29T08:02:23.000Z | 2022-03-11T23:39:37.000Z | authors/apps/articles/signals.py | andela/ah-backend-summer | f842a3e02f8418f123dc5de36809ad67557b1c1d | [
"BSD-3-Clause"
] | 5 | 2019-10-04T07:02:38.000Z | 2020-06-11T12:39:22.000Z | """Signal dispatchers and handlers for the articles module"""
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from authors.apps.articles.models import Article
# our custom signal that will be sent when a new article is published
# we could have stuck to using the post_save signal and receiving it in the
# notifications app or calling one of the util methods there,
# but that kills the whole benefit to the modularity we're going for
article_published_signal = Signal(providing_args=["article"])
class ArticlesSignalSender:
    """Sentinel sender class passed when dispatching article_published_signal."""
    pass
@receiver(post_save, sender=Article)
def on_article_post_save(sender, **kwargs):
    """Post-save handler for Article: re-dispatches our custom signal.

    Fires article_published_signal only when the article was newly created
    (kwargs['created'] is True), passing the saved instance as 'article'.
    """
    if kwargs['created']:
        # we are only acting when something we are interested in
        # actually happened
        article_published_signal.send(ArticlesSignalSender,
                                      article=kwargs['instance'])
| 36.653846 | 75 | 0.736621 | 36 | 0.037775 | 0 | 0 | 367 | 0.3851 | 0 | 0 | 474 | 0.497377 |
1021bc2651a4687c5f86fe5d2e96514c84207cfb | 4,923 | py | Python | benchmarks/roberta/benchmark_tft.py | legacyai/tf-transformers | 65a5f9a4bcb3236483daa598a37b91673f56cb97 | [
"Apache-2.0"
] | 116 | 2021-03-15T09:48:41.000Z | 2022-03-24T05:15:51.000Z | benchmarks/roberta/benchmark_tft.py | legacyai/tf-transformers | 65a5f9a4bcb3236483daa598a37b91673f56cb97 | [
"Apache-2.0"
] | 4 | 2021-03-20T11:20:57.000Z | 2022-01-05T04:59:07.000Z | benchmarks/roberta/benchmark_tft.py | legacyai/tf-transformers | 65a5f9a4bcb3236483daa598a37b91673f56cb97 | [
"Apache-2.0"
] | 9 | 2021-03-17T04:14:48.000Z | 2021-09-13T07:15:31.000Z | """TFTBechmark scripts"""
import shutil
import tempfile
import time
import tensorflow as tf
import tqdm
from datasets import load_dataset
from transformers import RobertaTokenizerFast
from tf_transformers.models import Classification_Model
from tf_transformers.models import RobertaModel as Model
# Model-loading strategies accepted by TftBenchmark (cfg.benchmark.model.type).
_ALLOWED_DECODER_TYPES = ["keras_model", "saved_model"]
class TftBenchmark:
    """Throughput benchmark for a tf-transformers RoBERTa classifier.

    Driven by a (presumably hydra) config object; loads the model either as
    a Keras model or a TF saved_model, tokenizes a HuggingFace dataset, and
    measures samples/second over the batched data.
    """
    def __init__(self, cfg):
        self.cfg = cfg
        # Check compatible model type
        self.model_type = cfg.benchmark.model.type
        if self.model_type not in _ALLOWED_DECODER_TYPES:
            raise ValueError("Unknow model type {} defined".format(self.model_type))
        self.model_name = cfg.benchmark.model.name
        self.tokenizer = RobertaTokenizerFast.from_pretrained(self.model_name)
        # scratch dir for the saved_model round-trip; removed in run()
        self.temp_dir = tempfile.mkdtemp()
    def load_and_batch_dataset(self):
        """Tokenize and batch the configured dataset.

        Returns a list of (features_dict, batch_size) tuples so batching
        latency is excluded from the timed loop in run().
        """
        cfg = self.cfg
        tokenizer = self.tokenizer
        # Load from hydra config
        dataset_name = cfg.benchmark.data.name
        take_sample = cfg.benchmark.data.take_sample
        batch_size = cfg.benchmark.data.batch_size
        max_length = cfg.benchmark.data.max_length
        dataset = load_dataset(dataset_name, split="test")
        if take_sample:
            # keep only the first 50 rows for a quick smoke run
            dataset = dataset.select(range(50))
        # Add summarize: with text
        self.dataset = dataset
        dataset = dataset.map(
            lambda e: tokenizer(e["text"], truncation=True, padding=True, max_length=max_length),
            batched=True,
        )
        dataset.set_format(type="tensorflow", columns=["input_ids"])
        # pad/truncate ragged ids to a dense [None, max_length] int32 tensor
        features = {
            x: tf.cast(dataset[x], dtype=tf.int32).to_tensor(default_value=0, shape=[None, max_length])
            for x in ["input_ids"]
        }
        features['input_mask'] = tf.ones_like(features['input_ids'])
        features['input_type_ids'] = tf.zeros_like(features['input_ids'])
        tfdataset = tf.data.Dataset.from_tensor_slices((features)).batch(batch_size)
        # Convert alldataset to a list for not including that latency while measuring model
        # performance
        # (batch_dataset, batch_size, seq_length)
        batched_datasets = [(batch_dataset, batch_dataset['input_ids'].shape[0]) for batch_dataset in tfdataset]
        return batched_datasets
    def _load_keras_model(self):
        """Load the classifier as a Keras model and wrap it in a callable."""
        def classifier_fn(model):
            def _classifier_fn(inputs):
                return model(inputs)
            return _classifier_fn
        model_name = self.cfg.benchmark.model.name
        # Load Auto Regressive Version
        model = Model.from_pretrained(model_name=model_name)
        model = Classification_Model(model, num_classes=2)
        model = model.get_model()
        return classifier_fn(model)
    def _load_saved_model(self):
        """Round-trip the model through a saved_model and wrap its signature."""
        def classifier_fn():
            model = self.loaded.signatures['serving_default']
            def _classifier_fn(inputs):
                return model(**inputs)
            return _classifier_fn
        model_name = self.cfg.benchmark.model.name
        model = Model.from_pretrained(model_name=model_name)
        model = Classification_Model(model, num_classes=2)
        model = model.get_model()
        # Save as saved_model
        model.save_serialized(self.temp_dir, overwrite=True)
        # Load as saved_model
        del model
        self.loaded = tf.saved_model.load(self.temp_dir)
        return classifier_fn()
    def load_model_classifier_fn(self):
        """Dispatch to the loader matching self.model_type."""
        if self.model_type == "keras_model":
            classifier_fn = self._load_keras_model()
        if self.model_type == "saved_model":
            classifier_fn = self._load_saved_model()
        return classifier_fn
    def run(self):
        """Run the benchmark; returns model type, wall time and samples/sec."""
        #### Load Decoder function
        classifier_fn = self.load_model_classifier_fn()
        print("Decoder function loaded succesfully")
        #### Load dataset
        batched_datasets = self.load_and_batch_dataset()
        print("Dataset loaded succesfully")
        import gc
        gc.collect()
        #### Run classifier function
        # Sample batch (to avoid first time compilation time)
        sample_batch_inputs, _ = batched_datasets[0]
        outputs = classifier_fn(sample_batch_inputs)
        slines = 0
        start_time = time.time()
        for (batch_inputs, batch_size) in tqdm.tqdm(batched_datasets, unit="batch "):
            outputs = classifier_fn(batch_inputs)  # noqa
            slines += batch_size
        end_time = time.time()
        # clean up the saved_model scratch directory
        shutil.rmtree(self.temp_dir)
        time_taken = end_time - start_time
        samples_per_second = slines / time_taken
        return {"model_type": self.model_type, "time_taken": time_taken, "samples_per_second": samples_per_second}
| 31.967532 | 114 | 0.655495 | 4,564 | 0.927077 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.179159 |
102307b53d88ad118e69555cc2a3d59eca3bcc56 | 393 | py | Python | Linear Structures/LinkedList/Single Linkedlist/LinkedList Traversal.py | Ash515/PyDataStructures | e8bd4f8866a85482f43a7db11c0db40f1c1151bc | [
"MIT"
] | 7 | 2020-11-24T12:27:11.000Z | 2021-09-28T14:51:35.000Z | Linear Structures/LinkedList/Single Linkedlist/LinkedList Traversal.py | Ash515/PyDataStructures | e8bd4f8866a85482f43a7db11c0db40f1c1151bc | [
"MIT"
] | null | null | null | Linear Structures/LinkedList/Single Linkedlist/LinkedList Traversal.py | Ash515/PyDataStructures | e8bd4f8866a85482f43a7db11c0db40f1c1151bc | [
"MIT"
] | null | null | null | class Node():
def __init__(self,data):
self.data=data
self.ref=None
class LinkedList():
    """A singly linked list that can print its contents."""
    def __init__(self):
        """Start empty: no head node."""
        self.head = None
    def Print_ll(self):
        """Print each node's data in order, or a message when empty."""
        node = self.head
        if node is None:
            print("LinkedList is empty")
            return
        while node is not None:
            print(node.data)
            node = node.ref
# Demo: printing a freshly created (empty) list reports "LinkedList is empty".
ll=LinkedList()
ll.Print_ll()
| 21.833333 | 40 | 0.519084 | 361 | 0.918575 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.053435 |
10241faf582974a58757a74fbc6ec5ed03a8cff3 | 170 | py | Python | devlivery/ext/migrate/__init__.py | wlsouza/flasklivery | 564c6135d29493ae5efe074488cb0df7f811d889 | [
"Unlicense"
] | null | null | null | devlivery/ext/migrate/__init__.py | wlsouza/flasklivery | 564c6135d29493ae5efe074488cb0df7f811d889 | [
"Unlicense"
] | null | null | null | devlivery/ext/migrate/__init__.py | wlsouza/flasklivery | 564c6135d29493ae5efe074488cb0df7f811d889 | [
"Unlicense"
] | null | null | null | from flask import Flask
from flask_migrate import Migrate
from devlivery.ext.db import db
# Module-level Flask-Migrate extension, bound to an app via init_app().
migrate = Migrate()
def init_app(app: Flask):
    """Register Flask-Migrate on *app* using the shared SQLAlchemy db."""
    migrate.init_app(app, db)
| 15.454545 | 33 | 0.758824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1024a19ea737a54eb4b1646afd282457a464d5aa | 1,027 | py | Python | src/query_planner/storage_plan.py | alilakda/Eva | e3d447f81e1e47172e21758c059ad6f5ee21ffa4 | [
"Apache-2.0"
] | null | null | null | src/query_planner/storage_plan.py | alilakda/Eva | e3d447f81e1e47172e21758c059ad6f5ee21ffa4 | [
"Apache-2.0"
] | null | null | null | src/query_planner/storage_plan.py | alilakda/Eva | e3d447f81e1e47172e21758c059ad6f5ee21ffa4 | [
"Apache-2.0"
] | 1 | 2020-02-19T02:11:39.000Z | 2020-02-19T02:11:39.000Z | from src.models.catalog.video_info import VideoMetaInfo
from src.query_planner.abstract_plan import AbstractPlan
from src.query_planner.types import PlanNodeType
class StoragePlan(AbstractPlan):
"""
This is the plan used for retrieving the frames from the storage and
and returning to the higher levels.
"""
def __init__(self, video: VideoMetaInfo, batch_size: int = 1,
skip_frames: int = 0, offset: int = None, limit: int = None):
super().__init__(PlanNodeType.STORAGE_PLAN)
self._video = video
self._batch_size = batch_size
self._skip_frames = skip_frames
self._offset = offset
self._limit = limit
@property
def video(self):
return self._video
@property
def batch_size(self):
return self._batch_size
@property
def skip_frames(self):
return self._skip_frames
@property
def offset(self):
return self._offset
@property
def limit(self):
return self._limit
| 25.675 | 78 | 0.665044 | 862 | 0.839338 | 0 | 0 | 309 | 0.300876 | 0 | 0 | 124 | 0.12074 |
1025728984eda2735c08a9884289c0228f56f366 | 34 | py | Python | env/lib/python3.6/base64.py | xianjunzhengbackup/Cloud-Native-Python | 0b74303b444a74210bc2e95f13d17771f6f71583 | [
"MIT"
] | 2 | 2020-09-22T14:38:24.000Z | 2020-10-30T03:11:36.000Z | SDK/Example/Linkage_demo/work_with_AlwaysAI/pedestrian_tracking_demo/venv/lib/python3.6/base64.py | EyecloudAi/OpenNCC-SDK | efeebfe385a900280de6f46d80bd6fff0416aba3 | [
"Apache-2.0"
] | null | null | null | SDK/Example/Linkage_demo/work_with_AlwaysAI/pedestrian_tracking_demo/venv/lib/python3.6/base64.py | EyecloudAi/OpenNCC-SDK | efeebfe385a900280de6f46d80bd6fff0416aba3 | [
"Apache-2.0"
] | 1 | 2019-11-21T12:23:41.000Z | 2019-11-21T12:23:41.000Z | /usr/local/lib/python3.6/base64.py | 34 | 34 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
10284f0ef7e85d673663fac1fed0f6fb76403071 | 2,802 | py | Python | python-scripts/3nPlus1.py | leo237/scripts | 6e530ba4afd89a9747e62f1bd63eb2310791091c | [
"MIT"
] | null | null | null | python-scripts/3nPlus1.py | leo237/scripts | 6e530ba4afd89a9747e62f1bd63eb2310791091c | [
"MIT"
] | null | null | null | python-scripts/3nPlus1.py | leo237/scripts | 6e530ba4afd89a9747e62f1bd63eb2310791091c | [
"MIT"
] | null | null | null | import logging
import matplotlib.pyplot as plt
import argparse
from typing import List
from enum import Enum
from enum import Enum
class Constants:
    """Namespace for the script's constant definitions."""
    class PlotType(Enum):
        # Which graph the script renders (selected via --plot-type).
        HAILSTONE = 'HAILSTONE'
        PEAK = 'PEAK'
        STOPPING_TIMES = 'STOPPING_TIMES'
class Stats:
    """Collatz (3n+1) statistics for a single starting number."""
    def __init__(self, num: int, total_stopping_time: int,
                 step_values: List[int], peak: int):
        """Record the start number, step count, visited values and peak."""
        self.num = num
        self.total_stopping_time = total_stopping_time
        self.step_values = step_values
        self.peak = peak
def get_arguments():
    """Parse command-line arguments for the 3n+1 plot generator.

    Returns:
        argparse.Namespace with `num` (int, default 30) and
        `plot_type` (Constants.PlotType, default HAILSTONE).
    """
    parser = argparse.ArgumentParser(description='Generate 3n+1 graphs')
    parser.add_argument('--num', '-n', type=int, help='Generate 3n+1 function till num ', default=30)
    # The raw CLI string is coerced into a Constants.PlotType member by value.
    parser.add_argument('--plot-type', '-p', type=Constants.PlotType, help='What kind of plot type do you want', default=Constants.PlotType.HAILSTONE)
    args = parser.parse_args()
    return args
def func3nPlus1(x):
    """Apply one Collatz step: halve an even number, map an odd x to 3x + 1."""
    if x % 2:
        # Odd input.
        return int(3*x + 1)
    # Even input.
    return int(x/2)
def recursive_3nPlus1(num):
    """Run the full 3n+1 trajectory for `num` and return its Stats record."""
    current = num
    step_count = 0
    highest = 0
    trajectory = [current]
    logging.debug("Running function for : {}".format(num))
    while current > 1:
        # One Collatz step, then update the bookkeeping for the graphs.
        current = func3nPlus1(current)
        step_count += 1
        highest = max(highest, current)
        trajectory.append(current)
        logging.debug("f(res) : {} | Steps : {}".format(current, step_count))
    logging.info("Num : {} | Total Stopping Time : {} | FinalValue : {} | Peak : {}".format(num, step_count, current, highest))
    return Stats(num=num, total_stopping_time=step_count, step_values=trajectory, peak=highest)
def plot_hailstone(stats : List[Stats]):
    """Draw one hailstone (trajectory) line per Stats record, then show the figure."""
    for record in stats:
        values = record.step_values
        # x axis is simply the step index 0..len-1.
        plt.plot(list(range(len(values))), values, label=record.num)
    plt.xlabel("Steps")
    plt.ylabel("f(x)")
    plt.legend()
    plt.show()
def plot_peaks(stats : List[Stats]):
    """Plot the peak value reached by each starting number."""
    nums = [record.num for record in stats]
    peaks = [record.peak for record in stats]
    plot_x_y(nums, peaks, 'num', 'peaks', 'Peaks')
def plot_stopping_times(stats: List[Stats]):
    """Plot the total stopping time of each starting number."""
    nums = [record.num for record in stats]
    times = [record.total_stopping_time for record in stats]
    plot_x_y(nums, times, 'num', 'total_stopping_time', 'Stopping Times')
def plot_x_y(x : List[int], y: List[int], xlabel : str, ylabel:str, title:str = None):
    """Render a single x/y line plot with the given axis labels and title, then show it."""
    plt.plot(x, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    # NOTE(review): plt.plot above is given no label=, so legend() has nothing
    # to display (matplotlib warns about missing labelled artists) - confirm intent.
    plt.legend()
    plt.show()
if __name__ == '__main__':
logging.basicConfig(format='[%(levelname)s] %(asctime)s : %(message)s', level=logging.INFO)
args = get_arguments()
plot_type = args.plot_type
stats = []
for i in range(args.num):
stat = recursive_3nPlus1(i)
stats.append(stat)
# Generate graph
if plot_type == Constants.PlotType.HAILSTONE:
plot_hailstone(stats)
if plot_type == Constants.PlotType.PEAK:
plot_peaks(stats)
if plot_type == Constants.PlotType.STOPPING_TIMES:
plot_stopping_times(stats) | 23.35 | 147 | 0.692363 | 341 | 0.121699 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.158815 |
102bacad1f71192a75efa312e5576727d5d197a4 | 2,894 | py | Python | test/functional/tests/initialize/test_clean_reboot.py | Ostrokrzew/open-cas-linux | 35eb5682c9aae13ee7b44da5acc2dd0b593a0b10 | [
"BSD-3-Clause-Clear"
] | 139 | 2019-03-29T08:01:40.000Z | 2022-03-19T01:01:44.000Z | test/functional/tests/initialize/test_clean_reboot.py | Ostrokrzew/open-cas-linux | 35eb5682c9aae13ee7b44da5acc2dd0b593a0b10 | [
"BSD-3-Clause-Clear"
] | 604 | 2019-04-12T14:18:59.000Z | 2022-03-31T18:19:56.000Z | test/functional/tests/initialize/test_clean_reboot.py | Ostrokrzew/open-cas-linux | 35eb5682c9aae13ee7b44da5acc2dd0b593a0b10 | [
"BSD-3-Clause-Clear"
] | 64 | 2019-03-29T08:44:01.000Z | 2022-03-30T09:11:30.000Z | #
# Copyright(c) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import pytest
from api.cas import casadm
from api.cas.cache_config import CacheMode
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_utils.filesystem.file import File
from test_utils.os_utils import drop_caches, DropCachesMode, sync
from test_utils.size import Size, Unit
mount_point = "/mnt/test"
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.parametrizex("filesystem", Filesystem)
@pytest.mark.parametrizex("reboot_type", ["soft", "hard"])
@pytest.mark.require_plugin("power_control")
def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
    """
    title: Planned system shutdown test.
    description: Test for data consistency after clean system shutdown.
    pass_criteria:
      - DUT should reboot successfully.
      - Checksum of file on core device should be the same before and after reboot.
    """
    with TestRun.step("Prepare CAS device."):
        # Start a 1 GiB cache on the first cache-disk partition and add the core.
        cache_disk = TestRun.disks['cache']
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_dev = cache_disk.partitions[0]
        core_dev = TestRun.disks['core']
        cache = casadm.start_cache(cache_dev, cache_mode, force=True)
        core = cache.add_core(core_dev)
        core.create_filesystem(filesystem, blocksize=int(Size(1, Unit.Blocks4096)))
        core.mount(mount_point)
    with TestRun.step("Create file on cache and count its checksum."):
        # 1024 x 1 KiB of zeros written through the cached filesystem.
        test_file = File(os.path.join(mount_point, "test_file"))
        Dd()\
            .input("/dev/zero")\
            .output(test_file.full_path)\
            .block_size(Size(1, Unit.KibiByte))\
            .count(1024)\
            .run()
        test_file.refresh_item()
        test_file_md5 = test_file.md5sum()
        # Flush page cache so the data is on the devices before reboot.
        sync()
        drop_caches(DropCachesMode.ALL)
    with TestRun.step("Reset platform."):
        # "soft" uses an OS reboot; "hard" power-cycles via the power_control plugin.
        if reboot_type == "soft":
            TestRun.executor.reboot()
        else:
            power_control = TestRun.plugin_manager.get_plugin('power_control')
            power_control.power_cycle()
    with TestRun.step("Load cache."):
        casadm.load_cache(cache_dev)
        core.mount(mount_point)
    with TestRun.step("Check file md5sum."):
        # Data consistency check: checksum must match the pre-reboot value.
        test_file.refresh_item()
        if test_file_md5 != test_file.md5sum():
            TestRun.LOGGER.error("Checksums does not match - file is corrupted.")
        else:
            TestRun.LOGGER.info("File checksum is correct.")
    with TestRun.step("Remove test file."):
        test_file.remove()
| 36.175 | 87 | 0.685211 | 0 | 0 | 0 | 0 | 2,343 | 0.809606 | 0 | 0 | 737 | 0.254665 |
102d40488d0cca4d36fca9f9b896d9f118df7173 | 393 | py | Python | 4/state/ResultState.py | ytyaru/Pygame.GameState.201707251432 | c2a67c9f0c6ff9ee5c495be02d8c775b897f16d2 | [
"CC0-1.0"
] | null | null | null | 4/state/ResultState.py | ytyaru/Pygame.GameState.201707251432 | c2a67c9f0c6ff9ee5c495be02d8c775b897f16d2 | [
"CC0-1.0"
] | null | null | null | 4/state/ResultState.py | ytyaru/Pygame.GameState.201707251432 | c2a67c9f0c6ff9ee5c495be02d8c775b897f16d2 | [
"CC0-1.0"
] | null | null | null | import pygame
from pygame.locals import *
from .GameState import GameState
class ResultState(GameState):
    """Result-screen game state; any confirm key advances to the next state."""

    def __init__(self, stateSwitcher):
        super().__init__(stateSwitcher)

    def Event(self, event):
        # Enter, Space or Z confirms and moves on.
        if event.type == KEYDOWN and event.key in (K_RETURN, K_SPACE, K_z):
            self.Switcher.Next()

    def Draw(self, screen):
        # Solid blue background.
        screen.fill((0, 0, 255))
| 35.727273 | 102 | 0.692112 | 316 | 0.804071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
102e04336b537dbf112a2ddd7fae2de699b6b707 | 7,093 | py | Python | bindgen.py | fitzgen/wasmtime-py | 02a2af5e012a44af77690d59fd97df4d3caba962 | [
"Apache-2.0"
] | null | null | null | bindgen.py | fitzgen/wasmtime-py | 02a2af5e012a44af77690d59fd97df4d3caba962 | [
"Apache-2.0"
] | null | null | null | bindgen.py | fitzgen/wasmtime-py | 02a2af5e012a44af77690d59fd97df4d3caba962 | [
"Apache-2.0"
] | null | null | null | # type: ignore
# This is a small script to parse the header files from wasmtime and generate
# appropriate function definitions in Python for each exported function. This
# also reflects types into Python with `ctypes`. While there's at least one
# other generate that does this already it seemed to not quite fit our purposes
# with lots of extra an unnecessary boilerplate.
from pycparser import c_ast, parse_file
class Visitor(c_ast.NodeVisitor):
    """AST visitor that accumulates the generated Python bindings in `self.ret`.

    Walks the preprocessed wasmtime.h AST and emits, as source text:
    ctypes Structure classes for `wasm*` structs, typedef aliases, and
    a `_name`/`name` wrapper pair for each exported `wasm*` function.
    """
    def __init__(self):
        # Header of the generated file: lint suppression, warning banner, imports.
        self.ret = ''
        self.ret += '# flake8: noqa\n'
        self.ret += '#\n'
        self.ret += '# This is a procedurally generated file, DO NOT EDIT\n'
        self.ret += '# instead edit `./bindgen.py` at the root of the repo\n'
        self.ret += '\n'
        self.ret += 'from ctypes import *\n'
        self.ret += 'from typing import Any\n'
        self.ret += 'from ._ffi import dll, wasm_val_t\n'
        # Guards against emitting wasm_ref_t twice (see visit_Struct).
        self.generated_wasm_ref_t = False
    # Skip all function definitions, we don't bind those
    def visit_FuncDef(self, node):
        pass
    def visit_Struct(self, node):
        """Emit a ctypes Structure subclass for each named `wasm*` struct."""
        if not node.name or not node.name.startswith('was'):
            return
        # This is hand-generated since it has an anonymous union in it
        if node.name == 'wasm_val_t':
            return
        # This is defined twice in the header file, but we only want to insert
        # one definition.
        if node.name == 'wasm_ref_t':
            if self.generated_wasm_ref_t:
                return
            self.generated_wasm_ref_t = True
        self.ret += "\n"
        self.ret += "class {}(Structure):\n".format(node.name)
        if node.decls:
            # Struct with a visible body: mirror its fields via _fields_.
            self.ret += "    _fields_ = [\n"
            for decl in node.decls:
                self.ret += "        (\"{}\", {}),\n".format(decl.name, type_name(decl.type))
            self.ret += "    ]\n"
        else:
            # Opaque (forward-declared) struct.
            self.ret += "    pass\n"
    def visit_Typedef(self, node):
        """Emit `alias = underlying` for `wasm*` typedefs (visiting the target first)."""
        if not node.name or not node.name.startswith('was'):
            return
        self.visit(node.type)
        tyname = type_name(node.type)
        # Skip self-referential typedefs like `typedef struct wasm_x wasm_x`.
        if tyname != node.name:
            self.ret += "\n"
            self.ret += "{} = {}\n".format(node.name, type_name(node.type))
    def visit_FuncDecl(self, node):
        """Emit a typed Python wrapper + ctypes prototype for each bound function."""
        # NOTE(review): if node.type is neither TypeDecl nor PtrDecl, `ptr` and
        # `ty` stay unbound and `ty.declname` below raises UnboundLocalError -
        # confirm every FuncDecl in the header matches one of the two cases.
        if isinstance(node.type, c_ast.TypeDecl):
            ptr = False
            ty = node.type
        elif isinstance(node.type, c_ast.PtrDecl):
            ptr = True
            ty = node.type.type
        name = ty.declname
        # This is probably a type, skip it
        if name.endswith('_t'):
            return
        # Skip anything not related to wasi or wasm
        if not name.startswith('was'):
            return
        # TODO: these are bugs with upstream wasmtime
        if name == 'wasm_frame_copy':
            return
        if name == 'wasm_frame_instance':
            return
        if name == 'wasm_module_serialize':
            return
        if name == 'wasm_module_deserialize':
            return
        if 'ref_as_' in name:
            return
        if 'extern_const' in name:
            return
        if 'foreign' in name:
            return
        ret = ty.type
        argpairs = []
        argtypes = []
        argnames = []
        if node.args:
            for i, param in enumerate(node.args.params):
                argname = param.name
                # Unnamed params and Python keywords get a positional name.
                if not argname or argname == "import" or argname == "global":
                    argname = "arg{}".format(i)
                argpairs.append("{}: Any".format(argname))
                argnames.append(argname)
                argtypes.append(type_name(param.type))
        retty = type_name(node.type, ptr, typing=True)
        # Private `_name` holds the raw dll symbol with restype/argtypes set;
        # the public `name` wrapper carries the typing annotations.
        self.ret += "\n"
        self.ret += "_{0} = dll.{0}\n".format(name)
        self.ret += "_{}.restype = {}\n".format(name, type_name(ret, ptr))
        self.ret += "_{}.argtypes = [{}]\n".format(name, ', '.join(argtypes))
        self.ret += "def {}({}) -> {}:\n".format(name, ', '.join(argpairs), retty)
        self.ret += "    return _{}({}) # type: ignore\n".format(name, ', '.join(argnames))
def type_name(ty, ptr=False, typing=False):
    """Map a pycparser type node to source text for the generated bindings.

    With typing=False the result is a ctypes expression (e.g. "c_uint32",
    "POINTER(wasm_store_t)"); with typing=True it is a Python typing
    annotation for the wrapper signature (e.g. "int", "pointer").
    `ptr` indicates the caller is one level of indirection above `ty`.
    """
    # Unwrap TypeDecl wrappers down to the concrete type node.
    while isinstance(ty, c_ast.TypeDecl):
        ty = ty.type
    if ptr:
        if typing:
            return "pointer"
        if isinstance(ty, c_ast.IdentifierType) and ty.names[0] == "void":
            return "c_void_p"
        elif not isinstance(ty, c_ast.FuncDecl):
            # Function pointers fall through and are handled as FuncDecl below.
            return "POINTER({})".format(type_name(ty, False, typing))
    if isinstance(ty, c_ast.IdentifierType):
        assert(len(ty.names) == 1)
        if ty.names[0] == "void":
            return "None"
        elif ty.names[0] == "_Bool":
            return "c_bool"
        elif ty.names[0] == "byte_t":
            return "c_ubyte"
        elif ty.names[0] == "uint8_t":
            return "c_uint8"
        elif ty.names[0] == "uint32_t":
            return "int" if typing else "c_uint32"
        elif ty.names[0] == "uint64_t":
            return "c_uint64"
        elif ty.names[0] == "size_t":
            return "int" if typing else "c_size_t"
        elif ty.names[0] == "char":
            return "c_char"
        elif ty.names[0] == "int":
            return "int" if typing else "c_int"
        # ctypes values can't stand as typedefs, so just use the pointer type here
        elif typing and 'func_callback' in ty.names[0]:
            return "pointer"
        elif typing and ('size' in ty.names[0] or 'pages' in ty.names[0]):
            return "int"
        # Anything else is assumed to be a name emitted elsewhere in the bindings.
        return ty.names[0]
    elif isinstance(ty, c_ast.Struct):
        return ty.name
    elif isinstance(ty, c_ast.FuncDecl):
        # Build CFUNCTYPE(restype, argtypes...).
        tys = []
        # TODO: apparently errors are thrown if we faithfully represent the
        # pointer type here, seems odd?
        if isinstance(ty.type, c_ast.PtrDecl):
            tys.append("c_size_t")
        else:
            tys.append(type_name(ty.type))
        if ty.args.params:
            for param in ty.args.params:
                tys.append(type_name(param.type))
        return "CFUNCTYPE({})".format(', '.join(tys))
    elif isinstance(ty, c_ast.PtrDecl) or isinstance(ty, c_ast.ArrayDecl):
        # Arrays decay to pointers in the generated bindings.
        return type_name(ty.type, True, typing)
    else:
        raise RuntimeError("unknown {}".format(ty))
# Preprocess and parse the wasmtime header. The -D stubs blank out GCC-specific
# keywords/attributes that pycparser's C99 grammar cannot parse.
ast = parse_file(
    './wasmtime/include/wasmtime.h',
    use_cpp=True,
    cpp_path='gcc',
    cpp_args=[
        '-E',
        '-I./wasmtime/include',
        '-D__attribute__(x)=',
        '-D__asm__(x)=',
        '-D__asm(x)=',
        '-D__volatile__(x)=',
        '-D_Static_assert(x, y)=',
        '-Dstatic_assert(x, y)=',
        '-D__restrict=',
        '-D__restrict__=',
        '-D__extension__=',
        '-D__inline__=',
        '-D__signed=',
        '-D__builtin_va_list=int',
    ]
)
v = Visitor()
v.visit(ast)
if __name__ == "__main__":
    # Run as a script: (re)write the generated bindings file.
    with open("wasmtime/_bindings.py", "w") as f:
        f.write(v.ret)
else:
    # Imported (e.g. from CI/tests): verify the checked-in bindings are current.
    with open("wasmtime/_bindings.py", "r") as f:
        contents = f.read()
    if contents != v.ret:
        raise RuntimeError("bindings need an update, run this script")
| 34.100962 | 93 | 0.549556 | 3,690 | 0.520231 | 0 | 0 | 0 | 0 | 0 | 0 | 2,201 | 0.310306 |
102e1e5c55ff01d3808c58b77752bddaea6f8b94 | 1,115 | py | Python | Chapter38.ManagedAttributes/3-desc-state-inst.py | mindnhand/Learning-Python-5th | 3dc1b28d6e048d512bf851de6c7f6445edfe7b84 | [
"MIT"
] | null | null | null | Chapter38.ManagedAttributes/3-desc-state-inst.py | mindnhand/Learning-Python-5th | 3dc1b28d6e048d512bf851de6c7f6445edfe7b84 | [
"MIT"
] | null | null | null | Chapter38.ManagedAttributes/3-desc-state-inst.py | mindnhand/Learning-Python-5th | 3dc1b28d6e048d512bf851de6c7f6445edfe7b84 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#encoding=utf-8
#------------------------------------------------
# Usage: python3 3-desc-state-inst.py
# Description: descriptor for attribute intercept
#------------------------------------------------
class InstState:
    """Data descriptor whose state lives on each owning instance as `_X`.

    (In Python 2.X this would need to derive from `object` to be a
    new-style descriptor.)
    """

    def __get__(self, instance, owner):
        print('InstState get')
        # The client class is expected to have assigned instance._X already.
        stored = instance._X
        return stored * 10

    def __set__(self, instance, value):
        print('InstState set')
        instance._X = value
# Client class
class CalcAttrs:
    """Client class demonstrating the InstState descriptor alongside plain attrs."""
    X = InstState() # Descriptor class attr (computes from instance._X)
    Y = 3 # Class attr
    def __init__(self):
        self._X = 2 # Instance attr read by the InstState descriptor
        self.Z = 4 # Instance attr
if __name__ == '__main__':
    # Demo: descriptor-managed X vs plain class/instance attributes.
    obj = CalcAttrs()
    print(obj.X, obj.Y, obj.Z) # X is computed, others are not
    obj.X = 5 # X assignment is intercepted
    CalcAttrs.Y = 6 # Y reassigned in class
    obj.Z = 7 # Z assigned in instance
    print(obj.X, obj.Y, obj.Z)
    obj2 = CalcAttrs() # But X differs now, like Z!
    print(obj2.X, obj2.Y, obj2.Z)
| 28.589744 | 63 | 0.556951 | 488 | 0.437668 | 0 | 0 | 0 | 0 | 0 | 0 | 542 | 0.486099 |
102f51bc17643b398143c26098d6f0bd199096fd | 342 | py | Python | src/property_app/app_info.py | almostprod/property-app | 2e9dc6c64e7fd91d287fc95e513fa3ab1079fa54 | [
"Apache-2.0"
] | 2 | 2020-03-03T16:52:31.000Z | 2020-03-17T21:35:30.000Z | src/property_app/app_info.py | amcclosky/property-app | 9afb0210739955ff19bfcb477acdbd07521ce851 | [
"Apache-2.0"
] | 1 | 2021-05-11T16:54:56.000Z | 2021-05-11T16:54:56.000Z | src/property_app/app_info.py | amcclosky/property-app | 9afb0210739955ff19bfcb477acdbd07521ce851 | [
"Apache-2.0"
] | 1 | 2020-05-04T06:39:35.000Z | 2020-05-04T06:39:35.000Z | import time
from dataclasses import dataclass
from datetime import date, datetime
from property_app.config import get_config
config = get_config()
@dataclass
class AppInfo:
    """Application build/deployment metadata.

    NOTE(review): all defaults below are evaluated once, when the class is
    defined at import time, so every AppInfo() shares the same values; if
    per-instance timestamps are intended, switch to field(default_factory=...).
    """
    # ASGI application path taken from the loaded configuration.
    project: str = config.ASGI_APP
    # Build hash taken from the configuration.
    commit_hash: str = config.APP_BUILD_HASH
    # datetime.today() returns a datetime, not a date - annotation corrected.
    build_date: datetime = datetime.today()
    # Unix epoch seconds captured at import time.
    build_epoch_sec: int = int(time.time())
1030df14c1adf08ce6691dd030b83b86e5a978e4 | 133 | py | Python | services/explorer/config/gunicorn/config.py | cheperuiz/elasticskill | 41d9e72a59468ee2676538a584448d16d59a7c51 | [
"MIT"
] | null | null | null | services/explorer/config/gunicorn/config.py | cheperuiz/elasticskill | 41d9e72a59468ee2676538a584448d16d59a7c51 | [
"MIT"
] | 2 | 2020-09-11T15:10:35.000Z | 2022-01-22T10:29:45.000Z | services/explorer/config/gunicorn/config.py | cheperuiz/elasticskill | 41d9e72a59468ee2676538a584448d16d59a7c51 | [
"MIT"
] | null | null | null | bind = "0.0.0.0:5000"
backlog = 2048
workers = 1
worker_class = "sync"
threads = 16
spew = False
reload = True
loglevel = "debug"
| 11.083333 | 21 | 0.661654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.203008 |
103175058f2fa208dcde4dafb16a9cb6d6a48a58 | 723 | py | Python | Boot2Root/hackthebox/Tenten/files/exploit.py | Kan1shka9/CTFs | 33ab33e094ea8b52714d5dad020c25730e91c0b0 | [
"MIT"
] | 21 | 2016-02-06T14:30:01.000Z | 2020-09-11T05:39:17.000Z | Boot2Root/hackthebox/Tenten/files/exploit.py | Kan1shka9/CTFs | 33ab33e094ea8b52714d5dad020c25730e91c0b0 | [
"MIT"
] | null | null | null | Boot2Root/hackthebox/Tenten/files/exploit.py | Kan1shka9/CTFs | 33ab33e094ea8b52714d5dad020c25730e91c0b0 | [
"MIT"
] | 7 | 2017-02-02T16:27:02.000Z | 2021-04-30T17:14:53.000Z | import requests
print """
CVE-2015-6668
Title: CV filename disclosure on Job-Manager WP Plugin
Author: Evangelos Mourikis
Blog: https://vagmour.eu
Plugin URL: http://www.wp-jobmanager.com
Versions: <=0.7.25
"""
website = raw_input('Enter a vulnerable website: ')
filename = raw_input('Enter a file name: ')
filename2 = filename.replace(" ", "-")
for year in range(2017,2018):
for i in range(1,13):
for extension in {'png','jpeg','jpg'}:
URL = website + "/wp-content/uploads/" + str(year) + "/" + "{:02}".format(i) + "/" + filename2 + "." + extension
req = requests.get(URL)
if req.status_code==200:
print "[+] URL of CV found! " + URL
| 30.125 | 124 | 0.593361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.46473 |
10318f66a573c70a9037cd72320a499bd6bc01f7 | 1,129 | py | Python | setup.py | iomintz/sql-remove-comma | 503bb90c1fd3346ea783ef826247a5e44dc7c743 | [
"BSD-2-Clause-Patent"
] | null | null | null | setup.py | iomintz/sql-remove-comma | 503bb90c1fd3346ea783ef826247a5e44dc7c743 | [
"BSD-2-Clause-Patent"
] | null | null | null | setup.py | iomintz/sql-remove-comma | 503bb90c1fd3346ea783ef826247a5e44dc7c743 | [
"BSD-2-Clause-Patent"
] | null | null | null | #!/usr/bin/env python3
import setuptools
# Package metadata; the version is derived from git tags by setuptools_scm
# (use_scm_version=True + the setup_requires entry below).
setuptools.setup(
    name='sql-remove-comma',
    description='remove illegal trailing commas from your SQL code',
    use_scm_version=True,
    author='Io Mintz',
    author_email='io@mintz.cc',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    license='BSD-2-Clause-Patent',
    packages=['sql_remove_comma'],
    setup_requires=['setuptools_scm'],
    install_requires=['sqlparse>=0.3.0,<0.4.0'],
    extras_require={
        'test': [
            'pytest',
            'pytest-cov',
        ],
    },
    # Installs a `sql-remove-comma` console command mapped to the package main.
    entry_points={
        'console_scripts': [
            'sql-remove-comma = sql_remove_comma.__main__:main',
        ],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Database',
        'Topic :: Software Development :: Pre-processors',
        'Topic :: Utilities',
    ],
)
| 26.255814 | 65 | 0.672276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.65279 |
1032c536542b9f2909cde345955741bac4143f87 | 418 | py | Python | RQ1and2/code/results/test_all.py | CESEL/BatchBuilderResearch | 0a1e3ee095e568fdf2f7ab1ce864efc9b490af6e | [
"MIT"
] | null | null | null | RQ1and2/code/results/test_all.py | CESEL/BatchBuilderResearch | 0a1e3ee095e568fdf2f7ab1ce864efc9b490af6e | [
"MIT"
] | null | null | null | RQ1and2/code/results/test_all.py | CESEL/BatchBuilderResearch | 0a1e3ee095e568fdf2f7ab1ce864efc9b490af6e | [
"MIT"
] | null | null | null | from utils import project_list
from learning import IncrementalLearningModel
def get_testing_dataset_size(prj):
    """Return the number of test samples produced for one project.

    NOTE(review): the positional arguments 'RF', 30 and 1 are undocumented
    here - confirm their meaning against IncrementalLearningModel.
    """
    l = IncrementalLearningModel(prj['name'], 'RF', 30, 1)
    # Only the test-label vector length is needed; predictions are discarded.
    y_proba, y_test = l.get_predicted_data()
    return len(y_test)
def main():
    """Print each project's name and its testing-dataset size."""
    # idx is unused; enumerate kept as-is to preserve behavior.
    for idx, prj in enumerate(project_list):
        print(prj['name'])
        # TestAll num of execution
        print(get_testing_dataset_size(prj))
main()
| 19.904762 | 58 | 0.696172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.100478 |
1033ffe67b91f750cd6a18292a63c21167fe56d7 | 228 | py | Python | src/zebra_refresh.py | r0x73/alfred-zebra | fe802044fdc34172811c77433967bce46f084ed1 | [
"MIT"
] | 3 | 2017-08-10T12:54:03.000Z | 2018-01-17T08:42:47.000Z | src/zebra_refresh.py | r0x73/alfred-zebra | fe802044fdc34172811c77433967bce46f084ed1 | [
"MIT"
] | 4 | 2016-08-03T09:41:06.000Z | 2016-09-01T06:34:12.000Z | src/zebra_refresh.py | r0x73/alfred-zebra | fe802044fdc34172811c77433967bce46f084ed1 | [
"MIT"
] | null | null | null | import zebra
from workflow import Workflow
if __name === '__main__':
wf = Workflow()
wf.cache_data('zebra_all_projects', zebra.get_all_projects())
wf.cache_data('zebra_aliased_activities', zebra.get_aliased_activities()) | 28.5 | 75 | 0.776316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.245614 |
103416f9713f35181916b130993432d39011fa96 | 10,729 | py | Python | car_core/car.py | edwardyehuang/CAR | a9f179ebd9ac38516c9827ca95f1f38a28aa317f | [
"MIT"
] | 6 | 2022-03-15T03:32:35.000Z | 2022-03-31T13:43:33.000Z | car_core/car.py | edwardyehuang/CAR | a9f179ebd9ac38516c9827ca95f1f38a28aa317f | [
"MIT"
] | null | null | null | car_core/car.py | edwardyehuang/CAR | a9f179ebd9ac38516c9827ca95f1f38a28aa317f | [
"MIT"
] | 1 | 2022-03-31T13:43:37.000Z | 2022-03-31T13:43:37.000Z |
# ================================================================
# MIT License
# Copyright (c) 2022 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
from iseg.layers.normalizations import normalization
from iseg.utils.attention_utils import *
from iseg.layers.model_builder import resize_image, get_training_value
from iseg.vis.vismanager import get_visualization_manager
from car_core.utils import (
get_flatten_one_hot_label,
get_class_sum_features_and_counts,
get_inter_class_relative_loss,
get_intra_class_absolute_loss,
get_pixel_inter_class_relative_loss,
)
class ClassAwareRegularization(tf.keras.Model):
    """Class-Aware Regularization (CAR) head.

    At training time, adds intra-class absolute losses (pull pixels toward
    their class center) and inter-class relative losses (push class centers
    / pixels of different classes apart) computed from the feature map and
    the ground-truth label. At inference it is a plain 1x1 linear projection
    (optionally followed by conv+norm+relu when apply_convs=True).
    """
    def __init__(
        self,
        train_mode=False,
        use_inter_class_loss=True,
        use_intra_class_loss=True,
        intra_class_loss_remove_max=False,
        use_inter_c2c_loss=True,
        use_inter_c2p_loss=False,
        intra_class_loss_rate=1,
        inter_class_loss_rate=1,
        num_class=21,
        ignore_label=0,
        pooling_rates=[1],
        use_batch_class_center=True,
        use_last_class_center=False,
        last_class_center_decay=0.9,
        inter_c2c_loss_threshold=0.5,
        inter_c2p_loss_threshold=0.25,
        filters=512,
        apply_convs=False,
        name=None,
    ):
        super().__init__(name=name)
        self.vis_manager = get_visualization_manager()
        self.train_mode = train_mode
        self.use_inter_class_loss = use_inter_class_loss
        self.use_intra_class_loss = use_intra_class_loss
        self.intra_class_loss_rate = intra_class_loss_rate
        self.inter_class_loss_rate = inter_class_loss_rate
        self.num_class = num_class
        self.ignore_label = ignore_label
        self.inter_c2c_loss_threshold = inter_c2c_loss_threshold
        self.inter_c2p_loss_threshold = inter_c2p_loss_threshold
        self.intra_class_loss_remove_max = intra_class_loss_remove_max
        self.use_inter_c2c_loss = use_inter_c2c_loss
        self.use_inter_c2p_loss = use_inter_c2p_loss
        self.filters = filters
        self.apply_convs = apply_convs
        # Normalize pooling_rates to a list (accepts scalar, tuple or list).
        if isinstance(pooling_rates, tuple):
            pooling_rates = list(pooling_rates)
        if not isinstance(pooling_rates, list):
            pooling_rates = [pooling_rates]
        self.pooling_rates = pooling_rates
        self.use_batch_class_center = use_batch_class_center
        self.use_last_class_center = use_last_class_center
        self.last_class_center_decay = last_class_center_decay
        # Echo the full configuration for experiment logs.
        print(f"------CAR settings------")
        print(f"------train_mode = {train_mode}")
        print(f"------use_intra_class_loss = {use_intra_class_loss}")
        print(f"------use_inter_class_loss = {use_inter_class_loss}")
        print(f"------intra_class_loss_rate = {intra_class_loss_rate}")
        print(f"------inter_class_loss_rate = {inter_class_loss_rate}")
        print(f"------use_batch_class_center = {use_batch_class_center}")
        print(f"------use_last_class_center = {use_last_class_center}")
        print(f"------last_class_center_decay = {last_class_center_decay}")
        print(f"------pooling_rates = {pooling_rates}")
        print(f"------inter_c2c_loss_threshold = {inter_c2c_loss_threshold}")
        print(f"------inter_c2p_loss_threshold = {inter_c2p_loss_threshold}")
        print(f"------intra_class_loss_remove_max = {intra_class_loss_remove_max}")
        print(f"------use_inter_c2c_loss = {use_inter_c2c_loss}")
        print(f"------use_inter_c2p_loss = {use_inter_c2p_loss}")
        print(f"------filters = {filters}")
        print(f"------apply_convs = {apply_convs}")
        print(f"------num_class = {num_class}")
        print(f"------ignore_label = {ignore_label}")
    def add_car_losses(self, features, label=None, extra_prefix=None, training=None):
        """Compute and register the CAR losses/metrics for one feature map."""
        # features : [N, H, W, C]
        training = get_training_value(training)
        loss_name_prefix = f"{self.name}"
        if extra_prefix is not None:
            loss_name_prefix = f"{loss_name_prefix}_{extra_prefix}"
        inputs_shape = tf.shape(features)
        height = inputs_shape[-3]
        width = inputs_shape[-2]
        # Labels are nearest-resized to the (possibly pooled) feature resolution.
        label = resize_image(label, (height, width), method="nearest")
        tf.debugging.check_numerics(features, "features contains nan or inf")
        flatten_features = flatten_hw(features)
        not_ignore_spatial_mask = tf.cast(label, tf.int32) != self.ignore_label # [N, H, W, 1]
        not_ignore_spatial_mask = flatten_hw(not_ignore_spatial_mask)
        one_hot_label = get_flatten_one_hot_label(
            label, num_class=self.num_class, ignore_label=self.ignore_label
        ) # [N, HW, class]
        ####################################################################################
        # Per-image class centers: sum of pixel features per class + pixel counts.
        class_sum_features, class_sum_non_zero_map = get_class_sum_features_and_counts(
            flatten_features, one_hot_label
        ) # [N, class, C]
        if self.use_batch_class_center:
            # Aggregate the centers over the batch, and across replicas when
            # running under a distribution strategy.
            replica_context = tf.distribute.get_replica_context()
            class_sum_features_in_cross_batch = tf.reduce_sum(
                class_sum_features, axis=0, keepdims=True, name="class_sum_features_in_cross_batch"
            )
            class_sum_non_zero_map_in_cross_batch = tf.reduce_sum(
                class_sum_non_zero_map, axis=0, keepdims=True, name="class_sum_non_zero_map_in_cross_batch"
            )
            if replica_context:
                class_sum_features_in_cross_batch = replica_context.all_reduce(
                    tf.distribute.ReduceOp.SUM, class_sum_features_in_cross_batch
                )
                class_sum_non_zero_map_in_cross_batch = replica_context.all_reduce(
                    tf.distribute.ReduceOp.SUM, class_sum_non_zero_map_in_cross_batch
                )
            class_avg_features_in_cross_batch = tf.math.divide_no_nan(
                class_sum_features_in_cross_batch, class_sum_non_zero_map_in_cross_batch
            ) # [1, class, C]
            if self.use_last_class_center:
                # EMA update of the persistent class centers; classes absent
                # from this batch (count == 0) are left unchanged.
                batch_class_ignore_mask = tf.cast(class_sum_non_zero_map_in_cross_batch != 0, tf.int32)
                class_center_diff = class_avg_features_in_cross_batch - tf.cast(self.last_class_center, class_avg_features_in_cross_batch.dtype)
                class_center_diff *= (1 - self.last_class_center_decay) * tf.cast(batch_class_ignore_mask, class_center_diff.dtype)
                self.last_class_center.assign_add(class_center_diff)
                class_avg_features_in_cross_batch = tf.cast(self.last_class_center, tf.float32)
            class_avg_features = class_avg_features_in_cross_batch
        else:
            # Per-image class centers only.
            class_avg_features = tf.math.divide_no_nan(
                class_sum_features, class_sum_non_zero_map
            ) # [N, class, C]
        ####################################################################################
        if self.use_inter_class_loss and training:
            # Push different classes apart: center-to-center and/or center-to-pixel.
            inter_class_relative_loss = 0
            if self.use_inter_c2c_loss:
                inter_class_relative_loss += get_inter_class_relative_loss(
                    class_avg_features, inter_c2c_loss_threshold=self.inter_c2c_loss_threshold,
                )
            if self.use_inter_c2p_loss:
                inter_class_relative_loss += get_pixel_inter_class_relative_loss(
                    flatten_features, class_avg_features, one_hot_label, inter_c2p_loss_threshold=self.inter_c2p_loss_threshold,
                )
            self.add_loss(inter_class_relative_loss * self.inter_class_loss_rate)
            self.add_metric(inter_class_relative_loss, name=f"{loss_name_prefix}_orl")
        if self.use_intra_class_loss:
            # Pull each pixel toward its own class center (broadcast per pixel).
            same_avg_value = tf.matmul(one_hot_label, class_avg_features)
            tf.debugging.check_numerics(same_avg_value, "same_avg_value contains nan or inf")
            self_absolute_loss = get_intra_class_absolute_loss(
                flatten_features,
                same_avg_value,
                remove_max_value=self.intra_class_loss_remove_max,
                not_ignore_spatial_mask=not_ignore_spatial_mask,
            )
            if training:
                self.add_loss(self_absolute_loss * self.intra_class_loss_rate)
            # Metric is reported in eval as well; only the loss is train-only.
            self.add_metric(self_absolute_loss, name=f"{loss_name_prefix}_sal")
            print("Using self-loss")
    def build(self, input_shape):
        """Create the projection conv(s) and, optionally, the persistent class centers."""
        # Note that, this is not the best design for specified architecture, but a trade-off for generalizability
        # input_shape is (features_shape, label_shape) - see call(); channels
        # are capped at self.filters.
        channels = input_shape[0][-1]
        channels = self.filters if channels > self.filters else channels
        print(f"car channels = {channels}")
        self.linear_conv = tf.keras.layers.Conv2D(channels, (1, 1), use_bias=True, name="linear_conv",)
        if self.apply_convs:
            self.end_conv = tf.keras.layers.Conv2D(channels, (1, 1), use_bias=False, name="end_conv",)
            self.end_norm = normalization(name="end_norm")
        if self.use_last_class_center:
            # Non-trainable EMA class-center bank, updated in add_car_losses.
            self.last_class_center = self.add_weight(
                name="last_class_center",
                shape=[1, self.num_class, channels],
                dtype=tf.float32,
                initializer=tf.keras.initializers.GlorotUniform(),
                trainable=False,
            )
    def call(self, inputs, training=None):
        """Project features; in train_mode also register CAR losses per pooling rate.

        Args:
            inputs: a (features, label) pair.
        Returns:
            The projected feature map (losses are added as side effects).
        """
        inputs, label = inputs
        x = inputs
        # This linear conv (w/o norm&activation) can be merged
        # to the next one (end_conv) during inference
        # Simple (x * w0 + b) * w1 dot product
        # We keep it for better understanding
        x = self.linear_conv(x)
        y = tf.identity(x)
        if self.train_mode and get_training_value(training):
            x = tf.cast(x, tf.float32)
            tf.debugging.check_numerics(x, "inputs contains nan or inf")
            num_pooling_rates = len(self.pooling_rates)
            for i in range(num_pooling_rates):
                # Each rate adds losses on an average-pooled view of the features.
                pooling_rate = self.pooling_rates[i]
                sub_x = tf.identity(x, name=f"x_in_rate_{pooling_rate}")
                if pooling_rate > 1:
                    stride_size = (1, pooling_rate, pooling_rate, 1)
                    sub_x = tf.nn.avg_pool2d(sub_x, stride_size, stride_size, padding="SAME")
                self.add_car_losses(sub_x, label=label, extra_prefix=str(pooling_rate), training=training)
        if self.apply_convs:
            y = self.end_conv(y)
            y = self.end_norm(y, training=training)
            y = tf.nn.relu(y)
        return y
| 37.383275 | 144 | 0.637245 | 10,045 | 0.936248 | 0 | 0 | 0 | 0 | 0 | 0 | 2,090 | 0.194799 |
1034d4fa21e4a774fdb7001458be9aa8431d220c | 2,009 | py | Python | packs/orion/actions/list_sdk_verb_args.py | prajwal222/prajwal | ce1431858a9b54ae2a9546e9afab9f4b722bd210 | [
"Apache-2.0"
] | null | null | null | packs/orion/actions/list_sdk_verb_args.py | prajwal222/prajwal | ce1431858a9b54ae2a9546e9afab9f4b722bd210 | [
"Apache-2.0"
] | 1 | 2022-03-08T17:03:46.000Z | 2022-03-08T17:03:46.000Z | packs/orion/actions/list_sdk_verb_args.py | isabella232/st2contrib | 182af2fb6e26a1d002954b19a5cc7afc73307872 | [
"Apache-2.0"
] | 1 | 2019-07-10T21:23:49.000Z | 2019-07-10T21:23:49.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OrionBaseAction
class ListSdkVerbArgs(OrionBaseAction):
    """StackStorm action: list the arguments of an Orion SDK verb."""
    def run(self, platform, entity_name, verb_name):
        """
        List the arguments of an Orion SDK Verb.
        Args:
            platform: The orion platform to act on.
            entity_name: The EntityName to query.
            verb_name: The VerbName to query.
        Returns:
            dict: {'verb_arguments': [...]} with one entry per argument,
            ordered by Position, taken from the Orion DB.
        Raises:
            NOTE(review): connection/query failures from self.connect or
            self.query may still propagate - confirm against OrionBaseAction.
        """
        results = {'verb_arguments': []}
        self.connect(platform)
        # SWQL with named parameters; bound via kargs below.
        swql = """SELECT EntityName, VerbName, Position, Name, Type,
        XmlTemplate, XmlSchemas, IsOptional
        FROM Metadata.VerbArgument WHERE EntityName=@EntityName
        and VerbName=@VerbName
        ORDER BY Position"""
        kargs = {'EntityName': entity_name,
                 'VerbName': verb_name}
        orion_data = self.query(swql, **kargs)
        # Reshape each DB row into the action's output schema.
        for item in orion_data['results']:
            results['verb_arguments'].append(
                {'position': item['Position'],
                 'name': item['Name'],
                 'type': item['Type'],
                 'optional': item['IsOptional']})
        return results
| 36.527273 | 74 | 0.648084 | 1,186 | 0.590343 | 0 | 0 | 0 | 0 | 0 | 0 | 1,470 | 0.731707 |
103601948f6165db5db865f0b5d72730ec02f8bd | 5,979 | py | Python | Ui_share.py | Mochongli/lanzou-gui | 0da48c627c70d7be4662e3312d135b28acdc1b57 | [
"MIT"
] | 2 | 2020-09-17T14:27:07.000Z | 2021-06-28T08:44:35.000Z | Ui_share.py | Mochongli/lanzou-gui | 0da48c627c70d7be4662e3312d135b28acdc1b57 | [
"MIT"
] | null | null | null | Ui_share.py | Mochongli/lanzou-gui | 0da48c627c70d7be4662e3312d135b28acdc1b57 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/rach/Documents/lanzou-gui/share.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """UI layout for the lanzou share-info dialog (generated by pyuic5 from share.ui).

    NOTE: this class is auto-generated; hand edits other than comments will be
    lost the next time the .ui file is regenerated.
    """

    def setupUi(self, Dialog):
        """Create all widgets, lay them out on *Dialog*, and wire basic signals."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(390, 310)
        Dialog.setMinimumSize(QtCore.QSize(340, 260))
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.out_layout = QtWidgets.QVBoxLayout()
        self.out_layout.setObjectName("out_layout")
        # Logo banner across the top of the dialog.
        self.logo = QtWidgets.QLabel(Dialog)
        self.logo.setObjectName("logo")
        self.out_layout.addWidget(self.logo)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left column: right-aligned field labels (one per value in the grid).
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.lb_name = QtWidgets.QLabel(Dialog)
        self.lb_name.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_name.setObjectName("lb_name")
        self.verticalLayout.addWidget(self.lb_name)
        self.lb_size = QtWidgets.QLabel(Dialog)
        self.lb_size.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_size.setObjectName("lb_size")
        self.verticalLayout.addWidget(self.lb_size)
        self.lb_time = QtWidgets.QLabel(Dialog)
        self.lb_time.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_time.setObjectName("lb_time")
        self.verticalLayout.addWidget(self.lb_time)
        self.lb_dl_count = QtWidgets.QLabel(Dialog)
        self.lb_dl_count.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_dl_count.setObjectName("lb_dl_count")
        self.verticalLayout.addWidget(self.lb_dl_count)
        self.lb_share_url = QtWidgets.QLabel(Dialog)
        self.lb_share_url.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_share_url.setObjectName("lb_share_url")
        self.verticalLayout.addWidget(self.lb_share_url)
        self.lb_pwd = QtWidgets.QLabel(Dialog)
        self.lb_pwd.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_pwd.setObjectName("lb_pwd")
        self.verticalLayout.addWidget(self.lb_pwd)
        self.lb_dl_link = QtWidgets.QLabel(Dialog)
        self.lb_dl_link.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
        )
        self.lb_dl_link.setObjectName("lb_dl_link")
        self.verticalLayout.addWidget(self.lb_dl_link)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # Right column: the value widgets, positioned by grid row to line up
        # with the labels on the left.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setHorizontalSpacing(10)
        self.gridLayout.setObjectName("gridLayout")
        self.tx_share_url = QtWidgets.QLineEdit(Dialog)
        self.tx_share_url.setObjectName("tx_share_url")
        self.gridLayout.addWidget(self.tx_share_url, 5, 0, 1, 1)
        self.tx_time = QtWidgets.QLabel(Dialog)
        self.tx_time.setText("")
        self.tx_time.setObjectName("tx_time")
        self.gridLayout.addWidget(self.tx_time, 3, 0, 1, 1)
        self.tx_size = QtWidgets.QLabel(Dialog)
        self.tx_size.setText("")
        self.tx_size.setObjectName("tx_size")
        self.gridLayout.addWidget(self.tx_size, 1, 0, 1, 1)
        self.tx_name = QtWidgets.QLineEdit(Dialog)
        self.tx_name.setObjectName("tx_name")
        self.gridLayout.addWidget(self.tx_name, 0, 0, 1, 1)
        self.tx_dl_link = QtWidgets.QTextBrowser(Dialog)
        self.tx_dl_link.setObjectName("tx_dl_link")
        self.gridLayout.addWidget(self.tx_dl_link, 8, 0, 1, 1)
        self.tx_dl_count = QtWidgets.QLabel(Dialog)
        self.tx_dl_count.setText("")
        self.tx_dl_count.setObjectName("tx_dl_count")
        self.gridLayout.addWidget(self.tx_dl_count, 4, 0, 1, 1)
        self.tx_pwd = QtWidgets.QLineEdit(Dialog)
        self.tx_pwd.setObjectName("tx_pwd")
        self.gridLayout.addWidget(self.tx_pwd, 6, 0, 1, 1)
        self.horizontalLayout.addLayout(self.gridLayout)
        self.out_layout.addLayout(self.horizontalLayout)
        # Bottom row: a single Close button.
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
        self.buttonBox.setObjectName("buttonBox")
        self.out_layout.addWidget(self.buttonBox)
        self.verticalLayout_3.addLayout(self.out_layout)
        # Buddies: clicking a label focuses its associated editor widget.
        self.lb_name.setBuddy(self.tx_name)
        self.lb_share_url.setBuddy(self.tx_share_url)
        self.lb_pwd.setBuddy(self.tx_pwd)
        self.lb_dl_link.setBuddy(self.tx_dl_link)

        self.retranslateUi(Dialog)
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all translatable strings (labels are Chinese UI text)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.logo.setText(_translate("Dialog", "TextLabel"))
        self.lb_name.setText(_translate("Dialog", "文件名:"))  # "File name:"
        self.lb_size.setText(_translate("Dialog", "文件大小:"))  # "File size:"
        self.lb_time.setText(_translate("Dialog", "上传时间:"))  # "Upload time:"
        self.lb_dl_count.setText(_translate("Dialog", "下载次数:"))  # "Download count:"
        self.lb_share_url.setText(_translate("Dialog", "分享链接:"))  # "Share link:"
        self.lb_pwd.setText(_translate("Dialog", "提取码:"))  # "Extraction code:"
        self.lb_dl_link.setText(_translate("Dialog", "下载直链:"))  # "Direct download link:"
| 46.710938 | 95 | 0.68657 | 5,772 | 0.954839 | 0 | 0 | 0 | 0 | 0 | 0 | 677 | 0.111993 |
10360d23deea826893e597ec11b95b8d2db92420 | 129 | py | Python | codeforces/anirudhak47/1335/A.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | [
"MIT"
] | 3 | 2020-07-09T16:15:42.000Z | 2020-07-17T13:19:42.000Z | codeforces/anirudhak47/1335/A.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | [
"MIT"
] | null | null | null | codeforces/anirudhak47/1335/A.py | anirudhakulkarni/codes | d7a907951033b57314dfc0b837123aaa5c25a39a | [
"MIT"
] | 1 | 2020-07-17T13:19:48.000Z | 2020-07-17T13:19:48.000Z | for t in range(int(input())):
n=int(input())
if n%2==0:
print(int(n/2-1))
else:
print(int(n//2)) | 21.5 | 30 | 0.449612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1037e099e65b884fd19f0953fbf98c040598fcd6 | 6,959 | py | Python | imageclassification/training/session.py | aisosalo/CIFAR-10 | 8747ba2404bbe7043268dc8afa722938dc3db058 | [
"MIT"
] | 4 | 2019-03-01T21:47:40.000Z | 2020-11-23T17:53:26.000Z | imageclassification/training/session.py | aisosalo/CIFAR-10 | 8747ba2404bbe7043268dc8afa722938dc3db058 | [
"MIT"
] | null | null | null | imageclassification/training/session.py | aisosalo/CIFAR-10 | 8747ba2404bbe7043268dc8afa722938dc3db058 | [
"MIT"
] | 5 | 2019-11-28T13:31:50.000Z | 2022-03-31T21:04:16.000Z | import sys
import os
import time
import random
import numpy as np
from termcolor import colored
from functools import partial
from tensorboardX import SummaryWriter
import torch
from torch.utils.data import DataLoader
from torchvision import transforms as tv_transforms
import solt.transforms as slt
import solt.core as slc
import operator
from imageclassification.training.arguments import parse_args
from imageclassification.training.dataset import ImageClassificationDataset
from imageclassification.training.dataset import apply_by_index, img_labels2solt, unpack_solt_data
from imageclassification.training.transformations import init_train_augs
from imageclassification.kvs import GlobalKVS
import imageclassification.training.transformations as trnsfs
# Validation-pipeline geometry: images are padded to PAD_TO x PAD_TO and a
# CROP_SIZE x CROP_SIZE crop is taken (see init_data_processing below).
PAD_TO = 34
CROP_SIZE = 32
# True when a tracer/debugger is attached; used to disable DataLoader workers.
DEBUG = sys.gettrace() is not None
def init_session():
    """Initialise a training session.

    Verifies a GPU is present, parses CLI args, seeds every RNG, creates the
    timestamped snapshot directory, and records environment info in the
    global key-value store.

    Returns:
        (args, snapshot_name) tuple.
    """
    if not torch.cuda.is_available():
        raise EnvironmentError('The code must be run on GPU.')

    kvs = GlobalKVS()
    args = parse_args()
    if DEBUG:
        # Worker processes interfere with the attached debugger.
        args.n_threads = 0

    # Seed all RNG sources with the same value for reproducibility.
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed, random.seed, np.random.seed):
        seed_fn(args.seed)

    snapshot_name = time.strftime('%Y_%m_%d_%H_%M')
    session_dir = os.path.join(args.snapshots, args.dataset_name, snapshot_name)
    os.makedirs(session_dir, exist_ok=True)

    kvs.update('pytorch_version', torch.__version__)
    print('Pytorch version: ', torch.__version__)

    if torch.cuda.is_available():
        kvs.update('cuda', torch.version.cuda)
        kvs.update('gpus', torch.cuda.device_count())
        print('CUDA version: ', torch.version.cuda)
    else:
        kvs.update('cuda', None)
        kvs.update('gpus', None)

    kvs.update('snapshot_name', snapshot_name)
    kvs.update('args', args)
    kvs.save_pkl(os.path.join(session_dir, 'session.pkl'))

    return args, snapshot_name
def init_data_processing(ds):
    """Build the train/val transform pipelines and store them in the KVS.

    Computes the per-channel mean/std over the training split (cached under
    the snapshots dir), then composes:
      * train: augmentations + normalization
      * val:   pad to PAD_TO, deterministic center crop to CROP_SIZE,
               to-tensor, normalization
    Both pipelines are saved into the global KVS and pickled with the session.
    """
    kvs = GlobalKVS()

    train_augs = init_train_augs(crop_mode='r', pad_mode='r')  # random crop, reflective padding

    dataset = ImageClassificationDataset(ds, split=kvs['metadata'], color_space=kvs['args'].color_space, transformations=train_augs)

    # Mean/std are estimated over the dataset and cached on disk under the
    # snapshots directory so subsequent runs can reuse them.
    mean_vector, std_vector = trnsfs.init_mean_std(dataset=dataset,
                                                   batch_size=kvs['args'].bs,
                                                   n_threads=kvs['args'].n_threads,
                                                   save_mean_std=kvs['args'].snapshots + '/' + kvs['args'].dataset_name,
                                                   color_space=kvs['args'].color_space)

    print('Color space: ', kvs['args'].color_space)
    print(colored('====> ', 'red') + 'Mean:', mean_vector)
    print(colored('====> ', 'red') + 'Std:', std_vector)

    norm_trf = tv_transforms.Normalize(torch.from_numpy(mean_vector).float(),
                                       torch.from_numpy(std_vector).float())
    # Training: augmentations already produce tensors; only normalization of
    # the image (index 0 of the (image, label) pair) is appended here.
    train_trf = tv_transforms.Compose([
        train_augs,
        partial(apply_by_index, transform=norm_trf, idx=0)
    ])

    # Validation: deterministic solt pipeline (pad + center crop), then
    # tensor conversion and the same normalization.
    val_trf = tv_transforms.Compose([
        img_labels2solt,
        slc.Stream([
            slt.PadTransform(pad_to=(PAD_TO, PAD_TO)),
            slt.CropTransform(crop_size=(CROP_SIZE, CROP_SIZE), crop_mode='c'),  # center crop
        ]),
        unpack_solt_data,
        partial(apply_by_index, transform=tv_transforms.ToTensor(), idx=0),
        partial(apply_by_index, transform=norm_trf, idx=0)
    ])

    kvs.update('train_trf', train_trf)
    kvs.update('val_trf', val_trf)
    kvs.save_pkl(os.path.join(kvs['args'].snapshots, kvs['args'].dataset_name, kvs['snapshot_name'], 'session.pkl'))
def init_loaders(dataset, x_train, x_val):
    """Create the train and validation DataLoaders for the given splits."""
    kvs = GlobalKVS()

    def _build(split, transformations):
        # Both loaders wrap the same dataset type; only split/transforms differ.
        return ImageClassificationDataset(dataset,
                                          split=split,
                                          color_space=kvs['args'].color_space,
                                          transformations=transformations)

    train_loader = DataLoader(_build(x_train, kvs['train_trf']),
                              batch_size=kvs['args'].bs,
                              num_workers=kvs['args'].n_threads,
                              drop_last=True,
                              worker_init_fn=lambda wid: np.random.seed(np.uint32(torch.initial_seed() + wid)))

    val_loader = DataLoader(_build(x_val, kvs['val_trf']),
                            batch_size=kvs['args'].val_bs,
                            num_workers=kvs['args'].n_threads)

    return train_loader, val_loader
def init_folds():
    """Register KVS slots and TensorBoard writers for every fold to be trained.

    When args.fold is not -1, only that single fold is set up.

    Returns:
        dict mapping fold_id -> SummaryWriter.
    """
    kvs = GlobalKVS()
    writers = {}
    cv_split_train = {}
    requested_fold = kvs['args'].fold

    for fold_id, split in enumerate(kvs['cv_split_all_folds']):
        if requested_fold != -1 and fold_id != requested_fold:
            continue
        kvs.update(f'losses_fold_[{fold_id}]', None, list)
        kvs.update(f'val_metrics_fold_[{fold_id}]', None, list)
        cv_split_train[fold_id] = split
        log_dir = os.path.join(kvs['args'].logs,
                               kvs['args'].dataset_name,
                               'fold_{}'.format(fold_id), kvs['snapshot_name'])
        writers[fold_id] = SummaryWriter(log_dir)

    kvs.update('cv_split_train', cv_split_train)
    kvs.save_pkl(os.path.join(kvs['args'].snapshots, kvs['args'].dataset_name, kvs['snapshot_name'], 'session.pkl'))
    return writers
def save_checkpoint(model, val_metric_name, comparator='lt'):  # lt, less than
    """Persist the model when its validation metric improves.

    The first call always saves; later calls save only when *comparator*
    (an ``operator`` function name, e.g. 'lt'/'gt') says the new metric is
    better than the best seen so far, in which case the previous snapshot
    file is removed. The session pickle is refreshed on every call.

    Refactor: the original duplicated the 5-line print/save/update sequence
    in both branches; it is now written once without changing behavior.
    """
    if isinstance(model, torch.nn.DataParallel):
        model = model.module

    kvs = GlobalKVS()
    fold_id = kvs['cur_fold']
    epoch = kvs['cur_epoch']
    val_metric = kvs[f'val_metrics_fold_[{fold_id}]'][-1][0][val_metric_name]
    comparator = getattr(operator, comparator)
    cur_snapshot_name = os.path.join(kvs['args'].snapshots, kvs['args'].dataset_name, kvs['snapshot_name'],
                                     f'fold_{fold_id}_epoch_{epoch+1}.pth')

    # First epoch always saves; afterwards only on improvement.
    improved = kvs['prev_model'] is None or comparator(val_metric, kvs['best_val_metric'])
    if improved:
        print(colored('====> ', 'red') + 'Snapshot was saved to', cur_snapshot_name)
        if kvs['prev_model'] is not None:
            os.remove(kvs['prev_model'])  # keep only the best snapshot on disk
        torch.save(model.state_dict(), cur_snapshot_name)
        kvs.update('prev_model', cur_snapshot_name)
        kvs.update('best_val_metric', val_metric)

    kvs.save_pkl(os.path.join(kvs['args'].snapshots, kvs['args'].dataset_name, kvs['snapshot_name'], 'session.pkl'))
| 36.626316 | 132 | 0.620348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 963 | 0.138382 |
10387d5bca747ed2fb01a238f98496c602127cb4 | 293 | py | Python | remotelogin/oper_sys/busybox/__init__.py | filintod/pyremotelogin | e2a4df7fd69d21eccdf1aec55c33a839de9157f1 | [
"MIT"
] | 1 | 2018-11-20T17:45:20.000Z | 2018-11-20T17:45:20.000Z | remotelogin/oper_sys/busybox/__init__.py | filintod/pyremotelogin | e2a4df7fd69d21eccdf1aec55c33a839de9157f1 | [
"MIT"
] | 3 | 2018-10-16T18:07:50.000Z | 2018-10-16T18:10:06.000Z | remotelogin/oper_sys/busybox/__init__.py | filintod/pyremotelogin | e2a4df7fd69d21eccdf1aec55c33a839de9157f1 | [
"MIT"
] | null | null | null | from . import shellcommands
from ..linux import LinuxOS
__author__ = 'Filinto Duran (duranto@gmail.com)'
# TODO: break unix/linux to a bare and expand from there
class BusyBoxOS(LinuxOS):
"""
Embedded Linux device
"""
name = 'busybox'
cmd = shellcommands.get_instance()
| 20.928571 | 56 | 0.696246 | 127 | 0.433447 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.467577 |
1038f2cc4542c018a0f73531e58b1bd026455441 | 11,988 | py | Python | DIP/src/utils/utils.py | Jay-Lewis/phase_retrieval | 799cef92852c53e62e2a548f605652923e979456 | [
"MIT"
] | 4 | 2019-01-27T11:46:11.000Z | 2020-12-17T12:23:15.000Z | DIP/src/utils/utils.py | Jay-Lewis/phase_retrieval | 799cef92852c53e62e2a548f605652923e979456 | [
"MIT"
] | null | null | null | DIP/src/utils/utils.py | Jay-Lewis/phase_retrieval | 799cef92852c53e62e2a548f605652923e979456 | [
"MIT"
] | 1 | 2020-10-15T02:53:12.000Z | 2020-10-15T02:53:12.000Z | import math
import numpy as np
import os.path
import urllib.request as urllib
import gzip
import pickle
import pandas as pd
from scipy.misc import imsave
from src.utils.download import *
import src.utils.image_load_helpers as image_load_helpers
import glob
def CelebA_load(label_data=None, image_paths=None, batch_size=64, isTrain=True):
    """Two-mode CelebA loader.

    First call (label_data is None): reads ./data/CelebA/is_male.csv and the
    sorted image paths, returning ``(1 - labels, image_paths)``.
    Subsequent calls: sample a random minibatch of *batch_size* 64x64x3 images
    (scaled to [0, 1]) from the train (first 90%) or test (last 10%) portion.

    Fix: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is the
    exact type the alias referred to, so behavior is unchanged.
    """
    path = './data/CelebA/'
    assert os.path.exists(path + 'is_male.csv')
    assert os.path.isdir(path + 'images/')

    if label_data is None:
        label_data = np.squeeze((pd.read_csv(path + 'is_male.csv') + 1).astype(bool).astype(np.int32).values)
        image_paths = glob.glob(path + 'images/*')
        image_paths.sort()
        return 1 - label_data, image_paths

    tot_len = len(label_data)
    test_num = int(tot_len * 0.1)  # last 10% of samples is the test split
    if isTrain:
        index = 1 + np.random.choice(tot_len - test_num, batch_size, False)
    else:
        index = 1 + tot_len - test_num + np.random.choice(test_num, batch_size, False)

    # Indices are shifted by +1 for the image lookup and by -1 for the labels,
    # presumably because image files are 1-based while labels are 0-based —
    # TODO(review): confirm against the CelebA file naming.
    images = np.array([image_load_helpers.get_image(image_paths[i], 128).reshape([64 * 64 * 3]) for i in index]) / 255.
    labels = label_data[index - 1]
    return images, labels
def shuffle(images, targets):
    """Shuffle *images* and *targets* in place with one shared permutation.

    The global RNG state is captured before the first shuffle and restored
    before the second, so both sequences receive identical swaps.
    """
    saved_state = np.random.get_state()
    np.random.shuffle(images)
    # Rewind the RNG so targets is permuted exactly like images.
    np.random.set_state(saved_state)
    np.random.shuffle(targets)
def cifar10_load():
    """Load the five CIFAR-10 training batches and the test batch.

    Pixel data is scaled to [0, 1].

    Fix: the test batch was opened in text mode, which breaks pickle under
    Python 3; it is now opened in binary like the training batches.
    NOTE(review): these archives are Python-2-era pickles; loading them under
    Python 3 may additionally require ``encoding='latin1'`` — confirm.

    Returns:
        (train_data, train_targets, test_data, test_targets)
    """
    path = './pretrained_models/cifar10/data/cifar10_train/cifar-10-batches-py/'
    batches = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
    data = []
    targets = []
    for batch in batches:
        with open(path + batch, 'rb') as file_handle:
            batch_data = pickle.load(file_handle)
            batch_data['data'] = (batch_data['data'] / 255.0)
            data.append(batch_data['data'])
            targets.append(batch_data['labels'])
    with open(path + 'test_batch', 'rb') as file_handle:  # was text mode
        batch_data = pickle.load(file_handle)
        batch_data['data'] = (batch_data['data'] / 255.0)
    return np.vstack(data), np.concatenate(targets), batch_data['data'], batch_data['labels']
def load_fmnist(path, kind='train'):
    """Load Fashion-MNIST images and labels from gzipped idx files in *path*.

    *kind* selects the split prefix ('train' or 't10k'). Images are returned
    flattened to 784 floats in [0, 1]; labels as uint8.

    Fixes: removed the redundant function-local imports of os/gzip/numpy
    (already imported at module level) and moved the docstring to the top of
    the function where Python actually treats it as documentation.
    """
    labels_path = os.path.join(path,
                               '%s-labels-idx1-ubyte.gz'
                               % kind)
    images_path = os.path.join(path,
                               '%s-images-idx3-ubyte.gz'
                               % kind)
    # idx format: labels start after an 8-byte header, images after 16 bytes.
    with gzip.open(labels_path, 'rb') as lbpath:
        labels = np.frombuffer(lbpath.read(), dtype=np.uint8,
                               offset=8)

    with gzip.open(images_path, 'rb') as imgpath:
        images = np.frombuffer(imgpath.read(), dtype=np.uint8,
                               offset=16).reshape(len(labels), 784) / 255.0

    return images, labels
def F_MNIST_load():
    """Load Fashion-MNIST and return (train_images, train_labels, test_images, test_labels)."""
    train = load_fmnist('./data/f_mnist/', 'train')
    test = load_fmnist('./data/f_mnist/', 't10k')
    # Each split is an (images, labels) pair; concatenating the two pairs
    # yields the expected 4-tuple.
    return train + test
def MNIST_load():
    """Load MNIST, downloading the gzipped pickle archive on first use.

    Returns:
        (train_images, train_labels, test_images, test_labels), with both
        splits shuffled in place (the dev split in the archive is discarded).
    """
    filepath = './data/mnist/mnist_py3k.pkl.gz'
    url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist_py3k.pkl.gz'
    if not os.path.isfile(filepath):
        print("Couldn't find MNIST dataset in ./data, downloading...")
        urllib.urlretrieve(url, filepath)

    with gzip.open(filepath, 'rb') as f:
        (tr_image, tr_label), _dev, (ts_image, ts_label) = pickle.load(f)

    shuffle(tr_image, tr_label)
    shuffle(ts_image, ts_label)
    return (tr_image, tr_label, ts_image, ts_label)
def adv_load_dataset(batch_size, data):
    """Return a generator factory yielding training minibatches as copies.

    Each epoch starts at a random offset in [0, batch_size) so successive
    epochs see different batch boundaries. The test portion of *data* is
    ignored here.
    """
    train_data, train_target, _test_data, _test_target = data

    def train_epoch():
        total = train_data.shape[0]
        # The random offset is drawn lazily, when the generator first runs.
        i = np.random.randint(0, batch_size)
        while i + batch_size < total:
            yield (np.copy(train_data[i:i + batch_size, :]),
                   np.copy(train_target[i:i + batch_size]))
            i += batch_size

    return train_epoch
def mix_image(image, random_map):
    """Scramble pixels: output position i receives input pixel random_map[i].

    Vectorized replacement for the original per-pixel Python loop (one fancy
    indexing operation instead of 784 Python iterations). The result is cast
    to float64, matching the dtype the original's uninitialized
    ``np.ndarray(...)`` buffer always produced.
    """
    flat = image.flatten()
    mixed = flat[np.asarray(random_map)].astype(np.float64)
    return mixed.reshape(image.shape)
def unmix_image(image, random_map):
    """Invert mix_image: input pixel i is written back to position random_map[i].

    Vectorized replacement for the original per-pixel Python loop. The output
    buffer is float64, matching the dtype the original's ``np.ndarray(...)``
    buffer always produced.
    """
    flat = image.flatten()
    restored = np.empty(flat.shape)
    restored[np.asarray(random_map)] = flat
    return restored.reshape(image.shape)
def save_zip_data(object, filename, bin=1):
    """Save a gzip-compressed pickle of *object* to *filename*.

    *bin* is the pickle protocol passed to ``pickle.dumps``.

    Fix: the GzipFile is now closed via a context manager; the original left
    the handle open if ``write`` raised, and relied on GC to flush otherwise.
    """
    with gzip.GzipFile(filename, 'wb') as f:
        f.write(pickle.dumps(object, bin))
def create_MNIST_mixed():
    """Create a pixel-permuted copy of MNIST.

    Downloads MNIST if absent, draws one random permutation of the 784 pixel
    positions, applies it to every image in every split, and writes the mixed
    dataset plus the permutation map under ./data.

    Fix: the random map is now written through a context manager; the
    original leaked the file handle from ``pickle.dump(rand_map, open(...))``.
    """
    # Load MNIST data (download on first use).
    filepath = './data/mnist/mnist_py3k.pkl.gz'
    url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist_py3k.pkl.gz'
    if not os.path.isfile(filepath):
        print("Couldn't find MNIST dataset in ./data, downloading...")
        urllib.urlretrieve(url, filepath)
    with gzip.open(filepath, 'rb') as f:
        data_chunks = pickle.load(f)

    # One shared random permutation of the 28*28 pixel positions.
    image_size = 784
    rand_map = list(range(image_size))
    np.random.shuffle(rand_map)

    # Apply the permutation to every split (train/dev/test).
    mixed_data = []
    for images, labels in data_chunks:
        mixed_images = np.asarray([mix_image(image, rand_map) for image in images])
        mixed_data.append((mixed_images, labels))

    # Save mixed data and the map needed to invert the permutation later.
    save_zip_data(mixed_data, './data/mixed_mnist_py3k.pkl.gz')
    with open('./data/random_map.pkl', 'wb') as f:
        pickle.dump(rand_map, f)
def MNIST_load_mixed():
    """Load the pixel-permuted MNIST dataset, creating it first if absent.

    Returns:
        (train_images, train_labels, test_images, test_labels), both splits
        shuffled in place; the dev split in the archive is discarded.
    """
    filepath = './data/mixed_mnist_py3k.pkl.gz'
    if not os.path.isfile(filepath):
        print('Mixed MNIST dataset not found at:')
        print(filepath)
        print('Creating mixed MNIST dataset...')
        create_MNIST_mixed()

    with gzip.open(filepath, 'rb') as f:
        (tr_image, tr_label), _dev, (ts_image, ts_label) = pickle.load(f)

    shuffle(tr_image, tr_label)
    shuffle(ts_image, ts_label)
    return (tr_image, tr_label, ts_image, ts_label)
def file_exists(path):
    """Return True when *path* names an existing regular file."""
    return os.path.isfile(path)
def save_images(X, save_path):
    """Tile a batch of images into a near-square grid and write it to disk.

    X may be (N, H*W) flat grayscale, (N, H, W) grayscale, or (N, C, H, W) /
    (N, H, W, C) color; float inputs in [0, 1] are rescaled to uint8.

    Fix: the tile row index used ``n / nw`` — float division under Python 3,
    which raises when used in a slice. It is now integer division.
    """
    # [0, 1] -> [0, 255]
    if isinstance(X.flatten()[0], np.floating):
        X = (255. * X).astype('uint8')

    n_samples = X.shape[0]
    rows = int(np.sqrt(n_samples))
    nh, nw = rows, int(n_samples / rows) + 1

    if X.ndim == 2:
        # Flat grayscale: assumes each row is a square image — TODO confirm.
        X = np.reshape(X, (X.shape[0], int(np.sqrt(X.shape[1])), int(np.sqrt(X.shape[1]))))

    if X.ndim == 4:
        # BCHW -> BHWC
        if X.shape[1] == 3:
            X = X.transpose(0, 2, 3, 1)
        h, w = X[0].shape[:2]
        img = np.zeros((h * nh, w * nw, 3))
    elif X.ndim == 3:
        h, w = X[0].shape[:2]
        img = np.zeros((h * nh, w * nw))

    for n, x in enumerate(X):
        j = n // nw  # was n / nw: float under Python 3, breaking the slice below
        i = n % nw
        img[j * h:j * h + h, i * w:i * w + w] = x

    imsave(save_path, img)
def test_2d():
    """Generate a 2-D toy dataset: squared-Gaussian radii around a 3x3 grid
    of centers at {-4, 0, 4}^2, all with label 1.

    Also writes a scatter plot of the training split to 'train.png'.

    Returns:
        (train_data, train_target, test_data, test_target)
    """
    TRAIN_SIZE, TEST_SIZE = 260520, 2000
    train_data, train_target = [], []
    test_data, test_target = [], []

    for it in range(1, TRAIN_SIZE + TEST_SIZE + 1):
        x0, y0 = 4 * (np.random.randint(3, size=2) - 1)
        r = np.random.normal(0, 0.5)
        t = np.random.uniform(0, 6.3)
        xy = np.matrix([x0 + (r**2) * math.cos(t), y0 + (r**2) * math.sin(t)])
        label = 1
        if it < TRAIN_SIZE:
            train_data.append(xy)
            train_target.append(label)
        else:
            test_data.append(xy)
            test_target.append(label)

    train_data = np.vstack(train_data)
    test_data = np.vstack(test_data)

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    plt.scatter(np.asarray(train_data[:, 0]).flatten(), np.asarray(train_data[:, 1]).flatten(), s=0.4, c='b', alpha=0.7)
    fig.savefig('train.png')
    plt.close()

    return train_data, train_target, test_data, test_target
# Toy Testset
def swiss_load():
    """Generate a 2-D swiss-roll toy dataset.

    Each sample is 0.5 * (t*cos(2t), t*sin(2t)) for t ~ U(0, 10); the label
    is 1 for the inner half of the spiral (t < 5), else 0.

    Returns:
        (train_data, train_target, test_data, test_target)
    """
    TRAIN_SIZE, TEST_SIZE = 65536, 2000
    train_data, train_target = [], []
    test_data, test_target = [], []

    for it in range(1, TRAIN_SIZE + TEST_SIZE + 1):
        t = np.random.uniform(0, 10)
        xy = 0.5 * np.matrix([t * math.cos(2 * t), t * math.sin(2 * t)])
        label = int(t < 5)
        if it < TRAIN_SIZE:
            train_data.append(xy)
            train_target.append(label)
        else:
            test_data.append(xy)
            test_target.append(label)

    return np.vstack(train_data), train_target, np.vstack(test_data), test_target
def dynamic_load_dataset(batch_size, load_func, *func_args):
    """Wrap *load_func* into lazily evaluated train/test epoch generators.

    ``load_func(*func_args)`` must return (label_data, image_paths); the
    batch-producing calls are ``load_func(label_data, image_paths,
    batch_size, isTrain=...)``. The last 10% of samples is the test portion.

    Returns:
        (train_epoch, None, test_epoch) — None keeps the return shape of
        load_dataset, which yields raw data in that slot.
    """
    label_data, image_paths = load_func(*func_args)
    total = len(label_data)
    test_len = int(total * 0.1)
    train_len = total - test_len

    def train_epoch():
        # Random offset drawn when the generator starts running.
        i = np.random.randint(0, batch_size)
        while i + batch_size < train_len:
            yield load_func(label_data, image_paths, batch_size, isTrain=True)
            i += batch_size

    def test_epoch():
        i = 0
        while i + batch_size < test_len:
            yield load_func(label_data, image_paths, batch_size, isTrain=False)
            i += batch_size

    return train_epoch, None, test_epoch
def load_dataset(batch_size, load_func, dynamic_load=False, *func_args):
    """Build train/test epoch generators over an in-memory dataset.

    With dynamic_load=True, delegates to dynamic_load_dataset (lazy loading).
    Otherwise ``load_func(*func_args)`` must return
    (train_data, train_target, test_data, test_target); batches are yielded
    as copies so callers can mutate them freely.

    Returns:
        (train_epoch, data, test_epoch)
    """
    if dynamic_load:
        return dynamic_load_dataset(batch_size, load_func, *func_args)

    data = load_func(*func_args)
    train_data, train_target, test_data, test_target = data
    test_size = batch_size

    def train_epoch():
        total = train_data.shape[0]
        # Random start offset, drawn when the generator first runs.
        i = np.random.randint(0, batch_size)
        while i + batch_size < total:
            yield (np.copy(train_data[i:i + batch_size, :]),
                   np.copy(train_target[i:i + batch_size]))
            i += batch_size

    def test_epoch():
        total = test_data.shape[0]
        i = 0
        while i + test_size < total:
            yield (np.copy(test_data[i:i + test_size, :]),
                   np.copy(test_target[i:i + test_size]))
            i += batch_size

    return train_epoch, data, test_epoch
def batch_gen(gens, use_one_hot_encoding=False, out_dim=-1, num_iter=-1):
    """Iterate the epoch factory *gens* repeatedly, yielding its batches.

    Runs for *num_iter* epochs (forever when num_iter < 0). When
    use_one_hot_encoding is set, integer targets are expanded to one-hot
    rows of width *out_dim*.
    """
    it = 0
    while num_iter < 0 or it < num_iter:
        it += 1
        for images, targets in gens():
            if not use_one_hot_encoding:
                yield images, targets
            else:
                count = len(targets)
                onehot = np.zeros((count, out_dim))
                onehot[np.arange(count), targets] = 1
                yield images, onehot
def softmax(z):
    """Row-wise softmax of a 2-D array.

    Fixes: the original's ``z -= np.max(z)`` mutated the caller's array in
    place, and shifted by the global maximum; we now shift each row by its
    own maximum (better numerical stability) without touching the input.
    The returned probabilities are mathematically identical, since softmax
    is invariant to any constant shift within a row.
    """
    shifted = z - np.max(z, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)
def get_factors(number):
    """Return all positive divisors of *number* in ascending order.

    Fixes: the original returned the duplicate list [1, 1] for number == 1
    (it appended the number after a candidate range that already contained
    it), and used float math (math.ceil(number * 0.5)) for the bound. No
    divisor other than *number* itself can exceed number // 2, so an integer
    range suffices.
    """
    factors = [d for d in range(1, number // 2 + 1) if number % d == 0]
    factors.append(number)
    return factors
def subplot_values(num_figures):
    """Pick a (rows, cols) subplot grid for *num_figures* panels.

    Chooses the divisor of num_figures closest to its square root as the
    first dimension, giving the most square-like exact grid.
    """
    candidates = get_factors(num_figures)
    target = math.sqrt(num_figures)
    rows = min(candidates, key=lambda f: abs(f - target))
    cols = int(num_figures / rows)
    return rows, cols
def make_one_hot(coll):
    """One-hot encode a 1-D integer array; the width is max(coll) + 1."""
    width = coll.max() + 1
    encoded = np.zeros((len(coll), width))
    encoded[np.arange(len(coll)), coll] = 1
    return encoded
1039b022c5fb3fa4258162d12d3e7a230a204279 | 282 | py | Python | 2. Conditional and Repetitive Execution/2.2. Even or Odd.py | ahmetutkuozkan/my_ceng240_exercises_solutions | 167bb9938515870ec1f01853933edc3b55937bff | [
"MIT"
] | null | null | null | 2. Conditional and Repetitive Execution/2.2. Even or Odd.py | ahmetutkuozkan/my_ceng240_exercises_solutions | 167bb9938515870ec1f01853933edc3b55937bff | [
"MIT"
] | null | null | null | 2. Conditional and Repetitive Execution/2.2. Even or Odd.py | ahmetutkuozkan/my_ceng240_exercises_solutions | 167bb9938515870ec1f01853933edc3b55937bff | [
"MIT"
] | null | null | null | value1 = int(input()); value2 = value1//100
if value1 % 2==0 and value2 % 2==0:
print("Even")
elif value1 % 2==0 and value2 % 2==1:
print("Even Odd")
elif value1 % 2==1 and value2 % 2==1:
print("Odd")
elif value1 % 2==1 and value2 % 2==0:
print("Odd Even")
| 28.2 | 44 | 0.567376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.109929 |
103b74338ebd3a256125530370b8d63b824deb3c | 339 | py | Python | First_course/ex2_2.py | laetrid/learning | b28312c34db2118fb7d5691834b8f7e628117642 | [
"Apache-2.0"
] | null | null | null | First_course/ex2_2.py | laetrid/learning | b28312c34db2118fb7d5691834b8f7e628117642 | [
"Apache-2.0"
] | null | null | null | First_course/ex2_2.py | laetrid/learning | b28312c34db2118fb7d5691834b8f7e628117642 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
column1 = "NETWORK_NUMBER"
column2 = "FIRST_OCTET_BINARY"
column3 = "FIRST_OCTET_HEX"
ip_addr = '88.19.107.0'
formatter = '%-20s%-20s%-20s'
octets = ip_addr.split('.')
a = bin(int(octets[0]))
b = hex(int(octets[0]))
print ""
print formatter % (column1, column2, column3)
print formatter % (ip_addr, a, b)
print ""
| 19.941176 | 45 | 0.678466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.327434 |
103c0a4ba3c0768fdcf0d8428f050d11782bc6b9 | 511 | py | Python | serendipity/set_and_map/singly_linked_list_set.py | globotree/serendipity | 8380635a4760a5393330460bb625834ffe02967c | [
"Unlicense"
] | 3 | 2019-03-12T10:00:53.000Z | 2019-05-29T10:40:28.000Z | serendipity/set_and_map/singly_linked_list_set.py | globotree/serendipity | 8380635a4760a5393330460bb625834ffe02967c | [
"Unlicense"
] | 12 | 2018-12-17T13:31:19.000Z | 2019-02-13T10:25:01.000Z | serendipity/set_and_map/singly_linked_list_set.py | globotree/serendipity | 8380635a4760a5393330460bb625834ffe02967c | [
"Unlicense"
] | 1 | 2020-01-24T00:48:57.000Z | 2020-01-24T00:48:57.000Z | from serendipity.linear_structures.singly_linked_list import LinkedList
class Set:
    """A set (no duplicate elements) backed by a singly linked list."""

    def __init__(self):
        self._list = LinkedList()

    def add(self, e):
        """Insert *e* unless it is already present — sets hold no duplicates."""
        if self._list.contains(e):
            return
        self._list.add_first(e)

    def contains(self, e):
        """Return True when *e* is in the set."""
        return self._list.contains(e)

    def get_size(self):
        """Return the number of stored elements."""
        return self._list.get_size()

    def is_empty(self):
        """Return True when the set holds no elements."""
        return self._list.is_empty()

    # remove(e): would simply delegate to the linked list's element removal;
    # deliberately left unimplemented, as in the original.
| 21.291667 | 71 | 0.614481 | 476 | 0.863884 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.188748 |
103ddecd77ffe2029b07f7ee212a250ddb0a808d | 1,546 | py | Python | settings.py | gaomugong/flask-demo | 83bfb04634355565456cc16a5e98421338e3f562 | [
"MIT"
] | 12 | 2017-12-24T13:58:17.000Z | 2021-04-06T16:21:00.000Z | settings.py | gaomugong/flask-demo | 83bfb04634355565456cc16a5e98421338e3f562 | [
"MIT"
] | null | null | null | settings.py | gaomugong/flask-demo | 83bfb04634355565456cc16a5e98421338e3f562 | [
"MIT"
] | 1 | 2021-10-17T14:45:44.000Z | 2021-10-17T14:45:44.000Z | # -*- coding: utf-8 -*-
"""
settings = conf.default.py + settings_{env}.py
"""
# import os
# import importlib
from conf.default import *
# ========================================================================================
# IMPORT ENV SETTINGS
# ========================================================================================
# root directory -> app.root_path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
APP_ENV = os.environ.get('APP_ENV', 'develop')
conf_mod = 'conf.settings_{APP_ENV}'.format(APP_ENV=APP_ENV)
try:
# print 'import %s' % conf_mod
mod = __import__(conf_mod, globals(), locals(), ["*"])
# mod = importlib.import_module(conf_module)
except ImportError as e:
raise ImportError("Could not import module '{}': {}".format(conf_mod, e))
# Overwrite upper keys
for setting in dir(mod):
if setting == setting.upper():
locals()[setting] = getattr(mod, setting)
# ========================================================================================
# FLASK-SQLALCHEMY
# ========================================================================================
DATABASE = DATABASES['default']
if DATABASE['ENGINE'] == 'sqlite':
SQLALCHEMY_DATABASE_URI = 'sqlite:///{NAME}'.format(**DATABASE)
else:
# SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://{USER}:{PASSWORD}@{HOST}:{PORT}/{NAME}'.format(**DATABASE)
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{USER}:{PASSWORD}@{HOST}:{PORT}/{NAME}'.format(**DATABASE)
| 39.641026 | 107 | 0.493532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 980 | 0.633894 |
103ee262884abe730921725155217f60f63708d4 | 7,958 | py | Python | scripts/elitech_device.py | grvstick/elitech-datareader | 21b778066477a9ba81624c883b7ab7f13302edec | [
"MIT"
] | 58 | 2015-06-16T06:08:11.000Z | 2022-01-14T21:06:00.000Z | scripts/elitech_device.py | grvstick/elitech-datareader | 21b778066477a9ba81624c883b7ab7f13302edec | [
"MIT"
] | 29 | 2015-11-20T14:38:32.000Z | 2022-02-16T20:26:05.000Z | scripts/elitech_device.py | grvstick/elitech-datareader | 21b778066477a9ba81624c883b7ab7f13302edec | [
"MIT"
] | 16 | 2016-05-18T20:56:36.000Z | 2021-07-29T09:03:32.000Z | #!/usr/bin/env python
# coding: utf-8
import argparse
import elitech
import datetime
from elitech.msg import (
StopButton,
ToneSet,
AlarmSetting,
TemperatureUnit,
)
from elitech.msg import _bin
import six
import os
def main():
    """Entry point: dispatch the parsed sub-command to its handler."""
    args = parse_args()
    handlers = {
        'simple-set': command_simpleset,
        'get': command_get,
        'set': command_set,
        'devinfo': command_devinfo,
        'clock': command_clock,
        'raw': command_raw_send,
        'latest': command_latest,
    }
    handler = handlers.get(args.command)
    # Unknown commands fall through silently, as before.
    if handler is not None:
        handler(args)
def _convert_time(sec):
hour = int(sec / 3600.0)
min = int((sec - hour * 3600) / 60.0)
sec = sec % 60
return datetime.time(hour=hour, minute=min, second=sec)
def command_simpleset(args):
    """Sync the device clock to its station number and optionally set the
    recording interval (args.interval, seconds)."""
    dev = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    dev.init()

    info = dev.get_devinfo()
    dev.set_clock(info.station_no)

    params = info.to_param_put()
    if args.interval:
        params.rec_interval = _convert_time(args.interval)
    dev.update(params)
def command_get(args):
    """Read every recorded data point from the device and print TSV rows.

    Rows are (no, timestamp, temperature[, humidity]) depending on the model.
    """
    dev = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    dev.init()

    def dump(rows):
        for row in rows:
            if len(row) == 3:
                print("{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}".format(*row))
            elif len(row) == 4:
                print("{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}\t{3:.1f}".format(*row))

    if args.page_size:
        dev.get_data(callback=dump, page_size=args.page_size)
    else:
        dev.get_data(callback=dump)
def command_latest(args):
    """Print the most recent sample; --value_only suppresses no/timestamp.

    Same record shapes as command_get: 3-element records carry one value,
    4-element records carry two.
    """
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.init()
    def output(latest):
        # Invoked by the device driver with the latest record.
        if len(latest) == 3:
            if args.value_only:
                print("{2:.1f}".format(*latest))
            else:
                print("{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}".format(*latest))
        elif len(latest) == 4:
            if args.value_only:
                print("{2:.1f}\t{3:.1f}".format(*latest))
            else:
                print("{0}\t{1:%Y-%m-%d %H:%M:%S}\t{2:.1f}\t{3:.1f}".format(*latest))
    if args.page_size:
        device.get_latest(callback=output, page_size=args.page_size)
    else:
        device.get_latest(callback=output)
def command_set(args):
    """Write every CLI-specified parameter to the device, echoing key=value.

    The current device state is round-tripped via to_param_put(), so only
    the options the user actually passed are changed.
    """
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.encode = args.encode
    device.init()
    dev_info = device.get_devinfo()
    param_put = dev_info.to_param_put()
    station_no = dev_info.station_no
    if args.interval is not None:
        param_put.rec_interval = _convert_time(args.interval)
    if args.upper_limit is not None:
        param_put.upper_limit = args.upper_limit
    if args.lower_limit is not None:
        param_put.lower_limit = args.lower_limit
    if args.station_no is not None:
        param_put.update_station_no = int(args.station_no)
        # Later writes (dev_num / user_info) must target the new number.
        station_no = param_put.update_station_no
    if args.stop_button is not None:
        param_put.stop_button = StopButton.ENABLE if args.stop_button == 'y' else StopButton.DISABLE
    if args.delay is not None:
        param_put.delay = float(args.delay)
    if args.tone_set is not None:
        param_put.tone_set = ToneSet.PERMIT if args.tone_set == 'y' else ToneSet.NONE
    if args.alarm is not None:
        # 'x' disables the alarm; '3'/'10' select the T3/T10 alarm modes.
        if args.alarm == 'x':
            param_put.alarm = AlarmSetting.NONE
        elif args.alarm == '3':
            param_put.alarm = AlarmSetting.T3
        elif args.alarm == '10':
            param_put.alarm = AlarmSetting.T10
    if args.temp_unit is not None:
        param_put.temp_unit = TemperatureUnit.C if args.temp_unit == 'C' else TemperatureUnit.F
    if args.temp_calibration is not None:
        param_put.temp_calibration = float(args.temp_calibration)
    if args.humi_upper_limit:
        param_put.humi_upper_limit = args.humi_upper_limit
    if args.humi_lower_limit:
        param_put.humi_lower_limit = args.humi_lower_limit
    if args.humi_calibration:
        param_put.humi_calibration = float(args.humi_calibration)
    # Echo the full parameter set that is about to be written.
    for k,v in vars(param_put).items():
        print("{}={}".format(k, v))
    device.update(param_put)
    if args.dev_num is not None:
        device.set_device_number(station_no, args.dev_num)
        print("{}={}".format("dev_num", args.dev_num))
    if args.user_info is not None:
        if type(args.user_info) == six.binary_type:
            # Python 2 hands argv over as bytes; normalise to text first.
            args.user_info = args.user_info.decode("utf-8")
        device.set_user_info(station_no, args.user_info)
        print(u"{}={}".format("user_info", args.user_info))
def command_devinfo(args):
    """Print the device's info fields as sorted key=value lines,
    skipping private (underscore-prefixed) attributes."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    device.encode = args.encode
    device.init()
    info = device.get_devinfo()
    for field, value in sorted(vars(info).items()):
        if field.startswith("_"):
            continue
        print(u"{}={}".format(field, value))
def command_clock(args):
    """Set the device clock to --time (format %Y%m%d%H%M%S), or to "now"
    when --time is omitted (None presumably triggers the library default;
    verify against elitech.Device.set_clock)."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    # NOTE(review): unlike the other commands, device.init() is not called
    # before get_devinfo() here -- confirm this is intentional.
    dev_info = device.get_devinfo()
    if args.time:
        clock = datetime.datetime.strptime(args.time, '%Y%m%d%H%M%S')
    else:
        clock = None
    device.set_clock(dev_info.station_no, clock)
def command_raw_send(args):
    """Send a raw request (--req hex string parsed by _bin) and hex-dump the
    response, 16 bytes per line."""
    device = elitech.Device(args.serial_port, args.ser_baudrate, args.ser_timeout)
    request_bytes = _bin(args.req)
    res = device.raw_send(request_bytes, args.res_len)
    print("\nresponse length={}".format(len(res)))
    for i, b in enumerate(res):
        if six.PY2:
            # Python 2: iterating bytes yields 1-char strings, so ord() first.
            six.print_("{:02X} ".format(ord(b)), sep='', end='')
        else:
            six.print_("{:02X} ".format(b), end='')
        if (i + 1) % 16 == 0:
            six.print_()
    six.print_()
def parse_args():
    """Build the CLI parser and parse sys.argv.

    NOTE(review): 'init' is an accepted --command choice but main() has no
    handler for it, so it is currently a no-op -- confirm intent.

    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser('description Elitech RC-4 / RC-5 data reader')
    parser.add_argument('-c', "--command", choices=['init', 'get', 'latest', 'simple-set', 'set', 'devinfo', 'clock', 'raw'])
    parser.add_argument('-i', "--interval", type=int)
    parser.add_argument("--upper_limit", type=float)
    parser.add_argument("--lower_limit", type=float)
    parser.add_argument("--station_no", type=int)
    parser.add_argument("--stop_button", choices=['y', 'n'])
    parser.add_argument("--delay", choices=['0.0', '0.5', '1.0', '1.5', '2.0', '2.5', '3.0', '3.5', '4.0', '4.5', '5.0', '5.5', '6.0'])
    parser.add_argument('--tone_set', choices=['y', 'n'])
    parser.add_argument('--alarm', choices=['x', '3', '10'])
    parser.add_argument('--temp_unit', choices=['C', 'F'])
    parser.add_argument('--temp_calibration', type=float)
    parser.add_argument("--humi_upper_limit", type=float)
    parser.add_argument("--humi_lower_limit", type=float)
    parser.add_argument('--humi_calibration', type=float)
    parser.add_argument('--time', type=str)
    parser.add_argument('--dev_num', type=str)
    parser.add_argument('--user_info', type=str)
    parser.add_argument('--encode', type=str, default='utf8', help='user_info encode')
    parser.add_argument('--page_size', type=int, help='for command get')
    parser.add_argument('--req', type=str, help='for raw command')
    parser.add_argument('--res_len', type=int, help='for raw command', default=1000)
    parser.add_argument('--value_only', help='for latest command', action='store_true')
    parser.add_argument('--ser_baudrate', help='serial port baudrate default=115200', default=115200, type=int)
    parser.add_argument('--ser_timeout', help='serial port reading timeout sec', default=5, type=int)
    parser.add_argument('serial_port')
    return parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
| 35.846847 | 135 | 0.641995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,187 | 0.149158 |
103f2a791154bb415c83bcbe5d49dfe926f934f8 | 295 | py | Python | Assignment 1/task3/map.py | JeetKamdar/Big-Data-Assignments | 54760de4b2f815168f61539eb9e79ff5e1bd7266 | [
"MIT"
] | null | null | null | Assignment 1/task3/map.py | JeetKamdar/Big-Data-Assignments | 54760de4b2f815168f61539eb9e79ff5e1bd7266 | [
"MIT"
] | null | null | null | Assignment 1/task3/map.py | JeetKamdar/Big-Data-Assignments | 54760de4b2f815168f61539eb9e79ff5e1bd7266 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import string
import re
# Hadoop-streaming mapper: for each CSV record on stdin, emit
# "licence_type<TAB>amount_due" (columns 2 and -6 of the record).
for record in sys.stdin:
    # Rows containing quotes need the regex splitter so commas inside
    # quoted fields do not split the record.
    if '"' in record:
        fields = re.split(''',(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', record)
    else:
        fields = record.split(",")
    print("%s\t%s" % (fields[2], fields[-6]))
| 16.388889 | 64 | 0.566102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.247458 |
103fc50711d0b0de84f01f2724705c3318868eda | 273 | py | Python | src/aijack/attack/inversion/__init__.py | luoshenseeker/AIJack | 4e871a5b3beb4b7c976d38060d6956efcebf880d | [
"MIT"
] | 1 | 2022-03-17T21:17:44.000Z | 2022-03-17T21:17:44.000Z | src/aijack/attack/inversion/__init__.py | luoshenseeker/AIJack | 4e871a5b3beb4b7c976d38060d6956efcebf880d | [
"MIT"
] | null | null | null | src/aijack/attack/inversion/__init__.py | luoshenseeker/AIJack | 4e871a5b3beb4b7c976d38060d6956efcebf880d | [
"MIT"
] | 1 | 2022-03-17T21:17:46.000Z | 2022-03-17T21:17:46.000Z | from .gan_attack import GAN_Attack # noqa: F401
from .generator_attack import Generator_Attack # noqa: F401
from .gradientinversion import GradientInversion_Attack # noqa: F401
from .mi_face import MI_FACE # noqa: F401
from .utils import DataRepExtractor # noqa: F401
| 45.5 | 69 | 0.798535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.21978 |
104022f80f0cfe30ed3aab519ac4eeadac303cfa | 867 | py | Python | INBa/2015/ZORIN_D_I/task_4_7.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | INBa/2015/ZORIN_D_I/task_4_7.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | INBa/2015/ZORIN_D_I/task_4_7.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | # Задача 4. Вариант 7.
# Напишите программу, которая выводит имя, под которым скрывается Мария Луиза Чеччарелли. Дополнительно необходимо вывести область интересов указанной личности, место рождения, годы рождения и смерти (если человек умер), вычислить возраст на данный момент (или момент смерти). Для хранения всех необходимых данных требуется использовать переменные. После вывода информации программа должна дожидаться пока пользователь нажмет Enter для выхода.
# Зорин Д.И.
# 11.04.2016
# Biographical data for Maria Luisa Ceciarelli (stage name: Monica Vitti).
name = "Мария Луиза Чеччарелли"
birthplace = "Рим, Италия"
date1 = 1931  # year of birth
date2 = 2016  # reference year for the age calculation
age = date2 - date1
interest = "Киноиндустрия"
print(name+"- наиболее известена как Моника Витти- итальянская актриса")
print("Место рождения: "+birthplace)
print("Год рождения: ", date1)
print("Возраст: ", age)
print("Область интересов: "+interest)
# Keep the console window open until the user presses Enter.
input("\n\nДля выхода нажми ENTER")
| 43.35 | 443 | 0.7797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,236 | 0.877841 |
10402fdf566c301bcd9dcb713ba61afdb6b551f7 | 676 | py | Python | backend/api/models/request.py | haroldadmin/transportation-analytics-platform | 366891dc422d3a72287b3224fbf5b0daf3d14751 | [
"Apache-2.0"
] | null | null | null | backend/api/models/request.py | haroldadmin/transportation-analytics-platform | 366891dc422d3a72287b3224fbf5b0daf3d14751 | [
"Apache-2.0"
] | null | null | null | backend/api/models/request.py | haroldadmin/transportation-analytics-platform | 366891dc422d3a72287b3224fbf5b0daf3d14751 | [
"Apache-2.0"
] | null | null | null | from flask_restplus import fields, Model
def add_models_to_namespace(namespace):
    """Register the route-request model on a flask-restplus namespace so it
    can be referenced in that namespace's marshalling/Swagger docs."""
    namespace.models[route_request_model.name] = route_request_model


# Marshalling model describing one ride's route request payload.
route_request_model = Model("Represents a Route Request", {
    "id": fields.Integer(description="Unique identifier for the ride"),
    "start_point_lat": fields.Float(description="Represents the latitude of the starting point"),
    "start_point_long": fields.Float(description="Represents the longitude of the starting point"),
    "end_point_lat": fields.Float(description="Represents the latitude of the ending point"),
    "end_point_long": fields.Float(description="Represents the longitude of the ending point")
})
| 45.066667 | 99 | 0.778107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.467456 |
10413843e688dfdc69f4f93d1b633fcb197a316d | 4,554 | py | Python | uitester/ui/main_window.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | [
"Apache-2.0"
] | 4 | 2016-07-12T09:01:52.000Z | 2016-12-07T03:11:02.000Z | uitester/ui/main_window.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | [
"Apache-2.0"
] | null | null | null | uitester/ui/main_window.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | [
"Apache-2.0"
] | 3 | 2016-11-29T02:13:17.000Z | 2019-10-16T06:25:20.000Z | # @Time : 2016/8/17 10:56
# @Author : lixintong
import logging
import os
import sys
from PyQt5 import uic
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtWidgets import QMainWindow, QApplication, QDesktopWidget, QMessageBox
from uitester.test_manager.tester import Tester
from uitester.ui.case_manager.case_editor import EditorWidget
from uitester.ui.case_manager.case_manager import CaseManagerWidget
from uitester.ui.case_report.task_record_stats import RunnerEventWidget
from uitester.ui.case_run.case_run import RunWidget
from uitester.ui.case_setting.case_setting import SettingWidget
logger = logging.getLogger("Tester")
class MainWindow(QMainWindow):
    """Top-level window hosting the Case / Run / Report / Setting tabs and
    owning the shared Tester (RPC) instance."""
    # Shared test manager used by all tabs (class-level, created once).
    tester = Tester()
    # Editor open modes: 0 = create a new case, 1 = modify an existing one.
    case_editor_add_type = 0
    case_editor_modify_type = 1
    # (editor mode, case id) -> open the case editor window.
    show_case_editor_signal = pyqtSignal(int, int, name='show_case_editor')
    # Ask the case-manager tab to reload its data.
    refresh_case_data_signal = pyqtSignal(name='refresh_case_data')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.show_case_editor_signal.connect(self.show_case_editor, Qt.QueuedConnection)
        ui_dir_path = os.path.dirname(__file__)
        ui_file_path = os.path.join(ui_dir_path, 'mainwindow.ui')
        uic.loadUi(ui_file_path, self)
        # TODO: revisit the window sizing policy (half the screen, min 700x350).
        screen = QDesktopWidget().screenGeometry()
        self.resize(screen.width() / 2, screen.height() / 2)
        self.setMinimumSize(700, 350)
        self.setWindowTitle("uitest")
        self.move((screen.width() - self.width()) / 2, (screen.height() - self.height()) / 2)  # draw centered
        # Add tab "Case"
        case_manager_widget = CaseManagerWidget(self.show_case_editor_signal,
                                                self.tester)
        self.tabWidget.addTab(case_manager_widget, "Case")
        # Add tab "Run"
        case_run_widget = RunWidget(self.tester)
        self.tabWidget.addTab(case_run_widget, "Run")
        # Add tab "Report"
        case_report_widget = RunnerEventWidget()
        self.tabWidget.addTab(case_report_widget, "Report")
        # Add tab "Setting"
        case_setting_widget = SettingWidget()
        self.tabWidget.addTab(case_setting_widget, "Setting")
        self.refresh_case_data_signal.connect(case_manager_widget.refresh)
        self.message_box = QMessageBox()
        self.start_rpc_server()
        # True while the user has cancelled closing the case-editor window.
        self.is_editor_close_cancel = False

    def start_rpc_server(self):
        """
        Start the tester's RPC server; warn the user (without aborting)
        if it fails so it can be restarted from the Setting tab.
        :return:
        """
        try:
            self.tester.start_server()
        except Exception as e:
            logger.exception(str(e))
            self.message_box.warning(self, "Message", "Fail to start RPC-Server, Please restart it in settings.",
                                     QMessageBox.Ok)

    def closeEvent(self, event):
        """
        Main-window close handler: if the case editor is open and visible,
        ask for confirmation; the editor itself may still veto the close
        via close_cancel_signal -> editor_close_ignore.
        :return:
        """
        if not hasattr(self, 'case_edit_window'):  # case_edit_window is not exist
            self.close()
            return
        if not self.case_edit_window.isVisible():  # case_edit_window is not visible
            self.close()
            return
        # signal for case_edit_window's closeEvent
        self.case_edit_window.close_cancel_signal.connect(self.editor_close_ignore, Qt.DirectConnection)
        # case_edit_window is visible
        reply = self.message_box.question(self, "Confirm Close?", "The editor is opened, still close?",
                                          QMessageBox.Yes | QMessageBox.Cancel)
        if reply == QMessageBox.Yes:
            self.is_editor_close_cancel = False
            self.case_edit_window.close()
            if self.is_editor_close_cancel:  # editor close is canceled
                event.ignore()
                return
            self.close()
        else:
            event.ignore()

    def editor_close_ignore(self):
        """
        Slot for the editor's close_cancel_signal: records that the editor
        window's close was cancelled so closeEvent keeps this window open.
        :return:
        """
        self.is_editor_close_cancel = True

    def show_case_editor(self, type, id):
        """Open the case editor: add mode creates a new case, otherwise the
        editor is opened on the case with the given id."""
        if type == self.case_editor_add_type:
            self.case_edit_window = EditorWidget(self.refresh_case_data_signal, self.tester)
        else:
            self.case_edit_window = EditorWidget(self.refresh_case_data_signal, self.tester, str(id))
        self.case_edit_window.show()
def start():
    """Create the Qt application, show the main window and run the event loop."""
    app = QApplication(sys.argv)
    # app.setStyle('Windows')
    app.setStyle('fusion')
    widget = MainWindow()
    widget.show()
    sys.exit(app.exec_())
# Script entry point.
if __name__ == '__main__':
    start()
| 35.030769 | 113 | 0.647343 | 3,708 | 0.812089 | 0 | 0 | 0 | 0 | 0 | 0 | 817 | 0.178931 |
1041fae3393c3977b85dd8a7c05c2822f0550318 | 13,034 | py | Python | demos/DPSRGAN/dpsrmodels/basicblock.py | hduba/MDF | 62eb2a1a5b8274206aae71903da2c2ad33ce687d | [
"BSD-3-Clause"
] | 1 | 2021-05-18T06:20:49.000Z | 2021-05-18T06:20:49.000Z | demos/DPSRGAN/dpsrmodels/basicblock.py | hduba/MDF | 62eb2a1a5b8274206aae71903da2c2ad33ce687d | [
"BSD-3-Clause"
] | null | null | null | demos/DPSRGAN/dpsrmodels/basicblock.py | hduba/MDF | 62eb2a1a5b8274206aae71903da2c2ad33ce687d | [
"BSD-3-Clause"
] | 2 | 2021-09-09T19:07:57.000Z | 2021-10-02T15:56:59.000Z | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module
'''
# ===================================
# Advanced nn.Sequential
# reform nn.Sequentials and nn.Modules
# to a single nn.Sequential
# ===================================
'''
def sequential(*args):
    """Merge modules and nn.Sequentials into one flat nn.Sequential.

    A single non-OrderedDict argument is returned unchanged; an OrderedDict
    is rejected. Children of nn.Sequential arguments are flattened in.
    """
    if len(args) == 1:
        only = args[0]
        if isinstance(only, OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        return only
    flattened = []
    for item in args:
        if isinstance(item, nn.Sequential):
            flattened.extend(item.children())
        elif isinstance(item, nn.Module):
            flattened.append(item)
    return nn.Sequential(*flattened)
'''
# ===================================
# Useful blocks
# --------------------------------
# conv (+ normaliation + relu)
# concat
# sum
# resblock (ResBlock)
# resdenseblock (ResidualDenseBlock_5C)
# resinresdenseblock (RRDB)
# ===================================
'''
# -------------------------------------------------------
# return nn.Sequantial of (Conv + BN + ReLU)
# -------------------------------------------------------
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CBR'):
    """Build an nn.Sequential from a mode string, one layer per character.

    Codes: C=Conv2d, T=ConvTranspose2d, B=BatchNorm2d, I=InstanceNorm2d,
    R/r=ReLU (inplace / not), L/l=LeakyReLU (inplace / not),
    2/3/4=PixelShuffle(x2/x3/x4), U=Upsample x2, u=Upsample x3,
    M=MaxPool2d, A=AvgPool2d.

    :raises NotImplementedError: on an unknown mode character.
    """
    L = []
    for t in mode:
        if t == 'C':
            L.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'T':
            L.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'B':
            L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=1e-04, affine=True))
        elif t == 'I':
            L.append(nn.InstanceNorm2d(out_channels, affine=True))
        elif t == 'R':
            L.append(nn.ReLU(inplace=True))
        elif t == 'r':
            L.append(nn.ReLU(inplace=False))
        elif t == 'L':
            L.append(nn.LeakyReLU(negative_slope=1e-1, inplace=True))
        elif t == 'l':
            L.append(nn.LeakyReLU(negative_slope=1e-1, inplace=False))
        elif t == '2':
            L.append(nn.PixelShuffle(upscale_factor=2))
        elif t == '3':
            L.append(nn.PixelShuffle(upscale_factor=3))
        elif t == '4':
            L.append(nn.PixelShuffle(upscale_factor=4))
        elif t == 'U':
            L.append(nn.Upsample(scale_factor=2, mode='nearest'))
        elif t == 'u':
            L.append(nn.Upsample(scale_factor=3, mode='nearest'))
        elif t == 'M':
            L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        elif t == 'A':
            L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        else:
            # Fixed: the original message was 'Undefined type: '.format(t),
            # which lacks a '{}' placeholder and so never showed the code.
            raise NotImplementedError('Undefined type: {}'.format(t))
    return sequential(*L)
class MeanShift(nn.Conv2d):
    """Frozen 1x1 conv that shifts an RGB image by the dataset mean.

    With sign=-1 the mean is subtracted; with sign=+1 it is added back.
    All parameters are frozen (requires_grad=False).
    """
    def __init__(self, rgb_range=255, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std_t = torch.Tensor(rgb_std)
        identity = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data = identity / std_t.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std_t
        for param in self.parameters():
            param.requires_grad = False
# -------------------------------------------------------
# Concat the output of a submodule to its input
# -------------------------------------------------------
class ConcatBlock(nn.Module):
    """Concatenate a submodule's output to its input along the channel dim."""
    def __init__(self, submodule):
        super(ConcatBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return torch.cat((x, self.sub(x)), dim=1)

    def __repr__(self):
        return repr(self.sub) + 'concat'
# -------------------------------------------------------
# Elementwise sum the output of a submodule to its input
# -------------------------------------------------------
class ShortcutBlock(nn.Module):
    """Elementwise residual wrapper: forward(x) = x + submodule(x)."""
    def __init__(self, submodule):
        super(ShortcutBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return x + self.sub(x)

    def __repr__(self):
        # Prefix every line of the submodule repr with '|' for readability.
        return 'Identity + \n|' + repr(self.sub).replace('\n', '\n|')
# -------------------------------------------------------
# Res Block: x + conv(relu(conv(x)))
# -------------------------------------------------------
class ResBlock(nn.Module):
    """Residual block: forward(x) = x + body(x), where body is built by
    conv() from the mode string (default conv-ReLU-conv)."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC'):
        super(ResBlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            # A leading activation is lower-cased so conv() builds the
            # non-inplace variant, keeping the residual input intact.
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode)

    def forward(self, x):
        return x + self.res(x)
# -------------------------------------------------------
# Channel Attention (CA) Layer
# -------------------------------------------------------
class CALayer(nn.Module):
    """Channel Attention layer: squeeze (global avg-pool) -> two 1x1 convs
    with a channel bottleneck -> sigmoid gate multiplied onto the input."""
    def __init__(self, channel=64, reduction=16):
        super(CALayer, self).__init__()
        squeezed = channel // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_fc = nn.Sequential(
            nn.Conv2d(channel, squeezed, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(squeezed, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        attention = self.conv_fc(self.avg_pool(x))
        return x * attention
# -------------------------------------------------------
# Residual Channel Attention Block (RCAB)
# -------------------------------------------------------
class RCABlock(nn.Module):
    """Residual Channel Attention Block: x + CA(body(x)) (RCAN-style)."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16):
        super(RCABlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R','L']:
            # Lower-case a leading activation so conv() builds the
            # non-inplace variant (keeps the residual input intact).
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode)
        self.ca = CALayer(out_channels, reduction)
    def forward(self, x):
        res = self.res(x)
        res = self.ca(res)
        return res + x
# -------------------------------------------------------
# Residual Channel Attention Group (RG)
# -------------------------------------------------------
class RCAGroup(nn.Module):
    """Residual Group: nb RCABlocks plus a trailing plain conv, wrapped in
    a long skip connection around the whole group (RCAN-style)."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, nb=12):
        super(RCAGroup, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R','L']:
            # Non-inplace leading activation (see conv()).
            mode = mode[0].lower() + mode[1:]
        RG = [RCABlock(in_channels, out_channels, kernel_size, stride, padding, bias, mode, reduction) for _ in range(nb)]
        RG.append(conv(out_channels, out_channels, mode='C'))
        self.rg = nn.Sequential(*RG)  # self.rg = ShortcutBlock(nn.Sequential(*RG))
    def forward(self, x):
        res = self.rg(x)
        return res + x
# -------------------------------------------------------
# Residual Dense Block
# style: 5 convs
# -------------------------------------------------------
class ResidualDenseBlock_5C(nn.Module):
    """Residual Dense Block with 5 convolutions.

    Each conv sees the block input concatenated with all previous conv
    outputs; the final output is scaled by 0.2 and added to the input.
    """
    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR'):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channel
        self.conv1 = conv(nc, gc, kernel_size, stride, padding, bias, mode)
        self.conv2 = conv(nc+gc, gc, kernel_size, stride, padding, bias, mode)
        self.conv3 = conv(nc+2*gc, gc, kernel_size, stride, padding, bias, mode)
        self.conv4 = conv(nc+3*gc, gc, kernel_size, stride, padding, bias, mode)
        # Final conv maps back to nc channels and drops the last mode code
        # (the activation), so the block output is linear.
        self.conv5 = conv(nc+4*gc, nc, kernel_size, stride, padding, bias, mode[:-1])
    def forward(self, x):
        x1 = self.conv1(x)
        x2 = self.conv2(torch.cat((x, x1), 1))
        x3 = self.conv3(torch.cat((x, x1, x2), 1))
        x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # In-place scale is safe here: x5 is produced locally by conv5.
        return x5.mul_(0.2) + x
# -------------------------------------------------------
# Residual in Residual Dense Block
# 3x5c
# -------------------------------------------------------
class RRDB(nn.Module):
    """Residual-in-Residual Dense Block: three chained
    ResidualDenseBlock_5C with a 0.2-scaled outer residual."""
    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR'):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode)
        self.RDB2 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode)
        self.RDB3 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode)
    def forward(self, x):
        out = self.RDB1(x)
        out = self.RDB2(out)
        out = self.RDB3(out)
        # In-place scale is safe: out is the local result of RDB3.
        return out.mul_(0.2) + x
'''
# ======================
# Upsampler
# ======================
'''
# -------------------------------------------------------
# conv + subp + relu
# -------------------------------------------------------
def upsample_pixelshuffle(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R'):
    """Conv to out_channels*scale^2 channels followed by PixelShuffle;
    the scale (2/3/4) is taken from mode[0]."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels * (scale ** 2), kernel_size, stride, padding, bias, mode='C' + mode)
# -------------------------------------------------------
# nearest_upsample + conv + relu
# -------------------------------------------------------
def upsample_upconv(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R'):
    """Nearest-neighbour upsample followed by conv; mode[0] picks the scale
    ('2' -> Upsample x2 code 'U', '3' -> x3 code 'u')."""
    assert len(mode)<4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    if mode[0] == '2':
        uc = 'UC'
    elif mode[0] == '3':
        uc = 'uC'
    mode = mode.replace(mode[0], uc)
    up1 = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode)
    return up1
# -------------------------------------------------------
# convTranspose + relu
# -------------------------------------------------------
def upsample_convtranspose(in_channels=64, out_channels=3, kernel_size=2, stride=2, padding=0, bias=True, mode='2R'):
    """Transposed-conv upsampler; kernel size and stride are both forced to
    the scale encoded in mode[0] (the kernel_size/stride arguments are
    effectively overridden, as in the original)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode.replace(mode[0], 'T'))
'''
# ======================
# Downsampler
# ======================
'''
# -------------------------------------------------------
# strideconv + relu
# -------------------------------------------------------
def downsample_strideconv(in_channels=64, out_channels=64, kernel_size=2, stride=2, padding=0, bias=True, mode='2R'):
    """Strided-conv downsampler; kernel size and stride are both forced to
    the scale encoded in mode[0] (the kernel_size/stride arguments are
    effectively overridden, as in the original)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode.replace(mode[0], 'C'))
# -------------------------------------------------------
# maxpooling + conv + relu
# -------------------------------------------------------
def downsample_maxpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0, bias=True, mode='2R'):
    """MaxPool (window/stride from mode[0]) followed by a conv tail built
    from the rest of the mode string."""
    assert len(mode)<4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    kernel_size_pool = int(mode[0])
    stride_pool = int(mode[0])
    # 'M' = MaxPool2d stage; the remaining codes drive the conv tail.
    mode = mode.replace(mode[0], 'MC')
    pool = conv(kernel_size=kernel_size_pool, stride=stride_pool, mode=mode[0])
    pool_tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:])
    return sequential(pool, pool_tail)
# -------------------------------------------------------
# averagepooling + conv + relu
# -------------------------------------------------------
def downsample_avgpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='2R'):
    """AvgPool (window/stride from mode[0]) followed by a conv tail built
    from the rest of the mode string."""
    assert len(mode)<4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    kernel_size_pool = int(mode[0])
    stride_pool = int(mode[0])
    # 'A' = AvgPool2d stage; the remaining codes drive the conv tail.
    mode = mode.replace(mode[0], 'AC')
    pool = conv(kernel_size=kernel_size_pool, stride=stride_pool, mode=mode[0])
    pool_tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:])
    return sequential(pool, pool_tail)
| 38.448378 | 160 | 0.529155 | 5,055 | 0.387832 | 0 | 0 | 0 | 0 | 0 | 0 | 3,586 | 0.275127 |
1042258a2c6461db6839a5ffc457fce5341726df | 8,251 | py | Python | ml-models-analyses/readahead-mixed-workload/kmlparsing.py | drewscottt/kernel-ml | 713fd825e3681d2d7d5250120c992cb19b662351 | [
"Apache-2.0"
] | 167 | 2021-11-23T04:10:12.000Z | 2022-03-19T21:31:22.000Z | ml-models-analyses/readahead-mixed-workload/kmlparsing.py | drewscottt/kernel-ml | 713fd825e3681d2d7d5250120c992cb19b662351 | [
"Apache-2.0"
] | null | null | null | ml-models-analyses/readahead-mixed-workload/kmlparsing.py | drewscottt/kernel-ml | 713fd825e3681d2d7d5250120c992cb19b662351 | [
"Apache-2.0"
] | 9 | 2021-11-24T18:03:25.000Z | 2022-02-06T01:30:23.000Z | #
# Copyright (c) 2019-2021 Ibrahim Umit Akgun
# Copyright (c) 2021-2021 Andrew Burford
# Copyright (c) 2021-2021 Mike McNeill
# Copyright (c) 2021-2021 Michael Arkhangelskiy
# Copyright (c) 2020-2021 Aadil Shaikh
# Copyright (c) 2020-2021 Lukas Velikov
# Copyright (c) 2019-2021 Erez Zadok
# Copyright (c) 2019-2021 Stony Brook University
# Copyright (c) 2019-2021 The Research Foundation of SUNY
#
# You can redistribute it and/or modify it under the terms of the Apache License,
# Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0).
#
from collections import defaultdict
import re
import sys
import os
def find_avg_faults(time_values):
    """Turn (duration, count) pairs into cumulative-time x and rate y lists.

    x[i] is the running sum of durations; y[i] is count/duration for the
    i-th interval.
    """
    x = []
    y = []
    elapsed = 0
    for vals in time_values:
        t_delta, maj_faults = vals[0], vals[1]
        elapsed += t_delta
        x.append(elapsed)
        y.append(maj_faults / t_delta)
    return x, y
def avg(data):
    """Unweighted mean of the second element of each (duration, value) pair."""
    values = [value for _duration, value in data]
    return sum(values) / len(data)
def weighted_avg(data):
    """Duration-weighted mean of the second element of each
    (duration, value) pair."""
    total_time = sum(duration for duration, _value in data)
    weighted_sum = sum(duration * value for duration, value in data)
    return weighted_sum / total_time
def parse_bench_time_values(values_dict, fn, workloads):
    """Parse a /usr/bin/time -v log into per-run stat lists (mutates values_dict).

    values_dict maps (current_workload, background_workload) tuples to lists
    of [elapsed_sec, major_faults, minor_faults, fs_inputs, fs_outputs] --
    field order follows the order the lines appear in the log. ``workloads``
    is the pair of workload names that ran together.
    """
    start = re.compile(r'\tCommand being timed: "\S+ --benchmarks=(\w+)')
    elap = re.compile(r'\tElapsed \(wall clock\) time \(h:mm:ss or m:ss\): (\d+):([\d\.]+)')
    major = re.compile(r'\tMajor \(requiring I/O\) page faults: (\d+)')
    minor = re.compile(r'\tMinor \(reclaiming a frame\) page faults: (\d+)')
    inputs = re.compile(r'\tFile system inputs: (\d+)')
    outputs = re.compile(r'\tFile system outputs: (\d+)')
    end = re.compile(r'\tExit status: \d+')
    with open(fn) as f:
        for line in f.readlines():
            match = start.match(line)
            if match:
                # New run: figure out which workload ran in the background.
                curr_workload = match.group(1)
                load_set = set(workloads)
                load_set.remove(curr_workload)
                other_workload = load_set.pop()
                workload = values_dict[(curr_workload, other_workload)]
                data = []
            match = elap.match(line)
            if match:
                # minutes:seconds -> seconds
                sec = 60 * int(match.group(1))
                sec += float(match.group(2))
                data.append(sec)
            for exp in [major, minor, inputs, outputs]:
                match = exp.match(line)
                if match:
                    data.append(int(match.group(1)))
                    break
            match = end.match(line)
            if match:
                # Exit status closes this run's record.
                workload.append(data)
def parse_bench_ops_sec(values_dict, fn):
    """Accumulate per-workload mean ops/sec from a db_bench output file.

    Matches summary lines such as
        readseq : ... 1000 ops/sec; 45.6 MB/s
    and readrandomwriterandom lines without the MB/s field, then stores the
    mean ops/sec per workload into ``values_dict`` (mutated in place).

    :param values_dict: dict mapping workload name -> mean ops/sec.
    :param fn: path to the benchmark output file.
    """
    start = re.compile(r'(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\s*:.* (\d+) ops/sec;\s+([0-9\.]+) MB/s')
    rwrandomstart = re.compile(r'readrandomwriterandom\s*:.* (\d+) ops/sec;')
    total_occ_dict = {}
    with open(fn) as f:
        data = None
        for line in f:
            if data is None:
                match = start.match(line)
                if match:
                    curr_workload = match.group(1)
                    ops = match.group(4)
                    values_dict.setdefault(curr_workload, 0)
                    values_dict[curr_workload] += int(ops)
                    total_occ_dict.setdefault(curr_workload, 0)
                    total_occ_dict[curr_workload] += 1
                    data = None
                # NOTE(review): a readrandomwriterandom line that also
                # reports MB/s would match both patterns and be counted
                # twice -- confirm that workload never prints MB/s.
                match = rwrandomstart.match(line)
                if match:
                    curr_workload = 'readrandomwriterandom'
                    ops = match.group(1)
                    values_dict.setdefault(curr_workload, 0)
                    values_dict[curr_workload] += int(ops)
                    total_occ_dict.setdefault(curr_workload, 0)
                    total_occ_dict[curr_workload] += 1
                    data = None
                continue
        # Convert accumulated totals into per-workload means.
        for key in total_occ_dict:
            values_dict[key] /= total_occ_dict[key]
def parse_bench_throughput(values_dict, fn, workloads):
    """Collect per-run MB/s records from a db_bench log (mutates values_dict).

    values_dict maps (current_workload, background_workload) tuples to lists
    of [0, throughput_mb_s]; readrandomwriterandom lines (which lack MB/s)
    record a placeholder [0, 1].  ``workloads`` is the pair of workload
    names that ran together.
    """
    start = re.compile(r'(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\s*:.*;\s+([0-9\.]+) MB/s')
    rwrandomstart = re.compile(r'readrandomwriterandom\s*:.*;')
    elap = re.compile(r'\tElapsed \(wall clock\) time \(h:mm:ss or m:ss\): (\d+):([\d\.]+)')
    end = re.compile(r'\tExit status: \d+')
    with open(fn) as f:
        data = None
        for line in f.readlines():
            if data == None:
                match = start.match(line)
                if match:
                    curr_workload = match.group(1)
                    load_set = set(workloads)
                    load_set.remove(curr_workload)
                    other_workload = load_set.pop()
                    workload = values_dict[(curr_workload, other_workload)]
                    throughput = match.group(4)
                    data = [0, float(throughput)]
                    # jk we don't need elap time and sometimes output gets intermixed
                    workload.append(data)
                    data = None
                match = rwrandomstart.match(line)
                if match:
                    curr_workload = 'readrandomwriterandom'
                    load_set = set(workloads)
                    load_set.remove(curr_workload)
                    other_workload = load_set.pop()
                    workload = values_dict[(curr_workload, other_workload)]
                    data = [0, 1]
                    # jk we don't need elap time and sometimes output gets intermixed
                    workload.append(data)
                    data = None
                continue
            # Dead branches below: data is reset to None above, so the
            # elapsed-time/exit-status handling never runs -- kept as-is.
            match = elap.match(line)
            if match:
                sec = 60 * int(match.group(1))
                sec += float(match.group(2))
                data.insert(0, sec)
                continue
            match = end.match(line)
            if match:
                workload.append(data)
                data = None
def generate_combos():
    """Return all (sequential, random) workload pairs.

    The order must match the iteration order used by the
    generate-result-*.sh scripts.
    """
    seq_loads = ["readseq", "readreverse"]
    rand_loads = ["readrandom", "readrandomwriterandom", "mixgraph"]
    return [(seq, rand) for seq in seq_loads for rand in rand_loads]
def parse_detail_file(dict_exp, file_path) -> defaultdict:
    """Parse a per-experiment 'detail' file into dict_exp and return it.

    Header lines have two fields; a header whose second field is '1' starts a
    new experiment, which is matched (in order) against the combos from
    generate_combos() and keyed as (foreground, background).  Data lines append
    [cumulative_time, value] samples, where column 4 is the time delta and
    column 2 the sampled value.  Exits the process on a malformed file.
    """
    combos = generate_combos()
    i = 0
    with open(os.path.join(os.curdir, file_path)) as f:
        curr_exp = None
        for line in f:
            values = line.split()
            if len(values) == 2:
                if values[1] == '1':
                    curr_exp = values[0]
                    # Advance to the combo containing this workload; the bounds
                    # check avoids an IndexError when the workload never appears.
                    while i < len(combos) and curr_exp not in combos[i]:
                        i += 1
                    if i == len(combos):
                        print(f'detail file {file_path} badly formatted')
                        print(f'{curr_exp} not in combos {combos}')
                        sys.exit(1)
                    # The other member of the matched combo ran in the background.
                    background_exp = set(combos[i])
                    background_exp.remove(curr_exp)
                    background_exp = background_exp.pop()
                    curr_exp = (curr_exp, background_exp)
                    i += 1
                elif values[0] not in curr_exp:
                    print(f'detail file {file_path} badly formatted')
                    sys.exit(1)
            else:
                if curr_exp is None:
                    # Data line before any experiment header.
                    print(f'detail file {file_path} badly formatted')
                    sys.exit(1)
                # Accumulate elapsed time; the first sample starts at 0.
                x = 0 if len(dict_exp[curr_exp]) == 0 else dict_exp[curr_exp][-1][0] + float(values[4])
                dict_exp[curr_exp].append([x, float(values[2])])
    return dict_exp
def parse_kern_log_file(file_path) -> defaultdict:
    """Collect readahead values per experiment from a kernel log dump.

    A two-field line names the current experiment (stored as a tuple key);
    on other lines, if field 5 is 'readahead', field 8 is recorded as a float.
    """
    results = defaultdict(list)
    with open(os.path.join(os.curdir, file_path)) as log:
        for raw_line in log.readlines():
            fields = raw_line.split()
            if len(fields) == 2:
                current_key = tuple(fields)
            elif fields[5] == 'readahead':
                results[current_key].append(float(fields[8]))
    return results
def mean(arr):
    """Arithmetic mean of the values in *arr* (raises ZeroDivisionError if empty)."""
    n = len(arr)
    return sum(arr) / n
| 39.104265 | 119 | 0.533026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,741 | 0.211005 |
10425f3043aed2d9ee265e0c0950ea8c8e237a9e | 6,374 | py | Python | src/py_dss_tools/secondary/Circuit.py | eniovianna/py_dss_tools | 3057fb0b74facd05a362e4e4a588f79f70aa9dd7 | [
"MIT"
] | 3 | 2021-05-29T00:40:10.000Z | 2021-09-30T17:56:14.000Z | src/py_dss_tools/secondary/Circuit.py | eniovianna/py_dss_tools | 3057fb0b74facd05a362e4e4a588f79f70aa9dd7 | [
"MIT"
] | null | null | null | src/py_dss_tools/secondary/Circuit.py | eniovianna/py_dss_tools | 3057fb0b74facd05a362e4e4a588f79f70aa9dd7 | [
"MIT"
] | 3 | 2021-05-29T00:40:46.000Z | 2022-01-13T22:04:49.000Z | # -*- encoding: utf-8 -*-
"""
Created by Ênio Viana at 01/09/2021 at 19:51:44
Project: py_dss_tools [set, 2021]
"""
import attr
import pandas as pd
from py_dss_tools.model.other import VSource
from py_dss_tools.utils import Utils
@attr.s
class Circuit(VSource):
    """attrs-based model of a DSS ``Circuit`` (voltage source) element:
    name, base voltage, per-unit voltage, phases, source bus, angle and
    short-circuit MVA levels."""

    _name = attr.ib(validator=attr.validators.instance_of(str), default='')
    _basekv = attr.ib(validator=attr.validators.instance_of((int, float)), default=115)
    _pu = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.001)
    _phases = attr.ib(validator=attr.validators.instance_of(int), default=3)
    _bus1 = attr.ib(validator=attr.validators.instance_of(str), default='')
    _angle = attr.ib(validator=attr.validators.instance_of((int, float)), default=0)
    # TODO: review these default short-circuit values
    _mvasc3 = attr.ib(validator=attr.validators.instance_of((int, float)), default=21000)
    _mvasc1 = attr.ib(validator=attr.validators.instance_of((int, float)), default=24000)

    # TODO: check for the existence of more than one Circuit at creation time
    def __attrs_post_init__(self):
        """Normalize the provided name, or generate a random one if empty."""
        if self._name != '':
            self._name = Utils.remove_blank_spaces(self._name)
        else:
            self._name = 'my_circuit_' + Utils.generate_random_string()

    def to_dataframe(self):
        """Return a single-row pandas DataFrame of this circuit's attributes."""
        return pd.DataFrame.from_records([self.__dict__])

    def to_dict(self) -> dict:
        """Return the instance attributes as a dict."""
        return self.__dict__

    def to_list(self) -> list:
        """Return the instance attribute *names* (dict keys) as a list."""
        return list(self.__dict__)

    @property
    def name(self) -> str:
        """Circuit name (always stripped of blank spaces)."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        Utils.check_instance(value, 'name', ['str'], )
        self._name = Utils.remove_blank_spaces(value)

    @property
    def basekv(self):
        """Base voltage in kV."""
        return self._basekv

    @basekv.setter
    def basekv(self, value):
        self._basekv = value

    @property
    def phases(self):
        """Number of phases."""
        return self._phases

    @phases.setter
    def phases(self, value):
        self._phases = value

    # @property
    # def df_lines(self):
    #     return self._df_lines

    # @df_lines.setter
    # def df_lines(self, value):
    #     a_series = pd.Series(value, index=self._df_lines.columns)
    #     self._df_lines = self._df_lines.append(a_series, ignore_index=True)

    # def create_circuit(self, dss_file):
    #     self.dss.text("compile [{}]".format(dss_file))
    #
    # def get_all_buses(self):
    #     buses = Bus(self.dss)
    #     return buses.get_buses()

    # def get_all_lines(self):
    #     self.dss.lines_first()
    #     while self.dss.lines_next() != 0:
    #         print(self.dss.lines_read_phases())
    #         print(self.dss.lines_read_units())
    #
    # def reset(self):
    #     """
    #     Resets all Monitors, Energymeters, etc. If no argument specified, resets all options listed.
    #     :return:
    #     """
    #     self.dss.text("reset")
    #
    # def sample(self):
    #     """
    #     Force all monitors and meters to take a sample for the most recent solution. Keep in mind that meters will
    #     perform integration.
    #     :return:
    #     """
    #     self.dss.text("sample")
    #
    # def seq_currents(self):
    #     """
    #     Returns the sequence currents into all terminals of the active circuit element (see Select command) in Result
    #     string. Returned as comma-separated magnitude only values.Order of returned values: 0, 1, 2 (for each
    #     terminal).
    #     :return:
    #     """
    #     aux = self.dss.text("seqcurrents").strip().replace(" ", "").split(sep=",")
    #     seq_currents = list()
    #     for n in range(len(aux)):
    #         if aux[n] != '':
    #             seq_currents.append(float(aux[n]))
    #     return seq_currents
    #
    # def seq_powers(self):
    #     """
    #     Returns the sequence powers into all terminals of the active circuit element (see Select command) in Result
    #     string. Returned as comma-separated kw, kvar pairs.Order of returned values: 0, 1, 2 (for each terminal).
    #     :return:
    #     """
    #     aux = self.dss.text("seqpowers").strip().replace(" ", "").split(sep=",")
    #     seq_powers = list()
    #     for n in range(len(aux)):
    #         if aux[n] != '':
    #             seq_powers.append(float(aux[n]))
    #     return seq_powers
    #
    # def seq_voltages(self):
    #     """
    #     Returns the sequence voltages at all terminals of the active circuit element (see Select command) in Result
    #     string. Returned as comma-separated magnitude only values.Order of returned values: 0, 1, 2 (for each
    #     terminal).
    #     :return:
    #     """
    #     aux = self.dss.text("seqvoltages").strip().replace(" ", "").split(sep=",")
    #     seq_voltages = list()
    #     for n in range(len(aux)):
    #         if aux[n] != '':
    #             seq_voltages.append(float(aux[n]))
    #     return seq_voltages
    #
    # def get_voltages(self):
    #     return self.dss.circuit_allbusvmagpu()
    #
    # def get_voltage_min(self):
    #     v = Circuit.get_voltages(self.dss)
    #     return min(v)
    #
    # def get_voltage_max(self):
    #     v = Circuit.get_voltages(self.dss)
    #     return max(v)
    #
    # def get_active_power(self):
    #     return self.dss.circuit_total_power()[0]
    #
    # def get_reactive_power(self):
    #     return self.dss.circuit_total_power()[1]
    #
    # def create_load(self, **kwargs):
    #     pass
    #
    # def create_transformer(self, **kwargs):
    #     pass
    #
    # def create_line_code(self, **kwargs):
    #     pass
    #
    # def create_line(self, **kwargs):
    #     pass
    #
    # def create_pv_system(self, **kwargs):
    #     pass
    #
    # def create_fuse(self, **kwargs):
    #     pass
"""
new circuit.5Leg
~ bus1=MainBus basekV=230 pu=1.0 isc3=15000 isc1=17000 phases=3 z0=[10, 10] z1=[10, 10] angle=0 mvasc3=200000
mvasc1=200000
"""
# def _str_(self):
# output = ""
# for _, var in vars(self).items():
# output += str(var)
# return output
# def _str_(self):
# return "".join(
# f"{attrib_name} = {attrib_value}\n"
# for attrib_name, attrib_value in self._dict_.items()
# if '_Circuit_df' not in attrib_name and 'dss' not in attrib_name
# )
| 32.191919 | 119 | 0.588798 | 6,133 | 0.961587 | 0 | 0 | 6,141 | 0.962841 | 0 | 0 | 4,016 | 0.629664 |
1045970cd8dff036939d1d34b09329c2cb8a87e3 | 1,270 | py | Python | day2.py | cjfuller/adventofcode2015 | 1fbe75e29def156dbe1dc1336ff857945a6a365c | [
"MIT"
] | null | null | null | day2.py | cjfuller/adventofcode2015 | 1fbe75e29def156dbe1dc1336ff857945a6a365c | [
"MIT"
] | null | null | null | day2.py | cjfuller/adventofcode2015 | 1fbe75e29def156dbe1dc1336ff857945a6a365c | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from util import load_input, bear_init
# Raw puzzle input — presumably the box dimension specs for day 2 (one
# 'LxWxH' spec per line, given how it is split below).
box_specs = load_input(2)
@bear_init
@dataclass
class Box:
    """A rectangular present with integer length, width and height."""

    l: int
    w: int
    h: int

    @classmethod
    def from_str(cls, s: str) -> "Box":
        """Build a Box from an 'LxWxH' spec string."""
        length, width, height = (int(part) for part in s.split("x"))
        return Box(length, width, height)

    @property
    def surface_area(self) -> int:
        """Combined area of all six faces."""
        face_areas = (self.l * self.w, self.w * self.h, self.h * self.l)
        return 2 * sum(face_areas)

    @property
    def paper_area(self) -> int:
        """Total area of paper required.

        >>> Box.from_str("2x3x4").paper_area
        58
        >>> Box.from_str("1x1x10").paper_area
        43
        """
        smallest_face = min(self.l * self.w, self.w * self.h, self.h * self.l)
        return self.surface_area + smallest_face

    @property
    def perimeters(self) -> tuple[int, int, int]:
        """Perimeter of each of the three distinct faces."""
        return (2 * (self.l + self.w), 2 * (self.w + self.h), 2 * (self.h + self.l))

    @property
    def ribbon_length(self) -> int:
        """Smallest face perimeter plus the volume (for the bow)."""
        volume = self.l * self.w * self.h
        return min(self.perimeters) + volume
# Parse one Box per input line, then solve both puzzle parts.
boxes = [Box.from_str(spec) for spec in box_specs.splitlines()]
total_sqft = sum(box.paper_area for box in boxes)
print(f"Part 1: {total_sqft}")
total_ribbon_ft = sum(box.ribbon_length for box in boxes)
print(f"Part 2: {total_ribbon_ft}")
| 22.280702 | 84 | 0.588189 | 898 | 0.707087 | 0 | 0 | 920 | 0.724409 | 0 | 0 | 218 | 0.171654 |
1047517a8fc519c2245e3ae4ac28ca41f0c16f09 | 7,928 | py | Python | todo/commands/complete.py | Kuro-Rui/JojoCogs | 57b86694e29462c5f5b561bc4a060ed04cfb8deb | [
"MIT"
] | null | null | null | todo/commands/complete.py | Kuro-Rui/JojoCogs | 57b86694e29462c5f5b561bc4a060ed04cfb8deb | [
"MIT"
] | null | null | null | todo/commands/complete.py | Kuro-Rui/JojoCogs | 57b86694e29462c5f5b561bc4a060ed04cfb8deb | [
"MIT"
] | null | null | null | # Copyright (c) 2021 - Jojo#7791
# Licensed under MIT
import asyncio
from contextlib import suppress
from typing import List
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import pagify
from redbot.core.utils.predicates import MessagePredicate
from ..abc import TodoMixin
from ..utils import PositiveInt, ViewTodo
from ..utils.formatting import _format_completed
__all__ = ["Complete"]
class Complete(TodoMixin):
    """Commands that have to do with completed todos"""

    # Shown whenever the invoking user has no completed todos yet.
    _no_completed_message: str = "You do not have any completed todos. You can add one with `{prefix}todo complete <indexes...>`"

    @commands.group()
    async def todo(self, *args):
        # Placeholder so the `complete` group below has a parent to attach to;
        # presumably the real `todo` group is supplied by the cog this mixin
        # is combined into.
        pass

    @todo.group(invoke_without_command=True, require_var_positional=True, aliases=["c"])
    async def complete(self, ctx: commands.Context, *indexes: PositiveInt(False)):  # type:ignore
        """Commands having to do with your completed tasks

        **Arguments**

        - `indexes` Optional indexes to complete. If left at none the help command will be shown
        """
        indexes = [i - 1 for i in indexes]  # type:ignore
        data = await self.cache.get_user_data(ctx.author.id)
        todos = data["todos"]
        if not todos:
            return await ctx.send(self._no_todo_message.format(prefix=ctx.clean_prefix))
        completed = []
        # Pop from the highest index down (as complete_delete does) so earlier
        # pops don't shift the positions of the remaining requested indexes.
        for index in sorted(indexes, reverse=True):
            try:
                completed.append((todos.pop(index))["task"])
            except IndexError:
                pass
            except Exception as e:
                self.log.error("Error in command 'todo complete'", exc_info=e)
        completed.reverse()  # report the tasks in ascending index order
        amount = len(completed)
        if amount == 0:
            return await ctx.send(
                "Hm, somehow I wasn't able to complete those todos. Please make sure that the inputted indexes are valid"
            )
        plural = "" if amount == 1 else "s"
        msg = f"Completed {amount} todo{plural}."
        if data["user_settings"]["extra_details"]:
            msg += "\n" + "\n".join(f"`{task}`" for task in completed)
        task = None
        if len(msg) <= 2000:
            await ctx.send(msg)
        else:
            # Too long for a single message; page it in the background while
            # the cache gets updated.
            task = self.bot.loop.create_task(ctx.send_interactive(pagify(msg)))
        data["completed"].extend(completed)
        data["todos"] = todos
        await self.cache.set_user_data(ctx.author, data)
        await self.cache._maybe_autosort(ctx.author)
        if task is not None and not task.done():
            await task

    @complete.command(
        name="delete", aliases=["del", "remove", "clear"], require_var_positional=True
    )
    async def complete_delete(self, ctx: commands.Context, *indexes: PositiveInt):
        """Delete completed todos

        This will remove them from your completed list

        **Arguments**

        - `indexes` A list of integers for the indexes of your completed todos
        """
        indexes: List[int] = [i - 1 for i in indexes]  # type:ignore
        indexes.sort(reverse=True)  # type:ignore
        completed = await self.cache.get_user_item(ctx.author, "completed")
        if not completed:
            return await ctx.send(self._no_completed_message.format(prefix=ctx.clean_prefix))
        amount = 0  # count only indexes that actually removed something
        for index in indexes:
            try:
                completed.pop(index)
                amount += 1
            except IndexError:
                pass
            except Exception as e:
                self.log.error("Exception in command 'todo complete delete'", exc_info=e)
        if amount == 0:
            return await ctx.send(
                "Hm, somehow I wasn't able to delete those todos. Please make sure that the inputted indexes are valid"
            )
        plural = "" if amount == 1 else "s"
        await ctx.send(f"Deleted {amount} completed todo{plural}")
        await self.cache.set_user_item(ctx.author, "completed", completed)

    @complete.command(name="deleteall", aliases=["delall", "removeall", "clearall"])
    async def complete_remove_all(self, ctx: commands.Context, confirm: bool = False):
        """Remove all of your completed todos

        **Arguments**

        - `confirm` Skips the confirmation check. Defaults to False
        """
        if not confirm:
            msg = await ctx.send(
                "Are you sure you would like to remove all of your completed todos? (y/N)"
            )
            pred = MessagePredicate.yes_or_no(ctx)
            umsg = None  # stays None if the wait below times out
            try:
                umsg = await self.bot.wait_for("message", check=pred)
            except asyncio.TimeoutError:
                pass
            finally:
                with suppress(discord.NotFound, discord.Forbidden):
                    await msg.delete()
                    if umsg is not None:  # guard against NameError on timeout
                        await umsg.add_reaction("\N{WHITE HEAVY CHECK MARK}")
            if not pred.result:
                return await ctx.send("Okay, I will not remove your completed todos.")
        await self.cache.set_user_item(ctx.author, "completed", [])
        await ctx.send("Done. Removed all of your completed todos.")

    @complete.command(name="list")
    async def complete_list(self, ctx: commands.Context):
        """List your completed todos

        This will only list if you have completed todos
        """
        data = await self.cache.get_user_data(ctx.author.id)
        completed = data["completed"]
        if not completed:
            return await ctx.send(self._no_completed_message.format(prefix=ctx.clean_prefix))
        settings = data["user_settings"]
        completed = await _format_completed(completed, False, **settings)
        await self.page_logic(ctx, completed, f"{ctx.author.name}'s Completed Todos", **settings)

    @complete.command(name="reorder", aliases=["move"], usage="<from> <to>")
    async def complete_reorder(
        self, ctx: commands.Context, original: PositiveInt, new: PositiveInt
    ):
        """Move a completed todo from one index to another

        This will error if the index is larger than your completed todo list

        **Arguments**

        - `from` The index of the completed todo
        - `to` The new index of the completed todo
        """
        if original == new:
            return await ctx.send("You cannot move a todo from one index... to the same index")
        completed = await self.cache.get_user_item(ctx.author, "completed")
        if not completed:
            return await ctx.send(self._no_completed_message.format(prefix=ctx.clean_prefix))
        # Convert the 1-based user indexes to 0-based list indexes.
        act_orig = original - 1
        act_new = new - 1
        try:
            task = completed.pop(act_orig)
        except IndexError:
            return await ctx.send(f"I could not find a completed todo at index `{original}`")
        completed.insert(act_new, task)
        msg = f"Moved a completed todo from {original} to {new}"
        await ctx.send(msg)
        # A manual reorder implies the user no longer wants autosorting.
        await self.cache.set_user_setting(ctx.author, "autosorting", False)
        await self.cache.set_user_item(ctx.author, "completed", completed)

    @complete.command(name="view")
    async def complete_view(self, ctx: commands.Context, index: PositiveInt(False)):  # type:ignore
        """View a completed todo. This has a similar effect to using `[p]todo <index>`

        This will have a menu that will allow you to delete the todo

        **Arguments**

        - `index` The index of the todo you want to view.
        """
        actual_index = index - 1
        data = await self.cache.get_user_data(ctx.author.id)
        completed = data["completed"]
        settings = data["user_settings"]
        if not completed:
            return await ctx.send(self._no_completed_message.format(prefix=ctx.clean_prefix))
        try:
            todo = completed[actual_index]
        except IndexError:
            return await ctx.send("That index was invalid")
        await ViewTodo(index, self.cache, todo, completed=True, **settings).start(ctx)
| 41.507853 | 129 | 0.621342 | 7,495 | 0.945383 | 0 | 0 | 7,240 | 0.913219 | 6,781 | 0.855323 | 2,533 | 0.319501 |
10485a191600110454b70bb7ff198f1ba032639d | 544 | py | Python | network/demo_espat_ap_test.py | 708yamaguchi/MaixPy_scripts | 5f1774e739fb7eecab344d619c0cd63a71ff3d4f | [
"MIT"
] | 485 | 2019-03-18T10:53:59.000Z | 2022-03-27T09:02:08.000Z | network/demo_espat_ap_test.py | 708yamaguchi/MaixPy_scripts | 5f1774e739fb7eecab344d619c0cd63a71ff3d4f | [
"MIT"
] | 110 | 2019-04-04T09:07:39.000Z | 2022-03-03T08:08:19.000Z | network/demo_espat_ap_test.py | 708yamaguchi/MaixPy_scripts | 5f1774e739fb7eecab344d619c0cd63a71ff3d4f | [
"MIT"
] | 379 | 2019-03-18T04:48:46.000Z | 2022-03-30T00:29:29.000Z | # This file is part of MaixPY
# Copyright (c) sipeed.com
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
from network_espat import wifi
wifi.reset()
print(wifi.at_cmd("AT\r\n"))
print(wifi.at_cmd("AT+GMR\r\n"))
'''
>>> reset...
b'\r\n\r\nOK\r\n'
b'AT version:1.1.0.0(May 11 2016 18:09:56)\r\nSDK version:1.5.4(baaeaebb)\r\ncompile time:May 20 2016 15:06:44\r\nOK\r\n'
MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210
Type "help()" for more information.
>>>
'''
| 24.727273 | 121 | 0.696691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 448 | 0.823529 |
10491ce24b3f4d1202dbbd3d0299d988fc347ec0 | 1,513 | py | Python | scripts/plot_pca.py | taoyilee/ml_final_project | 0ac5ee3938d70e9ffcae8e186e0ef1a621391980 | [
"Unlicense"
] | null | null | null | scripts/plot_pca.py | taoyilee/ml_final_project | 0ac5ee3938d70e9ffcae8e186e0ef1a621391980 | [
"Unlicense"
] | null | null | null | scripts/plot_pca.py | taoyilee/ml_final_project | 0ac5ee3938d70e9ffcae8e186e0ef1a621391980 | [
"Unlicense"
] | null | null | null | from preprocessing.dataset import SVHNDataset
import numpy as np
import configparser as cp
from datetime import datetime as dt
import os
from sklearn.decomposition import PCA
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
if __name__ == "__main__":
config = cp.ConfigParser()
config.read("config.ini")
batch_size = config["general"].getint("batch_size")
ae_model = config["general"].get("ae_model")
color_mode = config["general"].get("color_mode")
noise_ratio = config["general"].getfloat("noise_ratio")
train_set = SVHNDataset.from_mat("dataset/train_32x32.mat")
print(train_set.images_flatten.shape)
pca = PCA(n_components=5)
image_pc = pca.fit_transform(train_set.images_flatten / 255.)
print(image_pc.shape)
df = pd.DataFrame(columns=["pc0", "pc1", "label"])
df["label"] = train_set.labels.flatten()
df["pc0"] = image_pc[:, 0]
df["pc1"] = image_pc[:, 1]
for i in range(4):
for j in range(i+1, 5):
plt.figure(figsize=(8, 8))
for k in np.unique(train_set.labels):
plt.scatter(image_pc[train_set.labels.flatten() == k, i], image_pc[train_set.labels.flatten() == k, j],
cmap='jet', s=1, label=f"{k}")
plt.xlabel(f"Principal Component {i}")
plt.ylabel(f"Principal Component {j}")
plt.grid()
plt.legend()
plt.savefig(f"images/pca_{train_set.name}_{i}_{j}.png")
plt.close()
| 36.902439 | 119 | 0.634501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.177792 |
104941aa6110b4212489737e4b75dd284643d08f | 4,117 | py | Python | DoodleParser.py | luigiberducci/turni-biblioteca | ae1c65dd768734de4795d38859f23eb655434ad7 | [
"MIT"
] | null | null | null | DoodleParser.py | luigiberducci/turni-biblioteca | ae1c65dd768734de4795d38859f23eb655434ad7 | [
"MIT"
] | null | null | null | DoodleParser.py | luigiberducci/turni-biblioteca | ae1c65dd768734de4795d38859f23eb655434ad7 | [
"MIT"
] | null | null | null | # File: DoodleParser.py
#
# Author: Luigi Berducci
# Date: 2018-11-30
import sys
import datetime
import requests
import json
class DoodleParser:
    """
    Retrieves poll data from doodle.com and fills data structures
    for participants, options and preferences.
    """
    # Class-level defaults kept for backward compatibility; __init__ always
    # shadows them with per-instance state, so instances no longer share
    # (and accumulate into) the same mutable containers.
    pollID = ""
    participants = []
    options = dict()
    calendar = dict()

    def __init__(self, pollID):
        """
        Build the DoodleParser object, fetching the poll from doodle.com.

        Parameteres:
        ------------
        - `pollID`: poll identifier contained in the doodle URL address
        """
        # Per-instance state (see the class-attribute note above).
        self.pollID = pollID
        self.participants = []
        self.options = dict()
        self.calendar = dict()

        JSON = requests.get("https://doodle.com/api/v2.0/polls/" + pollID).content.decode('utf-8')
        JSON = json.loads(JSON)

        # Fill the participants list
        for participant in JSON['participants']:
            self.participants.append(participant['name'])

        # Extract all the options (shifts) as datetimes
        flat_options = [ datetime.datetime.fromtimestamp(x['start']/1000)
                for x in JSON['options']]

        # Group the shift times by day, preserving encounter order
        for d in flat_options:
            self.options.setdefault(format_date(d), []).append(format_time(d))

        # Initialize the calendar: an empty participant list per (day, shift)
        for d in self.options:
            self.calendar[d] = {t: list() for t in self.options[d]}

        # Fill the list of participants who expressed a preference for each
        # option (day, shift)
        for participant in JSON['participants']:
            pName = participant['name']
            for k, pref in enumerate(participant['preferences']):
                if pref <= 0:  # Check if preference is not given
                    continue
                (d, t) = self.map_opt_to_calendar(k)
                self.calendar[d][t].append(pName)

    def get_participants(self):
        """
        Return the list of participant names.
        """
        return self.participants

    def get_options(self):
        """
        Return the options dict which map day->list, where
        - `day` is a date
        - `list` is the collection of shifts in `day`
        """
        return self.options

    def get_calendar(self):
        """
        Return the calendar of preferences which map day->pref, where
        - `day` is a date
        - `pref` is a dict which map shift->participants, where
            - `shift` is a shift in `day`
            - `participants` is a list of participants which express this preference
        """
        return self.calendar

    def map_opt_to_calendar(self, i):
        """
        Retrieve the (day, shift) associated to the i-th option
        in the flattened options representation.

        Parameters:
        -----------
        - `i`: index of the option

        Returns:
        -------
        a tuple (day, shift); if `i` is out of range, the last
        (day, shift) pair encountered is returned.
        """
        kk = 0
        for d in self.options:
            for t in self.options[d]:
                if kk == i:
                    return (d, t)
                kk += 1
        return (d, t)
def format_date(d):
    """Format a datetime `date` as e.g. 'Fri 30 Nov'."""
    days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "July", "Ago", "Sep", "Oct", "Nov", "Dec"]
    day_name = days[d.weekday()]
    month_name = months[d.month - 1]
    return "{} {} {}".format(day_name, str(d.day).zfill(2), month_name)
def format_time(d):
    """Format a datetime as hh:mm (zero-padded)."""
    return "%02d:%02d" % (d.hour, d.minute)
| 32.936 | 98 | 0.549915 | 3,553 | 0.863007 | 0 | 0 | 0 | 0 | 0 | 0 | 2,094 | 0.508623 |
104a38928902816c83edccd5d37dfdec68ddd38c | 381 | py | Python | bot-stopots/configuracao.py | leosantosx/bot-stopots | b3de1610247ed6e64aba4313f087e7f373365502 | [
"MIT"
] | 2 | 2020-10-08T23:44:47.000Z | 2022-03-30T17:17:44.000Z | bot-stopots/configuracao.py | leosantosx/bot-stopots | b3de1610247ed6e64aba4313f087e7f373365502 | [
"MIT"
] | null | null | null | bot-stopots/configuracao.py | leosantosx/bot-stopots | b3de1610247ed6e64aba4313f087e7f373365502 | [
"MIT"
] | 2 | 2020-10-01T08:10:08.000Z | 2022-02-23T20:27:09.000Z | """
VARIÁVEIS DE CONFIGURAÇÃO DO BOT
True PARA ATIVAR E False PARA DESATIVAR
"""
escrever_nos_campos = True # PREENCHE OS CAMPOS COM AS RESPOSTAS
modo_de_aprendizado = False # APRENDE NOVAS RESPOSTAS SALVANDO AS RESPOSTAS DOS OUTROS JOGADORES
clica_botao_avaliar_respostas = True # CLICA NO BOTÃO "AVALIAR"
clica_botao_estou_pronto = True # CLICA NO BOTÃO "ESTOU PRONTO"
| 22.411765 | 96 | 0.784777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.65285 |
104bea37e718643afefd50f43683a9a85ce8b8b1 | 1,164 | py | Python | InterpolLagrange.py | davidfotsa/Numerical_Methods_With_Python | 78ed749f9c1f0005efa1b5ae1f529356a4057462 | [
"MIT"
] | 1 | 2021-09-18T18:32:40.000Z | 2021-09-18T18:32:40.000Z | InterpolLagrange.py | davidfotsa/Numerical_Methods_With_Python | 78ed749f9c1f0005efa1b5ae1f529356a4057462 | [
"MIT"
] | null | null | null | InterpolLagrange.py | davidfotsa/Numerical_Methods_With_Python | 78ed749f9c1f0005efa1b5ae1f529356a4057462 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def a(i, x, X, Y):
    """Evaluate the i-th Lagrange basis polynomial at x for the nodes X.

    Only the first min(len(X), len(Y)) nodes are considered, so X and Y
    may have different lengths.
    """
    basis = 1
    n = min(len(X), len(Y))
    for j in range(n):
        if j != i:
            basis *= (x - X[j]) / (X[i] - X[j])
    return basis
def P(x, X, Y):
    """Evaluate the Lagrange interpolating polynomial through (X, Y) at x."""
    n = min(len(X), len(Y))
    return sum(a(i, x, X, Y) * Y[i] for i in range(n))
# Interpolation nodes and their sample values.
X=[-2,0,1,2]
Y=[49,5,7,49]
#x=float(input(" Vous voulez estimer f(x) pour x= "))
#print(P(x,X,Y))
from numpy.polynomial import Polynomial as poly
x=poly([0,1]) # the polynomial p(x) = x, so P(x,X,Y) builds the interpolant symbolically
p=P(x,X,Y)
print(p)
# NOTE(review): poly1d, roots and polyval below are never imported in this
# file (they would come from e.g. `from numpy import *`), so as written these
# lines raise NameError — confirm the intended import.
x=poly1d([1,0]) # the polynomial p(x) = x (poly1d takes coefficients high-to-low)
p=P(x,X,Y)
print(p)
print(p.order)
print(p.coeffs)
print(p.roots)
print(roots(p))
print(p(0))
print(polyval(p,0))
# Pretty-print the polynomial term by term with explicit signs,
# skipping zero coefficients.
print("%f"%(p.coef[0],),end=" ")
if (len(p)>1):
    if (p.coef[1]>0):
        print("+%f*x" %(p.coef[1],),end=" ")
    elif (p.coef[1]<0):
        print("%f*x" %(p.coef[1],),end=" ")
i=2
while (i<len(p)-1):
    if (p.coef[i]>0):
        print("+%f*x^%d" %(p.coef[i],i,),end=" ")
    elif (p.coef[i]<0):
        print("%f*x^%d" %(p.coef[i],i,),end=" ")
    i=i+1
if (len(p)>1):
    if (p.coef[len(p)-1]>0):
        print("+%f*x^%d" %(p.coef[len(p)-1],len(p)-1,),end=" ")
    elif (p.coef[len(p)-1]<0):
        print("%f*x^%d" %(p.coef[len(p)-1],len(p)-1,),end=" ")
104c94905568b06f91f4a7f0d3ab448f5f6b6e7d | 990 | py | Python | stockroom_bot/stock_products.py | amjadmajid/rosbook | 20d4ab94d910adc62c4aecb471ceac13b5cef5ad | [
"Apache-2.0"
] | 442 | 2015-12-11T02:59:16.000Z | 2022-03-31T22:10:25.000Z | stockroom_bot/stock_products.py | amjadmajid/rosbook | 20d4ab94d910adc62c4aecb471ceac13b5cef5ad | [
"Apache-2.0"
] | 41 | 2016-01-07T19:15:29.000Z | 2021-12-03T01:52:58.000Z | stockroom_bot/stock_products.py | amjadmajid/rosbook | 20d4ab94d910adc62c4aecb471ceac13b5cef5ad | [
"Apache-2.0"
] | 249 | 2015-11-27T10:22:33.000Z | 2022-03-28T09:52:05.000Z | #!/usr/bin/env python
import rospy, tf
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
# Respawn 12 "product" models into a 2x6 grid of stockroom bins in Gazebo.
# NOTE: `xrange` implies this script targets Python 2 (typical for ROS-era
# code); `product_num / 6` below relies on Python 2 floor division.
if __name__ == '__main__':
    rospy.init_node("stock_products")
    rospy.wait_for_service("gazebo/delete_model") # <1>
    rospy.wait_for_service("gazebo/spawn_sdf_model")
    delete_model = rospy.ServiceProxy("gazebo/delete_model", DeleteModel)
    s = rospy.ServiceProxy("gazebo/spawn_sdf_model", SpawnModel)
    # Identity orientation (roll = pitch = yaw = 0).
    orient = Quaternion(*tf.transformations.quaternion_from_euler(0, 0, 0))
    with open("models/product_0/model.sdf", "r") as f:
        product_xml = f.read() # <2>
    # Remove any leftover product models from a previous run.
    for product_num in xrange(0, 12):
        item_name = "product_{0}_0".format(product_num)
        delete_model(item_name) # <3>
    # Spawn one product per bin: two rows (y), six columns (x).
    for product_num in xrange(0, 12):
        bin_y = 2.8 * (product_num / 6) - 1.4 # <4>
        bin_x = 0.5 * (product_num % 6) - 1.5
        item_name = "product_{0}_0".format(product_num)
        item_pose = Pose(Point(x=bin_x, y=bin_y, z=2), orient) # <5>
        s(item_name, product_xml, "", item_pose, "world") # <6>
| 41.25 | 73 | 0.692929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.239394 |
104fe8a69a8815914b9402756fc0c531d5a4a9ad | 846 | py | Python | code/GC_mass_evolv.py | EnthalpyBill/GC-formation | 143a8cb464bd621e77092a010c9011ef056da400 | [
"MIT"
] | null | null | null | code/GC_mass_evolv.py | EnthalpyBill/GC-formation | 143a8cb464bd621e77092a010c9011ef056da400 | [
"MIT"
] | null | null | null | code/GC_mass_evolv.py | EnthalpyBill/GC-formation | 143a8cb464bd621e77092a010c9011ef056da400 | [
"MIT"
] | null | null | null | '''
Mass evolution of GC
Created Apr. 2020
Last Edit Apr. 2020
By Bill Chen
'''
import numpy as np
# Note: all times in [Gyr]
# ***** Dynamic evolution of GC in Choksi & Gnedin (2018) *****
def t_tid_cg18(m):
    """Tidally-limited disruption timescale [Gyr], Choksi & Gnedin (2018)."""
    P = 0.5
    mass_term = (m / 2e5) ** (2 / 3)
    period_term = P / 0.5
    return 5 * mass_term * period_term
def GC_dyn_evolv_cg18(m0, t):
    """Dynamical surviving mass fraction m(t)/m0, Choksi & Gnedin (2018)."""
    elapsed_fraction = t / t_tid_cg18(m0)
    return (1 - (2 / 3) * elapsed_fraction) ** (2 / 3)
# ***** Stellar evolution of GC in Prieto & Gnedin (2008) *****
def GC_star_evolv_pg08(m0, t):
    """Stellar-evolution mass fraction m(t)/m0 (simplified constant),
    Prieto & Gnedin (2008)."""
    retained_fraction = 0.6
    return retained_fraction
# ***** Mass evolution of GC in Choksi & Gnedin (2018) *****
def GC_mass_evolv_cg18(m0, t):
    """Total GC mass m(t): initial mass scaled by the dynamical and
    stellar-evolution surviving fractions, Choksi & Gnedin (2018)."""
    dyn_fraction = GC_dyn_evolv_cg18(m0, t)
    stellar_fraction = GC_star_evolv_pg08(m0, t)
    return m0 * dyn_fraction * stellar_fraction
| 22.263158 | 68 | 0.625296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.580378 |
104fe95906a89882b90ee817c831630744acea53 | 355 | py | Python | tests/test_deploy.py | NCAR/marbl-solutions | 0840e2a594d49218b1510cd8cb95d9d058495a8a | [
"MIT"
] | null | null | null | tests/test_deploy.py | NCAR/marbl-solutions | 0840e2a594d49218b1510cd8cb95d9d058495a8a | [
"MIT"
] | 1 | 2022-02-11T22:53:37.000Z | 2022-02-11T22:53:37.000Z | tests/test_deploy.py | NCAR/marbl-solutions | 0840e2a594d49218b1510cd8cb95d9d058495a8a | [
"MIT"
] | null | null | null | import solutions
def test_deploy_config():
deploy_config = solutions.config.deploy_config
assert deploy_config['reference_case'] == 'ref_case'
assert type(deploy_config['reference_case_path']) == list
assert deploy_config['reference_case_file_format'] == 'history'
assert deploy_config['case_to_compare_file_format'] == 'timeseries'
| 35.5 | 71 | 0.766197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.352113 |
10521ee81224fcf01be655be4e17446c05559c19 | 148 | py | Python | backend/home/models.py | crowdbotics-apps/test-29106 | 34df3fa66e798f61d9189fa248f21cabb9bca0e1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/models.py | crowdbotics-apps/test-29106 | 34df3fa66e798f61d9189fa248f21cabb9bca0e1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/models.py | crowdbotics-apps/test-29106 | 34df3fa66e798f61d9189fa248f21cabb9bca0e1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.conf import settings
from django.db import models
class Tasks(models.Model):
"Generated Model"
task_name = models.TextField()
| 18.5 | 34 | 0.75 | 83 | 0.560811 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.114865 |
10527d2aa82328718d79fcf72f8bc5b68e57039d | 405 | py | Python | tests/urls.py | xiu1/django-rest | 3ba381a8f8be8a27af464839dcdd5677857043ba | [
"MIT"
] | null | null | null | tests/urls.py | xiu1/django-rest | 3ba381a8f8be8a27af464839dcdd5677857043ba | [
"MIT"
] | null | null | null | tests/urls.py | xiu1/django-rest | 3ba381a8f8be8a27af464839dcdd5677857043ba | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
from rest.views import TestRestView, TestAuthHeaderView, TestAuthUrlView
urlpatterns = [
url('^rest/$', TestRestView.as_view(), name='rest'),
url('^auth_header_rest/$', TestAuthHeaderView.as_view(), name='rest_auth_header'),
url('^auth_url_rest/(?P<api_key>.+)/$', TestAuthUrlView.as_view(), name='rest_auth_url'),
]
| 40.5 | 93 | 0.735802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.254321 |
1052c3a88b2dcf7166396c893200f90ad073a780 | 16,625 | py | Python | services/controllers/thruster_controller.py | gizmo-cda/g2x-submarine-v2 | 2f4be8ad7234ab59fc6e98b8353a40195dd08ffb | [
"BSD-3-Clause"
] | 1 | 2017-05-05T04:32:11.000Z | 2017-05-05T04:32:11.000Z | services/controllers/thruster_controller.py | gizmo-cda/g2x-submarine-v2 | 2f4be8ad7234ab59fc6e98b8353a40195dd08ffb | [
"BSD-3-Clause"
] | 26 | 2017-05-08T02:06:47.000Z | 2017-06-02T02:50:18.000Z | services/controllers/thruster_controller.py | gizmo-cda/g2x-submarine-v2 | 2f4be8ad7234ab59fc6e98b8353a40195dd08ffb | [
"BSD-3-Clause"
] | 1 | 2018-06-25T19:00:20.000Z | 2018-06-25T19:00:20.000Z | import os
import json
from vector2d import Vector2D
from interpolator import Interpolator
from utils import map_range
# Each game controller axis returns a value in the closed interval [-1, 1]. We
# limit the number of decimal places we use with the PRECISION constant. This is
# done for a few reasons: 1) it makes the numbers more human-friendly (easier to
# read) and 2) it reduces the number of thruster updates.
#
# To elaborate on this last point, I was seeing a lot of very small fluctations
# with the values coming from my PS4 controller. The change in values were so
# small, they effectively would not change the current thruster value. By
# reducing the precision, these very small fluctuations get filtered out,
# resulting in fewer thruster updates. Also, I found that when I let go of a
# joystick, the value would hover around 0.0 but would never actually become
# zero. This means the thrusters would always be active, consuming battery power
# unnecessarily. Again, by limiting the precision, these small fluctuations were
# filtered out resulting in consistent zero values when then joysticks were in
# their resting positions.
#
# Using three digits of precisions was an arbitrary choice that just happened to
# work the first time. If we find that we need more fine control of the
# thrusters, we may need to increase this value.
PRECISION = 3
# Define a series of comstants, one for each thruster
HL = 0 # horizontal left
VL = 1 # vertical left
VC = 2 # vertical center
VR = 3 # vertical right
HR = 4 # horizontal right
LIGHT = 5
# Define a series of constants, one for each game controller axis
JL_H = 0 # left joystick horizontal axis
JL_V = 1 # left joystick vertical axis
JR_H = 2 # right joystick horizontal axis
JR_V = 3 # right joystick vertical axis
AL = 4 # left analog button
AR = 5 # right analog button
UP = 3
DOWN = 1
RESET = 0
# 271,[320],467
# Define constants for the PWM to run a thruster in full reverse, full forward,
# or neutral
FULL_REVERSE = 246
NEUTRAL = 369
FULL_FORWARD = 496
LIGHT_STEP = 0.05
# Use this file to load/store thruster and sensitivity settings
SETTINGS_FILE = 'thruster_settings.json'
class ThrusterController:
    """Map game-controller axis/button input onto the vehicle's five
    thrusters (HL, VL, VC, VR, HR) and its light via the PWM controller.

    Fix in this revision: ``__del__`` previously zeroed VL twice and never
    zeroed VR, so the vertical-right thruster could be left running on
    shutdown; the light is now also switched off on teardown.
    """

    def __init__(self, simulate=False):
        # setup motor controller. The PWM controller can control up to 16
        # different devices. We have to add devices, one for each thruster that
        # we can control. The first parameter is the human-friendly name of the
        # device. That is used for logging to the console and/or a database. The
        # next parameter indicates which PWM connector this device is connected
        # to. This is refered to as the PWM channel. The last two values
        # indicate at what time intervals (ticks) the PWM should turn on and
        # off, respectively. We simply start each device at 0 time and control
        # the duration of the pulses by adjusting the off time. Note that we may
        # be able to shuffle on/off times to even out the current draw from the
        # thrusters, but so far, that hasn't been an issue. It's even possible
        # that the PWM controller may do that for us already.
        if simulate is False:
            from pwm_controller import PWMController
            self.motor_controller = PWMController()
            self.motor_controller.add_device("HL", HL, 0, NEUTRAL)
            self.motor_controller.add_device("VL", VL, 0, NEUTRAL)
            self.motor_controller.add_device("VC", VC, 0, NEUTRAL)
            self.motor_controller.add_device("VR", VR, 0, NEUTRAL)
            self.motor_controller.add_device("HR", HR, 0, NEUTRAL)
            self.motor_controller.add_device("LIGHT", LIGHT, 0, FULL_REVERSE)
        else:
            self.motor_controller = None
        # setup the joysticks. We use a 2D vector to represent the x and y
        # values of the joysticks.
        self.j1 = Vector2D()
        self.j2 = Vector2D()
        # create interpolators
        self.horizontal_left = Interpolator()
        self.vertical_left = Interpolator()
        self.vertical_center = Interpolator()
        self.vertical_right = Interpolator()
        self.horizontal_right = Interpolator()
        # setup interpolators from a file or manually
        if os.path.isfile(SETTINGS_FILE):
            with open(SETTINGS_FILE, 'r') as f:
                self.set_settings(json.load(f), False)
        else:
            # Set the sensitivity to be applied to each thruster. 0 indicates a
            # linear response which is the default when no sensitivity is applied. 1
            # indicates full sensitivity. Values between 0 and 1 can be used to
            # increase and to decrease the overall sensitivity. Increasing sensivity
            # dampens lower values and amplifies larger values giving more precision
            # at lower power levels.
            self.sensitivity = 0.7
            # We use a cubic to apply sensitivity. If you find that full sensitivity
            # (dampening) does not give you fine enough control, you can increase
            # the degree of the polynomial used for dampening. Note that this must
            # be a positive odd number. Any other values will cause unexpected
            # results.
            self.power = 3
            # setup the various interpolators for each thruster. Each item we add
            # to the interpolator consists of two values: an angle in degrees and a
            # thrust value. An interpolator works by returning a value for any given
            # input value. More specifically in this case, we will give each
            # interpolator an angle and it will return a thrust value for that
            # angle. Since we have only given the interpolator values for very
            # specific angles, it will have to determine values for angles we have
            # not provided. It does this using linear interpolation.
            self.horizontal_left.addIndexValue(0.0, -1.0)
            self.horizontal_left.addIndexValue(90.0, 1.0)
            self.horizontal_left.addIndexValue(180.0, 1.0)
            self.horizontal_left.addIndexValue(270.0, -1.0)
            self.horizontal_left.addIndexValue(360.0, -1.0)
            self.vertical_left.addIndexValue(0.0, 1.0)
            self.vertical_left.addIndexValue(90.0, -1.0)
            self.vertical_left.addIndexValue(180.0, -1.0)
            self.vertical_left.addIndexValue(270.0, 1.0)
            self.vertical_left.addIndexValue(360.0, 1.0)
            self.vertical_center.addIndexValue(0.0, 0.0)
            self.vertical_center.addIndexValue(90.0, 1.0)
            self.vertical_center.addIndexValue(180.0, 0.0)
            self.vertical_center.addIndexValue(270.0, -1.0)
            self.vertical_center.addIndexValue(360.0, 0.0)
            self.vertical_right.addIndexValue(0.0, -1.0)
            self.vertical_right.addIndexValue(90.0, -1.0)
            self.vertical_right.addIndexValue(180.0, 1.0)
            self.vertical_right.addIndexValue(270.0, 1.0)
            self.vertical_right.addIndexValue(360.0, -1.0)
            self.horizontal_right.addIndexValue(0.0, 1.0)
            self.horizontal_right.addIndexValue(90.0, 1.0)
            self.horizontal_right.addIndexValue(180.0, -1.0)
            self.horizontal_right.addIndexValue(270.0, -1.0)
            self.horizontal_right.addIndexValue(360.0, 1.0)
        # setup ascent/descent controllers (analog triggers rest at -1.0)
        self.ascent = -1.0
        self.descent = -1.0
        # setup light (0.0 == off)
        self.light = 0.0

    def __del__(self):
        '''
        When an instance of this class gets destroyed, we need to make sure that
        we turn off all motors. Otherwise, we could end up in a situation where
        the vehicle could have thrusters running when we don't have scripts
        running to control it.
        '''
        self.set_motor(HL, 0.0)
        self.set_motor(VL, 0.0)
        self.set_motor(VC, 0.0)
        # BUGFIX: this line used to zero VL a second time, leaving the
        # vertical-right thruster running after teardown.
        self.set_motor(VR, 0.0)
        self.set_motor(HR, 0.0)
        # Turn the light off as well; -1.0 maps to FULL_REVERSE, the level
        # the LIGHT device is initialized to in __init__.
        self.set_motor(LIGHT, -1.0)

    def update_axis(self, axis, value):
        '''
        This is the main method of this class. It is responsible for taking a
        controller input value (referred to as an axis value) and then
        converting that into the appropriate thrust values for the appropriate
        thrusters associated with that axis.

        For the two joysticks, we convert the joystick position into an angle.
        We know which thrusters each joystick controls, so we feed the
        calculated angle into the thruster interpolators for that joystick. This
        gives us the new thruster value for each thruster, which we then apply
        to the PWM controller devices for those thrusters.

        Note that the angle of the joystick does not give us all of the
        information that we need. If the joystick is close to the center
        position, then we don't need to apply as much thrust. If it is pushed
        all the way to the edge, then we need 100% thrust. So, we treat the
        center as 0% and the edge as 100%. The values we get back from the
        interpolators are 100% values, so we simply apply the joystick
        percentage to the interpolator value to find the actual thrust value we
        need to use.

        Things get a bit more complicated for the vertical thrusters because it
        is possible that we will be pitching or rolling the vehicle while
        simultaneously trying to move the vehicle directly up or down. If we
        pitch or roll the vehicle only, then the process is exactly as we
        described above. However, if we are pitching and/or rolling AND moving
        the vehicle vertically, we need to combine the two operations into one
        set of thruster values. We have to first determine the values for pitch
        and roll, then we increase or decrease all thruster values equally in
        the up or down direction. However it is possible that we will not be
        able to increase/decrease all thrusters by the same amount since we are
        already applying thrust for pitch and roll. This means we need to make
        sure our values do not go outside the closed interval [-1,1]. This
        means that as we pitch or roll harder, the vehicle will flatten out as
        we apply vertical thrust.
        '''
        # We need to keep track of which thrusters need updating. We use the
        # following flags for that purpose
        update_horizontal_thrusters = False
        update_vertical_thrusters = False
        # Round the incoming value to the specified precision to reduce input
        # noise
        value = round(value, PRECISION)
        # Update the appropriate joystick vector based on which controller axis
        # has changed. Note that we make sure the value is different from what
        # we have already to prevent unnecessary updates. Recall that the
        # controller may send values whose differences are smaller than our
        # precision. This means we will get an update from the controller, but
        # we decided to ignore it since it won't result in a significant change
        # to our thrusters.
        if axis == JL_H:
            if self.j1.x != value:
                self.j1.x = value
                update_horizontal_thrusters = True
        elif axis == JL_V:
            if self.j1.y != value:
                self.j1.y = value
                update_horizontal_thrusters = True
        elif axis == JR_H:
            if self.j2.x != value:
                self.j2.x = value
                update_vertical_thrusters = True
        elif axis == JR_V:
            if self.j2.y != value:
                self.j2.y = value
                update_vertical_thrusters = True
        elif axis == AL:
            if self.descent != value:
                self.descent = value
                update_vertical_thrusters = True
        elif axis == AR:
            if self.ascent != value:
                self.ascent = value
                update_vertical_thrusters = True
        else:
            pass
            # print("unknown axis ", event.axis)
        # updating horizontal thrusters is easy: find current angle, convert
        # angle to thruster values, apply values
        if update_horizontal_thrusters:
            left_value = self.horizontal_left.valueAtIndex(self.j1.angle)
            right_value = self.horizontal_right.valueAtIndex(self.j1.angle)
            power = min(1.0, self.j1.length)
            self.set_motor(HL, left_value * power)
            self.set_motor(HR, right_value * power)
        # updating vertical thrusters is trickier. We do the same as above, but
        # then post-process the values if we are applying vertical up/down
        # thrust. As mentioned above, we have to be careful to stay within our
        # [-1,1] interval.
        if update_vertical_thrusters:
            power = min(1.0, self.j2.length)
            back_value = self.vertical_center.valueAtIndex(self.j2.angle) * power
            front_left_value = self.vertical_left.valueAtIndex(self.j2.angle) * power
            front_right_value = self.vertical_right.valueAtIndex(self.j2.angle) * power
            if self.ascent != -1.0:
                percent = (1.0 + self.ascent) / 2.0
                max_thrust = max(back_value, front_left_value, front_right_value)
                max_adjust = (1.0 - max_thrust) * percent
                # back_value += max_adjust
                front_left_value += max_adjust
                front_right_value += max_adjust
            elif self.descent != -1.0:
                percent = (1.0 + self.descent) / 2.0
                min_thrust = min(back_value, front_left_value, front_right_value)
                max_adjust = (min_thrust - -1.0) * percent
                # back_value -= max_adjust
                front_left_value -= max_adjust
                front_right_value -= max_adjust
            self.set_motor(VC, back_value)
            self.set_motor(VL, front_left_value)
            self.set_motor(VR, front_right_value)

    def update_button(self, button, value):
        # UP/DOWN step the light brightness in LIGHT_STEP increments inside
        # [0, 1]; RESET turns it off.
        if button == UP:
            self.light = min(1.0, self.light + LIGHT_STEP)
        elif button == DOWN:
            self.light = max(0.0, self.light - LIGHT_STEP)
        elif button == RESET:
            self.light = 0.0
        light_value = map_range(self.light, 0.0, 1.0, -1.0, 1.0)
        print("button %s, light = %s, light_value = %s" % (button, self.light, light_value))
        self.set_motor(LIGHT, light_value)

    def set_motor(self, motor_number, value):
        # Translate a normalized [-1, 1] value into a PWM off-tick and apply
        # it to the device; no-op when running in simulation mode.
        if self.motor_controller is not None:
            motor = self.motor_controller.devices[motor_number]
            value = self.apply_sensitivity(value)
            pwm_value = int(map_range(value, -1.0, 1.0, FULL_REVERSE, FULL_FORWARD))
            # print("setting motor {0} to {1}".format(motor_number, pwm_value))
            motor.off = pwm_value

    def apply_sensitivity(self, value):
        # Blend a linear response with an odd-degree polynomial response:
        # sensitivity=0 is purely linear; sensitivity=1 fully dampened.
        return self.sensitivity * value**self.power + (1.0 - self.sensitivity) * value

    def get_settings(self):
        # Serialize current sensitivity and thruster interpolator tables.
        return {
            'version': 1,
            'sensitivity': {
                'strength': self.sensitivity,
                'power': self.power
            },
            'thrusters': [
                self.horizontal_left.to_array(),
                self.vertical_left.to_array(),
                self.vertical_center.to_array(),
                self.vertical_right.to_array(),
                self.horizontal_right.to_array()
            ]
        }

    def set_settings(self, data, save=True):
        # Apply (and optionally persist) a settings dict produced by
        # get_settings(). Only version-1 payloads are understood.
        if data['version'] == 1:
            # save settings for future loading
            if save:
                if data['name'] == "":
                    filename = SETTINGS_FILE
                else:
                    filename = os.path.join("settings", data['name'] + ".json")
                with open(filename, 'w') as out:
                    out.write(json.dumps(data, indent=2))
            # update current settings
            self.sensitivity = float(data['sensitivity']['strength'])
            self.power = float(data['sensitivity']['power'])
            self.horizontal_left.from_array(data['thrusters'][0])
            self.vertical_left.from_array(data['thrusters'][1])
            self.vertical_center.from_array(data['thrusters'][2])
            self.vertical_right.from_array(data['thrusters'][3])
            self.horizontal_right.from_array(data['thrusters'][4])
        else:
            print("Unsupported data version number '{}'".format(data['version']))
if __name__ == "__main__":
pass
| 45.925414 | 92 | 0.639759 | 14,416 | 0.867128 | 0 | 0 | 0 | 0 | 0 | 0 | 8,002 | 0.481323 |
1052f3c21d56f16c74fe435dcc9d878c1ebb23e3 | 2,387 | py | Python | montagem/models.py | Glaysonvisgueira/agendamento-de-servico | a7715f45914f361830303241916c0b85cd3bdfdf | [
"MIT"
] | null | null | null | montagem/models.py | Glaysonvisgueira/agendamento-de-servico | a7715f45914f361830303241916c0b85cd3bdfdf | [
"MIT"
] | null | null | null | montagem/models.py | Glaysonvisgueira/agendamento-de-servico | a7715f45914f361830303241916c0b85cd3bdfdf | [
"MIT"
] | null | null | null | from django.db import models
LOJAS = (
('TES', 'TES'),
('TEU', 'TEU'),
('TMA', 'TMA'),
('TPI', 'TPI'),
('TMO', 'TMO'),
('TEZ', 'TEZ'),
('TED', 'TED'),
('TPP', 'TPP'),
('TIM', 'TIM'),
('TEC', 'TEC'),
('RTT', 'RTT'),
('TSJ', 'TSJ'),
)
TURNO_DISPONIVEL = (
('MANHA', 'MANHA'),
('TARDE', 'TARDE'),
('DURANTE DIA', 'DURANTE DIA'),
)
ZONAS = (
('SUL', 'SUL'),
('DIRCEU', 'DIRCEU'),
('NORTE', 'NORTE'),
('LESTE', 'LESTE'),
('TIMON', 'TIMON'),
('R.LESTE', 'R.LESTE'),
('R.NORTE', 'R.NORTE'),
('R.SUL', 'R.SUL'),
('R.DIRCEU', 'R.DIRCEU'),
('R.TIMON', 'R.TIMON'),
)
STATUS = (
('REALIZADO', 'REALIZADO'),
('AGENDADO', 'AGENDADO'),
('CANCELADO', 'CANCELADO'),
)
TIPO = (
('T', 'T'),
('E', 'E'),
('CANCELADO', 'CANCELADO'),
)
class Minuta(models.Model):
    """A delivery/assembly order (minuta) scheduled for a store and zone."""
    id = models.AutoField(primary_key=True)
    # Store code (3-letter choice from LOJAS).
    loja = models.CharField('Loja:', choices=LOJAS,max_length = 3, blank=False)
    # Order number identifier.
    numMinuta = models.CharField('Minuta:', max_length = 7, blank=False)
    cliente = models.CharField('Cliente:', max_length = 150, blank = False)
    created_at = models.DateTimeField('Criado em',auto_now_add = True)
    updated_at = models.DateTimeField('Atualizado em',auto_now = True)
    zona = models.CharField('Zona:', choices=ZONAS,max_length = 8, blank=False)
    dataAgendamento = models.DateField('Data de agendamento:', blank=False)
    turnoAgendamento = models.CharField('Turno de agendamento:', choices=TURNO_DISPONIVEL,max_length = 11, blank=False,default='DURANTE DIA')
    status = models.CharField('Status de montagem:', choices=STATUS,max_length = 9, blank=True)
    def __str__(self):
        # Display the store code when rendering this record.
        return self.loja
    class Meta:
        verbose_name = "Minuta"
        verbose_name_plural = "Minutas"
        ordering = ['id','loja','numMinuta','cliente']
class Zona(models.Model):
    """A delivery zone, restricted to the ZONAS choice set."""
    id = models.AutoField(primary_key=True)
    zona = models.CharField('Zona:', choices=ZONAS,max_length = 8)
    def __str__(self):
        return self.zona
    class Meta:
        verbose_name = "Zona"
        verbose_name_plural = "Zonas"
        ordering = ['id','zona']
class DataAux(models.Model):
    """Auxiliary date-range holder (start/end pair)."""
    # NOTE(review): fields are blank=True but not null=True — saving without
    # values would fail at the database level; confirm that is intended.
    dataInicio = models.DateField(blank = True)
    dataFim = models.DateField(blank = True)
| 27.755814 | 138 | 0.56682 | 1,368 | 0.573104 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.269376 |
1053671d2efb3bcbff0f12731c2f9a421bb4da2e | 7,377 | py | Python | em/src/dataset/metrics.py | tecdatalab/biostructure | a30e907e83fa5bbfb934d951b7c663b622104fcc | [
"Apache-2.0"
] | null | null | null | em/src/dataset/metrics.py | tecdatalab/biostructure | a30e907e83fa5bbfb934d951b7c663b622104fcc | [
"Apache-2.0"
] | 15 | 2019-06-17T16:13:39.000Z | 2022-02-27T05:23:59.000Z | em/src/dataset/metrics.py | tecdatalab/biostructure | a30e907e83fa5bbfb934d951b7c663b622104fcc | [
"Apache-2.0"
] | null | null | null | import numpy as np
from scipy.optimize import linear_sum_assignment
def intersection_over_union(segmented_map, gt_map):
    """Mean IoU over the non-background labels of the ground-truth map.

    Both arguments must expose ``getEmMap().data()`` returning same-shape
    label arrays; label 0 is treated as background and skipped.

    Raises:
        ValueError: if the two label arrays differ in shape.
    """
    s_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    labels = np.unique(gt_array)
    if s_array.shape != gt_array.shape:
        # BUGFIX: this used to *return* a ValueError instance instead of
        # raising it, silently handing callers an exception object.
        raise ValueError("Arrays must have same shape")
    iou_list = []
    for label in labels:
        if label == 0:
            continue  # background
        s_mask = s_array == label
        gt_mask = gt_array == label
        overlap = np.sum(np.logical_and(s_mask, gt_mask))
        union = np.sum(np.logical_or(s_mask, gt_mask))
        iou_list.append(overlap / union)
    return np.mean(iou_list)
def iou(s_array, gt_array, restricted=False):
    """Mean IoU between two label arrays, ignoring background label 0.

    Args:
        s_array: segmented label array.
        gt_array: ground-truth label array (same shape as ``s_array``).
        restricted: when True, average over the labels present in
            ``gt_array``; otherwise over the labels present in ``s_array``.

    Raises:
        ValueError: if the two arrays differ in shape.
    """
    if restricted:
        labels = np.unique(gt_array)
    else:
        labels = np.unique(s_array)
    if s_array.shape != gt_array.shape:
        # BUGFIX: previously *returned* the ValueError instead of raising it.
        raise ValueError("Arrays must have same shape")
    iou_list = []
    for label in labels:
        if label == 0:
            continue  # background
        s_mask = s_array == label
        gt_mask = gt_array == label
        overlap = np.sum(np.logical_and(s_mask, gt_mask))
        union = np.sum(np.logical_or(s_mask, gt_mask))
        iou_list.append(overlap / union)
    return np.mean(iou_list)
def matching_iou(segmented_map, gt_map):
    """IoU after optimally matching segmented labels to ground-truth labels.

    Uses the Hungarian algorithm (``linear_sum_assignment``) on a pairwise
    IoU matrix to pair each segmented label with its best ground-truth
    label, relabels the segmentation *in place* accordingly, and returns
    the IoU of the relabeled array against the ground truth.
    """
    s_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    # Non-background label sets on each side.
    segmented_labels = np.unique(s_array)
    segmented_labels = segmented_labels[segmented_labels!=0]
    gt_labels = np.unique(gt_array)
    gt_labels = gt_labels[gt_labels!=0]
    print(segmented_labels)
    print(gt_labels)
    # Pairwise IoU matrix: rows = segmented labels, cols = gt labels.
    iou_tensor = np.zeros([len(segmented_labels), len(gt_labels)])
    for i,s in enumerate(segmented_labels):
        for j,g in enumerate(gt_labels):
            seg_mask = s_array==s
            gt_mask = gt_array==g
            iou_tensor[i, j] = iou(seg_mask, gt_mask)
    # Optimal assignment maximizing total IoU.
    row_ind, col_ind = linear_sum_assignment(iou_tensor, maximize=True)
    # Two-phase relabeling: first map each matched segmented label to its gt
    # label shifted by `last_label` (a temporary offset so intermediate
    # labels can't collide with not-yet-renamed ones), then shift back.
    last_label = len(segmented_labels)
    label_replace_dict = {segmented_labels[r]:gt_labels[c]+last_label for c,r in zip(col_ind,row_ind)}
    label_replace_back_dict = {v:v-last_label for v in label_replace_dict.values() }
    # NOTE(review): iou_after is computed but never used below — dead code?
    iou_after = iou(s_array,gt_array)
    print("**",label_replace_dict)
    print("**",label_replace_back_dict)
    vol_before = { l:np.sum(s_array==l) for l in np.unique(s_array)}
    for k in label_replace_dict.keys():
        s_array[s_array==k]=label_replace_dict[k]
    for k in label_replace_back_dict.keys():
        new_label = label_replace_back_dict[k]
        existing_labels = np.unique(s_array)
        if new_label in existing_labels:
            # An unmatched label already holds the target value; move it out
            # of the way to a fresh label before reusing the value.
            s_array[s_array==new_label]= np.max(existing_labels)+1
        s_array[s_array==k]= new_label
    vol_after = {l:np.sum(s_array==l) for l in np.unique(s_array)}
    print("**vol before: ", vol_before)
    print("**vol after ", vol_after)
    return iou(s_array,gt_array)
def average_precision(segmented_map, gt_map, thresholds=np.arange(0.05,0.95,0.1)):
    """Precision averaged over IoU thresholds (Kaggle-style AP metric).

    For each threshold, a segmented/gt label pair counts as a true positive
    when their IoU exceeds the threshold; precision is TP/(TP+FP+FN) and the
    result is the mean over all thresholds.
    """
    segmented_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    # Non-background label masks on each side.
    segmented_labels = np.unique(segmented_array)
    segmented_labels = segmented_labels[segmented_labels!=0]
    gt_labels = np.unique(gt_array)
    gt_labels = gt_labels[gt_labels!=0]
    segmented_masks = [ segmented_array==l for l in segmented_labels ]
    gt_masks = [ gt_array==l for l in gt_labels ]
    # Boolean tensor: [threshold, segmented label, gt label] -> IoU above?
    iou_tensor = np.zeros([len(thresholds), len(segmented_masks), len(gt_masks)])
    for i,seg_mask in enumerate(segmented_masks):
        for j,gt_mask in enumerate(gt_masks):
            iou_tensor[:, i, j] = iou_at_thresholds(gt_mask, seg_mask, thresholds)
    # TP: segmented label matched exactly one gt label;
    # FP: gt label matched by no segmented label;
    # FN: segmented label matching no gt label.
    TP = np.sum((np.sum(iou_tensor, axis=2) == 1), axis=1)
    FP = np.sum((np.sum(iou_tensor, axis=1) == 0), axis=1)
    FN = np.sum((np.sum(iou_tensor, axis=2) == 0), axis=1)
    # eps avoids division by zero when no labels exist at a threshold.
    precision = TP / (TP + FP + FN + np.finfo(float).eps)
    print(precision)
    return np.mean(precision)
def iou_at_thresholds(seg_mask, gt_mask, thresholds=np.arange(0.05, 0.95, 0.1)):
    """Boolean vector telling, per threshold, whether the masks' IoU exceeds it."""
    overlap_count = np.sum(np.logical_and(gt_mask, seg_mask) > 0)
    union_count = np.sum(np.logical_or(gt_mask, seg_mask) > 0)
    ratio = overlap_count / union_count
    return ratio > thresholds
def dice(segmented_map, gt_map):
    """Mean Dice coefficient over the labels of the ground-truth map.

    NOTE: unlike the IoU metrics in this module, label 0 (background) is
    included in the average — the original skip was deliberately commented
    out, so that behavior is preserved here.

    Raises:
        ValueError: if the two label arrays differ in shape.
    """
    s_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    labels = np.unique(gt_array)
    if s_array.shape != gt_array.shape:
        # BUGFIX: previously *returned* the ValueError instead of raising it.
        raise ValueError("Arrays must have same shape")
    dice_list = []
    for label in labels:
        s_mask = s_array == label
        gt_mask = gt_array == label
        overlap = np.sum(np.logical_and(s_mask, gt_mask))
        added = np.sum(s_mask) + np.sum(gt_mask)
        dice_list.append(2 * overlap / added)
    return np.mean(dice_list)
def homogenity(segmented_map, gt_map):
    """Mean per-label overlap fraction, over the segmentation's labels.

    For each non-background label present in the segmentation, computes
    overlap(seg, gt) / volume(gt) for that label and averages the results.

    Raises:
        ValueError: if the two label arrays differ in shape.
    """
    s_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    labels = np.unique(s_array)
    if s_array.shape != gt_array.shape:
        # BUGFIX: previously *returned* the ValueError instead of raising it.
        raise ValueError("Arrays must have same shape")
    h_list = []
    for label in labels:
        if label == 0:
            continue  # background
        s_mask = s_array == label
        gt_mask = gt_array == label
        overlap = np.sum(np.logical_and(s_mask, gt_mask))
        volume = np.sum(gt_mask)
        # eps guards against division by zero when the label is absent in gt.
        h_list.append(overlap / (volume + np.finfo(float).eps))
        print("label {} overlap {} falses {} result {}".format(label, overlap, volume, overlap / (volume + np.finfo(float).eps)))
    return np.mean(h_list)
def proportion(segmented_map, gt_map):
    """Average number of segmented labels covering each ground-truth label.

    For each non-background gt label, counts how many distinct segmented
    labels fall inside its (non-background-segmented) region; returns the
    mean over gt labels. 1.0 means each gt region maps to one seg label.

    Raises:
        ValueError: if the two label arrays differ in shape.
    """
    s_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    labels = np.unique(gt_array)
    if s_array.shape != gt_array.shape:
        # BUGFIX: previously *returned* the ValueError instead of raising it.
        raise ValueError("Arrays must have same shape")
    p_list = []
    count_labels = 0
    for label in labels:
        if label == 0:
            continue  # background
        s_mask = s_array == label
        gt_mask = gt_array == label
        s_mask_non_background = s_array != 0
        # Restrict to voxels of this gt label that the segmentation labeled.
        proportion_mask = gt_mask * s_mask_non_background
        # need to check if should remove 0s
        num_labels = len(np.unique(s_array[proportion_mask]))
        p_list.append(num_labels)
        count_labels += 1
        print("label {} proportion {}".format(label, num_labels))
    return np.sum(p_list) / count_labels
def consistency(segmented_map, gt_map):
    """Number of segmented labels inside the largest-by-segmentation gt label.

    Picks the non-background gt label whose segmented-side mask has the
    largest volume, then counts the distinct segmented labels that appear
    inside that gt region. 1 indicates a consistent segmentation there.

    Raises:
        ValueError: if the two label arrays differ in shape.
    """
    s_array = segmented_map.getEmMap().data()
    gt_array = gt_map.getEmMap().data()
    labels = np.unique(gt_array)
    if s_array.shape != gt_array.shape:
        # BUGFIX: previously *returned* the ValueError instead of raising it.
        raise ValueError("Arrays must have same shape")
    volumes_dict = {}
    for label in labels:
        if label == 0:
            continue  # background
        s_mask = s_array == label
        volumes_dict[label] = np.sum(s_mask)
    label = max(volumes_dict, key=volumes_dict.get)
    gt_mask = gt_array == label
    return len(np.unique(s_array[gt_mask]))
| 37.830769 | 128 | 0.623153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.046089 |
10549d50d1cb53a2fb1a2551fb8532427ebf65e9 | 1,554 | py | Python | asq/test/test_pre_scan.py | SlamJam/asq | e6e49a5ace421cb4f84f0bded5dbe5a2108b0cce | [
"MIT"
] | 3 | 2015-03-13T23:02:29.000Z | 2015-07-19T15:29:23.000Z | asq/test/test_pre_scan.py | SlamJam/asq | e6e49a5ace421cb4f84f0bded5dbe5a2108b0cce | [
"MIT"
] | null | null | null | asq/test/test_pre_scan.py | SlamJam/asq | e6e49a5ace421cb4f84f0bded5dbe5a2108b0cce | [
"MIT"
] | 1 | 2020-12-19T07:57:20.000Z | 2020-12-19T07:57:20.000Z | import operator
import unittest
from asq.queryables import Queryable
__author__ = "Robert Smallshire"
class TestPreScan(unittest.TestCase):
    """Unit tests for Queryable.pre_scan (exclusive prefix scan)."""
    def test_pre_scan_empty_default(self):
        # Empty source -> empty result with the default (addition) operator.
        a = []
        b = Queryable(a).pre_scan().to_list()
        c = []
        self.assertEqual(b, c)
    def test_pre_scan_single_default(self):
        # Single element -> just the default seed (0 for addition).
        a = [47]
        b = Queryable(a).pre_scan().to_list()
        c = [0]
        self.assertEqual(b, c)
    def test_pre_scan_default(self):
        # Exclusive running sum: each output excludes the current element.
        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        b = Queryable(a).pre_scan().to_list()
        c = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]
        self.assertEqual(b, c)
    def test_pre_scan_empty_func(self):
        # Empty source with an explicit operator still yields empty.
        a = []
        b = Queryable(a).pre_scan(operator.mul).to_list()
        c = []
        self.assertEqual(b, c)
    def test_pre_scan_single_func(self):
        # Single element with multiplication -> just the seed (1).
        a = [47]
        b = Queryable(a).pre_scan(operator.mul, seed=1).to_list()
        c = [1]
        self.assertEqual(b, c)
    def test_pre_scan_func(self):
        # Exclusive running product: factorials shifted by one position.
        a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        b = Queryable(a).pre_scan(operator.mul, seed=1).to_list()
        c = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
        self.assertEqual(b, c)
    def test_pre_scan_func_callable(self):
        # A non-callable operator must raise TypeError.
        self.assertRaises(TypeError, lambda: Queryable([1, 2, 3]).pre_scan("not callable"))
    def test_pre_scan_closed(self):
        # Operating on a closed Queryable must raise ValueError.
        b = Queryable([])
        b.close()
        self.assertRaises(ValueError, lambda: b.pre_scan())
| 29.884615 | 92 | 0.557915 | 1,440 | 0.926641 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.021236 |
105586dba443dd6bd6a2f0bfc1884ab1e9b6d1f5 | 504 | py | Python | magicmethod__str__.py | maahi07m/OOPS | 1faa5168dc66c3597adfc0703af5e4c84c52117a | [
"MIT"
] | 1 | 2022-02-28T17:00:03.000Z | 2022-02-28T17:00:03.000Z | magicmethod__str__.py | maahi07m/OOPS | 1faa5168dc66c3597adfc0703af5e4c84c52117a | [
"MIT"
] | null | null | null | magicmethod__str__.py | maahi07m/OOPS | 1faa5168dc66c3597adfc0703af5e4c84c52117a | [
"MIT"
] | 4 | 2020-04-22T10:26:35.000Z | 2020-05-15T16:27:36.000Z | class ComplexNumber:
# TODO: write your code here
def __init__(self,real=0, imag=0):
self.real_part = real
self.imaginary_part = imag
def __str__(self):
return f"{self.real_part}{self.imaginary_part:+}i"
if __name__ == "__main__":
import json
input_args = list(json.loads(input()))
complex_number = ComplexNumber(*input_args)
complex_number_str_value = str(complex_number)
print(complex_number_str_value)
'''
[1,2]
1+2i
'''
| 22.909091 | 58 | 0.642857 | 239 | 0.474206 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.230159 |
10568ffdcff6c9ac4346c98dc2826ddaf3950010 | 1,853 | py | Python | src/form/panel/ParamAdvancePanel.py | miu200521358/pmx_tailor | 7f40ffd596f548d4c04f14b22d8796861056fdb5 | [
"MIT"
] | 4 | 2021-11-10T03:04:07.000Z | 2022-01-11T09:02:02.000Z | src/form/panel/ParamAdvancePanel.py | miu200521358/pmx_tailor | 7f40ffd596f548d4c04f14b22d8796861056fdb5 | [
"MIT"
] | 1 | 2022-01-05T01:23:18.000Z | 2022-01-08T16:23:42.000Z | src/form/panel/ParamAdvancePanel.py | miu200521358/pmx_tailor | 7f40ffd596f548d4c04f14b22d8796861056fdb5 | [
"MIT"
] | 3 | 2021-11-05T16:55:35.000Z | 2021-12-04T16:28:21.000Z | # -*- coding: utf-8 -*-
#
import wx
from form.panel.BasePanel import BasePanel
from utils.MLogger import MLogger # noqa
logger = MLogger(__name__)
class ParamAdvancePanel(BasePanel):
    """Advanced parameter tab: a fixed header plus a scrollable detail area.

    The displayed strings (via logger.transtext) are Japanese UI text and
    are intentionally left untouched here.
    """
    def __init__(self, frame: wx.Frame, export: wx.Notebook, tab_idx: int):
        super().__init__(frame, export, tab_idx)
        # Worker handle for export conversion; created on demand elsewhere.
        self.convert_export_worker = None
        # Header: static description text above a horizontal separator.
        self.header_panel = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
        self.header_sizer = wx.BoxSizer(wx.VERTICAL)
        self.description_txt = wx.StaticText(self, wx.ID_ANY, logger.transtext("パラ調整タブで材質を選択して、パラメーターを調整してください。\n") + \
                                             logger.transtext("※パラ調整タブで変更した値は詳細タブに反映されますが、逆方向には反映されません"), wx.DefaultPosition, wx.DefaultSize, 0)
        self.header_sizer.Add(self.description_txt, 0, wx.ALL, 5)
        self.static_line01 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
        self.header_sizer.Add(self.static_line01, 0, wx.EXPAND | wx.ALL, 5)
        self.header_panel.SetSizer(self.header_sizer)
        self.header_panel.Layout()
        self.sizer.Add(self.header_panel, 0, wx.EXPAND | wx.ALL, 5)
        # Detail sizer: scrollable body holding the advanced parameter widgets.
        self.advance_sizer = wx.BoxSizer(wx.VERTICAL)
        self.scrolled_window = wx.ScrolledWindow(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.FULL_REPAINT_ON_RESIZE | wx.VSCROLL | wx.ALWAYS_SHOW_SB)
        self.scrolled_window.SetScrollRate(5, 5)
        self.scrolled_window.SetSizer(self.advance_sizer)
        self.scrolled_window.Layout()
        self.sizer.Add(self.scrolled_window, 1, wx.ALL | wx.EXPAND | wx.FIXED_MINSIZE, 5)
        self.fit()
        self.Layout()
        self.fit()
    def initialize(self, event: wx.Event):
        # Delegate initialization to the simple-parameter panel controller.
        self.frame.simple_param_panel_ctrl.initialize(event)
| 38.604167 | 161 | 0.680518 | 1,844 | 0.923385 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.129695 |
1057b731456147b4b92c8df8a2606e402b2c86e3 | 10,830 | py | Python | hexrd/ui/calibration/powder_calibration.py | bnmajor/hexrdgui | d19f7cf4a4469b0d3b6978f2f65c5e8a6bd81785 | [
"BSD-3-Clause"
] | null | null | null | hexrd/ui/calibration/powder_calibration.py | bnmajor/hexrdgui | d19f7cf4a4469b0d3b6978f2f65c5e8a6bd81785 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T14:34:42.000Z | 2020-10-29T15:30:20.000Z | hexrd/ui/calibration/powder_calibration.py | cjh1/hexrdgui | eb8968ba763cebbffce61164f1bda1e2cc622461 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from scipy.optimize import leastsq, least_squares
from hexrd import instrument
from hexrd.matrixutil import findDuplicateVectors
from hexrd.fitting import fitpeak
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.utils import convert_tilt_convention
class InstrumentCalibrator(object):
    """Drives instrument calibration using one or more calibrator objects.

    All calibrators are expected to reference the same instrument; only the
    first calibrator is currently used (see FIXME in run_calibration).
    """
    def __init__(self, *args):
        assert len(args) > 0, \
            "must have at least one calibrator"
        self._calibrators = args
        # All calibrators share the same instrument; take it from the first.
        self._instr = self._calibrators[0].instr
    @property
    def instr(self):
        return self._instr
    @property
    def calibrators(self):
        return self._calibrators
    # =========================================================================
    # METHODS
    # =========================================================================
    def run_calibration(self, use_robust_optimization=False):
        """
        FIXME: only coding serial powder case to get things going.  Will
        eventually figure out how to loop over multiple calibrator classes.
        All will have a reference the same instrument, but some -- like single
        crystal -- will have to add parameters as well as contribute to the RHS
        """
        calib_class = self.calibrators[0]
        obj_func = calib_class.residual
        # Measured powder-line positions drive the least-squares residual.
        data_dict = calib_class._extract_powder_lines()
        # grab reduced optimizaion parameter set
        x0 = self._instr.calibration_parameters[
            self._instr.calibration_flags
        ]
        resd0 = obj_func(x0, data_dict)
        if use_robust_optimization:
            # soft_l1 loss down-weights outlier residuals.
            oresult = least_squares(
                obj_func, x0, args=(data_dict, ),
                method='trf', loss='soft_l1'
            )
            x1 = oresult['x']
        else:
            x1, cox_x, infodict, mesg, ierr = leastsq(
                obj_func, x0, args=(data_dict, ),
                full_output=True
            )
        resd1 = obj_func(x1, data_dict)
        # Mean squared residual improvement between start and end points.
        delta_r = sum(resd0**2)/float(len(resd0)) - \
            sum(resd1**2)/float(len(resd1))
        if delta_r > 0:
            print(('OPTIMIZATION SUCCESSFUL\nfinal ssr: %f' % sum(resd1**2)))
            print(('delta_r: %f' % delta_r))
            # self.instr.write_config(instrument_filename)
        else:
            print('no improvement in residual!!!')
# %%
class PowderCalibrator(object):
    def __init__(self, instr, plane_data, img_dict,
                 tth_tol=None, eta_tol=0.25,
                 pktype='pvoigt'):
        """Powder-line calibrator for a multi-detector instrument.

        instr: HEDM instrument; img_dict must have one image per detector key.
        tth_tol/eta_tol: angular tolerances (degrees) for polar interpolation;
        tth_tol defaults to the plane data's tThWidth.
        pktype: 1-d peak model name ('pvoigt' or 'gaussian').
        """
        assert list(instr.detectors.keys()) == list(img_dict.keys()), \
            "instrument and image dict must have the same keys"
        self._instr = instr
        self._plane_data = plane_data
        self._img_dict = img_dict
        # for polar interpolation
        self._tth_tol = tth_tol or np.degrees(plane_data.tThWidth)
        self._eta_tol = eta_tol
        # for peak fitting
        # ??? fitting only, or do alternative peak detection?
        self._pktype = pktype
    # --- simple accessors; tolerances are validated scalar settables -------
    @property
    def instr(self):
        return self._instr
    @property
    def plane_data(self):
        return self._plane_data
    @property
    def img_dict(self):
        return self._img_dict
    @property
    def tth_tol(self):
        return self._tth_tol
    @tth_tol.setter
    def tth_tol(self, x):
        assert np.isscalar(x), "tth_tol must be a scalar value"
        self._tth_tol = x
    @property
    def eta_tol(self):
        return self._eta_tol
    @eta_tol.setter
    def eta_tol(self, x):
        assert np.isscalar(x), "eta_tol must be a scalar value"
        self._eta_tol = x
    @property
    def pktype(self):
        return self._pktype
    @pktype.setter
    def pktype(self, x):
        """
        currently only 'pvoigt' or 'gaussian'
        """
        # NOTE(review): assertion message below is copy-pasted from tth_tol;
        # it should say that pktype must be a string.
        assert isinstance(x, str), "tth_tol must be a scalar value"
        self._pktype = x
    def _interpolate_images(self):
        """
        Return the interpolated powder line data from the images in img_dict.

        ??? interpolation necessary?
        """
        return self.instr.extract_line_positions(
            self.plane_data, self.img_dict,
            tth_tol=self.tth_tol, eta_tol=self.eta_tol,
            npdiv=2, collapse_eta=False, collapse_tth=False,
            do_interpolation=True)
def _extract_powder_lines(self):
"""
return the RHS for the instrument DOF and image dict
The format is a dict over detectors, each containing
[index over ring sets]
[index over azimuthal patch]
[xy_meas, tth_meas, tth_ref, eta_ref]
FIXME: can not yet handle tth ranges with multiple peaks!
"""
# ideal tth
tth_ideal = self.plane_data.getTTh()
tth0 = []
for idx in self.plane_data.getMergedRanges()[0]:
if len(idx) > 1:
eqv, uidx = findDuplicateVectors(np.atleast_2d(tth_ideal[idx]))
if len(uidx) > 1:
raise NotImplementedError("can not handle multipeak yet")
else:
# if here, only degenerate ring case
uidx = idx[0]
else:
uidx = idx[0]
tth0.append(tth_ideal[uidx])
powder_lines = self._interpolate_images()
# GRAND LOOP OVER PATCHES
rhs = dict.fromkeys(self.instr.detectors)
for det_key, panel in self.instr.detectors.items():
rhs[det_key] = []
for i_ring, ringset in enumerate(powder_lines[det_key]):
tmp = []
for angs, intensities in ringset:
tth_centers = np.average(
np.vstack([angs[0][:-1], angs[0][1:]]),
axis=0)
eta_ref = angs[1]
int1d = np.sum(np.array(intensities).squeeze(), axis=0)
"""
DARREN: FIT [tth_centers, intensities[0]] HERE
RETURN TTH0
rhs.append([tth0, eta_ref])
"""
p0 = fitpeak.estimate_pk_parms_1d(
tth_centers, int1d, self.pktype
)
p = fitpeak.fit_pk_parms_1d(
p0, tth_centers, int1d, self.pktype
)
# !!! this is where we can kick out bunk fits
tth_meas = p[1]
center_err = abs(tth_meas - tth0[i_ring])
if p[0] < 0.1 or center_err > np.radians(self.tth_tol):
continue
xy_meas = panel.angles_to_cart([[tth_meas, eta_ref], ])
# distortion
if panel.distortion is not None:
xy_meas = panel.distortion.apply_inverse(xy_meas)
# cat results
tmp.append(
np.hstack(
[xy_meas.squeeze(),
tth_meas,
tth0[i_ring],
eta_ref]
)
)
pass
rhs[det_key].append(np.vstack(tmp))
pass
rhs[det_key] = np.vstack(rhs[det_key])
pass
return rhs
def residual(self, reduced_params, data_dict):
"""
"""
# first update instrument from input parameters
full_params = self.instr.calibration_parameters
full_params[self.instr.calibration_flags] = reduced_params
self.instr.update_from_parameter_list(full_params)
# build residual
resd = []
for det_key, panel in self.instr.detectors.items():
pdata = np.vstack(data_dict[det_key])
if len(pdata) > 0:
calc_xy = panel.angles_to_cart(pdata[:, -2:])
# distortion
if panel.distortion is not None:
calc_xy = panel.distortion.apply_inverse(calc_xy)
resd.append(
(pdata[:, :2].flatten() - calc_xy.flatten())
)
else:
continue
return np.hstack(resd)
def run_powder_calibration():
    """Run an end-to-end powder calibration from the current HexrdConfig
    state and write the refined instrument parameters back into the config.

    Raises a generic Exception when the stored flag vector does not match
    the instrument's calibration flags.
    """
    # Set up the tilt calibration mapping
    rme = HexrdConfig().rotation_matrix_euler()
    # Set up the instrument
    iconfig = HexrdConfig().instrument_config_none_euler_convention
    instr = instrument.HEDMInstrument(instrument_config=iconfig,
                                      tilt_calibration_mapping=rme)
    flags = HexrdConfig().get_statuses_instrument_format()
    if len(flags) != len(instr.calibration_flags):
        msg = 'Length of internal flags does not match instr.calibration_flags'
        raise Exception(msg)
    instr.calibration_flags = flags
    # Plane data and images
    plane_data = HexrdConfig().active_material.planeData
    img_dict = HexrdConfig().current_images_dict()
    # tolerances for patches
    tth_tol = HexrdConfig().config['calibration']['powder']['tth_tol']
    eta_tol = HexrdConfig().config['calibration']['powder']['eta_tol']
    pktype = HexrdConfig().config['calibration']['powder']['pk_type']
    # powder calibrator
    pc = PowderCalibrator(instr, plane_data, img_dict,
                          tth_tol=tth_tol, eta_tol=eta_tol,
                          pktype=pktype)
    # make instrument calibrator
    ic = InstrumentCalibrator(pc)
    use_robust_optimization = False
    ic.run_calibration(use_robust_optimization)
    # We might need to use this at some point
    # data_dict = pc._extract_powder_lines()
    # Add this so the calibration crystal gets written
    cal_crystal = iconfig.get('calibration_crystal')
    output_dict = instr.write_config(calibration_dict=cal_crystal)
    # Convert back to whatever convention we were using before
    eac = HexrdConfig().euler_angle_convention
    if eac is not None:
        convert_tilt_convention(output_dict, None, eac)
    # Add the saturation levels, as they seem to be missing
    sl = 'saturation_level'
    for det in output_dict['detectors'].keys():
        output_dict['detectors'][det][sl] = iconfig['detectors'][det][sl]
    # Save the previous iconfig to restore the statuses
    prev_iconfig = HexrdConfig().config['instrument']
    # Update the config
    HexrdConfig().config['instrument'] = output_dict
    # This adds in any missing keys. In particular, it is going to
    # add in any "None" detector distortions
    HexrdConfig().set_detector_defaults_if_missing()
    # Add status values
    HexrdConfig().add_status(output_dict)
    # Set the previous statuses to be the current statuses
    HexrdConfig().set_statuses_from_prev_iconfig(prev_iconfig)
| 33.119266 | 79 | 0.570175 | 7,999 | 0.738596 | 0 | 0 | 958 | 0.088458 | 0 | 0 | 2,813 | 0.259741 |
1059338d2378b261aba08fb200894418dd9de4d1 | 503 | py | Python | covid-tweets/process-tweets-2.py | kadams4/NLPCoronavirus | 15936240adb6cd9d7a616c381980d17c03cdc8fd | [
"MIT"
] | 1 | 2020-05-02T23:26:56.000Z | 2020-05-02T23:26:56.000Z | covid-tweets/process-tweets-2.py | kadams4/NLPCoronavirus | 15936240adb6cd9d7a616c381980d17c03cdc8fd | [
"MIT"
] | null | null | null | covid-tweets/process-tweets-2.py | kadams4/NLPCoronavirus | 15936240adb6cd9d7a616c381980d17c03cdc8fd | [
"MIT"
] | null | null | null | import pandas as pd
# Concatenate per-day labelled tweet files (2020-03-12 .. 2020-03-28) into one
# output file per (split, sentiment) pair, e.g. covid-tweets-train-70.pos.
root = "Split/"
splits = ["train-70", "test-30"]
for split in splits:  # renamed from `type`, which shadowed the builtin
    filenames = ["2020-03-" + str(day) + "-Labels-" + split for day in range(12, 29)]
    for suffix in ["pos", "neg", "neu"]:
        data = []
        for filename in filenames:
            path = root + filename + "." + suffix
            # context manager closes each input file (the original leaked
            # every file handle it opened)
            with open(path, "r", encoding="utf-8") as infile:
                data += infile.readlines()
        print(len(data))
        # write with the same encoding we read with, so non-ASCII tweets do
        # not raise UnicodeEncodeError on platforms with a narrow default
        with open('covid-tweets-' + split + '.' + suffix, 'w', encoding="utf-8") as f:
            f.write(''.join(data))
| 22.863636 | 82 | 0.528827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.194831 |
105a1021873de5967ff4229ce3f7cb97d8adb0d8 | 1,638 | py | Python | sample/crawler/login.py | xuegangliu/Python-Learning | 43e8c57debf6ec186d093dcc0e83c1a628982715 | [
"MIT"
] | null | null | null | sample/crawler/login.py | xuegangliu/Python-Learning | 43e8c57debf6ec186d093dcc0e83c1a628982715 | [
"MIT"
] | null | null | null | sample/crawler/login.py | xuegangliu/Python-Learning | 43e8c57debf6ec186d093dcc0e83c1a628982715 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@Project: python
@Date: 8/30/2018 9:53 PM
@Author: xuegangliu
@Description: login
"""
import urllib.request
import http.cookiejar
import urllib.parse
def getOpener(header):
    """Build a urllib opener that keeps cookies and sends *header* entries.

    A CookieJar-backed processor stores cookies received from the server
    and attaches them to subsequent requests made through this opener.
    """
    jar = http.cookiejar.CookieJar()
    processor = urllib.request.HTTPCookieProcessor(jar)
    opener = urllib.request.build_opener(processor)
    # attach every configured header as a (name, value) pair
    opener.addheaders = [(name, value) for name, value in header.items()]
    return opener
def html_login(url,headers,postDict):
    '''Log in by POSTing url-encoded form data through a cookie-aware opener.'''
    # initial GET primes the session; the response body is read and discarded
    req=urllib.request.Request(url,headers=headers)
    res=urllib.request.urlopen(req)
    data = res.read()
    opener = getOpener(headers)
    # url-encode and byte-encode the POST payload
    postData=urllib.parse.urlencode(postDict).encode()
    # send the login request through the cookie-carrying opener
    res=opener.open(url,postData)
    data = res.read()
    print(data.decode())
if __name__ == "__main__":
    # NOTE(review): placeholder URL; replace with the real login endpoint
    url = "test"
    # request headers copied from the target site's traffic
    headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language':'zh-CN,zh;q=0.9,zh-TW;q=0.8,zh-HK;q=0.7,en;q=0.6',
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'host',
        'DNT':'1'
    }
    # form fields for the login POST (sample values)
    postDict={
        'aa':1,
        'bb':2,
        'cc':3
    }
    html_login(url,headers,postDict)
105a2a73f5eecce7fac87dc8417f55ba6d3a6da8 | 1,277 | py | Python | djangocms_comments/widgets.py | Nekmo/djangocms-comments-module | 591138db28d5341fd7809fc75f3fa4d91b5d3ef8 | [
"MIT"
] | 11 | 2016-04-13T14:04:42.000Z | 2020-06-21T10:48:37.000Z | djangocms_comments/widgets.py | Nekmo/djangocms-comments-module | 591138db28d5341fd7809fc75f3fa4d91b5d3ef8 | [
"MIT"
] | 14 | 2016-04-14T10:09:20.000Z | 2021-05-22T08:20:10.000Z | djangocms_comments/widgets.py | Nekmo/djangocms-comments-module | 591138db28d5341fd7809fc75f3fa4d91b5d3ef8 | [
"MIT"
] | 11 | 2016-04-14T09:48:48.000Z | 2021-04-11T03:43:08.000Z | from django.core.exceptions import SuspiciousOperation
from django.core.signing import Signer, BadSignature
from django.forms import HiddenInput
signer = Signer()
class SignedHiddenInput(HiddenInput):
    """Hidden input whose value is cryptographically signed.

    On render the value is signed (optionally prefixed with the field
    name); on submission the signature is verified and the prefix
    stripped, so any tampered value raises SuspiciousOperation.
    """
    def __init__(self, include_field_name=True, attrs=None):
        self.include_field_name = include_field_name
        super(SignedHiddenInput, self).__init__(attrs=attrs)
    def value_from_datadict(self, data, files, name):
        """Verify the submitted value's signature and strip the name prefix."""
        raw = super(SignedHiddenInput, self).value_from_datadict(data, files, name)
        try:
            unsigned = signer.unsign(raw)
        except BadSignature:
            raise SuspiciousOperation()
        if not self.include_field_name:
            return unsigned
        prefix = '{0}-'.format(name)
        if not unsigned.startswith(prefix):
            raise SuspiciousOperation()
        # drop only the leading "<name>-" marker
        return unsigned.replace(prefix, '', 1)
    def render(self, name, value, attrs=None):
        """Render the hidden input with the signed form of *value*."""
        signed = self.sign_value(name, value)
        return super(SignedHiddenInput, self).render(name, signed, attrs=attrs)
    def sign_value(self, name, value):
        """Return *value* signed, optionally prefixed with the field name."""
        payload = value
        if self.include_field_name:
            payload = '-'.join(map(str, [name, value]))
        return signer.sign(payload)
    def value(self):
        # no-op placeholder kept from the original implementation
        pass
105b04f24aada022dca8226b67acae586bf9c15f | 738 | py | Python | 860-lemonade-change.py | Iciclelz/leetcode | e4b698e0161033922851641885fdc6e47f9ce270 | [
"Apache-2.0"
] | null | null | null | 860-lemonade-change.py | Iciclelz/leetcode | e4b698e0161033922851641885fdc6e47f9ce270 | [
"Apache-2.0"
] | null | null | null | 860-lemonade-change.py | Iciclelz/leetcode | e4b698e0161033922851641885fdc6e47f9ce270 | [
"Apache-2.0"
] | null | null | null | class Solution:
def lemonadeChange(self, bills: List[int]) -> bool:
money = [0, 0, 0]
for x in bills:
if x == 5:
money[0] += 1
if x == 10:
if money[0] >= 1:
money[1] += 1
money[0] -= 1
else:
return False
if x == 20:
if money[1] >= 1 and money[0] >= 1:
money[2] += 1
money[1] -= 1
money[0] -= 1
elif money[0] >= 3:
money[2] += 1
money[0] -= 3
else:
return False
return True | 29.52 | 55 | 0.295393 | 738 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
105bb82bfc7b6f71c79e0e15226b493dbe1a9ea9 | 3,684 | py | Python | openspeech/search/beam_search_ctc.py | techthiyanes/openspeech | 71d28bae1232420da1d6f357f52ff1a607dc983f | [
"Apache-2.0",
"MIT"
] | 207 | 2021-07-22T02:04:47.000Z | 2022-03-31T07:24:12.000Z | openspeech/search/beam_search_ctc.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 81 | 2021-07-21T16:52:22.000Z | 2022-03-31T14:56:54.000Z | openspeech/search/beam_search_ctc.py | tqslj2/openspeech | 10307587f08615224df5a868fb5249c68c70b12d | [
"Apache-2.0",
"MIT"
] | 43 | 2021-07-21T16:33:27.000Z | 2022-03-23T09:43:49.000Z | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
from openspeech.utils import CTCDECODE_IMPORT_ERROR
class BeamSearchCTC(nn.Module):
    r"""
    Decodes probability output using ctcdecode package.
    Args:
        labels (list): the tokens you used to train your model
        lm_path (str): the path to your external kenlm language model(LM).
        alpha (int): weighting associated with the LMs probabilities.
        beta (int): weight associated with the number of words within our beam
        cutoff_top_n (int): cutoff number in pruning. Only the top cutoff_top_n characters with the highest probability
            in the vocab will be used in beam search.
        cutoff_prob (float): cutoff probability in pruning. 1.0 means no pruning.
        beam_size (int): this controls how broad the beam search is.
        num_processes (int): parallelize the batch using num_processes workers.
        blank_id (int): this should be the index of the CTC blank token
    Inputs: logits, sizes
        - logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t
        - sizes: Size of each sequence in the mini-batch
    Returns:
        - outputs: sequences of the model's best prediction
    """
    def __init__(
            self,
            labels: list,
            lm_path: str = None,
            alpha: int = 0,
            beta: int = 0,
            cutoff_top_n: int = 40,
            cutoff_prob: float = 1.0,
            beam_size: int = 3,
            num_processes: int = 4,
            blank_id: int = 0,
    ) -> None:
        super(BeamSearchCTC, self).__init__()
        # ctcdecode is an optional dependency; defer the import so this
        # module can be loaded without it and fail with a helpful message
        try:
            from ctcdecode import CTCBeamDecoder
        except ImportError:
            raise ImportError(CTCDECODE_IMPORT_ERROR)
        assert isinstance(labels, list), "labels must instance of list"
        self.decoder = CTCBeamDecoder(labels, lm_path, alpha, beta, cutoff_top_n,
                                      cutoff_prob, beam_size, num_processes, blank_id)
    def forward(self, logits, sizes=None):
        r"""
        Decodes probability output using ctcdecode package.
        Inputs: logits, sizes
            logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t
            sizes: Size of each sequence in the mini-batch
        Returns:
            outputs: sequences of the model's best prediction
        """
        # the CTCBeamDecoder operates on CPU tensors
        logits = logits.cpu()
        outputs, scores, offsets, seq_lens = self.decoder.decode(logits, sizes)
        return outputs
| 43.341176 | 119 | 0.678882 | 2,468 | 0.669924 | 0 | 0 | 0 | 0 | 0 | 0 | 2,624 | 0.712269 |
105c2eb00700830d7030c477994f8f358f536d08 | 1,231 | py | Python | Software/Services/__init__.py | Hackin7/BlockComPi | 36938e219b42c5d220db0a64e4718e95720c4850 | [
"X11"
] | null | null | null | Software/Services/__init__.py | Hackin7/BlockComPi | 36938e219b42c5d220db0a64e4718e95720c4850 | [
"X11"
] | null | null | null | Software/Services/__init__.py | Hackin7/BlockComPi | 36938e219b42c5d220db0a64e4718e95720c4850 | [
"X11"
] | null | null | null | #Do Not Edit
#colors R G B
# Named RGB colour constants used in the notification tuples below.
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 50, 255, 255)
magenta = (255, 0, 255)
yellow = (255, 255, 0)
orange = (255, 127, 0)
#The Service/Notifications List
# Layout: import (service)
# NOTE(review): this module uses Python 2 syntax (print statements below).
import FONAservice
import FONAmessage
# queue of pending service callbacks; newest entries go to index 0
service = []
def run(sv):
    # If the service module reports ready (check() == 1), queue its handler.
    global service
    #print service
    if sv.check() == 1: service.insert(0,sv.func)
def code():
    # Placeholder action bound to notifications until a real handler exists.
    print 'WHAT THE FRUCK IS THIS MOTHERFUCKING SHIT?'
    #Put code to direct to other program
# Default notification list; each entry is (label, colour, font size, callback).
notif = [("F", green, 24, code),("F", blue, 24, code),("F", red, 24, code),("F", cyan, 24, code),("F", yellow, 24, code)]
def putup(nc):
    # If the notification source reports ready, prepend its layout tuple.
    global notif
    # Notif Structure (Label, colour, fontsize, setfunction)
    if nc.check() == 1: notif.insert(0, nc.layout)
# one-shot flag: the demo notification below is only inserted the first
# time check() runs
shit = 1
def check():
    # Poll the notification and service sources; presumably called
    # periodically by the UI loop -- confirm with the caller.
    global notif
    #Notif layout: putup(notif)
    putup(FONAmessage)
    global shit
    if shit == 1:
        #Notif Structure (Label, colour, fontsize, setfunction)
        notif.insert(0, ("F", green, 24, code))
        shit = 0
    global service
    #Service layout: run(service)
    run(FONAservice)
| 24.62 | 122 | 0.57108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.329813 |
105d5ca23ac6dfb590f2af5c0e2d2ba6bb4c09a3 | 324 | py | Python | src/ml_fastapi/routers.py | sebastianschramm/ml_fastapi | 719f332b573f1ae021f4753b25a940da040c9ea3 | [
"MIT"
] | 2 | 2020-05-08T11:15:43.000Z | 2020-05-08T11:29:52.000Z | src/ml_fastapi/routers.py | sebastianschramm/ml_fastapi | 719f332b573f1ae021f4753b25a940da040c9ea3 | [
"MIT"
] | null | null | null | src/ml_fastapi/routers.py | sebastianschramm/ml_fastapi | 719f332b573f1ae021f4753b25a940da040c9ea3 | [
"MIT"
] | null | null | null | from fastapi import APIRouter
from starlette.requests import Request
router = APIRouter()
@router.get('/')
async def read_root(request: Request):
    """Root endpoint: return a static greeting string."""
    return "ML serving with fastapi"
@router.get('/api/predict')
async def predict_number(request: Request):
    """Run the app's ML model on a fixed sample input and return its prediction.

    Bug fix: the route path was 'api/predict' without a leading slash;
    Starlette requires routed paths to start with '/', so the route could
    not be registered/matched correctly. It is now '/api/predict'.
    """
    model = request.app.ml_model
    return model.predict('bla')
| 20.25 | 43 | 0.740741 | 0 | 0 | 0 | 0 | 227 | 0.700617 | 183 | 0.564815 | 46 | 0.141975 |
105dc9a0c2abe995709581929119eb84a98260a9 | 1,902 | py | Python | spinoffs/inference_gym/inference_gym/targets/eight_schools_test.py | PavanKishore21/probability | 4bad1b796b0e6ed2959205915d42788817620c4c | [
"Apache-2.0"
] | 3,670 | 2018-02-14T03:29:40.000Z | 2022-03-30T01:19:52.000Z | spinoffs/inference_gym/inference_gym/targets/eight_schools_test.py | PavanKishore21/probability | 4bad1b796b0e6ed2959205915d42788817620c4c | [
"Apache-2.0"
] | 1,395 | 2018-02-24T02:28:49.000Z | 2022-03-31T16:12:06.000Z | spinoffs/inference_gym/inference_gym/targets/eight_schools_test.py | PavanKishore21/probability | 4bad1b796b0e6ed2959205915d42788817620c4c | [
"Apache-2.0"
] | 1,135 | 2018-02-14T01:51:10.000Z | 2022-03-28T02:24:11.000Z | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for inference_gym.targets.eight_schools."""
import tensorflow.compat.v2 as tf
from inference_gym.internal import test_util
from inference_gym.targets import eight_schools
@test_util.multi_backend_test(globals(), 'targets.eight_schools_test')
class EightSchoolsTest(test_util.InferenceGymTestCase):
  """Validation tests for the EightSchools inference-gym target."""
  def testEightSchools(self):
    """Checks that unconstrained parameters yield finite joint densities."""
    model = eight_schools.EightSchools()
    # expected shapes of the identity sample transformation per parameter
    self.validate_log_prob_and_transforms(
        model,
        sample_transformation_shapes=dict(identity={
            'avg_effect': [],
            'log_stddev': [],
            'school_effects': [8],
        }),
        check_ground_truth_mean_standard_error=True,
        check_ground_truth_mean=True,
        check_ground_truth_standard_deviation=True)
  @test_util.numpy_disable_gradient_test
  def testEightSchoolsHMC(self):
    """Checks approximate samples from the model against the ground truth."""
    model = eight_schools.EightSchools()
    self.validate_ground_truth_using_hmc(
        model,
        num_chains=4,
        num_steps=4000,
        num_leapfrog_steps=10,
        step_size=0.4,
    )
if __name__ == '__main__':
  tf.test.main()
| 34.581818 | 78 | 0.699264 | 923 | 0.485279 | 0 | 0 | 994 | 0.522608 | 0 | 0 | 939 | 0.493691 |
105f337c3b2cee574054705ec45a67cc94c84757 | 788 | py | Python | ch01/dictionaries.py | PacktPublishing/Python-Networking-Cookbook | 26945c781a51fe72cc01409df6b5c5fa7df53f4c | [
"MIT"
] | 5 | 2021-06-11T11:24:04.000Z | 2022-03-22T03:22:57.000Z | ch01/dictionaries.py | PacktPublishing/Python-Networking-Cookbook | 26945c781a51fe72cc01409df6b5c5fa7df53f4c | [
"MIT"
] | null | null | null | ch01/dictionaries.py | PacktPublishing/Python-Networking-Cookbook | 26945c781a51fe72cc01409df6b5c5fa7df53f4c | [
"MIT"
] | 10 | 2021-04-18T12:31:14.000Z | 2022-03-28T07:21:16.000Z | config = {}
# Load key=value pairs from config.txt into the `config` dict defined above,
# let the user inspect and optionally change one key, then write it back.
with open("config.txt", "r") as f:
    for line in f:
        # tolerate blank lines instead of crashing on the unpack below
        if not line.strip():
            continue
        # split on the FIRST '=' only, so values may themselves contain '='
        # (the original bare split("=") raised ValueError in that case)
        key, value = line.split("=", 1)
        value = value.rstrip("\n")
        config[key] = value
        print(f"Added key {key} with value {value}")
user_key = input("Which key would you like to see? ")
if user_key not in config:
    print(f"I don't know the key {user_key}")
else:
    val = config[user_key]
    print(f"Current value for {user_key}:{val}")
    next_step = input("Would you like to change?[y/n]")
    if next_step == "y":
        new_val = input("What is the new value? ")
        config[user_key] = new_val
# Persist the (possibly updated) config back to disk.
with open("config.txt", "w") as f:
    for key, value in config.items():
        f.write(f"{key}={value}\n")
105f6e89e6b0c9427d9b4f9d438c31c6041b503d | 414 | py | Python | letra_m/extensions.py | frotacaio/tutorial_flask | 6b454bddfc32e384b765d831025fc54a22fc79c7 | [
"MIT"
] | null | null | null | letra_m/extensions.py | frotacaio/tutorial_flask | 6b454bddfc32e384b765d831025fc54a22fc79c7 | [
"MIT"
] | null | null | null | letra_m/extensions.py | frotacaio/tutorial_flask | 6b454bddfc32e384b765d831025fc54a22fc79c7 | [
"MIT"
] | null | null | null | """
Popular extensions
Flask Mail - Provides an SMTP interface for the Flask application
Flask WTF - Adds WTForms rendering and validation
Flask SQLAlchemy - Adds SQLAlchemy support to the Flask application
Flask Sijax - Sijax is a Python/jQuery interface library that makes AJAX easy to use in web applications
"""
# flaskext_compat re-enables the deprecated flask.ext namespace; it must be
# activated before the `from flask.ext import ...` below can resolve.
import flaskext_compat
flaskext_compat.activate()
from flask.ext import foo
| 24.352941 | 108 | 0.806763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.817536 |
106251ef07afcc86c70965ff744410c8564d5727 | 1,128 | py | Python | File/Common/directory.py | nikminer/HomeCloud | 7571e8002ef0919b382c3802d680421bd094d866 | [
"MIT"
] | null | null | null | File/Common/directory.py | nikminer/HomeCloud | 7571e8002ef0919b382c3802d680421bd094d866 | [
"MIT"
] | null | null | null | File/Common/directory.py | nikminer/HomeCloud | 7571e8002ef0919b382c3802d680421bd094d866 | [
"MIT"
] | null | null | null | import os
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
def isAccess(path):
    """Return True if the directory at *path* can be listed.

    Returns False when listing is denied by permissions; any other OS
    error (e.g. a missing path) propagates to the caller.
    """
    try:
        os.listdir(path)
    except PermissionError:
        return False
    return True
@login_required
def isExist(request):
    """Plain-text response: whether the POSTed 'path' exists on disk."""
    return HttpResponse(os.path.exists(os.path.abspath(request.POST['path'])))
def getPathHierrarhy(fullPath):
    """Split an absolute path like '/a/b/c' into breadcrumb Path objects.

    Each element carries the component name (``dir``) and the cumulative
    path up to it (``hierrarhy``); the trailing '/' is stripped from the
    deepest entry. Returns [] for an empty path — the original raised
    IndexError there because ``pathes[-1]`` was outside the guard.
    """
    pathes = []
    currpath = ""
    if fullPath:
        for part in fullPath[1:].split("/"):
            node = Path()
            node.dir = part
            currpath += part + "/"
            node.hierrarhy = currpath
            pathes.append(node)
    if pathes:
        # drop the trailing '/' on the deepest entry only
        pathes[-1].hierrarhy = pathes[-1].hierrarhy[0:-1]
    return pathes
def getPathHierrarhyFile(fullPath):
    """Breadcrumbs for a path whose last component is a file.

    Same as getPathHierrarhy, but the final (file) component is removed
    from the returned list. Returns [] for an empty path — the original
    raised IndexError there because ``pathes[-1]`` was outside the guard.
    """
    pathes = []
    currpath = ""
    if fullPath:
        for part in fullPath[1:].split("/"):
            node = Path()
            node.dir = part
            currpath += part + "/"
            node.hierrarhy = currpath
            pathes.append(node)
    if pathes:
        pathes[-1].hierrarhy = pathes[-1].hierrarhy[0:-1]
        # the last component is the file itself; exclude it from the crumbs
        del pathes[-1]
    return pathes
class Path:
    """Breadcrumb node: one path component plus the cumulative path to it."""
    # directory (or file) name of this component
    dir=""
    # cumulative path from the root up to and including this component
    hierrarhy=""
10632756b8a9faccc6b0059f4333f70590921702 | 1,887 | py | Python | plasmapy/examples/plot_distribution.py | techieashish/PlasmaPy | b1e4ea269e59011dcafd5bf3f658b43e683af645 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-02-05T18:40:50.000Z | 2020-02-05T18:40:50.000Z | plasmapy/examples/plot_distribution.py | techieashish/PlasmaPy | b1e4ea269e59011dcafd5bf3f658b43e683af645 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | plasmapy/examples/plot_distribution.py | techieashish/PlasmaPy | b1e4ea269e59011dcafd5bf3f658b43e683af645 | [
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """
1D Maxwellian distribution function
===================================
We import the usual modules, and the hero of this notebook,
the Maxwellian 1D distribution:
"""
import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
from astropy.constants import (m_e, k_B)
from plasmapy.formulary import Maxwellian_1D
############################################################
# Given we'll be plotting, import astropy's quantity support:
from astropy.visualization import quantity_support
quantity_support()
############################################################
# As a first example, let's get the probability density of
# finding an electron with a speed of 1 m/s if we have a
# plasma at a temperature of 30 000 K:
p_dens = Maxwellian_1D(v=1 * u.m / u.s,
                       T=30000 * u.K,
                       particle='e',
                       v_drift=0 * u.m / u.s)
print(p_dens)
############################################################
# Note the units! Integrated over speed, this will give us a
# probability. Let's test that for a bunch of particles:
T = 3e4 * u.K
dv = 10 * u.m / u.s
v = np.arange(-5e6, 5e6, 10) * u.m / u.s
############################################################
# Check that the integral over all speeds is 1
# (the particle has to be somewhere):
for particle in ['p', 'e']:
    pdf = Maxwellian_1D(v, T=T, particle=particle)
    # Riemann-sum approximation of the integral of the pdf over speed
    integral = (pdf).sum() * dv
    print(f"Integral value for {particle}: {integral}")
    plt.plot(v, pdf, label=particle)
plt.legend()
############################################################
# The standard deviation of this distribution should give us back the
# temperature (the code below inverts T = std**2 * m_e / k_B):
std = np.sqrt((Maxwellian_1D(v, T=T, particle='e') * v ** 2 * dv).sum())
T_theo = (std ** 2 / k_B * m_e).to(u.K)
print('T from standard deviation:', T_theo)
print('Initial T:', T)
| 28.164179 | 72 | 0.54796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,063 | 0.563328 |
1064936a6b00c876007e8bb9831e079b131672ea | 50 | py | Python | models/__init__.py | yoshikawat64m/kalman-variational-auto-encoder | 7d7d0ed170746267f26cc869a8645b2c93da350d | [
"MIT"
] | null | null | null | models/__init__.py | yoshikawat64m/kalman-variational-auto-encoder | 7d7d0ed170746267f26cc869a8645b2c93da350d | [
"MIT"
] | null | null | null | models/__init__.py | yoshikawat64m/kalman-variational-auto-encoder | 7d7d0ed170746267f26cc869a8645b2c93da350d | [
"MIT"
] | null | null | null | from .kvae import KVAE
# Public API of the models package (controls `from models import *`).
__all__ = (
    'KVAE',
)
| 8.333333 | 22 | 0.58 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.12 |
1065d659f9d77981c8c0c3a93121ee6395bb47c1 | 1,603 | py | Python | get_color_wordcloud.py | Joe606/scrape_sportshoes | dde27cfd97bae3212a2fc35c6fef822667799761 | [
"MIT"
] | null | null | null | get_color_wordcloud.py | Joe606/scrape_sportshoes | dde27cfd97bae3212a2fc35c6fef822667799761 | [
"MIT"
] | null | null | null | get_color_wordcloud.py | Joe606/scrape_sportshoes | dde27cfd97bae3212a2fc35c6fef822667799761 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pymysql
import time
import os
import matplotlib.pyplot as plt
# Visualize shoe sizes and colour names from the `all_comments` table:
# a scatter plot of sizes plus two word clouds.
print(os.getcwd())
db = pymysql.connect(
    host='localhost',
    user='root',
    passwd='xxxx',  # NOTE(review): hard-coded credentials; load from env/config
    database='男运动鞋'
)
cur = db.cursor()
cur.execute('select productSize from all_comments;')
# one row per comment; column 0 is the size value
size = [int(row[0]) for row in cur.fetchall()]
print(size)
x = range(1, len(size) + 1)
y = size
plt.figure()
plt.scatter(x, y, c='green')
plt.title('distribution about size of shoes')
plt.xlabel('man', color='b')
plt.ylabel('size of shoes', color='r')
plt.annotate('size', (1, 42))
plt.legend('point')
# bug fix: save BEFORE show -- with interactive backends show() tears the
# figure down, so the original savefig-after-show wrote out a blank image
plt.savefig('size.jpg')
plt.show()
import wordcloud
import jieba
cur.execute('select productColor from all_comments;')
# concatenate every colour value into one comma-separated string
color = str()
for row in cur.fetchall():
    color = color + ',' + row[0]
print(type(color), color.count('/'))
# split combined colours like 'red/blue' into separate words
color = color.replace('/', ',')
print(color)
#mytext = ''.join(jieba.cut(color))
#print(mytext)
wc = wordcloud.WordCloud(
    collocations=False,
    font_path='simfang.ttf',
    background_color='black',
    max_words=5000,
    max_font_size=300,
    width=1200,
    height=600,
    margin=2,
)
wc = wc.generate(text=color)
plt.imshow(wc)
plt.axis('off')
plt.show()
wc.to_file('wordcloud.png')
# second cloud: the raw shoe sizes themselves, plus one colour word
si = str()
for v in size:
    si = si + ',' + str(v)
si = si + ',' + 'black'
print(si)
print(type(si))
wc2 = wordcloud.WordCloud(collocations=False).generate(si)
plt.imshow(wc2)
plt.show()
wc2.to_file('wc2.jpg')
# close the cursor and connection (the original leaked both)
cur.close()
db.close()
| 17.423913 | 58 | 0.617592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.218227 |
1065da3e4283e981e05c9ad525d6d52a88a2a890 | 3,769 | py | Python | Lesson05_Strings/DNAExtravaganzaSOLUTION.py | WomensCodingCircle/CodingCirclePython | 703aff89ffa0a3933baa02881f325d62087eb0af | [
"MIT"
] | 4 | 2017-02-09T20:05:04.000Z | 2018-12-06T13:13:35.000Z | Lesson05_Strings/DNAExtravaganzaSOLUTION.py | WomensCodingCircle/CodingCirclePython | 703aff89ffa0a3933baa02881f325d62087eb0af | [
"MIT"
] | null | null | null | Lesson05_Strings/DNAExtravaganzaSOLUTION.py | WomensCodingCircle/CodingCirclePython | 703aff89ffa0a3933baa02881f325d62087eb0af | [
"MIT"
] | 12 | 2015-12-07T17:22:01.000Z | 2021-12-29T02:50:15.000Z | # A little bit of molecular biology
# Codons are non-overlapping triplets of nucleotides.
# ATG CCC CTG GTA ... - this corresponds to four codons; spaces added for emphasis
# The start codon is 'ATG'
# Stop codons can be 'TGA' , 'TAA', or 'TAG', but they must be 'in frame' with the start codon. The first stop codon usually determines the end of the gene.
# In other words:
# 'ATGCCTGA...' - here TGA is not a stop codon, because the T is part of CCT
# 'ATGCCTTGA...' - here TGA is a stop codon because it is in frame (i.e. a multiple of 3 nucleic acids from ATG)
# The gene is start codon to stop codon, inclusive
# Example"
# dna - GGCATGAAAGTCAGGGCAGAGCCATCTATTTGAGCTTAC
# gene - ATGAAAGTCAGGGCAGAGCCATCTATTTGA
#dna ='GGCATGAAAGTCAGGGCAGAGCCATCTATTGCTTACATTTGCTTCTGACACAACTGTGTTCACTAGCAACCTCAAACAGACACCATGGTGCACCTGACTCCTGAGGAGAAGTCTGCCGTTACTGCCCTGTGGGGCAAGGTGAACGTGGATGAAGTTGGTGGTGAGGCCCTGGGCAGGTTGGTATCAAGGTTACAAGACAGGTTTAAGGAGACCAATAGAAACTGGGCATGTGGAGACAGAGAAGACTCTTGGGTTTCTGATAGGCACTGACTCTCTCTGCCTATTGGTCTATTTTCCCACCCTTAGGCTGCTGGTGGTCTACCCTTGGACCCAGAGGTTCTTTGAGTCCTTTGGGGATCTGTCCACTCCTGATGCTGTTATGGGCAACCCTAAGGTGAAGGCTCATGGCAAGAAAGTGCTCGGTGCCTTTAGTGATGGCCTGGCTCACCTGGACAACCTCAAGGGCACCTTTGCCACACTGAGTGAGCTGCACTGTGACAAGCTGCACGTGGATCCTGAGAACTTCAGGGTGAGTCTATGGGACCCTTGATGTTTTCTTTCCCCTTCTTTTCTATGGTTAAGTTCATGTCATAGGAAGGGGAGAAGTAACAGGGTACAGTTTAGAATGGGAAACAGACGAATGATT'
dna = 'GGGATGTTTGGGCCCTACGGGCCCTGATCGGCT'
def startCodonIndex(seq):
    """Return the index of the first 'ATG' start codon in *seq*, or -1."""
    return seq.find('ATG')
def stopCodonIndex(seq, start_codon):
    """Return the index of the first in-frame stop codon after *start_codon*.

    Triplets are scanned starting right after the start codon, in steps of
    three; 'TAA', 'TGA' and 'TAG' are stop codons. Returns -1 if no stop
    codon is found in frame.
    """
    CODON = 3
    for pos in range(start_codon + CODON, len(seq), CODON):
        if seq[pos:pos + CODON] in ('TAA', 'TGA', 'TAG'):
            return pos
    return -1
def codingDNA(seq):
    """Return the coding sequence of *seq*: start codon through the first
    in-frame stop codon, inclusive.

    Bug fix: the original sliced the module-level ``dna`` variable instead
    of the *seq* parameter, so it silently returned the wrong gene for any
    other input. Also hardened: returns '' when there is no start codon,
    and the rest of *seq* when no in-frame stop codon exists.
    """
    codon_length = 3
    start_idx = startCodonIndex(seq)
    if start_idx < 0:
        # no start codon -> no gene
        return ''
    stop_idx = stopCodonIndex(seq, start_idx)
    if stop_idx < 0:
        # no in-frame stop codon; return from the start codon to the end
        return seq[start_idx:]
    return seq[start_idx: stop_idx + codon_length]
def numCodons(seq):
    """Return the number of whole codons (non-overlapping triplets) in *seq*.

    Partial trailing triplets are not counted (floor division).
    """
    return len(seq) // 3
def transcription(seq):
    """Transcribe a DNA coding sequence into its RNA complement.

    Mapping: A->U, T->A, C->G, G->C. Characters outside ATCG are silently
    dropped, matching the original behaviour.
    """
    complement = {'A': 'U', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement[base] for base in seq if base in complement)
# calling the functions
# It would be more accurate to calculate the number of codons from coding_dna
# Driver: run the whole pipeline on the example `dna` sequence defined above
# and print each intermediate result.
codons = numCodons(dna)
start = startCodonIndex(dna)
stop = stopCodonIndex(dna, start)
coding_dna = codingDNA(dna)
coding_rna = transcription(coding_dna)
print(("DNA: {}".format(dna)))
print(("CODONS: {}".format(codons)))
print(("START: {}".format(start)))
print(("STOP: {}".format(stop)))
print(("CODING DNA: {}".format(coding_dna)))
print(("TRANSCRIBED RNA: {}".format(coding_rna)))
| 39.673684 | 654 | 0.718493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,300 | 0.610241 |
1065f40c6cc9ffeddf508a5f723541790f0616f0 | 528 | py | Python | backend/radio/views.py | dtcooper/jewpizza | 374b79c887c46560066e2e01e981a68d1acbd20f | [
"MIT"
] | 5 | 2021-12-15T06:33:03.000Z | 2022-03-04T00:46:59.000Z | backend/radio/views.py | dtcooper/jewpizza | 374b79c887c46560066e2e01e981a68d1acbd20f | [
"MIT"
] | 2 | 2021-10-11T17:58:17.000Z | 2022-03-09T07:13:19.000Z | backend/radio/views.py | dtcooper/jewpizza | 374b79c887c46560066e2e01e981a68d1acbd20f | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.views.generic import TemplateView
class LiquidsoapScriptView(TemplateView):
    """Serve the rendered Liquidsoap script as plain text.

    Access is gated by a shared secret: the request must carry the
    project's SECRET_KEY in the X-Secret-Key header, except when DEBUG
    is enabled.
    """

    content_type = "text/plain"
    template_name = "radio/radio.liq"

    def dispatch(self, request, *args, **kwargs):
        # Guard clause: refuse unless the shared secret matches or DEBUG is on.
        provided_key = request.headers.get("X-Secret-Key")
        authorized = settings.DEBUG or provided_key == settings.SECRET_KEY
        if not authorized:
            raise PermissionDenied
        return super().dispatch(request, *args, **kwargs)
| 33 | 63 | 0.706439 | 394 | 0.746212 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.081439 |
106647c6640c0372a460301a5b4d20903e8a43b6 | 2,601 | py | Python | torcharc/module/merge.py | kengz/torcharc | e17043391c718a161956b4da98f9a7810efe62a2 | [
"MIT"
] | 1 | 2020-06-12T09:55:25.000Z | 2020-06-12T09:55:25.000Z | torcharc/module/merge.py | kengz/torcharc | e17043391c718a161956b4da98f9a7810efe62a2 | [
"MIT"
] | 5 | 2021-06-26T18:25:39.000Z | 2021-12-31T22:43:22.000Z | torcharc/module/merge.py | kengz/torcharc | e17043391c718a161956b4da98f9a7810efe62a2 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from torch import nn
from typing import Dict, List
import torch
class Merge(ABC, nn.Module):
    '''Abstract base for modules that combine a dict of named tensors into one tensor.'''

    @abstractmethod
    def forward(self, xs: dict) -> torch.Tensor:  # pragma: no cover
        '''Merge the tensors in xs and return the combined tensor.'''
        raise NotImplementedError
class ConcatMerge(Merge):
    '''Merge a dict of tensors by concatenating them along dim=1; the reverse of Split.'''

    def forward(self, xs: dict) -> torch.Tensor:
        # dict order is insertion order (Python 3.7+), so concatenation
        # order follows the order keys were added to xs
        tensors = list(xs.values())
        return torch.cat(tensors, dim=1)
class FiLMMerge(Merge):
    '''
    Feature-wise Linear Modulation (FiLM) merge layer; see https://distill.pub/2018/feature-wise-transformations/
    Merges exactly two tensors - a feature tensor and a conditioner vector - by
    affine-transforming the feature with projections of the conditioner:
        output = conditioner_scale * feature + conditioner_shift
    Two Linear layers project the conditioner to the feature/channel count, and
    the transform is applied element-wise per feature (or per channel for images).
    '''

    def __init__(self, names: Dict[str, str], shapes: Dict[str, List[int]]) -> None:
        super().__init__()
        # names maps the roles 'feature' and 'conditioner' to keys of the input dict
        self.feature_name = names['feature']
        self.conditioner_name = names['conditioner']
        assert len(shapes) == 2, f'shapes {shapes} should specify only two keys for feature and conditioner'
        # first dim of each shape: feature/channel count and conditioner vector size
        self.feature_size = shapes[self.feature_name][0]
        self.conditioner_size = shapes[self.conditioner_name][0]
        # projections that produce the per-feature scale and shift vectors
        self.conditioner_scale = nn.Linear(self.conditioner_size, self.feature_size)
        self.conditioner_shift = nn.Linear(self.conditioner_size, self.feature_size)

    @classmethod
    def affine_transform(cls, feature: torch.Tensor, conditioner_scale: torch.Tensor, conditioner_shift: torch.Tensor) -> torch.Tensor:
        '''Apply scale * feature + shift, broadcasting scale/shift over all trailing dims of feature.'''
        # pad the scale/shift shape with singleton dims so they broadcast
        # safely across the feature's remaining dimensions
        trailing = feature.dim() - conditioner_scale.dim()
        broadcast_shape = list(conditioner_scale.shape) + [1] * trailing
        scale = conditioner_scale.view(*broadcast_shape)
        shift = conditioner_shift.view(*broadcast_shape)
        return scale * feature + shift

    def forward(self, xs: dict) -> torch.Tensor:
        '''Apply the FiLM affine transform on the feature tensor using the conditioner tensor.'''
        conditioner = xs[self.conditioner_name]
        scale = self.conditioner_scale(conditioner)
        shift = self.conditioner_shift(conditioner)
        return self.affine_transform(xs[self.feature_name], scale, shift)
| 49.075472 | 205 | 0.721646 | 2,492 | 0.958093 | 0 | 0 | 581 | 0.223376 | 0 | 0 | 957 | 0.367935 |
1067f500bc63c5208908d4836e04b87c0d12fba5 | 6,919 | py | Python | tests/milvus_benchmark/local_runner.py | NeatNerdPrime/milvus | 98de0f87e99cd1ff86d8e63b91c76589b195abe1 | [
"Apache-2.0"
] | 1 | 2020-05-31T00:34:00.000Z | 2020-05-31T00:34:00.000Z | tests/milvus_benchmark/local_runner.py | NeatNerdPrime/milvus | 98de0f87e99cd1ff86d8e63b91c76589b195abe1 | [
"Apache-2.0"
] | 1 | 2021-03-25T23:23:50.000Z | 2021-03-25T23:23:50.000Z | tests/milvus_benchmark/local_runner.py | flydragon2018/milvus | fa1effdb91d9fd9710ff5a9ae519bd538e79b0b0 | [
"Apache-2.0"
] | 1 | 2021-05-23T15:04:01.000Z | 2021-05-23T15:04:01.000Z | import os
import logging
import pdb
import time
import random
from multiprocessing import Process
import numpy as np
from client import MilvusClient
import utils
import parser
from runner import Runner
logger = logging.getLogger("milvus_benchmark.local_runner")
class LocalRunner(Runner):
    """Run benchmark suites against a Milvus server at ip:port ("local mode").

    ``run`` dispatches a benchmark definition to either the performance
    path (timed insert/query) or the stability path (long-running mixed
    query + insert load).
    """

    def __init__(self, ip, port):
        super(LocalRunner, self).__init__()
        self.ip = ip
        self.port = port

    def run(self, definition, run_type=None):
        """Execute a benchmark definition.

        definition: dict mapping op_type ("insert"/"query") to
            {"run_count": int, "params": [param dict, ...]}
        run_type: "performance" or "stability"; anything else is a no-op,
            matching the original behavior.
        """
        if run_type == "performance":
            self._run_performance(definition)
        elif run_type == "stability":
            self._run_stability(definition)

    def _run_performance(self, definition):
        """Time raw insert and search operations per the definition."""
        for op_type, op_value in definition.items():
            run_count = op_value["run_count"]
            run_params = op_value["params"]
            if op_type == "insert":
                for param in run_params:
                    self._run_insert(param)
            elif op_type == "query":
                for param in run_params:
                    self._run_query(param, run_count)

    def _run_insert(self, param):
        """Recreate the table from scratch and bulk-insert generated vectors."""
        table_name = param["table_name"]
        # the table name encodes its config, e.g. random_1m_100_512
        (data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
        milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
        # drop any leftover table so the run starts from a clean slate
        if milvus.exists_table():
            milvus.delete()
            time.sleep(10)  # give the server time to finish the drop
        milvus.create_table(table_name, dimension, index_file_size, metric_type)
        res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
        logger.info(res)

    def _run_query(self, param, run_count):
        """Build each index variant, preload it, and time search latency."""
        logger.info("Definition param: %s" % str(param))
        table_name = param["dataset"]
        (data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
        milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
        # index variants and search parameters to sweep
        index_types = param["index.index_types"]
        nlists = param["index.nlists"]
        top_ks, nqs, nprobes = parser.search_params_parser(param)
        for index_type in index_types:
            for nlist in nlists:
                milvus.create_index(index_type, nlist)
                # preload so search latency is not skewed by cold segment loads
                milvus.preload_table()
                for nprobe in nprobes:
                    logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
                    res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
                    headers = [param["dataset"]]
                    headers.extend([str(top_k) for top_k in top_ks])
                    utils.print_table(headers, nqs, res)

    def _run_stability(self, definition):
        """Long-running mixed query/insert load against an existing table."""
        for op_type, op_value in definition.items():
            if op_type != "query":
                logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
                break
            run_count = op_value["run_count"]
            run_params = op_value["params"]
            nq = 10000  # vectors inserted per loop iteration
            for param in run_params:
                logger.info("Definition param: %s" % str(param))
                table_name = param["dataset"]
                (data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
                # total test duration: default 100 seconds, else minutes from param
                if "during_time" not in param:
                    during_time = 100  # seconds
                else:
                    during_time = int(param["during_time"]) * 60
                # parsed for parameter validation; the concurrent query-process
                # pool that consumed it was disabled upstream
                if "query_process_num" not in param:
                    query_process_num = 10
                else:
                    query_process_num = int(param["query_process_num"])
                # FIX: pass ip/port like the performance path does, instead of
                # silently falling back to the client's default host
                milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
                # stability runs require a pre-existing table
                if not milvus.exists_table():
                    logger.warning("Table %s not existed, continue exec next params ..." % table_name)
                    continue
                start_time = time.time()
                last_reindex_time = start_time
                insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
                while time.time() < start_time + during_time:
                    milvus_instance = MilvusClient(table_name, ip=self.ip, port=self.port)
                    # randomized search load each iteration
                    top_ks = random.sample(range(1, 100), 4)
                    nqs = random.sample(range(1, 1000), 3)
                    nprobe = random.choice(range(1, 500))
                    # NOTE(review): queries go through the outer `milvus` client
                    # while inserts use the per-iteration `milvus_instance`;
                    # preserved from the original — confirm this is intended.
                    self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
                    status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
                    if not status.OK():
                        logger.error(status.message)
                    # FIX: the original tested `(time.time() - start_time) % 300
                    # == 0`, which a float essentially never satisfies, so the
                    # periodic index rebuild never ran; rebuild every 300 s.
                    if time.time() - last_reindex_time >= 300:
                        last_reindex_time = time.time()
                        status = milvus_instance.drop_index()
                        if not status.OK():
                            logger.error(status.message)
                        index_type = random.choice(["flat", "ivf_flat", "ivf_sq8"])
                        status = milvus_instance.create_index(index_type, 16384)
                        if not status.OK():
                            logger.error(status.message)
| 52.022556 | 148 | 0.483162 | 6,653 | 0.961555 | 0 | 0 | 0 | 0 | 0 | 0 | 1,263 | 0.182541 |