hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5075a5097a61b674035f729519e126cb8f5c9b76 | 15,683 | py | Python | SVGPs/kernels.py | vincentadam87/SVGPs | 0de1194bf0f24997148dfce0cd6fbffae16fb3bc | [
"Apache-2.0"
] | 3 | 2017-09-28T21:02:58.000Z | 2018-02-06T17:58:48.000Z | SVGPs/kernels.py | vincentadam87/SVGPs | 0de1194bf0f24997148dfce0cd6fbffae16fb3bc | [
"Apache-2.0"
] | null | null | null | SVGPs/kernels.py | vincentadam87/SVGPs | 0de1194bf0f24997148dfce0cd6fbffae16fb3bc | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 James Hensman, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------
# Modification notice:
# This file was modified by Vincent ADAM
# ------------------------------------------
import tensorflow as tf
import numpy as np
from functools import reduce
from settings import int_type,float_type
from functions import eye
class Kern(object):
    """
    The basic kernel class. Handles input_dim and active dims, and provides a
    generic '_slice' function to implement them.
    """
    def __init__(self, input_dim, active_dims=None):
        """
        input dim is an integer
        active dims is either an iterable of integers or None.
        Input dim is the number of input dimensions to the kernel. If the
        kernel is computed on a matrix X which has more columns than input_dim,
        then by default, only the first input_dim columns are used. If
        different columns are required, then they may be specified by
        active_dims.
        If active dims is None, it effectively defaults to range(input_dim),
        but we store it as a slice for efficiency.
        """
        self.input_dim = int(input_dim)
        if active_dims is None:
            # Default: use the first input_dim columns of X.
            self.active_dims = slice(input_dim)
        elif type(active_dims) is slice:
            self.active_dims = active_dims
            # Only sanity-check the length when the slice is fully specified.
            if active_dims.start is not None and active_dims.stop is not None and active_dims.step is not None:
                assert len(range(*active_dims)) == input_dim # pragma: no cover
        else:
            # Explicit list of column indices.
            self.active_dims = np.array(active_dims, dtype=np.int32)
            assert len(active_dims) == input_dim
        # Quadrature resolution; presumably consumed by expectation code
        # elsewhere in the package (not referenced in this file).
        self.num_gauss_hermite_points = 20
    def _slice(self, X, X2):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims`.
        :param X: Input 1 (NxD[xB]).
        :param X2: Input 2 (MxD), may be None.
        :return: Sliced X, X2, (N x self.input_dim [x B]), (N x self.input_dim)
        """
        if X.get_shape().ndims == 2: # M x D
            if isinstance(self.active_dims, slice):
                X = X[:, self.active_dims]
                if X2 is not None:
                    X2 = X2[:, self.active_dims]
            else:
                # Fancy-index the columns: transpose so gather works on axis 0.
                X = tf.transpose(tf.gather(tf.transpose(X), self.active_dims))
                if X2 is not None:
                    X2 = tf.transpose(tf.gather(tf.transpose(X2), self.active_dims))
        elif X.get_shape().ndims == 3: # M x D x B
            if isinstance(self.active_dims, slice):
                X = X[:, self.active_dims, :]
                if X2 is not None:
                    X2 = X2[:, self.active_dims]
            else:
                # Bring the D axis first, gather the wanted dims, restore order.
                X = tf.transpose(tf.gather(tf.transpose(X, (1, 0, 2)), self.active_dims), (1, 0, 2))
                if X2 is not None:
                    X2 = tf.transpose(tf.gather(X2, self.active_dims))
        # Runtime assertion that the sliced X has exactly input_dim columns.
        with tf.control_dependencies([ tf.assert_equal(tf.shape(X)[1], tf.constant(self.input_dim, dtype=int_type)) ]):
            X = tf.identity(X)
        return X, X2
    def _slice_cov(self, cov):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims` for covariance matrices. This requires slicing the
        rows *and* columns. This will also turn flattened diagonal
        matrices into a tensor of full diagonal matrices.
        :param cov: Tensor of covariance matrices (NxDxD or NxD).
        :return: N x self.input_dim x self.input_dim.
        """
        # Rank-2 input is a batch of diagonals; expand to full matrices first.
        cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov)
        if isinstance(self.active_dims, slice):
            cov = cov[..., self.active_dims, self.active_dims]
        else:
            # Gather rows then columns of each matrix in the (flattened) batch.
            cov_shape = tf.shape(cov)
            covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]])
            gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims)
            gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims)
            # NOTE(review): tf.concat_v2 is a pre-1.0 TensorFlow API; modern TF
            # spells this tf.concat — confirm the pinned TF version.
            cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),
                             tf.concat_v2([cov_shape[:-2], [len(self.active_dims), len(self.active_dims)]], 0))
        return cov
class Stationary(Kern):
    """
    Base class for kernels that are stationary, that is, they only depend on
    r = || x - x' ||
    This class handles 'ARD' behaviour, which stands for 'Automatic Relevance
    Determination'. This means that the kernel has one lengthscale per
    dimension, otherwise the kernel is isotropic (has a single lengthscale).
    """
    def __init__(self, input_dim, variance=1.0, lengthscales=1.,
                 active_dims=None):
        """
        - input_dim is the dimension of the input to the kernel
        - variance is the (initial) value for the variance parameter
        - lengthscales is the initial value for the lengthscales parameter
          defaults to 1.0
        - active_dims is a list of length input_dim which controls which
          columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        # One lengthscale per input dimension (ARD); variance is a scalar.
        self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
    def square_dist(self, X, X2):
        """
        :param X: NxD[xB]
        :param X2: MxD
        :return: NxM[xB]
        """
        if X.get_shape().ndims == 2: # M x D
            # Scale inputs, then use ||x-y||^2 = x.x - 2 x.y + y.y.
            X = X / self.lengthscales
            Xs = tf.reduce_sum(tf.square(X), 1)
            if X2 is None:
                return -2 * tf.matmul(X, tf.transpose(X)) + \
                       tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
            else:
                X2 = X2 / self.lengthscales
                X2s = tf.reduce_sum(tf.square(X2), 1)
                return -2 * tf.matmul(X, tf.transpose(X2)) + \
                       tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))
        elif X.get_shape().ndims == 3: # M x D x B
            # Batched variant: lengthscales broadcast over the batch axis.
            X = X / tf.expand_dims(tf.expand_dims(self.lengthscales, -1), 0)
            Xs = tf.reduce_sum(tf.square(X), 1) # NxB
            if X2 is None:
                d = -2 * tf.matmul(tf.transpose(X, (2, 0, 1)), tf.transpose(X, (2, 1, 0))) + \
                    tf.expand_dims(tf.transpose(Xs), 1) + \
                    tf.expand_dims(tf.transpose(Xs), -1)
            else:
                # Tile the un-batched X2 across the batch dimension of X.
                shape = tf.stack([1, 1, tf.shape(X)[-1]])
                X2 = tf.tile(tf.expand_dims(X2 / self.lengthscales, -1), shape)
                X2s = tf.reduce_sum(tf.square(X2), 1) # NxB
                d = -2 * tf.matmul(tf.transpose(X, (2, 0, 1)), tf.transpose(X2, (2, 1, 0))) + \
                    tf.expand_dims(tf.transpose(Xs), -1) + \
                    tf.expand_dims(tf.transpose(X2s), 1)
            # d is BxNxN
            return tf.transpose(d, (1, 2, 0)) # N x N x B
    def euclid_dist(self, X, X2):
        # Small epsilon keeps the sqrt gradient finite at r = 0.
        r2 = self.square_dist(X, X2)
        return tf.sqrt(r2 + 1e-12)
    def Kdiag(self, X, presliced=False):
        # Stationary kernels have a constant diagonal equal to the variance.
        if X.get_shape().ndims == 2: # M x D
            return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
        elif X.get_shape().ndims == 3: # M x D x B
            return tf.fill(tf.stack([tf.shape(X)[0], tf.shape(X)[-1]]), tf.squeeze(self.variance))
class RBF(Stationary):
    """
    The radial basis function (RBF) or squared exponential kernel:
    k(x, x') = variance * exp(-0.5 * ||(x - x') / lengthscales||^2)
    """
    def K(self, X, X2=None, presliced=False):
        """Evaluate the kernel matrix between X and X2 (or X with itself)."""
        if not presliced:
            X, X2 = self._slice(X, X2)
        r2 = self.square_dist(X, X2)
        return self.variance * tf.exp(-0.5 * r2)
class PeriodicKernel(Kern):
    """
    The periodic kernel. Defined in Equation (47) of
    D.J.C.MacKay. Introduction to Gaussian processes. In C.M.Bishop, editor,
    Neural Networks and Machine Learning, pages 133--165. Springer, 1998.
    Derived using the mapping u=(cos(x), sin(x)) on the inputs.
    """
    def __init__(self, input_dim, period=1.0, variance=1.0,
                 lengthscales=1.0, active_dims=None):
        # Trainable parameters: per-dimension lengthscales, scalar variance
        # and scalar period.
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
        self.period = tf.get_variable("period", [1], initializer=tf.constant_initializer(period))
    def Kdiag(self, X, presliced=False):
        # The diagonal is constant and equal to the variance.
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
    def K(self, X, X2=None, presliced=False):
        """K[i, j] = variance * exp(-0.5 * sum_d sin(pi (x_i - x_j) / period)^2 / len_d^2)."""
        if not presliced:
            X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X
        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1) # now N x 1 x D
        f2 = tf.expand_dims(X2, 0) # now 1 x M x D
        r = np.pi * (f - f2) / self.period
        r = tf.reduce_sum(tf.square(tf.sin(r) / self.lengthscales), 2)
        return self.variance * tf.exp(-0.5 * r)
class LocallyPeriodicKernel(Kern):
    """
    k(t) = var * exp ( - t^2 / len^2 ) * cos ( 2 * pi * t / per )
    """
    def __init__(self, input_dim, period=1.0, variance=1.0,
                 lengthscales=1.0, active_dims=None):
        # Trainable parameters: per-dimension lengthscales, scalar variance
        # and scalar period.
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
        self.period = tf.get_variable("period", [1], initializer=tf.constant_initializer(period))
    def Kdiag(self, X, presliced=False):
        # k(0) = variance, so the diagonal is constant.
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
    def K(self, X, X2=None, presliced=False):
        """Evaluate the locally-periodic kernel; assumes effectively 1-D input."""
        if not presliced:
            X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X
        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1) # now N x 1 x D
        f2 = tf.expand_dims(X2, 0) # now 1 x M x D
        r = tf.reduce_sum(f-f2,2) #hack for 1d: collapses D by summing differences
        return self.variance * tf.exp( - tf.square(r/self.lengthscales) ) * tf.cos(2.*np.pi *r/ self.period)
class Combination(Kern):
    """
    Combine a list of kernels, e.g. by adding or multiplying (see inheriting
    classes).
    The names of the kernels to be combined are generated from their class
    names.
    """
    def __init__(self, kern_list):
        for k in kern_list:
            assert isinstance(k, Kern), "can only add Kern instances"
        # The combined input_dim must span every child's active columns:
        # for slice-based children use their input_dim, otherwise the
        # largest active index + 1.
        input_dim = np.max([k.input_dim
                            if type(k.active_dims) is slice else
                            np.max(k.active_dims) + 1
                            for k in kern_list])
        Kern.__init__(self, input_dim=input_dim)
        # add kernels to a list, flattening out instances of this class therein
        self.kern_list = kern_list
class Add(Combination):
    """Sum of the kernels in `kern_list`."""
    def K(self, X, X2=None, presliced=False):
        """Element-wise sum of every child kernel's Gram matrix."""
        total = self.kern_list[0].K(X, X2)
        for kern in self.kern_list[1:]:
            total = tf.add(total, kern.K(X, X2))
        return total
    def Kdiag(self, X, presliced=False):
        """Element-wise sum of every child kernel's diagonal."""
        total = self.kern_list[0].Kdiag(X)
        for kern in self.kern_list[1:]:
            total = tf.add(total, kern.Kdiag(X))
        return total
class Prod(Combination):
    """Product of the kernels in `kern_list`."""
    def K(self, X, X2=None, presliced=False):
        """Element-wise product of every child kernel's Gram matrix."""
        total = self.kern_list[0].K(X, X2)
        for kern in self.kern_list[1:]:
            total = tf.multiply(total, kern.K(X, X2))
        return total
    def Kdiag(self, X, presliced=False):
        """Element-wise product of every child kernel's diagonal."""
        total = self.kern_list[0].Kdiag(X)
        for kern in self.kern_list[1:]:
            total = tf.multiply(total, kern.Kdiag(X))
        return total
class Linear(Kern):
    """
    The linear kernel: k(x, x') = variance * <x, x'>
    """
    def __init__(self, input_dim, variance=1.0, active_dims=None):
        """
        - input_dim is the dimension of the input to the kernel
        - variance is the (initial) value for the variance parameter(s)
        - active_dims is a list of length input_dim which controls
          which columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
    def Kdiag(self, X, presliced=False):
        """Diagonal of K(X, X): variance * sum_d X[:, d]^2."""
        if not presliced:
            X, _ = self._slice(X, None)
        return tf.reduce_sum(tf.square(X) * self.variance, 1)
    def K(self, X, X2=None, presliced=False):
        """Gram matrix of the linear kernel (NxM, or NxMxB for batched X)."""
        if not presliced:
            # BUG FIX: Kern defines `_slice`, not `_slice_batch`; the original
            # call raised AttributeError whenever presliced was False.
            X, X2 = self._slice(X, X2)
        if X.get_shape().ndims == 2: # M x D
            if X2 is None:
                return tf.matmul(X * self.variance, X, transpose_b=True)
            else:
                return tf.matmul(X * self.variance, X2, transpose_b=True)
        elif X.get_shape().ndims == 3: # M x D x B
            # BUG FIX: the batched branch omitted the variance scaling that the
            # 2-D branch and Kdiag both apply; include it for consistency.
            if X2 is None:
                return self.variance * tf.einsum('ndb,mdb->nmb', X, X)
            else:
                return self.variance * tf.einsum('ndb,md->nmb', X, X2)
class Static(Kern):
    """
    Kernels who don't depend on the value of the inputs are 'Static'. The only
    parameter is a variance.
    """
    def __init__(self, input_dim, variance=1.0, active_dims=None):
        Kern.__init__(self, input_dim, active_dims)
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
    def Kdiag(self, X,presliced=False):
        """Constant diagonal filled with the variance (shape N, or NxB for batched X)."""
        if not presliced:
            # BUG FIX: Kern defines `_slice`, not `_slice_batch`; the original
            # call raised AttributeError whenever presliced was False.
            X, _ = self._slice(X, None)
        if X.get_shape().ndims == 2: # M x D
            return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
        elif X.get_shape().ndims == 3: # M x D x B
            return tf.fill(tf.stack([tf.shape(X)[0],tf.shape(X)[-1]]), tf.squeeze(self.variance))
class White(Static):
    """
    The White kernel: variance on the diagonal of K(X, X), zero everywhere
    else (and identically zero for cross-covariances K(X, X2)).
    """
    def K(self, X, X2=None, presliced=False):
        if X.get_shape().ndims == 2: # M x D
            if X2 is None:
                # Diagonal matrix with the variance on the diagonal.
                d = tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
                return tf.diag(d)
            else:
                # White noise is independent across inputs: cross-cov is zero.
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0]])
                return tf.zeros(shape, float_type)
        elif X.get_shape().ndims == 3: # M x D x B
            if X2 is None:
                # Batch of diagonal matrices, returned as N x N x B.
                d = tf.fill(tf.stack([tf.shape(X)[-1], tf.shape(X)[0]]), tf.squeeze(self.variance))
                return tf.transpose(tf.matrix_diag(d), (1, 2, 0))
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0], tf.shape(X)[-1]])
                return tf.zeros(shape, float_type)
class Constant(Static):
    """
    The constant kernel: every entry of K equals the variance.
    """
    def K(self, X, X2=None, presliced=False):
        # Only the output shape depends on the inputs; the value is constant.
        if X.get_shape().ndims == 2: # M x D
            if X2 is None: # returns the prior
                shape = tf.stack([tf.shape(X)[0], tf.shape(X)[0]])
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0]])
        elif X.get_shape().ndims == 3: # M x D x B
            if X2 is None: # returns the prior
                shape = tf.stack([tf.shape(X)[0], tf.shape(X)[0], tf.shape(X)[-1]])
            else:
                shape = tf.stack([tf.shape(X)[0], tf.shape(X2)[0], tf.shape(X)[-1]])
        return tf.fill(shape, tf.squeeze(self.variance))
| 38.723457 | 124 | 0.58184 | 14,757 | 0.940955 | 0 | 0 | 0 | 0 | 0 | 0 | 4,633 | 0.295415 |
5078031a29f9c615ac065fdda79ac390f846ff12 | 1,708 | py | Python | setup.py | schoenemeyer/pyheatmagic | c7e1dbaf1ff01fc990cd519f93449cae20fc2bab | [
"MIT"
] | null | null | null | setup.py | schoenemeyer/pyheatmagic | c7e1dbaf1ff01fc990cd519f93449cae20fc2bab | [
"MIT"
] | null | null | null | setup.py | schoenemeyer/pyheatmagic | c7e1dbaf1ff01fc990cd519f93449cae20fc2bab | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Prefer setuptools; fall back to distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
from os import path
# Directory containing this setup.py (used to locate the README).
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.rst")) as f:
    long_description = f.read()
setup(
    # Name of the module
    name="py-heat-magic",
    # Details
    version="0.0.2",
    description="py-heat as IPython magic",
    long_description=long_description,
    # The project's main homepage.
    url="https://github.com/csurfer/pyheatmagic",
    # Author details
    author="Vishwas B Sharma",
    author_email="sharma.vishwas88@gmail.com",
    # License
    license="MIT",
    # Single-module distribution: ships heat.py only.
    py_modules=["heat"],
    keywords="heatmap matplotlib profiling python IPython",
    classifiers=[
        # Intended Audience.
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        # License.
        "License :: OSI Approved :: MIT License",
        # Project maturity.
        "Development Status :: 3 - Alpha",
        # Operating Systems.
        "Operating System :: POSIX",
        # Supported Languages.
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        # Topic tags.
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # Runtime dependencies installed alongside the package.
    install_requires=[
        "numpy",
        "scipy",
        "matplotlib",
        "ipython",
        "jupyter",
        "pandas",
        "sympy",
        "nose",
        "py-heat",
    ],
)
| 27.548387 | 71 | 0.597775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 961 | 0.562646 |
507808c6821aee8867cf4c8684a2d08868e253a3 | 1,104 | py | Python | certificator/meetup/__init__.py | lamenezes/certificator | fdb3d6f31499da8705de5453b8f55c0d313761bd | [
"MIT"
] | 19 | 2017-09-26T14:03:33.000Z | 2021-09-11T18:18:53.000Z | certificator/meetup/__init__.py | lamenezes/certificator | fdb3d6f31499da8705de5453b8f55c0d313761bd | [
"MIT"
] | 12 | 2017-10-01T20:10:51.000Z | 2019-05-22T01:39:48.000Z | certificator/meetup/__init__.py | lamenezes/certificator | fdb3d6f31499da8705de5453b8f55c0d313761bd | [
"MIT"
] | 5 | 2017-10-24T22:14:26.000Z | 2021-09-11T18:18:55.000Z | from datetime import datetime as dt
from .client import MeetupClient
from ..certificator import BaseCertificator
from .models import Event
class MeetupCertificator(BaseCertificator):
    """Certificate generator backed by the Meetup API.

    Fetches attendance and event details for one Meetup event and exposes
    them in the shape expected by BaseCertificator.
    """
    def __init__(self, urlname, event_id, api_key, **kwargs):
        # urlname: the group's URL slug; event_id: the Meetup event id.
        super().__init__(**kwargs)
        self.urlname = urlname
        self.event_id = event_id
        self.client = MeetupClient(api_key=api_key)
    @property
    def certificate_data(self):
        # Lazily yields one {'name': ...} dict per attendee (generator, so the
        # API response is only iterated when certificates are rendered).
        attendances = self.client.get_attendances(self.urlname, self.event_id)
        return ({'name': attendance['member']['name']} for attendance in attendances)
    @property
    def meta(self):
        # Event-level metadata shared by every certificate.
        event_data = self.client.get_event(self.urlname, self.event_id)
        event = Event(**event_data)
        event.clean()
        return {
            'city': event.venue['city'],
            'date': dt.strftime(event.date, '%d/%m/%Y'),
            'full_date': event.full_date,
            'organizer': event.group['name'],
            'place': event.venue['name'],
            'title': event.name,
            'workload': event.duration,
        }
507882446718b2c00a42bfdd52f4470e8480586d | 5,583 | py | Python | kolab/yk/yk2.py | KuramitsuLab/kolab | 91fa4bae4a440a15291ba2d2690e4e335cbfd21e | [
"MIT"
] | null | null | null | kolab/yk/yk2.py | KuramitsuLab/kolab | 91fa4bae4a440a15291ba2d2690e4e335cbfd21e | [
"MIT"
] | 1 | 2021-11-14T05:38:27.000Z | 2021-11-14T05:38:27.000Z | kolab/yk/yk2.py | KuramitsuLab/kolab | 91fa4bae4a440a15291ba2d2690e4e335cbfd21e | [
"MIT"
] | 7 | 2020-11-02T13:05:44.000Z | 2022-01-09T11:06:04.000Z | from os import read
import random
import sys
import pegtree as pg
import argparse
import csv
from pegtree.optimizer import optimize
peg = pg.grammar('yk.tpeg')
parse = pg.generate(peg)
parser = argparse.ArgumentParser(description='yk for Parameter Handling')
parser.add_argument('--notConv', action='store_true') # Python のトークナイズのみ
parser.add_argument('--diff', action='store_true') # 変数名 (name) とリテラル (val) に異なるものを付与
parser.add_argument('--shuffle', action='store_true') # 特殊トークンをランダムに付与 (順序を考慮しない)
parser.add_argument('--both', action='store_true') # shuffle ありとなしを両方追加
parser.add_argument('--files', nargs='*') # 入力ファイルを与える
args = parser.parse_args()
token_idx = list(range(1, 7))
def replace_as_special_parameter(s, mapped, token_idx=token_idx, tag=None): # mapped => {'df': '<A>'}
    """Return the placeholder token for *s*, assigning a new one on first use.

    `mapped` accumulates the assignment (e.g. {'df': '<name1>'}); the index of
    the next placeholder is taken from `token_idx` by current map size.
    """
    if s in mapped:
        return mapped[s]
    idx = token_idx[len(mapped)]
    if tag == 'Name':
        placeholder = f'<name{idx}>'
    elif tag == 'Value':
        placeholder = f'<val{idx}>'
    else:
        placeholder = f'<var{idx}>'
    mapped[s] = placeholder
    return placeholder
def convert_nothing(tok, doc, mapped, token_idx, diff):
    """Identity conversion: keep the token's text, but map ';' to '<sep>'."""
    text = str(tok)
    return '<sep>' if text == ';' else text
def convert_all(tok, doc, mapped, token_idx, diff):
    """Replace code tokens that also appear in the doc with special placeholders.

    tok: a parse-tree node (tag 'Name' or 'Value' triggers replacement);
    doc: list of doc tokens; mapped: shared token->placeholder map;
    diff: when True, Names and Values get distinct placeholder families and
    Names followed by '軸' ("axis") / '座標' ("coordinate") are kept verbatim.
    """
    tag = tok.getTag()
    s = str(tok)
    if diff:
        if tag == 'Name':
            if s in doc:
                # Collect every position of s in the doc.
                in_idx = [i for i, x in enumerate(doc) if x == s]
                flag = 0
                for idx in in_idx:
                    try:
                        # '軸' = "axis", '座標' = "coordinate": such usages are
                        # descriptive, not variable references.
                        if '軸' in doc[idx+1] or '座標' in doc[idx+1]:
                            flag += 1
                    except:
                        pass
                if len(in_idx) == flag:
                    # Every occurrence is descriptive: keep the name as-is.
                    return s
                else:
                    return replace_as_special_parameter(s, mapped, token_idx, tag='Name')
            else:
                # Attribute access: separate the leading dot for tokenization.
                if s.startswith('.'):
                    s = '. ' + s[1:]
                return s
        if tag == 'Value':
            # Try the literal as-is, then re-quoted with ' and " variants.
            if s in doc:
                return replace_as_special_parameter(s, mapped, token_idx, tag='Value')
            s_q1 = f"'{s[1:-1]}'"
            if s_q1 in doc:
                return replace_as_special_parameter(s_q1, mapped, token_idx, tag='Value')
            s_q2 = f'"{s[1:-1]}"'
            if s_q2 in doc:
                return replace_as_special_parameter(s_q2, mapped, token_idx, tag='Value')
    else:
        # Same logic without the Name/Value distinction: both families share
        # the generic '<varN>' placeholders.
        if tag == 'Name':
            if s in doc:
                return replace_as_special_parameter(s, mapped, token_idx)
            else:
                if s.startswith('.'):
                    s = '. ' + s[1:]
                return s
        if tag == 'Value':
            if s in doc:
                return replace_as_special_parameter(s, mapped, token_idx)
            s_q1 = f"'{s[1:-1]}'"
            if s_q1 in doc:
                return replace_as_special_parameter(s_q1, mapped, token_idx)
            s_q2 = f'"{s[1:-1]}"'
            if s_q2 in doc:
                return replace_as_special_parameter(s_q2, mapped, token_idx)
    # Fall through: keep the token text unchanged (';' becomes '<sep>').
    return convert_nothing(tok, doc, mapped, token_idx, diff)
def make(code, doc0, convert=convert_all, token_idx=token_idx, diff=False):
    """Tokenize a (code, doc) pair and apply placeholder substitution to both.

    Returns (code, doc) as space-joined token strings where tokens shared
    between the two sides are replaced consistently via `mapped`.
    """
    mapped = {}
    doc = []
    # Tokenize the doc; 'Raw' tokens that appear quoted in the code are kept
    # with the same quoting style the code uses.
    for tok in parse(doc0):
        s = str(tok)
        if tok.getTag() == 'Raw':
            q = f"'{s}'"
            q2 = f'"{s}"'
            if q in code:
                doc.append(q)
                continue
            if q2 in code:
                doc.append(q2)
                continue
        doc.append(s)
    # Convert every code token; `mapped` is filled as a side effect.
    ws = [convert(tok, doc, mapped, token_idx, diff) for tok in parse(code)]
    code = ' '.join(ws)
    ws = []
    # Rewrite the doc with the same placeholders, except tokens immediately
    # followed by both '軸' ("axis") and '座標' ("coordinate"), which are kept.
    for idx, tok in enumerate(doc):
        if tok.strip() != '':
            if tok in mapped:
                try:
                    if '軸' in doc[idx+1] and '座標' in doc[idx+1]:
                        ws.append(tok)
                    else:
                        ws.append(mapped[tok])
                except:
                    # idx+1 out of range: last token, substitute normally.
                    ws.append(mapped[tok])
            else:
                ws.append(tok)
    doc = ' '.join(ws)
    return code, doc
def read_tsv(input_filename, output_filename=None):
    """Process a TSV of (code, doc) rows and emit the converted pairs.

    When output_filename is None, rows are printed to stdout; otherwise they
    are written as TSV. NOTE(review): despite its name, `output_filename` is
    used as an already-open file object (callers pass sys.stdout).
    """
    with open(input_filename) as f:
        reader = csv.reader(f, delimiter='\t')
        if output_filename != None:
            writer = csv.writer(output_filename, delimiter='\t')
        for row in reader:
            code0 = None
            if args.both:
                # First pass with the canonical (unshuffled) token indices.
                token_idx0 = list(range(1, 7))
                code0, doc0 = make(row[0], row[1], convert=convert_all , token_idx=token_idx0, diff=args.diff)
            if args.shuffle or args.both:
                # Shuffle the module-level index list in place.
                random.shuffle(token_idx)
            if args.notConv:
                code, doc = make(row[0], row[1], convert=convert_nothing , token_idx=token_idx, diff=args.diff)
            else:
                code, doc = make(row[0], row[1], convert=convert_all, token_idx=token_idx, diff=args.diff)
            if output_filename == None:
                print(code, doc)
                # With --both, also emit the unshuffled variant when distinct.
                if code0 != None and code0 != code:
                    print(code0, doc0)
            else:
                writer.writerow([code, doc])
                if code0 != None and code0 != code:
                    writer.writerow([code0, doc0])
if __name__ == '__main__':
    if args.files != None:
        for filename in args.files:
            # Try writing to stdout; on any failure, retry in print mode.
            try:
                read_tsv(filename, sys.stdout)
            except:
                read_tsv(filename)
    else:
        pass
| 32.649123 | 111 | 0.506538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.132708 |
5078b6e119c9edbe09ab52c04f0e4fca017f3a0f | 3,491 | py | Python | Lesson_6_Homework.py | verafes/python_training | 3cf989e5924d40f78f218de323c07efa85f2edcf | [
"Apache-2.0"
] | null | null | null | Lesson_6_Homework.py | verafes/python_training | 3cf989e5924d40f78f218de323c07efa85f2edcf | [
"Apache-2.0"
] | null | null | null | Lesson_6_Homework.py | verafes/python_training | 3cf989e5924d40f78f218de323c07efa85f2edcf | [
"Apache-2.0"
] | null | null | null | # Homework #6. Loops
print("--- Task #1. 10 monkeys")
# Task #1. Write a program that output the following string: "1 monkey 2 monkeys ... 10 monkeys".
# Accumulate the phrase, using the singular form only for the first count.
for x in range(1, 11):
    if x == 1:
        monkey = f"{x} monkey "
    else:
        monkey = monkey + f"{x} monkeys "
print(monkey.strip())
print("\n--- Task #2. Countdown timer")
# Task #2. Write a program that output the string that tracks the number of seconds that remain for the roket launching: "10 seconds...9 seconds...8 seconds...7 seconds...6 seconds...5 seconds...4 seconds...3 seconds...2 seconds...1 second"
for x in range(10, 0, -1):
    print(str(x) + " seconds...")
print()
print("\n--- Task #3")
# Task #3. Input two numbers k and n. Calculate you own power (k**n) without using power (**) operator but by using repeated multiplication (number is being multiplied by itself).
# Example: 3**4 = 81 is equivalent to 3*3*3*3 = 81.
n = int(input("Please enter any number: "))
k = int(input("Please enter any number for a power: "))
x = 1
s = n  # keep the original base; n becomes the running product
for x in range(1, k):
    # Multiply k-1 times so that n ends up as s**k.
    n = s * n
    x += 1
print("k ** n =", n)
# Build the human-readable "s * s * ... * s = result" string.
m = (str(s) + " * ") * (k-1)
print(f"{m}{s} = {n}")
print("\n--- Task #4")
# Task #4. The first day of training, the athlete ran 5 km. Each next day, he ran 5% more than the day before. How many kilometers will the athlete run on the 10th day?
day = 1
distance = 5
print("The first day distance = ", distance, "km")
distance2 = distance * (1.05**9) # for checking: closed-form 5 * 1.05^9
print(f"The 10th day distance should be {distance} * (1.05 ** 9) =", round(distance2,2), "km")
print()
while day < 10:
    # Grow the distance by 5% each day.
    distance += distance * 5/100
    print(distance)
    day += 1
print("On the 10th day, the athlete run ", round(distance,2), "km")
print("\n--- Task #5. ")
# Task #5. The student did not know a single English word at the beginning of the training. On the first day of class, he learned 5 English words. On each next day, he learned 2 more words than the day before. In how many days will the student know at least n English words?
n = int(input("Please enter number of words: "))
day = 0 # d2
words = 5
print(f"The student knew {day} words before training session.")
print(f"The student learned {words} on the first day.")
total = 5
while words <= n:
    # Each day adds two more words than the previous day.
    words = words + 2
    day = day + 1
    # total = total + words #?
print(f"The student will learn {n} words at the the {day} day, but he may learn {words} words by the end of the {day} day of the traning.")
print("Verify with addition: 5" + " + 2" * day + " = " + str(words) + " words")
print(total) #?
print("\n--- Task #6. ")
# Task #6. Prompt to a user to input the nunber of steps. Get the string that contains stairs made of sharp sign (#).
# #
#  #
#   #
#    #
#     #
num = int(input("How many steps in the stairs: "))
stairs = ''
x = 1
while x <= num:
    # Indent each step one column further than the previous.
    print(" " * x + "#")
    x += 1
print(stairs)
print("--- Task #7. ")
# 7. Output stars having the form of a pyramid. With the command input, get the number of levels. Use function for center align the string.
#    *
#   ***
#  *****
# *******
levels = int(input("Please enter any number of levels for pyramid: "))
star = '*'
x = 1
# ver 1: build "*", "***", ... and center each row
c_point = levels * 2 # -> extra space before pyramid if levels*2+1
for x in range(levels):
    star = "*" + x * 2 * "*"
    x += 1
    print(star.center(c_point))
# ver 2 - in class: compute 2x-1 stars per level directly
# levels = int(input("Please enter any number of levels for pyramid: "))
c_point = levels * 2 - 1
for x in range(1,levels + 1):
    stars = x * 2 - 1
    print(("*" * stars).center(c_point))
| 33.247619 | 274 | 0.630765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,359 | 0.675738 |
5078d56ccc790f70f1e43a28737794a0f83dfcac | 1,022 | py | Python | creel_portal/api/filters/FN125Tag_Filter.py | AdamCottrill/CreelPortal | 5ec867c4f11b4231c112e8209116b6b96c2830ec | [
"MIT"
] | null | null | null | creel_portal/api/filters/FN125Tag_Filter.py | AdamCottrill/CreelPortal | 5ec867c4f11b4231c112e8209116b6b96c2830ec | [
"MIT"
] | null | null | null | creel_portal/api/filters/FN125Tag_Filter.py | AdamCottrill/CreelPortal | 5ec867c4f11b4231c112e8209116b6b96c2830ec | [
"MIT"
] | null | null | null | import django_filters
from .filter_utils import ValueInFilter
from ...models import FN125_Tag
from .FishAttr_Filter import FishAttrFilters
class FN125TagFilter(FishAttrFilters):
    """A filter set class for fish tag data. Inherits all of the filters in
    FishAttrFilters and adds some that are specific to tag attributes
    (tagid, tagdoc, tagstat).
    """
    # Exact/like/exclusion filters on the tag id.
    tagid = ValueInFilter(field_name="tagid")
    tagid__like = ValueInFilter(field_name="tagid")
    tagid__not_like = ValueInFilter(field_name="tagid", exclude=True)
    # Exact/like/exclusion filters on the tag documentation code.
    tagdoc = ValueInFilter(field_name="tagdoc")
    tagdoc__like = ValueInFilter(field_name="tagdoc")
    tagdoc__not_like = ValueInFilter(field_name="tagdoc", exclude=True)
    # Inclusion/exclusion filters on the tag status.
    tagstat = ValueInFilter(field_name="tagstat")
    tagstat__not = ValueInFilter(field_name="tagstat", exclude=True)
    # TODO: consider splitting up tagdoc into constituent fields to make it
    # easier to filter by colour, placement, tag type and agency.
    class Meta:
        model = FN125_Tag
        fields = ["tagstat", "tagid", "tagdoc"]
50798cd80915d992ba6dd1be2547207b24526dd0 | 858 | py | Python | testing/test_file_path.py | xapple/autopaths | 4b9bdf220454c38d53c90e2375c00eee036dd02b | [
"MIT"
] | null | null | null | testing/test_file_path.py | xapple/autopaths | 4b9bdf220454c38d53c90e2375c00eee036dd02b | [
"MIT"
] | null | null | null | testing/test_file_path.py | xapple/autopaths | 4b9bdf220454c38d53c90e2375c00eee036dd02b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Some simple tests for the autopaths package.
You can run this file like this:
ipython -i -- ~/repos/autopaths/test/test_file_path.py
"""
# Built-in modules #
import os, inspect
# Get current directory (works always) #
file_name = os.path.abspath((inspect.stack()[0])[1])
this_dir = os.path.dirname(os.path.abspath(file_name)) + '/'
# All our example file system #
dummy_files = this_dir + 'dummy_file_system/'
# Internal modules #
from autopaths.dir_path import DirectoryPath
###############################################################################
def test_symlink():
d = DirectoryPath(dummy_files)
one = d['one.txt']
one.link_to(d + 'one_link.txt')
###############################################################################
if __name__ == '__main__':
test_symlink() | 26 | 79 | 0.564103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.599068 |
507ab70263733fd377475d2677b804a7e98a6466 | 350 | py | Python | RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
import TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi
#PropagatorWithMaterialESProducer
oppositeToMomElePropagator = TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi.OppositeMaterialPropagator.clone(
Mass = 0.000511,
ComponentName = 'oppositeToMomElePropagator'
)
| 38.888889 | 123 | 0.845714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.177143 |
507af7b7c926d5ad374babb7e525c9985c6d28d6 | 155 | py | Python | test/__init__.py | gjhiggins/rdflib-sqlalchemy | d4c057934cd2675083d3df943103bdffb20341d4 | [
"BSD-3-Clause"
] | 112 | 2015-02-21T15:56:34.000Z | 2022-02-22T12:10:26.000Z | test/__init__.py | gjhiggins/rdflib-sqlalchemy | d4c057934cd2675083d3df943103bdffb20341d4 | [
"BSD-3-Clause"
] | 64 | 2015-01-22T12:40:11.000Z | 2021-12-27T19:15:14.000Z | test/__init__.py | gjhiggins/rdflib-sqlalchemy | d4c057934cd2675083d3df943103bdffb20341d4 | [
"BSD-3-Clause"
] | 28 | 2015-06-22T08:06:58.000Z | 2022-02-16T11:17:49.000Z | from rdflib import plugin
from rdflib import store
plugin.register(
"SQLAlchemy",
store.Store,
"rdflib_sqlalchemy.store",
"SQLAlchemy",
)
| 15.5 | 30 | 0.703226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.316129 |
507b62f01d2d7eb84fda5bfb12a25df2f50e5d9b | 943 | py | Python | Others/jsc/jsc2019-qual/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/jsc/jsc2019-qual/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/jsc/jsc2019-qual/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
    """AtCoder JSC2019-Qual B: count inversions in the sequence made of
    k concatenated copies of a, modulo 10^9 + 7."""
    n, k = map(int, input().split())
    a = list(map(int, input().split()))
    mod = 10 ** 9 + 7

    ans = 0

    # See:
    # https://www.youtube.com/watch?v=JTH27weC38k
    # https://atcoder.jp/contests/jsc2019-qual/submissions/7107452

    # Key Insight
    # Count ordered pairs (i, j) with value[i] > value[j] and i < j,
    # splitting into cases: both indices in the same copied block,
    # or in two different blocks.

    # Case 1: both indices fall in the same block.
    # Exhaustively count in-block pairs with a[i] > a[j] (i < j).
    for i in range(n - 1):
        count = 0

        for j in range(i + 1, n):
            if a[i] > a[j]:
                count += 1

        # The same in-block count occurs in each of the k blocks.
        ans += count * k
        ans %= mod

    # Case 2: the indices fall in two different blocks; relative positions
    # within the blocks no longer matter, so compare every pair (i, j).
    for i in range(n):
        count = 0

        for j in range(n):
            if a[i] > a[j]:
                count += 1

        # Choose the 2 blocks out of k (kC2 ways).
        ans += count * (k * (k - 1) // 2)
        ans %= mod

    print(ans)


if __name__ == '__main__':
    main()
| 19.645833 | 67 | 0.448568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.436123 |
507cca4ec63870b93d1cdafab433d17c43eeaf55 | 1,494 | py | Python | Python-Programs/dicord.py-bot-suggest-commands/main.py | adityaverma121/Simple-Programs | 8450560b97f89e0fa3da16a623ad35c0b26409c9 | [
"MIT"
] | 71 | 2021-09-30T11:25:12.000Z | 2021-10-03T11:33:22.000Z | Python-Programs/dicord.py-bot-suggest-commands/main.py | adityaverma121/Simple-Programs | 8450560b97f89e0fa3da16a623ad35c0b26409c9 | [
"MIT"
] | 186 | 2021-09-30T12:25:16.000Z | 2021-10-03T13:45:04.000Z | Python-Programs/dicord.py-bot-suggest-commands/main.py | adityaverma121/Simple-Programs | 8450560b97f89e0fa3da16a623ad35c0b26409c9 | [
"MIT"
] | 385 | 2021-09-30T11:34:23.000Z | 2021-10-03T13:41:00.000Z | import difflib
import discord
from discord.ext import commands
from discord.ext.commands import CommandNotFound
# Request every gateway intent so the bot receives all event types.
intents = discord.Intents.all()
# "+" is the command prefix; help_command=None disables the built-in help.
client = commands.Bot(command_prefix="+", intents=intents, help_command=None)
@client.event
async def on_ready():
    # Fired once the gateway connection is established and the cache is ready.
    print("Bot Online")
@client.event
async def on_command_error(ctx: commands.Context, exc):
    """Global command-error hook.

    When the user invoked a command that does not exist, reply with a
    close-match suggestion; every other command error is ignored.
    """
    if not isinstance(exc, CommandNotFound):
        return
    await send_command_suggestion(ctx, ctx.invoked_with)
async def send_command_suggestion(ctx: commands.Context, command_name: str) -> None:
    """Sends user similar commands if any can be found.

    Collects every visible command name/alias, fuzzy-matches the mistyped
    name against them, and replies with the original message text with the
    bad name replaced by the best match. The reply self-deletes after 10 s.
    """
    # Gather all non-hidden command names and aliases as candidates.
    raw_commands = []
    for cmd in client.walk_commands():
        if not cmd.hidden:
            raw_commands += (cmd.name, *cmd.aliases)
    # Take at most one close match (difflib default cutoff applies).
    if similar_command_data := difflib.get_close_matches(command_name, raw_commands, 1):
        similar_command_name = similar_command_data[0]
        similar_command = client.get_command(similar_command_name)
        if not similar_command:
            return
        # Only suggest commands the invoking user is actually allowed to run.
        try:
            if not await similar_command.can_run(ctx):
                return
        except commands.errors.CommandError:
            return
        misspelled_content = ctx.message.content
        e = discord.Embed()
        e.set_author(name="Did you mean:")
        # Replace only the first occurrence of the mistyped command name.
        e.description = misspelled_content.replace(
            command_name, similar_command_name, 1
        )
        await ctx.send(embed=e, delete_after=10.0)
client.run("TOKEN")
| 28.730769 | 88 | 0.678715 | 0 | 0 | 0 | 0 | 253 | 0.169344 | 1,211 | 0.810576 | 91 | 0.06091 |
507e76ba862e5ec623301b68c6d97748d8e7260c | 2,898 | py | Python | hummingbot/connector/exchange/bitfinex/bitfinex_api_user_stream_data_source.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 37 | 2020-07-08T03:44:26.000Z | 2022-01-16T12:35:26.000Z | hummingbot/connector/exchange/bitfinex/bitfinex_api_user_stream_data_source.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 17 | 2022-01-28T14:19:30.000Z | 2022-03-31T08:54:28.000Z | hummingbot/connector/exchange/bitfinex/bitfinex_api_user_stream_data_source.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 17 | 2021-04-07T21:29:46.000Z | 2022-02-03T02:01:04.000Z | import asyncio
import logging
import time
from typing import Optional, List
from hummingbot.core.data_type.user_stream_tracker_data_source import \
UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_websocket import BitfinexWebsocket
from hummingbot.connector.exchange.bitfinex.bitfinex_auth import BitfinexAuth
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
BitfinexOrderBookMessage
class BitfinexAPIUserStreamDataSource(UserStreamTrackerDataSource):
    """User-stream data source for Bitfinex: maintains an authenticated
    websocket connection and pushes transformed messages into a queue."""

    # Seconds to wait before reconnecting after an unexpected error.
    MESSAGE_TIMEOUT = 30.0

    _logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Lazily create and cache a class-level logger.
        if cls._logger is None:
            cls._logger = logging.getLogger(__name__)
        return cls._logger

    def __init__(self, bitfinex_auth: BitfinexAuth, trading_pairs: Optional[List[str]] = None):
        if trading_pairs is None:
            trading_pairs = []
        self._bitfinex_auth: BitfinexAuth = bitfinex_auth
        self._trading_pairs = trading_pairs
        self._current_listen_key = None
        self._listen_for_user_stream_task = None
        # NOTE(review): _last_recv_time is initialised to 0 and never updated
        # in this class — confirm whether it should be refreshed per message.
        self._last_recv_time: float = 0
        super().__init__()

    @property
    def order_book_class(self):
        # Order book implementation used to parse exchange messages.
        return BitfinexOrderBook

    @property
    def last_recv_time(self) -> float:
        return self._last_recv_time

    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """Connect, authenticate, and forever forward transformed messages
        to `output`, reconnecting after MESSAGE_TIMEOUT on any error."""
        while True:
            try:
                ws = await BitfinexWebsocket(self._bitfinex_auth).connect()
                await ws.authenticate()
                async for msg in ws.messages():
                    transformed_msg: BitfinexOrderBookMessage = self._transform_message_from_exchange(msg)
                    # None means the message was filtered out (heartbeat etc.).
                    if transformed_msg is None:
                        continue
                    else:
                        output.put_nowait(transformed_msg)
            except asyncio.CancelledError:
                # Propagate task cancellation instead of swallowing it.
                raise
            except Exception:
                self.logger().error(
                    "Unexpected error with Bitfinex WebSocket connection. " "Retrying after 30 seconds...",
                    exc_info=True,
                )
                await asyncio.sleep(self.MESSAGE_TIMEOUT)

    def _transform_message_from_exchange(self, msg) -> Optional[BitfinexOrderBookMessage]:
        """Wrap a raw websocket message; return None for events we ignore."""
        order_book_message: BitfinexOrderBookMessage = BitfinexOrderBook.diff_message_from_exchange(msg, time.time())
        if any([
            order_book_message.type_heartbeat,
            order_book_message.event_auth,
            order_book_message.event_info,
        ]):
            # skip unneeded events and types
            return
        return order_book_message
| 37.153846 | 117 | 0.677709 | 2,302 | 0.794341 | 0 | 0 | 324 | 0.111801 | 922 | 0.31815 | 117 | 0.040373 |
5080d44c3782fcef3563b3dbf70986c2e7c13739 | 283 | py | Python | test_mersenne.py | Crulzor/algorithms-python-intro-ex | b3c10eff978a2f0f20e41c6d5aba24746ec07076 | [
"Apache-2.0"
] | null | null | null | test_mersenne.py | Crulzor/algorithms-python-intro-ex | b3c10eff978a2f0f20e41c6d5aba24746ec07076 | [
"Apache-2.0"
] | null | null | null | test_mersenne.py | Crulzor/algorithms-python-intro-ex | b3c10eff978a2f0f20e41c6d5aba24746ec07076 | [
"Apache-2.0"
] | null | null | null | from mersenne import generatePotentialMP, isPrime
def test_generatePotentialMP():
    """generatePotentialMP(n) yields the n-th Mersenne candidate 2**n - 1."""
    # Use plain `assert expr`, not `assert(expr)`: the parenthesised form only
    # looks like a call, and adding a second element (e.g. a message) would
    # silently turn it into an always-true tuple.
    assert generatePotentialMP(2) == 3
    assert generatePotentialMP(1) == 1
def test_isPrime():
    """isPrime accepts small primes and rejects a composite."""
    # Plain `assert expr` instead of the call-looking `assert(expr)` form.
    assert isPrime(7)
    assert isPrime(2)
    assert isPrime(3)
    # `not isPrime(4)` is the idiomatic form of `isPrime(4) == False`.
    assert not isPrime(4)
50817a3d79676d6cf698802c15669fffcdb668b3 | 18,873 | py | Python | sim21/provider/base.py | kpatvt/sim21 | 4cbbfcbef6371d3dc5404429545e003a48c69ba5 | [
"Artistic-2.0"
] | 7 | 2021-08-23T18:46:27.000Z | 2022-01-26T07:10:22.000Z | sim21/provider/base.py | kpatvt/sim21 | 4cbbfcbef6371d3dc5404429545e003a48c69ba5 | [
"Artistic-2.0"
] | null | null | null | sim21/provider/base.py | kpatvt/sim21 | 4cbbfcbef6371d3dc5404429545e003a48c69ba5 | [
"Artistic-2.0"
] | null | null | null | import math
import sys
import numpy as np
from numpy import ndarray
from sim21.data import chemsep
from sim21.data.chemsep_consts import GAS_CONSTANT
from numba import njit
from sim21.provider.generic import calc_ig_props
from sim21.provider.flash.basic import basic_flash_temp_press_2phase
from sim21.provider.flash.io import flash_press_prop_2phase, flash_press_vap_frac_2phase, flash_temp_vap_frac_2phase, \
flash_temp_prop_2phase
MIN_COMPOSITION = math.sqrt(sys.float_info.epsilon)
@njit(cache=True)
def calc_wilson_k_values(temp, press, tc_list, pc_list, omega_list):
    """Wilson-correlation K-value estimate for each component:
    K_i = (Pc_i / P) * exp(5.37 * (1 + omega_i) * (1 - Tc_i / T))."""
    n_comps = len(tc_list)
    result = np.empty(n_comps)
    for idx in range(n_comps):
        pressure_ratio = pc_list[idx] / press
        exponent = 5.37 * (1 + omega_list[idx]) * (1 - tc_list[idx] / temp)
        result[idx] = pressure_ratio * math.exp(exponent)
    return result
@njit(cache=True)
def estimate_nbp_value(feed_comp, tc_list, valid):
    """Rough normal-boiling-point guess: 70% of the mole-fraction-weighted
    critical temperature, summed only over the `valid` component indices."""
    weighted_tc = 0
    for idx in valid:
        weighted_tc += feed_comp[idx] * tc_list[idx]
    return 0.7 * weighted_tc
class Provider:
    """Thermodynamic property provider.

    Holds per-component property arrays (built by setup_components) and
    dispatches two-phase (vapor/liquid) flash calculations on mixtures.
    The `phase` method raises NotImplementedError here and is expected to
    be supplied by a subclass implementing an actual equation of state.
    """

    def __init__(self, components=None):
        # NOTE(review): the `components` argument is accepted but ignored;
        # callers appear to be expected to call setup_components() — confirm.
        self.observers = set()
        self.flash_basis = 'mole'
        # Per-component data; all populated by setup_components().
        self._components = None
        self.all_comps = None
        self._id_list = None
        self._mw_list = None
        self._tc_list = None
        self._pc_list = None
        self._omega_list = None
        self._ig_temp_ref = None
        self._ig_press_ref = None
        self._ig_cp_coeffs = None
        # NOTE(review): the chained `h`/`g` locals below are dead bindings,
        # apparently copied from the pattern used in setup_components().
        self._ig_h_form = h = None
        self._ig_g_form = g = None
        self._vap_visc = None
        self._liq_visc = None
        self._surf_tens = None
        self._ig_s_form = None
        self._std_liq_vol = None

    def add_observer(self, new_obs):
        """Register an observer (stored in a set, so duplicates are ignored)."""
        self.observers.add(new_obs)

    @property
    def components(self):
        # List of component objects set by setup_components(), or None.
        return self._components

    @property
    def all_valid_components(self):
        # Index array [0, n) over the configured components.
        return self.all_comps

    def setup_components(self, components, **kwargs):
        """Cache per-component property arrays for the given component list."""
        self._components = components
        self.all_comps = np.arange(0, len(components))
        self._id_list = [c.identifier for c in components]
        self._mw_list = np.array([c.mw for c in components])
        self._tc_list = np.array([c.crit_temp for c in components])
        self._pc_list = np.array([c.crit_press for c in components])
        self._omega_list = np.array([c.acen_fact for c in components])
        self._ig_temp_ref = np.array([c.ig_temp_ref for c in components])
        self._ig_press_ref = np.array([c.ig_press_ref for c in components])
        self._ig_cp_coeffs = np.array([c.ig_cp_mole_coeffs for c in components])
        self._ig_h_form = h = np.array([c.ig_enthalpy_form_mole for c in components])
        self._ig_g_form = g = np.array([c.ig_gibbs_form_mole for c in components])
        self._vap_visc = [c.vap_visc for c in components]
        self._liq_visc = [c.liq_visc for c in components]
        self._surf_tens = [c.surf_tens for c in components]
        # Entropy of formation from dG = dH - T*dS at T = 298.15 K.
        self._ig_s_form = (g - h) / -298.15
        self._std_liq_vol = np.array([c.std_liq_vol_mole for c in components])

    @property
    def mw(self):
        # Molecular weights, one entry per component.
        return self._mw_list

    @property
    def std_liq_vol_mole(self):
        # Standard liquid molar volumes, one entry per component.
        return self._std_liq_vol

    def vap_visc(self, temp, comp_mole):
        """Mole-fraction-weighted vapor viscosity at `temp`."""
        return np.dot(comp_mole, [comp_visc(temp) for comp_visc in self._vap_visc])

    def liq_visc(self, temp, comp_mole):
        """Mole-fraction-weighted liquid viscosity at `temp`."""
        return np.dot(comp_mole, [comp_visc(temp) for comp_visc in self._liq_visc])

    def surf_tens(self, temp, comp_mole):
        """Mole-fraction-weighted surface tension at `temp`."""
        return np.dot(comp_mole, [comp_surf_tens(temp) for comp_surf_tens in self._surf_tens])

    def convert_to_mole_basis(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value):
        """Convert (total flow, composition) from 'mole'/'mass' basis to mole
        basis; returns (flow_sum_value_mole, frac_value_mole).

        Raises NotImplementedError for any other basis string.
        """
        if frac_basis == 'mole':
            frac_value_mole = frac_value
        elif frac_basis == 'mass':
            # Mass fractions -> mole fractions, renormalised to sum to 1.
            frac_value_mole = frac_value / self._mw_list
            frac_value_mole /= np.sum(frac_value_mole)
        else:
            raise NotImplementedError
        avg_mw = np.dot(frac_value_mole, self._mw_list)
        if flow_sum_basis == 'mole':
            flow_sum_value_mole = flow_sum_value
        elif flow_sum_basis == 'mass':
            flow_sum_value_mole = flow_sum_value / avg_mw
        else:
            raise NotImplementedError
        return flow_sum_value_mole, frac_value_mole

    def scaling(self, prop_type):
        """Characteristic magnitude used to scale the given property type."""
        if prop_type in ('enthalpy_mole', 'ig_enthalpy_mole', 'res_enthalpy_mole',
                         'gibbs_mole', 'ig_gibbs_mole', 'res_gibbs_mole',
                         'int_energy_mole', 'ig_int_energy_mole', 'res_int_energy_mole',
                         'helmholtz_mole', 'ig_helmholtz_mole', 'res_helmholtz_mole'):
            # Energy-like properties scale as R*T at the 298.15 K reference.
            return GAS_CONSTANT * 298.15
        elif prop_type in ('entropy_mole', 'ig_entropy_mole', 'res_entropy_mole'):
            return GAS_CONSTANT
        else:
            raise NotImplementedError

    def guess_k_value_vle(self, temp, press):
        """Initial VLE K-value estimates from the Wilson correlation."""
        return calc_wilson_k_values(temp, press, self._tc_list, self._pc_list, self._omega_list)

    def guess_nbp(self, feed_comp, valid):
        """Rough boiling-point estimate for the feed composition."""
        return estimate_nbp_value(feed_comp, self._tc_list, valid)

    def ig_props(self, temp, press, feed_comp, valid=None):
        """Ideal-gas mixture properties at (temp, press, feed_comp).

        Returns (vol, mw, cp, enthalpy, entropy, int_energy, gibbs,
        helmholtz), all on a mole basis.
        """
        if valid is None:
            # Only components with non-negligible composition participate.
            valid = np.where(feed_comp > MIN_COMPOSITION)[0]

        calc_mw, \
        calc_ig_cp, \
        calc_ig_enthalpy, \
        calc_ig_entropy, \
        calc_ig_gibbs = calc_ig_props(GAS_CONSTANT, temp, press, feed_comp, 1,
                                      valid, self._tc_list, self._mw_list,
                                      self._ig_cp_coeffs, self._ig_h_form, self._ig_s_form,
                                      self._ig_temp_ref, self._ig_press_ref)

        # A = G - RT and U = A + T*S for an ideal gas.
        calc_ig_helmholtz = calc_ig_gibbs - GAS_CONSTANT * temp
        calc_ig_int_energy = calc_ig_helmholtz + temp * calc_ig_entropy
        # Ideal-gas molar volume from PV = RT.
        vol = GAS_CONSTANT * temp / press
        return vol, calc_mw, calc_ig_cp, calc_ig_enthalpy, calc_ig_entropy, calc_ig_int_energy, calc_ig_gibbs, calc_ig_helmholtz

    def flash(self,
              flow_sum_basis=None, flow_sum_value=None,
              frac_basis=None, frac_value=None,
              temp=None, press=None,
              vol_basis=None, vol_value=None,
              vap_frac_value=None, vap_frac_basis=None,
              deg_subcool=None, deg_supheat=None,
              enthalpy_basis=None, enthalpy_value=None,
              entropy_basis=None, entropy_value=None,
              int_energy_basis=None, int_energy_value=None,
              previous=None):
        """Dispatch a two-phase flash based on which pair of specifications
        (T/P, T/VF, P/H, ...) was supplied; unsupported combinations raise
        NotImplementedError.

        NOTE(review): several dispatched helpers (flash_temp_subcool,
        flash_press_subcool, flash_temp_vol, flash_press_vol,
        flash_prop_vap_frac, flash_prop_vol) are not defined in this class
        as shown — confirm they exist in a subclass or elsewhere.
        """
        assert None not in (flow_sum_basis, flow_sum_value)
        assert frac_basis in ('mole', 'mass')
        assert frac_value is not None and isinstance(frac_value, ndarray)
        valid = np.where(frac_value > MIN_COMPOSITION)[0]
        if temp is not None:
            if press is not None:
                # flash_temp_press
                return self.flash_temp_press(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp, press,
                                             previous, valid)
            elif vap_frac_value is not None:
                # flash_temp_vap_frac
                return self.flash_temp_vap_frac(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp,
                                                vap_frac_basis, vap_frac_value, previous, valid)
            elif deg_subcool is not None:
                # flash_temp_deg_subcool
                return self.flash_temp_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp,
                                               deg_subcool, previous, valid)
            elif deg_supheat is not None:
                # flash_temp_deg_supheat
                # NOTE(review): this calls flash_temp_subcool with deg_supheat —
                # looks like a copy/paste slip; verify against the superheat path.
                return self.flash_temp_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp,
                                               deg_supheat, previous, valid)
            elif enthalpy_value is not None:
                # flash_temp_enthalpy
                return self.flash_temp_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp, 'enthalpy',
                                            enthalpy_basis, enthalpy_value, previous, valid)
            elif entropy_value is not None:
                # flash_temp_entropy
                return self.flash_temp_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp, 'entropy',
                                            entropy_basis, entropy_value, previous, valid)
            elif int_energy_value is not None:
                # flash_temp_int_energy
                return self.flash_temp_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp, 'int_energy',
                                            int_energy_basis, int_energy_value, previous, valid)
            elif vol_value is not None:
                # flash_temp_vol
                return self.flash_temp_vol(flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp, vol_basis,
                                           vol_value, previous, valid)
            else:
                raise NotImplementedError
        elif press is not None:
            if vap_frac_value is not None:
                # flash_press_vap_frac
                return self.flash_press_vap_frac(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press,
                                                 vap_frac_basis, vap_frac_value, previous, valid)
            elif deg_subcool is not None:
                # flash_press_deg_subcool
                return self.flash_press_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press,
                                                deg_subcool, previous, valid)
            elif deg_supheat is not None:
                # flash_press_deg_supheat
                # NOTE(review): same concern as above — subcool helper called
                # with deg_supheat.
                return self.flash_press_subcool(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press,
                                                deg_supheat, previous, valid)
            elif enthalpy_value is not None:
                # flash_press_enthalpy
                return self.flash_press_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press, 'enthalpy',
                                             enthalpy_basis, enthalpy_value, previous, valid)
            elif entropy_value is not None:
                # flash_press_entropy
                return self.flash_press_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press, 'entropy',
                                             entropy_basis, entropy_value, previous, valid)
            elif int_energy_value is not None:
                # flash_press_int_energy
                return self.flash_press_prop(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press,
                                             'int_energy', int_energy_basis, int_energy_value, previous, valid)
            elif vol_value is not None:
                # flash_press_vol
                return self.flash_press_vol(flow_sum_basis, flow_sum_value, frac_basis, frac_value, press, vol_basis,
                                            vol_value, previous, valid)
            else:
                raise NotImplementedError
        elif None not in (enthalpy_value, entropy_value, int_energy_value):
            # NOTE(review): this condition is True only when ALL THREE property
            # values are supplied; the if/elif chain below reads as if "any one
            # supplied" was intended — verify the spec.
            prop_basis, prop_value = None, None
            if enthalpy_value is not None:
                prop_name, prop_basis, prop_value = 'enthalpy', enthalpy_basis, enthalpy_value
            elif entropy_value is not None:
                prop_name, prop_basis, prop_value = 'entropy', entropy_basis, entropy_value
            elif int_energy_value is not None:
                prop_name, prop_basis, prop_value = 'int_energy', int_energy_basis, int_energy_value
            else:
                raise NotImplementedError

            if vap_frac_value is not None:
                # flash_prop_vap_frac
                return self.flash_prop_vap_frac(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                                prop_name, prop_basis, prop_value, vap_frac_basis, vap_frac_value,
                                                previous, valid)
            elif vol_value is not None:
                return self.flash_prop_vol(flow_sum_basis, flow_sum_value, frac_basis, frac_value,
                                           prop_name, prop_basis, prop_value,
                                           vol_basis, vol_value, previous, valid)
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

    def flash_temp_press(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp, press, previous, valid):
        """Isothermal T/P flash; reuses K-values from `previous` if available."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        prev_k = None
        if previous is not None and previous.contains('vap', 'liq'):
            prev_k = previous.k_values_vle

        results = basic_flash_temp_press_2phase(self, temp, press, frac_value_mole, valid, previous_k_values=prev_k)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_press_prop(self, flow_sum_basis, flow_sum_value,
                         frac_basis, frac_value, press,
                         prop_name, prop_basis, prop_value, previous, valid):
        """P/property (H, S or U) flash; iterates on temperature, starting
        from the previous result's temperature when available."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        prop_flash_name = prop_name + '_' + prop_basis
        start_temp = None
        if previous is not None:
            start_temp = previous.temp

        # NOTE(review): this passes `frac_value` rather than `frac_value_mole`
        # (unlike flash_temp_prop below) — verify which is intended.
        results = flash_press_prop_2phase(self, press, prop_flash_name, prop_value,
                                          0, frac_value, valid=valid,
                                          previous=previous, start_temp=start_temp)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_temp_prop(self, flow_sum_basis, flow_sum_value,
                        frac_basis, frac_value, temp,
                        prop_name, prop_basis, prop_value, previous, valid):
        """T/property (H, S or U) flash; iterates on pressure, starting from
        the previous result's pressure when available."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        prop_flash_name = prop_name + '_' + prop_basis
        start_press = None
        if previous is not None:
            start_press = previous.press

        results = flash_temp_prop_2phase(self, temp, prop_flash_name, prop_value, 0,
                                         frac_value_mole, valid=valid, previous=previous,
                                         start_press=start_press)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_press_vap_frac(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value, press,
                             vap_frac_basis, vap_frac_value, previous, valid):
        """P/vapor-fraction flash (mole-basis vapor fraction only)."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        if vap_frac_basis != 'mole':
            raise NotImplementedError

        results = flash_press_vap_frac_2phase(self, press, vap_frac_value, frac_value_mole, valid=valid, previous=previous)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def flash_temp_vap_frac(self, flow_sum_basis, flow_sum_value, frac_basis, frac_value, temp,
                            vap_frac_basis, vap_frac_value, previous, valid):
        """T/vapor-fraction flash (mole-basis vapor fraction only)."""
        flow_sum_value_mole, frac_value_mole = self.convert_to_mole_basis(flow_sum_basis, flow_sum_value,
                                                                          frac_basis, frac_value)
        if vap_frac_basis != 'mole':
            raise NotImplementedError

        results = flash_temp_vap_frac_2phase(self, temp, vap_frac_value, frac_value_mole, valid=valid, previous=previous)
        results.scale(flow_sum_mole=flow_sum_value_mole)
        return results

    def phase(self, temp, press, n, desired_phase,
              allow_pseudo=True, valid=None, press_comp_derivs=False,
              log_phi_temp_press_derivs=False, log_phi_comp_derivs=False):
        """Single-phase property evaluation; must be provided by a subclass."""
        raise NotImplementedError

    def phases_vle(self, temp, press, liq_comp, vap_comp,
                   allow_pseudo=True, valid=None, press_comp_derivs=False,
                   log_phi_temp_press_derivs=False, log_phi_comp_derivs=False):
        """Evaluate the liquid and vapor phases at the same T and P and
        return them as a (liq, vap) pair."""
        liq_ph = self.phase(temp, press, liq_comp, 'liq',
                            allow_pseudo, valid,
                            press_comp_derivs,
                            log_phi_temp_press_derivs,
                            log_phi_comp_derivs)

        vap_ph = self.phase(temp, press, vap_comp, 'vap',
                            allow_pseudo, valid,
                            press_comp_derivs,
                            log_phi_temp_press_derivs,
                            log_phi_comp_derivs)

        return liq_ph, vap_ph

    def AddCompound(self, compound_by_name, compound_obj=None):
        """Append a compound (looked up in chemsep by name unless an object
        is given) and rebuild all per-component arrays."""
        # print('AddCompound:', compound)
        if compound_obj is None:
            compound_obj = chemsep.pure(compound_by_name)
        else:
            pass

        if self._components is None:
            new_components = [compound_obj]
        else:
            new_components = self._components[:]
            new_components.append(compound_obj)

        # This is really inefficient, but it's simple
        self.setup_components(new_components)

    def GetAvCompoundNames(self):
        """Names of all compounds available in the chemsep database."""
        return chemsep.available()

    def DeleteCompound(self, compound):
        """Remove the named compound (case-insensitive) and rebuild arrays."""
        compound = compound.upper()
        idx = self._id_list.index(compound)
        new_compounds = self._components[:]
        new_compounds.pop(idx)
        self.setup_components(new_compounds)

    def ExchangeCompound(self, cmp1Name, cmp2Name):
        """Swap the positions of two named compounds and rebuild arrays."""
        cmp1Name = cmp1Name.upper()
        cmp2Name = cmp2Name.upper()
        idx_1 = self._id_list.index(cmp1Name)
        idx_2 = self._id_list.index(cmp2Name)
        new_compounds = self._components[:]
        new_compounds[idx_1], new_compounds[idx_2] = new_compounds[idx_2], new_compounds[idx_1]
        self.setup_components(new_compounds)

    def MoveCompound(self, cmp1Name, cmp2Name):
        """Move cmp1 to the position currently held by cmp2, then rebuild.

        NOTE(review): the insert index is taken from the (unchanged) _id_list
        after popping from the copied list — when cmp1 precedes cmp2 this
        target index is off by one relative to the popped list; verify intent.
        """
        cmp1Name = cmp1Name.upper()
        cmp2Name = cmp2Name.upper()
        new_compounds = self._components[:]
        item_1 = new_compounds.pop(self._id_list.index(cmp1Name))
        new_compounds.insert(self._id_list.index(cmp2Name), item_1)
        self.setup_components(new_compounds)
| 45.808252 | 128 | 0.612303 | 17,864 | 0.946537 | 0 | 0 | 779 | 0.041276 | 0 | 0 | 852 | 0.045144 |
5081e2e48236509a27ddba158c1b708d27b5b395 | 2,516 | py | Python | server/routers/auth.py | CraftyChimera/nittfest-site | 71754a0f282dff247e9b5ef8bc992718c6643576 | [
"MIT"
] | null | null | null | server/routers/auth.py | CraftyChimera/nittfest-site | 71754a0f282dff247e9b5ef8bc992718c6643576 | [
"MIT"
] | null | null | null | server/routers/auth.py | CraftyChimera/nittfest-site | 71754a0f282dff247e9b5ef8bc992718c6643576 | [
"MIT"
] | null | null | null | """
Auth route
"""
import requests
from fastapi import APIRouter, HTTPException
from fastapi.param_functions import Depends
from sqlalchemy.orm import Session
from config.database import get_database
from config.logger import logger
from config.settings import settings
from server.controllers.auth import get_department_id, sign_jwt
from server.schemas.users import Users
# All endpoints defined in this module are mounted under the /auth prefix.
router = APIRouter(
    prefix="/auth",
)
@router.get("/callback/")
async def fetch_user_details(
    code: str, session: Session = Depends(get_database)
):
    """OAuth callback: exchange the authorization `code` for an access
    token, fetch the user's profile, create the user on first login, and
    return the profile together with a signed JWT.

    Raises HTTPException(500) if any step of the exchange fails.
    """
    # Token-exchange payload for the authorization-code grant.
    params = {
        "client_id": settings.client_id,
        "client_secret": settings.client_secret,
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": settings.redirect_url,
    }
    try:
        token_response = requests.post(
            url=settings.token_endpoint, data=params
        ).json()
        logger.debug(token_response)
        headers = {
            "Authorization": "Bearer " + token_response["access_token"]
        }
        # Fetch the user profile from the resource server.
        userdetails = requests.post(
            url=settings.resource_endpoint,
            headers=headers,
        ).json()
        # First login: persist a new user record.
        if (
            not session.query(Users)
            .filter_by(email=userdetails["email"])
            .first()
        ):
            new_user = Users(
                name=userdetails["name"],
                email=userdetails["email"],
                mobile_number=userdetails["phoneNumber"],
                gender=userdetails["gender"],
                department_id=get_department_id(userdetails["email"]),
                # NOTE(review): placeholder FCM token — confirm it is updated
                # by a later request.
                fcm_token="123",
            )
            session.add(new_user)
            session.commit()
            # NOTE(review): the session is only closed on this branch.
            session.close()
        jwt = sign_jwt(userdetails["email"], userdetails["name"])
        logger.info(f'{userdetails["name"]} user logged in')
        return {
            "name": userdetails["name"],
            "email": userdetails["email"],
            "phoneNumber": userdetails["phoneNumber"],
            "gender": userdetails["gender"],
            "jwt": jwt["jwt_token"],
        }

    except Exception as exception:
        # Boundary handler: log and convert any failure into an HTTP 500.
        logger.error(f"/dauth failed with {exception}")
        raise HTTPException(
            status_code=500,
            detail="An unexpected error occurred while authentication",
            headers={
                "X-Error": "An unexpected error occurred while authentication"
            },
        ) from exception
| 30.682927 | 78 | 0.595787 | 0 | 0 | 0 | 0 | 2,095 | 0.832671 | 2,069 | 0.822337 | 557 | 0.221383 |
50826185ee7d341557b552bde864d1a94b521ad3 | 3,480 | py | Python | isc_dhcp_leases/test_lease6.py | dholl/python-isc-dhcp-leases | 7bd07a3a4a7965bd7a6e0a45de7b30914e1bbced | [
"MIT"
] | 111 | 2015-02-11T21:36:40.000Z | 2022-03-18T13:36:12.000Z | isc_dhcp_leases/test_lease6.py | dholl/python-isc-dhcp-leases | 7bd07a3a4a7965bd7a6e0a45de7b30914e1bbced | [
"MIT"
] | 36 | 2015-05-05T12:04:07.000Z | 2021-06-17T12:58:30.000Z | isc_dhcp_leases/test_lease6.py | dholl/python-isc-dhcp-leases | 7bd07a3a4a7965bd7a6e0a45de7b30914e1bbced | [
"MIT"
] | 52 | 2015-05-02T19:31:20.000Z | 2022-03-18T13:36:29.000Z | import datetime
from unittest import TestCase
from isc_dhcp_leases.iscdhcpleases import Lease6, utc
from freezegun import freeze_time
__author__ = 'Martijn Braam <martijn@brixit.nl>'
class TestLease6(TestCase):
    """Unit tests for Lease6 (DHCPv6 lease parsing and validity logic)."""

    def setUp(self):
        # Shared fixture: a fixed last-communication time and raw lease fields.
        self.lease_time = datetime.datetime(2015, 8, 18, 16, 55, 37, tzinfo=utc)
        self.lease_data = {
            'binding': 'state active',
            'ends': 'never',
            'preferred-life': '375',
            'max-life': '600'
        }

    def test_init(self):
        """Constructor parses the host identifier, IAID/DUID and lease fields."""
        lease = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                       "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        self.assertEqual(lease.ip, "2001:610:600:891d::60")
        # The escaped octal string decodes to these raw bytes.
        self.assertEqual(lease.host_identifier, b"4dv\xea\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
        self.assertEqual(lease.valid, True)
        self.assertEqual(lease.iaid, 3933627444)
        self.assertEqual(lease.duid, b"\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
        self.assertEqual(lease.active, True)
        self.assertEqual(lease.binding_state, 'active')
        self.assertEqual(lease.preferred_life, 375)
        self.assertEqual(lease.max_life, 600)
        self.assertEqual(lease.last_communication, self.lease_time)
        self.assertEqual(lease.type, Lease6.NON_TEMPORARY)

    def test_repr(self):
        """repr() shows the lease's IPv6 address."""
        lease = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                       "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        self.assertEqual(repr(lease), '<Lease6 2001:610:600:891d::60>')

    def _test_valid(self, now=None):
        # Helper exercising validity before/after the lease end time; `now`
        # lets the caller pin the reference clock.
        lease = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                       "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na",
                       now=now)
        self.assertTrue(lease.valid)  # Lease is forever
        lease.end = datetime.datetime(2015, 7, 6, 13, 57, 4, tzinfo=utc)
        self.assertTrue(lease.valid)  # Lease is before end
        lease.end = lease.end - datetime.timedelta(hours=7)
        self.assertFalse(lease.valid)  # Lease is ended

    @freeze_time("2015-07-6 8:15:0")
    def test_valid_frozen(self):
        """Validity check with the wall clock frozen by freezegun."""
        self._test_valid()

    def test_valid_historical(self):
        """Validity check with an explicit historical `now` argument."""
        self._test_valid(
            now=datetime.datetime(2015, 7, 6, 8, 15, 0, tzinfo=utc))

    def test_eq(self):
        """Equality depends on both IP and host identifier."""
        lease_a = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                         "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        lease_b = Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                         "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
        self.assertEqual(lease_a, lease_b)

        lease_b.ip = "2001:610:600:891d::42"
        self.assertNotEqual(lease_a, lease_b)

        lease_b.ip = "2001:610:600:891d::60"
        lease_b.host_identifier = "gd4\352\000\001\000\001\035b\037\322\012\000'\000\000\000"
        self.assertNotEqual(lease_a, lease_b)

    def test_naive_time(self):
        """A timezone-naive `now` must be rejected with ValueError."""
        with self.assertRaises(ValueError):
            Lease6("2001:610:600:891d::60", self.lease_data, self.lease_time,
                   "4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na",
                   now=datetime.datetime.now())
5082b9694adff95b4f7e54e0b6bed0eb02d39395 | 7,599 | py | Python | dril_pack/dril.py | RuohanW/il_baseline_fork | 824d5117e573292e707d648b087f1e10253cc8d6 | [
"MIT"
] | null | null | null | dril_pack/dril.py | RuohanW/il_baseline_fork | 824d5117e573292e707d648b087f1e10253cc8d6 | [
"MIT"
] | null | null | null | dril_pack/dril.py | RuohanW/il_baseline_fork | 824d5117e573292e707d648b087f1e10253cc8d6 | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
import gym
import pandas as pd
from stable_baselines3.common.running_mean_std import RunningMeanStd
from collections import defaultdict
from torch.utils.data import DataLoader, TensorDataset
# This file creates the reward function used by dril. Both reinforcement algorithms
# ppo (line: 102) and a2c (line: 92), have dril bc udpates.
class DRIL:
    def __init__(self, device=None, envs=None, ensemble_policy=None, env_name=None,
                 expert_dataset=None, ensemble_size=None, ensemble_quantile_threshold=None,
                 dril_bc_model=None, dril_cost_clip=None, num_dril_bc_train_epoch=None,\
                 training_data_split=None):
        """Build the DRIL reward: store config, load the expert demo data,
        and precompute the per-action variance clipping functions.

        NOTE(review): if `envs.action_space` is none of Discrete/Box/
        MultiBinary, `self.num_actions` is never set — confirm callers
        guarantee one of those spaces.
        """
        self.ensemble_quantile_threshold = ensemble_quantile_threshold
        self.dril_cost_clip = dril_cost_clip
        self.device = device
        self.num_dril_bc_train_epoch = num_dril_bc_train_epoch
        self.env_name = env_name

        # Running statistics of discounted returns, for reward normalization.
        self.returns = None
        self.ret_rms = RunningMeanStd(shape=())

        self.observation_space = envs.observation_space
        if envs.action_space.__class__.__name__ == "Discrete":
            self.num_actions = envs.action_space.n
        elif envs.action_space.__class__.__name__ == "Box":
            self.num_actions = envs.action_space.shape[0]
        elif envs.action_space.__class__.__name__ == "MultiBinary":
            self.num_actions = envs.action_space.shape[0]

        self.ensemble_size = ensemble_size
        # use full data since we don't use a validation set
        self.trdata = expert_dataset.load_demo_data(1.0, 1, self.ensemble_size)['trdata']
        self.ensemble = ensemble_policy
        # self.bc = dril_bc_model
        # self.bc.num_batches = num_dril_bc_train_epoch

        # Per-action clipping functions derived from ensemble disagreement
        # on the expert data.
        self.clip_variance = self.policy_variance(envs=envs)
    def policy_variance(self, q=0.98, envs=None):
        """Compute ensemble-disagreement quantiles over the expert data and
        return a dict of per-action clipping functions keyed by action id.

        The `q` parameter is immediately overridden by the configured
        ensemble_quantile_threshold.
        """
        q = self.ensemble_quantile_threshold
        obs = None  # NOTE(review): obs/acs are assigned but never used here.
        acs = None
        variance = defaultdict(lambda:[])
        for batch_idx, batch in enumerate(self.trdata):
            (state, action) = batch
            action = action.float().to(self.device)

            # Image observation
            if len(self.observation_space.shape) == 3:
                state = state.repeat(self.ensemble_size, 1,1,1).float().to(self.device)
            # Feature observations
            else:
                state = state.repeat(self.ensemble_size, 1).float().to(self.device)

            if isinstance(envs.action_space, gym.spaces.discrete.Discrete):
                # Note: this is just a place holder
                action_idx = int(action.item())
                one_hot_action = torch.FloatTensor(np.eye(self.num_actions)[int(action.item())])
                action = one_hot_action
            elif envs.action_space.__class__.__name__ == "MultiBinary":
                # create unique id for each combination
                action_idx = int("".join(str(int(x)) for x in action[0].tolist()), 2)
            else:
                action_idx = 0

            with torch.no_grad():
                ensemble_action = self.ensemble(state).squeeze()

            if isinstance(envs.action_space, gym.spaces.Box):
                # Clamp both expert and ensemble actions into the action bounds.
                action = torch.clamp(action, envs.action_space.low[0], envs.action_space.high[0])
                ensemble_action = torch.clamp(ensemble_action, envs.action_space.low[0],\
                                   envs. action_space.high[0])

            # Covariance of the ensemble's actions measures disagreement.
            cov = np.cov(ensemble_action.T.cpu().numpy())
            action = action.cpu().numpy()

            # If the env has only one action then we need to reshape cov
            if envs.action_space.__class__.__name__ == "Box":
                if envs.action_space.shape[0] == 1:
                    cov = cov.reshape(-1,1)

            #variance.append(np.matmul(np.matmul(action, cov), action.T).item())
            if isinstance(envs.action_space, gym.spaces.discrete.Discrete):
                # Quadratic form a^T C a for every possible discrete action.
                for action_idx in range(envs.action_space.n):
                    one_hot_action = torch.FloatTensor(np.eye(self.num_actions)[action_idx])
                    variance[action_idx].append(np.matmul(np.matmul(one_hot_action, cov), one_hot_action.T).item())
            else:
                variance[action_idx].append(np.matmul(np.matmul(action, cov), action.T).item())

        quantiles = {key: np.quantile(np.array(variance[key]), q) for key in list(variance.keys())}
        # NOTE(review): the lambdas below close over the loop variable `key`
        # late-bound, so every entry may end up using the LAST key's quantile;
        # also the 'no_clipping' branch iterates `i` but references `key`,
        # which looks like a NameError — verify both.
        if self.dril_cost_clip == '-1_to_1':
            return {key: lambda x: -1 if x > quantiles[key] else 1 for key in list(variance.keys())}
        elif self.dril_cost_clip == 'no_clipping':
            return {key: lambda x: x for i in list(variance.keys())}
        elif self.dril_cost_clip == '-1_to_0':
            return {key: lambda x: -1 if x > quantiles[key] else 0 for key in list(variance.keys())}
    def predict_reward(self, actions, states, envs):
        """Compute a per-transition pseudo-reward from ensemble disagreement.

        For each (state, action) pair, runs the policy ensemble on the
        replicated state, computes the covariance of the ensemble's actions,
        and forms the quadratic disagreement score a @ cov @ a.T. The score
        is then mapped to a clipped reward via the thresholding functions in
        ``self.clip_variance`` (keyed by discrete action index, or 0 for
        continuous action spaces).

        Args:
            actions: batch of actions taken; indexed per-transition.
            states: batch of observations aligned with ``actions``.
            envs: vectorized environment; only ``envs.action_space`` is used.

        Returns:
            numpy array of shape (batch,) with one clipped reward per
            transition.
        """
        rewards = []
        for idx in range(actions.shape[0]):
            # Image observation
            if len(self.observation_space.shape) == 3:
                # Replicate the single observation once per ensemble member.
                state = states[[idx]].repeat(self.ensemble_size, 1,1,1).float().to(self.device)
            # Feature observations
            else:
                state = states[[idx]].repeat(self.ensemble_size, 1).float().to(self.device)
            if isinstance(envs.action_space, gym.spaces.discrete.Discrete):
                # One-hot encode the discrete action so the quadratic form
                # below selects the variance of that action's logit.
                one_hot_action = torch.FloatTensor(np.eye(self.num_actions)[int(actions[idx].item())])
                action = one_hot_action
                action_idx = int(actions[idx].item())
            elif isinstance(envs.action_space, gym.spaces.Box):
                action = actions[[idx]]
                # Continuous spaces share a single quantile/clip entry.
                action_idx = 0
            elif isinstance(envs.action_space, gym.spaces.MultiBinary):
                raise Exception('Envrionment shouldnt be MultiBinary')
            else:
                raise Exception("Unknown Action Space")
            with torch.no_grad():
                ensemble_action = self.ensemble(state).squeeze().detach()
            if isinstance(envs.action_space, gym.spaces.Box):
                # Clip both the taken action and the ensemble outputs to the
                # action bounds before measuring disagreement.
                # NOTE(review): assumes low/high are uniform across action
                # dimensions (only element [0] is used) — confirm.
                action = torch.clamp(action, envs.action_space.low[0], envs.action_space.high[0])
                ensemble_action = torch.clamp(ensemble_action, envs.action_space.low[0],\
                      envs. action_space.high[0])
            cov = np.cov(ensemble_action.T.cpu().numpy())
            action = action.cpu().numpy()
            # If the env has only one action then we need to reshape cov
            # (np.cov returns a 0-d/1-d result for a single variable).
            if envs.action_space.__class__.__name__ == "Box":
                if envs.action_space.shape[0] == 1:
                    cov = cov.reshape(-1,1)
            # Quadratic disagreement score: a @ cov @ a.T.
            ensemble_variance = (np.matmul(np.matmul(action, cov), action.T).item())
            if action_idx in self.clip_variance:
                reward = self.clip_variance[action_idx](ensemble_variance)
            else:
                # No calibrated threshold for this action: treat as
                # high-disagreement and penalize.
                reward = -1
            rewards.append(reward)
        return np.array(rewards)
        # return torch.FloatTensor(np.array(rewards)[np.newaxis].T)
def normalize_reward(self, state, action, gamma, masks, reward, update_rms=True):
if self.returns is None:
self.returns = reward.clone()
if update_rms:
self.returns = self.returns * masks * gamma + reward
self.ret_rms.update(self.returns.cpu().numpy())
return reward / np.sqrt(self.ret_rms.var[0] + 1e-8)
def bc_update(self):
for dril_epoch in range(self.num_dril_bc_train_epoch):
dril_train_loss = self.bc.update(update=True, data_loader_type='train')
| 44.964497 | 115 | 0.614818 | 7,216 | 0.949599 | 0 | 0 | 0 | 0 | 0 | 0 | 826 | 0.108699 |
5086e67eb9871f894ec3e82717bcf87e2d9b440a | 20,226 | py | Python | graphlearn/python/values.py | hansugu/graph-learn | d20edf7734cc0246a7e38dd8c553237b04c6b897 | [
"Apache-2.0"
] | 1 | 2020-03-30T07:49:22.000Z | 2020-03-30T07:49:22.000Z | graphlearn/python/values.py | Sunsj2014/graph-learn | 7bbffceed2c69a7acf903d80ee5bbc7e3fec6ca1 | [
"Apache-2.0"
] | null | null | null | graphlearn/python/values.py | Sunsj2014/graph-learn | 7bbffceed2c69a7acf903d80ee5bbc7e3fec6ca1 | [
"Apache-2.0"
] | 1 | 2020-08-10T08:25:52.000Z | 2020-08-10T08:25:52.000Z | # Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
""" Values include Nodes, Edges, Layer, Layers that returned
by samplers. Values should be extended with customized samplers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from graphlearn.python.decoder import Decoder
class Values(object):
  """ Base value class shared by Nodes and Edges.

  Holds weights, labels and typed attributes (int/float/string). Fields
  that are still None on first access are fetched lazily from the backing
  graph through ``_lookup_and_set_values()`` (implemented by subclasses),
  guided by the decoder returned from ``_get_decoder()``.
  """

  def __init__(self,
               int_attrs=None,
               float_attrs=None,
               string_attrs=None,
               weights=None,
               labels=None,
               shape=None,
               graph=None):
    self._weights = weights
    self._labels = labels
    self._int_attrs = int_attrs
    self._float_attrs = float_attrs
    self._string_attrs = string_attrs
    self._shape = shape
    self._graph = graph
    # _attred marks whether the attributes are already populated; when
    # False, the first attribute access triggers a lazy graph lookup.
    self._attred = True
    if self._int_attrs is None and self._float_attrs is None and \
        self._string_attrs is None:
      self._attred = False

  def _get_decoder(self):
    # Overridden by subclasses to return the decoder of the concrete
    # node/edge type; the default Decoder has no weights/labels/attrs.
    return Decoder()

  @property
  def int_attrs(self):
    self._set_attrs()
    return self._int_attrs

  @property
  def float_attrs(self):
    self._set_attrs()
    return self._float_attrs

  @property
  def string_attrs(self):
    self._set_attrs()
    return self._string_attrs

  @property
  def weights(self):
    # Lazily look up weights if the type is weighted and none are cached.
    if self._weights is None and self._get_decoder().weighted:
      self._lookup_and_set_values()
    return self._weights

  @property
  def labels(self):
    # Lazily look up labels if the type is labeled and none are cached.
    if self._labels is None and self._get_decoder().labeled:
      self._lookup_and_set_values()
    return self._labels

  @property
  def shape(self):
    return self._shape

  @property
  def graph(self):
    return self._graph

  @int_attrs.setter
  def int_attrs(self, int_attrs):
    self._int_attrs = self._reshape(int_attrs, expand_shape=True)

  @float_attrs.setter
  def float_attrs(self, float_attrs):
    self._float_attrs = self._reshape(float_attrs, expand_shape=True)

  @string_attrs.setter
  def string_attrs(self, string_attrs):
    self._string_attrs = self._reshape(string_attrs, expand_shape=True)

  @weights.setter
  def weights(self, weights):
    self._weights = self._reshape(weights)

  @labels.setter
  def labels(self, labels):
    self._labels = self._reshape(labels)

  @shape.setter
  def shape(self, shape):
    # Validate before assigning so a rejected value leaves the object
    # unchanged (previously the bad value was stored before raising).
    if not isinstance(shape, tuple):
      raise ValueError("shape must be a tuple, got {}.".format(type(shape)))
    self._shape = shape

  @graph.setter
  def graph(self, graph):
    self._graph = graph

  def _reshape(self, value, expand_shape=False):
    """ Reshape `value` to `self._shape` when a shape is set.

    With `expand_shape`, a trailing -1 dimension is appended (used for
    per-element attribute vectors). None or empty values are returned
    unchanged; a non-ndarray raises ValueError.
    """
    if value is None:
      return value
    # Check the type before touching `.size` so a non-ndarray raises the
    # documented ValueError instead of an AttributeError.
    if not isinstance(value, np.ndarray):
      raise ValueError("{} must be a numpy.ndarray.".format(value))
    if value.size == 0:
      return value
    if self._shape:
      if not isinstance(self._shape, tuple):
        raise ValueError("shape must be a tuple, got {}."
                         .format(type(self._shape)))
      if expand_shape:
        return np.reshape(value, self._shape + (-1, ))  # pylint: disable=bad-whitespace
      return np.reshape(value, self._shape)
    return value

  def _lookup_and_set_values(self):
    # Subclasses fetch weights/labels/attrs from the graph here.
    pass

  def _set_attrs(self):
    # Populate attributes once, and only if the type actually has any.
    if self._get_decoder().attributed:
      if not self._attred:
        self._lookup_and_set_values()
      self._attred = True

  def _set_values(self, values):
    # Copy another Values object through the setters so reshaping applies.
    self.int_attrs = values.int_attrs
    self.float_attrs = values.float_attrs
    self.string_attrs = values.string_attrs
    self.weights = values.weights
    self.labels = values.labels
class SparseBase(object):
  """ Common machinery for sparse values (SparseNodes / SparseEdges).

  Stores the per-row value counts plus the dense shape the sparse data
  corresponds to, and provides the iteration scaffolding used by the
  `__next__` implementations of its subclasses.
  """

  def __init__(self, offsets, dense_shape):
    """ Init a SparseBase object.

    Args:
      offsets: list or 1D ndarray, the number of values on each row.
      dense_shape: the corresponding 2D dense shape.
    """
    self._it = 0
    self._offsets = offsets
    self._dense_shape = dense_shape
    # Prefix sums over offsets: _global_offsets[i] is the index of the
    # first value of row i in the flattened value array.
    self._global_offsets = [0]
    for count in self._offsets:
      self._global_offsets.append(self._global_offsets[-1] + count)

  @property
  def offsets(self):
    return self._offsets

  @offsets.setter
  def offsets(self, offsets):
    """ row offsets """
    self._offsets = offsets

  @property
  def indices(self):
    # (row, column) coordinate of every stored value, row-major order.
    return [[row, col]
            for row in range(len(self._offsets))
            for col in range(self._offsets[row])]

  @property
  def dense_shape(self):
    return self._dense_shape

  @dense_shape.setter
  def dense_shape(self, dense_shape):
    self._dense_shape = dense_shape

  def __iter__(self):
    return self

  def __next__(self):
    # Row-wise iteration is implemented by subclasses.
    pass

  def next(self):
    # Python 2 compatibility shim.
    return self.__next__()
class Nodes(Values):
  """ A dense collection of graph nodes of one node type.

  Returned by `get_next` of `node_sampler` and `negative_sampler`, by
  `Graph.get_nodes`, or built directly when constructing a graph in
  memory. Weights, labels and attributes are resolved lazily from the
  backing graph on first access.
  """

  def __init__(self,
               ids,
               node_type,
               int_attrs=None,
               float_attrs=None,
               string_attrs=None,
               weights=None,
               labels=None,
               shape=None,
               graph=None):
    if not isinstance(ids, np.ndarray):
      raise ValueError("ids must be an instance of numpy.ndarray, "
                       "got {}.".format(type(ids)))
    super(Nodes, self).__init__(int_attrs=int_attrs,
                                float_attrs=float_attrs,
                                string_attrs=string_attrs,
                                weights=weights,
                                labels=labels,
                                shape=shape,
                                graph=graph)
    # Default the shape to that of the id array.
    self._shape = ids.shape if shape is None else shape
    self._ids = self._reshape(ids)
    self._type = node_type

  def _get_decoder(self):
    # Decoder describing which of weights/labels/attrs this type carries.
    return self._graph.get_node_decoder(self._type)

  @property
  def ids(self):
    return self._ids

  @ids.setter
  def ids(self, ids):
    self._ids = self._reshape(ids)

  @property
  def type(self):  # pylint: disable=redefined-builtin
    return self._type

  @type.setter
  def type(self, node_type):  # pylint: disable=redefined-builtin
    self._type = node_type

  @property
  def shape(self):
    return self._shape

  def _lookup_and_set_values(self):
    # Fetch weights/labels/attrs for these ids from the backing graph.
    values = self._graph.lookup_nodes(self._type, self._ids)
    self._set_values(values)
class SparseNodes(Nodes, SparseBase):
  """ SparseNodes is the returned value of full neighbor sampler which
  is 2D. It can be easily transformed to Tensorflow or PyTorch Sparse
  Tensors. Iterating a SparseNodes yields one dense `Nodes` per row.
  """

  def __init__(self,
               ids,
               offsets,
               dense_shape,
               node_type,
               int_attrs=None,
               float_attrs=None,
               string_attrs=None,
               weights=None,
               labels=None,
               graph=None):
    """ Sparse Nodes.
    Args:
      ids: A 1D numpy array, the ids of the nodes.
      offsets: A python list, each elem of list is an int,
        which indicates the number of nodes.
      dense_shape: The shape of the corresponding dense Nodes.
        For example, ids=[5, 2, 1, 6, 2, 4],
        offsets=[3, 2, 1],
        dense_shape=[3, 5].
        The corresponding dense Nodes is
        [[ 5, 2, 1, -1, -1],
         [ 6, 2, -1, -1, -1],
         [ 4, -1, -1, -1, -1]]
    """
    # NOTE(review): int_attrs/float_attrs/string_attrs accepted by this
    # constructor are forwarded as None below, so values passed here are
    # ignored; attrs appear to be assigned via the property setters after
    # construction — confirm this is intentional.
    Nodes.__init__(self, ids,
                   node_type,
                   int_attrs=None,
                   float_attrs=None,
                   string_attrs=None,
                   weights=weights,
                   labels=labels,
                   shape=None,
                   graph=graph)
    SparseBase.__init__(self, offsets, dense_shape)
    # The flat id array must contain exactly one entry per sparse index.
    num_nodes = sum(offsets)
    if ids.shape[0] != num_nodes:
      raise ValueError("Ids must be the same length of indices")

  def __next__(self):
    """Yield the dense `Nodes` of the next row, slicing ids, attrs,
    weights and labels with the precomputed global offsets.
    """
    if self._it < len(self._offsets):
      # [l, r) is this row's span in the flat value arrays.
      l = self._global_offsets[self._it]
      r = self._global_offsets[self._it + 1]
      self._it += 1
      # Attribute arrays are stored per-attribute, so each attribute
      # vector is sliced independently before re-stacking.
      nodes = Nodes(self._ids[l: r], self._type, graph=self._graph,
                    int_attrs=np.array([int_attr[l: r] \
                      for int_attr in self._int_attrs]) \
                      if self._int_attrs is not None else None,
                    float_attrs=np.array([float_attr[l: r] \
                      for float_attr in self._float_attrs]) \
                      if self._float_attrs is not None else None, \
                    string_attrs=np.array([string_attr[l: r] \
                      for string_attr in self._string_attrs]) \
                      if self._string_attrs is not None else None,
                    weights=self._weights[l:r] \
                      if self._weights is not None else None,
                    labels=self._labels[l:r] \
                      if self._labels is not None else None)
      return nodes
    else:
      raise StopIteration
class Edges(Values):
  """ A dense collection of edges of one edge type.

  Returned by `get_next` of `edge_sampler`, by `Graph.get_edges`, or built
  in memory when constructing a graph. `Nodes` objects for the two
  endpoints are created automatically from src_ids/dst_ids when not
  provided. Weights, labels and attributes are resolved lazily from the
  backing graph on first access.
  """

  def __init__(self,
               src_ids=None,
               src_type=None,
               dst_ids=None,
               dst_type=None,
               edge_type=None,
               edge_ids=None,
               src_nodes=None,
               dst_nodes=None,
               int_attrs=None,
               float_attrs=None,
               string_attrs=None,
               weights=None,
               labels=None,
               shape=None,
               graph=None):
    # Forward the attribute arguments to the base class. They were
    # previously hard-coded to None, silently discarding attributes passed
    # by the caller; Nodes.__init__ forwards them, so Edges now matches.
    super(Edges, self).__init__(int_attrs=int_attrs,
                                float_attrs=float_attrs,
                                string_attrs=string_attrs,
                                weights=weights,
                                labels=labels,
                                shape=shape,
                                graph=graph)
    self._shape = shape
    if not self._shape:
      # Infer the shape from whichever id array is available.
      if src_ids is not None:
        if not isinstance(src_ids, np.ndarray):
          raise ValueError("src_ids must be an instance of numpy.ndarray, "
                           "got {}.".format(type(src_ids)))
        self._shape = src_ids.shape
      if edge_ids is not None:
        # Validate edge_ids, whose shape is used below. The previous code
        # mistakenly checked dst_ids here, which broke construction from
        # edge ids alone.
        if not isinstance(edge_ids, np.ndarray):
          raise ValueError("edge_ids must be an instance of numpy.ndarray, "
                           "got {}.".format(type(edge_ids)))
        self._shape = edge_ids.shape
    self._src_ids = self._reshape(src_ids)
    self._src_type = src_type
    self._dst_ids = self._reshape(dst_ids)
    self._dst_type = dst_type
    self._edge_type = edge_type
    self._edge_ids = self._reshape(edge_ids)
    self._src_nodes = src_nodes
    self._dst_nodes = dst_nodes
    # Wrap raw endpoint ids into Nodes objects when not supplied.
    if self._src_ids is not None and self._src_nodes is None:
      self._src_nodes = Nodes(src_ids, src_type, shape=shape, graph=graph)
    if self._dst_ids is not None and self._dst_nodes is None:
      self._dst_nodes = Nodes(dst_ids, dst_type, shape=shape, graph=graph)
    if self._src_ids is not None and self._dst_ids is not None:
      if self._src_ids.shape != self._dst_ids.shape:
        raise ValueError("src_ids and dst_ids must be same shape.")

  def _get_decoder(self):
    # Decoder describing which of weights/labels/attrs this type carries.
    return self._graph.get_edge_decoder(self._edge_type)

  @property
  def src_nodes(self):
    return self._src_nodes

  @property
  def dst_nodes(self):
    return self._dst_nodes

  @property
  def edge_ids(self):
    return self._edge_ids

  @property
  def src_ids(self):
    return self._src_ids

  @property
  def dst_ids(self):
    return self._dst_ids

  @property
  def src_type(self):
    return self._src_type

  @property
  def dst_type(self):
    return self._dst_type

  @property
  def edge_type(self):
    return self._edge_type

  @property
  def type(self):  # pylint: disable=redefined-builtin
    # Full triplet identifying this edge relation.
    return self._src_type, self._dst_type, self._edge_type

  @property
  def shape(self):
    return self._shape

  def _lookup_and_set_values(self):
    # Fetch weights/labels/attrs for these edges from the backing graph.
    values = self._graph.lookup_edges(self._edge_type,
                                      self._src_ids,
                                      self._edge_ids)
    self._set_values(values)

  @edge_ids.setter
  def edge_ids(self, edge_ids):
    self._edge_ids = self._reshape(edge_ids)

  @src_ids.setter
  def src_ids(self, src_ids):
    self._src_ids = self._reshape(src_ids)

  @dst_ids.setter
  def dst_ids(self, dst_ids):
    self._dst_ids = self._reshape(dst_ids)

  @type.setter
  def type(self, type):  # pylint: disable=redefined-builtin
    if not isinstance(type, tuple) or len(type) != 3:
      raise ValueError("property type must be a tuple of "
                       "(src_type, dst_type, edge_type).")
    self._src_type, self._dst_type, self._edge_type = type

  @src_nodes.setter
  def src_nodes(self, src_nodes):
    if not isinstance(src_nodes, Nodes):
      raise ValueError("property src_nodes must be a Nodes object.")
    self._src_nodes = src_nodes

  @dst_nodes.setter
  def dst_nodes(self, dst_nodes):
    if not isinstance(dst_nodes, Nodes):
      raise ValueError("property dst_nodes must be a Nodes object.")
    self._dst_nodes = dst_nodes
class SparseEdges(Edges, SparseBase):
  """ SparseEdges is the return value of full neighbor sampler.
  It can be easily transformed to Tensorflow or PyTorch Sparse Tensors.
  Iterating a SparseEdges yields one dense `Edges` per row.
  """

  def __init__(self,
               src_ids=None,
               src_type=None,
               dst_ids=None,
               dst_type=None,
               edge_type=None,
               offsets=None,
               dense_shape=None,
               edge_ids=None,
               src_nodes=None,
               dst_nodes=None,
               int_attrs=None,
               float_attrs=None,
               string_attrs=None,
               weights=None,
               labels=None,
               graph=None):
    """ Sparse Edges.

    Args mirror `Edges`, with `offsets` giving the number of edges per
    row and `dense_shape` the corresponding 2D dense shape.
    """
    # NOTE(review): the int/float/string attribute arguments are forwarded
    # as None below, so values passed here are ignored; attrs appear to be
    # assigned via the property setters after construction — confirm.
    Edges.__init__(self, src_ids=src_ids,
                   src_type=src_type,
                   dst_ids=dst_ids,
                   dst_type=dst_type,
                   edge_type=edge_type,
                   edge_ids=edge_ids,
                   src_nodes=src_nodes,
                   dst_nodes=dst_nodes,
                   int_attrs=None,
                   float_attrs=None,
                   string_attrs=None,
                   weights=weights,
                   labels=labels,
                   shape=None,
                   graph=graph)
    SparseBase.__init__(self, offsets, dense_shape)
    if not src_nodes:
      # Wrap endpoint ids into SparseNodes sharing the same row layout.
      num_edges = sum(offsets)
      if src_ids is not None and src_ids.shape[0] != num_edges:
        raise ValueError("Ids must be the same length of indices")
      self._src_nodes = SparseNodes(src_ids, offsets, dense_shape,
                                    src_type, graph=graph)
      self._dst_nodes = SparseNodes(dst_ids, offsets, dense_shape,
                                    dst_type, graph=graph)
    else:
      # When sparse endpoint nodes are provided, adopt their layout as the
      # authoritative offsets/dense_shape.
      self._dense_shape = dst_nodes.dense_shape
      self._offsets = dst_nodes.offsets

  def __next__(self):
    """Yield the dense `Edges` of the next row, slicing ids, attrs,
    weights and labels with the precomputed global offsets, and advancing
    the endpoint SparseNodes iterators in lockstep.
    """
    if self._it < len(self._offsets):
      # [l, r) is this row's span in the flat value arrays.
      l = self._global_offsets[self._it]
      r = self._global_offsets[self._it + 1]
      self._it += 1
      edges = Edges(self._src_ids[l: r] \
                      if self._src_ids is not None else None,
                    self._src_type,
                    self._dst_ids[l: r] \
                      if self._dst_ids is not None else None,
                    self._dst_type,
                    self._edge_type,
                    self._edge_ids[l: r] \
                      if self._edge_ids is not None else None,
                    next(self._src_nodes),
                    next(self._dst_nodes),
                    weights=self._weights[l:r] \
                      if self._weights is not None else None,
                    labels=self._labels[l:r] \
                      if self._labels is not None else None,
                    graph=self._graph)
      # Attribute arrays are stored per-attribute; each attribute vector
      # is sliced independently and assigned through the setters.
      edges.int_attrs = np.array(
          [int_attr[l: r] for int_attr in self._int_attrs]) \
          if self._int_attrs is not None else None
      edges.float_attrs = np.array(
          [float_attr[l: r] for float_attr in self._float_attrs]) \
          if self._float_attrs is not None else None
      edges.string_attrs = np.array(
          [string_attr[l: r] for string_attr in self._string_attrs]) \
          if self._string_attrs is not None else None
      return edges
    else:
      raise StopIteration
class Layers(object):
  """ The hop-by-hop result of `meta_path_sampler`.

  Wraps an ordered list of `Layer` objects; the public API addresses
  layers with 1-based ids.
  """

  def __init__(self, layers=None):
    self.layers = layers if layers else []

  def _to_index(self, layer_id):
    """ Convert a 1-based layer id into a list index, raising ValueError
    when the id is beyond the stored layers.
    """
    index = layer_id - 1
    if isinstance(self.layers, list) and index < len(self.layers):
      return index
    raise ValueError("layer id beyond the layers length.")

  def layer(self, layer_id):
    """ Get one `Layer`.
    """
    return self.layers[self._to_index(layer_id)]

  def layer_size(self, layer_id):
    """ Get size of the given `Layer`.
    """
    return self.layers[self._to_index(layer_id)].shape

  def layer_nodes(self, layer_id):
    """ Get `Nodes` of the given `Layer`.
    """
    return self.layers[self._to_index(layer_id)].nodes

  def layer_edges(self, layer_id):
    """ Get `Edges` of the given `Layer`.
    """
    return self.layers[self._to_index(layer_id)].edges

  def set_layer_nodes(self, layer_id, nodes):
    """ Set `Nodes` of the given `Layer`.
    """
    index = self._to_index(layer_id)
    if not isinstance(self.layers[index], Layer):
      raise ValueError("layer {} is not a SingleLayer".format(index))
    # Assign through the `nodes` property: `Layer` defines no `set_nodes`
    # method, so the previous `set_nodes(...)` call raised AttributeError.
    self.layers[index].nodes = nodes

  def set_layer_edges(self, layer_id, edges):
    """ Set `Edges` of the given `Layer`.
    """
    index = self._to_index(layer_id)
    if not isinstance(self.layers[index], Layer):
      raise ValueError("layer {} is not a SingleLayer".format(index))
    # Assign through the `edges` property (see set_layer_nodes).
    self.layers[index].edges = edges

  def append_layer(self, layer):
    """ Append a `Layer` to layers.
    """
    self.layers.append(layer)
class Layer(object):
  """ One hop of a meta-path: the neighbor `Nodes` plus the traversed
  `Edges` between them.
  """

  def __init__(self, nodes, edges=None, shape=None):
    """ Keep one hop of `Nodes` and `Edges`; `shape` defaults to the
    shape of `nodes` when not given.
    """
    self._nodes = nodes
    self._edges = edges
    self._shape = nodes.shape if not shape else shape

  @property
  def nodes(self):
    return self._nodes

  @nodes.setter
  def nodes(self, nodes):
    self._nodes = nodes

  @property
  def edges(self):
    return self._edges

  @edges.setter
  def edges(self, edges):
    self._edges = edges

  @property
  def shape(self):
    return self._shape

  @shape.setter
  def shape(self, shape):
    self._shape = shape
| 30.278443 | 88 | 0.60442 | 19,202 | 0.949372 | 0 | 0 | 4,225 | 0.20889 | 0 | 0 | 3,763 | 0.186048 |
50871214482998dd46fb289bdd14022510cc1714 | 10,360 | py | Python | .github/scripts/version_number_update.py | wattch/FreeRTOS | 52cabc06846daef817959b800650e251a5e2aafb | [
"MIT"
] | null | null | null | .github/scripts/version_number_update.py | wattch/FreeRTOS | 52cabc06846daef817959b800650e251a5e2aafb | [
"MIT"
] | 3 | 2020-09-18T00:55:54.000Z | 2020-10-30T02:32:16.000Z | .github/scripts/version_number_update.py | wattch/FreeRTOS | 52cabc06846daef817959b800650e251a5e2aafb | [
"MIT"
] | null | null | null | import os
import re
import argparse
from collections import defaultdict
# Repo-relative paths of every Amazon FreeRTOS component whose source
# files carry a version line in their license header. 'portable' and
# 'third_party' subfolders are excluded later during the file walk.
_AFR_COMPONENTS = [
    'demos',
    'freertos_kernel',
    os.path.join('libraries','abstractions','ble_hal'),
    os.path.join('libraries','abstractions','common_io'),
    os.path.join('libraries','abstractions','pkcs11'),
    os.path.join('libraries','abstractions','platform'),
    os.path.join('libraries','abstractions','posix'),
    os.path.join('libraries','abstractions','secure_sockets'),
    os.path.join('libraries','abstractions','wifi'),
    os.path.join('libraries','c_sdk','aws','defender'),
    os.path.join('libraries','c_sdk','aws','shadow'),
    os.path.join('libraries','c_sdk','standard','ble'),
    os.path.join('libraries','c_sdk','standard','common'),
    os.path.join('libraries','c_sdk','standard','https'),
    os.path.join('libraries','c_sdk','standard','mqtt'),
    os.path.join('libraries','c_sdk','standard','serializer'),
    os.path.join('libraries','freertos_plus','aws','greengrass'),
    os.path.join('libraries','freertos_plus','aws','ota'),
    os.path.join('libraries','freertos_plus','standard','crypto'),
    os.path.join('libraries','freertos_plus','standard','freertos_plus_posix'),
    os.path.join('libraries','freertos_plus','standard','freertos_plus_tcp'),
    os.path.join('libraries','freertos_plus','standard','pkcs11'),
    os.path.join('libraries','freertos_plus','standard','tls'),
    os.path.join('libraries','freertos_plus','standard','utils'),
    'tests'
]
def ask_question(question):
    '''
    Prompt the user with `question` and return the whitespace-stripped reply.
    '''
    return input('{}: '.format(question)).strip()
def ask_multiple_choice_question(question, choices):
    '''
    Show a numbered menu of `choices` and keep prompting until the user
    enters a valid choice index, which is returned.
    '''
    while True:
        print('{}?'.format(question))
        for index, choice in enumerate(choices):
            print('{}. {}'.format(index, choice))
        try:
            selection = int(ask_question('Enter Choice'))
        except ValueError:
            # Not an integer at all.
            print('Incorrect choice. Please choose a number between 0 and {}'.format(len(choices) - 1))
            continue
        if 0 <= selection < len(choices):
            return selection
        print('Incorrect choice. Please choose a number between 0 and {}'.format(len(choices) - 1))
def ask_yes_no_question(question):
    '''
    Ask a yes/no question until a Y/N answer (case-insensitive) is given.

    Returns the string 'yes' or 'no'.
    '''
    while True:
        reply = ask_question('{} (Y/N)'.format(question)).lower()
        if reply == 'y':
            return 'yes'
        if reply == 'n':
            return 'no'
        print('Incorrect response. Please answer Y/N.')
def print_file_list(file_list):
    '''
    Print a table of file paths and the version line detected in each.

    An empty `file_list` is handled gracefully (previously max() over an
    empty sequence raised ValueError).
    '''
    if not file_list:
        print('No files found.\n')
        return
    version_line_list = []
    for file in file_list:
        version_number = extract_version_number_from_file(file)
        version_line_list.append(version_number[0] if version_number[0] is not None else 'Could not detect version')
    # Column widths follow the longest path / version line.
    max_filepath_length = len(max(file_list, key=len))
    max_version_line_length = len(max(version_line_list, key=len))
    separator = '-' * (max_filepath_length + max_version_line_length + 7)
    print(separator)
    print('| {file:<{max_filepath_length}} | {version:<{max_version_line_length}} |'.format(file='File',
                                                                                           max_filepath_length=max_filepath_length,
                                                                                           version='Version Line',
                                                                                           max_version_line_length=max_version_line_length))
    print(separator)
    for i in range(len(file_list)):
        print('| {file:<{max_filepath_length}} | {version:<{max_version_line_length}} |'.format(file=file_list[i],
                                                                                               max_filepath_length=max_filepath_length,
                                                                                               version=version_line_list[i],
                                                                                               max_version_line_length=max_version_line_length))
    print(separator)
    print('\n')
def list_files_in_a_component(component, afr_path):
    '''
    Return the repo-relative paths of all .c/.h files in a component.

    Skips 'portable' and 'third_party' folders (vendor code keeps its own
    version numbers) as well as hidden files and folders.
    '''
    list_of_files = []
    search_path = os.path.join(afr_path, component)
    for root, dirs, files in os.walk(search_path, topdown=True):
        # Prune in place so os.walk does not descend into these folders.
        dirs[:] = [d for d in dirs if d not in ('portable', 'third_party')]
        # Do not include hidden files and folders.
        dirs[:] = [d for d in dirs if not d.startswith('.')]
        for f in files:
            if f.startswith('.'):
                continue
            if f.endswith(('.c', '.h')):
                list_of_files.append(os.path.join(os.path.relpath(root, afr_path), f))
    return list_of_files
def extract_version_number_from_file(file_path):
    '''
    Extract the version line from the license header in a file.

    Returns a (version_line, version_number) tuple such as
    ('FreeRTOS Kernel V10.4.3', '10.4.3'), or (None, None) when no
    version line is found.
    '''
    with open(file_path) as f:
        content = f.read()
    # Raw string avoids invalid '\s' escape warnings. This single pattern
    # also covers 'FreeRTOS Kernel ...' and 'FreeRTOS+TCP ...' headers:
    # the previous more-specific fallback patterns were strictly subsumed
    # by 'FreeRTOS.*V' and therefore dead code.
    match = re.search(r'\s*\*\s*(FreeRTOS.*V(.*))', content, re.MULTILINE)
    return (match.group(1), match.group(2)) if match is not None else (None, None)
def update_version_number_in_files(file_paths, old_version_line, new_version_line):
    '''
    Replace every occurrence of old_version_line with new_version_line in
    each of the files named by file_paths, rewriting them in place.
    '''
    for file_path in file_paths:
        with open(file_path) as source:
            updated = source.read().replace(old_version_line, new_version_line)
        with open(file_path, 'w') as target:
            target.write(updated)
def update_version_number_in_a_component(component, afr_path):
    '''
    Updates version numbers in all the files of an AFR component based on user
    choices.

    Files are grouped by the (version_line, version_number) pair detected in
    their headers; each group is then handled interactively — either the
    whole version line or just the number is replaced, or the group is
    listed or skipped. Files with no detectable version must be fixed
    manually by the user.
    '''
    # Get all the files in the component.
    files_in_component = list_files_in_a_component(component, afr_path)
    version_numbers = defaultdict(list)
    # Extract version numbers from all the files and group files that share
    # the same (version_line, version_number) pair.
    for f in files_in_component:
        file_path = os.path.join(afr_path, f)
        version_number = extract_version_number_from_file(file_path)
        version_numbers[version_number].append(file_path)
    for key in version_numbers.keys():
        old_version_line = key[0]
        old_version_number = key[1]
        files_to_update = version_numbers[key]
        if old_version_line is None:
            # No version line detected: ask the user to edit these files by
            # hand and confirm until the result looks good.
            print('\nFailed to detect the version number in the following files:')
            while True:
                print_file_list(files_to_update)
                print('Please update the above files manually!')
                confirm = ask_yes_no_question('Done updating')
                if confirm == 'yes':
                    print_file_list(files_to_update)
                    looks_good = ask_yes_no_question('Does it look good')
                    if looks_good == 'yes':
                        break
        else:
            print('\n{} files have the following version: {}\n'.format(len(files_to_update), old_version_line))
            options = [ 'Update version number [i.e. update "{}"].'.format(old_version_number),
                        'Update version line [i.e. update "{}"].'.format(old_version_line),
                        'List files.',
                        'Do not update.' ]
            # Loop until the user either applies an update or opts out.
            while True:
                user_selected_option = ask_multiple_choice_question('What do you want to do', options)
                if user_selected_option == 0:
                    # Keep the line, swap only the version number in it.
                    new_version_number = ask_question('Enter new version number')
                    new_version_line = old_version_line.replace(old_version_number, new_version_number)
                    print('Old version line: "{}". New version line: "{}".'.format(old_version_line, new_version_line))
                    confirm = ask_yes_no_question('Does it look good')
                    if confirm == 'yes':
                        update_version_number_in_files(files_to_update, old_version_line, new_version_line)
                        print('Updated version line to "{}".\n'.format(new_version_line))
                        break
                elif user_selected_option == 1:
                    # Replace the whole version line verbatim.
                    new_version_line = ask_question('Enter new version line')
                    print('Old version line: "{}". New version line: "{}".'.format(old_version_line, new_version_line))
                    confirm = ask_yes_no_question('Does it look good')
                    if confirm == 'yes':
                        update_version_number_in_files(files_to_update, old_version_line, new_version_line)
                        print('Updated version line to "{}".\n'.format(new_version_line))
                        break
                elif user_selected_option == 2:
                    print_file_list(files_to_update)
                else:
                    print('Skipping update of {}.\n'.format(old_version_line))
                    break
def parse_arguments():
    '''
    Parses the command line arguments.

    Returns a dict with the key 'afr' pointing at the AFR source tree.
    '''
    # Description corrected: this tool updates version numbers; the old
    # text ('FreeRTOS Checksum Generator') described a different script.
    parser = argparse.ArgumentParser(description='FreeRTOS Version Number Updater')
    parser.add_argument('--afr', required=True, help='Location of the AFR Code.')
    args = parser.parse_args()
    return vars(args)
def main():
    '''
    Main entry point.

    Walks every AFR component in _AFR_COMPONENTS and, when the user
    confirms, interactively updates the version numbers in that
    component's source files.
    '''
    args = parse_arguments()
    afr_path = args['afr']
    print('AFR Code: {}'.format(afr_path))
    for component in _AFR_COMPONENTS:
        print('\n---------------------------------------------')
        print('Component: {}'.format(component))
        print('---------------------------------------------\n')
        wanna_update_version = ask_yes_no_question('Do you want to update the component "{}"'.format(component))
        if wanna_update_version == 'yes':
            update_version_number_in_a_component(component, afr_path)


if __name__ == '__main__':
    main()
| 41.94332 | 145 | 0.584653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,963 | 0.286004 |
50879439035ca2e6d3397d74ace1320560aeb002 | 2,232 | py | Python | Medium/452.MinimumNumberofArrowstoBurstBalloons.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 39 | 2020-07-04T11:15:13.000Z | 2022-02-04T22:33:42.000Z | Medium/452.MinimumNumberofArrowstoBurstBalloons.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 1 | 2020-07-15T11:53:37.000Z | 2020-07-15T11:53:37.000Z | Medium/452.MinimumNumberofArrowstoBurstBalloons.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 20 | 2020-07-14T19:12:53.000Z | 2022-03-02T06:28:17.000Z | """
There are some spherical balloons spread in two-dimensional space. For each
balloon, provided input is the start and end coordinates of the horizontal
diameter. Since it's horizontal, y-coordinates don't matter, and hence the
x-coordinates of start and end of the diameter suffice. The start is always
smaller than the end.
An arrow can be shot up exactly vertically from different points along the
x-axis. A balloon with x(start) and x(end) bursts by an arrow shot at x if
x(start) ≤ x ≤ x(end). There is no limit to the number of arrows that can
be shot. An arrow once shot keeps traveling up infinitely.
Given an array points where points[i] = [x(start), x(end)], return the
minimum number of arrows that must be shot to burst all balloons.
Example:
Input: points = [[10,16],[2,8],[1,6],[7,12]]
Output: 2
Explanation: One way is to shoot one arrow for example at x = 6
(bursting the balloons [2,8] and [1,6]) and another arrow at
x = 11 (bursting the other two balloons).
Example:
Input: points = [[1,2],[3,4],[5,6],[7,8]]
Output: 4
Example:
Input: points = [[1,2],[2,3],[3,4],[4,5]]
Output: 2
Example:
Input: points = [[1,2]]
Output: 1
Example:
Input: points = [[2,3],[2,3]]
Output: 1
Constraints:
- 0 <= points.length <= 10**4
    - points[i].length == 2
- -2**31 <= xstart < xend <= 2**31 - 1
"""
#Difficulty: Medium
#45 / 45 test cases passed.
#Runtime: 424 ms
#Memory Usage: 18.4 MB
#Runtime: 424 ms, faster than 86.48% of Python3 online submissions for Minimum Number of Arrows to Burst Balloons.
#Memory Usage: 18.4 MB, less than 97.86% of Python3 online submissions for Minimum Number of Arrows to Burst Balloons.
class Solution:
    def findMinArrowShots(self, points: List[List[int]]) -> int:
        """Return the minimum number of arrows needed to burst all balloons.

        Greedy: sort balloons by right edge and shoot an arrow at the right
        edge of the first un-burst balloon; that bursts every balloon whose
        interval contains the point (touching counts, since
        x(start) <= x <= x(end) bursts).

        O(n log n) time and O(1) extra space. Unlike the previous
        O(n^2) pop-based version, this never removes elements from the
        caller's list (it only sorts it in place, as before).
        """
        points.sort(key=lambda p: p[1])
        arrows = 0
        reach = None  # x-coordinate of the last arrow fired
        for start, end in points:
            # A gap between this balloon and the last arrow means a new
            # arrow is required; otherwise it is already burst.
            if arrows == 0 or start > reach:
                arrows += 1
                reach = end
        return arrows
| 33.818182 | 118 | 0.59991 | 419 | 0.187388 | 0 | 0 | 0 | 0 | 0 | 0 | 1,807 | 0.80814 |
5087fd9779109ae3757b5e0e1a9ac31884d9c109 | 1,844 | py | Python | pl_extension/callbacks/speed.py | DuinoDu/pl-extension | 1ed8f3dd95aa569ee3493fcc69634d3ab9322430 | [
"Apache-2.0"
] | null | null | null | pl_extension/callbacks/speed.py | DuinoDu/pl-extension | 1ed8f3dd95aa569ee3493fcc69634d3ab9322430 | [
"Apache-2.0"
] | null | null | null | pl_extension/callbacks/speed.py | DuinoDu/pl-extension | 1ed8f3dd95aa569ee3493fcc69634d3ab9322430 | [
"Apache-2.0"
] | null | null | null | import logging
from pytorch_lightning.callbacks.base import Callback
__all__ = ["Speed"]
logger = logging.getLogger(__name__)
class Speed(Callback):
    r"""
    Training speed callback, require 'simple' or 'advanced' profiler.

    Per batch it records either 'speed' (samples/sec, when the datamodule
    exposes a batch_size) or the raw 'batch_time' into the trainer's legacy
    batch log metrics; per epoch it logs 'epoch_time' to the logger.
    Without a profiler only 'iter' and 'epoch' are recorded.
    """

    def on_train_batch_end(
        self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
    ):
        # NOTE(review): mutates PL's internal cached legacy batch metrics
        # dict directly — confirm this remains valid across PL versions.
        legacy_metrics = (
            trainer.logger_connector.cached_results.legacy_batch_log_metrics
        )
        legacy_metrics["iter"] = trainer.global_step
        legacy_metrics["epoch"] = trainer.current_epoch
        if not self.__has_profiler(trainer):
            # if not profiler provided, skip speed and batch_time.
            return
        # get training one batch time
        # NOTE(review): assumes the profiler has already recorded at least
        # one 'run_training_batch' duration; otherwise this raises
        # KeyError/IndexError — confirm.
        run_training_batch_time = trainer.profiler.recorded_durations[
            "run_training_batch"
        ][-1]
        if hasattr(trainer.datamodule, "batch_size"):
            # Global throughput: per-process batch size times world size.
            total_batch_size = (
                trainer.datamodule.batch_size * trainer.world_size
            )
            legacy_metrics["speed"] = (
                1.0 * total_batch_size / run_training_batch_time
            )
        else:
            # No batch size known: fall back to the raw batch duration.
            legacy_metrics["batch_time"] = run_training_batch_time

    def on_train_epoch_end(self, trainer, pl_module, *args, **kwargs):
        # Log the duration of the just-finished epoch, when available.
        if not self.__has_profiler(trainer):
            return
        run_training_epoch_time = trainer.profiler.recorded_durations[
            "run_training_epoch"
        ]
        if len(run_training_epoch_time) > 0 and hasattr(
            trainer.logger, "log_metrics"
        ):
            epoch_time = {"epoch_time": run_training_epoch_time[-1]}
            trainer.logger.log_metrics(epoch_time, step=trainer.current_epoch)

    def __has_profiler(self, trainer):
        # 'simple'/'advanced' profilers expose recorded_durations; the
        # pass-through profiler does not.
        return hasattr(trainer.profiler, "recorded_durations")
50889e61c09c24ca6b275f770b232c7c15f511c1 | 3,158 | py | Python | spectacles/validators/validator.py | felipefrancisco/spectacles | 92f7af5810e2669343dd18425b2a8cb49d7167d2 | [
"MIT"
] | 150 | 2019-10-05T18:35:36.000Z | 2022-03-26T21:21:44.000Z | spectacles/validators/validator.py | felipefrancisco/spectacles | 92f7af5810e2669343dd18425b2a8cb49d7167d2 | [
"MIT"
] | 406 | 2019-10-03T14:54:22.000Z | 2022-03-28T04:02:31.000Z | spectacles/validators/validator.py | felipefrancisco/spectacles | 92f7af5810e2669343dd18425b2a8cb49d7167d2 | [
"MIT"
] | 26 | 2019-11-08T16:21:50.000Z | 2022-03-28T06:06:14.000Z | from typing import Optional, List
from abc import ABC, abstractmethod
from spectacles.client import LookerClient
from spectacles.lookml import Project, Model, Dimension
from spectacles.select import is_selected
from spectacles.exceptions import LookMlNotFound
class Validator(ABC):  # pragma: no cover
    """Defines abstract base interface for validators.

    Not intended to be used directly, only inherited.

    Attributes:
        client: Looker API client.

    """

    def __init__(self, client: LookerClient, project: str):
        self.client = client
        # Start with an empty model list; build_project fills it in.
        self.project = Project(project, models=[])

    @abstractmethod
    def validate(self):
        raise NotImplementedError

    def build_project(
        self,
        selectors: Optional[List[str]] = None,
        exclusions: Optional[List[str]] = None,
        build_dimensions: bool = False,
    ) -> None:
        """Creates an object representation of the project's LookML.

        Args:
            selectors: List of selector strings in 'model_name/explore_name' format.
                The '*' wildcard selects all models or explores. For instance,
                'model_name/*' would select all explores in the 'model_name' model.
            exclusions: Selector strings for explores to exclude.
            build_dimensions: When True, also fetch each selected explore's
                dimensions and attach them.
        """
        # Default to selecting everything and excluding nothing.
        selectors = ["*/*"] if selectors is None else selectors
        exclusions = [] if exclusions is None else exclusions

        raw_models = self.client.get_lookml_models(
            fields=["name", "project_name", "explores"]
        )
        project_models = [
            model
            for model in (Model.from_json(raw) for raw in raw_models)
            if model.project_name == self.project.name
        ]

        if not project_models:
            raise LookMlNotFound(
                name="project-models-not-found",
                title="No configured models found for the specified project.",
                detail=(
                    f"Go to {self.client.base_url}/projects and confirm "
                    "a) at least one model exists for the project and "
                    "b) it has an active configuration."
                ),
            )

        for model in project_models:
            # Keep only the explores matched by the selectors/exclusions.
            model.explores = [
                explore
                for explore in model.explores
                if is_selected(model.name, explore.name, selectors, exclusions)
            ]
            if not build_dimensions:
                continue
            for explore in model.explores:
                raw_dimensions = self.client.get_lookml_dimensions(
                    model.name, explore.name
                )
                for raw_dimension in raw_dimensions:
                    dimension = Dimension.from_json(
                        raw_dimension, model.name, explore.name
                    )
                    # The API returns a relative URL; make it absolute.
                    dimension.url = self.client.base_url + dimension.url
                    if not dimension.ignore:
                        explore.add_dimension(dimension)

        # Drop models whose explores were all filtered out.
        self.project.models = [
            model for model in project_models if model.explores
        ]
| 34.703297 | 84 | 0.569981 | 2,895 | 0.916719 | 0 | 0 | 73 | 0.023116 | 0 | 0 | 828 | 0.262191 |
5088c0c6396ddfd3443bddad31b38216e9bda0c1 | 1,330 | py | Python | python/periodic-web-scrapper/scraper/Scraper.py | MarioCodes/ProyectosClaseDAM | df568b4feda8bf3a6cf7cc8e81e7dfa4156dcfd9 | [
"Apache-2.0"
] | null | null | null | python/periodic-web-scrapper/scraper/Scraper.py | MarioCodes/ProyectosClaseDAM | df568b4feda8bf3a6cf7cc8e81e7dfa4156dcfd9 | [
"Apache-2.0"
] | 17 | 2019-06-14T12:30:46.000Z | 2022-02-18T11:38:50.000Z | python/periodic-web-scrapper/scraper/Scraper.py | MarioCodes/ProyectosClaseDAM | df568b4feda8bf3a6cf7cc8e81e7dfa4156dcfd9 | [
"Apache-2.0"
] | null | null | null | '''
Created on Apr 18, 2018
@author: msanchez
'''
from scraper.RequestScraper import RequestScraper
from scraper.HTMLFilter import HTMLFilter
from scraper.NewsFilter import NewsFilter
from scraper.utilities.WebUtilities import WebUtilities
class Scraper(object):
    """Full scrap operation.

    Downloads the page for a fixed URL, checks the HTTP status code and,
    when successful, runs the scrape, clean and filter pipeline.
    """

    def __init__(self):
        """Constructor."""

    def scrap(self):
        """Run the whole pipeline and return the filtered news list."""
        response = self.__download()
        news = list()
        if response.status_code == 200:
            raw_tags = RequestScraper(response).scrap_news()
            news = self.__filter(self.__clean(raw_tags))
        else:
            print("There was an error on download operation. Status code: ", str(response.status_code))
        return news

    def __download(self):
        # Fetch the front page of the target site.
        return WebUtilities().download("https://www.heraldo.es/")

    def __clean(self, html_tags):
        # Strip HTML noise from the scraped tags.
        return HTMLFilter(html_tags).filter()

    def __filter(self, unfiltered_tags):
        # Keep only the tags that match the news criteria.
        return NewsFilter(unfiltered_tags).search()
50897cd7ac40da6c45acf1571aad36057bdb2d94 | 2,877 | py | Python | cabot_alert_pushover/models.py | dnelson/cabot-alert-pushover | c10f34e59c9f289e60016e7240c4e6ada611f611 | [
"MIT"
] | null | null | null | cabot_alert_pushover/models.py | dnelson/cabot-alert-pushover | c10f34e59c9f289e60016e7240c4e6ada611f611 | [
"MIT"
] | 1 | 2015-02-09T22:26:48.000Z | 2015-02-09T22:26:48.000Z | cabot_alert_pushover/models.py | packetcollision/cabot-alert-pushover | c10f34e59c9f289e60016e7240c4e6ada611f611 | [
"MIT"
] | 1 | 2015-04-29T15:43:31.000Z | 2015-04-29T15:43:31.000Z | from django.db import models
from django.conf import settings
from django.template import Context, Template
from cabot.cabotapp.alert import AlertPlugin, AlertPluginUserData
from os import environ as env
import requests
pushover_alert_url = "https://api.pushover.net/1/messages.json"
pushover_template = "Service {{ service.name }} {% if service.overall_status == service.PASSING_STATUS %}is back to normal{% else %}reporting {{ service.overall_status }} status{% endif %}: {{ scheme }}://{{ host }}{% url 'service' pk=service.id %}."
class PushoverAlert(AlertPlugin):
    """Cabot alert plugin that sends service status changes via Pushover."""

    name = "Pushover"
    author = "Daniel Nelson"

    def send_alert(self, service, users, duty_officers):
        """Send a Pushover notification about ``service`` to each user.

        Args:
            service: the Cabot service whose status changed.
            users: users subscribed to alerts for this service.
            duty_officers: unused; kept for the AlertPlugin interface.
        """
        # Pushover handles repeat alerts, so we can skip them
        if service.overall_status == service.old_overall_status:
            return
        for u in users:
            alert = True
            priority = 1
            try:
                data = AlertPluginUserData.objects.get(user=u, title=PushoverAlertUserData.name)
            except Exception:
                # BUG FIX: the original swallowed this failure with a bare
                # `except: pass` and then crashed on the undefined `data`.
                # Without plugin data there is no Pushover key for this
                # user, so skip them instead.
                continue
            # Map the service status to a Pushover priority level.
            if service.overall_status == service.WARNING_STATUS:
                if not data.alert_on_warn:
                    alert = False
                priority = 0
            elif service.overall_status == service.ERROR_STATUS:
                priority = 1
            elif service.overall_status == service.CRITICAL_STATUS:
                # Emergency priority: Pushover re-notifies until acknowledged.
                priority = 2
            elif service.overall_status == service.PASSING_STATUS:
                priority = 0
                if service.old_overall_status == service.CRITICAL_STATUS:
                    # cancel the recurring crit
                    pass
            else:
                # something weird happened
                alert = False
            if not alert:
                # BUG FIX: this was `return`, which silently dropped the
                # alert for every remaining user in the loop.
                continue
            # Render the notification text from the alert template.
            c = Context({
                'service': service,
                'host': settings.WWW_HTTP_HOST,
                'scheme': settings.WWW_SCHEME,
                'jenkins_api': settings.JENKINS_API,
            })
            message = Template(pushover_template).render(c)
            self._send_pushover_alert(message, key=data.key, priority=priority)

    def _send_pushover_alert(self, message, key, priority=0):
        """POST a single message to the Pushover API for user/group ``key``."""
        payload = {
            'token': env['PUSHOVER_TOKEN'],
            'user': key,
            'priority': priority,
            'title': 'Cabot ALERT',
            'message': message,
        }
        if priority == 2:
            # Emergency-priority messages require retry/expire parameters.
            payload['retry'] = 60
            payload['expire'] = 3600
        requests.post(pushover_alert_url, data=payload)
class PushoverAlertUserData(AlertPluginUserData):
    # Per-user settings for the Pushover plugin.
    name = "Pushover Plugin"
    # Pushover user or group key that alerts are delivered to.
    key = models.CharField(max_length=32, blank=False, verbose_name="User/Group Key")
    # Whether WARNING-status transitions should also trigger a notification.
    alert_on_warn = models.BooleanField(default=False)
| 35.518519 | 250 | 0.583246 | 2,332 | 0.810567 | 0 | 0 | 0 | 0 | 0 | 0 | 571 | 0.198471 |
508b71956b5d3b6c286150774b34745d6e9b14c4 | 683 | py | Python | mezzanine_faq/templatetags/faq_tags.py | fpytloun/mezzanine-faq | 01f8d5a33bb97a2c72679675ef98c4318ac70779 | [
"BSD-3-Clause"
] | null | null | null | mezzanine_faq/templatetags/faq_tags.py | fpytloun/mezzanine-faq | 01f8d5a33bb97a2c72679675ef98c4318ac70779 | [
"BSD-3-Clause"
] | null | null | null | mezzanine_faq/templatetags/faq_tags.py | fpytloun/mezzanine-faq | 01f8d5a33bb97a2c72679675ef98c4318ac70779 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django import template
from mezzanine.conf import settings
from mezzanine_faq.models import FaqPage
register = template.Library()
@register.inclusion_tag('includes/faqlist.html')
def faq_list(**kwargs):
    """Render the FAQ list template with all questions of the matched page."""
    page = FaqPage.objects.get(**kwargs)
    context = {
        'page': page,
        'faq_questions': page.faqquestion_set.all(),
        'MEDIA_URL': settings.MEDIA_URL,
    }
    return context
@register.inclusion_tag('includes/faqlist.html')
def faq_last(**kwargs):
    """Render the FAQ list template with only the first question (by id) of the page."""
    page = FaqPage.objects.get(**kwargs)
    # Slice of one keeps the template contract (an iterable of questions).
    latest_question = page.faqquestion_set.all().order_by('-id')[:1]
    context = {
        'page': page,
        'faq_questions': latest_question,
        'MEDIA_URL': settings.MEDIA_URL,
    }
    return context
| 24.392857 | 72 | 0.66325 | 0 | 0 | 0 | 0 | 516 | 0.75549 | 0 | 0 | 138 | 0.20205 |
508bdde093af310cb7c78c41d80054d13c12905d | 1,234 | py | Python | easy/543-Diameter of Binary Tree.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | 2 | 2020-05-08T02:17:17.000Z | 2020-05-17T04:55:56.000Z | easy/543-Diameter of Binary Tree.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | easy/543-Diameter of Binary Tree.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | """
https://leetcode.com/problems/diameter-of-binary-tree/
Given a binary tree, you need to compute the length of the diameter of the tree. The diameter of a binary tree is the length of the longest path between any two nodes in a tree. This path may or may not pass through the root.
Example:
Given a binary tree
1
/ \
2 3
/ \
4 5
Return 3, which is the length of the path [4,2,1,3] or [5,2,1,3].
Note: The length of path between two nodes is represented by the number of edges between them.
"""
# Thanks to the solution provided by the problem.
# time complexity: O(n), space complexity: O(1)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def diameterOfBinaryTree(self, root: TreeNode) -> int:
        """Return the number of edges on the longest path in the tree."""
        # Longest path seen so far, measured in NODES (converted to edges at the end).
        self.long = 1
        self.dfs(root)
        return self.long - 1

    def dfs(self, root: TreeNode) -> int:
        """Return the height (node count) of the subtree, updating self.long."""
        if root is None:
            return 0
        left_height = self.dfs(root.left)
        right_height = self.dfs(root.right)
        # Best path passing through this node uses both subtree heights.
        self.long = max(self.long, left_height + right_height + 1)
        return 1 + max(left_height, right_height)
508c6c7312c3774719180054ec3e60ba8af3b4ea | 1,114 | py | Python | hsf_website_helpers/bin/hsf_reformat_training_events.py | HSF/website-helpers | 7b01db3648d9f8026a318a4fac2fd3a8aeea354e | [
"MIT"
] | null | null | null | hsf_website_helpers/bin/hsf_reformat_training_events.py | HSF/website-helpers | 7b01db3648d9f8026a318a4fac2fd3a8aeea354e | [
"MIT"
] | null | null | null | hsf_website_helpers/bin/hsf_reformat_training_events.py | HSF/website-helpers | 7b01db3648d9f8026a318a4fac2fd3a8aeea354e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Quick script to read all training schools from data file and write them out
again to e.g. update the formatting.
"""
import argparse
from hsf_website_helpers.events.event import EventDatabase
from hsf_website_helpers.util.cli import add_website_home_option
def get_parser() -> argparse.ArgumentParser:
    """Build and return the command line parser for this script."""
    description = (
        "Quick script to read all training schools from data file and write "
        "them out again to e.g. update the formatting."
    )
    parser = argparse.ArgumentParser(description=description)
    # Adds the shared --home option pointing at the website checkout.
    add_website_home_option(parser)
    return parser
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    # The event database lives in the website's Jekyll data directory.
    path = args.home / "_data" / "training-schools.yml"
    if path.is_file():
        edb = EventDatabase.from_file(path)
        print(f"Loaded {len(edb.events)} events from database.")
    else:
        print(f"Did not find database at {path}. Initializing empty one.")
        edb = EventDatabase()
    # Writing the loaded database back reapplies the canonical formatting.
    edb.write(path)
    print(
        "Reformated database. Please commit and submit a PR to add it to "
        "the webpage."
    )
| 28.564103 | 77 | 0.682226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.435368 |
508e3fc0c39607a000fe637e25c5db2d8a7dc060 | 275 | py | Python | clothstream/styletags/urls.py | julienaubert/clothstream | daa76389be8b359208e88cd1f7aa8e7e98766656 | [
"MIT"
] | null | null | null | clothstream/styletags/urls.py | julienaubert/clothstream | daa76389be8b359208e88cd1f7aa8e7e98766656 | [
"MIT"
] | null | null | null | clothstream/styletags/urls.py | julienaubert/clothstream | daa76389be8b359208e88cd1f7aa8e7e98766656 | [
"MIT"
] | null | null | null | from clothstream.lib.rest import SharedAPIRootRouter
from .views import ItemStyleTagCreate, StyleTagList
# Router shared across apps so these endpoints mount on the common API root.
router = SharedAPIRootRouter()
router.register(r'styletag-item/create', ItemStyleTagCreate, base_name='itemstyletag-create')
router.register(r'styletags', StyleTagList)
| 39.285714 | 93 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.203636 |
508e54ebec8de34c224abfb2a9e4b84d25629fa1 | 6,652 | py | Python | aws_automation/s3.py | VCCRI/Scavenger | a17f48b242a1cd8d626ad3a21439fa2c80b06533 | [
"MIT"
] | 4 | 2018-07-25T15:52:00.000Z | 2020-10-20T00:47:35.000Z | aws_automation/s3.py | VCCRI/Scavenger | a17f48b242a1cd8d626ad3a21439fa2c80b06533 | [
"MIT"
] | null | null | null | aws_automation/s3.py | VCCRI/Scavenger | a17f48b242a1cd8d626ad3a21439fa2c80b06533 | [
"MIT"
] | null | null | null | # a module that wraps some of the S3 commands
import boto3
from botocore.exceptions import ClientError
from boto3.s3.transfer import S3Transfer
import re
import os
# list the contents of a bucket, printing each key
def list_bucket(bucket_name, region):
    """ prints and returns the keys of all objects in an s3 bucket

    Args:
        bucket_name: string - the name of the s3 bucket
        region: string - the AWS region of the bucket
    Returns:
        a list of object keys; partial/empty if the listing failed
    """
    s3 = boto3.resource('s3', region)
    bucket = s3.Bucket(bucket_name)
    object_list = []
    try:
        for key in bucket.objects.all():
            print(key.key)
            object_list.append(key.key)
    except ClientError as e:
        # AWS API errors carry a structured .response with details
        print(str(e))
        print(e.response)
    except Exception as e:
        # BUG FIX: non-AWS exceptions have no .response attribute; the
        # original handler raised AttributeError while reporting them.
        print(str(e))
    return object_list
# get list of bucket contents
def get_bucket_list(bucket_name, region):
    """Return the keys of every object in the given bucket."""
    bucket = boto3.resource('s3', region).Bucket(bucket_name)
    return [summary.key for summary in bucket.objects.all()]
# check bucket exists (efficient version)
# NOTE: s3 bucket name space is for all AWS users
# therefore need to also check that have rights to read & write (+list)
def bucket_exists(bucket, region):
    """Return True unless a HEAD on the bucket reports 404 (not found)."""
    s3 = boto3.resource('s3', region)
    try:
        s3.meta.client.head_bucket(Bucket=bucket)
    except ClientError as e:
        # Only a 404 means the bucket does not exist; other errors (e.g.
        # 403) indicate it exists but we may lack permissions.
        if int(e.response['Error']['Code']) == 404:
            return False
    return True
# upload a file
def upload_file(bucket, region, source_file, dest_file):
    """Upload a local file to the bucket under the given destination key."""
    transfer = S3Transfer(boto3.client('s3', region))
    transfer.upload_file(source_file, bucket, dest_file)
# determine next unique number
def get_next_id(bucket_name, region, prefix):
    """
    determines the next sequential numbering for a given folder prefix

    e.g. prefix is "01092015Tue-"; if a file exists in the folder, then
    it will be of the form "01092015Tue-xxx/somefilename.ext" - where
    xxx is some number"; if there is such a file, then next folder will
    be 01092015Tue-yyy - where yyy = xxx + 1; otherwise, next folder is
    01092015Tue-1

    Args:
        prefix: a string that represents the absolute folder name
    Returns: a string that represents the next folder name in the
        sequence
    """
    folder_pattern = re.compile(prefix + '([0-9]+)/')
    highest = 0
    for key in get_bucket_list(bucket_name, region):
        match = folder_pattern.match(key)
        if match:
            highest = max(highest, int(match.groups()[0]))
    next_folder = prefix + str(highest + 1)
    # want to strip out any "directories" in path & just return id
    return next_folder.split('/')[-1]
# return a list of bucket objects that match a given prefix
#TODO remove default bucket name
def list_by_prefix(bucket_name, region, prefix=''):
    """ returns a list of names of bucket objects that start with a
    given prefix

    Args:
        bucket_name: string - the name of the s3 bucket
        prefix: string - the prefix of the name (key) of the bucket
            objects
    Returns:
        a list of objects whose name (key) starts with the given prefix
    """
    bucket = boto3.resource('s3', region).Bucket(bucket_name)
    return [summary.key for summary in bucket.objects.filter(Prefix=prefix)]
# determine if a given object key exists in the bucket
def key_exists(bucket_name, region, key):
    """ indicates if a key (object name) is in the bucket

    Args:
        bucket_name: string - the name of the s3 bucket
        key: string - the name of the object key (file-name)
    Returns:
        True if key in bucket; False otherwise
    """
    # Listing by the key itself returns it (plus any keys it prefixes).
    return key in list_by_prefix(bucket_name, region, key)
def get_timing_info(bucket_name, region, prefix):
    """ gets the timing information for jobs - labelled start & finish

    Returns:
        a sorted list of 3-tuples:
        (finish time, elapsed time string, task name string)
    """
    bucket = boto3.resource('s3', region).Bucket(bucket_name)
    starts = {}
    finishes = {}
    # Collect last-modified timestamps of the start/finish marker objects.
    for summary in bucket.objects.filter(Prefix=prefix):
        key = summary.key
        if 'start' in key:
            starts[key] = summary.last_modified
        if 'finish' in key:
            finishes[key] = summary.last_modified
    results = []
    for key, finished_at in finishes.items():
        start_key = key.replace('finish', 'start')
        if start_key not in starts:
            # no matching start marker - cannot compute elapsed time
            continue
        elapsed = str(finished_at - starts[start_key])
        # derive task name from the marker filename (drop path + extension)
        task_name = key.replace('finish', 'task').split('/')[-1].split('.')[0]
        results.append((finished_at, elapsed, task_name))
    return sorted(results)
# download files matching a prefix and/or suffix
def download_files(bucket_name, region, prefix='', suffix='', dest_dir=''):
    """ downloads files whose path & name match given prefix & suffix
    to specified dir

    Args:
        bucket_name: the name of the s3 bucket to download from
        region: the AWS region of the bucket
        prefix: string - start of full path the s3 file
        suffix: string - the end characters of the file (e.g. '.vcf')
        dest_dir: string - the (local) directory to which the files are
            downloaded
    Raises:
        ValueError: if neither prefix nor suffix is given
    """
    # BUG FIX: this was an `assert`, which is stripped under `python -O`;
    # validate input with a real exception instead.
    if not (prefix or suffix):
        raise ValueError('must have a value for either prefix or suffix')
    # get rid of '/' at end of dir if exists
    if dest_dir.endswith('/'):
        dest_dir = dest_dir[:-1]
    if dest_dir:
        # create directory in case not exist
        os.makedirs(dest_dir, exist_ok=True)
    else:
        # no dir provided - default to current dir
        dest_dir = '.'
    transfer = S3Transfer(boto3.client('s3', region))
    for name in list_by_prefix(bucket_name, region, prefix):
        if name.endswith(suffix):
            # remove any path from the file name
            fname = name.split('/').pop()
            # download the file
            transfer.download_file(bucket_name, name, dest_dir + '/' + fname)
| 34.827225 | 96 | 0.637853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,089 | 0.464372 |
508f157fb5ef3d702783d7357c19874b1334a4e9 | 7,597 | py | Python | bot/exts/info/codeblock/_instructions.py | zwycl/bot | 862fb070a501ca45cccb481d62f079b3bdb1d16f | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-12-22T09:13:09.000Z | 2020-12-22T09:13:09.000Z | bot/exts/info/codeblock/_instructions.py | Ezamey/bot | 0b7a1be1dd57a464fc79fcb235b79d75bec43f99 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | bot/exts/info/codeblock/_instructions.py | Ezamey/bot | 0b7a1be1dd57a464fc79fcb235b79d75bec43f99 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """This module generates and formats instructional messages about fixing Markdown code blocks."""
import logging
from typing import Optional
from bot.exts.info.codeblock import _parsing
log = logging.getLogger(__name__)
_EXAMPLE_PY = "{lang}\nprint('Hello, world!')" # Make sure to escape any Markdown symbols here.
_EXAMPLE_CODE_BLOCKS = (
"\\`\\`\\`{content}\n\\`\\`\\`\n\n"
"**This will result in the following:**\n"
"```{content}```"
)
def _get_example(language: str) -> str:
    """Return an example of a correct code block using `language` for syntax highlighting."""
    if language.lower() in _parsing.PY_LANG_CODES:
        # Python specifier: show a real runnable snippet.
        log.trace(f"Code block has a Python language specifier `{language}`.")
        return _EXAMPLE_CODE_BLOCKS.format(content=_EXAMPLE_PY.format(lang=language))

    if language:
        log.trace(f"Code block has a foreign language specifier `{language}`.")
        # It's not feasible to determine what would be a valid example for other languages.
        return _EXAMPLE_CODE_BLOCKS.format(content=f"{language}\n...")

    log.trace("Code block has no language specifier.")
    return _EXAMPLE_CODE_BLOCKS.format(content="\nHello, world!")
def _get_bad_ticks_message(code_block: _parsing.CodeBlock) -> Optional[str]:
    """Return instructions on using the correct ticks for `code_block`."""
    log.trace("Creating instructions for incorrect code block ticks.")
    # Escaped backticks so the example renders literally in Discord Markdown.
    valid_ticks = f"\\{_parsing.BACKTICK}" * 3
    instructions = (
        "It looks like you are trying to paste code into this channel.\n\n"
        "You seem to be using the wrong symbols to indicate where the code block should start. "
        f"The correct symbols would be {valid_ticks}, not `{code_block.tick * 3}`."
    )
    log.trace("Check if the bad ticks code block also has issues with the language specifier.")
    addition_msg = _get_bad_lang_message(code_block.content)
    # Only suggest adding a language when none was given at all.
    if not addition_msg and not code_block.language:
        addition_msg = _get_no_lang_message(code_block.content)
    # Combine the back ticks message with the language specifier message. The latter will
    # already have an example code block.
    if addition_msg:
        log.trace("Language specifier issue found; appending additional instructions.")
        # The first line has double newlines which are not desirable when appending the msg.
        addition_msg = addition_msg.replace("\n\n", " ", 1)
        # Make the first character of the addition lower case.
        instructions += "\n\nFurthermore, " + addition_msg[0].lower() + addition_msg[1:]
    else:
        log.trace("No issues with the language specifier found.")
        # No language issue: append our own example block instead.
        example_blocks = _get_example(code_block.language)
        instructions += f"\n\n**Here is an example of how it should look:**\n{example_blocks}"
    return instructions
def _get_no_ticks_message(content: str) -> Optional[str]:
    """If `content` is Python/REPL code, return instructions on using code blocks."""
    log.trace("Creating instructions for a missing code block.")

    if not _parsing.is_python_code(content):
        log.trace("Aborting missing code block instructions: content is not Python code.")
        return None

    example_blocks = _get_example("python")
    return (
        "It looks like you're trying to paste code into this channel.\n\n"
        "Discord has support for Markdown, which allows you to post code with full "
        "syntax highlighting. Please use these whenever you paste code, as this "
        "helps improve the legibility and makes it easier for us to help you.\n\n"
        f"**To do this, use the following method:**\n{example_blocks}"
    )
def _get_bad_lang_message(content: str) -> Optional[str]:
    """
    Return instructions on fixing the Python language specifier for a code block.

    If `code_block` does not have a Python language specifier, return None.
    If there's nothing wrong with the language specifier, return None.
    """
    log.trace("Creating instructions for a poorly specified language.")
    info = _parsing.parse_bad_language(content)
    if not info:
        log.trace("Aborting bad language instructions: language specified isn't Python.")
        return
    # Accumulate one sentence per detected problem.
    lines = []
    language = info.language
    if info.has_leading_spaces:
        log.trace("Language specifier was preceded by a space.")
        lines.append(f"Make sure there are no spaces between the back ticks and `{language}`.")
    if not info.has_terminal_newline:
        log.trace("Language specifier was not followed by a newline.")
        lines.append(
            f"Make sure you put your code on a new line following `{language}`. "
            f"There must not be any spaces after `{language}`."
        )
    if lines:
        # NOTE: rebinds `lines` from a list of sentences to one joined string.
        lines = " ".join(lines)
        example_blocks = _get_example(language)
        # Note that _get_bad_ticks_message expects the first line to have two newlines.
        return (
            f"It looks like you incorrectly specified a language for your code block.\n\n{lines}"
            f"\n\n**Here is an example of how it should look:**\n{example_blocks}"
        )
    else:
        log.trace("Nothing wrong with the language specifier; no instructions to return.")
def _get_no_lang_message(content: str) -> Optional[str]:
    """
    Return instructions on specifying a language for a code block.

    If `content` is not valid Python or Python REPL code, return None.
    """
    log.trace("Creating instructions for a missing language.")

    if not _parsing.is_python_code(content):
        log.trace("Aborting missing language instructions: content is not Python code.")
        return None

    example_blocks = _get_example("python")
    # Note that _get_bad_ticks_message expects the first line to have two newlines.
    return (
        "It looks like you pasted Python code without syntax highlighting.\n\n"
        "Please use syntax highlighting to improve the legibility of your code and make "
        "it easier for us to help you.\n\n"
        f"**To do this, use the following method:**\n{example_blocks}"
    )
def get_instructions(content: str) -> Optional[str]:
    """
    Parse `content` and return code block formatting instructions if something is wrong.

    Return None if `content` lacks code block formatting issues.
    """
    log.trace("Getting formatting instructions.")
    blocks = _parsing.find_code_blocks(content)
    if blocks is None:
        # None signals that a valid code block exists - nothing to correct.
        log.trace("At least one valid code block found; no instructions to return.")
        return
    if not blocks:
        log.trace("No code blocks were found in message.")
        instructions = _get_no_ticks_message(content)
    else:
        log.trace("Searching results for a code block with invalid ticks.")
        block = next((block for block in blocks if block.tick != _parsing.BACKTICK), None)
        if block:
            log.trace("A code block exists but has invalid ticks.")
            instructions = _get_bad_ticks_message(block)
        else:
            log.trace("A code block exists but is missing a language.")
            block = blocks[0]
            # Check for a bad language first to avoid parsing content into an AST.
            instructions = _get_bad_lang_message(block.content)
            if not instructions:
                instructions = _get_no_lang_message(block.content)
    if instructions:
        # Common footer: the user can fix the message without reposting.
        instructions += "\nYou can **edit your original message** to correct your code block."
    return instructions
| 41.064865 | 97 | 0.675925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,371 | 0.575359 |
508f3ea247a38d0be96d2163379720704f7d48b6 | 533 | py | Python | app/urls.py | julesc00/madmin | c02d8c61e96831270075aa827d3de9d53ff8048d | [
"MIT"
] | null | null | null | app/urls.py | julesc00/madmin | c02d8c61e96831270075aa827d3de9d53ff8048d | [
"MIT"
] | null | null | null | app/urls.py | julesc00/madmin | c02d8c61e96831270075aa827d3de9d53ff8048d | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL namespace used for reversing, e.g. {% url 'app:index' %}.
app_name = "app"

# Fix: stray dataset metadata was fused onto the closing bracket of this
# list, making the module syntactically invalid.
urlpatterns = [
    path('', views.index, name="index"),
    path('posts/', views.posts, name="posts"),
    path('categories/', views.categories, name="categories"),
    path('comments/', views.comments, name="comments"),
    path('users/', views.users, name="users"),
    path('test/', views.test, name="test"),
    path('login/', views.login, name="login"),
    path('logout/', views.logout, name="logout"),
    path('details/', views.details, name="details"),
]
5092dda4e656ef453bfc8239860a22fcd917e66f | 11,775 | py | Python | lib/python/cellranger/analysis/pca.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | 1 | 2019-03-29T04:05:58.000Z | 2019-03-29T04:05:58.000Z | lib/python/cellranger/analysis/pca.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | null | null | null | lib/python/cellranger/analysis/pca.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import cellranger.analysis.io as analysis_io
import cellranger.analysis.constants as analysis_constants
import cellranger.h5_constants as h5_constants
import cellranger.io as cr_io
import cellranger.analysis.stats as analysis_stats
import collections
from irlb import irlb
import numpy as np
import os
import tables
# The RUNPCA stage attempts to run the PCA at this threshold, and if that
# fails it reruns at zero. In the event thresholding prevents us from
# returning the requested number of components and we are at this threshold
# value, we throw an exception.
DEFAULT_RUNPCA_THRESHOLD = 2
from sklearn.utils import sparsefuncs
class MatrixRankTooSmallException(Exception):
    """Raised when the matrix rank is too small to return the requested number of PCA components."""
    pass
# Result container for a PCA run (populated and returned by run_pca).
PCA = collections.namedtuple('PCA', ['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion', 'features_selected'])
def get_original_columns_used(cols_not_removed, cols_used_after_removal):
    """Map column indices through two successive subsetting operations.

    If a matrix was first subset to the columns indexed by ``cols_not_removed``
    and then further subset to ``cols_used_after_removal`` (in that order),
    return the indices in the original matrix that correspond to the columns
    of the final matrix.
    """
    original_indices = []
    for index in cols_used_after_removal:
        original_indices.append(cols_not_removed[index])
    return original_indices
def run_pca(matrix, pca_features=None, pca_bcs=None, n_pca_components=None, random_state=None, min_count_threshold=0):
    """ Run a PCA on the matrix using the IRLBA matrix factorization algorithm. Prior to the PCA analysis, the
    matrix is modified so that all barcodes/columns have the same counts, and then the counts are transformed
    by a log2(1+X) operation.
    If desired, only a subset of features (e.g. sample rows) can be selected for PCA analysis. Each feature is ranked
    by its dispersion relative to other features that have a similar mean count. The top `pca_features` as ranked by
    this method will then be used for the PCA.
    One can also select to subset number of barcodes to use (e.g. sample columns), but in this case they are simply
    randomly sampled.
    Args:
        matrix (CountMatrix): The matrix to perform PCA on.
        pca_features (int): Number of features to subset from matrix and use in PCA. The top pca_features ranked by
                            dispersion are used
        pca_bcs (int): Number of barcodes to randomly sample for the matrix.
        n_pca_components (int): How many PCA components should be used.
        random_state (int): The seed for the RNG
        min_count_threshold (int): The minimum sum of each row/column for that row/column to be passed to PCA
        (this filter is prior to any subsetting that occurs).
    Returns:
        A PCA object
    """
    if random_state is None:
        random_state=analysis_constants.RANDOM_STATE
    # NOTE(review): the global NumPy seed is fixed to 0 here, so the barcode
    # subsampling below ignores random_state; random_state is only handed to
    # irlb().  Confirm this is intentional.
    np.random.seed(0)
    # Threshold the rows/columns of matrix, will throw error if an empty matrix results.
    thresholded_matrix, _, thresholded_features = matrix.select_axes_above_threshold(min_count_threshold)
    # If requested, we can subsample some of the barcodes to get a smaller matrix for PCA
    pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
    if pca_bcs is None:
        pca_bcs = thresholded_matrix.bcs_dim
        pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
    elif pca_bcs < thresholded_matrix.bcs_dim:
        pca_bc_indices = np.sort(np.random.choice(np.arange(thresholded_matrix.bcs_dim), size=pca_bcs, replace=False))
    elif pca_bcs > thresholded_matrix.bcs_dim:
        msg = ("You requested {} barcodes but the matrix after thresholding only "
               "included {}, so the smaller amount is being used.").format(pca_bcs, thresholded_matrix.bcs_dim)
        print(msg)
        pca_bcs = thresholded_matrix.bcs_dim
        pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
    # If requested, select fewer features to use by selecting the features with highest normalized dispersion
    if pca_features is None:
        pca_features = thresholded_matrix.features_dim
    elif pca_features > thresholded_matrix.features_dim:
        msg = ("You requested {} features but the matrix after thresholding only included {} features,"
               "so the smaller amount is being used.").format(pca_features, thresholded_matrix.features_dim)
        print(msg)
        pca_features = thresholded_matrix.features_dim
    # Calc mean and variance of counts after normalizing
    # But don't transform to log space, in order to preserve the mean-variance relationship
    m = analysis_stats.normalize_by_umi(thresholded_matrix)
    # Get mean and variance of rows
    (mu, var) = analysis_stats.summarize_columns(m.T)
    dispersion = analysis_stats.get_normalized_dispersion(mu.squeeze(), var.squeeze()) # TODO set number of bins?
    # Indices (into the thresholded matrix) of the top-dispersion features.
    pca_feature_indices = np.argsort(dispersion)[-pca_features:]
    # Now determine how many components.
    if n_pca_components is None:
        n_pca_components = analysis_constants.PCA_N_COMPONENTS_DEFAULT
    likely_matrix_rank = min(pca_features, pca_bcs)
    if likely_matrix_rank < n_pca_components:
        if min_count_threshold == DEFAULT_RUNPCA_THRESHOLD:
            # Kick back to run_pca stage so it can retry with no threshold, this is for historical reasons
            raise MatrixRankTooSmallException()
        else:
            print(("There are fewer nonzero features or barcodes ({}) than requested "
                   "PCA components ({}); reducing the number of components.").format(likely_matrix_rank, n_pca_components))
            n_pca_components = likely_matrix_rank
    if (likely_matrix_rank * 0.5) <= float(n_pca_components):
        print("Requested number of PCA components is large relative to the matrix size, an exact approach to matrix factorization may be faster.")
    # Note, after subsetting it is possible some rows/cols in pca_mat have counts below the threshold.
    # However, we are not performing a second thresholding as in practice subsetting is not used and we explain
    # that thresholding occurs prior to subsetting in the doc string.
    pca_mat = thresholded_matrix.select_barcodes(pca_bc_indices).select_features(pca_feature_indices)
    (pca_norm_mat, pca_center, pca_scale) = normalize_and_transpose(pca_mat)
    # irlb returns (u, d, v, ...); only the singular values d and the right
    # singular vectors v are used below.
    (u, d, v, _, _) = irlb(pca_norm_mat, n_pca_components, center=pca_center.squeeze(), scale=pca_scale.squeeze(), random_state=random_state)
    # make sure to project the matrix before centering, to avoid densification
    (full_norm_mat, full_center, full_scale) = normalize_and_transpose(matrix)
    sparsefuncs.inplace_column_scale(full_norm_mat, 1 / full_scale.squeeze()) # can have some zeros here
    # Get a coordinate map so we know which columns in the old matrix correspond to columns in the new
    org_cols_used = get_original_columns_used(thresholded_features, pca_feature_indices)
    transformed_irlba_matrix = full_norm_mat[:,org_cols_used].dot(v) - (full_center / full_scale)[:,org_cols_used].dot(v)
    irlba_components = np.zeros((n_pca_components, matrix.features_dim))
    irlba_components[:,org_cols_used] = v.T
    # calc proportion of variance explained
    variance_sum = len(pca_feature_indices) # each feature has variance=1, mean=0 after normalization
    variance_explained = np.square(d)/((len(pca_bc_indices)-1) * variance_sum)
    features_selected = np.array([f.id for f in matrix.feature_ref.feature_defs])[org_cols_used]
    # Now project back up the dispersion to return.
    full_dispersion = np.empty(matrix.features_dim)
    full_dispersion[:] = np.nan
    full_dispersion[thresholded_features] = dispersion
    # sanity check dimensions
    assert transformed_irlba_matrix.shape == (matrix.bcs_dim, n_pca_components)
    assert irlba_components.shape == (n_pca_components, matrix.features_dim)
    assert variance_explained.shape == (n_pca_components,)
    return PCA(transformed_irlba_matrix, irlba_components, variance_explained, full_dispersion, features_selected)
def normalize_and_transpose(matrix):
    # Normalize counts per barcode, log2(1+x)-transform, transpose to
    # (barcodes x features), and compute per-column centering (mean) and
    # scaling (stdev) vectors for the subsequent factorization.
    # Returns the tuple (normalized_transposed_matrix, center, scale).
    #
    # NOTE(review): the return value of tocsc() is discarded here; for scipy
    # sparse matrices tocsc() is not in-place, so this line may be a no-op
    # unless CountMatrix.tocsc() mutates internally -- confirm.
    matrix.tocsc()
    m = analysis_stats.normalize_by_umi(matrix)
    # Use log counts
    m.data = np.log2(1 + m.data)
    # Transpose
    m = m.T
    # compute centering (mean) and scaling (stdev)
    (c,v) = analysis_stats.summarize_columns(m)
    # TODO: Inputs to this function shouldn't have zero variance columns
    v[np.where(v == 0.0)] = 1.0
    s = np.sqrt(v)
    return (m, c, s)
def get_irlb_mem_gb_from_matrix_dim(nonzero_entries):
    """Estimate the memory (in GB) required to run IRLBA on a matrix with
    the given number of nonzero entries, applying the configured base cost,
    floor and safety multiplier."""
    scaled_entries = 1.0 * nonzero_entries / analysis_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB
    irlba_mem_gb = round(np.ceil(scaled_entries)) + analysis_constants.IRLB_BASE_MEM_GB
    if irlba_mem_gb < h5_constants.MIN_MEM_GB:
        irlba_mem_gb = h5_constants.MIN_MEM_GB
    return h5_constants.MATRIX_MEM_GB_MULTIPLIER * irlba_mem_gb
def save_pca_csv(pca_map, matrix, base_dir):
    # Convenience wrapper: pull the barcodes and feature definitions off the
    # matrix and delegate to save_pca_csv_with_bc_feature.
    save_pca_csv_with_bc_feature(pca_map, matrix.bcs, matrix.feature_ref.feature_defs, base_dir)
def save_pca_csv_with_bc_feature(pca_map, barcodes, features, base_dir):
    """Write one CSV directory per PCA result in pca_map.

    For each n_components entry, writes projection.csv and (when present)
    components.csv, variance.csv, dispersion.csv and features_selected.csv
    under <base_dir>/<n>_components/.
    """
    for n_components, pca in pca_map.iteritems():
        n_components_dir = os.path.join(base_dir, '%d_components' % n_components)
        cr_io.makedirs(n_components_dir, allow_existing=True)
        matrix_fn = os.path.join(n_components_dir, 'projection.csv')
        n_columns = pca.transformed_pca_matrix.shape[1]
        assert n_columns <= n_components
        matrix_header = ['Barcode'] + ['PC-%d' % (i+1) for i in xrange(n_columns)]
        analysis_io.save_matrix_csv(matrix_fn, pca.transformed_pca_matrix, matrix_header,
                                    barcodes)
        # FBPCA presently provides 0-sized entries for the following PCA() member variables.
        # This allows us to distinguish FBPCA from IRLBA, and also avoids weird empty files.
        if pca.components.size > 0:
            components_fn = os.path.join(n_components_dir, 'components.csv')
            components_header = ['PC'] + [f.id for f in features]
            analysis_io.save_matrix_csv(components_fn, pca.components, components_header,
                                        range(1, n_components+1))
        if pca.variance_explained.size > 0:
            variance_fn = os.path.join(n_components_dir, 'variance.csv')
            variance_header = ['PC','Proportion.Variance.Explained']
            analysis_io.save_matrix_csv(variance_fn, pca.variance_explained, variance_header,
                                        range(1, n_components+1))
        if pca.dispersion.size > 0:
            dispersion_fn = os.path.join(n_components_dir, 'dispersion.csv')
            dispersion_header = ['Feature','Normalized.Dispersion']
            analysis_io.save_matrix_csv(dispersion_fn, pca.dispersion, dispersion_header,
                                        [f.id for f in features])
        if pca.features_selected.size > 0:
            features_fn = os.path.join(n_components_dir, 'features_selected.csv')
            # TODO: there are two columns here, but only 1 entry in the header...BAD
            features_header = ['Feature']
            analysis_io.save_matrix_csv(features_fn, pca.features_selected, features_header, range(1, len(pca.features_selected)+1))
def save_pca_h5(pca_map, f):
    """Write every PCA result in pca_map into the analysis h5 file, one
    entry per component count, under the PCA analysis group."""
    pca_group = f.create_group(f.root, analysis_constants.ANALYSIS_H5_PCA_GROUP)
    for num_components, pca_result in pca_map.iteritems():
        analysis_io.save_h5(f, pca_group, str(num_components), pca_result)
def load_pca_from_h5(filename):
    """Read only the PCA section out of an analysis h5 file.

    Returns the first (assumed only) PCA entry, or None when the PCA group
    is empty.
    """
    with tables.open_file(filename, 'r') as h5_file:
        pca_group = h5_file.root._v_groups[analysis_constants.ANALYSIS_H5_PCA_GROUP]
        # Take the first PCA object; multiples are not expected.
        return next((entry for _, entry in analysis_io.load_h5_iter(pca_group, PCA)), None)
| 52.802691 | 150 | 0.726369 | 54 | 0.004586 | 0 | 0 | 0 | 0 | 0 | 0 | 4,491 | 0.381401 |
5093de6bb237feb29997a009c2a055d96c61745b | 221 | py | Python | src/atc/etl/__init__.py | atc-net/atc-dataplatform | 4614efbbc9a5b2b4e063cc12211aa5c697cad115 | [
"MIT"
] | 6 | 2021-09-10T09:50:20.000Z | 2022-02-12T09:30:47.000Z | src/atc/etl/__init__.py | atc-net/atc-dataplatform | 4614efbbc9a5b2b4e063cc12211aa5c697cad115 | [
"MIT"
] | 40 | 2021-09-10T11:20:42.000Z | 2022-03-25T08:26:57.000Z | src/atc/etl/__init__.py | atc-net/atc-dataplatform | 4614efbbc9a5b2b4e063cc12211aa5c697cad115 | [
"MIT"
] | 2 | 2021-09-10T10:07:04.000Z | 2022-02-16T17:49:08.000Z | from .loader import Loader
from .extractor import Extractor
from .transformer import Transformer
from .orchestrator import Orchestrator
# Public API of the atc.etl package.
__all__ = [
    "Loader",
    "Extractor",
    "Transformer",
    "Orchestrator",
]
| 18.416667 | 38 | 0.728507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.208145 |
5096dbccfc3c601a9f1661b60c8a31eb3826dccd | 3,182 | py | Python | a.py | Planecrayon/DAT120_Oblig_9 | 7cb6a6e9443ed609d27177746f532ee09dcf32e4 | [
"MIT"
] | null | null | null | a.py | Planecrayon/DAT120_Oblig_9 | 7cb6a6e9443ed609d27177746f532ee09dcf32e4 | [
"MIT"
] | null | null | null | a.py | Planecrayon/DAT120_Oblig_9 | 7cb6a6e9443ed609d27177746f532ee09dcf32e4 | [
"MIT"
] | null | null | null | # Som del av et spørrespill skal du lage en klasse for flervalgspørsmål.
# Et flervalgspørsmål skal ha en spørsmålstekst, ei liste med svaralternativer
# (hvert svaralternativ er en tekststreng), og et tall som sier hvilket av
# svaralternativene som er korrekt.Klassen skal ha en __str__ metode som returnerer
# en streng som inneholder spørsmålsteksten og nummerte svaralternativer
# på et lett leselig format. Klassen skal ha en sjekk_svar metode som tar
# som parameter et heltall som representerer hvilket svar brukeren velger.
# Sjekk_svar metoden skal sjekke om svaret brukeren har oppgitt er korrekt.
class Question:
    """A two-player multiple-choice quiz question.

    Attributes:
        question: the question text.
        alt: list of answer alternatives (strings).
        correct: 0-based index into ``alt`` of the correct alternative.
        poeng_1, poeng_2: running scores for player 1 and player 2.
    """

    def __init__(self, question: str, correct: int, alt: list):
        self.question = question
        self.alt = alt
        # Coerce to int so questions read from file (where every field is a
        # string) compare correctly in sjekk_svar and index correctly into alt.
        self.correct = int(correct)
        # Bug fix: ask() increments these counters, but they were never
        # initialized, raising AttributeError on the first correct answer.
        self.poeng_1 = 0
        self.poeng_2 = 0

    def sjekk_svar(self, answer):
        """Return True if the 1-based *answer* picks the correct alternative."""
        # -1 because the displayed alternatives are numbered from 1 while
        # self.correct is a 0-based index.
        return answer - 1 == self.correct

    def korrekt_svar_tekst(self):
        """Print the correct alternative."""
        print(f'Rett svar er: {self.alt[self.correct]} \n')

    def ask(self):
        """Show the question, read one answer per player, score and reveal."""
        print(self)  # uses __str__: question plus numbered alternatives
        inn_1 = input('Svar frå spiller 1: ')
        inn_2 = input('Svar frå spiller 2: ')
        try:
            correct_1 = self.sjekk_svar(int(inn_1))
        except ValueError:
            # Non-numeric input counts as a wrong answer.
            correct_1 = False
        try:
            correct_2 = self.sjekk_svar(int(inn_2))
        except ValueError:
            correct_2 = False
        print('Spiller 1: ' + ('Riktig svar!' if correct_1 else 'Feil svar'))
        if correct_1:
            self.poeng_1 += 1
        print('Spiller 2: ' + ('Riktig svar!' if correct_2 else 'Feil svar'))
        if correct_2:
            self.poeng_2 += 1
        self.korrekt_svar_tekst()

    def __str__(self):
        """Question text followed by the numbered alternatives, one per line."""
        alternatives = ''
        for i, alternativ in enumerate(self.alt, start=1):
            alternatives += f'{i}. {alternativ} \n'
        return f'{self.question} \n{alternatives}'
def les_fil():
    """Read 'sporsmaalsfil.txt' and build one Question per line.

    Each line has the format: question:correct_index:[alt1, alt2, ...]
    """
    questions = []
    with open('sporsmaalsfil.txt') as question_file:
        for line in question_file:
            fields = line.split(':')
            # The alternatives field looks like "[a, b, c]":
            # strip the brackets, then split on ", ".
            fields[2] = fields[2].strip()[1:-1].split(', ')
            questions.append(Question(fields[0], fields[1], fields[2]))
        return questions
if __name__ == "__main__":
    # Read all questions from file and play through them in order.
    sporsmaal_liste = les_fil()
    for a in sporsmaal_liste:
        # Bug fix: ask() returns None, so the original print(a.ask())
        # printed a stray "None" after every question.
        a.ask()
| 39.775 | 184 | 0.664676 | 2,012 | 0.627378 | 0 | 0 | 0 | 0 | 0 | 0 | 1,659 | 0.517306 |
5096fa7b9ba14d4d27ff9de996781aa68a5007aa | 4,047 | py | Python | py/abd/abdcmd_instaweb.py | valhallasw/phabricator-tools | 397406485901ce3bd26e1aee122e652539f99f98 | [
"Apache-2.0"
] | null | null | null | py/abd/abdcmd_instaweb.py | valhallasw/phabricator-tools | 397406485901ce3bd26e1aee122e652539f99f98 | [
"Apache-2.0"
] | null | null | null | py/abd/abdcmd_instaweb.py | valhallasw/phabricator-tools | 397406485901ce3bd26e1aee122e652539f99f98 | [
"Apache-2.0"
] | null | null | null | """Start a local webserver to report the status of an arcyd instance."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_instaweb
#
# Public Functions:
# getFromfilePrefixChars
# setupParser
# process
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import BaseHTTPServer
import os
import abdcmd_arcydstatushtml
import abdcmd_repostatushtml
def getFromfilePrefixChars():
    # This command does not support reading arguments from file; returning
    # None disables argparse's fromfile prefix handling.
    return None
def setupParser(parser):
    """Register the instaweb command-line options on the supplied parser."""
    parser.add_argument(
        '--port', metavar="PORT", type=int, default=8000,
        help="port to serve pages on")
    parser.add_argument(
        '--report-file', metavar="REPORTFILE", type=str, required=True,
        help="path to the arcyd report file to render")
    parser.add_argument(
        '--repo-file-dir', metavar="REPOFILEDIR", type=str, required=True,
        help="path to the repo files to render")
class _NotFoundError(Exception):
    # Raised internally when a request path cannot be served; translated into
    # an HTTP 404 response by _RequestHandler.do_GET.
    pass
class _RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves rendered arcyd status pages over HTTP.

    '/' renders the overall arcyd status page; any other path is treated as a
    repository name and rendered from that repo's report files.
    """
    def __init__(self, instaweb_args, *args):
        # instaweb_args: parsed command-line namespace (report_file, repo_file_dir).
        self._instaweb_args = instaweb_args
        self.path = None # for pychecker
        self.wfile = None # for pychecker
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)
    def do_GET(self):
        # Render the requested page; _NotFoundError becomes an HTTP 404.
        try:
            content = self._get_content()
        except _NotFoundError:
            self.send_response(404)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write("<html><body><h1>404</h1></body></html>")
            self.wfile.close()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(content)
            self.wfile.close()
    def _get_content(self):
        # Map the request path to rendered HTML; raises _NotFoundError for
        # paths we refuse to serve (currently only the favicon).
        args = self._instaweb_args
        if self.path == '/':
            content = abdcmd_arcydstatushtml.render_content(
                args.report_file, '')
        elif self.path.lower().endswith('favicon.ico'):
            raise _NotFoundError('could not find favicon')
        else:
            relative_path = self.path.lstrip('/')
            dir_path = os.path.join(args.repo_file_dir, relative_path)
            # XXX: this is fragile, will go away once arcyd folder
            # layout is standardized
            repo_path = dir_path + '.try'
            branches_path = dir_path + '.ok'
            content = abdcmd_repostatushtml.render_content(
                repo_path, branches_path)
        return content
def _request_handler_factory(instaweb_args):
    """Return a handler factory that binds instaweb_args into every
    _RequestHandler the HTTP server constructs."""
    def make_handler(*handler_args):
        return _RequestHandler(instaweb_args, *handler_args)
    return make_handler
def process(args):
    """Run the instaweb command: serve status pages over HTTP.

    Blocks serving requests indefinitely; does not return under normal
    operation.  *args* is the parsed argparse namespace produced by
    setupParser (port, report_file, repo_file_dir).
    """
    # start a webserver
    server_address = ('', args.port)  # '' binds to all available interfaces
    factory = _request_handler_factory(args)
    httpd = BaseHTTPServer.HTTPServer(server_address, factory)
    httpd.serve_forever()
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| 29.757353 | 79 | 0.57277 | 1,658 | 0.409686 | 0 | 0 | 0 | 0 | 0 | 0 | 1,734 | 0.428466 |
5097201247dfae21276abab2938bc5634f746acc | 23,328 | py | Python | instana/span.py | tirkarthi/python-sensor | 9872d146ac00baff2673fde5ba97fdbe596869a4 | [
"MIT"
] | 2 | 2019-12-02T10:08:58.000Z | 2020-10-04T09:34:20.000Z | instana/span.py | tirkarthi/python-sensor | 9872d146ac00baff2673fde5ba97fdbe596869a4 | [
"MIT"
] | 2 | 2020-06-09T12:24:54.000Z | 2021-03-15T12:49:15.000Z | instana/span.py | takeaway/python-sensor | 52d6eaa2d6a8e625201bad36ac2448201c4bd63d | [
"MIT"
] | null | null | null | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2017
"""
This module contains the classes that represents spans.
InstanaSpan - the OpenTracing based span used during tracing
When an InstanaSpan is finished, it is converted into either an SDKSpan
or RegisteredSpan depending on type.
BaseSpan: Base class containing the commonalities for the two descendants
- SDKSpan: Class that represents an SDK type span
- RegisteredSpan: Class that represents a Registered type span
"""
import six
from basictracer.span import BasicSpan
import opentracing.ext.tags as ot_tags
from .log import logger
from .util import DictionaryOfStan
class InstanaSpan(BasicSpan):
    """The OpenTracing-based span used while tracing is in flight.

    Finished spans are converted into an SDKSpan or RegisteredSpan for
    reporting.
    """
    stack = None
    synthetic = False

    # Maps an operation name to the tag that carries its error message in
    # log_exception; operations absent here fall back to http.error or log_kv.
    _ERROR_TAG_BY_OPERATION = {
        'rpc-server': 'rpc.error',
        'rpc-client': 'rpc.error',
        'mysql': 'mysql.error',
        'postgres': 'pg.error',
        'celery-client': 'error',
        'celery-worker': 'error',
        'sqlalchemy': 'sqlalchemy.err',
    }

    def mark_as_errored(self, tags=None):
        """
        Mark this span as errored.
        @param tags: optional tags to add to the span
        """
        try:
            error_count = self.tags.get('ec', 0)
            self.set_tag('ec', error_count + 1)
            if tags is not None and isinstance(tags, dict):
                for tag_name in tags:
                    self.set_tag(tag_name, tags[tag_name])
        except Exception:
            logger.debug('span.mark_as_errored', exc_info=True)

    def assure_errored(self):
        """
        Make sure that this span is marked as errored.
        @return: None
        """
        try:
            error_count = self.tags.get('ec', None)
            if error_count is None or error_count == 0:
                self.set_tag('ec', 1)
        except Exception:
            logger.debug('span.assure_errored', exc_info=True)

    def log_exception(self, exc):
        """
        Log an exception onto this span.  Records pertinent info from the
        exception and assures that this span is marked as errored.
        @param exc: the exception to log
        """
        try:
            self.mark_as_errored()
            # Derive the most useful message available from the exception.
            if hasattr(exc, '__str__') and len(str(exc)) > 0:
                message = str(exc)
            elif hasattr(exc, 'message') and exc.message is not None:
                message = exc.message
            else:
                message = repr(exc)
            operation = self.operation_name
            error_tag = self._ERROR_TAG_BY_OPERATION.get(operation)
            if error_tag is not None:
                self.set_tag(error_tag, message)
            elif operation in RegisteredSpan.HTTP_SPANS:
                self.set_tag('http.error', message)
            else:
                self.log_kv({'message': message})
        except Exception:
            logger.debug("span.log_exception", exc_info=True)
            raise
class BaseSpan(object):
    """Common base for the serializable span formats reported to the agent."""
    sy = None

    def __str__(self):
        return "BaseSpan(%s)" % self.__dict__.__str__()

    def __repr__(self):
        return self.__dict__.__str__()

    def __init__(self, span, source, service_name, **kwargs):
        # pylint: disable=invalid-name
        # Trace identifiers.
        self.t = span.context.trace_id
        self.p = span.parent_id
        self.s = span.context.span_id
        # Timestamp and duration in milliseconds.
        self.ts = int(round(span.start_time * 1000))
        self.d = int(round(span.duration * 1000))
        self.f = source
        # Error count, if the span recorded one.
        self.ec = span.tags.pop('ec', None)
        self.data = DictionaryOfStan()
        self.stack = span.stack
        if span.synthetic is True:
            self.sy = span.synthetic
        self.__dict__.update(kwargs)

    def _validate_tags(self, tags):
        """Filter *tags* down to the entries whose key and value both pass
        validation.

        :param tags: dict of tags
        :return: dict - a filtered set of tags
        """
        accepted = DictionaryOfStan()
        for raw_key, raw_value in tags.items():
            key, value = self._validate_tag(raw_key, raw_value)
            if key is not None and value is not None:
                accepted[key] = value
        return accepted

    def _validate_tag(self, key, value):
        """Validate a single tag pair.

        Returns (key, value) with the key truncated and the value converted
        as needed, or (None, None) when the pair cannot be made usable.
        """
        try:
            # Tag keys must be some type of text or string type.
            if not isinstance(key, (six.text_type, six.string_types)):
                logger.debug("(non-fatal) tag names must be strings. tag discarded for %s", type(key))
                return (None, None)
            truncated_key = key[0:1024]  # Max key length of 1024 characters
            if isinstance(value, (bool, float, int, list, dict, six.text_type, six.string_types)):
                return (truncated_key, value)
            return (truncated_key, self._convert_tag_value(value))
        except Exception:
            logger.debug("instana.span._validate_tag: ", exc_info=True)
            return (None, None)

    def _convert_tag_value(self, value):
        """Fall back to repr() for values of unsupported types.

        Returns the repr string, or None when even repr() fails.
        """
        try:
            return repr(value)
        except Exception:
            logger.debug("(non-fatal) span.set_tag: values must be one of these types: bool, float, int, list, "
                         "set, str or alternatively support 'repr'. tag discarded", exc_info=True)
            return None
class SDKSpan(BaseSpan):
    """Serializable form of a custom (SDK type) span."""
    ENTRY_KIND = ["entry", "server", "consumer"]
    EXIT_KIND = ["exit", "client", "producer"]

    def __init__(self, span, source, service_name, **kwargs):
        super(SDKSpan, self).__init__(span, source, service_name, **kwargs)
        kind_type, kind_value = self.get_span_kind(span)
        self.n = "sdk"
        self.k = kind_value
        # Only entry spans carry the service name at the top level.
        if self.k == 1 and service_name is not None:
            self.data["service"] = service_name
        sdk_data = self.data["sdk"]
        sdk_data["name"] = span.operation_name
        sdk_data["type"] = kind_type
        sdk_data["custom"]["tags"] = self._validate_tags(span.tags)
        if span.logs:
            validated_logs = DictionaryOfStan()
            for log_entry in span.logs:
                entry_values = self._validate_tags(log_entry.key_values)
                if len(entry_values.keys()) > 0:
                    validated_logs[repr(log_entry.timestamp)] = entry_values
            sdk_data["custom"]["logs"] = validated_logs
        if "arguments" in span.tags:
            sdk_data['arguments'] = span.tags["arguments"]
        if "return" in span.tags:
            sdk_data['return'] = span.tags["return"]
        if len(span.context.baggage) > 0:
            self.data["baggage"] = span.context.baggage

    def get_span_kind(self, span):
        """Map the span's 'span.kind' tag to its ("type", kind-int) pair for
        the Instana backend.

        Entry kinds map to ("entry", 1), exit kinds to ("exit", 2), and
        anything else (including a missing tag) to ("intermediate", 3).
        """
        kind_tag = span.tags.get("span.kind", None)
        if kind_tag in self.ENTRY_KIND:
            return ("entry", 1)
        if kind_tag in self.EXIT_KIND:
            return ("exit", 2)
        return ("intermediate", 3)
class RegisteredSpan(BaseSpan):
    """Serializable form of a span from natively-registered instrumentation."""
    HTTP_SPANS = ("aiohttp-client", "aiohttp-server", "django", "http", "soap", "tornado-client",
                  "tornado-server", "urllib3", "wsgi")

    EXIT_SPANS = ("aiohttp-client", "boto3", "cassandra", "celery-client", "couchbase", "log", "memcache",
                  "mongo", "mysql", "postgres", "rabbitmq", "redis", "rpc-client", "sqlalchemy",
                  "soap", "tornado-client", "urllib3", "pymongo", "gcs", "gcps-producer")

    ENTRY_SPANS = ("aiohttp-server", "aws.lambda.entry", "celery-worker", "django", "wsgi", "rabbitmq",
                   "rpc-server", "tornado-server", "gcps-consumer")

    # Bug fix: ("render") is just the parenthesized string "render", so the
    # membership tests in __init__ did substring matching (e.g. "end" in
    # LOCAL_SPANS was True).  A one-element tuple restores exact-name semantics.
    LOCAL_SPANS = ("render",)
    def __init__(self, span, source, service_name, **kwargs):
        """Build the registered-span payload from a finished InstanaSpan.

        Sets the span name (n), kind (k: 1=entry, 2=exit, 3=intermediate)
        and fills self.data from the span's tags based on operation name.
        """
        # pylint: disable=invalid-name
        super(RegisteredSpan, self).__init__(span, source, service_name, **kwargs)
        self.n = span.operation_name
        self.k = 1
        if span.operation_name in self.ENTRY_SPANS:
            # entry
            self._populate_entry_span_data(span)
            self.data["service"] = service_name
        elif span.operation_name in self.EXIT_SPANS:
            self.k = 2 # exit
            self._populate_exit_span_data(span)
        elif span.operation_name in self.LOCAL_SPANS:
            self.k = 3 # intermediate span
            self._populate_local_span_data(span)
        # rabbitmq appears in both ENTRY_SPANS and EXIT_SPANS; a publish is
        # always an exit regardless of which branch populated the data.
        if "rabbitmq" in self.data and self.data["rabbitmq"]["sort"] == "publish":
            self.k = 2 # exit
        # unify the span operation_name for gcps-producer and gcps-consumer
        if "gcps" in span.operation_name:
            self.n = 'gcps'
        # Store any leftover tags in the custom section
        if len(span.tags) > 0:
            self.data["custom"]["tags"] = self._validate_tags(span.tags)
    def _populate_entry_span_data(self, span):
        """Fill self.data for entry spans, popping consumed tags off the span."""
        if span.operation_name in self.HTTP_SPANS:
            self._collect_http_tags(span)
        elif span.operation_name == "aws.lambda.entry":
            self.data["lambda"]["arn"] = span.tags.pop('lambda.arn', "Unknown")
            self.data["lambda"]["alias"] = None
            self.data["lambda"]["runtime"] = "python"
            self.data["lambda"]["functionName"] = span.tags.pop('lambda.name', "Unknown")
            self.data["lambda"]["functionVersion"] = span.tags.pop('lambda.version', "Unknown")
            self.data["lambda"]["trigger"] = span.tags.pop('lambda.trigger', None)
            self.data["lambda"]["error"] = None
            # Trigger-specific payload, keyed off the trigger type just stored.
            trigger_type = self.data["lambda"]["trigger"]
            if trigger_type in ["aws:api.gateway", "aws:application.load.balancer"]:
                self._collect_http_tags(span)
            elif trigger_type == 'aws:cloudwatch.events':
                # NOTE(review): this tag key has a "data." prefix unlike its
                # siblings ('lambda.cw.events.more' etc.) -- confirm intended.
                self.data["lambda"]["cw"]["events"]["id"] = span.tags.pop('data.lambda.cw.events.id', None)
                self.data["lambda"]["cw"]["events"]["more"] = span.tags.pop('lambda.cw.events.more', False)
                self.data["lambda"]["cw"]["events"]["resources"] = span.tags.pop('lambda.cw.events.resources', None)
            elif trigger_type == 'aws:cloudwatch.logs':
                self.data["lambda"]["cw"]["logs"]["group"] = span.tags.pop('lambda.cw.logs.group', None)
                self.data["lambda"]["cw"]["logs"]["stream"] = span.tags.pop('lambda.cw.logs.stream', None)
                self.data["lambda"]["cw"]["logs"]["more"] = span.tags.pop('lambda.cw.logs.more', None)
                self.data["lambda"]["cw"]["logs"]["events"] = span.tags.pop('lambda.cw.logs.events', None)
            elif trigger_type == 'aws:s3':
                self.data["lambda"]["s3"]["events"] = span.tags.pop('lambda.s3.events', None)
            elif trigger_type == 'aws:sqs':
                self.data["lambda"]["sqs"]["messages"] = span.tags.pop('lambda.sqs.messages', None)
        elif span.operation_name == "celery-worker":
            self.data["celery"]["task"] = span.tags.pop('task', None)
            self.data["celery"]["task_id"] = span.tags.pop('task_id', None)
            self.data["celery"]["scheme"] = span.tags.pop('scheme', None)
            self.data["celery"]["host"] = span.tags.pop('host', None)
            self.data["celery"]["port"] = span.tags.pop('port', None)
            self.data["celery"]["retry-reason"] = span.tags.pop('retry-reason', None)
            self.data["celery"]["error"] = span.tags.pop('error', None)
        elif span.operation_name == "gcps-consumer":
            self.data["gcps"]["op"] = span.tags.pop('gcps.op', None)
            self.data["gcps"]["projid"] = span.tags.pop('gcps.projid', None)
            self.data["gcps"]["sub"] = span.tags.pop('gcps.sub', None)
        elif span.operation_name == "rabbitmq":
            self.data["rabbitmq"]["exchange"] = span.tags.pop('exchange', None)
            self.data["rabbitmq"]["queue"] = span.tags.pop('queue', None)
            self.data["rabbitmq"]["sort"] = span.tags.pop('sort', None)
            self.data["rabbitmq"]["address"] = span.tags.pop('address', None)
            self.data["rabbitmq"]["key"] = span.tags.pop('key', None)
        elif span.operation_name == "rpc-server":
            self.data["rpc"]["flavor"] = span.tags.pop('rpc.flavor', None)
            self.data["rpc"]["host"] = span.tags.pop('rpc.host', None)
            self.data["rpc"]["port"] = span.tags.pop('rpc.port', None)
            self.data["rpc"]["call"] = span.tags.pop('rpc.call', None)
            self.data["rpc"]["call_type"] = span.tags.pop('rpc.call_type', None)
            self.data["rpc"]["params"] = span.tags.pop('rpc.params', None)
            self.data["rpc"]["baggage"] = span.tags.pop('rpc.baggage', None)
            self.data["rpc"]["error"] = span.tags.pop('rpc.error', None)
        else:
            logger.debug("SpanRecorder: Unknown entry span: %s" % span.operation_name)
    def _populate_local_span_data(self, span):
        """Fill self.data for intermediate (local) spans; currently only 'render'."""
        if span.operation_name == "render":
            self.data["render"]["name"] = span.tags.pop('name', None)
            self.data["render"]["type"] = span.tags.pop('type', None)
            # NOTE(review): message/parameters are written to the "log" section
            # even though this is the render branch -- confirm this is intended
            # rather than a copy/paste from the log span handling.
            self.data["log"]["message"] = span.tags.pop('message', None)
            self.data["log"]["parameters"] = span.tags.pop('parameters', None)
        else:
            logger.debug("SpanRecorder: Unknown local span: %s" % span.operation_name)
    def _populate_exit_span_data(self, span):
        """Fill self.data for exit spans, popping consumed tags off the span."""
        if span.operation_name in self.HTTP_SPANS:
            self._collect_http_tags(span)
        elif span.operation_name == "boto3":
            # boto3 also sends http tags
            self._collect_http_tags(span)
            for tag in ['op', 'ep', 'reg', 'payload', 'error']:
                value = span.tags.pop(tag, None)
                if value is not None:
                    if tag == 'payload':
                        # Payload is a dict of tags itself and must be validated.
                        self.data["boto3"][tag] = self._validate_tags(value)
                    else:
                        self.data["boto3"][tag] = value
        elif span.operation_name == "cassandra":
            self.data["cassandra"]["cluster"] = span.tags.pop('cassandra.cluster', None)
            self.data["cassandra"]["query"] = span.tags.pop('cassandra.query', None)
            self.data["cassandra"]["keyspace"] = span.tags.pop('cassandra.keyspace', None)
            self.data["cassandra"]["fetchSize"] = span.tags.pop('cassandra.fetchSize', None)
            self.data["cassandra"]["achievedConsistency"] = span.tags.pop('cassandra.achievedConsistency', None)
            self.data["cassandra"]["triedHosts"] = span.tags.pop('cassandra.triedHosts', None)
            self.data["cassandra"]["fullyFetched"] = span.tags.pop('cassandra.fullyFetched', None)
            self.data["cassandra"]["error"] = span.tags.pop('cassandra.error', None)
        elif span.operation_name == "celery-client":
            self.data["celery"]["task"] = span.tags.pop('task', None)
            self.data["celery"]["task_id"] = span.tags.pop('task_id', None)
            self.data["celery"]["scheme"] = span.tags.pop('scheme', None)
            self.data["celery"]["host"] = span.tags.pop('host', None)
            self.data["celery"]["port"] = span.tags.pop('port', None)
            self.data["celery"]["error"] = span.tags.pop('error', None)
        elif span.operation_name == "couchbase":
            self.data["couchbase"]["hostname"] = span.tags.pop('couchbase.hostname', None)
            self.data["couchbase"]["bucket"] = span.tags.pop('couchbase.bucket', None)
            self.data["couchbase"]["type"] = span.tags.pop('couchbase.type', None)
            self.data["couchbase"]["error"] = span.tags.pop('couchbase.error', None)
            self.data["couchbase"]["error_type"] = span.tags.pop('couchbase.error_type', None)
            self.data["couchbase"]["sql"] = span.tags.pop('couchbase.sql', None)
        elif span.operation_name == "rabbitmq":
            self.data["rabbitmq"]["exchange"] = span.tags.pop('exchange', None)
            self.data["rabbitmq"]["queue"] = span.tags.pop('queue', None)
            self.data["rabbitmq"]["sort"] = span.tags.pop('sort', None)
            self.data["rabbitmq"]["address"] = span.tags.pop('address', None)
            self.data["rabbitmq"]["key"] = span.tags.pop('key', None)
        elif span.operation_name == "redis":
            self.data["redis"]["connection"] = span.tags.pop('connection', None)
            self.data["redis"]["driver"] = span.tags.pop('driver', None)
            self.data["redis"]["command"] = span.tags.pop('command', None)
            self.data["redis"]["error"] = span.tags.pop('redis.error', None)
            self.data["redis"]["subCommands"] = span.tags.pop('subCommands', None)
        elif span.operation_name == "rpc-client":
            self.data["rpc"]["flavor"] = span.tags.pop('rpc.flavor', None)
            self.data["rpc"]["host"] = span.tags.pop('rpc.host', None)
            self.data["rpc"]["port"] = span.tags.pop('rpc.port', None)
            self.data["rpc"]["call"] = span.tags.pop('rpc.call', None)
            self.data["rpc"]["call_type"] = span.tags.pop('rpc.call_type', None)
            self.data["rpc"]["params"] = span.tags.pop('rpc.params', None)
            self.data["rpc"]["baggage"] = span.tags.pop('rpc.baggage', None)
            self.data["rpc"]["error"] = span.tags.pop('rpc.error', None)
        elif span.operation_name == "sqlalchemy":
            self.data["sqlalchemy"]["sql"] = span.tags.pop('sqlalchemy.sql', None)
            self.data["sqlalchemy"]["eng"] = span.tags.pop('sqlalchemy.eng', None)
            self.data["sqlalchemy"]["url"] = span.tags.pop('sqlalchemy.url', None)
            self.data["sqlalchemy"]["err"] = span.tags.pop('sqlalchemy.err', None)
        elif span.operation_name == "mysql":
            self.data["mysql"]["host"] = span.tags.pop('host', None)
            self.data["mysql"]["port"] = span.tags.pop('port', None)
            self.data["mysql"]["db"] = span.tags.pop(ot_tags.DATABASE_INSTANCE, None)
            self.data["mysql"]["user"] = span.tags.pop(ot_tags.DATABASE_USER, None)
            self.data["mysql"]["stmt"] = span.tags.pop(ot_tags.DATABASE_STATEMENT, None)
            self.data["mysql"]["error"] = span.tags.pop('mysql.error', None)
        elif span.operation_name == "postgres":
            self.data["pg"]["host"] = span.tags.pop('host', None)
            self.data["pg"]["port"] = span.tags.pop('port', None)
            self.data["pg"]["db"] = span.tags.pop(ot_tags.DATABASE_INSTANCE, None)
            self.data["pg"]["user"] = span.tags.pop(ot_tags.DATABASE_USER, None)
            self.data["pg"]["stmt"] = span.tags.pop(ot_tags.DATABASE_STATEMENT, None)
            self.data["pg"]["error"] = span.tags.pop('pg.error', None)
        elif span.operation_name == "mongo":
            # service = "host:port", namespace = "db.collection" ("?" when absent).
            service = "%s:%s" % (span.tags.pop('host', None), span.tags.pop('port', None))
            namespace = "%s.%s" % (span.tags.pop('db', "?"), span.tags.pop('collection', "?"))
            self.data["mongo"]["service"] = service
            self.data["mongo"]["namespace"] = namespace
            self.data["mongo"]["command"] = span.tags.pop('command', None)
            self.data["mongo"]["filter"] = span.tags.pop('filter', None)
            self.data["mongo"]["json"] = span.tags.pop('json', None)
            self.data["mongo"]["error"] = span.tags.pop('error', None)
        elif span.operation_name == "gcs":
            # NOTE(review): 'gcs.op' is popped without a default and will raise
            # KeyError if the tag is missing -- confirm the tag is guaranteed.
            self.data["gcs"]["op"] = span.tags.pop('gcs.op')
            self.data["gcs"]["bucket"] = span.tags.pop('gcs.bucket', None)
            self.data["gcs"]["object"] = span.tags.pop('gcs.object', None)
            self.data["gcs"]["entity"] = span.tags.pop('gcs.entity', None)
            self.data["gcs"]["range"] = span.tags.pop('gcs.range', None)
            self.data["gcs"]["sourceBucket"] = span.tags.pop('gcs.sourceBucket', None)
            self.data["gcs"]["sourceObject"] = span.tags.pop('gcs.sourceObject', None)
            self.data["gcs"]["sourceObjects"] = span.tags.pop('gcs.sourceObjects', None)
            self.data["gcs"]["destinationBucket"] = span.tags.pop('gcs.destinationBucket', None)
            self.data["gcs"]["destinationObject"] = span.tags.pop('gcs.destinationObject', None)
            self.data["gcs"]["numberOfOperations"] = span.tags.pop('gcs.numberOfOperations', None)
            self.data["gcs"]["projectId"] = span.tags.pop('gcs.projectId', None)
            self.data["gcs"]["accessId"] = span.tags.pop('gcs.accessId', None)
        elif span.operation_name == "gcps-producer":
            self.data["gcps"]["op"] = span.tags.pop('gcps.op', None)
            self.data["gcps"]["projid"] = span.tags.pop('gcps.projid', None)
            self.data["gcps"]["top"] = span.tags.pop('gcps.top', None)
        elif span.operation_name == "log":
            # use last special key values
            for l in span.logs:
                if "message" in l.key_values:
                    self.data["log"]["message"] = l.key_values.pop("message", None)
                if "parameters" in l.key_values:
                    self.data["log"]["parameters"] = l.key_values.pop("parameters", None)
        else:
            logger.debug("SpanRecorder: Unknown exit span: %s" % span.operation_name)
def _collect_http_tags(self, span):
    """Move the standard HTTP tags off of ``span`` into ``self.data``.

    Standard tags are popped (removed) from ``span.tags``; whatever remains
    afterwards is scanned for a SOAP action and for custom HTTP headers.
    """
    http = self.data["http"]
    tag_map = (
        ("host", "http.host"),
        ("url", ot_tags.HTTP_URL),
        ("path", "http.path"),
        ("params", 'http.params'),
        ("method", ot_tags.HTTP_METHOD),
        ("status", ot_tags.HTTP_STATUS_CODE),
        ("path_tpl", "http.path_tpl"),
        ("error", 'http.error'),
    )
    for field, tag in tag_map:
        http[field] = span.tags.pop(tag, None)

    if not span.tags:
        return

    if span.operation_name == "soap":
        self.data["soap"]["action"] = span.tags.pop('soap.action', None)

    # anything prefixed "http.header." is a captured custom header; collect
    # the keys first so the dict is not mutated while being iterated
    prefix = "http.header."
    header_keys = [key for key in span.tags if key.startswith(prefix)]
    for key in header_keys:
        http["header"][key[len(prefix):]] = span.tags.pop(key)
| 46.656 | 116 | 0.578018 | 22,672 | 0.971879 | 0 | 0 | 0 | 0 | 0 | 0 | 7,686 | 0.329475 |
5098655d84e342408ca02ab0c8f4527b84ed0fab | 2,787 | py | Python | code/import_iworx.py | StolkArjen/human-interaction | 0559a58ea8131f61ed661c093045b34284568bc4 | [
"Apache-2.0"
] | 1 | 2022-01-10T02:18:45.000Z | 2022-01-10T02:18:45.000Z | code/import_iworx.py | StolkArjen/human-interaction | d87badf7db20a50db058ca714f3d77664111100b | [
"Apache-2.0"
] | null | null | null | code/import_iworx.py | StolkArjen/human-interaction | d87badf7db20a50db058ca714f3d77664111100b | [
"Apache-2.0"
] | 1 | 2022-02-14T15:28:48.000Z | 2022-02-14T15:28:48.000Z | #!/usr/bin/env python
"""
--------------------------------------------------------
IMPORT_IWORX reads and converts various IWORX datafiles into a
FieldTrip-type data structure.
Use as
data, event = import_iworx(filename)
where the filename should point to a .mat or .txt datafile.
data has the following nested fields:
.trial
.time
.label
event has the following nested fields:
.type
.sample
.value
Copyright (C) 2022, Arjen Stolk
--------------------------------------------------------
"""
import os
import scipy.io
def import_iworx(filename):
    """Read an iWorx recording and return FieldTrip-style data structures.

    Parameters
    ----------
    filename : str
        Path to either the ``<name>.mat`` data file, the ``<name>.txt``
        header file, or the ``<name>_MarksData.txt`` marker file of one
        recording; the sibling files are derived from it automatically.

    Returns
    -------
    data : object with ``trial`` (list of channel-by-sample arrays),
        ``time`` (list of time axes) and ``label`` (channel names).
    event : object with parallel ``type``, ``sample`` and ``value`` lists.

    Raises
    ------
    ValueError
        If the filename does not end in ``.mat`` or ``.txt``.  (Previously
        this case only printed a warning and then crashed with a NameError.)
    """
    path = os.path.split(filename)[0]          # directory part
    name = os.path.split(filename)[-1][:-4]    # base name without extension
    ext = os.path.splitext(filename)[-1]       # '.mat' or '.txt'
    if ext not in (".mat", ".txt"):
        raise ValueError(
            "file extension should be either .mat or .txt for this function, got %r" % ext)

    # a '<name>_MarksData.txt' marker file belongs to recording '<name>'
    if ext == ".txt" and name.endswith("_MarksData"):
        name = name[:-len("_MarksData")]

    # organize the input
    datafile = os.path.join(path, name + ".mat")
    headerfile = os.path.join(path, name + ".txt")
    markerfile = os.path.join(path, name + "_MarksData.txt")

    # read the data
    mat = scipy.io.loadmat(datafile)

    class Data(object):
        """FieldTrip-style container: trial, time and label lists."""
        def __init__(self):
            self.trial = []
            self.time = []
            self.label = []

    # organize data structure; 'n' holds the trial count and trial t is
    # stored in the MATLAB variable 'b<t+1>' with time in the first column
    data = Data()
    for t in range(int(mat["n"][0][0])):
        block = mat["b" + str(t + 1)]
        data.trial.append(block.T)
        data.time.append(block[:, 0])

    # read the header information: the first line lists the channel names
    # (strip the trailing newline so the last label is clean)
    try:
        with open(headerfile) as f:
            data.label = f.readline().rstrip("\n").split(" ")
    except OSError:
        print("could not read the header information")

    class Event(object):
        """Marker container: parallel type, sample and value lists."""
        def __init__(self):
            self.type = []
            self.sample = []
            self.value = []

    # read the markers; the first line is a header row, columns 0/1/4 hold
    # the marker type, sample index and value respectively
    event = Event()
    try:
        with open(markerfile) as f:
            lines = f.readlines()
        for line in lines[1:]:
            fields = line.rstrip("\n").split(" ")
            event.type.append(fields[0])
            event.sample.append(fields[1])
            event.value.append(fields[4])
    except (OSError, IndexError):
        print("could not read the marker information")
    return data, event
| 27.323529 | 80 | 0.547542 | 262 | 0.094008 | 0 | 0 | 0 | 0 | 0 | 0 | 1,019 | 0.365626 |
5098854b1bebe51b75f24ed1504c767942b2632b | 961 | py | Python | ex/classifier.py | scw/conda-uc-2017 | c113583b88beb58adf51e4fc4ee6b6f6a963638d | [
"Apache-2.0"
] | 1 | 2019-09-19T01:37:27.000Z | 2019-09-19T01:37:27.000Z | ex/classifier.py | scw/conda-uc-2017 | c113583b88beb58adf51e4fc4ee6b6f6a963638d | [
"Apache-2.0"
] | null | null | null | ex/classifier.py | scw/conda-uc-2017 | c113583b88beb58adf51e4fc4ee6b6f6a963638d | [
"Apache-2.0"
] | null | null | null | import arcpy
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
# Script parameters supplied by the ArcGIS toolbox:
#   0 -- path to the labelled tweets CSV
#   1 -- a free-text string to classify
input_csv = arcpy.GetParameterAsText(0)
test_string = arcpy.GetParameterAsText(1)
df = pd.read_csv(input_csv)
# label column: sentiment directed at a brand/product; feature column: tweet text
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']
# drop rows whose tweet text is missing, keeping the labels aligned via the same mask
fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]
# bag-of-words featurization: learn the vocabulary, then vectorize the corpus
count_vect = CountVectorizer()
count_vect.fit(fixed_text)
counts = count_vect.transform(fixed_text)
# NB has a bunch of parameters -- somewhat scary for those who haven't
# used it before. That said, Scikit-Learn mostly has sane defaults,
# and usually it's not necessary to modify them. Can also try to
# change a new algorithm, but usually it's not the best way to spend
# your time.
nb = MultinomialNB()
nb.fit(counts, fixed_target)
# classify the input string and report the predicted label in the tool output
arcpy.AddMessage(nb.predict(count_vect.transform([test_string])))
# testing an addition to the script.
50993243321f041c53a88a278ea5aece495a4f45 | 9,453 | py | Python | pysnmp/TRANGO-APEX-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/TRANGO-APEX-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/TRANGO-APEX-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module TRANGO-APEX-TRAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TRANGO-APEX-TRAP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:19:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, NotificationType, IpAddress, Gauge32, Unsigned32, TimeTicks, iso, ModuleIdentity, Bits, Counter32, Integer32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "NotificationType", "IpAddress", "Gauge32", "Unsigned32", "TimeTicks", "iso", "ModuleIdentity", "Bits", "Counter32", "Integer32", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
MibScalar, MibTable, MibTableRow, MibTableColumn, apex, NotificationType, Unsigned32, ModuleIdentity, ObjectIdentity = mibBuilder.importSymbols("TRANGO-APEX-MIB", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "apex", "NotificationType", "Unsigned32", "ModuleIdentity", "ObjectIdentity")
class DisplayString(OctetString):
    # generated textual convention -- behaves like a plain OCTET STRING here
    pass
# Root OID for every Trango Apex trap object: 1.3.6.1.4.1.5454.1.60.6.
# Everything below is pysmi-generated registration code; each NotificationType
# is followed by its loadTexts status hook.
trangotrap = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6))
# -- device lifecycle notifications --
trapReboot = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 1))
if mibBuilder.loadTexts: trapReboot.setStatus('current')
trapStartUp = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 2))
if mibBuilder.loadTexts: trapStartUp.setStatus('current')
# -- modem/radio lock-state notifications (subtree .3) --
traplock = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3))
trapModemLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 1))
if mibBuilder.loadTexts: trapModemLock.setStatus('current')
trapTimingLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 2))
if mibBuilder.loadTexts: trapTimingLock.setStatus('current')
trapInnerCodeLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 3))
if mibBuilder.loadTexts: trapInnerCodeLock.setStatus('current')
trapEqualizerLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 4))
if mibBuilder.loadTexts: trapEqualizerLock.setStatus('current')
trapFrameSyncLock = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 3, 5))
if mibBuilder.loadTexts: trapFrameSyncLock.setStatus('current')
# -- min/max threshold-crossing notifications (subtree .4) --
trapthreshold = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4))
trapmse = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 1))
trapMSEMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 1, 1))
if mibBuilder.loadTexts: trapMSEMinThreshold.setStatus('current')
trapMSEMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 1, 2))
if mibBuilder.loadTexts: trapMSEMaxThreshold.setStatus('current')
trapber = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 2))
trapBERMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 2, 1))
if mibBuilder.loadTexts: trapBERMinThreshold.setStatus('current')
trapBERMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 2, 2))
if mibBuilder.loadTexts: trapBERMaxThreshold.setStatus('current')
trapfer = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 3))
trapFERMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 3, 1))
if mibBuilder.loadTexts: trapFERMinThreshold.setStatus('current')
trapFERMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 3, 2))
if mibBuilder.loadTexts: trapFERMaxThreshold.setStatus('current')
traprssi = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 4))
trapRSSIMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 4, 1))
if mibBuilder.loadTexts: trapRSSIMinThreshold.setStatus('current')
trapRSSIMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 4, 2))
if mibBuilder.loadTexts: trapRSSIMaxThreshold.setStatus('current')
trapidutemp = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 5))
trapIDUTempMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 5, 1))
if mibBuilder.loadTexts: trapIDUTempMinThreshold.setStatus('current')
trapIDUTempMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 5, 2))
if mibBuilder.loadTexts: trapIDUTempMaxThreshold.setStatus('current')
trapodutemp = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 6))
trapODUTempMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 6, 1))
if mibBuilder.loadTexts: trapODUTempMinThreshold.setStatus('current')
trapODUTempMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 6, 2))
if mibBuilder.loadTexts: trapODUTempMaxThreshold.setStatus('current')
trapinport = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 7))
trapInPortUtilMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 7, 1))
if mibBuilder.loadTexts: trapInPortUtilMinThreshold.setStatus('current')
trapInPortUtilMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 7, 2))
if mibBuilder.loadTexts: trapInPortUtilMaxThreshold.setStatus('current')
trapoutport = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 8))
trapOutPortUtilMinThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 8, 1))
if mibBuilder.loadTexts: trapOutPortUtilMinThreshold.setStatus('current')
trapOutPortUtilMaxThreshold = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 4, 8, 2))
if mibBuilder.loadTexts: trapOutPortUtilMaxThreshold.setStatus('current')
# -- standby link / failover notifications (subtree .5) --
trapstandby = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5))
trapStandbyLinkDown = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5, 1))
if mibBuilder.loadTexts: trapStandbyLinkDown.setStatus('current')
trapStandbyLinkUp = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5, 2))
if mibBuilder.loadTexts: trapStandbyLinkUp.setStatus('current')
trapSwitchover = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 5, 3))
if mibBuilder.loadTexts: trapSwitchover.setStatus('current')
# -- ethernet port status notifications (subtree .6) --
trapeth = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6))
trapethstatus = MibIdentifier((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1))
trapEth1StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 1))
if mibBuilder.loadTexts: trapEth1StatusUpdate.setStatus('current')
trapEth2StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 2))
if mibBuilder.loadTexts: trapEth2StatusUpdate.setStatus('current')
trapEth3StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 3))
if mibBuilder.loadTexts: trapEth3StatusUpdate.setStatus('current')
trapEth4StatusUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 6, 1, 4))
if mibBuilder.loadTexts: trapEth4StatusUpdate.setStatus('current')
# -- miscellaneous notifications (.8 - .10) --
trapDownShift = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 8))
if mibBuilder.loadTexts: trapDownShift.setStatus('current')
trapRapidPortShutdown = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 9))
if mibBuilder.loadTexts: trapRapidPortShutdown.setStatus('current')
trapRPSPortUp = NotificationType((1, 3, 6, 1, 4, 1, 5454, 1, 60, 6, 10))
if mibBuilder.loadTexts: trapRPSPortUp.setStatus('current')
# export all defined objects so other MIB modules can import them by name
mibBuilder.exportSymbols("TRANGO-APEX-TRAP-MIB", trapber=trapber, trapEth3StatusUpdate=trapEth3StatusUpdate, DisplayString=DisplayString, trapFERMinThreshold=trapFERMinThreshold, trapStandbyLinkDown=trapStandbyLinkDown, trapInPortUtilMinThreshold=trapInPortUtilMinThreshold, trapMSEMinThreshold=trapMSEMinThreshold, trapRSSIMaxThreshold=trapRSSIMaxThreshold, traprssi=traprssi, trapStandbyLinkUp=trapStandbyLinkUp, trapIDUTempMinThreshold=trapIDUTempMinThreshold, trapRapidPortShutdown=trapRapidPortShutdown, trangotrap=trangotrap, trapStartUp=trapStartUp, trapMSEMaxThreshold=trapMSEMaxThreshold, trapSwitchover=trapSwitchover, traplock=traplock, trapethstatus=trapethstatus, trapEth2StatusUpdate=trapEth2StatusUpdate, trapodutemp=trapodutemp, trapinport=trapinport, trapReboot=trapReboot, trapthreshold=trapthreshold, trapmse=trapmse, trapEth4StatusUpdate=trapEth4StatusUpdate, trapIDUTempMaxThreshold=trapIDUTempMaxThreshold, trapFrameSyncLock=trapFrameSyncLock, trapOutPortUtilMinThreshold=trapOutPortUtilMinThreshold, trapInnerCodeLock=trapInnerCodeLock, trapfer=trapfer, trapTimingLock=trapTimingLock, trapFERMaxThreshold=trapFERMaxThreshold, trapstandby=trapstandby, trapModemLock=trapModemLock, trapInPortUtilMaxThreshold=trapInPortUtilMaxThreshold, trapOutPortUtilMaxThreshold=trapOutPortUtilMaxThreshold, trapoutport=trapoutport, trapODUTempMinThreshold=trapODUTempMinThreshold, trapDownShift=trapDownShift, trapBERMinThreshold=trapBERMinThreshold, trapRPSPortUp=trapRPSPortUp, trapEqualizerLock=trapEqualizerLock, trapeth=trapeth, trapRSSIMinThreshold=trapRSSIMinThreshold, trapEth1StatusUpdate=trapEth1StatusUpdate, trapidutemp=trapidutemp, trapODUTempMaxThreshold=trapODUTempMaxThreshold, trapBERMaxThreshold=trapBERMaxThreshold)
| 95.484848 | 1,742 | 0.756056 | 42 | 0.004443 | 0 | 0 | 0 | 0 | 0 | 0 | 1,309 | 0.138475 |
509a3e7b6cd578f69b49591849f510331b44efb2 | 8,780 | py | Python | test/test_gg_snippets.py | ooz/ggpy | a222299c3fc23739576ae0d3dfc75362fb1152cd | [
"MIT"
] | null | null | null | test/test_gg_snippets.py | ooz/ggpy | a222299c3fc23739576ae0d3dfc75362fb1152cd | [
"MIT"
] | 2 | 2020-05-13T22:12:39.000Z | 2021-05-01T22:41:55.000Z | test/test_gg_snippets.py | ooz/ggpy | a222299c3fc23739576ae0d3dfc75362fb1152cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gg
from ggconfig import config
##############################################################################
# CONTENT SNIPPETS
##############################################################################
def test_logo_url():
    # with a configured site the logo resolves to an absolute URL; without
    # configuration it degrades to an empty string
    assert gg.logo_url(config) == 'https://oliz.io/ggpy/static/gg.png'
    assert gg.logo_url() == ''

def test_pagetitle():
    # the site title is appended unless the page title already equals it;
    # an empty page title falls back to the site title (or '' without config)
    assert gg.pagetitle('Good Generator.py', config) == 'Good Generator.py'
    assert gg.pagetitle('Some Page', config) == 'Some Page | Good Generator.py'
    assert gg.pagetitle('Title with default config') == 'Title with default config'
    assert gg.pagetitle('') == ''
    assert gg.pagetitle() == ''
    assert gg.pagetitle('', config) == 'Good Generator.py'

def test_meta():
    # special "__...__" control tags must be filtered out of the keywords tag
    meta = gg.meta('oz', 'Nice text!', '__draft__, foo, __inline__, bar, tags, __no_header__')
    assert meta == \
'''<meta name="author" content="oz">
<meta name="description" content="Nice text!">
<meta name="keywords" content="foo, bar, tags">'''

def test_meta_single_special_tag():
    # a tag list consisting only of control markers yields no keywords tag at all
    meta = gg.meta('oz', 'Nice text!', '__draft__')
    assert meta == \
'''<meta name="author" content="oz">
<meta name="description" content="Nice text!">'''

def test_opengraph():
    # with config the og:image line is derived from the configured logo
    opengraph = gg.opengraph('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20', config)
    assert opengraph == \
'''<meta property="og:title" content="Title!">
<meta property="og:type" content="article">
<meta property="og:url" content="https://oliz.io/ggpy/">
<meta property="og:description" content="Nice text!">
<meta property="og:image" content="https://oliz.io/ggpy/static/gg.png">
<meta property="og:locale" content="en-US">
<meta property="article:published_time" content="2020-02-20">'''
    # without config the og:image line is omitted
    opengraph_default_config = gg.opengraph('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20')
    assert opengraph_default_config == \
'''<meta property="og:title" content="Title!">
<meta property="og:type" content="article">
<meta property="og:url" content="https://oliz.io/ggpy/">
<meta property="og:description" content="Nice text!">
<meta property="og:locale" content="en-US">
<meta property="article:published_time" content="2020-02-20">'''

def test_json_ld():
    # double quotes in title/description must be JSON-escaped in the output
    json_ld = gg.json_ld('Title! "BAM!"', 'https://oliz.io/ggpy/', 'It says "BAM!"', config)
    assert json_ld == \
'''<script type="application/ld+json">
{"@context":"http://schema.org","@type":"WebSite","headline":"Title! \\"BAM!\\"","url":"https://oliz.io/ggpy/","name":"Good Generator.py","description":"It says \\"BAM!\\""}</script>'''
    # without config there is no site "name" entry
    json_ld_default_config = gg.json_ld('Title! "BAM!"', 'https://oliz.io/ggpy/', 'It says "BAM!"')
    assert json_ld_default_config == \
'''<script type="application/ld+json">
{"@context":"http://schema.org","@type":"WebSite","headline":"Title! \\"BAM!\\"","url":"https://oliz.io/ggpy/","description":"It says \\"BAM!\\""}</script>'''
def test_header():
    # configured site: logo avatar link plus site name and date in the byline
    header = gg.header('https://example.com/logo.png', '<h1>Title!</h1>', '2021-03-27', config)
    assert header == \
'''<a href="https://oliz.io/ggpy"><img src="https://example.com/logo.png" class="avatar" /></a>
<div style="text-align:right;">
<h1>Title!</h1>
<small><a href="https://oliz.io/ggpy">Good Gen</a>, 2021-03-27</small>
</div>'''
    # no logo and default config: only the title block with a bare date
    header_default_config = gg.header('', '<h1>Title!</h1>', '2021-03-27')
    assert header_default_config == \
'''<div style="text-align:right;">
<h1>Title!</h1>
<small>2021-03-27</small>
</div>'''

def test_post_header():
    # configured site: site link plus date in the byline
    post_header = gg.post_header('<h1 id="title">Title!</h1>', '2020-02-20', config)
    assert post_header == \
'''<div style="text-align:right;">
<h1 id="title">Title!</h1>
<small><a href="https://oliz.io/ggpy">Good Gen</a>, 2020-02-20</small>
</div>'''
    # default config: bare date only
    post_header_default_config = gg.post_header('<h1 id="title">Title!</h1>', '2020-02-20')
    assert post_header_default_config == \
'''<div style="text-align:right;">
<h1 id="title">Title!</h1>
<small>2020-02-20</small>
</div>'''

def test_footer_navigation():
    # top link, theme toggle and font-size toggle
    footer_nav = gg.footer_navigation()
    assert footer_nav == \
'''<a href="#" class="nav">top</a>
<a href="javascript:toggleTheme()" class="nav">🌓</a>
<a href="javascript:toggleFontSize()" class="nav">aA</a>'''

def test_about_and_social_icons():
    # configured site: one anchor per configured social entry
    about_and_social = gg.about_and_social_icons(config)
    assert about_and_social == \
'''<a href="mailto:example@example.com" class="social">email</a>
<a href="https://nitter.net/" class="social">twitter</a>
<a href="https://github.com/ooz/ggpy" class="social">github</a>
<a href="https://oliz.io/about.html" class="social">about</a>'''
    # without config there is nothing to render
    about_and_social_default_config = gg.about_and_social_icons()
    assert about_and_social_default_config == ''

def test_posts_index():
    '''Generate index without inlined posts.
    '''
    posts = gg.scan_posts(['.'])
    # drop posts tagged for inlining; only linked cards remain
    posts = [post for post in posts if gg.TAG_INLINE not in post['tags']]
    posts_index = gg.posts_index(posts)
    assert posts_index == \
'''<div>
<div class="card"><small class="social">2021-04-04</small><a href="test/features/meta.html"><b>Markdown Meta Data</b></a></div>
<div class="card"><small class="social">2018-03-17</small><a href="test/some-post.html"><b>Some Post</b></a></div>
<div class="card"><small class="social">1996-06-06</small><a href="test/features/"><b>Markdown Feature Test without "quotes bug"</b></a></div>
</div>'''
def test_posts_index_inline():
    '''Generate index with inlined posts.

    Four cases:
    1. Lots of content but no description -> details block with title as summary
    2. Lots of content with description -> details block with description as summary
    3. Has description but no content -> only show description
    4. Else -> show content directly
    '''
    posts = gg.scan_posts(['test/features/index-inline-posts/'])
    posts_index = gg.posts_index(posts)
    assert posts_index == \
'''<div>
<div class="card"><small class="social">2021-07-17</small>
<a href="little-inline-content-no-description.html"><b>Little inline content, no description</b></a>
<div>
<p>This shows directly on the card, without details+summary blocks.</p>
</div>
</div>
<div class="card"><small class="social">2021-07-17</small>
<a href="no-content-with-description.html"><b>No content, but with description</b></a>
<div>
Just some more minor text from the description
</div>
</div>
<div class="card"><small class="social">2021-07-17</small>
<a href="lots-of-content-with-description.html"><b>Lots of content, with description</b></a>
<details><summary>Click here to expand...</summary>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
<li>Four</li>
<li>Five</li>
<li>Six</li>
<li>Seven</li>
<li>Eight</li>
<li>Nine</li>
<li>Ten</li>
</ul>
<p>... and some more lines.</p>
</details>
</div>
<div class="card"><small class="social">2021-07-17</small>
<details><summary><a href="lots-of-content-no-description.html"><b>Lots of content, no description</b></a></summary>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
<li>Four</li>
<li>Five</li>
<li>Six</li>
<li>Seven</li>
<li>Eight</li>
<li>Nine</li>
<li>Ten</li>
</ul>
<p>... and some more lines.</p>
</details>
</div>
</div>'''
##############################################################################
# HTML SNIPPETS
##############################################################################
def test_html_opening_boilerplate():
    # doctype, html element and the standard meta tags of the <head>
    assert gg.html_opening_boilerplate() == \
'''<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width,initial-scale=1">'''

def test_html_head_body_boilerplate():
    # closes <head> and opens <body> with the theme-initialization hook
    assert gg.html_head_body_boilerplate() == \
'''</head>
<body onload="initTheme()">'''

def test_html_tag_line():
    # single-line element: content wrapped in an opening and closing tag
    assert gg.html_tag_line('title', 'Nice!') == '<title>Nice!</title>'

def test_html_tag_block():
    # block element: content on its own line between the tags
    assert gg.html_tag_block('footer', '<p>in closing</p>') == \
'''<footer>
<p>in closing</p>
</footer>'''

def test_html_tag_empty():
    # void element with attributes; no attributes means no tag at all
    link_tag = gg.html_tag_empty('link', [('rel', 'canonical'), ('href','https://example.com')])
    assert link_tag == '<link rel="canonical" href="https://example.com">'
    omit_empty_tag = gg.html_tag_empty('link', [])
    assert omit_empty_tag == ''

def test_html_closing_boilerplate():
    # closes <body> and <html>, ending with a trailing newline
    assert gg.html_closing_boilerplate() == \
'''</body>
</html>
'''

def test_inline_style():
    # smoke test: the inlined CSS contains the main selectors
    style = gg.inline_style()
    assert 'body {' in style
    assert '.dark-mode' in style
    assert '.avatar' in style
    assert '.nav' in style
    assert '.social' in style

def test_inline_javascript():
    # smoke test: the inlined JS defines the theme toggling functions
    js = gg.inline_javascript()
    assert 'function toggleTheme' in js
    assert 'function initTheme' in js
| 37.20339 | 185 | 0.637813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,059 | 0.689855 |
509af6bcf44faa152d90bad8b829b7b4847920a9 | 359 | py | Python | example/controller/tests/helper/security/web/csrf/verify/by_value.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 18 | 2015-04-07T14:28:39.000Z | 2020-02-08T14:03:38.000Z | example/controller/tests/helper/security/web/csrf/verify/by_value.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 7 | 2016-10-05T05:14:06.000Z | 2021-05-20T02:07:22.000Z | example/controller/tests/helper/security/web/csrf/verify/by_value.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 11 | 2015-12-15T09:49:39.000Z | 2021-09-06T18:38:21.000Z | # -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class ByValueController(Controller):
    """Verify a CSRF token that is passed explicitly as a request value."""

    def get(self):
        # pull the token from the 'csrf' request argument and let the
        # security helper validate it for this controller instance
        token = self.get_argument('csrf')
        if self.helper.security.web.csrf.verify_token(controller=self, value=token):
            self.finish('done')
        else:
            return self.parent.finish_with_error(400)
| 23.933333 | 111 | 0.688022 | 278 | 0.774373 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.097493 |
509bc06e0b8597b7dcfcd3d366bc4b0a1b33cced | 1,913 | py | Python | dis_snek/api/http/route.py | BoredManCodes/Dis-Snek | 662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b | [
"MIT"
] | null | null | null | dis_snek/api/http/route.py | BoredManCodes/Dis-Snek | 662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b | [
"MIT"
] | null | null | null | dis_snek/api/http/route.py | BoredManCodes/Dis-Snek | 662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING, Any, ClassVar, Optional
from urllib.parse import quote as _uriquote
if TYPE_CHECKING:
from dis_snek.models.discord.snowflake import Snowflake_Type
__all__ = ["Route"]
class Route:
BASE: ClassVar[str] = "https://discord.com/api/v9"
path: str
params: dict[str, str | int]
webhook_id: Optional["Snowflake_Type"]
webhook_token: Optional[str]
def __init__(self, method: str, path: str, **parameters: Any):
self.path: str = path
self.method: str = method
self.params = parameters
self.channel_id = parameters.get("channel_id")
self.guild_id = parameters.get("guild_id")
self.webhook_id = parameters.get("webhook_id")
self.webhook_token = parameters.get("webhook_token")
self.known_bucket: Optional[str] = None
def __eq__(self, other):
if isinstance(other, Route):
return self.rl_bucket == other.rl_bucket
return NotImplemented
def __hash__(self):
return hash(self.rl_bucket)
def __repr__(self):
return f"<Route {self.endpoint}>"
def __str__(self):
return self.endpoint
@property
def rl_bucket(self) -> str:
"""This route's full rate limit bucket"""
if self.known_bucket:
return self.known_bucket
if self.webhook_token:
return f"{self.webhook_id}{self.webhook_token}:{self.channel_id}:{self.guild_id}:{self.endpoint}"
return f"{self.channel_id}:{self.guild_id}:{self.endpoint}"
@property
def endpoint(self) -> str:
"""The endpoint for this route"""
return f"{self.method} {self.path}"
@property
def url(self) -> str:
"""The full url for this route"""
return f"{self.BASE}{self.path}".format_map(
{k: _uriquote(v) if isinstance(v, str) else v for k, v in self.params.items()}
)
| 28.984848 | 109 | 0.634605 | 1,702 | 0.889702 | 0 | 0 | 725 | 0.378986 | 0 | 0 | 428 | 0.223732 |
509cf64c7635b5d2dcfcf6e0039a3e50e9325a99 | 13,539 | py | Python | second/data/udi_dataset.py | muzi2045/second_TANET.pytorch | 3e10c93075a76684871fe0f188819c7b282671fd | [
"MIT"
] | 6 | 2020-02-15T09:11:53.000Z | 2021-11-12T09:03:41.000Z | second/data/udi_dataset.py | muzi2045/second_TANET.pytorch | 3e10c93075a76684871fe0f188819c7b282671fd | [
"MIT"
] | 2 | 2020-04-15T02:40:44.000Z | 2020-11-28T02:14:32.000Z | second/data/udi_dataset.py | muzi2045/second_TANET.pytorch | 3e10c93075a76684871fe0f188819c7b282671fd | [
"MIT"
] | 3 | 2020-02-11T20:12:50.000Z | 2021-05-28T07:31:02.000Z | # udi dataset process module
# modiflied from nuscenes_dataset.py
import json
import pickle
import time
import random
from copy import deepcopy
from functools import partial
from pathlib import Path
import subprocess
import fire
import numpy as np
import os
from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
from second.data.dataset import Dataset, register_dataset
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import progress_bar_iter as prog_bar
from second.utils.timer import simple_timer
@register_dataset
class UDIDataset(Dataset):
NumPointFeatures = 4  # each lidar point carries x, y, z and intensity
# maps raw annotation labels onto detection class names
# (identity mapping for the UDI label set)
NameMapping = {
    'car': 'car',
    'pedestrian': 'pedestrian',
    'cyclist': 'cyclist',
    'truck': 'truck',
    'forklift': 'forklift',
    'golf car': 'golf car',
    'motorcyclist': 'motorcyclist',
    'bicycle': 'bicycle',
    'motorbike': 'motorbike'
}
# fallback attribute string emitted per class when none is predicted;
# nuScenes-style values kept from the nuscenes_dataset.py template.
# NOTE(review): the key "pedestrain" looks like a typo for "pedestrian";
# kept as-is because it is runtime data -- confirm against consumers.
DefaultAttribute = {
    "car": "object_action_parked",
    "pedestrain": "object_action_walking",
    "bicycle": "object_action_driving_straight_forward",
    "motorcycle": "object_action_parked",
    "other_vehicle": "object_action_driving_straight_forward",
    "emergency_vehicle": "object_action_driving_straight_forward",
    "truck": "object_action_parked",
    "animal": "",
    "bus": "object_action_driving_straight_forward",
}
def __init__(self, root_path, info_path, class_names=None, prep_func=None, num_point_features=None):
    """Load the pre-generated UDI info pickle and cache dataset state.

    ``info_path`` must point to a pickle containing an ``infos`` list and
    a ``metadata`` dict with at least a ``version`` entry.
    """
    self._root_path = Path(root_path)
    self._info_path = Path(info_path)
    with open(info_path, 'rb') as handle:
        payload = pickle.load(handle)
    self._udi_infos = payload["infos"]
    self._metadata = payload["metadata"]
    self.version = self._metadata["version"]
    self._class_names = class_names
    self._prep_func = prep_func
    self._with_velocity = False
def __len__(self):
return len(self._udi_infos)
def __getitem__(self, idx):
input_dict = self.get_sensor_data(idx)
example = self._prep_func(input_dict=input_dict)
example["metadata"] = input_dict["metadata"]
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
def get_sensor_data(self, query):
idx = query
if isinstance(query, dict):
assert "lidar" in query
idx = query["lidar"]["idx"]
info = self._udi_infos[idx]
res = {
"lidar": {
"type": "lidar",
"points": None,
},
"metadata": {
"token": info["token"]
},
}
lidar_path = Path(info['lidar_path'])
points = np.fromfile(str(lidar_path), dtype=np.float32).reshape((-1,4))
points[:, 3] /= 255
res["lidar"]["points"] = points
if 'gt_boxes' in info:
res["lidar"]["annotations"] = {
'boxes': info["gt_boxes"],
'names': info["gt_names"]
}
return res
def evaluation_udi(self, detections, output_dir):
version = self.version
eval_set_map = {
# "v1.0-mini": "mini_train",
"v1.0-trainval": "val",
}
# gt_annos = self.ground_truth_annotations
# if gt_annos is None:
# return None
udi_annos = {}
mapped_class_names = self._class_names
token2info = {}
for info in self._udi_infos:
token2info[info["token"]] = info
for det in detections:
annos = []
boxes = _second_det_to_udi_box(det)
for i, box in enumerate(boxes):
name = mapped_class_names[box.label]
velocity = box.velocity[:2].tolist()
box.velocity = np.array([*velocity, 0.0])
for i, box in enumerate(boxes):
name = mapped_class_names[box.label]
velocity = box.velocity[:2].tolist()
nusc_anno = {
"sample_token": det["metadata"]["token"],
"translation": box.center.tolist(),
"size": box.wlh.tolist(),
"rotation": box.orientation.elements.tolist(),
"velocity": velocity,
"detection_name": name,
"detection_score": box.score,
"attribute_name": "",
}
annos.append(nusc_anno)
udi_annos[det["metadata"]["token"]] = annos
nusc_submissions = {
"meta": {
"use_camera": False,
"use_lidar": False,
"use_radar": False,
"use_map": False,
"use_external": False,
},
"results": udi_annos,
}
res_path = Path(output_dir) / "results_udi.json"
with open(res_path, "w") as f:
json.dump(nusc_submissions, f)
eval_main_file = Path(__file__).resolve().parent / "udi_eval.py"
# why add \"{}\"? to support path with spaces.
cmd = f"python3 {str(eval_main_file)} --root_path=\"{str(self._root_path)}\""
cmd += f" --info_path=\"{str(self._info_path)}\""
cmd += f" --version={self.version}"
cmd += f" --res_path=\"{str(res_path)}\" --eval_set={eval_set_map[self.version]}"
cmd += f" --output_dir=\"{output_dir}\""
# use subprocess can release all nusc memory after evaluation
subprocess.check_output(cmd, shell=True)
with open(Path(output_dir) / "metrics_summary.json", "r") as f:
metrics = json.load(f)
detail = {}
res_path.unlink() # delete results_nusc.json since it's very large
result = f"Nusc {version} Evaluation\n"
for name in mapped_class_names:
detail[name] = {}
for k, v in metrics["label_aps"][name].items():
detail[name][f"dist@{k}"] = v
tp_errs = []
tp_names = []
for k, v in metrics["label_tp_errors"][name].items():
detail[name][k] = v
tp_errs.append(f"{v:.4f}")
tp_names.append(k)
threshs = ', '.join(list(metrics["label_aps"][name].keys()))
scores = list(metrics["label_aps"][name].values())
scores = ', '.join([f"{s * 100:.2f}" for s in scores])
result += f"{name} Nusc dist AP@{threshs} and TP errors\n"
result += scores
result += "\n"
result += ', '.join(tp_names) + ": " + ', '.join(tp_errs)
result += "\n"
return {
"results": {
"nusc": result
},
"detail": {
"nusc": detail
},
}
def evaluation(self, detections, output_dir):
res_udi = self.evaluation_udi(detections, output_dir)
res = {
"results": {
"nusc": res_udi["result"]["nusc"],
},
"detail": {
"eval.nusc": res_udi["detail"]["nusc"],
},
}
return res
def _second_det_to_udi_box(detection):
    """Convert one SECOND detection dict into a list of UDI ``Box`` objects.

    ``detection`` holds torch tensors ``box3d_lidar`` (N x 7),
    ``scores`` and ``label_preds``; all are moved to the CPU first.
    """
    from udi_eval import Box
    import pyquaternion

    boxes = detection["box3d_lidar"].detach().cpu().numpy()
    confidences = detection["scores"].detach().cpu().numpy()
    class_ids = detection["label_preds"].detach().cpu().numpy()
    # Flip the SECOND yaw convention before building the quaternions.
    boxes[:, 6] = -boxes[:, 6] - np.pi / 2

    converted = []
    for row, label, score in zip(boxes, class_ids, confidences):
        orientation = pyquaternion.Quaternion(axis=[0, 0, 1], radians=row[6])
        # Velocity is not estimated by this model; keep NaN placeholders.
        # (Older velocity-aware handling is intentionally disabled.)
        converted.append(
            Box(row[:3],
                row[3:6],
                orientation,
                label=label,
                score=score,
                velocity=(np.nan, np.nan, np.nan)))
    return converted
# def _lidar_nusc_box_to_global(info, boxes, classes, eval_version="ICLR 2019"):
# import pyquaternion
# box_list = []
# for box in boxes:
# box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))
# box.translate(np.array(info['lidar2ego_translation']))
# box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
# box.translate(np.array(info['ego2global_translation']))
# box_list.append(box)
# return box_list
# def _get_available_scenes(lyft):
# available_scenes = []
# print("total scene num:", len(lyft.scene))
# for scene in lyft.scene:
# scene_token = scene["token"]
# scene_rec = lyft.get('scene', scene_token)
# sample_rec = lyft.get('sample', scene_rec['first_sample_token'])
# sd_rec = lyft.get('sample_data', sample_rec['data']["LIDAR_TOP"])
# has_more_frames = True
# scene_not_exist = False
# while has_more_frames:
# lidar_path, boxes, _ = lyft.get_sample_data(sd_rec['token'])
# if not Path(lidar_path).exists():
# scenes_not_exist = True
# break
# else:
# break
# if not sd_rec['next'] == "":
# sd_rec = lyft.get('sample_data', sd_rec['next'])
# else:
# has_more_frames = False
# if scene_not_exist:
# continue
# available_scenes.append(scene)
# print("exist scene num:", len(available_scenes))
# return available_scenes
def _fill_train_infos(root_path):
    """Build one info dict per frame under *root_path*.

    Expects ``root_path`` to contain ``lidar`` (``*.bin``), ``image``
    (``*.jpg``) and ``label`` (``*_bin.json``) sub-directories whose
    file names share a common index.

    Returns a list of dicts with the file paths, an integer token and
    the ground-truth ``gt_boxes`` (N x 7 array) / ``gt_names`` arrays.
    """
    train_udi_infos = []
    lidar_root_path = root_path + "/lidar"
    label_root_path = root_path + "/label"
    img_root_path = root_path + "/image"
    filenames = os.listdir(lidar_root_path)
    for filename in prog_bar(filenames):
        index = filename.split(".")[0]
        lidar_path = lidar_root_path + "/" + index + ".bin"
        cam_path = img_root_path + "/" + index + ".jpg"
        label_path = label_root_path + "/" + index + "_bin.json"
        assert Path(lidar_path).exists()
        assert Path(cam_path).exists()
        assert Path(label_path).exists()
        with open(label_path, encoding='utf-8') as f:
            result = json.load(f)
        boxes = result["elem"]
        info = {
            "lidar_path": lidar_path,
            "cam_front_path": cam_path,
            "filename": filename,
            "token": int(index),
        }
        gt_locs_list = []
        gt_dims_list = []
        print("label file path:", label_path)
        for box in boxes:
            box_loc = box["position"]
            box_size = box["size"]
            # Bug fix: np.float was removed from NumPy (>= 1.24);
            # use the explicit float64 dtype instead.
            box_loc_ = np.array(
                [box_loc["x"], box_loc["y"], box_loc["z"]], dtype=np.float64)
            box_size_ = np.array(
                [box_size["width"], box_size["depth"], box_size["height"]],
                dtype=np.float64)
            gt_locs_list.append(box_loc_.reshape(-1, 3))
            gt_dims_list.append(box_size_.reshape(-1, 3))
        if boxes:
            locs = np.concatenate(gt_locs_list, axis=0)
            dims = np.concatenate(gt_dims_list, axis=0)
            rots = np.array([b["yaw"] for b in boxes]).reshape(-1, 1)
            names = [b["class"] for b in boxes]
            for i in range(len(names)):
                if names[i] in UDIDataset.NameMapping:
                    names[i] = UDIDataset.NameMapping[names[i]]
            names = np.array(names)
            # we need to convert rot to SECOND format.
            # change the rot format will break all checkpoint.
            gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
        else:
            # Robustness: a frame with no labels used to crash
            # np.concatenate on an empty list; emit empty arrays instead.
            gt_boxes = np.zeros((0, 7), dtype=np.float64)
            names = np.array([], dtype=str)
        info["gt_boxes"] = gt_boxes
        info["gt_names"] = names
        train_udi_infos.append(info)
    return train_udi_infos
def create_udi_infos(root_path):
    """Collect frame infos under *root_path* and pickle them for training."""
    root = str(root_path)
    infos = _fill_train_infos(root)
    print(f"train sample: {len(infos)}")
    payload = {
        "infos": infos,
        "metadata": {"version": "v0.1-train"},
    }
    with open(root + "/infos_udi_train.pkl", 'wb') as out:
        pickle.dump(payload, out)
def get_box_mean(info_path, class_name="car"):
    """Return the mean 3D box for one class plus the raw matching boxes.

    ``info_path`` points at a pickle with an ``infos`` list whose
    entries carry ``gt_boxes`` (N x 7) and ``gt_names`` arrays.
    """
    with open(info_path, 'rb') as f:
        infos = pickle.load(f)["infos"]

    per_frame_boxes = []
    for info in infos:
        selected = np.array(
            [name == class_name for name in info["gt_names"]], dtype=np.bool_)
        per_frame_boxes.append(info["gt_boxes"][selected].reshape(-1, 7))

    all_boxes = np.concatenate(per_frame_boxes, axis=0)
    return {
        "box3d": all_boxes.mean(0).tolist(),
        "detail": all_boxes
    }
def get_all_box_mean(info_path):
    """Print per-class mean boxes and return the raw boxes per class."""
    class_names = sorted(set(UDIDataset.NameMapping.values()))
    means = {}
    details = {}
    for cls_name in class_names:
        stats = get_box_mean(info_path, cls_name)
        details[cls_name] = stats["detail"]
        means[cls_name] = stats["box3d"]
    print(json.dumps(means, indent=2))
    return details
# Expose the module's functions (e.g. create_udi_infos) as a CLI via fire.
if __name__ == "__main__":
    fire.Fire()
| 34.189394 | 106 | 0.557648 | 6,749 | 0.498486 | 0 | 0 | 6,767 | 0.499815 | 0 | 0 | 4,062 | 0.300022 |
509dc1b0cc588c10476a8cfd8e1bdc0626468d3f | 14,657 | py | Python | storm_analysis/diagnostics/multicolor/configure.py | oxfordni/storm-analysis | 835a5c17497c563c3632db561ae7e7c9144a8dd1 | [
"CNRI-Python"
] | null | null | null | storm_analysis/diagnostics/multicolor/configure.py | oxfordni/storm-analysis | 835a5c17497c563c3632db561ae7e7c9144a8dd1 | [
"CNRI-Python"
] | null | null | null | storm_analysis/diagnostics/multicolor/configure.py | oxfordni/storm-analysis | 835a5c17497c563c3632db561ae7e7c9144a8dd1 | [
"CNRI-Python"
] | null | null | null | #!/usr/bin/env python
"""
Configure folder for Multicolor testing.
Hazen 01/18
"""
import argparse
import inspect
import numpy
import os
import pickle
import subprocess
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.simulator.background as background
import storm_analysis.simulator.camera as camera
import storm_analysis.simulator.drift as drift
import storm_analysis.simulator.photophysics as photophysics
import storm_analysis.simulator.psf as psf
import storm_analysis.simulator.simulate as simulate
import storm_analysis.sCMOS.scmos_analysis as scmos
import storm_analysis.diagnostics.multicolor.settings as settings
def testingParametersSCMOS():
    """
    Create a sCMOS parameters object.
    """
    params = parameters.ParametersSCMOS()

    # (attribute, type, value) triplets, applied in order.
    attribute_table = [
        ("max_frame", "int", -1),
        ("start_frame", "int", -1),
        ("background_sigma", "float", 8.0),
        ("camera_calibration", "filename", "calib.npy"),
        ("find_max_radius", "int", 5),
        ("foreground_sigma", "float", 1.5),
        ("iterations", "int", settings.iterations),
        ("model", "string", "2dfixed"),
        ("pixel_size", "float", settings.pixel_size),
        ("sigma", "float", 150.0/settings.pixel_size),
        ("threshold", "float", 6.0),

        # Don't do tracking.
        ("descriptor", "string", "1"),
        ("radius", "float", "0.0"),

        # Don't do drift-correction.
        ("d_scale", "int", 2),
        ("drift_correction", "int", 0),
        ("frame_step", "int", 500),
        ("z_correction", "int", 0),
    ]
    for attr_name, attr_type, attr_value in attribute_table:
        params.setAttr(attr_name, attr_type, attr_value)

    return params
def testingParametersMC():
    """
    Create a Multiplane parameters object.
    """
    params = parameters.ParametersMultiplaneArb()

    # (attribute, type, value) triplets, applied in order.
    attribute_table = [
        ("max_frame", "int", -1),
        ("start_frame", "int", -1),
        ("background_sigma", "float", 8.0),
        ("find_max_radius", "int", 2),
        ("independent_heights", "int", settings.independent_heights),
        ("iterations", "int", settings.iterations),
        ("mapping", "filename", "map.map"),
        ("no_fitting", "int", 0),
        ("pixel_size", "float", settings.pixel_size),
        ("sigma", "float", 1.5),
        ("threshold", "float", 6.0),
        ("weights", "filename", "weights.npy"),
        ("z_value", "float-array", settings.z_value),

        # Per-channel camera calibration files.
        ("channel0_cal", "filename", "calib.npy"),
        ("channel1_cal", "filename", "calib.npy"),
        ("channel2_cal", "filename", "calib.npy"),
        ("channel3_cal", "filename", "calib.npy"),

        # Per-channel movie extensions and frame offsets.
        ("channel0_ext", "string", "_c1.dax"),
        ("channel1_ext", "string", "_c2.dax"),
        ("channel2_ext", "string", "_c3.dax"),
        ("channel3_ext", "string", "_c4.dax"),

        ("channel0_offset", "int", 0),
        ("channel1_offset", "int", 0),
        ("channel2_offset", "int", 0),
        ("channel3_offset", "int", 0),

        # Per-channel PSF splines.
        ("spline0", "filename", "c1_psf.spline"),
        ("spline1", "filename", "c2_psf.spline"),
        ("spline2", "filename", "c3_psf.spline"),
        ("spline3", "filename", "c4_psf.spline"),

        # Do tracking (localization color analysis depends on the tracks).
        ("descriptor", "string", "1"),
        ("radius", "float", "1.0"),
        ("max_z", "float", str(0.001 * settings.psf_z_range)),
        ("min_z", "float", str(-0.001 * settings.psf_z_range)),

        # Don't do drift-correction.
        ("d_scale", "int", 2),
        ("drift_correction", "int", 0),
        ("frame_step", "int", 500),
        ("z_correction", "int", 0),
    ]
    for attr_name, attr_type, attr_value in attribute_table:
        params.setAttr(attr_name, attr_type, attr_value)

    return params
def configure():
    """Prepare the working directory for multicolor diagnostics.

    Runs the full setup pipeline: writes the analysis XML files, the
    sCMOS calibration, the localization HDF5 files, simulates the
    mapping / PSF-calibration movies, measures the channel-to-channel
    mapping and the per-channel PSFs, converts the PSFs to splines and
    finally computes the Cramer-Rao plane weights.  All outputs land in
    the current directory.
    """
    # Get relevant paths.
    mm_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/micrometry/"
    mp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/multi_plane/"
    sp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/spliner/"
    # Fix: sim_path was previously assigned twice further down; hoist it.
    sim_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/simulator/"

    # Create analysis XML files.
    #
    print("Creating XML files.")
    params = testingParametersSCMOS()
    params.toXMLFile("scmos.xml")

    params = testingParametersMC()
    params.toXMLFile("multicolor.xml")

    # Useful variables
    aoi_size = int(settings.psf_size/2)+1

    # Create sCMOS data and HDF5 files we'll need for the simulation.
    #
    if True:

        # Create sCMOS camera calibration files.
        # Layout is [offset, variance, gain, version].
        numpy.save("calib.npy", [numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset,
                                 numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance,
                                 numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain,
                                 1])

        # Create localization on a grid file.
        #
        print("Creating gridded localizations.")
        subprocess.call(["python", sim_path + "emitters_on_grid.py",
                         "--bin", "grid_list.hdf5",
                         "--nx", str(settings.nx),
                         "--ny", str(settings.ny),
                         "--spacing", "20",
                         "--zrange", str(settings.test_z_range),
                         "--zoffset", str(settings.test_z_offset)])

        # Create randomly located localizations file (for STORM movies).
        #
        print("Creating random localizations.")
        subprocess.call(["python", sim_path + "emitters_uniform_random.py",
                         "--bin", "random_storm.hdf5",
                         "--density", "1.0",
                         "--margin", str(settings.margin),
                         "--sx", str(settings.x_size),
                         "--sy", str(settings.y_size),
                         "--zrange", str(settings.test_z_range)])

        # Create randomly located localizations file (for mapping measurement).
        #
        print("Creating random localizations.")
        subprocess.call(["python", sim_path + "emitters_uniform_random.py",
                         "--bin", "random_map.hdf5",
                         "--density", "0.0003",
                         "--margin", str(settings.margin),
                         "--sx", str(settings.x_size),
                         "--sy", str(settings.y_size)])

        # Create sparser grid for PSF measurement.
        #
        print("Creating data for PSF measurement.")
        subprocess.call(["python", sim_path + "emitters_on_grid.py",
                         "--bin", "psf_list.hdf5",
                         "--nx", "6",
                         "--ny", "3",
                         "--spacing", "40"])

    ## This part makes / tests measuring the mapping.
    ##
    if True:
        print("Measuring mapping.")

        # Make localization files for simulations.  Each channel is the
        # reference set shifted by (dx, dy) and placed at its z plane.
        # (This block previously appeared twice, verbatim; the duplicate
        # has been removed - it just rewrote the same files.)
        #
        # NOTE(review): plane 0's z is scaled by 1.0e-3 but planes 1-3
        # are not - confirm the intended units of settings.z_planes.
        locs = saH5Py.loadLocalizations("random_map.hdf5")
        locs["z"][:] = 1.0e-3 * settings.z_planes[0]
        saH5Py.saveLocalizations("c1_random_map.hdf5", locs)
        for i in range(1,4):
            locs["x"] += settings.dx
            locs["y"] += settings.dy
            locs["z"][:] = settings.z_planes[i]
            saH5Py.saveLocalizations("c" + str(i+1) + "_random_map.hdf5", locs)

        # Make simulated mapping data.
        #
        bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10)
        cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy")
        pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0)
        psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size)

        sim = simulate.Simulate(background_factory = bg_f,
                                camera_factory = cam_f,
                                photophysics_factory = pp_f,
                                psf_factory = psf_f,
                                x_size = settings.x_size,
                                y_size = settings.y_size)

        for i in range(4):
            sim.simulate("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_random_map.hdf5", 1)

        # Analyze simulated mapping data
        #
        for i in range(4):
            scmos.analyze("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_map.hdf5", "scmos.xml")

        # Measure mapping.
        #
        for i in range(3):
            subprocess.call(["python", mm_path + "micrometry.py",
                             "--locs1", "c1_map.hdf5",
                             "--locs2", "c" + str(i+2) + "_map.hdf5",
                             "--results", "c1_c" + str(i+2) + "_map.map",
                             "--no_plots"])

        # Merge mapping.
        #
        subprocess.call(["python", mm_path + "merge_maps.py",
                         "--results", "map.map",
                         "--maps", "c1_c2_map.map", "c1_c3_map.map", "c1_c4_map.map"])

        # Print mapping.
        #
        if True:
            print("Mapping is:")
            subprocess.call(["python", mp_path + "print_mapping.py",
                             "--mapping", "map.map"])
            print("")

        # Check that mapping is close to what we expect (within 5%).
        #
        with open("map.map", 'rb') as fp:
            mappings = pickle.load(fp)

        for i in range(3):
            if not numpy.allclose(mappings["0_" + str(i+1) + "_x"], numpy.array([settings.dx*(i+1), 1.0, 0.0]), rtol = 0.05, atol = 0.05):
                print("X mapping difference for channel", i+1)
            if not numpy.allclose(mappings["0_" + str(i+1) + "_y"], numpy.array([settings.dy*(i+1), 0.0, 1.0]), rtol = 0.05, atol = 0.05):
                print("Y mapping difference for channel", i+1)

    ## This part measures / test the PSF measurement.
    ##
    if True:

        # Create drift file, this is used to displace the localizations in the
        # PSF measurement movie.
        #
        dz = numpy.arange(-settings.psf_z_range, settings.psf_z_range + 0.05, 0.01)
        drift_data = numpy.zeros((dz.size, 3))
        drift_data[:,2] = dz
        numpy.savetxt("drift.txt", drift_data)

        # Also create the z-offset file.
        #
        z_offset = numpy.ones((dz.size, 2))
        z_offset[:,1] = dz
        numpy.savetxt("z_offset.txt", z_offset)

        # Create simulated data for PSF measurements.
        #
        bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10)
        cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy")
        drift_f = lambda s, x, y, h5 : drift.DriftFromFile(s, x, y, h5, "drift.txt")
        pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0)
        psf_f = lambda s, x, y, h5 : psf.PupilFunction(s, x, y, h5, settings.pixel_size, [])

        sim = simulate.Simulate(background_factory = bg_f,
                                camera_factory = cam_f,
                                drift_factory = drift_f,
                                photophysics_factory = pp_f,
                                psf_factory = psf_f,
                                x_size = settings.x_size,
                                y_size = settings.y_size)

        if True:
            for i in range(4):
                sim.simulate("c" + str(i+1) + "_zcal.dax",
                             "c" + str(i+1) + "_random_map.hdf5",
                             dz.size)

        # Get localizations to use for PSF measurement.
        #
        subprocess.call(["python", mp_path + "psf_localizations.py",
                         "--bin", "c1_map_ref.hdf5",
                         "--map", "map.map",
                         "--aoi_size", str(aoi_size)])

        # Create PSF z stacks.
        #
        for i in range(4):
            subprocess.call(["python", mp_path + "psf_zstack.py",
                             "--movie", "c" + str(i+1) + "_zcal.dax",
                             "--bin", "c1_map_ref_c" + str(i+1) + "_psf.hdf5",
                             "--zstack", "c" + str(i+1) + "_zstack",
                             "--scmos_cal", "calib.npy",
                             "--aoi_size", str(aoi_size)])

        # Measure PSF.
        #
        for i in range(4):
            subprocess.call(["python", mp_path + "measure_psf.py",
                             "--zstack", "c" + str(i+1) + "_zstack.npy",
                             "--zoffsets", "z_offset.txt",
                             "--psf_name", "c" + str(i+1) + "_psf_normed.psf",
                             "--z_range", str(settings.psf_z_range),
                             "--normalize"])

    ## This part creates the splines.
    ##
    if True:
        print("Measuring Splines.")
        for i in range(4):
            subprocess.call(["python", sp_path + "psf_to_spline.py",
                             "--psf", "c" + str(i+1) + "_psf_normed.psf",
                             "--spline", "c" + str(i+1) + "_psf.spline",
                             "--spline_size", str(settings.psf_size)])

    ## This part measures the Cramer-Rao weights.
    ##
    if True:
        print("Calculating weights.")
        subprocess.call(["python", mp_path + "plane_weighting.py",
                         "--background", str(settings.photons[0][0]),
                         "--photons", str(settings.photons[0][1]),
                         "--output", "weights.npy",
                         "--xml", "multicolor.xml",
                         "--no_plots"])
# Run the full diagnostics setup when invoked as a script.
if (__name__ == "__main__"):
    configure()
| 39.720867 | 138 | 0.542608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,636 | 0.316299 |
509ed4cc66c4ac402187a077d75f1b426136225c | 430 | py | Python | tests/views/test_application_views.py | snowdensb/domain-manager-api | 9bde9cec4231c0a0e27a12730182315571967980 | [
"CC0-1.0"
] | null | null | null | tests/views/test_application_views.py | snowdensb/domain-manager-api | 9bde9cec4231c0a0e27a12730182315571967980 | [
"CC0-1.0"
] | 28 | 2021-08-09T15:04:22.000Z | 2022-03-19T01:11:58.000Z | tests/views/test_application_views.py | snowdensb/domain-manager-api | 9bde9cec4231c0a0e27a12730182315571967980 | [
"CC0-1.0"
] | null | null | null | """Application View Tests."""
# Standard Python Libraries
import json
# cisagov Libraries
from tests.data.application_data import get_applications
def test_applications_get(client, mocker):
    """Test getting list of applications."""
    mocker.patch("api.manager.ApplicationManager.all", return_value=get_applications(5))
    response = client.get("/api/applications/")
    payload = json.loads(response.data)
    assert len(payload) == 5
| 28.666667 | 88 | 0.74186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.397674 |
50a2baaf689466aee04130a2a99ad570fae29004 | 967 | py | Python | infratabapp/tasks.py | sheeshmohsin/infratabtask | 5884c06d21d0c62a92ae203d941baafffab2e278 | [
"MIT"
] | null | null | null | infratabapp/tasks.py | sheeshmohsin/infratabtask | 5884c06d21d0c62a92ae203d941baafffab2e278 | [
"MIT"
] | null | null | null | infratabapp/tasks.py | sheeshmohsin/infratabtask | 5884c06d21d0c62a92ae203d941baafffab2e278 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from infratabtask.celery import app
from celery import Task
from infratabapp.utils import send_email_notf, send_phone_notf
class SendNotf(Task):
    """Celery task that sends a reminder's message by e-mail and SMS.

    ``pk`` identifies the ReminderDetails row whose notification sets
    supply the recipients.
    """

    def __init__(self, *args, **kwargs):
        self.pk = kwargs.get('pk', None)

    def run(self):
        """Load the reminder and fan out both notification channels."""
        self.get_object()
        self.email_notf()
        self.phone_notf()

    def get_object(self):
        """Fetch the ReminderDetails instance and cache its message."""
        # Imported here rather than at module level - presumably to
        # avoid a circular import with the models module; confirm.
        from infratabapp.models import ReminderDetails
        self.obj = ReminderDetails.objects.get(pk=self.pk)
        self.message = self.obj.message

    def email_notf(self):
        """Send the message to every e-mail address on the reminder."""
        self.email_list = [x.email for x in self.obj.emailnotification_set.all()]
        send_email_notf(self.email_list, self.message)

    def phone_notf(self):
        """Send the message to every phone number on the reminder."""
        self.phone_list = [y.phone for y in self.obj.smsnotification_set.all()]
        # Bug fix: the original passed the bound method ``self.phone_notf``
        # to send_phone_notf instead of the recipient list.
        send_phone_notf(self.phone_list, self.message)
| 28.441176 | 62 | 0.66908 | 801 | 0.828335 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.004137 |
50a3bc0cc77af6e3f24e20ed927fe327a30a1517 | 695 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/survey/admin.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/survey/admin.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/survey/admin.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Provide accessors to these models via the Django Admin pages
"""
from django import forms
from django.contrib import admin
from lms.djangoapps.survey.models import SurveyForm
class SurveyFormAdminForm(forms.ModelForm):
    """Model form for SurveyForm that validates the submitted HTML."""

    class Meta:
        model = SurveyForm
        fields = ('name', 'form')

    def clean_form(self):
        """Reject the save if the HTML template fails validation."""
        form_html = self.cleaned_data["form"]
        SurveyForm.validate_form_html(form_html)
        return form_html
class SurveyFormAdmin(admin.ModelAdmin):
    """Admin for SurveyForm"""
    # Use the validating form so invalid survey HTML is rejected on save.
    form = SurveyFormAdminForm


admin.site.register(SurveyForm, SurveyFormAdmin)
| 21.71875 | 60 | 0.700719 | 456 | 0.656115 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.286331 |
50a4666c1ec0642dcf588201b7c9a50b15e6dd48 | 1,804 | py | Python | resources/lib/guisettings.py | gade01/xbmcbackup | 6f4921a77eb1f73cdf30ddd863c38609ce5bd164 | [
"MIT"
] | 86 | 2015-01-11T12:41:31.000Z | 2022-02-08T09:59:59.000Z | resources/lib/guisettings.py | weblate/xbmcbackup | 40b626052160ce06663d3ea467122641b84651ae | [
"MIT"
] | 126 | 2015-01-28T20:18:44.000Z | 2022-02-27T16:03:26.000Z | resources/lib/guisettings.py | weblate/xbmcbackup | 40b626052160ce06663d3ea467122641b84651ae | [
"MIT"
] | 43 | 2015-01-11T02:57:49.000Z | 2022-03-01T00:13:54.000Z | import json
import xbmc
from . import utils as utils
class GuiSettingsManager:
    """Backs up and restores Kodi system settings via the JSON-RPC API."""

    filename = 'kodi_settings.json'
    systemSettings = None

    def __init__(self):
        # get all of the current Kodi settings
        json_response = json.loads(xbmc.executeJSONRPC('{"jsonrpc":"2.0", "id":1, "method":"Settings.GetSettings","params":{"level":"expert"}}'))
        self.systemSettings = json_response['result']['settings']

    def backup(self):
        """Return the current list of Kodi setting dicts."""
        utils.log('Backing up Kodi settings')

        # return all current settings
        return self.systemSettings

    def restore(self, restoreSettings):
        """Apply *restoreSettings*, updating only values that differ."""
        utils.log('Restoring Kodi settings')
        updateJson = {"jsonrpc": "2.0", "id": 1, "method": "Settings.SetSettingValue", "params": {"setting": "", "value": ""}}

        # Map of current setting id -> value; "action" settings carry
        # no value and are skipped.
        settingsDict = {aSetting['id']: aSetting['value']
                        for aSetting in self.systemSettings
                        if aSetting['type'] != 'action'}

        restoreCount = 0
        for aSetting in restoreSettings:
            # Only update a setting if it differs from the current value.
            # Robustness: .get() skips setting ids that don't exist on
            # this Kodi install (the old dict lookup raised KeyError).
            if(aSetting['type'] != 'action' and settingsDict.get(aSetting['id'], aSetting['value']) != aSetting['value']):
                if(utils.getSettingBool('verbose_logging')):
                    utils.log('%s different than current: %s' % (aSetting['id'], str(aSetting['value'])))

                updateJson['params']['setting'] = aSetting['id']
                updateJson['params']['value'] = aSetting['value']

                xbmc.executeJSONRPC(json.dumps(updateJson))
                restoreCount += 1

        utils.log('Update %d settings' % restoreCount)
| 37.583333 | 145 | 0.608093 | 1,748 | 0.968958 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.36918 |
50a50aa1a40424c9e929c23a7eb5cdd5ccf71c81 | 120 | py | Python | distopia/mapping/__init__.py | kevinguo344/distopia | 077dd3501bd43565d1a9647a151fb20b90b71a54 | [
"MIT"
] | null | null | null | distopia/mapping/__init__.py | kevinguo344/distopia | 077dd3501bd43565d1a9647a151fb20b90b71a54 | [
"MIT"
] | 7 | 2019-07-11T04:19:01.000Z | 2019-10-31T15:26:49.000Z | distopia/mapping/__init__.py | kevinguo344/distopia | 077dd3501bd43565d1a9647a151fb20b90b71a54 | [
"MIT"
] | 4 | 2018-10-29T20:32:03.000Z | 2019-10-02T03:15:57.000Z | """
District Mapping
================
Defines the algorithms that perform the mapping from precincts to districts.
"""
| 17.142857 | 76 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.991667 |
50a51e2bd1f9dab244096057e39978a0aa037170 | 1,741 | py | Python | pymilldb/context/Column.py | Toka-Taka/mill-db | 1edf390f2ce89d9232ba91d722cb4b104c398078 | [
"MIT"
] | 2 | 2019-11-05T06:24:59.000Z | 2020-03-06T09:04:38.000Z | pymilldb/context/Column.py | bmstu-iu9/mill-db | a3725b11fcd995953dabc21f7fe6f4d5f5d38815 | [
"MIT"
] | 2 | 2019-05-22T09:40:51.000Z | 2020-03-03T12:17:12.000Z | pymilldb/context/Column.py | Toka-Taka/mill-db | 1edf390f2ce89d9232ba91d722cb4b104c398078 | [
"MIT"
] | 6 | 2018-05-03T16:04:13.000Z | 2019-12-01T11:01:07.000Z | from .DataType import BaseType
class Column(object):
    """A table column together with its modifier (common / bloom /
    indexed / primary key) and an optional bloom-filter fail share."""

    COLUMN_COMMON = 0
    COLUMN_BLOOM = 1
    COLUMN_INDEXED = 2
    COLUMN_PRIMARY = 3
    DEFAULT_FAIL_SHARE = 0.2
    __NAME_TO_MOD = dict(
        bloom=1,
        indexed=2,
        pk=3,
    )

    def __init__(self, name: str, kind: BaseType, mod: int, table=None, fail_share=None):
        self.name = name
        self.kind = kind
        self.mod = mod
        self.table = table
        if fail_share is None:
            self.fail_share = self.DEFAULT_FAIL_SHARE
        else:
            self.fail_share = fail_share

    @classmethod
    def auto(cls, name: str, kind: BaseType, mod: str, table=None, fail_share=None):
        """Build a column, translating a textual modifier name."""
        mod_code = cls.__NAME_TO_MOD[mod.lower()]
        return Column(name, kind, mod_code, table, fail_share)

    @classmethod
    def common(cls, name: str, kind: BaseType, table=None, fail_share=None):
        """Build a plain (unindexed) column."""
        return Column(name, kind, cls.COLUMN_COMMON, table, fail_share)

    @property
    def is_common(self):
        return self.mod == self.COLUMN_COMMON

    @classmethod
    def bloom(cls, name: str, kind: BaseType, table=None, fail_share=None):
        """Build a bloom-filtered column."""
        return Column(name, kind, cls.COLUMN_BLOOM, table, fail_share)

    @property
    def is_bloom(self):
        return self.mod == self.COLUMN_BLOOM

    @classmethod
    def indexed(cls, name: str, kind: BaseType, table=None, fail_share=None):
        """Build an indexed column."""
        return Column(name, kind, cls.COLUMN_INDEXED, table, fail_share)

    @property
    def is_indexed(self):
        return self.mod == self.COLUMN_INDEXED

    @classmethod
    def primary(cls, name: str, kind: BaseType, table=None, fail_share=None):
        """Build a primary-key column."""
        return Column(name, kind, cls.COLUMN_PRIMARY, table, fail_share)

    @property
    def is_primary(self):
        return self.mod == self.COLUMN_PRIMARY
| 28.540984 | 89 | 0.657668 | 1,707 | 0.980471 | 0 | 0 | 1,150 | 0.66054 | 0 | 0 | 0 | 0 |
50a5a6f4507fa5ed92be275eb4339130e457b023 | 6,369 | py | Python | calico/etcddriver/test/test_hwm.py | ozdanborne/felix | 5eff313e6498b3a7d775aa16cb09fd4578331701 | [
"Apache-2.0"
] | 6 | 2016-10-18T04:04:25.000Z | 2016-10-18T04:06:49.000Z | calico/etcddriver/test/test_hwm.py | ozdanborne/felix | 5eff313e6498b3a7d775aa16cb09fd4578331701 | [
"Apache-2.0"
] | 1 | 2021-06-01T21:45:37.000Z | 2021-06-01T21:45:37.000Z | calico/etcddriver/test/test_hwm.py | ozdanborne/felix | 5eff313e6498b3a7d775aa16cb09fd4578331701 | [
"Apache-2.0"
] | 2 | 2018-10-31T08:55:19.000Z | 2019-04-16T02:14:50.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_hwm
~~~~~~~~
Tests for high water mark tracking function.
"""
import logging
from unittest import TestCase
from mock import Mock, call, patch
from calico.etcddriver import hwm
from calico.etcddriver.hwm import HighWaterTracker
_log = logging.getLogger(__name__)
class TestHighWaterTracker(TestCase):
    """Exercises HighWaterTracker's merge of a point-in-time snapshot
    (etcd_index 10) with a concurrent event stream (indexes 11, 12, ...),
    including deletion tracking during the merge window."""
    def setUp(self):
        # Fresh tracker per test; no shared state between tests.
        self.hwm = HighWaterTracker()
    def test_mainline(self):
        """Walk the full snapshot/event merge lifecycle in one scenario."""
        # Test merging of updates between a snapshot with etcd_index 10 and
        # updates coming in afterwards with indexes 11, 12, ...
        # We use prefix "/a/$" because $ is not allowed in the trie so it
        # implicitly tests encoding/decoding is being properly applied.
        old_hwm = self.hwm.update_hwm("/a/$/c", 9) # Pre-snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/b/c/d", 9) # Pre-snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/j/c/d", 9) # Pre-snapshot
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 3)
        # While merging a snapshot we track deletions.
        self.hwm.start_tracking_deletions()
        # Send in some keys from the snapshot.
        old_hwm = self.hwm.update_hwm("/a/$/c", 10) # From snapshot
        self.assertEqual(old_hwm, 9)
        old_hwm = self.hwm.update_hwm("/a/$/d", 10) # From snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/d/e/f", 10) # From snapshot
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 5)
        # This key is first seen in the event stream, so the snapshot version
        # should be ignored.
        old_hwm = self.hwm.update_hwm("/a/h/i", 11) # From events
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/a/h/i", 10) # From snapshot
        self.assertEqual(old_hwm, 11)
        old_hwm = self.hwm.update_hwm("/a/h/i", 12) # From events
        self.assertEqual(old_hwm, 11) # Still 11, snapshot ignored.
        self.assertEqual(len(self.hwm), 6)
        # Then a whole subtree gets deleted by the events.
        deleted_keys = self.hwm.store_deletion("/a/$", 13)
        self.assertEqual(set(deleted_keys), set(["/a/$/c", "/a/$/d"]))
        self.assertEqual(len(self.hwm), 4)
        # But afterwards, we see a snapshot key within the subtree, it should
        # be ignored.
        old_hwm = self.hwm.update_hwm("/a/$/e", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        # Then a new update from the event stream, recreates the directory.
        old_hwm = self.hwm.update_hwm("/a/$/f", 14)
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 5)
        # And subsequent updates are processed ignoring the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 15)
        self.assertEqual(old_hwm, 14)
        # However, snapshot updates from within the deleted subtree are still
        # ignored.
        old_hwm = self.hwm.update_hwm("/a/$/e", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        old_hwm = self.hwm.update_hwm("/a/$/g", 10)
        self.assertEqual(old_hwm, 13) # Returns the etcd_index of the delete.
        self.assertEqual(len(self.hwm), 5)
        # But ones outside the subtree ar not.
        old_hwm = self.hwm.update_hwm("/f/g", 10)
        self.assertEqual(old_hwm, None)
        # And subsequent updates are processed ignoring the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 16)
        self.assertEqual(old_hwm, 15)
        # End of snapshot: we stop tracking deletions, which should free up the
        # resources.
        self.hwm.stop_tracking_deletions()
        self.assertEqual(self.hwm._deletion_hwms, None)
        # Then, subseqent updates should be handled normally.
        old_hwm = self.hwm.update_hwm("/a/$/f", 17)
        self.assertEqual(old_hwm, 16) # From previous event
        old_hwm = self.hwm.update_hwm("/g/b/f", 18)
        self.assertEqual(old_hwm, None) # Seen for the first time.
        old_hwm = self.hwm.update_hwm("/d/e/f", 19)
        self.assertEqual(old_hwm, 10) # From the snapshot.
        self.assertEqual(len(self.hwm), 7)
        # We should be able to find all the keys that weren't seen during
        # the snapshot.
        old_keys = self.hwm.remove_old_keys(10)
        self.assertEqual(set(old_keys), set(["/b/c/d", "/j/c/d"]))
        self.assertEqual(len(self.hwm), 5)
        # They should now be gone from the index.
        old_hwm = self.hwm.update_hwm("/b/c/d", 20)
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 6)
class TestKeyEncoding(TestCase):
    """Round-trip checks for hwm.encode_key / hwm.decode_key."""
    def test_encode_key(self):
        # Table-driven: each (key, expected encoding) pair must round-trip.
        cases = (
            ("/calico/v1/foo/bar", "/calico/v1/foo/bar/"),
            ("/:_-./foo", "/:_-./foo/"),
            ("/:_-.~/foo", "/:_-.%7E/foo/"),
            ("/%/foo", "/%25/foo/"),
            (u"/\u01b1/foo", "/%C6%B1/foo/"),
        )
        for key, expected in cases:
            self.assert_enc_dec(key, expected)
        # A key that already ends in "/" passes through unchanged.
        self.assertEqual(hwm.encode_key("/foo/"), "/foo/")
    def assert_enc_dec(self, key, expected_encoding):
        """Assert *key* encodes to *expected_encoding* and decodes back to *key*."""
        encoded = hwm.encode_key(key)
        self.assertEqual(encoded, expected_encoding,
                         msg="Expected %r to encode as %r but got %r" %
                             (key, expected_encoding, encoded))
        decoded = hwm.decode_key(encoded)
        self.assertEqual(decoded, key,
                         msg="Expected %r to decode as %r but got %r" %
                             (encoded, key, decoded))
| 41.627451 | 79 | 0.633851 | 5,459 | 0.85712 | 0 | 0 | 0 | 0 | 0 | 0 | 2,617 | 0.410897 |
50a67e7e71443f0123964ab785888984cda3fc3b | 404 | py | Python | skdecide/hub/domain/gym/__init__.py | jeromerobert/scikit-decide | 900916e627669fb3f7520edb2aaef55e08064b25 | [
"MIT"
] | null | null | null | skdecide/hub/domain/gym/__init__.py | jeromerobert/scikit-decide | 900916e627669fb3f7520edb2aaef55e08064b25 | [
"MIT"
] | null | null | null | skdecide/hub/domain/gym/__init__.py | jeromerobert/scikit-decide | 900916e627669fb3f7520edb2aaef55e08064b25 | [
"MIT"
] | 1 | 2021-02-26T17:31:51.000Z | 2021-02-26T17:31:51.000Z | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .gym import GymDomain, DeterministicInitializedGymDomain, GymWidthDomain, \
GymDiscreteActionDomain, DeterministicGymDomain, CostDeterministicGymDomain, \
GymPlanningDomain, GymDomainStateProxy, GymDomainHashable, AsGymEnv
| 50.5 | 82 | 0.816832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.405941 |
50a78e2e1ef64a3b922e626522f2580ee9837a7c | 7,092 | py | Python | openwisp_network_topology/tests/test_admin.py | mafalaz/openwisp-network-topology | 36188cfd20ad62787e8202bb6a4e106aaa26ef18 | [
"BSD-3-Clause"
] | 1 | 2018-12-07T14:24:26.000Z | 2018-12-07T14:24:26.000Z | openwisp_network_topology/tests/test_admin.py | mafalaz/openwisp-network-topology | 36188cfd20ad62787e8202bb6a4e106aaa26ef18 | [
"BSD-3-Clause"
] | null | null | null | openwisp_network_topology/tests/test_admin.py | mafalaz/openwisp-network-topology | 36188cfd20ad62787e8202bb6a4e106aaa26ef18 | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from django_netjsongraph.tests import CreateGraphObjectsMixin
from django_netjsongraph.tests.base.test_admin import TestAdminMixin
from openwisp_users.tests.utils import TestOrganizationMixin
from openwisp_utils.tests.utils import TestMultitenantAdminMixin
from . import CreateOrgMixin
from ..apps import OpenwispNetworkTopologyConfig as appconfig
from ..models import Link, Node, Topology
class TestAdmin(CreateGraphObjectsMixin, CreateOrgMixin,
                TestAdminMixin, TestCase):
    """Runs the shared django_netjsongraph admin test suite against the
    openwisp multitenant models (Topology/Node/Link with organizations)."""
    # Model classes the mixin-provided tests operate on.
    topology_model = Topology
    link_model = Link
    node_model = Node
    @property
    def prefix(self):
        # Admin URL namespace prefix, e.g. 'admin:topology'.
        return 'admin:{0}'.format(appconfig.label)
    def setUp(self):
        # Build a minimal topology (one org, one topology, two nodes)
        # before delegating to the mixin's setUp.
        org = self._create_org()
        t = self._create_topology(organization=org)
        self._create_node(label="node1",
                          addresses="192.168.0.1;",
                          topology=t,
                          organization=org)
        self._create_node(label="node2",
                          addresses="192.168.0.2;",
                          topology=t,
                          organization=org)
        super(TestAdmin, self).setUp()
class TestMultitenantAdmin(CreateGraphObjectsMixin, TestMultitenantAdminMixin,
                           TestOrganizationMixin, TestCase):
    """Verifies that admin changelists, add forms and filters only expose
    objects belonging to the operator's organizations (org1 + an inactive
    org), hiding org2 and inactive-org data."""
    topology_model = Topology
    node_model = Node
    link_model = Link
    # Permission filters granting the operator topology/node/link rights.
    operator_permission_filters = [
        {'codename__endswith': 'topology'},
        {'codename__endswith': 'node'},
        {'codename__endswith': 'link'},
    ]
    def _create_multitenancy_test_env(self):
        """Create two active orgs plus an inactive one, each with a topology,
        two nodes and a link; return everything in a dict for assertions."""
        org1 = self._create_org(name='test1org')
        org2 = self._create_org(name='test2org')
        inactive = self._create_org(name='inactive-org', is_active=False)
        # Operator belongs to org1 and the inactive org only.
        operator = self._create_operator(organizations=[org1, inactive])
        t1 = self._create_topology(label='topology1org', organization=org1)
        t2 = self._create_topology(label='topology2org', organization=org2)
        t3 = self._create_topology(label='topology3org', organization=inactive)
        n11 = self._create_node(label='node1org1', topology=t1, organization=org1)
        n12 = self._create_node(label='node2org1', topology=t1, organization=org1)
        n21 = self._create_node(label='node1org2', topology=t2, organization=org2)
        n22 = self._create_node(label='node2org2', topology=t2, organization=org2)
        n31 = self._create_node(label='node1inactive', topology=t3, organization=inactive)
        n32 = self._create_node(label='node2inactive', topology=t3, organization=inactive)
        l1 = self._create_link(topology=t1,
                               organization=org1,
                               source=n11,
                               target=n12)
        l2 = self._create_link(topology=t2,
                               organization=org2,
                               source=n21,
                               target=n22)
        l3 = self._create_link(topology=t3,
                               organization=inactive,
                               source=n31,
                               target=n32)
        data = dict(t1=t1, t2=t2, t3_inactive=t3,
                    n11=n11, n12=n12, l1=l1,
                    n21=n21, n22=n22, l2=l2,
                    n31=n31, n32=n32, l3_inactive=l3,
                    org1=org1, org2=org2,
                    inactive=inactive,
                    operator=operator)
        return data
    def test_topology_queryset(self):
        """Topology changelist only shows org1's topology."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_topology_changelist'),
            visible=[data['t1'].label, data['org1'].name],
            hidden=[data['t2'].label, data['org2'].name,
                    data['t3_inactive'].label]
        )
    def test_topology_organization_fk_queryset(self):
        """Topology add form's organization select only lists org1."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_topology_add'),
            visible=[data['org1'].name],
            hidden=[data['org2'].name, data['inactive']],
            select_widget=True
        )
    def test_node_queryset(self):
        """Node changelist only shows org1's nodes."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_changelist'),
            visible=[data['n11'].label, data['n12'].label, data['org1'].name],
            hidden=[data['n21'].label, data['n22'].label, data['org2'].name,
                    data['n31'].label, data['n32'].label, data['inactive']]
        )
    def test_node_organization_fk_queryset(self):
        """Node add form's organization select only lists org1."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_add'),
            visible=[data['org1'].name],
            hidden=[data['org2'].name, data['inactive']],
            select_widget=True
        )
    def test_link_queryset(self):
        """Link changelist only shows org1's link."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_changelist'),
            visible=[str(data['l1']), data['org1'].name],
            hidden=[str(data['l2']), data['org2'].name,
                    str(data['l3_inactive'])]
        )
    def test_link_organization_fk_queryset(self):
        """Link add form's organization select only lists org1."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_add'),
            visible=[data['org1'].name],
            hidden=[data['org2'].name, data['inactive']],
            select_widget=True
        )
    def test_node_topology_fk_queryset(self):
        """Node add form's topology select only lists org1's topology."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_add'),
            visible=[data['t1'].label],
            hidden=[data['t2'].label, data['t3_inactive'].label]
        )
    def test_link_topology_fk_queryset(self):
        """Link add form's topology select only lists org1's topology."""
        data = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_add'),
            visible=[data['t1'].label],
            hidden=[data['t2'].label, data['t3_inactive'].label]
        )
    def test_node_topology_filter(self):
        """Node changelist topology filter only offers org1 topologies."""
        data = self._create_multitenancy_test_env()
        t_special = self._create_topology(label='special', organization=data['org1'])
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_changelist'),
            visible=[data['t1'].label, t_special.label],
            hidden=[data['t2'].label, data['t3_inactive'].label]
        )
    def test_link_topology_filter(self):
        """Link changelist topology filter only offers org1 topologies."""
        data = self._create_multitenancy_test_env()
        t_special = self._create_topology(label='special', organization=data['org1'])
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_changelist'),
            visible=[data['t1'].label, t_special.label],
            hidden=[data['t2'].label, data['t3_inactive'].label]
        )
| 41.473684 | 90 | 0.607727 | 6,629 | 0.934715 | 0 | 0 | 82 | 0.011562 | 0 | 0 | 876 | 0.123519 |
50a7e0a80d5cef9b4bb80a0f46b84c78195b8ae4 | 527 | py | Python | FaceLandmarking.LearningProcess/readers/regressor_example_reader.py | TomaszRewak/FaceLandmarking | 74bc5cbcdb61cd9835e5bfc61e978fad256940f4 | [
"MIT"
] | 78 | 2018-02-09T14:30:06.000Z | 2020-10-01T08:43:32.000Z | FaceLandmarking.LearningProcess/readers/regressor_example_reader.py | YoungerGao/Face-Landmarking | 74bc5cbcdb61cd9835e5bfc61e978fad256940f4 | [
"MIT"
] | 1 | 2019-03-20T00:16:47.000Z | 2019-03-24T09:12:25.000Z | FaceLandmarking.LearningProcess/readers/regressor_example_reader.py | YoungerGao/Face-Landmarking | 74bc5cbcdb61cd9835e5bfc61e978fad256940f4 | [
"MIT"
] | 8 | 2019-03-03T11:57:43.000Z | 2021-07-01T09:27:20.000Z | import csv
def read_regressor_examples(num_of_features, num_of_decisions, file_path):
    """Load a space-delimited file of regression examples.

    Each row holds ``num_of_features`` feature values followed by
    ``num_of_decisions`` target values; all values are parsed as floats.

    :param num_of_features: number of leading columns forming the input x.
    :param num_of_decisions: number of columns after the features forming y.
    :param file_path: path to the space-delimited text file.
    :return: dict with keys 'x' (list of feature rows) and 'y' (list of
        target rows).
    """
    xs = []
    ys = []
    # newline='' is required when handing a file object to the csv module,
    # otherwise embedded/platform newlines can be mistranslated.
    with open(file_path, mode='r', encoding='utf-8', newline='') as file:
        reader = csv.reader(file, delimiter=' ')
        for row in reader:
            xs.append([float(value) for value in row[:num_of_features]])
            ys.append([float(value) for value in
                       row[num_of_features:num_of_features + num_of_decisions]])
    return {
        'x': xs,
        'y': ys
    }
50a82399bf9b9b372729a8b7cc2d453fc77656d4 | 3,355 | py | Python | tests/test_models.py | jmichalicek/django-mail-viewer | ede04bc6a0809b8e62621f5646396893ad966d71 | [
"MIT"
] | 3 | 2017-07-19T23:35:31.000Z | 2022-02-13T18:49:04.000Z | tests/test_models.py | jmichalicek/django-mail-viewer | ede04bc6a0809b8e62621f5646396893ad966d71 | [
"MIT"
] | 1 | 2019-10-05T14:51:39.000Z | 2019-10-05T14:51:40.000Z | tests/test_models.py | jmichalicek/django-mail-viewer | ede04bc6a0809b8e62621f5646396893ad966d71 | [
"MIT"
] | null | null | null | from pathlib import Path
import shutil
from django.conf import settings
from django.core import cache, mail
from django.test import TestCase
from django_mail_viewer.backends.database.models import EmailMessage
class DatabaseBackendEmailMessageTest(TestCase):
    """Tests for the database-backed EmailMessage model: one multipart email
    (text + html alternative + gif attachment) is sent in setUpTestData and
    every test inspects the stored representation."""
    connection_backend = 'django_mail_viewer.backends.database.backend.EmailBackend'
    @classmethod
    def setUpTestData(cls):
        # Send one multipart email through the database backend and keep the
        # stored root message for the tests.
        m = mail.EmailMultiAlternatives(
            'Email subject', 'Email text', 'test@example.com', ['to1@example.com', 'to2.example.com']
        )
        m.attach_alternative(
            '<html><body><p style="background-color: #AABBFF; color: white">Email html</p></body></html>', 'text/html',
        )
        current_dir = Path(__file__).resolve().parent
        m.attach_file(current_dir / 'test_files' / 'icon.gif', 'image/gif')
        with mail.get_connection(cls.connection_backend) as connection:
            connection.send_messages([m])
        cls.multipart_message = EmailMessage.objects.filter(parent=None).first()
    @classmethod
    def tearDownClass(cls) -> None:
        # Remove attachment files written under MEDIA_ROOT during the tests.
        try:
            shutil.rmtree(settings.MEDIA_ROOT)
        finally:
            super().tearDownClass()
    def test_get(self):
        """Header lookup returns the stored value, case-insensitively."""
        test_matrix = [
            {'header_name': 'Content-Type', 'value': 'multipart/mixed'},
            {'header_name': 'Subject', 'value': 'Email subject'},
        ]
        for t in test_matrix:
            with self.subTest(header=t['header_name']):
                self.assertEqual(self.multipart_message.get(t['header_name']), t['value'])
                # test that looking up by header is not case sensitive
                self.assertEqual(
                    self.multipart_message.get(t['header_name']), self.multipart_message.get(t['header_name'].lower())
                )
    def test_is_multipart(self):
        """is_multipart() is True for the fixture email, False for plain ones."""
        self.assertTrue(self.multipart_message.is_multipart())
        with mail.get_connection(self.connection_backend) as connection:
            mail.EmailMultiAlternatives(
                f'Not multipart',
                f'Not multipart',
                'test@example.com',
                ['to1@example.com', 'to2.example.com'],
                connection=connection,
            ).send()
        m = EmailMessage.objects.filter(parent=None).latest('id')
        self.assertFalse(m.is_multipart())
    def test_walk(self):
        """walk() yields the message's parts in stored order."""
        self.assertEqual(
            list(EmailMessage.objects.filter(parent=self.multipart_message).order_by('-created_at', 'id')),
            list(self.multipart_message.walk()),
        )
    def test_get_content_type(self):
        """Content types cover the root message and each stored part."""
        # The main message followed by each of its parts
        expected_content_types = ['multipart/mixed', 'multipart/alternative', 'text/plain', 'text/html', 'image/gif']
        self.assertEqual(
            expected_content_types,
            [m.get_content_type() for m in EmailMessage.objects.all().order_by('created_at', 'id')],
        )
    def test_get_payload(self):
        """get_payload() returns the attachment's raw bytes."""
        m = self.multipart_message.parts.exclude(file_attachment='').get()
        # May need to seek back to 0 after this
        self.assertEqual(m.file_attachment.read(), m.get_payload())
    def test_get_filename(self):
        """get_filename() returns the original attachment file name."""
        m = self.multipart_message.parts.exclude(file_attachment='').get()
        self.assertEqual('icon.gif', m.get_filename())
50a8805bffcc07db1ad4a801fc9b5ce33c494c35 | 5,977 | py | Python | utils.py | mfouilleul/Haddock | de5f0ad9966707b09ff2a77f10d64290fb26083b | [
"MIT"
] | null | null | null | utils.py | mfouilleul/Haddock | de5f0ad9966707b09ff2a77f10d64290fb26083b | [
"MIT"
] | null | null | null | utils.py | mfouilleul/Haddock | de5f0ad9966707b09ff2a77f10d64290fb26083b | [
"MIT"
] | null | null | null | '''
utils.py
General utility functions: unit conversions, great-circle
distances, CSV queries, platform-independent web browsing.
'''
import csv
import math
import webbrowser
# UNIT CONVERSIONS
MPS_TO_KTS = 1.944  # knots per (metre/second)
class units:
    """Unit conversion and fixed-width digit-string helpers.

    All methods are stateless; they are declared @staticmethod so they can
    be called on the class (the existing call style) or on an instance.
    """
    @staticmethod
    def mps_to_kts(mps):
        """Convert a speed in metres per second to knots."""
        return mps * MPS_TO_KTS
    @staticmethod
    def enforceTwoDigits(numStr):
        """Left-pad a single-character digit string with one '0'.

        Deliberately only pads length-1 input (original behavior): empty
        and longer strings pass through unchanged.
        """
        if len(numStr) == 1:
            return "0" + numStr
        return numStr
    @staticmethod
    def enforceDigitsLeading(numStr, maxDig):
        """Left-pad *numStr* with zeros to at least *maxDig* characters.

        Equivalent to the original loop; inputs are assumed to be unsigned
        digit strings (str.zfill treats a leading sign specially).
        """
        return numStr.zfill(maxDig)
    @staticmethod
    def enforceDigitsTrailing(numStr, maxDig):
        """Right-pad *numStr* with zeros to at least *maxDig* characters."""
        return numStr.ljust(maxDig, "0")
class geo:
    """Geographic helpers: nearest-sea lookup, NMEA/DMS formatting,
    great-circle distance and bearing normalization."""
    def nearestSea(lat, lon):
        """Return the name of the nearest sea whose bounding box contains
        (lat, lon), or "" if none match.

        Reads ./data/worldseas.csv via the db helper; rows are expected to
        carry lat0/lon0/lat1/lon1 bbox corners and clat/clon centers.
        """
        # true if a is inside the range [b, c]
        def within(a, b, c):
            if b > c:
                c,b=b,c
            return a >= b and a <= c
        # Match rows whose bbox contains the point; also compute the
        # distance to the row's center so matches can be ranked.
        def inBbox(e):
            lat0,lon0 = float(e['lat0']),float(e['lon0'])
            lat1,lon1 = float(e['lat1']),float(e['lon1'])
            clat,clon = float(e['clat']),float(e['clon'])
            dist = geo.dist_coord(lat, lon, clat, clon)
            return (within(lat, lat0, lat1) and within(lon, lon0, lon1),dist)
        # Stash the computed distance on the row for sorting below.
        def saveDist(e, args):
            e['dist'] = args[0]
        def sortDist(e):
            return e['dist']
        seas = db.query("./data/worldseas.csv", inBbox, saveDist)
        seas.sort(key=sortDist)
        if len(seas) > 0:
            return seas[0]['name']
        return ""
    def latlon_to_nmea(lat, lon):
        """Format decimal degrees as an NMEA-style 'ddmm.mmmm,N,dddmm.mmmm,E'
        string (2-digit whole minutes, 4 decimal places)."""
        latDeg = lat
        latMin = (latDeg - math.floor(latDeg))*60
        lonDeg = lon
        lonMin = (lonDeg - math.floor(lonDeg))*60
        if latDeg > 0:
            latDir = "N"
        else:
            latDir = "S"
        if lonDeg > 0:
            lonDir = "E"
        else:
            lonDir = "W"
        # Split minutes at the decimal point and pad each side to a fixed
        # width so the NMEA field has a stable shape.
        latMinStr = str(round(latMin,4))
        latMinMajorStr = latMinStr[:latMinStr.find(".")]
        latMinMinorStr = latMinStr[latMinStr.find(".")+1:]
        latMinMajorStr = units.enforceDigitsLeading(latMinMajorStr, 2)
        latMinMinorStr = units.enforceDigitsTrailing(latMinMinorStr, 4)
        latMinStr = latMinMajorStr + "." + latMinMinorStr
        lonMinStr = str(round(lonMin,4))
        lonMinMajorStr = lonMinStr[:lonMinStr.find(".")]
        lonMinMinorStr = lonMinStr[lonMinStr.find(".")+1:]
        lonMinMajorStr = units.enforceDigitsLeading(lonMinMajorStr, 2)
        lonMinMinorStr = units.enforceDigitsTrailing(lonMinMinorStr, 4)
        lonMinStr = lonMinMajorStr + "." + lonMinMinorStr
        return str(int(abs(latDeg)))+latMinStr + "," + latDir + "," + str(int(abs(lonDeg)))+lonMinStr + "," + lonDir
    def deg_to_dms(deg, type='lat'):
        """Format decimal degrees as a degrees/minutes/seconds string with a
        hemisphere prefix, e.g. N41º24'12.20"."""
        # source: https://stackoverflow.com/questions/2579535/convert-dd-decimal-degrees-to-dms-degrees-minutes-seconds-in-python
        decimals, number = math.modf(deg)
        d = int(number)
        m = int(decimals * 60)
        s = (deg - d - m / 60) * 3600.00
        compass = {
            'lat': ('N','S'),
            'lon': ('E','W')
        }
        compass_str = compass[type][0 if d >= 0 else 1]
        return '{}{}º{}\'{:.2f}"'.format(compass_str, abs(d), abs(m), abs(s))
    def latlon_to_str(lat, lon):
        """Return (lat_dms, lon_dms) strings for a coordinate pair."""
        return geo.deg_to_dms(lat,'lat'),geo.deg_to_dms(lon,'lon')
    # distance between two global points in nautical miles
    def dist_coord(lat1,lon1,lat2,lon2):
        """Haversine great-circle distance in nautical miles."""
        # source: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
        R = 6373.0 # approximate radius of earth in km
        lat1 = math.radians(lat1)
        lon1 = math.radians(lon1)
        lat2 = math.radians(lat2)
        lon2 = math.radians(lon2)
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        # 0.539957 converts km to nautical miles.
        return 0.539957*R * c
    # wraps angle to range [0, 360)
    def wrap_angle(b):
        """Normalize an angle in degrees to the range [0, 360)."""
        deg = b
        while deg < 0:
            deg = 360+deg
        while deg >= 360:
            deg = deg-360
        return deg
class webviz:
    """Builders for map/visualization URLs plus a browser launcher."""
    def loadURL(url):
        """Open *url* in the platform's default web browser."""
        webbrowser.open(url)
    def openseamap(lat, lon):
        """OpenSeaMap URL centered and marked at (lat, lon); inputs are strings."""
        base = "https://map.openseamap.org/?zoom=8"
        return (base
                + "&lat=" + lat + "&lon=" + lon
                + "&mlat=" + lat + "&mlon=" + lon
                + "&layers=BFTFFFTFFTF0FFFFFFFFFF")
    def pirosail(boatid):
        """PiroSail tracker URL for the given boat id."""
        return "http://piro.biz/tracker/?2d&marineid=" + boatid
    def earthwindmap(lat, lon):
        """earth.nullschool.net wind-map URL centered at (lat, lon)."""
        prefix = "https://earth.nullschool.net/#current/wind/surface/level/orthographic="
        return prefix + lon + "," + lat + ",3000/loc=" + lon + "," + lat
class db:
    """Tiny query helpers over CSV files, using csv.DictReader rows."""
    def execute(csvFile, executeFunc):
        """Apply *executeFunc* to every row of *csvFile* (side effects only)."""
        with open(csvFile, newline='') as handle:
            for row in csv.DictReader(handle):
                executeFunc(row)
    def query(csvFile, queryFunc, processFunc=None):
        """Return rows for which queryFunc(row)[0] is truthy.

        queryFunc must return a sequence whose first element is the match
        flag; the remaining elements are passed to *processFunc* (if given)
        so it can post-process each matching row in place.
        """
        matches = []
        with open(csvFile, newline='') as handle:
            for row in csv.DictReader(handle):
                verdict = queryFunc(row)
                if verdict[0]:
                    if processFunc is not None:
                        processFunc(row, verdict[1:])
                    matches.append(row)
        return matches
    def findFirst(csvFile, queryFunc):
        """Return the first row where queryFunc(row) is truthy, else None."""
        with open(csvFile, newline='') as handle:
            for row in csv.DictReader(handle):
                if queryFunc(row):
                    return row
        return None
50a8f8b8a04737c09a518f900e9a3dbfb59b7774 | 1,862 | py | Python | frontend/front.py | streampizza/chirrup | 39444cdaa1af7fdcac9d8dcd34df644d4feae496 | [
"Apache-2.0"
] | null | null | null | frontend/front.py | streampizza/chirrup | 39444cdaa1af7fdcac9d8dcd34df644d4feae496 | [
"Apache-2.0"
] | null | null | null | frontend/front.py | streampizza/chirrup | 39444cdaa1af7fdcac9d8dcd34df644d4feae496 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask import request, render_template, redirect
from datetime import datetime
from pymongo import MongoClient
import html
import random
import json
import ast
from flask.ext.pymongo import PyMongo
from flask import make_response, request, current_app
from functools import update_wrapper
app = Flask(__name__)
mongo = PyMongo(app)
app.config['MONGO_HOST'] = 'localhost'
app.config['MONGO_PORT'] = 27017
app.config['MONGO_DBNAME'] = 'chirrup'
mClient = MongoClient('localhost',27017)
collection = mClient['chirrup']['tweets']
@app.route('/', methods=['GET','POST'])
def home():
    """Landing page.

    GET: render the home template with every hashtag seen in the tweets
    collection. POST: redirect to the analysis page for the submitted query.
    """
    if request.method=='POST':
        var = request.form['query']
        return redirect('/'+var, code=302)
    else:
        distincthashtags = collection.distinct("hashtags")
        return render_template("home.html",distincthashtags=distincthashtags)
@app.route('/<input>', methods=['GET','POST'])
def analyze(input):
    """Render sentiment analysis for one hashtag.

    Aggregates average sentiment per country and overall from MongoDB;
    returns a 404-style page when the hashtag has no tweets. Also shows the
    ten tweets with the lowest timestamps for the hashtag.
    """
    hashtag = input
    # Average sentiment grouped by country for this hashtag.
    country_sentiment_query = list(collection.aggregate([{"$match":{"hashtags":hashtag}},{"$group":{'_id':'$country',"avgsentiment": {"$avg":"$sentiment"}}}]))
    # Overall average sentiment for this hashtag.
    average_sentiment_query = list(collection.aggregate([{"$match":{"hashtags":hashtag}},{"$group":{'_id':'sentiment',"avgsentiment": {"$avg":"$sentiment"}}}]))
    if len(average_sentiment_query)==0:
        return render_template('fourohfour.html')
    country_wise_sentiment = json.dumps(country_sentiment_query)
    average_sentiment = json.dumps(average_sentiment_query[0])
    sorter = [('timestamp', 1)]
    last_ten_tweets = list(collection.find({"hashtags":hashtag},{'timestamp':0, '_id': 0}).sort(sorter))[:10]
    return render_template("analysis.html",country_wise_sentiment=country_wise_sentiment, average_sentiment=average_sentiment, hashtag=hashtag, last_ten_tweets=last_ten_tweets)
if __name__=="__main__":
app.run(debug=True)
| 40.478261 | 176 | 0.727175 | 0 | 0 | 0 | 0 | 1,252 | 0.672395 | 0 | 0 | 384 | 0.20623 |
50abbdac4aab572c750bb3e9b9b2333c098a8e14 | 1,578 | py | Python | resticweb/engine_configure.py | XXL6/resticweb | 48f8c86438d6c67b9f20f062d8c9e04ea6732767 | [
"MIT"
] | 1 | 2019-07-29T13:54:58.000Z | 2019-07-29T13:54:58.000Z | resticweb/engine_configure.py | XXL6/resticweb | 48f8c86438d6c67b9f20f062d8c9e04ea6732767 | [
"MIT"
] | 1 | 2020-04-30T09:57:31.000Z | 2020-04-30T09:57:31.000Z | resticweb/engine_configure.py | XXL6/resticweb | 48f8c86438d6c67b9f20f062d8c9e04ea6732767 | [
"MIT"
] | 1 | 2019-08-02T22:51:28.000Z | 2019-08-02T22:51:28.000Z | from resticweb.dictionary.resticweb_variables import Config
import resticweb.engine as local_engine
from resticweb.dictionary.resticweb_exceptions import NoEngineAvailable
import subprocess
import os.path as path
def configure_engine():
return_value = False
command = [Config.ENGINE_COMMAND, 'version']
try:
finished_process = subprocess.run(
command,
shell=False,
capture_output=True)
if finished_process:
line = finished_process.stdout.decode('utf-8')
errors = finished_process.stderr.decode('utf-8')
print(errors)
if len(line) > 0:
if "compiled with go" in line:
return_value = True
if return_value:
return return_value
except FileNotFoundError:
pass
location, throwaway = path.split(local_engine.__file__)
Config.ENGINE_COMMAND = f'{location}{path.sep}restic'
command = [Config.ENGINE_COMMAND, 'version']
try:
finished_process = subprocess.run(
command,
shell=False,
capture_output=True)
line = finished_process.stdout.decode('utf-8')
if len(line) > 0:
if "compiled with go" in line:
return_value = True
except FileNotFoundError:
raise NoEngineAvailable("Unable to find a backup engine.")
if return_value:
return return_value
else:
raise NoEngineAvailable("Unable to find a backup engine.") | 35.863636 | 71 | 0.609632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.107731 |
50acb7ed0b3df74f0efa284a48e9366ef80c9de6 | 2,104 | py | Python | saas/aiops/api/anomalydetection/main/anomaly_detection.py | iuskye/SREWorks | a2a7446767d97ec5f6d15bd00189c42150d6c894 | [
"Apache-2.0"
] | 407 | 2022-03-16T08:09:38.000Z | 2022-03-31T12:27:10.000Z | saas/aiops/api/anomalydetection/main/anomaly_detection.py | Kwafoor/SREWorks | 37a64a0a84b29c65cf6b77424bd2acd0c7b42e2b | [
"Apache-2.0"
] | 25 | 2022-03-22T04:27:31.000Z | 2022-03-30T08:47:28.000Z | saas/aiops/api/anomalydetection/main/anomaly_detection.py | Kwafoor/SREWorks | 37a64a0a84b29c65cf6b77424bd2acd0c7b42e2b | [
"Apache-2.0"
] | 109 | 2022-03-21T17:30:44.000Z | 2022-03-31T09:36:28.000Z | import pandas as pd
import json
import time
from bentoml import env, artifacts, api, BentoService
from bentoml.adapters import DataframeInput, JsonInput, StringInput
from bentoml.frameworks.sklearn import SklearnModelArtifact
@env(infer_pip_packages=True)
@artifacts([SklearnModelArtifact('model')])
class AnomalyDetection(BentoService):
    """
    A minimal BentoService exposing a scikit-learn model plus stub
    analysis endpoints.
    """
    @api(input=JsonInput())
    def analyse(self, param: json):
        """
        JSON API: returns a dict with a hard-coded 'predictSeriesList' when
        param['seriesList'] has at least two entries, otherwise an error
        dict ({'code', 'message'}). When param['taskType'] == 'async' it
        sleeps 30s first (presumably to simulate a long-running task —
        TODO confirm).
        """
        dic = {}
        if param['taskType']=='async':
            time.sleep(30)
        try:
            if len(param['seriesList'])<2:
                raise Exception()
            else:
                series = []
                series.append([1635216096000, 23.541])
                dic['predictSeriesList'] = series
        except Exception as ex:
            # Any failure (including too-short input) maps to one error code.
            dic['code'] = 'detectorError'
            dic['message'] = 'some error in detector internal!'
        return dic
    @api(input=DataframeInput(), batch=True)
    def predict(self, df: pd.DataFrame):
        """
        Batch Dataframe API: runs the stored scikit-learn model's predict()
        on the incoming DataFrame and returns its output.
        """
        return self.artifacts.model.predict(df)
    @api(input=JsonInput())
    def analyze(self, param: json):
        """
        JSON API stub: ignores the payload and always returns "good".
        """
        return "good"
    @api(input=StringInput())
    def doc(self, message: str):
        """
        Return the contents of README.md (the *message* input is unused).
        """
        f = open("README.md")
        doc = f.read()
        f.close()
        return doc
| 28.821918 | 88 | 0.613593 | 1,798 | 0.854563 | 0 | 0 | 1,872 | 0.889734 | 0 | 0 | 929 | 0.44154 |
50acfb5ef2c2292d0cbbb5ceac5eacf6da2c2045 | 139 | py | Python | test cases/windows/10 vs module defs generated custom target/subdir/make_def.py | kira78/meson | 0ae840656c5b87f30872072aa8694667c63c96d2 | [
"Apache-2.0"
] | 4,047 | 2015-06-18T10:36:48.000Z | 2022-03-31T09:47:02.000Z | test cases/windows/10 vs module defs generated custom target/subdir/make_def.py | kira78/meson | 0ae840656c5b87f30872072aa8694667c63c96d2 | [
"Apache-2.0"
] | 8,206 | 2015-06-14T12:20:48.000Z | 2022-03-31T22:50:37.000Z | test cases/windows/10 vs module defs generated custom target/subdir/make_def.py | kira78/meson | 0ae840656c5b87f30872072aa8694667c63c96d2 | [
"Apache-2.0"
] | 1,489 | 2015-06-27T04:06:38.000Z | 2022-03-29T10:14:48.000Z | #!/usr/bin/env python3
import sys
with open(sys.argv[1], 'w') as f:
print('EXPORTS', file=f)
print(' somedllfunc', file=f)
| 19.857143 | 40 | 0.597122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.395683 |
50ad9dd5356464000203ac7fc55703309310959c | 730 | py | Python | class-notes/fukushu/F-0728_graph.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/fukushu/F-0728_graph.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/fukushu/F-0728_graph.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 13:31:06 2021
@author: user24
"""
'''
Suchi wo nyuryoku
only accepts integer
end shuryou
creates a graph as image
'''
import matplotlib.pyplot as plt
# Read numbers from stdin until the user types "end", then plot them and
# save the chart as test.png.
cnt = 0
Y = []
while True:
    ans = input("数値を入力してください \n-->")
    if ans == "end":
        break
    try:
        ans_int = int(ans)
        Y.append(ans_int)
        cnt += 1
    # BUG FIX: the original had a bare `except:` *before* `except Exception`,
    # which is a SyntaxError (the default except clause must come last).
    # Catch the conversion failure specifically instead.
    except ValueError:
        print("文字列を読めない!数値を入れてください。")
    except Exception as error:
        print(error)
X = range(0, cnt)
plt.plot(X, Y, marker="o", color="r", linestyle="--")
plt.savefig("test.png")
# plt.xlabel("入力順番") # Japanese characters in the label caused an error
plt.show()
# ==========================================================================
'''
'''
50ae8802ab2f67d8085eed2af76c35268d1f3319 | 3,167 | py | Python | flan/exports/awssqs.py | bretlowery/flan | b79319044fcdb2230ac090232e9056719cb09f17 | [
"MIT"
] | 3 | 2019-08-03T13:27:31.000Z | 2021-06-08T16:25:31.000Z | flan/exports/awssqs.py | bretlowery/flan | b79319044fcdb2230ac090232e9056719cb09f17 | [
"MIT"
] | 2 | 2020-09-24T10:44:55.000Z | 2021-06-25T15:31:24.000Z | flan/exports/awssqs.py | bretlowery/flan | b79319044fcdb2230ac090232e9056719cb09f17 | [
"MIT"
] | null | null | null | from flanexport import FlanExport, timeout_after
import os
import ast
try:
from boto.sqs import connection
from boto.sqs.message import Message
except:
pass
class AWSSQS(FlanExport):
    def __init__(self, meta, config):
        """Register this exporter with FlanExport under its class name."""
        name = self.__class__.__name__
        super().__init__(name, meta, config)
@timeout_after(10)
def prepare(self):
aws_access_key_id = self._getsetting('aws_access_key_id', checkenv=True)
aws_secret_access_key = self._getsetting('aws_secret_access_key', checkenv=True)
is_secure = self._getsetting('is_secure', erroronnone=False, defaultvalue=True)
port = self._getsetting('port', erroronnone=False)
proxy = self._getsetting('proxy', erroronnone=False)
proxy_port = self._getsetting('proxy_port', erroronnone=False)
proxy_user = self._getsetting('proxy_user', erroronnone=False)
proxy_pass = self._getsetting('proxy_pass', erroronnone=False)
region = self._getsetting('region', erroronnone=False)
path = self._getsetting('region', defaultvalue="/")
security_token = self._getsetting('security_token', erroronnone=False)
validate_certs = self._getsetting('region', defaultvalue=True)
profile_name = self._getsetting('profile_name', erroronnone=False)
queue_name = self._getsetting('queue_name', erroronnone=True, defaultvalue="flan")
sqs_message_attributes = self._getsetting('sqs_message_attributes', erroronnone=False)
if sqs_message_attributes:
self.sqs_message_attributes = ast.literal_eval(sqs_message_attributes)
else:
self.sqs_message_attributes = {}
try:
self.conn = connection.SQSConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
is_secure=is_secure,
port=port,
proxy=proxy,
proxy_port=proxy_port,
proxy_user=proxy_user,
proxy_pass=proxy_pass,
region=region,
path=path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name
)
self.sender = self.conn.create_queue(queue_name, self._getsetting('timeout'))
except Exception as e:
self.logerr('Flan->%s connection to %s:%s failed: %s' %
(self.name, self.config["host"], self.config["port"], str(e)))
os._exit(1)
@timeout_after(10)
def send(self, data):
try:
m = Message()
m.message_attributes = self.sqs_message_attributes
m.set_body(data)
self.sender.write(m)
except Exception as e:
self.logerr('Flan->%s delivery failed: %s' % (self.name, str(e)))
pass
return
@property
def closed(self):
return False
@timeout_after(10)
def close(self):
try:
self.conn.close()
except:
pass
return | 38.156627 | 94 | 0.610988 | 2,995 | 0.94569 | 0 | 0 | 2,823 | 0.89138 | 0 | 0 | 293 | 0.092517 |
50af227f5cf4f7092ecffc1ecb9dc0ca94929c50 | 905 | py | Python | setup.py | jongwon-jay-lee/ko_lm_dataformat | 6ab51d9a021c550bab92534caf130f9cd7e26972 | [
"MIT"
] | 22 | 2021-06-30T03:13:21.000Z | 2022-01-12T05:16:59.000Z | setup.py | jongwon-jay-lee/ko_lm_dataformat | 6ab51d9a021c550bab92534caf130f9cd7e26972 | [
"MIT"
] | 10 | 2021-07-08T09:14:29.000Z | 2021-11-07T18:54:50.000Z | setup.py | jongwon-jay-lee/ko_lm_dataformat | 6ab51d9a021c550bab92534caf130f9cd7e26972 | [
"MIT"
] | 1 | 2021-07-08T01:44:51.000Z | 2021-07-08T01:44:51.000Z | import os
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for ko_lm_dataformat")
with open("requirements.txt") as f:
require_packages = [line.strip() for line in f]
with open(os.path.join("ko_lm_dataformat", "version.txt")) as f:
version = f.read().strip()
setup(
name="ko_lm_dataformat",
version=version,
author="Jangwon Park",
author_email="adieujw@gmail.com",
description="A utility for storing and reading files for Korean LM training.",
long_description=open("./README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
url="https://github.com/monologg/ko_lm_dataformat",
packages=find_packages(exclude=["tests"]),
python_requires=">=3.6",
zip_safe=False,
include_package_data=True,
install_requires=require_packages,
)
| 31.206897 | 82 | 0.707182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.351381 |
50b00721b02820e3b4b32a57d5d51dcc9d8741c6 | 5,852 | py | Python | tools/prepare_iata_airline_dump_file.py | mtrampont/opentraveldata | 94d10fc313f75058079d9a3e1a766c704fbe9488 | [
"CC-BY-4.0"
] | 208 | 2015-03-24T13:42:20.000Z | 2022-01-16T23:48:02.000Z | tools/prepare_iata_airline_dump_file.py | mtrampont/opentraveldata | 94d10fc313f75058079d9a3e1a766c704fbe9488 | [
"CC-BY-4.0"
] | 155 | 2015-02-11T13:53:43.000Z | 2022-03-12T08:25:13.000Z | tools/prepare_iata_airline_dump_file.py | mtrampont/opentraveldata | 94d10fc313f75058079d9a3e1a766c704fbe9488 | [
"CC-BY-4.0"
] | 99 | 2015-01-27T14:32:19.000Z | 2022-02-05T15:13:13.000Z | #!/usr/bin/env python
import getopt, sys, io
import pandas as pd
#
# Usage
#
def usage (script_name):
    """Print the command-line help for this script to standard output."""
    help_lines = [
        "",
        "Usage: {} [options]".format(script_name),
        "",
        "That script transforms and filter a fix width data file into a hat symbol separated CSV one",
        "",
        "Options:",
        "  -h, --help : outputs this help and exits",
        "  -v, --verbose : verbose output (debugging)",
        "  -i, --input <input data file-path>",
        "  -o, --output <output data file-path>",
        "",
    ]
    for help_line in help_lines:
        print (help_line)
#
# Command-line arguments
#
def handle_opt():
    """
    Handle the command-line options.

    :returns: a (verboseFlag, airline_input_filepath, airline_output_file)
              tuple, where verboseFlag is True when -v/--verbose was given,
              airline_input_filepath is '' for standard input, and
              airline_output_file is a writable file object (standard
              output by default).
    """
    try:
        # BUG FIX: -v/--verbose is a flag, so 'v' must not take an argument,
        # and the long options taking a value need a trailing '='.
        opts, args = getopt.getopt (sys.argv[1:], "hvi:o:",
                                    ["help", "verbose", "input=", "output="])
    # BUG FIX: the original used the Python 2 form
    # `except (getopt.GetoptError, err):`, which raises a NameError in
    # Python 3; `as err` is the correct syntax.  It also passed an
    # undefined `usage_doc` second argument to usage().
    except getopt.GetoptError as err:
        # Print help information and exit. It will print something like
        # "option -d not recognized"
        print (str (err))
        usage (sys.argv[0])
        sys.exit(2)

    # Option defaults
    verboseFlag = False
    airline_input_filepath = ''
    airline_output_filepath = ''
    airline_input_file = sys.stdin  # '/dev/stdin'
    airline_output_file = sys.stdout  # '/dev/stdout'

    # A bare positional argument is taken as the input file-path
    if len (args) != 0:
        airline_input_filepath = args[0]

    # Handling
    for o, a in opts:
        if o in ("-h", "--help"):
            usage (sys.argv[0])
            sys.exit()
        elif o in ("-v", "--verbose"):
            verboseFlag = True
        elif o in ("-i", "--input"):
            airline_input_filepath = a
        elif o in ("-o", "--output"):
            airline_output_filepath = a
        else:
            raise ValueError ("That option ({}) is unknown. Rerun that script with the -h option to see the accepted options".format(o))

    # Input file. That file may be compressed with GNU Zip (gzip)
    # NOTE(review): this handle is opened but never returned — extract_df()
    # re-opens the file by path, so the 'rb' handle leaks.  Confirm before
    # removing it.
    if (airline_input_filepath != ''):
        airline_input_file = open (airline_input_filepath, 'rb')

    # Output file-path
    if (airline_output_filepath != ''):
        airline_output_file = open (airline_output_filepath, 'w')

    # Report the configuration (only when not writing the CSV to standard
    # output, so the report does not pollute the data stream)
    airline_input_filepath_str = airline_input_filepath \
        if airline_input_filepath != '' \
        else 'Standard input'
    airline_output_filepath_str = airline_output_filepath \
        if airline_output_filepath != '' \
        else 'Standard output'
    if (airline_output_filepath_str != 'Standard output'):
        print ("Input data file: '{}'".format(airline_input_filepath_str))
        print ("Output data file: '{}'".format(airline_output_filepath_str))

    #
    return (verboseFlag, airline_input_filepath, airline_output_file)
def extract_df (airline_input_filepath):
    """
    Parse a fixed-width data file containing details about IATA-referenced
    airlines and return the records as a Pandas data-frame.

    :param airline_input_filepath: file-path (or readable buffer) of the
                                   fixed-width airline dump
    :returns: data-frame with one row per airline; empty fields are ''
    """
    # (column name, [start, stop) character offsets) for each field
    layout = [
        ('name', (0, 80)),
        ('num_code', (80, 84)),
        ('3char_code', (84, 87)),
        ('2char_code', (87, 90)),
        ('address_street_1', (90, 130)),
        ('address_street_2', (130, 170)),
        ('address_city_name', (170, 195)),
        ('address_state_name', (195, 215)),
        ('address_country_name', (215, 259)),
        ('address_postal_code', (259, 373)),
        ('flag_1', (373, 374)),
        ('flag_2', (374, 375)),
        ('flag_3', (375, 376)),
        ('flag_4', (376, 377)),
        ('type', (377, 379)),
        ('num_code_2', (379, 385)),
    ]
    col_names = [name for name, _ in layout]
    col_specs = [spec for _, spec in layout]

    def normalize_code(raw):
        # Numeric codes are re-serialised without leading zeros/spaces.
        return str(int(raw))

    airline_df = pd.read_fwf(airline_input_filepath,
                             colspecs = col_specs, header = None,
                             names = col_names,
                             converters = {'num_code': normalize_code,
                                           'num_code_2': normalize_code})
    # Leave empty fields empty (otherwise, Pandas specifies NaN).
    # FIX: the explicit `method = None` argument was dropped — the `method`
    # parameter of fillna() is deprecated (and removed in pandas 3.x);
    # omitting it is behaviourally identical.
    airline_df.fillna (value = '', inplace = True)
    # Fall back on num_code_2 whenever num_code is empty
    airline_df['num_code'] = airline_df \
        .apply(lambda r: r['num_code'] if r['num_code'] != '' else r['num_code_2'],
               axis = 1)
    return (airline_df)
def dump_to_csv (airline_df, airline_output_file):
    """
    Write a sub-set of the airline data-frame as CSV.

    Only the IATA/ICAO codes, numeric code, name and type columns are
    exported, delimited by the hat symbol ('^').
    Note: rows are sorted in place, so the caller's frame is mutated.
    """
    exported_columns = ['2char_code', '3char_code', 'num_code', 'name', 'type']
    sort_keys = ['2char_code', '3char_code', 'num_code', 'name']
    # Order the rows by IATA code first, then ICAO code
    airline_df.sort_values(sort_keys, ascending = True, inplace = True)
    # '|' as the quote character with doubling disabled, per the legacy format
    airline_df.to_csv (airline_output_file,
                       sep = '^',
                       columns = exported_columns,
                       header = True,
                       index = False,
                       doublequote = False,
                       quotechar = '|')
#
# Main
#
def main():
    """Entry point: parse the options, load the airline data, dump it as CSV."""
    verbose_flag, input_filepath, output_file = handle_opt()
    # verbose_flag is currently unused beyond option parsing
    airline_frame = extract_df (input_filepath)
    dump_to_csv (airline_frame, output_file)
#
# Main, when launched from a library
#
if __name__ == "__main__":
main()
| 32.876404 | 136 | 0.576384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,304 | 0.393712 |
50b0d3191e31237febc87109e80b45a79739c498 | 8,117 | py | Python | sanalberto/views/polls.py | xJavii8/dafi-system | 6f4a3f77c40424a0c1d7c80236f0bf52be4304d2 | [
"MIT"
] | 7 | 2019-08-03T12:25:18.000Z | 2021-11-02T12:51:33.000Z | sanalberto/views/polls.py | xJavii8/dafi-system | 6f4a3f77c40424a0c1d7c80236f0bf52be4304d2 | [
"MIT"
] | 11 | 2019-08-20T17:07:37.000Z | 2021-11-23T14:26:07.000Z | sanalberto/views/polls.py | xJavii8/dafi-system | 6f4a3f77c40424a0c1d7c80236f0bf52be4304d2 | [
"MIT"
] | 4 | 2020-04-06T11:33:02.000Z | 2021-10-31T09:10:53.000Z | from collections import Counter
from typing import (
Any,
cast,
)
from django.contrib import messages
from django.contrib.auth.mixins import (
LoginRequiredMixin,
UserPassesTestMixin,
)
from django.db import transaction
from django.db.models import (
Count,
Q,
)
from django.forms.models import ModelForm
from django.http import (
HttpRequest,
HttpResponse,
)
from django.http.response import (
HttpResponseBase,
HttpResponseRedirectBase,
)
from django.shortcuts import redirect
from django.utils import timezone
from django.views.generic import (
CreateView,
DetailView,
UpdateView,
)
from meta.views import MetadataMixin
from sanalberto.forms import PollVoteForm
from users.models import User
from ..models import (
Poll,
PollDesign,
PollVote,
)
from .common import EventMixin
class PollMixin(EventMixin):
    """Mixin for views scoped to a single Poll, looked up by the 'slug'
    URL kwarg.

    Sets ``self.poll`` before the view runs; redirects away when the poll
    does not exist or fails the ``check_poll()`` test.
    """

    poll: Poll

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['poll'] = self.poll
        return context

    def get_subtitle(self, context: dict[str, Any]) -> str:
        return self.poll.title

    def check_poll(self, poll: Poll) -> bool:
        # Default method: accept every poll; subclasses override
        return True

    def check_poll_redirect(self, poll: Poll) -> HttpResponseRedirectBase:
        # Default destination when check_poll() fails; subclasses override
        return redirect('sanalberto:index')

    def dispatch(self, request, *args, **kwargs):
        try:
            self.poll = Poll.objects.filter(slug=kwargs.get('slug')).get()
        except Poll.DoesNotExist:
            # BUG FIX: the original called check_poll_redirect(self.poll)
            # here, but self.poll was never assigned in this branch, so an
            # AttributeError was raised instead of a redirect.
            return redirect('sanalberto:index')

        if not self.check_poll(self.poll):
            # Explicit check instead of `assert`, which disappears under -O.
            return self.check_poll_redirect(self.poll)

        return super().dispatch(request, *args, **kwargs)
class PollDetailView(EventMixin, MetadataMixin, DetailView):
    """Poll detail view."""

    model = Poll

    def get_queryset(self):
        return super().get_queryset().prefetch_related('designs', 'winner__user')

    def get_subtitle(self, context: dict[str, Any]) -> str:
        return cast(Poll, context['object']).title

    def get_context_data(self, **kwargs):
        poll = self.get_object()
        user = self.request.user

        # Approved designs are visible to everybody; a logged-in user
        # additionally sees their own (possibly unapproved) designs.
        visibility = Q(is_approved=True)
        if user.is_authenticated:
            visibility |= Q(user=user)

        own_designs: list[PollDesign] = []
        approved_designs: list[PollDesign] = []
        for design in poll.designs.filter(visibility):
            if user.is_authenticated and design.user == user:
                own_designs.append(design)
            if design.is_approved:
                approved_designs.append(design)

        # The current user's vote, only relevant while voting is open
        current_vote: 'PollVote | None' = None
        if user.is_authenticated and poll.voting_enabled:
            current_vote = (
                poll
                .votes
                .filter(user=user)
                .prefetch_related('first', 'second', 'third')
                .first()
            )

        context = super().get_context_data(**kwargs)
        context['now'] = timezone.now()
        context['approved_designs'] = approved_designs
        context['my_designs'] = own_designs
        context['my_vote'] = current_vote
        return context
class DesignCreateView(PollMixin, MetadataMixin, LoginRequiredMixin, CreateView):
    """Design create view."""

    model = PollDesign
    fields = ['title', 'image', 'source_file', 'vector_file']

    def check_poll(self, poll: Poll) -> bool:
        # Designs may only be submitted while registration is open
        return poll.register_enabled

    def check_poll_redirect(self, poll: Poll) -> HttpResponseRedirectBase:
        return redirect('sanalberto:poll_detail', slug=poll.slug)

    def get_subtitle(self, context: dict[str, Any]) -> str:
        poll_title = cast(Poll, context['object']).title
        return f'Presentar diseño para {poll_title}'

    def form_valid(self, form: 'ModelForm[PollDesign]') -> HttpResponse:
        # Attach the poll and the submitting user before persisting
        design = form.save(commit=False)
        design.poll = self.poll
        design.user = cast(User, self.request.user)
        design.save()
        return redirect('sanalberto:poll_detail', slug=self.poll.slug)
class PollVoteCreateView(
        PollMixin,
        MetadataMixin,
        LoginRequiredMixin,
        UpdateView):
    """Poll vote create view.

    Declared as an UpdateView, it transparently creates the user's vote on
    the first submission and updates it afterwards (see form_valid).
    Voting requires a logged-in, e-mail-verified user (see dispatch).
    """

    model = PollVote
    form_class = PollVoteForm

    def get_object(self, *args) -> 'PollVote | None':
        # The object under edition is the current user's vote, if any.
        # NOTE(review): this does not filter by poll, unlike form_valid —
        # confirm whether a user can only ever have one vote overall.
        return PollVote.objects.filter(user=self.request.user).first()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['designs'] = self.poll.designs.all()
        return context

    def get_subtitle(self, context: dict[str, Any]) -> str:
        title = cast(Poll, context['object']).title
        return f'Votar diseño para {title}'

    def get_initial(self) -> dict[str, Any]:
        # Pre-select the first choice from the ?selected=<id> query param
        initial = super().get_initial()
        try:
            initial['first'] = int(self.request.GET['selected'])
        except (ValueError, KeyError):
            pass
        return initial

    def get_form_kwargs(self) -> dict[str, Any]:
        kwargs = super().get_form_kwargs()
        kwargs['designs'] = self.poll.designs.all()
        return kwargs

    def check_poll(self, poll: Poll) -> bool:
        return poll.voting_enabled

    def check_poll_redirect(self, poll: Poll) -> HttpResponseRedirectBase:
        return redirect('sanalberto:poll_detail', slug=poll.slug)

    def form_valid(self, form: 'ModelForm[PollVote]') -> HttpResponse:
        obj = form.save(commit=False)

        with transaction.atomic():
            # Row-lock the user's existing vote (if any) so two concurrent
            # submissions cannot create duplicate votes.
            existing = (
                PollVote
                .objects
                .filter(user=self.request.user, poll=self.poll)
                .select_for_update()
                .first()
            )

            if existing is None:
                obj.poll = self.poll
                obj.user = cast(User, self.request.user)
                obj.save()
            else:
                existing.first = obj.first
                existing.second = obj.second
                existing.third = obj.third
                existing.save()

        # BUG FIX: the redirect previously used obj.poll.slug, but obj never
        # gets its poll assigned in the update branch, which raises
        # RelatedObjectDoesNotExist.  self.poll is always set by
        # PollMixin.dispatch and equals obj.poll in the create branch.
        return redirect('sanalberto:poll_detail', slug=self.poll.slug)

    def dispatch(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponseBase:
        if isinstance(request.user, User) and not request.user.is_verified:
            messages.error(
                request,
                'Debes verificar tu e-mail para poder votar',
                extra_tags='show_profile_btn'
            )
            return redirect('sanalberto:poll_detail', kwargs['slug'])

        return super().dispatch(request, *args, **kwargs)
class PollAdminView(
        PollMixin,
        MetadataMixin,
        UserPassesTestMixin,
        DetailView):
    """Admin view showing vote tallies and the provisional winner of a poll."""

    model = Poll
    template_name_suffix = '_admin'

    def test_func(self) -> 'bool | None':
        # Restrict the page to users allowed to inspect polls and votes
        user = self.request.user
        return isinstance(user, User) and user.has_perms((
            'sanalberto.view_poll',
            'sanalberto.view_pollvote',
        ))

    def get_subtitle(self, context: dict[str, Any]) -> str:
        title = cast(Poll, context['object']).title
        return f'Administrar encuesta para {title}'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        poll: Poll = context['object']

        # Weighted scoring per design: 3 points for each first place,
        # 2 for each second, 1 for each third.
        points: Counter[int] = Counter()
        votes = 0

        if poll.voting_start < timezone.now():
            votes = poll.votes.count()

            for field, multiplier in (('first', 3), ('second', 2), ('third', 1)):
                all_votes = poll.votes.values(field).annotate(count=Count(field))

                for item in all_votes:
                    points[item[field]] += item['count'] * multiplier

        designs = [
            (obj, points.get(obj.id, 0)) for obj in poll.designs.all()
        ]
        designs.sort(key=lambda obj: obj[1], reverse=True)

        context['designs'] = designs
        context['votes'] = votes

        # BUG FIX: guard against a poll with no designs at all — indexing
        # designs[0] used to raise IndexError in that case.
        if designs and designs[0][1] > 0:
            context['winner'] = designs[0][0]

        return context
| 28.28223 | 92 | 0.604164 | 7,257 | 0.893829 | 0 | 0 | 0 | 0 | 0 | 0 | 865 | 0.10654 |
50b1a3151683891fe29f85b69660417956b904a6 | 4,473 | py | Python | face_morpher/facemorpher/warper.py | ivan-uskov/faces | 59a27c305888e8e000cb1549f8b06216449b1f05 | [
"MIT"
] | 1 | 2021-09-16T00:02:48.000Z | 2021-09-16T00:02:48.000Z | facemorpher/warper.py | ImpactCrater/AutoFaceMorpher | 9955a02d9cec309ca0db8c2454f9466f7e1633c5 | [
"MIT"
] | null | null | null | facemorpher/warper.py | ImpactCrater/AutoFaceMorpher | 9955a02d9cec309ca0db8c2454f9466f7e1633c5 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.spatial as spatial
def bilinear_interpolate(img, coords):
    """ Bilinearly interpolate pixel values, over every image channel.
    http://en.wikipedia.org/wiki/Bilinear_interpolation

    :param img: max 3 channel image
    :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
    :returns: array of interpolated pixels with same shape as coords
    """
    base = np.int32(coords)            # truncated integer coordinates
    x_lo, y_lo = base
    frac_x, frac_y = coords - base     # fractional offsets within each cell

    # The four pixels surrounding each requested coordinate
    top_left = img[y_lo, x_lo]
    top_right = img[y_lo, x_lo + 1]
    bottom_left = img[y_lo + 1, x_lo]
    bottom_right = img[y_lo + 1, x_lo + 1]

    # Blend horizontally along both rows, then vertically between them
    row_lo = top_right.T * frac_x + top_left.T * (1 - frac_x)
    row_hi = bottom_right.T * frac_x + bottom_left.T * (1 - frac_x)
    blended = row_hi * frac_y + row_lo * (1 - frac_y)

    return blended.T
def grid_coordinates(points):
    """ x,y grid coordinates within the ROI of supplied points

    :param points: points to generate grid coordinates
    :returns: array of (x, y) coordinates, row-major (y outer, x inner)
    """
    xs = points[:, 0]
    ys = points[:, 1]
    x_range = range(np.min(xs), np.max(xs) + 1)
    y_range = range(np.min(ys), np.max(ys) + 1)
    grid = [(x, y) for y in y_range for x in x_range]
    return np.asarray(grid, np.uint32)
def process_warp(src_img, result_img, tri_affines, dst_points, delaunay):
    """
    Warp each triangle of src_img into result_img, restricted to the
    ROI of the destination landmarks (dst_points).  Mutates result_img.
    """
    roi_coords = grid_coordinates(dst_points)
    # Simplex index of every ROI pixel; -1 when outside all triangles
    member_of = delaunay.find_simplex(roi_coords)

    for simplex_index in range(len(delaunay.simplices)):
        coords = roi_coords[member_of == simplex_index]
        # Map the destination pixels back into the source image through the
        # triangle's affine transform (homogeneous coordinates)
        homogeneous = np.vstack((coords.T, np.ones(len(coords))))
        src_coords = tri_affines[simplex_index].dot(homogeneous)
        x, y = coords.T
        result_img[y, x] = bilinear_interpolate(src_img, src_coords)

    return None
def triangular_affine_matrices(vertices, src_points, dest_points):
    """
    Yield, for each triangle, the 2x3 affine transform that maps its
    dest_points corners onto its src_points corners.

    :param vertices: array of triplet indices to corners of triangle
    :param src_points: array of [x, y] points to landmarks for source image
    :param dest_points: array of [x, y] points to landmarks for destination image
    :returns: generator of 2 x 3 affine matrices, one per triangle
    """
    homogeneous_row = [1, 1, 1]

    def as_homogeneous(points, indices):
        # 3x3 matrix whose columns are the triangle corners in
        # homogeneous coordinates
        return np.vstack((points[indices, :].T, homogeneous_row))

    for tri_indices in vertices:
        src_tri = as_homogeneous(src_points, tri_indices)
        dst_tri = as_homogeneous(dest_points, tri_indices)
        yield src_tri.dot(np.linalg.inv(dst_tri))[:2, :]
def warp_image(src_img, src_points, dest_points, dest_shape, dtype=np.uint8):
    """Warp src_img so its landmarks (src_points) land on dest_points."""
    # Drop any alpha channel; the result is always 3-channel
    src_img = src_img[:, :, :3]

    rows, cols = dest_shape[:2]
    result_img = np.zeros((rows, cols, 3), dtype)

    # Triangulate the destination landmarks, then precompute each
    # triangle's affine transform back into the source image
    delaunay = spatial.Delaunay(dest_points)
    affines = triangular_affine_matrices(
        delaunay.simplices, src_points, dest_points)
    tri_affines = np.asarray(list(affines))

    process_warp(src_img, result_img, tri_affines, dest_points, delaunay)
    return result_img
def test_local():
    """Manual smoke test: warp two local sample faces towards a common set
    of control points and display the averaged/blended results.

    NOTE(review): depends on hard-coded relative paths ('../data',
    '../females/...') and on the project modules locator/aligner/blender,
    so it only runs from a checkout that provides those assets.
    """
    from functools import partial
    import cv2
    import scipy.misc
    import locator
    import aligner
    from matplotlib import pyplot as plt

    # Load source image
    face_points_func = partial(locator.face_points, '../data')
    base_path = '../females/Screenshot 2015-03-04 17.11.12.png'
    src_path = '../females/BlDmB5QCYAAY8iw.jpg'
    src_img = cv2.imread(src_path)

    # Define control points for warps
    src_points = face_points_func(src_path)
    base_img = cv2.imread(base_path)
    base_points = face_points_func(base_path)

    # Resize/align both images to the same working size
    size = (600, 500)
    src_img, src_points = aligner.resize_align(src_img, src_points, size)
    base_img, base_points = aligner.resize_align(base_img, base_points, size)
    # Control points blended with weight 0.2 between the two landmark sets
    result_points = locator.weighted_average_points(src_points, base_points, 0.2)

    # Perform transform
    dst_img1 = warp_image(src_img, src_points, result_points, size)
    dst_img2 = warp_image(base_img, base_points, result_points, size)

    import blender
    ave = blender.weighted_average(dst_img1, dst_img2, 0.6)
    mask = blender.mask_from_points(size, result_points)
    blended_img = blender.poisson_blend(dst_img1, dst_img2, mask)

    # Show the weighted average, both warps and the Poisson blend in a 2x2 grid
    plt.subplot(2, 2, 1)
    plt.imshow(ave)
    plt.subplot(2, 2, 2)
    plt.imshow(dst_img1)
    plt.subplot(2, 2, 3)
    plt.imshow(dst_img2)
    plt.subplot(2, 2, 4)
    plt.imshow(blended_img)
    plt.show()
if __name__ == "__main__":
test_local()
| 31.95 | 79 | 0.709144 | 0 | 0 | 717 | 0.160295 | 0 | 0 | 0 | 0 | 1,245 | 0.278337 |
50b1e8b1216b573ce1073b4417985c18776e49bb | 346 | py | Python | paddlehub/serving/gunicorn.py | 18621579069/PaddleHub-yu | 15e8bcef2addf239081e235bdcfd039de12330e0 | [
"Apache-2.0"
] | 4 | 2021-02-25T03:27:38.000Z | 2021-05-15T03:20:23.000Z | paddlehub/serving/gunicorn.py | Melissa9596/PaddleHub | 0b002a0add1c982d52f7591a5e773bf09b3e6361 | [
"Apache-2.0"
] | null | null | null | paddlehub/serving/gunicorn.py | Melissa9596/PaddleHub | 0b002a0add1c982d52f7591a5e773bf09b3e6361 | [
"Apache-2.0"
] | 2 | 2021-03-01T07:04:01.000Z | 2021-05-14T05:54:18.000Z | #!/usr/bin/env python
# coding=utf-8
# coding: utf8
"""
configuration for gunicorn
"""
import multiprocessing

# Address and port the server listens on.
bind = '0.0.0.0:8888'
# Maximum number of pending (not yet accepted) connections.
backlog = 2048

# Worker count per the common gunicorn heuristic: 2 x cores + 1.
workers = multiprocessing.cpu_count() * 2 + 1
# Threads per worker process.
threads = 1
# Synchronous worker type.
worker_class = 'sync'
# Maximum simultaneous clients per worker (relevant to async worker classes).
worker_connections = 1000
# Seconds of silence before a worker is killed and restarted.
timeout = 500
# Seconds to wait for further requests on a keep-alive connection.
keepalive = 40

# Run in the foreground (do not detach from the terminal).
daemon = False
# Log verbosity.
loglevel = 'info'
# '-' routes the error log to stderr and the access log to stdout.
errorlog = '-'
accesslog = '-'
| 17.3 | 45 | 0.702312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.33237 |
50b2c8b6eb4eb1e401d69a4ec9a24c73d1dd8ed4 | 220 | py | Python | Exercices/Secao04/exercicio46.py | Guilt-tech/PythonExercices | e59bffae997a1974d3e3cdcfff7700afbed65e6e | [
"MIT"
] | null | null | null | Exercices/Secao04/exercicio46.py | Guilt-tech/PythonExercices | e59bffae997a1974d3e3cdcfff7700afbed65e6e | [
"MIT"
] | null | null | null | Exercices/Secao04/exercicio46.py | Guilt-tech/PythonExercices | e59bffae997a1974d3e3cdcfff7700afbed65e6e | [
"MIT"
] | null | null | null | print('Digite um número inteiro positivo de três dígitos (100 a 999), para gerar o número invertido')
num = int(input('Número: '))
num = str(num)
reverso = num[::-1]
print(f'O número ao contrário de: {num} é: {reverso}') | 44 | 101 | 0.695455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.697368 |
50b40243d48dd7096f88e7ab86cfe6c9d963a1b7 | 1,277 | py | Python | webapp/assessdb/scripts/import_csv_instruments.py | sspickle/assessdb | 89f99424ef7ab1405d7fa37a74764dd12cef1abf | [
"BSD-2-Clause"
] | null | null | null | webapp/assessdb/scripts/import_csv_instruments.py | sspickle/assessdb | 89f99424ef7ab1405d7fa37a74764dd12cef1abf | [
"BSD-2-Clause"
] | null | null | null | webapp/assessdb/scripts/import_csv_instruments.py | sspickle/assessdb | 89f99424ef7ab1405d7fa37a74764dd12cef1abf | [
"BSD-2-Clause"
] | null | null | null | import os
import sys
import transaction
import csv
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models.meta import Base
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models import Instrument
def usage(argv):
    """Print a usage message derived from argv[0], then exit with status 1."""
    cmd = os.path.basename(argv[0])
    message = ('usage: %s <config_uri> [var=value]\n'
               '(example: "%s development.ini")' % (cmd, cmd))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Load Instrument rows into the database from a CSV file.

    Expected argv: [script, config_uri, csv_path, var=value, ...]
    """
    if len(argv) < 3:
        usage(argv)
    config_uri, fname = argv[1], argv[2]
    options = parse_vars(argv[3:])

    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)

    engine = get_engine(settings)
    Base.metadata.create_all(engine)
    session_factory = get_session_factory(engine)

    # One transaction for the whole import
    with transaction.manager:
        dbsession = get_tm_session(session_factory, transaction.manager)
        with open(fname) as csvfile:
            for row in csv.DictReader(csvfile):
                instrument = Instrument()
                instrument.name = row['name']
                instrument.description = row['description']
                instrument.importTag = row['importTag']
                dbsession.add(instrument)
| 24.557692 | 72 | 0.636648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.079092 |
50b4618057184dfc1016b24c9b7fde5730dbf199 | 14,440 | py | Python | stock/experiment_compare_iterations_autions.py | dvirg/auctions | da706f3d11b9582c7f811de9f50b96b43ac8cbd0 | [
"MIT"
] | 1 | 2021-11-20T19:27:45.000Z | 2021-11-20T19:27:45.000Z | stock/experiment_compare_iterations_autions.py | dvirg/auctions | da706f3d11b9582c7f811de9f50b96b43ac8cbd0 | [
"MIT"
] | null | null | null | stock/experiment_compare_iterations_autions.py | dvirg/auctions | da706f3d11b9582c7f811de9f50b96b43ac8cbd0 | [
"MIT"
] | null | null | null | #!python3
"""
A utility for performing simulation experiments on auction mechanisms.
The experiment is similar to the one described by McAfee (1992), Table I (page 448).
In each experiment, we measure the actual vs. the optimal gain-from-trade.
This experiment using the real prices from Stock market. the prices are in csv files in stocks folder.
The results are printed to a CSV file. The columns are:
* stock_name - The stock name which the prices came from.
* recipe - (1,1) for McAfee; can be any vector of ones, e.g. (1,1,1),
for our trade-reduction mechanism, or any vector of positive integers for our ascending-auction mechanism.
* num_possible_trades = n = total number of potential procurement-sets
(e.g. if n=100 and recipe=[1,2] then there are 100 buyers and 200 sellers).
* optimal_count = k = number of deals in the optimal trade, averaged over all iterations.
Note that k <= n. E.g., there may be 100 buyers and 100 sellers, but only 50 procurement-sets with positive GFT, so k=50.
* optimal_gft - OPT = gain-from-trade in the optimal trade,
* optimal_trade_with_gft_zero - OPT = gain-from-trade in the optimal trade, including sets with GFT=0
* count = k' = number of deals done by our auction, averaged over all iterations.
Theoretically, since at most one deal is removed, it should be either k or k-1.
* count_ratio = %k' = count / optimal_count * 100%.
* total_gft = GFT = gain-from-trade in the auction, including auctioneer
* total_gft_ratio = %GFT = total_gft / optimal_gft * 100%.
Theoretically it should be at least 1 - 1/k.
In the results, it is usually higher.
* market_gft = Market GFT = gain-from-trade in the auction, not including auctioneer.
* market_gft_ratio = Market %GFT = market_gft / optimal_gft * 100%.
Theoretically it should be at least 1 - 1/k.
In the results, it is usually higher.
Recommended: add manually at the beginning of the file the header line:
,recipe,n,k,k+0,OPT_GFT,OPT_GFT+0,McAfee_k',%k',total_gft,%total_gft,market_gft,market_%gft,McAfee_Without_Heuristic_k',%k',total_gft,%total_gft,market_gft,market_%gft,SBB_External_Competition_k',%k',gft,%gft,market_gft,market_%gft,SBB_Ascending_Prices_k,%k',gft,%gft,market_gft,market_%gft
Author: Dvir Gilor
Since: 2020-08
"""
from markets import Market
from agents import AgentCategory
from tee_table.tee_table import TeeTable
from collections import OrderedDict
from get_stocks_data import getStocksPricesShuffled
import random
from os import path
def experiment(results_csv_file:str, auction_functions:list, auction_names:str, recipe:tuple, nums_of_agents=None,
stocks_prices:list=None, stock_names:list=None, num_of_iterations=1000, run_with_stock_prices=True,
report_diff=False):
"""
Run an experiment similar to McAfee (1992) experiment on the given auction.
:param results_csv_file: the experiment result file.
:param auction_functions: list of functions for executing the auction under consideration.
:param auction_names: titles of the experiment, for printouts.
:param recipe: can be any vector of ones, e.g. (1,1,1), for our trade-reduction mechanism, or any vector of positive integers for our ascending-auction mechanism.
:param stocks_prices: list of prices for each stock and each agent.
:param stock_names: list of stocks names which prices are belongs, for naming only.
"""
TABLE_COLUMNS = ["iterations", "stockname", "recipe", "numpossibletrades", "optimalcount", "gftratioformula",
"optimalcountwithgftzero", "optimalgft", "optimalgftwithgftzero"]
AUCTION_COLUMNS = ["count", "countratio", "totalgft", "totalgftratio",
"withoutgftzerocountratio", "withoutgftzerototalgft", "withoutgftzerototalgftratio",
"marketgft", "marketgftratio"]
if path.exists(results_csv_file):
print('The file', results_csv_file, 'already exists, skipping')
return
else:
print('Running for the file', results_csv_file)
if stocks_prices is None:
(stocks_prices, stock_names) = getStocksPricesShuffled()
column_names = TABLE_COLUMNS
column_names += [auction_name + column for auction_name in auction_names for column in AUCTION_COLUMNS]
results_table = TeeTable(column_names, results_csv_file)
recipe_str = ":".join(map(str,recipe))
recipe_sum = sum(recipe)
recipe_sum_for_buyer = (recipe_sum-recipe[0])/recipe[0]
if nums_of_agents is None:
nums_of_agents = [10000000]
#print(nums_of_agents)
total_results = {}
for num_of_agents_per_category in nums_of_agents:
total_results[str(num_of_agents_per_category)] = []
#print(total_results)
for i in range(len(stocks_prices)):
stock_prices = stocks_prices[i]
for num_of_possible_ps in nums_of_agents:
for iteration in range(num_of_iterations):
categories = []
if run_with_stock_prices:
while len(stock_prices) < num_of_possible_ps * recipe_sum:
stock_prices = stock_prices + stock_prices
random.shuffle(stock_prices)
index = 0
for category in recipe:
next_index = index + num_of_possible_ps * category
price_sign = recipe_sum_for_buyer if index == 0 else -1
#price_value_multiple = -1 * buyer_agent_count if index > 0 else recipe_sum - buyer_agent_count
categories.append(AgentCategory("agent", [int(price*price_sign) for price in stock_prices[index:next_index]]))
index = next_index
else: #prices from random.
for index in range(len(recipe)):
#for category in recipe:
min_value = -100000 if index > 0 else recipe_sum_for_buyer
max_value = -1 if index > 0 else 100000 * recipe_sum_for_buyer
categories.append(AgentCategory.uniformly_random("agent", num_of_possible_ps*recipe[index],
min_value, max_value))
market = Market(categories)
(optimal_trade, _) = market.optimal_trade(ps_recipe=list(recipe), max_iterations=10000000, include_zero_gft_ps=False)
optimal_count = optimal_trade.num_of_deals()
optimal_gft = optimal_trade.gain_from_trade()
(optimal_trade_with_gft_zero, _) = market.optimal_trade(ps_recipe=list(recipe), max_iterations=10000000)
optimal_count_with_gft_zero = optimal_trade_with_gft_zero.num_of_deals()
optimal_gft_with_gft_zero = optimal_trade_with_gft_zero.gain_from_trade()
results = [("iterations", num_of_iterations),
("stockname", stock_names[i]),
("recipe", recipe_str),
("numpossibletrades", int(num_of_possible_ps)),
("optimalcount", optimal_count),
("gftratioformula", (optimal_count - 1) * 100 / (optimal_count if min(recipe) == max(recipe) and recipe[0] == 1 else optimal_count + 1) if optimal_count > 1 else 0),
("optimalcountwithgftzero", optimal_count_with_gft_zero),
("optimalgft", optimal_gft),
("optimalgftwithgftzero", optimal_gft_with_gft_zero)]
for auction_index in range(len(auction_functions)):
auction_trade = auction_functions[auction_index](market, recipe)
count = auction_trade.num_of_deals()
total_gft = auction_trade.gain_from_trade(including_auctioneer=True)
market_gft = auction_trade.gain_from_trade(including_auctioneer=False)
auction_name = auction_names[auction_index]
results.append((auction_name + "count", auction_trade.num_of_deals()))
results.append((auction_name + "countratio",
0 if optimal_count==0 else (count / optimal_count_with_gft_zero) * 100))
results.append((auction_name + "totalgft", total_gft))
results.append((auction_name + "totalgftratio", 0 if optimal_gft==0 else total_gft / optimal_gft_with_gft_zero*100))
results.append((auction_name + "marketgft", market_gft))
results.append((auction_name + "marketgftratio",
0 if optimal_gft == 0 else market_gft / optimal_gft_with_gft_zero * 100))
results.append((auction_name + "withoutgftzerocountratio",
0 if optimal_count==0 else (count / optimal_count) * 100))
results.append((auction_name + "withoutgftzerototalgft", total_gft))
results.append((auction_name + "withoutgftzerototalgftratio", 0 if optimal_gft==0 else total_gft / optimal_gft*100))
#We check which auction did better and print the market and their results.
if report_diff:
gft_to_compare = -1
k_to_compare = -1
gft_found = False
k_found = False
for (label, value) in results:
if 'SBB' in label:
if gft_found is False and label.endswith('totalgft'):
if gft_to_compare < 0:
gft_to_compare = value
elif gft_to_compare != value:
with open('diff_in_sbbs_gft.txt', 'a') as f:
f.write('There is diff in gft between two auctions: ' + str(gft_to_compare) + ' ' + str(value) + '\n')
f.write(str(results) + '\n')
if num_of_possible_ps < 10:
f.write(str(market) + '\n')
gft_found = True
elif k_found is False and label.endswith('count'):
if k_to_compare < 0:
k_to_compare = value
elif k_to_compare != value:
with open('diff_in_sbbs_k.txt', 'a') as f:
f.write('There is diff in gft between two auctions: ' + str(k_to_compare) + ' ' + str(value) + '\n')
f.write(str(results) + '\n')
if num_of_possible_ps < 10:
f.write(str(market) + '\n')
k_found = True
compare_sbbs = True
if compare_sbbs:
gft_to_compare = -1
k_to_compare = -1
gft_found = False
k_found = False
for (label, value) in results:
if 'SBB' in label:
if gft_found is False and label.endswith('totalgft'):
if gft_to_compare < 0:
gft_to_compare = value
elif gft_to_compare > value:
with open('diff_in_sbbs_gft.txt', 'a') as f:
f.write('There is diff in gft between two auctions: ' + str(gft_to_compare) + ' ' + str(value) + '\n')
f.write(str(results) + '\n')
if num_of_possible_ps < 10:
f.write(str(market) + '\n')
gft_found = True
elif k_found is False and label.endswith('count'):
if k_to_compare < 0:
k_to_compare = value
elif k_to_compare > value:
with open('diff_in_sbbs_k.txt', 'a') as f:
f.write('There is diff in gft between two auctions: ' + str(k_to_compare) + ' ' + str(value) + '\n')
f.write(str(results) + '\n')
if num_of_possible_ps < 10:
f.write(str(market) + '\n')
k_found = True
#results_table.add(OrderedDict(results))
#print(results)
if len(total_results[str(num_of_possible_ps)]) == 0:
total_results[str(num_of_possible_ps)] = results[0:len(results)]
else:
sum_result = total_results[str(num_of_possible_ps)]
for index in range(len(results)):
if index > 3:
sum_result[index] = (results[index][0], sum_result[index][1] + results[index][1])
#print(total_results)
print(stock_names[i], end=',')
#break
print()
division_number = num_of_iterations * len(stocks_prices)
#division_number = num_of_iterations
for num_of_possible_ps in nums_of_agents:
results = total_results[str(num_of_possible_ps)]
for index in range(len(results)):
if 'gftratio' in results[index][0]:
results[index] = (results[index][0], padding_zeroes(results[index][1] / division_number, 3))
elif index > 3:
results[index] = (results[index][0], padding_zeroes(results[index][1] / division_number, 2))
elif index == 1:
results[index] = (results[index][0], 'Average')
#print(results)
results_table.add(OrderedDict(results))
results_table.done()
def padding_zeroes(result, num_digits: int):
    """Render *result* with exactly *num_digits* characters after the decimal
    point, truncating (not rounding) extra digits and zero-padding short ones.
    """
    text = str(result)
    if '.' not in text:
        text += '.'
    text += '0' * num_digits
    # Cut right after the decimal point plus the requested digit count.
    return text[:text.index('.') + num_digits + 1]
| 60.92827 | 290 | 0.572853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,444 | 0.307756 |
50b799f3f9190f81ef4e7396819f3c43e59e132b | 106 | py | Python | categorias/iniciante/python/1095.py | carlos3g/URI-solutions | dc7f9b896cdff88aedf67611917b178d3ad60ab3 | [
"MIT"
] | 1 | 2022-01-26T23:38:17.000Z | 2022-01-26T23:38:17.000Z | categorias/iniciante/python/1095.py | carlos3g/URI-solutions | dc7f9b896cdff88aedf67611917b178d3ad60ab3 | [
"MIT"
] | 1 | 2020-07-12T00:49:35.000Z | 2021-06-26T20:53:18.000Z | categorias/iniciante/python/1095.py | carlos3g/URI-solutions | dc7f9b896cdff88aedf67611917b178d3ad60ab3 | [
"MIT"
] | 1 | 2020-07-04T03:27:04.000Z | 2020-07-04T03:27:04.000Z | # -*- coding: utf-8 -*-
# Print pairs: I climbs by 3 starting at 1 while J descends 60..0 in steps of 5.
i = 1
j = 60
while j >= 0:
    print('I={} J={}'.format(i, j))
    i += 3
    j -= 5
| 15.142857 | 35 | 0.415094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.320755 |
50bb8c5b0c6ec60a8f6499ccba72330254c202d1 | 6,572 | py | Python | py_version/primary.py | the-phinisher/tic-tac-toe | d8aa6c396fb3c97c3c97ebbcf68c80663bb04723 | [
"MIT"
] | null | null | null | py_version/primary.py | the-phinisher/tic-tac-toe | d8aa6c396fb3c97c3c97ebbcf68c80663bb04723 | [
"MIT"
] | 1 | 2022-02-22T09:48:54.000Z | 2022-02-22T09:48:54.000Z | py_version/primary.py | the-phinisher/tic-tac-toe | d8aa6c396fb3c97c3c97ebbcf68c80663bb04723 | [
"MIT"
] | null | null | null | from copy import deepcopy
from math import inf
import platform
from os import system
# Pick the console-clearing shell command once, based on the host OS.
_CLEAR_COMMAND = 'cls' if platform.system() == 'Windows' else 'clear'


def clear():
    """Wipe the terminal screen via the platform's clear command."""
    system(_CLEAR_COMMAND)
# 3x3 grid; 0 = empty cell, 1 = player1's mark ('X'), -1 = player2's mark ('O').
board = [[0,0,0],
         [0,0,0],
         [0,0,0]]
# Marker values used by the game logic below.
player1 = 1
player2 = -1
# Sentinel move returned by minimax for terminal positions.
null_move = [None, None]
# Draw outcome constant.
draw = 0
def _allequal(*args) -> bool:
    """Return True when every argument equals the first one."""
    reference = args[0]
    return all(arg == reference for arg in args[1:])
def wins(state: list, player: int) -> bool:
    """Return True when *player* occupies a full row, column, or diagonal.

    Parameters
    ----------
    state : list
        The 3x3 board position to be evaluated
    player : int
        The player to be checked for a win
    """
    target = (player, player, player)
    lines = [tuple(row) for row in state]
    lines += [tuple(column) for column in zip(*state)]
    lines.append((state[0][0], state[1][1], state[2][2]))
    lines.append((state[0][2], state[1][1], state[2][0]))
    return target in lines
def game_end(state: list) -> bool:
    """Return True when the game is over: someone won or no empty cell remains.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    if wins(state, player1) or wins(state, player2):
        return True
    return all(cell != 0 for row in state for cell in row)
def evaluate(state: list) -> int:
    """Return the winning player's marker (1 or -1), or 0 if nobody has won.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    for contender in (player1, player2):
        if wins(state, contender):
            return contender
    return 0
def possible_moves(state: list) -> list:
    """Return [row, col] coordinates of every empty cell, in row-major order.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    return [[row_idx, col_idx]
            for row_idx, row in enumerate(state)
            for col_idx, cell in enumerate(row)
            if cell == 0]
def play_move(state: list, move: list, player: int) -> list:
    """Place *player*'s marker at *move* on *state* (mutated in place) and
    return the same board object.

    Parameters
    ----------
    state : list
        The board position for the move to be played
    move : list
        [row, col] of the move to be played
    player : int
        The player playing the move
    """
    row, col = move
    state[row][col] = player
    return state
def minimax(state: list, depth: int, player: int) -> list:
    """Run minimax to depth *depth* and return [best_move, best_evaluation].

    player1 maximizes the evaluation, player2 minimizes it; the first move
    that strictly improves the running best is kept.

    Parameters
    ----------
    state : list
        The board position to be evaluated
    depth : int
        Maximum remaining search depth
    player : int
        The player to move in *state*
    """
    if depth == 0 or game_end(state):
        return [null_move, evaluate(state)]

    maximizing = player == player1
    best = [null_move, -inf if maximizing else inf]
    for candidate in possible_moves(state):
        # Search the position after playing *candidate* on a copied board.
        child = play_move(deepcopy(state), candidate, player)
        _, score = minimax(child, depth - 1, -player)
        if (maximizing and score > best[1]) or (not maximizing and score < best[1]):
            best = [candidate, score]
    return best
def display(state: list) -> None:
    """Pretty-print the board, one row per line with separators.

    Parameters
    ----------
    state : list
        The board position to be displayed
    """
    symbols = {-1: 'O', 0: ' ', 1: 'X'}
    for row in state:
        print(symbols[row[0]], "|", symbols[row[1]], "|", symbols[row[2]])
        print("---------")
def get_comp_move(state: list, depth: int, player: int) -> list:
    """Return the computer's best move per minimax.

    Parameters
    ----------
    state : list
        The state to be evaluated for the best move
    depth : int
        The depth to which the minimax algorithm searches
    player : int
        The player whose move it is in the given state
    """
    best_move, _ = minimax(state, depth, player)
    return best_move
def get_player_move() -> list:
    """Prompt for a move typed as 'row,col' and return it as [row, col]."""
    raw = input("Enter your move: ")
    return [int(part) for part in raw.split(',')]
def main() -> None:
    """
    The main game loop: alternate turns until the game ends, then report.
    """
    global board
    difficulty = 10
    comp_player = None  # no computer side configured: both marks are human-played
    current_player = player1
    while not game_end(board):
        if current_player == comp_player:
            move = get_comp_move(board, difficulty, current_player)
        else:
            clear()
            display(board)
            move = get_player_move()
        board = play_move(board, move, current_player)
        current_player *= -1
    # Show the final position together with the matching outcome message.
    clear()
    display(board)
    outcome = evaluate(board)
    if outcome == player1:
        print("Yay! you won")
    elif outcome == 0:
        print("You got a draw")
    else:
        print("You lost, good luck next time")


if __name__ == "__main__":
    main()
50bd37ee8461876376290a970632ede83e07fe93 | 183 | py | Python | set.py | git4satya/koleksyon | 966f3f6ea16a9c5c0bb12d2aec52c5c89e42090c | [
"MIT"
] | null | null | null | set.py | git4satya/koleksyon | 966f3f6ea16a9c5c0bb12d2aec52c5c89e42090c | [
"MIT"
] | null | null | null | set.py | git4satya/koleksyon | 966f3f6ea16a9c5c0bb12d2aec52c5c89e42090c | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from distutils.core import setup
from Cython.Build import cythonize
setup(name="mcmc", ext_modules=cythonize("./src/koleksyon/mcmc.pyx"))
| 30.5 | 69 | 0.808743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.174863 |
50becd1ac57ae3a984bcaa01bb929787c41d56e8 | 995 | py | Python | tests/test_functions/http_log_exception/main.py | KaylaNguyen/functions-framework-python | e44416e8e35a0f997d0dacd9f313306fe63646a4 | [
"Apache-2.0"
] | 479 | 2020-01-09T19:50:54.000Z | 2022-03-31T11:26:16.000Z | tests/test_functions/http_log_exception/main.py | KaylaNguyen/functions-framework-python | e44416e8e35a0f997d0dacd9f313306fe63646a4 | [
"Apache-2.0"
] | 97 | 2020-01-10T21:45:46.000Z | 2022-03-29T12:19:17.000Z | tests/test_functions/http_log_exception/main.py | KaylaNguyen/functions-framework-python | e44416e8e35a0f997d0dacd9f313306fe63646a4 | [
"Apache-2.0"
] | 81 | 2020-01-09T22:20:08.000Z | 2022-03-25T21:55:20.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used in Worker tests of legacy GCF Python 3.7 logging."""
import logging
# Fixture constants for the legacy GCF Python 3.7 Worker tests (see module
# docstring above).
X_GOOGLE_FUNCTION_NAME = "gcf-function"
X_GOOGLE_ENTRY_POINT = "function"
HOME = "/tmp"
def function(request):
    """Test function which logs exceptions.

    Raises an Exception internally and logs it (with traceback) via
    ``logging.exception``; always returns None.

    Args:
        request: The HTTP request which triggered this function.
    """
    try:
        raise Exception
    except Exception:  # narrowed from a bare except: don't swallow BaseException
        logging.exception("log")
    return None
| 29.264706 | 74 | 0.725628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 787 | 0.790955 |
50bf59fcf053969178b6b0b82c944711edaaf728 | 574 | py | Python | tivol/tests/assets/migration_handlers.py | RoySegall/tivol | b3f32470e10f1d94c88df095e4cb67adb97b6b1a | [
"MIT"
] | 2 | 2020-02-14T01:34:03.000Z | 2020-03-16T16:22:56.000Z | tivol/tests/assets/migration_handlers.py | RoySegall/tivol | b3f32470e10f1d94c88df095e4cb67adb97b6b1a | [
"MIT"
] | 8 | 2020-01-23T14:12:23.000Z | 2020-02-01T09:49:28.000Z | tivol/tests/assets/migration_handlers.py | RoySegall/django-tivol | b3f32470e10f1d94c88df095e4cb67adb97b6b1a | [
"MIT"
] | null | null | null | from tivol.base_classes.mappers import CsvMapper
from tivol.base_classes.migration_handler_base import MigrationHandlerBase
import os
class AnimalMigration(MigrationHandlerBase):
    """Migration handler that loads animal records from a bundled CSV file."""

    def init_metadata(self):
        """Register the CSV source mapper and this migration's identity."""
        source_path = os.path.join(
            os.getcwd(), 'tivol', 'tests', 'assets', 'animals.csv'
        )
        mapper = CsvMapper()
        mapper.set_destination_file(path=source_path)

        self.id = 'animal'
        self.name = 'Animal migration'
        self.description = 'Migrating animals into the system'
        self.add_source_mapper(mapper)
| 30.210526 | 74 | 0.688153 | 437 | 0.761324 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.167247 |
50c1727f4cbaa7d21dda9d008ae65222d11294d4 | 1,718 | py | Python | 2017/day25/day25.py | icemanblues/advent-of-code | eac937ac2762d1c8b8cec358a13af352e339446c | [
"Apache-2.0"
] | null | null | null | 2017/day25/day25.py | icemanblues/advent-of-code | eac937ac2762d1c8b8cec358a13af352e339446c | [
"Apache-2.0"
] | 2 | 2020-04-06T18:56:13.000Z | 2022-03-30T20:32:50.000Z | 2017/day25/day25.py | icemanblues/advent-of-code | eac937ac2762d1c8b8cec358a13af352e339446c | [
"Apache-2.0"
] | null | null | null | from typing import Set
# Advent of Code 2017 metadata used in the banner printed by main().
day_num = "25"
day_title = "The Halting Problem"
def part1():
    """Run the puzzle's Turing machine for 12,523,873 steps and print the
    diagnostic checksum (number of 1s left on the tape)."""
    # Transition table: (state, bit_is_one) -> (write_one, cursor_delta, next_state).
    rules = {
        ('a', False): (True, 1, 'b'),
        ('a', True): (True, -1, 'e'),
        ('b', False): (True, 1, 'c'),
        ('b', True): (True, 1, 'f'),
        ('c', False): (True, -1, 'd'),
        ('c', True): (False, 1, 'b'),
        ('d', False): (True, 1, 'e'),
        ('d', True): (False, -1, 'c'),
        ('e', False): (True, -1, 'a'),
        ('e', True): (False, 1, 'd'),
        ('f', False): (True, 1, 'a'),
        ('f', True): (True, 1, 'c'),
    }
    tape: Set[int] = set()  # positions currently holding a 1
    cursor: int = 0
    state: str = 'a'
    for _ in range(12523873):
        write_one, delta, next_state = rules[(state, cursor in tape)]
        if write_one:
            tape.add(cursor)
        else:
            tape.discard(cursor)
        cursor += delta
        state = next_state
    print("Part 1:", len(tape))
def main():
    """Print the day banner, then run the part-one simulation."""
    print(f"Day {day_num}: {day_title}")
    part1()


if __name__ == '__main__':
    main()
| 23.534247 | 41 | 0.419092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.086147 |
50c19a0a1d56c769b8744b086fd0f546baaa2e03 | 263 | py | Python | Code.py | sad786/Python | 738198bbab5abfa1d521e83291eed7026a4f6f12 | [
"Apache-2.0"
] | null | null | null | Code.py | sad786/Python | 738198bbab5abfa1d521e83291eed7026a4f6f12 | [
"Apache-2.0"
] | null | null | null | Code.py | sad786/Python | 738198bbab5abfa1d521e83291eed7026a4f6f12 | [
"Apache-2.0"
] | null | null | null | def process(N):
temp = str(N)
temp = temp.replace('4','2')
res1 = int(temp)
res2 = N-res1
return res1,res2
# Read T test cases; each case is one integer N split via process().
case_count = int(input())
for case_number in range(case_count):
    value = int(input())
    first, second = process(value)
    print('Case #{}: {} {}'.format(case_number + 1, first, second))
50c26ee0fff34f323c178c0a015c36474ab32bbb | 1,067 | py | Python | repos/insightface/deploy/test.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 1 | 2021-03-23T12:31:59.000Z | 2021-03-23T12:31:59.000Z | repos/insightface/deploy/test.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | repos/insightface/deploy/test.py | batermj/DeepVideoAnalytics | daad116b87370fce1799b7948af73b92f617cf41 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | import face_embedding
import argparse
import cv2
import numpy as np
# Command-line interface: model location, detection options and the
# verification distance threshold. Parsed at import time so `args` is
# available to the __main__ block below.
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../models/model-r34-amf/model,0', help='path to load model.')
parser.add_argument('--gpu', default=None, type=int, help='gpu id')
parser.add_argument('--det', default=2, type=int, help='mtcnn option, 2 means using R+O, else using O')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
args = parser.parse_args()
if __name__ == '__main__':
    model = face_embedding.FaceModel(args)
    # Embed three probe images, then compare the first against the other two.
    features = []
    for image_path in ('/Users/aub3/1.jpg', '/Users/aub3/2.jpg', '/Users/aub3/3.jpg'):
        features.append(model.get_feature(cv2.imread(image_path)))
    f1, f2, f3 = features
    # Squared Euclidean distances between the embeddings.
    dist1 = np.sum(np.square(f1 - f2))
    dist2 = np.sum(np.square(f1 - f3))
    print(dist1, dist2)
50c35512a7e771c592f2f59783b3d7b03116ff00 | 6,509 | py | Python | apollon/io.py | bader28/apollon | 588a347f02e4c78a455d7c6ae7e61af80058eafa | [
"BSD-3-Clause"
] | null | null | null | apollon/io.py | bader28/apollon | 588a347f02e4c78a455d7c6ae7e61af80058eafa | [
"BSD-3-Clause"
] | null | null | null | apollon/io.py | bader28/apollon | 588a347f02e4c78a455d7c6ae7e61af80058eafa | [
"BSD-3-Clause"
] | null | null | null | # Licensed under the terms of the BSD-3-Clause license.
# Copyright (C) 2019 Michael Blaß
# michael.blass@uni-hamburg.de
"""apollon/io.py -- General I/O functionallity.
Classes:
ArrayEncoder Serialize numpy array to JSON.
FileAccessControl Descriptor for file name attributes.
Functions:
array_print_opt Set format for printing numpy arrays.
decode_array Decode numpy array from JSON.
files_in_folder Iterate over all files in given folder.
load Load pickled data.
repath Change path but keep file name.
save Pickle some data.
"""
from contextlib import contextmanager as _contextmanager
import json as _json
import pathlib as _pathlib
import pickle
import typing
import numpy as _np
from . import types as _types
class ArrayEncoder(_json.JSONEncoder):
    """Encode np.ndarrays to JSON.

    Simply set the `cls` parameter of the dump method to this class.
    """
    # pylint: disable=E0202 -- false positive for method-hidden, see
    # https://github.com/PyCQA/pylint/issues/414

    def default(self, o):
        """Serialize *o*: numpy arrays become tagged dicts carrying their
        dtype string and float64 data; everything else falls back to
        JSONEncoder.default.

        Params:
            o (any) Object to encode.

        Returns:
            (dict)
        """
        if not isinstance(o, _np.ndarray):
            return _json.JSONEncoder.default(self, o)
        return {'__ndarray__': True,
                '__dtype__': o.dtype.str,
                'data': o.astype('float64').tolist()}
def decode_array(json_data: dict) -> typing.Any:
    """Properly decode numpy arrays from a JSON data stream.

    Call this on the return value of ``json.load``/``json.loads``.

    Args:
        json_data (dict) JSON formatted dict to decode.

    Returns:
        (any) An ndarray for tagged payloads, the input dict otherwise.
    """
    tagged = '__ndarray__' in json_data and '__dtype__' in json_data
    if not tagged:
        return json_data
    return _np.array(json_data['data'], dtype=json_data['__dtype__'])
class PoissonHmmEncoder(ArrayEncoder):
    """JSON encoder for PoissonHmm.
    """
    def default(self, o):
        """Custom default JSON encoder. Properly handles <class 'PoissonHMM'>.

        Note: Falls back to ``ArrayEncoder`` for all types that do not implement
        a ``to_dict()`` method.

        Params:
            o (any) Object to encode.

        Returns:
            (dict)
        """
        # NOTE(review): `HMM` is not imported anywhere in this module's visible
        # code, so this isinstance check would raise NameError at runtime —
        # confirm the intended import for the HMM/PoissonHmm class.
        if isinstance(o, HMM):
            items = {}
            # Serialize each slot, preferring the attribute's own to_dict().
            for attr in o.__slots__:
                try:
                    items[attr] = getattr(o, attr).to_dict()
                except AttributeError:
                    items[attr] = getattr(o, attr)
            return items
        return ArrayEncoder.default(self, o)
def dump_json(obj, path: _types.PathType = None) -> None:
    """Write ``obj`` to JSON.

    Handles numpy arrays via ``ArrayEncoder``. With ``path`` None the encoded
    object is printed to stdout, otherwise it is written to ``path``.

    Args:
        obj (any) Object to be encoded.
        path (PathType) Output file path.
    """
    if path is None:
        print(_json.dumps(obj, cls=ArrayEncoder))
        return
    target = _pathlib.Path(path)
    with target.open('w') as json_file:
        _json.dump(obj, json_file, cls=ArrayEncoder)
class WavFileAccessControl:
    """Control initialization and access to the ``file`` attribute of class:``AudioData``.

    Guarantees the assigned path points to an existing .wav file; the value is
    stored as a resolved absolute path and is write-once per instance.
    """

    def __init__(self):
        """Hi there!"""
        self.__attribute = {}

    def __get__(self, obj, objtype):
        return self.__attribute[obj]

    def __set__(self, obj, file_name):
        # Write-once: a second assignment on the same instance is rejected.
        if obj in self.__attribute:
            raise AttributeError('File name cannot be changed.')

        resolved = _pathlib.Path(file_name).resolve()
        if not resolved.exists():
            raise FileNotFoundError('`{}` does not exists.'
                                    .format(file_name))
        if not resolved.is_file():
            raise IOError('`{}` is not a file.'.format(file_name))
        if resolved.suffix != '.wav':
            raise IOError('`{}` is not a .wav file.'
                          .format(file_name))
        self.__attribute[obj] = resolved

    def __delete__(self, obj):
        del self.__attribute[obj]
@_contextmanager
def array_print_opt(*args, **kwargs):
    """Temporarily set numpy print options, restoring the previous ones on exit.

    Thanks to unutbu:
    https://stackoverflow.com/questions/2891790/how-to-pretty-print-a-numpy-array-without-
    scientific-notation-and-with-given-pre
    """
    previous = _np.get_printoptions()
    _np.set_printoptions(*args, **kwargs)
    try:
        yield
    finally:
        _np.set_printoptions(**previous)
def load(path: _types.PathType) -> typing.Any:
    """Load a pickled file.

    Args:
        path (str) Path to file.

    Returns:
        (object) The unpickled object.
    """
    with _pathlib.Path(path).open('rb') as file:
        return pickle.load(file)
def repath(current_path: _types.PathType, new_path: _types.PathType,
           ext: str = None) -> _types.PathType:
    """Change the path and keep the file name. Optionally change the extension, too.

    Args:
        current_path (str or Path) The path to change.
        new_path     (str or Path) The new path.
        ext          (str or None) Change file extension if ``ext`` is not None.

    Returns:
        (pathlib.Path)
    """
    current_path = _pathlib.Path(current_path)
    new_path = _pathlib.Path(new_path)

    if ext is None:
        # Keep the file name, extension included.
        file_name = current_path.name
    else:
        # Swap the extension, accepting both 'json' and '.json' forms.
        if not ext.startswith('.'):
            ext = '.' + ext
        # Bug fix: the original called str.join on the stem and discarded the
        # result, so the new extension was never applied and the original
        # suffix was always dropped.
        file_name = current_path.stem + ext

    return new_path.joinpath(file_name)
def save(data: typing.Any, path: _types.PathType):
    """Pickle ``data`` to ``path``.

    Args:
        data (Any)          Pickleable object.
        path (str or Path)  Destination file.
    """
    with _pathlib.Path(path).open('wb') as file:
        pickle.dump(data, file)
| 29.586364 | 94 | 0.591796 | 2,860 | 0.439324 | 408 | 0.062673 | 425 | 0.065284 | 0 | 0 | 3,297 | 0.506452 |
50c4211594641f15f4d0dd51846dc4859fa97b1a | 3,327 | py | Python | stockbot/provider/yahoo.py | Habbie/stockbot | 648e70604828d0ed762c243b4eb83122fd393b4d | [
"Apache-2.0"
] | null | null | null | stockbot/provider/yahoo.py | Habbie/stockbot | 648e70604828d0ed762c243b4eb83122fd393b4d | [
"Apache-2.0"
] | null | null | null | stockbot/provider/yahoo.py | Habbie/stockbot | 648e70604828d0ed762c243b4eb83122fd393b4d | [
"Apache-2.0"
] | null | null | null | import logging
import requests
import urllib.parse
from datetime import datetime
from stockbot.provider.base import BaseQuoteService, BaseQuote
LOGGER = logging.getLogger(__name__)
class YahooFallbackQuote(object):
    """Placeholder quote returned when a ticker search yields no result."""

    def __init__(self, *args, **kwargs):
        pass

    def __str__(self):
        return "Didn't find anything"

    def is_empty(self):
        # Mirrors the quote interface; the fallback is never "empty".
        return False

    def is_fresh(self):
        # A fallback never carries a market timestamp, so it is never fresh.
        return False
class YahooQuote(BaseQuote):
    """Quote built from Yahoo's option-chain JSON payload."""

    def __init__(self, o):
        # Flatten the raw quote payload onto this instance.
        raw_quote = o["optionChain"]["result"][0]["quote"]
        for key, value in raw_quote.items():
            setattr(self, key, value)

        if self.regularMarketTime == "N/A":
            self.timestamp = None
            self.timestamp_str = "unknown"
        else:
            self.timestamp = datetime.fromtimestamp(int(self.regularMarketTime))
            self.timestamp_str = self.timestamp.strftime("%Y-%m-%d %H:%M:%S")

        self.is_pre_market = self.marketState == "PRE"

        self.fields = [
            ["Name", self.shortName],
            ["Price", self.regularMarketPrice],
            ["Low Price", self.regularMarketDayLow],
            ["High Price", self.regularMarketDayHigh],
            ["Percent Change 1 Day", self.regularMarketChangePercent],
        ]
        if self.is_pre_market:
            self.fields += [
                ["Price Pre Market", self.preMarketPrice],
                ["Percent Change Pre Market", self.preMarketChangePercent],
            ]
        self.fields += [
            ["Market", self.market],
            ["Update Time", self.timestamp_str],
        ]

    def is_fresh(self):
        # Fresh means the market timestamp is known and under 16 minutes old.
        if self.timestamp is None:
            return False
        age_seconds = (datetime.now() - self.timestamp).total_seconds()
        return age_seconds < 16 * 60
class YahooSearchResult(object):
    """Thin wrapper around the JSON payload of Yahoo's search endpoint."""

    def __init__(self, o):
        self.o = o

    def get_tickers(self):
        """All symbols present in the payload's quote list."""
        return [entry["symbol"] for entry in self.o["quotes"] if "symbol" in entry]

    def is_empty(self):
        """True unless at least one quote entry carries a symbol."""
        quotes = self.o.get("quotes", [])
        has_symbol = any("symbol" in entry for entry in quotes)
        return not (len(quotes) > 0 and has_symbol)
class YahooQueryService(BaseQuoteService):
    """Quote service backed by the public Yahoo Finance JSON endpoints."""

    # search results probably don't change that much so cache them
    search_cache = {}

    def __init__(self, *args, **kwargs):
        pass

    def get_quote(self, ticker):
        """Resolve *ticker* via search, then fetch its option-chain quote.

        Returns a YahooQuote on success, or YahooFallbackQuote when the
        search yields nothing.
        """
        search_result = self.search(ticker)
        if search_result.is_empty():
            return YahooFallbackQuote()
        symbol = search_result.get_tickers()[0]
        response = requests.get(
            "https://query1.finance.yahoo.com/v7/finance/options/{t}".format(t=symbol))
        response.raise_for_status()
        return YahooQuote(response.json())

    def search(self, query):
        """Query Yahoo's symbol-search endpoint for *query*."""
        query_encoded = urllib.parse.quote(query)
        # Bug fix: the query string contained HTML-entity mojibake ('&reg'
        # rendered as the ® character and '&quot' as '"'); restored to the
        # intended '&region=US&quotesCount=1...&quotesQueryId=...' parameters.
        response = requests.get(
            'https://query2.finance.yahoo.com/v1/finance/search?q='
            '{query}&lang=en-US&region=US&quotesCount=1&newsCount=0&enableFuzzyQuery=false&quotesQueryId'
            '=tss_match_phrase_query&multiQuoteQueryId=multi_quote_single_token_query&newsQueryId=news_cie_vespa'
            '&enableCb=true&enableNavLinks=true&enableEnhancedTrivialQuery=true'.format(query=query_encoded))
        response.raise_for_status()
        return YahooSearchResult(response.json())
| 31.990385 | 113 | 0.613766 | 3,132 | 0.941389 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.211602 |
50c582358879f0d88ea08b7b116c6d9019fa47eb | 850 | py | Python | Database/SQLIte/15.Limit/U1.py | sarincr/Business-analytics-Course-with-Python- | 10e577fdb3cf90bb87c97cd23ee3ecd6a083bfc4 | [
"MIT"
] | 3 | 2022-01-18T05:35:52.000Z | 2022-03-25T06:13:54.000Z | Database/SQLIte/15.Limit/U1.py | sarincr/Business-analytics-Course-with-Python- | 10e577fdb3cf90bb87c97cd23ee3ecd6a083bfc4 | [
"MIT"
] | null | null | null | Database/SQLIte/15.Limit/U1.py | sarincr/Business-analytics-Course-with-Python- | 10e577fdb3cf90bb87c97cd23ee3ecd6a083bfc4 | [
"MIT"
] | 2 | 2022-01-17T08:23:59.000Z | 2022-01-17T08:28:18.000Z | import sqlite3
X = sqlite3.connect('NeDB.db')
Y = X.cursor()
Y.execute('''CREATE TABLE IF NOT EXISTS EMPLOYEE (
ID integer,
Name text NOT NULL,
Date_Join text,
Place text,
Age integer,
Salary real);''')
Y.execute('''INSERT INTO Employee VALUES (1,'John','2020-03-01','Kerala',32,25000),(2,'Adam','2020-01-01','TN',22,30000),(3,'Mary','2022-01-01','Karnataka',24,120000)
,(4,'Jacob','2022-01-01','Mharashtra',24,430000),(5,'Johny','2022-01-01','Karnataka',24,34000),(6,'Lynda','2022-01-01','Delhi',24,56700),
(7,'Smith','2022-01-01','Kerala',24,234000),(8,'Gem','2022-01-01','Karnataka',24,120000)''')
data = Y.execute("SELECT * from Employee LIMIT 2 OFFSET 4");
for k in data:
print (k)
X.commit()
Y.close()
| 27.419355 | 167 | 0.556471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 660 | 0.776471 |
50c5e0a8570cdd739aa3c8e994e48c1fd64e8100 | 196 | py | Python | lambda/python/ip/what_is_my_ip.py | enrikiko/AWS | e8984e5fc5d015285ef2cc3f4295a273d1994e22 | [
"MIT"
] | null | null | null | lambda/python/ip/what_is_my_ip.py | enrikiko/AWS | e8984e5fc5d015285ef2cc3f4295a273d1994e22 | [
"MIT"
] | null | null | null | lambda/python/ip/what_is_my_ip.py | enrikiko/AWS | e8984e5fc5d015285ef2cc3f4295a273d1994e22 | [
"MIT"
] | null | null | null | import json
def lambda_handler(event, context):
    """Return the caller's source IP as an API Gateway proxy response."""
    source_ip = event["requestContext"]["identity"]["sourceIp"]
    return {
        'statusCode': 200,
        'body': json.dumps(source_ip)
    }
| 21.777778 | 57 | 0.561224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.27551 |
50c7a3be78d0fd416ee2c6259ac08e9ae566c6ec | 47 | py | Python | week1/8.py | kamorozov/coursera_python | 706bc1bc46839f8b3debdf293240ad5ce20c9775 | [
"Unlicense"
] | 2 | 2019-05-17T13:42:02.000Z | 2019-05-18T04:00:35.000Z | week1/8.py | kamorozov/coursera_python | 706bc1bc46839f8b3debdf293240ad5ce20c9775 | [
"Unlicense"
] | null | null | null | week1/8.py | kamorozov/coursera_python | 706bc1bc46839f8b3debdf293240ad5ce20c9775 | [
"Unlicense"
] | 2 | 2019-10-03T09:07:44.000Z | 2019-12-28T19:17:20.000Z | n = int(input())
n = n % 100
print(n // 10)
| 11.75 | 17 | 0.468085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50c9aef6ef070fdd9753a4e730c01e945a86b51c | 3,581 | py | Python | qtpp/views/auth.py | xzdylyh/qtppy | baa37c59082863ace3cddb09c30a55a92d64f656 | [
"Apache-2.0"
] | 2 | 2020-04-23T06:10:05.000Z | 2021-01-21T08:56:09.000Z | qtpp/views/auth.py | xzdylyh/qtppy | baa37c59082863ace3cddb09c30a55a92d64f656 | [
"Apache-2.0"
] | 2 | 2020-04-22T10:11:29.000Z | 2020-05-12T10:02:14.000Z | qtpp/views/auth.py | xzdylyh/qtppy | baa37c59082863ace3cddb09c30a55a92d64f656 | [
"Apache-2.0"
] | 4 | 2020-04-22T09:35:14.000Z | 2020-12-19T08:15:25.000Z | import functools
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, jsonify, abort
)
from werkzeug.security import check_password_hash, generate_password_hash
from qtpp import db
from qtpp.libs.framework.operate_db import OperationDB
from qtpp.libs.framework.constant import Const
from qtpp.models.user import User
'''
这里创建了一个名称为 'auth' 的 Blueprint 。和应用对象一样,
蓝图需要知道是在哪里定义的,因此把 __name__ 作为函数的第二个参数。
url_prefix 会添加到所有与该蓝图关联的 URL 前面。
'''
# Blueprint named 'auth'; url_prefix is prepended to every route registered
# below (so e.g. register lives at /auth/register).
bp = Blueprint('auth', __name__, url_prefix='/auth')
# Shared database helper used by the view functions in this module.
odb = OperationDB()
'''
认证蓝图将包括注册新用户、登录和注销视图。
'''
@bp.route('/register', methods=('GET', 'POST'))
def register():
    '''
    Register a new user.

    On POST, validates the submitted username/password JSON payload and
    either returns an error code (missing fields, duplicate username) or
    creates the user and returns success. Any other method yields a 404.
    '''
    if request.method == 'POST':
        username = request.json['username']
        password = request.json['password']
        # generate_password_hash() stores a salted hash instead of the raw
        # password; the ORM-style helpers (query_per/add) parameterize the
        # underlying SQL, guarding against injection.
        if not (username and password):
            # '1003': username or password missing from the payload.
            return jsonify(Const.errcode('1003'))
        elif odb.query_per(User, 'username', username) is not None:
            # '1004': the username is already taken.
            return jsonify(Const.errcode('1004', res={"username": username}))

        odb.add(User(username, generate_password_hash(password)))
        return jsonify(Const.errcode('0'))

    return abort(404)
@bp.route('/login', methods=('GET', 'POST'))
def login():
    '''
    Log a user in.

    On POST, checks the submitted credentials against the stored password
    hash; on success the user's id and name are stored in the session.
    Any other method yields a 404.
    '''
    if request.method == 'POST':
        username = request.json['username']
        password = request.json['password']
        error = None
        user = odb.query_per(User, 'username', username)
        # check_password_hash() hashes the submitted password the same way
        # and compares safely. On success the user id goes into the session,
        # which Flask signs in the browser cookie so it cannot be tampered
        # with client-side.
        if (user is None) or (not check_password_hash(user.password, password)):
            return jsonify(Const.errcode('1003'))

        if error is None:
            session.clear()
            session['user_id'] = user.uid
            session['user_name'] = user.username
            res = {
                "user_id": user.uid,
                "name": user.username
            }
            return jsonify(Const.errcode('0', res=res))
        # flash(error)
    return abort(404)
'''
bp.before_app_request() 注册一个 在视图函数之前运行的函数,不论其 URL 是什么。
load_logged_in_user 检查用户 id 是否已经储存在 session 中,并从数据库中获取用户数据,
然后储存在 g.user 中。 g.user 的持续时间比请求要长。 如果没有用户 id ,或者 id 不存在,
那么 g.user 将会是 None 。
'''
@bp.before_app_request
def load_logged_in_user():
    """Before each request, load the current user's record into ``g.user``.

    Reads ``user_id`` from the session; ``g.user`` stays None when no id is
    stored (or when the lookup returns nothing).
    """
    user_id = session.get('user_id')

    if user_id is None:
        g.user = None
    else:
        g.user = odb.query_per(User, 'uid', user_id)
'''
注销的时候需要把用户 id 从 session 中移除。
然后 load_logged_in_user 就不会在后继请求中载入用户了。
'''
@bp.route('/logout', methods=('GET', 'POST'))
def logout():
    # Drop the whole session (including user_id) so load_logged_in_user
    # leaves g.user = None on all later requests; always reports success.
    session.clear()
    return jsonify(Const.errcode('0'))
'''
用户登录以后才能创建、编辑和删除。
在每个视图中可以使用 装饰器 来完成这个工作。
装饰器返回一个新的视图,该视图包含了传递给装饰器的原视图。
新的函数检查用户 是否已载入。如果已载入,那么就继续正常执行原视图,
否则就重定向到登录页面。 我们会在应用视图中使用这个装饰器。
'''
def login_required(view):
    """Decorator for views that require a logged-in user.

    When g.user is unset the wrapped view is not called and the JSON error
    '1001' is returned instead; otherwise the original view runs unchanged.
    """
    @functools.wraps(view)
    def _guarded(**kwargs):
        if g.user is not None:
            return view(**kwargs)
        return jsonify(Const.errcode('1001'))
    return _guarded
50cb859c45255f62ffcfcc061001338143096d48 | 403 | py | Python | engine/mixer.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | 8 | 2019-12-15T22:32:30.000Z | 2021-06-14T07:38:51.000Z | engine/mixer.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | null | null | null | engine/mixer.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | 2 | 2020-09-10T17:34:23.000Z | 2021-03-11T09:26:26.000Z | import pygame
import engine.file
class Mixer():
    """Lazy cache of pygame.mixer.Sound objects keyed by resource name."""

    def __init__(self):
        # name -> loaded Sound; filled on demand by _getSound().
        self._sounds = {}

    def _getSound(self, name):
        """Return the cached Sound for *name*, loading it on first use.

        Returns None when the file cannot be loaded; a failed load is not
        cached, so it will be retried on the next call (original behaviour).
        """
        if name not in self._sounds:
            try:
                snd = pygame.mixer.Sound(engine.file.getPath(name))
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still works;
                # pygame raises pygame.error / IOError for bad paths or formats.
                return None
            self._sounds[name] = snd
        return self._sounds[name]

    def playSound(self, name):
        """Play the sound *name*; silently a no-op if it cannot be loaded."""
        sound = self._getSound(name)
        if sound is None:  # bug fix: was `== None`
            return
        sound.play()
| 17.521739 | 55 | 0.674938 | 367 | 0.91067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50cdb2534223082d1a5457fe8849bcb64350b906 | 3,984 | py | Python | csv sift.py | hitime1234/progress-8-sorter | 91007d823f0b64c082c39a619464b3c0b7af7bd9 | [
"Unlicense"
] | null | null | null | csv sift.py | hitime1234/progress-8-sorter | 91007d823f0b64c082c39a619464b3c0b7af7bd9 | [
"Unlicense"
] | null | null | null | csv sift.py | hitime1234/progress-8-sorter | 91007d823f0b64c082c39a619464b3c0b7af7bd9 | [
"Unlicense"
] | null | null | null | import time
def readcsv(filename, linenumber):
    """Read "<filename>.csv" and return all rows as a list of lists.

    `linenumber` is unused but kept for backward compatibility with callers.
    """
    import csv
    # Context manager closes the handle; the original leaked an open file.
    with open(filename + ".csv") as fh:
        return list(csv.reader(fh))
def schoolpo8finder(data, name):
    """Print "<school>: progress 8 score is <score>" for every row whose
    school name (column 4) contains *name*, case-insensitively.

    Column 60 is assumed to hold the progress-8 score -- presumably matches
    the DfE CSV layout used by the rest of this script; confirm if it changes.

    Bug fixes vs the original: `print(print(...))` emitted a spurious "None"
    when a school name parsed as a float, and the bare excepts printed a
    blank line for every short row.
    """
    matches = []
    for row in data:
        try:
            if str(name).lower() in str(row[4]).lower():
                matches.append((str(row[4]), str(row[60])))
        except IndexError:
            # Row too short (header/footer junk) -- skip it silently.
            continue
    for school, score in matches:
        print(school + ": progress 8 score is " + score)
def listfl(data):
    """Return the column-60 value of each row parsed as a float.

    Mirrors the original behaviour exactly: the first row (header) AND the
    last row are skipped -- NOTE(review): skipping the *last* row looks
    accidental; confirm before changing.  Unparseable/missing values become
    0.0, and parsed values >= 100 are silently dropped.
    """
    scores = []
    for row in data[1:-1]:
        try:
            value = float(row[60])
            if value < 100.0:
                scores.append(value)
        except Exception:
            scores.append(0.0)
    return scores
def listnames(data):
    """Return column 4 (school name) of every row, "null" when unavailable."""
    def _name(row):
        try:
            return str(row[4])
        except Exception:
            return "null"
    return [_name(row) for row in data]
def combind(array, names):
    """Pair each name with its score: [[name, score], ...].

    The result is truncated to the shorter of the two inputs.  Bug fix: the
    original relied on IndexError for termination and printed a stray blank
    line for every name without a matching score.
    """
    return [[nm, val] for nm, val in zip(names, array)]
def schoolranker(data):
    """Return [[name, score], ...] ranked by score, highest first.

    Ties keep their original row order: list.sort is stable, matching the
    stable bubble sort it replaces (which was accidentally O(n^2)).

    NOTE(review): combind() pairs the *unfiltered* name list with the
    header/last-row-filtered score list, so names and scores are offset by
    one row.  That quirk is preserved here; confirm intent before fixing.
    """
    scores = listfl(data)
    names = listnames(data)
    ranked = combind(scores, names)
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    return ranked
def output(data, slient):
    """Write the school ranking to "rank.txt", one numbered line per school.

    When slient == 1 each line is also echoed to stdout.  (The parameter name
    keeps the upstream misspelling of "silent" for interface compatibility.)
    Bug fixes: the two near-identical loops are merged, and the file is now
    closed even if writing raises.
    """
    print("\n\n\n\n")
    ranking = schoolranker(data)
    with open("rank.txt", "w") as rank:
        for pos, pair in enumerate(ranking, start=1):
            line = str(pos) + ". " + str(pair[0]) + " with average progress 8 score of " + str(pair[1])
            if slient == 1:
                print(line)
            rank.write(line + "\n")
def readingrank(find):
    """Print every line of "rank.txt" containing *find*, case-insensitively.

    A failure to open rank.txt (e.g. file not yet generated) propagates to
    the caller, which regenerates the file -- same contract as before.

    Bug fix: the original called file.close() INSIDE the read loop, so only
    the first line was ever examined; every later readline() raised on the
    closed file and was silently swallowed.
    """
    needle = find.lower()
    # open() failures intentionally propagate (caller regenerates the file).
    with open("rank.txt", "r") as fh:
        for line in fh:
            if needle in line.lower():
                print(line)
print("starting up")
filename = input("filename without csv? ")
data = readcsv(filename, 5680)
print("data loaded")
print("\n")
while True:
print("\n")
choice = input("would you like to do?\n1.generate rank text file\n2.search for progress 8 average\n3.search for ranking number by school\n > ")
if choice == "1":
print("making rank table")
output(data, 1)
print("done")
elif choice == "2":
schoolname = input("what is the name of the school? ")
print("\n\n")
print("here is what is found in csv:")
schoolpo8finder(data, schoolname)
elif choice == "3":
find = input("name of school? ")
try:
print("here is what's been found? ")
readingrank(find)
except:
print("please wait..")
output(data, 0)
print("here is what's been found? ")
readingrank(find)
| 27.86014 | 148 | 0.482681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 611 | 0.153363 |
50cf002b29b4d43070275df7465fc14466be2932 | 3,430 | py | Python | pcircle/globals.py | fwang2/ccss | f2521cc492c5f459363cbaf4a55c9c504762efa4 | [
"Apache-2.0"
] | 20 | 2015-10-02T14:43:17.000Z | 2020-01-21T15:17:42.000Z | pcircle/globals.py | fwang2/ccss | f2521cc492c5f459363cbaf4a55c9c504762efa4 | [
"Apache-2.0"
] | 37 | 2015-10-01T18:52:08.000Z | 2018-11-20T21:05:39.000Z | pcircle/globals.py | fwang2/ccss | f2521cc492c5f459363cbaf4a55c9c504762efa4 | [
"Apache-2.0"
class T:
    """Integer tag constants labelling message types.

    The names suggest a work-stealing / token-termination protocol --
    confirm against the send/recv call sites that use these tags.
    """
    WORK_REQUEST = 1
    WORK_REPLY = 2
    REDUCE = 3
    BARRIER = 4
    TOKEN = 7
class Tally:
    """Class-level counters accumulated while walking/copying a file tree.

    All attributes are plain class variables (shared process-wide state),
    initialised to zero and incremented elsewhere in the package.
    """
    total_dirs = 0
    total_files = 0
    total_filesize = 0
    total_stat_filesize = 0
    total_symlinks = 0
    total_skipped = 0
    total_sparse = 0
    max_files = 0
    total_nlinks = 0
    total_nlinked_files = 0
    total_0byte_files = 0
    devfile_cnt = 0
    devfile_sz = 0
    spcnt = 0  # stripe cnt account per process
    # ZFS
    total_blocks = 0
class G:
    """Process-global constants and mutable configuration shared across the
    package: protocol token values, logging formats, size constants and
    histogram bins.  Mutable fields (logger, src, dest, ...) are assigned
    once at startup and read everywhere.
    """
    ZERO = 0
    ABORT = -1
    # Token colours for distributed termination detection -- presumably a
    # Dijkstra-style white/black token scheme; confirm against the circle code.
    WHITE = 50
    BLACK = 51
    NONE = -99
    TERMINATE = -100
    MSG = 99
    MSG_VALID = True
    MSG_INVALID = False
    # Logging format strings (fmt1/fmt2 expect a custom %(rank)s field).
    fmt1 = '%(asctime)s - %(levelname)s - %(rank)s:%(filename)s:%(lineno)d - %(message)s'
    fmt2 = '%(asctime)s - %(rank)s:%(filename)s:%(lineno)d - %(message)s'
    bare_fmt = '%(name)s - %(levelname)s - %(message)s'
    mpi_fmt = '%(name)s - %(levelname)s - %(rank)s - %(message)s'
    bare_fmt2 = '%(message)s'
    # NOTE: `str` shadows the builtin within this class namespace; it maps
    # the token/message constants above to display names.
    str = {WHITE: "white", BLACK: "black", NONE: "not set", TERMINATE: "terminate",
           ABORT: "abort", MSG: "message"}
    KEY = "key"
    VAL = "val"
    # Runtime state, populated at startup.
    logger = None
    logfile = None
    loglevel = "warn"
    use_store = False
    fix_opt = False
    preserve = False
    DB_BUFSIZE = 10000
    memitem_threshold = 100000
    tempdir = None
    total_chunks = 0
    rid = None
    chk_file = None
    chk_file_db = None
    totalsize = 0
    src = None
    dest = None
    args_src = None
    args_dest = None
    resume = None
    reduce_interval = 30
    reduce_enabled = False
    verbosity = 0
    am_root = False
    copytype = 'dir2dir'
    # Lustre file system
    fs_lustre = None
    lfs_bin = None
    stripe_threshold = None
    # Size constants (bytes): b<k/m/g/tb> suffixes denote KiB/MiB/GiB/TiB.
    b0 = 0
    b4k = 4 * 1024
    b8k = 8 * 1024
    b16k = 16 * 1024
    b32k = 32 * 1024
    b64k = 64 * 1024
    b128k = 128 * 1024
    b256k = 256 * 1024
    b512k = 512 * 1024
    b1m = 1024 * 1024
    b2m = 2 * b1m
    b4m = 4 * b1m
    b8m = 8 * b1m
    b16m = 16 * b1m
    b32m = 32 * b1m
    b64m = 64 * b1m
    b128m = 128 * b1m
    b256m = 256 * b1m
    b512m = 512 * b1m
    b1g = 1024 * b1m
    b4g = 4 * b1g
    b16g = 16 * b1g
    b64g = 64 * b1g
    b128g = 128 * b1g
    b256g = 256 * b1g
    b512g = 512 * b1g
    b1tb = 1024 * b1g
    b4tb = 4 * b1tb
    FSZ_BOUND = 64 * b1tb
    # 25 bins
    bins = [b0, b4k, b8k, b16k, b32k, b64k, b128k, b256k, b512k,
            b1m, b2m, b4m, b16m, b32m, b64m, b128m, b256m, b512m,
            b1g, b4g, b64g, b128g, b256g, b512g, b1tb, b4tb]
    # 17 bins, the last bin is special
    # This is error-prone, to be refactored.
    # bins_fmt = ["B1_000k_004k", "B1_004k_008k", "B1_008k_016k", "B1_016k_032k", "B1_032k_064k", "B1_064k_256k",
    #             "B1_256k_512k", "B1_512k_001m",
    #             "B2_001m_004m", "B2_m004_016m", "B2_016m_512m", "B2_512m_001g",
    #             "B3_001g_100g", "B3_100g_256g", "B3_256g_512g",
    #             "B4_512g_001t",
    #             "B5_001t_up"]
    # GPFS
    gpfs_block_size = ("256k", "512k", "b1m", "b4m", "b8m", "b16m", "b32m")
    gpfs_block_cnt = [0, 0, 0, 0, 0, 0, 0]
    gpfs_subs = (b256k/32, b512k/32, b1m/32, b4m/32, b8m/32, b16m/32, b32m/32)
    # File suffixes treated as "development" files -- compared uppercased.
    dev_suffixes = [".C", ".CC", ".CU", ".H", ".CPP", ".HPP", ".CXX", ".F", ".I", ".II",
                    ".F90", ".F95", ".F03", ".FOR", ".O", ".A", ".SO", ".S",
                    ".IN", ".M4", ".CACHE", ".PY", ".PYC"]
| 25.597015 | 113 | 0.533819 | 3,424 | 0.998251 | 0 | 0 | 0 | 0 | 0 | 0 | 971 | 0.28309 |
50d06ff15aa34045442b090d6e40b36a03bc3175 | 714 | py | Python | kge/util/glove_to_word2vec.py | Team-Project-OKG/kge | d09e41ccc30b54c1de983e8c4f6f95b92983bcb3 | [
"MIT"
] | null | null | null | kge/util/glove_to_word2vec.py | Team-Project-OKG/kge | d09e41ccc30b54c1de983e8c4f6f95b92983bcb3 | [
"MIT"
] | 1 | 2020-12-10T16:59:43.000Z | 2020-12-10T16:59:43.000Z | kge/util/glove_to_word2vec.py | Team-Project-OKG/kge | d09e41ccc30b54c1de983e8c4f6f95b92983bcb3 | [
"MIT"
] | null | null | null | import os
import sys
from gensim.scripts.glove2word2vec import glove2word2vec
from kge.misc import kge_base_dir
def _convert_to_word2vec(
    filename: str
) -> None:
    """
    Convert a file of pretrained embeddings in GloVe format (located in the
    package's 'pretrained' folder) to word2vec format, writing the result as
    "<stem>_word2vec<ext>" in the same folder.
    """
    folder = os.path.join(kge_base_dir(), "pretrained")
    input_file = os.path.join(folder, filename)
    # Bug fix: filename.rindex(".") raised ValueError for names without an
    # extension; os.path.splitext splits at the same last dot but is safe.
    stem, ext = os.path.splitext(filename)
    output_file = os.path.join(folder, stem + "_word2vec" + ext)
    glove2word2vec(input_file, output_file)
# give file name in folder 'pretrained' as first command line argument
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, the file name inside
    # the 'pretrained' folder (raises IndexError when omitted).
    _convert_to_word2vec(sys.argv[1])
| 27.461538 | 81 | 0.72409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.273109 |
50d086d27fad503205a3c61c81321db64af884db | 25,829 | py | Python | ui/command_list.py | wangjing1215/COM-DEV | 2d8e412447cb2b98847c871208af3d2497f19d1b | [
"MIT"
] | null | null | null | ui/command_list.py | wangjing1215/COM-DEV | 2d8e412447cb2b98847c871208af3d2497f19d1b | [
"MIT"
] | null | null | null | ui/command_list.py | wangjing1215/COM-DEV | 2d8e412447cb2b98847c871208af3d2497f19d1b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'command_list.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated UI builder for the command-list dialog.

    Generated from 'command_list.ui' by PyQt5 UI code generator 5.15.4;
    regenerating the .ui file overwrites this class, so avoid hand edits.
    """
    def setupUi(self, Dialog):
        """Build the widget tree on *Dialog* and apply the global stylesheet."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(531, 473)
        # Single application-wide QSS stylesheet emitted by Qt Designer.
        Dialog.setStyleSheet("QDialog {\n"
"    background-color:#ddedff;\n"
"}\n"
"QTextEdit {\n"
"    border-width: 1px;\n"
"    border-style: solid;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QPlainTextEdit {\n"
"    border-width: 1px;\n"
"    border-style: solid;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QToolButton {\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(255,255,255);\n"
"}\n"
"QToolButton:hover{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(255,255,255);\n"
"}\n"
"QToolButton:pressed{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(142,142,142);\n"
"}\n"
"QPushButton{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(255,255,255);\n"
"}\n"
"QPushButton::default{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(255,255,255);\n"
"}\n"
"QPushButton:hover{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(197, 197, 197), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(197, 197, 197));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(195, 195, 195), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(255,255,255);\n"
"}\n"
"QPushButton:pressed{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: rgb(0,0,0);\n"
"    padding: 2px;\n"
"    background-color: rgb(142,142,142);\n"
"}\n"
"QPushButton:disabled{\n"
"    border-style: solid;\n"
"    border-top-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(217, 217, 217), stop:1 rgb(227, 227, 227));\n"
"    border-left-color: qlineargradient(spread:pad, x1:0, y1:0.5, x2:1, y2:0.5, stop:0 rgb(227, 227, 227), stop:1 rgb(217, 217, 217));\n"
"    border-bottom-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgb(215, 215, 215), stop:1 rgb(222, 222, 222));\n"
"    border-width: 1px;\n"
"    border-radius: 5px;\n"
"    color: #808086;\n"
"    padding: 2px;\n"
"    background-color: rgb(142,142,142);\n"
"}\n"
"QLineEdit {\n"
"    border-width: 1px; border-radius: 4px;\n"
"    border-style: solid;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QLabel {\n"
"    color: #000000;\n"
"}\n"
"QLCDNumber {\n"
"    color: rgb(0, 113, 255, 255);\n"
"}\n"
"QProgressBar {\n"
"    text-align: center;\n"
"    color: rgb(240, 240, 240);\n"
"    border-width: 1px; \n"
"    border-radius: 10px;\n"
"    border-color: rgb(230, 230, 230);\n"
"    border-style: solid;\n"
"    background-color:rgb(207,207,207);\n"
"}\n"
"QProgressBar::chunk {\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"    border-radius: 10px;\n"
"}\n"
"QMenuBar {\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(207, 209, 207, 255), stop:1 rgba(230, 229, 230, 255));\n"
"}\n"
"QMenuBar::item {\n"
"    color: #000000;\n"
"    spacing: 3px;\n"
"    padding: 1px 4px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(207, 209, 207, 255), stop:1 rgba(230, 229, 230, 255));\n"
"}\n"
"\n"
"QMenuBar::item:selected {\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"    color: #FFFFFF;\n"
"}\n"
"QMenu::item:selected {\n"
"    border-style: solid;\n"
"    border-top-color: transparent;\n"
"    border-right-color: transparent;\n"
"    border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"    border-bottom-color: transparent;\n"
"    border-left-width: 2px;\n"
"    color: #000000;\n"
"    padding-left:15px;\n"
"    padding-top:4px;\n"
"    padding-bottom:4px;\n"
"    padding-right:7px;\n"
"}\n"
"QMenu::item {\n"
"    border-style: solid;\n"
"    border-top-color: transparent;\n"
"    border-right-color: transparent;\n"
"    border-left-color: transparent;\n"
"    border-bottom-color: transparent;\n"
"    border-bottom-width: 1px;\n"
"    color: #000000;\n"
"    padding-left:17px;\n"
"    padding-top:4px;\n"
"    padding-bottom:4px;\n"
"    padding-right:7px;\n"
"}\n"
"QTabWidget {\n"
"    color:rgb(0,0,0);\n"
"    background-color:#000000;\n"
"}\n"
"QTabWidget::pane {\n"
"    border-color: rgb(223,223,223);\n"
"    background-color:rgb(226,226,226);\n"
"    border-style: solid;\n"
"    border-width: 2px;\n"
"    border-radius: 6px;\n"
"}\n"
"QTabBar::tab:first {\n"
"    border-style: solid;\n"
"    border-left-width:1px;\n"
"    border-right-width:0px;\n"
"    border-top-width:1px;\n"
"    border-bottom-width:1px;\n"
"    border-top-color: rgb(209,209,209);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
"    border-bottom-color: rgb(229,229,229);\n"
"    border-top-left-radius: 4px;\n"
"    border-bottom-left-radius: 4px;\n"
"    color: #000000;\n"
"    padding: 3px;\n"
"    margin-left:0px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n"
"}\n"
"QTabBar::tab:last {\n"
"    border-style: solid;\n"
"    border-width:1px;\n"
"    border-top-color: rgb(209,209,209);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
"    border-right-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
"    border-bottom-color: rgb(229,229,229);\n"
"    border-top-right-radius: 4px;\n"
"    border-bottom-right-radius: 4px;\n"
"    color: #000000;\n"
"    padding: 3px;\n"
"    margin-left:0px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n"
"}\n"
"QTabBar::tab {\n"
"    border-style: solid;\n"
"    border-top-width:1px;\n"
"    border-bottom-width:1px;\n"
"    border-left-width:1px;\n"
"    border-top-color: rgb(209,209,209);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
"    border-bottom-color: rgb(229,229,229);\n"
"    color: #000000;\n"
"    padding: 3px;\n"
"    margin-left:0px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(247, 247, 247, 255), stop:1 rgba(255, 255, 255, 255));\n"
"}\n"
"QTabBar::tab:selected, QTabBar::tab:last:selected, QTabBar::tab:hover {\n"
"    border-style: solid;\n"
"    border-left-width:1px;\n"
"    border-right-color: transparent;\n"
"    border-top-color: rgb(209,209,209);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
"    border-bottom-color: rgb(229,229,229);\n"
"    color: #FFFFFF;\n"
"    padding: 3px;\n"
"    margin-left:0px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"\n"
"QTabBar::tab:selected, QTabBar::tab:first:selected, QTabBar::tab:hover {\n"
"    border-style: solid;\n"
"    border-left-width:1px;\n"
"    border-bottom-width:1px;\n"
"    border-top-width:1px;\n"
"    border-right-color: transparent;\n"
"    border-top-color: rgb(209,209,209);\n"
"    border-left-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(209, 209, 209, 209), stop:1 rgba(229, 229, 229, 229));\n"
"    border-bottom-color: rgb(229,229,229);\n"
"    color: #FFFFFF;\n"
"    padding: 3px;\n"
"    margin-left:0px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"\n"
"QCheckBox {\n"
"    color: #000000;\n"
"    padding: 2px;\n"
"}\n"
"QCheckBox:disabled {\n"
"    color: #808086;\n"
"    padding: 2px;\n"
"}\n"
"\n"
"QCheckBox:hover {\n"
"    border-radius:4px;\n"
"    border-style:solid;\n"
"    padding-left: 1px;\n"
"    padding-right: 1px;\n"
"    padding-bottom: 1px;\n"
"    padding-top: 1px;\n"
"    border-width:1px;\n"
"    border-color: transparent;\n"
"}\n"
"QCheckBox::indicator:checked {\n"
"\n"
"    height: 10px;\n"
"    width: 10px;\n"
"    border-style:solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"    color: #000000;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QCheckBox::indicator:unchecked {\n"
"\n"
"    height: 10px;\n"
"    width: 10px;\n"
"    border-style:solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"    color: #000000;\n"
"}\n"
"QRadioButton {\n"
"    color: 000000;\n"
"    padding: 1px;\n"
"}\n"
"QRadioButton::indicator:checked {\n"
"    height: 10px;\n"
"    width: 10px;\n"
"    border-style:solid;\n"
"    border-radius:5px;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"    color: #a9b7c6;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QRadioButton::indicator:!checked {\n"
"    height: 10px;\n"
"    width: 10px;\n"
"    border-style:solid;\n"
"    border-radius:5px;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"    color: #a9b7c6;\n"
"    background-color: transparent;\n"
"}\n"
"QStatusBar {\n"
"    color:#027f7f;\n"
"}\n"
"QSpinBox {\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QDoubleSpinBox {\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QTimeEdit {\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QDateTimeEdit {\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"QDateEdit {\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(0, 113, 255, 255), stop:1 rgba(91, 171, 252, 255));\n"
"}\n"
"\n"
"QToolBox {\n"
"    color: #a9b7c6;\n"
"    background-color:#000000;\n"
"}\n"
"QToolBox::tab {\n"
"    color: #a9b7c6;\n"
"    background-color:#000000;\n"
"}\n"
"QToolBox::tab:selected {\n"
"    color: #FFFFFF;\n"
"    background-color:#000000;\n"
"}\n"
"QScrollArea {\n"
"    color: #FFFFFF;\n"
"    background-color:#000000;\n"
"}\n"
"QSlider::groove:horizontal {\n"
"    height: 5px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QSlider::groove:vertical {\n"
"    width: 5px;\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QSlider::handle:horizontal {\n"
"    background: rgb(253,253,253);\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: rgb(207,207,207);\n"
"    width: 12px;\n"
"    margin: -5px 0;\n"
"    border-radius: 7px;\n"
"}\n"
"QSlider::handle:vertical {\n"
"    background: rgb(253,253,253);\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: rgb(207,207,207);\n"
"    height: 12px;\n"
"    margin: 0 -5px;\n"
"    border-radius: 7px;\n"
"}\n"
"QSlider::add-page:horizontal {\n"
"    background: rgb(181,181,181);\n"
"}\n"
"QSlider::add-page:vertical {\n"
"    background: rgb(181,181,181);\n"
"}\n"
"QSlider::sub-page:horizontal {\n"
"    background-color: qlineargradient(spread:pad, x1:0.5, y1:1, x2:0.5, y2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QSlider::sub-page:vertical {\n"
"    background-color: qlineargradient(spread:pad, y1:0.5, x1:1, y2:0.5, x2:0, stop:0 rgba(49, 147, 250, 255), stop:1 rgba(34, 142, 255, 255));\n"
"}\n"
"QScrollBar:horizontal {\n"
"    max-height: 20px;\n"
"    border: 1px transparent grey;\n"
"    margin: 0px 20px 0px 20px;\n"
"}\n"
"QScrollBar:vertical {\n"
"    max-width: 20px;\n"
"    border: 1px transparent grey;\n"
"    margin: 20px 0px 20px 0px;\n"
"}\n"
"QScrollBar::handle:horizontal {\n"
"    background: rgb(253,253,253);\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: rgb(207,207,207);\n"
"    border-radius: 7px;\n"
"    min-width: 25px;\n"
"}\n"
"QScrollBar::handle:horizontal:hover {\n"
"    background: rgb(253,253,253);\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: rgb(147, 200, 200);\n"
"    border-radius: 7px;\n"
"    min-width: 25px;\n"
"}\n"
"QScrollBar::handle:vertical {\n"
"    background: rgb(253,253,253);\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: rgb(207,207,207);\n"
"    border-radius: 7px;\n"
"    min-height: 25px;\n"
"}\n"
"QScrollBar::handle:vertical:hover {\n"
"    background: rgb(253,253,253);\n"
"    border-style: solid;\n"
"    border-width: 1px;\n"
"    border-color: rgb(147, 200, 200);\n"
"    border-radius: 7px;\n"
"    min-height: 25px;\n"
"}\n"
"QScrollBar::add-line:horizontal {\n"
"    border: 2px transparent grey;\n"
"    border-top-right-radius: 7px;\n"
"    border-bottom-right-radius: 7px;\n"
"    background: rgba(34, 142, 255, 255);\n"
"    width: 20px;\n"
"    subcontrol-position: right;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:horizontal:pressed {\n"
"    border: 2px transparent grey;\n"
"    border-top-right-radius: 7px;\n"
"    border-bottom-right-radius: 7px;\n"
"    background: rgb(181,181,181);\n"
"    width: 20px;\n"
"    subcontrol-position: right;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:vertical {\n"
"    border: 2px transparent grey;\n"
"    border-bottom-left-radius: 7px;\n"
"    border-bottom-right-radius: 7px;\n"
"    background: rgba(34, 142, 255, 255);\n"
"    height: 20px;\n"
"    subcontrol-position: bottom;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::add-line:vertical:pressed {\n"
"    border: 2px transparent grey;\n"
"    border-bottom-left-radius: 7px;\n"
"    border-bottom-right-radius: 7px;\n"
"    background: rgb(181,181,181);\n"
"    height: 20px;\n"
"    subcontrol-position: bottom;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:horizontal {\n"
"    border: 2px transparent grey;\n"
"    border-top-left-radius: 7px;\n"
"    border-bottom-left-radius: 7px;\n"
"    background: rgba(34, 142, 255, 255);\n"
"    width: 20px;\n"
"    subcontrol-position: left;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:horizontal:pressed {\n"
"    border: 2px transparent grey;\n"
"    border-top-left-radius: 7px;\n"
"    border-bottom-left-radius: 7px;\n"
"    background: rgb(181,181,181);\n"
"    width: 20px;\n"
"    subcontrol-position: left;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:vertical {\n"
"    border: 2px transparent grey;\n"
"    border-top-left-radius: 7px;\n"
"    border-top-right-radius: 7px;\n"
"    background: rgba(34, 142, 255, 255);\n"
"    height: 20px;\n"
"    subcontrol-position: top;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:vertical:pressed {\n"
"    border: 2px transparent grey;\n"
"    border-top-left-radius: 7px;\n"
"    border-top-right-radius: 7px;\n"
"    background: rgb(181,181,181);\n"
"    height: 20px;\n"
"    subcontrol-position: top;\n"
"    subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::left-arrow:horizontal {\n"
"    border: 1px transparent grey;\n"
"    border-top-left-radius: 3px;\n"
"    border-bottom-left-radius: 3px;\n"
"    width: 6px;\n"
"    height: 6px;\n"
"    background: white;\n"
"}\n"
"QScrollBar::right-arrow:horizontal {\n"
"    border: 1px transparent grey;\n"
"    border-top-right-radius: 3px;\n"
"    border-bottom-right-radius: 3px;\n"
"    width: 6px;\n"
"    height: 6px;\n"
"    background: white;\n"
"}\n"
"QScrollBar::up-arrow:vertical {\n"
"    border: 1px transparent grey;\n"
"    border-top-left-radius: 3px;\n"
"    border-top-right-radius: 3px;\n"
"    width: 6px;\n"
"    height: 6px;\n"
"    background: white;\n"
"}\n"
"QScrollBar::down-arrow:vertical {\n"
"    border: 1px transparent grey;\n"
"    border-bottom-left-radius: 3px;\n"
"    border-bottom-right-radius: 3px;\n"
"    width: 6px;\n"
"    height: 6px;\n"
"    background: white;\n"
"}\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\n"
"    background: none;\n"
"}\n"
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n"
"    background: none;\n"
"}")
        # Top row: category combo box + search line edit + "find" button.
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        self.comboBox = QtWidgets.QComboBox(Dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
        self.comboBox.setSizePolicy(sizePolicy)
        self.comboBox.setObjectName("comboBox")
        self.horizontalLayout_2.addWidget(self.comboBox)
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout_2.addWidget(self.label_2)
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout_2.addWidget(self.lineEdit)
        self.pushButton_4 = QtWidgets.QPushButton(Dialog)
        self.pushButton_4.setObjectName("pushButton_4")
        self.horizontalLayout_2.addWidget(self.pushButton_4)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Middle: the command list itself.
        self.listWidget = QtWidgets.QListWidget(Dialog)
        self.listWidget.setObjectName("listWidget")
        self.verticalLayout.addWidget(self.listWidget)
        # Bottom row: import/export buttons, spacer, add button.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_2 = QtWidgets.QPushButton(Dialog)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton_3 = QtWidgets.QPushButton(Dialog)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        spacerItem = QtWidgets.QSpacerItem(268, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply (re)translated display strings to the widgets built above."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "分类"))        # "Category"
        self.label_2.setText(_translate("Dialog", "搜索"))      # "Search"
        self.pushButton_4.setText(_translate("Dialog", "查找"))  # "Find"
        self.pushButton_2.setText(_translate("Dialog", "导入"))  # "Import"
        self.pushButton_3.setText(_translate("Dialog", "导出"))  # "Export"
        self.pushButton.setText(_translate("Dialog", "添加"))    # "Add"
| 41.3264 | 150 | 0.628634 | 25,512 | 0.98681 | 0 | 0 | 0 | 0 | 0 | 0 | 22,305 | 0.862763 |
50d091c9f0425cffc07e257ef10b1881ccd4fbda | 1,514 | py | Python | pedidos/migrations/0001_initial.py | tiagocordeiro/zumaq-partners | ba2c5d4257438ec062ef034096cd203efe58ef4a | [
"MIT"
] | 1 | 2019-02-13T11:01:25.000Z | 2019-02-13T11:01:25.000Z | pedidos/migrations/0001_initial.py | tiagocordeiro/zumaq-partners | ba2c5d4257438ec062ef034096cd203efe58ef4a | [
"MIT"
] | 619 | 2018-11-26T06:11:05.000Z | 2022-03-31T22:56:13.000Z | pedidos/migrations/0001_initial.py | tiagocordeiro/zumaq-partners | ba2c5d4257438ec062ef034096cd203efe58ef4a | [
"MIT"
] | 1 | 2020-03-12T16:34:13.000Z | 2020-03-12T16:34:13.000Z | # Generated by Django 2.1.7 on 2019-02-14 13:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Pedido',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('active', models.BooleanField(default=True, verbose_name='ativo')),
('status', models.IntegerField(blank=True, choices=[(0, 'Aberto'), (1, 'Enviado'), (2, 'Finalizado'), (3, 'Cancelado')], default=0, verbose_name='Situação')),
('parceiro', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='parceiro')),
],
options={
'verbose_name': 'pedido',
'verbose_name_plural': 'pedidos',
},
),
migrations.CreateModel(
name='PedidoItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| 38.820513 | 174 | 0.602378 | 1,357 | 0.895119 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.181398 |
50d1ff29cc95475cd6346e510fcbdb66477154e4 | 2,358 | py | Python | spyderlib/utils/debug.py | MarlaJahari/Marve | d8d18122c19f050429a91ef23edc87a85fecad1d | [
"MIT"
] | 1 | 2021-01-25T02:13:36.000Z | 2021-01-25T02:13:36.000Z | SMlib/utils/debug.py | koll00/Gui_SM | d02d28b20ef2ae1aa602b9bb52a6bb55fd66be9c | [
"MIT"
] | null | null | null | SMlib/utils/debug.py | koll00/Gui_SM | d02d28b20ef2ae1aa602b9bb52a6bb55fd66be9c | [
"MIT"
] | 1 | 2021-08-04T08:13:34.000Z | 2021-08-04T08:13:34.000Z | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Debug utilities"""
import inspect
import traceback
import time
def log_time(fd):
timestr = "Logging time: %s" % time.ctime(time.time())
print >>fd, "="*len(timestr)
print >>fd, timestr
print >>fd, "="*len(timestr)
print >>fd, ""
def log_last_error(fname, context=None):
"""Log last error in filename *fname* -- *context*: string (optional)"""
fd = open(fname, 'a')
log_time(fd)
if context:
print >>fd, "Context"
print >>fd, "-------"
print >>fd, ""
print >>fd, context
print >>fd, ""
print >>fd, "Traceback"
print >>fd, "---------"
print >>fd, ""
traceback.print_exc(file=fd)
print >>fd, ""
print >>fd, ""
def log_dt(fname, context, t0):
fd = open(fname, 'a')
log_time(fd)
print >>fd, "%s: %d ms" % (context, 10*round(1e2*(time.time()-t0)))
print >>fd, ""
print >>fd, ""
def caller_name(skip=2):
"""Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
"""
stack = inspect.stack()
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start][0]
name = []
module = inspect.getmodule(parentframe)
# `modname` can be None when frame is executed directly in console
# TODO(techtonik): consider using __main__
if module:
name.append(module.__name__)
# detect classname
if 'self' in parentframe.f_locals:
# I don't know any way to detect call from the object method
# XXX: there seems to be no way to detect static method call - it will
# be just a function call
name.append(parentframe.f_locals['self'].__class__.__name__)
codename = parentframe.f_code.co_name
if codename != '<module>': # top level usually
name.append( codename ) # function or a method
del parentframe
return ".".join(name)
| 31.026316 | 79 | 0.583121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,019 | 0.431963 |
50d2c1c7aa4bbe6ee66540b15f05fba5392b4c86 | 5,495 | bzl | Python | swift/internal/swift_c_module.bzl | BalestraPatrick/rules_swift | 35ef1d6ebd7adb8d20c096bb4355cf41c9a0b5cf | [
"Apache-2.0"
] | 215 | 2018-06-06T18:05:25.000Z | 2022-03-28T09:46:50.000Z | swift/internal/swift_c_module.bzl | BalestraPatrick/rules_swift | 35ef1d6ebd7adb8d20c096bb4355cf41c9a0b5cf | [
"Apache-2.0"
] | 416 | 2018-06-06T22:13:18.000Z | 2022-03-31T15:57:04.000Z | swift/internal/swift_c_module.bzl | BalestraPatrick/rules_swift | 35ef1d6ebd7adb8d20c096bb4355cf41c9a0b5cf | [
"Apache-2.0"
] | 92 | 2018-06-10T17:45:35.000Z | 2022-03-17T21:45:27.000Z | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the `swift_c_module` rule."""
load(":swift_common.bzl", "swift_common")
load(":utils.bzl", "merge_runfiles")
def _swift_c_module_impl(ctx):
module_map = ctx.file.module_map
deps = ctx.attr.deps
cc_infos = [dep[CcInfo] for dep in deps]
data_runfiles = [dep[DefaultInfo].data_runfiles for dep in deps]
default_runfiles = [dep[DefaultInfo].default_runfiles for dep in deps]
if cc_infos:
cc_info = cc_common.merge_cc_infos(cc_infos = cc_infos)
compilation_context = cc_info.compilation_context
else:
cc_info = None
compilation_context = cc_common.create_compilation_context()
providers = [
# We must repropagate the dependencies' DefaultInfos, otherwise we
# will lose runtime dependencies that the library expects to be
# there during a test (or a regular `bazel run`).
DefaultInfo(
data_runfiles = merge_runfiles(data_runfiles),
default_runfiles = merge_runfiles(default_runfiles),
files = depset([module_map]),
),
swift_common.create_swift_info(
modules = [
swift_common.create_module(
name = ctx.attr.module_name,
clang = swift_common.create_clang_module(
compilation_context = compilation_context,
module_map = module_map,
# TODO(b/142867898): Precompile the module and place it
# here.
precompiled_module = None,
),
),
],
),
]
if cc_info:
providers.append(cc_info)
return providers
swift_c_module = rule(
attrs = {
"module_map": attr.label(
allow_single_file = True,
doc = """\
The module map file that should be loaded to import the C library dependency
into Swift.
""",
mandatory = True,
),
"module_name": attr.string(
doc = """\
The name of the top-level module in the module map that this target represents.
A single `module.modulemap` file can define multiple top-level modules. When
building with implicit modules, the presence of that module map allows any of
the modules defined in it to be imported. When building explicit modules,
however, there is a one-to-one correspondence between top-level modules and
BUILD targets and the module name must be known without reading the module map
file, so it must be provided directly. Therefore, one may have multiple
`swift_c_module` targets that reference the same `module.modulemap` file but
with different module names and headers.
""",
mandatory = True,
),
"deps": attr.label_list(
allow_empty = False,
doc = """\
A list of C targets (or anything propagating `CcInfo`) that are dependencies of
this target and whose headers may be referenced by the module map.
""",
mandatory = True,
providers = [[CcInfo]],
),
},
doc = """\
Wraps one or more C targets in a new module map that allows it to be imported
into Swift to access its C interfaces.
The `cc_library` rule in Bazel does not produce module maps that are compatible
with Swift. In order to make interop between Swift and C possible, users have
one of two options:
1. **Use an auto-generated module map.** In this case, the `swift_c_module`
rule is not needed. If a `cc_library` is a direct dependency of a
`swift_{binary,library,test}` target, a module map will be automatically
generated for it and the module's name will be derived from the Bazel target
label (in the same fashion that module names for Swift targets are derived).
The module name can be overridden by setting the `swift_module` tag on the
`cc_library`; e.g., `tags = ["swift_module=MyModule"]`.
2. **Use a custom module map.** For finer control over the headers that are
exported by the module, use the `swift_c_module` rule to provide a custom
module map that specifies the name of the module, its headers, and any other
module information. The `cc_library` targets that contain the headers that
you wish to expose to Swift should be listed in the `deps` of your
`swift_c_module` (and by listing multiple targets, you can export multiple
libraries under a single module if desired). Then, your
`swift_{binary,library,test}` targets should depend on the `swift_c_module`
target, not on the underlying `cc_library` target(s).
NOTE: Swift at this time does not support interop directly with C++. Any headers
referenced by a module map that is imported into Swift must have only C features
visible, often by using preprocessor conditions like `#if __cplusplus` to hide
any C++ declarations.
""",
implementation = _swift_c_module_impl,
)
| 41.315789 | 80 | 0.682621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,644 | 0.663148 |
50d2c63c50baed76074a457c6cee5b8d1cf60572 | 2,170 | py | Python | PytomationMailStandalone.py | riensWinoto/PytomationMail | c29f406a86eecabd7e1258795ed95b5328be9d5f | [
"MIT"
] | null | null | null | PytomationMailStandalone.py | riensWinoto/PytomationMail | c29f406a86eecabd7e1258795ed95b5328be9d5f | [
"MIT"
] | null | null | null | PytomationMailStandalone.py | riensWinoto/PytomationMail | c29f406a86eecabd7e1258795ed95b5328be9d5f | [
"MIT"
] | null | null | null | import smtplib
import sys
import time
from datetime import datetime
# variable
trigger = 0
myName = "your name"
myEmail = "your@email.com"
myPass = "y0urP4s5w0rd"
myEmailSMTP = "smtp.yourEmailProvider.com" #for gmail: smtp.gmail.com for outlook: smtp.office365.com
mySMTPPort = 587
receivers = {"receiver name": "receiver@email.com"}
emailSubject = "I'm Pytomation Mail"
emailBody = """
Hello there,
Feel free to use this Pytomation Mail and modify it \
base on your needs
Thanks and Regards,
Riens Winoto
"""
# function
def initial_setup():
try:
broad_caster = smtplib.SMTP(myEmailSMTP, mySMTPPort)
broad_caster.ehlo()
broad_caster.starttls()
broad_caster.login(myEmail, myPass)
except IOError as err:
print(str(err))
time.sleep(1.0)
sys.exit()
return broad_caster
def get_date_time():
date_and_time = datetime.now()
str_date_time = date_and_time.strftime('%b %-d,%Y, %-I:%M%p')
return str_date_time
def get_sender(sender_name, sender_email):
from_sender = "from:" + " " + sender_name + " " + "<" + sender_email + ">"
return from_sender
def get_receiver(receiver_name, receiver_email):
to_receiver = "to:" + " " + receiver_name + " " + "<" + receiver_email + ">"
return to_receiver
def get_email_message(email_subject, email_body):
email_message = "subject:" + " " + email_subject + "\n" + email_body
return email_message
if __name__ == "__main__":
broadCaster = initial_setup()
if trigger >= len(receivers):
print("Enter receiver name and email next time")
else:
for receiverName, receiverEmail in receivers.items():
fromSender = get_sender(myName, myEmail)
toReceiver = get_receiver(receiverName, receiverEmail)
emailMessage = get_email_message(emailSubject, emailBody)
messenger = fromSender + "\n" + toReceiver + "\n" + emailMessage
broadCaster.sendmail(myEmail, receiverEmail, messenger)
sendDateTime = get_date_time()
print("e-mail sent successfully to {} at {} \n".format(receiverName, sendDateTime))
broadCaster.quit() | 28.181818 | 102 | 0.670507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 507 | 0.233641 |
50d316e96203c330defac6b5f9275d3b8cbbe62b | 220 | py | Python | tempgui.py | mustafaIhssan/semantic-segmentation-maker | 83ac9af29fccdba83e463762116e22445938987b | [
"MIT"
] | 1 | 2016-12-29T07:59:10.000Z | 2016-12-29T07:59:10.000Z | tempgui.py | mustafaIhssan/semantic-segmentation-maker | 83ac9af29fccdba83e463762116e22445938987b | [
"MIT"
] | null | null | null | tempgui.py | mustafaIhssan/semantic-segmentation-maker | 83ac9af29fccdba83e463762116e22445938987b | [
"MIT"
] | null | null | null | import Tkinter
parent_widget = Tkinter.Tk()
scale_widget = Tkinter.Scale(parent_widget, from_=0, to=100,
orient=Tkinter.HORIZONTAL)
scale_widget.set(25)
scale_widget.pack()
Tkinter.mainloop() | 31.428571 | 60 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50d3680b3ac0276b323b0fe43cf5218d56058d3e | 2,864 | py | Python | main.py | agoupy/parrainages | 383e69e962ef256c9375d2b5ce01dbcad656c579 | [
"MIT"
] | null | null | null | main.py | agoupy/parrainages | 383e69e962ef256c9375d2b5ce01dbcad656c579 | [
"MIT"
] | null | null | null | main.py | agoupy/parrainages | 383e69e962ef256c9375d2b5ce01dbcad656c579 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 24 14:12:38 2022
@author: j64280
"""
import alluvial
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm
list_2017 = pd.read_csv('parrainagestotal_2017.csv',sep=';')
list_2022 = pd.read_csv('parrainagestotal_2022.csv',sep=';')
list_2017['Candidat 2022']='Pas de parrainage en 2022'
n_2017,_=list_2017.shape
n_2022,_=list_2022.shape
n=0
for i in range(n_2017):
ind = ((list_2022.Nom == list_2017.Nom.iloc[i]) & (list_2022.Prénom == list_2017.Prénom.iloc[i])
& (list_2022.Département == list_2017.Département.iloc[i]) & (list_2022.Circonscription == list_2017.Circonscription.iloc[i]))
if ind.any():
list_2017['Candidat 2022'].iloc[i] = list_2022[ind].Candidat.values[0]
list_2017['Candidat-e parrainé-e'].iloc[i] = list_2017['Candidat-e parrainé-e'].iloc[i] + ' '
conserve_2017 = ['FILLON François ','MACRON Emmanuel ', 'HAMON Benoît ','ARTHAUD Nathalie ',
'DUPONT-AIGNAN Nicolas ','MELENCHON Jean-Luc ',
'LASSALLE Jean ','POUTOU Philippe ','CHEMINADE Jacques ','ASSELINEAU François ',
'LE PEN Marine ','YADE Rama ','JUPPE Alain ','JARDIN Alexandre ','MARCHANDISE Charlotte ',
'ALLIOT-MARIE Michèle ','TAUZIN Didier ','GORGES Jean-Pierre ','TROADEC Christian ',
'LARROUTUROU Pierre ','GUAINO Henri ','BAROIN François ']
conserve_2022 = ['PÉCRESSE Valérie','MACRON Emmanuel','HIDALGO Anne','ARTHAUD Nathalie',
'DUPONT-AIGNAN Nicolas','ROUSSEL Fabien','MÉLENCHON Jean-Luc','LASSALLE Jean','POUTOU Philippe',
'ZEMMOUR Éric','ASSELINEAU François','LE PEN Marine','JADOT Yannick','KAZIB Anasse',
'KUZMANOVIC Georges','THOUY Hélène','TAUBIRA Christiane','KOENIG Gaspard','MIGUET Nicolas']
list_2017_filtered = list_2017[list_2017['Candidat 2022']!='Pas de parrainage en 2022']
n_2017_filtered,_=list_2017_filtered.shape
couple_par = []
for i in range(n_2017_filtered):
if list_2017_filtered['Candidat-e parrainé-e'].iloc[i] in conserve_2017:
if list_2017_filtered['Candidat 2022'].iloc[i] in conserve_2022:
couple_par.append([list_2017_filtered['Candidat-e parrainé-e'].iloc[i], list_2017_filtered['Candidat 2022'].iloc[i]])
#%%
cmap = matplotlib.cm.get_cmap('jet')
ax = alluvial.plot(
couple_par, alpha=0.8, color_side=0, rand_seed=4, figsize=(10,15),
disp_width=True, wdisp_sep=' '*2, fontname='Monospace',
colors = cmap(np.linspace(0,8,len(conserve_2017)) % 1),
a_sort=conserve_2017[::-1],b_sort=conserve_2022[::-1])
ax.set_title('Transferts de parrainage entre 2017 et 2022', fontsize=14, fontname='Monospace')
plt.text(1.1,-150,'@Alexandre_Goupy')
plt.savefig('report_signatures.png',bbox_inches='tight',dpi=200)#,transparent=True) | 44.061538 | 133 | 0.688198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,283 | 0.444868 |
50d3d47aab29740b1510e0aea9c31e455ae9cd65 | 21,767 | py | Python | bridge_sim/sim/run/opensees/build/d3/__init__.py | jerbaroo/bridge-sim | c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af | [
"MIT"
] | 2 | 2020-05-12T11:41:49.000Z | 2020-08-10T15:00:58.000Z | bridge_sim/sim/run/opensees/build/d3/__init__.py | barischrooneyj/bridge-sim | c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af | [
"MIT"
] | 48 | 2020-05-11T23:58:22.000Z | 2020-09-18T20:28:52.000Z | bridge_sim/sim/run/opensees/build/d3/__init__.py | jerbaroo/bridge-sim | c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af | [
"MIT"
] | 1 | 2020-05-27T12:43:37.000Z | 2020-05-27T12:43:37.000Z | """Build OpenSees 3D model files."""
import os
from collections import OrderedDict, defaultdict
from itertools import chain
from typing import List, Optional, Tuple
import numpy as np
from bridge_sim.model import PierSettlement, PointLoad, Config, Material
from bridge_sim.sim.model import (
BuildContext,
DeckNodes,
DeckShells,
Node,
PierNodes,
PierShells,
SimParams,
)
from bridge_sim.sim.build import (
det_nodes_id_str,
det_shells_id_str,
det_shells,
get_bridge_shells_and_nodes,
to_deck_nodes,
)
from bridge_sim.sim.run.opensees.build.d3.self_weight import opensees_self_weight_loads
from bridge_sim.sim.run.opensees.build.d3.thermal import (
opensees_thermal_axial_deck_loads,
opensees_thermal_moment_deck_loads,
)
from bridge_sim.sim.run.opensees.build.d3.util import comment
from bridge_sim.util import flatten, print_d, print_i, print_w, round_m
# Print debug information for this file.
# D: str = "fem.run.opensees.build.d3"
D: bool = False
##### Begin nodes #####
def opensees_support_nodes(
    c: Config, deck_nodes: DeckNodes, all_support_nodes: PierNodes,
) -> str:
    """OpenSees node commands for the supports (excluding deck nodes).

    Nodes that belong to both a support and the deck are emitted by
    'opensees_deck_nodes' instead, so they are filtered out here.

    Args:
        c: Config, global configuration object.
        deck_nodes: DeckNodes, used to filter out already-emitted nodes.
        all_support_nodes: PierNodes, all support nodes to generate
            commands for.
    """
    # Flatten the deck nodes into a set for O(1) membership checks.
    deck_node_set = set(chain.from_iterable(deck_nodes))
    # An ordered mapping is used as an ordered set: the same bottom node may
    # be reached via both walls of a support and must only be emitted once.
    support_only = OrderedDict()
    for support in all_support_nodes:
        # Each support consists of two walls.
        for wall in support:
            # Each wall is a grid of ~vertical lines of nodes.
            for vertical_line in wall:
                for node in vertical_line:
                    # Skip nodes already emitted as part of the deck.
                    if node not in deck_node_set:
                        support_only[node] = None
    return comment(
        "support nodes",
        "\n".join(node.command_3d() for node in support_only),
        units="node nodeTag x y z",
    )
def opensees_deck_nodes(c: Config, deck_nodes: DeckNodes) -> str:
    """OpenSees node commands for a bridge deck.

    One 'node' command is emitted per deck node, based on the positions
    already computed in 'deck_nodes'.

    Args:
        c: Config, global configuration object.
        deck_nodes: DeckNodes, the deck nodes to emit commands for.
    """
    commands = [node.command_3d() for node in chain.from_iterable(deck_nodes)]
    return comment("deck nodes", "\n".join(commands), units="node nodeTag x y z")
##### End nodes #####
##### Begin fixed nodes #####
class FixNode:
    """A command fixing a node in some degrees of freedom (dof).

    Args:
        node: the node whose dof are to be fixed.
        fix_x_translation: fix translation along the x axis.
        fix_y_translation: fix translation along the y axis.
        fix_z_translation: fix translation along the z axis.
        fix_x_rotation: fix rotation about the x axis.
        fix_y_rotation: fix rotation about the y axis.
        fix_z_rotation: fix rotation about the z axis.
        comment: Optional[str], an optional comment for the command.
    """

    def __init__(
        self,
        node: "Node",
        fix_x_translation: bool,
        fix_y_translation: bool,
        fix_z_translation: bool,
        fix_x_rotation: bool,
        fix_y_rotation: bool,
        fix_z_rotation: bool,
        comment: Optional[str] = None,
    ):
        self.node = node
        self.fix_x_translation = fix_x_translation
        self.fix_y_translation = fix_y_translation
        self.fix_z_translation = fix_z_translation
        self.fix_x_rotation = fix_x_rotation
        self.fix_y_rotation = fix_y_rotation
        self.fix_z_rotation = fix_z_rotation
        self.comment = comment

    def command_3d(self):
        """The command in string format for a TCL file."""
        # Six 0/1 flags: x/y/z translation then x/y/z rotation.
        flags = " ".join(
            str(int(flag))
            for flag in [
                self.fix_x_translation,
                self.fix_y_translation,
                self.fix_z_translation,
                self.fix_x_rotation,
                self.fix_y_rotation,
                self.fix_z_rotation,
            ]
        )
        suffix = "" if self.comment is None else f"; # {self.comment}"
        return f"fix {self.node.n_id} {flags}{suffix}"
def opensees_fixed_abutment_nodes(
    c: Config, sim_params: SimParams, deck_nodes: DeckNodes
) -> str:
    """OpenSees fix commands for fixed nodes on the abutment.

    The first and last node of each row of deck nodes (the row ends,
    presumably the nodes over the two abutments — TODO confirm against
    'DeckNodes' ordering) are fixed in y and z translation but left free
    in x translation and all rotations.

    Args:
        c: Config, global configuration object.
        sim_params: SimParams, unused; kept for a signature consistent
            with the other fix-command builders.
        deck_nodes: DeckNodes, the deck nodes, as rows of nodes.
    """
    # NOTE: a previously computed 'thermal' flag was removed here; it was
    # only referenced by a commented-out z-translation condition.
    fixed_nodes: List[FixNode] = []
    for x_nodes in deck_nodes:
        assert len(x_nodes) >= 2
        # Fix both end nodes of this row of deck nodes.
        for node in [x_nodes[0], x_nodes[-1]]:
            fixed_nodes.append(
                FixNode(
                    node=node,
                    fix_x_translation=False,
                    fix_y_translation=True,
                    fix_z_translation=True,
                    fix_x_rotation=False,
                    fix_y_rotation=False,
                    fix_z_rotation=False,
                )
            )
    return comment(
        "fixed deck nodes",
        "\n".join(map(lambda f: f.command_3d(), fixed_nodes)),
        units="fix nodeTag x y z rx ry rz",
    )
def opensees_fixed_pier_nodes(
    c: Config,
    sim_params: SimParams,
    all_support_nodes: PierNodes,
    pier_disp: List[PierSettlement],
) -> str:
    """OpenSees fix commands for the bottom nodes of each pier.

    For every pier, the bottom node of each ~vertical line of nodes (of the
    pier's first wall; the bottom nodes are shared by both walls) is fixed
    according to the pier's fixity attributes. If a pier is subject to a
    settlement, its vertical (y) translation is left free and its bottom
    central node is recorded on the pier ('disp_node') for use by the
    displacement-control integrator command.

    NOTE: a 'pier_positions' mapping previously computed here for thermal
    loading was never used and has been removed; transverse (z) translation
    is hard-coded fixed, overriding 'pier.fix_z_translation'.

    Args:
        c: Config, global configuration object.
        sim_params: SimParams, unused; kept for a consistent signature.
        all_support_nodes: PierNodes, nodes of every pier (walls x lines).
        pier_disp: List[PierSettlement], piers subject to settlement.
    """
    fixed_nodes: List[FixNode] = []
    # Iterate through each pier. Note that p_nodes is a tuple of nodes for
    # each pier wall, and each wall is a 2-d array of nodes.
    for p_i, p_nodes in enumerate(all_support_nodes):
        pier = c.bridge.supports[p_i]
        # If there is pier settlement for this pier then free the vertical
        # translation and attach the bottom central node to the pier, for
        # the integrator command.
        free_y_trans = False
        for ps in pier_disp:
            if p_i == ps.pier:
                free_y_trans = True
                pier = c.bridge.supports[ps.pier]
                pier.disp_node = p_nodes[0][len(p_nodes[0]) // 2][-1]
                if len(p_nodes[0]) % 2 == 0:
                    print_w("Pier settlement:")
                    print_w("  no central node (even number of nodes)")
        # For each ~vertical line of nodes for a z position at top of wall,
        # fix the bottom node.
        for y_i, y_nodes in enumerate(p_nodes[0]):
            node = y_nodes[-1]
            fixed_nodes.append(
                FixNode(
                    node=node,
                    fix_x_translation=pier.fix_x_translation,
                    fix_y_translation=False if free_y_trans else pier.fix_y_translation,
                    # Hard-coded fixed, overriding pier.fix_z_translation.
                    fix_z_translation=True,
                    fix_x_rotation=pier.fix_x_rotation,
                    fix_y_rotation=pier.fix_y_rotation,
                    fix_z_rotation=pier.fix_z_rotation,
                    comment=f"pier {p_i} y {y_i}",
                )
            )
    return comment(
        "fixed support nodes",
        "\n".join(map(lambda f: f.command_3d(), fixed_nodes)),
        units="fix nodeTag x y z rx ry rz",
    )
##### End fixed nodes #####
##### Begin sections #####
def opensees_section(section: "Material"):
    """OpenSees orthotropic section commands for a Material.

    Emits an 'nDMaterial ElasticOrthotropic' definition followed by a
    'section PlateFiber' referencing it. Young's modulus values are
    converted from MPa-scale to Pa-scale (* 1E6) and density is scaled
    by 1E-3, matching the units used elsewhere in the generated model.

    NOTE: unreachable code following the return (a 'raise' and an old
    isotropic 'ElasticMembranePlateSection' variant) has been removed.

    Args:
        section: the material to generate section commands for.
    """
    # TODO: Implicit information, assumption that if young's modulus in x
    # direction is modified that cracking is desired (poisson's set to 0).
    CRACK_Z = not np.isclose(section.youngs_x(), section.youngs)
    return (
        f"nDMaterial ElasticOrthotropic {section.id}"
        f" {section.youngs_x() * 1E6} {section.youngs * 1E6} {section.youngs * 1E6}"
        f" {0 if CRACK_Z else section.poissons} {section.poissons} {section.poissons}"
        f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}"
        f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}"
        f" {(section.youngs * 1E6) / (2 * (1 + section.poissons))}"
        f" {section.density * 1E-3}"
        f"\nsection PlateFiber {section.id} {section.id} {section.thickness}"
    )
def opensees_deck_sections(c: Config):
    """Section commands for every material of the bridge deck."""
    section_cmds = [opensees_section(s) for s in c.bridge.sections]
    return comment(
        "deck sections",
        "\n".join(section_cmds),
        units=(
            "section ElasticMembranePlateSection secTag youngs_modulus"
            + " poisson_ratio depth mass_density"
        ),
    )
def opensees_pier_sections(c: Config, all_pier_elements: PierShells):
    """Section commands for the bridge's piers.

    Multiple pier shells may refer to the same section, so sections are
    deduplicated before rendering to avoid duplicate definitions in the
    generated .tcl file.
    """
    unique_sections = {shell.section for shell in det_shells(all_pier_elements)}
    return comment(
        "pier sections",
        "\n".join(opensees_section(s) for s in unique_sections),
        units=(
            "section ElasticMembranePlateSection secTag youngs_modulus"
            + " poisson_ratio depth mass_density"
        ),
    )
##### End sections #####
##### Begin shell elements #####
def opensees_deck_elements(c: Config, deck_elements: DeckShells) -> str:
    """OpenSees shell element commands for a bridge deck."""
    commands = [shell.command_3d() for shell in det_shells(deck_elements)]
    return comment(
        "deck shell elements",
        "\n".join(commands),
        units="element ShellMITC4 eleTag iNode jNode kNode lNode secTag",
    )
def opensees_pier_elements(c: Config, all_pier_elements: PierShells) -> str:
    """OpenSees shell element commands for a bridge's piers."""
    commands = [shell.command_3d() for shell in det_shells(all_pier_elements)]
    return comment(
        "pier shell elements",
        "\n".join(commands),
        units="element ShellMITC4 eleTag iNode jNode kNode lNode secTag",
    )
# End shell elements #
# Begin loads #
def opensees_load(
    c: "Config", pload: "PointLoad", deck_nodes: "DeckNodes",
):
    """An OpenSees load command for a point load on the bridge deck.

    The load is applied at the deck node nearest the point load; that node
    is asserted to coincide exactly with the load's (x, z) position and to
    lie on the deck surface (y == 0).

    NOTE: leftover debug 'print' statements were removed from this function.

    Args:
        c: Config, global configuration object (unused here).
        pload: PointLoad, the point load to generate a command for.
        deck_nodes: DeckNodes, the deck nodes to choose from.
    """
    assert deck_nodes[0][0].y == 0
    assert deck_nodes[-1][-1].y == 0
    # min returns the first node of minimum distance, as sorted(...)[0] did.
    best_node = min(
        chain.from_iterable(deck_nodes),
        key=lambda node: node.distance(x=pload.x, y=0, z=pload.z),
    )
    assert np.isclose(best_node.y, 0)
    assert np.isclose(best_node.x, pload.x)
    assert np.isclose(best_node.z, pload.z)
    return f"load {best_node.n_id} 0 {pload.load} 0 0 0 0"
def opensees_loads(
    c: Config,
    ploads: List[PointLoad],
    deck_nodes: DeckNodes,
    pier_disp: List[PierSettlement],
):
    """OpenSees load commands for a .tcl file.

    With pier settlement, a load is applied at each settled pier's recorded
    bottom central node ('disp_node'); only the position matters there, not
    the intensity. Otherwise one load command is generated per point load
    at the nearest deck node.
    """
    if pier_disp:
        # Note: each command is prefixed by a newline, matching the
        # original string-accumulation format.
        load_str = "".join(
            f"\nload {c.bridge.supports[ps.pier].disp_node.n_id}"
            f" 0 {ps.settlement * 1000} 0 0 0 0"
            for ps in pier_disp
        )
    else:
        load_str = "\n".join(
            opensees_load(c=c, pload=pload, deck_nodes=deck_nodes) for pload in ploads
        )
    return comment("loads", load_str, units="load nodeTag N_x N_y N_z N_rx N_ry N_rz")
##### End loads #####
##### Begin recorders #####
def opensees_translation_recorders(
    c: Config, fem_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
) -> str:
    """OpenSees recorder commands for x/y/z translation at every node."""
    # Collect (response file path, OpenSees dof index) per direction.
    recorder_specs = []
    for axis, path_fn, dof in [
        ("x", os_runner.x_translation_path, 1),
        ("y", os_runner.y_translation_path, 2),
        ("z", os_runner.z_translation_path, 3),
    ]:
        path = path_fn(c, fem_params)
        print_i(f"OpenSees: saving {axis} translation at {path}")
        recorder_specs.append((path, dof))
    # One recorder command per direction, recording all nodes.
    node_str = det_nodes_id_str(ctx)
    recorder_strs = []
    for path, dof in recorder_specs:
        print_d(D, f"Adding response path to build: {path}")
        recorder_strs.append(
            f"recorder Node -file {path} -node {node_str} -dof {dof} disp"
        )
    return comment(
        "translation recorders",
        "\n".join(recorder_strs),
        units="recorder Node -file path -node nodeTags -dof direction disp",
    )
def opensees_strain_recorders(
    c: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
):
    """OpenSees recorder commands for strain at shell integration points 1-4."""
    shells_str = det_shells_id_str(ctx)
    commands = []
    for point in (1, 2, 3, 4):
        path = os_runner.strain_path(config=c, sim_params=sim_params, point=point)
        commands.append(
            f"recorder Element -file {path}"
            f" -ele {shells_str} material {point} deformation"
        )
    return "\n".join(commands)
def opensees_forces(
    config: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
):
    """OpenSees recorder command saving element forces for every shell."""
    forces_file = os_runner.forces_path(config=config, sim_params=sim_params)
    shells_str = det_shells_id_str(ctx)
    return f"recorder Element -file {forces_file} -ele {shells_str} forces"
def opensees_stress_variables(
    c: Config, sim_params: SimParams, os_runner: "OSRunner", ctx: BuildContext
) -> Tuple[str, str]:
    """OpenSees stress recorder variables.

    These replace <<ELEM_IDS>> and <<FORCES_OUT_FILE>> in the TCL file.
    """
    elem_ids = det_shells_id_str(ctx)
    out_file = os_runner.stress_path(config=c, sim_params=sim_params)
    return elem_ids, out_file
def opensees_integrator(c: "Config", pier_disp: "List[PierSettlement]"):
    """The OpenSees integrator command to use based on the simulation.

    With pier settlement, displacement control is applied at the first
    settled pier's recorded bottom node; otherwise plain load control.
    """
    if not pier_disp:
        return "integrator LoadControl 1"
    first = pier_disp[0]
    if len(pier_disp) > 1:
        # Only one pier can drive displacement control; warn about the rest.
        print_w(f"Using pier {first.pier} for DisplacementControl")
    disp_node = c.bridge.supports[first.pier].disp_node
    return f"integrator DisplacementControl {disp_node.n_id} 2 {first.settlement}"
def opensees_algorithm(pier_disp: "List[PierSettlement]"):
    """The OpenSees algorithm command: Linear under pier settlement, else Newton."""
    return "algorithm Linear" if pier_disp else "algorithm Newton"
def opensees_test(pier_disp: "List[PierSettlement]"):
    """The OpenSees test command: none under pier settlement, else NormDispIncr."""
    return "" if pier_disp else "test NormDispIncr 1.0e-12 1000"
##### End recorders #####
def build_model_3d(c: Config, expt_params: List[SimParams], os_runner: "OSRunner"):
    """Build OpenSees 3D model files, one per simulation in 'expt_params'.

    For each 'SimParams': a 'BuildContext' is set up, bridge nodes and
    shells are determined, the <<PLACEHOLDER>> markers in the TCL template
    are replaced with generated commands, and the result is written to
    'os_runner.sim_model_path(...)'. Returns the given 'expt_params'
    (with 'bridge_shells'/'bridge_nodes' attached to each element).

    TODO: ExptParams -> SimParams.
    """
    # Read in the template model file.
    # The template is resolved relative to this source file, six directory
    # levels up, then 'c.os_3d_model_template_path' below that.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    template_path = os.path.normpath(
        os.path.join(dir_path, "../../../../../../", c.os_3d_model_template_path)
    )
    with open(template_path) as f:
        in_tcl = f.read()
    # Build a model file for each simulation.
    for sim_params in expt_params:
        # Setup the 'BuildContext' for this simulation.
        sim_ctx = sim_params.build_ctx()
        # Determine nodes and shells.
        bridge_shells, bridge_nodes = get_bridge_shells_and_nodes(
            bridge=c.bridge, ctx=sim_ctx
        )
        deck_shells, pier_shells = bridge_shells
        deck_shell_nodes, pier_nodes = bridge_nodes
        deck_nodes = to_deck_nodes(deck_shell_nodes)
        # Attaching nodes and shells to the 'SimParams'. This allows the convert
        # process to build a deterministic list of nodes and shells. They should
        # be deleted again at that point.
        sim_params.bridge_shells = bridge_shells
        sim_params.bridge_nodes = bridge_nodes
        # Build the 3D model file by replacements in the template model file.
        out_tcl = (
            in_tcl.replace(
                "<<DECK_NODES>>", opensees_deck_nodes(c=c, deck_nodes=deck_nodes),
            )
            .replace(
                "<<SUPPORT_NODES>>",
                opensees_support_nodes(
                    c=c, deck_nodes=deck_nodes, all_support_nodes=pier_nodes,
                ),
            )
            .replace(
                "<<FIX_DECK>>",
                opensees_fixed_abutment_nodes(
                    c=c, sim_params=sim_params, deck_nodes=deck_nodes
                ),
            )
            .replace(
                "<<FIX_SUPPORTS>>",
                opensees_fixed_pier_nodes(
                    c=c,
                    sim_params=sim_params,
                    all_support_nodes=pier_nodes,
                    pier_disp=sim_params.pier_settlement,
                ),
            )
            .replace(
                "<<LOAD>>",
                opensees_loads(
                    c=c,
                    ploads=sim_params.ploads,
                    deck_nodes=deck_nodes,
                    pier_disp=sim_params.pier_settlement,
                ),
            )
            .replace(
                "<<THERMAL_AXIAL_LOAD_DECK>>",
                opensees_thermal_axial_deck_loads(
                    c=c, sim_params=sim_params, deck_elements=deck_shells, ctx=sim_ctx,
                ),
            )
            .replace(
                "<<THERMAL_MOMENT_LOAD_DECK>>",
                opensees_thermal_moment_deck_loads(
                    c=c, sim_params=sim_params, deck_elements=deck_shells, ctx=sim_ctx,
                ),
            )
            .replace(
                "<<SELF_WEIGHT>>",
                opensees_self_weight_loads(c, sim_params, deck_shells),
            )
            # <<SUPPORTS>> is intentionally replaced with nothing.
            .replace("<<SUPPORTS>>", "")
            .replace("<<DECK_SECTIONS>>", opensees_deck_sections(c=c))
            .replace(
                "<<TRANS_RECORDERS>>",
                opensees_translation_recorders(
                    c=c, fem_params=sim_params, os_runner=os_runner, ctx=sim_ctx
                ),
            )
            .replace(
                "<<FORCES>>",
                opensees_forces(
                    config=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx
                ),
            )
            .replace(
                "<<DECK_ELEMENTS>>",
                opensees_deck_elements(c=c, deck_elements=deck_shells),
            )
            .replace(
                "<<PIER_ELEMENTS>>",
                opensees_pier_elements(c=c, all_pier_elements=pier_shells),
            )
            .replace(
                "<<PIER_SECTIONS>>",
                opensees_pier_sections(c=c, all_pier_elements=pier_shells),
            )
            .replace(
                "<<INTEGRATOR>>",
                opensees_integrator(c=c, pier_disp=sim_params.pier_settlement),
            )
            .replace("<<ALGORITHM>>", opensees_algorithm(sim_params.pier_settlement))
            .replace("<<TEST>>", opensees_test(sim_params.pier_settlement))
        )
        # Stress recorder variables are filled in as a separate pair.
        elem_ids, forces_out_file = opensees_stress_variables(
            c=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx
        )
        out_tcl = out_tcl.replace("<<ELEM_IDS>>", elem_ids).replace(
            "<<FORCES_OUT_FILE>>", forces_out_file
        )
        out_tcl = out_tcl.replace(
            "<<STRAIN_RECORDERS>>",
            opensees_strain_recorders(
                c=c, sim_params=sim_params, os_runner=os_runner, ctx=sim_ctx
            ),
        )
        # Write the generated model file.
        model_path = os_runner.sim_model_path(
            config=c, sim_params=sim_params, ext="tcl"
        )
        with open(model_path, "w") as f:
            f.write(out_tcl)
        num_nodes = len(set(flatten(bridge_nodes, Node)))
        print_i(f"OpenSees: saved 3D model ({num_nodes} nodes) file to {model_path}")
    return expt_params
| 35.45114 | 89 | 0.612119 | 1,440 | 0.066155 | 0 | 0 | 0 | 0 | 0 | 0 | 7,822 | 0.359351 |
50d3ff539d0dc35fa385af4b1fce1942ee1690c8 | 1,944 | py | Python | setup.py | KathleenLabrie/KLpysci | 848f659dd6743e2f390ee08ac7d350817ef5e9b3 | [
"0BSD"
] | null | null | null | setup.py | KathleenLabrie/KLpysci | 848f659dd6743e2f390ee08ac7d350817ef5e9b3 | [
"0BSD"
] | null | null | null | setup.py | KathleenLabrie/KLpysci | 848f659dd6743e2f390ee08ac7d350817ef5e9b3 | [
"0BSD"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
import os
import klpysci
# Single source of truth for the version: the package itself.
VERSION = klpysci.__version__

# Directory containing this setup.py, used to locate bundled text files.
cwd = os.path.abspath(os.path.dirname(__file__))

# Get the long_description from the Description.rst file
with open(os.path.join(cwd, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()

MODULENAME = 'klpysci'

# Install everything under docs/ into share/klpysci/<docs path>;
# each DATA_FILES entry is (target directory, [source files]).
DATA_FILES = []
DOC_FILES = [(os.path.join('share',MODULENAME,root), [os.path.join(root,f) for f in files]) \
             for root, dirs, files in os.walk('docs')]
DATA_FILES.extend(DOC_FILES)

setup(
    name = MODULENAME,
    version = VERSION,
    description = 'Scientific Python utilities and tools',
    long_description = long_description,
    url = 'https://github.com/KathleenLabrie/KLpysci',
    author = 'Kathleen Labrie',
    author_email = 'kathleen.labrie.phd@gmail.com',
    # NOTE(review): 'LICENSE' looks like a file name; setuptools expects a
    # license name/identifier here — confirm the intended value.
    license = 'LICENSE',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers = [
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Operating System :: Mac OS :: MacOS X',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Astronomy'
    ],
    keywords = 'mathematics physics data processing',
    packages = find_packages(exclude=['docs']),
    #install_requires = ['']
    #extras_require = {
    #    'dev': [''],
    #},
    #package_data = {
    #    'klpysci': [''],
    #    },
    data_files = DATA_FILES,
    #scripts = [
    #    'klpysci/...'
    #    ],
    zip_safe = False,
)
| 29.014925 | 93 | 0.542181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 810 | 0.416667 |
50d58d7591d92e204d555b99d0383a3caa6df18d | 240 | py | Python | lnc/lib/exceptions.py | atrosinenko/lecture-notes-compiler | 40764c48664cec2001f1a5cbfa579ae970af0dac | [
"MIT"
] | 3 | 2015-09-10T16:10:57.000Z | 2016-12-03T01:49:28.000Z | lnc/lib/exceptions.py | atrosinenko/lecture-notes-compiler | 40764c48664cec2001f1a5cbfa579ae970af0dac | [
"MIT"
] | null | null | null | lnc/lib/exceptions.py | atrosinenko/lecture-notes-compiler | 40764c48664cec2001f1a5cbfa579ae970af0dac | [
"MIT"
class ProgramError(Exception):
    """Generic exception class for errors in this program.

    Base class of this module's exception hierarchy; catch it to handle
    any error raised by this program's own code.
    """
class PluginError(ProgramError):
    """ProgramError raised by plugin-related code."""
class NoOptionError(ProgramError):
    """ProgramError raised for a missing option."""
class ExtCommandError(ProgramError):
    """ProgramError raised for external command failures."""
| 15 | 61 | 0.729167 | 230 | 0.958333 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.2375 |
50d864ab62ce6a701debae2af6bac07b0dbf0ee8 | 9,439 | py | Python | libsolresol.py | MishaKlopukh/solresol-language | 075332912fdda7412e11c759c958997fee6c87a9 | [
"MIT"
] | null | null | null | libsolresol.py | MishaKlopukh/solresol-language | 075332912fdda7412e11c759c958997fee6c87a9 | [
"MIT"
] | null | null | null | libsolresol.py | MishaKlopukh/solresol-language | 075332912fdda7412e11c759c958997fee6c87a9 | [
"MIT"
] | null | null | null | import enum
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy.io import wavfile
import json
# When True, use IPython's notebook Audio widget; when False (the current
# hard-wired setting) always fall back to direct playback via sounddevice.
jupyter_nb_mode = False
try:
    if not jupyter_nb_mode:
        # Force the fallback path below. (Previously a bare `assert` was
        # used here, which silently disappears under `python -O`, and the
        # matching bare `except:` swallowed every exception type.)
        raise ImportError("jupyter notebook mode is disabled")
    from IPython.display import Audio
except ImportError:
    import sounddevice as sd

    def Audio(array, rate=44100):
        """Play `array` through the default audio device at `rate` Hz.

        Drop-in stand-in for IPython.display.Audio; returns None.
        """
        sd.play(array, rate)
class SolfegeSymbol(enum.Enum):
    """One of the seven solfege notes (do..si) used as Solresol "letters".

    The first name on each row (DO, RE, ...) is the canonical member; every
    following name on the row is an alias bound to the same value, covering
    casing variants, one/two-letter forms and the "ses" shorthand spellings.
    """
    DO,Do,do,D,d,p,o = 1,1,1,1,1,1,1
    RE,Re,re,R,r,k,e = 2,2,2,2,2,2,2
    MI,Mi,mi,M,m,i = 3,3,3,3,3,3
    FA,Fa,fa,F,f,a = 4,4,4,4,4,4
    SOL,Sol,sol,So,so,S,s,u = 5,5,5,5,5,5,5,5
    # NOTE(review): the 'au' alias maps to LA here while sesvowel() returns
    # 'ai' for LA (and 'au' for SI) — the alias table and sesvowel disagree;
    # confirm which direction is intended.
    LA,La,la,L,l,au = 6,6,6,6,6,6
    SI,Si,si,TI,Ti,ti,T,t,ai = 7,7,7,7,7,7,7,7,7
    @property
    def freq(self,octave=4):
        # Equal-temperament frequencies (Hz) of C4..B4, indexed by degree.
        # NOTE(review): as a property this can only be *accessed*, so the
        # 'octave' parameter can never be supplied and is always 4.
        notes = [261.63,293.66,329.63,349.23,392.00,440.00,493.88]
        return notes[self.value-1]*(2**(octave-4))
    @property
    def shortname(self):
        # Single-letter name for this degree: d r m f s l t.
        names = 'drmfslt'
        return names[self.value-1]
    @property
    def sescons(self):
        # Consonant used for this degree in the "ses" shorthand spelling.
        names = 'pkmfslt'
        return names[self.value-1]
    @property
    def sesvowel(self):
        # Vowel used for this degree in the "ses" shorthand spelling.
        names = list('oeiau')+['ai','au']
        return names[self.value-1]
    def makeglyph(self,xy,scale=1,color='black',weight=2,doubler=False):
        """Return (patch, attachment) for drawing this symbol at `xy`.

        `patch` is a matplotlib patch for the glyph and `attachment` is the
        (x, y) point where the next glyph attaches. With `doubler=True` a
        repeat-mark glyph is returned instead (used by SolresolWord.draw for
        a symbol that repeats its predecessor) and the attachment stays at
        `xy`.
        """
        x,y=xy
        if doubler:
            # Repeat-mark shapes, indexed by degree (value-1).
            # NOTE(review): the DO and MI entries (indices 0 and 2) are
            # identical — possibly a copy-paste slip; confirm the intended
            # MI doubler shape.
            shape = [
                patches.FancyArrowPatch((x-scale/2,y+2*scale/6),(x-scale/2,y+4*scale/6),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-scale/6,y+scale/2),(x+scale/6,y+scale/2),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-scale/2,y+2*scale/6),(x-scale/2,y+4*scale/6),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-4*scale/6,y+scale/2),(x-scale/3,y+scale/2),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-scale/2,y-scale/6),(x-scale/2,y+scale/6),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-2*scale/6,y+scale/2),(x-4*scale/6,y+scale/2),arrowstyle='-',color=color,linewidth=weight),
                patches.FancyArrowPatch((x-4*scale/6,y-scale/2),(x-scale/3,y-scale/2),arrowstyle='-',color=color,linewidth=weight),
            ][self.value-1]
            attachment = xy
        else:
            # Full glyph shape plus where the following glyph attaches.
            shape, attachment = [
                (patches.Circle((x+scale/2,y),scale/2,fill=False,color=color,linewidth=weight),(x+scale,y)),
                (patches.FancyArrowPatch((x,y),(x,y-scale),arrowstyle='-',color=color,linewidth=weight),(x,y-scale)),
                (patches.Arc((x+scale/2,y),scale,scale,theta1=0.0,theta2=180.0,color=color,linewidth=weight),(x+scale,y)),
                (patches.FancyArrowPatch((x,y),(x+scale,y-scale),arrowstyle='-',color=color,linewidth=weight),(x+scale,y-scale)),
                (patches.FancyArrowPatch((x,y),(x+scale,y),arrowstyle='-',color=color,linewidth=weight),(x+scale,y)),
                (patches.Arc((x,y-scale/2),scale,scale,theta1=90.0,theta2=-90.0,color=color,linewidth=weight),(x,y-scale)),
                (patches.FancyArrowPatch((x,y),(x+scale,y+scale),arrowstyle='-',color=color,linewidth=weight),(x+scale,y+scale))
            ][self.value-1]
        return shape, attachment
def generate_note(frequency, duration, sample_rate=44100, amplitude=1, envelope_ratio=1/3):
    """Synthesize a pure sine tone of `frequency` Hz lasting `duration` seconds.

    A linear attack/release envelope is applied: the first and last
    `envelope_ratio` fraction of the samples ramp between 0 and `amplitude`.
    Returns a 1-D numpy array of sample_rate * duration samples.
    """
    phase_step = 2*frequency*np.pi/sample_rate
    waveform = np.sin(phase_step*np.arange(sample_rate*duration))
    ramp_len = int(envelope_ratio*sample_rate*duration)
    sustain_len = int(sample_rate*duration-2*ramp_len)
    attack = np.linspace(0, amplitude, ramp_len)
    sustain = amplitude*np.ones(sustain_len)
    release = np.linspace(amplitude, 0, ramp_len)
    return waveform*np.concatenate((attack, sustain, release))
class SolresolWord():
    """A single Solresol word: a sequence of SolfegeSymbol "letters"."""
    def __init__(self, word, syntax='default'):
        """Parse `word` into the list of SolfegeSymbol stored in self.word.

        `word` may be a list (of SolfegeSymbol, name strings, or ints 1..7),
        a string, or an int whose octal digits encode the symbols. For
        strings, `syntax` selects the spelling:
          * 'ses'/'s': shorthand ('ai'/'au' digraphs become la/si);
          * 'num'/'#'/0: one digit 1..7 per symbol;
          * 'full'/'default': concatenated note names, e.g. 'doremi'.
        """
        if isinstance(word, list):
            if isinstance(word[0], SolfegeSymbol):
                self.word = word
            elif isinstance(word[0], str):
                self.word = [SolfegeSymbol[s] for s in word]
            elif isinstance(word[0], int):
                self.word = [SolfegeSymbol(i) for i in word]
        elif isinstance(word, str):
            if syntax in ['ses','s']:
                # Collapse digraph vowels first: 'ai' -> 'l' (la), 'au' -> 't' (si/ti).
                self.word = [SolfegeSymbol[s] for s in word.replace('ai','l').replace('au','t')]
            elif syntax in ['num','#',0]:
                self.word = [SolfegeSymbol(int(s)) for s in word.strip('0')]
            elif syntax in ['full','default']:
                self.word = []
                while len(word) > 0:
                    # 'sol' is the only three-letter name; avoid swallowing
                    # the 'l' of a following 'la' ('sola' parses as 'so'+'la').
                    if word.lower().startswith('sol') and not word.lower().startswith('sola'):
                        self.word.append(SolfegeSymbol.SOL)
                        word = word[3:]
                    else:
                        self.word.append(SolfegeSymbol[word[:2]])
                        word = word[2:]
        elif isinstance(word, int):
            # Inverse of the `value` property: octal digits -> symbols.
            self.word = [SolfegeSymbol(int(s)) for s in oct(word)[2:].strip('0')]
    def __repr__(self):
        return f"{type(self).__name__}(['"+"','".join(smb.name for smb in self.word)+"'])"
    def __getitem__(self,ix):
        return self.word.__getitem__(ix)
    def __len__(self):
        return len(self.word)
    def __iter__(self):
        return iter(self.word)
    @property
    def ses(self):
        # Shorthand spelling: consonants at even positions, vowels at odd;
        # a one-symbol word is written as its vowel alone.
        if len(self) == 1:
            return self.word[0].sesvowel
        else:
            return ''.join(ltr.sescons if ix%2==0 else ltr.sesvowel for ix,ltr in enumerate(self.word))
    @property
    def fulltext(self):
        # Concatenated canonical note names, lower-cased (e.g. 'doremi').
        return ''.join(smb.name for smb in self.word).lower()
    def __str__(self):
        return self.fulltext
    @property
    def value(self):
        # The word as an integer: symbol values 1..7 read as octal digits.
        return int(''.join(str(ltr.value) for ltr in self.word),8)
    @property
    def definition(self):
        # English definition string from the bundled dictionary; raises
        # KeyError if the word is not in the dictionary.
        return solresol_dict[self.fulltext]
    def __int__(self):
        return self.value
    def melody(self, note_len=0.2, amplitude=1, envelope_ratio=0.2, sample_rate=44100):
        """Return the word as audio samples: one enveloped sine per symbol."""
        return np.concatenate([generate_note(ltr.freq,note_len,sample_rate,amplitude,envelope_ratio) for ltr in self.word])
    def draw(self,ax,color='black',weight=2,startpos=(0,0)):
        """Draw the word's glyphs onto matplotlib axes `ax`.

        Returns the (x, y) starting position for the next word.
        """
        pos=startpos
        for ix,ltr in enumerate(self.word):
            # Nudge LA up-right when it follows SI or DO — presumably to
            # keep the glyphs from overlapping; confirm intent.
            if ltr==SolfegeSymbol.LA and (self.word[ix-1]==SolfegeSymbol.SI or self.word[ix-1]==SolfegeSymbol.DO) and ix>0:
                pos = (pos[0]+0.5,pos[1]+0.5)
            # A symbol that repeats its predecessor is drawn as a doubler.
            g,pos = ltr.makeglyph(pos,color=color,weight=weight,doubler=(ltr==self.word[ix-1] and ix>0))
            ax.add_patch(g)
        ax.axis('scaled')
        ax.axis('off')
        return pos[0]+2,startpos[1]
class Solresol():
    """A phrase: a sequence of SolresolWord."""
    def __init__(self, text, syntax='default'):
        """Parse `text` into self.words.

        * str: ASCII punctuation is stripped, then each whitespace-separated
          token becomes a SolresolWord parsed with `syntax`;
        * list: each element is handed to SolresolWord unchanged;
        * int: the octal digits are split into 5-digit groups, one word per
          group (inverse of the `value` property).
        """
        if isinstance(text,str):
            text = text.translate(str.maketrans('','','!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'))
            self.words = [SolresolWord(word,syntax) for word in text.split()]
        elif isinstance(text,list):
            self.words = [SolresolWord(word,syntax) for word in text]
        elif isinstance(text,int):
            sw = oct(text)[2:]
            self.words = [SolresolWord(int(sw[i:i+5],8)) for i in range(0,len(sw),5)]
    @property
    def fulltext(self):
        # Space-separated full note-name spelling of all words.
        return ' '.join(word.fulltext for word in self.words)
    @property
    def ses(self):
        # Space-separated shorthand spelling of all words.
        return ' '.join(word.ses for word in self.words)
    @property
    def numlist(self):
        return [int(word) for word in self.words]
    @property
    def value(self):
        # Whole phrase packed into one integer: each word's octal digits
        # left-justified (zero-padded) into a 5-digit group.
        return int(''.join(oct(num)[2:].ljust(5,'0') for num in self.numlist),8)
    def __int__(self):
        return self.value
    def __str__(self):
        return self.fulltext
    def __getitem__(self,ix):
        return self.words.__getitem__(ix)
    def __len__(self):
        return len(self.words)
    def __iter__(self):
        return iter(self.words)
    def __repr__(self):
        return f"Solresol('{str(self)}')"
    def melody(self, note_len=0.2, amplitude=1, envelope_ratio=0.2, gap_ratio=1, sample_rate=44100):
        """Audio samples for the phrase with a silent gap after each word."""
        notes = []
        for word in self.words:
            notes.append(word.melody(note_len, amplitude, envelope_ratio, sample_rate))
            # Silence of gap_ratio note-lengths between words.
            notes.append(np.zeros(int(note_len*sample_rate*gap_ratio)))
        return np.concatenate(notes)
    def play(self, note_len=0.2, amplitude=1, envelope_ratio=0.2, gap_ratio=1):
        """Render the phrase through the module-level Audio helper at 44.1 kHz."""
        return Audio(self.melody(note_len, amplitude, envelope_ratio, gap_ratio, 44100),rate=44100)
    def draw(self,color='black',weight=2,subplot_mode=False,rowmax=5):
        """Draw the phrase; one axes per word if `subplot_mode`, else one row.

        NOTE(review): in subplot mode the grid shape arithmetic and the flat
        `zip` over `axs` (a 2-D array when there is more than one row) look
        suspect — verify against a multi-row phrase.
        """
        if len(self) > 1 and subplot_mode:
            fig,axs = plt.subplots(len(self)//rowmax+1,(len(self)-1)%rowmax+1)
            for word,ax in zip(self.words,axs):
                word.draw(ax,color=color,weight=weight)
        else:
            fig,ax = plt.subplots()
            pos = (0,0)
            for word in self.words:
                pos = word.draw(ax,color=color,weight=weight,startpos=pos)
        return fig
    def translate(self,alldefs=False,random=False,ix=0):
        """Translate word-by-word using the bundled dictionary.

        With `alldefs`, each word is rendered as 'word: (all definitions)';
        otherwise one of the comma-separated definitions is chosen, either
        at random or by index `ix`.
        """
        translation = []
        for word in self.words:
            if alldefs:
                translation.append(f'{word.fulltext}: ({word.definition})')
            else:
                dfn = word.definition.split(',')
                ix = np.random.randint(len(dfn)) if random else ix
                translation.append(dfn[ix].strip())
        return ' '.join(translation)
# Load the Solresol-to-English dictionary shipped alongside this module.
# Keys are full-text words (see SolresolWord.fulltext); values are
# comma-separated English definitions (split by Solresol.translate).
with open('solresol_dict.json') as f:
    solresol_dict = json.load(f)
# Presumably the upstream source of the dictionary data — confirm.
dictionary_url = "https://docs.google.com/spreadsheets/d/1-3lBxMURGN4AtGG846kuVGVNuEiHewCT88PiBahnODA/edit#gid=0"
| 44.523585 | 156 | 0.599534 | 8,484 | 0.898824 | 0 | 0 | 1,312 | 0.138998 | 0 | 0 | 461 | 0.04884 |
50d889e431ae6bc543ceb74e6af10a5b1cfede00 | 4,351 | py | Python | src/icegrams/trie_build.py | vthorsteinsson/Icegrams | d20d137a7e2b3316f38dfd5ace826993704c2050 | [
"MIT"
] | 7 | 2019-03-04T18:14:37.000Z | 2019-03-29T02:57:02.000Z | src/icegrams/trie_build.py | vthorsteinsson/Icegrams | d20d137a7e2b3316f38dfd5ace826993704c2050 | [
"MIT"
] | 1 | 2020-09-04T13:19:38.000Z | 2020-09-06T20:14:58.000Z | src/icegrams/trie_build.py | vthorsteinsson/Icegrams | d20d137a7e2b3316f38dfd5ace826993704c2050 | [
"MIT"
] | 1 | 2019-05-31T11:45:51.000Z | 2019-05-31T11:45:51.000Z | """
Icegrams: A trigrams library for Icelandic
CFFI builder for _trie module
Copyright (C) 2020 Miðeind ehf.
Original author: Vilhjálmur Þorsteinsson
This software is licensed under the MIT License:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This module only runs at setup/installation time. It is invoked
from setup.py as requested by the cffi_modules=[] parameter of the
setup() function. It causes the _trie.*.so CFFI wrapper library
to be built from its source in trie.cpp.
"""
import os
import platform
import cffi
# Don't change the name of this variable unless you
# change it in setup.py as well
ffibuilder = cffi.FFI()

# Directory containing this file; falls back to "." when __file__ is bare.
# NOTE(review): _PATH appears unused within this module.
_PATH = os.path.dirname(__file__) or "."
# Platform flags used to select compiler arguments below.
WINDOWS = platform.system() == "Windows"
MACOS = platform.system() == "Darwin"

# What follows is the actual Python-wrapped C interface to trie.*.so
# It must be kept in sync with trie.h

# C declarations passed both to set_source() (compiled) and cdef() (wrapped).
declarations = """
typedef unsigned int UINT;
typedef uint8_t BYTE;
typedef uint32_t UINT32;
typedef uint64_t UINT64;
typedef void VOID;
UINT mapping(const BYTE* pbMap, const BYTE* pbWord);
UINT bitselect(const BYTE* pb, UINT n);
UINT retrieve(const BYTE* pb, UINT nStart, UINT n);
UINT lookupFrequency(const BYTE* pb,
UINT nQuantumSize, UINT nIndex);
UINT64 lookupMonotonic(const BYTE* pb,
UINT nQuantumSize, UINT nIndex);
VOID lookupPairMonotonic(const BYTE* pb,
UINT nQuantumSize, UINT nIndex,
UINT64* pn1, UINT64* pn2);
UINT64 lookupPartition(const BYTE* pb,
UINT nOuterQuantum, UINT nInnerQuantum, UINT nIndex);
VOID lookupPairPartition(const BYTE* pb,
UINT nQuantumSize, UINT nInnerQuantum, UINT nIndex,
UINT64* pn1, UINT64* pn2);
UINT searchMonotonic(const BYTE* pb,
UINT nQuantumSize, UINT nP1, UINT nP2, UINT64 n);
UINT searchMonotonicPrefix(const BYTE* pb,
UINT nQuantumSize, UINT nP1, UINT nP2, UINT64 n);
UINT searchPartition(const BYTE* pb,
UINT nOuterQuantum, UINT nInnerQuantum,
UINT nP1, UINT nP2, UINT64 n);
UINT searchPartitionPrefix(const BYTE* pb,
UINT nOuterQuantum, UINT nInnerQuantum,
UINT nP1, UINT nP2, UINT64 n);
"""

# Do the magic CFFI incantations necessary to get CFFI and setuptools
# to compile trie.cpp at setup time, generate a .so library and
# wrap it so that it is callable from Python and PyPy as _trie

# Per-platform compiler arguments for building the C++ source.
if WINDOWS:
    extra_compile_args = ["/Zc:offsetof-"]
elif MACOS:
    os.environ["CFLAGS"] = "-stdlib=libc++"  # Fixes PyPy build on macOS 10.15.6+
    extra_compile_args = ["-mmacosx-version-min=10.7", "-stdlib=libc++"]
else:
    # Adding -O3 to the compiler arguments doesn't seem to make
    # any discernible difference in lookup speed
    extra_compile_args = ["-std=c++11"]

ffibuilder.set_source(
    "icegrams._trie",
    # trie.cpp is written in C++ but must export a pure C interface.
    # This is the reason for the "extern 'C' { ... }" wrapper.
    'extern "C" {\n' + declarations + "\n}\n",
    source_extension=".cpp",
    sources=["src/icegrams/trie.cpp"],
    extra_compile_args=extra_compile_args,
)

# Register the same declarations with CFFI so Python can call the functions.
ffibuilder.cdef(declarations)

if __name__ == "__main__":
    # Allow building the extension directly: python trie_build.py
    ffibuilder.compile(verbose=False)
| 35.08871 | 81 | 0.69846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,777 | 0.867478 |