hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f94195b0e745d91852d2ea4775d406dd9acd653a
| 3,336
|
py
|
Python
|
mcenter_client/tests/mcenter_server_api/controllers/users_controller.py
|
lisapm/mlpiper
|
74ad5ae343d364682cc2f8aaa007f2e8a1d84929
|
[
"Apache-2.0"
] | 7
|
2019-04-08T02:31:55.000Z
|
2021-11-15T14:40:49.000Z
|
mcenter_client/tests/mcenter_server_api/controllers/users_controller.py
|
lisapm/mlpiper
|
74ad5ae343d364682cc2f8aaa007f2e8a1d84929
|
[
"Apache-2.0"
] | 31
|
2019-02-22T22:23:26.000Z
|
2021-08-02T17:17:06.000Z
|
mcenter_client/tests/mcenter_server_api/controllers/users_controller.py
|
lisapm/mlpiper
|
74ad5ae343d364682cc2f8aaa007f2e8a1d84929
|
[
"Apache-2.0"
] | 8
|
2019-03-15T23:46:08.000Z
|
2020-02-06T09:16:02.000Z
|
import connexion
import six
import flask
import copy
import os
import base64
import time
from mcenter_server_api.models.inline_response200 import InlineResponse200 # noqa: E501
from mcenter_server_api.models.inline_response2001 import InlineResponse2001 # noqa: E501
from mcenter_server_api.models.user import User # noqa: E501
from mcenter_server_api import util
from . import base
users = dict(AdminID=dict(username='admin', password='admin',
createdBy='admin', created=0, id='AdminID'))
def _cleanuser(u):
u = copy.copy(u)
del u['password']
return u
def _finduserbyname(username):
for u in users.values():
if u['username'] == username:
return u
return None
def _finduser(userId):
if userId in users:
return users[userId]
flask.abort(404)
def auth_login_post(body): # noqa: E501
"""Authenticate user
# noqa: E501
:param user: username and password fields for authentication
:type user: dict | bytes
:rtype: InlineResponse200
"""
u = _finduserbyname(body['username'])
if u is None:
flask.abort(403)
if u['password'] != body['password']:
flask.abort(403)
return dict(token=base.add_session(u))
def auth_validate_post(body): # noqa: E501
"""Register an user
# noqa: E501
:param authorization: Bearer Token
:type authorization: str
:rtype: InlineResponse2001
"""
return 'do some magic!'
def me_get(): # noqa: E501
"""Get user detail of current user
# noqa: E501
:rtype: User
"""
s = base.check_session()
return _cleanuser(s['user'])
def users_get(): # noqa: E501
"""Get list of users
# noqa: E501
:rtype: List[User]
"""
base.check_session()
ret = []
for u in users.values():
ret.append(_cleanuser(u))
return ret
def users_post(body): # noqa: E501
"""Create a new user
# noqa: E501
:param user: User detail description
:type user: dict | bytes
:rtype: User
"""
s = base.check_session()
if _finduserbyname(body['username']) is not None:
flask.abort(500)
if not body['password']:
flask.abort(500)
base.finish_creation(body, s, users)
return _cleanuser(body)
def users_user_id_delete(userId): # noqa: E501
"""Deregister an user
# noqa: E501
:param user_id: User identifier
:type user_id: str
:rtype: None
"""
s = base.check_session()
u = _finduser(userId)
k = u['username']
if k == s['user']['username']:
flask.abort(500)
del users[userId]
def users_user_id_get(userId): # noqa: E501
"""List details of specific user
# noqa: E501
:param user_id: User identifier
:type user_id: str
:rtype: User
"""
base.check_session()
return _cleanuser(_finduser(userId))
def users_user_id_put(userId, body): # noqa: E501
"""Update user information
# noqa: E501
:param user_id: User identifier
:type user_id: str
:param user: Update user object
:type user: dict | bytes
:rtype: User
"""
base.check_session()
u = _finduser(userId)
for k, v in body.items():
if k not in ['id', 'created', 'createdBy'] and v is not None:
u[k] = v
return _cleanuser(u)
| 20.096386
| 90
| 0.627098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,371
| 0.410971
|
f941abe12e92f9a9d99898da1845f80024a4bf16
| 105
|
py
|
Python
|
dash_react_json_schema_form/_imports_.py
|
dabble-of-devops-bioanalyze/dash_react_json_schema_form
|
f8b8826e6798efca1a7f603aa73b9e054056dc9a
|
[
"Apache-2.0"
] | null | null | null |
dash_react_json_schema_form/_imports_.py
|
dabble-of-devops-bioanalyze/dash_react_json_schema_form
|
f8b8826e6798efca1a7f603aa73b9e054056dc9a
|
[
"Apache-2.0"
] | null | null | null |
dash_react_json_schema_form/_imports_.py
|
dabble-of-devops-bioanalyze/dash_react_json_schema_form
|
f8b8826e6798efca1a7f603aa73b9e054056dc9a
|
[
"Apache-2.0"
] | null | null | null |
from .DashReactJsonSchemaForm import DashReactJsonSchemaForm
__all__ = [
"DashReactJsonSchemaForm"
]
| 21
| 60
| 0.819048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.238095
|
f94563e81861f76b57c556bc8928617eb8ac0410
| 19,471
|
py
|
Python
|
symbol.py
|
LizhengMathAi/symbol_FEM
|
a2679ff90cfffa40316e33102be1a802e210768a
|
[
"Apache-2.0"
] | 1
|
2021-02-07T00:53:51.000Z
|
2021-02-07T00:53:51.000Z
|
symbol.py
|
LizhengMathAi/symbol_FEM
|
a2679ff90cfffa40316e33102be1a802e210768a
|
[
"Apache-2.0"
] | null | null | null |
symbol.py
|
LizhengMathAi/symbol_FEM
|
a2679ff90cfffa40316e33102be1a802e210768a
|
[
"Apache-2.0"
] | null | null | null |
from functools import reduce
import numpy as np
from sparse import SparseTensor
def reduce_prod(seq): return reduce(lambda item_1, item_2: item_1 * item_2, seq)
class Polynomial:
def __init__(self, coeff, indices, merge=True):
"""\\sum_{i=0}^{N-1} coeff[i] \\Pi_{j=0}^{NV-1} x_j^{indices[i, j]}"""
self.degree = np.max(np.sum(indices, axis=-1))
self.n_elements = indices.shape[-1]
if merge:
self.coeff, self.indices = SparseTensor.merge(coeff, indices)
else:
self.coeff, self.indices = coeff, indices
def __call__(self, x):
coeff = np.reshape(self.coeff, newshape=(1, -1))
x = np.reshape(x, newshape=(-1, 1, self.n_elements))
indices = np.reshape(self.indices, newshape=(1, -1, self.n_elements))
return np.sum(coeff * np.prod(np.power(x, indices), axis=2), axis=1)
def __str__(self): return '\n'.join(["{:.2f}\t{}".format(c, index) for c, index in zip(self.coeff, self.indices)])
def __neg__(self): return Polynomial(-self.coeff, self.indices, merge=False)
def __add__(self, other):
if type(other).__name__ in ["bool", "int", "float", "int64", "float64"]:
other = Polynomial(np.array([other, ]), np.zeros(shape=(1, self.n_elements), dtype=self.indices.dtype), merge=False)
return self.__add__(other)
elif isinstance(other, Polynomial):
assert self.n_elements == other.n_elements
return Polynomial(np.hstack([self.coeff, other.coeff]), np.vstack([self.indices, other.indices]), merge=True)
else:
raise ValueError
def __sub__(self, other): return self.__add__(other.__neg__())
def __mul__(self, other):
if type(other).__name__ in ["bool", "int", "float", "int64", "float64"]:
return Polynomial(self.coeff * other, self.indices, merge=False)
elif isinstance(other, Polynomial):
assert self.n_elements == other.n_elements
coeff = np.expand_dims(self.coeff, axis=0) * np.expand_dims(other.coeff, axis=1)
coeff = coeff.flatten()
indices = np.expand_dims(self.indices, axis=0) + np.expand_dims(other.indices, axis=1)
indices = np.reshape(indices, newshape=(-1, self.n_elements))
return Polynomial(coeff, indices, merge=True)
else:
raise ValueError
def derivative(self, order=1):
"""
+----------+-----------------+--------------+
| item | data type | shape |
+----------+-----------------+--------------+
| order | int | [] |
| return | PolynomialArray | [ND] * order |
+----------+-----------------+--------------+
"""
array = [self]
for _ in range(order):
collection = []
for poly in array:
for i in range(self.indices.shape[1]):
coeff = poly.coeff * poly.indices[:, i]
indices = np.maximum(poly.indices - np.eye(poly.n_elements, dtype=poly.indices.dtype)[[i], :], 0)
collection.append(Polynomial(coeff, indices, merge=True))
array = collection
return PolynomialArray(array, shape=[self.indices.shape[1]] * order)
def directional_derivative(self, c, order=1):
"""
+----------+---------------+--------------+
| item | data type | shape |
+----------+---------------+--------------+
| order | numpy.ndarray | [ND] * order |
| order | int | [] |
| return | Polynomial | [ND] * order |
+----------+---------------+--------------+
return: \\sum_{ij...} c_{ij...} \\frac{\\partial^ self}{\partial \lambda_i \partial \lambda_j ...}
"""
coeff = self.coeff
indices = self.indices
dim = self.n_elements
for axis in range(order):
coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=[-1] + list(range(axis+1)))
indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=np.int), axis=list(range(1, axis + 2)))
indices = np.maximum(indices, 0)
coeff = (np.expand_dims(c, axis=-1) * coeff).flatten()
indices = np.reshape(indices, newshape=(-1, dim))
return Polynomial(coeff, indices, merge=True)
class PolynomialArray:
def __init__(self, array, shape): self.array, self.shape = array, list(shape)
def reshape(self, shape):
shape = list(shape)
for axis in range(shape.__len__()):
if shape[axis] == -1:
shape[axis] = -reduce_prod(self.shape) // reduce_prod(shape)
break
return PolynomialArray(self.array, shape)
def transpose(self, axes):
transpose_indices = np.transpose(np.reshape(np.arange(self.array.__len__()), newshape=self.shape), axes=axes)
array = [self.array[index] for index in transpose_indices.flatten()]
shape = [self.shape[axis] for axis in axes]
return PolynomialArray(array, shape)
def sum(self, axis, keep_dim=False):
axes = [axis] + [ax for ax in range(self.shape.__len__()) if ax != axis]
transpose_array = self.transpose(axes)
result = reduce(lambda u, v: u + v, [transpose_array[k] for k in range(transpose_array.shape[0])])
if keep_dim:
result.shape.insert(axis, 1)
return result
def __call__(self, x): return np.reshape(np.stack([poly(x) for poly in self.array], axis=1), newshape=[-1] + self.shape)
def __getitem__(self, item):
valid_indices = np.reshape(np.arange(self.array.__len__()), newshape=self.shape)[item]
array = [self.array[index] for index in valid_indices.flatten()]
shape = valid_indices.shape
return array[0] if shape == () else PolynomialArray(array, shape)
def __eq__(self, other): return (self.shape == other.shape) and sum([sp != op for sp, op in zip(self.array, other.array)]) == 0
def __neg__(self): return PolynomialArray([-array for array in self.array], self.shape)
def __add__(self, other): # TODO: in large scale calculation, this operator works slowly in serial mode.
if type(other).__name__ in ["bool", "int", "float", "Polynomial"]:
array = PolynomialArray([sa + other for sa in self.array], self.shape)
return array.reshape(self.shape)
elif isinstance(other, np.ndarray):
n_elements, dtype = self.array[0].n_elements, self.array[0].indices.dtype
arr = [Polynomial(np.array([item, ]), np.zeros(shape=(1, n_elements), dtype=dtype)) for item in other.flatten()]
return self.__add__(PolynomialArray(arr, shape=other.shape))
elif isinstance(other, PolynomialArray):
self_indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
o_indices = np.reshape(np.arange(np.prod(other.shape)), other.shape)
self_indices, o_indices = self_indices + np.zeros_like(o_indices), o_indices + np.zeros_like(self_indices)
array = [self.array[si] + other.array[oi] for si, oi in zip(self_indices.flatten(), o_indices.flatten())]
return PolynomialArray(array, shape=self_indices.shape)
else:
raise ValueError
def __sub__(self, other): return self.__add__(other.__neg__())
def __mul__(self, other): # TODO: in large scale calculation, this operator works slowly in serial mode.
if type(other).__name__ in ["bool", "int", "float", "Polynomial"]:
array = PolynomialArray([sa * other for sa in self.array], self.shape)
return array.reshape(self.shape)
elif isinstance(other, np.ndarray):
n_elements, dtype = self.array[0].n_elements, self.array[0].indices.dtype
arr = [Polynomial(np.array([item, ]), np.zeros(shape=(1, n_elements), dtype=dtype)) for item in other.flatten()]
return self.__mul__(PolynomialArray(arr, shape=other.shape))
elif isinstance(other, PolynomialArray):
self_indices = np.reshape(np.arange(np.prod(self.shape)), self.shape)
o_indices = np.reshape(np.arange(np.prod(other.shape)), other.shape)
self_indices, o_indices = self_indices + np.zeros_like(o_indices), o_indices + np.zeros_like(self_indices)
array = [self.array[si] * other.array[oi] for si, oi in zip(self_indices.flatten(), o_indices.flatten())]
return PolynomialArray(array, shape=self_indices.shape)
else:
raise ValueError
@classmethod
def stack(cls, arrays, axis):
axis %= arrays[0].shape.__len__() + 1
array = sum([item.array for item in arrays], [])
shape = [arrays.__len__()] + list(arrays[0].shape)
axes = [i for i in range(shape.__len__()) if i != axis]
axes.insert(axis, 0)
return PolynomialArray(array, shape).transpose(axes)
@classmethod
def concat(cls, arrays, axis):
axes = [axis] + [i for i in range(arrays[0].shape.__len__()) if i != axis]
shape = [-1] + [dim for i, dim in enumerate(arrays[0].shape) if i != axis]
arrays = sum([cls.transpose(array, axes).array for array in arrays], [])
arrays = cls(arrays, shape=(arrays.__len__(), ))
arrays = arrays.reshape(shape)
axes = list(range(1, shape.__len__()))
axes.insert(axis, 0)
return arrays.transpose(axes)
def derivative(self, order=1):
"""
+----------+-----------------+---------------------------+
| item | data type | shape |
+----------+-----------------+---------------------------+
| order | int | [] |
| return | PolynomialArray | self.shape + [ND] * order |
+----------+-----------------+---------------------------+
"""
array = PolynomialArray.stack([poly.derivative(order) for poly in self.array], axis=0)
return array.reshape(self.shape + array.shape[1:])
def directional_derivative(self, c, order=1):
"""
+----------+-----------------+---------------------------+
| item | data type | shape |
+----------+-----------------+---------------------------+
| c | numpy.ndarray | self.shape + [ND] * order |
| order | int | [] |
| return | numpy.ndarray | self.shape |
+----------+-----------------+---------------------------+
return: \\sum_{ij...} c_{ij...}^{uv...} \\frac{\\partial^ self_{uv...}}{\partial \lambda_i \partial \lambda_j ...}
"""
ni = max([p.coeff.__len__() for p in self.array])
dim = self.array[0].n_elements
coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - p.coeff.__len__(), ))], axis=0) for p in self.array]
coeff = np.stack(coeff, axis=1) # shape = [NI, ?]
indices = [np.concatenate([p.indices, np.zeros(shape=(ni - p.coeff.__len__(), dim), dtype=np.int)], axis=0) for p in self.array]
indices = np.stack(indices, axis=2) # shape = [NI, ND, ?]
for axis in range(order):
axes = [axis + 1] + [i for i in range(axis + 3) if i != axis + 1]
coeff = np.expand_dims(coeff, axis=0) * np.transpose(indices, axes=axes)
axes = list(range(1, axis + 2)) + [axis + 3]
indices = np.expand_dims(indices, axis=0) - np.expand_dims(np.eye(dim, dtype=np.int), axis=axes)
indices = np.maximum(indices, 0)
c = np.reshape(c, newshape=[-1, 1] + [dim] * order)
c = np.transpose(c, axes=list(range(2, order + 2)) + [1, 0]) # shape = [ND] * order + [1] + [?]
coeff = np.reshape((c * coeff), newshape=(dim ** order * ni, -1)) # shape = [ND] * order + [NI] + [?]
indices = np.reshape(indices, newshape=(dim ** order * ni, dim, -1)) # shape = [ND] * order + [NI] + [ND] + [?]
return PolynomialArray([Polynomial(coeff[:, i], indices[:, :, i], merge=True) for i in range(coeff.shape[-1])], shape=self.shape)
def integral(self, dim, determinant):
"""
Working correctly in triangulation grid only!
\Pi_i \alpha_i!
\int_K \Pi_i \lambda_i^{\alpha_i} dx = ------------------------ * determinant
(dim + \Sum_i \alpha_i)!
"""
ni = max([p.coeff.__len__() for p in self.array])
nd = self.array[0].n_elements
coeff = [np.concatenate([p.coeff, np.zeros(shape=(ni - p.coeff.__len__(), ))], axis=0) for p in self.array]
coeff = np.stack(coeff, axis=1) # shape = [NI, ?]
indices = [np.concatenate([p.indices, np.zeros(shape=(ni - p.coeff.__len__(), nd), dtype=np.int)], axis=0) for p in self.array]
indices = np.stack(indices, axis=2) # shape = [NI, ND, ?]
degree = np.max(indices)
if degree == 0:
numerator = np.ones_like(indices) # shape = [NI, ND, ?]
else:
numerator = reduce_prod([np.maximum(indices - i, 1) for i in range(degree)]) # shape = [NI, ND, ?]
numerator = np.prod(numerator, axis=1) # shape = [NI, ?]
denominator = np.sum(indices, axis=1) + dim # shape = [NI, ?]
denominator = reduce_prod([np.maximum(denominator - i, 1) for i in range(degree + dim)]) # shape = [NI, ?]
return np.reshape(np.sum(coeff * numerator / denominator, axis=0), newshape=self.shape) * determinant
def unit_test():
np.set_printoptions(precision=2)
x = np.random.rand(4, 3)
const_array = np.random.rand(8, 7)
# item 6, degree 2, elements 3
poly = Polynomial(coeff=np.random.rand(6), indices=np.random.randint(0, 3, size=(6, 3)))
polys_1 = [Polynomial(coeff=np.random.rand(5), indices=np.random.randint(0, 5, size=(5, 3))) for _ in range(56)]
polys_1 = PolynomialArray(polys_1, [8, 7])
polys_2 = [Polynomial(coeff=np.random.rand(4), indices=np.random.randint(0, 5, size=(4, 3))) for i in range(56)]
polys_2 = PolynomialArray(polys_2, [8, 7])
polys_3 = [Polynomial(coeff=np.random.rand(3), indices=np.random.randint(0, 5, size=(3, 3))) for i in range(7*8*9)]
polys_3 = PolynomialArray(polys_3, [9, 8, 7])
# four fundamental rules
print("polys_1(x) + np.pi - (polys_1 + np.pi)(x):")
print(np.max(np.abs(polys_1(x) + np.pi - (polys_1 + np.pi)(x))))
print("polys_1(x) + poly(x) - (polys_1 + poly)(x):")
print(np.max(np.abs(polys_1(x) + np.reshape(poly(x), (-1, 1, 1)) - (polys_1 + poly)(x))))
print("polys_1(x) + np.expand_dims(const_array, axis=0) - (polys_1 + const_array)(x):")
print(np.max(np.abs(polys_1(x) + np.expand_dims(const_array, axis=0) - (polys_1 + const_array)(x))))
print("polys_1(x) + polys_2(x) - (polys_1 + polys_2)(x):")
print(np.max(np.abs(polys_1(x) + polys_2(x) - (polys_1 + polys_2)(x))))
print("polys_1[:, [1]](x) + polys_2[[-1], :](x) - (polys_1[:, [1]] + polys_2[[-1], :])(x):")
print(np.max(np.abs(polys_1[:, [1]](x) + polys_2[[-1], :](x) - (polys_1[:, [1]] + polys_2[[-1], :])(x))))
print("polys_1(x) - np.pi - (polys_1 - np.pi)(x):")
print(np.max(np.abs(polys_1(x) - np.pi - (polys_1 - np.pi)(x))))
print("polys_1(x) - poly(x) - (polys_1 - poly)(x):")
print(np.max(np.abs(polys_1(x) - np.reshape(poly(x), (-1, 1, 1)) - (polys_1 - poly)(x))))
print("polys_1(x) - np.expand_dims(const_array, axis=0) - (polys_1 - const_array)(x):")
print(np.max(np.abs(polys_1(x) - np.expand_dims(const_array, axis=0) - (polys_1 - const_array)(x))))
print("polys_1(x) - polys_2(x) - (polys_1 - polys_2)(x):")
print(np.max(np.abs(polys_1(x) - polys_2(x) - (polys_1 - polys_2)(x))))
print("polys_1[:, [1]](x) - polys_2[[-1], :](x) - (polys_1[:, [1]] - polys_2[[-1], :])(x):")
print(np.max(np.abs(polys_1[:, [1]](x) - polys_2[[-1], :](x) - (polys_1[:, [1]] - polys_2[[-1], :])(x))))
print("polys_1(x) * np.pi - (polys_1 * np.pi)(x):")
print(np.max(np.abs(polys_1(x) * np.pi - (polys_1 * np.pi)(x))))
print("polys_1(x) * poly(x) - (polys_1 * poly)(x):")
print(np.max(np.abs(polys_1(x) * np.reshape(poly(x), (-1, 1, 1)) - (polys_1 * poly)(x))))
print("polys_1(x) * np.expand_dims(const_array, axis=0) - (polys_1 * const_array)(x):")
print(np.max(np.abs(polys_1(x) * np.expand_dims(const_array, axis=0) - (polys_1 * const_array)(x))))
print("polys_1(x) * polys_2(x) - (polys_1 * polys_2)(x):")
print(np.max(np.abs(polys_1(x) * polys_2(x) - (polys_1 * polys_2)(x))))
print("polys_1[:, [1]](x) * polys_2[[-1], :](x) - (polys_1[:, [1]] * polys_2[[-1], :])(x):")
print(np.max(np.abs(polys_1[:, [1]](x) * polys_2[[-1], :](x) - (polys_1[:, [1]] * polys_2[[-1], :])(x))))
print(np.max(np.abs(polys_1.reshape(shape=[2, 4, 7])(x) - np.reshape(polys_1(x), newshape=(-1, 2, 4, 7)))))
# check concat
print("PolynomialArray.concat([polys_1, polys_2], axis=1)(x) - np.concatenate([polys_1(x), polys_2(x)], axis=1):")
print(np.max(np.abs(PolynomialArray.concat([polys_1, polys_2], axis=1)(x) - np.concatenate([polys_1(x), polys_2(x)], axis=2))))
# check sum
print(np.max(np.abs(polys_3.sum(axis=0, keep_dim=True)(x) - np.sum(polys_3(x), axis=0 + 1, keepdims=True))))
print(np.max(np.abs(polys_3.sum(axis=1, keep_dim=True)(x) - np.sum(polys_3(x), axis=1 + 1, keepdims=True))))
print(np.max(np.abs(polys_3.sum(axis=2, keep_dim=True)(x) - np.sum(polys_3(x), axis=2 + 1, keepdims=True))))
# check integral
poly_1 = Polynomial(
coeff=np.array([
1,
3,
]),
indices=np.array([
[1, 2, 3, 4],
[1, 1, 1, 1],
])
)
poly_2 = Polynomial(
coeff=np.array([
2,
4,
]),
indices=np.array([
[4, 3, 2, 1],
[0, 0, 0, 0],
])
)
poly = PolynomialArray(array=[poly_1, poly_2], shape=(2, ))
ans_1 = 0.5 * 1 * (1 * 2 * 6 * 24) / reduce_prod(list(range(1, 14)))
ans_1 += 0.5 * 3 * (1 * 1 * 1 * 1) / reduce_prod(list(range(1, 8)))
ans_2 = 2 * 2 * (1 * 2 * 6 * 24) / reduce_prod(list(range(1, 14)))
ans_2 += 2 * 4 * (1 * 1 * 1 * 1) / reduce_prod(list(range(1, 4)))
print(poly.integral(dim=3, determinant=np.array([0.5, 2])) - np.array([ans_1, ans_2]))
# check derivative
poly = poly.derivative(order=1)
print(poly[0, 1])
# check derivative in Polynomial
c = np.random.rand(3, 3)
coeff = np.random.randint(100, size=(4, )) / 100
indices = np.random.randint(10, size=(4, 3))
poly = Polynomial(coeff, indices)
type_1 = (poly.derivative(order=2) * c).sum(axis=0).sum(axis=0)
type_2 = poly.directional_derivative(c, order=2)
error = type_1 - type_2
error = Polynomial(error.coeff, error.indices, merge=True)
print("error:", error)
# check derivative in PolynomialArray
poly = PolynomialArray([poly, poly+1, poly-1, poly*2], shape=(2, 2))
c = np.random.rand(2, 2, 3, 3)
type_1 = (poly.derivative(order=2) * c).sum(axis=2).sum(axis=2)
type_2 = poly.directional_derivative(c, order=2)
for item in (type_1 - type_2).array:
item = Polynomial(item.coeff, item.indices, merge=True)
print("error:", item)
if __name__ == "__main__":
unit_test()
| 49.544529
| 137
| 0.558472
| 13,496
| 0.693133
| 0
| 0
| 867
| 0.044528
| 0
| 0
| 4,025
| 0.206718
|
f9466d3c2d2932494116e2cb70d044cef50ea795
| 266
|
py
|
Python
|
pollbot/helper/display/management.py
|
3wille/ultimate-poll-bot
|
7a99659df463a891b20a1ab424665cd84d4242b4
|
[
"MIT"
] | null | null | null |
pollbot/helper/display/management.py
|
3wille/ultimate-poll-bot
|
7a99659df463a891b20a1ab424665cd84d4242b4
|
[
"MIT"
] | null | null | null |
pollbot/helper/display/management.py
|
3wille/ultimate-poll-bot
|
7a99659df463a891b20a1ab424665cd84d4242b4
|
[
"MIT"
] | null | null | null |
"""The poll management text."""
from .poll import get_poll_text
def get_poll_management_text(session, poll, show_warning=False):
"""Create the management interface for a poll."""
poll_text = get_poll_text(session, poll, show_warning)
return poll_text
| 26.6
| 64
| 0.74812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.300752
|
f9469121eeab103831a2110844d01c4c5cbbd7f5
| 354
|
py
|
Python
|
codewof/programming/migrations/0009_auto_20200417_0013.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 3
|
2019-08-29T04:11:22.000Z
|
2021-06-22T16:05:51.000Z
|
codewof/programming/migrations/0009_auto_20200417_0013.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 265
|
2019-05-30T03:51:46.000Z
|
2022-03-31T01:05:12.000Z
|
codewof/programming/migrations/0009_auto_20200417_0013.py
|
samuelsandri/codewof
|
c9b8b378c06b15a0c42ae863b8f46581de04fdfc
|
[
"MIT"
] | 7
|
2019-06-29T12:13:37.000Z
|
2021-09-06T06:49:14.000Z
|
# Generated by Django 2.2.3 on 2020-04-16 12:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('programming', '0008_auto_20200415_1406'),
]
operations = [
migrations.AlterModelOptions(
name='badge',
options={'ordering': ['badge_tier']},
),
]
| 19.666667
| 51
| 0.59887
| 269
| 0.759887
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.322034
|
f946e2fce4d695420e4afffc8e580dcd4dade5ec
| 273
|
py
|
Python
|
diy_programs/diy_9_csv_module.py
|
bhalajin/blueprints
|
7ad1d7860aafbb4c333de9efbbb7e546ed43c569
|
[
"MIT"
] | null | null | null |
diy_programs/diy_9_csv_module.py
|
bhalajin/blueprints
|
7ad1d7860aafbb4c333de9efbbb7e546ed43c569
|
[
"MIT"
] | null | null | null |
diy_programs/diy_9_csv_module.py
|
bhalajin/blueprints
|
7ad1d7860aafbb4c333de9efbbb7e546ed43c569
|
[
"MIT"
] | null | null | null |
import csv
a = [[1,2,3], [4,5,6]]
with open('test.csv', 'w', newline='') as testfile:
csvwriter = csv.writer(testfile)
for row in a:
csvwriter.writerow(row)
with open('test.csv', 'r') as testfile:
csvreader = csv.reader(testfile)
for row in csvreader:
print(row)
| 21
| 51
| 0.663004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.102564
|
f948dbae262921813e79d529b722c0b66116eaf6
| 543
|
py
|
Python
|
sourceFiles/ex027_LerNomeMostraUltimo.py
|
mcleber/Aulas_Python
|
bd224b593fcf907d54c8a2b92eb3afa88d327171
|
[
"MIT"
] | null | null | null |
sourceFiles/ex027_LerNomeMostraUltimo.py
|
mcleber/Aulas_Python
|
bd224b593fcf907d54c8a2b92eb3afa88d327171
|
[
"MIT"
] | null | null | null |
sourceFiles/ex027_LerNomeMostraUltimo.py
|
mcleber/Aulas_Python
|
bd224b593fcf907d54c8a2b92eb3afa88d327171
|
[
"MIT"
] | null | null | null |
'''
Faça um programa que leia o nome completo de uma pessoa, mostrando em seguida o primeiro e o último
nome separadamente.
Ex.: Ana Maria de Souza
primeiro = Ana
último = Souza
'''
n = str(input('Digite seu nome completo: ')).strip()
nome = n.split() # split particiona e cria uma lista começando no indice 0
print('Muito prazer em te conhecer!')
print('Seu primeiro nome é {}'.format(nome[0]))
print('Seu segundo nome é {}'.format(nome[1])) # indice 1 pega o segundo nome
print('Seu último nome é {}'.format(nome[len(nome)-1]))
| 38.785714
| 100
| 0.696133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 411
| 0.745917
|
f949b7feca2216ed779a38104fad871de931f5cd
| 1,715
|
py
|
Python
|
hknweb/forms.py
|
Boomaa23/hknweb
|
2c2ce38b5f1c0c6e04ba46282141557357bd5326
|
[
"MIT"
] | null | null | null |
hknweb/forms.py
|
Boomaa23/hknweb
|
2c2ce38b5f1c0c6e04ba46282141557357bd5326
|
[
"MIT"
] | null | null | null |
hknweb/forms.py
|
Boomaa23/hknweb
|
2c2ce38b5f1c0c6e04ba46282141557357bd5326
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import (
UserCreationForm,
SetPasswordForm,
)
from hknweb.models import User, Profile
class SettingsForm(forms.ModelForm):
class Meta:
model = User
fields = ("username", "email", "first_name", "last_name", "password")
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = (
"picture",
"private",
"phone_number",
"date_of_birth",
"resume",
"graduation_date",
"candidate_semester",
)
class SignupForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=True)
last_name = forms.CharField(max_length=30, required=True)
email = forms.EmailField(max_length=200, required=True)
def clean_email(self):
email = self.cleaned_data.get("email")
if (email is None) or not email.endswith("berkeley.edu"):
raise forms.ValidationError(
"Please a berkeley.edu email to register!", code="invalid"
)
else:
return email
class Meta:
model = User
fields = (
"first_name",
"last_name",
"username",
"email",
"password1",
"password2",
)
class UpdatePasswordForm(SetPasswordForm):
new_password1 = forms.CharField(
max_length=30, label="New password", widget=forms.PasswordInput
)
new_password1.help_text = ""
class ValidPasswordForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ("password",)
| 24.855072
| 77
| 0.593586
| 1,549
| 0.903207
| 0
| 0
| 0
| 0
| 0
| 0
| 302
| 0.176093
|
f94e553843e7ec006e6711f29cd3c8bedc298b1e
| 18,184
|
py
|
Python
|
pfstats.py
|
altinukshini/pfstats
|
90137cdfdc7c5ae72b782c3fc113d56231e2667d
|
[
"MIT"
] | 18
|
2017-09-03T19:59:08.000Z
|
2022-02-02T11:59:48.000Z
|
pfstats.py
|
altinukshini/pfstats
|
90137cdfdc7c5ae72b782c3fc113d56231e2667d
|
[
"MIT"
] | 3
|
2018-04-23T14:09:47.000Z
|
2020-09-30T10:26:16.000Z
|
pfstats.py
|
altinukshini/pfstats
|
90137cdfdc7c5ae72b782c3fc113d56231e2667d
|
[
"MIT"
] | 14
|
2017-09-03T19:59:10.000Z
|
2022-03-15T12:19:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Postfix mail log parser and filter.
This script filters and parses Postfix logs based on provided filter parameters.
Example:
To use this script type 'python pfstats.py -h'. Below is an example
that filteres postfix log file (even gziped) based on date,
sender of the email and email status::
$ python pfstats.py -d 'Jul 26' -t 'bounced' -s 'info@altinukshini.com'
Todo:
* Filter and parse logs from a year ago
* Add receiver filter
* Maybe provide from-to date filtering option
"""
__author__ = "Altin Ukshini"
__copyright__ = "Copyright (c) 2017, Altin Ukshini"
__license__ = "MIT License"
__version__ = "1.0"
__maintainer__ = "Altin Ukshini"
__email__ = "altin.ukshini@gmail.com"
__status__ = "Production"
import re
import os
import sys
import gzip
import time
import argparse
import datetime
from random import randint
from argparse import RawTextHelpFormatter
from collections import defaultdict
########################################################
# Config
########################################################
default_log_file = r'/var/log/postfix/mail.log'
default_log_dir = r'/var/log/postfix/' # Must end with slash '/'
########################################################
# Predefined variables
########################################################
sender_lines = []
status_lines = []
status_lines_by_type = {'bounced' : [], 'deferred' : [], 'sent' : [], 'rejected' : []}
status_types = ['bounced', 'deferred', 'sent', 'rejected']
file_random_no = randint(100000, 999990)
generated_results = defaultdict(dict)
working_dir = os.getcwd() + '/'
start_time = time.time()
### All this formatting bcs of postfix date format :)
date = datetime.datetime.now()
date_today_year = date.strftime("%Y")
date_today_month = date.strftime("%b")
date_today_day = date.strftime("%d").lstrip('0')
date_today = date_today_month + " " + date_today_day
if int(date_today_day) < 10:
date_today = date_today_month + " " + date_today_day
########################################################
# Functions
########################################################
def get_receiver(line):
"""Return a string
Filter line and get the email receiver to=<>.
"""
receiver = re.search('(?<=to=<).*?(?=>)', line)
return receiver.group()
def get_sender(line):
"""Return a string
Filter line and get the email sender from=<>.
"""
sender = re.search('(?<=from=<).*?(?=>)', line)
return sender.group()
def get_email_subject(line):
"""Return a string
Filter line and get the email subject Subject:.
"""
subject = re.search('(?<=Subject: ).*?(?=\sfrom)', line)
return subject.group()
def get_email_status(line):
"""Return a string
Filter line and get the email status (sent, bounced, deferred, rejected).
"""
status = re.search('(?<=status=).*?(?=\s)', line)
return status.group()
def get_host_message(line, status):
"""Return a string
Filter line and get the host message located after status.
"""
message = re.search('status=' + status + ' (.*)', line)
return message.group(1)
def get_message_id(line):
"""Return a string
Filter line and get the email/message id.
"""
return line.split()[5].replace(":","")
def get_line_date(line):
"""Return a string
Filter line and get the email date (beginning of the line).
"""
return line.split()[0] + " " + str(line.split()[1])
def check_sender_line(line):
"""Return a boolean
Check if line contains specific words to validate if that's the line
we want.
"""
return 'cleanup' in line and 'from=' in line and 'Subject' in line
def filter_line_sender_subject(line):
"""Return void
Filter line based on sender and subject message and
append it to predefined dicts.
"""
global args, sender_lines
if args.sender is not None and args.message is not None:
if args.sender in line and args.message in line:
sender_lines.append(line)
elif args.sender is not None and args.message is None:
if args.sender in line:
sender_lines.append(line)
elif args.message is not None and args.sender is None:
if args.message in line:
sender_lines.append(line)
else:
sender_lines.append(line)
def filter_line(line):
"""Return void
Filter line based on check_sender_line() and email status type and append to
corresponding predefined dicts
"""
global sender_lines, status_lines, status_lines_by_type, status_types
if check_sender_line(line):
filter_line_sender_subject(line)
elif args.type in status_types:
if str('status='+args.type) in line and 'to=' in line and 'dsn=' in line:
status_lines.append(line)
else:
if 'status=' in line and 'to=' in line and 'dsn=' in line :
line_email_status = get_email_status(line)
if line_email_status in status_types:
status_lines_by_type[line_email_status].append(line)
def check_if_gz(file_name):
"""Return a boolean
Check if filename ends with gz extension
"""
return file_name.endswith('.gz')
def filter_log_file(log_file):
"""Return a string
Open file and start filtering line by line.
Apply date filtering as well.
"""
global date_today, date_filter
if check_if_gz(log_file):
with gzip.open(log_file, 'rt') as log_file:
for line in log_file:
print(line)
if date_filter in line:
filter_line(line)
else:
with open(log_file,'r') as log_file:
for line in log_file:
if date_filter in line:
filter_line(line)
log_file.close()
def process_line(sender_line, status_lines, status_type, file):
"""Return void
For each sender, check corresponding message status by message id, extract the required
parameters from lines and write them to generated file.
"""
global args, generated_results
message_id = get_message_id(sender_line)
sender = get_sender(sender_line)
subject = get_email_subject(sender_line)
for status_line in status_lines:
if message_id in status_line:
receiver = get_receiver(status_line)
host_message = get_host_message(status_line, status_type)
line_date = get_line_date(status_line)
generated_results[status_type] += 1
file.write(
line_date + args.output_delimiter +
sender + args.output_delimiter +
receiver + args.output_delimiter +
message_id + args.output_delimiter +
subject + args.output_delimiter +
host_message + "\n")
def write_file_header(file):
"""Return void
Writes file header that represent columns.
"""
global args
file.write(
"date" + args.output_delimiter +
"sender" + args.output_delimiter +
"receiver" + args.output_delimiter +
"message_id" + args.output_delimiter +
"subject" + args.output_delimiter +
"host_message\n")
def date_filter_formated(date_filter):
"""Return datetime
Returns the date provided to a specific format '%Y %b %d'.
"""
return datetime.datetime.strptime(datetime.datetime.now().strftime('%Y ') + date_filter, '%Y %b %d')
def date_filter_int(date_filter):
"""Return int
Returns the datetime provided to a specific format '%Y%b%d' as integer.
"""
return int(date_filter_formated(date_filter).strftime('%Y%m%d'))
def get_files_in_log_dir(default_log_dir):
"""Return list
Returns a list of files from provided directory path.
"""
all_log_files = [f for f in os.listdir(default_log_dir) if os.path.isfile(os.path.join(default_log_dir, f))]
if not all_log_files:
sys.exit("Default log directory has no files in it!")
return all_log_files
def generate_files_to_check(date_filter):
"""Return list
Based on the date filter provided as argument (or today's date), generate the supposed filenames (with specific date and format)
to check in log directory. This will return two filenames.
"""
today_plusone = datetime.datetime.now() + datetime.timedelta(days = 1)
today_minusone = datetime.datetime.now() - datetime.timedelta(days = 1)
date_filter_plusone = date_filter_formated(date_filter) + datetime.timedelta(days = 1)
if (date_filter_int(date_filter) < int(datetime.datetime.now().strftime('%Y%m%d')) and
date_filter_int(date_filter) == int(today_minusone.strftime('%Y%m%d'))):
return [
'mail.log-' + datetime.datetime.now().strftime('%Y%m%d'),
'mail.log-' + date_filter_formated(date_filter).strftime('%Y%m%d') + '.gz'
]
elif (date_filter_int(date_filter) < int(datetime.datetime.now().strftime('%Y%m%d')) and
date_filter_int(date_filter) < int(today_minusone.strftime('%Y%m%d'))):
return [
'mail.log-' + date_filter_formated(date_filter).strftime('%Y%m%d') + '.gz',
'mail.log-' + date_filter_plusone.strftime('%Y%m%d') + '.gz'
]
return []
def populate_temp_log_file(file_name, temp_log_file):
"""Return void
Populates the combined temporary log file from provided log in log directory.
"""
if check_if_gz(file_name):
with gzip.open(file_name, 'rt') as gz_mail_log:
for line in gz_mail_log:
temp_log_file.write(line)
gz_mail_log.close()
else:
with open(file_name, 'r') as mail_log:
for line in mail_log:
temp_log_file.write(line)
mail_log.close()
def generate_working_log(date_filter):
"""Return void
Generates combined working log from different logs from postfix log directory based on date filter.
"""
global args, log_file, working_dir
log_dir_files = get_files_in_log_dir(args.log_dir)
selected_files = generate_files_to_check(date_filter)
temp_log_file = open(working_dir + 'temp-' + str(date_filter_formated(date_filter).strftime('%Y%m%d')) + '.log', 'w')
for selected_file in selected_files:
if selected_file in log_dir_files:
populate_temp_log_file(args.log_dir + selected_file, temp_log_file)
else:
print("File not found: " + selected_file)
temp_log_file.close()
log_file = working_dir + 'temp-' + str(date_filter_formated(date_filter).strftime('%Y%m%d')) + '.log'
def print_results(results):
"""Return void
Prints the end results of the file processing
"""
global args, file_random_no
print("\n************************* RESULTS *************************\n")
if results:
total = 0
for result in results:
total += results[result]
if result == 'sent':
print(result + ": \t" + str(results[result]) \
+ "\t\t" + result + "-" + str(file_random_no) \
+ "." + args.output_filetype)
else:
print(result + ":\t" + str(results[result]) + "\t\t" \
+ result + "-" + str(file_random_no) \
+ "." + args.output_filetype)
print("\n-----\nTotal:\t\t" + str(total))
else:
print('Results could not be printed')
print("\n***********************************************************")
if __name__ == "__main__":
########################################################
# Argument(s) Parser
########################################################
parser = argparse.ArgumentParser(description='Filter and parse Postfix log files.', formatter_class=RawTextHelpFormatter)
parser.add_argument('-d', '--date',
dest='date',
default=date_today,
metavar='',
help='''Specify different date. Default is current date.\nFormat: Jan 20 (note one space) &
Jan 2 (note two spaces).\nDefault is todays date: ''' + date_today + '\n\n')
parser.add_argument('-t', '--type',
dest='type',
default='all',
metavar='',
help='Type of email status: bounced, sent, rejected, deferred.\nDefault is all.\n\n')
parser.add_argument('-s', '--sender',
dest='sender',
metavar='',
help='Specify senders address in order to query logs matching this parameter\n\n')
parser.add_argument('-m', '--message',
dest='message',
metavar='',
help='''Postfix default log format must be changed for this option to work.
Add subject message in logs, and then you can use this option to query\nthose emails with specific subject message.\n\n''')
parser.add_argument('-l', '--log',
dest='log',
default=default_log_file,
metavar='',
help='Specify the log file you want to use.\nDefault is: ' + default_log_file + '\n\n')
parser.add_argument('--log-dir',
dest='log_dir',
default=default_log_dir,
metavar='',
help='Specify the log directory.\nDefault is: ' + default_log_dir + '\n\n')
parser.add_argument('--output-directory',
dest='output_directory',
default=working_dir,
metavar='',
help='Specify the generated file(s) directory.\nDefault is current working directory: ' + working_dir + '\n\n')
parser.add_argument('--output-delimiter',
dest='output_delimiter',
default=';',
metavar='',
help='Specify the generated output delimiter.\nDefault is ";"\n\n')
parser.add_argument('--output-filetype',
dest='output_filetype',
default='csv',
metavar='',
help='Specify the generated output file type.\nDefault is "csv"\n\n')
args = parser.parse_args()
## Validate arguments
log_file = default_log_file
date_filter = date_today
# Check if provided parameters are valid
if os.path.isfile(args.log) is not True:
parser.error('Provided log file does not exist: ' + args.log)
if args.output_directory != working_dir and args.output_directory.endswith('/') is not True:
parser.error('Generated output file(s) directory must end with slash "/"')
if args.log_dir != default_log_dir and args.log_dir.endswith('/') is not True:
parser.error('Log directory must end with slash "/"')
if os.path.exists(args.output_directory) is not True:
parser.error('Generated output file(s) directory does not exist: ' + args.output_directory)
if os.path.exists(args.log_dir) is not True:
parser.error('This log directory does not exist in this system: ' + args.log_dir + '\nMaybe provide a different log dir with --log-dir')
# If date provided, change date filter to the provided one
if args.date != date_filter:
date_filter = args.date
# If log provided, change default log file to provided one
if args.log != log_file:
log_file = args.log
########################################################
# Execution / Log parsing and filtering
########################################################
# Check if provided date is valid
if int(date_filter_formated(date_filter).strftime('%Y%m%d')) > int(datetime.datetime.now().strftime('%Y%m%d')):
sys.exit("Provided date format is wrong or higher than today's date!")
# In case the date filter is provided, and it is different from today,
# it means that we will have to generate a temp log which contains
# combined logs from default log dir (gzip logrotated files included)
if date_filter != date_today and log_file == default_log_file:
generate_working_log(date_filter)
# Start filtering log file based on provided filters
filter_log_file(log_file)
# If there were no senders/filter matches, exit
if not sender_lines:
sys.exit("No matching lines found to be processed with provided filters in log file (" + log_file + "). Exiting...")
# Start parsing
# If message status type provided, filter only those messages
if args.type in status_types:
generated_results[args.type] = 0
with open(args.output_directory + args.type + '-' \
+ str(file_random_no) + '.' \
+ args.output_filetype, 'w') as generated_file:
write_file_header(generated_file)
for sender_line in sender_lines:
process_line(sender_line, status_lines, args.type, generated_file)
generated_file.close()
# Else, filter all status types (bounced, sent, rejected, deferred)
else:
for status_type in status_types:
generated_results[status_type] = 0
with open(args.output_directory + status_type + '-' \
+ str(file_random_no) + '.' \
+ args.output_filetype, 'w') as generated_file:
write_file_header(generated_file)
for sender_line in sender_lines:
process_line(sender_line, status_lines_by_type[status_type], \
status_type, generated_file)
generated_file.close()
# Generate and print results
print_results(generated_results)
print("--- %s seconds ---" % (time.time() - start_time))
| 31.460208
| 144
| 0.592444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,801
| 0.37401
|
f951cf837ee7d78498aad48b843418086e875c47
| 1,524
|
py
|
Python
|
test/atw_config_auto.py
|
lichengwu/python_tools
|
3ebf70e6a6f6689ce2b615bed1500b8817f0b82a
|
[
"Apache-2.0"
] | null | null | null |
test/atw_config_auto.py
|
lichengwu/python_tools
|
3ebf70e6a6f6689ce2b615bed1500b8817f0b82a
|
[
"Apache-2.0"
] | null | null | null |
test/atw_config_auto.py
|
lichengwu/python_tools
|
3ebf70e6a6f6689ce2b615bed1500b8817f0b82a
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'lichengwu'
def get_groups(sharding):
sc = (int(sharding) - 1) * 8
group_list = ""
for s in xrange(sc, sc + 8):
group_list += str(s) + ","
return group_list[:-1]
def get_note(host):
v = host[3]
if v == 'm':
return 'message_' + host[17:]
else:
t = host[4]
if t == 'o':
return 'b2c_' + v + '_' + host[19:]
elif t == 'r':
return 'rt_' + v + '_' + host[19:]
elif t == 'b':
return "b2b_" + v + '_' + host[18:]
if __name__ == "__main__":
config = """atw7b010100054060.et2\t10.100.54.60
atw8b010100054070.et2\t10.100.54.70
atw5b010100054057.et2\t10.100.54.57
atw6b010100054058.et2\t10.100.54.58
atw5b010179212040.s.et2\t10.179.212.40
atw6b010179213116.s.et2\t10.179.213.116
atw7b010179213117.s.et2\t10.179.213.117
atw8b010179213164.s.et2\t10.179.213.164"""
for line in config.split("\n"):
pair = line.split("\t")
host = pair[0].strip()
ip = pair[1].strip()
sharding = host[3]
if sharding == 'm':
print "insert into atw_server_config(gmt_create, gmt_modified, server_ip, biz_type, note, group_list, start_mode) values(now(), now(), '%s', 3, '%s', '0', 2);" % (
ip, get_note(host))
else:
print "insert into atw_server_config(gmt_create, gmt_modified, server_ip, biz_type, note, group_list, start_mode) values(now(), now(), '%s', 3, '%s', '%s', 2);" % (
ip, get_note(host),get_groups(sharding))
| 30.48
| 176
| 0.571522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 700
| 0.459318
|
f9557c2acc79de6411f64feb5d4d5550266b917c
| 992
|
py
|
Python
|
release/stubs.min/System/Diagnostics/__init___parts/EventLogPermissionEntry.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Diagnostics/__init___parts/EventLogPermissionEntry.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Diagnostics/__init___parts/EventLogPermissionEntry.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class EventLogPermissionEntry(object):
"""
Defines the smallest unit of a code access security permission that is set for an System.Diagnostics.EventLog.
EventLogPermissionEntry(permissionAccess: EventLogPermissionAccess,machineName: str)
"""
@staticmethod
def __new__(self, permissionAccess, machineName):
""" __new__(cls: type,permissionAccess: EventLogPermissionAccess,machineName: str) """
pass
MachineName = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the name of the computer on which to read or write events.
Get: MachineName(self: EventLogPermissionEntry) -> str
"""
PermissionAccess = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the permission access levels used in the permissions request.
Get: PermissionAccess(self: EventLogPermissionEntry) -> EventLogPermissionAccess
"""
| 24.8
| 112
| 0.686492
| 990
| 0.997984
| 0
| 0
| 178
| 0.179435
| 0
| 0
| 609
| 0.613911
|
f9564d9454e04c5d07bedcb3655d9efe0ca449c7
| 133
|
py
|
Python
|
compound_types/built_ins/lists.py
|
vahndi/compound-types
|
cda4f49651b4bfbcd9fe199de276be472620cfad
|
[
"MIT"
] | null | null | null |
compound_types/built_ins/lists.py
|
vahndi/compound-types
|
cda4f49651b4bfbcd9fe199de276be472620cfad
|
[
"MIT"
] | null | null | null |
compound_types/built_ins/lists.py
|
vahndi/compound-types
|
cda4f49651b4bfbcd9fe199de276be472620cfad
|
[
"MIT"
] | null | null | null |
from typing import List
BoolList = List[bool]
DictList = List[dict]
FloatList = List[float]
IntList = List[int]
StrList = List[str]
| 16.625
| 23
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f956a3d5495345885097a51ce9c2704ddca7f850
| 3,396
|
py
|
Python
|
Sketches/TG/soc2007/shard_final/BranchShard.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/TG/soc2007/shard_final/BranchShard.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/TG/soc2007/shard_final/BranchShard.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Shard import *
class switchShard(shard):
"""
Generates a switch-type if statement. General form is:
...
elif <switchVar> <compare> <conditions[i]>:
shards[i]
...
Arguments:
name = name of new shard, default None. If no name is specified
a default name will be generated
switchVar = the switch variable as a string, e.g. 'event.type'
conditions = list of variables (as strings) to compare against
switchVar, one for each branch. Any branches without
conditions will be placed in an 'else' branch. Any
conditions without branches will be ignored
compare = string of comparison operator. The same operator
will be used for all branches, default is '=='
shards = list containing one shard for each branch, in the same
order as the relevant condition. If there are fewer
conditions than shards, those remaining will be placed
in an 'else' branch
"""
# default initialisation parameters
initargs = {}
initargs['name'] = None
initargs['switchVar'] = ''
initargs['conditions'] = []
initargs['shards'] = []
initargs['compare'] = '=='
# compulsory init parameters
required = ['switchVar', 'shards', 'conditions']
def __init__(self, name = None, switchVar = '', conditions = [], shards = [], compare = '=='):
if not (switchVar or shards or conditions):
raise ArgumentError, 'a switch variable and at least one branch and condition must be provided'
compare = ' ' + compare + ' '
ifbr, cond = shards.pop(0), conditions.pop(0)
ifline = ['if ' + switchVar + compare + cond + ':\n']
ifbranch = shard('if branch', shards = [ifbr])
code = ifline + ifbranch.addindent()
if len(conditions) > len(shards):
conditions = conditions[0:len(shards)] # ignore excess conditions
while conditions:
elifbr, cond = shards.pop(0), conditions.pop(0)
elifline = ['elif ' + switchVar + compare + cond + ':\n']
sh = shard('elif branch', shards = [elifbr])
code += elifline + sh.addindent()
if shards: # shards remaining, place into else branch
sh = shard('else branch', shards = shards)
code += ['else:\n'] + sh.addindent()
super(switchShard, self).__init__(shards = [code])
| 39.488372
| 107
| 0.608952
| 2,560
| 0.753828
| 0
| 0
| 0
| 0
| 0
| 0
| 2,189
| 0.644582
|
f9570198e7a5f622e1af77b862f79e6f0ce39380
| 486
|
py
|
Python
|
infrastructure/crypto_ml/agent/SimpleAgent.py
|
ATCUWgithub/CryptoML
|
6010c5daf7d985217fa76197b29331457a60a306
|
[
"MIT"
] | 1
|
2020-02-18T00:38:16.000Z
|
2020-02-18T00:38:16.000Z
|
infrastructure/crypto_ml/agent/SimpleAgent.py
|
ATCUWgithub/CryptoML
|
6010c5daf7d985217fa76197b29331457a60a306
|
[
"MIT"
] | null | null | null |
infrastructure/crypto_ml/agent/SimpleAgent.py
|
ATCUWgithub/CryptoML
|
6010c5daf7d985217fa76197b29331457a60a306
|
[
"MIT"
] | 1
|
2020-02-18T00:39:12.000Z
|
2020-02-18T00:39:12.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2020 UWATC. All rights reserved.
#
# Use of this source code is governed by an MIT license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/MIT
from .AgentTemplate import AgentTemplate
class SimpleAgent(AgentTemplate):
def handle_orders(self, place_order, cancel_order, current_orders, wallet):
# data = get_data(self.data_array[0], self.timestamp)
# Do nothing for SimpleAgent
pass
| 32.4
| 76
| 0.748971
| 204
| 0.418891
| 0
| 0
| 0
| 0
| 0
| 0
| 314
| 0.644764
|
f9576382290725337f6455bafa4ade3618c4bd12
| 8,349
|
py
|
Python
|
pod.py
|
ddh0/pod
|
5c630f609db6d4e2d6704874144faf9fe64ee15b
|
[
"MIT"
] | 1
|
2020-11-20T16:35:07.000Z
|
2020-11-20T16:35:07.000Z
|
pod.py
|
ddh0/pod
|
5c630f609db6d4e2d6704874144faf9fe64ee15b
|
[
"MIT"
] | null | null | null |
pod.py
|
ddh0/pod
|
5c630f609db6d4e2d6704874144faf9fe64ee15b
|
[
"MIT"
] | null | null | null |
# Program that downloads all episodes of a podcast
# Features
# -- Functions: add, remove, update
# - Run the file to update without having to use python interpreter
# - Download all episodes of a podcast, put into the correct folder
# - Tag each file with metadata from the feed and the stored config
import os
import sys
import pickle
import requests
import datetime
import feedparser
import subprocess
STORAGE_DIR = "C:\\Users\\Dylan\\Python\\pod\\storage\\"
LOGFILE = "C:\\Users\\Dylan\\Python\\pod\\log.txt"
FFMPEG_PATH = "C:\\Users\\Dylan\\Python\\pod\\ffmpeg.exe"
TEMP_DIR = "C:\\Users\\Dylan\\AppData\\Local\\Temp\\"
debug = False
class Podcast:
"""For internal use. Class to hold attributes of a podcast."""
def __init__(self, name: str, feed: str, storage_dir: str, prefix: str,
album: str, artist: str, year: str, art: str):
self.name = name
self.feed = feed
self.storage_dir = storage_dir
self.prefix = prefix
self.album = album
self.artist = artist
self.year = year
self.art = art
def log(text):
"""For internal use. Easily log events.
To display these events onscreen as they occur, set pod.debug = True."""
if debug: print("--debug: " + text)
with open(LOGFILE, 'a') as log:
log.write(datetime.datetime.now().isoformat() + ': ' + str(text) + '\n')
def add():
"""Creates a stored configuration file for the given feed, "*.pod", so that
the feed can be checked quickly without having to specify the URL or metadata again."""
podcast_obj = Podcast(
input("Podcast name: "),
input("Feed URL: "),
input("Storage dir: "),
input("Prefix: "),
input("Album: "),
input("Artist: "),
input("Release year: "),
input("Album art URL: ")
)
with open(STORAGE_DIR + podcast_obj.name + '.pod', 'wb') as file:
pickle.dump(podcast_obj, file)
def remove():
"""Removes the configuration file associated with the given podcast."""
name = input("Name of podcast to remove: ")
if os.path.exists(STORAGE_DIR + name + '.pod'):
os.remove(STORAGE_DIR + name + '.pod')
else:
print('-- %s does not exist' % name)
def update():
"""Checks for new entries from all feeds, download and tag new episodes."""
# For each stored podcast config
for file in os.listdir(STORAGE_DIR):
with open(STORAGE_DIR + file, 'rb') as f:
podcast_obj = pickle.load(f)
log("Updating podcast: %s" % podcast_obj.name)
print('Updating "%s":' % podcast_obj.name)
# Get feed
feed = feedparser.parse(podcast_obj.feed)
length = len(feed.entries)
# Create storage dir if it does not exist
if not os.path.exists(podcast_obj.storage_dir):
os.mkdir(podcast_obj.storage_dir)
# Download image if it does not exist
image_path = podcast_obj.storage_dir + podcast_obj.prefix + "_Album_Art.png"
if not os.path.exists(image_path):
print("Downloading podcast cover art...")
log("Downloading image")
response = requests.get(podcast_obj.art)
with open(image_path, 'wb') as imgfile:
imgfile.write(response.content)
# Set podcast-specific metadata
# image_path set above, title set per-episode
album = podcast_obj.album
artist = podcast_obj.artist
year = podcast_obj.year
# Get episodes from feed in chronological order
for i in range(length-1, -1, -1):
# Get current episode number
ep_num = length - i
display_prefix = podcast_obj.prefix + "_" + str(ep_num).zfill(3)
# Get episode title
title = feed.entries[i].title
# Get episode URL
episode_url = "" # Variables for
x = 0 # the while loop
skip_this_item = False
while ('.mp3' not in episode_url and
'.wav' not in episode_url and
'.m4a' not in episode_url):
try:
episode_url = feed.entries[i]['links'][x]['href']
except:
skip_this_item = True
break
log("episode_url: %s" % episode_url)
x += 1
if ".mp3" in episode_url:
ext = ".mp3"
if ".wav" in episode_url:
ext = ".wav"
if ".m4a" in episode_url:
ext = ".m4a"
# Get full episode destination path
# xpath is the temporary file as it was downloaded with only the name changed
# path is the final file
xpath = TEMP_DIR + display_prefix + "X" + ext
path = podcast_obj.storage_dir + display_prefix + ext
# Skip this episode if already downloaded
if os.path.exists(path):
continue
if skip_this_item:
print(display_prefix + ": Skipped due to file extension (item likely not audio)")
log(display_prefix + ": Skipped due to file extension (item likely not audio)")
skip_this_item = False
continue
# Show which episode is in progress
print(display_prefix + ': Downloading...')
log('In progress: %s' % display_prefix)
# Download episode
HEADER_STRING = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66'}
response = requests.get(episode_url, headers=HEADER_STRING)
# Fail if size is less than 1MB
if sys.getsizeof(response.content) < 1000000: # If size is less than 1MB
log("FATAL ERROR: response.content = %s bytes" % sys.getsizeof(response))
raise IOError("-- response.content was only %s bytes" % sys.getsizeof(response.content))
# Fail upon bad HTTP status code
if not response.ok:
log("FATAL ERROR: Bad response: status code %s" % response.status_code)
raise ConnectionError("-- Response not ok, status code %s" % response.status_code)
# Write mp3 data to file
# Since this is done after the download is complete, interruptions will only break episodes
# if they occur during the file being written to disk. If the script is interrupted during download,
# the script will simply restart the download of the interrupted episode on the next run.
with open(xpath, 'wb') as f:
f.write(response.content)
# Write correct metadata to clean file
# Force using ID3v2.3 tags for best results
# Only fatal errors will be displayed
print(display_prefix + ": Writing correct metadata...")
log("Writing metadata")
subprocess.run([FFMPEG_PATH, "-i", xpath, "-i", image_path, "-map", "0:0", "-map", "1:0", "-codec", "copy",
"-id3v2_version", "3", "-metadata:s:v", 'title="Album cover"',"-metadata:s:v", 'comment="Cover (front)"',
"-metadata", "track=" + str(ep_num),
"-metadata", "title=" + title,
"-metadata", "album=" + album,
"-metadata", "album_artist=" + artist,
"-metadata", "artist=" + artist,
"-metadata", "year=" + year,
"-metadata", "genre=Podcast",
"-loglevel", "fatal", path])
# Delete temporary file
os.remove(xpath)
log("Download complete: %s" % path)
log("Update complete.")
print("Files located in the following folder: %s" % podcast_obj.storage_dir)
if __name__ == '__main__':
update()
| 39.947368
| 177
| 0.548928
| 454
| 0.054378
| 0
| 0
| 0
| 0
| 0
| 0
| 3,410
| 0.408432
|
f9577ac9ab9b2574ecfc469b539a86e4c283b783
| 1,954
|
py
|
Python
|
threading_ext/RecordingThread.py
|
Antoine-BL/chess-ai.py
|
c68ca76063c14b1b8b91d338c8cead9f411521ca
|
[
"MIT"
] | 2
|
2019-08-21T15:52:29.000Z
|
2021-09-11T23:07:17.000Z
|
threading_ext/RecordingThread.py
|
Antoine-BL/chess-ai.py
|
c68ca76063c14b1b8b91d338c8cead9f411521ca
|
[
"MIT"
] | 5
|
2020-09-25T23:15:31.000Z
|
2022-02-10T00:07:33.000Z
|
threading_ext/RecordingThread.py
|
Antoine-BL/EuroTruck-ai.py
|
c68ca76063c14b1b8b91d338c8cead9f411521ca
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import cv2
from mss import mss
from threading_ext.GameRecorder import GameRecorder
from threading_ext.PausableThread import PausableThread
class RecordingThread(PausableThread):
def __init__(self, training_data_path: str, session_number: int, recorder: GameRecorder):
super(RecordingThread, self).__init__()
self.recorder = recorder
self.training_data = []
self.training_data_path = training_data_path
self.session_number = session_number
def run(self):
monitor = {"top": 40, "left": 0, "width": 1024, "height": 728}
s_to_ms = 1000
with mss() as sct:
while not self.killed:
start_time_ms = round(time.time() * s_to_ms, 0)
screen = np.asarray(sct.grab(monitor))
screen = cv2.resize(screen, (480, 270))
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
self.training_data.append([screen, np.asarray(self.recorder.flattened_state())])
self.sleep_if_paused()
self.save_if_necessary()
self.wait(start_time_ms)
def save_if_necessary(self):
if len(self.training_data) % 100 == 0:
print(len(self.training_data))
if len(self.training_data) == 500:
np.save(self.training_data_path.format(self.session_number), self.training_data)
print('saved_data in file nb {}'.format(self.session_number))
self.session_number += 1
self.training_data = []
def wait(self, start_time_ms: int):
delay_ms = 1000 / 6
end_time_ms = round(time.time() * 1000, 0)
duration_ms = end_time_ms - start_time_ms
print('loop time {}ms'.format(duration_ms))
time.sleep(max((delay_ms - duration_ms)/1000, 0))
def rewind(self):
self.session_number -= 1
self.training_data = []
| 33.689655
| 96
| 0.619754
| 1,778
| 0.909928
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.0348
|
f957da3a4215ef9104b40d885730febc525fd16f
| 638
|
py
|
Python
|
multicast/mtc_recv.py
|
Tatchakorn/Multi-threaded-Server-
|
d5502a3da942e06736d07efc8d64186bc03a23d7
|
[
"Beerware"
] | 2
|
2021-11-11T12:14:35.000Z
|
2021-12-07T15:03:41.000Z
|
multicast/mtc_recv.py
|
Tatchakorn/Multi-threaded-Server-
|
d5502a3da942e06736d07efc8d64186bc03a23d7
|
[
"Beerware"
] | null | null | null |
multicast/mtc_recv.py
|
Tatchakorn/Multi-threaded-Server-
|
d5502a3da942e06736d07efc8d64186bc03a23d7
|
[
"Beerware"
] | null | null | null |
#! /usr/bin/python3
import threading
import socket
from test import create_upd_clients
from client import multicast_receive
def test_multicast_receive():
clients = create_upd_clients(3)
def run(client: socket.socket) -> None:
multicast_receive(client)
threads = [threading.Thread(name=f'client_[{i+1}]', target=run, args=(client,))
for i, client in enumerate(clients)]
for t in threads: t.start()
for t in threads: t.join()
if __name__ == '__main__':
try:
test_multicast_receive()
except Exception as e:
print(e)
finally:
input('Press [ENTER]...')
| 24.538462
| 84
| 0.652038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.100313
|
f958f1f208280fca2b61c5a648551399de305a52
| 2,135
|
py
|
Python
|
train/name_test.py
|
csgwon/dl-pipeline
|
5ac2cdafe0daac675d3f3e810918133de3466f8a
|
[
"Apache-2.0"
] | 7
|
2018-06-26T13:09:12.000Z
|
2020-07-15T18:18:38.000Z
|
train/name_test.py
|
csgwon/dl-pipeline
|
5ac2cdafe0daac675d3f3e810918133de3466f8a
|
[
"Apache-2.0"
] | null | null | null |
train/name_test.py
|
csgwon/dl-pipeline
|
5ac2cdafe0daac675d3f3e810918133de3466f8a
|
[
"Apache-2.0"
] | 1
|
2018-08-30T19:51:08.000Z
|
2018-08-30T19:51:08.000Z
|
from tools import *
from model import *
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
class NamesDataset(Dataset):
"""Name Classification dataset"""
def __init__(self, path):
self.data = pd.read_csv(path, sep='\t').dropna()
self.X = self.data['name']
self.y = self.data['label']
def __len__(self):
return len(self.X)
def __getitem__(self, index):
content = torch.from_numpy(encode_input(self.data['name'][index])).float()
label = label_to_number[self.data['label'][index]]
sample = {'X': content, 'y': label}
return sample
name_dataset = NamesDataset('data/names/names_train_new.csv')
dataloader = DataLoader(name_dataset, batch_size=32, shuffle=True, num_workers=0)
charcnn = CharCNN(n_classes=len(set(name_data['label'])), vocab_size=len(chars), max_seq_length=max_name_len)
criterion = nn.CrossEntropyLoss()
from tqdm import tqdm_notebook
def train(model, dataloader, num_epochs):
cuda = torch.cuda.is_available()
if cuda:
model.cuda()
optimizer = torch.optim.Adam(model.parameters())
loss_history_avg = []
loss_history = []
#bar = tqdm_notebook(total=len(dataloader))
for i in range(num_epochs):
per_epoch_losses = []
for batch in dataloader:
X = Variable(batch['X'])
y = Variable(batch['y'])
if cuda:
X = X.cuda()
y = y.cuda()
model.zero_grad()
outputs = model(X)
loss = criterion(outputs, y)
loss.backward()
optimizer.step()
per_epoch_losses.append(loss.data[0])
#bar.set_postfix(loss=loss.data[0])
#bar.update(1)
loss_history_avg.append(np.mean(per_epoch_losses))
loss_history.append( loss.data[0] )
print('epoch[%d] loss: %.4f' % (i, loss.data[0]))
return loss_history, loss_history_avg
loss_history, loss_history_avg = train(charcnn, dataloader, 100)
torch.save(charcnn, 'charcnn.pth')
| 32.348485
| 109
| 0.635597
| 516
| 0.241686
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.112881
|
f95969e5274454c89e1f512e9e3893dfdf0ca196
| 737
|
py
|
Python
|
automated_logging/migrations/0019_auto_20210504_1247.py
|
rewardz/django-automated-logging
|
3f7c578b42de1e5ddc72cac79014715fc7dffa46
|
[
"MIT"
] | null | null | null |
automated_logging/migrations/0019_auto_20210504_1247.py
|
rewardz/django-automated-logging
|
3f7c578b42de1e5ddc72cac79014715fc7dffa46
|
[
"MIT"
] | null | null | null |
automated_logging/migrations/0019_auto_20210504_1247.py
|
rewardz/django-automated-logging
|
3f7c578b42de1e5ddc72cac79014715fc7dffa46
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2021-05-04 03:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('automated_logging', '0018_decoratoroverrideexclusiontest_foreignkeytest_fullclassbasedexclusiontest_fulldecoratorbasedexclusio'),
]
operations = [
migrations.AlterField(
model_name='modelevent',
name='user',
field=models.IntegerField(help_text='User ID from the Authentication system', null=True),
),
migrations.AlterField(
model_name='requestevent',
name='user',
field=models.IntegerField(help_text='User ID from the Authentication system', null=True),
),
]
| 30.708333
| 139
| 0.662144
| 646
| 0.876526
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.39213
|
f95b45ce076430bae5232cdd5ec93fdf00431354
| 2,037
|
py
|
Python
|
libdiscid/tests/common.py
|
phw/python-libdiscid
|
fac3ca94057c7da2857af2fd7bd099f726a02869
|
[
"MIT"
] | null | null | null |
libdiscid/tests/common.py
|
phw/python-libdiscid
|
fac3ca94057c7da2857af2fd7bd099f726a02869
|
[
"MIT"
] | null | null | null |
libdiscid/tests/common.py
|
phw/python-libdiscid
|
fac3ca94057c7da2857af2fd7bd099f726a02869
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2013 Sebastian Ramacher <sebastian+dev@ramacher.at>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Tests for the libdiscid module
"""
from __future__ import unicode_literals
class PutSuccess(object):
first = 1
last = 15
sectors = 258725
seconds = 3450
offsets = (150, 17510, 33275, 45910, 57805, 78310, 94650, 109580, 132010,
149160, 165115, 177710, 203325, 215555, 235590)
track_seconds = (231, 210, 168, 159, 273, 218, 199, 299, 229, 213, 168, 342,
163, 267, 308)
disc_id = 'TqvKjMu7dMliSfmVEBtrL7sBSno-'
freedb_id = 'b60d770f'
toc = ' '.join(map(str, [first, last, sectors] + list(offsets)))
class _PutFail(object):
sectors = 200
offsets = (1, 2, 3, 4, 5, 6, 7)
class PutFail1(_PutFail):
first = 13
last = 1
class PutFail2(_PutFail):
first = 0
last = 10
class PutFail2_2(_PutFail):
first = 100
last = 200
class PutFail3(_PutFail):
first = 0
last = 0
class PutFail3_2(_PutFail):
first = 1
last = 100
| 31.338462
| 79
| 0.716249
| 795
| 0.389515
| 0
| 0
| 0
| 0
| 0
| 0
| 1,210
| 0.592847
|
f95b8e23ac103c21bff72619bd1a14be401e08f2
| 161
|
py
|
Python
|
alexa_skill_boilerplate/__init__.py
|
variable/alexa_skill_boilerplate
|
c2c7fc2a3fe8f0bc69ec7559ec9b11f211d76bdc
|
[
"MIT"
] | null | null | null |
alexa_skill_boilerplate/__init__.py
|
variable/alexa_skill_boilerplate
|
c2c7fc2a3fe8f0bc69ec7559ec9b11f211d76bdc
|
[
"MIT"
] | null | null | null |
alexa_skill_boilerplate/__init__.py
|
variable/alexa_skill_boilerplate
|
c2c7fc2a3fe8f0bc69ec7559ec9b11f211d76bdc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for Alexa Skill Boilerplate."""
__author__ = """James Lin"""
__email__ = 'james@lin.net.nz'
__version__ = '0.1.0'
| 20.125
| 52
| 0.639752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.714286
|
f95ba865fff759b92ca23cecc5920a5a1660850c
| 1,881
|
py
|
Python
|
train_teachers.py
|
offthewallace/DP_CNN
|
e7f4607cbb890a348d088b515c4aa7093fadb878
|
[
"MIT"
] | 9
|
2018-02-28T06:09:23.000Z
|
2022-03-15T13:42:47.000Z
|
train_teachers.py
|
offthewallace/DP_CNN
|
e7f4607cbb890a348d088b515c4aa7093fadb878
|
[
"MIT"
] | null | null | null |
train_teachers.py
|
offthewallace/DP_CNN
|
e7f4607cbb890a348d088b515c4aa7093fadb878
|
[
"MIT"
] | 4
|
2018-01-21T06:42:10.000Z
|
2020-08-17T09:07:42.000Z
|
#Author: Wallace He
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import keras
from keras.models import Sequential
from keras.models import model_from_json
from keras.models import load_model
import partition
import train_CNN
def train_teacher (nb_teachers, teacher_id):
"""
This function trains a single teacher model with responds teacher's ID among an ensemble of nb_teachers
models for the dataset specified.
The model will be save in directory.
:param nb_teachers: total number of teachers in the ensemble
:param teacher_id: id of the teacher being trained
:return: True if everything went well
"""
# Load the dataset
X_train, X_test, y_train, y_test = train_CNN.get_dataset()
# Retrieve subset of data for this teacher
data, labels = partition.partition_dataset(X_train,
y_train,
nb_teachers,
teacher_id)
print("Length of training data: " + str(len(labels)))
# Define teacher checkpoint filename and full path
filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'
filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'
# Perform teacher training need to modify
# Create teacher model
model, opt = train_CNN.create_six_conv_layer(data.shape[1:])
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
model, hist = train_CNN.training(model, data, X_test, labels, y_test,filename, data_augmentation=True)
#modify
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(filename2)
print("Saved model to disk")
return True
| 31.35
| 105
| 0.698033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 710
| 0.377459
|
f95de109f7f76174c635351d3c9d2f28ebfb7d06
| 3,651
|
py
|
Python
|
descartes_rpa/fetch/descartes.py
|
reactome/descartes
|
7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25
|
[
"Apache-2.0"
] | 2
|
2021-08-02T18:09:07.000Z
|
2022-01-18T08:29:59.000Z
|
descartes_rpa/fetch/descartes.py
|
reactome/descartes
|
7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25
|
[
"Apache-2.0"
] | 5
|
2021-06-22T22:27:23.000Z
|
2021-08-04T02:04:09.000Z
|
descartes_rpa/fetch/descartes.py
|
reactome/descartes_rpa
|
7e7f21c5ccdf42b867db9e68fe0cb7a17d06fb25
|
[
"Apache-2.0"
] | null | null | null |
import requests
import shutil
import pandas as pd
from typing import Dict, List
def fetch_descartes_human_tissue(out_file: str, verbose: bool = True) -> None:
"""Function to fetch Loom Single-Cell tissue data from
Descartes human database.
Args:
out_file: Output file that is going to store .loom data
verbose: If True (default), print statements about download
Examples:
>>> fetch_descartes_human_tissue("Human_Tissue.loom")
"""
url = (
"https://shendure-web.gs.washington.edu/content/members/cao1025/"
"public/FCA_RNA_supp_files/scanpy_cells_all/"
"Human_RNA_processed.loom"
)
if verbose:
print("Downloading Human Single-Cell data from Descartes database")
print(f"data url: {url}")
with requests.get(url, stream=True, timeout=60) as data:
with open(out_file, 'wb') as out:
shutil.copyfileobj(data.raw, out)
if verbose:
print(f"Downloaded data to {out_file}")
def fetch_descartes_by_tissue(
list_tissues: List[str],
out_dir: str,
verbose: bool = True
) -> None:
"""Function to fetch Loom Single-Cell tissue data from
Descartes human database by choosing which tissues will be donwloaded.
Args:
list_tissues: List of tissues names to be downloaded.
out_dir: Output directory that is going to store .loom data.
verbose: If True (default), print statements about download.
Examples:
>>> fetch_descartes_by_tissue(
list_tissues=["Thymus", "Hearth"]
out_dir="data"
)
"""
base_url = (
"https://shendure-web.gs.washington.edu/content/members/cao1025/"
"public/FCA_RNA_supp_files/scanpy_cells_by_tissue"
)
for tissue in list_tissues:
url = f"{base_url}/{tissue}_processed.loom"
if verbose:
print((
f"Downloading {tissue} tissue Human Single-Cell data "
"from Descartes database"
))
print(f"data url: {url}")
file_name = f"{out_dir}/{tissue}_data.loom"
with requests.get(url, stream=True, timeout=60) as data:
with open(file_name, 'wb') as out:
shutil.copyfileobj(data.raw, out)
if verbose:
print(f"Downloaded {file_name} to {out_dir}")
def fetch_de_genes_for_cell_type(
verbose: bool = False
) -> Dict[str, List[str]]:
"""Function to fetch Differentially Expressed (DE) genes from Descartes
Human Atlas from 77 Main Cell types found in 15 Organs.
Args:
verbose: If True (default), print statements about download
Returns:
Dictionary mapping each main cell type to its differentially
expressed genes. Example: {
"Acinar cells": ["MIR1302-11", "FAM138A", ...],
"Myeloid cells": ["CU459201.1", "OR4G4P", ...] ...
}
"""
url = (
"https://atlas.fredhutch.org/data/bbi/descartes/human_gtex/"
"downloads/data_summarize_fetus_data/DE_gene_77_main_cell_type.csv"
)
if verbose:
print((
"Downloading Human Single-Cell Differentially Expressed"
"genes for 77 Main Cell types found in 15 Organs."
))
print(f"data url: {url}")
de_df = pd.read_csv(url)
cell_types = de_df["max.cluster"].unique()
de_mapping = {}
for type in cell_types:
list_genes = de_df[
de_df["max.cluster"] == type
]["gene_id"].tolist()
list_genes = [gene.replace("'", "") for gene in list_genes]
de_mapping[type] = list_genes
return de_mapping
| 30.940678
| 78
| 0.621747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,145
| 0.58751
|
f95e41290f52dc02f46f6b46f92ae4c07e63dc68
| 9,528
|
py
|
Python
|
run.py
|
magechaP/passwordlocker2.0
|
e0ae82aa650c4f1cd285f319ec16de38ba0670ea
|
[
"Unlicense",
"MIT"
] | null | null | null |
run.py
|
magechaP/passwordlocker2.0
|
e0ae82aa650c4f1cd285f319ec16de38ba0670ea
|
[
"Unlicense",
"MIT"
] | null | null | null |
run.py
|
magechaP/passwordlocker2.0
|
e0ae82aa650c4f1cd285f319ec16de38ba0670ea
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/env python3.6
import random #importing random module
from user import User #importing class User
from credential import Credential #importing class Credential
def create_credential(username,accountname,password):
"""
create_credential function that creates an instance of the class credential
"""
new_credential = Credential(username,accountname,password)
return new_credential
def create_user(name, login_password):
"""
create_user function that creates an instance of the class user
"""
new_user = User(name,login_password)
return new_user
def saveCredential(credential):
"""
saveCredential function to save the credential created by the user
"""
credential.save_credential()
def saveUser(user):
"""
saveUser function to create a user account whenever a user
signs up with password locker
"""
user.save_user()
def deleteCredential(credential):
"""
deleteCredential function that helps user delete an existing credential
"""
credential.delete_credential()
def findCredential(account_name):
"""
findCredential function to search for a credential by accountname and
return all its details
"""
return Credential.find_accountname(account_name)
def credentialExists(account_name):
"""
credentialExists function to check whether a credential exists
and return True or False
"""
return Credential.credential_exists(account_name)
def displayCredentials():
"""
displayCredentials function to display the credentials currently saved
"""
return Credential.display_credentials()
def displayUser():
"""
displayUser function to display user details if user has an account
"""
return User.display_all()
def copyUsername(account_name):
"""
copyUsername function that enables user to copy their user name to their
machine clip board
"""
return Credential.copy_username(account_name)
def copyAccountname(account_name):
"""
copyAccountname function that enables user to copy their
accountname to the machine clipboard
"""
return Credential.copy_accountname(account_name)
def copyPassword(account_name):
"""
copyPassword function that enables user to copy their password
to the machine clipboard
"""
return Credential.copy_password(account_name)
def main():
user_name = input("Hello, welcome to Password_Locker. What is your name? \n")
print("Hi {}. What would you like to do?".format(user_name))
while True:
userShortCodes = input("Use these shortcodes to pick an action: \n ca - Create Account \n ex - Exit Password Locker:\n")
if userShortCodes == "ca":
userName = input("Please enter a user name for account set up:")
loginPassword = input("Please enter your password: ")
saveUser(create_user(userName, loginPassword))
print("The following are details to your account: \n Username: {} \n password: {}".format(userName, loginPassword))
username_login = input("Thank you for signing up with us. \n please enter your username to login: ")
password_login = input("Please enter your password: ")
if loginPassword == password_login and userName == username_login:
if User.display_all():
userShortCodes = input("Would you like to proceed to your credentials? Use this short code: \n sc - See your credential: \n ")
if userShortCodes == "sc":
while True:
short_code = input("Use these shortcodes to choose an action: \n cc- create new credential \n delc - delete credential \n fc - find credential \n cp - copy credential \n ex - exit credentials \n dc -display credential :\n ").lower()
if short_code == "cc":
print("New Credential:")
username = input("Please enter your user name: \n")
accountname = input("Please enter your account name: \n")
password_choice = input("Would you like to have your password auto-generated? y/n : \n ").lower()
# Password generator
if password_choice == "n":
password = input("Please enter your password: \n")
else:
password_length = input("What length of password would you like to have? \n ")
random_password = []
for i in range (0, int(password_length)): #loop through the number of times equal to preferred length of password
random_password.append(random.randint(0,9))
def convert (random_password):
s = [str(i) for i in random_password]
res = int("".join(s))
return (res)
print ("Your generated password of ",password_length," characters is ", convert(random_password))
password = input("Please enter the above generated password: \n")
response = input("Would you like to save the credential you just made? y/n: \n")
if response == "y":
saveCredential(create_credential(username, accountname, password))
print("--" * 10)
print(f"credentials successfully saved! \n {username} \n {accountname} \n {password}")
print("--" * 10)
elif short_code == "dc":
if displayCredentials():
print("Here is a list of all your credentials: \n")
print("--" * 10)
for credential in displayCredentials():
print(f"{credential.username} \n {credential.accountname} \n {password}")
else:
print("You dont seem to have any credentials saved")
elif short_code == "fc":
search_accountname = input("Please enter the account you want to search for: \n")
if credentialExists(search_accountname):
found_credential = findCredential(search_accountname)
print("Account searched \n")
print("-- \n" * 10)
print (f"Username: {found_credential.username} \n Accountname: {found_credential.accountname} \n Password: {found_credential.password}")
else:
print("That credential does not exist")
elif short_code == "cp":
copy_account_name = input("Please enter your accountname: ")
if credentialExists(copy_account_name):
copy_choices = input("What would you like to copy from {}? \n pd - Password \n us - username \n ac - accountname \n".format(copy_account_name)).lower()
if copy_choices == "us":
copyUsername(copy_account_name)
print("Username successfully copied!")
elif copy_choices == "pd":
copyPassword(copy_account_name)
print("Password successfully copied!")
elif copy_choices == "ac":
copyAccountname(copy_account_name)
print("Account-name successfully copied!")
else:
print("{} account does not exist!".format(copy_account_name))
elif short_code == "delc":
search_accountname = input("Please enter the account you would like deleted: \n")
if credentialExists(search_accountname):
found_credential = findCredential(search_accountname)
deleteCredential(found_credential)
print(f"{search_accountname} has successfully been deleted")
else:
print(f"{search_accountname} does not exist")
elif short_code == "ex":
print("Exiting credentials........")
break
else:
print("Incorrect username or password")
elif userShortCodes == "ex":
print("Logging out.......")
break
if __name__ == '__main__':
main()
| 46.478049
| 260
| 0.524244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,532
| 0.370697
|
f96072c2c69a90c36b742be295db7b6791bf37ec
| 1,173
|
py
|
Python
|
src/Support/Scripts/AlignOperators.py
|
bobthecow/ManipulateCoda
|
5a0e03fb535cfc623070ddd44a9e09d34d313193
|
[
"MIT"
] | 5
|
2015-01-05T21:44:18.000Z
|
2017-09-08T09:31:44.000Z
|
src/Support/Scripts/AlignOperators.py
|
bobthecow/ManipulateCoda
|
5a0e03fb535cfc623070ddd44a9e09d34d313193
|
[
"MIT"
] | 3
|
2015-01-06T15:21:58.000Z
|
2019-04-09T12:03:13.000Z
|
src/Support/Scripts/AlignOperators.py
|
bobthecow/ManipulateCoda
|
5a0e03fb535cfc623070ddd44a9e09d34d313193
|
[
"MIT"
] | null | null | null |
'''Line up operators...'''
import cp_actions as cp
import re
def act(controller, bundle, options):
'''
Required action method
'''
context = cp.get_context(controller)
line_ending = cp.get_line_ending(context)
lines, range = cp.lines_and_range(context)
newlines = line_ending.join(balance_operators(lines.split(line_ending)))
cp.insert_text_and_select(context, newlines, range, cp.new_range(range.location, len(newlines)))
def balance_operators(lines):
r = re.compile("^(.*[^\s])\s*((?:==?|<<|>>|&|\||\^)=|=[>&\*]|(?<![\.\+\-\*\/~%])[\.\+\-\*\/~%]?=)\s*([^\s].*)$")
vars = []
ops = []
vals = []
ret = []
for line in lines:
result = r.match(line)
if result:
vars.append(result.group(1))
ops.append(result.group(2))
vals.append(result.group(3))
for line in lines:
result = r.match(line)
if result:
ret.append(" ".join((result.group(1).ljust(len(max(vars, key=len))), result.group(2).rjust(len(max(ops, key=len))), result.group(3))))
else:
ret.append(line)
return ret
| 27.27907
| 146
| 0.546462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 163
| 0.13896
|
f9635ddcc69eb4603c2a017bb384ecbb61ddeafe
| 1,333
|
py
|
Python
|
continuous-variables/literature-code-in-python/random_reaction_gen.py
|
YANGZ001/OrganicChem-LabMate-AI
|
fb826d85dd852aab987b9bef6856d8da6a4bd9be
|
[
"MIT"
] | null | null | null |
continuous-variables/literature-code-in-python/random_reaction_gen.py
|
YANGZ001/OrganicChem-LabMate-AI
|
fb826d85dd852aab987b9bef6856d8da6a4bd9be
|
[
"MIT"
] | null | null | null |
continuous-variables/literature-code-in-python/random_reaction_gen.py
|
YANGZ001/OrganicChem-LabMate-AI
|
fb826d85dd852aab987b9bef6856d8da6a4bd9be
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import itertools
'''
Section below creates lists for your reaction parameters. Change names of lists where appropriate
'''
#For bigger lists use np.arange(min_value, max_value, step)
Pyridine = [0.1, 0.2, 0.3] # in mmol
Aldehyde = [0.1, 0.2, 0.3] # in mmol
Isocyanide = [0.1, 0.2, 0.3] # in mmol
Temperature = [10, 20, 40, 60, 80] # in C
Solvent = [0.1, 0.25, 0.5, 1, 1.5] # in mL
Catalyst = [0, 1, 2, 3, 4, 5, 7.5, 10] # in mol%
Time = [5, 10, 15, 30, 60] # in minutes
'''
The following lines create all combos possible for the values listed above and saves as text file. Change names where appropriate.
'''
combinations = list(itertools.product(Pyridine, Aldehyde, Isocyanide, Temperature, Solvent, Catalyst, Time))
df = pd.DataFrame(combinations)
df.to_csv('all_combos716.txt', sep = '\t', header = ['Pyridine', 'Aldehyde', 'Isocyanide', 'Temperature', 'Solvent', 'Catalyst', 'Time'])
'''
Below, 10 random reaction are selected from all possible combinations. The reactions are stored in a text file. Change names of header as appropriate.
'''
random_data = df.sample(n=10, random_state=1)
df_random_data = pd.DataFrame(random_data)
df_random_data.to_csv('train_data716.txt', sep= '\t', header = ['Pyridine', 'Aldehyde', 'Isocyanide', 'Temperature', 'Solvent', 'Catalyst', 'Time'])
| 37.027778
| 150
| 0.701425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 707
| 0.530383
|
f96819f7fca3841f9dba660989413c0f8440f951
| 183
|
py
|
Python
|
agent/tandem/agent/models/connection_state.py
|
geoffxy/tandem
|
81e76f675634f1b42c8c3070c73443f3f68f8624
|
[
"Apache-2.0"
] | 732
|
2018-03-11T03:35:17.000Z
|
2022-01-06T12:22:03.000Z
|
agent/tandem/agent/models/connection_state.py
|
geoffxy/tandem
|
81e76f675634f1b42c8c3070c73443f3f68f8624
|
[
"Apache-2.0"
] | 21
|
2018-03-11T02:28:22.000Z
|
2020-08-30T15:36:40.000Z
|
plugin/tandem_lib/agent/tandem/agent/models/connection_state.py
|
typeintandem/vim
|
e076a9954d73ccb60cd6828e53adf8da76462fc6
|
[
"Apache-2.0"
] | 24
|
2018-03-14T05:37:17.000Z
|
2022-01-18T14:44:42.000Z
|
import enum
class ConnectionState(enum.Enum):
PING = "ping"
SEND_SYN = "syn"
WAIT_FOR_SYN = "wait"
OPEN = "open"
RELAY = "relay"
UNREACHABLE = "unreachable"
| 16.636364
| 33
| 0.617486
| 168
| 0.918033
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.234973
|
f9686a6e64b3ada450c52aa9db27ba394fa0f073
| 2,241
|
py
|
Python
|
mechroutines/es/newts/_fs.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechroutines/es/newts/_fs.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | null | null | null |
mechroutines/es/newts/_fs.py
|
keceli/mechdriver
|
978994ba5c77b6df00078b639c4482dacf269440
|
[
"Apache-2.0"
] | 8
|
2019-12-18T20:09:46.000Z
|
2020-11-14T16:37:28.000Z
|
""" rpath task function
"""
from mechlib import filesys
from mechlib.filesys import build_fs
from mechlib.filesys import root_locs
def rpath_fs(ts_dct, tsname,
mod_ini_thy_info,
es_keyword_dct,
run_prefix, save_prefix):
""" reaction path filesystem
"""
# Set up coordinate name
rxn_coord = es_keyword_dct.get('rxncoord')
# Get the zma and ts locs
zma_locs = (ts_dct['zma_idx'],)
ts_locs = (int(tsname.split('_')[-1]),)
# Build filesys object down to TS FS
ts_fs = build_fs(
run_prefix, save_prefix, 'TRANSITION STATE',
thy_locs=mod_ini_thy_info[1:],
**root_locs(ts_dct, saddle=True))
ini_ts_run_fs, ini_ts_save_fs = ts_fs
# generate fs
if rxn_coord == 'irc':
# Try and locate a minimum-energy conformer
cnf_fs = build_fs(
run_prefix, save_prefix, 'CONFORMER',
thy_locs=mod_ini_thy_info[1:],
**root_locs(ts_dct, saddle=True, name=tsname))
ini_cnf_run_fs, ini_cnf_save_fs = cnf_fs
ini_loc_info = filesys.mincnf.min_energy_conformer_locators(
ini_cnf_save_fs, mod_ini_thy_info)
ini_min_locs, ini_pfx_save_path = ini_loc_info
if any(ini_min_locs):
# Run IRC from saddle point minimum-energy conformer
ini_pfx_run_path = ini_cnf_run_fs[-1].path(ini_min_locs)
ini_pfx_save_path = ini_cnf_save_fs[-1].path(ini_min_locs)
scn_alg = 'irc-sadpt'
else:
# Run IRC from series of points {Rmax, Rmax-1, ...}
ini_pfx_run_path = ini_ts_run_fs[-1].path(ts_locs)
ini_pfx_save_path = ini_ts_save_fs[-1].path(ts_locs)
scn_alg = 'irc-rmax'
else:
# Run a scan along the requested reaction coordinates
# Have an auto option that just selects the coordinate?
ini_pfx_run_path = ini_ts_run_fs[-1].path(ts_locs)
ini_pfx_save_path = ini_ts_save_fs[-1].path(ts_locs)
scn_alg = 'drp'
# Set up the scan filesystem objects using the predefined prefix
scn_fs = build_fs(
ini_pfx_run_path, ini_pfx_save_path, 'SCAN',
zma_locs=zma_locs)
return scn_alg, scn_fs, cnf_fs, ini_min_locs
| 33.954545
| 70
| 0.646586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 567
| 0.253012
|
f968be6f1cca8629346c90e2699c898d9571ac20
| 1,361
|
py
|
Python
|
computation/listallfiles.py
|
thirschbuechler/didactic-barnacles
|
88d0a2b572aacb2cb45e68bb4f05fa5273224439
|
[
"MIT"
] | null | null | null |
computation/listallfiles.py
|
thirschbuechler/didactic-barnacles
|
88d0a2b572aacb2cb45e68bb4f05fa5273224439
|
[
"MIT"
] | null | null | null |
computation/listallfiles.py
|
thirschbuechler/didactic-barnacles
|
88d0a2b572aacb2cb45e68bb4f05fa5273224439
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 14:38:54 2020
@author: https://stackoverflow.com/questions/18262293/how-to-open-every-file-in-a-folder
"""
import os #os module imported here
location = os.getcwd() # get present working directory location here
counter = 0 #keep a count of all files found
csvfiles = [] #list to store all csv files found at location
filebeginwithhello = [] # list to keep all files that begin with 'hello'
otherfiles = [] #list to keep any other file that do not match the criteria
for file in os.listdir(location):
try:
if file.endswith(".csv"):
print( "csv file found:\t", file)
csvfiles.append(str(file))
counter = counter+1
elif file.endswith(".csv"): #because some files may start with hello and also be a csv file
print( "csv file found:\t", file)
csvfiles.append(str(file))
counter = counter+1
elif file.startswith("hello"):
print( "hello files found: \t", file)
filebeginwithhello.append(file)
counter = counter+1
else:
otherfiles.append(file)
print(file)
counter = counter+1
except Exception as e:
raise e
print("No files found here!")
print("Total files found:\t", counter)
| 34.897436
| 100
| 0.603968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 609
| 0.447465
|
f96a73f7ebbc6d2b474f86a30e29cb3233db9724
| 5,317
|
py
|
Python
|
rootfs/api/apps_extra/social_core/actions.py
|
jianxiaoguo/controller
|
8cc1e11601e5725e583f0fa82cdb2c10872ca485
|
[
"Apache-2.0"
] | null | null | null |
rootfs/api/apps_extra/social_core/actions.py
|
jianxiaoguo/controller
|
8cc1e11601e5725e583f0fa82cdb2c10872ca485
|
[
"Apache-2.0"
] | 19
|
2020-07-30T06:31:29.000Z
|
2022-03-14T07:33:44.000Z
|
rootfs/api/apps_extra/social_core/actions.py
|
jianxiaoguo/controller
|
8cc1e11601e5725e583f0fa82cdb2c10872ca485
|
[
"Apache-2.0"
] | 9
|
2020-07-30T02:50:12.000Z
|
2020-12-11T06:44:19.000Z
|
from urllib.parse import quote
from social_core.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, partial_pipeline_data, setting_url
def do_auth(backend, redirect_name='next'):
# Save any defined next value into session
data = backend.strategy.request_data(merge=False)
# Save extra data into session.
for field_name in backend.setting('FIELDS_STORED_IN_SESSION', []):
if field_name in data:
backend.strategy.session_set(field_name, data[field_name])
else:
backend.strategy.session_set(field_name, None)
# uri = None
if redirect_name in data:
# Check and sanitize a user-defined GET/POST next field value
redirect_uri = data[redirect_name]
if backend.setting('SANITIZE_REDIRECTS', True):
allowed_hosts = backend.setting('ALLOWED_REDIRECT_HOSTS', []) + \
[backend.strategy.request_host()]
redirect_uri = sanitize_redirect(allowed_hosts, redirect_uri)
backend.strategy.session_set(
redirect_name,
redirect_uri or backend.setting('LOGIN_REDIRECT_URL')
)
response = backend.start()
url = response.url.split('?')[1]
def form2json(form_data):
from urllib.parse import parse_qs, urlparse
query = urlparse('?' + form_data).query
params = parse_qs(query)
return {key: params[key][0] for key in params}
from django.core.cache import cache
cache.set("oidc_key_" + data.get('key', ''), form2json(url).get('state'), 60 * 10)
return response
def do_complete(backend, login, user=None, redirect_name='next',
*args, **kwargs):
data = backend.strategy.request_data()
is_authenticated = user_is_authenticated(user)
user = user if is_authenticated else None
partial = partial_pipeline_data(backend, user, *args, **kwargs)
if partial:
user = backend.continue_pipeline(partial)
# clean partial data after usage
backend.strategy.clean_partial_pipeline(partial.token)
else:
user = backend.complete(user=user, *args, **kwargs)
# pop redirect value before the session is trashed on login(), but after
# the pipeline so that the pipeline can change the redirect if needed
redirect_value = backend.strategy.session_get(redirect_name, '') or \
data.get(redirect_name, '')
# check if the output value is something else than a user and just
# return it to the client
user_model = backend.strategy.storage.user.user_model()
if user and not isinstance(user, user_model):
return user
if is_authenticated:
if not user:
url = setting_url(backend, redirect_value, 'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'NEW_ASSOCIATION_REDIRECT_URL',
'LOGIN_REDIRECT_URL')
elif user:
if user_is_active(user):
# catch is_new/social_user in case login() resets the instance
is_new = getattr(user, 'is_new', False)
social_user = user.social_user
login(backend, user, social_user)
# store last login backend name in session
backend.strategy.session_set('social_auth_last_login_backend',
social_user.provider)
if is_new:
url = setting_url(backend,
'NEW_USER_REDIRECT_URL',
redirect_value,
'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'LOGIN_REDIRECT_URL')
else:
if backend.setting('INACTIVE_USER_LOGIN', False):
social_user = user.social_user
login(backend, user, social_user)
url = setting_url(backend, 'INACTIVE_USER_URL', 'LOGIN_ERROR_URL',
'LOGIN_URL')
else:
url = setting_url(backend, 'LOGIN_ERROR_URL', 'LOGIN_URL')
if redirect_value and redirect_value != url:
redirect_value = quote(redirect_value)
url += ('&' if '?' in url else '?') + \
'{0}={1}'.format(redirect_name, redirect_value)
if backend.setting('SANITIZE_REDIRECTS', True):
allowed_hosts = backend.setting('ALLOWED_REDIRECT_HOSTS', []) + \
[backend.strategy.request_host()]
url = sanitize_redirect(allowed_hosts, url) or \
backend.setting('LOGIN_REDIRECT_URL')
response = backend.strategy.redirect(url)
social_auth = user.social_auth.filter(provider='drycc').\
order_by('-modified').last()
response.set_cookie("name", user.username,
max_age=social_auth.extra_data.get('expires_in'))
response.set_cookie("id_token", social_auth.extra_data.get('id_token'),
max_age=social_auth.extra_data.get('expires_in'))
from django.core.cache import cache
cache.set("oidc_state_" + data.get('state'),
{'token': social_auth.extra_data.get('id_token', 'fail'),
'username': user.username},
60 * 10)
return response
| 42.536
| 86
| 0.618018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,123
| 0.211209
|
f96c7c015c3ad71d48f4085619b3a3dcae5954cc
| 1,667
|
py
|
Python
|
Part_1/src/manual_split_test/make_paired_cases.py
|
Bhaskers-Blu-Org2/datascience4managers
|
2410182fe6913a8c986d2f28f5db6850cddb75f2
|
[
"MIT"
] | 8
|
2019-11-24T08:23:12.000Z
|
2021-01-19T02:48:46.000Z
|
Part_1/src/manual_split_test/make_paired_cases.py
|
Bhaskers-Blu-Org2/datascience4managers
|
2410182fe6913a8c986d2f28f5db6850cddb75f2
|
[
"MIT"
] | 1
|
2021-06-02T02:05:15.000Z
|
2021-06-02T02:05:15.000Z
|
Part_1/src/manual_split_test/make_paired_cases.py
|
microsoft/datascience4managers
|
7c332bf23a85f281237c841e1981ab21ed4ca072
|
[
"MIT"
] | 9
|
2019-10-29T18:45:36.000Z
|
2021-03-27T07:23:13.000Z
|
#!/usr/bin/python
# Oct 2019 JMA
# make_samples.py Use the splits_aggregator module to create samples
'''
Write a short description of what the program does here.
Usage:
$ ./make_samples.py [-v] [-d ROOT_DIR] [-c pair_cnt]
-v verbose output
-d data directory to read from
-c number of randomly paired cases to generate
'''
import os, sys
import glob
import pprint
import re
import time
from pathlib import Path
import pyarrow
import pandas as pd
import splits_aggregator as sa
### config constants
VERBOSE = False
ROOT_DIR = Path('D:/OneDrive - Microsoft/data/20news')
PAIR_CNT = 1
########################################################################
class x(object):
''
pass
###############################################################################
def main(input_dir, pair_cnt):
cs = sa.BinaryComparisons(input_dir)
pairs_df = cs.random_pairs(pair_cnt)
if VERBOSE:
print("Pairs: ", pairs_df.shape)
cs.embed_in_excel(pairs_df)
########################################################################
if __name__ == '__main__':
if '-v' in sys.argv:
k = sys.argv.index('-v')
VERBOSE = True
## Inputs
if '-d' in sys.argv:
d = sys.argv.index('-d')
ROOT_DIR = Path(sys.argv[d+1])
# else:
if '-c' in sys.argv:
g = sys.argv.index('-c')
PAIR_CNT= int(sys.argv[g+1])
main(ROOT_DIR, PAIR_CNT)
print(sys.argv, "\nDone in ",
'%5.3f' % time.process_time(),
" secs! At UTC: ",
time.asctime(time.gmtime()), file=sys.stderr)
| 24.880597
| 80
| 0.515297
| 34
| 0.020396
| 0
| 0
| 0
| 0
| 0
| 0
| 732
| 0.439112
|
f96ca18e0cac4358cc1ae51e86890db7bc505477
| 1,550
|
py
|
Python
|
src/pySUMOQt/Designer/css_rc.py
|
pySUMO/pysumo
|
889969f94bd45e2b67e25ff46452378351ca5186
|
[
"BSD-2-Clause"
] | 7
|
2015-08-21T17:17:35.000Z
|
2021-03-02T21:40:00.000Z
|
src/pySUMOQt/Designer/css_rc.py
|
pySUMO/pysumo
|
889969f94bd45e2b67e25ff46452378351ca5186
|
[
"BSD-2-Clause"
] | 2
|
2015-04-14T12:40:37.000Z
|
2015-04-14T12:44:03.000Z
|
src/pySUMOQt/Designer/css_rc.py
|
pySUMO/pysumo
|
889969f94bd45e2b67e25ff46452378351ca5186
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Di. Feb 3 12:11:53 2015
# by: The Resource Compiler for PySide (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x009QStatusBar::item \x0a{ \x0a\x09border-left: 1px solid #AAAAAA;\x0a} \x0a\x00\x00\x00\x00"
qt_resource_name = b"\x00\x03\x00\x00j\xa3\x00c\x00s\x00s\x00\x0e\x0bq\xe6\xc3\x00M\x00a\x00i\x00n\x00W\x00i\x00n\x00d\x00o\x00w\x00.\x00c\x00s\x00s\x00\x0b\x08\x22\xc1\xc3\x00M\x00e\x00n\x00u\x00b\x00a\x00r\x00.\x00c\x00s\x00s\x00\x10\x05[\x0fC\x00P\x00y\x00S\x00u\x00m\x00o\x00W\x00i\x00d\x00g\x00e\x00t\x00.\x00c\x00s\x00s\x00\x0d\x0d\xd42\xe3\x00S\x00t\x00a\x00t\x00u\x00s\x00b\x00a\x00r\x00.\x00c\x00s\x00s\x00\x0b\x083\x86\xe3\x00T\x00o\x00o\x00l\x00b\x00a\x00r\x00.\x00c\x00s\x00s"
qt_resource_struct = b"\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x05\x00\x00\x00\x02\x00\x00\x00J\x00\x00\x00\x00\x00\x01\x00\x00\x00\x08\x00\x00\x00.\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\x00I\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00p\x00\x00\x00\x00\x00\x01\x00\x00\x00\x0c"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 70.454545
| 488
| 0.75871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,194
| 0.770323
|
f96e0469991c8e15ab4a23bec3525036f33b7b33
| 12,081
|
py
|
Python
|
tests/test_core.py
|
sobamchan/lineflow
|
708a875c090b7df48c9eca3f630915a9c6e5bbd6
|
[
"MIT"
] | null | null | null |
tests/test_core.py
|
sobamchan/lineflow
|
708a875c090b7df48c9eca3f630915a9c6e5bbd6
|
[
"MIT"
] | null | null | null |
tests/test_core.py
|
sobamchan/lineflow
|
708a875c090b7df48c9eca3f630915a9c6e5bbd6
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch, Mock
import lineflow
from lineflow import Dataset
from lineflow.core import ConcatDataset, ZipDataset
from lineflow.core import RandomAccessConcat, RandomAccessZip
class RandomAccessConcatTestCase(TestCase):
def setUp(self):
self.n = 5
self.base = range(100)
self.data = RandomAccessConcat(*[self.base for _ in range(5)])
def test_init(self):
self.assertEqual(len(self.data._datasets), self.n)
self.assertIsNone(self.data._offsets)
self.assertIsNone(self.data._length)
def test_iter(self):
for x, y in zip(self.data, list(self.base) * self.n):
self.assertEqual(x, y)
self.assertIsNone(self.data._offsets)
self.assertIsNone(self.data._length)
def test_supports_random_access_lazily(self):
import itertools
self.assertIsNone(self.data._offsets)
self.assertSequenceEqual(self.data, list(self.base) * self.n)
expected_lengths = list(itertools.accumulate(len(self.base) for _ in range(self.n)))
self.assertListEqual(self.data._lengths, expected_lengths)
self.assertListEqual(self.data._offsets, [0] + expected_lengths[:-1])
def test_raises_index_error_with_invalid_index(self):
with self.assertRaises(IndexError):
self.data[len(self.data)]
self.data[-1]
def test_returns_length_lazily(self):
self.assertIsNone(self.data._length)
self.assertEqual(len(self.data), len(self.base) * self.n)
self.assertEqual(self.data._length, len(self.data))
class RandomAccessZipTestCase(TestCase):
def setUp(self):
self.n = 5
self.base = range(100)
self.data = RandomAccessZip(*[self.base for _ in range(5)])
def test_init(self):
self.assertEqual(len(self.data._datasets), self.n)
self.assertIsNone(self.data._length)
def test_iter(self):
for x, y in zip(self.data, self.base):
self.assertEqual(x, tuple([y] * self.n))
self.assertIsNone(self.data._length)
def test_supports_random_access(self):
self.assertSequenceEqual(self.data, list(zip(*[self.base for _ in range(self.n)])))
def test_raises_index_error_with_invalid_index(self):
with self.assertRaises(IndexError):
self.data[len(self.data)]
self.data[-1]
def test_returns_lengths_lazily(self):
self.assertIsNone(self.data._length)
self.assertEqual(len(self.data), len(self.base))
self.assertEqual(self.data._length, len(self.data))
class DatasetTestCase(TestCase):
def setUp(self):
self.base = range(100)
self.data = Dataset(self.base)
def test_getitem(self):
self.assertSequenceEqual(self.data, self.base)
def test_supports_slicing(self):
slice1 = slice(10, 20)
slice2 = slice(0, 99)
self.assertListEqual(self.data[slice1], list(self.base[slice1]))
self.assertListEqual(self.data[slice2], list(self.base[slice2]))
def test_len(self):
self.assertEqual(len(self.data), len(self.base))
def test_add(self):
data = self.data + self.data + self.data
expected = list(self.base) * 3
self.assertSequenceEqual(data, expected)
def test_map(self):
def f(x): return x ** 2
self.assertSequenceEqual(
self.data.map(f),
list(map(f, self.base)))
def test_keeps_original_dataset_after_multiple_maps(self):
def f(x): return x
data = self.data
for i in range(100):
data = data.map(f)
self.assertEqual(data._dataset, self.base)
self.assertEqual(len(data._funcs), i + 1)
def test_supports_method_chain(self):
data = self.data.map(lambda x: x ** 2).map(lambda x: x / 2)
self.assertSequenceEqual(
data, [x ** 2 / 2 for x in self.base])
def test_all(self):
self.assertListEqual(self.data.all(), list(self.base))
def test_first(self):
self.assertEqual(self.data.first(), self.base[0])
def test_take(self):
n = 50
self.assertListEqual(self.data.take(n), list(self.base[:n]))
@patch('lineflow.core.Path.open')
@patch('lineflow.core.Path')
@patch('lineflow.core.pickle.dump')
def test_saves_yourself(self, pickle_dump_mock, Path_mock, open_mock):
path = Mock()
Path_mock.return_value = path
# Assume cache doesn't exist, but a directory exists
path.exists.return_value = False
path.parent.exists.return_value = True
# Setup Path.open
fp = Mock()
open_mock.return_value.__enter__.return_value = fp
path.open = open_mock
filepath = '/path/to/cache'
data = self.data.save(filepath)
path.exists.assert_called_once()
path.parent.exists.assert_called_once()
path.open.assert_called_once_with('wb')
pickle_dump_mock.assert_called_once_with(self.data.all(), fp)
self.assertIsInstance(data, lineflow.core.CacheDataset)
@patch('lineflow.core.Path.open')
@patch('lineflow.core.Path')
@patch('lineflow.core.pickle.dump')
def test_makes_a_directory_and_saves_yourself(self,
pickle_dump_mock,
Path_mock,
open_mock):
path = Mock()
Path_mock.return_value = path
# Assume cache doesn't exist, also a directory doesn't exist
path.exists.return_value = False
path.parent.exists.return_value = False
# Setup Path.open
fp = Mock()
open_mock.return_value.__enter__.return_value = fp
path.open = open_mock
filepath = '/path/to/cache'
data = self.data.save(filepath)
path.exists.assert_called_once()
path.parent.exists.assert_called_once()
path.parent.mkdir.assert_called_once_with(parents=True)
path.open.assert_called_once_with('wb')
pickle_dump_mock.assert_called_once_with(self.data.all(), fp)
self.assertIsInstance(data, lineflow.core.CacheDataset)
@patch('lineflow.core.Path.open')
@patch('lineflow.core.Path')
@patch('lineflow.core.pickle.dump')
def test_maps_func_and_saves_yourself(self,
pickle_dump_mock,
Path_mock,
open_mock):
path = Mock()
Path_mock.return_value = path
# Assume cache doesn't exist, but a directory exists
path.exists.return_value = False
path.parent.exists.return_value = True
# Setup Path.open
fp = Mock()
open_mock.return_value.__enter__.return_value = fp
path.open = open_mock
filepath = '/path/to/cache'
data = self.data.map(lambda x: x ** 2).save(filepath)
path.exists.assert_called_once()
path.parent.exists.assert_called_once()
path.open.assert_called_once_with('wb')
pickle_dump_mock.assert_called_once_with(data.all(), fp)
self.assertIsInstance(data, lineflow.core.CacheDataset)
self.assertListEqual(data._dataset, [x ** 2 for x in self.base])
for i, x in enumerate(data):
y = self.data[i] ** 2
self.assertEqual(x, y)
self.assertEqual(data[i], y)
@patch('lineflow.core.Path.open')
@patch('lineflow.core.Path')
@patch('lineflow.core.pickle.load')
def test_loads_existed_cache_implicitly(self,
pickle_load_mock,
Path_mock,
open_mock):
path = Mock()
Path_mock.return_value = path
# Assume cache exists
path.exists.return_value = True
# Setup Path.open
fp = Mock()
open_mock.return_value.__enter__.return_value = fp
path.open = open_mock
# Setup pickle.load
pickle_load_mock.return_value = list(self.base)
filepath = '/path/to/cache'
data = self.data.save(filepath)
path.exists.assert_called_once()
path.open.assert_called_once_with('rb')
pickle_load_mock.assert_called_once_with(fp)
self.assertIsInstance(data, lineflow.core.CacheDataset)
@patch('lineflow.core.open')
@patch('lineflow.core.pickle.load')
def test_load(self, pickle_load_mock, open_mock):
pickle_load_mock.return_value = list(self.base)
enter_mock = Mock()
open_mock.return_value.__enter__.return_value = enter_mock
filepath = '/path/to/dataset'
data = lineflow.load(filepath)
open_mock.assert_called_once_with(filepath, 'rb')
pickle_load_mock.assert_called_once_with(enter_mock)
self.assertListEqual(data.all(), list(self.base))
self.assertEqual(data._dataset, list(self.base))
with self.assertWarns(DeprecationWarning):
lineflow.Dataset.load(filepath)
class LineflowConcatTestCase(TestCase):
def setUp(self):
self.base = range(100)
self.n = 5
self.data = lineflow.concat(*[Dataset(self.base)] * self.n)
def test_returns_concat_dataset(self):
self.assertIsInstance(self.data, ConcatDataset)
self.assertIsInstance(self.data._dataset, RandomAccessConcat)
def test_keeps_original_dataset_after_multiple_maps(self):
def f(x): return x
data = self.data
for i in range(100):
data = data.map(f)
self.assertIsInstance(data._dataset, RandomAccessConcat)
self.assertEqual(len(data._funcs), i + 1)
def test_supports_random_access(self):
self.assertSequenceEqual(self.data, list(self.base) * self.n)
class LineflowZipTestCase(TestCase):
def setUp(self):
self.base = range(100)
self.n = 5
self.data = lineflow.zip(*[Dataset(self.base)] * self.n)
def test_returns_zip_dataset(self):
self.assertIsInstance(self.data, ZipDataset)
self.assertIsInstance(self.data._dataset, RandomAccessZip)
def test_keeps_original_dataset_after_multiple_maps(self):
def f(x): return x
data = self.data
for i in range(100):
data = data.map(f)
self.assertIsInstance(data._dataset, RandomAccessZip)
self.assertEqual(len(data._funcs), i + 1)
def test_supports_random_access(self):
self.assertSequenceEqual(self.data, list(zip(*[self.base for _ in range(self.n)])))
class LineflowFilterTestCase(TestCase):
def setUp(self):
self.data = Dataset(range(100))
def test_returns_filtered_data_eagerly(self):
result = lineflow.filter(lambda x: x % 2 == 0, self.data)
expected = [x for x in self.data if x % 2 == 0]
self.assertListEqual(result, expected)
def test_returns_filtered_data_lazily(self):
result = lineflow.filter(lambda x: x % 2 == 0, self.data, lazy=True)
self.assertIsInstance(result, filter)
expected = [x for x in self.data if x % 2 == 0]
for x, y in zip(result, expected):
self.assertEqual(x, y)
class LineflowFlatMapTestCase(TestCase):
def setUp(self):
self.data = Dataset(range(100))
def test_returns_flat_mapped_data_eagerly(self):
result = lineflow.flat_map(lambda x: [x] * 3, self.data)
expected = [[x] * 3 for x in self.data]
expected = [x for xs in expected for x in xs]
self.assertListEqual(result, expected)
def test_returns_flat_mapped_data_lazily(self):
import itertools
result = lineflow.flat_map(lambda x: [x] * 3, self.data, lazy=True)
self.assertIsInstance(result, itertools.chain)
expected = list(itertools.chain.from_iterable(
[[x] * 3 for x in self.data]))
for x, y in zip(result, expected):
self.assertEqual(x, y)
| 35.017391
| 92
| 0.632067
| 11,832
| 0.979389
| 0
| 0
| 4,889
| 0.404685
| 0
| 0
| 709
| 0.058687
|
f96ed484656fab8971f82e7e48fafd3dcd557e30
| 2,393
|
py
|
Python
|
aerosandbox/tools/miscellaneous.py
|
SzymonSzyszko/AeroSandbox
|
d4084899b665f735c1ec218282b2e4aee08eacff
|
[
"MIT"
] | null | null | null |
aerosandbox/tools/miscellaneous.py
|
SzymonSzyszko/AeroSandbox
|
d4084899b665f735c1ec218282b2e4aee08eacff
|
[
"MIT"
] | null | null | null |
aerosandbox/tools/miscellaneous.py
|
SzymonSzyszko/AeroSandbox
|
d4084899b665f735c1ec218282b2e4aee08eacff
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
def eng_string(x, format='%.3g', si=True):
'''
Taken from: https://stackoverflow.com/questions/17973278/python-decimal-engineering-notation-for-mili-10e-3-and-micro-10e-6/40691220
Returns float/int value <x> formatted in a simplified engineering format -
using an exponent that is a multiple of 3.
format: printf-style string used to format the value before the exponent.
si: if true, use SI suffix for exponent, e.g. k instead of e3, n instead of
e-9 etc.
E.g. with format='%.2f':
1.23e-08 => 12.30e-9
123 => 123.00
1230.0 => 1.23e3
-1230000.0 => -1.23e6
and with si=True:
1230.0 => 1.23k
-1230000.0 => -1.23M
'''
sign = ''
if x < 0:
x = -x
sign = '-'
exp = int(math.floor(math.log10(x)))
exp3 = exp - (exp % 3)
x3 = x / (10 ** exp3)
if si and exp3 >= -24 and exp3 <= 24 and exp3 != 0:
exp3_text = 'yzafpnum kMGTPEZY'[(exp3 + 24) // 3]
elif exp3 == 0:
exp3_text = ''
else:
exp3_text = 'e%s' % exp3
return ('%s' + format + '%s') % (sign, x3, exp3_text)
remove_nans = lambda x: x[~np.isnan(x)]
import sys
import os
from contextlib import contextmanager
@contextmanager
def stdout_redirected(to=os.devnull):
'''
From StackOverflow: https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python
Usage:
import os
with stdout_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
fd = sys.stdout.fileno()
##### assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
with open(to, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
| 28.488095
| 136
| 0.600501
| 0
| 0
| 1,110
| 0.463853
| 1,126
| 0.470539
| 0
| 0
| 1,362
| 0.56916
|
f96ffcf56dbd7255308f0925c87a38f826c98376
| 595
|
py
|
Python
|
web_app/routes/Directory.py
|
AmyBeisel/BW_Med_Cabinet
|
3cce7ff14f2324cdb81a7a1ea313037a6e3eead6
|
[
"MIT"
] | null | null | null |
web_app/routes/Directory.py
|
AmyBeisel/BW_Med_Cabinet
|
3cce7ff14f2324cdb81a7a1ea313037a6e3eead6
|
[
"MIT"
] | null | null | null |
web_app/routes/Directory.py
|
AmyBeisel/BW_Med_Cabinet
|
3cce7ff14f2324cdb81a7a1ea313037a6e3eead6
|
[
"MIT"
] | null | null | null |
# Directory.py
# Import
from flask import Blueprint, render_template
# Make Blueprint for __init__.py
Directory = Blueprint("Directory", __name__)
# App Welcome Page
@Directory.route('/')
def index():
return render_template("home.html", message = "DS Med Cabinet API using natural language processing to recommend the best cannabis strains to Med Cabinet members.")
# Strain JSON Page
@Directory.route("/strainjson")
def df():
return render_template("json.html")
# Strain Table Page
@Directory.route("/straintable")
def dataframe():
return render_template("df.html")
| 17.5
| 168
| 0.734454
| 0
| 0
| 0
| 0
| 371
| 0.623529
| 0
| 0
| 298
| 0.50084
|
f97211763e2fc9e54ef976a18d06e39b22339ef9
| 567
|
py
|
Python
|
saleor/product/migrations/0125_auto_20200916_1511.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/product/migrations/0125_auto_20200916_1511.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/product/migrations/0125_auto_20200916_1511.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
# Generated by Django 3.1.1 on 2020-09-16 15:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("product", "0124_auto_20200909_0904"),
]
operations = [
migrations.AlterModelOptions(
name="productvariant",
options={"ordering": ("sort_order", "sku")},
),
migrations.AddField(
model_name="productvariant",
name="sort_order",
field=models.IntegerField(db_index=True, editable=False, null=True),
),
]
| 24.652174
| 80
| 0.59612
| 474
| 0.835979
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.268078
|
f974a1282e8728c135564243668674955a9e7d22
| 943
|
py
|
Python
|
lintcode/NineChapters/03/binary-tree-level-order-traversal-ii.py
|
shootsoft/practice
|
49f28c2e0240de61d00e4e0291b3c5edd930e345
|
[
"Apache-2.0"
] | null | null | null |
lintcode/NineChapters/03/binary-tree-level-order-traversal-ii.py
|
shootsoft/practice
|
49f28c2e0240de61d00e4e0291b3c5edd930e345
|
[
"Apache-2.0"
] | null | null | null |
lintcode/NineChapters/03/binary-tree-level-order-traversal-ii.py
|
shootsoft/practice
|
49f28c2e0240de61d00e4e0291b3c5edd930e345
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'yinjun'
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
this.val = val
this.left, this.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: buttom-up level order in a list of lists of integers
"""
def levelOrderBottom(self, root):
# write your code here
result = []
if root == None:
return result
queue = []
queue.append(root)
l = 1
while l >0:
level = []
for i in range(l):
n = queue.pop(0)
l -= 1
level.append(n.val)
if n.left!=None:
queue.append(n.left)
l+=1
if n.right!=None:
queue.append(n.right)
l+=1
result.insert(0, level)
return result
| 18.86
| 65
| 0.45175
| 775
| 0.821845
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.30859
|
f974a73c7d07887b66165d4b3f68128150448a37
| 530
|
py
|
Python
|
setup.py
|
Moi-Teaching-Referral-Hospital/ERPNextMTRHModifications
|
393cef3294d6b07f2c7ff21899c99a82276be43f
|
[
"MIT"
] | null | null | null |
setup.py
|
Moi-Teaching-Referral-Hospital/ERPNextMTRHModifications
|
393cef3294d6b07f2c7ff21899c99a82276be43f
|
[
"MIT"
] | 1
|
2021-01-09T20:00:38.000Z
|
2021-01-09T20:00:38.000Z
|
setup.py
|
Moi-Teaching-Referral-Hospital/mtrh_dev
|
367af3922d0fe0c19e35b0edd999dfc42f9a225b
|
[
"MIT"
] | 2
|
2020-07-28T22:22:04.000Z
|
2020-08-16T16:12:56.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in mtrh_dev/__init__.py
from mtrh_dev import __version__ as version
setup(
name='mtrh_dev',
version=version,
description='For all MTRH dev Frappe and ERPNext modifications',
author='MTRH',
author_email='erp@mtrh.go.ke',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 25.238095
| 65
| 0.762264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 191
| 0.360377
|
f974ccb0279e3323702f280c06f3f6d71a27a8f5
| 23,062
|
py
|
Python
|
tools/programController.py
|
brewpi-remix/uno-test
|
a153a5277bea2a8e58ee479792d6977f0beb853e
|
[
"MIT"
] | null | null | null |
tools/programController.py
|
brewpi-remix/uno-test
|
a153a5277bea2a8e58ee479792d6977f0beb853e
|
[
"MIT"
] | null | null | null |
tools/programController.py
|
brewpi-remix/uno-test
|
a153a5277bea2a8e58ee479792d6977f0beb853e
|
[
"MIT"
] | 1
|
2021-07-31T15:23:07.000Z
|
2021-07-31T15:23:07.000Z
|
#!/usr/bin/env python3
import subprocess as sub
import time
import simplejson as json
import os
from sys import stderr
import subprocess
import platform
import sys
import stat
import pwd
import grp
import BrewPiUtil as util
import brewpiVersion
import expandLogMessage
from packaging import version
from MigrateSettings import MigrateSettings
from ConvertBrewPiDevice import ConvertBrewPiDevice
msg_map = {"a": "Arduino"}
def printStdErr(*objs):
# Log to stderr.txt
print(*objs, file=sys.stderr)
sys.stderr.flush()
def printStdOut(*objs):
# Log to stdout.txt
print(*objs, file=sys.stdout)
sys.stderr.flush()
def asbyte(v):
return chr(v & 0xFF)
class LightYModem:
"""
Receive_Packet
- first byte SOH/STX (for 128/1024 byte size packets)
- EOT (end)
- CA CA abort
- ABORT1 or ABORT2 is abort
Then 2 bytes for seqno (although the sequence number isn't checked)
Then the packet data
Then CRC16?
First packet sent is a filename packet:
- zero-terminated filename
- file size (ascii) followed by space?
"""
packet_len = 1024
stx = 2
eot = 4
ack = 6
nak = 0x15
ca = 0x18
crc16 = 0x43
abort1 = 0x41
abort2 = 0x61
def __init__(self):
self.seq = None
self.ymodem = None
def _read_response(self):
ch1 = ''
while not ch1:
ch1 = self.ymodem.read(1)
ch1 = ord(ch1)
if ch1 == LightYModem.ack and self.seq == 0: # may send also a crc16
ch2 = self.ymodem.read(1)
elif ch1 == LightYModem.ca: # cancel, always sent in pairs
ch2 = self.ymodem.read(1)
return ch1
def _send_ymodem_packet(self, data):
# pad string to 1024 chars
data = data.ljust(LightYModem.packet_len)
seqchr = asbyte(self.seq & 0xFF)
seqchr_neg = asbyte((-self.seq-1) & 0xFF)
crc16 = '\x00\x00'
packet = asbyte(LightYModem.stx) + seqchr + seqchr_neg + data + crc16
if len(packet) != 1029:
raise Exception("packet length is wrong!")
self.ymodem.write(packet)
self.ymodem.flush()
response = self._read_response()
if response == LightYModem.ack:
printStdErr("Sent packet nr %d " % (self.seq))
self.seq += 1
return response
def _send_close(self):
self.ymodem.write(asbyte(LightYModem.eot))
self.ymodem.flush()
response = self._read_response()
if response == LightYModem.ack:
self.send_filename_header("", 0)
self.ymodem.close()
def send_packet(self, file, output):
response = LightYModem.eot
data = file.read(LightYModem.packet_len)
if len(data):
response = self._send_ymodem_packet(data)
return response
def send_filename_header(self, name, size):
self.seq = 0
packet = name + asbyte(0) + str(size) + ' '
return self._send_ymodem_packet(packet)
def transfer(self, file, ymodem, output):
self.ymodem = ymodem
# file: the file to transfer via ymodem
# ymodem: the ymodem endpoint (a file-like object supporting write)
# output: a stream for output messages
file.seek(0, os.SEEK_END)
size = file.tell()
file.seek(0, os.SEEK_SET)
response = self.send_filename_header("binary", size)
while response == LightYModem.ack:
response = self.send_packet(file, output)
file.close()
if response == LightYModem.eot:
self._send_close()
return response
def fetchBoardSettings(boardsFile, boardType):
boardSettings = {}
for line in boardsFile:
line = line.decode()
if line.startswith(boardType):
# strip board name, period and \n
setting = line.replace(boardType + '.', '', 1).strip()
[key, sign, val] = setting.rpartition('=')
boardSettings[key] = val
return boardSettings
def loadBoardsFile(arduinohome):
boardsFileContent = None
try:
boardsFileContent = open(
arduinohome + 'hardware/arduino/boards.txt', 'rb').readlines()
except IOError:
printStdErr(
"Could not read boards.txt from Arduino, probably because Arduino has not been installed.")
printStdErr("Please install it with: 'sudo apt install arduino-core'")
return boardsFileContent
def programController(config, boardType, hexFile, restoreWhat):
programmer = SerialProgrammer.create(config, boardType)
return programmer.program(hexFile, restoreWhat)
def json_decode_response(line):
try:
return json.loads(line[2:])
except json.decoder.JSONDecodeError as e:
printStdErr("\nJSON decode error: {0}".format(str(e)))
printStdErr("\nLine received was: {0}".format(line))
class SerialProgrammer:
@staticmethod
def create(config, boardType):
if boardType == 'arduino':
msg_map["a"] = "Arduino"
programmer = ArduinoProgrammer(config, boardType)
if boardType == 'uno':
msg_map["a"] = "Arduino"
programmer = ArduinoProgrammer(config, boardType)
else:
msg_map["a"] = "Arduino"
programmer = ArduinoProgrammer(config, boardType)
return programmer
def __init__(self, config):
self.config = config
self.restoreSettings = False
self.restoreDevices = False
self.ser = None
self.versionNew = None
self.versionOld = None
self.oldSettings = {}
def program(self, hexFile, restoreWhat):
printStdErr("\n%(a)s program script started." % msg_map)
self.parse_restore_settings(restoreWhat)
if self.restoreSettings or self.restoreDevices:
printStdErr("Checking old version before programming.\n")
if not self.open_serial(self.config, 57600, 0.2):
return 0
self.delay_serial_open()
# request all settings from board before programming
if self.fetch_current_version():
self.retrieve_settings_from_serial()
self.save_settings_to_file()
if not self.ser:
if not self.open_serial(self.config, 57600, 0.2):
return 0
self.delay_serial_open()
if(hexFile):
if not self.flash_file(hexFile):
return 0
self.fetch_new_version()
self.reset_settings()
if self.restoreSettings or self.restoreDevices:
printStdErr(
"\nChecking which settings and devices may be restored.")
if self.versionNew is None:
printStdErr("\nWarning: Cannot receive version number from controller after programming.",
"\nRestoring settings/devices settings failed.")
return 0
if not self.versionOld and (self.restoreSettings or self.restoreDevices):
printStdErr("\nCould not receive valid version number from old board, no settings/devices",
"\nhave been restored.")
return 0
if self.restoreSettings:
printStdErr("\nTrying to restore compatible settings from {0} to {1}".format(self.versionOld.toString(), self.versionNew.toString()))
if(self.versionNew.isNewer("0.2")):
printStdErr(
"\nSettings may only be restored when updating to BrewPi 0.2.0 or higher")
self.restoreSettings = False
if self.restoreSettings:
self.restore_settings()
if self.restoreDevices:
self.restore_devices()
printStdErr("\n%(a)s program script complete." % msg_map)
self.ser.close()
self.ser = None
return 1
def parse_restore_settings(self, restoreWhat):
restoreSettings = False
restoreDevices = False
if 'settings' in restoreWhat:
if restoreWhat['settings']:
if version.parse(self.versionNew) >= version.parse(self.versionOld): # Only restore settings on same or newer
restoreSettings = True
if 'devices' in restoreWhat:
if restoreWhat['devices']:
if version.parse(self.versionNew) >= version.parse(self.versionOld): # Only restore devices on same or newer
restoreDevices = True
# Even when restoreSettings and restoreDevices are set to True here,
# they might be set to false due to version incompatibility later
printStdErr("\nSettings will {0}be restored{1}.".format(("" if restoreSettings else "not "), (" if possible" if restoreSettings else "")))
printStdErr("\nDevices will {0}be restored{1}.\n".format(("" if restoreDevices else "not "), (" if possible" if restoreDevices else "")))
self.restoreSettings = restoreSettings
self.restoreDevices = restoreDevices
def open_serial(self, config, baud, timeout):
if self.ser:
self.ser.close()
self.ser = None
self.ser = util.setupSerial(config, baud, timeout, 1.0, True)
if self.ser is None:
return False
return True
def open_serial_with_retry(self, config, baud, timeout):
# reopen serial port
retries = 30
self.ser = None
while retries:
time.sleep(1)
if self.open_serial(config, baud, timeout):
return True
retries -= 1
return False
def delay_serial_open(self):
pass
def fetch_version(self, msg):
version = brewpiVersion.getVersionFromSerial(self.ser)
if version is None:
printStdErr("\nWarning: Cannot receive version number from controller. Your controller is",
"\neither not programmed yet or running a very old version of BrewPi. It will",
"\nbe reset to defaults.")
else:
printStdErr("{0}\nFound:\n{1}\non port:{2}".format(msg, version.toExtendedString(), self.ser.name))
return version
def fetch_current_version(self):
self.versionOld = self.fetch_version("\nChecking current version:\n")
return self.versionOld
def fetch_new_version(self):
self.versionNew = self.fetch_version("\nChecking new version:\n")
return self.versionNew
def retrieve_settings_from_serial(self):
ser = self.ser
self.oldSettings.clear()
printStdErr("\nRequesting old settings from %(a)s." % msg_map)
expected_responses = 2
# versions older than 2.0.0 did not have a device manager
if not self.versionOld.isNewer("0.2.0"):
expected_responses += 1
ser.write("d{}".encode()) # installed devices
time.sleep(1)
ser.write("c".encode()) # control constants
ser.write("s".encode()) # control settings
time.sleep(2)
while expected_responses:
line = ser.readline().decode()
if line:
line = util.asciiToUnicode(str(line))
if line[0] == 'C':
expected_responses -= 1
self.oldSettings['controlConstants'] = json_decode_response(
line)
elif line[0] == 'S':
expected_responses -= 1
self.oldSettings['controlSettings'] = json_decode_response(
line)
elif line[0] == 'd':
expected_responses -= 1
self.oldSettings['installedDevices'] = json_decode_response(
line)
def save_settings_to_file(self):
# This is format" "2019-01-08-16-50-15"
oldSettingsFileName = 'settings-{0}.json'.format(time.strftime("%Y-%m-%dT%H-%M-%S"))
settingsBackupDir = '{0}settings/controller-backup/'.format(util.addSlash(util.scriptPath()))
if not os.path.exists(settingsBackupDir):
os.makedirs(settingsBackupDir)
# Set owner and permissions for directory
fileMode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH # 775
owner = 'brewpi'
group = 'brewpi'
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(settingsBackupDir, uid, gid) # chown dir
os.chmod(settingsBackupDir, fileMode) # chmod dir
oldSettingsFilePath = os.path.join(
settingsBackupDir, oldSettingsFileName)
oldSettingsFile = open(oldSettingsFilePath, 'w')
oldSettingsFile.write(json.dumps(self.oldSettings))
oldSettingsFile.truncate()
oldSettingsFile.close()
# Set owner and permissions for file
fileMode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP # 660
owner = 'brewpi'
group = 'brewpi'
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(oldSettingsFilePath, uid, gid) # chown file
os.chmod(oldSettingsFilePath, fileMode) # chmod file
printStdErr("\nSaved old settings to file {0}.".format(oldSettingsFileName))
def delay(self, countDown):
printStdErr("")
while countDown > 0:
time.sleep(1)
printStdErr("Back up in {0}.".format(str(countDown)))
countDown -= 1
def reset_settings(self, setTestMode=False):
printStdErr("\nResetting EEPROM to default settings.")
self.ser.write('E\n'.encode())
if setTestMode:
self.ser.write('j{mode:t}'.encode())
time.sleep(5) # resetting EEPROM takes a while, wait 5 seconds
# read log messages from arduino
while 1: # read all lines on serial interface
line = self.ser.readline()
if line: # line available?
if line[0] == 'D':
self.print_debug_log(line)
else:
break
def print_debug_log(self, line):
try: # debug message received
expandedMessage = expandLogMessage.expandLogMessage(line[2:])
printStdErr(expandedMessage)
except Exception as e: # catch all exceptions, because out of date file could cause errors
printStdErr("\nError while expanding log message: {0}".format(str(e)))
printStdErr(("%(a)s debug message: " % msg_map) + line[2:])
def restore_settings(self):
oldSettingsDict = self.get_combined_settings_dict(self.oldSettings)
ms = MigrateSettings()
restored, omitted = ms.getKeyValuePairs(oldSettingsDict,
self.versionOld.toString(),
self.versionNew.toString())
printStdErr("\nMigrating these settings:\n{0}".format(json.dumps(dict(restored.items()))))
printStdErr("\nOmitting these settings:\n{0}".format(json.dumps(dict(omitted.items()))))
self.send_restored_settings(restored)
def get_combined_settings_dict(self, oldSettings):
# copy keys/values from controlConstants
combined = oldSettings.get('controlConstants').copy()
# add keys/values from controlSettings
combined.update(oldSettings.get('controlSettings'))
return combined
def send_restored_settings(self, restoredSettings):
for key in restoredSettings:
setting = restoredSettings[key]
command = "j{" + json.dumps(key) + ":" + \
json.dumps(setting) + "}\n"
self.ser.write(command.encode())
# make readline blocking for max 5 seconds to give the controller time to respond after every setting
oldTimeout = self.ser.timeout
self.ser.timeout = 5
# read all replies
while 1:
line = self.ser.readline()
if line: # line available?
if line[0] == 'D':
self.print_debug_log(line)
if self.ser.inWaiting() == 0:
#if self.ser.readline() == 0: # WiFi Change
break
self.ser.timeout = 5
def restore_devices(self):
ser = self.ser
oldDevices = self.oldSettings.get('installedDevices')
if oldDevices:
printStdErr("\nNow trying to restore previously installed devices:\n{0}".format(oldDevices))
else:
printStdErr("\nNo devices to restore.")
return
detectedDevices = None
for device in oldDevices:
printStdErr("\nRestoring device:\n{0}".format(json.dumps(device)))
if "a" in device.keys(): # check for sensors configured as first on bus
if int(device['a'], 16) == 0:
printStdErr("A OneWire sensor was configured to be autodetected as the first sensor on the",
"\nbus, but this is no longer supported. We'll attempt to automatically find the",
"\naddress and add the sensor based on its address.")
if detectedDevices is None:
ser.write("h{}".encode()) # installed devices
time.sleep(1)
# get list of detected devices
for line in ser:
if line[0] == 'h':
detectedDevices = json_decode_response(line)
for detectedDevice in detectedDevices:
if device['p'] == detectedDevice['p']:
# get address from sensor that was first on bus
device['a'] = detectedDevice['a']
_temp = "U" + json.dumps(device)
ser.write(_temp.encode())
requestTime = time.time()
# read log messages from arduino
while 1: # read all lines on serial interface
line = ser.readline()
if line: # line available?
if line[0] == 'D':
self.print_debug_log(line)
elif line[0] == 'U':
printStdErr(
("%(a)s reports: device updated to: " % msg_map) + line[2:])
break
if time.time() > requestTime + 5: # wait max 5 seconds for an answer
break
printStdErr("\nRestoring installed devices done.")
class ArduinoProgrammer(SerialProgrammer):
def __init__(self, config, boardType):
SerialProgrammer.__init__(self, config)
self.boardType = boardType
def delay_serial_open(self):
if self.boardType == "uno":
# Allow Arduino UNO to restart
time.sleep(10)
def flash_file(self, hexFile):
config, boardType = self.config, self.boardType
printStdErr("\nLoading programming settings from board.txt.")
# Location of Arduino sdk
arduinohome = config.get('arduinoHome', '/usr/share/arduino/')
# Location of avr tools
avrdudehome = config.get(
'avrdudeHome', arduinohome + 'hardware/tools/')
# Default to empty string because avrsize is on path
avrsizehome = config.get('avrsizeHome', '')
# Location of global avr conf
avrconf = config.get('avrConf', avrdudehome + 'avrdude.conf')
boardsFile = loadBoardsFile(arduinohome)
if not boardsFile:
return False
boardSettings = fetchBoardSettings(boardsFile, boardType)
# Parse the Arduino board file to get the right program settings
for line in boardsFile:
line = line.decode()
if line.startswith(boardType):
# strip board name, period and \n
_boardType = (boardType + '.').encode()
setting = line.encode().replace(_boardType, ''.encode(), 1).strip()
[key, sign, val] = setting.rpartition('='.encode())
boardSettings[key] = val
printStdErr("\nChecking hex file size with avr-size.")
# Start programming the Arduino
avrsizeCommand = avrsizehome + 'avr-size ' + "\"" + hexFile + "\""
# Check program size against maximum size
p = sub.Popen(avrsizeCommand, stdout=sub.PIPE,
stderr=sub.PIPE, shell=True)
output, errors = p.communicate()
programSize = output.split()[7]
printStdErr('\nProgram size: {0} bytes out of max {1}.'.format(programSize.decode(), boardSettings['upload.maximum_size']))
# Another check just to be sure
if int(programSize.decode()) > int(boardSettings['upload.maximum_size']):
printStdErr(
"\nERROR: Program size is bigger than maximum size for your Arduino {0}.".format(boardType))
return False
hexFileDir = os.path.dirname(hexFile)
hexFileLocal = os.path.basename(hexFile)
time.sleep(1)
# Get serial port while in bootloader
# Convert udev rule based port to /dev/tty*
if not config['port'].startswith("/dev/tty"):
convert = ConvertBrewPiDevice()
config['port'] = convert.get_device_from_brewpidev(config['port'])
bootLoaderPort = util.findSerialPort(bootLoader=True, my_port=config['port'])
if not bootLoaderPort:
printStdErr("\nERROR: Could not find port in bootloader.")
return False
programCommand = (avrdudehome + 'avrdude' +
' -F' + # override device signature check
' -e' + # erase flash and eeprom before programming. This prevents issues with corrupted EEPROM
' -p ' + boardSettings['build.mcu'] +
' -c ' + boardSettings['upload.protocol'] +
' -b ' + boardSettings['upload.speed'] +
' -P ' + bootLoaderPort +
' -U ' + 'flash:w:' + "\"" + hexFileLocal + "\"" +
' -C ' + avrconf)
print("DEBUG: Programming command: {}".format(programCommand))
printStdErr("\nProgramming Arduino with avrdude.")
p = sub.Popen(programCommand, stdout=sub.PIPE,
stderr=sub.PIPE, shell=True, cwd=hexFileDir)
output, errors = p.communicate()
output = output.decode()
errors = errors.decode()
# avrdude only uses stderr, append its output to the returnString
printStdErr("\nResult of invoking avrdude:{0}".format(errors))
if("bytes of flash verified" in errors):
printStdErr("Avrdude done, programming successful.")
else:
printStdErr("There was an error while programming.")
return False
printStdErr("\nGiving the Arduino 10 seconds to reset.")
self.delay(10)
return True
| 37.683007
| 146
| 0.588674
| 21,112
| 0.915445
| 0
| 0
| 451
| 0.019556
| 0
| 0
| 5,763
| 0.249892
|
f975435861ec73bfce0399c6d6ca18e0c1beb891
| 10,086
|
py
|
Python
|
common/hil_slurm_helpers.py
|
mghpcc-projects/user_level_slurm_reservations
|
eae56588bb00abfe043714317a27481e036fcc29
|
[
"MIT"
] | null | null | null |
common/hil_slurm_helpers.py
|
mghpcc-projects/user_level_slurm_reservations
|
eae56588bb00abfe043714317a27481e036fcc29
|
[
"MIT"
] | 11
|
2017-09-14T17:21:31.000Z
|
2021-06-01T21:48:47.000Z
|
common/hil_slurm_helpers.py
|
mghpcc-projects/user_level_slurm_reservations
|
eae56588bb00abfe043714317a27481e036fcc29
|
[
"MIT"
] | 3
|
2017-08-16T13:54:40.000Z
|
2018-01-10T19:26:59.000Z
|
"""
MassOpenCloud / Hardware Isolation Layer (MOC/HIL)
Slurm and *NX Subprocess Command Helpers
May 2017, Tim Donahue tpd001@gmail.com
"""
import os
from pwd import getpwnam, getpwuid
from subprocess import Popen, PIPE
from time import time
from hil_slurm_constants import (HIL_RESNAME_PREFIX, HIL_RESNAME_FIELD_SEPARATOR,
HIL_RESERVATION_OPERATIONS, RES_CREATE_FLAGS,
HIL_RESERVE, HIL_RELEASE)
from hil_slurm_settings import SLURM_INSTALL_DIR
from hil_slurm_logging import log_debug, log_info, log_error
def _output_debug_info(fname, stdout_data, stderr_data):
log_debug('%s: Stdout %s' % (fname, stdout_data))
log_debug('%s: Stderr %s' % (fname, stderr_data))
def _exec_subprocess_cmd(cmd):
'''
Execute a command in a subprocess and wait for completion
'''
debug = False
p = None
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
(stdout_data, stderr_data) = p.communicate()
except Exception as e:
stdout_data = None
stderr_data ='error: Exception on Popen or communicate'
log_debug('Exception on Popen or communicate')
log_debug('Exception: %s' % e)
if debug:
f = _exec_subprocess_cmd.__name__
log_debug('%s: cmd is %s' % (f, cmd))
log_debug('%s: stdout is %s' % (f, stdout_data))
log_debug('%s: stderr is %s' % (f, stderr_data))
return stdout_data, stderr_data
def _scontrol_show_stdout_to_dict_list(stdout_data, stderr_data, debug=False):
'''
Convert the 'scontrol show' stdout data to a list of dicts
Nearly all params are of the form "keyword=value".
If they all were, a neat functional one-liner would do...
'''
stdout_dict_list = []
if len(stderr_data):
return []
# Split the output and remove the trailing None from the subprocess output
stdout_lines = stdout_data.split(os.linesep)
stdout_lines = filter(None, stdout_lines)
# Convert the output to a list of dicts
for line in stdout_lines:
stdout_line_dict = {}
for kv_pair in line.split(' '):
kv = kv_pair.split('=')
if (len(kv) == 2):
stdout_line_dict[kv[0]] = kv[1]
elif debug:
log_debug('Failed to convert `$s`' % kv_pair)
stdout_dict_list.append(stdout_line_dict)
return stdout_dict_list
def exec_scontrol_cmd(action, entity, entity_id=None, debug=True, **kwargs):
'''
Build an 'scontrol <action> <entity>' command and pass to an executor
Specify single-line output to support stdout postprocessing
'''
cmd = [os.path.join(SLURM_INSTALL_DIR, 'scontrol'), action]
if entity:
cmd.append(entity)
if entity_id:
cmd.append(entity_id)
cmd.append('-o')
if kwargs:
for k, v in kwargs.iteritems():
cmd.append('%s=%s' % (k,v))
if debug:
log_debug('exec_scontrol_cmd(): Command %s' % cmd)
stdout_data, stderr_data = _exec_subprocess_cmd(cmd)
if debug:
log_debug('exec_scontrol_cmd(): Stdout %s' % stdout_data)
log_debug('exec_scontrol_cmd(): Stderr %s' % stderr_data)
return stdout_data, stderr_data
def exec_scontrol_show_cmd(entity, entity_id, debug=False, **kwargs):
'''
Run the 'scontrol show' command on the entity and ID
Convert standard output data to a list of dictionaries, one per line
'''
stdout_data, stderr_data = exec_scontrol_cmd('show', entity, entity_id, debug=debug, **kwargs)
# Check for errors.
# If anything in stderr, return it
# Next, check if stdout includes various error strings - 'scontrol show'
# writes error output to stdout.
# Failure indications:
# Reservation: stdout includes 'not found'
# Job: stdout includes 'Invalid job id'
# Copy stdout to stderr if found.
# If stderr is empty, and stdout does not contain an error string,
# convert stdout to a list of dicts and return that
stdout_dict_list = []
entity_error_dict = {
'reservation': 'not found',
'job': 'Invalid job id'
}
cmd = 'scontrol show ' + entity
if (len(stderr_data) != 0):
log_debug('Command `%s` failed' % cmd)
log_debug(' stderr: %s' % stderr_data)
elif (entity in entity_error_dict) and (entity_error_dict[entity] in stdout_data):
if debug:
log_debug('Command `%s` failed' % cmd)
log_debug(' stderr: %s' % stderr_data)
stderr_data = stdout_data
stdout_data = None
else:
stdout_dict_list = _scontrol_show_stdout_to_dict_list(stdout_data, stderr_data)
return stdout_dict_list, stdout_data, stderr_data
def create_slurm_reservation(name, user, t_start_s, t_end_s, nodes=None,
flags=RES_CREATE_FLAGS, features=None, debug=False):
'''
Create a Slurm reservation via 'scontrol create reservation'
'''
if nodes is None:
nodes = 'ALL'
t_end_arg = {'duration': 'UNLIMITED'} if t_end_s is None else {'endtime': t_end_s}
return exec_scontrol_cmd('create', 'reservation', entity_id=None, debug=debug,
ReservationName=name, starttime=t_start_s,
user=user, nodes=nodes, flags=flags, features=features,
**t_end_arg)
def delete_slurm_reservation(name, debug=False):
'''
Delete a Slurm reservation via 'scontrol delete reservation=<name>'
'''
return exec_scontrol_cmd('delete', None, debug=debug, reservation=name)
def update_slurm_reservation(name, debug=False, **kwargs):
'''
Update a Slurm reservation via 'scontrol update reservation=<name> <kwargs>'
'''
return exec_scontrol_cmd('update', None, reservation=name, debug=debug, **kwargs)
def get_hil_reservation_name(env_dict, restype_s, t_start_s):
'''
Create a reservation name, combining the HIL reservation prefix,
the username, the job ID, and the ToD (YMD_HMS)
Structure:
NamePrefix _ [release|reserve] _ uname _ job_UID _ str(int(time()))
'''
resname = HIL_RESNAME_PREFIX + restype_s + HIL_RESNAME_FIELD_SEPARATOR
resname += env_dict['username'] + HIL_RESNAME_FIELD_SEPARATOR
resname += env_dict['job_uid'] + HIL_RESNAME_FIELD_SEPARATOR
resname += str(int(time()))
return resname
def parse_hil_reservation_name(resname):
'''
Attempt to split a reservation name into HIL reservation name components:
HIL reservation prefix, reservation type, user name, uid, and time
This looks like overkill, except for the presence of other reservations in the
system, with semi-arbitrary names.
'''
prefix = None
restype = None
user = None
uid = None
time_s = None
if resname.startswith(HIL_RESNAME_PREFIX):
resname_partitions = resname.partition(HIL_RESNAME_PREFIX)
prefix = resname_partitions[1]
try:
restype, user, uid, time_s = resname_partitions[2].split(HIL_RESNAME_FIELD_SEPARATOR)
except:
pass
return prefix, restype, user, uid, time_s
def is_hil_reservation(resname, restype_in):
'''
Check if the passed reservation name:
- Starts with the HIL reservation prefix
- Is a HIL reserve or release reservation
- Contains a valid user name and UID
- Optionally, is specifically a reserve or release reservation
- $$$ Could verify nodes have HIL property set
'''
prefix, restype, uname, uid, _ = parse_hil_reservation_name(resname)
if (prefix != HIL_RESNAME_PREFIX):
# log_error('No HIL reservation prefix')
return False
if restype_in:
if (restype != restype_in):
# log_error('Reservation type mismatch')
return False
elif restype not in HIL_RESERVATION_OPERATIONS:
log_error('Unknown reservation type')
return False
try:
pwdbe1 = getpwnam(uname)
pwdbe2 = getpwuid(int(uid))
if pwdbe1 != pwdbe2:
# log_error('Reservation `%s`: User and UID inconsistent' % resname)
return False
except KeyError:
# log_error('Key error')
return False
return True
def get_object_data(what_obj, obj_id, debug=False):
'''
Get a list of dictionaries of information on the object, via
'scontrol show <what_object> <object_id>'
'''
objdata_dict_list, stdout_data, stderr_data = exec_scontrol_show_cmd(what_obj,
obj_id, debug=False)
if (len(stderr_data) != 0):
if debug:
log_debug('Failed to retrieve data for %s `%s`' % (what_obj, obj_id))
log_debug(' %s' % stderr_data)
return objdata_dict_list
def get_partition_data(partition_id):
'''
Get a list of dictionaries of information on the partition(s),
via 'scontrol show partition'
'''
return get_object_data('partition', partition_id, debug=False)
def get_job_data(job_id):
'''
Get a list of dictionaries of information on the job(s),
via 'scontrol show job'
'''
return get_object_data('job', job_id, debug=False)
def get_hil_reservations():
'''
Get a list of all Slurm reservations, return that subset which are HIL reservations
'''
resdata_dict_list = []
resdata_dict_list, stdout_data, stderr_data = exec_scontrol_show_cmd('reservation', None)
for resdata_dict in resdata_dict_list:
if resdata_dict and is_hil_reservation(resdata_dict['ReservationName'], None):
continue
else:
resdata_dict_list.remove(resdata_dict)
return resdata_dict_list
def log_hil_reservation(resname, stderr_data, t_start_s=None, t_end_s=None):
if len(stderr_data):
log_error('Error creating reservation `%s`'% resname)
log_error(' Error string: %s' % stderr_data.strip('\n'), separator=False)
else:
log_info('Created HIL reservation `%s`' % resname)
# EOF
| 31.716981
| 98
| 0.65348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,708
| 0.367638
|
f976fcf3758aba64e0576f78efb657866db8fe14
| 17,596
|
py
|
Python
|
tools/contourlet_transform/tools/dfilters.py
|
yilinshao/CoT-Contourlet-Transformer
|
44d36a05f81ec168e3ccd8b9438ddaee6283189e
|
[
"MIT"
] | 4
|
2021-12-21T07:45:01.000Z
|
2021-12-21T09:15:47.000Z
|
tools/contourlet_transform/tools/dfilters.py
|
yilinshao/CoT-Contourlet-Transformer
|
44d36a05f81ec168e3ccd8b9438ddaee6283189e
|
[
"MIT"
] | null | null | null |
tools/contourlet_transform/tools/dfilters.py
|
yilinshao/CoT-Contourlet-Transformer
|
44d36a05f81ec168e3ccd8b9438ddaee6283189e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PyContourlet
#
# A Python library for the Contourlet Transform.
#
# Copyright (C) 2011 Mazay Jiménez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from numpy import *
# from scipy.signal.filter_design import firwin
from .mctrans import *
from .modulate2 import *
from .ldfilter import *
from .ld2quin import *
from .reverse2 import *
from .dmaxflat import *
def dfilters(fname, type):
""" DFILTERS Generate directional 2D filters
Input:
fname: Filter name. Available 'fname' are:
'haar': the Haar filters
'vk': McClellan transformed of the filter from the VK book
'ko': orthogonal filter in the Kovacevic's paper
'kos': smooth 'ko' filter
'lax': 17 x 17 by Lu, Antoniou and Xu
'sk': 9 x 9 by Shah and Kalker
'cd': 7 and 9 McClellan transformed by Cohen and Daubechies
'pkva': ladder filters by Phong et al.
'oqf_362': regular 3 x 6 filter
'dvmlp': regular linear phase biorthogonal filter with 3 dvm
'sinc': ideal filter (*NO perfect recontruction*)
'dmaxflat': diamond maxflat filters obtained from a three stage ladder
type: 'd' or 'r' for decomposition or reconstruction filters
Output:
h0, h1: diamond filter pair (lowpass and highpass)
To test those filters (for the PR condition for the FIR case),
verify that:
convolve(h0, modulate2(h1, 'b')) + convolve(modulate2(h0, 'b'), h1) = 2
(replace + with - for even size filters)
To test for orthogonal filter
convolve(h, reverse2(h)) + modulate2(convolve(h, reverse2(h)), 'b') = 2
"""
# The diamond-shaped filter pair
if fname == "haar":
if str.lower(type[0]) == 'd':
h0 = array([1, 1]) / sqrt(2)
h1 = array([-1, 1]) / sqrt(2)
else:
h0 = array([1, 1]) / sqrt(2)
h1 = array([1, -1]) / sqrt(2)
return h0, h1
elif fname == "vk":
if str.lower(type[0]) == 'd':
h0 = array([1, 2, 1]) / 4.0
h1 = array([-1, -2, 6, -2, -1]) / 4.0
else:
h0 = array([-1, 2, 6, 2, -1]) / 4.0
h1 = array([-1, 2, -1]) / 4.0
# McClellan transfrom to obtain 2D diamond filters
t = array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) / 4.0 # diamond kernel
h0 = mctrans(h0, t)
h1 = mctrans(h1, t)
return h0, h1
elif fname == "ko": # orthogonal filters in Kovacevic's thesis
a0, a1, a2 = 2, 0.5, 1
h0 = array([[0, -a1, -a0 * a1, 0],
[-a2, -a0 * a2, -a0, 1],
[0, a0 * a1 * a2, -a1 * a2, 0]])
# h1 = qmf2(h0);
h1 = array([[0, -a1 * a2, -a0 * a1 * a2, 0],
[1, a0, -a0 * a2, a2],
[0, -a0 * a1, a1, 0]])
# Normalize filter sum and norm;
norm = sqrt(2) / sum(h0)
h0 = h0 * norm
h1 = h1 * norm
if str.lower(type[0]) == 'r':
# Reverse filters for reconstruction
h0 = h0[::-1, ::-1]
h1 = h1[::-1, ::-1]
return h0, h1
elif fname == "kos": # Smooth orthogonal filters in Kovacevic's thesis
a0, a1, a2 = -sqrt(3), -sqrt(3), 2 + sqrt(3)
h0 = array([[0, -a1, -a0 * a1, 0],
[-a2, -a0 * a2, -a0, 1],
[0, a0 * a1 * a2, -a1 * a2, 0]])
# h1 = qmf2(h0);
h1 = array([[0, -a1 * a2, -a0 * a1 * a2, 0],
[1, a0, -a0 * a2, a2],
[0, -a0 * a1, a1, 0]])
# Normalize filter sum and norm;
norm = sqrt(2) / sum(h0)
h0 = h0 * norm
h1 = h1 * norm
if str.lower(type[0]) == 'r':
# Reverse filters for reconstruction
h0 = h0[::-1, ::-1]
h1 = h1[::-1, ::-1]
return h0, h1
elif fname == "lax": # by Lu, Antoniou and Xu
h = array([[-1.2972901e-5, 1.2316237e-4, -7.5212207e-5, 6.3686104e-5,
9.4800610e-5, -7.5862919e-5, 2.9586164e-4, -1.8430337e-4],
[1.2355540e-4, -1.2780882e-4, -1.9663685e-5, -4.5956538e-5,
-6.5195193e-4, -2.4722942e-4, -2.1538331e-5, -7.0882131e-4],
[-7.5319075e-5, -1.9350810e-5, -7.1947086e-4, 1.2295412e-3,
5.7411214e-4, 4.4705422e-4, 1.9623554e-3, 3.3596717e-4],
[6.3400249e-5, -2.4947178e-4, 4.4905711e-4, -4.1053629e-3,
-2.8588307e-3, 4.3782726e-3, -3.1690509e-3, -3.4371484e-3],
[9.6404973e-5, -4.6116254e-5, 1.2371871e-3, -1.1675575e-2,
1.6173911e-2, -4.1197559e-3, 4.4911165e-3, 1.1635130e-2],
[-7.6955555e-5, -6.5618379e-4, 5.7752252e-4, 1.6211426e-2,
2.1310378e-2, -2.8712621e-3, -4.8422645e-2, -5.9246338e-3],
[2.9802986e-4, -2.1365364e-5, 1.9701350e-3, 4.5047673e-3,
-4.8489158e-2, -3.1809526e-3, -2.9406153e-2, 1.8993868e-1],
[-1.8556637e-4, -7.1279432e-4, 3.3839195e-4, 1.1662001e-2,
-5.9398223e-3, -3.4467920e-3, 1.9006499e-1, 5.7235228e-1]])
h0 = sqrt(2) * vstack((hstack((h, h[:, len(h) - 2::-1])),
hstack((h[len(h) - 2::-1, :],
h[len(h) - 2::-1, len(h) - 2::-1]))))
h1 = modulate2(h0, 'b', None)
return h0, h1
elif fname == "sk": # by Shah and Kalker
h = array([[0.621729, 0.161889, -0.0126949, -0.00542504, 0.00124838],
[0.161889, -0.0353769, -0.0162751, -0.00499353, 0],
[-0.0126949, -0.0162751, 0.00749029, 0, 0],
[-0.00542504, 0.00499353, 0, 0, 0],
[0.00124838, 0, 0, 0, 0]])
h0 = sqrt(2) * vstack((hstack((h[len(h):0:-1, len(h):0:-1],
h[len(h):0:-1, :])),
hstack((h[:, len(h):0:-1], h))))
h1 = modulate2(h0, 'b', None)
return h0, h1
elif fname == "dvmlp":
q = sqrt(2)
b = 0.02
b1 = b * b
h = array([[b / q, 0, -2 * q * b, 0, 3 * q * b, 0, -2 * q * b, 0, b / q],
[0, -1 / (16 * q), 0, 9 / (16 * q), 1 / q, 9 / (16 * q), 0, -1 / (16 * q), 0],
[b / q, 0, -2 * q * b, 0, 3 * q * b, 0, -2 * q * b, 0, b / q]])
g0 = array([[-b1 / q, 0, 4 * b1 * q, 0, -14 * q * b1, 0, 28 * q * b1, 0, -35 * q * b1, 0,
28 * q * b1, 0, -14 * q * b1, 0, 4 * b1 * q, 0, -b1 / q],
[0, b / (8 * q), 0, -13 * b / (8 * q), b / q, 33 * b / (8 * q), -2 * q * b,
-21 * b / (8 * q), 3 * q * b, -21 * b / (8 * q), -2 * q * b, 33 * b / (8 * q),
b / q, -13 * b / (8 * q), 0, b / (8 * q), 0],
[-q * b1, 0, -1 / (256 * q) + 8 * q * b1, 0, 9 / (128 * q) - 28 * q * b1,
-1 / (q * 16), -63 / (256 * q) + 56 * q * b1, 9 / (16 * q),
87 / (64 * q) - 70 * q * b1, 9 / (16 * q), -63 / (256 * q) + 56 * q * b1,
-1 / (q * 16), 9 / (128 * q) - 28 * q * b1, 0, -1 / (256 * q) + 8 * q * b1,
0, -q * b1],
[0, b / (8 * q), 0, -13 * b / (8 * q), b / q, 33 * b / (8 * q), -2 * q * b,
-21 * b / (8 * q), 3 * q * b, -21 * b / (8 * q), -2 * q * b, 33 * b / (8 * q),
b / q, -13 * b / (8 * q), 0, b / (8 * q), 0],
[-b1 / q, 0, 4 * b1 * q, 0, -14 * q * b1, 0, 28 * q * b1, 0, -35 * q * b1,
0, 28 * q * b1, 0, -14 * q * b1, 0, 4 * b1 * q, 0, -b1 / q]])
h1 = modulate2(g0, 'b', None)
h0 = h.copy()
if str.lower(type[0]) == 'r':
h1 = modulate2(h, 'b', None)
h0 = g0.copy()
return h0, h1
elif fname == "cd" or fname == "7-9": # by Cohen and Daubechies
# 1D prototype filters: the '7-9' pair
h0 = array([0.026748757411, -0.016864118443, -0.078223266529,
0.266864118443, 0.602949018236, 0.266864118443,
-0.078223266529, -0.016864118443, 0.026748757411])
g0 = array([-0.045635881557, -0.028771763114, 0.295635881557,
0.557543526229, 0.295635881557, -0.028771763114,
-0.045635881557])
if str.lower(type[0]) == 'd':
h1 = modulate2(g0, 'c', None)
else:
h1 = modulate2(h0, 'c', None)
h0 = g0.copy()
# Use McClellan to obtain 2D filters
t = array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) / 4.0 # diamond kernel
h0 = sqrt(2) * mctrans(h0, t)
h1 = sqrt(2) * mctrans(h1, t)
return h0, h1
elif fname == "pkva" or fname == "ldtest":
# Filters from the ladder structure
# Allpass filter for the ladder structure network
beta = ldfilter(fname)
# Analysis filters
h0, h1 = ld2quin(beta)
# Normalize norm
h0 = sqrt(2) * h0
h1 = sqrt(2) * h1
# Synthesis filters
if str.lower(type[0]) == 'r':
f0 = modulate2(h1, 'b', None)
f1 = modulate2(h0, 'b', None)
h0 = f0.copy()
h1 = f1.copy()
return h0, h1
# elif fname == "pkva-half4": # Filters from the ladder structure
# # Allpass filter for the ladder structure network
# beta = ldfilterhalf(4)
#
# # Analysis filters
# h0, h1 = ld2quin(beta)
#
# # Normalize norm
# h0 = sqrt(2) * h0
# h1 = sqrt(2) * h1
#
# # Synthesis filters
# if str.lower(type[0]) == 'r':
# f0 = modulate2(h1, 'b', None)
# f1 = modulate2(h0, 'b', None)
# h0 = f0
# h1 = f1
# return h0, h1
#
# elif fname == "pkva-half6": # Filters from the ladder structure
# # Allpass filter for the ladder structure network
# beta = ldfilterhalf(6)
#
# # Analysis filters
# h0, h1 = ld2quin(beta)
#
# # Normalize norm
# h0 = sqrt(2) * h0
# h1 = sqrt(2) * h1
#
# # Synthesis filters
# if srtring.lower(type[0]) == 'r':
# f0 = modulate2(h1, 'b', None)
# f1 = modulate2(h0, 'b', None)
# h0 = f0
# h1 = f1
# return h0, h1
#
# elif fname == "pkva-half8": # Filters from the ladder structure
# # Allpass filter for the ladder structure network
# beta = ldfilterhalf(8)
#
# # Analysis filters
# h0, h1 = ld2quin(beta)
#
# # Normalize norm
# h0 = sqrt(2) * h0
# h1 = sqrt(2) * h1
#
# # Synthesis filters
# if str.lower(type[0]) == 'r':
# f0 = modulate2(h1, 'b', None)
# f1 = modulate2(h0, 'b', None)
# h0 = f0
# h1 = f1
# return h0, h1
# elif fname == "sinc": # The "sinc" case, NO Perfect Reconstruction
# # Ideal low and high pass filters
# flength = 30
#
# h0 = firwin(flength + 1, 0.5)
# h1 = modulate2(h0, 'c', None)
#
# # Use McClellan to obtain 2D filters
# t = array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) / 4.0 # diamond kernel
# h0 = sqrt(2) * mctrans(h0, t)
# h1 = sqrt(2) * mctrans(h1, t)
# return h0, h1
elif fname == "oqf_362": # Some "home-made" filters!
h0 = sqrt(2) / 64 * array([[sqrt(15), -3, 0],
[0, 5, -sqrt(15)],
[-2 * sqrt(15), 30, 0],
[0, 30, 2 * sqrt(15)],
[sqrt(15), 5, 0],
[0, -3, -sqrt(15)]]).conj().T
h1 = -reverse2(modulate2(h0, 'b', None))
if str.lower(type[0]) == 'r':
# Reverse filters for reconstruction
h0 = h0[::-1, ::-1]
h1 = h1[::-1, ::-1]
return h0, h1
elif fname == "test": # Only for the shape, not for PR
h0 = array([[0, 1, 0], [1, 4, 1], [0, 1, 0]])
h1 = array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
return h0, h1
elif fname == "testDVM": # Only for directional vanishing moment
h0 = array([[1, 1], [1, 1]]) / sqrt(2)
h1 = array([[-1, 1], [1, -1]]) / sqrt(2)
return h0, h1
elif fname == "qmf": # by Lu, Antoniou and Xu
# ideal response
# window
m, n = 2, 2
w = empty([5, 5])
w1d = kaiser(4 * m + 1, 2.6)
for n1 in range(-m, m + 1):
for n2 in range(-n, n + 1):
w[n1 + m, n2 + n] = w1d[2 * m + n1 + n2] * w1d[2 * m + n1 - n2]
h = empty([5, 5])
for n1 in range(-m, m + 1):
for n2 in range(-n, n + 1):
h[n1 + m, n2 + n] = .5 * sinc((n1 + n2) / 2.0) * .5 * sinc((n1 - n2) / 2.0)
c = sum(h)
h = sqrt(2) * h / c
h0 = h * w
h1 = modulate2(h0, 'b', None)
return h0, h1
#h0 = modulate2(h,'r');
#h1 = modulate2(h,'b');
elif fname == "qmf2": # by Lu, Antoniou and Xu
# ideal response
# window
h = array([[-.001104, .002494, -0.001744, 0.004895,
-0.000048, -.000311],
[0.008918, -0.002844, -0.025197, -0.017135,
0.003905, -0.000081],
[-0.007587, -0.065904, 0.100431, -0.055878,
0.007023, 0.001504],
[0.001725, 0.184162, 0.632115, 0.099414,
-0.027006, -0.001110],
[-0.017935, -0.000491, 0.191397, -0.001787,
-0.010587, 0.002060],
[.001353, 0.005635, -0.001231, -0.009052,
-0.002668, 0.000596]])
h0 = h / sum(h)
h1 = modulate2(h0, 'b', None)
return h0, h1
#h0 = modulate2(h,'r');
#h1 = modulate2(h,'b');
elif fname == "dmaxflat4":
M1 = 1 / sqrt(2)
M2 = M1
k1 = 1 - sqrt(2)
k3 = k1
k2 = M1
h = array([.25 * k2 * k3, .5 * k2, 1 + .5 * k2 * k3]) * M1
h = hstack((h, h[len(h) - 2::-1]))
g = array([-.125 * k1 * k2 * k3, 0.25 * k1 * k2, (-0.5 * k1 - 0.5 * k3 - 0.375 * k1 * k2 * k3),
1 + .5 * k1 * k2]) * M2
g = hstack((g, g[len(g) - 2::-1]))
B = dmaxflat(4, 0)
h0 = mctrans(h, B)
g0 = mctrans(g, B)
h0 = sqrt(2) * h0 / sum(h0)
g0 = sqrt(2) * g0 / sum(g0)
h1 = modulate2(g0, 'b', None)
if str.lower(type[0]) == 'r':
h1 = modulate2(h0, 'b', None)
h0 = g0.copy()
return h0, h1
elif fname == "dmaxflat5":
M1 = 1 / sqrt(2)
M2 = M1
k1 = 1 - sqrt(2)
k3 = k1
k2 = M1
h = array([.25 * k2 * k3, .5 * k2, 1 + .5 * k2 * k3]) * M1
h = hstack((h, h[len(h) - 2::-1]))
g = array([-.125 * k1 * k2 * k3, 0.25 * k1 * k2,
(-0.5 * k1 - 0.5 * k3 - 0.375 * k1 * k2 * k3), 1 + .5 * k1 * k2]) * M2
g = hstack((g, g[len(g) - 2::-1]))
B = dmaxflat(5, 0)
h0 = mctrans(h, B)
g0 = mctrans(g, B)
h0 = sqrt(2) * h0 / sum(h0)
g0 = sqrt(2) * g0 / sum(g0)
h1 = modulate2(g0, 'b', None)
if str.lower(type[0]) == 'r':
h1 = modulate2(h0, 'b', None)
h0 = g0.copy()
return h0, h1
elif fname == "dmaxflat6":
M1 = 1 / sqrt(2)
M2 = M1
k1 = 1 - sqrt(2)
k3 = k1
k2 = M1
h = array([.25 * k2 * k3, .5 * k2, 1 + .5 * k2 * k3]) * M1
h = hstack((h, h[len(h) - 2::-1]))
g = array([-.125 * k1 * k2 * k3, 0.25 * k1 * k2,
(-0.5 * k1 - 0.5 * k3 - 0.375 * k1 * k2 * k3), 1 + .5 * k1 * k2]) * M2
g = hstack((g, g[len(g) - 2::-1]))
B = dmaxflat(6, 0)
h0 = mctrans(h, B)
g0 = mctrans(g, B)
h0 = sqrt(2) * h0 / sum(h0)
g0 = sqrt(2) * g0 / sum(g0)
h1 = modulate2(g0, 'b', None)
if str.lower(type[0]) == 'r':
h1 = modulate2(h0, 'b', None)
h0 = g0.copy()
return h0, h1
elif fname == "dmaxflat7":
M1 = 1 / sqrt(2)
M2 = M1
k1 = 1 - sqrt(2)
k3 = k1
k2 = M1
h = array([.25 * k2 * k3, .5 * k2, 1 + .5 * k2 * k3]) * M1
h = hstack((h, h[len(h) - 2::-1]))
g = array([-.125 * k1 * k2 * k3, 0.25 * k1 * k2,
(-0.5 * k1 - 0.5 * k3 - 0.375 * k1 * k2 * k3), 1 + .5 * k1 * k2]) * M2
g = hstack((g, g[len(g) - 2::-1]))
B = dmaxflat(7, 0)
h0 = mctrans(h, B)
g0 = mctrans(g, B)
h0 = sqrt(2) * h0 / sum(h0)
g0 = sqrt(2) * g0 / sum(g0)
h1 = modulate2(g0, 'b')
if str.lower(type[0]) == 'r':
h1 = modulate2(h0, 'b')
h0 = g0.copy()
return h0, h1
| 37.759657
| 103
| 0.436065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,007
| 0.284537
|
f9773a9cbbac8043bcf3bf565130d13c371454b2
| 96
|
py
|
Python
|
src/wavestate/iirrational/external/tabulate/__init__.py
|
wavestate/wavestate-iirrational
|
01d6dba8b2131fa2a099a74f17e6540f30cee606
|
[
"Apache-2.0"
] | null | null | null |
src/wavestate/iirrational/external/tabulate/__init__.py
|
wavestate/wavestate-iirrational
|
01d6dba8b2131fa2a099a74f17e6540f30cee606
|
[
"Apache-2.0"
] | null | null | null |
src/wavestate/iirrational/external/tabulate/__init__.py
|
wavestate/wavestate-iirrational
|
01d6dba8b2131fa2a099a74f17e6540f30cee606
|
[
"Apache-2.0"
] | null | null | null |
"""
External libraries packaged with for version stability
"""
from .tabulate import tabulate
| 13.714286
| 54
| 0.770833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.645833
|
f97830507a9e81ba352de5e77becd93d7de239ce
| 1,112
|
py
|
Python
|
tests/test_statsutils.py
|
ofek/boltons
|
395f690f4a24331c4554e2169ac18a15955a4eab
|
[
"BSD-3-Clause"
] | 6,058
|
2015-03-18T16:44:39.000Z
|
2022-03-28T08:42:16.000Z
|
tests/test_statsutils.py
|
ofek/boltons
|
395f690f4a24331c4554e2169ac18a15955a4eab
|
[
"BSD-3-Clause"
] | 289
|
2015-04-09T23:09:24.000Z
|
2022-03-30T00:29:33.000Z
|
tests/test_statsutils.py
|
ofek/boltons
|
395f690f4a24331c4554e2169ac18a15955a4eab
|
[
"BSD-3-Clause"
] | 407
|
2015-04-09T20:09:15.000Z
|
2022-03-30T10:43:22.000Z
|
# -*- coding: utf-8 -*-
from boltons.statsutils import Stats
def test_stats_basic():
da = Stats(range(20))
assert da.mean == 9.5
assert round(da.std_dev, 2) == 5.77
assert da.variance == 33.25
assert da.skewness == 0
assert round(da.kurtosis, 1) == 1.9
assert da.median == 9.5
def _test_pearson():
import random
from statsutils import pearson_type
def get_pt(dist):
vals = [dist() for x in range(10000)]
pt = pearson_type(vals)
return pt
for x in range(3):
# pt = get_pt(dist=lambda: random.normalvariate(15, 5)) # expect 0, normal
# pt = get_pt(dist=lambda: random.weibullvariate(2, 3)) # gets 1, beta, weibull not specifically supported
# pt = get_pt(dist=lambda: random.gammavariate(2, 3)) # expect 3, gamma
# pt = get_pt(dist=lambda: random.betavariate(2, 3)) # expect 1, beta
# pt = get_pt(dist=lambda: random.expovariate(0.2)) # expect 3, beta
pt = get_pt(dist=lambda: random.uniform(0.0, 10.0)) # gets 2
print('pearson type:', pt)
# import pdb;pdb.set_trace()
| 32.705882
| 115
| 0.615108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 467
| 0.419964
|
f979feef783a84ff7f70e9da364235d7c960d2cb
| 1,018
|
py
|
Python
|
funcs.py
|
pgDora56/shinyintro
|
15cc153106ebd88a5f73801f2bf0bef52d37cdab
|
[
"MIT"
] | null | null | null |
funcs.py
|
pgDora56/shinyintro
|
15cc153106ebd88a5f73801f2bf0bef52d37cdab
|
[
"MIT"
] | null | null | null |
funcs.py
|
pgDora56/shinyintro
|
15cc153106ebd88a5f73801f2bf0bef52d37cdab
|
[
"MIT"
] | null | null | null |
import os
import pprint
import json
import random
accept = False
colors = {
"Vo": "#e05ab4",
"Da": "#59afe1",
"Vi": "#e0e05a"
}
with open('idols.json') as f:
idols = (json.load(f))["idols"] # Insert 23 data
def pick(msg):
global accept
if not accept:
print("Not accept")
return
for i in range(5):
pick_one(msg)
accept = False
def pick_one(msg):
global idols
idolno = random.randrange(len(idols))
idol = idols[idolno]
print(f"{msg.user['real_name']} gets {idol['unit']} {idol['name']}")
attachments = [
{
'title': idol['name'],
'text': f"所属ユニット: {idol['unit']}",
'color': colors[idol["type"]],
'image_url': idol["url"]
}]
msg.send_webapi('', json.dumps(attachments))
def command():
global accept
ipt = input()
print(f"Get input: {ipt}")
if ipt == "a":
accept = True
print("Set accept")
elif ipt == "d":
accept = False
print("Set deny")
| 18.851852
| 72
| 0.54224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 281
| 0.272816
|
f97b2f6c294156a507ee4e398ae4a7d90fba5ed9
| 240
|
py
|
Python
|
world_state.py
|
puskini33/Calculator
|
79cc0021e8c9b5235d6c57c2d721deb254d73a33
|
[
"MIT"
] | null | null | null |
world_state.py
|
puskini33/Calculator
|
79cc0021e8c9b5235d6c57c2d721deb254d73a33
|
[
"MIT"
] | null | null | null |
world_state.py
|
puskini33/Calculator
|
79cc0021e8c9b5235d6c57c2d721deb254d73a33
|
[
"MIT"
] | null | null | null |
class WorldState(object):
def __init__(self):
self.variables = {}
def clone(self):
temporary_world_state = WorldState()
temporary_world_state.variables = self.variables
return temporary_world_state
| 24
| 56
| 0.675
| 239
| 0.995833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f97c2537579109b781456eb2fe785026c3ea5e59
| 10,782
|
py
|
Python
|
UWBsim/interface/plot_widgets.py
|
kianheus/uwb-simulator
|
888cdcae0d4ca101970971afbdf0113ba3bb1480
|
[
"MIT"
] | 2
|
2021-08-25T03:27:06.000Z
|
2021-09-26T05:08:19.000Z
|
UWBsim/interface/plot_widgets.py
|
kianheus/uwb-simulator
|
888cdcae0d4ca101970971afbdf0113ba3bb1480
|
[
"MIT"
] | null | null | null |
UWBsim/interface/plot_widgets.py
|
kianheus/uwb-simulator
|
888cdcae0d4ca101970971afbdf0113ba3bb1480
|
[
"MIT"
] | 1
|
2021-07-17T10:59:15.000Z
|
2021-07-17T10:59:15.000Z
|
"""Plot Widgets for the UWB Simulation GUI
This file contains several plot widgets that can be used to plot
simulation data in real time and redraw the plots with matplotlib for
better quality.
Classes:
QLivePlot: Base class for real time plots
QLivePlot_Groundtrack: Real time plot for groundtrack
QLivePlot_Position: Real time plot for x, y, z positions
QLivePlot_Velocity: Real time plot for x, y, z velocities
QLivePlot_Attitude: Real time plot for attitude
"""
from PyQt5 import QtWidgets
import pyqtgraph as pg
import matplotlib.pyplot as plt
import numpy as np
from UWBsim.utils import dataTypes
class QLivePlot(QtWidgets.QWidget):
"""Base Class for real time plots using pyqtgraph
Methods:
reset: clear the plot area and data
update_data: Pass new data to the plot widget
update_plot: Update the plot with the most recent data
"""
def __init__(self, *args, **kwargs):
"""Initialize the QLivePlot class
Initializes the widget and creates the basic elements required
for plotting.
"""
super(QLivePlot,self).__init__(*args, **kwargs)
self.layout = QtWidgets.QVBoxLayout()
self.canvas = pg.GraphicsLayoutWidget()
self.canvas.setBackground('#FAFAFA')
self.layout.addWidget(self.canvas)
self.export_button = QtWidgets.QPushButton('plot with matplotlib')
self.export_button.clicked.connect(self._export_button_clicked)
self.layout.addWidget(self.export_button)
self.setLayout(self.layout)
# same colors used by matplotlib
#self.data_colors = ['#1F77B4','#FF7F0E','#2CA02C','#D62728','#9467BD','#8C564B']
# Switch true and ekf color for publication
self.data_colors = ['#2CA02C','#FF7F0E','#1F77B4','#D62728','#9467BD','#8C564B']
self.n_subplots = 1
self.data = [{}]
self.lines = [{}]
def reset(self):
for i in range(self.n_subplots):
self.plot_area[i].clear()
self.plot_area[i].legend.items = []
self.data = [{} for _ in range(self.n_subplots)]
self.lines = [{} for _ in range(self.n_subplots)]
def update_data(self, **estimates):
return NotImplemented
def update_plot(self):
for i in range(self.n_subplots):
for key, values in self.data[i].items():
if key in self.lines[i]:
self.lines[i][key].setData(values[0], values[1])
else:
color_i = len(self.lines[i])
self.lines[i][key] = self.plot_area[i].plot(values[0], values[1], name=key, pen=self.data_colors[color_i])
def _export_button_clicked(self):
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"],
})
fig = plt.figure()
ax = []
for i in range(self.n_subplots):
ax.append(fig.add_subplot(self.n_subplots,1,i+1))
legend = []
for key, values in self.data[i].items():
ax[i].plot(values[0], values[1])
legend.append(key)
ax[i].legend(legend)
ax[i].grid(b=True)
plt.show()
class QLivePlot_GroundTrack(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_GroundTrack, self).__init__(*args, **kwargs)
self.n_subplots = 1
self.plot_area = []
self.plot_area.append(self.canvas.addPlot())
self.plot_area[0].setTitle('Ground Track')
self.plot_area[0].setLabels(left='y [m]', bottom='x [m]')
self.plot_area[0].setXRange(-4,4,padding=0)
self.plot_area[0].setYRange(-4,4,padding=0)
self.plot_area[0].showGrid(x=True, y=True)
self.plot_area[0].addLegend()
self.color_i = 0
def update_data(self, **drone_state_data):
for key, state in drone_state_data.items():
if key == 'time':
continue
else:
x = state[0]
y = state[1]
if key in self.data[0]:
self.data[0][key][0].append(x)
self.data[0][key][1].append(y)
else:
self.data[0][key] = [[x],[y]]
self.color_i += 1
def _export_button_clicked(self):
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"],
})
fig = plt.figure()
ax = []
for i in range(self.n_subplots):
ax.append(fig.add_subplot(self.n_subplots,1,i+1))
legend = []
j = 0
for key, values in self.data[i].items():
ax[i].plot(values[0], values[1], color=self.data_colors[j])
legend.append(key)
j += 1
#ax[i].legend(legend)
# Publication legend
ax[i].legend(["Ground truth","MHE", "EKF"])
ax[i].grid(b=True)
ax[i].set_xlabel('x [m]')
ax[i].set_ylabel('y [m]')
#ax[i].set_title('Groundtrack')
plt.show()
class QLivePlot_Position(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_Position, self).__init__(*args, **kwargs)
self.n_subplots = 3
self.data = [{},{},{}]
self.lines = [{},{},{}]
self.plot_area = []
for i in range(self.n_subplots):
self.plot_area.append(self.canvas.addPlot())
self.plot_area[i].showGrid(x=True, y=True)
self.plot_area[i].addLegend()
self.canvas.nextRow()
self.plot_area[0].setLabels(left='x [m]', bottom='t [s]')
self.plot_area[1].setLabels(left='y [m]', bottom='t [s]')
self.plot_area[2].setLabels(left='z [m]', bottom='t [s]')
self.color_i = 0
def update_data(self, **drone_state_data):
for key,state in drone_state_data.items():
if key == 'time':
continue
else:
x = state[0]
y = state[1]
z = state[2]
t = drone_state_data['time']
if key in self.data[0]:
self.data[0][key][0].append(t)
self.data[0][key][1].append(x)
self.data[1][key][0].append(t)
self.data[1][key][1].append(y)
self.data[2][key][0].append(t)
self.data[2][key][1].append(z)
else:
self.data[0][key] = [[t],[x]]
self.data[1][key] = [[t],[y]]
self.data[2][key] = [[t],[z]]
def _export_button_clicked(self):
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"],
})
ylabels = ['x [m]', 'y [m]', 'z [m]']
fig = plt.figure()
ax = []
for i in range(self.n_subplots):
ax.append(fig.add_subplot(self.n_subplots,1,i+1))
legend = []
for key, values in self.data[i].items():
ax[i].plot(values[0], values[1])
legend.append(key)
ax[i].legend(legend)
ax[i].grid(b=True)
ax[i].set_xlabel('t [s]')
ax[i].set_ylabel(ylabels[i])
plt.show()
class QLivePlot_Velocity(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_Velocity, self).__init__(*args, **kwargs)
self.n_subplots = 3
self.data = [{},{},{}]
self.lines = [{},{},{}]
self.plot_area = []
for i in range(self.n_subplots):
self.plot_area.append(self.canvas.addPlot())
self.plot_area[i].showGrid(x=True, y=True)
self.plot_area[i].addLegend()
self.canvas.nextRow()
self.plot_area[0].setLabels(left='vx [m/s]', bottom='t [s]')
self.plot_area[1].setLabels(left='vy [m/s]', bottom='t [s]')
self.plot_area[2].setLabels(left='vz [m/s]', bottom='t [s]')
self.color_i = 0
def update_data(self, **drone_state_data):
for key,state in drone_state_data.items():
if key == 'time':
continue
else:
vx = state[3]
vy = state[4]
vz = state[5]
t = drone_state_data['time']
if key in self.data[0]:
self.data[0][key][0].append(t)
self.data[0][key][1].append(vx)
self.data[1][key][0].append(t)
self.data[1][key][1].append(vy)
self.data[2][key][0].append(t)
self.data[2][key][1].append(vz)
else:
self.data[0][key] = [[t],[vx]]
self.data[1][key] = [[t],[vy]]
self.data[2][key] = [[t],[vz]]
class QLivePlot_Attitude(QLivePlot):
def __init__(self, *args, **kwargs):
super(QLivePlot_Attitude, self).__init__(*args, **kwargs)
self.n_subplots = 3
self.data = [{},{},{}]
self.lines = [{},{},{}]
self.plot_area = []
for i in range(self.n_subplots):
self.plot_area.append(self.canvas.addPlot())
self.plot_area[i].showGrid(x=True, y=True)
self.plot_area[i].addLegend()
self.canvas.nextRow()
self.plot_area[0].setLabels(left='Roll [rad]', bottom='t [s]')
self.plot_area[1].setLabels(left='Pitch [rad]', bottom='t [s]')
self.plot_area[2].setLabels(left='Yaw [rad]', bottom='t [s]')
self.color_i = 0
def update_data(self, **kwargs):
for key,value in kwargs.items():
if isinstance(value, dataTypes.State_XVQW):
r = value.q.get_roll()
p = value.q.get_pitch()
y = value.q.get_yaw()
t = value.timestamp
if key in self.data[0]:
self.data[0][key][0].append(t)
self.data[0][key][1].append(r)
self.data[1][key][0].append(t)
self.data[1][key][1].append(p)
self.data[2][key][0].append(t)
self.data[2][key][1].append(y)
else:
self.data[0][key] = [[t],[r]]
self.data[1][key] = [[t],[p]]
self.data[2][key] = [[t],[y]]
else:
pass
#print('Plot can only be updated with State_XVQW data type.')
| 34.66881
| 126
| 0.516602
| 10,134
| 0.9399
| 0
| 0
| 0
| 0
| 0
| 0
| 1,715
| 0.159061
|
f97ca4c83d65c548b29075ec69330e20d6ca30b3
| 1,018
|
py
|
Python
|
scripts/plot_summary_stats.py
|
JackKelly/slicedpy
|
c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62
|
[
"Apache-2.0"
] | 3
|
2017-02-03T22:05:25.000Z
|
2017-08-29T19:06:17.000Z
|
scripts/plot_summary_stats.py
|
JackKelly/slicedpy
|
c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62
|
[
"Apache-2.0"
] | null | null | null |
scripts/plot_summary_stats.py
|
JackKelly/slicedpy
|
c2fa7eb4c7b7374f8192a43d8e617b63c9e25e62
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from pda.dataset import init_aggregate_and_appliance_dataset_figure
import matplotlib.pyplot as plt
from scipy.stats import *
import numpy as np
subplots, chan = init_aggregate_and_appliance_dataset_figure(
start_date='2013/6/4 10:00', end_date='2013/6/4 13:30',
n_subplots=2, date_format='%H:%M:%S', alpha=0.6,
plot_appliance_ground_truth=False)
DISPLAY = ['mean', 'std', 'ptp', 'gmean', 'skew']
WINDOW = 60
n = chan.series.size - WINDOW
labels = ['mean', 'std', 'ptp', 'gmean', 'skew']
summary_stats = np.empty((n,len(labels)))
print("Calculating...")
for i in range(1,n):
chunk = chan.series.values[i:i+WINDOW]
summary_stats[i] = (chunk.mean(), chunk.std(), chunk.ptp(),
gmean(chunk), skew(chunk))
print("Plotting...")
for i, label in enumerate(labels):
if label in DISPLAY:
subplots[1].plot(chan.series.index[WINDOW:], summary_stats[:,i],
label=label)
plt.legend()
plt.grid()
plt.show()
print("Done!")
| 28.277778
| 73
| 0.656189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 157
| 0.154224
|
f97cacad56a833075fdbf1486e99e188f8024b55
| 2,691
|
py
|
Python
|
sciibo/network/connection.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | 14
|
2017-06-16T14:16:57.000Z
|
2021-02-26T13:53:56.000Z
|
sciibo/network/connection.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | 1
|
2018-06-27T16:11:48.000Z
|
2019-01-23T12:02:17.000Z
|
sciibo/network/connection.py
|
fdev/sciibo
|
984ec1945cd0f371bce148c1eb1e811befadb478
|
[
"MIT"
] | null | null | null |
import socket
import json
import struct
from sciibo.core.helpers import Queue
from .thread import SocketThread
class ConnectionThread(SocketThread):
def __init__(self, sock):
super(ConnectionThread, self).__init__()
self.sock = sock
# The number of bytes we are expecting
self.expecting = None
# Partial message we received so far
self.partial = None
# Outgoing message queue
self.queue = Queue()
# Player id this connection belongs to
self.player = None
def disconnected(self):
self.stop()
self.trigger('receive', self, {'type': 'disconnect'})
def action(self):
if not self.expecting:
# Send messages
while not self.queue.empty():
data = self.queue.get()
message = json.dumps(data)
self.sock.sendall(struct.pack("i", len(message)) + message.encode())
self.queue.task_done()
# Receive message size
data = self.sock.recv(struct.calcsize("i"))
if not data:
self.disconnected()
return
# We are now looking for a message
self.expecting = struct.unpack("i", data)[0]
self.partial = ""
return
# Receive at most what we are expecting
data = self.sock.recv(self.expecting)
if not data:
self.disconnected()
return
# Bytes to string
data = data.decode()
self.partial += data
self.expecting -= len(data)
# Received complete message
if not self.expecting:
try:
data = json.loads(self.partial)
except ValueError:
return
if not isinstance(data, dict):
return
type = data.get('type')
if not type:
return
self.trigger('receive', self, data)
self.expecting = None
self.partial = None
def send(self, data):
self.queue.put(data)
def on_error(self):
# Trigger disconnect when an error occurs, but not
# when the connection was stopped (using after_actions).
self.disconnected()
def after_actions(self):
# Send out queued messages before closing socket
try:
while not self.queue.empty():
data = self.queue.get()
message = json.dumps(data)
self.sock.sendall(struct.pack("i", len(message)) + message.encode())
self.queue.task_done()
except socket.error:
return
| 27.459184
| 84
| 0.544036
| 2,575
| 0.956893
| 0
| 0
| 0
| 0
| 0
| 0
| 500
| 0.185805
|
f97d02d723f0a4441c6c06372a7158427073778d
| 2,651
|
py
|
Python
|
libs/PieMeter.py
|
lionheart/TimeTracker-Linux
|
64405d53fd12d2593ef4879b867ff38a4d5b9ca9
|
[
"MIT"
] | 12
|
2015-02-06T19:06:49.000Z
|
2019-09-24T17:58:17.000Z
|
libs/PieMeter.py
|
lionheart/TimeTracker-Linux
|
64405d53fd12d2593ef4879b867ff38a4d5b9ca9
|
[
"MIT"
] | null | null | null |
libs/PieMeter.py
|
lionheart/TimeTracker-Linux
|
64405d53fd12d2593ef4879b867ff38a4d5b9ca9
|
[
"MIT"
] | 6
|
2015-11-22T01:58:31.000Z
|
2019-11-04T22:56:38.000Z
|
# Copyright (C) 2008 Jimmy Do <jimmydo@users.sourceforge.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import math
import gobject
import gtk
class PieMeter(gtk.Image):
_DEFAULT_SIZE = 24
def __init__(self):
gtk.Image.__init__(self)
self._progress = 0.0
self._fill_color = (0.0, 1.0, 0.0)
def set_progress(self, progress):
assert progress >= 0.0
assert progress <= 1.0
self._progress = progress
if self.window is not None:
self.window.invalidate_rect(self.allocation, True)
def set_fill_color(self, red, green, blue):
assert 0.0 <= red <= 1.0
assert 0.0 <= green <= 1.0
assert 0.0 <= blue <= 1.0
self._fill_color = (red, green, blue)
if self.window is not None:
self.window.invalidate_rect(self.allocation, True)
def do_size_request(self, requisition):
requisition.width = PieMeter._DEFAULT_SIZE
requisition.height = PieMeter._DEFAULT_SIZE
def do_expose_event(self, event):
context = event.window.cairo_create()
rect = self.allocation
x = rect.x + (rect.width / 2)
y = rect.y + (rect.height / 2)
radius = (min(rect.width, rect.height) / 2)
# Draw background circle
context.arc(x, y, radius, 0, 2 * math.pi)
context.set_source_rgba(0.8, 0.8, 0.8)
context.fill()
# Draw pie
context.arc(x, y, radius, (-0.5 * math.pi) + self._progress * 2 * math.pi, 1.5 * math.pi)
context.line_to(x, y)
context.close_path()
(red, green, blue) = self._fill_color
context.set_source_rgb(red, green, blue)
context.fill()
# Draw circle outline
context.arc(x, y, radius, 0, 2 * math.pi)
context.set_source_rgba(1, 1, 1)
context.set_line_width(1.0)
context.stroke()
gobject.type_register(PieMeter)
| 33.987179
| 97
| 0.626556
| 1,812
| 0.683516
| 0
| 0
| 0
| 0
| 0
| 0
| 804
| 0.303282
|
f97d4fd046debdeff0094ec80a682b86eb50db54
| 6,192
|
py
|
Python
|
examples/pinball.py
|
jgrigonis/arcade
|
9b624da7da52e3909f6e82c552446b90249041f1
|
[
"MIT"
] | 1
|
2021-05-23T20:30:46.000Z
|
2021-05-23T20:30:46.000Z
|
examples/pinball.py
|
jgrigonis/arcade
|
9b624da7da52e3909f6e82c552446b90249041f1
|
[
"MIT"
] | null | null | null |
examples/pinball.py
|
jgrigonis/arcade
|
9b624da7da52e3909f6e82c552446b90249041f1
|
[
"MIT"
] | null | null | null |
import arcade
import timeit
BALL_DRAG = 0.001
NO_FLIPPER = 0
FLIPPER_UP = 1
class MyApplication(arcade.Window):
""" Main application class. """
def __init__(self, width, height, resizable):
super().__init__(width, height, resizable=resizable)
self.sprite_list = arcade.SpriteList()
self.left_flipper_list = arcade.SpriteList()
self.right_flipper_list = arcade.SpriteList()
self.left_flipper_state = NO_FLIPPER
self.right_flipper_state = NO_FLIPPER
self.time = 0
arcade.set_background_color(arcade.color.DARK_SLATE_GRAY)
# Top wall
for x in range(20, 800, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, 980], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left wall
for y in range(260, 980, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [20, y], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Right wall
for y in range(260, 980, 40):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [780, y], [40, 40], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left bottom slope
y = 260
for x in range(40, 280, 10):
y -= 5
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Right bottom slope
y = 260
for x in range(760, 520, -10):
y -= 5
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
# Left flipper
y = 135
for x in range(280, 350, 10):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
self.left_flipper_list.append(wall)
y -= 5
# Right flipper
y = 135
for x in range(520, 440, -10):
wall = arcade.PhysicsAABB("images/boxCrate_double.png", [x, y], [10, 10], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
self.right_flipper_list.append(wall)
y -= 5
# Bumpers
for row in range(2):
for column in range(2):
bumper = arcade.PhysicsCircle("images/bumper.png", [250 + 300 * column, 450 + 300 * row], 35, [0, 0], 1.5, 100, BALL_DRAG)
bumper.static = True
self.sprite_list.append(bumper)
wall = arcade.PhysicsAABB("images/python_logo.png", [400, 600], [150, 150], [0, 0], 1, 100, 0)
wall.static = True
self.sprite_list.append(wall)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
self.sprite_list.draw()
start_x = 20
start_y = 10
arcade.draw_text("Processing time: {:.3f}".format(self.time), start_x, start_y, arcade.color.BLACK, 12)
def update(self, x):
""" Move everything """
start_time = timeit.default_timer()
arcade.process_2d_physics_movement(self.sprite_list, gravity=0.08)
arcade.process_2d_physics_collisions(self.sprite_list)
# -- Left flipper control
if self.left_flipper_state == FLIPPER_UP and self.left_flipper_list[0].center_y < 145:
y = 2
y_change = 2
for sprite in self.left_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
elif self.left_flipper_state == NO_FLIPPER and self.left_flipper_list[0].center_y > 135:
y = -2
y_change = -2
for sprite in self.left_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
else:
for sprite in self.left_flipper_list:
sprite.change_y = 0
sprite.frozen = True
# -- Right flipper control
if self.right_flipper_state == FLIPPER_UP and self.right_flipper_list[0].center_y < 145:
y = 2
y_change = 2
for sprite in self.right_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
elif self.right_flipper_state == NO_FLIPPER and self.right_flipper_list[0].center_y > 135:
y = -2
y_change = -2
for sprite in self.right_flipper_list:
sprite.change_y = y
y += y_change
sprite.frozen = False
else:
for sprite in self.right_flipper_list:
sprite.change_y = 0
sprite.frozen = True
for sprite in self.sprite_list:
if sprite.center_y < -20:
sprite.kill()
self.time = timeit.default_timer() - start_time
def on_key_press(self, key, modifiers):
"""
Called whenever the mouse moves.
"""
if key == arcade.key.LEFT:
self.left_flipper_state = FLIPPER_UP
elif key == arcade.key.RIGHT:
self.right_flipper_state = FLIPPER_UP
elif key == arcade.key.SPACE:
x = 720
y = 300
ball = arcade.PhysicsCircle("images/pool_cue_ball.png", [x, y], 15, [0, +20], 1, .25, BALL_DRAG)
self.sprite_list.append(ball)
def on_key_release(self, key, modifiers):
"""
Called when the user presses a mouse button.
"""
if key == arcade.key.LEFT:
self.left_flipper_state = NO_FLIPPER
elif key == arcade.key.RIGHT:
self.right_flipper_state = NO_FLIPPER
window = MyApplication(800, 1000, resizable=False)
window.set_size(700, 700)
arcade.run()
| 33.652174
| 138
| 0.55491
| 6,021
| 0.972384
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.116764
|
f97e5968772769d07d1c5c3519564d5e93b96cb9
| 2,350
|
py
|
Python
|
pygomas/pack.py
|
sfp932705/pygomas
|
8cdd7e973b8b4e8de467803c106ec44ca6b8bd03
|
[
"MIT"
] | 3
|
2019-06-20T08:55:36.000Z
|
2019-07-04T14:10:40.000Z
|
pygomas/pack.py
|
sfp932705/pygomas
|
8cdd7e973b8b4e8de467803c106ec44ca6b8bd03
|
[
"MIT"
] | null | null | null |
pygomas/pack.py
|
sfp932705/pygomas
|
8cdd7e973b8b4e8de467803c106ec44ca6b8bd03
|
[
"MIT"
] | null | null | null |
import json
from loguru import logger
from .config import PERFORMATIVE, PERFORMATIVE_PACK, PERFORMATIVE_PACK_TAKEN, TEAM, X, Y, Z, NAME, ACTION, CREATE, \
TYPE
from .agent import AbstractAgent, LONG_RECEIVE_WAIT
from .vector import Vector3D
from spade.message import Message
from spade.behaviour import OneShotBehaviour, CyclicBehaviour
from spade.template import Template
from spade.agent import Agent
PACK_NONE: int = 1000
PACK_MEDICPACK: int = 1001
PACK_AMMOPACK: int = 1002
PACK_OBJPACK: int = 1003
PACK_NAME = {
PACK_NONE: 'NONE',
PACK_MEDICPACK: 'MEDIC',
PACK_AMMOPACK: 'AMMO',
PACK_OBJPACK: 'OBJ'
}
PACK_AUTODESTROY_TIMEOUT: int = 25
class Pack(AbstractAgent, Agent):
def __str__(self):
return "P(" + str(PACK_NAME[self.type]) + "," + str(self.position) + ")"
def __init__(self, name, passwd="secret", manager_jid="cmanager@localhost", x=0, z=0, team=0):
Agent.__init__(self, name, passwd)
AbstractAgent.__init__(self, name, team)
self.type = PACK_NONE
self.manager = manager_jid
self.position = Vector3D()
self.position.x = x
self.position.y = 0
self.position.z = z
async def setup(self):
self.add_behaviour(self.CreatePackBehaviour())
t = Template()
t.set_metadata(PERFORMATIVE, PERFORMATIVE_PACK_TAKEN)
self.add_behaviour(self.PackTakenResponderBehaviour(), t)
class CreatePackBehaviour(OneShotBehaviour):
async def run(self):
msg = Message(to=self.agent.manager)
msg.set_metadata(PERFORMATIVE, PERFORMATIVE_PACK)
msg.body = json.dumps({
NAME: self.agent.name,
TEAM: self.agent.team,
ACTION: CREATE,
TYPE: self.agent.type,
X: self.agent.position.x,
Y: self.agent.position.y,
Z: self.agent.position.z
})
await self.send(msg)
logger.info("CreatePack msg sent: {}".format(msg))
class PackTakenResponderBehaviour(CyclicBehaviour):
async def run(self):
msg = await self.receive(timeout=LONG_RECEIVE_WAIT)
if msg is not None:
content = msg.body
await self.agent.perform_pack_taken(content)
# await self.agent.stop()
| 31.333333
| 116
| 0.635745
| 1,682
| 0.715745
| 0
| 0
| 0
| 0
| 1,035
| 0.440426
| 112
| 0.04766
|
f97e89c0eb4e106c1ec357be4b95f0207161d996
| 2,178
|
py
|
Python
|
Utils/initialize.py
|
soshishimada/PhysCap_demo_release
|
542756ed9ecdca77eda8b6b44ba2348253b999c3
|
[
"Unlicense"
] | 62
|
2021-09-05T19:36:06.000Z
|
2022-03-29T11:47:09.000Z
|
Utils/initialize.py
|
soshishimada/PhysCap_demo_release
|
542756ed9ecdca77eda8b6b44ba2348253b999c3
|
[
"Unlicense"
] | 4
|
2021-09-21T09:52:02.000Z
|
2022-03-27T09:08:30.000Z
|
Utils/initialize.py
|
soshishimada/PhysCap_demo_release
|
542756ed9ecdca77eda8b6b44ba2348253b999c3
|
[
"Unlicense"
] | 10
|
2021-09-05T00:27:17.000Z
|
2022-03-22T13:25:57.000Z
|
import numpy as np
import pybullet as p
class Initializer():
def __init__(self,floor_known=None,floor_frame_path=None,):
if floor_known:
self.RT = np.load(floor_frame_path)
else:
self.RT =np.eye(4)
self.rbdl2bullet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 20, 21, 22]
r_knee_id = 26
r_ankle_id = 28
r_foot_id = 30
r_toe_id = 34
r_heel_id = 35
self.r_kafth_ids = [r_knee_id, r_ankle_id, r_foot_id, r_toe_id, r_heel_id]
l_knee_id = 9
l_ankle_id = 11
l_foot_id = 13
l_toe_id = 17
l_heel_id = 18
self.l_kafth_ids = [l_knee_id, l_ankle_id, l_foot_id, l_toe_id, l_heel_id]
self.params1={
"scale":1000,"iter":8,"delta_t":0.001,"j_kp":117497,"j_kd":3300,"bt_kp":155000,
"bt_kd":2300,"br_kp":50000,"br_kd":2800}
self.params2={
"scale":1000,"iter":8,"delta_t":0.01,"j_kp":300,"j_kd":150,"bt_kp":600,
"bt_kd":300,"br_kp":300,"br_kd":150}
self.con_j_ids_bullet = {"r_toe_id":34,"r_heel_id":35,"l_toe_id":17,"l_heel_id":18}
def get_params(self):
return self.params2#self.params1
def get_con_j_idx_bullet(self):
return self.con_j_ids_bullet
def remove_collisions(self,id_a,id_b):
### turn of collision between humanoids ###
for i in range(p.getNumJoints(id_a)):
for j in range(p.getNumJoints(id_b)):
p.setCollisionFilterPair(id_a, id_b, i, j, 0)
return 0
def get_knee_ankle_foot_toe_heel_ids_rbdl(self):
return self.l_kafth_ids,self.r_kafth_ids
def get_rbdl2bullet(self):
return self.rbdl2bullet
def change_humanoid_color(self,id_robot,color):
for j in range(p.getNumJoints(id_robot)):
p.changeVisualShape(id_robot, j, rgbaColor=color)
return 0
def get_R_T(self):
R = self.RT[:3, :3]
T = self.RT[:-1, 3:].reshape(3)
return R,T
| 34.571429
| 166
| 0.573462
| 2,134
| 0.979798
| 0
| 0
| 0
| 0
| 0
| 0
| 224
| 0.102847
|
f97e91890c0cdcab8847df722787798324fca2ec
| 3,220
|
py
|
Python
|
nlptasks/padding.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | 2
|
2020-12-30T13:11:09.000Z
|
2021-11-04T19:40:31.000Z
|
nlptasks/padding.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | 99
|
2020-11-02T14:58:04.000Z
|
2021-04-09T18:01:34.000Z
|
nlptasks/padding.py
|
ulf1/nlptasks
|
07d36448b517a18f76088f5d9cfb853e7602b079
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow.keras as keras # pad_sequences
from pad_sequences import pad_sequences_adjacency
from pad_sequences import pad_sequences_sparse
def pad_idseqs(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
idseqs, VOCAB = func(*args, **kwargs)
# padding and update vocabulary
if maxlen is not None:
if "[PAD]" not in VOCAB:
VOCAB.append("[PAD]")
idseqs = keras.preprocessing.sequence.pad_sequences(
idseqs, maxlen=maxlen, value=VOCAB.index("[PAD]"),
padding=padding, truncating=truncating).tolist()
return idseqs, VOCAB
return wrapper
def pad_adjacmatrix(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
adjac_matrix, seqs_lens = func(*args, **kwargs)
# pad adjacency matrix of children relationships
if maxlen is not None:
adjac_matrix = pad_sequences_adjacency(
sequences=adjac_matrix, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
return adjac_matrix, seqs_lens
return wrapper
def pad_maskseqs(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
maskseqs, seqs_lens, VOCAB = func(*args, **kwargs)
# pad sparse mask sequence
if maxlen is not None:
maskseqs = pad_sequences_sparse(
sequences=maskseqs, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
return maskseqs, seqs_lens, VOCAB
return wrapper
def pad_merge_adjac_maskseqs(func):
def wrapper(*args, **kwargs):
# read and remove padding settings
maxlen = kwargs.pop('maxlen', None)
padding = kwargs.pop('padding', 'pre')
truncating = kwargs.pop('truncating', 'pre')
# run the NLP task
adjac, onehot, seqs_lens, n_classes = func(*args, **kwargs)
# pad adjacency matrix of children relationships
if maxlen is not None:
adjac = pad_sequences_adjacency(
sequences=adjac, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
onehot = pad_sequences_sparse(
sequences=onehot, seqlen=seqs_lens,
maxlen=maxlen, padding=padding, truncating=truncating)
# shift index of adjac matrix
adjac = [[(i + n_classes, j) for i, j in sent] for sent in adjac]
# merge both sparse matrices
maskseqs = [adjac[k] + onehot[k] for k in range(len(adjac))]
# done
return maskseqs, seqs_lens
return wrapper
| 34.255319
| 73
| 0.617081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 616
| 0.191304
|
f97eb5e10ca3b047fe571ca899a9ef09001fcef1
| 4,894
|
py
|
Python
|
py/g1/threads/tests/test_locks.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | 3
|
2016-01-04T06:28:52.000Z
|
2020-09-20T13:18:40.000Z
|
py/g1/threads/tests/test_locks.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
py/g1/threads/tests/test_locks.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
import unittest
import threading
from g1.threads import locks
class ReadWriteLockTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.rwlock = locks.ReadWriteLock()
def assert_state(self, num_readers, num_writers):
self.assertEqual(self.rwlock._num_readers, num_readers)
self.assertEqual(self.rwlock._num_writers, num_writers)
def test_read_lock(self):
self.assert_state(0, 0)
self.assertTrue(self.rwlock.reader_acquire(timeout=0.01))
self.assert_state(1, 0)
self.assertTrue(self.rwlock.reader_acquire(timeout=0.01))
self.assert_state(2, 0)
self.assertFalse(self.rwlock.writer_acquire(timeout=0.01))
self.assert_state(2, 0)
self.rwlock.reader_release()
self.rwlock.reader_release()
self.assert_state(0, 0)
def test_write_lock(self):
self.assert_state(0, 0)
self.assertTrue(self.rwlock.writer_acquire(timeout=0.01))
self.assert_state(0, 1)
self.assertFalse(self.rwlock.reader_acquire(timeout=0.01))
self.assert_state(0, 1)
self.assertFalse(self.rwlock.writer_acquire(timeout=0.01))
self.assert_state(0, 1)
self.rwlock.writer_release()
self.assert_state(0, 0)
def start_reader_thread(self, event):
thread = threading.Thread(
target=acquire_then_set,
args=(self.rwlock.reader_acquire, event),
daemon=True,
)
thread.start()
def start_writer_thread(self, event):
thread = threading.Thread(
target=acquire_then_set,
args=(self.rwlock.writer_acquire, event),
daemon=True,
)
thread.start()
def test_reader_notify_writers(self):
self.rwlock.reader_acquire()
event1 = threading.Event()
event2 = threading.Event()
event3 = threading.Event()
self.start_writer_thread(event1)
self.start_writer_thread(event2)
self.start_writer_thread(event3)
self.assertFalse(event1.wait(0.01))
self.assertFalse(event2.wait(0.01))
self.assertFalse(event3.wait(0.01))
self.rwlock.reader_release()
self.assertEqual(
sorted([
event1.wait(0.01),
event2.wait(0.01),
event3.wait(0.01),
]),
[False, False, True],
)
def test_writer_notify_readers(self):
self.rwlock.writer_acquire()
event1 = threading.Event()
event2 = threading.Event()
self.start_reader_thread(event1)
self.start_reader_thread(event2)
self.assertFalse(event1.wait(0.01))
self.assertFalse(event2.wait(0.01))
self.rwlock.writer_release()
self.assertTrue(event1.wait(0.01))
self.assertTrue(event2.wait(0.01))
def test_writer_notify_writers(self):
self.rwlock.writer_acquire()
event1 = threading.Event()
event2 = threading.Event()
event3 = threading.Event()
self.start_writer_thread(event1)
self.start_writer_thread(event2)
self.start_writer_thread(event3)
self.assertFalse(event1.wait(0.01))
self.assertFalse(event2.wait(0.01))
self.assertFalse(event3.wait(0.01))
self.rwlock.writer_release()
self.assertEqual(
sorted([
event1.wait(0.01),
event2.wait(0.01),
event3.wait(0.01),
]),
[False, False, True],
)
def test_writer_notify_readers_and_writers(self):
self.rwlock.writer_acquire()
event1 = threading.Event()
event2 = threading.Event()
event3 = threading.Event()
event4 = threading.Event()
event5 = threading.Event()
self.start_reader_thread(event1)
self.start_reader_thread(event2)
self.start_writer_thread(event3)
self.start_writer_thread(event4)
self.start_writer_thread(event5)
self.assertFalse(event1.wait(0.01))
self.assertFalse(event2.wait(0.01))
self.assertFalse(event3.wait(0.01))
self.assertFalse(event4.wait(0.01))
self.assertFalse(event5.wait(0.01))
self.rwlock.writer_release()
self.assertIn(
(
[
event1.wait(0.01),
event2.wait(0.01),
],
sorted([
event3.wait(0.01),
event4.wait(0.01),
event5.wait(0.01),
]),
),
[
([True, True], [False, False, False]),
([False, False], [False, False, True]),
],
)
def acquire_then_set(acquire, event):
acquire()
event.set()
if __name__ == '__main__':
unittest.main()
| 28.619883
| 66
| 0.585411
| 4,708
| 0.961994
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.002043
|
f9835a83cc85b616ebc4877cb26f2e75d6afe07f
| 1,104
|
py
|
Python
|
statisticalDistributions.py
|
mrhsce/simPython
|
94598164abc9833bad1121a978acb94c4fecec27
|
[
"Apache-2.0"
] | 2
|
2015-12-19T04:27:12.000Z
|
2016-11-23T18:53:50.000Z
|
statisticalDistributions.py
|
mrhsce/simPython
|
94598164abc9833bad1121a978acb94c4fecec27
|
[
"Apache-2.0"
] | null | null | null |
statisticalDistributions.py
|
mrhsce/simPython
|
94598164abc9833bad1121a978acb94c4fecec27
|
[
"Apache-2.0"
] | null | null | null |
""" Here definitions and attributes of all statistical distributions that are used in the simulation are defined"""
from abc import ABCMeta, abstractmethod
import random
#import np
class StatDis(object):
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def generate(self):
pass
class UniformDis(StatDis):
def __init__(self, minVal, maxVal):
self.minVal = minVal
self.maxVal = maxVal
def generate(self):
return random.uniform(self.minVal, self.maxVal)
class NormalDis(StatDis):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def generate(self):
return np.random.normal(self.mean, self.std, 1)[0]
class ConstantDis(StatDis):
def __init__(self, val):
self.val = val
def generate(self):
return self.val
class TriangularDis(StatDis):
def __init__(self, low, high, mode):
self.low = low
self.high = high
self.mode = mode
def generate(self):
return random.triangular(self.low, self.high, self.mode)
| 19.714286
| 115
| 0.645833
| 908
| 0.822464
| 0
| 0
| 52
| 0.047101
| 0
| 0
| 125
| 0.113225
|
f983fe925ecae418e3ac67726cae140e97825556
| 6,594
|
py
|
Python
|
RainbowGrades/parsexml.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | 2
|
2017-10-11T17:48:33.000Z
|
2020-12-15T16:05:05.000Z
|
RainbowGrades/parsexml.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | 4
|
2019-04-25T02:47:34.000Z
|
2020-03-31T18:56:45.000Z
|
RainbowGrades/parsexml.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | 1
|
2020-02-07T19:19:20.000Z
|
2020-02-07T19:19:20.000Z
|
#!/usr/bin/env python3
import csv
import xml.etree.ElementTree as ET
import sys
import os.path
class QuestionData:
final_answer = ""
final_answer_time = 0
first_answer = ""
attempts = 0
first_answer_time = 0
def __init__(self,final_answer,final_answer_time,attempts,first_answer,first_answer_time):
self.final_answer = final_answer
self.final_answer_time = final_answer_time
self.first_answer = first_answer
self.first_answer_time = first_answer_time
self.attempts = attempts
def xml_to_csv(xml_filename):
"""
Parses .xml files generated by newer versions of iClicker software in SessionData
A CSV file will be written to the same path as the XML file, so it is important that any path, be it
absolute or relative, is included in the xml_filename argument. The CSV file is not a perfect replica of
older (i.e. iClicker 6) CSV files, but is our best approximation at this time. It should be enough for
Rainbow Grades to function properly.
"""
csv_filename = xml_filename[:-3] + "csv"
try:
with open(xml_filename,"r") as readfile:
tree = ET.parse(xml_filename)
root = tree.getroot()
questions_in_order = []
start_times = {}
stop_times = {}
user_question_data = {}
for child in root:
if child.tag == "p": # This is a polling tag
question = child.attrib["qn"]
start_times[question] = child.attrib["strt"]
stop_times[question] = child.attrib["stp"]
questions_in_order.append(question)
question_votes = {}
for qchild in child:
if qchild.tag == "v": # This is a voting tag
clicker_id = qchild.attrib["id"]
if clicker_id not in user_question_data:
user_question_data[clicker_id] = {}
user_question_data[clicker_id][question] = {}
if "fans" in qchild.attrib:
user_question_data[clicker_id][question] = QuestionData(qchild.attrib["ans"],
qchild.attrib["fanst"],
qchild.attrib["att"],
qchild.attrib["fans"],
qchild.attrib["tm"])
question_votes[clicker_id] = qchild.attrib["ans"]
with open(csv_filename, 'w') as writefile:
csvwriter = csv.writer(writefile) # Need to change dialect to be iclicker compliant
# Write the header
# Right now we don't have min reply/min correct in XML land, instead we have MinPart_S
next_row = ["Scoring"]
if "perf" in root.attrib:
performance = root.attrib["perf"]
else:
performance = -1
if "part" in root.attrib:
participation = root.attrib["part"]
else:
participation = 1
csvwriter.writerow(["Scoring", "Performance = " + performance,
"Participation = " + participation, "Min Reply = 2",
"Min Correct = 0",
" "])
next_row = ["Question", " ", " "]
for i in range(len(questions_in_order)):
next_row = next_row + ["Question " + str(i + 1), "Score", "Final Answer Time", "Number of Attempts",
"First Response", "Time"]
csvwriter.writerow(next_row)
next_row = ["Start Time", " ", " "]
for question in questions_in_order:
next_row = next_row + [" " + start_times[question], " ", " ", " ", " ", " "]
csvwriter.writerow(next_row)
next_row = ["Stop Time", " ", " "]
first_stop = True
for question in questions_in_order:
if not first_stop:
next_row = next_row + [" " + stop_times[question], " ", " ", " ", " ", " "]
else:
next_row = next_row + [stop_times[question], " ", " ", " ", " ", " "]
first_stop = False
csvwriter.writerow(next_row)
next_row = ["Correct Answer", " ", " "]
first_stop = True
for question in questions_in_order:
if not first_stop:
next_row = next_row + [" ", " ", " ", " ", " ", " "]
else:
next_row = next_row + ["", " ", " ", " ", " ", " "]
first_stop = False
csvwriter.writerow(next_row)
for user in sorted(user_question_data.keys()):
next_row = [user, "", "0"]
for question in questions_in_order:
if question in user_question_data[user]:
qd = user_question_data[user][question]
next_row = next_row + [qd.final_answer, 0, qd.final_answer_time, qd.attempts,
qd.first_answer, qd.first_answer_time]
else:
next_row = next_row + ["", "", "", "", "", ""]
csvwriter.writerow(next_row)
except IOError as e:
print("File I/O error: {}".format(e))
exit(-1)
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Correct usage is {} [file with iclicker {\"file\":...} entries]".format(sys.argv[0]))
exit(-1)
files = []
try:
with open(sys.argv[1]) as json_file:
for line in json_file:
# Extract just the filenames of the session data
files += [x.strip()[1:-1] for x in line.split("[")[1].split("]")[0].split(",")]
except IOError as e:
print("Error reading JSON excerpt: {}".format(e))
for filename in files:
if len(filename) >= 4 and filename[-4:] == ".xml":
xml_to_csv(filename)
| 44.554054
| 120
| 0.474826
| 445
| 0.067486
| 0
| 0
| 0
| 0
| 0
| 0
| 1,313
| 0.19912
|
f98449b95d48df636ca504bf4073160f56093406
| 2,255
|
py
|
Python
|
header.py
|
yufernando/inDelphi-app
|
37938f7aaa1630fb80e7568d3d13472eedb76a6d
|
[
"FSFAP"
] | 13
|
2018-11-18T21:53:46.000Z
|
2021-03-01T16:14:21.000Z
|
header.py
|
yufernando/inDelphi-app
|
37938f7aaa1630fb80e7568d3d13472eedb76a6d
|
[
"FSFAP"
] | 2
|
2020-02-11T22:34:41.000Z
|
2020-06-05T18:16:10.000Z
|
header.py
|
yufernando/inDelphi-app
|
37938f7aaa1630fb80e7568d3d13472eedb76a6d
|
[
"FSFAP"
] | 3
|
2018-12-03T05:20:01.000Z
|
2021-07-28T22:33:54.000Z
|
import dash
import dash_core_components as dcc
import dash_html_components as html
divider_text = ' • '
def get_navigation_header(page_nm):
font_size_param = 16
dot_style = dict(
color = 'gray',
fontSize = '%spx' % (font_size_param),
)
default_style = dict(
position = 'relative',
textDecoration = 'none',
textTransform = 'uppercase',
fontFamily = 'sans-serif',
color = 'black',
marginBottom = '2px',
fontSize = '%spx' % (font_size_param),
)
import copy
selected_style = copy.copy(default_style)
selected_style['borderBottom'] = '1px solid'
styles = dict()
for nm in ['single', 'batch', 'gene', 'guide', 'about']:
if page_nm == nm:
styles[nm] = selected_style
else:
styles[nm] = default_style
return html.Div(
[
html.H4(
'inDelphi',
style = dict(
textAlign = 'center',
),
),
html.Div(
[
html.A(
'Single mode',
href = 'single',
style = styles['single'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'Batch mode',
href = 'batch',
style = styles['batch'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'Gene mode',
href = 'gene',
style = styles['gene'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'User guide',
href = 'guide',
style = styles['guide'],
className = 'dynamicunderline',
),
html.Span(
divider_text,
),
html.A(
'About',
href = 'about',
style = styles['about'],
className = 'dynamicunderline',
),
],
style = dict(
marginBottom = 20,
textAlign = 'center',
),
className = 'row',
),
],
)
| 23.010204
| 59
| 0.446563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 386
| 0.171023
|
f9850dd79a394638d5a5c2e62aadd31fb4c2407b
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/packaging/tags.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/packaging/tags.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/_vendor/packaging/tags.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/68/e2/05/188e3a14bbe42690f0cbce7c7c576b1dbc9d3d1bb571a2d3908f144cea
| 96
| 96
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f98574c16f4148423e98de5b6034c5f75d3ac988
| 217
|
py
|
Python
|
toolbox/util/normalize.py
|
Yilin1010/DeleteKnowledge
|
49b7e0e3a2247e482ba8876762719e4adb3074c6
|
[
"Apache-2.0"
] | null | null | null |
toolbox/util/normalize.py
|
Yilin1010/DeleteKnowledge
|
49b7e0e3a2247e482ba8876762719e4adb3074c6
|
[
"Apache-2.0"
] | null | null | null |
toolbox/util/normalize.py
|
Yilin1010/DeleteKnowledge
|
49b7e0e3a2247e482ba8876762719e4adb3074c6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue May 24 11:14:03 2016
@author: Wajih-PC
"""
import numpy as np
def normalize(x,mu,sigma):
x = np.subtract(x,mu)
x = np.true_divide(x,sigma)
return x
| 19.727273
| 36
| 0.580645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.414747
|
f9867c0f2c0b8271efb4f277dc9a847054941a6b
| 1,070
|
py
|
Python
|
34_Find_First_and_Last_Position_of_Element_in_Sorted_Array.py
|
yuqingchen/Leetcode
|
6cbcb36e66a10a226ddb0966701e61ce4c2434d4
|
[
"MIT"
] | 1
|
2019-12-12T20:16:08.000Z
|
2019-12-12T20:16:08.000Z
|
34_Find_First_and_Last_Position_of_Element_in_Sorted_Array.py
|
yuqingchen/Leetcode
|
6cbcb36e66a10a226ddb0966701e61ce4c2434d4
|
[
"MIT"
] | null | null | null |
34_Find_First_and_Last_Position_of_Element_in_Sorted_Array.py
|
yuqingchen/Leetcode
|
6cbcb36e66a10a226ddb0966701e61ce4c2434d4
|
[
"MIT"
] | null | null | null |
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
left, right = self.first(nums, target), self.last(nums, target)
return [left, right]
def last(self, nums, target) :
if not nums :
return -1
left, right = 0, len(nums) -1
while left + 1 < right :
mid = (left + right) // 2
if nums[mid] <= target :
left = mid
else :
right = mid
if nums[right] == target :
return right
if nums[left] == target :
return left
return -1
def first(self, nums, target) :
if not nums :
return -1
left, right = 0, len(nums) -1
while left + 1 < right :
mid = (left + right) // 2
if nums[mid] < target :
left = mid
else :
right = mid
if nums[left] == target :
return left
if nums[right] == target :
return right
return -1
| 29.722222
| 71
| 0.44486
| 1,070
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f986bf3f2c420c696f6c53ea84f10ad7ccfa26ea
| 1,526
|
py
|
Python
|
demos/pandas/pandas_concat&append.py
|
szj2ys/deal_with_the_tasks_and_challenges
|
94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221
|
[
"Apache-2.0"
] | null | null | null |
demos/pandas/pandas_concat&append.py
|
szj2ys/deal_with_the_tasks_and_challenges
|
94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221
|
[
"Apache-2.0"
] | null | null | null |
demos/pandas/pandas_concat&append.py
|
szj2ys/deal_with_the_tasks_and_challenges
|
94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
#定义资料集
df1 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)) * 2, columns=['a', 'b', 'c', 'd'])
#concat纵向合并 axis=0纵向,axis=1横向
res = pd.concat([df1, df2, df3], axis=1)
#打印结果
print(res)
#将index_ignore设定为True,序号升序显示(index从0-8)
res = pd.concat([df1, df2, df3], axis=0, ignore_index=True)
#打印结果
print(res)
#定义资料集
df1 = pd.DataFrame(np.ones((3, 4)) * 0,
columns=['a', 'b', 'c', 'd'],
index=[1, 2, 3])
df2 = pd.DataFrame(np.ones((3, 4)) * 1,
columns=['b', 'c', 'd', 'e'],
index=[2, 3, 4])
#纵向"外"合并df1与df2,默认join为outer,即相同的column才合并,不同则NaN
res = pd.concat([df1, df2], axis=0, join='outer')
#打印结果
print(res)
#纵向"内"合并df1与df2,当join为inner时column不同的数据将会被丢弃
res = pd.concat([df1, df2], axis=0, join='inner')
#打印结果
print(res)
#定义资料集
df1 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
#将df2合并到df1的下面,以及重置index,并打印出结果
res = df1.append(df2, ignore_index=True) #注意append仅能纵向合并
print(res)
#合并多个df,将df2与df3合并至df1的下面,以及重置index,并打印出结果
res = df1.append([df2, df3], ignore_index=True)
print(res)
#合并series,将s1合并至df1,以及重置index,并打印出结果
res = df1.append(s1, ignore_index=True)
print(res)
| 26.310345
| 69
| 0.593054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 773
| 0.416936
|
f9873b3ec6305739faa020963cb0f6929823dc6d
| 799
|
py
|
Python
|
e/mail-relay/web/apps/core/migrations/0051_customersetting_transfer_max_size.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
e/mail-relay/web/apps/core/migrations/0051_customersetting_transfer_max_size.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | 18
|
2020-06-05T18:17:40.000Z
|
2022-03-11T23:25:21.000Z
|
e/mail-relay/web/apps/core/migrations/0051_customersetting_transfer_max_size.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0050_customersetting_can_view_mail'),
]
operations = [
migrations.AddField(
model_name='customersetting',
name='transfer_max_size',
field=models.IntegerField(default=0, help_text='\u5355\u4f4d\uff1aM\uff0c\u90ae\u4ef6\u5927\u5c0f\u8d85\u8fc7\u8be5\u9600\u503c\uff0c\u5219\u8be5\u90ae\u4ef6\u53d1\u9001\u65f6\u81ea\u52a8\u8f6c\u7f51\u7edc\u9644\u4ef6, \u9ed8\u8ba4\u503c\uff10, \u8868\u793a\u7528\u7cfb\u7edf\u9ed8\u8ba4\u8bbe\u7f6e\u503c', verbose_name='\u4e2d\u7ee7\uff1a\u81ea\u52a8\u8f6c\u7f51\u7edc\u9644\u4ef6\u6700\u5927\u9600\u503c'),
),
]
| 39.95
| 421
| 0.713392
| 690
| 0.863579
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.558198
|
f9873ed126ec1fdd07f8e281595ede0f1e5dfbf6
| 4,128
|
py
|
Python
|
python-django/oauth2demo/oauth/core/oauthclient.py
|
SequencingDOTcom/oAuth2-demo
|
609bd138cff07643a0c3e1df48f4f2e4adc9be34
|
[
"MIT"
] | 1
|
2020-11-05T22:16:37.000Z
|
2020-11-05T22:16:37.000Z
|
python-django/oauth2demo/oauth/core/oauthclient.py
|
SequencingDOTcom/oAuth2-demo
|
609bd138cff07643a0c3e1df48f4f2e4adc9be34
|
[
"MIT"
] | 3
|
2018-02-24T15:01:20.000Z
|
2021-11-29T17:29:02.000Z
|
python-django/oauth2demo/oauth/core/oauthclient.py
|
SequencingDOTcom/oAuth2-demo
|
609bd138cff07643a0c3e1df48f4f2e4adc9be34
|
[
"MIT"
] | 3
|
2017-04-06T01:38:20.000Z
|
2017-05-17T09:44:35.000Z
|
import urllib
import sched
import time
from threading import Thread
from token import Token
from ..utils.http import do_basic_secure_post
from ..exceptions.exceptions import BasicAuthenticationFailedException
class DefaultSequencingOAuth2Client(object):
# Attribute for value of redirect url
ATTR_REDIRECT_URL = "redirect_uri"
# Attribute for value of response type
ATTR_RESPONSE_TYPE = "response_type"
# Attribute for value state
ATTR_STATE = "state"
# Attribute for value client id
ATTR_CLIENT_ID = "client_id"
# Attribute for value scope
ATTR_SCOPE = "scope"
# Attribute for value code
ATTR_CODE = "code"
# Attribute for value refresh token
ATTR_REFRESH_TOKEN = "refresh_token"
# Attribute for access token
ATTR_ACCESS_TOKEN = "access_token"
# Attribute for value grant type
ATTR_GRANT_TYPE = "grant_type"
# Attribute for value expires in
ATTR_EXPIRES_IN = "expires_in"
def __init__(self, auth_parameters):
self.auth_parameters = auth_parameters
self.token = None
self._token_refresher = None
def http_redirect_parameters(self):
attributes = {
self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri,
self.ATTR_RESPONSE_TYPE: self.auth_parameters.response_type,
self.ATTR_STATE: self.auth_parameters.state,
self.ATTR_CLIENT_ID: self.auth_parameters.client_id,
self.ATTR_SCOPE: self.auth_parameters.scope
}
return attributes
def login_redirect_url(self):
params = urllib.urlencode(self.http_redirect_parameters())
return '%s?%s' % (self.auth_parameters.oauth_authorization_uri, params)
def authorize(self, response_code, response_state):
if response_state != self.auth_parameters.state:
raise ValueError("Invalid state parameter")
uri = self.auth_parameters.oauth_token_uri
params = {
self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type,
self.ATTR_CODE: response_code,
self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri
}
result = do_basic_secure_post(uri, self.auth_parameters, params)
if result is None:
raise BasicAuthenticationFailedException("Failure authentication.")
access_token = result[self.ATTR_ACCESS_TOKEN]
refresh_token = result[self.ATTR_REFRESH_TOKEN]
timelife = int(result[self.ATTR_EXPIRES_IN])
self.token = Token(access_token, refresh_token, timelife)
self._token_refresher = self.__TokenRefresher(self, timelife - 60)
self._token_refresher.start()
return self.token
def is_authorized(self):
return (self.token is not None) and (self.token.lifetime != 0)
def _refresh_token(self):
uri = self.auth_parameters.oauth_token_refresh_uri
params = {
self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type_refresh_token,
self.ATTR_REFRESH_TOKEN: self.token.refresh_token
}
result = do_basic_secure_post(uri, self.auth_parameters, params)
if result is None:
raise BasicAuthenticationFailedException("Authentication against backend failed. " +
"Server replied with: " + result)
access_token = result[self.ATTR_ACCESS_TOKEN]
refresh_token = self.token.refresh_token
timelife = result[self.ATTR_EXPIRES_IN]
self.token = Token(access_token, refresh_token, timelife)
class __TokenRefresher(Thread):
def __init__(self, outer, frequency):
Thread.__init__(self)
self.outer = outer
self.frequency = frequency
self.scheduler = sched.scheduler(time.time, time.sleep)
def run(self):
self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
self.scheduler.run()
def __run_refresh_token(self):
self.outer._refresh_token()
self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
| 34.115702
| 96
| 0.678537
| 3,915
| 0.948401
| 0
| 0
| 0
| 0
| 0
| 0
| 548
| 0.132752
|
f987b372c8da570186369c27352bfdc8a2dc0b25
| 1,019
|
py
|
Python
|
commands/elastic/utils.py
|
surfedushare/search-portal
|
f5486d6b07b7b04a46ce707cee5174db4f8da222
|
[
"MIT"
] | 2
|
2021-08-19T09:40:59.000Z
|
2021-12-14T11:08:20.000Z
|
commands/elastic/utils.py
|
surfedushare/search-portal
|
708a0d05eee13c696ca9abd7e84ab620d3900fbe
|
[
"MIT"
] | 159
|
2020-05-14T14:17:34.000Z
|
2022-03-23T10:28:13.000Z
|
commands/elastic/utils.py
|
nppo/search-portal
|
aedf21e334f178c049f9d6cf37cafd6efc07bc0d
|
[
"MIT"
] | 1
|
2021-11-11T13:37:22.000Z
|
2021-11-11T13:37:22.000Z
|
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import boto3
def get_es_client(conn, silent=False):
"""
Returns the elasticsearch client connected through port forwarding settings
"""
elastic_url = "https://localhost:9222"
protocol_config = {
"scheme": "https",
"port": 9222,
"use_ssl": True,
"verify_certs": False,
}
credentials = boto3.Session(profile_name=conn.aws.profile_name).get_credentials()
http_auth = AWS4Auth(credentials.access_key, credentials.secret_key, "eu-central-1", "es",
session_token=credentials.token)
es_client = Elasticsearch(
[elastic_url],
http_auth=http_auth,
connection_class=RequestsHttpConnection,
**protocol_config
)
# test if it works
if not silent and not es_client.cat.health(request_timeout=30):
raise ValueError('Credentials do not work for Elastic search')
return es_client
| 31.84375
| 94
| 0.684004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.234544
|
f98a6174a8d5b6ced1433ccb3968837fdf52d7af
| 7,135
|
py
|
Python
|
tests/test_signals.py
|
sulfadimetoxin/oarepo-taxonomies
|
b8385173614aa711a5e316c8fc24ac065b48aa3d
|
[
"MIT"
] | null | null | null |
tests/test_signals.py
|
sulfadimetoxin/oarepo-taxonomies
|
b8385173614aa711a5e316c8fc24ac065b48aa3d
|
[
"MIT"
] | 9
|
2020-08-24T08:49:15.000Z
|
2021-08-05T16:45:23.000Z
|
tests/test_signals.py
|
sulfadimetoxin/oarepo-taxonomies
|
b8385173614aa711a5e316c8fc24ac065b48aa3d
|
[
"MIT"
] | 1
|
2020-08-20T18:39:43.000Z
|
2020-08-20T18:39:43.000Z
|
from pprint import pprint
import pytest
from flask_taxonomies.proxies import current_flask_taxonomies
from flask_taxonomies.term_identification import TermIdentification
from invenio_records import Record
from oarepo_taxonomies.exceptions import DeleteAbortedError
from oarepo_taxonomies.signals import lock_term
from oarepo_taxonomies.tasks import unlock_term
def test_taxonomy_delete(app, db, taxonomy_tree, test_record):
taxonomies = current_flask_taxonomies.list_taxonomies(session=None).all()
assert len(taxonomies) == 1
with pytest.raises(DeleteAbortedError):
current_flask_taxonomies.delete_taxonomy(taxonomies[0])
taxonomies = current_flask_taxonomies.list_taxonomies(session=None).all()
assert len(taxonomies) == 1
def test_taxonomy_delete_2(app, db, taxonomy_tree):
taxonomies = current_flask_taxonomies.list_taxonomies(session=None).all()
assert len(taxonomies) == 1
current_flask_taxonomies.delete_taxonomy(taxonomies[0])
taxonomies = current_flask_taxonomies.list_taxonomies(session=None).all()
assert len(taxonomies) == 0
def test_taxonomy_term_delete(app, db, taxonomy_tree):
taxonomy = current_flask_taxonomies.get_taxonomy("test_taxonomy")
terms = current_flask_taxonomies.list_taxonomy(taxonomy).all()
term = terms[1]
ti = TermIdentification(term=term)
current_flask_taxonomies.delete_term(ti)
def test_taxonomy_term_delete_2(app, db, taxonomy_tree, test_record):
taxonomy = current_flask_taxonomies.get_taxonomy("test_taxonomy")
terms = current_flask_taxonomies.list_taxonomy(taxonomy).all()
term = terms[1]
ti = TermIdentification(term=term)
with pytest.raises(DeleteAbortedError):
current_flask_taxonomies.delete_term(ti)
def test_taxonomy_term_moved(app, db, taxonomy_tree, test_record):
taxonomy = current_flask_taxonomies.get_taxonomy("test_taxonomy")
terms = current_flask_taxonomies.list_taxonomy(taxonomy).all()
old_record = Record.get_record(id_=test_record.id)
old_taxonomy = old_record["taxonomy"]
assert old_taxonomy == [{
'is_ancestor': True,
'level': 1,
'links': {
'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a'
},
'test': 'extra_data'
},
{
'is_ancestor': True,
'level': 2,
'links': {
'parent':
'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a',
'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b'
},
'test': 'extra_data'
},
{
'is_ancestor': False,
'level': 3,
'links': {
'parent':
'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b',
'self':
'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b/c'
},
'test': 'extra_data'
}]
ti = TermIdentification(term=terms[2])
current_flask_taxonomies.move_term(ti, new_parent=terms[0], remove_after_delete=False)
db.session.commit()
new_record = Record.get_record(id_=test_record.id)
new_taxonomy = new_record["taxonomy"]
new_terms = current_flask_taxonomies.list_taxonomy(taxonomy).all()
assert new_terms[-1].parent_id == 1
# assert new_taxonomy == [{
# 'is_ancestor': True,
# 'links': {
# 'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a'
# },
# 'test': 'extra_data'
# },
# {
# 'is_ancestor': False,
# 'links': {
# 'parent': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a',
# 'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/c'
# },
# 'test': 'extra_data'
# }]
def test_taxonomy_term_update(app, db, taxonomy_tree, test_record):
taxonomy = current_flask_taxonomies.get_taxonomy("test_taxonomy")
terms = current_flask_taxonomies.list_taxonomy(taxonomy).all()
old_record = Record.get_record(id_=test_record.id)
assert old_record == {
'pid': 1,
'taxonomy': [{
'is_ancestor': True,
'level': 1,
'links': {'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a'},
'test': 'extra_data'
},
{
'is_ancestor': True,
'level': 2,
'links': {
'parent': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a',
'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b'
},
'test': 'extra_data'
},
{
'is_ancestor': False,
'level': 3,
'links': {
'parent': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b',
'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b/c'
},
'test': 'extra_data'
}],
'title': 'record 1'
}
term = terms[-1]
current_flask_taxonomies.update_term(term, extra_data={"new_data": "changed extra data"})
new_record = Record.get_record(id_=test_record.id)
assert new_record == {
'pid': 1,
'taxonomy': [{
'is_ancestor': True,
'level': 1,
'links': {'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a'},
'test': 'extra_data'
},
{
'is_ancestor': True,
'level': 2,
'links': {
'parent': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a',
'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b'
},
'test': 'extra_data'
},
{
'is_ancestor': False,
'level': 3,
'links': {
'parent': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b',
'self': 'http://127.0.0.1:5000/2.0/taxonomies/test_taxonomy/a/b/c'
},
'new_data': 'changed extra data',
'test': 'extra_data'
}],
'title': 'record 1'
}
def test_lock_unlock_term(app, db, taxonomy_tree):
term_identification = TermIdentification(taxonomy="test_taxonomy", slug="a/b/c")
term = list(current_flask_taxonomies.filter_term(
term_identification))[0]
lock_term(locked_terms=[term.id], term=term)
db.session.commit()
term = list(current_flask_taxonomies.filter_term(
term_identification))[0]
assert term.busy_count == 1
unlock_term(url=term.links().envelope["self"])
term = list(current_flask_taxonomies.filter_term(
term_identification))[0]
assert term.busy_count == 0
| 38.777174
| 99
| 0.568605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,017
| 0.282691
|
f98c3635dfd0d3ae569222c031b018e24dab8ea9
| 1,322
|
py
|
Python
|
sendemail/views.py
|
sami-sinnari/MilestoneProject4
|
4a66f5cd5e44e9ff4dbaeeb3e8733c0e2db6629e
|
[
"W3C",
"PostgreSQL"
] | null | null | null |
sendemail/views.py
|
sami-sinnari/MilestoneProject4
|
4a66f5cd5e44e9ff4dbaeeb3e8733c0e2db6629e
|
[
"W3C",
"PostgreSQL"
] | null | null | null |
sendemail/views.py
|
sami-sinnari/MilestoneProject4
|
4a66f5cd5e44e9ff4dbaeeb3e8733c0e2db6629e
|
[
"W3C",
"PostgreSQL"
] | 1
|
2021-08-31T03:29:02.000Z
|
2021-08-31T03:29:02.000Z
|
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import ContactForm
from profiles.models import UserProfile
def contactView(request):
if request.user.is_authenticated:
try:
profile = UserProfile.objects.get(user=request.user)
form = ContactForm(initial={
'from_email': profile.user.email,
})
except UserProfile.DoesNotExist:
form = ContactForm()
else:
form = ContactForm()
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
subject = form.cleaned_data['subject']
from_email = form.cleaned_data['from_email']
message = form.cleaned_data['message']
try:
send_mail(
subject, message, from_email, ['elsinnarisami@gmail.com'])
except BadHeaderError:
return HttpResponse('Invalid header found.')
return redirect('success')
context = {
'contact_page': 'active',
'form': form,
}
return render(request, "sendemail/contact.html", context)
def successView(request):
return render(request, "sendemail/contact_success.html")
| 31.47619
| 78
| 0.621785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.142965
|
f98cbeedab7f46e8e4601542568092c0c0c15c19
| 592
|
py
|
Python
|
tests/test_accounts.py
|
edgeee/buycoins-python
|
72a3130cf43d0c618e58418b3d8cb7ce73b0f133
|
[
"MIT"
] | 55
|
2021-02-02T22:09:37.000Z
|
2022-02-24T12:17:23.000Z
|
tests/test_accounts.py
|
edgeee/buycoins-python
|
72a3130cf43d0c618e58418b3d8cb7ce73b0f133
|
[
"MIT"
] | 2
|
2021-03-24T20:11:02.000Z
|
2021-04-27T13:13:27.000Z
|
tests/test_accounts.py
|
edgeee/buycoins-python
|
72a3130cf43d0c618e58418b3d8cb7ce73b0f133
|
[
"MIT"
] | 8
|
2021-02-08T17:06:53.000Z
|
2022-02-13T09:38:59.000Z
|
from tests.utils import _mock_gql
create_deposit_response = dict(
createDepositAccount=dict(
accountNumber="123",
accountName="john doe",
accountType="deposit",
bankName="Providus",
accountReference="ref",
)
)
def test_create_deposit():
from buycoins import accounts
_mock_gql(create_deposit_response)
acc = accounts.create_deposit("john doe")
assert type(acc) == accounts.VirtualDepositAccountType
assert acc.account_number == "123"
assert acc.account_reference == "ref"
assert acc.account_name == "john doe"
| 23.68
| 58
| 0.689189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.116554
|
f98d3f6b60f1e702d811a09d53f295688785eb0a
| 3,755
|
py
|
Python
|
fabfile/s3_copy.py
|
MadeInHaus/django-template
|
73d3bdb85e9b1b2723a1de67d6765ce0647dce3a
|
[
"MIT"
] | 1
|
2015-06-18T22:37:28.000Z
|
2015-06-18T22:37:28.000Z
|
fabfile/s3_copy.py
|
MadeInHaus/django-template
|
73d3bdb85e9b1b2723a1de67d6765ce0647dce3a
|
[
"MIT"
] | 11
|
2015-01-07T16:58:55.000Z
|
2022-01-27T16:22:22.000Z
|
fabfile/s3_copy.py
|
MadeInHaus/django-template
|
73d3bdb85e9b1b2723a1de67d6765ce0647dce3a
|
[
"MIT"
] | null | null | null |
from fabric.decorators import task, roles
from haus_vars import APP_INFO, parse_vars
from fabric.api import run, execute
from fabric.context_managers import cd
from heroku import create_fixture_on_s3, grab_fixture_on_s3
import cStringIO
def copyBucket(srcBucketName, dstBucketName, aws_key, aws_secret, folder_name='uploads'):
from boto.s3.connection import S3Connection
conn = S3Connection(aws_key, aws_secret)
source = conn.get_bucket(srcBucketName);
destination = conn.get_bucket(dstBucketName);
if folder_name:
s3keys = source.list(folder_name)
else:
s3keys = source.list()
for k in s3keys:
print 'Copying ' + k.key + ' from ' + srcBucketName + ' to ' + dstBucketName
destination.copy_key(k.key, srcBucketName, k.key, preserve_acl=True)
def copyBucketDifferentOwners(src_settings, dst_settings, folder_name='uploads'):
from boto.s3.connection import S3Connection
srcBucketName = src_settings['AWS_BUCKET_NAME']
dstBucketName = dst_settings['AWS_BUCKET_NAME']
src_conn = S3Connection(src_settings['AWS_ACCESS_KEY_ID'], src_settings['AWS_SECRET_ACCESS_KEY'])
dst_conn = S3Connection(dst_settings['AWS_ACCESS_KEY_ID'], dst_settings['AWS_SECRET_ACCESS_KEY'])
source = src_conn.get_bucket(srcBucketName);
destination = dst_conn.get_bucket(dstBucketName);
if folder_name:
s3keys = source.list(folder_name)
else:
s3keys = source.list()
for k in s3keys:
print 'Copying ' + k.key + ' from ' + srcBucketName + ' to ' + dstBucketName
f = cStringIO.StringIO()
k.get_contents_to_file(f)
nk = destination.new_key(k.key)
f.seek(0)
nk.set_contents_from_file(f,
policy='public-read',
headers={ 'Content-Type': k.content_type }
)
@task
@roles('vagrant')
def update_uploads(src_env='staging', dst_env='dev', different_owners=False):
"""copies the uploads folder from src_env to dst_env s3 buckets, pass different_owners=True to copy between s3 buckets that belong to different accounts"""
print "UPDATING UPLOADS...."
different_owners = str(different_owners).lower() == 'true'
print "{}".format(APP_INFO)
src_app_env = APP_INFO[src_env]['APP_ENV']
dst_app_env = APP_INFO[dst_env]['APP_ENV']
with(cd('/var/www')):
src_settings = parse_vars(run("APP_ENV={} ./manage.py settings_vars AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_BUCKET_NAME".format(src_app_env)))
dst_settings = parse_vars(run("APP_ENV={} ./manage.py settings_vars AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_BUCKET_NAME".format(dst_app_env)))
src = src_settings['AWS_BUCKET_NAME']
dst = dst_settings['AWS_BUCKET_NAME']
folder = 'uploads'
owners = {
'dev': 1,
'staging': 1,
'production': 1,
}
if different_owners or owners[src_env] != owners[dst_env]:
copyBucketDifferentOwners(src_settings, dst_settings, folder)
else:
copyBucket(src, dst, src_settings['AWS_ACCESS_KEY_ID'], src_settings['AWS_SECRET_ACCESS_KEY'], folder)
@task
@roles('vagrant')
def update_fixture(src_env='staging', dst_env='dev', do_update_uploads=True, *args, **kwargs):
""" updates fixture and downloads it for given src_env (staging by default) copies uploads directory from src_env to dst_env s3 buckets unless update_uploads=False"""
if str(do_update_uploads).lower() == 'false':
do_update_uploads = False
execute(create_fixture_on_s3, env=src_env)
execute(grab_fixture_on_s3, env=src_env)
if do_update_uploads:
execute(update_uploads, src_env=src_env, dst_env=dst_env)
| 40.376344
| 170
| 0.694541
| 0
| 0
| 0
| 0
| 1,853
| 0.493475
| 0
| 0
| 944
| 0.251398
|
f98d78abc4c61ae00ffd3cee5b5299a82b124239
| 505
|
py
|
Python
|
umysqldb/__init__.py
|
arozumenko/pyumysql
|
34b61faf33e2db644b02c483c07ddca32165539a
|
[
"Apache-2.0"
] | null | null | null |
umysqldb/__init__.py
|
arozumenko/pyumysql
|
34b61faf33e2db644b02c483c07ddca32165539a
|
[
"Apache-2.0"
] | null | null | null |
umysqldb/__init__.py
|
arozumenko/pyumysql
|
34b61faf33e2db644b02c483c07ddca32165539a
|
[
"Apache-2.0"
] | null | null | null |
from umysqldb import connections
from umysqldb import cursors
def connect(db, host="localhost", port=3306, user="root", passwd="root",
charset="utf8", cursorclass=cursors.Cursor, autocommit=False):
return connections.Connection(database=db, host=host, port=port, user=user,
passwd=passwd, charset=charset,
cursorclass=cursorclass,
autocommit=autocommit)
Connection = Connect = connect
| 42.083333
| 79
| 0.609901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.057426
|
f98fe6f86f42085c174f1fcd733ce2400cb3d904
| 2,696
|
py
|
Python
|
system_monitor_script/build_network.py
|
carabri/carabri
|
c8b94080331ab66ee116ee6e87e13e295e4f9604
|
[
"Apache-2.0"
] | null | null | null |
system_monitor_script/build_network.py
|
carabri/carabri
|
c8b94080331ab66ee116ee6e87e13e295e4f9604
|
[
"Apache-2.0"
] | null | null | null |
system_monitor_script/build_network.py
|
carabri/carabri
|
c8b94080331ab66ee116ee6e87e13e295e4f9604
|
[
"Apache-2.0"
] | null | null | null |
import re
import networkx
import itertools
import argparse
import json
def build_network(filename,outfile,mb_threshold):
regex = re.compile('([0-9]+) kB: (.*)(?=\()')
network_list = []
current_network = []
with open(filename, 'r') as memlog:
ignore_line = True
for line in memlog.readlines():
if not ignore_line:
result = regex.search(line)
if (result is not None):
current_network.append((result.group(1), result.group(2),))
if 'Total PSS by process:' in line:
ignore_line = False
if 'Total PSS by OOM adjustment:' in line:
ignore_line = True
if 'SAMPLE_TIME:' in line:
edges = itertools.combinations(current_network,2)
g = networkx.Graph()
g.add_nodes_from(current_network)
g.add_edges_from(edges)
current_network = []
network_list.append(g)
G = networkx.Graph()
for n in network_list:
for i in n.nodes():
if int(i[0]) > mb_threshold: #if it's using more than the memory threshold
if i[1] not in G.nodes():
#include it in the summary graph
G.add_node(i[1])
for j in n.neighbors(i):
if int(j[0]) > mb_threshold:
w = int(i[0])+int(j[0])
if ([i[1]],[j[1]]) in G.edges():
G[i[1]][j[1]]['weight'] += w
elif ([j[1]],[i[1]]) in G.edges():
G[j[1]][i[1]]['weight'] += w
else:
G.add_edge(i[1],j[1],weight=w)
# write result to edge list (CSV-type file)
networkx.write_edgelist(G, outfile, data=['weight'])
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Build a network from the given usage log file, then write it to an edge list.')
argparser.add_argument('--filename',type=str,help='provide the memory log file for building the network. Defaults to ./memory.log.sample',default='./memory.log.sample')
argparser.add_argument('--outfile',type=str,help='specify the desired path/name for the output edge list. Defaults to ./example.edgelist',default='./example.edgelist')
argparser.add_argument('--threshold',type=int,help='specify the minimum memory threshold (in MB) of the processes used in the final network. Defaults to 1000',default=1000)
args = argparser.parse_args()
build_network(args.filename,args.outfile,args.threshold)
| 42.125
| 177
| 0.555267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 690
| 0.255935
|
f98fff79925e8d8be7997e37273c0b32991d86de
| 1,428
|
py
|
Python
|
BigO/insertion_sort.py
|
jeff-lund/CS350
|
7227b53275c62d45e899a531136a96a866c32a16
|
[
"MIT"
] | 2
|
2019-10-16T01:59:42.000Z
|
2019-11-13T19:25:00.000Z
|
BigO/insertion_sort.py
|
jeff-lund/CS350
|
7227b53275c62d45e899a531136a96a866c32a16
|
[
"MIT"
] | null | null | null |
BigO/insertion_sort.py
|
jeff-lund/CS350
|
7227b53275c62d45e899a531136a96a866c32a16
|
[
"MIT"
] | 2
|
2019-10-16T01:59:49.000Z
|
2019-11-15T01:19:18.000Z
|
from sys import argv
from random import randint
from time import time
import matplotlib.pyplot as plt
def insertion_sort(arr):
n = len(arr)
for i in range(1, n):
v = arr[i]
j = i - 1
while j >= 0 and arr[j] > v:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = v
if __name__ == '__main__':
if len(argv) > 1:
sz = int(argv[1])
arr = [randint(1, 1000) for _ in range(sz)]
#print(arr)
start = time()
insertion_sort(arr)
end = time()
#print(arr)
print(end - start)
else:
# performs automated testing
x = []
y = []
sizes = [10, 50, 100, 200, 500, 1000, 1200, 1500, 2000, 2500, 3000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 40000]
for sz in sizes:
t = 0
print("running size", sz)
for _ in range(10):
arr = [randint(1, 10000) for _ in range(sz)]
start = time()
insertion_sort(arr)
end = time()
t += (end - start) * 1000
x.append(sz)
y.append(t // 10)
# Plot results of tests
plt.plot(x, y)
plt.xlabel("n (size of array)")
plt.ylabel("time (ms)")
plt.show()
#plt.savefig("python_running_times.png", format='png')
| 28.56
| 134
| 0.459384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.130252
|
f994d4e53b4b2fdca4fdd7080892fff0acd2645e
| 638
|
py
|
Python
|
tasks.py
|
RevolutionTech/revolutiontech.ca
|
a3f0f1526812554938674c4fc9e7ea90ed4ffe6d
|
[
"0BSD"
] | null | null | null |
tasks.py
|
RevolutionTech/revolutiontech.ca
|
a3f0f1526812554938674c4fc9e7ea90ed4ffe6d
|
[
"0BSD"
] | 171
|
2017-11-02T05:39:37.000Z
|
2022-03-07T01:13:53.000Z
|
tasks.py
|
RevolutionTech/carrier-owl
|
f72f47e39ea819681fa7b50de2b52e393edeeb96
|
[
"0BSD"
] | 1
|
2018-01-13T08:11:26.000Z
|
2018-01-13T08:11:26.000Z
|
from invoke import Collection, task
from opstrich.invoke import check, openssl
@task
def deploy(c):
"""
Build and run a Docker container to deploy.
"""
c.run("docker build -t zappa-lambda .")
c.run("docker run -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY zappa-lambda")
c.run(
"DJANGO_CONFIGURATION=ProdCollectStaticConfig poetry run python manage.py collectstatic --noinput"
)
@task
def ci_deploy(c):
"""
Perform pre-deploy steps needed in CI and then deploy.
"""
openssl.decrypt(c, "zappa_settings.json")
deploy(c)
namespace = Collection(check, openssl, deploy, ci_deploy)
| 23.62963
| 106
| 0.694357
| 0
| 0
| 0
| 0
| 493
| 0.772727
| 0
| 0
| 351
| 0.550157
|
f994d8e1e290868506a6fadbde009edec090ad8b
| 2,490
|
py
|
Python
|
bear/scripts/lunar_lander/remove_data.py
|
junmokane/AI602_Project
|
59c132ae04751f9fb6cf6ebb491042cbf4de003d
|
[
"Apache-2.0"
] | 1
|
2020-10-14T05:51:36.000Z
|
2020-10-14T05:51:36.000Z
|
bear/scripts/lunar_lander/remove_data.py
|
junmokane/AI602_Project
|
59c132ae04751f9fb6cf6ebb491042cbf4de003d
|
[
"Apache-2.0"
] | null | null | null |
bear/scripts/lunar_lander/remove_data.py
|
junmokane/AI602_Project
|
59c132ae04751f9fb6cf6ebb491042cbf4de003d
|
[
"Apache-2.0"
] | null | null | null |
import torch
import numpy as np
import copy
def remove(path):
data = torch.load(path)
location_list, action_list = [np.reshape(st[0], (1, 8)) for st in data], [st[1] for st in data]
location_list = np.concatenate(location_list, axis=0)
action_list = np.asarray(action_list)
action_0 = action_list == 0
action_1 = action_list == 1
action_2 = action_list == 2
action_3 = action_list == 3
location_0 = location_list[action_0, :]
location_1 = location_list[action_1, :]
location_2 = location_list[action_2, :]
location_3 = location_list[action_3, :]
action_0 = action_list[action_list == 0]
action_1 = action_list[action_list == 1]
action_2 = action_list[action_list == 2]
action_3 = action_list[action_list == 3]
action_l = copy.deepcopy([action_0, action_1, action_2, action_3])
location_l = copy.deepcopy([location_0, location_1, location_2, location_3])
a_hori, l_hori = [], []
for a, l in zip(action_l, location_l):
a = a[l[:, 0] > 0.1]
l = l[l[:, 0] > 0.1]
a_hori.append(a)
l_hori.append(l)
location_hori = np.concatenate(l_hori, axis=0)
action_hori = np.concatenate(a_hori, axis=0)
print("horizontal : ", location_hori.shape, action_hori.shape)
# Vertical
action_l = copy.deepcopy([action_0, action_1, action_2, action_3])
location_l = copy.deepcopy([location_0, location_1, location_2, location_3])
a_verti, l_verti = [], []
for a, l in zip(action_l, location_l):
a = a[l[:, 1] < 0.8]
l = l[l[:, 1] < 0.8]
a_verti.append(a)
l_verti.append(l)
location_verti = np.concatenate(l_verti, axis=0)
action_verti = np.concatenate(a_verti, axis=0)
print("vertical : ", location_verti.shape, action_verti.shape)
# Save
for i, (a, l) in enumerate(zip(a_verti, l_verti)):
torch.save([l, a], f"/home/seungjae/Desktop/lunarlander/replay_buffer_vertical_{i}.pt")
for i, (a, l) in enumerate(zip(a_hori, l_hori)):
torch.save([l, a], f"/home/seungjae/Desktop/lunarlander/replay_buffer_horizontal_{i}.pt")
# torch.save([location_hori, action_hori], "/home/seungjae/Desktop/lunarlander/replay_buffer_horizontal.pt")
# torch.save([location_verti, action_verti], "/home/seungjae/Desktop/lunarlander/replay_buffer_vertical.pt")
if __name__ == "__main__":
path = "/home/seungjae/Desktop/lunarlander/replay_buffer.pt"
remove(path)
| 35.070423
| 112
| 0.65743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 459
| 0.184337
|
f9962c0754c5c1d3d6c4db26b056e29e7ec2794f
| 188
|
py
|
Python
|
tests/data/demo-project/demo/c/d.py
|
youngquan/goldenmask
|
5b16eea94df7ddd988fae0c1a4e265b16af9ded2
|
[
"Apache-2.0"
] | 6
|
2020-04-28T18:13:54.000Z
|
2021-12-23T18:26:30.000Z
|
tests/data/demo-project/demo/c/d.py
|
youngquan/goldenmask
|
5b16eea94df7ddd988fae0c1a4e265b16af9ded2
|
[
"Apache-2.0"
] | 1
|
2020-04-28T18:08:46.000Z
|
2020-04-30T12:49:46.000Z
|
tests/data/demo-project/demo/c/d.py
|
youngquan/goldenmask
|
5b16eea94df7ddd988fae0c1a4e265b16af9ded2
|
[
"Apache-2.0"
] | 1
|
2020-08-18T21:03:39.000Z
|
2020-08-18T21:03:39.000Z
|
def e():
print("This is function e in file d!")
class E:
def __init__(self):
self.content = "This is class E in file d"
def print(self):
print(self.content)
| 17.090909
| 50
| 0.585106
| 133
| 0.707447
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.308511
|
f9990629b2219f3dd1a7da2e2230e3d0eb99c9a8
| 2,435
|
py
|
Python
|
flex/http/negotiators.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
flex/http/negotiators.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
flex/http/negotiators.py
|
centergy/flex
|
4fc11d3ad48e4b5016f53256015e3eed2157daae
|
[
"MIT"
] | null | null | null |
"""
Content negotiation deals with selecting an appropriate renderer given the
incoming request. Typically this will be based on the request's Accept header.
"""
import flask as fl
from . import exc
class BaseContentNegotiator(object):
def get_accept_mimetypes(self, request=None):
"""Given the incoming request, return a list of mimetypes this client
supports as :class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return (request or fl.request).accept_mimetypes
def select_renderer(self, renderers, mimetype=None, prefer=None, request=None):
raise NotImplementedError('.select_renderer() must be implemented')
class DefaultContentNegotiator(BaseContentNegotiator):
def _get_precedence(self, accepts, mimetype):
ix = accepts.find(mimetype)
if ix < 0: return 0
v = accepts[ix][0]
if '/' not in v: return 0
vtype, vsubtype = v == '*' and ('*', '*') or v.split('/', 1)
vtype, vsubtype = vtype.strip(), vsubtype.strip()
if not vtype or not vsubtype or (vtype == '*' and vsubtype != '*'):
return 0
elif vtype == '*':
return 1
elif vsubtype == '*':
return 2
else:
return 3
def _filter_renderers(self, accepts, renderers):
best_quality = -1
for r in renderers:
q = accepts.quality(r.mimetype)
if q > 0 and q >= best_quality:
p = self._get_precedence(accepts, r.mimetype)
best_quality = q
yield r, q, p
def best_matches(self, accepts, renderers, limit=None):
rv = []
best_quality = -1
renderers = self._filter_renderers(accepts, renderers)
for renderer, quality, precedence in sorted(renderers, key=lambda i: i[1:], reverse=True):
if quality < best_quality or (limit and len(rv) >= limit):
break
best_quality = quality
rv.append((renderer, renderer.mimetype))
return rv
def select_renderer(self, renderers, mimetype=None, prefer=None, request=None):
"""Given the incoming request and a list of renderers, return a
two-tuple of: (renderer, mimetype).
"""
accepts = self.get_accept_mimetypes(request)
if mimetype:
if accepts.quality(mimetype) > 0:
for renderer in renderers:
if renderer.mimetype == mimetype:
return renderer, mimetype
raise exc.NotAcceptable()
renderers = self.best_matches(accepts, renderers)
if renderers:
if prefer:
for renderer, mimetype in renderers:
if mimetype == prefer:
return renderer, mimetype
return renderers[0]
raise exc.NotAcceptable()
| 27.988506
| 92
| 0.70308
| 2,227
| 0.914579
| 249
| 0.102259
| 0
| 0
| 0
| 0
| 478
| 0.196304
|
f999e5eac5ca0101fdb8157a0b22f62d11306d51
| 403
|
py
|
Python
|
accounts/migrations/0005_addtocart_cart_useremail.py
|
noelsj007/ecommerceweb
|
e00edfe9110d2cc54deebd97043e0aa152c8afd4
|
[
"Unlicense"
] | null | null | null |
accounts/migrations/0005_addtocart_cart_useremail.py
|
noelsj007/ecommerceweb
|
e00edfe9110d2cc54deebd97043e0aa152c8afd4
|
[
"Unlicense"
] | null | null | null |
accounts/migrations/0005_addtocart_cart_useremail.py
|
noelsj007/ecommerceweb
|
e00edfe9110d2cc54deebd97043e0aa152c8afd4
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-11 14:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_addtocart'),
]
operations = [
migrations.AddField(
model_name='addtocart',
name='cart_useremail',
field=models.EmailField(default=None, max_length=254),
),
]
| 21.210526
| 66
| 0.60794
| 310
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.248139
|
f99b06a71dceb4f5f766c0212d0a17f6f162c70a
| 539
|
py
|
Python
|
uva_answers/10018/main.py
|
andriisoldatenko/fan
|
e7ed6ea0f39bd71af4e286af8d81ebc137ae8ff4
|
[
"MIT"
] | 6
|
2018-11-18T15:00:02.000Z
|
2022-03-23T21:32:24.000Z
|
uva_answers/10018/main.py
|
andriisoldatenko/leetcode
|
8fef4da00234f8acbea9b71ee730b2267b70395f
|
[
"MIT"
] | null | null | null |
uva_answers/10018/main.py
|
andriisoldatenko/leetcode
|
8fef4da00234f8acbea9b71ee730b2267b70395f
|
[
"MIT"
] | null | null | null |
import pprint
import sys
import re
FILE = sys.stdin
#FILE = open('sample.in')
def is_palindrome(n):
k = str(n)
return list(k) == list(reversed(k))
def reverse_add(n):
return n + int(str(n)[::-1])
#import ipdb;ipdb.set_trace()
test_cases = range(int(FILE.readline()))
#import ipdb; ipdb.set_trace()
for tc in test_cases:
n = int(FILE.readline().strip())
total_sum_count = 1
n = reverse_add(n)
while not is_palindrome(n):
n = reverse_add(n)
total_sum_count += 1
print(total_sum_count, n)
| 20.730769
| 40
| 0.64564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.157699
|
f99c7dc6ce29a2ffb9fe2abe6759971a3c9cf033
| 214
|
py
|
Python
|
cogkge/data/processor/fb15k237processor.py
|
jinzhuoran/CogKGE
|
b0e819a1d34cf61a7d70c33808da3377b73c8fd6
|
[
"MIT"
] | 18
|
2022-01-22T09:52:57.000Z
|
2022-03-22T15:02:12.000Z
|
cogkge/data/processor/fb15k237processor.py
|
CogNLP/CogKGE
|
70d851d6489600c1e90eb25b0388a3ceba2f078c
|
[
"MIT"
] | null | null | null |
cogkge/data/processor/fb15k237processor.py
|
CogNLP/CogKGE
|
70d851d6489600c1e90eb25b0388a3ceba2f078c
|
[
"MIT"
] | null | null | null |
from .baseprocessor import BaseProcessor
class FB15K237Processor(BaseProcessor):
def __init__(self, node_lut, relation_lut, reprocess):
super().__init__("FB15K237", node_lut, relation_lut, reprocess)
| 30.571429
| 71
| 0.771028
| 170
| 0.794393
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.046729
|
f99d843d5eb26f48db07153b199bcf310e6ecade
| 1,256
|
pyw
|
Python
|
python/discord.pyw
|
hentai-burner/mastur-beta
|
2893a4ef3ee4aae5a9a78141badae4e6544fb071
|
[
"MIT"
] | null | null | null |
python/discord.pyw
|
hentai-burner/mastur-beta
|
2893a4ef3ee4aae5a9a78141badae4e6544fb071
|
[
"MIT"
] | null | null | null |
python/discord.pyw
|
hentai-burner/mastur-beta
|
2893a4ef3ee4aae5a9a78141badae4e6544fb071
|
[
"MIT"
] | null | null | null |
import time
import os
import pathlib
import sys
from subprocess import call
#TODO: Maybe make a pip_handler file idk
def pip_install(packageName: str):
try:
call(f'py -m pip install {packageName}')
except:
call(f'pip install {packageName}')
try:
from pypresence import presence
except:
pip_install("pypresence")
STATUS_TEXT = str(sys.argv)
IMGID_CONSTS = ['furcock_img', 'blacked_img', 'censored_img', 'goon_img',
'goon2_img', 'hypno_img', 'futa_img', 'healslut_img', 'gross_img']
if not STATUS_TEXT == '':
try:
#if has file, tries to split at newline break
# uses first line as the string for text description
# uses second line as the image id for requesting image from discord api
ls = STATUS_TEXT.split('\n')
STATUS_TEXT[0] = ls[0]
if ls[1] in IMGID_CONSTS:
STATUS_TEXT[1] = ls[1]
except:
print('failed line split') #tweak this
def do_discord():
# conn = presence.Presence('') #TODO: Make Discord API go brrr
# conn.connect()
# conn.update(state=textObj[0], large_image=textObj[1], start=int(time.time()))
while True:
time.sleep(15)
do_discord()
| 27.304348
| 84
| 0.621815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 595
| 0.473726
|
f99fbafb536fdd0dcee45c41f8a1a58e47ef7f46
| 1,086
|
py
|
Python
|
2dtree/bin/bpy_validation.py
|
LeanderSilur/Snippets
|
3ff718f91439450bd5aff13342aa98a9d4957e85
|
[
"MIT"
] | 9
|
2020-02-04T05:41:09.000Z
|
2022-03-08T06:14:54.000Z
|
2dtree/bin/bpy_validation.py
|
LeanderSilur/Snippets
|
3ff718f91439450bd5aff13342aa98a9d4957e85
|
[
"MIT"
] | 2
|
2020-06-14T19:58:01.000Z
|
2021-07-04T14:21:33.000Z
|
2dtree/bin/bpy_validation.py
|
LeanderSilur/Snippets
|
3ff718f91439450bd5aff13342aa98a9d4957e85
|
[
"MIT"
] | 2
|
2020-07-29T19:54:44.000Z
|
2020-07-29T20:00:24.000Z
|
import bpy
import subprocess
REBUILD = 0
if REBUILD:
subprocess.call([
"g++",
bpy.path.abspath('//../main.cpp'),
bpy.path.abspath('//../PtTree.cpp'),
"-o",
bpy.path.abspath('//PtTree')
])
# Collect the input data.
verts = bpy.data.meshes['PointCloud'].vertices
query_amount = 5
query_obj = bpy.data.objects['Search']
query_pos = query_obj.location
query_radius = query_obj.dimensions[0] / 2
points = [str(v.co.x) + ',' + str(v.co.y) for v in verts]
args = [
bpy.path.abspath('//PtTree.exe'),
str(query_amount),
str(query_radius),
str(query_pos.x) + ',' + str(query_pos.y),
*points
]
# Make the call.
proc = subprocess.run(args, encoding='utf-8', stdout=subprocess.PIPE)
stdout = proc.stdout.split('\n')
[print(line) for line in stdout]
ids = [int(line.split(" ")[0]) for line in stdout]
# Visualize the output.
bpy.ops.object.mode_set(mode="OBJECT")
for i in range(len(verts)):
verts[i].select = False
if i in ids:
verts[i].select = True
bpy.ops.object.mode_set(mode="EDIT")
| 22.163265
| 69
| 0.621547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.168508
|
f9a216fdaa67e5fc6913f5aa98c379d5a25e120e
| 10,274
|
py
|
Python
|
zgrab2_schemas/zgrab2/ssh.py
|
aspacewalz/zgrab2
|
d9ed4f141dae102d65ba1e08bf2eb4179678d172
|
[
"Apache-2.0"
] | 1,031
|
2016-11-29T15:12:05.000Z
|
2022-03-31T05:02:50.000Z
|
zgrab2_schemas/zgrab2/ssh.py
|
vl4deee11/zgrab2
|
c859e9ef1173955dadae88416289ef8cc8910495
|
[
"Apache-2.0"
] | 191
|
2017-07-24T17:27:57.000Z
|
2022-03-16T04:59:59.000Z
|
zgrab2_schemas/zgrab2/ssh.py
|
vl4deee11/zgrab2
|
c859e9ef1173955dadae88416289ef8cc8910495
|
[
"Apache-2.0"
] | 230
|
2017-11-14T07:25:57.000Z
|
2022-03-31T04:20:46.000Z
|
# zschema sub-schema for zgrab2's ssh module (modules/ssh.go)
# Registers zgrab2-ssh globally, and ssh with the main zgrab2 schema.
from zschema.leaves import *
from zschema.compounds import *
import zschema.registry
import zcrypto_schemas.zcrypto as zcrypto
from . import zgrab2
# NOTE: Despite the fact that we have e.g. "supportedHostKeyAlgos",
# "allSupportedCiphers", etc, including a different value is not syntactically
# incorrect...so all of the following algorithm identifiers are Strings with
# examples=[...], rather tha Enums with values=[...].
# lib/ssh/common.go -- allSupportedKexAlgos
KexAlgorithm = String.with_args(
doc="An ssh key exchange algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-15 for standard values.",
examples=[
"diffie-hellman-group1-sha1",
"diffie-hellman-group14-sha1",
"ecdh-sha2-nistp256",
"ecdh-sha2-nistp384",
"ecdh-sha2-nistp521",
"curve25519-sha256@libssh.org",
"diffie-hellman-group-exchange-sha1",
"diffie-hellman-group-exchange-sha256",
]
)
KexAlgorithms = ListOf.with_args(KexAlgorithm())
# Defined in lib/ssh/common.go -- supportedHostKeyAlgos, though they are
# generated via PublicKey.Type()
KeyAlgorithm = String.with_args(
doc="An ssh public key algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-19 for standard values.",
examples=[
"ssh-rsa-cert-v01@openssh.com",
"ssh-dss-cert-v01@openssh.com",
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
"ecdsa-sha2-nistp521-cert-v01@openssh.com",
"ssh-ed25519-cert-v01@openssh.com",
"ssh-rsa",
"ssh-dss",
"ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521",
"ssh-ed25519",
]
)
KeyAlgorithms = ListOf.with_args(KeyAlgorithm())
# From lib/ssh/common.go -- allSupportedCiphers
CipherAlgorithm = String.with_args(
doc="An ssh cipher algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-16 for standard values.",
examples=[
"aes128-ctr", "aes192-ctr", "aes256-ctr", "aes128-gcm@openssh.com",
"aes128-cbc", "3des-cbc", "arcfour256", "arcfour128", "arcfour",
]
)
CipherAlgorithms = ListOf.with_args(CipherAlgorithm())
# From lib/ssh/common.go -- supportedMACs.
MACAlgorithm = String.with_args(
doc="An ssh MAC algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-18 for standard values.",
examples=["hmac-sha2-256", "hmac-sha1", "hmac-sha1-96"]
)
MACAlgorithms = ListOf.with_args(MACAlgorithm())
# From lib/ssh/common.go -- supportedCompressions
CompressionAlgorithm = String.with_args(
doc="An ssh compression algorithm identifier, named according to section 6 of https://www.ietf.org/rfc/rfc4251.txt; see https://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-20 for standard values.",
examples=["none", "zlib"]
)
CompressionAlgorithms = ListOf.with_args(CompressionAlgorithm())
LanguageTag = String.with_args(doc="A language tag, as defined in https://www.ietf.org/rfc/rfc3066.txt.")
LanguageTags = ListOf.with_args(LanguageTag(), doc="A name-list of language tags in order of preference.")
# zgrab2/lib/ssh/messages.go: (Json)kexInitMsg
KexInitMessage = SubRecordType({
"cookie": Binary(),
"kex_algorithms": KexAlgorithms(doc="Key exchange algorithms used in the handshake."),
"host_key_algorithms": KeyAlgorithms(doc="Asymmetric key algorithms for the host key supported by the client."),
"client_to_server_ciphers": CipherAlgorithms(),
"server_to_client_ciphers": CipherAlgorithms(),
"client_to_server_macs": MACAlgorithms(),
"server_to_client_macs": MACAlgorithms(),
"client_to_server_compression": CompressionAlgorithms(),
"server_to_client_compression": CompressionAlgorithms(),
"client_to_server_languages": LanguageTags(),
"server_to_client_languages": LanguageTags(),
"first_kex_follows": Boolean(),
"reserved": Unsigned32BitInteger(),
})
# zgrab2/lib/ssh/log.go: EndpointId
EndpointID = SubRecordType({
"raw": String(),
"version": String(),
"software": String(),
"comment": String(),
})
# This could be merged into a single class with e.g. an analyzed param,
# but it's probably clearer to just duplicate it.
AnalyzedEndpointID = SubRecordType({
"raw": AnalyzedString(),
"version": String(),
"software": AnalyzedString(),
"comment": AnalyzedString(),
})
# zgrab2/lib/ssh/kex.go: kexResult
KexResult = SubRecordType({
"H": Binary(),
"K": Binary(),
"session_id": Binary()
})
# zgrab2/lib/ssh/keys.go: ed25519PublicKey
ED25519PublicKey = SubRecordType({
"public_bytes": Binary(),
})
# zgrab2/lib/ssh/kex.go: curve25519sha256JsonLogParameters (via curve25519sha256)
Curve25519SHA256Params = SubRecordType({
"client_public": Binary(required=False),
"client_private": Binary(required=False),
"server_public": Binary(required=False),
})
# zgrab2/lib/ssh/certs.go: JsonSignature
Signature = SubRecordType({
"parsed": SubRecord({
"algorithm": KeyAlgorithm(),
"value": Binary(),
}),
"raw": Binary(),
"h": Binary(),
})
# lib/ssh/kex.go: PublicKeyJsonLog, sans the certkey_public_key (since that would create a loop)
SSHPublicKey = SubRecordType({
"raw": Binary(),
"fingerprint_sha256": String(),
# TODO: Enum? Obviously must serialize to one of rsa/dsa/ecdsa/ed25519_public_key...
"algorithm": String(),
# For compatiblity with ztag
"key_algorithm":String(),
"rsa_public_key": zcrypto.RSAPublicKey(),
"dsa_public_key": zcrypto.DSAPublicKey(),
"ecdsa_public_key": zcrypto.ECDSAPublicKey(),
"ed25519_public_key": ED25519PublicKey(),
})
# lib/ssh/certs.go: JsonCertType
CertType = SubRecordType({
"id": Unsigned32BitInteger(doc="The numerical certificate type value. 1 identifies user certificates, 2 identifies host certificates."),
"name": Enum(values=["USER", "HOST", "unknown"], doc="The human-readable name for the certificate type."),
})
# lib/ssh/certs.go: JsonCertificate
SSHPublicKeyCert = SubRecord.with_args({
# TODO: Use / include our cert type here, or maybe somewhere else in the response?
"certkey_public_key": SubRecord({
"nonce": Binary(),
# Note that this is not recursive, since SSHPublicKey() does not include certkey_public_key.
"key": SSHPublicKey(),
"serial": String(doc="The certificate serial number, encoded as a base-10 string."),
"cert_type": CertType(),
"key_id": String(doc="A free-form text field filled in by the CA at the time of signing, intended to identify the principal in log messages."),
"valid_principals": ListOf(String(), doc="Names for which this certificate is valid; hostnames for cert_type=HOST certificates and usernames for cert_type=USER certificates."),
"validity": SubRecord({
"valid_after": DateTime(doc="Timestamp of when certificate is first valid. Timezone is UTC."),
"valid_before": DateTime(doc="Timestamp of when certificate expires. Timezone is UTC."),
"length": Signed64BitInteger(),
}),
"reserved": Binary(),
"signature_key": SSHPublicKey(),
"signature": Signature(),
"parse_error": String(),
"extensions": SubRecord({
"known": SubRecord({
"permit_X11_forwarding": String(),
"permit_agent_forwarding": String(),
"permit_port_forwarding": String(),
"permit_pty": String(),
"permit_user_rc": String(),
}),
"unknown": ListOf(String()),
}),
"critical_options": SubRecord({
"known": SubRecord({
"force_command": String(),
"source_address": String(),
}),
"unknown": ListOf(String()),
})
})
}, extends=SSHPublicKey())
# zgrab2/lib/ssh/common.go: directionAlgorithms
DirectionAlgorithms = SubRecordType({
"cipher": CipherAlgorithm(),
"mac": MACAlgorithm(),
"compression": CompressionAlgorithm(),
})
# zgrab2/lib/ssh/kex.go: interface kexAlgorithm
# Searching usages of kexAlgorithm turns up:
# - dhGroup: dh_params, server_signature, server_host_key
# - ecdh: ecdh_params, server_signature, server_host_key
# - curve25519sha256: curve25519_sha256_params, server_signature, server_host_key
# - dhGEXSHA: dh_params, server_signature, server_host_key
KeyExchange = SubRecordType({
"curve25519_sha256_params": Curve25519SHA256Params(),
"ecdh_params": zcrypto.ECDHParams(),
"dh_params": zcrypto.DHParams(),
"server_signature": Signature(),
"server_host_key": SSHPublicKeyCert(),
})
# zgrab2/lib/ssh/common.go: algorithms (aux in MarshalJSON)
AlgorithmSelection = SubRecordType({
"dh_kex_algorithm": KexAlgorithm(),
"host_key_algorithm": KeyAlgorithm(),
"client_to_server_alg_group": DirectionAlgorithms(),
"server_to_client_alg_group": DirectionAlgorithms(),
})
# zgrab2/lib/ssh/log.go: HandshakeLog
# TODO: Can ssh re-use any of the generic TLS model?
ssh_scan_response = SubRecord({
"result": SubRecord({
"banner": WhitespaceAnalyzedString(),
"server_id": AnalyzedEndpointID(),
"client_id": EndpointID(),
"server_key_exchange": KexInitMessage(),
"client_key_exchange": KexInitMessage(),
"algorithm_selection": AlgorithmSelection(),
"key_exchange": KeyExchange(),
"userauth": ListOf(String()),
"crypto": KexResult(),
})
}, extends=zgrab2.base_scan_response)
zschema.registry.register_schema("zgrab2-ssh", ssh_scan_response)
zgrab2.register_scan_response_type("ssh", ssh_scan_response)
| 40.448819
| 234
| 0.694569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,058
| 0.589644
|
f9a21f50645fbd8f00212379587d12cc0568bcd5
| 5,123
|
py
|
Python
|
app/main/views.py
|
Juru-10/blog
|
ac554952e884c35ce7dd50cf0ef9748a8da96a3f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Juru-10/blog
|
ac554952e884c35ce7dd50cf0ef9748a8da96a3f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Juru-10/blog
|
ac554952e884c35ce7dd50cf0ef9748a8da96a3f
|
[
"MIT"
] | null | null | null |
from flask import render_template,request,redirect,url_for,abort
from ..models import User,Post,Comment,Subscriber
from ..requests import get_quotes
from . import main
from .forms import PostForm,CommentForm,DelForm,UpdateProfile
from app.auth.forms import SubscriptionForm
from .. import db,photos
from flask_login import login_required,current_user
import markdown2
from ..email import mail_message
from app.auth import views,forms
# from sqlalchemy import desc
@main.route('/',methods = ['GET','POST'])
def index():
'''
View root page function that returns the index page and its data
'''
quotes=get_quotes()
title = 'Home - Welcome to The best Blogging Website Online'
posts=Post.query.all()
# desc(posts)
users= None
for post in posts:
comments=Comment.query.filter_by(post_id=post.id).all()
return render_template('index.html', title = title,posts=posts, users=users,quotes=quotes,comments=comments)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update/bio',methods = ['GET','POST'])
@login_required
def update_bio(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update_bio.html',form =form)
# @main.route('/user/update/pitch/<id>',methods = ['GET','POST'])
# def single_review(id):
# pitch=Pitch.query.get(id)
# if pitch is None:
# abort(404)
# form = PitchForm()
#
# if form.validate_on_submit():
# user.pitches = form.pitches.data
#
# db.session.add(user)
# db.session.commit()
#
# return redirect(url_for('.profile',pitch=user.pitches))
#
# format_pitch = markdown2.markdown(pitch.movie_pitch,extras=["code-friendly", "fenced-code-blocks"])
# return render_template('new_pitch.html',pitch = pitch,format_pitch=format_pitch)
@main.route('/new_post/',methods = ['GET','POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(name = form.name.data, user_id = current_user.id)
db.session.add(post)
db.session.commit()
subscribers=Subscriber.query.filter_by(email=Subscriber.email).all()
form = SubscriptionForm()
for subscriber in subscribers:
mail_message("A New Blog Post is added","email/welcome_user",subscriber.email,subscribers=subscribers)
return redirect(url_for('.index'))
return render_template('profile/new_post.html',post_form=form)
# @main.route('/delete_post/<int:id>',methods = ['GET','POST'])
# def del_post(id):
@main.route('/new_comment/<int:id>',methods = ['GET','POST'])
def new_comment(id):
form = CommentForm()
form2=DelForm()
posts=Post.query.filter_by(id=id).all()
comments=Comment.query.filter_by(post_id=id).all()
if form.validate_on_submit():
comment = Comment(name = form.name.data, post_id = id)
db.session.add(comment)
db.session.commit()
if form2.validate_on_submit():
comment=Comment.query.filter_by(id=id).delete()
# db.session.delete(comment)
db.session.commit()
# if button.click()
return redirect(url_for('.index'))
return render_template('profile/new_comment.html',comment_form=form,del_form=form2,comments=comments,posts=posts)
# @main.route('/new_vote/',methods = ['GET','POST'])
# @login_required
# def new_vote():
# form = VoteForm()
# # votes = get_vote(id)
#
# if form.validate_on_submit():
# pitch = Pitch(name = form.name.data, user_id = current_user.id)
# upvote = Vote(upvote = form.validate_on_submit(),pitch_id = pitch.id)
# downvote = Vote(downvote = form.validate_on_submit(),pitch_id = pitch.id)
# up=0
# down=0
# for upvote in vote:
# up+=1
# db.session.add(upvote=up)
# db.session.commit()
# for downvote in vote:
# down+=1
# db.session.add(downvote=down)
# db.session.commit()
# user=User.query.filter_by(id = pitch.id).first()
# return redirect(url_for('.index'))
#
# return render_template('profile/new_comment.html',comment_form=form)
# return render_template('new_vote.html',upvote = upvote, downvote = downvote, vote_form=form, votes=votes)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
| 33.927152
| 117
| 0.662112
| 0
| 0
| 0
| 0
| 2,960
| 0.577786
| 0
| 0
| 2,243
| 0.437829
|
f9a2d31ea31e4c014413d3196c6cde1579895080
| 100
|
py
|
Python
|
data_types/numeric_type/float_type.py
|
aryanz-co-in/python-indentation-datatypes-tamil
|
69c291f0b6bd911fec96e8cd9e670880501d9959
|
[
"Apache-2.0"
] | null | null | null |
data_types/numeric_type/float_type.py
|
aryanz-co-in/python-indentation-datatypes-tamil
|
69c291f0b6bd911fec96e8cd9e670880501d9959
|
[
"Apache-2.0"
] | null | null | null |
data_types/numeric_type/float_type.py
|
aryanz-co-in/python-indentation-datatypes-tamil
|
69c291f0b6bd911fec96e8cd9e670880501d9959
|
[
"Apache-2.0"
] | null | null | null |
# int, float, complex Numeric Types
# Float are nothing but decimal values
pi = 3.14
print(pi)
| 12.5
| 38
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.75
|
f9a2fdd3c94e96ddac9d38ba04e226d5f50ff29b
| 452
|
py
|
Python
|
docker/dev_app/views.py
|
uw-it-aca/uw-django-saml2
|
04cd99c0f8fff6160c13e3aa0e44324f6a4079fe
|
[
"Apache-2.0"
] | 2
|
2018-04-20T19:02:11.000Z
|
2020-01-21T07:08:48.000Z
|
docker/dev_app/views.py
|
uw-it-aca/uw-django-saml2
|
04cd99c0f8fff6160c13e3aa0e44324f6a4079fe
|
[
"Apache-2.0"
] | 71
|
2018-03-27T17:52:31.000Z
|
2022-02-18T23:09:05.000Z
|
docker/dev_app/views.py
|
uw-it-aca/uw-django-saml2
|
04cd99c0f8fff6160c13e3aa0e44324f6a4079fe
|
[
"Apache-2.0"
] | 1
|
2018-12-04T19:20:36.000Z
|
2018-12-04T19:20:36.000Z
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from uw_saml.utils import is_member_of_group
# Create your views here.
@login_required
def index(request):
if is_member_of_group(request, settings.UW_SAML_PERMISSIONS['perm2']):
return HttpResponse("Hello, world. You have perm2.")
else:
return HttpResponse("Hello, world. You don't have perm2.")
| 28.25
| 74
| 0.765487
| 0
| 0
| 0
| 0
| 248
| 0.548673
| 0
| 0
| 100
| 0.221239
|
f9a3781192095c8ae404cd60bb006a2f14049443
| 4,370
|
py
|
Python
|
genoml/steps/model_validate.py
|
GenoML/genoml
|
bfe0164e99a27d5ec2b720b5a24e059294603e3f
|
[
"Apache-2.0"
] | 13
|
2019-03-22T12:12:12.000Z
|
2021-10-04T15:06:18.000Z
|
genoml/steps/model_validate.py
|
GenoML/genoml
|
bfe0164e99a27d5ec2b720b5a24e059294603e3f
|
[
"Apache-2.0"
] | 21
|
2019-03-15T15:40:59.000Z
|
2020-08-03T21:44:26.000Z
|
genoml/steps/model_validate.py
|
GenoML/genoml
|
bfe0164e99a27d5ec2b720b5a24e059294603e3f
|
[
"Apache-2.0"
] | 4
|
2019-06-28T18:25:37.000Z
|
2020-01-21T01:22:07.000Z
|
#! /usr/bin/env python -u
# coding=utf-8
from shutil import copyfile
from genoml.steps import PhenoScale, StepBase
from genoml.utils import DescriptionLoader
__author__ = 'Sayed Hadi Hashemi'
class ModelValidateStep(StepBase):
"""performs validation with existing data"""
_valid_prefix = None
def _reduce_validate(self):
self.execute_command([
self._dependecies["Plink"],
"--bfile", self._opt.valid_geno_dir,
"--extract", "{}.reduced_genos_snpList".format(self._opt.prune_prefix),
"--recode", "A",
"--out", "{}.reduced_genos".format(self._valid_prefix)
], name="Plink")
def _merge(self):
self.merge_reduced()
def _main(self):
if self._opt.pheno_scale == PhenoScale.DISCRETE:
script_name = self._opt.VALIDATE_DISC if self._opt.pheno_scale == PhenoScale.DISCRETE \
else self._opt.VALIDATE_CONT
self.execute_command([
self._dependecies["R"],
script_name,
self._valid_prefix,
self._opt.n_cores,
self._opt.impute_data,
self._opt.prune_prefix #todo: new best_model
], name="VALIDATE_CONT, please make sure you have included .cov and .addit validation files, if used for "
"training.")
@DescriptionLoader.function_description("validation_step")
def process(self):
self._valid_prefix = "{}_validation".format(self._opt.prune_prefix)
self.model_validate()
def merge_reduced(self):
self.execute_command([
self._dependecies["R"],
self._opt.MERGE,
self._opt.valid_geno_dir,
self._opt.valid_pheno_file,
self.xna(self._opt.valid_cov_file),
self.xna(self._opt.valid_addit_file),
self._valid_prefix
], name="R")
@staticmethod
def xna(s):
return s if s is not None else "NA"
# todo: new
def model_validate(self):
"""this function performs validation with existing data"""
# check if GWAS is present (meaning it was also present in training), otherwise the Prune option has been used
# TODO: find a way to ensure user is providing GWAS for validation, in case it was used during training
if self._opt.gwas_file is None:
# we need to specify the forced allele here from the training set genotype file, this pulls the allele to
# force
# TODO: refactor to a Python code
self.cut_column(self._opt.geno_prefix + ".bim",
"2,5",
self._opt.prune_prefix + ".allelesToForce")
# plink
self.execute_command([
self._dependecies["Plink"],
"--bfile", self._opt.valid_geno_dir,
"--extract", self._opt.prune_prefix + '.reduced_genos_snpList',
"--recode", "A",
"--recode-allele", self._opt.prune_prefix + '.allelesToForce',
"--out", self._valid_prefix + '.reduced_genos'
], name="model_validate")
else: # gwas_file is not None
# plink
self.execute_command([
self._dependecies["Plink"],
"--bfile", self._opt.valid_geno_dir,
"--extract", self._opt.prune_prefix + '.reduced_genos_snpList',
"--recode", "A",
"--recode-allele", self._opt.prune_prefix + '.variantWeightings',
"--out", self._valid_prefix + '.reduced_genos'
], name="model_validate")
# copy
copyfile(self._opt.prune_prefix + ".temp.snpsToPull2", self._valid_prefix + ".temp.snpsToPull2")
self.execute_command([
self._dependecies["R"],
self._opt.SCALE_VAR_DOSES_VALID,
self._opt.prune_prefix,
self._opt.gwas_file,
self._opt.valid_geno_dir,
self._opt.geno_prefix
], name="validate")
self.merge_reduced()
self._main()
self.execute_command([
self._dependecies["R"],
self._opt.CHECK_VALIDATION,
self._valid_prefix,
self._opt.valid_pheno_file
], name="CHECK_VALIDATION")
| 38
| 118
| 0.576659
| 4,173
| 0.95492
| 0
| 0
| 260
| 0.059497
| 0
| 0
| 1,223
| 0.279863
|
f9a4a4c34323aeec9f75c70fae31ad785ce964df
| 721
|
py
|
Python
|
resotocore/tests/resotocore/dependencies_test.py
|
someengineering/resoto
|
ee17313f5376e9797ed305e7fdb62d40139a6608
|
[
"Apache-2.0"
] | 126
|
2022-01-13T18:22:03.000Z
|
2022-03-31T11:03:14.000Z
|
resotocore/tests/resotocore/dependencies_test.py
|
someengineering/resoto
|
ee17313f5376e9797ed305e7fdb62d40139a6608
|
[
"Apache-2.0"
] | 110
|
2022-01-13T22:27:55.000Z
|
2022-03-30T22:26:50.000Z
|
resotocore/tests/resotocore/dependencies_test.py
|
someengineering/resoto
|
ee17313f5376e9797ed305e7fdb62d40139a6608
|
[
"Apache-2.0"
] | 8
|
2022-01-15T10:28:16.000Z
|
2022-03-30T16:38:21.000Z
|
from typing import Tuple, List
from resotocore.dependencies import parse_args
from resotocore.types import JsonElement
def test_parse_override() -> None:
def parse(args: str) -> List[Tuple[str, JsonElement]]:
return parse_args(args.split()).config_override # type: ignore
assert parse(f"--override a=foo") == [("a", "foo")]
assert parse(f"--override a=foo,bla") == [("a", ["foo", "bla"])]
assert parse(f"--override a=foo,bla b=a,b,c") == [("a", ["foo", "bla"]), ("b", ["a", "b", "c"])]
assert parse(f'--override a="value,with,comma,in,quotes"') == [("a", "value,with,comma,in,quotes")]
assert parse(f'--override a=some,value,"with,comma"') == [("a", ["some", "value", "with,comma"])]
| 45.0625
| 103
| 0.617198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 275
| 0.381415
|
f9a5ad42dfd6f80195b93f6de20b3058e7e2213b
| 7,374
|
py
|
Python
|
pyquil/api/_benchmark.py
|
stjordanis/pyquil
|
36987ecb78d5dc85d299dd62395b7669a1cedd5a
|
[
"Apache-2.0"
] | 677
|
2017-01-09T23:20:22.000Z
|
2018-11-26T10:57:49.000Z
|
pyquil/api/_benchmark.py
|
stjordanis/pyquil
|
36987ecb78d5dc85d299dd62395b7669a1cedd5a
|
[
"Apache-2.0"
] | 574
|
2018-11-28T05:38:40.000Z
|
2022-03-23T20:38:28.000Z
|
pyquil/api/_benchmark.py
|
stjordanis/pyquil
|
36987ecb78d5dc85d299dd62395b7669a1cedd5a
|
[
"Apache-2.0"
] | 202
|
2018-11-30T06:36:28.000Z
|
2022-03-29T15:38:18.000Z
|
##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from typing import List, Optional, Sequence, cast
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._abstract_compiler import AbstractBenchmarker
from pyquil.api._compiler_client import (
GenerateRandomizedBenchmarkingSequenceRequest,
ConjugatePauliByCliffordRequest,
CompilerClient,
)
from pyquil.paulis import PauliTerm, is_identity
from pyquil.quil import address_qubits, Program
from pyquil.quilbase import Gate
class BenchmarkConnection(AbstractBenchmarker):
"""
Represents a connection to a server that generates benchmarking data.
"""
def __init__(self, *, timeout: float = 10.0, client_configuration: Optional[QCSClientConfiguration] = None):
"""
Client to communicate with the benchmarking data endpoint.
:param timeout: Time limit for requests, in seconds.
:param client_configuration: Optional client configuration. If none is provided, a default one will be loaded.
"""
self._compiler_client = CompilerClient(
client_configuration=client_configuration or QCSClientConfiguration.load(),
request_timeout=timeout,
)
def apply_clifford_to_pauli(self, clifford: Program, pauli_in: PauliTerm) -> PauliTerm:
r"""
Given a circuit that consists only of elements of the Clifford group,
return its action on a PauliTerm.
In particular, for Clifford C, and Pauli P, this returns the PauliTerm
representing CPC^{\dagger}.
:param clifford: A Program that consists only of Clifford operations.
:param pauli_in: A PauliTerm to be acted on by clifford via conjugation.
:return: A PauliTerm corresponding to clifford * pauli_in * clifford^{\dagger}
"""
# do nothing if `pauli_in` is the identity
if is_identity(pauli_in):
return pauli_in
indices_and_terms = list(zip(*list(pauli_in.operations_as_set())))
request = ConjugatePauliByCliffordRequest(
pauli_indices=list(indices_and_terms[0]),
pauli_symbols=list(indices_and_terms[1]),
clifford=clifford.out(calibrations=False),
)
response = self._compiler_client.conjugate_pauli_by_clifford(request)
phase_factor, paulis = response.phase_factor, response.pauli
pauli_out = PauliTerm("I", 0, 1.0j ** phase_factor)
clifford_qubits = clifford.get_qubits()
pauli_qubits = pauli_in.get_qubits()
all_qubits = sorted(set(cast(List[int], pauli_qubits)).union(set(cast(List[int], clifford_qubits))))
# The returned pauli will have specified its value on all_qubits, sorted by index.
# This is maximal set of qubits that can be affected by this conjugation.
for i, pauli in enumerate(paulis):
pauli_out = cast(PauliTerm, pauli_out * PauliTerm(pauli, all_qubits[i]))
return cast(PauliTerm, pauli_out * pauli_in.coefficient)
def generate_rb_sequence(
self,
depth: int,
gateset: Sequence[Gate],
seed: Optional[int] = None,
interleaver: Optional[Program] = None,
) -> List[Program]:
"""
Construct a randomized benchmarking experiment on the given qubits, decomposing into
gateset. If interleaver is not provided, the returned sequence will have the form
C_1 C_2 ... C_(depth-1) C_inv ,
where each C is a Clifford element drawn from gateset, C_{< depth} are randomly selected,
and C_inv is selected so that the entire sequence composes to the identity. If an
interleaver G (which must be a Clifford, and which will be decomposed into the native
gateset) is provided, then the sequence instead takes the form
C_1 G C_2 G ... C_(depth-1) G C_inv .
The JSON response is a list of lists of indices, or Nones. In the former case, they are the
index of the gate in the gateset.
:param depth: The number of Clifford gates to include in the randomized benchmarking
experiment. This is different than the number of gates in the resulting experiment.
:param gateset: A list of pyquil gates to decompose the Clifford elements into. These
must generate the clifford group on the qubits of interest. e.g. for one qubit
[RZ(np.pi/2), RX(np.pi/2)].
:param seed: A positive integer used to seed the PRNG.
:param interleaver: A Program object that encodes a Clifford element.
:return: A list of pyquil programs. Each pyquil program is a circuit that represents an
element of the Clifford group. When these programs are composed, the resulting Program
will be the randomized benchmarking experiment of the desired depth. e.g. if the return
programs are called cliffords then `sum(cliffords, Program())` will give the randomized
benchmarking experiment, which will compose to the identity program.
"""
# Support QubitPlaceholders: we temporarily index to arbitrary integers.
# `generate_rb_sequence` handles mapping back to the original gateset gates.
gateset_as_program = address_qubits(sum(gateset, Program()))
qubits = len(gateset_as_program.get_qubits())
gateset_for_api = gateset_as_program.out().splitlines()
interleaver_out: Optional[str] = None
if interleaver:
assert isinstance(interleaver, Program)
interleaver_out = interleaver.out(calibrations=False)
depth = int(depth) # needs to be jsonable, no np.int64 please!
request = GenerateRandomizedBenchmarkingSequenceRequest(
depth=depth,
num_qubits=qubits,
gateset=gateset_for_api,
seed=seed,
interleaver=interleaver_out,
)
response = self._compiler_client.generate_randomized_benchmarking_sequence(request)
programs = []
for clifford in response.sequence:
clifford_program = Program()
if interleaver:
clifford_program._calibrations = interleaver.calibrations
# Like below, we reversed the order because the API currently hands back the Clifford
# decomposition right-to-left.
for index in reversed(clifford):
clifford_program.inst(gateset[index])
programs.append(clifford_program)
# The programs are returned in "textbook style" right-to-left order. To compose them into
# the correct pyquil program, we reverse the order.
return list(reversed(programs))
| 46.670886
| 118
| 0.675481
| 6,151
| 0.834147
| 0
| 0
| 0
| 0
| 0
| 0
| 4,049
| 0.549091
|
f9a664bbf396891165319f919889de28be4868a4
| 212
|
py
|
Python
|
mine/collatz.py
|
zahessi/unileaks
|
3ed2462e11f8e3decc64ed8faceee42438ec06ff
|
[
"MIT"
] | null | null | null |
mine/collatz.py
|
zahessi/unileaks
|
3ed2462e11f8e3decc64ed8faceee42438ec06ff
|
[
"MIT"
] | null | null | null |
mine/collatz.py
|
zahessi/unileaks
|
3ed2462e11f8e3decc64ed8faceee42438ec06ff
|
[
"MIT"
] | null | null | null |
n = input("Мы будем проверять теорию коллатца. Введите число")
def collatz(num):
num = int(num)
if not num: return None
while num != 1:
num = num/2 if num % 2 == 0 else 3*num + 1
collatz(n)
| 21.2
| 62
| 0.603774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.366142
|
f9a78f0606518ebedfb5bc19389f7930753e4683
| 554
|
py
|
Python
|
questions/q292_pair_with_given_difference/simple_storage.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
questions/q292_pair_with_given_difference/simple_storage.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | 1
|
2021-05-15T07:56:51.000Z
|
2021-05-15T07:56:51.000Z
|
questions/q292_pair_with_given_difference/simple_storage.py
|
aadhityasw/Competitive-Programs
|
901a48d35f024a3a87c32a45b7f4531e8004a203
|
[
"MIT"
] | null | null | null |
class Solution:
def findPair(self, arr, L,N):
store = set()
for num in arr :
if num in store :
return True
store.add(num - N)
store.add(num + N)
return False
if __name__ == '__main__':
t = int(input())
for _ in range(t):
L,N = [int(x) for x in input().split()]
arr = [int(x) for x in input().split()]
solObj = Solution()
if(solObj.findPair(arr,L, N)):
print(1)
else:
print(-1)
| 19.103448
| 47
| 0.440433
| 256
| 0.462094
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.018051
|
f9a9b8479139b00ee0e9e3f90eb6496c035fb7c6
| 1,270
|
py
|
Python
|
tlh/data/pointer.py
|
notyourav/the-little-hat
|
f52b38b18b762e704b36cef06c07656348ea6995
|
[
"MIT"
] | null | null | null |
tlh/data/pointer.py
|
notyourav/the-little-hat
|
f52b38b18b762e704b36cef06c07656348ea6995
|
[
"MIT"
] | null | null | null |
tlh/data/pointer.py
|
notyourav/the-little-hat
|
f52b38b18b762e704b36cef06c07656348ea6995
|
[
"MIT"
] | 2
|
2021-10-05T20:40:12.000Z
|
2022-01-05T00:17:36.000Z
|
from dataclasses import dataclass
from tlh.const import RomVariant
from intervaltree import IntervalTree, Interval
@dataclass
class Pointer:
rom_variant: RomVariant = None
address: int = 0
points_to: int = 0
certainty: int = 0
author: str = ''
note: str = ''
class PointerList:
def __init__(self, pointers: list[Pointer], rom_variant: RomVariant) -> None:
intervals = []
for pointer in pointers:
if pointer.rom_variant == rom_variant:
intervals.append(Interval(pointer.address, pointer.address+4, pointer))
self.tree = IntervalTree(intervals)
def get_pointers_at(self, index: int) -> list[Pointer]:
pointers = []
for interval in self.tree.at(index):
pointers.append(interval.data)
return pointers
def append(self, pointer: Pointer) -> None:
self.tree.add(Interval(pointer.address, pointer.address+4, pointer))
def remove(self, pointer: Pointer) -> None:
self.tree.remove(Interval(pointer.address, pointer.address+4, pointer))
def __iter__(self):
return map(lambda x: x.data, self.tree.__iter__())
def get_sorted_pointers(self) -> list[Pointer]:
return map(lambda x: x.data, sorted(self.tree))
| 30.97561
| 87
| 0.659843
| 1,140
| 0.897638
| 0
| 0
| 167
| 0.131496
| 0
| 0
| 4
| 0.00315
|
f9ad826a4941dde9f3abe0bd1c8c6c6ea3cdfc2e
| 554
|
py
|
Python
|
tests/testapp/models.py
|
jcass77/django-yearlessdate
|
19ed3ecb16efe33eea6f02138bb4365447cb2ea7
|
[
"BSD-3-Clause"
] | 16
|
2016-09-23T07:09:40.000Z
|
2022-01-13T13:22:31.000Z
|
tests/testapp/models.py
|
jcass77/django-yearlessdate
|
19ed3ecb16efe33eea6f02138bb4365447cb2ea7
|
[
"BSD-3-Clause"
] | 8
|
2017-12-06T08:32:12.000Z
|
2021-05-13T15:31:21.000Z
|
tests/testapp/models.py
|
jcass77/django-yearlessdate
|
19ed3ecb16efe33eea6f02138bb4365447cb2ea7
|
[
"BSD-3-Clause"
] | 16
|
2016-03-04T07:55:56.000Z
|
2021-04-16T15:14:26.000Z
|
from django.db import models
from djangoyearlessdate.models import YearlessDateField, YearField
from djangoyearlessdate.helpers import YearlessDate
class YearlessDateModel(models.Model):
"""Sample model for testing YearlessDateField.
"""
yearless_date = YearlessDateField()
optional_yearless_date = YearlessDateField(blank=True, null=True)
yearless_date_with_default = YearlessDateField(default=YearlessDate(day=3, month=5))
class YearModel(models.Model):
"""Sample model for testing YearField.
"""
year = YearField()
| 30.777778
| 88
| 0.772563
| 400
| 0.722022
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.180505
|
f9ade94d5d26429d7edd4cdfcee8f28919e4bd4f
| 597
|
py
|
Python
|
ror/NoTieResolver.py
|
jakub-tomczak/ror
|
cf9ab38a2d66f4816a1289b9726911960059fce7
|
[
"MIT"
] | null | null | null |
ror/NoTieResolver.py
|
jakub-tomczak/ror
|
cf9ab38a2d66f4816a1289b9726911960059fce7
|
[
"MIT"
] | null | null | null |
ror/NoTieResolver.py
|
jakub-tomczak/ror
|
cf9ab38a2d66f4816a1289b9726911960059fce7
|
[
"MIT"
] | null | null | null |
from ror.RORParameters import RORParameters
from ror.RORResult import RORResult
from ror.AbstractTieResolver import AbstractTieResolver
from ror.result_aggregator_utils import Rank
class NoTieResolver(AbstractTieResolver):
def __init__(self) -> None:
super().__init__('NoResolver')
def resolve_rank(self, rank: Rank, result: RORResult, parameters: RORParameters) -> Rank:
super().resolve_rank(rank, result, parameters)
return rank
def help(self) -> str:
return 'This resolver does nothing. It just returns same rank as was provided as an input.'
| 37.3125
| 99
| 0.740369
| 414
| 0.693467
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 0.160804
|
f9ae44085aad4c16f6592f43de3a099254f05d59
| 47
|
py
|
Python
|
conjur/util/__init__.py
|
mbjahnoon/conjur-api-python3
|
ec1f62bb1baf2bdcd34d2fb92c97db724f761020
|
[
"Apache-2.0"
] | 16
|
2019-05-17T15:34:59.000Z
|
2021-11-08T10:30:21.000Z
|
conjur/util/__init__.py
|
mbjahnoon/conjur-api-python3
|
ec1f62bb1baf2bdcd34d2fb92c97db724f761020
|
[
"Apache-2.0"
] | 301
|
2019-05-07T18:27:10.000Z
|
2022-01-26T13:03:49.000Z
|
conjur/util/__init__.py
|
cyberark/cyberark-conjur-cli
|
2507e8769808643d89efa7e2496cfc14f505bd7e
|
[
"Apache-2.0"
] | 10
|
2019-07-30T17:00:13.000Z
|
2022-01-20T17:00:34.000Z
|
"""
util
This package is for util modules
"""
| 7.833333
| 32
| 0.659574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.978723
|
f9ae75ada823a3568610724a901cd66400be071c
| 331
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/bulb-switcher-iii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/bulb-switcher-iii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/bulb-switcher-iii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def numTimesAllBlue(self, light):
"""
:type light: List[int]
:rtype: int
"""
result, right = 0, 0
for i, num in enumerate(light, 1):
right = max(right, num)
result += (right == i)
return result
| 22.066667
| 42
| 0.483384
| 301
| 0.909366
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.277946
|
f9af9e5f05d82322479bf82bec5cb2e8770d26f1
| 119
|
py
|
Python
|
code/aoc/day1.py
|
souradeepta/PythonPractice
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
[
"MIT"
] | null | null | null |
code/aoc/day1.py
|
souradeepta/PythonPractice
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
[
"MIT"
] | 4
|
2021-03-19T02:04:20.000Z
|
2021-09-22T18:54:16.000Z
|
code/aoc/day1.py
|
souradeepta/PythonPractice
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
[
"MIT"
] | null | null | null |
def input_parse():
with open("day1.txt", r) as inputFile:
input_list = inputFile.readline()
return
| 23.8
| 42
| 0.621849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.084034
|
f9b08e37d9b636d67355ff414f22cda84aa9f53b
| 3,472
|
py
|
Python
|
tilapia/lib/provider/chains/bch/provider.py
|
huazhouwang/python_multichain_wallet
|
52e0acdc2984c08990cb36433ef17a414fbe8312
|
[
"MIT"
] | 2
|
2021-09-23T13:47:08.000Z
|
2021-09-24T02:39:14.000Z
|
tilapia/lib/provider/chains/bch/provider.py
|
huazhouwang/tilapia
|
52e0acdc2984c08990cb36433ef17a414fbe8312
|
[
"MIT"
] | null | null | null |
tilapia/lib/provider/chains/bch/provider.py
|
huazhouwang/tilapia
|
52e0acdc2984c08990cb36433ef17a414fbe8312
|
[
"MIT"
] | null | null | null |
import itertools
import logging
from typing import Dict, Tuple
from tilapia.lib.basic import bip44
from tilapia.lib.basic.functional.require import require
from tilapia.lib.hardware import interfaces as hardware_interfaces
from tilapia.lib.provider import data
from tilapia.lib.provider.chains import btc
from tilapia.lib.provider.chains.bch.sdk import cash_address
from tilapia.lib.secret import interfaces as secret_interfaces
logger = logging.getLogger("app.chain")
class BCHProvider(btc.BTCProvider):
ADDRESS_PREFIX = "bitcoincash"
def pubkey_to_address(self, verifier: secret_interfaces.VerifierInterface, encoding: str = None) -> str:
require(encoding == "P2PKH", f"Invalid address encoding: {encoding}")
pubkey = verifier.get_pubkey(compressed=True)
pubkey_hash = self.network.keys.public(pubkey).hash160(is_compressed=True)
if encoding == "P2PKH": # Pay To Public Key Hash
address = cash_address.to_cash_address(self.ADDRESS_PREFIX, pubkey_hash)
else:
raise Exception("Should not be here")
return address
def verify_address(self, address: str) -> data.AddressValidation:
is_valid, encoding = False, None
try:
if ":" not in address:
address = f"{self.ADDRESS_PREFIX}:{address}"
prefix, _ = address.split(":")
if prefix == self.ADDRESS_PREFIX:
is_valid = cash_address.is_valid_cash_address(address)
encoding = "P2PKH" if is_valid else None
except Exception as e:
logger.exception(f"Illegal address: {address}, error: {e}")
address = address if is_valid else ""
return data.AddressValidation(
normalized_address=address,
display_address=address,
is_valid=is_valid,
encoding=encoding,
)
def _cash_address_to_legacy_address(self, address: str) -> str:
if ":" not in address:
return address
pubkey_hash = cash_address.export_pubkey_hash(address)
return self.network.address.for_p2pkh(pubkey_hash)
def _pre_process_unsigned_tx(self, unsigned_tx: data.UnsignedTx, signers: dict) -> Tuple[data.UnsignedTx, dict]:
for i in itertools.chain(unsigned_tx.inputs, unsigned_tx.outputs):
i.address = self._cash_address_to_legacy_address(i.address) # pycoin supports legacy bch address only
signers = {self._cash_address_to_legacy_address(k): v for k, v in signers.items()}
return unsigned_tx, signers
def sign_transaction(
self, unsigned_tx: data.UnsignedTx, signers: Dict[str, secret_interfaces.SignerInterface]
) -> data.SignedTx:
unsigned_tx, signers = self._pre_process_unsigned_tx(unsigned_tx, signers)
return super(BCHProvider, self).sign_transaction(unsigned_tx, signers)
def hardware_sign_transaction(
self,
hardware_client: hardware_interfaces.HardwareClientInterface,
unsigned_tx: data.UnsignedTx,
bip44_path_of_signers: Dict[str, bip44.BIP44Path],
) -> data.SignedTx:
unsigned_tx, bip44_path_of_signers = self._pre_process_unsigned_tx(unsigned_tx, bip44_path_of_signers)
return super(BCHProvider, self).hardware_sign_transaction(hardware_client, unsigned_tx, bip44_path_of_signers)
def get_token_info_by_address(self, token_address: str) -> Tuple[str, str, int]:
raise NotImplementedError()
| 39.908046
| 118
| 0.701613
| 2,998
| 0.863479
| 0
| 0
| 0
| 0
| 0
| 0
| 255
| 0.073445
|
f9b1801867198f74dece84cf86c7c8fce031dea8
| 1,747
|
py
|
Python
|
Excercises/Automata.py
|
RyanClinton777/graph-theory-project
|
a3ff9512da3ac2d48138b59d2e8eb8899d8e552d
|
[
"Unlicense"
] | null | null | null |
Excercises/Automata.py
|
RyanClinton777/graph-theory-project
|
a3ff9512da3ac2d48138b59d2e8eb8899d8e552d
|
[
"Unlicense"
] | null | null | null |
Excercises/Automata.py
|
RyanClinton777/graph-theory-project
|
a3ff9512da3ac2d48138b59d2e8eb8899d8e552d
|
[
"Unlicense"
] | null | null | null |
""" DFA Automata Implementation """
class State:
""" Nodes/States in an automaton """
def __init__(self, isAccept, arrows):
# Boolean, wether or not this state is an accept state
self.isAccept = isAccept
# dictionary of keys/labels:Other states
self.arrows = arrows
class DFA:
""" A DFA """
def __init__(self, start):
# Starting state
self.start = start
def match(self, s):
""" check and return wether or not string s is accepted by our automaton """
# Current state we are in
currentState = self.start
# Loop through characters in state
for c in s:
# Set current state as one pointed to by key of c
currentState = currentState.arrows[c]
# Return wether or not current state is an accept state
return currentState.isAccept
def compile():
""" Create our automaton """
# Creating an DFA with two states; 0 points to themselves, and 1 points to the other. (checking for even parity)
# Compile is standard terminoligy for creating something like this
# Create start state
start = State(True, {})
# Other state
other = State(False, {})
# The states point to themselves for 0
start.arrows['0'] = start
other.arrows['0'] = other
# They point to eachother for 1
start.arrows['1'] = other
other.arrows['1'] = start
a = DFA(start)
return a
# Create automaton instance
myAuto = compile()
# tests
for s in ['1100', '11111', '', '1', '0']:
result = myAuto.match(s)
print(f"{s} accepted? {result}")
for s in ['000', '001', '010', '011', '100', '101', '110', '111']:
result = myAuto.match(s)
print(f"{s} accepted? {result}")
| 28.639344
| 117
| 0.611906
| 835
| 0.477962
| 0
| 0
| 0
| 0
| 0
| 0
| 899
| 0.514596
|
f9b1deee9145e20a82bd34752d2e98a230a5a620
| 568
|
py
|
Python
|
src/database/migrations/0027_auto_20190829_1530.py
|
gregory-chekler/api
|
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
|
[
"MIT"
] | 2
|
2020-07-24T12:58:17.000Z
|
2020-12-17T02:26:13.000Z
|
src/database/migrations/0027_auto_20190829_1530.py
|
gregory-chekler/api
|
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
|
[
"MIT"
] | 214
|
2019-06-26T17:33:54.000Z
|
2022-03-26T00:02:34.000Z
|
src/database/migrations/0027_auto_20190829_1530.py
|
massenergize/portalBackEnd
|
7ed971b2be13901667a216d8c8a46f0bed6d6ccd
|
[
"MIT"
] | 6
|
2020-03-13T20:29:06.000Z
|
2021-08-20T16:15:08.000Z
|
# Generated by Django 2.2.3 on 2019-08-29 15:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('database', '0026_auto_20190829_1528'),
]
operations = [
migrations.AddField(
model_name='vendor',
name='email',
field=models.EmailField(blank=True, max_length=100),
),
migrations.AddField(
model_name='vendor',
name='phone_number',
field=models.CharField(blank=True, max_length=100),
),
]
| 23.666667
| 64
| 0.584507
| 475
| 0.836268
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.209507
|
f9b3824e8469cbf08d1932118a1c35ca85f4a512
| 207
|
py
|
Python
|
robots/PDF.py
|
NaskIII/Projeto-Alpha-X
|
23e3de59185cdc85b7fc13299cfc51846bfc63b6
|
[
"MIT"
] | null | null | null |
robots/PDF.py
|
NaskIII/Projeto-Alpha-X
|
23e3de59185cdc85b7fc13299cfc51846bfc63b6
|
[
"MIT"
] | 3
|
2019-08-26T03:51:57.000Z
|
2019-08-29T04:24:51.000Z
|
robots/PDF.py
|
NaskIII/Projeto-Alpha-X
|
23e3de59185cdc85b7fc13299cfc51846bfc63b6
|
[
"MIT"
] | null | null | null |
import sys
import os
class PDF(object):
def pdf(self, docx):
sofficepath = 'soffice'
convertcmd = '%s --headless --convert-to pdf %%s' % sofficepath
os.popen(convertcmd % docx)
| 20.7
| 71
| 0.613527
| 183
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.217391
|
f9b5b83c0da1cf14ae65df8fb1a134313f52638f
| 1,181
|
py
|
Python
|
ntlmlib/__init__.py
|
Dlat/ntlmlib
|
49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05
|
[
"Apache-2.0"
] | 1
|
2018-08-20T19:33:58.000Z
|
2018-08-20T19:33:58.000Z
|
ntlmlib/__init__.py
|
Dlat/ntlmlib
|
49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05
|
[
"Apache-2.0"
] | null | null | null |
ntlmlib/__init__.py
|
Dlat/ntlmlib
|
49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05
|
[
"Apache-2.0"
] | null | null | null |
"""
_ _ _ _ _
_ __ | |_| |_ __ ___ | (_) |__
| '_ \| __| | '_ ` _ \| | | '_ \
| | | | |_| | | | | | | | | |_) |
|_| |_|\__|_|_| |_| |_|_|_|_.__/
A robust, fast and efficient 'first-class' Python Library for NTLM authentication, signing and encryption
(c) 2015, Ian Clegg <ian.clegg@sourcewarp.com>
ntlmlib is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
# Set default logging handler to avoid "No handler found" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| 33.742857
| 106
| 0.679932
| 84
| 0.071126
| 0
| 0
| 0
| 0
| 0
| 0
| 956
| 0.809483
|