hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5bc56560720d2cf232c7165fe370c0dba7a927b7 | 3,065 | py | Python | control/adaptive_affine.py | jnez71/misc | 397ac0b8e3bccec4daa73db8963bb0510eebebfe | [
"MIT"
] | 3 | 2017-05-12T21:52:53.000Z | 2018-04-02T16:05:08.000Z | control/adaptive_affine.py | jnez71/misc | 397ac0b8e3bccec4daa73db8963bb0510eebebfe | [
"MIT"
] | null | null | null | control/adaptive_affine.py | jnez71/misc | 397ac0b8e3bccec4daa73db8963bb0510eebebfe | [
"MIT"
] | 4 | 2017-04-13T01:06:21.000Z | 2019-01-22T09:43:38.000Z | #!/usr/bin/env python3
"""
Adaptive Affine Control:
My favorite myopic (not MPC, DP, or RL) control-law when absolutely nothing is known about your system except
that the control is additive and fully-actuated:
```
dx/dt = f(x,t) + u # drift f unknown, state x at time t known, choose control u to make x=r
u = W.dot(x) + b # policy is affine function of state
dW/dt = outer(k*(r-x), x) # parameters are adapted (learned online) to oppose the...
db/dt = k*(r-x) # ... gradient of the error-energy-rate d/dt((k/2)*(r-x)^2)
```
Try this with any crazy f. Even throw in a B(x,t) transformation on u (though no guarantees for that).
It's basically PID but with the PD gains evolving according to the regression-like dW/dt I gave.
PID with stationary PD gains fails when the f is reasonably nonlinear. This law still works.
Of course, additive-control fully-actuated systems pretty much only model lame low-level problems, but still neat.
"""
# Dependencies
import numpy as np
from matplotlib import pyplot
##################################################
# Controller
class C:
    """Adaptive affine controller: u = W.dot(x) + b, with (W, b) learned online.

    The parameters are adapted each step to oppose the gradient of the
    error-energy-rate, as described in the module docstring.
    """

    def __init__(self, n, k):
        """n: state dimension; k: adaptation gain."""
        self.n = int(n)
        self.k = float(k)
        self.W = np.zeros((self.n, self.n), dtype=float)
        self.b = np.zeros(self.n, dtype=float)

    def u(self, r, x, dt):
        """Adapt (W, b) toward reference ``r``, then return the control action."""
        # One Euler step of dW/dt = outer(k*(r-x), x) and db/dt = k*(r-x).
        step = self.k * (r - x) * dt
        self.W += np.outer(step, x)
        self.b += step
        # Action is computed with the freshly updated parameters.
        return self.W.dot(x) + self.b
##################################################
# Drift dynamic
n = 3  # state dimension

def f(x, t):
    """Unforced drift dynamic: a Lorenz-style vector field (unknown to the controller)."""
    sigma, rho, beta = 10.0, 28.0, 2.6
    return np.array([sigma * (x[1] - x[0]),
                     x[0] * (rho - x[2]) - x[1],
                     x[0] * x[1] - beta * x[2]])
# Actuator dynamic
# (needs to be identity for Lyapunov proof, but might still work otherwise)
def B(x, t):
    """State-dependent actuator gain matrix (diagonal, first two entries depend on x)."""
    gain = np.zeros((3, 3), dtype=float)
    gain[0, 0] = x[1]
    gain[1, 1] = 2 * x[0]
    gain[2, 2] = 1.0
    return gain
##################################################
# Time
dt = 0.001  # Euler integration step
T = np.arange(0.0, 3.0, dt)
# State
# X[i] holds the state at time T[i]; row 0 is the initial condition.
X = np.zeros((len(T), n), dtype=float)
X[0] = [-1.0, 2.0, 3.0]
# Control
# U[i] is the action applied over the step starting at T[i].
U = np.zeros((len(T), n), dtype=float)
c = C(n, 1.0)  # adaptive controller with gain k = 1.0
# Reference
# Constant setpoint replicated for every timestep.
R = np.array([[6.0, 7.0, -7.0]] * len(T))
##################################################
# Simulation
control = True  # set False to simulate the natural (uncontrolled) response
for i in range(len(T)-1):
    # Controller adapts its parameters and emits the next action.
    if control: U[i] = c.u(R[i], X[i], dt)
    # Forward-Euler step of dx/dt = f(x,t) + B(x,t)*u.
    dxdt = f(X[i], T[i]) + B(X[i], T[i]).dot(U[i])
    X[i+1] = X[i] + dxdt*dt
##################################################
# Plot
# One subplot per state dimension, sharing the time axis.
fig = pyplot.figure()
if control: fig.suptitle("Controlled Response", fontsize=26)
else: fig.suptitle("Natural Response", fontsize=26)
ax = None
for i in range(n):
    ax = fig.add_subplot(n, 1, i+1, sharex=ax)
    ax.plot(T, X[:, i], color='b', linewidth=2, label="state")
    ax.plot(T, R[:, i], color='g', linewidth=3, linestyle=':', label="desire")
    # U's last row is never filled by the loop above, so drop it from the plot.
    ax.plot(T[:-1], U[:-1, i], color='r', linewidth=0.5, label="action", scaley=False)
    ax.set_xlim([T[0], T[-1]])
    ax.set_ylabel("state "+str(i), fontsize=20)
    ax.grid(True)
# Label only the bottom axis; legend drawn on the last subplot.
ax.set_xlabel("time", fontsize=20)
ax.legend()
pyplot.show()
| 31.597938 | 114 | 0.54323 | 331 | 0.107993 | 0 | 0 | 0 | 0 | 0 | 0 | 1,490 | 0.486134 |
5bc679b5e30f69fabf91b151ec50839acee5a93d | 1,227 | py | Python | hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/preprocessing/preprocessing.py | RodrigoATorres/hermione | 6cbed73e309f8025a48f33165d8f29561c6a3cc7 | [
"Apache-2.0"
] | 183 | 2020-06-03T22:43:14.000Z | 2022-03-17T22:39:07.000Z | hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/preprocessing/preprocessing.py | RodrigoATorres/hermione | 6cbed73e309f8025a48f33165d8f29561c6a3cc7 | [
"Apache-2.0"
] | 31 | 2020-06-03T22:55:18.000Z | 2022-03-27T20:06:17.000Z | hermione/module_templates/__IMPLEMENTED_BASE__/src/ml/preprocessing/preprocessing.py | RodrigoATorres/hermione | 6cbed73e309f8025a48f33165d8f29561c6a3cc7 | [
"Apache-2.0"
] | 43 | 2020-06-03T22:45:03.000Z | 2021-12-29T19:43:54.000Z | import pandas as pd
from ml.preprocessing.normalization import Normalizer
from category_encoders import *
import logging
# Raise the root logger to INFO so the preprocessing log messages are emitted.
logging.getLogger().setLevel(logging.INFO)
class Preprocessing:
    """Data preprocessing steps applied before model training."""

    def clean_data(self, df: pd.DataFrame):
        """Perform data cleansing on a copy of ``df``.

        Casts the ``Pclass`` column to ``object`` and drops rows with
        missing values. The input frame is left untouched.

        Parameters
        ----------
        df : pd.DataFrame
            Dataframe to be processed.

        Returns
        -------
        pd.DataFrame
            Cleaned data frame.
        """
        logging.info("Cleaning data")
        cleaned = df.copy()
        cleaned['Pclass'] = cleaned.Pclass.astype('object')
        return cleaned.dropna()

    def categ_encoding(self, df: pd.DataFrame):
        """One-hot encode the categorical variables of ``df``.

        Parameters
        ----------
        df : pd.DataFrame
            Dataframe to be processed.

        Returns
        -------
        pd.DataFrame
            Encoded data frame.
        """
        logging.info("Category encoding")
        return pd.get_dummies(df.copy())
| 23.596154 | 59 | 0.546862 | 1,059 | 0.863081 | 0 | 0 | 0 | 0 | 0 | 0 | 649 | 0.528932 |
5bc752f683441124b17790de08739437980201a1 | 738 | py | Python | python-backend/tests/status/resources/test_mine_status_resource.py | MaxWardle/mds | 15d8405e6e95af98da9588f353c5d6692d1aa3d6 | [
"Apache-2.0"
] | null | null | null | python-backend/tests/status/resources/test_mine_status_resource.py | MaxWardle/mds | 15d8405e6e95af98da9588f353c5d6692d1aa3d6 | [
"Apache-2.0"
] | null | null | null | python-backend/tests/status/resources/test_mine_status_resource.py | MaxWardle/mds | 15d8405e6e95af98da9588f353c5d6692d1aa3d6 | [
"Apache-2.0"
] | 1 | 2019-01-12T23:44:13.000Z | 2019-01-12T23:44:13.000Z | import json
from app.api.constants import MINE_STATUS_OPTIONS
from tests.constants import TEST_MINE_GUID
# GET
def test_get_mine_status_option(test_client, auth_headers):
    """GET /mines/status returns 200 and the full set of status options."""
    response = test_client.get('/mines/status', headers=auth_headers['full_auth_header'])
    payload = json.loads(response.data.decode())
    assert response.status_code == 200
    assert payload == {'options': MINE_STATUS_OPTIONS}
def test_get_mine_status_not_found(test_client, auth_headers):
    """GET /mines/status/<guid> for a mine guid yields a 404 with a descriptive message."""
    response = test_client.get('/mines/status/' + TEST_MINE_GUID, headers=auth_headers['full_auth_header'])
    payload = json.loads(response.data.decode())
    assert response.status_code == 404
    assert payload['error']['message'] == 'Mine Status not found'
| 36.9 | 107 | 0.764228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.162602 |
5bc7db691db6bd3aeb10eeebc1044c688e5b67aa | 3,124 | py | Python | DevTools/lineCount.py | spiiin/CadEditor | b28316ddd9be4e79ac5e6adf89b7751df609d94e | [
"MIT"
] | 164 | 2015-01-16T17:09:45.000Z | 2022-03-17T03:39:08.000Z | DevTools/lineCount.py | spiiin/CadEditor | b28316ddd9be4e79ac5e6adf89b7751df609d94e | [
"MIT"
] | 13 | 2015-02-20T17:07:40.000Z | 2021-07-11T05:33:17.000Z | DevTools/lineCount.py | spiiin/CadEditor | b28316ddd9be4e79ac5e6adf89b7751df609d94e | [
"MIT"
] | 23 | 2015-09-01T22:29:11.000Z | 2022-01-10T01:32:32.000Z | #!/usr/bin/env python2
#Script for calculate LoC of all source files of project
import os,string
import sys
# File extensions that count as source code (note: Python 2 script).
extension_list = ['h','hpp','cpp','c','pas','dpr','asm','py','q3asm','def','sh','bat','cs','java','cl','lisp','ui',"nut"]
# Per-extension line-comment markers; anything not listed defaults to "//".
comment_sims = {'asm' : ';', 'py' : '#', 'cl':';','lisp':';'}
# Maps full file path -> {"full": ..., "empty": ..., "comment": ...} counts.
source_files = { }
# Directory-name fragments that are skipped entirely during the walk.
exclude_names = ["libs", "release", ".git"]

# Require exactly one argument: the root folder to scan.
if len(sys.argv)!=2:
    print "You must call script as 'lineCount.py <path_to_folder>'"
    raw_input()
    exit(-1)
path = sys.argv[1]
# Running total of directory entries, accumulated by calc_files_count.
files_count = 0
def calc_files_count (arg,dirname, names):
    """os.path.walk visitor: add this directory's entry count to the global total."""
    global files_count
    files_count = files_count + len(names)
def calc_strings_count(arg, dirname, names):
    """os.path.walk visitor: count full/empty/comment lines per source file.

    Results are stored in the module-level ``source_files`` dict keyed by
    the full file path.
    """
    #print "%32s"%dirname
    # Skip excluded directories (substring match, case-insensitive).
    if any(dirname.lower().find(exclude_name) != -1 for exclude_name in exclude_names):
        return
    for name in names:
        full_name = os.path.join(dirname,name)
        file_name,file_ext = os.path.splitext(full_name)
        file_ext = file_ext[1:].lower()
        # Pick the language's line-comment marker; default to "//".
        if comment_sims.has_key(file_ext):
            comment_sim = comment_sims[file_ext]
        else:
            comment_sim = "//"
        if file_ext in extension_list:
            #.designer.cs files don't count
            if file_name.lower().find(".designer") != -1:
                continue
            f = file(full_name)
            file_text = f.readlines()
            empty_lines_count = 0
            comment_lines = 0
            for line in file_text :
                line_without_spaces = line.lstrip(string.whitespace)
                # A line is "empty" when whitespace-only, "comment" when it
                # starts with the comment marker; everything else is code.
                if line_without_spaces=="":
                    empty_lines_count += 1
                elif line_without_spaces.startswith(comment_sim):
                    comment_lines +=1
            source_files[full_name]= {"full" : len(file_text) ,"empty" :empty_lines_count, "comment":comment_lines}
            f.close()
def calc(path_root):
    """Walk ``path_root`` and print a per-file and aggregate line-count report."""
    os.path.walk(path_root,calc_files_count,0)
    print "Found : %4i files"%files_count
    print ""
    #calculate line count
    os.path.walk(path_root,calc_strings_count,0)
    #convert to list and sort
    lst = source_files.items()
    # NOTE: tuple-unpacking lambda parameters are Python 2-only syntax.
    lst.sort(key = lambda (key, val): val["full"])
    strings_count=0
    empty_lines_count=0
    comment_lines_count=0
    for name,val in lst:
        l_f,l_e,l_c = val["full"],val["empty"],val["comment"]
        dummy,short_name = os.path.split(name)
        # Per-file report: total (code/comments/empty).
        print "%-36s : %5i (%i/%i/%i)"%(short_name,l_f, l_f-l_c-l_e,l_c,l_e )
        strings_count+=l_f
        empty_lines_count+=l_e
        comment_lines_count+=l_c
    print "\nformat -\nfilename : full_lines_count (code_lines_count/comments_count/empty_lines_count)"
    print 24*"-"
    print "Found : %4i files"%files_count
    print "Summary : %4i lines"%strings_count
    print "Code : %4i lines"%(strings_count - comment_lines_count - empty_lines_count)
    print "Comments: %4i lines"%comment_lines_count
    print "Empty : %4i lines"%empty_lines_count
    print 24*"-"
# Banner, then run the report on the folder given on the command line.
print "%s %s %s"%( "="*24, "================", "="*24)
print "%-24s %s %24s"%( "="*3, "Spiiin LineCounter", "="*3)
print "%s %s %s"%( "="*24, "================", "="*24)
calc(path)
# Keep the console window open until the user presses Enter.
raw_input()
5bc948954ddbc5d9352dfc11d6c0ba8370fd9e74 | 14,307 | py | Python | objectrocket/instances/mongodb.py | objectrocket/python-client | ae739b4946b84cbe7e7f06ead91c6319b8e6b324 | [
"MIT"
] | 5 | 2015-07-15T19:20:33.000Z | 2018-02-13T21:36:49.000Z | objectrocket/instances/mongodb.py | objectrocket/python-client | ae739b4946b84cbe7e7f06ead91c6319b8e6b324 | [
"MIT"
] | 28 | 2015-07-15T17:54:22.000Z | 2020-12-23T13:14:25.000Z | objectrocket/instances/mongodb.py | objectrocket/python-client | ae739b4946b84cbe7e7f06ead91c6319b8e6b324 | [
"MIT"
] | 5 | 2016-04-14T19:01:20.000Z | 2019-06-24T19:50:26.000Z | """MongoDB instance classes and logic."""
import datetime
import json
import logging
import time
import pymongo
import requests
from concurrent import futures
from distutils.version import LooseVersion
from objectrocket import bases
from objectrocket import util
# Module-level logger, namespaced by this module's dotted import path.
logger = logging.getLogger(__name__)
class MongodbInstance(bases.BaseInstance, bases.Extensible, bases.InstanceAclsInterface):
    """An ObjectRocket MongoDB service instance.

    :param dict instance_document: A dictionary representing the instance object, most likey coming
        from the ObjectRocket API.
    :param objectrocket.instances.Instances instances: An instance of
        :py:class:`objectrocket.instances.Instances`.
    """

    def __init__(self, instance_document, instances):
        super(MongodbInstance, self).__init__(
            instance_document=instance_document,
            instances=instances
        )

        # Bind required pseudo private attributes from API response document.
        # Smallest plans may not have an SSL/TLS connection string.
        self._ssl_connect_string = instance_document.get('ssl_connect_string')

        # Register any extensions for this class.
        self._register_extensions('objectrocket.instances.mongodb.MongodbInstance')

        # on demand initialization of new_relic_stats
        # TODO: (kmagge) we can get rid of this when we have newer version of stats
        self._new_relic_stats = None

    #####################
    # Public interface. #
    #####################
    @util.token_auto_auth
    def compaction(self, request_compaction=False):
        """Retrieve a report on, or request compaction for this instance.

        :param bool request_compaction: A boolean indicating whether or not to request compaction.
        """
        url = self._service_url + 'compaction/'
        # POST requests compaction; GET only fetches the current report.
        if request_compaction:
            response = requests.post(url, **self._instances._default_request_kwargs)
        else:
            response = requests.get(url, **self._instances._default_request_kwargs)
        return response.json()

    def get_authenticated_connection(self, user, passwd, db='admin', ssl=True, **kwargs):
        """Get an authenticated connection to this instance.

        :param str user: The username to use for authentication.
        :param str passwd: The password to use for authentication.
        :param str db: The name of the database to authenticate against. Defaults to ``'Admin'``.
        :param bool ssl: Use SSL/TLS if available for this instance. Defaults to ``True``.
        :param kwargs: Additional keyword arguments to pass to MongoClient.
        :raises: :py:class:`pymongo.errors.OperationFailure` if authentication fails.
        """
        # Attempt to establish an authenticated connection.
        try:
            connection = self.get_connection(ssl=ssl, **kwargs)
            connection[db].authenticate(user, passwd)
            return connection

        # Catch exception here for logging, then just re-raise.
        except pymongo.errors.OperationFailure as ex:
            logger.exception(ex)
            raise

    def get_connection(self, ssl=True, **kwargs):
        """Get a live connection to this instance.

        :param bool ssl: Use SSL/TLS if available for this instance.
        """
        return self._get_connection(ssl=ssl, **kwargs)

    @util.token_auto_auth
    def shards(self, add_shard=False):
        """Get a list of shards belonging to this instance.

        :param bool add_shard: A boolean indicating whether to add a new shard to the specified
            instance.
        """
        url = self._service_url + 'shards/'
        # POST adds a shard; GET lists the existing shards.
        if add_shard:
            response = requests.post(url, **self._instances._default_request_kwargs)
        else:
            response = requests.get(url, **self._instances._default_request_kwargs)
        return response.json()

    def get_aggregate_database_stats(self):
        """Fetch aggregate database statistics for this instance from the API."""
        return requests.get(self._service_url + 'aggregate_database_stats/',
                            **self._instances._default_request_kwargs).json()['data']

    @property
    @util.token_auto_auth
    def new_relic_stats(self):
        """
        Get stats for this instance.

        The result is cached on first access (see ``self._new_relic_stats``).
        For sharded instances, shard stats are sampled twice, one second
        apart, to derive per-second rates.
        """
        if self._new_relic_stats is None:
            # if this is a sharded instance, fetch shard stats in parallel
            if self.type == 'mongodb_sharded':
                shards = [Shard(self.name, self._service_url + 'shards/',
                                self._client, shard_doc)
                          for shard_doc in self.shards().get('data')]
                fs = []
                with futures.ThreadPoolExecutor(len(shards)) as executor:
                    for shard in shards:
                        fs.append(executor.submit(shard.get_shard_stats))
                    futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
                stats_this_second = self._rollup_shard_stats_to_instance_stats(
                    {shard.name: future.result() for (shard, future) in zip(shards, fs)})
                # power nap
                time.sleep(1)
                # fetch again
                fs = []
                with futures.ThreadPoolExecutor(len(shards)) as executor:
                    for shard in shards:
                        fs.append(executor.submit(shard.get_shard_stats))
                    futures.wait(fs, timeout=None, return_when=futures.ALL_COMPLETED)
                stats_next_second = self._rollup_shard_stats_to_instance_stats(
                    {shard.name: future.result() for (shard, future) in zip(shards, fs)})
                self._new_relic_stats = self._compile_new_relic_stats(stats_this_second, stats_next_second)
            else:
                # fetch stats like we did before (by hitting new_relic_stats API resource)
                response = requests.get('{}{}'.format(self._url,
                                                      'new-relic-stats'),
                                        **self._instances._default_request_kwargs)
                self._new_relic_stats = json.loads(response.content).get(
                    'data') if response.status_code == 200 else {}
        return self._new_relic_stats

    def _rollup_shard_stats_to_instance_stats(self, shard_stats):
        """
        roll up all shard stats to instance level stats

        :param shard_stats: dict of {shard_name: shard level stats}
        """
        instance_stats = {}
        opcounters_per_node = []
        # aggregate replication_lag
        instance_stats['replication_lag'] = max(map(lambda s: s['replication_lag'], shard_stats.values()))
        aggregate_server_statistics = {}
        for shard_name, stats in shard_stats.items():
            # Sum each statistic across shards; 'connections' is not summed
            # (the last shard's value wins).
            for statistic_key in stats.get('shard_stats'):
                if statistic_key != 'connections' and statistic_key in aggregate_server_statistics:
                    aggregate_server_statistics[statistic_key] = util.sum_values(aggregate_server_statistics[statistic_key],
                                                                                stats.get('shard_stats')[statistic_key])
                else:
                    aggregate_server_statistics[statistic_key] = stats.get('shard_stats')[statistic_key]

            # aggregate per_node_stats into opcounters_per_node
            opcounters_per_node.append({shard_name: {member: node_stats['opcounters']
                                                     for member, node_stats in stats.get('per_node_stats').items()}})
        instance_stats['opcounters_per_node'] = opcounters_per_node
        instance_stats['aggregate_server_statistics'] = aggregate_server_statistics
        return instance_stats

    def _compile_new_relic_stats(self, stats_this_second, stats_next_second):
        """
        from instance 'stats_this_second' and instance 'stats_next_second', compute some per
        second stats metrics and other aggregated metrics

        :param dict stats_this_second:
        :param dict stats_next_second:
        :return: compiled instance stats that has metrics
            {'opcounters_per_node_per_second': {...},
             'server_statistics_per_second': {...},
             'aggregate_server_statistics': {...},
             'replication_lag': 0.0,
             'aggregate_database_statistics': {}
            }
        """
        server_statistics_per_second = {}
        opcounters_per_node_per_second = []
        # Per-second rates are the difference between the two samples.
        for subdoc in ["opcounters", "network"]:
            first_doc = stats_this_second['aggregate_server_statistics'][subdoc]
            second_doc = stats_next_second['aggregate_server_statistics'][subdoc]
            keys = set(first_doc.keys()) | set(second_doc.keys())
            server_statistics_per_second[subdoc] = {key: int(second_doc[key]) - int(first_doc[key]) for key in keys if isinstance(first_doc[key], int)}
        for node1, node2 in zip(stats_this_second['opcounters_per_node'], stats_next_second['opcounters_per_node']):
            node_opcounters_per_second = {}
            for repl, members in node2.items():
                node_opcounters_per_second[repl] = {}
                for member, ops in members.items():
                    node_opcounters_per_second[repl][member] = {}
                    for op, count in ops.items():
                        node_opcounters_per_second[repl][member][op] = count - node1[repl][member][op]
            opcounters_per_node_per_second.append(node_opcounters_per_second)
        return {'opcounters_per_node_per_second': opcounters_per_node_per_second,
                'server_statistics_per_second': server_statistics_per_second,
                'aggregate_server_statistics': stats_next_second.get('aggregate_server_statistics'),
                'replication_lag': stats_next_second.get('replication_lag'),
                'aggregate_database_statistics': self.get_aggregate_database_stats()}

    @property
    def ssl_connect_string(self):
        """This instance's SSL connection string (may be None on smallest plans)."""
        return self._ssl_connect_string

    @util.token_auto_auth
    def get_stepdown_window(self):
        """Get information on this instance's stepdown window."""
        url = self._service_url + 'stepdown/'
        response = requests.get(url, **self._instances._default_request_kwargs)
        return response.json()

    @util.token_auto_auth
    def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True):
        """Set the stepdown window for this instance.

        Date times are assumed to be UTC, so use UTC date times.

        :param datetime.datetime start: The datetime which the stepdown window is to open.
        :param datetime.datetime end: The datetime which the stepdown window is to close.
        :param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
        :param bool scheduled: A boolean indicating whether or not to schedule stepdown.
        :param bool weekly: A boolean indicating whether or not to schedule compaction weekly.
        """
        # Ensure a logical start and endtime is requested.
        if not start < end:
            raise TypeError('Parameter "start" must occur earlier in time than "end".')

        # Ensure specified window is less than a week in length.
        week_delta = datetime.timedelta(days=7)
        if not ((end - start) <= week_delta):
            raise TypeError('Stepdown windows can not be longer than 1 week in length.')

        url = self._service_url + 'stepdown/'
        # NOTE(review): strftime('%s') is non-portable and formats in local
        # time, not UTC as the docstring assumes — consider calendar.timegm.
        data = {
            'start': int(start.strftime('%s')),
            'end': int(end.strftime('%s')),
            'enabled': enabled,
            'scheduled': scheduled,
            'weekly': weekly,
        }
        response = requests.post(
            url,
            data=json.dumps(data),
            **self._instances._default_request_kwargs
        )
        return response.json()

    ######################
    # Private interface. #
    ######################
    def _get_connection(self, ssl, **kwargs):
        """Get a live connection to this instance."""
        # Use SSL/TLS if requested and available.
        connect_string = self.connect_string
        if ssl and self.ssl_connect_string:
            connect_string = self.ssl_connect_string
        return pymongo.MongoClient(connect_string, **kwargs)
class Shard(bases.Extensible):
    """A single shard of an ObjectRocket MongoDB instance.

    :param dict instance_name: Name of the instance the shard belongs to
    :param string stats_base_url: Base url to fetch information and stats for this shard.
    :param objectrocket.client.Client or_client: handle to talk to OR API
    :param dict shard_document: a dictionary representing a mongodb shard
    """

    def __init__(self, instance_name, stats_base_url, or_client, shard_document):
        # Identity of the shard, pulled from the API's shard document.
        self._id = shard_document['id']
        self._name = shard_document['name']
        self._shardstr = shard_document['shardstr']
        self._plan = shard_document['plan']
        # Context needed to talk to the ObjectRocket API later on.
        self._instance_name = instance_name
        self._stats_base_url = stats_base_url
        self._client = or_client

    @property
    def instance_name(self):
        """Name of the parent instance."""
        return self._instance_name

    @property
    def shard_string(self):
        """The shard's connection ("shard") string."""
        return self._shardstr

    @property
    def plan(self):
        """The ObjectRocket plan of the parent instance."""
        return self._plan

    @property
    def name(self):
        """The shard's name."""
        return self._name

    @property
    def id(self):
        """The shard's unique ID."""
        return self._id

    def get_shard_stats(self):
        """Fetch statistics for this mongodb shard from the ObjectRocket API."""
        headers = {'X-Auth-Token': self._client.auth._token}
        params = {'include_stats': True}
        response = requests.get(self._stats_url, params=params, headers=headers)
        return response.json()['data']['stats']

    @property
    def _stats_url(self):
        """ObjectRocket API endpoint that serves this shard's stats."""
        return '{}{}/'.format(self._stats_base_url, self.name)
| 41.230548 | 151 | 0.628783 | 13,998 | 0.978402 | 0 | 0 | 6,008 | 0.419934 | 0 | 0 | 5,466 | 0.382051 |
5bc9e6c226f00a75b8cdcda4f0faf8b1dc74f397 | 429 | py | Python | quickforex/providers/__init__.py | jean-edouard-boulanger/python-quickforex | be95a795719f8bb77eceeb543d2ab963182b3f56 | [
"MIT"
] | null | null | null | quickforex/providers/__init__.py | jean-edouard-boulanger/python-quickforex | be95a795719f8bb77eceeb543d2ab963182b3f56 | [
"MIT"
] | null | null | null | quickforex/providers/__init__.py | jean-edouard-boulanger/python-quickforex | be95a795719f8bb77eceeb543d2ab963182b3f56 | [
"MIT"
] | null | null | null | from quickforex.providers.base import ProviderBase
from quickforex.providers.provider_metadata import (
ProviderMetadata,
SettingFieldDescription,
)
from quickforex.providers.exchangerate_host import ExchangeRateHostProvider
from quickforex.providers.dummy import DummyProvider
# Public API of the providers package (what ``from ... import *`` exports).
__all__ = [
    "ProviderBase",
    "ExchangeRateHostProvider",
    "DummyProvider",
    "ProviderMetadata",
    "SettingFieldDescription",
]
| 26.8125 | 75 | 0.797203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.228438 |
5bca412f256ff93ff0c9ab5a15be3bb92d689d74 | 5,154 | py | Python | springleaf/generator.py | OMKE/SpringLeaf | 0ee13f9fe53bb62f5ba144672c781ca66dd8164c | [
"MIT"
] | 9 | 2020-06-15T14:44:21.000Z | 2021-01-26T20:17:25.000Z | springleaf/generator.py | OMKE/SpringLeaf | 0ee13f9fe53bb62f5ba144672c781ca66dd8164c | [
"MIT"
] | 6 | 2020-06-17T19:16:25.000Z | 2020-08-25T04:44:28.000Z | springleaf/generator.py | OMKE/SpringLeaf | 0ee13f9fe53bb62f5ba144672c781ca66dd8164c | [
"MIT"
] | null | null | null | from springleaf.utils.file_handler import FileHandler
from springleaf.utils.template_util import TemplateUtil
from .base_generator import BaseGenerator
class Generator(BaseGenerator):
    """Builds the TemplateUtil descriptors for every Spring artifact to generate.

    :param selected_file: base name of the entity the artifacts are generated for
    :param files_to_create: artifact types to create (e.g. "Controller", "Service", ...)
    :param attributes: entity attributes passed through to every template
    :param structure: name of the project structure (see project_structures.json)
    """

    def __init__(self, selected_file, files_to_create, attributes, structure):
        super().__init__()
        self.file = selected_file
        self.files = files_to_create
        self.attributes = attributes
        self.structure = structure

    def prepare_templates_data(self):
        """Instantiate one TemplateUtil per artifact type with the right
        package and injected dependency.

        :return: list - List of TemplateUtil objects
        """
        # Root package from the project config; each artifact lives in its
        # sub-package as declared in project_structures.json.
        root_package = FileHandler.get_from_config_file('package')
        # Accessor style: standard getters/setters vs. Lombok.
        methods = FileHandler.get_from_config_file('methods')
        structure_content = FileHandler.get_project_structure_content(
            self.structure)
        controller_type = FileHandler.get_from_config_file('controller-type')
        response = FileHandler.get_from_config_file('response')

        template_utils = []
        for file_type in self.files:
            # Service implementations carry an "Impl" suffix on both the
            # class name and the template name.
            suffix = "Impl" if file_type == "Service" else ""
            class_name = self.file + file_type + suffix
            template_name = file_type + suffix
            package = root_package + "." + structure_content[file_type.lower()]
            dependency = self._dependency_for(file_type, root_package, structure_content)
            template_utils.append(TemplateUtil(
                class_name, template_name, self.attributes, methods, package,
                controller_type, response, self.file, dependency))
        return template_utils

    def _dependency_for(self, file_type, root_package, structure_content):
        """Resolve the fully-qualified dependency to autowire into ``file_type``.

        Returns None for artifact types that take no dependency, and (except
        for Mapper and Service) whenever autowiring is disabled.
        """
        autowire = self.autowire()
        if file_type == "Mapper":
            # Mapper always depends on both the entity and the DTO.
            return (root_package + "." + structure_content["entity"] + ":" +
                    root_package + "." + structure_content["dto"])
        if file_type == "Service":
            # With full autowiring the repository's package is known;
            # otherwise only the conventional class name can be used.
            if autowire:
                return root_package + "." + structure_content["repository"]
            return self.file + "Repository"
        if not autowire:
            return None
        if file_type == "Controller":
            # '&' in the service sub-package is a placeholder for the entity name.
            return root_package + "." + structure_content["service"].replace("&", self.file.lower())
        if file_type in ("Repository", "DTO"):
            return root_package + "." + structure_content["entity"]
        return None

    def autowire(self):
        """Return True when all 7 artifact types are selected, in which case
        dependencies can be wired automatically.

        :return: boolean - True if all are selected, else False
        """
        return len(self.files) == 7
| 64.425 | 286 | 0.596624 | 4,998 | 0.969732 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.163368 |
5bcacc54aaaf4f3456b49e1b1d734428817a5f4b | 3,722 | py | Python | MetadataProvider/metadata_provider.py | PiotrJTomaszewski/InternetRadioReciever | d7d5654b93b846e754057ad72cbc1c01f7b3ee6e | [
"MIT"
] | null | null | null | MetadataProvider/metadata_provider.py | PiotrJTomaszewski/InternetRadioReciever | d7d5654b93b846e754057ad72cbc1c01f7b3ee6e | [
"MIT"
] | null | null | null | MetadataProvider/metadata_provider.py | PiotrJTomaszewski/InternetRadioReciever | d7d5654b93b846e754057ad72cbc1c01f7b3ee6e | [
"MIT"
] | null | null | null | from mpd_connection import MPDConnection
from processing_metadata import process_metadata, join_metadata
from lastfm_api import LastFmMetadataGetter, LastFmApiException
from multiprocessing.managers import SyncManager
from multiprocessing import Event
import time
# Shared objects
# These dicts/events are exposed through a SyncManager so other processes
# (control panel, web app) can read player state and react to changes.
shared_song_metadata = {}
shared_mpd_status = {}
shared_playlist = {}
control_panel_new_song_event = Event()
control_panel_new_player_metadata_event = Event()
webapp_new_status_event = Event()

DEBUG_MODE = True  # referenced only in commented-out debug printing below
def initializer(self):
    """Run the manager's server loop (blocks forever).

    NOTE(review): despite the ``self`` parameter this is a module-level
    function, not a method; it expects an object exposing a ``.manager``
    attribute (e.g. a MetadataProvider). It is not called anywhere in this
    file — confirm whether it is dead code or used externally.
    """
    server = self.manager.get_server()
    server.serve_forever()
class MetadataProvider:
    """Bridges MPD playback events to shared objects served over a SyncManager.

    Registers the module-level shared dicts/events with a SyncManager on port
    50000, then subscribes to MPD callbacks that keep them up to date.
    """

    def __init__(self):
        # Set shared objects
        SyncManager.register('SharedSongMetadata', callable=lambda: shared_song_metadata)
        SyncManager.register('SharedMpdStatus', callable=lambda: shared_mpd_status)
        SyncManager.register('SharedPlaylist', callable=lambda: shared_playlist)
        SyncManager.register('ControlPanelNewSongEvent', callable=lambda: control_panel_new_song_event)
        SyncManager.register('ControlPanelNewPlayerMetadataEvent', callable=lambda: control_panel_new_player_metadata_event)
        SyncManager.register('WebappNewStatusEvent', callable=lambda: webapp_new_status_event)
        self.manager = SyncManager(address=('', 50000), authkey=b'abc')
        self.manager.start()
        # Proxies to the shared objects, used by the callbacks below.
        self.song_metadata = self.manager.SharedSongMetadata()
        self.mpd_status = self.manager.SharedMpdStatus()
        self.current_playlist = self.manager.SharedPlaylist()
        self.webapp_new_status_event = self.manager.WebappNewStatusEvent()
        self.control_panel_new_song_event = self.manager.ControlPanelNewSongEvent()
        self.control_panel_new_player_metadata_event = self.manager.ControlPanelNewPlayerMetadataEvent()
        self.lastfm = LastFmMetadataGetter()
        # MPD pushes song/status/playlist changes into the callbacks below.
        self.mpd_connection = MPDConnection(host='localhost', port=6600,
                                            new_player_status_callback=self.new_player_status_callback,
                                            new_song_callback=self.new_song_callback,
                                            playlist_callback=self.playlist_change_callback)

    def new_song_callback(self):
        """Enrich the new song's MPD metadata with Last.fm data and publish it."""
        metadata_from_mpd = self.mpd_connection.get_current_song_metadata()
        processed_metadata = process_metadata(metadata_from_mpd)
        track = processed_metadata.get('track')
        artist = processed_metadata.get('artist')
        # album = processed_metadata.get('album')
        try:
            album_metadata, track_metadata = \
                self.lastfm.get_metadata(artist_name=artist, track_name=track)
        except LastFmApiException:
            # Fall back to MPD-only metadata when Last.fm lookup fails.
            track_metadata = {}
            album_metadata = {}
        joined_metadata = join_metadata(
            metadata_from_mpd=processed_metadata,
            track_metadata=track_metadata,
            album_metadata=album_metadata
        )
        self.song_metadata.update(joined_metadata)
        # presumably gives consumers time to settle before signaling —
        # TODO confirm why a fixed 3-second delay is needed here.
        time.sleep(3)
        self.control_panel_new_song_event.set()
        # if DEBUG_MODE:
        #     import pprint
        #     pprint.pprint(joined_metadata)
        # print(joined_metadata)

    def new_player_status_callback(self):
        """Publish the latest MPD player status and notify both consumers."""
        player_status = self.mpd_connection.get_player_status()
        self.mpd_status.update(player_status)
        self.webapp_new_status_event.set()
        self.control_panel_new_player_metadata_event.set()

    def playlist_change_callback(self):
        """Refresh the playlist when MPD reports a playlist change."""
        playlist = self.mpd_connection.get_playlist()
        # NOTE(review): this rebinds the attribute to a local copy instead of
        # updating the managed SharedPlaylist proxy (the other callbacks use
        # .update()), so remote clients may never observe playlist changes —
        # verify intended behavior.
        self.current_playlist = playlist.copy()
        self.webapp_new_status_event.set()
if __name__ == '__main__':
    # Start the provider and keep the process alive; all real work happens
    # in the MPD/manager callbacks on background threads.
    prov = MetadataProvider()
    while 1:
        time.sleep(10000)
| 41.820225 | 124 | 0.711177 | 3,036 | 0.81569 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.092692 |
5bcb08c67e2fb3285c52b357489d036e67867bc6 | 1,546 | py | Python | src/dataset.py | EmanuelSamir/adaptive-learning-qpcbfclf-elm | d913a8b98600a0d844adf156700b3b69c73c694a | [
"MIT"
] | null | null | null | src/dataset.py | EmanuelSamir/adaptive-learning-qpcbfclf-elm | d913a8b98600a0d844adf156700b3b69c73c694a | [
"MIT"
] | null | null | null | src/dataset.py | EmanuelSamir/adaptive-learning-qpcbfclf-elm | d913a8b98600a0d844adf156700b3b69c73c694a | [
"MIT"
] | null | null | null | from collections import deque, namedtuple
import random
class ELMDataset:
    """Sample store with two windows: an unbounded "pre" buffer filled only
    during the initial phase (t/dt < time_th) and a bounded "post" buffer
    that always receives samples (keeping at most `maxlen` of them)."""

    def __init__(self, dt, features = ('x'), time_th = 0.5, maxlen = 5):
        self.time_th = time_th
        self._maxlen = maxlen
        self.dt = dt
        # Record type holding one sample; `features` names its fields.
        self.trans = namedtuple('trans', features)
        self.D_pre = deque()
        self.D_post = deque(maxlen=self._maxlen)

    def reset(self):
        """Drop all stored samples."""
        self.D_pre = deque()
        self.D_post = deque(maxlen=self._maxlen)

    def update(self, t, *args):
        """Store one sample taken at time *t* (fields given positionally)."""
        sample = self.trans(*args)
        self.D_post.append(sample)
        if t / self.dt < self.time_th:
            self.D_pre.append(sample)

    def shuffle(self):
        """Shuffle the bounded buffer in place."""
        random.shuffle(self.D_post)

    def get_D(self, t):
        """Return the appropriate buffer for time *t*, transposed into one
        record of per-field tuples."""
        source = self.D_pre if t / self.dt < self.time_th else self.D_post
        return self.trans(*zip(*source))
class NNDataset:
    """Bounded FIFO of feature records (at most `maxlen` kept)."""

    def __init__(self, features = ('x'), maxlen = 5):
        # Record type holding one sample; `features` names its fields,
        # e.g. ('x', 'k', 'dh', 'dh_e').
        self.trans = namedtuple('trans', features)
        self.D = deque(maxlen=maxlen)

    def reset(self):
        """Drop all samples (note: the fresh deque is unbounded, matching
        the original behaviour)."""
        self.D = deque()

    def update(self, *args):
        """Append one sample built from the positional field values."""
        self.D.append(self.trans(*args))

    def shuffle(self):
        """Shuffle the stored samples in place."""
        random.shuffle(self.D)

    def get_D(self):
        """Transpose the buffer into one record of per-field tuples."""
        return self.trans(*zip(*self.D))
| 28.62963 | 72 | 0.510996 | 1,465 | 0.947607 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.029754 |
5bcb99c6eaf056e3bdbaec31592016bcd7721ad5 | 118 | py | Python | python/rna-transcription/rna_transcription.py | guillaume-martin/exercism | 7008c31668bb8cc1d386421c665341568339c380 | [
"MIT"
] | null | null | null | python/rna-transcription/rna_transcription.py | guillaume-martin/exercism | 7008c31668bb8cc1d386421c665341568339c380 | [
"MIT"
] | null | null | null | python/rna-transcription/rna_transcription.py | guillaume-martin/exercism | 7008c31668bb8cc1d386421c665341568339c380 | [
"MIT"
] | null | null | null | def to_rna(dna_strand):
pairs = {'G':'C','C':'G','T':'A','A':'U'}
return ''.join(pairs[n] for n in dna_strand) | 39.333333 | 48 | 0.542373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.220339 |
5bcd4fe94307999ec0e963c569a27c56d4a42eff | 985 | py | Python | common/stat.py | 7workday/TT | 8887a85652c387a50a65e2598abdc833400e56f3 | [
"Apache-2.0"
] | null | null | null | common/stat.py | 7workday/TT | 8887a85652c387a50a65e2598abdc833400e56f3 | [
"Apache-2.0"
] | null | null | null | common/stat.py | 7workday/TT | 8887a85652c387a50a65e2598abdc833400e56f3 | [
"Apache-2.0"
] | null | null | null | '''程序的状态码'''
OK = 0
class LogicErr(Exception):
    """Base class for application logic errors.

    `code` is the numeric error code, `data` a human-readable payload.
    """
    code = None
    data = None

    def __init__(self, data=None):
        # Any falsy payload ('' / 0 / None) falls back to the subclass name.
        self.data = data or type(self).__name__


def gen_logic_err(name, code):
    """Factory: build a new LogicErr subclass carrying the given code."""
    return type(name, (LogicErr,), {'code': code})
SmsErr = gen_logic_err('SmsErr', 1000)                  # SMS sending failed
VcodeErr = gen_logic_err('VcodeErr', 1001)              # wrong verification code
LoginRequired = gen_logic_err('LoginRequired', 1002)    # user not logged in
UserFormErr = gen_logic_err('UserFormErr', 1003)        # invalid user form data
ProfileFormErr = gen_logic_err('ProfileFormErr', 1004)  # invalid profile form data
RepeatSwipeErr = gen_logic_err('RepeatSwipeErr', 1005)  # duplicate swipe
AreadyFriends = gen_logic_err('AreadyFriends',1006)     # already friends
RewindLimited = gen_logic_err('RewindLimited',1007)     # daily rewind limit reached
RewindTimeout = gen_logic_err('RewindTimeout',1008)     # rewind window expired
PermRequired = gen_logic_err('PermRequired',1009)       # missing permission
5bcdccd8610159a86084244c116c9f3fdbd180fe | 372 | py | Python | pythontutor-ru/02_ifelse/09_bishop_move.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | 2 | 2019-05-24T20:10:16.000Z | 2020-07-11T06:06:43.000Z | pythontutor-ru/02_ifelse/09_bishop_move.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | null | null | null | pythontutor-ru/02_ifelse/09_bishop_move.py | ornichola/learning-new | e567218d8887805e38b1361715d5e3bd51a6bcaf | [
"Unlicense"
] | 21 | 2019-03-11T20:25:05.000Z | 2022-02-28T13:53:10.000Z | '''
http://pythontutor.ru/lessons/ifelse/problems/bishop_move/
Шахматный слон ходит по диагонали.
Даны две различные клетки шахматной доски, определите, может ли слон попасть с первой клетки на вторую одним ходом.
'''
a_x = int(input())
a_y = int(input())
b_x = int(input())
b_y = int(input())
if abs(a_x - b_x) == abs(a_y - b_y):
print('YES')
else:
print('NO')
| 23.25 | 115 | 0.688172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.705645 |
5bcebc4ea06b4aa53df97d80529e592e380fed34 | 3,831 | py | Python | zebROS_ws/src/controller_node/scripts/transform_odom.py | mattwalstra/2019RobotCode | 44f2543876b95428a68dc84820f931571244e49d | [
"BSD-3-Clause"
] | 4 | 2020-11-25T18:30:33.000Z | 2021-11-14T04:50:16.000Z | zebROS_ws/src/controller_node/scripts/transform_odom.py | mattwalstra/2019RobotCode | 44f2543876b95428a68dc84820f931571244e49d | [
"BSD-3-Clause"
] | 19 | 2021-03-20T01:10:04.000Z | 2022-01-17T22:51:05.000Z | zebROS_ws/src/controller_node/scripts/transform_odom.py | mattwalstra/2019RobotCode | 44f2543876b95428a68dc84820f931571244e49d | [
"BSD-3-Clause"
] | 3 | 2019-10-29T01:13:09.000Z | 2022-01-14T21:53:06.000Z | #!/usr/bin/env python
import rospy
import tf
from nav_msgs.msg import Odometry
import numpy as np
import sys
import math
def qv_mult(q, v):
    """Rotate vector *v* by quaternion *q* via q * v * q_conjugate.

    The vector is unit-normalised first, so only its direction survives;
    the result is returned as a quaternion (x, y, z, w) array.
    """
    # NOTE(review): the zero check compares against a list literal, so it
    # only triggers when v is exactly the list [0, 0, 0] — a tuple or numpy
    # array of zeros would fall through to unit_vector; confirm callers.
    v_unit = None
    if v == [0, 0, 0]: v_unit = [0.0, 0.0, 0.0]
    else: v_unit = tf.transformations.unit_vector(v)
    # Promote the vector to a pure quaternion (w == 0) before multiplying.
    qp = list(v_unit)
    qp.append(0.0)
    return tf.transformations.quaternion_multiply(tf.transformations.quaternion_multiply(q, qp), tf.transformations.quaternion_conjugate(q))
class TransformOdometry:
    """ROS node helper: subscribes to an odometry topic and republishes it,
    intended to apply a fixed position/orientation offset.

    NOTE(review): the transform maths is entirely commented out in
    _transform_callback, so messages are currently republished unchanged.
    """

    def __init__(self, args):
        # Position offset (metres) and orientation offset (quaternion
        # x, y, z, w) parsed from argv, followed by the two topic names.
        self.x_pos_shift = float(args[1])
        self.y_pos_shift = float(args[2])
        self.z_pos_shift = float(args[3])
        self.x_ori_shift = float(args[4])
        self.y_ori_shift = float(args[5])
        self.z_ori_shift = float(args[6])
        self.w_ori_shift = float(args[7])
        self.odom_sub_topic = args[8]
        self.odom_pub_topic = args[9]
        self.sub_odom_transform = rospy.Subscriber(self.odom_sub_topic, Odometry, self._transform_callback)
        self.pub_odom_transform = rospy.Publisher(self.odom_pub_topic, Odometry, queue_size=1)

    def get_node_name(self):
        # NOTE(review): self.odom_node_name is never assigned anywhere in
        # this class, so calling this raises AttributeError — dead code?
        return self.odom_node_name

    def _transform_callback(self, odom_msg):
        # The intended pose transformation (rotate by the orientation shift,
        # then translate by the position shift) is kept below but disabled.
        #odom_msg.pose.pose.position.x = odom_msg.pose.pose.position.x * math.cos(self.z_ori_shift) - odom_msg.pose.pose.position.y * math.sin(self.z_ori_shift)
        #odom_msg.pose.pose.position.y = odom_msg.pose.pose.position.x * math.sin(self.z_ori_shift) + odom_msg.pose.pose.position.y * math.cos(self.z_ori_shift)
        #odom_msg.pose.pose.position.x = odom_msg.pose.pose.position.x * math.cos(self.y_ori_shift) - odom_msg.pose.pose.position.z * math.sin(self.y_ori_shift)
        #odom_msg.pose.pose.position.y = odom_msg.pose.pose.position.x * math.sin(self.y_ori_shift) + odom_msg.pose.pose.position.z * math.cos(self.y_ori_shift)
        #odom_msg.pose.pose.position.z = 0

        #q = [odom_msg.pose.pose.orientation.x,
        #     odom_msg.pose.pose.orientation.y,
        #     odom_msg.pose.pose.orientation.z,
        #     odom_msg.pose.pose.orientation.w]
        #new_vec = qv_mult(q, [self.x_pos_shift, self.y_pos_shift, self.z_pos_shift])
        #tx = self.x_pos_shift - new_vec[0]
        #ty = self.y_pos_shift - new_vec[1]
        #tz = self.z_pos_shift - new_vec[2]

        #odom_pub_msg = Odometry()
        #odom_pub_msg.header.seq = odom_msg.header.seq
        #odom_pub_msg.header.stamp = odom_msg.header.stamp
        #odom_pub_msg.header.frame_id = odom_msg.header.frame_id
        #odom_pub_msg.child_frame_id = ""
        #odom_pub_msg.pose.pose.position.x = odom_msg.pose.pose.position.x + tx - self.x_pos_shift
        #odom_pub_msg.pose.pose.position.y = odom_msg.pose.pose.position.y + ty - self.y_pos_shift
        #odom_pub_msg.pose.pose.position.z = odom_msg.pose.pose.position.z + tz - self.z_pos_shift
        #odom_pub_msg.pose.pose.orientation.x = odom_msg.pose.pose.orientation.x
        #odom_pub_msg.pose.pose.orientation.y = odom_msg.pose.pose.orientation.y
        #odom_pub_msg.pose.pose.orientation.z = odom_msg.pose.pose.orientation.z
        #odom_pub_msg.pose.pose.orientation.w = odom_msg.pose.pose.orientation.w
        #odom_pub_msg.pose.covariance = odom_msg.pose.covariance

        # Republish the (currently untransformed) odometry message.
        self.pub_odom_transform.publish(odom_msg)
if __name__ == "__main__":
rospy.init_node("odom_transform")
# Parse input args
def parse_args(args):
if len(args) != 10: return False
for arg in args[1:8]:
try: float(arg)
except: return False
return True
args = rospy.myargv(argv=sys.argv)
if not parse_args(args):
rospy.logfatal("Invalid commandline arguments specified")
rospy.signal_shutdown("Invalid commandline arguments specified")
to = TransformOdometry(args)
rospy.spin()
| 42.098901 | 160 | 0.681545 | 2,854 | 0.744975 | 0 | 0 | 0 | 0 | 0 | 0 | 1,953 | 0.509789 |
5bcff95678159476daf98ca938631d4f52d0298a | 380 | py | Python | Aula07/chef007.py | AdryanPablo/Python | d469d394b41f44dbd753bf9a7f7eebaa81096562 | [
"MIT"
] | null | null | null | Aula07/chef007.py | AdryanPablo/Python | d469d394b41f44dbd753bf9a7f7eebaa81096562 | [
"MIT"
] | null | null | null | Aula07/chef007.py | AdryanPablo/Python | d469d394b41f44dbd753bf9a7f7eebaa81096562 | [
"MIT"
] | null | null | null | # Desenvolva um programa que leia as duas notas de um aluno, calcule e mostre a sua média.
nome = str(input("Digite o nome do(a) aluno(a): "))
not1 = float(input("Digite a 1ª nota de {}: ".format(nome)))
not2 = float(input("Digite a 2ª nota de {}: ".format(nome)))
media = (not1 + not2) / 2
print("Já que {} tirou {} e {}, sua média é {:.2f}.".format(nome, not1, not2, media))
| 38 | 90 | 0.642105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.585492 |
5bd0173040b2f2d05f3a1e8d1fe723aa238abeb6 | 421 | py | Python | Windows10_structed_data/get_report_data.py | naporium/tools_for_windows-main | 4be11a033b5b5ee39c1d4af2e2013e7c5b144772 | [
"MIT"
] | null | null | null | Windows10_structed_data/get_report_data.py | naporium/tools_for_windows-main | 4be11a033b5b5ee39c1d4af2e2013e7c5b144772 | [
"MIT"
] | null | null | null | Windows10_structed_data/get_report_data.py | naporium/tools_for_windows-main | 4be11a033b5b5ee39c1d4af2e2013e7c5b144772 | [
"MIT"
] | null | null | null | from json import load, dumps
REPORT_FILE = "report.txt"
with open(REPORT_FILE, "r") as file:
data = load(file)
# EXAMPLE ON ACESSING THE JSON DATA REPORT
print(dumps(data["INTERFACES_INFO"]["Ethernet adapter Ethernet"], indent=4))
print(dumps(data["INTERFACES_INFO"]["Ethernet adapter Ethernet"]["IPv4 Address"]))
print(dumps(data["INTERFACES_INFO"]["Ethernet adapter Ethernet"]["Physical Address"]))
| 38.272727 | 87 | 0.72209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.527316 |
5bd0361cf3603ec414df04eeb5b3c2d7c604b9a7 | 188 | py | Python | fastapi/signals/__init__.py | zhangnian/fastapi | 65eb49ec58041fb1212c3e867d19a405d7e40662 | [
"MIT"
] | 33 | 2017-08-14T09:39:12.000Z | 2021-09-11T14:54:28.000Z | fastapi/signals/__init__.py | zhangnian/fastapi | 65eb49ec58041fb1212c3e867d19a405d7e40662 | [
"MIT"
] | null | null | null | fastapi/signals/__init__.py | zhangnian/fastapi | 65eb49ec58041fb1212c3e867d19a405d7e40662 | [
"MIT"
] | 9 | 2017-12-05T11:54:01.000Z | 2020-11-10T08:03:35.000Z | from blinker import signal
from fastapi.signals.signal_handler import *
# Blinker signal fired when a user's profile information changes.
# NOTE(review): 'modifiy' looks like a typo for 'modify', but the string is
# the signal's identity — renaming would break existing senders/receivers.
sig_user = signal('userinfo_modifiy')


def register_signal_handlers():
    # Wire the handler (imported via `from ...signal_handler import *`
    # above) to the user-info signal.
    sig_user.connect(on_userinfo_modify)
5bd092c1901157574c2e9727d580986c03ac6463 | 2,000 | py | Python | kevlar/augment.py | johnsmith2077/kevlar | 3ed06dae62479e89ccd200391728c416d4df8052 | [
"MIT"
] | 24 | 2016-12-07T07:59:09.000Z | 2019-03-11T02:05:36.000Z | kevlar/augment.py | johnsmith2077/kevlar | 3ed06dae62479e89ccd200391728c416d4df8052 | [
"MIT"
] | 325 | 2016-12-07T07:37:17.000Z | 2019-03-12T19:01:40.000Z | kevlar/augment.py | standage/kevlar | 622d1869266550422e91a60119ddc7261eea434a | [
"MIT"
] | 8 | 2017-08-17T01:37:39.000Z | 2019-03-01T16:17:44.000Z | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import kevlar
def augment(augseqstream, nakedseqstream, upint=10000):
    """
    Augment an unannotated stream of sequences.

    - `augseqstream`: a stream of sequences annotated with k-mers of interest
    - `nakedseqstream`: a stream of unannotated sequences, to be augmented with
      k-mers of interest from `augseqstream`
    - `upint`: progress-log interval, in records

    Yields a new Record per naked sequence, annotated with any interesting
    k-mers collected from `augseqstream`.
    """
    ksize = None
    # Map each interesting k-mer AND its reverse complement to its
    # abundance, so the scan below is strand-insensitive.
    ikmers = dict()
    for n, record in enumerate(augseqstream):
        if n > 0 and n % upint == 0:
            kevlar.plog('[kevlar::augment] processed', n, 'input reads')
        for ikmer in record.annotations:
            seq = record.ikmerseq(ikmer)
            ikmers[seq] = ikmer.abund
            ikmers[kevlar.revcom(seq)] = ikmer.abund
            # All annotated k-mers share one k; remember it for the scan.
            ksize = ikmer.ksize
    for record in nakedseqstream:
        qual = None
        if hasattr(record, 'quality') and record.quality is not None:
            qual = record.quality
        newrecord = kevlar.sequence.Record(
            name=record.name, sequence=record.sequence, quality=qual,
        )
        # Slide a k-length window over the sequence; annotate every offset
        # whose k-mer appeared in the annotated stream.
        numkmers = len(record.sequence) - ksize + 1
        for offset in range(numkmers):
            kmer = record.sequence[offset:offset+ksize]
            if kmer in ikmers:
                abund = ikmers[kmer]
                newrecord.annotate(kmer, offset, abund)
        yield newrecord
def main(args):
    """CLI entry point for the `kevlar augment` subcommand: stream the
    annotated and naked inputs through augment() and write the result."""
    augseqs = kevlar.parse_augmented_fastx(kevlar.open(args.augseqs, 'r'))
    nakedseqs = kevlar.parse_augmented_fastx(kevlar.open(args.seqs, 'r'))
    outstream = kevlar.open(args.out, 'w')
    for record in augment(augseqs, nakedseqs):
        kevlar.print_augmented_fastx(record, outstream)
| 37.037037 | 79 | 0.592 | 0 | 0 | 1,299 | 0.6495 | 0 | 0 | 0 | 0 | 685 | 0.3425 |
5bd0af57ddc333eef3201ac5b0c5716349dabdee | 861 | py | Python | ML.4/animate.py | jfnavarro/old_python_courses | fb500e8eeae6c5d10bf77e1ff52725627527222a | [
"MIT"
] | 1 | 2018-02-20T03:26:35.000Z | 2018-02-20T03:26:35.000Z | ML.4/animate.py | jfnavarro/BioInfo_ML_courses | fb500e8eeae6c5d10bf77e1ff52725627527222a | [
"MIT"
] | null | null | null | ML.4/animate.py | jfnavarro/BioInfo_ML_courses | fb500e8eeae6c5d10bf77e1ff52725627527222a | [
"MIT"
] | null | null | null | '''
Created on 5.10.2010.
@author: Tin Franovic
'''
from Tkinter import *
from PIL import Image, ImageTk
def do_animation(currentframe):
    """Draw frame *currentframe* on the global canvas, then reschedule
    itself every 1000 ms (simple Tk animation loop); wraps around at the
    end of the frame list. Relies on the globals set up by draw()."""
    def do_image():
        # Paint the current frame centred at (50, 50).
        wrap.create_image(50,50,image=frame[currentframe])
    try:
        do_image()
    except IndexError:
        # Past the last frame: wrap around to the beginning.
        currentframe = 0
        do_image()
    wrap.update_idletasks()
    currentframe = currentframe + 1
    root.after(1000, do_animation, currentframe)
def draw(stateSequence):
    """Load one PNG frame per state in *stateSequence* and start a Tk
    window that cycles through them via do_animation()."""
    # do_animation() reads these globals, so they must outlive this call.
    global wrap,frame,root
    root = Tk()
    root.title("WalkingRobot")
    frame=[]
    for i in stateSequence:
        # Frame files are named step<N>.png, with states numbered from 0.
        fname="step"+str(i+1)+".png"
        # Keeping PhotoImage references in `frame` also prevents Tk from
        # garbage-collecting the images.
        img=ImageTk.PhotoImage(Image.open(fname))
        frame+=[img]
    wrap = Canvas(root, width=200, height=120)
    wrap.pack()
    # Kick off the animation loop shortly after the window appears.
    root.after(10, do_animation, 0)
    root.mainloop()
| 23.27027 | 66 | 0.591173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.090592 |
5bd160163205780016073f8afc2ec9c199abdcf2 | 797 | py | Python | INFOTC_4320/Anagram_BigO/Anagram_BigO.py | j0shbl0ck/Mizzou_Coding_IT | a730e46113b25daf13ad244e38613b138886206d | [
"Unlicense"
] | null | null | null | INFOTC_4320/Anagram_BigO/Anagram_BigO.py | j0shbl0ck/Mizzou_Coding_IT | a730e46113b25daf13ad244e38613b138886206d | [
"Unlicense"
] | null | null | null | INFOTC_4320/Anagram_BigO/Anagram_BigO.py | j0shbl0ck/Mizzou_Coding_IT | a730e46113b25daf13ad244e38613b138886206d | [
"Unlicense"
] | null | null | null | # October 27th, 2021
# INFOTC 4320
# Josh Block
# Challenge: Anagram Alogrithm and Big-O
# References: https://bradfieldcs.com/algos/analysis/an-anagram-detection-example/
print("===Anagram Dector===")
print("This program determines if two words are anagrams of each other\n")
first_word = input("Please enter first word: ")
second_word = input("Please enter second word: ")
##def dector(first_word,second_word):
## if len(first_word) != len(second_word):
## return True
##
## first_word = sorted(first_word)
## second_word = sorted(second_word)
##
## if first_word != second_word:
## return False
##
## return True
## pass
def dector(first_word, second_word):
    """Return True when the two words are anagrams of each other."""
    normalized_a = sorted(first_word)
    normalized_b = sorted(second_word)
    return normalized_a == normalized_b
print(dector(first_word,second_word))
| 23.441176 | 82 | 0.695107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 580 | 0.727729 |
5bd168e636c697b8dbaf73f30e8402fe9d300c82 | 1,035 | py | Python | mriutils/utils/tonii.py | kuangmeng/MRIUtils | 3a79e8104071deb0dc17c402ac878f94161d9b4a | [
"MIT"
] | null | null | null | mriutils/utils/tonii.py | kuangmeng/MRIUtils | 3a79e8104071deb0dc17c402ac878f94161d9b4a | [
"MIT"
] | null | null | null | mriutils/utils/tonii.py | kuangmeng/MRIUtils | 3a79e8104071deb0dc17c402ac878f94161d9b4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import skimage.io as skio
from skimage.transform import resize
import nibabel as nib
class SaveNiiFile():
    """Wrap a 3-D volume: preview slices, resize it, and load/save NIfTI
    files via nibabel.

    Parameters
    ----------
    data : array-like
        Volume data (first axis indexes slices for showSingleMRI).
    save_path : str
        Default output path for save_nii().
    new_shape : tuple
        Target shape for resizeData().
    order : int
        Spline interpolation order passed to skimage.transform.resize.
    """

    def __init__(self, data, save_path, new_shape = (10, 256, 256), order = 3):
        self.data = data
        self.save_path = save_path
        self.new_shape = new_shape
        # Bug fix: `order` was previously dropped, so resizeData() crashed
        # with AttributeError when it read self.order.
        self.order = order

    def showSingleMRI(self, frame):
        """Display slice *frame* of the volume in greyscale."""
        skio.imshow(self.data[int(frame)], cmap = 'gray')
        skio.show()

    def resizeData(self):
        """Resize self.data in place to self.new_shape using spline
        interpolation of order self.order."""
        if len(self.data) > 0:
            self.data = resize(self.data, self.new_shape, order = self.order, mode='edge')

    def load_nii(self, img_path):
        """Load a NIfTI image; return (voxel data, affine, header)."""
        nimg = nib.load(img_path)
        return nimg.get_data(), nimg.affine, nimg.header

    def save_nii(self, data = None, save_path = None, affine = None, header = None):
        """Write *data* to *save_path*, falling back to the instance's
        values when arguments are omitted."""
        # Bug fix: use `is None` instead of `== None` — with numpy arrays,
        # `== None` broadcasts elementwise and raises on truth testing.
        if data is None:
            data = self.data
        if save_path is None:
            save_path = self.save_path
        nimg = nib.Nifti1Image(data, affine = affine, header = header)
        nimg.to_filename(save_path)
| 28.75 | 90 | 0.607729 | 909 | 0.878261 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.031884 |
5bd366b30abb8fedacf7c21ecc291b16250a6276 | 9,194 | py | Python | shc/utils.py | TransparentHealth/healthcards_python_sample_scripts | 85ffa973c21912399a6abc8b14faf99b5ee86f4d | [
"Apache-2.0"
] | 1 | 2022-03-08T22:29:23.000Z | 2022-03-08T22:29:23.000Z | shc/utils.py | TransparentHealth/healthcards_python_sample_scripts | 85ffa973c21912399a6abc8b14faf99b5ee86f4d | [
"Apache-2.0"
] | null | null | null | shc/utils.py | TransparentHealth/healthcards_python_sample_scripts | 85ffa973c21912399a6abc8b14faf99b5ee86f4d | [
"Apache-2.0"
] | 1 | 2021-10-20T13:28:08.000Z | 2021-10-20T13:28:08.000Z | #!/usr/bin/env python
import zlib
import requests
from jose import jwk as jose_jwk, jws
import json
import qrcode
sample_payload = "eyJ6aXAiOiJERUYiLCJhbGciOiJFUzI1NiIsImtpZCI6IjNLZmRnLVh3UC03Z1h5eXd0VWZVQUR3QnVtRE9QS01ReC1pRUxMMTFXOXMifQ.3VRNT9swGP4rkbmmTZxQSnIaZZPWCRBSy3aYOLiOSbw5duSPlq7qf99rp4FOg952WW62Xz_v8-E3O8SNQSVqrO1MmSQUF2NKdGUS2HdMoxhxYlGJJwVO82lxnsZoTVG5Qx-okpY9w9n3l9ubzWa8ycdK10mW4suEalYxaTkRJllj9Bgju-2Yv_GVaf7EyUqw65ca6DUAmZZo2zAibNPTOaNqzStcnKzpFyO_OFnH29ZJ_otYrqQn9Upz4VY_GLVe31PDNbA0vqZE5-N0jAHU786crATzNZoZ5TRly6AKHQ4GlYgqIQDNI8QIGugtSAdkJ8SDFlAw3C9TKBgWbwDfA1W471GeLZM9J0ByAWUQWnPbuNWYqjZZaiJNRzRc-hyUJ5dpOrrIR_nIamcsWPLz7B7w1kQ4dq2q0AWjPbghSct6nqTlAiijKwk0tAkyar5mvjn6ohpJbEP85ozXtWDGKhnN53P06GFqJit4PyVqSfBkxSGFj8T6TriY4FGKR5nXTaoK5JrQslPGEnHgg7P8fAIFVLneO_SwAIb7ffymh_i0h_Pj0GMEfawzIaS2E8yyKrhBKZcHP3ZwVHFZB2JmayxrD2aD142Yhmfu30NieJXQ9XOgGphn6dQTjVF3CC7QeWKQh-d2nDsUKUqdDkfeniVve4gsWJR6WZ3mkNh2cdAGEbIYCUV7Me-B5x5cKHvn2lVI4kbZ6CwNH0T9jo3Zf2ljVvwTG6fv2piftvFm6DrMG7qaXUf3DdEtodvjqdghAV76icsKfJlFwN420S3hMlrY6O6T_4H9MTaTfJpdeBO59TNzSypuXsPyFd_mf03V3s_Vfv8b.6RJ6ZFwPRqsVdDXsEUaDhkRo0u3nKC1cSCgN7YyPM1tteqPziRNbEkMdvURrkZ3baECxqmDybQvpGKVmEorTNw"
sample_numeric_encoded_payload = "5676290952432060346029243740446031222959532654603460292540772804336028702864716745222809286133314564376531415906402203064504590856435503414245413640370636654171372412363803043756220467374075323239254334433260573601064137333912707426350769625364643945753638652852454535422237213876065244343160343853740855723835636810685712126967072534581208054242090543775073110024063411383336384168693404326438364025053330552640380527553266755975446326292974413411056375657045663941260836750066626744127412650663127253774252117654553004036442077072245263447552396237296363611221586172000544735026415257102476406874090854520923402064454566057720605333353934523368773871546530776725763450342565270512452950667144696836651240677707414450263141560604352333532003736845240800330361202740101109546920397733456645083937637609203027360726634458682836233328113628267258713820556229113823256320740622123930215842537423572004420710042656314532122903217620424036426535537233424468614545526029333777375400597640290855673469692837506528526454704235317710211074046236075568056803204261355358593854710965683963206060613074620371206276526908647361650966596729532435110866774371422326305965330806350309262568296071073576416838572162753826256111390939696044072526303708654339526630082969367063352624652758581035115720282541316556345038742028531057577664595060035950356103263224575274772852380524117676306959213045542735064574412725452105296452767569230552230407054459645772060333605629433612433458266759650955363961412506222210365642303659005505652370403040685523625756656041735607587042293709424506646233590554552026245328442240411032560021087508543027736505634128076253450922743327033912616606455705636141737439260957730567605771732564092369322610685243405706235524716655736168555859204110096054703057296325743307050338065932636944093409395007271232395239572967555621340812393377387667724370357625555064570306410670504157731153010937290945257435376870415523437024405223596237660372066530220454382258
331044763532047171566835776037335324623255734037696245065352242275686423765336736726304164246669393374"
# ord('-') == 45 is the smallest code point in the base64url alphabet;
# subtracting it maps each JWS character into 0..61 for the SMART Health
# Card numeric (QR) encoding (see encode_char_to_numeric below).
SMALLEST_B64_CHAR_CODE = ord('-')
# Every SMART Health Card QR payload starts with this scheme prefix.
SMART_HEALTH_CARD_PREFIX = 'shc:/'
# https://stackoverflow.com/a/1089787
def deflate(data, compresslevel=9):
    """Compress *data* to a raw DEFLATE stream (no zlib header/trailer),
    as required for SMART Health Card payloads."""
    # Negative window bits suppress the zlib header and checksum.
    compressor = zlib.compressobj(
        compresslevel,
        zlib.DEFLATED,
        -zlib.MAX_WBITS,
        zlib.DEF_MEM_LEVEL,
        0,  # Z_DEFAULT_STRATEGY
    )
    return compressor.compress(data) + compressor.flush()
def inflate(data):
    """Decompress a raw DEFLATE stream (one produced without a zlib
    header, e.g. by deflate() above)."""
    # -MAX_WBITS tells zlib not to expect a header/checksum; without it,
    # decompression fails with "incorrect header check".
    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
    return decompressor.decompress(data) + decompressor.flush()
def resolve_key_from_issuer():
    """Return a resolver(iss, kid, algorithm) that fetches the issuer's
    published JWKS over HTTPS and returns the JWK dict whose ``kid``
    matches.  Per the SMART Health Cards IG, keys are published at
    <iss>/.well-known/jwks.json."""
    def resolve(iss, kid, algorithm):
        r = requests.get(f'{iss}/.well-known/jwks.json')
        r.raise_for_status()
        jwks = r.json()
        for key in jwks['keys']:
            if kid == key.get('kid'):
                # Deliberately returns the raw JWK dict, not a jose key
                # object -- see the TODO below.
                return key
        # TODO - the following line causes an exception to occur during verficiation
        # There's a fix on master for this, but for now, it does not work
        #
        # File "/usr/local/lib/python3.7/site-packages/jose/jws.py", line 233, in _get_keys
        #     if 'keys' in key:
        # TypeError: argument of type 'CryptographyECKey' is not iterable
        # return jwk.construct(key, algorithm)
        raise Exception(f'Key with kid = {kid} not found')
    return resolve
def resolve_key_from_file(jwks_filename):
    """Return a resolver(iss, kid, algorithm) that looks up keys by ``kid``
    in a local JWKS JSON file (the *iss* argument is ignored)."""
    def resolve(iss, kid, algorithm):
        with open(jwks_filename, 'r', newline='') as jwks_file:
            jwks = json.load(jwks_file)
        for candidate in jwks['keys']:
            if candidate.get('kid') == kid:
                # NOTE: returns the raw JWK dict; wrapping it with jose's
                # jwk.construct() breaks downstream verification (see the
                # TODO in resolve_key_from_issuer).
                return candidate
        raise Exception(f'Key with kid = {kid} not found')
    return resolve
def load_private_key_from_file(jwks_filename, use, algorithm):
    """Return ``(kid, jwk_dict)`` for the first key in the JWKS file whose
    ``use`` and ``alg`` both match; raise if none does."""
    with open(jwks_filename, 'r', newline='') as jwks_file:
        jwks = json.load(jwks_file)
    for candidate in jwks['keys']:
        if candidate.get('alg') == algorithm and candidate.get('use') == use:
            # NOTE: returns the raw JWK dict; jose's jwk.construct() breaks
            # downstream verification (see the TODO in resolve_key_from_issuer).
            return (candidate.get('kid'), candidate)
    raise Exception(f'Key with use = {use} algorithm = {algorithm} not found')
def _decode_vc(jws_raw, key_resolver):
    """Verify a SMART Health Card JWS and return its decoded claims.

    ``key_resolver(iss, kid, algorithm)`` must return the JWK used for
    verification.  Raises if the payload is not DEFLATE-compressed or if
    signature verification fails.
    """
    # before we can verify the vc, we first need to resolve the key
    # the key ID is stored in the header
    # Per the health cards IG,
    ## "Issuers SHALL publish keys as JSON Web Key Sets (see RFC7517), available at <<iss value from Signed JWT>> + .well-known/jwks.json"
    # therefore, we need decode the claims to get the iss value in order to resolve the key
    # The claims are compressed via Deflate, so decompress the data
    # then, extract the iss claim to get access to the base URL, use that to resolve key with id = kid
    # then, verify the jws
    unverified_headers = jws.get_unverified_headers(jws_raw)
    # we expect data to be zipped, so deflate the data
    if unverified_headers.get('zip') == 'DEF':
        unverfied_claims_zip = jws.get_unverified_claims(jws_raw)
        raw_data = inflate(unverfied_claims_zip)
        data = json.loads(raw_data)
    else:
        raise Exception('Expecting payload to be compressed')
    iss = data['iss']
    kid = unverified_headers['kid']
    key = key_resolver(iss, kid, 'ES256')
    # jws.verify re-checks the signature with the resolved key; the claims
    # are decompressed again from the *verified* payload before returning,
    # so the returned dict is never the unverified copy.
    verified_jws = jws.verify(jws_raw, key, algorithms='ES256')
    payload = json.loads(inflate(verified_jws))
    return payload
def decode_vc(jws_raw):
    """Verify and decode a health-card JWS, resolving keys from the
    issuer's published JWKS URL."""
    return _decode_vc(jws_raw, resolve_key_from_issuer())
def decode_vc_from_local_issuer(jws_raw, jwks_file):
    """Verify and decode a health-card JWS using keys from a local JWKS
    file instead of fetching the issuer's published set."""
    return _decode_vc(jws_raw, resolve_key_from_file(jwks_file))
def encode_vc(payload, private_signing_key, kid):
    """Sign *payload* as a compact health-card JWS:
    compact JSON -> raw DEFLATE -> ES256 signature with zip/kid headers."""
    serialized = json.dumps(payload, separators=(',', ':')).encode('utf-8')
    return jws.sign(
        deflate(serialized),
        private_signing_key,
        headers={'kid': kid, 'zip': 'DEF'},
        algorithm='ES256',
    )
def encode_char_to_numeric(ch):
    """Encode one base64url character as the two-digit decimal string used
    by the SMART Health Card numeric QR encoding (offset from '-')."""
    return '%02d' % (ord(ch) - SMALLEST_B64_CHAR_CODE)
def encode_to_numeric(payload):
    """Numerically encode an entire JWS string for QR embedding."""
    digit_pairs = [encode_char_to_numeric(ch) for ch in payload]
    return ''.join(digit_pairs)
def create_qr_code(numeric_encoded_payload):
    """Build a QR image containing 'shc:/' followed by the numeric
    payload, as two separate QR segments."""
    code = qrcode.QRCode()
    code.add_data(SMART_HEALTH_CARD_PREFIX)
    code.add_data(numeric_encoded_payload)
    return code.make_image(fill_color="black", back_color="white")
| 52.83908 | 2,103 | 0.758647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,612 | 0.610398 |
5bd463296b1effec64cbf25d42163fc58e8ce63e | 259 | py | Python | setup.py | kevinmooreiii/moldriver | 65a0a9cef8971737076f81720a61aa5b607333d2 | [
"Apache-2.0"
] | null | null | null | setup.py | kevinmooreiii/moldriver | 65a0a9cef8971737076f81720a61aa5b607333d2 | [
"Apache-2.0"
] | null | null | null | setup.py | kevinmooreiii/moldriver | 65a0a9cef8971737076f81720a61aa5b607333d2 | [
"Apache-2.0"
] | null | null | null | """ Install moldriver
"""
from distutils.core import setup
setup(name="moldr",
version="0.1.1",
packages=["moldr",
"autofile",
"autofile.info",
"autofile.file",
"autofile.system"])
| 19.923077 | 35 | 0.490347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.397683 |
5bd629978047909abad4e7ab77bfc34163f7b633 | 3,388 | py | Python | satchmo/apps/payment/modules/dummy/processor.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | 1 | 2019-10-08T16:19:59.000Z | 2019-10-08T16:19:59.000Z | satchmo/apps/payment/modules/dummy/processor.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/payment/modules/dummy/processor.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
] | null | null | null | """
This is a stub and used as the default processor.
It doesn't do anything but it can be used to build out another
interface.
See the authorizenet module for the reference implementation
"""
from django.utils.translation import ugettext_lazy as _
from payment.modules.base import BasePaymentProcessor, ProcessorResult
class PaymentProcessor(BasePaymentProcessor):
def __init__(self, settings):
super(PaymentProcessor, self).__init__('dummy', settings)
def authorize_payment(self, order=None, testing=False, amount=None):
"""
Make an authorization for an order. This payment will then be captured when the order
is set marked 'shipped'.
"""
if order == None:
order = self.order
if amount is None:
amount = order.balance
cc = order.credit_card
if cc:
ccn = cc.decryptedCC
ccv = cc.ccv
if ccn == '4222222222222':
if ccv == '222':
self.log_extra('Bad CCV forced')
payment = self.record_failure(amount=amount, transaction_id='2',
reason_code='2', details='CCV error forced')
return ProcessorResult(self.key, False, _('Bad CCV - order declined'), payment)
else:
self.log_extra('Setting a bad credit card number to force an error')
payment = self.record_failure(amount=amount, transaction_id='2',
reason_code='2', details='Credit card number error forced')
return ProcessorResult(self.key, False, _('Bad credit card number - order declined'), payment)
orderauth = self.record_authorization(amount=amount, reason_code="0")
return ProcessorResult(self.key, True, _('Success'), orderauth)
def can_authorize(self):
return True
def capture_payment(self, testing=False, amount=None):
"""
Process the transaction and return a ProcessorResult:
Example:
>>> from livesettings.functions import config_get_group
>>> settings = config_get_group('PAYMENT_DUMMY')
>>> from payment.modules.dummy.processor import PaymentProcessor
>>> processor = PaymentProcessor(settings)
# If using a normal payment module, data should be an Order object.
>>> data = {}
>>> processor.prepare_data(data)
>>> processor.process()
ProcessorResult: DUMMY [Success] Success
"""
orderpayment = self.record_payment(amount=amount, reason_code="0")
return ProcessorResult(self.key, True, _('Success'), orderpayment)
def capture_authorized_payment(self, authorization, amount=None):
"""
Given a prior authorization, capture remaining amount not yet captured.
"""
if amount is None:
amount = authorization.remaining()
orderpayment = self.record_payment(amount=amount, reason_code="0",
transaction_id="dummy", authorization=authorization)
return ProcessorResult(self.key, True, _('Success'), orderpayment)
    def release_authorized_payment(self, order=None, auth=None, testing=False):
        """Release a previously authorized payment.

        Marks the authorization complete and saves it; ``order`` and
        ``testing`` are accepted for interface compatibility but unused here.
        """
        auth.complete = True
        auth.save()
        return ProcessorResult(self.key, True, _('Success'))
| 39.395349 | 114 | 0.635478 | 3,065 | 0.904664 | 0 | 0 | 0 | 0 | 0 | 0 | 1,314 | 0.387839 |
5bd77e4c61c0d5d7f63689e483c2e6eeec0b5053 | 310 | py | Python | kitsune/community/urls.py | yfdyh000/kitsune | 9a50b03b715bbabe0543b45d2146875995a6d1e4 | [
"BSD-3-Clause"
] | null | null | null | kitsune/community/urls.py | yfdyh000/kitsune | 9a50b03b715bbabe0543b45d2146875995a6d1e4 | [
"BSD-3-Clause"
] | null | null | null | kitsune/community/urls.py | yfdyh000/kitsune | 9a50b03b715bbabe0543b45d2146875995a6d1e4 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, url
# URL routes for the community app, using the legacy string-based view
# references supported by Django's old patterns() helper.
urlpatterns = patterns(
    'kitsune.community.views',
    url(r'^/contributor_results$', 'contributor_results', name='community.contributor_results'),
    url(r'^/view_all$', 'view_all', name='community.view_all'),
    url(r'^$', 'home', name='community.home'),
)
| 31 | 96 | 0.693548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.558065 |
5bd81eabed49d75dd2407602a6036e1582495777 | 4,650 | py | Python | sw/groundstation/tools/imu-raw-logger.py | nzjrs/wasp | b763309af59e7784811baa6dd80e17dba1b27d81 | [
"MIT"
] | 2 | 2021-07-11T13:47:17.000Z | 2021-11-08T11:21:51.000Z | sw/groundstation/tools/imu-raw-logger.py | nzjrs/wasp | b763309af59e7784811baa6dd80e17dba1b27d81 | [
"MIT"
] | null | null | null | sw/groundstation/tools/imu-raw-logger.py | nzjrs/wasp | b763309af59e7784811baa6dd80e17dba1b27d81 | [
"MIT"
] | 2 | 2015-10-03T06:24:07.000Z | 2016-01-21T11:36:20.000Z | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import time
import os.path
import optparse
import gobject
import wasp
import wasp.transport as transport
import wasp.communication as communication
import wasp.messages as messages
import calibration
import calibration.utils
FREQ = 25.0 #Hz - rate at which raw IMU telemetry is requested from the UAV
STEP_DELAY = 5 #Seconds - time the operator holds each orientation step
class IMUCalibrator:
    """Placeholder for an IMU calibration helper; not implemented yet."""
    def __init__(self, sensor, logfile, messages):
        # Stub constructor: arguments are accepted but currently ignored.
        pass
class IMULogger:
    """Collect raw IMU samples from a UAV over a serial link, or replay them
    from a previously captured log file, while prompting the operator to
    rotate the vehicle through a fixed sequence of orientations.
    """
    # Operator prompts advanced every STEP_DELAY seconds: two settling steps,
    # then every roll/pitch rotation, then two more settling steps.
    STATES = \
        (["WAIT"] * 2 ) +\
        ["%s %s" % (o,d) for o in ("ROLL", "PITCH") for d in (90, 180, 270, 360)] +\
        (["WAIT"] * 2 )
    def __init__(self, port, messages_file, sensor_name, log_file):
        """Set up message parsing, logging/replay state and (if capturing)
        the serial connection to the UAV.

        :param port: serial port device, e.g. /dev/ttyUSB0
        :param messages_file: path to the messages.xml definition file
        :param sensor_name: sensor identifier; selects the IMU_<name>_RAW message
        :param log_file: optional log path. If it exists, measurements are
            replayed from it instead of being captured; if it does not exist,
            captured measurements are written to it.
        """
        raw_imu_message_name = "IMU_%s_RAW" % sensor_name
        self.sensor_name = sensor_name
        self.m = messages.MessagesFile(path=messages_file, debug=False)
        self.m.parse()
        self.msg = self.m.get_message_by_name(raw_imu_message_name)
        if not self.msg:
            raise SystemExit("Could Not Find Message %s" % raw_imu_message_name)
        self.loop = gobject.MainLoop()
        self.state = 0
        self.measurements = []
        self.log = None
        if log_file:
            if os.path.exists(log_file):
                # Existing log: replay it instead of capturing from the UAV.
                self.measurements = calibration.utils.read_log(log_file, sensor_name)
                self.capture = False
            else:
                # New log: capture from the UAV and record to this file.
                self.log = open(log_file, "w")
                self.capture = True
        else:
            self.capture = True
        if self.capture:
            self.s = communication.SerialCommunication(
                transport.Transport(check_crc=True, debug=False),
                self.m,
                wasp.transport.TransportHeaderFooter(acid=wasp.ACID_GROUNDSTATION))
            self.s.configure_connection(serial_port=port,serial_speed=57600,serial_timeout=1)
            self.s.connect("message-received", self._on_message_received)
            self.s.connect("uav-connected", self._on_uav_connected)
    def _on_message_received(self, comm, msg, header, payload):
        """Record one raw IMU sample (and log it, if logging is enabled)."""
        if msg == self.msg:
            x,y,z = msg.unpack_values(payload)
            if self.log:
                self.log.write("%s %d %d %d\n" % (msg.name,x,y,z))
            self.measurements.append( (float(x),float(y),float(z)) )
            print ".",
    def _request_message(self):
        """Ask the UAV to stream the raw IMU message at FREQ Hz.

        Returns False so gobject removes this one-shot timeout.
        """
        self.s.send_message(
            self.m.get_message_by_name("REQUEST_TELEMETRY"),
            (self.msg.id, FREQ))
        return False
    def _on_uav_connected(self, comm, connected):
        """Start telemetry once connected; abort the loop on disconnect."""
        if connected:
            gobject.timeout_add(1000, self._request_message)
        else:
            self.loop.quit()
            raise SystemExit("Not Connected")
    def _rotate_state_machine(self):
        """Advance to the next orientation prompt; quit when all are done."""
        try:
            self.state += 1
            print self.STATES[self.state]
            return True
        except IndexError:
            self.loop.quit()
            return False
    def collect(self):
        """Run the capture (or replay) session and return the measurements
        as processed by calibration.utils.read_list."""
        if self.capture:
            self.s.connect_to_uav()
        gobject.timeout_add(STEP_DELAY*1000, self._rotate_state_machine)
        self.loop.run()
        if self.log:
            self.log.close()
        self.measurements = calibration.utils.read_list(self.measurements)
        return self.measurements
if __name__ == "__main__":
    # Default messages.xml lives in the onboard config directory relative to
    # this script.
    thisdir = os.path.abspath(os.path.dirname(__file__))
    default_messages = os.path.join(thisdir, "..", "..", "onboard", "config", "messages.xml")
    parser = optparse.OptionParser()
    parser.add_option("-m", "--messages",
                default=default_messages,
                help="messages xml file", metavar="FILE")
    parser.add_option("-p", "--port",
                default="/dev/ttyUSB0",
                help="Serial port")
    parser.add_option("-l", "--log-file",
                help="log file for analysis. "\
                     "If it exists it will be used. "\
                     "If it does not exist it will be created. "\
                     "If not supplied the data will be captured directly "\
                     "from the UAV for analysis", metavar="FILE")
    parser.add_option("-s", "--sensor", choices=calibration.SENSORS,
                help="sensor to calibrate",
                metavar="[%s]" % ",".join(calibration.SENSORS))
    options, args = parser.parse_args()
    if not options.sensor:
        parser.error("must supply sensor")
    # Collect (or replay) the measurements, then run the calibration fit.
    logger = IMULogger(options.port, options.messages, options.sensor, options.log_file)
    measurements = logger.collect()
    calibration.calibrate_sensor(options.sensor, measurements, True)
| 34.191176 | 93 | 0.585161 | 2,978 | 0.64043 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.132043 |
5bda02a714241bd79791ee140a659d57825d9aa5 | 6,434 | py | Python | rainbowtables/directories.py | JustBennnn/rainbowtables | 5ec800ae498ee8d1e1e4c0d016ca36f5a207462c | [
"MIT"
] | 3 | 2021-09-08T13:17:43.000Z | 2021-09-14T05:45:56.000Z | rainbowtables/directories.py | JustBennnn/rainbowtables | 5ec800ae498ee8d1e1e4c0d016ca36f5a207462c | [
"MIT"
] | null | null | null | rainbowtables/directories.py | JustBennnn/rainbowtables | 5ec800ae498ee8d1e1e4c0d016ca36f5a207462c | [
"MIT"
] | 1 | 2021-09-13T11:45:22.000Z | 2021-09-13T11:45:22.000Z | """Manage the directories
This includes options to store the hash table in a file, or to keep it
temporarily in memory, in which case it is returned from the function.
"""
import json
import os
import platform
from typing import Tuple, Union
from .errors import FilenameError, PathError, SystemNotSupported
__all__ = [
    "set_directory",
    "set_filename",
    "get_directory",
    "get_filename",
    "get_full_path",
    "create_directory",
    "create_file"
]
# Module-level state shared by the functions below: the directory where the
# hash-table file is stored, and the name of that file.
directory = os.getcwd()
filename = "hash_table.json"
def check_for_invalid_characters(string, os, path_input=False) -> Tuple[bool, str]:
    """Check *string* for characters that are illegal on the given OS.

    Forward and back slashes are ignored when ``path_input`` is True, since
    they delimit the sections of a path.

    :param string: the filename or path to validate
    :param os: the OS name, as reported by ``platform.system()``
    :param path_input: True when *string* is a path rather than a bare filename
    :return: ``(True, char)`` for the first illegal character found,
        otherwise ``(False, "")``
    """
    if os == "Windows":
        if path_input:
            illegal_characters = ["<", ">", ":", '"', "|", "?", "*"]
        else:
            illegal_characters = ["<", ">", ":", '"', "/", "\\", "|", "?", "*"]
    elif os == "Linux":
        # Linux only forbids the path separator, and only in bare filenames.
        # (The original code left illegal_characters unassigned for
        # path_input=True, crashing with UnboundLocalError.)
        illegal_characters = [] if path_input else ["/"]
    else:
        # Unknown platforms are not validated.
        return (False, "")
    for character in illegal_characters:
        if character in string:
            if character == ":":
                # A colon is legal only as a Windows drive separator (index 1).
                for x in range(0, len(string)):
                    if string[x] == ":" and x != 1:
                        return (True, ":")
            else:
                return (True, character)
    return (False, "")
def check_for_reserved_names(string, os) -> Tuple[bool, str]:
    """Check *string* for OS-reserved file names.

    A reserved name is only illegal when it stands alone as a complete path
    component (the text between slashes, or the whole string), matched
    case-insensitively. This fixes the original substring scan, which missed
    a reserved name used as the *first* path component (e.g. "CON/foo") and
    only ever examined the first occurrence in the string.

    :param string: the filename or path to validate
    :param os: the OS name, as reported by ``platform.system()``
    :return: ``(True, NAME)`` with the canonical (uppercase) reserved name if
        one is found, otherwise ``(False, "")``
    :raises SystemNotSupported: for platforms other than Windows or Linux
    """
    if os == "Windows":
        illegal_names = [
            "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3",
            "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1",
            "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"
        ]
    elif os == "Linux":
        # Linux has no reserved names.
        return (False, "")
    else:
        raise SystemNotSupported(f"Unsupported OS {os}.")
    # Both slash styles delimit path components.
    components = string.replace("\\", "/").split("/")
    for reserved_name in illegal_names:
        if any(component.upper() == reserved_name for component in components):
            return (True, reserved_name)
    return (False, "")
def set_directory(file_directory, full_path=False) -> None:
    """Set the directory in which the hash-table file will be stored.

    When ``full_path`` is False, ``file_directory`` is appended to the current
    directory (initially the process working directory); when True, it
    replaces the current directory entirely.

    :raises PathError: if the path contains an illegal character or an OS
        reserved name for the current platform
    """
    global directory
    invalid_characters = check_for_invalid_characters(file_directory, platform.system(), path_input=True)
    if invalid_characters[0] == True:
        raise PathError(f"Invalid character ({invalid_characters[1]}) found in file path.")
    reserved_names = check_for_reserved_names(file_directory, platform.system())
    if reserved_names[0] == True:
        raise PathError(f"OS reserved name ({reserved_names[1]}) found in file path.")
    if full_path == False:
        directory = directory + file_directory
    elif full_path == True:
        directory = file_directory
def set_filename(file_name) -> None:
    """Set the hash-table filename; a ".json" extension is appended.

    If never called, the default name "hash_table.json" is used.

    :raises FilenameError: if the stripped name is empty, contains an illegal
        character, or is an OS reserved name for the current platform
    """
    global filename
    file_name = file_name.strip()
    if file_name == "":
        raise FilenameError("The filename cannot be set as an empty value.")
    invalid_characters = check_for_invalid_characters(file_name, platform.system())
    if invalid_characters[0] == True:
        raise FilenameError(f"Invalid character ({invalid_characters[1]}) found in filename.")
    reserved_names = check_for_reserved_names(file_name, platform.system())
    if reserved_names[0] == True:
        raise FilenameError(f"OS reserved name ({reserved_names[1]}) found in filename.")
    filename = file_name + ".json"
def get_directory() -> str:
    """Return the currently configured storage directory."""
    return directory
def get_filename(file_extension=False) -> str:
    """Return the configured filename, with or without its ".json" suffix."""
    if file_extension == False:
        return filename[:filename.find(".json")]
    return filename
def get_full_path(file_extension=True) -> str:
    """Return the configured directory joined with the configured filename."""
    name = get_filename(file_extension=file_extension)
    return "/".join((get_directory(), name))
def create_directory(path=None) -> None:
    """Create the storage directory, including any missing parent directories.

    :param path: the directory to create; defaults to the module's currently
        configured directory (backward-compatible with the old no-argument call)
    """
    if path is None:
        path = directory
    # os.makedirs creates every missing ancestor in one call and is a no-op
    # for directories that already exist. This replaces the hand-rolled
    # slash-index scan, which also silently did nothing for a path that
    # contained no slashes.
    os.makedirs(path, exist_ok=True)
def create_file(overwrite_existing=False) -> Union[bool, None]:
    """Create the hash-table file inside the configured directory.

    Returns False (leaving the file untouched) when the file already exists
    and ``overwrite_existing`` is False. Raises PathError when the target
    directory does not exist yet.
    """
    if os.path.isfile(get_full_path()) and overwrite_existing == False:
        return False
    if not os.path.isdir(directory):
        raise PathError("The directory must be created (.create_directory()) before the file is.")
    with open(get_full_path(), "w") as table_file:
        table_file.write(json.dumps([{}, {}, {}, []]))
5be00ec4042c62d09e71fd43458751614ef646dd | 1,182 | py | Python | emendation_box/serializers.py | fga-eps-mds/2017.2-SiGI-Op_API | 4532019c15414fd17e06bb3aa78501886e00da1d | [
"BSD-3-Clause"
] | 6 | 2017-08-24T13:18:21.000Z | 2017-10-03T18:06:13.000Z | emendation_box/serializers.py | fga-gpp-mds/2017.2-Grupo9 | 4532019c15414fd17e06bb3aa78501886e00da1d | [
"BSD-3-Clause"
] | 173 | 2017-08-31T15:29:01.000Z | 2017-12-14T13:40:13.000Z | emendation_box/serializers.py | fga-gpp-mds/2017.2-SiGI-Op_API | 4532019c15414fd17e06bb3aa78501886e00da1d | [
"BSD-3-Clause"
] | 2 | 2018-11-19T10:33:00.000Z | 2019-06-19T22:35:43.000Z | from rest_framework import serializers
from .models import EmendationBoxStructure, EmendationBoxType, EmendationBox
class EmendationBoxTypeSerializer(serializers.ModelSerializer):
    """Serializes an EmendationBoxType's id and description."""
    class Meta:
        model = EmendationBoxType
        fields = [
            'id',
            'description',
        ]
class EmendationBoxStructureSerializer(serializers.ModelSerializer):
    """Serializes an EmendationBoxStructure's id and description."""
    class Meta:
        model = EmendationBoxStructure
        fields = [
            'id',
            'description',
        ]
class EmendationBoxSerializer(serializers.ModelSerializer):
    """Serializes an EmendationBox, nesting its type and structure records."""
    # Nested serializer declarations must be attributes of the serializer
    # class itself; declared inside Meta (as the original code did) they are
    # silently ignored by Django REST Framework.
    emendation_type = EmendationBoxTypeSerializer(many=True,
                                                  read_only=True)
    emendation_structure = EmendationBoxStructureSerializer(many=True,
                                                            read_only=True)

    class Meta:
        model = EmendationBox
        fields = [
            'id',
            'lattitude',  # NOTE(review): spelling mirrors the model field name
            'longitude',
            'designNumber',
            'access_box',
            'creation_date',
            'extinction_date',
            'emendation_type',
            'emendation_structure',
        ]
| 28.829268 | 79 | 0.555838 | 1,057 | 0.894247 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.132826 |
5be0937af64c23aae060b769c001fa1fcce73356 | 14,989 | py | Python | autohandshake/src/HandshakeBrowser.py | cedwards036/autohandshake | 7f57b242a612b0f0aad634bc111a3db3050c6597 | [
"MIT"
] | 3 | 2018-05-18T16:15:32.000Z | 2019-08-01T23:06:44.000Z | autohandshake/src/HandshakeBrowser.py | cedwards036/autohandshake | 7f57b242a612b0f0aad634bc111a3db3050c6597 | [
"MIT"
] | null | null | null | autohandshake/src/HandshakeBrowser.py | cedwards036/autohandshake | 7f57b242a612b0f0aad634bc111a3db3050c6597 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from enum import Enum
import re
import os
from autohandshake.src.exceptions import (
InvalidURLError, NoSuchElementError, WrongPageForMethodError,
InsufficientPermissionsError, InvalidUserTypeError
)
from autohandshake.src.constants import MAX_WAIT_TIME
from autohandshake.src.constants import BASE_URL
class UserType(Enum):
    """
    The possible user types in Handshake

    * Employer - a Handshake employer account
    * Staff - a career services staff/admin account
    * Student - a student or alumni account
    """
    # Values are the link labels on Handshake's "Switch Users" page; they are
    # matched verbatim by HandshakeBrowser.switch_users.
    EMPLOYER = 'Employers'
    STAFF = 'Career Services'
    STUDENT = 'Students'
class HandshakeBrowser:
    """
    An automated browser for navigating Handshake.

    Since a logged-in instance of this class is returned by HandshakeSession's
    __enter__ method, it does not usually need to be manually instantiated. Additionally,
    for most use cases, the user only needs to pass a HandshakeBrowser object to
    a Page object, then let the Page's methods do the heavy-lifting.

    For example, you almost never need to write:
    ::

        browser = HandshakeBrowser()

    The vast majority of use cases look something like:
    ::

        with HandshakeSession(school_url, email) as browser:
            some_page = SomePage(browser)
            some_page.do_something()

    If you need to specify a custom max_wait_time, that can be done through the
    HandshakeSession object:
    ::

        # this
        with HandshakeSession(school_url, email, max_wait_time = 60) as browser:
            some_page = SomePage(browser)

        # not this
        browser = HandshakeBrowser(max_wait_time = 60)
    """

    def __init__(self, max_wait_time: int = MAX_WAIT_TIME, chromedriver_path: str = None, download_dir: str = None):
        """
        :param max_wait_time: the maximum time (in seconds) to wait for an element
                              to load before throwing a timeout error
        :type max_wait_time: int
        :param chromedriver_path: the filepath to chromedriver.exe. If not specified,
                                  the package's own driver will be used
        :type chromedriver_path: str
        :param download_dir: the directory in which to download any files. If not
                             specified, defaults to system's default download location.
        :type download_dir: str
        """
        options = webdriver.ChromeOptions()
        options.add_argument('--window-size=1920,1080')
        if download_dir:
            options.add_experimental_option('prefs', {'download.default_directory': download_dir})
        dirname = os.path.dirname(__file__)
        if not chromedriver_path:
            # Fall back to the chromedriver bundled alongside this package.
            chromedriver_path = os.path.join(dirname, '../chromedriver.exe')
        self._browser = webdriver.Chrome(executable_path=chromedriver_path,
                                         options=options)
        self.max_wait_time = max_wait_time
        self._user_type = None

    def get(self, url: str):
        """Go to the web page specified by the given Handshake url.

        :param url: the url to visit. Must be of the form
                    "https://[...].joinhandshake.com[/...]"
        :type url: str
        """
        self._validate_url_str(url)
        self._browser.get(url)
        self._validate_page_exists()
        self._validate_permissions()

    def quit(self):
        """Close the browser"""
        self._browser.quit()

    def element_exists_by_xpath(self, xpath: str) -> bool:
        """
        Determine whether or not an element with the given xpath exists in the page.

        :param xpath: the xpath of the element to search for
        :type xpath: str
        :return: True if the element exists, false otherwise
        :rtype: bool
        """
        try:
            self._browser.find_element_by_xpath(xpath)
            return True
        except NoSuchElementException:
            return False

    def wait_until_element_exists_by_xpath(self, xpath: str):
        """
        Wait until an element with the given xpath exists on the page.

        :param xpath: the xpath of the element to wait for
        :type xpath: str
        """
        try:
            WebDriverWait(self._browser, self.max_wait_time).until(
                EC.visibility_of_element_located((By.XPATH, xpath)))
        except TimeoutException:
            raise TimeoutError(f"Element with xpath {xpath} did not appear in time")

    def wait_until_element_does_not_exist_by_xpath(self, xpath: str):
        """
        Wait until no element with the given xpath is visible on the page.

        :param xpath: the xpath of the element to wait on
        :type xpath: str
        """
        try:
            WebDriverWait(self._browser, self.max_wait_time).until(
                EC.invisibility_of_element_located((By.XPATH, xpath)))
        except TimeoutException:
            # Fixed typo in the original message ("in tme").
            raise TimeoutError(f"Element with xpath {xpath} did not disappear in time")

    def wait_until_element_is_clickable_by_xpath(self, xpath: str):
        """
        Wait until an element with the given xpath is clickable.

        :param xpath: the xpath of the element to wait for
        :type xpath: str
        """
        try:
            WebDriverWait(self._browser, self.max_wait_time).until(
                EC.element_to_be_clickable((By.XPATH, xpath)))
        except TimeoutException:
            raise TimeoutError(f"Element with xpath {xpath} did not become clickable")

    def send_text_to_element_by_xpath(self, xpath: str, text: str, clear: bool = True):
        """
        Send a string to an input field identified by the given xpath

        :param text: the text to send
        :type text: str
        :param xpath: the xpath of the input field to which to send the text
        :type xpath: str
        :param clear: whether or not to clear the field before sending text. If
                      False, text will be appended to any text already present.
        :type clear: bool
        """
        try:
            element = self._browser.find_element_by_xpath(xpath)
            if clear:
                element.clear()
            element.send_keys(text)
        except NoSuchElementException:
            raise NoSuchElementError(f'No element found for xpath: "{xpath}"')

    def click_element_by_xpath(self, xpath):
        """
        Click an element on the page given its xpath

        :param xpath: the xpath of the element to click
        :type xpath: str
        """
        try:
            self._browser.find_element_by_xpath(xpath).click()
        except NoSuchElementException:
            raise NoSuchElementError(f'No element found for xpath: "{xpath}"')

    def wait_then_click_element_by_xpath(self, xpath):
        """
        Click an element on the page given its xpath after waiting to make sure
        it exists

        :param xpath: the xpath of the element to click
        :type xpath: str
        """
        self.wait_until_element_exists_by_xpath(xpath)
        self.click_element_by_xpath(xpath)

    def get_element_attribute_by_xpath(self, xpath: str, attribute: str) -> str:
        """
        Get the value of the given attribute from the element with the given xpath

        :param xpath: the xpath of the element of interest
        :type xpath: str
        :param attribute: the name of the attribute of interest, e.g. 'value'
        :type attribute: str
        :return: the value of the attribute on the element of interest
        :rtype: str
        """
        try:
            if attribute.lower() == 'text':
                return self._browser.find_element_by_xpath(xpath).text
            return self._browser.find_element_by_xpath(xpath).get_attribute(attribute)
        except NoSuchElementException:
            raise NoSuchElementError(f'No element found for xpath: "{xpath}"')

    def get_elements_attribute_by_xpath(self, xpath: str, attribute: str) -> list:
        """
        Get the value of a given attribute for all elements with the given xpath

        :param xpath: the xpath of the elements of interest
        :type xpath: str
        :param attribute: the name of the attribute of interest, e.g. 'value'
        :type attribute: str
        :return: a list of values of the given attribute for each matching element
        :rtype: list
        """
        try:
            elements = self._browser.find_elements_by_xpath(xpath)
            if attribute.lower() == 'text':
                return [element.text for element in elements]
            return [element.get_attribute(attribute) for element in elements]
        except NoSuchElementException:
            raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')

    def execute_script_on_element_by_xpath(self, script: str, xpath: str = None):
        """
        Execute the given javascript expression. If xpath of element is provided,
        the element becomes available to use in the script, and can be accessed
        using arguments[0].

        :param script: the javascript to be executed
        :type script: str
        :param xpath: the xpath of the optional element to be passed to the script
        :type xpath: str
        """
        if not xpath:
            self._browser.execute_script(script)
        else:
            try:
                self._browser.execute_script(script, self._browser.find_element_by_xpath(xpath))
            except NoSuchElementException:
                raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')

    def element_is_selected_by_xpath(self, xpath: str) -> bool:
        """Get whether or not the element specified by the given xpath is selected

        :param xpath: the xpath of the elements of interest
        :type xpath: str
        :return: True if the element is selected, False otherwise
        :rtype: bool
        """
        try:
            return self._browser.find_element_by_xpath(xpath).is_selected()
        # Catch only the "element missing" case; the original bare except
        # swallowed every exception, including KeyboardInterrupt.
        except NoSuchElementException:
            raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')

    def switch_to_frame_by_xpath(self, xpath):
        """
        Switch the browser's context to the frame with the given xpath.

        :param xpath: the xpath of the frame to switch to
        :type xpath: str
        """
        try:
            frame = self._browser.find_element_by_xpath(xpath)
            self._browser.switch_to.frame(frame)
        except NoSuchElementException:
            raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')

    def update_constants(self):
        """
        Update any Handshake environment constants such as School ID or User ID.

        This should be done every time the browser switches user types and upon
        initial login.
        """
        if self._user_type == UserType.EMPLOYER:
            self.employer_id = self._get_meta_constant('logged_in_user_institution_id')
        else:
            self.school_id = self._get_meta_constant('logged_in_user_institution_id')
        self.user_id = self._get_meta_constant('logged_in_user_id')
        self._user_type = UserType(self._get_meta_constant('current_user_type'))

    def _get_meta_constant(self, name: str) -> str:
        """
        Get the content of a meta tag with the given name.

        The method is used to pull data like the current school id, user id,
        or employer id from Handshake's <meta> tags. All tags are of the form:
        <meta content="foo" name="bar">. Given "bar," this method returns "foo".

        :param name: the name of the meta tag to query
        :type name: str
        :return: the content of the meta tag
        :rtype: str
        """
        return self.get_element_attribute_by_xpath(f'//meta[@name="{name}"]',
                                                   'content')

    def switch_to_new_tab(self):
        """Wait for the new tab to finish loaded, then switch to it."""
        WebDriverWait(self._browser, self.max_wait_time).until(
            EC.number_of_windows_to_be(2))
        self._browser.switch_to.window(self._browser.window_handles[-1])

    def return_to_main_tab(self):
        """With a second tab open, close the current tab and return to the main tab."""
        self._browser.execute_script('window.close();')
        self._browser.switch_to.window(self._browser.window_handles[0])

    def maximize_window(self):
        """Maximize the browser window."""
        self._browser.maximize_window()

    @property
    def user_type(self) -> UserType:
        """
        Get the user type of the account currently logged into Handshake.

        :return: the browser's currently-logged-in user type
        :rtype: UserType
        """
        return self._user_type

    def switch_users(self, user_type: UserType):
        """
        Switch to the system view specified by the given user type.

        This method automates the built-in "Switch Users" function in Handshake.

        :param user_type: the user type to which to switch
        :type user_type: UserType
        """
        if self._user_type == user_type:
            return  # do nothing if the browser already has the desired user type
        self.get(f'{BASE_URL}/user_switcher/options')
        try:
            self.click_element_by_xpath(f'//a[div[h4[contains(text(), "{user_type.value}")]]]')
        except NoSuchElementError:
            raise InvalidUserTypeError("User does not have a linked account of the given type")
        self.update_constants()

    @property
    def current_url(self):
        """Get the url of the browser's current page"""
        return self._browser.current_url

    @staticmethod
    def _validate_url_str(url: str):
        """
        Determine whether or not a given Handshake URL is valid

        :param url: the url to test
        :type url: str
        """
        try:
            re.match(r'^https://[a-zA-Z]+\.joinhandshake\.com', url) \
                .group(0)
        except AttributeError:
            raise InvalidURLError('URL must be of the form '
                                  '"https://app.joinhandshake.com[/...]" or '
                                  '"https://[school name].joinhandshake.com[/...]"')

    def _validate_page_exists(self):
        """Determine whether the current page exists or gave a 404 error."""
        if self.element_exists_by_xpath("//p[contains(text(), 'You may want "
                                        "to head back to the homepage.')]"):
            raise InvalidURLError(self._browser.current_url)

    def _validate_permissions(self):
        """Determine whether or not the logged in user has permission to view the current page"""
        if self.element_exists_by_xpath("//div[contains(text(), 'You do not "
                                        "have permission to do that.')]"):
            raise InsufficientPermissionsError
| 39.341207 | 123 | 0.635333 | 14,395 | 0.960371 | 0 | 0 | 976 | 0.065114 | 0 | 0 | 7,872 | 0.525185 |
5be2e58101a5336a454fb0c25ffdd250d09250a0 | 2,058 | py | Python | example/pums_downloader.py | opendp/opendp-pytorch | 5c0fe1dd7bac0fe7554d79f3f56afb4d2e097a12 | [
"MIT"
] | null | null | null | example/pums_downloader.py | opendp/opendp-pytorch | 5c0fe1dd7bac0fe7554d79f3f56afb4d2e097a12 | [
"MIT"
] | null | null | null | example/pums_downloader.py | opendp/opendp-pytorch | 5c0fe1dd7bac0fe7554d79f3f56afb4d2e097a12 | [
"MIT"
] | null | null | null | import zipfile
import requests
import os
import tempfile
# this script downloads additional pums data files
# legend
# https://www2.census.gov/programs-surveys/acs/tech_docs/pums/data_dict/PUMS_Data_Dictionary_2019.txt
def download_pums_data(year, record_type, state, **_kwargs):
    """Download and extract one ACS 1-Year PUMS archive from the Census Bureau.

    The download is skipped entirely when the output directory already exists.

    :param year: survey year, e.g. 2010
    :param record_type: 'person' or 'housing'
    :param state: two-letter lowercase state code, e.g. 'al'
    """
    assert record_type in ('person', 'housing')
    output_dir = get_pums_data_dir(year, record_type, state)
    if os.path.exists(output_dir):
        # Already downloaded and extracted.
        return
    download_url = f"https://www2.census.gov/programs-surveys/acs/data/pums/{year}/1-Year/csv_{record_type[0]}{state}.zip"
    with tempfile.TemporaryDirectory() as temp_download_dir:
        temp_download_path = os.path.join(temp_download_dir, "temp.zip")
        response = requests.get(download_url)
        # Fail loudly on HTTP errors instead of silently writing an error
        # page into the zip file.
        response.raise_for_status()
        with open(temp_download_path, 'wb') as resource_file:
            resource_file.write(response.content)
        os.makedirs(output_dir, exist_ok=True)
        with zipfile.ZipFile(temp_download_path, 'r') as zip_file:
            # extractall replaces the original side-effect-only list
            # comprehension (which also shadowed the builtin 'file').
            zip_file.extractall(output_dir)
def get_pums_data_dir(year, record_type, state, **_kwargs):
    """Return the local directory that holds one extracted PUMS dataset."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, "data", f'PUMS_DL_{year}_{record_type}_{state}')
def get_pums_data_path(year, record_type, state, **_kwargs):
    """returns path to the first .csv file (None when no csv is present)"""
    data_dir = get_pums_data_dir(year, record_type, state)
    matches = (name for name in os.listdir(data_dir) if name.endswith('.csv'))
    return next((os.path.join(data_dir, name) for name in matches), None)
# ACS 1-Year person-level datasets to mirror locally.
# NOTE(review): the 'public' flag is unused in this script — presumably
# consumed by downstream code; confirm before removing.
datasets = [
    {'public': True, 'year': 2010, 'record_type': 'person', 'state': 'al'},
    {'public': False, 'year': 2010, 'record_type': 'person', 'state': 'ct'},
    {'public': False, 'year': 2010, 'record_type': 'person', 'state': 'ma'},
    {'public': False, 'year': 2010, 'record_type': 'person', 'state': 'vt'},
]
if __name__ == '__main__':
    # Download (or verify) every configured PUMS dataset.
    for metadata in datasets:
        print("downloading", metadata)
        print(get_pums_data_dir(**metadata))
        download_pums_data(**metadata)
| 34.881356 | 122 | 0.689504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 595 | 0.289116 |
5be37c14422abd9ec3c208e9edccf25f09246d47 | 5,534 | py | Python | devtracker/test_devtracker.py | satvikel4/devtracker | ea892d6d48aa5d4627b429469b59ae3f0ce7f10f | [
"MIT"
] | 1 | 2019-03-29T00:53:01.000Z | 2019-03-29T00:53:01.000Z | devtracker/test_devtracker.py | satvikel4/devtracker | ea892d6d48aa5d4627b429469b59ae3f0ce7f10f | [
"MIT"
] | null | null | null | devtracker/test_devtracker.py | satvikel4/devtracker | ea892d6d48aa5d4627b429469b59ae3f0ce7f10f | [
"MIT"
] | 1 | 2021-09-14T05:00:20.000Z | 2021-09-14T05:00:20.000Z | import os
import csv
import time
from datetime import datetime
from datetime import timedelta
from pathlib import Path
from .dir_and_path_helpers import get_working_dir, mk_file_path, check_file
from .time_and_date_helpers import get_date, get_sec, get_time, total_time
from .time import current_status, start_time, end_time, make_project_csv
from .report import today_report, full_report
# dir_and_path_helpers tests
# -----------------------------------------------------------------------------------
# Shared test fixtures: make sure the devtracker projects directory exists
# before any test (or the import-time setup below) touches it.
home = str(Path.home())
path = home + "/devtrack_projects"
os.makedirs(path, exist_ok=True)
def test_install_project_folder():
    # The projects directory should have been created at import time.
    project_dir = home + "/devtrack_projects"
    assert os.path.exists(project_dir)
def test_get_working_dir():
    # get_working_dir() should report the package's directory name.
    assert get_working_dir() == 'devtracker'
def test_mk_file_path():
    # mk_file_path() should build the project's csv path in ~/devtrack_projects.
    assert mk_file_path('test') == '{}/devtrack_projects/test.csv'.format(home)
def test_check_file():
    # The original code assigned the *comparison result* (a bool) to
    # test_path and passed that bool to check_file. Build the path first,
    # then assert on it and on check_file separately.
    test_path = mk_file_path('test')
    assert test_path == '{}/devtrack_projects/test.csv'.format(home)
    assert check_file(test_path) == True
# time_and_date_helpers tests
# -----------------------------------------------------------------------------------
def test_get_date():
    # get_date() should match today's date in ISO (YYYY-MM-DD) format.
    assert get_date() == datetime.today().strftime("%Y-%m-%d")
def test_get_time():
    # get_time() should match the current wall-clock time (HH:MM:SS).
    # NOTE(review): can flake if the second ticks over between the two calls.
    assert get_time() == datetime.today().strftime("%H:%M:%S")
def test_get_sec():
    # One minute expressed as 'HH:MM:SS' is 60 seconds.
    assert get_sec('00:01:00') == 60
def test_total_time():
    # total_time() should compute the delta between two HH:MM:SS stamps.
    now = datetime.now()
    later = datetime.now() + timedelta(minutes=15)
    assert total_time(now.strftime("%H:%M:%S"), later.strftime("%H:%M:%S")) == timedelta(minutes=15)
# main method tests
# -----------------------------------------------------------------------------------
# Import-time fixtures: create the 'test' project tracking file and capture
# the start stamps that later tests assert against. Note this rebinds the
# module-level 'path' from the projects directory to the test csv file.
path = home + "/devtrack_projects/" + "test.csv"
make_project_csv('test')
new_start_date = datetime.today().strftime("%Y-%m-%d")
new_start_time = datetime.today().strftime("%H:%M:%S")
def test_make_project_csv():
    # The tracking csv should have been created at import time.
    assert os.path.isfile(path)
def test_make_project_csv_initalize():
    # (The 'initalize' spelling is kept so the pytest id stays stable.)
    # A fresh tracking csv should begin with the expected header row.
    initalize_list = ["start_date", "start_time", "end_date", "end_time", "total"]
    with open(path, 'r') as myfile:
        reader = csv.reader(myfile)
        headers = next(reader)
    assert initalize_list == headers
def test_start_time():
    # The first data row should carry the start stamps captured at import time.
    with open(path, 'r') as myfile:
        reader = csv.reader(myfile)
        next(reader)  # skip the header row
        start_info = next(reader)
    assert start_info == [new_start_date, new_start_time]
def test_end_time_total():
    # Let the tracked session run ~5s so the recorded total is measurable.
    time.sleep(5)
    end_time("test")
    new_end_date = str(datetime.today().strftime("%Y-%m-%d"))
    new_end_time = str(datetime.today().strftime("%H:%M:%S"))
    FMT = "%H:%M:%S"
    # Recompute the expected total from the captured start/end stamps.
    new_total = str(datetime.strptime(new_end_time, FMT) - datetime.strptime(new_start_time, FMT))
    with open(path, 'r') as myfile:
        reader = csv.reader(myfile)
        next(reader)  # skip the header row
        full_entry = next(reader)
    assert full_entry == [new_start_date, new_start_time, new_end_date, new_end_time, new_total]
def test_current_status(capsys):
    # With no active session, the status report should say work hasn't started.
    current_status("test")
    captured = capsys.readouterr()
    assert captured.out == "[+] Status report for 'test': You have not started work on this project yet.\n"
# report test
# -----------------------------------------------------------------------------------
def test_today_report_success(capsys):
    """Daily report should show the roughly 5-second session recorded earlier."""
    today_report("test")
    captured = capsys.readouterr()
    res_one = "[+] Generating daily report for test...\n[+] You have worked on test for a total of 0:00:05 today. Way to go!\n"
    res_two = "[+] Generating daily report for test...\n[+] You have worked on test for a total of 0:00:06 today. Way to go!\n"
    # The original 'captured.out == res_one or res_two' was always truthy:
    # the non-empty string res_two short-circuited the 'or'. Compare properly.
    assert captured.out in (res_one, res_two)
def test_add_start():
    # Open a new tracking session so the next test can exercise the
    # "report requested while still tracking" error path.
    start_time("test")
def test_today_report_err_one(capsys):
    """Requesting a report while a session is open prints an error."""
    today_report("test")
    output = capsys.readouterr().out
    assert output == "[+] Generating daily report for test...\n[-] You are in the middle of tracking, end this session with `devtracker stop` before generation a report.\n"
def test_no_dir_error(capsys):
    """Reporting on an unknown project tells the user to start it first."""
    today_report("no_dir")
    output = capsys.readouterr().out
    assert output == "[-] 'devtracker report' was entered. You must run 'devtracker start' to create project tracking file.\n"
def test_full_report(capsys):
    """Replace the timing-dependent rows with fixed ones and check the
    formatted full report byte-for-byte."""
    # Two deterministic, fully completed sessions to substitute for the
    # real (clock-dependent) rows written by earlier tests.
    next_line = ['1999-01-01', '00:00:00', '1999-01-01', '00:00:00', '00:00:00']
    following_line = ['1999-01-02', '00:00:01', '1999-01-02', '00:00:02', '00:00:01']
    # Read everything, drop the last two (non-deterministic) rows ...
    with open(path, 'r', newline='') as myFile:
        reader = list(csv.reader(myFile))
        reader.pop()
        reader.pop()
    # ... and rewrite the file with the fixed rows appended instead.
    with open(path, 'w', newline='') as myfile:
        wr = csv.writer(myfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
        for i in reader:
            wr.writerow(i)
        wr.writerow(next_line)
        wr.writerow(following_line)
    full_report("test")
    captured = capsys.readouterr()
    assert captured.out == """[+] Generating Full Report
-------------------------------------------------------------------------
 start_date | start_time | end_date | end_time | total |
-------------------------------------------------------------------------
 January 1, 1999 | 12:00 AM | January 1, 1999 | 12:00 AM | 00:00:00 |
-------------------------------------------------------------------------
 January 2, 1999 | 12:00 AM | January 2, 1999 | 12:00 AM | 00:00:01 |
[+] You have worked on test for a total of 0:00:01. Way to go!
"""
def test_remove():
    # Teardown: delete the CSV file created for this test run.
    os.remove(path)
| 34.805031 | 178 | 0.60318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,029 | 0.366643 |
5be5e0787b558661beaa8ad1a91e017e093c0a89 | 12,511 | py | Python | ablinfer/model/normalize.py | Auditory-Biophysics-Lab/ablinfer | 94e64eacd8c8b30088637d7dc5fd3f25b70adc0e | [
"BSD-3-Clause"
] | null | null | null | ablinfer/model/normalize.py | Auditory-Biophysics-Lab/ablinfer | 94e64eacd8c8b30088637d7dc5fd3f25b70adc0e | [
"BSD-3-Clause"
] | null | null | null | ablinfer/model/normalize.py | Auditory-Biophysics-Lab/ablinfer | 94e64eacd8c8b30088637d7dc5fd3f25b70adc0e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from collections.abc import Collection as ABCCollection
from collections.abc import Mapping as ABCMapping
from collections import OrderedDict as OD
import json
import logging
from numbers import Number
from typing import Mapping, Optional, Collection, Union, IO, Tuple, Dict, Set, Any
from .typing_shim import get_origin, get_args
import collections
from .update import __version__, update_model
from ..processing import processing_ops
def get_origin_w(tp):
    """Wrapper around ``get_origin`` that falls back to ``tp`` itself.

    ``get_origin`` returns ``None`` for non-generic types; for the spec
    matching below it is more convenient to treat such a type as its own
    origin.
    """
    origin = get_origin(tp)
    if origin is None:
        return tp
    return origin
def normalize_model(model: Mapping, processing: bool = False) -> Mapping:
    """Normalize the given model.

    Also validates what fields it can on the model: fills in optional
    fields with their defaults, coerces enum lists into mappings, pads
    3-element segmentation colours with an opacity, and raises
    ``ValueError`` on any structural problem.

    :param model: The model to normalize (mutated in place and returned).
    :param processing: Whether or not to parse in detail the processing operations.
        NOTE(review): this flag is currently not referenced anywhere in the
        body — processing sections are always parsed; confirm intent.
    """
    if model["json_version"] != __version__:
        raise ValueError("Can only normalize v%s models, please update it first" % __version__)
    # isinstance() that tolerates typing.Any (both as a bare spec and
    # inside a tuple of alternatives).
    def isinstance_w(a, t):
        if isinstance(t, Collection):
            if Any in t:
                return True
            return isinstance(a, t)
        if t == Any:
            return True
        return isinstance(a, t)
    # Recursive structural check of value `v` against typing-style spec `t`
    # (handles Union/Optional, Collection[...] and Mapping[...]).
    def check_rec(v, t):
        origin = get_origin_w(t)
        args = get_args(t)
        if origin == Union:
            if None in args and v is None:
                return True
            for a in args:
                if a is None:
                    continue
                ret = check_rec(v, a)
                if ret:
                    return True
            return False
            #return any((check_rec(v, a) for a in args if a is not None))
        elif origin in (Collection, ABCCollection):
            if args:
                return isinstance(v, ABCCollection) and all((check_rec(i, args[0]) for i in v))
            return isinstance(v, ABCCollection)
        elif origin in (Mapping, ABCMapping):
            if args:
                return isinstance(v, ABCMapping) and all((isinstance_w(i, args[0]) and isinstance_w(j, args[1]) for i, j in v.items()))
            return isinstance(v, Mapping)
        return isinstance_w(v, origin)
    # Verify dict `d` against a spec dict (see the rules comment below);
    # fills in defaults for missing optional fields as a side effect.
    def simple_verify(d, spec_part, fragment=(), warn_extra=True):
        if warn_extra:
            for k in d:
                if k not in spec_part:
                    logging.warning("Extraneous field %s" % '/'.join(fragment+(k,)))
        for k, t in spec_part.items():
            # A (type, default) duple: split it; otherwise no default.
            if isinstance(t, Collection) and not isinstance(t, Mapping):
                t, c = t
            else:
                c = None
            optional = (get_origin_w(t) == Union) and (type(None) in t.__args__)
            if not optional and k not in d:
                raise ValueError("Missing required field %s" % '/'.join(fragment+(k,)))
            elif optional and k not in d:
                logging.warning("Missing optional field %s" % '/'.join(fragment+(k,)))
                if callable(c):
                    d[k] = c()
                elif c is not None:
                    d[k] = c
            elif isinstance(t, Mapping): ## Recurse
                simple_verify(d[k], t, fragment=fragment+(k,), warn_extra=warn_extra)
            elif not check_rec(d[k], t):
                raise ValueError("Improper type for %s" % '/'.join(fragment+(k,)))
    ## All of these specs indicate the structure of the object and have the following rules
    ## - Each string name maps to a type, a duple, or a mapping
    ## - If mapped to a mapping, then the verification recurses
    ## - If mapped to a type, the value is checked to see if it's that type
    ## - If mapped to a type and the type is Optional[...], then a missing value only triggers a
    ## warning, not an exception
    ## - If mapped to a tuple, the first element is the type and the second is what should be used
    ## to populate the value if it's missing (only used if the type is Optional[...])
    ## - If mapped to a tuple, the second element is either a callable, a boolean, or None; in the
    ## latter two cases, missing values are set to the given literal and in the former they are
    ## set to the result of the callable (with no arguments)
    ## Check the outer layer
    spec = {
        "json_version": str,
        "id": str,
        "type": str,
        "name": str,
        "organ": str,
        "task": str,
        "status": str,
        "modality": str,
        "version": str,
        "description": Optional[str],
        "website": Optional[str],
        "maintainers": (Optional[Collection[str]], list),
        "citation": Optional[str],
        "docker": {
            "image_name": str,
            "image_tag": str,
            "data_path": str,
        },
        "inputs": ABCMapping,
        "params": (Optional[ABCMapping], OD),
        "outputs": ABCMapping,
        "order": Optional[ABCCollection],
    }
    simple_verify(model, spec)
    if not model["id"].isidentifier():
        raise ValueError("Model ID must be a valid Python identifier")
    # Set of all field names seen so far (names must be globally unique
    # across inputs, params and outputs).
    params: Set[str] = set()
    part_spec = {
        "name": str,
        "description": str,
        "status": str,
        "flag": str,
        "extension": str,
        "type": str,
        "pre": (Optional[ABCCollection], list),
    }
    # Per-"type" field specs (validated after the common part_spec).
    type_spec = {
        "volume": {
            "labelmap": (Optional[bool], False),
        },
        "segmentation": {
            "labelmap": (Optional[bool], False),
            "master": Optional[str],
            "colours": (Optional[Mapping], OD),
            "names": (Optional[Mapping], OD),
        },
        "int": {
            "min": (Optional[Number], -2147483648),
            "max": (Optional[Number], 2147483647),
            "default": Number,
        },
        "float": {
            "min": (Optional[Number], -3.40282e+038),
            "max": (Optional[Number], 3.40282e+038),
            "default": Number,
        },
        "bool": {
            "default": bool,
        },
        "string": {
            "default": str,
        },
        "enum": {
            "enum": Union[Collection[str], Mapping],
            "default": str,
        },
    }
    # Spec for one pre-/post-processing section; "params" may be replaced
    # per-operation below based on processing_ops.
    process_spec = {
        "name": str,
        "description": str,
        "status": str,
        "locked": (Optional[bool], False),
        "operation": str,
        "action": Optional[str],
        "targets": Optional[Collection[int]],
        "params": Mapping,
    }
    # Validate one of the "inputs"/"outputs"/"params" sections, using the
    # (mutated between calls) part_spec above.
    def verify_part(name):
        cname = name.title()
        for k, v in model[name].items():
            if k in params:
                raise ValueError("Names must be unique (%s is already used)" % repr(k))
            elif not isinstance(k, str):
                raise ValueError("%s name %s must be a string" % (cname, repr(k)))
            elif not k.isidentifier():
                raise ValueError("%s name %s must be a valid Python identifier" % (cname, repr(k)))
            params.add(k)
            simple_verify(v, part_spec, fragment=(name, k), warn_extra=False)
            typ = v["type"]
            if name == "params" and typ not in ("int", "float", "bool", "enum", "string"):
                raise ValueError("Invalid %s type %s" % (name, repr(typ)))
            elif name in ("outputs", "inputs") and typ not in ("volume", "segmentation"):
                raise ValueError("Invalid %s type %s" % (name, repr(typ)))
            simple_verify(v, type_spec[typ], fragment=(name, k), warn_extra=False)
            if typ == "enum":
                # Normalize a bare list of choices into an identity mapping.
                if not isinstance(v["enum"], Mapping):
                    v["enum"] = OD(((i, i) for i in v["enum"]))
            elif typ == "segmentation":
                if "colours" not in v:
                    v["colours"] = OD()
                for colourk, colourv in v["colours"].items():
                    if not isinstance(colourv, Collection) or len(colourv) not in (3, 4):
                        raise ValueError("Segmentation colours must be 3- or 4-element arrays of floats, not \"%s\"" % repr(colourv))
                    elif len(colourv) == 3:
                        ## Fill in opacity
                        v["colours"][colourk] = (*colourv, 1)
                    for colour in colourv:
                        if colour < 0 or colour > 1:
                            raise ValueError("Invalid segementation colour %s, must be a float on [0,1]" % repr(colour))
            if name in ("outputs", "inputs"):
                if "status" not in v:
                    v["status"] = "required"
                elif v["status"] not in ("required", "suggested", "optional"):
                    raise ValueError("Invalid status %s for %s" % (repr(v["status"]), '/'.join((name, k))))
            if name != "params":
                # Inputs carry "pre" sections, outputs carry "post".
                sname = "pre" if name == "inputs" else "post"
                if sname not in v:
                    v[sname] = []
                for n, sec in enumerate(v[sname]):
                    process_spec_this = process_spec.copy()
                    if sec["operation"] in processing_ops[name[:-1]]:
                        psec = processing_ops[name[:-1]][sec["operation"]][2]
                        if psec is not None:
                            action = sec["action"] if "action" in sec else None
                            if action in psec or None in psec:
                                if action in psec:
                                    act = psec[action]
                                else:
                                    logging.info("Unknown %sprocessing action %s for %s, using default" % (sname, action, sec["operation"]))
                                    act = psec[None]
                                if act:
                                    # Build a per-operation params spec where every
                                    # parameter is optional with its declared default.
                                    process_spec_this["params"] = {param_name: (Optional[type_spec[param_val["type"]]["default"]], param_val["default"]) for param_name, param_val in act.items()}
                            else:
                                logging.warning("Unknown %sprocessing action %s for %s, skipping" % (sname, action, sec["operation"]))
                    else:
                        logging.warning("Unknown %sprocessing operation %s, skipping" % (sname, sec["operation"]))
                    simple_verify(sec, process_spec_this, fragment=(name, k, sname, str(n)))
                    if sec["status"] not in ("required", "suggested", "optional"):
                        raise ValueError("Invalid status %s for %s" % (repr(sec["status"]), '/'.join((name, k, sname, str(n)))))
    verify_part("inputs")
    # Outputs use "post" instead of "pre"; params have neither, and also
    # no status/extension fields.
    del part_spec["pre"]
    part_spec["post"] = (Optional[Collection], list)
    verify_part("outputs")
    del part_spec["post"]
    del part_spec["status"]
    del part_spec["extension"]
    verify_part("params")
    field_set = set((*model["inputs"], *model["params"], *model["outputs"]))
    flag_set = set((
        *(i["flag"] for i in model["inputs"].values()),
        *(i["flag"] for i in model["params"].values()),
        *(i["flag"] for i in model["outputs"].values()),
    ))
    # "order" (when present) must cover every field exactly once; it is
    # mandatory when any field uses an empty flag (a positional argument).
    if "order" in model and model["order"]:
        if len(model["order"]) != len(model["inputs"]) + len(model["params"]) + len(model["outputs"]):
            raise ValueError("Length of order does not match the number of inputs, outputs, and params")
        elif set(model["order"]) != field_set:
            raise ValueError("Order elements must match field names")
    elif ("order" not in model or not model["order"]) and "" in flag_set:
        raise ValueError("Order must be specified if positional arguments are used")
    return model
def update_normalize_model(model: Mapping) -> Tuple[Mapping, bool]:
    """Update and normalize a model.

    :param model: The model to fix.
    :returns: The updated model and a boolean indicating whether an update was performed.
    """
    updated_model, was_updated = update_model(model)
    return (normalize_model(updated_model), was_updated)
def load_model(f: Union[str, IO], fp: bool = True, normalize: bool = True) -> Tuple[Mapping, bool]:
    """Load a model.

    :param f: The file or string to load from.
    :param fp: Whether f is a file-like object or a string.
    :param normalize: Whether or not to normalize the result.
    :returns: The updated model and a boolean indicating whether an update was performed.
    """
    # Preserve key order so that e.g. enum choices keep their JSON order.
    if fp:
        raw = json.load(f, object_pairs_hook=OD)
    else:
        raw = json.loads(f, object_pairs_hook=OD)
    model, was_updated = update_model(raw)
    if normalize:
        model = normalize_model(model)
    return (model, was_updated)
| 40.62013 | 194 | 0.539285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,632 | 0.290305 |
5be7e3e5fb635d52ffc510b5892ea90ed38f68fa | 151 | py | Python | configs/example/spectre_benchmark.py | Yujie-Cui/cleanupspec | 5bdfc551e27e673b170c15b3328559736f3d7848 | [
"BSD-3-Clause"
] | 9 | 2019-12-23T06:06:22.000Z | 2021-05-03T21:33:43.000Z | configs/example/spectre_benchmark.py | limengming/cleanupspec | 5426a0e2e3ce5c00edcb9734617dacf26ab18ef5 | [
"BSD-3-Clause"
] | 1 | 2020-11-12T06:23:36.000Z | 2020-11-12T06:23:36.000Z | configs/example/spectre_benchmark.py | limengming/cleanupspec | 5426a0e2e3ce5c00edcb9734617dacf26ab18ef5 | [
"BSD-3-Clause"
] | 9 | 2020-08-20T10:08:13.000Z | 2022-03-20T13:00:38.000Z | import m5
from m5.objects import *
#Spectre
spectre = Process() # Added by Gururaj
spectre.executable = 'spectre'
spectre.cmd = [spectre.executable]
| 16.777778 | 38 | 0.748344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.231788 |
5be8b8cbba14983952b8b1c2bcd0468e376f1933 | 9,038 | py | Python | src/networks/wav2vec2.py | nikvaessen/w2v2-speaker-few-samples | 98139c2234a63532ebc8dffeacb284f3d7403ca2 | [
"MIT"
] | null | null | null | src/networks/wav2vec2.py | nikvaessen/w2v2-speaker-few-samples | 98139c2234a63532ebc8dffeacb284f3d7403ca2 | [
"MIT"
] | null | null | null | src/networks/wav2vec2.py | nikvaessen/w2v2-speaker-few-samples | 98139c2234a63532ebc8dffeacb284f3d7403ca2 | [
"MIT"
] | null | null | null | ################################################################################
#
# Provide embeddings from raw audio with the wav2vec2 model from huggingface.
#
# Author(s): Nik Vaessen
################################################################################
from typing import Optional, List
import torch as t
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from src.networks.wav2vec2_components.network import Wav2vec2
from src.util.torch import reset_model
from src.networks.wav2vec2_components.base_components import (
Wav2Vec2NetworkRegularisationConfig,
retrieve_pretrained_wav2vec2_config,
)
########################################################################################
# wrapper which holds all components of wav2vec2 network
class Wav2Vec2Network(pl.LightningModule):
    """Maps raw audio waveforms to a sequence of wav2vec2 embeddings.

    Wraps the CNN feature extractor, feature projector and transformer
    encoder of a (pretrained) huggingface wav2vec2 model. Optionally
    prepends a CLS token to the encoder input so an utterance-level
    embedding can be pooled from the first output frame.
    """

    def __init__(
        self,
        wav2vec2_huggingface_id: str,
        reset_weights: bool,
        reg_cfg: Optional[Wav2Vec2NetworkRegularisationConfig] = None,
        insert_cls_token: bool = False,
        learnable_cls_token: bool = False,
        cls_token_constant: float = 1,
        gradient_checkpointing: bool = False,
        shuffling_location: Optional[str] = None,
        shuffling_std: Optional[float] = None,
    ):
        """
        :param wav2vec2_huggingface_id: huggingface model id ('base'/'large'
            variant determines the embedding dimension)
        :param reset_weights: when True, re-initialize all pretrained weights
        :param reg_cfg: optional regularisation (dropout/masking) overrides
        :param insert_cls_token: prepend a CLS token to the encoder input
        :param learnable_cls_token: make the CLS token trainable
        :param cls_token_constant: initial value of every CLS-token element
        :param gradient_checkpointing: trade compute for memory in the encoder
        :param shuffling_location: where (if anywhere) to shuffle the sequence
        :param shuffling_std: std of the shuffling noise
        """
        super().__init__()

        self.wav2vec2 = Wav2vec2(
            cfg=retrieve_pretrained_wav2vec2_config(wav2vec2_huggingface_id, reg_cfg),
            enable_gradient_checkpointing=gradient_checkpointing,
            pretrained_weights=wav2vec2_huggingface_id,
            shuffling_location=shuffling_location,
            shuffling_std=shuffling_std,
        )
        self.insert_cls_token = insert_cls_token
        self.cls_token_constant = cls_token_constant

        # The embedding dimension is fixed by the architecture variant.
        if "base" in wav2vec2_huggingface_id:
            self.num_features = 768
        elif "large" in wav2vec2_huggingface_id:
            self.num_features = 1024
        else:
            raise ValueError("cannot determine num features")

        if self.insert_cls_token:
            # A constant-valued vector; trainable only when requested.
            cls_token = t.ones((self.num_embedding_features,)) * t.Tensor(
                [self.cls_token_constant]
            )
            self.cls_token = t.nn.parameter.Parameter(
                cls_token, requires_grad=learnable_cls_token
            )

        if reset_weights:
            reset_model(self.wav2vec2)

    @property
    def num_embedding_features(self):
        """Dimensionality of each output embedding frame (768 or 1024)."""
        return self.num_features

    def forward(
        self, wav_input: t.Tensor, num_audio_samples: Optional[List[int]] = None
    ):
        """Compute embeddings for a (padded) batch of raw waveforms.

        :param wav_input: tensor of shape [batch_size, num_audio_samples]
        :param num_audio_samples: actual (unpadded) length of each waveform;
            defaults to the full padded length for every element
        :returns: (embeddings, num_feature_tokens per element)
        """
        # assume wav_input is of shape [batch_size, num_audio_features]
        assert len(wav_input.shape) == 2

        if num_audio_samples is None:
            num_audio_samples = [wav_input.shape[1] for _ in range(wav_input.shape[0])]
        else:
            assert len(num_audio_samples) == wav_input.shape[0]
            # Each length must be positive and fit in the padded input.
            # (bugfix: the original chained comparison `0 >= le > shape[1]`
            # could never be true, so any explicit lengths always failed)
            assert all(0 < le <= wav_input.shape[1] for le in num_audio_samples)

        audio_features, num_audio_features, attention_mask = self.extract_features(
            wav_input, num_audio_samples
        )

        wav2vec2_embeddings = self.compute_wav2vec2_embeddings(
            audio_features, attention_mask
        )

        return wav2vec2_embeddings, num_audio_features

    def extract_features(self, wav_input: t.Tensor, num_audio_samples: List[int]):
        """Run the CNN feature extractor + projector and build the attention
        mask matching the downsampled sequence lengths."""
        # wav input should be of shape [BATCH_SIZE, NUM_AUDIO_SAMPLES]

        # first compute audio features with CNN
        features = self.wav2vec2.feature_extractor(wav_input)
        features = features.transpose(1, 2)

        # project channels of CNN output into a sequence of input token embeddings
        features, _ = self.wav2vec2.feature_projector(features)

        num_feature_tokens = self.compute_feature_extractor_lengths(num_audio_samples)
        attention_mask = self.construct_attention_mask(
            num_audio_samples, max(num_feature_tokens), device=wav_input.device
        )

        # optionally apply masking to sequence (in time and feature axis)
        features = self.wav2vec2._mask_hidden_states(
            features, attention_mask=attention_mask
        )

        # features should be of shape [BATCH_SIZE, NUM_FRAMES, NUM_FEATURES]
        bs, num_frames, num_features = features.shape
        assert bs == wav_input.shape[0]
        assert num_frames == max(num_feature_tokens)
        assert num_features == self.num_embedding_features

        return features, num_feature_tokens, attention_mask

    def compute_wav2vec2_embeddings(
        self, input_token_sequence: t.Tensor, attention_mask: t.Tensor = None
    ):
        """Run the transformer encoder over the token sequence, optionally
        prepending the CLS token (which is always attended to)."""
        # input token sequence is of shape [BATCH_SIZE, NUM_FRAMES, NUM_FEATURES]
        # optional attention mask is of shape [BATCH_SIZE, NUM_FRAMES], where
        # 1 means `pay attention` and 0 means `skip processing this frame`.
        if self.insert_cls_token:
            batched_cls_token = self.cls_token.repeat(
                input_token_sequence.shape[0],
                1,
                1,
            )
            input_token_sequence = t.cat(
                [batched_cls_token, input_token_sequence],
                dim=1,
            ).to(device=input_token_sequence.device)

            if attention_mask is not None:
                # we want to attend to the class token
                attention_mask = t.cat(
                    [
                        t.ones(
                            (attention_mask.shape[0], 1),
                            dtype=attention_mask.dtype,
                            device=attention_mask.device,
                        ),
                        attention_mask,
                    ],
                    dim=1,
                ).to(input_token_sequence.device)

        encoder_output = self.wav2vec2.encoder(
            input_token_sequence, attention_mask=attention_mask
        )
        embedding = encoder_output.last_hidden_state

        # embedding should be of shape [BATCH_SIZE, NUM_FRAMES, NUM_FEATURES]
        bs, num_frames, num_features = embedding.shape
        assert bs == input_token_sequence.shape[0]
        assert num_frames == input_token_sequence.shape[1]
        if attention_mask is not None:
            # (bugfix: only check mask shapes when a mask was actually given;
            # the original asserts dereferenced a potentially-None mask)
            assert bs == attention_mask.shape[0]
            assert num_frames == attention_mask.shape[1]
        assert num_features == self.num_embedding_features

        return embedding

    def construct_attention_mask(
        self, num_audio_samples: List[int], feature_sequence_length: int, device: str
    ):
        """Build a [batch, feature_sequence_length] 0/1 mask from the raw
        per-waveform sample counts."""
        assert len(num_audio_samples) >= 1

        # init assumes all tokens are attended to
        bs = len(num_audio_samples)
        max_num_audio_samples = max(num_audio_samples)
        attention_mask = t.ones((bs, max_num_audio_samples), dtype=t.long)

        for idx, length in enumerate(num_audio_samples):
            assert length >= 0

            # set each token which is 'padding' to 0
            attention_mask[idx, length:] = 0

        # downsample the sample-level mask to the CNN output resolution
        attention_mask = self.wav2vec2._get_feature_vector_attention_mask(
            feature_sequence_length, attention_mask
        )

        return attention_mask.to(device=device)

    def compute_feature_extractor_lengths(self, num_audio_samples: List[int]):
        """Map raw sample counts to the number of CNN output frames."""
        num_feature_lengths = self.wav2vec2._get_feat_extract_output_lengths(
            t.LongTensor(num_audio_samples)
        ).tolist()

        return num_feature_lengths
########################################################################################
# freezing logic
def freeze_wav2vec2_on_train_start(
    module: LightningModule,
    network: Wav2Vec2Network,
    wav2vec2_initially_frozen: bool,
    completely_freeze_feature_extractor: bool,
) -> None:
    """Attach freeze-bookkeeping attributes to ``module`` and optionally
    freeze the wav2vec2 network (and/or its feature extractor) at the
    start of training. Raises if the attributes were already set."""
    for guard_attr in ("_steps_wav2vec2_freeze", "_is_wav2vec_frozen"):
        if hasattr(module, guard_attr):
            raise ValueError(
                "expected to initialize the attribute '%s'" % guard_attr
            )

    module._steps_wav2vec2_freeze = 0
    module._is_wav2vec_frozen = False

    if wav2vec2_initially_frozen:
        network.freeze()
        module._is_wav2vec_frozen = True

    if completely_freeze_feature_extractor:
        network.wav2vec2.feature_extractor.requires_grad_(False)
def freeze_wav2vec2_on_after_backward(
    module: LightningModule,
    network: Wav2Vec2Network,
    num_frozen_steps: int,
    completely_freeze_feature_extractor: bool,
) -> None:
    """Count one optimization step and unfreeze the wav2vec2 network once
    ``num_frozen_steps`` steps have elapsed; keep the feature extractor
    frozen if requested."""
    for guard_attr in ("_steps_wav2vec2_freeze", "_is_wav2vec_frozen"):
        if not hasattr(module, guard_attr):
            raise ValueError("expected attribute '%s'" % guard_attr)

    module._steps_wav2vec2_freeze += 1

    should_unfreeze = (
        module._is_wav2vec_frozen
        and num_frozen_steps is not None
        and module._steps_wav2vec2_freeze >= num_frozen_steps
    )
    if should_unfreeze:
        network.unfreeze()
        module._is_wav2vec_frozen = False

        if completely_freeze_feature_extractor:
            network.wav2vec2.feature_extractor.requires_grad_(False)
| 36.297189 | 88 | 0.640518 | 6,532 | 0.722726 | 0 | 0 | 80 | 0.008852 | 0 | 0 | 1,619 | 0.179133 |
5be9d14c46f5ec0e81e233191be816361e7093d2 | 9,384 | py | Python | ai-car-simulation-master/newcar.py | Jeevananthamcse/Palanisamy | 9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c | [
"Unlicense"
] | 2 | 2021-08-30T08:04:04.000Z | 2021-09-27T06:01:05.000Z | ai-car-simulation-master/newcar.py | Jeevananthamcse/Palanisamy | 9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c | [
"Unlicense"
] | 1 | 2022-02-08T00:01:16.000Z | 2022-02-08T00:01:16.000Z | ai-car-simulation-master/newcar.py | Jeevananthamcse/Palanisamy | 9b62f7dbcb9f7a747e5ef5b722e07111a6648b3c | [
"Unlicense"
] | 1 | 2021-09-13T07:03:11.000Z | 2021-09-13T07:03:11.000Z | # This Code is Heavily Inspired By The YouTuber: Cheesy AI
# Code Changed, Optimized And Commented By: NeuralNine (Florian Dedov)
import math
import random
import sys
import os
import neat
import pygame
# Constants
# WIDTH = 1600
# HEIGHT = 880
# Window dimensions in pixels (fullscreen surface size).
WIDTH = 1920
HEIGHT = 1080
# Car sprite is scaled to this size in pixels.
CAR_SIZE_X = 60
CAR_SIZE_Y = 60
BORDER_COLOR = (255, 255, 255, 255) # Color To Crash on Hit
current_generation = 0 # Generation counter
class Car:
    """A simulated car with five radar sensors, driven by a NEAT network.

    The car crashes (``alive = False``) when any of its four corners
    touches a BORDER_COLOR pixel on the map.
    """

    def __init__(self):
        # Load Car Sprite and Rotate
        self.sprite = pygame.image.load('car.png').convert() # Convert Speeds Up A Lot
        self.sprite = pygame.transform.scale(self.sprite, (CAR_SIZE_X, CAR_SIZE_Y))
        self.rotated_sprite = self.sprite
        # self.position = [690, 740] # Starting Position
        self.position = [830, 920] # Starting Position
        self.angle = 0
        self.speed = 0
        self.speed_set = False # Flag For Default Speed Later on
        self.center = [self.position[0] + CAR_SIZE_X / 2, self.position[1] + CAR_SIZE_Y / 2] # Calculate Center
        self.radars = [] # List For Sensors / Radars
        self.drawing_radars = [] # Radars To Be Drawn
        self.alive = True # Boolean To Check If Car is Crashed
        self.distance = 0 # Distance Driven
        self.time = 0 # Time Passed

    def draw(self, screen):
        """Blit the (rotated) car sprite and its radars onto the screen."""
        screen.blit(self.rotated_sprite, self.position) # Draw Sprite
        self.draw_radar(screen) #OPTIONAL FOR SENSORS

    def draw_radar(self, screen):
        """Draw a line and end-point circle for every active radar."""
        # Optionally Draw All Sensors / Radars
        for radar in self.radars:
            position = radar[0]
            pygame.draw.line(screen, (0, 255, 0), self.center, position, 1)
            pygame.draw.circle(screen, (0, 255, 0), position, 5)

    def check_collision(self, game_map):
        """Mark the car dead if any corner touches the border color."""
        self.alive = True
        for point in self.corners:
            # If Any Corner Touches Border Color -> Crash
            # Assumes Rectangle
            if game_map.get_at((int(point[0]), int(point[1]))) == BORDER_COLOR:
                self.alive = False
                break

    def check_radar(self, degree, game_map):
        """Cast a ray at `degree` (relative to heading) and record its hit.

        Appends ((x, y), distance) to self.radars; range is capped at 300px.
        """
        length = 0
        x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)
        y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)
        # While We Don't Hit BORDER_COLOR AND length < 300 (just a max) -> go further and further
        while not game_map.get_at((x, y)) == BORDER_COLOR and length < 300:
            length = length + 1
            x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)
            y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)
        # Calculate Distance To Border And Append To Radars List
        dist = int(math.sqrt(math.pow(x - self.center[0], 2) + math.pow(y - self.center[1], 2)))
        self.radars.append([(x, y), dist])

    def update(self, game_map):
        """Advance the car one tick: move, clamp, recompute geometry and radars."""
        # Set The Speed To 20 For The First Time
        # Only When Having 4 Output Nodes With Speed Up and Down
        if not self.speed_set:
            self.speed = 20
            self.speed_set = True
        # Get Rotated Sprite And Move Into The Right X-Direction
        # Don't Let The Car Go Closer Than 20px To The Edge
        self.rotated_sprite = self.rotate_center(self.sprite, self.angle)
        self.position[0] += math.cos(math.radians(360 - self.angle)) * self.speed
        self.position[0] = max(self.position[0], 20)
        self.position[0] = min(self.position[0], WIDTH - 120)
        # Increase Distance and Time
        self.distance += self.speed
        self.time += 1
        # Same For Y-Position
        self.position[1] += math.sin(math.radians(360 - self.angle)) * self.speed
        self.position[1] = max(self.position[1], 20)
        # bugfix: clamp the y coordinate against the window HEIGHT; the
        # original used WIDTH here, which (WIDTH > HEIGHT) made the upper
        # clamp ineffective and let the car leave the visible map.
        self.position[1] = min(self.position[1], HEIGHT - 120)
        # Calculate New Center
        self.center = [int(self.position[0]) + CAR_SIZE_X / 2, int(self.position[1]) + CAR_SIZE_Y / 2]
        # Calculate Four Corners
        # Length Is Half The Side
        length = 0.5 * CAR_SIZE_X
        left_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 30))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 30))) * length]
        right_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 150))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 150))) * length]
        left_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 210))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 210))) * length]
        right_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 330))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 330))) * length]
        self.corners = [left_top, right_top, left_bottom, right_bottom]
        # Check Collisions And Clear Radars
        self.check_collision(game_map)
        self.radars.clear()
        # From -90 To 120 With Step-Size 45 Check Radar
        for d in range(-90, 120, 45):
            self.check_radar(d, game_map)

    def get_data(self):
        """Return the five radar distances scaled down (inputs to the net)."""
        # Get Distances To Border
        radars = self.radars
        return_values = [0, 0, 0, 0, 0]
        for i, radar in enumerate(radars):
            return_values[i] = int(radar[1] / 30)
        return return_values

    def is_alive(self):
        # Basic Alive Function
        return self.alive

    def get_reward(self):
        """Fitness contribution: distance driven in car-half-lengths."""
        # Calculate Reward (Maybe Change?)
        # return self.distance / 50.0
        return self.distance / (CAR_SIZE_X / 2)

    def rotate_center(self, image, angle):
        """Rotate `image` around its center without changing its bounds."""
        # Rotate The Rectangle
        rectangle = image.get_rect()
        rotated_image = pygame.transform.rotate(image, angle)
        rotated_rectangle = rectangle.copy()
        rotated_rectangle.center = rotated_image.get_rect().center
        rotated_image = rotated_image.subsurface(rotated_rectangle).copy()
        return rotated_image
def run_simulation(genomes, config):
    """NEAT fitness function: race one Car per genome until all crash
    or a rough time limit (~40s at 30fps of counting) is hit."""
    # Empty Collections For Nets and Cars
    nets = []
    cars = []
    # Initialize PyGame And The Display
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.FULLSCREEN)
    # For All Genomes Passed Create A New Neural Network
    for i, g in genomes:
        net = neat.nn.FeedForwardNetwork.create(g, config)
        nets.append(net)
        g.fitness = 0
        cars.append(Car())
    # Clock Settings
    # Font Settings & Loading Map
    clock = pygame.time.Clock()
    generation_font = pygame.font.SysFont("Arial", 30)
    alive_font = pygame.font.SysFont("Arial", 20)
    game_map = pygame.image.load('map.png').convert() # Convert Speeds Up A Lot
    global current_generation
    current_generation += 1
    # Simple Counter To Roughly Limit Time (Not Good Practice)
    counter = 0
    while True:
        # Exit On Quit Event
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
        # For Each Car Get The Acton It Takes
        # (argmax over the net's outputs: 0=left, 1=right, 2=slow, else=fast)
        for i, car in enumerate(cars):
            output = nets[i].activate(car.get_data())
            choice = output.index(max(output))
            if choice == 0:
                car.angle += 10 # Left
            elif choice == 1:
                car.angle -= 10 # Right
            elif choice == 2:
                if(car.speed - 2 >= 12):
                    car.speed -= 2 # Slow Down
            else:
                car.speed += 2 # Speed Up
        # Check If Car Is Still Alive
        # Increase Fitness If Yes And Break Loop If Not
        still_alive = 0
        for i, car in enumerate(cars):
            if car.is_alive():
                still_alive += 1
                car.update(game_map)
                genomes[i][1].fitness += car.get_reward()
        if still_alive == 0:
            break
        counter += 1
        if counter == 30 * 40: # Stop After About 20 Seconds
            break
        # Draw Map And All Cars That Are Alive
        screen.blit(game_map, (0, 0))
        for car in cars:
            if car.is_alive():
                car.draw(screen)
        # Display Info
        text = generation_font.render("Generation: " + str(current_generation), True, (0,0,0))
        text_rect = text.get_rect()
        text_rect.center = (900, 450)
        screen.blit(text, text_rect)
        text = alive_font.render("Still Alive: " + str(still_alive), True, (0, 0, 0))
        text_rect = text.get_rect()
        text_rect.center = (900, 490)
        screen.blit(text, text_rect)
        pygame.display.flip()
        clock.tick(60) # 60 FPS
if __name__ == "__main__":
    # Load Config
    # NOTE(review): config file is named 'voice.txt' — presumably a NEAT
    # config in INI format despite the name; confirm it matches this project.
    config_path = "voice.txt"
    config = neat.config.Config(neat.DefaultGenome,
                                neat.DefaultReproduction,
                                neat.DefaultSpeciesSet,
                                neat.DefaultStagnation,
                                config_path)
    # Create Population And Add Reporters
    population = neat.Population(config)
    population.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    population.add_reporter(stats)
    # Run Simulation For A Maximum of 1000 Generations
    population.run(run_simulation, 1000)
| 35.816794 | 175 | 0.59676 | 5,620 | 0.598892 | 0 | 0 | 0 | 0 | 0 | 0 | 2,002 | 0.213342 |
5bed867c2db74e773ad7534adb7486f3bfecb2cd | 707 | py | Python | calculator/calculator.py | swilltec/calculator | 3c5e076a996e53117ba19ef438369cfe8753a3cc | [
"MIT"
] | null | null | null | calculator/calculator.py | swilltec/calculator | 3c5e076a996e53117ba19ef438369cfe8753a3cc | [
"MIT"
] | null | null | null | calculator/calculator.py | swilltec/calculator | 3c5e076a996e53117ba19ef438369cfe8753a3cc | [
"MIT"
] | null | null | null | """Main module."""
from functools import reduce
class Calc:
    """Small arithmetic helper used by the calculator package."""

    def add(self, *args):
        """Return the sum of all arguments."""
        return sum(args)

    def subtract(self, a, b):
        """Return a minus b."""
        return a - b

    def multiply(self, *args):
        """Return the product of all arguments.

        Raises ValueError when any operand is falsy (e.g. 0) — this is the
        established contract of this method, so it is preserved.
        """
        if not all(args):
            raise ValueError
        return reduce(lambda x, y: x*y, args)

    def divide(self, a, b):
        """Return a / b, or the string "inf" on division by zero.

        Bug fix: the original bare `except:` swallowed every exception
        (including TypeError and KeyboardInterrupt); only division by
        zero is mapped to "inf" now — other errors propagate.
        """
        try:
            return a / b
        except ZeroDivisionError:
            return "inf"

    def avg(self, args, ut=None, lt=None):
        """Average of `args`, optionally bounded by upper/lower thresholds.

        Values above `ut` or below `lt` are discarded before averaging;
        returns 0 for an empty (post-filter) list. NOTE: a threshold of 0
        is treated as "no threshold" (truthiness check) — kept as-is.
        """
        _args = args[:]
        if ut:
            _args = [x for x in _args if x <= ut]
        if lt:
            _args = [x for x in _args if x >= lt]
        if not len(_args):
            return 0
        return sum(_args)/len(_args)
5bf082466ab074d7189898704732914978f112bb | 1,896 | py | Python | utils.py | nildip/DeepLearn_NUMBERS | 57171abc5d922df3aae24b8b86bc4674371608e9 | [
"MIT"
] | 1 | 2018-08-23T06:31:31.000Z | 2018-08-23T06:31:31.000Z | utils.py | nildip/DeepLearn_NUMBERS | 57171abc5d922df3aae24b8b86bc4674371608e9 | [
"MIT"
] | null | null | null | utils.py | nildip/DeepLearn_NUMBERS | 57171abc5d922df3aae24b8b86bc4674371608e9 | [
"MIT"
] | 1 | 2020-05-25T13:50:14.000Z | 2020-05-25T13:50:14.000Z | import matplotlib.pyplot as plt
import sys
import itertools
import numpy as np
from sklearn.metrics import confusion_matrix
from keras import backend as K
import keras
# function to plot the confusion matrix
def plot_confusion_matrix(Y_true, Y_predicted, classes, normalize=False):
    """Plot a confusion matrix for the given true/predicted labels.

    :param Y_true: ground-truth labels
    :param Y_predicted: predicted labels
    :param classes: ordered label list used for both axes
    :param normalize: show row-normalized fractions instead of raw counts
    """
    cm = confusion_matrix(Y_true, Y_predicted, labels = classes)
    # Bug fix: normalize BEFORE drawing. The original normalized after
    # plt.imshow, so the image showed raw counts while the cell text (and
    # the color threshold) used normalized values — they now agree.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]
    plt.figure()
    plt.imshow(cm, interpolation = 'nearest', cmap = plt.get_cmap('gray_r'))
    plt.title('Confusion matrix')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes)
    # Use white text on dark cells and black text on light cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment = "center", color = "white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# function to visualize 2D CNN filters
def visualize_filter(model, input_X, input_Y, layer_num):
    """Show each filter's response of a Conv2D layer to the mean image of each class.

    model: a Keras model; input_X / input_Y: images and integer labels
    (assumes classes 0-9 -- TODO confirm); layer_num: index of a Conv2D layer.
    """
    # Guard: only 2D convolutional layers have per-filter feature maps to show.
    if type(model.layers[layer_num]) != keras.layers.convolutional.Conv2D:
        sys.exit('The selected layer is not a 2D CNN filter')
    # Backend function mapping the network input to the chosen layer's output.
    get_filter_output = K.function([model.layers[0].input], [model.layers[layer_num].output])
    for k in range(0,10):
        # Mean image over all samples of class k, expanded to a batch of one
        # with a trailing channel axis.
        X_layer = np.mean(input_X[np.where(input_Y == k)], axis=0)
        X_layer = np.expand_dims(X_layer, axis=0)
        X_layer = np.expand_dims(X_layer, axis=4)
        layer_output = get_filter_output([X_layer])[0]
        plt.figure(figsize=(20,20))
        # NOTE(review): subplot grid is hard-coded to 1x5 -- this overflows if
        # the layer has more than 5 filters; confirm intended.
        for i in range(0, model.layers[layer_num].get_config()['filters']):
            plt.subplot(1, 5, i+1)
            plt.title('Output of filter {0}, input = {1}'.format(i+1, k))
            plt.imshow(layer_output[0][:,:,i], cmap = plt.get_cmap('gray_r'))
5bf1c0749ddf0180154efbdebf997bae15495e6c | 6,159 | py | Python | Curso de Python USP Part1/Exercicios/ProgramaCompleto_similaridade_COH-PIAH.py | JorgeTranin/Cursos_Coursera | 37d26b5f92d9324225f6701d0eb0fd466cff9d86 | [
"MIT"
] | null | null | null | Curso de Python USP Part1/Exercicios/ProgramaCompleto_similaridade_COH-PIAH.py | JorgeTranin/Cursos_Coursera | 37d26b5f92d9324225f6701d0eb0fd466cff9d86 | [
"MIT"
] | null | null | null | Curso de Python USP Part1/Exercicios/ProgramaCompleto_similaridade_COH-PIAH.py | JorgeTranin/Cursos_Coursera | 37d26b5f92d9324225f6701d0eb0fd466cff9d86 | [
"MIT"
] | null | null | null | import re
def le_assinatura():
    """Prompt for the six linguistic-trait values of the model signature.

    Returns:
        list -- [wal, ttr, hlr, sal, sac, pal] as floats.
    """
    prompts = (
        "Entre o tamanho médio de palavra:",
        "Entre a relação Type-Token:",
        "Entre a Razão Hapax Legomana:",
        "Entre o tamanho médio de sentença:",
        "Entre a complexidade média da sentença:",
        "Entre o tamanho medio de frase:",
    )
    print("Bem-vindo ao detector automático de COH-PIAH.")
    print("Informe a assinatura típica de um aluno infectado:")
    return [float(input(prompt)) for prompt in prompts]
def le_textos():
    """Read texts from the user until an empty line is entered.

    Returns:
        list -- each entered text as one element.
    """
    textos = []
    while True:
        texto = input("Digite o texto " + str(len(textos) + 1) + " (aperte enter para sair):")
        if not texto:
            break
        textos.append(texto)
    return textos
def separa_sentencas(texto):
    """Split *texto* on sentence terminators (``.``, ``!``, ``?``).

    Returns:
        list -- the sentences, without a trailing empty fragment.
    """
    partes = re.split(r'[.!?]+', texto)
    # re.split leaves an empty tail when the text ends with a terminator.
    if partes[-1] == '':
        partes.pop()
    return partes
def separa_frases(sentenca):
    """Split a sentence into phrases at ``,``, ``:`` or ``;`` separators.

    Returns:
        list -- the phrases contained in the sentence.
    """
    separador_de_frases = re.compile(r'[,:;]+')
    return separador_de_frases.split(sentenca)
def separa_palavras(frase):
    """Return the whitespace-separated words of *frase*.

    Arguments:
        frase {str} -- a phrase

    Returns:
        list -- the words contained in the phrase
    """
    return frase.split()
def n_palavras_unicas(lista_palavras):
    """Count the words that occur exactly once (case-insensitive).

    Arguments:
        lista_palavras {list} -- list of words

    Returns:
        int -- number of words appearing a single time
    """
    contagem = {}
    for palavra in lista_palavras:
        chave = palavra.lower()
        contagem[chave] = contagem.get(chave, 0) + 1
    return sum(1 for total in contagem.values() if total == 1)
def n_palavras_diferentes(lista_palavras):
    """Count the distinct words used (case-insensitive).

    Arguments:
        lista_palavras {list} -- list of words

    Returns:
        int -- number of different words
    """
    return len({palavra.lower() for palavra in lista_palavras})
def compara_assinatura(as_a, as_b):
    """Return the degree of similarity between two text signatures.

    The similarity is the mean absolute difference over the six traits
    (lower means more similar).
    """
    diferencas = [abs(as_a[indice] - as_b[indice]) for indice in range(6)]
    return sum(diferencas) / 6
def calcula_assinatura(texto):
    """Compute the six COH-PIAH linguistic traits of *texto*.

    Returns:
        list -- [mean word length, type-token ratio, hapax legomena ratio,
                 mean sentence length, sentence complexity, mean phrase length]
    """
    sentencas = separa_sentencas(texto)
    frases = []
    for sentenca in sentencas:
        frases.extend(separa_frases(sentenca))
    palavras = []
    for frase in frases:
        palavras.extend(separa_palavras(frase))

    total_caracteres_sentencas = sum(len(sentenca) for sentenca in sentencas)
    total_caracteres_frases = sum(len(frase) for frase in frases)
    total_caracteres_palavras = sum(len(palavra) for palavra in palavras)

    tamanho_medio_palavra = total_caracteres_palavras / len(palavras)
    relacao_type_token = n_palavras_diferentes(palavras) / len(palavras)
    razao_hapax = n_palavras_unicas(palavras) / len(palavras)
    tamanho_medio_sentenca = total_caracteres_sentencas / len(sentencas)
    complexidade_sentenca = len(frases) / len(sentencas)
    tamanho_medio_frase = total_caracteres_frases / len(frases)

    return [tamanho_medio_palavra, relacao_type_token, razao_hapax,
            tamanho_medio_sentenca, complexidade_sentenca, tamanho_medio_frase]
def avalia_textos(textos, ass_cp):
    """Return the 1-based number of the text most likely infected by COH-PIAH.

    That is the text whose signature has the *smallest* distance to the
    reference signature ``ass_cp``.

    Fix: the original loop updated the answer when it found a *larger*
    distance (``if menor < inf[i]: c = i``) and never refreshed ``menor``,
    so it usually returned the wrong text number.
    """
    melhor_numero = 1
    menor_distancia = None
    for numero, texto in enumerate(textos, start=1):
        distancia = compara_assinatura(calcula_assinatura(texto), ass_cp)
        if menor_distancia is None or distancia < menor_distancia:
            menor_distancia = distancia
            melhor_numero = numero
    return melhor_numero
# Main program: read the reference signature and the candidate texts, then
# report which text most closely matches the COH-PIAH signature.
assinatura = le_assinatura()
textos = le_textos()
avaliar = avalia_textos(textos, assinatura)
print(f'O autor do texto {avaliar} está infectado com COH-PIAH')
| 26.662338 | 186 | 0.629485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,944 | 0.475683 |
5bf3392c6a3a312fe91823c02d2df51fa662cee1 | 1,074 | py | Python | challanges/shape-challenge/find_arrows/webcam_gui.py | fatcloud/PyCV-Climbing-Wall | b9a20d1cf65fb89debbad15d4451efbf77cddb5d | [
"MIT"
] | 55 | 2015-06-20T20:15:33.000Z | 2022-02-10T02:45:14.000Z | challanges/shape-challenge/find_arrows/webcam_gui.py | fatcloud/PyCV-Climbing-Wall | b9a20d1cf65fb89debbad15d4451efbf77cddb5d | [
"MIT"
] | 8 | 2015-06-20T18:46:52.000Z | 2015-10-31T11:08:04.000Z | challanges/shape-challenge/find_arrows/webcam_gui.py | fatcloud/PyCV-Climbing-Wall | b9a20d1cf65fb89debbad15d4451efbf77cddb5d | [
"MIT"
] | 73 | 2015-06-20T15:59:27.000Z | 2020-03-15T22:43:36.000Z | import cv2
def webcam_gui(filter_func, video_src=0):
    """Open *video_src* with OpenCV, run *filter_func* on every frame and
    display the result until any key is pressed.

    filter_func: callable taking a BGR frame and returning a displayable image.
    video_src: camera index or video path accepted by cv2.VideoCapture.
    """
    cap = cv2.VideoCapture(video_src)
    # cv2.waitKey returns -1 while no key is pressed.
    key_code = -1
    while(key_code == -1):
        # read a frame
        # NOTE(review): the success flag `ret` is ignored -- a failed read
        # passes None to the filter; confirm acceptable for this demo.
        ret, frame = cap.read()
        # run filter with the arguments
        frame_out = filter_func(frame)
        # show the image
        cv2.imshow('Press any key to exit', frame_out)
        # wait for the key
        key_code = cv2.waitKey(10)
    # Release the camera and tear down the window.
    cap.release()
    cv2.destroyAllWindows()
def edge_filter(frame_in):
    """Return the Canny edge map of a BGR frame."""
    # convert into gray scale
    grayscale = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
    # a light box blur suppresses pixel noise before edge detection
    smoothed = cv2.blur(grayscale, (3, 3))
    # Canny edge detection with low/high thresholds 30/120
    return cv2.Canny(smoothed, 30, 120)
def gray_filter(frame_in):
    """Convert a BGR frame to single-channel grayscale."""
    return cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
if __name__ == "__main__":
cv2.VideoCapture.set(CV_CAP_PROP_FPS, 10)
webcam_gui(edge_filter) | 21.48 | 59 | 0.615456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.201117 |
5bf36b81a5557f4a83bb24b4b66ced5aca960191 | 8,132 | py | Python | PDBtoDots.py | RMeli/sensaas | f9a23c65cf6b70d4a6ccf482b233bf17b86c7b3b | [
"BSD-3-Clause"
] | 12 | 2021-06-18T07:58:00.000Z | 2022-01-19T04:57:46.000Z | PDBtoDots.py | RMeli/sensaas | f9a23c65cf6b70d4a6ccf482b233bf17b86c7b3b | [
"BSD-3-Clause"
] | 11 | 2021-06-18T14:38:09.000Z | 2022-01-21T11:53:16.000Z | PDBtoDots.py | RMeli/sensaas | f9a23c65cf6b70d4a6ccf482b233bf17b86c7b3b | [
"BSD-3-Clause"
] | 3 | 2021-06-18T14:28:01.000Z | 2022-01-19T01:16:43.000Z | #!/usr/bin/python3.7
#Author: Lucas GRANDMOUGIN
import sys
import os
import math
import re
import numpy as np
#print('usage: <>.py <file.pdb> \nexecute nsc to generate point-based surface and create tables and if verbose==1 files dotslabel1.xyzrgb dotslabel2.xyzrgb dotslabel3.xyzrgb and dotslabel4.xyzrgb\n')
def pdbsurface(filepdb,nscexe):
    """Generate a coloured point-based molecular surface from a PDB file.

    Runs the external NSC executable (*nscexe*) on the atom coordinates read
    from *filepdb* and classifies each surface dot by the element of its
    nearest atom into four colour labels.

    Returns:
        tuple -- (all dots, all colours, dots1, rgb1, dots2, rgb2,
        dots3, rgb3, dots4, rgb4) as numpy arrays.
    """
    verbose=0
    #label1 {H, Cl, Br, I} white/grey 0.9 0.9 0.9
    #label2 {O, N, S, F} red 1 0 0
    #label3 {C, P, B} green 0 1 0
    #label4 {others} blue 0 0 1
    # Van der Waals radii per element; H/Hp/X/C stored as '%.2f' strings
    # (the Hp string form is later used as a marker for polar hydrogens).
    tabR= {'C':'%.2f' % 1.70, 'O':1.52, 'N':1.55, 'S':1.80, 'P':1.80, 'B':1.72, 'Br':1.85, 'Cl':1.75, 'I':1.98, 'F':1.47, 'H':'%.2f' % 1.20, 'Hp':'%.2f' % 1.10, 'X':'%.2f' % 1.10}
    label= {'C':3, 'P':3, 'B':3, 'O':2, 'N':2, 'S':2, 'F':2, 'Hp':2, 'H':1, 'Cl':1, 'Br':1, 'I':1}
    # Row i of rgb is the colour of label i (row 0 unused).
    rgb= np.array([[0, 0, 0], [0.9, 0.9, 0.9], [1, 0, 0], [0, 1, 0], [0, 0, 1]])
    espace5='     '
    espace6='      '
    fichier2D=0
    # Read the whole PDB file and split each line on whitespace.
    filepdb=open(filepdb,'r')
    getstr=filepdb.read().split('\n')
    filepdb.close()
    tabLignesPdb=[]
    tabLignesPdb.append('')
    compt=1
    while (compt < len(getstr)):
        tabLignesPdb.append(re.split('\s+', getstr[compt].strip()))
        compt=compt+1
    # Collect coordinates, element symbols and radii of ATOM/HETATM records.
    # Index 0 of each list is a placeholder so atoms are 1-indexed.
    compt=1
    comptatomes=0
    getx=[]
    getx.append('')
    gety=[]
    gety.append('')
    getz=[]
    getz.append('')
    getA=[]
    getA.append('')
    getRayon=[]
    getRayon.append('')
    while (compt < len(tabLignesPdb)):
        if (tabLignesPdb[compt][0] == 'HETATM' or tabLignesPdb[compt][0] == 'ATOM'):
            xAtome=float(tabLignesPdb[compt][5])
            yAtome=float(tabLignesPdb[compt][6])
            zAtome=float(tabLignesPdb[compt][7])
            getx.append(xAtome)
            gety.append(yAtome)
            getz.append(zAtome)
            # Count z == 0 atoms to detect flat (2D) structures later.
            if (float(zAtome) == 0):
                fichier2D=fichier2D+1
            getA.append(tabLignesPdb[compt][2])
            # NOTE(review): getA is indexed here by the file-line counter
            # `compt`, but entries are appended only for ATOM/HETATM lines --
            # these indices diverge when other record types are present;
            # verify on real inputs.
            if(getA[compt]!='C' and getA[compt]!='O' and getA[compt]!='N' and getA[compt]!='P' and getA[compt]!='B' and getA[compt]!='H' and getA[compt]!='F' and getA[compt]!='Br' and getA[compt]!='Cl' and getA[compt]!='S' and getA[compt]!='I' and getA[compt]!='X' and getA[compt]!='Hp'):
                print("Warning: atom %s set as C because it is not the tab (unusual in medchem)" % getA[compt])
                getA[compt]='C'
            getRayon.append(tabR[getA[compt]])
            comptatomes=comptatomes+1
        compt=compt+1
    nbatomes=comptatomes
    if (fichier2D==int(nbatomes)):
        print("Warning: pdb file in 2D; SenSaaS needs 3D coordinates to work properly")
    # Reclassify hydrogens within 1.2 A of an N or O as polar ('Hp' radius).
    compt=1
    while (compt <= nbatomes):
        if (getA[compt] == 'H'):
            compt2=1
            while(compt2 <= nbatomes):
                if (getA[compt2] == 'N' or getA[compt2] == 'O'):
                    distHp= math.sqrt((getx[compt] - getx[compt2])**2 + (gety[compt] - gety[compt2])**2 + (getz[compt] - getz[compt2])**2)
                    if (distHp <= 1.2):
                        getRayon[compt]=tabR['Hp']
                compt2=compt2+1
        compt=compt+1
    #nsc:
    # Write the NSC input file (XYZR format) and run the external program.
    compt=1
    psaIn=open('psa.in','w')
    psaIn.write('* XYZR\n')
    psaIn.write(espace6+str(nbatomes)+'\n')
    while (compt <= nbatomes):
        x='%.2f' % getx[compt]
        y='%.2f' % gety[compt]
        z='%.2f' % getz[compt]
        psaIn.write('%8s %8s %8s %8s %8s \n'%(x,y,z,getRayon[compt],getA[compt]))
        compt=compt+1
    psaIn.close()
    cmd = '%s psa.in ' % (nscexe)
    os.system(cmd)
    # Parse the NSC output: skip the 3-line header, read the dot count.
    psaOut=open('psa.out', 'r')
    lignepsaOut= psaOut.readlines()
    psaOut.close()
    tabLignesPsaOut=[]
    compt=3
    while (compt < len(lignepsaOut)):
        tabLignesPsaOut.append(re.split('\s+', lignepsaOut[compt].strip()))
        compt=compt+1
    nbDots= int(tabLignesPsaOut[0][2])
    #print("nbDots= %6s" % (nbDots))
    del tabLignesPsaOut[0]
    del tabLignesPsaOut[0]
    getDots=np.empty(shape=[nbDots,3], dtype='float64')
    getrgb=np.empty(shape=[nbDots,3], dtype='float64')
    # Surface dots start after the per-atom lines in the NSC output.
    compt=nbatomes+2
    comptDots=0
    ligneFicDots=[]
    label1=[]
    label2=[]
    label3=[]
    label4=[]
    if(verbose==1):
        dotsFic=open('dots.xyzrgb', 'w')
    # For each dot: find the nearest atom (brute force) and colour the dot
    # by that atom's label; polar hydrogens are coloured like oxygen.
    while (compt < nbatomes+nbDots+2):
        xDot=float(tabLignesPsaOut[compt][2])
        yDot=float(tabLignesPsaOut[compt][3])
        zDot=float(tabLignesPsaOut[compt][4])
        compt2=1
        m=100
        mi=0
        while(compt2 <= nbatomes):
            xa=getx[compt2]
            ya=gety[compt2]
            za=getz[compt2]
            goodDots= math.sqrt((xDot - xa)**2 + (yDot - ya)**2 + (zDot - za)**2)
            if(goodDots < m):
                m=goodDots
                mi=compt2
            compt2=compt2+1
        atomeCorrespondant=getA[mi]
        rgbi=label[atomeCorrespondant]
        if(getRayon[mi]==tabR['Hp']):
            rgbi=label['O']
        getrgb[comptDots,:]=[rgb[rgbi,0], rgb[rgbi,1], rgb[rgbi,2]]
        getDots[comptDots,:]=[xDot,yDot,zDot]
        if (rgbi == 1):
            label1.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
        elif (rgbi == 2):
            label2.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
        elif (rgbi == 3):
            label3.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
        elif (rgbi == 4):
            label4.append(np.vstack([getDots[comptDots], getrgb[comptDots]]))
        else:
            print("no label for dot no %5s ?\n" %(comptDots))
        if(verbose==1):
            dotsFic.write('%8s'%xDot+'%8s'%yDot+'%8s'%zDot+espace5+'%5s'%(rgb[rgbi,0])+'%5s'%(rgb[rgbi,1])+'%5s'%(rgb[rgbi,2])+'\n')
        comptDots=comptDots+1
        compt=compt+1
    if(verbose==1):
        dotsFic.close()
        dotslabel1=open('dotslabel1.xyzrgb', 'w')
        dotslabel2=open('dotslabel2.xyzrgb', 'w')
        dotslabel3=open('dotslabel3.xyzrgb', 'w')
        dotslabel4=open('dotslabel4.xyzrgb', 'w')
    # Convert the per-label python lists to (N, 3) numpy arrays, optionally
    # dumping each label to its own .xyzrgb file when verbose.
    getDots1=np.empty(shape=[len(label1),3], dtype='float64')
    getrgb1=np.empty(shape=[len(label1),3], dtype='float64')
    getDots2=np.empty(shape=[len(label2),3], dtype='float64')
    getrgb2=np.empty(shape=[len(label2),3], dtype='float64')
    getDots3=np.empty(shape=[len(label3),3], dtype='float64')
    getrgb3=np.empty(shape=[len(label3),3], dtype='float64')
    getDots4=np.empty(shape=[len(label4),3], dtype='float64')
    getrgb4=np.empty(shape=[len(label4),3], dtype='float64')
    compt=0
    while(compt < len(label1)):
        getDots1[compt]= label1[compt][0]
        getrgb1[compt]= label1[compt][1]
        if(verbose==1):
            dotslabel1.write('%8s'%getDots1[compt,0]+'%8s'%getDots1[compt,1]+'%8s'%getDots1[compt,2]+espace5+'%5s'%getrgb1[compt,0]+'%5s'%getrgb1[compt,1]+'%5s'%getrgb1[compt,2]+'\n')
        compt=compt+1
    compt=0
    while(compt < len(getDots2)):
        getDots2[compt]= label2[compt][0]
        getrgb2[compt]= label2[compt][1]
        if(verbose==1):
            dotslabel2.write('%8s'%getDots2[compt,0]+'%8s'%getDots2[compt,1]+'%8s'%getDots2[compt,2]+espace5+'%5s'%getrgb2[compt,0]+'%5s'%getrgb2[compt,1]+'%5s'%getrgb2[compt,2]+'\n')
        compt=compt+1
    compt=0
    while(compt < len(getDots3)):
        getDots3[compt]= label3[compt][0]
        getrgb3[compt]= label3[compt][1]
        if(verbose==1):
            dotslabel3.write('%8s'%getDots3[compt,0]+'%8s'%getDots3[compt,1]+'%8s'%getDots3[compt,2]+espace5+'%5s'%getrgb3[compt,0]+'%5s'%getrgb3[compt,1]+'%5s'%getrgb3[compt,2]+'\n')
        compt=compt+1
    compt=0
    while(compt < len(getDots4)):
        getDots4[compt]= label4[compt][0]
        getrgb4[compt]= label4[compt][1]
        if(verbose==1):
            dotslabel4.write('%8s'%getDots4[compt,0]+'%8s'%getDots4[compt,1]+'%8s'%getDots4[compt,2]+espace5+'%5s'%getrgb4[compt,0]+'%5s'%getrgb4[compt,1]+'%5s'%getrgb4[compt,2]+'\n')
        compt=compt+1
    if(verbose==1):
        dotslabel1.close()
        dotslabel2.close()
        dotslabel3.close()
        dotslabel4.close()
    else:
        # Clean up the temporary NSC files when not keeping debug output.
        os.remove("psa.in")
        os.remove("psa.out")
    return getDots, getrgb, getDots1, getrgb1, getDots2, getrgb2, getDots3, getrgb3, getDots4, getrgb4
| 34.457627 | 288 | 0.564191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,286 | 0.158141 |
5bf397bbc01a1e8b00e15b192628fbb2035b23a8 | 4,465 | py | Python | test/test_utils.py | duncanmmacleod/cwinpy | fe4c2002f9fd129e6a4eece32b888e1f3e58327e | [
"MIT"
] | null | null | null | test/test_utils.py | duncanmmacleod/cwinpy | fe4c2002f9fd129e6a4eece32b888e1f3e58327e | [
"MIT"
] | null | null | null | test/test_utils.py | duncanmmacleod/cwinpy | fe4c2002f9fd129e6a4eece32b888e1f3e58327e | [
"MIT"
] | null | null | null | """
Test script for utils.py function.
"""
import os
import numpy as np
import pytest
from astropy import units as u
from cwinpy.utils import (
ellipticity_to_q22,
gcd_array,
get_psr_name,
initialise_ephemeris,
int_to_alpha,
is_par_file,
logfactorial,
q22_to_ellipticity,
)
from lalpulsar.PulsarParametersWrapper import PulsarParametersPy
def test_logfactorial():
    """logfactorial should equal log(n!) for ints and whole floats."""
    expected = np.log(3 * 2 * 1)
    assert logfactorial(3) == expected
    assert logfactorial(3.0) == expected
def test_gcd_array():
    """gcd_array rejects scalars and singletons, and finds the GCD otherwise."""
    with pytest.raises(TypeError):
        gcd_array(1)  # non-list value
    with pytest.raises(ValueError):
        gcd_array([1])  # single value
    assert gcd_array([5, 25, 90]) == 5
def test_int_to_alpha():
    """Check integer-to-letters conversion, including error cases."""
    with pytest.raises(TypeError):
        int_to_alpha(2.3)
    with pytest.raises(ValueError):
        int_to_alpha(-1)
    expected = {1: "A", 26: "Z", 27: "AA", 28: "AB", 200: "GR", 1000: "ALL"}
    for number, letters in expected.items():
        assert int_to_alpha(number) == letters
    assert int_to_alpha(1, case="lower") == "a"
    assert int_to_alpha(26, case="lower") == "z"
def test_is_par_file():
    """is_par_file is False for missing files and for pars lacking a required key."""
    assert is_par_file("blah_blah_blah") is False

    # par files missing any one required attribute are rejected
    brokenpar = "broken.par"
    values = {
        "F": [100.0],
        "RAJ": 0.1,
        "DECJ": -0.1,
        "PSRJ": "J0101-0101",
    }
    for missing in list(values):
        psr = PulsarParametersPy()
        for key, value in values.items():
            if key != missing:
                psr[key] = value
        psr.pp_to_par(brokenpar)
        assert is_par_file(brokenpar) is False
        os.remove(brokenpar)
def test_get_psr_name():
    """get_psr_name reads the pulsar name from any of the naming attributes."""
    cases = {
        "PSRJ": "J0123+1234",
        "PSRB": "B0124+12",
        "PSR": "J0123+1234",
        "NAME": "B0124+12",
    }
    for attribute, name in cases.items():
        psr = PulsarParametersPy()
        psr[attribute] = name
        assert get_psr_name(psr) == name
def test_ellipticity_to_q22():
    """Check scalar, unit-ful and array conversions from ellipticity to Q22."""
    epsilon = [1e-9, 1e-8]
    expected_q22 = np.array([1e29, 1e30]) * np.sqrt(15.0 / (8.0 * np.pi))

    # scalar conversion
    assert np.isclose(ellipticity_to_q22(epsilon[0]), expected_q22[0])

    # conversion with astropy units attached
    with_units = ellipticity_to_q22(epsilon[0], units=True)
    assert np.isclose(with_units.value, expected_q22[0])
    assert with_units.unit == u.Unit("kg m2")

    # array-like conversion
    q22_array = ellipticity_to_q22(epsilon)
    assert len(q22_array) == len(epsilon)
    assert np.allclose(q22_array, expected_q22)
def test_q22_to_ellipticity_to_q22():
    """Check scalar, array and unit-stripping conversions from Q22 to ellipticity."""
    q22 = [1e29, 1e30]
    expected_epsilon = np.array([1e-9, 1e-8]) / np.sqrt(15.0 / (8.0 * np.pi))

    # scalar conversion
    assert np.isclose(q22_to_ellipticity(q22[0]), expected_epsilon[0])

    # array-like conversion
    epsilon_array = q22_to_ellipticity(q22)
    assert len(epsilon_array) == len(q22)
    assert np.allclose(epsilon_array, expected_epsilon)

    # a Quantity input should come back as a plain (unitless) value
    stripped = q22_to_ellipticity(q22[0] * u.kg * u.m ** 2)
    assert np.isclose(stripped, expected_epsilon[0])
    assert not hasattr(stripped, "unit")
def test_initialise_ephemeris():
    """
    Test reading of ephemeris files.

    Fix: the original repeated the bad-files ``pytest.raises(IOError)``
    block twice verbatim; the duplicate has been removed.
    """
    with pytest.raises(ValueError):
        initialise_ephemeris(units="lhfld")

    with pytest.raises(IOError):
        initialise_ephemeris(
            earthfile="jksgdksg", sunfile="lhlbca", timefile="lshdldgls"
        )

    with pytest.raises(IOError):
        initialise_ephemeris(timefile="lshdldgls")

    edat, tdat = initialise_ephemeris()

    assert edat.nentriesE == 175322
    assert edat.nentriesS == 17534
    assert edat.dtEtable == 7200.0
    assert edat.dtStable == 72000.0
    assert edat.etype == 2

    assert tdat.nentriesT == 87660
    assert tdat.dtTtable == 14400.0
| 22.325 | 77 | 0.619261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 871 | 0.195073 |
5bf631e50626c44c993a9984bac16fda807b9125 | 596 | py | Python | fullcontact/schema/company_schema.py | michaelcredera/fullcontact-python-client | 482970b00b134409e6c9f303e7c2a7a6fc4a4685 | [
"Apache-2.0"
] | 8 | 2020-04-13T15:53:43.000Z | 2022-02-04T07:37:17.000Z | fullcontact/schema/company_schema.py | michaelcredera/fullcontact-python-client | 482970b00b134409e6c9f303e7c2a7a6fc4a4685 | [
"Apache-2.0"
] | 9 | 2020-06-04T15:30:50.000Z | 2022-02-04T07:36:39.000Z | fullcontact/schema/company_schema.py | michaelcredera/fullcontact-python-client | 482970b00b134409e6c9f303e7c2a7a6fc4a4685 | [
"Apache-2.0"
] | 7 | 2020-09-18T16:02:43.000Z | 2022-02-17T09:22:54.000Z | # -*- coding: utf-8 -*-
"""
This module provides the classes for validating
FullContact Company Enrich and Search API requests.
"""
from .base.schema_base import BaseRequestSchema
class CompanyEnrichRequestSchema(BaseRequestSchema):
    # Request schema for the FullContact Company Enrich API.
    schema_name = "Company Enrich"

    # domain: company domain to enrich (required).
    domain: str
    # webhookUrl: optional callback URL for asynchronous delivery.
    webhookUrl: str

    required_fields = ("domain",)
class CompanySearchRequestSchema(BaseRequestSchema):
    # Request schema for the FullContact Company Search API.
    schema_name = "Company Search"

    # companyName: name to search for; the remaining fields narrow or order
    # the results (sort key, location/locality/region/country filters) and
    # webhookUrl optionally delivers results asynchronously.
    # NOTE(review): no required_fields declared here -- presumably companyName
    # is validated elsewhere; confirm against BaseRequestSchema.
    companyName: str
    sort: str
    location: str
    locality: str
    region: str
    country: str
    webhookUrl: str
| 18.625 | 52 | 0.706376 | 411 | 0.689597 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.302013 |
5bf659522a05710484e11d3e5746211900971315 | 815 | py | Python | tests/test_pure_checker.py | best-doctor/mr_proper | 994b109da759a5b466c3ee53b0d92789395af5e5 | [
"MIT"
] | 10 | 2019-11-27T17:19:53.000Z | 2022-03-03T18:21:57.000Z | tests/test_pure_checker.py | best-doctor/mr_proper | 994b109da759a5b466c3ee53b0d92789395af5e5 | [
"MIT"
] | 14 | 2019-10-28T15:22:56.000Z | 2021-09-26T13:15:25.000Z | tests/test_pure_checker.py | best-doctor/mr_proper | 994b109da759a5b466c3ee53b0d92789395af5e5 | [
"MIT"
] | 5 | 2020-06-01T14:17:33.000Z | 2021-02-13T14:03:13.000Z | import ast
import os
from mr_proper.public_api import is_function_pure
from mr_proper.utils.ast import get_ast_tree
def test_ok_for_destructive_assignment():
    """Tuple unpacking inside a function must not break purity detection."""
    module = ast.parse("""
def foo(a):
    b, c = a
    return b * c
    """.strip())
    assert is_function_pure(module.body[0])
def test_is_function_pure_fail_case():
    """A function with a side effect (print) is not pure."""
    module = ast.parse("""
def foo(a):
    print(a)
    """.strip())
    assert not is_function_pure(module.body[0])
def test_is_function_pure_fail_case_for_recursive():
    """Recursive analysis over a real file flags impure callees."""
    test_file_path = os.path.join(os.path.dirname(__file__), 'test_files/test.py')
    ast_tree = get_ast_tree(test_file_path)
    assert not is_function_pure(
        ast_tree.body[0],
        file_ast_tree=ast_tree,
        pyfilepath=test_file_path,
        recursive=True,
    )
| 23.285714 | 82 | 0.69816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.133742 |
5bf865e52df2196a3325cf1ad8afbcae366d75d2 | 18,324 | py | Python | app/forms/validators.py | pricem14pc/eq-questionnaire-runner | 54cc2947ba181a2673ea1fb7cf6b4acdd609e06b | [
"MIT"
] | null | null | null | app/forms/validators.py | pricem14pc/eq-questionnaire-runner | 54cc2947ba181a2673ea1fb7cf6b4acdd609e06b | [
"MIT"
] | null | null | null | app/forms/validators.py | pricem14pc/eq-questionnaire-runner | 54cc2947ba181a2673ea1fb7cf6b4acdd609e06b | [
"MIT"
] | null | null | null | from __future__ import annotations
import re
from datetime import datetime, timezone
from decimal import Decimal, InvalidOperation
from typing import TYPE_CHECKING, Iterable, List, Mapping, Optional, Sequence, Union
import flask_babel
from babel import numbers
from dateutil.relativedelta import relativedelta
from flask_babel import ngettext
from flask_wtf import FlaskForm
from structlog import get_logger
from wtforms import Field, StringField, validators
from wtforms.compat import string_types
from app.forms import error_messages
from app.forms.fields import (
DateField,
DecimalFieldWithSeparator,
IntegerFieldWithSeparator,
)
from app.jinja_filters import format_number, get_formatted_currency
from app.questionnaire.rules.utils import parse_datetime
from app.utilities import safe_content
if TYPE_CHECKING:
from app.forms.questionnaire_form import QuestionnaireForm # pragma: no cover
logger = get_logger()
# TLD label: 2-63 ASCII letters, or punycode ("xn--...") for internationalised TLDs.
tld_part_regex = re.compile(
    r"^([a-z]{2,63}|xn--([a-z0-9]+-)*[a-z0-9]+)$", re.IGNORECASE
)
# Loose email shape check: anything@domain, capturing the domain part.
email_regex = re.compile(r"^.+@([^.@][^@\s]+)$")
# Type aliases used throughout the validators below.
OptionalMessage = Optional[Mapping[str, str]]
NumType = Union[int, Decimal]
PeriodType = Mapping[str, int]
class NumberCheck:
    """Validator rejecting raw input that does not parse as a plain number.

    Scientific notation (e.g. "1e3") is rejected even though ``Decimal``
    would accept it.
    """

    def __init__(self, message: Optional[str] = None):
        self.message = message or error_messages["INVALID_NUMBER"]

    def __call__(
        self,
        form: FlaskForm,
        field: Union[DecimalFieldWithSeparator, IntegerFieldWithSeparator],
    ) -> None:
        try:
            # Strip the locale's thousands separator before parsing.
            stripped = field.raw_data[0].replace(
                numbers.get_group_symbol(flask_babel.get_locale()), ""
            )
            Decimal(stripped)
        except (ValueError, TypeError, InvalidOperation, AttributeError) as exc:
            raise validators.StopValidation(self.message) from exc

        if "e" in field.raw_data[0].lower():
            raise validators.StopValidation(self.message)
class ResponseRequired:
    """Require that form input was provided for this field.

    Mirrors wtforms' ``InputRequired`` (which checks form-input data was
    provided) but additionally supports stripping whitespace before
    deciding whether the value counts as provided — wtforms only offers
    whitespace stripping on its ``Optional`` validator, oddly not on
    ``DataRequired``/``InputRequired``.
    """

    field_flags = ("required",)

    def __init__(self, message: str, strip_whitespace: bool = True):
        self.message = message
        self.string_check = (
            (lambda value: value.strip()) if strip_whitespace else (lambda value: value)
        )

    def __call__(self, form: "QuestionnaireForm", field: Field) -> None:
        raw = field.raw_data
        if raw and raw[0] and self.string_check(raw[0]):
            return
        field.errors[:] = []
        raise validators.StopValidation(self.message)
class NumberRange:
    """Validate that a number lies within optional inclusive/exclusive bounds.

    Works with any comparable numeric type (ints, Decimals), not just
    integers.

    :param minimum: lower bound; not checked when ``None``.
    :param maximum: upper bound; not checked when ``None``.
    """

    def __init__(
        self,
        minimum: Optional[NumType] = None,
        minimum_exclusive: bool = False,
        maximum: Optional[NumType] = None,
        maximum_exclusive: bool = False,
        messages: OptionalMessage = None,
        currency: Optional[str] = None,
    ):
        self.minimum = minimum
        self.maximum = maximum
        self.minimum_exclusive = minimum_exclusive
        self.maximum_exclusive = maximum_exclusive
        self.messages: Mapping[str, str] = {**error_messages, **(messages or {})}
        self.currency = currency

    def __call__(
        self,
        form: "QuestionnaireForm",
        field: Union[DecimalFieldWithSeparator, IntegerFieldWithSeparator],
    ) -> None:
        value: Union[int, Decimal] = field.data
        if value is None:
            return
        for check in (self.validate_minimum, self.validate_maximum):
            error = check(value)
            if error:
                raise validators.ValidationError(error)

    def validate_minimum(self, value: NumType) -> Optional[str]:
        """Return an error message when *value* violates the lower bound."""
        if self.minimum is None:
            return None
        if self.minimum_exclusive:
            if value <= self.minimum:
                return self.messages["NUMBER_TOO_SMALL_EXCLUSIVE"] % dict(
                    min=format_playback_value(self.minimum, self.currency)
                )
        elif value < self.minimum:
            return self.messages["NUMBER_TOO_SMALL"] % dict(
                min=format_playback_value(self.minimum, self.currency)
            )
        return None

    def validate_maximum(self, value: NumType) -> Optional[str]:
        """Return an error message when *value* violates the upper bound."""
        if self.maximum is None:
            return None
        if self.maximum_exclusive:
            if value >= self.maximum:
                return self.messages["NUMBER_TOO_LARGE_EXCLUSIVE"] % dict(
                    max=format_playback_value(self.maximum, self.currency)
                )
        elif value > self.maximum:
            return self.messages["NUMBER_TOO_LARGE"] % dict(
                max=format_playback_value(self.maximum, self.currency)
            )
        return None
class DecimalPlaces:
    """Validate that an input has at most ``max_decimals`` decimal places.

    :param max_decimals: the maximum allowed number of decimal places
        (0 means the value must be an integer).
    """

    def __init__(self, max_decimals: int = 0, messages: OptionalMessage = None):
        self.max_decimals = max_decimals
        self.messages = {**error_messages, **(messages or {})}

    def __call__(
        self, form: "QuestionnaireForm", field: DecimalFieldWithSeparator
    ) -> None:
        locale = flask_babel.get_locale()
        # Remove the locale's grouping separator and spaces before inspection.
        cleaned = (
            field.raw_data[0]
            .replace(numbers.get_group_symbol(locale), "")
            .replace(" ", "")
        )
        decimal_symbol = numbers.get_decimal_symbol(locale)
        if not cleaned or decimal_symbol not in cleaned:
            return
        if self.max_decimals == 0:
            raise validators.ValidationError(self.messages["INVALID_INTEGER"])
        fraction = cleaned.split(decimal_symbol)[1]
        if len(fraction) > self.max_decimals:
            raise validators.ValidationError(
                self.messages["INVALID_DECIMAL"] % dict(max=self.max_decimals)
            )
class OptionalForm:
    """
    Allows a completely empty form and stops the validation chain from
    continuing. Does not stop the chain if any one field is populated.
    """

    field_flags = ("optional",)

    def __call__(self, form: Sequence["QuestionnaireForm"], field: Field) -> None:
        # By default we'll receive empty arrays for values not posted,
        # so empty lists count as unanswered.
        if all(self._is_unanswered(formfield) for formfield in form):
            raise validators.StopValidation()

    @staticmethod
    def _is_unanswered(formfield: Field) -> bool:
        """True when the field has raw data that is absent or a blank string."""
        if not hasattr(formfield, "raw_data"):
            return False
        raw = formfield.raw_data
        if len(raw) == 0:
            return True
        first = raw[0]
        return isinstance(first, string_types) and not first
class DateRequired:
    """Require that at least one part of a date form has been filled in.

    Raises only when ALL present parts (day/month/year) are empty — a form
    that lacks a day or month field treats that part as not entered.
    """

    field_flags = ("required",)

    def __init__(self, message: Optional[str] = None):
        self.message = message or error_messages["MANDATORY_DATE"]

    def __call__(self, form: "QuestionnaireForm", field: DateField) -> None:
        entered = [bool(form.year.data)]
        for part in ("day", "month"):
            if hasattr(form, part):
                entered.append(bool(getattr(form, part).data))
        if not any(entered):
            raise validators.StopValidation(self.message)
class DateCheck:
    """Validate that the combined form data parses as a real date.

    The expected format depends on which parts the form has: full date
    (day present), year-month (month present), or year only.
    """

    def __init__(self, message: Optional[str] = None):
        self.message = message or error_messages["INVALID_DATE"]

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        if not form.data:
            raise validators.StopValidation(self.message)

        if hasattr(form, "day"):
            date_format = "%Y-%m-%d"
        elif hasattr(form, "month"):
            date_format = "%Y-%m"
        else:
            date_format = "%Y"

        try:
            datetime.strptime(form.data, date_format).replace(tzinfo=timezone.utc)
        except ValueError as exc:
            raise validators.StopValidation(self.message) from exc
class SingleDatePeriodCheck:
    """Validate that a single answered date lies within optional bounds."""

    def __init__(
        self,
        messages: OptionalMessage = None,
        date_format: str = "d MMMM yyyy",
        minimum_date: Optional[datetime] = None,
        maximum_date: Optional[datetime] = None,
    ):
        self.messages = {**error_messages, **(messages or {})}
        self.minimum_date = minimum_date
        self.maximum_date = maximum_date
        self.date_format = date_format

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        date = parse_datetime(form.data)
        if not date:
            return

        if self.minimum_date and date < self.minimum_date:
            # Play back the last *invalid* day (one before the minimum).
            playback = self._format_playback_date(
                self.minimum_date + relativedelta(days=-1), self.date_format
            )
            raise validators.ValidationError(
                self.messages["SINGLE_DATE_PERIOD_TOO_EARLY"] % dict(min=playback)
            )

        if self.maximum_date and date > self.maximum_date:
            # Play back the first *invalid* day (one after the maximum).
            playback = self._format_playback_date(
                self.maximum_date + relativedelta(days=+1), self.date_format
            )
            raise validators.ValidationError(
                self.messages["SINGLE_DATE_PERIOD_TOO_LATE"] % dict(max=playback)
            )

    @staticmethod
    def _format_playback_date(date: datetime, date_format: str = "d MMMM yyyy") -> str:
        formatted: str = flask_babel.format_date(date, format=date_format)
        return formatted
class DateRangeCheck:
    """Validate a from/to date range: correct ordering plus optional
    minimum and maximum span expressed as {"years"/"months"/"days": int}."""
    def __init__(
        self,
        messages: OptionalMessage = None,
        period_min: Optional[dict[str, int]] = None,
        period_max: Optional[dict[str, int]] = None,
    ):
        # Per-instance messages override the module-level defaults.
        self.messages = {**error_messages, **(messages or {})}
        self.period_min = period_min
        self.period_max = period_max
    def __call__(
        self, form: "QuestionnaireForm", from_field: DateField, to_field: DateField
    ) -> None:
        from_date = parse_datetime(from_field.data)
        to_date = parse_datetime(to_field.data)
        # Only validate when both ends parsed successfully.
        if from_date and to_date:
            if from_date >= to_date:
                raise validators.ValidationError(self.messages["INVALID_DATE_RANGE"])
            answered_range_relative = relativedelta(to_date, from_date)
            if self.period_min:
                min_range = self._return_relative_delta(self.period_min)
                if self._is_first_relative_delta_largest(
                    min_range, answered_range_relative
                ):
                    raise validators.ValidationError(
                        self.messages["DATE_PERIOD_TOO_SMALL"]
                        % dict(min=self._build_range_length_error(self.period_min))
                    )
            if self.period_max:
                max_range = self._return_relative_delta(self.period_max)
                if self._is_first_relative_delta_largest(
                    answered_range_relative, max_range
                ):
                    raise validators.ValidationError(
                        self.messages["DATE_PERIOD_TOO_LARGE"]
                        % dict(max=self._build_range_length_error(self.period_max))
                    )
    @staticmethod
    def _return_relative_delta(period_object: PeriodType) -> relativedelta:
        # Convert a period mapping into a relativedelta; missing units are 0.
        return relativedelta(
            years=period_object.get("years", 0),
            months=period_object.get("months", 0),
            days=period_object.get("days", 0),
        )
    @staticmethod
    def _is_first_relative_delta_largest(
        relativedelta1: relativedelta, relativedelta2: relativedelta
    ) -> bool:
        # relativedelta objects are not directly orderable, so anchor both to
        # the same fixed date and compare the resulting datetimes.
        epoch = datetime.min  # generic epoch for comparison purposes only
        date1 = epoch + relativedelta1
        date2 = epoch + relativedelta2
        return date1 > date2
    @staticmethod
    def _build_range_length_error(period_object: PeriodType) -> str:
        # Build a localised "X years, Y months, Z days" fragment, including
        # only the units present in period_object.
        error_message = ""
        if "years" in period_object:
            error_message = ngettext(
                "%(num)s year", "%(num)s years", period_object["years"]
            )
        if "months" in period_object:
            message_addition = ngettext(
                "%(num)s month", "%(num)s months", period_object["months"]
            )
            error_message += (
                message_addition if error_message == "" else ", " + message_addition
            )
        if "days" in period_object:
            message_addition = ngettext(
                "%(num)s day", "%(num)s days", period_object["days"]
            )
            error_message += (
                message_addition if error_message == "" else ", " + message_addition
            )
        return error_message
class SumCheck:
    """Validate a calculated total against a target using the supplied
    comparison condition(s)."""
    def __init__(
        self, messages: OptionalMessage = None, currency: Optional[str] = None
    ):
        # Per-instance messages override the module-level defaults.
        self.messages = {**error_messages, **(messages or {})}
        self.currency = currency
    def __call__(
        self,
        form: QuestionnaireForm,
        conditions: List[str],
        total: Union[Decimal, int],
        target_total: Union[Decimal, float],
    ) -> None:
        if len(conditions) > 1:
            # Multiple conditions are only supported as an inequality combined
            # with "equals" (i.e. "<=" or ">=").
            try:
                conditions.remove("equals")
            except ValueError as exc:
                raise Exception(
                    "There are multiple conditions, but equals is not one of them. "
                    "We only support <= and >="
                ) from exc
            condition = f"{conditions[0]} or equals"
        else:
            condition = conditions[0]
        is_valid, message = self._is_valid(condition, total, target_total)
        if not is_valid:
            raise validators.ValidationError(
                self.messages[message]
                % dict(total=format_playback_value(target_total, self.currency))
            )
    @staticmethod
    def _is_valid(
        condition: str,
        total: Union[Decimal, float],
        target_total: Union[Decimal, float],
    ) -> tuple[bool, str]:
        # Returns (passed, message-key).
        # NOTE(review): an unrecognised condition falls through every branch
        # and implicitly returns None, which the caller would fail to unpack.
        if condition == "equals":
            return total == target_total, "TOTAL_SUM_NOT_EQUALS"
        if condition == "less than":
            return total < target_total, "TOTAL_SUM_NOT_LESS_THAN"
        if condition == "greater than":
            return total > target_total, "TOTAL_SUM_NOT_GREATER_THAN"
        if condition == "greater than or equals":
            return total >= target_total, "TOTAL_SUM_NOT_GREATER_THAN_OR_EQUALS"
        if condition == "less than or equals":
            return total <= target_total, "TOTAL_SUM_NOT_LESS_THAN_OR_EQUALS"
def format_playback_value(
    value: Union[float, Decimal], currency: Optional[str] = None
) -> str:
    """Format *value* for playback: as a currency amount when *currency* is
    truthy, otherwise as a plain localised number."""
    if not currency:
        formatted: str = format_number(value)
        return formatted
    return get_formatted_currency(value, currency)
def format_message_with_title(error_message: str, question_title: str) -> str:
    """Interpolate the sanitised question title into *error_message*."""
    safe_title = safe_content(question_title)
    return error_message % {"question_title": safe_title}
class MutuallyExclusiveCheck:
    """Ensure at most one answer of a mutually exclusive group is given,
    and — when the question is mandatory — that at least one is."""

    def __init__(self, question_title: str, messages: OptionalMessage = None):
        # Per-instance messages override the module-level defaults.
        self.messages = {**error_messages, **(messages or {})}
        self.question_title = question_title

    def __call__(
        self, answer_values: Iterable, is_mandatory: bool, is_only_checkboxes: bool
    ) -> None:
        answered = [value for value in answer_values if value]
        if len(answered) > 1:
            raise validators.ValidationError(self.messages["MUTUALLY_EXCLUSIVE"])
        if is_mandatory and not answered:
            # Checkbox-only questions get a checkbox-specific mandatory message.
            key = "MANDATORY_CHECKBOX" if is_only_checkboxes else "MANDATORY_QUESTION"
            raise validators.ValidationError(
                format_message_with_title(self.messages[key], self.question_title)
            )
def sanitise_mobile_number(data: str) -> str:
    """Normalise a mobile number: drop separator characters, then strip any
    leading UK dialling prefix (0, +44, 044 or 0044)."""
    stripped = re.sub(r"[\s.,\t\-{}\[\]()/]", "", data)
    return re.sub(r"^(0{1,2}44|\+44|0)", "", stripped)
class MobileNumberCheck:
    """Validate that a field holds a UK mobile number: after sanitising,
    exactly 10 digits starting with 7."""

    def __init__(self, message: OptionalMessage = None):
        # Default to the shared INVALID_MOBILE_NUMBER message.
        self.message = message or error_messages["INVALID_MOBILE_NUMBER"]

    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        number = sanitise_mobile_number(field.data)
        if len(number) != 10 or not re.match("^7[0-9]+$", number):
            raise validators.ValidationError(self.message)
class EmailTLDCheck:
    """Validate the top-level domain part of an email address."""
    def __init__(self, message: Optional[str] = None):
        # Default to the shared INVALID_EMAIL_FORMAT message.
        self.message = message or error_messages["INVALID_EMAIL_FORMAT"]
    def __call__(self, form: "QuestionnaireForm", field: StringField) -> None:
        if match := email_regex.match(field.data):
            hostname = match.group(1)
            try:
                # IDNA-encode so internationalised domains are checked in
                # ASCII form; invalid labels raise UnicodeError.
                hostname = hostname.encode("idna").decode("ascii")
            except UnicodeError as exc:
                raise validators.StopValidation(self.message) from exc
            parts = hostname.split(".")
            # Only check the TLD when the hostname actually has one.
            if len(parts) > 1 and not tld_part_regex.match(parts[-1]):
                raise validators.StopValidation(self.message)
| 35.789063 | 103 | 0.625791 | 16,531 | 0.90215 | 0 | 0 | 2,431 | 0.132668 | 0 | 0 | 2,986 | 0.162956 |
5bf8a875ecf531c15a7f8d15eea40ef0738f6a6d | 709 | py | Python | reveries/common/publish/publish_subset.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 3 | 2020-04-01T10:51:17.000Z | 2021-08-05T18:35:23.000Z | reveries/common/publish/publish_subset.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | null | null | null | reveries/common/publish/publish_subset.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 1 | 2020-07-05T12:06:30.000Z | 2020-07-05T12:06:30.000Z | from avalon import io
def publish(asset_id, subset_name, families):
    """Ensure a subset document exists for the asset and return its id.

    :param asset_id: (object) id of the parent asset document
    :param subset_name: (str) name of the subset
    :param families: (list) families stored on the subset
    :return: id of the existing or newly inserted subset document
    """
    existing = io.find_one({"parent": asset_id, "name": subset_name})
    if existing is not None:
        return existing['_id']
    # No matching subset yet: insert a fresh document.
    document = {
        'name': subset_name,
        'parent': asset_id,
        'type': 'subset',
        'data': {
            'families': families,
            'subsetGroup': ''
        },
        'schema': 'avalon-core:subset-3.0'}
    return io.insert_one(document).inserted_id
| 22.15625 | 61 | 0.575458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.341326 |
5bfb60401fb3180662fe2cf6854036c39c7bffb8 | 3,352 | py | Python | tests/test_neofoodclub.py | diceroll123/neofoodclub.py | ac82ee32bb6ed4f98ebce222273910bc8acfcd95 | [
"MIT"
] | 2 | 2021-04-21T06:34:50.000Z | 2021-04-21T06:38:09.000Z | tests/test_neofoodclub.py | diceroll123/neofoodclub.py | ac82ee32bb6ed4f98ebce222273910bc8acfcd95 | [
"MIT"
] | 2 | 2021-12-14T00:19:42.000Z | 2022-02-19T02:15:52.000Z | tests/test_neofoodclub.py | diceroll123/neofoodclub.py | ac82ee32bb6ed4f98ebce222273910bc8acfcd95 | [
"MIT"
] | 1 | 2021-05-05T02:25:20.000Z | 2021-05-05T02:25:20.000Z | import unittest
from typing import Tuple
from neofoodclub import NeoFoodClub # type: ignore
from neofoodclub.types import RoundData # type: ignore
# i picked the smallest round I could quickly find
test_round_data: RoundData = {
    "currentOdds": [
        [1, 2, 13, 3, 5],
        [1, 4, 2, 4, 6],
        [1, 3, 13, 7, 2],
        [1, 13, 2, 3, 3],
        [1, 8, 2, 4, 13],
    ],
    "foods": [
        [26, 25, 4, 9, 21, 1, 33, 11, 7, 10],
        [12, 9, 14, 35, 25, 6, 21, 19, 40, 37],
        [17, 30, 21, 39, 37, 15, 29, 40, 31, 10],
        [10, 18, 35, 9, 34, 23, 27, 32, 28, 12],
        [11, 20, 9, 33, 7, 14, 4, 23, 31, 26],
    ],
    "lastChange": "2021-02-16T23:47:18+00:00",
    "openingOdds": [
        [1, 2, 13, 3, 5],
        [1, 4, 2, 4, 6],
        [1, 3, 13, 7, 2],
        [1, 13, 2, 3, 3],
        [1, 8, 2, 4, 12],
    ],
    "pirates": [
        [2, 8, 14, 11],
        [20, 7, 6, 10],
        [19, 4, 12, 15],
        [3, 1, 5, 13],
        [17, 16, 18, 9],
    ],
    "round": 7956,
    "start": "2021-02-15T23:47:41+00:00",
    "timestamp": "2021-02-16T23:47:37+00:00",
    "winners": [1, 3, 4, 2, 4],
    "changes": [
        {"arena": 1, "new": 6, "old": 5, "pirate": 4, "t": "2021-02-16T23:47:18+00:00"},
        {
            "arena": 4,
            "new": 8,
            "old": 12,
            "pirate": 1,
            "t": "2021-02-16T23:47:18+00:00",
        },
        {"arena": 4, "new": 4, "old": 6, "pirate": 3, "t": "2021-02-16T23:47:18+00:00"},
        {
            "arena": 4,
            "new": 12,
            "old": 13,
            "pirate": 4,
            "t": "2021-02-16T23:47:18+00:00",
        },
    ],
}
# The same ten bets expressed in three representations; every decoder in the
# tests below must agree on all three.
test_bet_hash = "ltqvqwgimhqtvrnywrwvijwnn"
test_indices: Tuple[Tuple[int, ...], ...] = (
    (2, 1, 3, 4, 3),
    (1, 4, 1, 3, 1),
    (4, 2, 1, 1, 1),
    (3, 2, 2, 1, 2),
    (3, 1, 3, 4, 4),
    (1, 3, 2, 2, 3),
    (4, 4, 4, 2, 3),
    (2, 4, 2, 4, 1),
    (1, 3, 1, 4, 4),
    (2, 2, 3, 2, 3),
)
test_binaries: Tuple[int, ...] = (
    0x48212,
    0x81828,
    0x14888,
    0x24484,
    0x28211,
    0x82442,
    0x11142,
    0x41418,
    0x82811,
    0x44242,
)
test_expected_results = (test_bet_hash, test_indices, test_binaries)
test_nfc = NeoFoodClub(test_round_data)
# Decode the same set of bets from each representation.
hash_bets = test_nfc.make_bets_from_hash(test_bet_hash)
indices_bets = test_nfc.make_bets_from_indices(test_indices)  # type: ignore
binaries_bets = test_nfc.make_bets_from_binaries(*test_binaries)
########################################################################################################################
class BetDecodingTest(unittest.TestCase):
    # Each decoding path (hash, indices, binaries) must yield the same
    # (bets_hash, indices, binaries) triple as the shared fixtures.
    def test_bet_hash_encoding(self):
        self.assertEqual(
            (hash_bets.bets_hash, hash_bets.indices, tuple(hash_bets)),
            test_expected_results,
        )
    def test_bet_indices_encoding(self):
        self.assertEqual(
            (indices_bets.bets_hash, indices_bets.indices, tuple(indices_bets)),
            test_expected_results,
        )
    def test_bet_binary_encoding(self):
        self.assertEqual(
            (binaries_bets.bets_hash, binaries_bets.indices, tuple(binaries_bets)),
            test_expected_results,
        )
class BetEquivalenceTest(unittest.TestCase):
    # Bets built from different encodings must compare equal to one another.
    def test_bet_equivalence(self):
        self.assertTrue(hash_bets == indices_bets and indices_bets == binaries_bets)
| 27.47541 | 120 | 0.498807 | 777 | 0.231802 | 0 | 0 | 0 | 0 | 0 | 0 | 637 | 0.190036 |
5bfcf311b8aa728cf923ee0a028bdc0a9bd5b881 | 384 | py | Python | backend/api/migrations/0023_auto_20190823_1611.py | yamamz/BRMI_LOANAPP | e6f79789855a633ee78a168452bca508622bcca8 | [
"MIT"
] | null | null | null | backend/api/migrations/0023_auto_20190823_1611.py | yamamz/BRMI_LOANAPP | e6f79789855a633ee78a168452bca508622bcca8 | [
"MIT"
] | 6 | 2020-06-05T22:43:22.000Z | 2022-02-10T12:32:19.000Z | backend/api/migrations/0023_auto_20190823_1611.py | yamamz/BRMI_LOANAPP | e6f79789855a633ee78a168452bca508622bcca8 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2019-08-23 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes Loan.loan_period to a FloatField
    # with a default of 0.0.
    dependencies = [
        ('api', '0022_auto_20190823_1553'),
    ]
    operations = [
        migrations.AlterField(
            model_name='loan',
            name='loan_period',
            field=models.FloatField(default=0.0),
        ),
    ]
| 20.210526 | 49 | 0.59375 | 291 | 0.757813 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.25 |
5bfdae4e26ca7424c71438bb03e0a6c9225cfd72 | 122 | py | Python | api/urls.py | trinhcaokhoa/Mebook_hub | bb889d990e7b1bc6fd84b2b34733b00151a52f0d | [
"MIT"
] | 1 | 2022-03-30T20:00:33.000Z | 2022-03-30T20:00:33.000Z | api/urls.py | trinhcaokhoa/Mebook_hub | bb889d990e7b1bc6fd84b2b34733b00151a52f0d | [
"MIT"
] | null | null | null | api/urls.py | trinhcaokhoa/Mebook_hub | bb889d990e7b1bc6fd84b2b34733b00151a52f0d | [
"MIT"
] | null | null | null | from django.urls import path
from .views import BookView
urlpatterns = [
path('book_api', BookView.as_view()),
] | 20.333333 | 42 | 0.696721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.081967 |
5bfeec944742d552c4975938f3ab15392025675a | 16,566 | py | Python | pythonnest/views.py | d9pouces/PythonNest | 53ad0c53f5c1b411a2af630099869e55a3549d22 | [
"CECILL-B"
] | 1 | 2017-05-01T20:00:14.000Z | 2017-05-01T20:00:14.000Z | pythonnest/views.py | d9pouces/PythonNest | 53ad0c53f5c1b411a2af630099869e55a3549d22 | [
"CECILL-B"
] | null | null | null | pythonnest/views.py | d9pouces/PythonNest | 53ad0c53f5c1b411a2af630099869e55a3549d22 | [
"CECILL-B"
] | 2 | 2015-07-30T18:14:50.000Z | 2019-11-02T10:06:59.000Z | """Here are defined Python functions of views.
Views are binded to URLs in :mod:`.urls`.
"""
import datetime
import hashlib
import json
import os
from distutils.version import LooseVersion
from json.encoder import JSONEncoder
from urllib.parse import quote
from django import forms
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse
from django.db.models import Q
from django.http import HttpResponse, Http404, QueryDict, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, resolve_url
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.http import is_safe_url, urlencode
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _, ugettext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.debug import sensitive_post_parameters
from pythonnest.models import Package, Release, ReleaseDownload, PackageRole, Classifier, Dependence, MEDIA_ROOT_LEN, \
PackageType, normalize_str
from pythonnest.xmlrpc import XMLRPCSite, site
__author__ = 'Matthieu Gallet'
# Timestamp format used when (de)serialising datetimes in JSON responses.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
XML_RPC_SITE = XMLRPCSite()
def prepare_query(previous_query, prefix, key, value, global_and=True):
    """Build a Django ``Q`` filter on ``prefix + key`` for *value* and combine
    it with *previous_query* (AND by default, OR when *global_and* is false).

    Multi-element lists/tuples become a normalised ``__in`` lookup; a
    one-element list/tuple is normalised and, like scalars, becomes an
    ``__icontains`` lookup.
    """
    lookup = prefix + key
    if isinstance(value, (list, tuple)) and len(value) > 1:
        criteria = {lookup + '__in': [normalize_str(x) for x in value]}
    else:
        if isinstance(value, (list, tuple)):
            value = normalize_str(value[0])
        criteria = {lookup + '__icontains': value}
    term = Q(**criteria)
    if previous_query is None:
        return term
    return (previous_query & term) if global_and else (previous_query | term)
class JSONDatetime(JSONEncoder):
    # JSON encoder that serialises datetime objects using the module-wide
    # DATE_FORMAT; everything else defers to the base JSONEncoder.
    def default(self, o):
        if isinstance(o, datetime.datetime):
            return o.strftime(DATE_FORMAT)
        return super(JSONDatetime, self).default(o)
@csrf_exempt
def xmlrpc(request):
    # Delegate the request to the module-level XML-RPC site dispatcher.
    return site.dispatch(request)
# noinspection PyUnusedLocal
def package_json(request, package_name):
    """Return JSON metadata ('info' and 'urls') for the latest release of
    *package_name*; 404 when the package has no release."""
    package = get_object_or_404(Package, normalized_name__iexact=normalize_str(package_name))
    # Latest release = highest primary key; only one row is fetched.
    releases = list(Release.objects.filter(package=package).order_by('-id')[0:1])
    if not releases:
        raise Http404
    release = releases[0]
    result = {'info': release.data(), 'urls': [x.data() for x in ReleaseDownload.objects.filter(release=release)]}
    return HttpResponse(json.dumps(result, ensure_ascii=False, cls=JSONDatetime, indent=4),
                        content_type='application/json')
# noinspection PyUnusedLocal
def version_json(request, package_name, version):
    """Return JSON metadata ('info' and 'urls') for one specific release."""
    release = get_object_or_404(Release, package__normalized_name__iexact=normalize_str(package_name), version=version)
    result = {'info': release.data(), 'urls': [x.data() for x in ReleaseDownload.objects.filter(release=release)]}
    return HttpResponse(json.dumps(result, ensure_ascii=False, cls=JSONDatetime, indent=4),
                        content_type='application/json')
class SearchForm(forms.Form):
    """Search form with a single free-text query field."""
    search = forms.CharField(max_length=255)
@never_cache
def simple(request, package_name=None, version=None):
    """List release downloads, optionally restricted to one package and,
    within it, to one version."""
    if package_name is not None:
        package = get_object_or_404(Package, normalized_name__iexact=normalize_str(package_name))
        if version is not None:
            release = get_object_or_404(Release, package=package, version__iexact=version)
            downloads = ReleaseDownload.objects.filter(release=release)
        else:
            downloads = ReleaseDownload.objects.filter(package=package)
    else:
        package = None
        downloads = ReleaseDownload.objects.all()
    template_values = {'package': package, 'downloads': downloads}
    return TemplateResponse(request, 'pythonnest/simple.html', template_values)
@csrf_exempt
def setup(request):
    """Package submission/upload endpoint.

    Parses a multipart/form-data POST by hand (boundary and charset taken
    from the Content-Type header), then — for the ':action' values 'submit'
    and 'file_upload' — creates/updates the Package and Release and, on
    upload, stores the file after an MD5 integrity check.
    """
    if request.method != 'POST':
        raise PermissionDenied(_('Only POST request are allowed'))
    # Extract boundary/charset from "multipart/form-data; boundary=...; charset=...".
    ct_type = request.META.get('CONTENT_TYPE', '')
    infos = [x.strip().partition('=') for x in ct_type.split(';')]
    boundary, encoding = None, 'ascii'
    for info in infos:
        if info[0] == 'boundary':
            boundary = info[2]
        elif info[0] == 'charset':
            encoding = info[2]
    if boundary is None:
        raise PermissionDenied(_('Invalid POST form'))
    # parse the POST query by hand
    mid_boundary = ('\n--' + boundary + '\n').encode(encoding)
    end_boundary = ('\n--' + boundary + '--\n').encode(encoding)
    fields = request.body.split(mid_boundary)
    values = QueryDict('', mutable=True, encoding=encoding)
    files = {}
    for part in fields:
        # Each part is "headers\n\nbody"; skip malformed parts.
        lines = part.split(b'\n\n', 1)
        if len(lines) != 2:
            continue
        infos = [x.strip().partition('=') for x in lines[0].decode(encoding).split(';')]
        key, filename = None, None
        for info in infos:
            if info[0] == 'name':
                key = info[2][1:-1]
            elif info[0] == 'filename':
                filename = info[2][1:-1]
        if key is None:
            continue
        value = lines[1]
        if value.endswith(end_boundary):
            value = value[:-len(end_boundary)]
        # Parts with a filename are file uploads; others are plain values.
        if filename is None:
            values.setlistdefault(key, [])
            values.appendlist(key, value)
        else:
            files[key] = filename, value
    # the POST data are parsed, let's go
    action = values.get(':action')
    if action in ('submit', 'file_upload'):
        package_name = values.get('name', '')
        version_name = values.get('version', '')
        if not package_name or not version_name:
            raise PermissionDenied(_('No package name provided'))
        if request.user.is_anonymous:
            return HttpResponse(ugettext('You must be authenticated'), status=401)
        # The first submitter becomes owner; later updates require a role
        # on the package (superusers bypass the check).
        package, package_created = Package.objects.get_or_create(name=package_name)
        if package_created:
            PackageRole(package=package, user=request.user, role=PackageRole.OWNER).save()
        elif not request.user.is_superuser:
            if PackageRole.objects.filter(package=package, user=request.user).count() == 0:
                return HttpResponse(ugettext('You are not allowed to update this package'), status=401)
        for attr_name in ('name', 'home_page', 'author_email', 'download_url', 'author', 'license', 'summary',
                          'maintainer', 'maintainer_email', 'project_url', ):
            if values.get(attr_name):
                setattr(package, attr_name, values.get(attr_name))
        package.save()
        release, created = Release.objects.get_or_create(package=package, version=version_name)
        # NOTE(review): this loop sets release-level fields on `package`, not
        # on `release`, and nothing below assigns fields to `release` before
        # it is saved — confirm whether `setattr(release, ...)` was intended.
        for attr_name in ('stable_version', 'description', 'platform', 'keywords', 'docs_url',):
            if values.get(attr_name):
                setattr(package, attr_name, values.get(attr_name))
        release.classifiers.clear()
        for classifier in values.getlist('classifiers', []):
            release.classifiers.add(Classifier.get(classifier))
        # Replace all dependency-style m2m collections with the posted values.
        for attr_name in ('requires', 'requires_dist', 'provides', 'provides_dist', 'obsoletes', 'obsoletes_dist',
                          'requires_external', 'requires_python'):
            getattr(release, attr_name).clear()
            for dep in values.getlist(attr_name, []):
                getattr(release, attr_name).add(Dependence.get(dep))
        release.save()
        if action == 'file_upload':
            if 'content' not in files:
                raise PermissionDenied
            filename, content = files['content']
            # noinspection PyUnboundLocalVariable
            if ReleaseDownload.objects.filter(package=package, release=release, filename=filename).count() > 0:
                raise PermissionDenied
            # Reject uploads whose MD5 does not match the announced digest.
            md5 = hashlib.md5(content).hexdigest()
            if md5 != values.get('md5_digest'):
                raise PermissionDenied
            download = ReleaseDownload(package=package, release=release, filename=filename)
            path = download.abspath
            path_dirname = os.path.dirname(path)
            if not os.path.isdir(path_dirname):
                os.makedirs(path_dirname)
            with open(path, 'wb') as out_fd:
                out_fd.write(content)
            download.md5_digest = md5
            download.size = len(content)
            download.upload_time = datetime.datetime.utcnow().replace(tzinfo=utc)
            download.url = settings.MEDIA_URL + path[MEDIA_ROOT_LEN:]
            download.file = download.relpath
            download.package_type = PackageType.get(values.get('filetype', 'source'))
            download.comment_text = values.get('comment', '')
            download.python_version = values.get('pyversion')
            download.log()
    template_values = {}
    return TemplateResponse(request, 'pythonnest/simple.html', template_values)
def search_result(request, query, alt_text):
    """Render a paginated (30 per page) result page for *query*.

    *alt_text* is extra hint text shown next to the results (may be None).
    """
    # Keep every query-string parameter except 'page' in pagination links.
    url_query = urlencode({k: v for (k, v) in request.GET.items() if k != 'page'})
    nav_url = '%s?%s' % (request.path, url_query)
    paginator = Paginator(query, 30)
    page_number = request.GET.get('page')
    try:
        result_page = paginator.page(page_number)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        result_page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        result_page = paginator.page(paginator.num_pages)
    template_values = {'result_page': result_page, 'nav_url': nav_url, 'alt_text': alt_text, 'query': query, }
    return TemplateResponse(request, 'pythonnest/search.html', template_values)
def index(request):
    """Index view, displaying and processing a form."""
    search = SearchForm(request.GET)
    if search.is_valid():
        orig_pattern = search.cleaned_data['search']
        # AND together one icontains filter per whitespace-separated term.
        patterns = orig_pattern.split()
        sub_query = None
        for pattern in patterns:
            sub_query = prepare_query(sub_query, '', 'normalized_name', pattern, global_and=True)
        query = Package.objects.filter(sub_query).distinct().select_related()
        alt_text = None
        # Suggest a wildcard search only when the user did not use '*'.
        if orig_pattern.find('*') == -1:
            alt_text = _('You should search “<a href="?search=%%2A%(pattern)s%%2A">*%(text)s*</a>” '
                         'to find more packages.') % {'pattern': quote(orig_pattern), 'text': escape(orig_pattern)}
        return search_result(request, query, alt_text)
    # No (valid) search: render the landing page with server-related settings.
    full_uri = settings.SERVER_BASE_URL[:-1]
    base_url = settings.SERVER_NAME
    use_ssl = settings.USE_SSL
    template_values = {'base_url': base_url, 'use_ssl': use_ssl, 'full_uri': full_uri, }
    return TemplateResponse(request, 'pythonnest/index.html', template_values)
def all_packages(request, order_by='normalized_name'):
    """Paginated listing of every package, ordered by *order_by*."""
    listing = Package.objects.all().select_related().order_by(order_by)
    return search_result(request, listing, _('No package matched your query.'))
def show_classifier(request, classifier_id):
    """Paginated listing of all releases tagged with the given classifier."""
    classifier = get_object_or_404(Classifier, id=classifier_id)
    alt_text = _('No package matched your query.')
    query = classifier.release_set.all().select_related('package')
    return search_result(request, query, alt_text)
def show_package(request, package_id, release_id=None):
    """Display a package page: roles, releases (newest version first) and
    the downloads of the selected release.

    When *release_id* is omitted the highest version is shown. Package
    admins additionally get (and this view processes) a form for granting
    roles to users.
    """
    package = get_object_or_404(Package, id=package_id)
    roles = PackageRole.objects.filter(package=package).order_by('role').select_related()
    releases = list(Release.objects.filter(package=package).order_by('-id').select_related())
    release = None
    # Order by parsed version number instead of primary key.
    releases = sorted(releases, key=lambda x: LooseVersion(str(x.version)), reverse=True)
    if release_id is not None:
        release = get_object_or_404(Release, id=release_id, package=package)
    elif releases:
        release = releases[0]
    class RoleForm(forms.Form):
        username = forms.CharField(max_length=255, label=_('Username'))
        role = forms.ChoiceField(required=False, widget=forms.Select(), choices=PackageRole.ROLES, label=_('Role'))
    downloads = ReleaseDownload.objects.filter(release=release).order_by('filename')
    is_admin = package.is_admin(request.user)
    add_user_form = None
    if is_admin:
        if request.method == 'POST':
            add_user_form = RoleForm(request.POST)
            if add_user_form.is_valid():
                username = add_user_form.cleaned_data['username']
                role = add_user_form.cleaned_data['role']
                # Unknown usernames are created on the fly.
                user = User.objects.get_or_create(username=username)[0]
                PackageRole.objects.get_or_create(package=package, user=user, role=int(role))
                return redirect('pythonnest.views.show_package', package_id=package_id)
        else:
            add_user_form = RoleForm()
    template_values = {'title': _('PythonNest'),
                       'package': package, 'roles': roles, 'is_admin': is_admin, 'add_user_form': add_user_form,
                       'is_editable': request.user in set([x.user for x in roles]),
                       'release': release, 'releases': releases, 'downloads': downloads, }
    return TemplateResponse(request, 'pythonnest/package.html', template_values)
@login_required
def delete_download(request, download_id):
    """Delete a download (owner or superuser only), remove its file from
    disk, and cascade-delete the release/package when they become empty."""
    download = get_object_or_404(ReleaseDownload, id=download_id)
    package = download.package
    release = download.release
    # Only package owners and superusers may delete downloads.
    if PackageRole.objects.filter(package=package, role=PackageRole.OWNER, user=request.user).count() == 0 and \
            not request.user.is_superuser:
        raise PermissionDenied
    abspath = download.abspath
    download.delete()
    if os.path.isfile(abspath):
        os.remove(abspath)
    # Drop the release when its last download is gone; redirect accordingly.
    if ReleaseDownload.objects.filter(release=release).count() == 0:
        release.delete()
        response = HttpResponseRedirect(reverse('pythonnest.views.show_package', kwargs={'package_id': package.id}))
    else:
        response = HttpResponseRedirect(reverse('pythonnest.views.show_package',
                                                kwargs={'package_id': package.id, 'release_id': release.id}))
    # Drop the package when its last release is gone.
    if Release.objects.filter(package=package).count() == 0:
        package.delete()
        response = HttpResponseRedirect(reverse('pythonnest.views.index'))
    return response
@login_required
def delete_role(request, role_id):
    """Remove a user's role on a package; restricted to package admins."""
    role = get_object_or_404(PackageRole, id=role_id)
    package = role.package
    if not package.is_admin(request.user):
        raise PermissionDenied
    role.delete()
    return redirect('pythonnest.views.show_package', package_id=package.id)
@sensitive_post_parameters()
@never_cache
def create_user(request, template_name='create_user.html',
                redirect_field_name=REDIRECT_FIELD_NAME,
                user_creation_form=UserCreationForm,
                current_app=None, extra_context=None):
    """
    Displays the user-creation form and handles the creation action.

    On a valid POST the new user is saved and the client is redirected to
    the (safety-checked) URL named by *redirect_field_name*; otherwise the
    form is rendered with *template_name*. *current_app* is accepted for
    signature compatibility and unused.
    """
    # ``request.REQUEST`` was removed in Django 1.9 (this file imports
    # ``django.urls.reverse``, i.e. Django >= 2.0), so look the redirect
    # target up in POST first, then GET.
    redirect_to = request.POST.get(redirect_field_name,
                                   request.GET.get(redirect_field_name, ''))
    if request.method == "POST":
        form = user_creation_form(data=request.POST)
        if form.is_valid():
            # Ensure the user-originating redirection url is safe.
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
            form.save(commit=True)
            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()
            return HttpResponseRedirect(redirect_to)
    else:
        form = user_creation_form()
    # Set a test cookie so the next request can verify cookie support.
    request.session.set_test_cookie()
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
| 43.825397 | 119 | 0.671315 | 509 | 0.030718 | 0 | 0 | 8,630 | 0.520821 | 0 | 0 | 2,255 | 0.136089 |
5bff77751373faff40182bd6235ab1b9ab10c668 | 158 | py | Python | dirbot/items.py | lihongqiang/crawl-image | d0132156143942628e041ccf2b8bff07b1873a60 | [
"Apache-2.0"
] | 1 | 2017-03-24T09:46:26.000Z | 2017-03-24T09:46:26.000Z | dirbot/items.py | lihongqiang/crawl-image | d0132156143942628e041ccf2b8bff07b1873a60 | [
"Apache-2.0"
] | 1 | 2017-03-24T08:51:17.000Z | 2017-03-24T08:51:17.000Z | dirbot/items.py | lihongqiang/crawl-image | d0132156143942628e041ccf2b8bff07b1873a60 | [
"Apache-2.0"
] | null | null | null | from scrapy.item import Item, Field
class MyImage(Item):
    # Scrapy item: source URLs, downloaded image records, stored file
    # paths, and a label for each crawled image.
    image_urls = Field()
    images = Field()
    image_paths = Field()
    image_label = Field()
| 15.8 | 35 | 0.651899 | 119 | 0.753165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7500cd4e14f32132aab50eca5049410d1341ad5f | 1,976 | py | Python | olea/core/blueprints/auth/views.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | olea/core/blueprints/auth/views.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | olea/core/blueprints/auth/views.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | null | null | null | from flask import g
from flask_json import json_response
from core.auth import allow_anonymous
from . import bp
from .forms import ForgetPwd, Login, Refresh, ResetPwd, SetPwd, VEmail
from .services import LemonMgr, PinkMgr
@bp.route('/login', methods=['POST'])
@allow_anonymous
def login():
    """Authenticate by name/password and issue a new lemon for the device."""
    form = Login()
    lemon = LemonMgr.login(name=form.name, pwd=form.pwd, device_id=form.device_id)
    return json_response(id=lemon.id, key=lemon.key, exp=lemon.expiration)
@bp.route('/set-pwd', methods=['POST'])
def set_pwd():
    """Set the current user's password and issue a fresh lemon."""
    form = SetPwd()
    lemon = PinkMgr(g.pink_id).set_pwd(pwd=form.pwd, device_id=form.device_id)
    return json_response(id=lemon.id, key=lemon.key, exp=lemon.expiration)
@bp.route('/forget-pwd', methods=['POST'])
@allow_anonymous
def forget_pwd():
    """Start the forgotten-password flow for the given name/email pair."""
    form = ForgetPwd()
    PinkMgr.forget_pwd(name=form.name, email=form.email)
    return json_response()
@bp.route('/reset-pwd', methods=['POST'])
@allow_anonymous
def reset_pwd():
    """Reset the password using a token and issue a new lemon."""
    form = ResetPwd()
    lemon = PinkMgr.reset_pwd(token=form.token, pwd=form.pwd, device_id=form.device_id)
    return json_response(id=lemon.id, key=lemon.key, exp=lemon.expiration)
@bp.route('/email-verification', methods=['POST'])
@allow_anonymous
def email_verification():
    """Trigger verification for the submitted email address."""
    form = VEmail()
    PinkMgr.verify_email(email=form.email)
    return json_response()
@bp.route('/refresh', methods=['POST'])
@allow_anonymous
def refresh():
    """Exchange a lemon id/key pair for a fresh access token."""
    form = Refresh()
    token, exp = LemonMgr(form.id).grant_access_token(key=form.key, device_id=form.device_id)
    return json_response(token=token, exp=exp)
@bp.route('/lemons/', methods=['GET'])
def sessions():
    """List all lemons (sessions) belonging to the current user."""
    lemons = PinkMgr(g.pink_id).all_lemons()
    return json_response(data_=lemons)
@bp.route('/<id_>/revoke-lemon', methods=['POST'])
def revoke_lemon(id_):
    """Revoke a single lemon identified by *id_*."""
    LemonMgr(id_).revoke()
    return json_response()
@bp.route('/revoke-all-lemons', methods=['POST'])
def revoke_all_lemons():
    """Revoke every lemon at once."""
    LemonMgr.revoke_all()
    return json_response()
| 26.702703 | 93 | 0.712551 | 0 | 0 | 0 | 0 | 1,724 | 0.87247 | 0 | 0 | 178 | 0.090081 |
7500edb1cdfbe9210d83cd5db498ecdcdcb19eb8 | 1,073 | py | Python | quantitative_finance/L3/python/setup.py | InAccel/Vitis_Libraries | 876e262201069f46a78050e23a13db20dd73ad8b | [
"Apache-2.0"
] | null | null | null | quantitative_finance/L3/python/setup.py | InAccel/Vitis_Libraries | 876e262201069f46a78050e23a13db20dd73ad8b | [
"Apache-2.0"
] | null | null | null | quantitative_finance/L3/python/setup.py | InAccel/Vitis_Libraries | 876e262201069f46a78050e23a13db20dd73ad8b | [
"Apache-2.0"
] | null | null | null | """Setup inaccel-vitis package."""
from setuptools import find_namespace_packages, setup
setup(
    # Distribution metadata for the 'inaccel' namespace package.
    name = 'inaccel-vitis',
    packages = find_namespace_packages(include = ['inaccel.*']),
    namespace_packages = ['inaccel'],
    version = '0.2',
    license = 'Apache-2.0',
    description = 'InAccel Vitis Libraries',
    author = 'InAccel',
    author_email = 'info@inaccel.com',
    url = 'https://docs.inaccel.com',
    keywords = ['InAccel Coral', 'FPGA', 'inaccel', 'Vitis'],
    install_requires = [
        'coral-api==2.*', 'opencv-python',
    ],
    include_package_data = True,
    zip_safe = True,
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    python_requires = '>=3.7',
)
| 33.53125 | 64 | 0.602982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 558 | 0.520037 |
75014674848a2f29086728ea03ae9d8586903acf | 2,472 | py | Python | leavemanager/cli.py | kiki407/leavemanager | a9fb3533d44735518239ece5b9c922062009349c | [
"MIT"
] | null | null | null | leavemanager/cli.py | kiki407/leavemanager | a9fb3533d44735518239ece5b9c922062009349c | [
"MIT"
] | null | null | null | leavemanager/cli.py | kiki407/leavemanager | a9fb3533d44735518239ece5b9c922062009349c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for leavemanager."""
import sys
import click
import leavemanager
from leavemanager.configuration import getconf, setconf, get_keys
from leavemanager.utils import slightlybeautify, clickDate
from leavemanager.leavemanager import Leave, AllLeave
from datetime import datetime
from dateutil.parser import parse
@click.group(chain=False)
@click.version_option(version=leavemanager.__version__)
def main(args=None):
    """Console script for leavemanager."""
    return 0
@main.command()
def configure():
    """
    Prompts user for configuration settings
    """
    keys = get_keys()
    actualconfig = getconf()
    newconf = {}
    click.echo("Modify configuration")
    # Each key is a (name, type) pair; prompt with the current value as default.
    for k in keys:
        newconf[k[0]] = click.prompt(
            f"{slightlybeautify(k[0])}".capitalize(),
            default=actualconfig.get(k[0], None),
            type=k[1],
        )
    setconf(newconf)
@main.command()
def sim():
    """Placeholder for simulation mode (not implemented yet)."""
    click.echo("simulation mode")
@main.command()
@click.argument("date", type=clickDate.DATE)
@click.option("--until", "-t", type=clickDate.DATE)
@click.option("--approved", "-A", type=click.BOOL)
def add(date, until, approved):
    """
    Adds a new entry in dates of leave
    """
    # NOTE(review): LeaveRange is neither imported nor defined in this module,
    # so the --until path will raise NameError — confirm the intended class.
    # NOTE(review): the --approved option is accepted but never used here.
    if until:
        leave = LeaveRange(date, until)
    else:
        leave = Leave(date)
    click.echo(leave.store())
@main.command()
@click.argument("date", type=clickDate.DATE)
def rem(date):
    """
    Removes an entry from leaves
    """
    leave = Leave(date)
    click.echo(leave.remove())
@main.group(chain=False)
def approve():
    """
    Approves leave
    """
    return 0
@approve.command()
@click.argument("date", type=clickDate.DATE)
def one(date):
    """Approves the single leave entry on DATE."""
    leave = Leave(date)
    click.echo(leave.approve())
@approve.command()
def old():
    """
    Auto approves leaves previous to today
    """
    leave = AllLeave()
    click.echo(leave.approve_old())
@main.command()
@click.option("--year", "-y", default="current", help="year of leave")
def report(year):
    """
    shows a report of days taken and days left for the year
    """
    leave = AllLeave()
    click.echo(leave.report(year))
@main.command()
@click.option("--year", "-y", default="current", help="year of leave")
def left(year):
    """
    Tell how many days are left
    """
    leave = AllLeave()
    res = leave.left(year=year)
    click.echo(f"{res}")
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| 21.310345 | 70 | 0.639159 | 0 | 0 | 0 | 0 | 2,021 | 0.817557 | 0 | 0 | 669 | 0.270631 |
75022887b6f82317b30d98bb409f69f029c65114 | 486 | py | Python | tests/frameworks/fast/test_fast_template.py | Jhsmit/awesome-panel-extensions | 41eba7cf84caa911be4ed0df2a96e16fc1e70263 | [
"CC-BY-4.0"
] | 3 | 2020-07-16T07:28:45.000Z | 2020-07-17T12:53:56.000Z | tests/frameworks/fast/test_fast_template.py | MarcSkovMadsen/panel-extensions-template | f41ad8d8fb8502f87de3a4992917cbffb6299012 | [
"CC-BY-4.0"
] | null | null | null | tests/frameworks/fast/test_fast_template.py | MarcSkovMadsen/panel-extensions-template | f41ad8d8fb8502f87de3a4992917cbffb6299012 | [
"CC-BY-4.0"
] | null | null | null | # pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
import panel as pn
from panel import Template
from awesome_panel_extensions.frameworks.fast import FastTemplate
def test_constructor():
    """FastTemplate should subclass panel's Template and keep the main area."""
    # Given
    column = pn.Column()
    main = [column]
    # When
    template = FastTemplate(main=main)
    # Then
    assert issubclass(FastTemplate, Template)
    assert template.main == main
| 27 | 93 | 0.753086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.343621 |
7502dff8f0fd39e9a5e925ef11b5e1b9ca9564a0 | 2,371 | py | Python | doc2json/jats2json/pmc_utils/back_tag_utils.py | josephcc/s2orc-doc2json | 8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d | [
"Apache-2.0"
] | 132 | 2021-02-15T18:16:12.000Z | 2022-03-29T04:47:17.000Z | doc2json/jats2json/pmc_utils/back_tag_utils.py | josephcc/s2orc-doc2json | 8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d | [
"Apache-2.0"
] | 6 | 2021-02-21T09:52:11.000Z | 2022-02-01T17:45:43.000Z | doc2json/jats2json/pmc_utils/back_tag_utils.py | josephcc/s2orc-doc2json | 8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d | [
"Apache-2.0"
] | 18 | 2021-02-15T18:18:05.000Z | 2022-03-11T19:37:47.000Z | from typing import Dict, List
def _wrap_text(tag):
return tag.text if tag else ''
def parse_authors(authors_tag) -> List:
    """The PMC XML has a slightly different format than authors listed in front tag."""
    if not authors_tag:
        return []
    parsed = []
    for name_tag in authors_tag.find_all('name', recursive=False):
        surname_tag = name_tag.find('surname')
        given_tag = name_tag.find('given-names')
        suffix_tag = name_tag.find('suffix')
        # Split the given names on spaces: first token is the first name,
        # remaining tokens are middle names.
        if given_tag:
            given_parts = given_tag.text.split(' ')
            first, middle = given_parts[0], given_parts[1:]
        else:
            first, middle = '', []
        parsed.append({
            'first': first,
            'middle': middle,
            'last': surname_tag.text if surname_tag else '',
            'suffix': suffix_tag.text if suffix_tag else ''
        })
    return parsed
def parse_bib_entries(back_tag) -> Dict:
    """Parse the <ref-list> of a PMC <back> tag into a dict keyed by ref label."""
    bib_entries = {}
    # TODO: PMC2778891 does not have 'ref-list' in its back_tag. do we even need this, or can directly .find_all('ref')?
    ref_list_tag = back_tag.find('ref-list')
    if ref_list_tag:
        for ref_tag in ref_list_tag.find_all('ref'):
            # The ref ID and label are semantically swapped between CORD-19 and PMC, lol
            ref_label = ref_tag['id']
            ref_id = ref_tag.find('label')
            authors_tag = ref_tag.find('person-group', {'person-group-type': 'author'})
            year = ref_tag.find('year')
            fpage = ref_tag.find('fpage')
            lpage = ref_tag.find('lpage')
            # Page range only when both first and last page are present.
            pages = f'{fpage.text}-{lpage.text}' if fpage and lpage else None
            dois = [tag.text for tag in ref_tag.find_all('pub-id', {'pub-id-type': 'doi'})]
            bib_entries[ref_label] = {
                'ref_id': _wrap_text(ref_id),
                'title': _wrap_text(ref_tag.find('article-title')),
                'authors': parse_authors(authors_tag),
                # Year is only parsed when it is purely numeric.
                'year': int(year.text) if year and year.text.isdigit() else None,
                'venue': _wrap_text(ref_tag.find('source')),
                'volume': _wrap_text(ref_tag.find('volume')),
                # NOTE(review): the 'issn' field is filled from the <issue> tag —
                # looks like a misnamed key; confirm against downstream consumers.
                'issn': _wrap_text(ref_tag.find('issue')),
                'pages': pages,
                'other_ids': {
                    'DOI': dois,
                }
            }
    return bib_entries | 42.339286 | 122 | 0.564319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.25601 |
750381121aa3cf7a3891c547ae46158207b13b45 | 968 | py | Python | machomachomangler/tests/test_destruct.py | dsteinmo/probewheel | 458cfaf319d793cae496cb902220b31697fb3c41 | [
"MIT"
] | null | null | null | machomachomangler/tests/test_destruct.py | dsteinmo/probewheel | 458cfaf319d793cae496cb902220b31697fb3c41 | [
"MIT"
] | null | null | null | machomachomangler/tests/test_destruct.py | dsteinmo/probewheel | 458cfaf319d793cae496cb902220b31697fb3c41 | [
"MIT"
] | null | null | null | import pytest
from ..destruct import StructType
# Struct layout shared by both test types: a 4-byte unsigned int and one byte.
_fields = [
    ("I", "magic"),
    ("c", "byte"),
]
# Little-endian (default) and big-endian variants of the same struct.
TEST = StructType("TEST", _fields)
TEST_BE = StructType("TEST_BE", _fields, endian=">")
def test_destruct():
    """Exercise StructType views: size, offsets, reads, writes, deletion."""
    assert TEST.size == 5
    assert TEST_BE.size == 5
    raw = bytearray(b"\x00\x00\x01\x02\x03\x04")
    view_0_le = TEST.view(raw, 0)
    view_1_be = TEST_BE.view(raw, 1)
    # NOTE(review): this loop asserts on view_0_le rather than the loop
    # variable `view` — presumably a typo; confirm intended assertions.
    for view in [view_0_le, view_1_be]:
        assert len(view_0_le) == 2
        assert view_0_le.size == 5
        # smoke test
        repr(view)
    assert view_0_le.end_offset == 5
    assert view_1_be.end_offset == 6
    assert dict(view_0_le) == {"magic": 0x02010000, "byte": b"\x03"}
    assert dict(view_1_be) == {"magic": 0x00010203, "byte": b"\x04"}
    # Writing through the view mutates the underlying bytearray in place.
    view_0_le["magic"] = 0x12345678
    assert raw == bytearray(b"\x78\x56\x34\x12\x03\x04")
    assert view_1_be["magic"] == 0x56341203
    with pytest.raises(NotImplementedError):
        del view_0_le["magic"]
| 26.162162 | 68 | 0.627066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.169421 |
75049038ef408868d10288b82c85095dff68131e | 1,178 | py | Python | baseline_configs/one_phase/one_phase_rgb_ppo.py | zju-zry/jueshaRearrangement. | 3499c917e613f9c347e5aa95b4f3186d68db7363 | [
"Apache-2.0"
] | 37 | 2020-10-12T19:08:11.000Z | 2022-02-13T07:09:53.000Z | baseline_configs/one_phase/one_phase_rgb_ppo.py | zju-zry/jueshaRearrangement. | 3499c917e613f9c347e5aa95b4f3186d68db7363 | [
"Apache-2.0"
] | 24 | 2021-02-25T21:37:34.000Z | 2022-03-21T06:25:52.000Z | baseline_configs/one_phase/one_phase_rgb_ppo.py | zju-zry/jueshaRearrangement. | 3499c917e613f9c347e5aa95b4f3186d68db7363 | [
"Apache-2.0"
] | 12 | 2021-02-18T06:15:33.000Z | 2022-03-03T15:18:19.000Z | from typing import Dict, Any
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import LinearDecay, PipelineStage
from baseline_configs.one_phase.one_phase_rgb_base import (
OnePhaseRGBBaseExperimentConfig,
)
class OnePhaseRGBPPOExperimentConfig(OnePhaseRGBBaseExperimentConfig):
    """One-phase RGB rearrangement experiment trained with plain PPO."""
    # Use the simple CNN visual encoder instead of a ResNet backbone.
    USE_RESNET_CNN = False
    @classmethod
    def tag(cls) -> str:
        """Short unique identifier used to label this experiment's outputs."""
        return "OnePhaseRGBPPO"
    @classmethod
    def num_train_processes(cls) -> int:
        """Number of parallel training processes to launch."""
        return 40
    @classmethod
    def _training_pipeline_info(cls, **kwargs) -> Dict[str, Any]:
        """Define how the model trains."""
        training_steps = cls.TRAINING_STEPS
        return dict(
            named_losses=dict(
                # PPO clip parameter is linearly decayed over the full run.
                ppo_loss=PPO(clip_decay=LinearDecay(training_steps), **PPOConfig)
            ),
            pipeline_stages=[
                # Single-stage pipeline: PPO for the entire training budget.
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=training_steps,)
            ],
            num_steps=64,
            num_mini_batch=1,
            update_repeats=3,
            use_lr_decay=True,
            lr=3e-4,
        )
| 29.45 | 87 | 0.663837 | 851 | 0.722411 | 0 | 0 | 736 | 0.624788 | 0 | 0 | 60 | 0.050934 |
75054ce723fd893b6681ad004d075813a927586a | 2,593 | py | Python | gachianalyzer/analysis_organizer.py | mitsu-ksgr/splatoon2-gachianalyzer | 9d964f514c85b626bc8e604a0bf3a2598ac91765 | [
"MIT"
] | null | null | null | gachianalyzer/analysis_organizer.py | mitsu-ksgr/splatoon2-gachianalyzer | 9d964f514c85b626bc8e604a0bf3a2598ac91765 | [
"MIT"
] | 1 | 2019-07-15T16:54:57.000Z | 2019-07-15T18:41:39.000Z | gachianalyzer/analysis_organizer.py | mitsu-ksgr/splatoon2-gachianalyzer | 9d964f514c85b626bc8e604a0bf3a2598ac91765 | [
"MIT"
] | null | null | null | import cv2
from .video_analyzer import VideoAnalyzer
class AnalysisOrganizer:
    """Organize the raw per-frame results produced by VideoAnalyzer.

    Records are merged into contiguous events, from which battle segments
    (start time and duration) can be extracted.
    """
    def __organize(self):
        """Re-group ``self.result`` into one record per contiguous event.

        Consecutive frames carrying the same ``event`` label are merged; a
        record is emitted each time the label changes.  The trailing run of
        frames is therefore never emitted (its end is unknown).
        """
        events = []
        # Robustness fix: an empty result list previously raised IndexError.
        if not self.result:
            return events
        prev = self.result[0]
        for i in range(1, len(self.result)):
            # Same label as the previous frame -> same event.
            # When the label changes, record the event that just ended.
            if prev['event'] != self.result[i]['event']:
                end = self.result[i - 1]
                events.append({
                    'event': prev['event'],
                    'start_time': prev['time'],
                    'end_time': end['time'],
                    'start_frame': prev['frame'],
                    'end_frame': end['frame']
                })
                prev = self.result[i]
        return events
    def __init__(self, analyze_results):
        """
        analyze_results - list of VideoAnalyzer result lists.
        #TODO also accept a flat (one-dimensional) list for non-parallel runs
        """
        # Flatten the per-worker sublists and order records by frame number.
        self.result = sorted(
            [x for sub in analyze_results for x in sub],
            key=lambda x: x['frame']
        )
        self.events = self.__organize()
    def dump(self):
        """Print every organized event for debugging."""
        for event in self.events:
            print('Time:{:4d} ~ {:4d}, Frame:{:5.0f} ~ {:5.0f}, Event={}'.format(
                event['start_time'], event['end_time'],
                event['start_frame'], event['end_frame'],
                event['event']
            ))
    def extract_battles(self):
        """Return (start_time, duration) for each battle segment.

        A battle runs from the end of a loading screen (or the start of the
        event preceding the udemae result) to the end of the money/rank
        result screen.
        """
        battles = []
        st_time = None
        battle_event = None
        # Bug fix: `prev` was previously unbound if the very first event was
        # 'ResultUdemae', raising UnboundLocalError.
        prev = None
        for event in self.events:
            e = event['event']
            if e == 'Loading':
                st_time = event['end_time']
            elif e == 'ResultUdemae':
                # The battle itself is the event right before the udemae result.
                if prev is not None:
                    battle_event = prev
                    st_time = battle_event['start_time']
            elif e == 'ResultOkaneRank':
                # No loading screen seen: assume recording started after the
                # load (mid-battle) and fall back to the battle event's start.
                if not st_time:
                    if not battle_event:
                        continue
                    st_time = battle_event['start_time']
                diff = int(event['end_time']) - int(st_time)
                battles.append((st_time, diff))
                st_time, battle_event = None, None
            elif e in ['LobbyStandby', 'LobbyModeSelect', 'LobbyFindBattle']:
                # Back in the lobby: any in-flight battle state is discarded.
                st_time, battle_event = None, None
            else:
                pass
            prev = event
        return battles
| 30.505882 | 81 | 0.482838 | 2,918 | 0.980181 | 0 | 0 | 0 | 0 | 0 | 0 | 1,120 | 0.376218 |
75079e3853a4f1c859f848cdaeaff5686ff6bb5e | 934 | py | Python | setup.py | sfable/malias | 6a5f6dc17eb42b6579e7981696427cf9deae56fb | [
"MIT"
] | null | null | null | setup.py | sfable/malias | 6a5f6dc17eb42b6579e7981696427cf9deae56fb | [
"MIT"
] | null | null | null | setup.py | sfable/malias | 6a5f6dc17eb42b6579e7981696427cf9deae56fb | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from malias import __version__
setup(
    name = 'malias',
    # Version is sourced from the package itself to keep a single source of truth.
    version = __version__,
    author = 'Stuart Fable',
    author_email = 'stuart.fable@gmail.com',
    license = 'MIT',
    keywords = 'malias system alias',
    description = '',
    url = 'https://github.com/sfable/malias',
    download_url = 'https://github.com/sfable/malias/archive/master.zip',
    packages = find_packages(),
    install_requires = ['docopt'],
    zip_safe = True,
    # Installs a `malias` console command pointing at malias.cli:main.
    entry_points = {
        'console_scripts': ['malias=malias.cli:main']
    },
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ]
)
| 30.129032 | 73 | 0.611349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.503212 |
75083e4350e39196ba53d1e4dfa29f5cc91a501b | 1,149 | py | Python | libs/strategies.py | jonowens/mr_si_boller_strategy | 53d02e62514442e285a002975536bb264d6c1e3c | [
"MIT"
] | null | null | null | libs/strategies.py | jonowens/mr_si_boller_strategy | 53d02e62514442e285a002975536bb264d6c1e3c | [
"MIT"
] | null | null | null | libs/strategies.py | jonowens/mr_si_boller_strategy | 53d02e62514442e285a002975536bb264d6c1e3c | [
"MIT"
] | null | null | null | # Strategies for trading
# Import necessary libraries
import pandas as pd
from stockstats import StockDataFrame as Sdf
def test_macd_strategy(stock_df, stock_symbol):
    ''' Tests MACD Strategy
    Args:
        stock_df (df): Asset dataframe containing ticker symbol key and
            column headings: 'open', 'close', 'high', 'low', and 'volume'.
    Returns:
        BUY, SELL, or HOLD string signal
    '''
    # Sort columns for stockdataframe
    data = stock_df[stock_symbol][['open', 'close', 'high', 'low', 'volume']]
    # Change from pandas dataframe to stockdataframe
    stock = Sdf.retype(data)
    # Signal line
    signal = stock['macds']
    # The MACD that needs to cross the signal line to give you a Buy/Sell signal
    macd = stock['macd']
    # NOTE(review): every branch below returns, so only i == 1 is ever
    # examined — the loop never scans later crossings. Confirm whether the
    # intent was to check only the earliest bar or the most recent one.
    for i in range(1, len(signal)):
        # If the MACD crosses the signal line upward
        if macd[i] > signal[i] and macd[i - 1] <= signal[i - 1]:
            return 'BUY'
        # The other way around
        elif macd[i] < signal[i] and macd[i - 1] >= signal[i - 1]:
            return 'SELL'
        # Do nothing if not crossed
        else:
            return 'HOLD'
| 30.236842 | 80 | 0.614447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.535248 |
750918e8872a1b7a4b9376e1b5490cc0edf34be0 | 210 | py | Python | hjhj.py | jatinchaudhary/python_dump | 5f7d63237fcb96a66bc3c4151599c7849f0f2735 | [
"bzip2-1.0.6"
] | null | null | null | hjhj.py | jatinchaudhary/python_dump | 5f7d63237fcb96a66bc3c4151599c7849f0f2735 | [
"bzip2-1.0.6"
] | null | null | null | hjhj.py | jatinchaudhary/python_dump | 5f7d63237fcb96a66bc3c4151599c7849f0f2735 | [
"bzip2-1.0.6"
] | null | null | null | a=int(input())
b=int(input())
c=int(input())
if(a>b and a>c):
print("number 1 is greatest")
elif(b>a and b>c):
print("number 2 is greatest")
else:
print("number 3 is greatest")
| 15 | 34 | 0.557143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.314286 |
7509211755a7245800dfbd3042066dfd9b209c87 | 288 | py | Python | answers/x_2_6.py | ofl/kuku | 76eefc0d3d859051473ee0d5f48b5d42d17d05a6 | [
"MIT"
] | null | null | null | answers/x_2_6.py | ofl/kuku | 76eefc0d3d859051473ee0d5f48b5d42d17d05a6 | [
"MIT"
] | 4 | 2021-09-23T03:19:52.000Z | 2021-11-13T10:38:21.000Z | answers/x_2_6.py | ofl/kuku | 76eefc0d3d859051473ee0d5f48b5d42d17d05a6 | [
"MIT"
] | null | null | null | # x_2_6
#
# Using the hints below, predict what values "a", "b", "c" and "d" will be.
# Hints
print(type('桃太郎'))
print(type(10))
print(type(12.3))
a = type('777') # => str
b = type(10 + 3.5) # => float
c = type(14 / 7) # => float
d = type(10_000_000) # => int
# print(a)
# print(b)
# print(c)
# print(d)
| 15.157895 | 43 | 0.541667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.609626 |
750a3aa09148f31ca78defa66da3c665dedc7552 | 695 | py | Python | Python with functions/W11-12/week 12/Activities/fruit.py | marcosamos/Python-tasks-and-proyects | 00426323647639016a407c40af1fd00f35ea2229 | [
"MIT"
] | null | null | null | Python with functions/W11-12/week 12/Activities/fruit.py | marcosamos/Python-tasks-and-proyects | 00426323647639016a407c40af1fd00f35ea2229 | [
"MIT"
] | null | null | null | Python with functions/W11-12/week 12/Activities/fruit.py | marcosamos/Python-tasks-and-proyects | 00426323647639016a407c40af1fd00f35ea2229 | [
"MIT"
def main():
    """Demonstrate common list operations on a list of fruit names."""
    # Build the initial list and show it.
    fruits = ["pear", "banana", "apple", "mango"]
    print(f"original: {fruits}")
    # In-place reversal.
    fruits.reverse()
    print(f"Reverse {fruits}")
    # Append a new element at the end.
    fruits.append("Orange")
    print(f"Append Orange {fruits}")
    # Insert "cherry" just before "apple".
    apple_idx = fruits.index("apple")
    fruits.insert(apple_idx, "cherry")
    print(f"insert cherry: {fruits}")
    # Remove by value.
    fruits.remove("banana")
    print(f"Remove Banana: {fruits}")
    # Pop the last element and show it.
    popped = fruits.pop()
    print(f"pop {popped}: {fruits}")
    # Sort alphabetically.
    fruits.sort()
    print(f"Sorted: {fruits}")
    # Finally empty the list.
    fruits.clear()
    print(f"cleared: {fruits}")
if __name__ == "__main__":
    main()
750b3b2ba41e96337a40f45d5e335eb02fee0e87 | 779 | py | Python | deepcode_go_to_file_and_ignore.py | TheSecEng/sublime-plugin | 8e43a175c7a282eb280e25ce73ddf2350a9adce7 | [
"MIT"
] | null | null | null | deepcode_go_to_file_and_ignore.py | TheSecEng/sublime-plugin | 8e43a175c7a282eb280e25ce73ddf2350a9adce7 | [
"MIT"
] | null | null | null | deepcode_go_to_file_and_ignore.py | TheSecEng/sublime-plugin | 8e43a175c7a282eb280e25ce73ddf2350a9adce7 | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
class DeepcodeGoToFileAndIgnoreCommand(sublime_plugin.TextCommand):
    """Open a file at a reported DeepCode issue and insert an ignore comment."""
    def open_file_and_add_deepcode_ignore_comment(
        self, view, file, row, col, type, reason
    ):
        """Insert a deepcode-ignore comment at (row, col) and center the view.

        ``row`` is 1-based, hence ``row - 1`` when converting to a text point.
        """
        view.run_command(
            "deep_code_ignore",
            {"point": view.text_point(row - 1, col), "type": type, "id": reason},
        )
        # Bug fix: the scroll must be passed as a callable. Previously
        # view.show_at_center(...) was invoked immediately and its None
        # return value was handed to set_timeout_async.
        sublime.set_timeout_async(
            lambda: view.show_at_center(view.text_point(row - 1, col)), 500
        )
    def run(self, edit, file, row, col, type, reason):
        """Open ``file`` and schedule the ignore-comment insertion."""
        view = self.view.window().open_file(file)
        # Defer until the file has (presumably) finished loading.
        sublime.set_timeout_async(
            lambda: self.open_file_and_add_deepcode_ignore_comment(
                view, file, row, col, type, reason
            ),
            500,
        )
| 31.16 | 81 | 0.607189 | 739 | 0.948652 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.044929 |
750bab79259e61720a722f8d803d015d29bbc248 | 8,817 | py | Python | src/generate_training_data.py | thesukantadey/OpeNPDN | 116ce3c4ba82d326f04041606008baf3049b12b0 | [
"BSD-3-Clause"
] | 13 | 2019-11-17T04:03:26.000Z | 2021-09-02T13:56:00.000Z | src/generate_training_data.py | thesukantadey/OpeNPDN | 116ce3c4ba82d326f04041606008baf3049b12b0 | [
"BSD-3-Clause"
] | 3 | 2020-07-02T14:51:08.000Z | 2022-02-04T07:46:41.000Z | src/generate_training_data.py | thesukantadey/OpeNPDN | 116ce3c4ba82d326f04041606008baf3049b12b0 | [
"BSD-3-Clause"
] | 12 | 2019-12-28T17:48:16.000Z | 2022-01-02T13:08:35.000Z | #BSD 3-Clause License
#
#Copyright (c) 2019, The Regents of the University of Minnesota
#
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 21 21:16:37 2019
This is the main code that runs simulated annleaing to generate the data
@author: Vidya A Chhabria
"""
import sys
import numpy as np
from create_template import define_templates
from T6_PSI_settings import T6_PSI_settings
from simulated_annealer import simulated_annealer
import random
from tqdm import tqdm
#from eliminate_templates import load_template_list
def main():
    """Run simulated annealing over power maps to generate PDN training data.

    Command line: [map_start num_maps ["no_congestion"]].  For each region of
    each current map, anneal a template assignment and append the resulting
    state, energy, IR drop, congestion and flattened current window; finally
    dump everything as CSV files into the parallel run directory.
    """
    # Read the json user input file and the current maps that need to be run
    # taken as an argument from the scripts
    settings_obj = T6_PSI_settings.load_obj()
    if len(sys.argv) == 3:
        map_start = int(sys.argv[1])
        num_maps = int(sys.argv[2])
        congestion_enabled = 1
    elif len(sys.argv) == 4:
        map_start = int(sys.argv[1])
        num_maps = int(sys.argv[2])
        if sys.argv[3] == "no_congestion":
            congestion_enabled = 0
        else:
            congestion_enabled = 1
    else:
        map_start = 1
        num_maps = 15
        congestion_enabled = 1
        print("Warning defaulting to %d %d and with congestion" % (map_start, num_maps))
        print(sys.argv)
    #print(num_maps)
    # Initialize the SA parameters
    T_init = 70
    T_final = 0.0005
    alpha_temp = 0.95
    num_moves_per_step = 5
    state = [] #np.zeros((num_maps, settings_obj.NUM_REGIONS))
    e = []#np.zeros(num_maps)
    max_drop = [] #np.zeros((num_maps, settings_obj.NUM_REGIONS))
    template_list = define_templates(settings_obj, generate_g=0)
    congestion = []
    all_templates = settings_obj.load_template_list()#range(settings_obj.NUM_TEMPLATES))
    # Region size in map cells; assumes WIDTH/LENGTH_REGION are in meters and
    # the map resolution is 1um per cell — TODO confirm units.
    size_region_x = int(settings_obj.WIDTH_REGION * 1e6)
    size_region_y = int(settings_obj.LENGTH_REGION * 1e6)
    current_maps = []
    for i in tqdm(range(num_maps)):
    #    print(i)
        power_map_file = settings_obj.map_dir + "current_map_%d.csv" % (
            i + map_start)
        currents = np.genfromtxt(power_map_file, delimiter=',')
        for y in range(settings_obj.current_map_num_regions):
            for x in range(settings_obj.current_map_num_regions):
                print("%d %d "%(x,y))
                #region and neighbors
                # current_region is a 3x3 window of regions centered on (x, y);
                # edges that fall outside the map are left zero-padded.
                current_region = np.zeros((3*size_region_x,3*size_region_y))
                init_state = np.zeros(9, int)
                # Random per-region signal congestion in [0.3, 1.0) when enabled.
                if congestion_enabled == 1 :
                    signal_cong = [0.3 + 0.7*random.uniform(0, 1) for _ in range(9) ]
                else:
                    signal_cong = [0 for _ in range(9) ]
                # Left edge of the map: only the center and right neighbors exist.
                if x == 0:
                    x_start = 0
                    x_end = x_start+2*size_region_x
                    if y == 0:
                        y_start = 0
                        y_end = y_start+2*size_region_y
                        current_region[size_region_x:,size_region_y:] = (
                            currents[x_start:x_end,y_start:y_end])
                    elif y == settings_obj.current_map_num_regions-1:
                        y_start = (y-1)*size_region_y
                        y_end = y_start+2*size_region_y
                        current_region[size_region_x:,0:2*size_region_y] = (
                            currents[x_start:x_end,y_start:y_end])
                    else:
                        y_start = (y-1)*size_region_y
                        y_end = y_start+3*size_region_y
                        current_region[size_region_x:,:] = (
                            currents[x_start:x_end,y_start:y_end])
                # Right edge of the map.
                elif x == settings_obj.current_map_num_regions-1:
                    x_start = (x-1)*size_region_x
                    x_end = x_start+2*size_region_x
                    if y == 0:
                        y_start = 0
                        y_end = y_start+2*size_region_y
                        current_region[0:2*size_region_x,size_region_y:] = (
                            currents[x_start:x_end,y_start:y_end])
                    elif y == settings_obj.current_map_num_regions-1:
                        y_start = (y-1)*size_region_y
                        y_end = y_start+2*size_region_y
                        current_region[0:2*size_region_x,0:2*size_region_y] = (
                            currents[x_start:x_end,y_start:y_end])
                    else:
                        y_start = (y-1)*size_region_y
                        y_end = y_start+3*size_region_y
                        current_region[0:2*size_region_x,:] = (
                            currents[x_start:x_end,y_start:y_end])
                # Interior column: full 3-region span in x.
                else:
                    x_start = (x-1)*size_region_x
                    x_end = x_start+3*size_region_x
                    if y == 0:
                        y_start = 0
                        y_end = y_start+2*size_region_y
                        current_region[:,size_region_y:] = (
                            currents[x_start:x_end,y_start:y_end])
                    elif y == settings_obj.current_map_num_regions-1:
                        y_start = (y-1)*size_region_y
                        y_end = y_start+2*size_region_y
                        current_region[:,0:2*size_region_y] = (
                            currents[x_start:x_end,y_start:y_end])
                    else:
                        y_start = (y-1)*size_region_y
                        y_end = y_start+3*size_region_y
                        current_region[:,:] = (
                            currents[x_start:x_end,y_start:y_end])
                # Anneal the template assignment for this window.
                pdn_opt = simulated_annealer(init_state, T_init, T_final,
                                             alpha_temp, num_moves_per_step,
                                             current_region,congestion_enabled)
                n_state, n_e, n_max_drop = pdn_opt.sim_anneal(
                    all_templates, template_list,signal_cong)
                state.append(n_state)
                max_drop.append(n_max_drop)
                congestion.append(signal_cong)
                current_maps.append(current_region.reshape(-1))
                e.append(n_e)
                #print(n_state,n_max_drop,signal_cong,n_e)
    # Dump all collected results for this slice of maps as CSVs.
    with open(
            settings_obj.parallel_run_dir + 'max_drop_%d_to_%d.csv' %
            (map_start, map_start + num_maps - 1), 'w') as outfile:
        np.savetxt(outfile, max_drop, delimiter=',', fmt='%f')
    with open(
            settings_obj.parallel_run_dir + 'state_%d_to_%d.csv' %
            (map_start, map_start + num_maps - 1), 'w') as outfile:
        np.savetxt(outfile, state, delimiter=',', fmt='%d')
    with open(
            settings_obj.parallel_run_dir + 'energy_%d_to_%d.csv' %
            (map_start, map_start + num_maps - 1), 'w') as outfile:
        np.savetxt(outfile, e, delimiter=',', fmt='%f')
    if congestion_enabled ==1:
        with open(
                settings_obj.parallel_run_dir + 'congest_%d_to_%d.csv' %
                (map_start, map_start + num_maps - 1), 'w') as outfile:
            np.savetxt(outfile,congestion, delimiter=',', fmt='%f')
    with open(
            settings_obj.parallel_run_dir + 'current_maps_%d_to_%d.csv' %
            (map_start, map_start + num_maps - 1), 'w') as outfile:
        np.savetxt(outfile,current_maps, delimiter=',', fmt='%f')
if __name__ == '__main__':
    main()
| 45.683938 | 88 | 0.589543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,434 | 0.276058 |
750cd8982da9405f375e4b29225621151e43efde | 7,859 | py | Python | otcextensions/sdk/cce/v3/cluster_node.py | kucerakk/python-otcextensions | d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6 | [
"Apache-2.0"
] | null | null | null | otcextensions/sdk/cce/v3/cluster_node.py | kucerakk/python-otcextensions | d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6 | [
"Apache-2.0"
] | null | null | null | otcextensions/sdk/cce/v3/cluster_node.py | kucerakk/python-otcextensions | d74d6aaa6dcf7c46d2c5fbe3676656baaf8e81d6 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack import resource
from otcextensions.sdk.cce.v3 import _base
class VolumeSpec(resource.Resource):
    """Disk (volume) specification of a CCE cluster node."""
    # Properties
    #: Disk Size in GB.
    size = resource.Body('size', type=int)
    #: Volume type: [SATA, SAS, SSD].
    type = resource.Body('volumetype')
class StatusSpec(resource.Resource):
    """Runtime status of a CCE cluster node."""
    # Properties
    #: ID of the VM where the node resides in the ECS.
    instance_id = resource.Body('serverId')
    #: Elastic IP address of a node.
    floating_ip = resource.Body('publicIP')
    #: Private IP address of a node.
    private_ip = resource.Body('privateIP')
    #: Status.
    status = resource.Body('phase')
class PublicIPSpec(resource.Resource):
    """Elastic/public IP configuration for cluster nodes."""
    # Properties:
    #: List of IDs for the existing floating ips.
    ids = resource.Body('ids')
    #: Count of the IP addresses to be dynamically created.
    count = resource.Body('count', type=int)
    #: Elastic IP address. Dict of {
    #:     type,
    #:     bandwidth:{
    #:         chargemode, size, sharetype
    #:     }
    #: }.
    floating_ip = resource.Body('eip', type=dict)
class NodeSpec(resource.Resource):
    """Specification of a CCE cluster node (flavor, AZ, disks, login, IP)."""
    # Properties
    #: Name of the AZ where the node resides.
    availability_zone = resource.Body('az')
    #: Billing mode of a node. Currently, only pay-per-use is supported.
    billing_mode = resource.Body('billingMode', type=int, default=0)
    #: Number of nodes.
    count = resource.Body('count', type=int)
    #: Data disk parameters of a node. At present, only one data
    #: disk can be configured
    data_volumes = resource.Body('dataVolumes', type=list,
                                 list_type=VolumeSpec)
    #: Flavor (mandatory)
    flavor = resource.Body('flavor')
    #: Elastic IP address parameters of a node.
    floating_ip = resource.Body('publicIP', type=PublicIPSpec)
    #: Parameters for logging in to the node.
    login = resource.Body('login')
    #: Operating System of the node. Currently only EulerOS is supported.
    os = resource.Body('os')
    #: System disk parameters of the node.
    root_volume = resource.Body('rootVolume', type=VolumeSpec)
class ClusterNode(_base.Resource):
    """A node belonging to a CCE cluster (CRUD + list supported)."""
    base_path = '/clusters/%(cluster_id)s/nodes'
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    # Properties
    #: Cluster id.
    cluster_id = resource.URI('cluster_id')
    #: Spec
    spec = resource.Body('spec', type=NodeSpec)
    #: Status
    status = resource.Body('status', type=StatusSpec)
    @classmethod
    def new(cls, **kwargs):
        """Create an unsynchronized instance, defaulting kind/apiVersion."""
        if 'kind' not in kwargs:
            kwargs['kind'] = 'Node'
        if 'apiVersion' not in kwargs:
            kwargs['apiVersion'] = 'v3'
        return cls(_synchronized=False, **kwargs)
    def __getattribute__(self, name):
        """Return an attribute on this instance
        This is mostly a pass-through except for a specialization on
        the 'id' name, as this can exist under a different name via the
        `alternate_id` argument to resource.Body.
        """
        # 'id' and 'name' may live in the metadata block instead of the body;
        # fall back to metadata['uid'] / metadata['name'] when absent.
        if name == 'id' or name == 'name':
            if name in self._body:
                return self._body[name]
            else:
                try:
                    metadata = self._body['metadata']
                    if name == 'id':
                        if isinstance(metadata, dict):
                            return metadata['uid']
                        elif isinstance(metadata, _base.Metadata):
                            return metadata._body[metadata._alternate_id()]
                    else:
                        if isinstance(metadata, dict):
                            return metadata['name']
                        elif isinstance(metadata, _base.Metadata):
                            return metadata.name
                except KeyError:
                    # No metadata (or missing key): treat as unset.
                    return None
        else:
            return object.__getattribute__(self, name)
    @classmethod
    def list(cls, session, paginated=True, base_path=None, **params):
        """This method is a generator which yields resource objects.
        This resource object list generator handles pagination and takes query
        params for response filtering.
        :param session: The session to use for making this request.
        :type session: :class:`~keystoneauth1.adapter.Adapter`
        :param bool paginated: ``True`` if a GET to this resource returns
                               a paginated series of responses, or ``False``
                               if a GET returns only one page of data.
                               **When paginated is False only one
                               page of data will be returned regardless
                               of the API's support of pagination.**
        :param str base_path: Base part of the URI for listing resources, if
                              different from
                              :data:`~openstack.resource.Resource.base_path`.
        :param dict params: These keyword arguments are passed through the
            :meth:`~openstack.resource.QueryParamter._transpose` method
            to find if any of them match expected query parameters to be
            sent in the *params* argument to
            :meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally
            checked against the
            :data:`~openstack.resource.Resource.base_path` format string
            to see if any path fragments need to be filled in by the contents
            of this argument.
        :return: A generator of :class:`Resource` objects.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
                 :data:`Resource.allow_list` is not set to ``True``.
        :raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query
                 contains invalid params.
        """
        if not cls.allow_list:
            raise exceptions.MethodNotSupported(cls, "list")
        session = cls._get_session(session)
        microversion = cls._get_microversion_for_list(session)
        if base_path is None:
            base_path = cls.base_path
        cls._query_mapping._validate(params, base_path=base_path)
        query_params = cls._query_mapping._transpose(params)
        uri = base_path % params
        # NOTE(review): the unconditional `return` at the bottom means this
        # loop runs at most once — only a single page is ever fetched,
        # regardless of `paginated`. Confirm whether the CCE API paginates.
        while uri:
            # Copy query_params due to weird mock unittest interactions
            response = session.get(
                uri,
                headers={
                    'Accept': 'application/json',
                    'Content-Type': 'application/json'},
                params=query_params.copy())
            exceptions.raise_from_response(response)
            if response.json() and 'items' in response.json():
                data = response.json()['items'] or []
                if cls.resources_key:
                    resources = data[cls.resources_key]
                else:
                    resources = data
                if not isinstance(resources, list):
                    resources = [resources]
                for raw_resource in resources:
                    value = cls.existing(
                        microversion=microversion,
                        connection=session._get_connection(),
                        **raw_resource)
                    yield value
            return
| 38.150485 | 78 | 0.600204 | 7,191 | 0.915002 | 3,321 | 0.422573 | 3,576 | 0.45502 | 0 | 0 | 3,931 | 0.500191 |
750de47d7f0a435cfabb43c7f53caeaa653c6db4 | 6,437 | py | Python | ceilometer/agent.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/agent.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/agent.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2013 Julien Danjou
# Copyright 2014 Red Hat, Inc
#
# Authors: Julien Danjou <julien@danjou.info>
# Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import six
from six.moves.urllib import parse as urlparse
from stevedore import extension
from ceilometer.openstack.common import context
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer.openstack.common import service as os_service
from ceilometer import pipeline
LOG = log.getLogger(__name__)
class Resources(object):
    """Accumulates the static resources and discovery URLs that a set of
    pipelines contributes, and resolves them on demand."""

    def __init__(self, agent_manager):
        self.agent_manager = agent_manager
        self._resources = []
        self._discovery = []

    def extend(self, pipeline):
        """Fold the given pipeline's static resources and discovery URLs in."""
        self._resources.extend(pipeline.resources)
        self._discovery.extend(pipeline.discovery)

    @property
    def resources(self):
        """Static resources plus anything found via per-pipeline discovery."""
        if self._discovery:
            discovered = self.agent_manager.discover(self._discovery)
        else:
            discovered = []
        return self._resources + discovered
class PollingTask(object):
    """Polling task for polling samples and inject into pipeline.

    A polling task can be invoked periodically or only once.
    """

    def __init__(self, agent_manager):
        self.manager = agent_manager
        self.pollsters = set()

        # we extend the amalgamation of all static resources for this
        # set of pollsters with a common interval, so as to also
        # include any dynamically discovered resources specific to
        # the matching pipelines (if either is present, the per-agent
        # default discovery is overridden)
        def resource_factory():
            # Named function instead of a name-bound lambda (PEP 8 E731).
            return Resources(agent_manager)

        self.resources = collections.defaultdict(resource_factory)

        self.publish_context = pipeline.PublishContext(
            agent_manager.context)

    def add(self, pollster, pipelines):
        """Register a pollster together with the pipelines that want its
        samples; each pipeline's resources are tracked under the pollster."""
        self.publish_context.add_pipelines(pipelines)
        for pipe_line in pipelines:
            self.resources[pollster.name].extend(pipe_line)
        self.pollsters.add(pollster)

    def poll_and_publish(self):
        """Polling sample and publish into pipeline."""
        # Per-agent default discovery, used only for pollsters that have no
        # pipeline-specific resources of their own.
        agent_resources = self.manager.discover()
        with self.publish_context as publisher:
            cache = {}  # shared by all pollsters within this single pass
            for pollster in self.pollsters:
                key = pollster.name
                LOG.info(_("Polling pollster %s"), key)
                source_resources = list(self.resources[key].resources)
                try:
                    samples = list(pollster.obj.get_samples(
                        manager=self.manager,
                        cache=cache,
                        resources=source_resources or agent_resources,
                    ))
                    publisher(samples)
                except Exception as err:
                    # A failing pollster must not abort the whole pass.
                    LOG.warning(_(
                        'Continue after error from %(name)s: %(error)s')
                        % ({'name': pollster.name, 'error': err}),
                        exc_info=True)
class AgentManager(os_service.Service):
    """Polling agent service: loads pollster and discovery extensions and
    runs one timer-driven PollingTask per distinct pipeline interval."""

    def __init__(self, namespace, default_discovery=None):
        super(AgentManager, self).__init__()
        default_discovery = default_discovery or []
        self.default_discovery = default_discovery
        # Pollsters live in a per-agent-type namespace
        # (e.g. ceilometer.poll.<namespace>); discovery plugins share one.
        self.pollster_manager = self._extensions('poll', namespace)
        self.discovery_manager = self._extensions('discover')
        self.context = context.RequestContext('admin', 'admin', is_admin=True)

    @staticmethod
    def _extensions(category, agent_ns=None):
        """Load stevedore extensions from 'ceilometer.<category>[.<agent_ns>]'."""
        namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns
                     else 'ceilometer.%s' % category)
        return extension.ExtensionManager(
            namespace=namespace,
            invoke_on_load=True,
        )

    def create_polling_task(self):
        """Create an initially empty polling task."""
        return PollingTask(self)

    def setup_polling_tasks(self):
        """Group supported pollsters into one PollingTask per interval.

        Returns a dict mapping pipeline interval -> PollingTask.
        """
        polling_tasks = {}
        for pipe_line, pollster in itertools.product(
                self.pipeline_manager.pipelines,
                self.pollster_manager.extensions):
            if pipe_line.support_meter(pollster.name):
                polling_task = polling_tasks.get(pipe_line.get_interval())
                if not polling_task:
                    polling_task = self.create_polling_task()
                    polling_tasks[pipe_line.get_interval()] = polling_task
                polling_task.add(pollster, [pipe_line])
        return polling_tasks

    def start(self):
        """Build the pipelines and schedule each polling task on a timer."""
        self.pipeline_manager = pipeline.setup_pipeline()
        for interval, task in six.iteritems(self.setup_polling_tasks()):
            self.tg.add_timer(interval,
                              self.interval_task,
                              task=task)

    @staticmethod
    def interval_task(task):
        # Timer callback: run one poll-and-publish pass for the task.
        task.poll_and_publish()

    @staticmethod
    def _parse_discoverer(url):
        """Split a discovery URL into (extension name, parameter).

        'scheme://rest' -> ('scheme', 'rest'); a bare name -> (name, None).
        """
        s = urlparse.urlparse(url)
        return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None)

    def _discoverer(self, name):
        """Return the discovery extension object named `name`, or None."""
        for d in self.discovery_manager:
            if d.name == name:
                return d.obj
        return None

    def discover(self, discovery=None):
        """Run the given discovery URLs (or the agent defaults) and return
        the concatenated resources; failures are logged, never raised."""
        resources = []
        for url in (discovery or self.default_discovery):
            name, param = self._parse_discoverer(url)
            discoverer = self._discoverer(name)
            if discoverer:
                try:
                    discovered = discoverer.discover(param)
                    resources.extend(discovered)
                except Exception as err:
                    LOG.exception(_('Unable to discover resources: %s') % err)
            else:
                LOG.warning(_('Unknown discovery extension: %s') % name)
        return resources
| 36.782857 | 78 | 0.632282 | 5,325 | 0.827249 | 0 | 0 | 753 | 0.11698 | 0 | 0 | 1,388 | 0.215628 |
750e393db3ff6b81b70b29c6cc402c893928186d | 11,366 | py | Python | tests/legacy/test_pypi.py | hickford/warehouse | 87d3403b3e09d1213177133744f20430fca030ca | [
"Apache-2.0"
] | 1 | 2019-01-31T02:44:40.000Z | 2019-01-31T02:44:40.000Z | tests/legacy/test_pypi.py | hickford/warehouse | 87d3403b3e09d1213177133744f20430fca030ca | [
"Apache-2.0"
] | 1 | 2016-09-14T00:57:23.000Z | 2016-09-14T00:57:23.000Z | tests/legacy/test_pypi.py | hickford/warehouse | 87d3403b3e09d1213177133744f20430fca030ca | [
"Apache-2.0"
] | 1 | 2020-11-05T09:46:12.000Z | 2020-11-05T09:46:12.000Z | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import pretend
import pytest
from werkzeug.exceptions import NotFound, BadRequest
from werkzeug.routing import Map
from warehouse.legacy import pypi, xmlrpc
from warehouse.packaging import urls
@pytest.mark.parametrize("content_type", [None, "text/html", "__empty__"])
def test_pypi_index(content_type):
    """A plain request for /pypi permanently redirects to the site root,
    regardless of (or absent) Content-Type."""
    headers = {}
    if content_type != "__empty__":
        headers["Content-Type"] = content_type

    app = pretend.stub()
    request = pretend.stub(
        args={},
        headers=headers,
        url_adapter=pretend.stub(
            build=pretend.call_recorder(
                lambda *a, **kw: "/",
            ),
        ),
    )

    # request for /pypi with no additional request information redirects
    # to site root
    #
    resp = pypi.pypi(app, request)

    assert resp.status_code == 301
    assert resp.headers["Location"] == "/"
    assert request.url_adapter.build.calls == [
        pretend.call(
            "warehouse.views.index",
            {},
            force_external=False,
        ),
    ]
def test_pypi_route_action(monkeypatch):
    """A handler registered for an ':action' name is dispatched by pypi.pypi()."""
    app = pretend.stub()
    request = pretend.stub(
        args={':action': 'test'},
        headers={},
    )

    # Start from an empty registry so only our handler is present.
    _action_methods = {}
    monkeypatch.setattr(pypi, '_action_methods', _action_methods)

    @pypi.register('test')
    def test(app, request):
        test.called = True
        return 'success'

    resp = pypi.pypi(app, request)

    assert resp == 'success'
    assert test.called
def test_pypi_route_action_double(monkeypatch):
    """Registering the same ':action' name twice raises KeyError."""
    _action_methods = {'test': None}
    monkeypatch.setattr(pypi, '_action_methods', _action_methods)

    with pytest.raises(KeyError):
        pypi.register('test')
def test_daytime(monkeypatch):
    """The daytime endpoint renders the current time in the legacy format."""
    app = pretend.stub()
    request = pretend.stub()

    # Freeze time at the Unix epoch so the response body is deterministic.
    monkeypatch.setattr(time, 'time', lambda: 0)

    resp = pypi.daytime(app, request)

    assert resp.response[0] == b'19700101T00:00:00\n'
@pytest.mark.parametrize(("version", "callback"), [
    (None, None),
    (None, 'yes'),
    ('1.0', 'yes'),
    ('1.0', None),
])
def test_json(monkeypatch, version, callback):
    """project_json renders info/releases/urls for a project, defaulting to
    the latest version, and wraps the body in JSONP when a callback is given."""
    get_project = pretend.call_recorder(lambda n: {'name': 'spam'})
    get_project_versions = pretend.call_recorder(lambda n: ['2.0', '1.0'])
    get_last_serial = pretend.call_recorder(lambda *n: 42)
    app = pretend.stub(
        config=pretend.stub(cache=pretend.stub(browser=False, varnish=False)),
        db=pretend.stub(
            packaging=pretend.stub(
                get_project=get_project,
                get_project_versions=get_project_versions,
                get_last_serial=get_last_serial,
            )
        )
    )
    request = pretend.stub(args={})
    if callback:
        request.args['callback'] = callback

    # Stub out the XML-RPC interface the view delegates to.
    release_data = pretend.call_recorder(lambda n, v: dict(some='data'))
    release_urls = pretend.call_recorder(lambda n, v: [dict(
        some='url',
        upload_time=datetime.date(1970, 1, 1)
    )])
    all_release_urls = pretend.call_recorder(lambda *n: {
        '1.0': [dict(some='data', upload_time=datetime.date(1970, 1, 1))],
        '2.0': [dict(some='data', upload_time=datetime.date(1970, 1, 1))],
    })
    Interface = pretend.call_recorder(lambda a, r: pretend.stub(
        release_data=release_data,
        release_urls=release_urls,
        all_release_urls=all_release_urls,
    ))
    monkeypatch.setattr(xmlrpc, 'Interface', Interface)

    resp = pypi.project_json(app, request, project_name='spam',
                             version=version)

    # With no explicit version, the latest ('2.0') is used.
    assert get_project.calls == [pretend.call('spam')]
    assert get_project_versions.calls == [pretend.call('spam')]
    assert release_data.calls == [pretend.call('spam', version or '2.0')]
    assert release_urls.calls == [pretend.call('spam', version or '2.0')]
    assert all_release_urls.calls == [pretend.call('spam')]
    assert get_last_serial.calls == [pretend.call()]
    expected = '{"info": {"some": "data"}, ' \
        '"releases": ' \
        '{"1.0": [{"some": "data", "upload_time": "1970-01-01T00:00:00"}], ' \
        '"2.0": [{"some": "data", "upload_time": "1970-01-01T00:00:00"}]}, ' \
        '"urls": [{"some": "url", "upload_time": "1970-01-01T00:00:00"}]}'
    if callback:
        expected = '/**/ %s(%s);' % (callback, expected)
    assert resp.data == expected.encode("utf8")
def test_jsonp_invalid():
    """A JSONP callback that is not a valid identifier yields a 400."""
    app = pretend.stub()
    request = pretend.stub(args={'callback': 'quite invalid'})
    with pytest.raises(BadRequest):
        pypi.project_json(app, request, project_name='spam')
@pytest.mark.parametrize(("project", "version"), [
    (None, None),
    (pretend.stub(name="spam"), None),
    (pretend.stub(name="spam"), '1'),
])
def test_json_missing(monkeypatch, project, version):
    """An unknown project, or a project with no (matching) versions,
    yields a 404 from the JSON view."""
    return_value = {'name': project} if project else None
    get_project = pretend.call_recorder(lambda n: return_value)
    get_project_versions = pretend.call_recorder(lambda n: [])
    app = pretend.stub(
        db=pretend.stub(
            packaging=pretend.stub(
                get_project=get_project,
                get_project_versions=get_project_versions,
            )
        )
    )
    request = pretend.stub(args={})

    with pytest.raises(NotFound):
        pypi.project_json(app, request, project_name='spam', version=version)
def test_rss(app, monkeypatch):
    """The updates feed renders the 40 most recently updated releases with
    per-version project URLs."""
    now = datetime.datetime.utcnow()
    get_recently_updated = pretend.call_recorder(lambda num=10: [
        dict(name='spam', version='1.0', summary='hai spam', created=now),
        dict(name='ham', version='2.0', summary='hai ham', created=now),
        dict(name='spam', version='2.0', summary='hai spam v2', created=now),
    ])
    app.db = pretend.stub(
        packaging=pretend.stub(
            get_recently_updated=get_recently_updated,
        )
    )
    app.config = pretend.stub(
        site={"url": "http://test.server/", "name": "PyPI"},
    )
    request = pretend.stub(
        url_adapter=Map(urls.urls).bind('test.server', '/'),
    )

    resp = pypi.rss(app, request)

    assert resp.response.context == {
        "description": "package updates",
        "site": {"name": "PyPI", "url": "http://test.server/"},
        "releases": [
            {
                "url": "http://test.server/project/spam/1.0/",
                "version": "1.0",
                "name": "spam",
                "summary": "hai spam",
                "created": now,
            },
            {
                "url": "http://test.server/project/ham/2.0/",
                "version": "2.0",
                "name": "ham",
                "summary": "hai ham",
                "created": now,
            },
            {
                "url": "http://test.server/project/spam/2.0/",
                "version": "2.0",
                "name": "spam",
                "summary": "hai spam v2",
                "created": now,
            }
        ],
    }
    assert get_recently_updated.calls == [pretend.call(num=40)]
def test_packages_rss(app, monkeypatch):
    """The new-projects feed renders the 40 newest projects with
    version-less project URLs."""
    now = datetime.datetime.utcnow()
    get_recent_projects = pretend.call_recorder(lambda num=10: [
        dict(name='spam', version='1.0', summary='hai spam', created=now),
        dict(name='ham', version='2.0', summary='hai ham', created=now),
        dict(name='eggs', version='21.0', summary='hai eggs!', created=now),
    ])
    app.db = pretend.stub(
        packaging=pretend.stub(
            get_recent_projects=get_recent_projects,
        )
    )
    app.config = pretend.stub(
        site={"url": "http://test.server/", "name": "PyPI"},
    )
    request = pretend.stub(
        url_adapter=Map(urls.urls).bind('test.server', '/'),
    )

    resp = pypi.packages_rss(app, request)

    assert resp.response.context == {
        "description": "new projects",
        "site": {"name": "PyPI", "url": "http://test.server/"},
        "releases": [
            {
                "url": "http://test.server/project/spam/",
                "version": "1.0",
                "name": "spam",
                "summary": "hai spam",
                "created": now,
            },
            {
                "url": "http://test.server/project/ham/",
                "version": "2.0",
                "name": "ham",
                "summary": "hai ham",
                "created": now,
            },
            {
                "url": "http://test.server/project/eggs/",
                "version": "21.0",
                "name": "eggs",
                "summary": "hai eggs!",
                "created": now,
            },
        ],
    }
    assert get_recent_projects.calls == [pretend.call(num=40)]
def test_rss_xml_template(app, monkeypatch):
    """legacy/rss.xml renders an RSS 0.91 feed from the release context."""
    template = app.templates.get_template('legacy/rss.xml')
    content = template.render(
        site=dict(url='http://test.server/', name="PyPI"),
        description='package updates',
        releases=[
            {
                'url': 'http://test.server/project/spam/',
                'version': u'1.0',
                'name': u'spam',
                'summary': u'hai spam',
                'created': datetime.date(1970, 1, 1),
            }, {
                'url': 'http://test.server/project/ham/',
                'version': u'2.0',
                'name': u'ham',
                'summary': u'hai ham',
                'created': datetime.date(1970, 1, 1),
            }, {
                'url': 'http://test.server/project/eggs/',
                'version': u'21.0',
                'name': u'eggs',
                'summary': u'hai eggs!',
                'created': datetime.date(1970, 1, 1),
            }
        ],
    )
    # NOTE(review): leading whitespace inside this expected literal may have
    # been lost in this copy of the file — verify against the actual rendered
    # template output before relying on an exact match.
    assert content == '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN" \
"http://my.netscape.com/publish/formats/rss-0.91.dtd">
<rss version="0.91">
<channel>
<title>PyPI Recent Package Updates</title>
<link>http://test.server/</link>
<description>Recent package updates at PyPI</description>
<language>en</language>
\n\
<item>
<title>spam 1.0</title>
<link>http://test.server/project/spam/</link>
<guid>http://test.server/project/spam/</guid>
<description>hai spam</description>
<pubDate>01 Jan 1970 00:00:00 GMT</pubDate>
</item>
\n\
<item>
<title>ham 2.0</title>
<link>http://test.server/project/ham/</link>
<guid>http://test.server/project/ham/</guid>
<description>hai ham</description>
<pubDate>01 Jan 1970 00:00:00 GMT</pubDate>
</item>
\n\
<item>
<title>eggs 21.0</title>
<link>http://test.server/project/eggs/</link>
<guid>http://test.server/project/eggs/</guid>
<description>hai eggs!</description>
<pubDate>01 Jan 1970 00:00:00 GMT</pubDate>
</item>
\n\
</channel>
</rss>'''
| 31.572222 | 78 | 0.570649 | 0 | 0 | 0 | 0 | 4,093 | 0.360109 | 0 | 0 | 3,788 | 0.333275 |
75141fdc0d4a4c71c0178c6ccd7f200961cef89d | 8,204 | py | Python | analysis/gfp/seqtools.py | johli/genesis | 5424c1888d4330e505ad87412e7f1cc5dd828888 | [
"MIT"
] | 12 | 2020-02-02T14:29:15.000Z | 2021-09-12T08:05:43.000Z | analysis/gfp/seqtools.py | johli/genesis | 5424c1888d4330e505ad87412e7f1cc5dd828888 | [
"MIT"
] | 1 | 2022-01-04T08:04:00.000Z | 2022-01-10T08:49:04.000Z | analysis/gfp/seqtools.py | johli/genesis | 5424c1888d4330e505ad87412e7f1cc5dd828888 | [
"MIT"
] | 3 | 2020-03-10T22:24:05.000Z | 2021-05-05T13:23:01.000Z | import numpy as np
class SequenceTools(object):
dna2gray_ = {'c': (0, 0), 't': (1, 0), 'g': (1, 1), 'a': (0, 1)}
gray2dna_ = {(0, 0): 'c', (1, 0): 't', (1, 1): 'g', (0, 1): 'a'}
codon2protein_ = {'ttt': 'f', 'ttc': 'f', 'tta': 'l', 'ttg': 'l', 'tct': 's', 'tcc': 's', 'tca': 's',
'tcg': 's', 'tat': 'y', 'tac': 'y', 'taa': '!', 'tag': '!', 'tgt': 'c', 'tgc': 'c',
'tga': '!', 'tgg': 'w', 'ctt': 'l', 'ctc': 'l', 'cta': 'l', 'ctg': 'l', 'cct': 'p',
'ccc': 'p', 'cca': 'p', 'ccg': 'p', 'cat': 'h', 'cac': 'h', 'caa': 'q', 'cag': 'q',
'cgt': 'r', 'cgc': 'r', 'cga': 'r', 'cgg': 'r', 'att': 'i', 'atc': 'i', 'ata': 'i',
'atg': 'm', 'act': 't', 'acc': 't', 'aca': 't', 'acg': 't', 'aat': 'n', 'aac': 'n',
'aaa': 'k', 'aag': 'k', 'agt': 's', 'agc': 's', 'aga': 'r', 'agg': 'r', 'gtt': 'v',
'gtc': 'v', 'gta': 'v', 'gtg': 'v', 'gct': 'a', 'gcc': 'a', 'gca': 'a', 'gcg': 'a',
'gat': 'd', 'gac': 'd', 'gaa': 'e', 'gag': 'e', 'ggt': 'g', 'ggc': 'g', 'gga': 'g',
'ggg': 'g'}
protein2codon_ = {
'l': ['tta', 'ttg', 'ctt', 'ctc', 'cta', 'ctg'],
's': ['tct', 'tcc', 'tca', 'tcg', 'agt', 'agc'],
'r': ['cgt', 'cgc', 'cga', 'cgg', 'aga', 'agg'],
'v': ['gtt', 'gtc', 'gta', 'gtg'],
'a': ['gct', 'gcc', 'gca', 'gcg'],
'p': ['cct', 'ccc', 'cca', 'ccg'],
't': ['act', 'acc', 'aca', 'acg'],
'g': ['ggt', 'ggc', 'gga', 'ggg'],
'stop': ['taa', 'tag', 'tga'],
'i': ['att', 'atc', 'ata'],
'y': ['tat', 'tac'],
'f': ['ttt', 'ttc'],
'c': ['tgt', 'tgc'],
'h': ['cat', 'cac'],
'q': ['caa', 'cag'],
'n': ['aat', 'aac'],
'k': ['aaa', 'aag'],
'd': ['gat', 'gac'],
'e': ['gaa', 'gag'],
'w': ['tgg'],
'm': ['atg']
}
protein2constraint_ = {
'l': {(1,): {('t',)}, (0, 2): {('t', 'a'), ('t', 'g'), ('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g')}},
's': {(0, 1, 2): {('t', 'c', 't'), ('t', 'c', 'c'), ('t', 'c', 'a'), ('t', 'c', 'g'), ('a', 'g', 't'),
('a', 'g', 'c')}},
'r': {(1,): {('g',)}, (0, 2): {('c', 't'), ('c', 'c'), ('c', 'a'), ('c', 'g'), ('a', 'a'), ('a', 'g')}},
'v': {(0,): {('g',)}, (1,): {('t',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'a': {(0,): {('g',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'p': {(0,): {('c',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
't': {(0,): {('a',)}, (1,): {('c',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'g': {(0,): {('g',)}, (1,): {('g',)}, (2,): {('g',), ('t',), ('a',), ('c',)}},
'stop': {(0,): {('t',)}, (1, 2): {('a', 'a'), ('a', 'g'), ('g', 'a')}},
'i': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('t',), ('a',), ('c',)}},
'y': {(0,): {('t',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'f': {(0,): {('t',)}, (1,): {('t',)}, (2,): {('t',), ('c',)}},
'c': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('t',), ('c',)}},
'h': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'q': {(0,): {('c',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'n': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'k': {(0,): {('a',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'd': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('t',), ('c',)}},
'e': {(0,): {('g',)}, (1,): {('a',)}, (2,): {('a',), ('g',)}},
'w': {(0,): {('t',)}, (1,): {('g',)}, (2,): {('g',)}},
'm': {(0,): {('a',)}, (1,): {('t',)}, (2,): {('g',)}},
}
# Integer mapping from Fernandes and Vinga (2016)
codon2idx_ = {'aaa': 1, 'aac': 2, 'aag': 3, 'aat': 4, 'aca': 5, 'acc': 6, 'acg': 7, 'act': 8, 'aga': 9,
'agc': 10, 'agg': 11, 'agt': 12, 'ata': 13, 'atc': 14, 'atg': 15, 'att': 16, 'caa': 17,
'cac': 18, 'cag': 19, 'cat': 20, 'cca': 21, 'ccc': 22, 'ccg': 23, 'cct': 24, 'cga': 25,
'cgc': 26, 'cgg': 27, 'cgt': 28, 'cta': 29, 'ctc': 30, 'ctg': 31, 'ctt': 32, 'gaa': 33,
'gac': 34, 'gag': 35, 'gat': 36, 'gca': 37, 'gcc': 38, 'gcg': 39, 'gct': 40, 'gga': 41,
'ggc': 42, 'ggg': 43, 'ggt': 44, 'gta': 45, 'gtc': 46, 'gtg': 47, 'gtt': 48, 'taa': 49,
'tac': 50, 'tag': 51, 'tat': 52, 'tca': 53, 'tcc': 54, 'tcg': 55, 'tct': 56, 'tga': 57,
'tgc': 58, 'tgg': 59, 'tgt': 60, 'tta': 61, 'ttc': 62, 'ttg': 63, 'ttt': 64}
@staticmethod
def convert_dna_to_rna(seq):
dna2rna = {'t': 'u', 'a': 'a', 'g': 'g', 'c': 'c'}
return "".join([dna2rna[s] for s in seq])
@staticmethod
def convert_dna_arr_to_str(dna_arr, base_order='ATCG'):
""" Convert N x 4 tokenized array into length N string """
dna_seq_str = ''
for i in range(dna_arr.shape[0]):
token = np.argmax(dna_arr[i, :])
dna_seq_str += base_order[token]
return dna_seq_str
@staticmethod
def get_aa_codons():
aa_list = sorted(list(SequenceTools.protein2codon_.keys()))
aa_codons = np.zeros((len(aa_list), 6, 3, 4))
i = 0
for aa in aa_list:
cods = SequenceTools.protein2codon_[aa]
j = 0
for c in cods:
cod_arr = SequenceTools.convert_dna_str_to_arr(c)
aa_codons[i, j] = cod_arr
j += 1
i += 1
return aa_codons
@staticmethod
def convert_dna_str_to_arr(dna_str, base_order='ATCG'):
""" Convert length N string into N x 4 tokenized array"""
dna_str = dna_str.upper()
N = len(dna_str)
dna_arr = np.zeros((N, 4))
for i in range(N):
idx = base_order.index(dna_str[i])
dna_arr[i, idx] = 1.
return dna_arr
@staticmethod
def convert_dna_arr_to_gray(dna_arr, base_order='ATCG'):
""" Convert N x 4 tokenized array into 2N x 2 tokenized gray code array"""
N = dna_arr.shape[0]
gray_arr = np.zeros((2 * N, 2))
for i in range(N):
token = np.argmax(dna_arr[i, :])
dna_i = base_order[token]
gray_i = SequenceTools.dna2gray_[dna_i]
for j in range(2):
gray_arr[2 * i + j, gray_i[j]] = 1
return gray_arr
@staticmethod
def convert_gray_to_dna_str(gray_arr):
Ngray = gray_arr.shape[0]
dna_str = ''
i = 0
while i < Ngray:
g1 = int(np.argmax(gray_arr[i, :]))
g2 = int(np.argmax(gray_arr[i + 1, :]))
dna_str += SequenceTools.gray2dna_[(g1, g2)]
i += 2
return dna_str
@staticmethod
def convert_dna_str_to_gray(dna_str):
"""Convert length N string into 2N x 2 tokenized gray code array"""
dna_str = dna_str.lower()
N = len(dna_str)
gray_arr = np.zeros((2 * N, 2))
for i in range(N):
gray_i = SequenceTools.dna2gray_[dna_str[i]]
for j in range(2):
gray_arr[2 * i + j, gray_i[j]] = 1
return gray_arr
@staticmethod
def convert_rna_to_dna(seq):
rna2dna = {'u': 't', 'a': 'a', 'g': 'g', 'c': 'c'}
return "".join([rna2dna[s] for s in seq])
@classmethod
def get_codon_from_idx(cls, idx):
idx2codon = {val: key for key, val in SequenceTools.codon2idx_.items()}
return idx2codon[idx]
@classmethod
def get_start_codon_int(cls):
return SequenceTools.codon2idx_['atg']
@classmethod
def get_stop_codon_ints(cls):
stop_codons = SequenceTools.protein2codon_['stop']
return [SequenceTools.codon2idx_[s] for s in stop_codons]
@classmethod
def translate_dna_str(cls, dna_seq):
dna_seq = dna_seq.lower()
prot_seq = []
i = 0
while i < len(dna_seq):
cod = dna_seq[i:i + 3]
prot_seq.append(SequenceTools.codon2protein_[cod])
i += 3
prot_seq = "".join(prot_seq)
return prot_seq
| 44.345946 | 112 | 0.384203 | 8,182 | 0.997318 | 0 | 0 | 3,490 | 0.425402 | 0 | 0 | 2,084 | 0.254022 |
7515cac7e4d1ef43edd544d3910c5a6a20ce7a6c | 98 | py | Python | python/eet/__init__.py | SidaZh/EET | 6414faa734abfdb666556304ca3df5b7f5e54c38 | [
"Apache-2.0"
] | null | null | null | python/eet/__init__.py | SidaZh/EET | 6414faa734abfdb666556304ca3df5b7f5e54c38 | [
"Apache-2.0"
] | null | null | null | python/eet/__init__.py | SidaZh/EET | 6414faa734abfdb666556304ca3df5b7f5e54c38 | [
"Apache-2.0"
] | null | null | null | from .fairseq import *
from .transformers import *
from .utils import *
from .pipelines import *
| 16.333333 | 27 | 0.744898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7517c166337bb5a829a1b931762f8f58fa31c7b9 | 4,743 | py | Python | examples/client_example.py | christian-oudard/AIOPyFix | cb9e9372075e6119c0ef363d2095b3b6fddafcf3 | [
"CC0-1.0"
] | null | null | null | examples/client_example.py | christian-oudard/AIOPyFix | cb9e9372075e6119c0ef363d2095b3b6fddafcf3 | [
"CC0-1.0"
] | null | null | null | examples/client_example.py | christian-oudard/AIOPyFix | cb9e9372075e6119c0ef363d2095b3b6fddafcf3 | [
"CC0-1.0"
] | null | null | null | import asyncio
from enum import Enum
import logging
import random
from aiopyfix.connection import ConnectionState, MessageDirection
from aiopyfix.client_connection import FIXClient
from aiopyfix.engine import FIXEngine
from aiopyfix.message import FIXMessage
class Side(Enum):
    """Order side, matching FIX tag 54 values (1 = buy, 2 = sell)."""
    buy = 1
    sell = 2
class Client(FIXEngine):
    """Example FIX 4.4 client: logs on, sends one random order on login,
    and logs execution reports (acks and rejects)."""

    def __init__(self):
        FIXEngine.__init__(self, "client_example.store")
        self.clOrdID = 0  # monotonically increasing client order id
        self.msgGenerator = None
        # create a FIX Client using the FIX 4.4 standard
        self.client = FIXClient(self, "aiopyfix.FIX44", "TARGET", "SENDER")

        # we register some listeners since we want to know when the connection goes up or down
        self.client.addConnectionListener(self.onConnect, ConnectionState.CONNECTED)
        self.client.addConnectionListener(self.onDisconnect, ConnectionState.DISCONNECTED)

    async def start(self, host, port, loop):
        """Start the underlying FIX transport on the given host/port."""
        await self.client.start(host, port, loop)

    async def onConnect(self, session):
        """Connection-established callback: attach per-session handlers."""
        logging.info("Established connection to %s" % (session.address(),))
        # register to receive message notifications on the session which has just been created
        session.addMessageHandler(self.onLogin, MessageDirection.INBOUND, self.client.protocol.msgtype.LOGON)
        session.addMessageHandler(self.onExecutionReport, MessageDirection.INBOUND,
                                  self.client.protocol.msgtype.EXECUTIONREPORT)

    async def onDisconnect(self, session):
        """Connection-lost callback: detach the handlers added in onConnect."""
        logging.info("%s has disconnected" % (session.address(),))
        # we need to clean up our handlers, since this session is disconnected now
        session.removeMessageHandler(self.onLogin, MessageDirection.INBOUND, self.client.protocol.msgtype.LOGON)
        session.removeMessageHandler(self.onExecutionReport, MessageDirection.INBOUND,
                                     self.client.protocol.msgtype.EXECUTIONREPORT)

    async def sendOrder(self, connectionHandler):
        """Build and send a NewOrderSingle with random side, quantity and price."""
        self.clOrdID = self.clOrdID + 1
        codec = connectionHandler.codec
        msg = FIXMessage(codec.protocol.msgtype.NEWORDERSINGLE)
        msg.setField(codec.protocol.fixtags.Price, "%0.2f" % (random.random() * 2 + 10))
        msg.setField(codec.protocol.fixtags.OrderQty, int(random.random() * 100))
        msg.setField(codec.protocol.fixtags.Symbol, "VOD.L")
        msg.setField(codec.protocol.fixtags.SecurityID, "GB00BH4HKS39")
        msg.setField(codec.protocol.fixtags.SecurityIDSource, "4")
        msg.setField(codec.protocol.fixtags.Account, "TEST")
        msg.setField(codec.protocol.fixtags.HandlInst, "1")
        msg.setField(codec.protocol.fixtags.ExDestination, "XLON")
        msg.setField(codec.protocol.fixtags.Side, int(random.random() * 2) + 1)
        msg.setField(codec.protocol.fixtags.ClOrdID, str(self.clOrdID))
        msg.setField(codec.protocol.fixtags.Currency, "GBP")

        await connectionHandler.sendMsg(msg)
        side = Side(int(msg.getField(codec.protocol.fixtags.Side)))
        logging.debug("---> [%s] %s: %s %s %s@%s" % (
            codec.protocol.msgtype.msgTypeToName(msg.msgType), msg.getField(codec.protocol.fixtags.ClOrdID),
            msg.getField(codec.protocol.fixtags.Symbol), side.name, msg.getField(codec.protocol.fixtags.OrderQty),
            msg.getField(codec.protocol.fixtags.Price)))

    async def onLogin(self, connectionHandler, msg):
        """Logon acknowledgement: kick off the first order."""
        logging.info("Logged in")
        await self.sendOrder(connectionHandler)

    async def onExecutionReport(self, connectionHandler, msg):
        """Log execution reports: ExecType '0' = new/ack, '4' = canceled/reject."""
        codec = connectionHandler.codec
        if codec.protocol.fixtags.ExecType in msg:
            if msg.getField(codec.protocol.fixtags.ExecType) == "0":
                side = Side(int(msg.getField(codec.protocol.fixtags.Side)))
                logging.debug("<--- [%s] %s: %s %s %s@%s" % (
                    codec.protocol.msgtype.msgTypeToName(msg.getField(codec.protocol.fixtags.MsgType)),
                    msg.getField(codec.protocol.fixtags.ClOrdID), msg.getField(codec.protocol.fixtags.Symbol), side.name,
                    msg.getField(codec.protocol.fixtags.OrderQty), msg.getField(codec.protocol.fixtags.Price)))
            elif msg.getField(codec.protocol.fixtags.ExecType) == "4":
                reason = "Unknown" if codec.protocol.fixtags.Text not in msg else msg.getField(
                    codec.protocol.fixtags.Text)
                logging.info("Order Rejected '%s'" % (reason,))
        else:
            logging.error("Received execution report without ExecType")
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated on modern Python; consider asyncio.new_event_loop().
    loop = asyncio.get_event_loop()
    client = Client()
    # Start the FIX client on port 9898 (empty host string), then run the
    # event loop until interrupted.
    loop.run_until_complete(client.start('', 9898, loop))
    loop.run_forever()
| 48.896907 | 117 | 0.685853 | 4,265 | 0.89922 | 0 | 0 | 0 | 0 | 3,618 | 0.762808 | 607 | 0.127978 |
7518f8a86bdcb8318519e5f88ff22d6bb07f2c4c | 1,236 | py | Python | quantize-gym/gym/envs/robotics/fetch/pick_and_place.py | YunchuZhang/Visually-Grounded-Library-of-Behaviors-for-Generalizing-Manipulation-Across-Objects-Configurations- | 896afda942dfc04e4aaad2ee751c32df1eb17913 | [
"MIT"
] | 1 | 2022-03-14T22:25:17.000Z | 2022-03-14T22:25:17.000Z | quantize-gym/gym/envs/robotics/fetch/pick_and_place.py | YunchuZhang/Visually-Grounded-Library-of-Behaviors | 896afda942dfc04e4aaad2ee751c32df1eb17913 | [
"MIT"
] | null | null | null | quantize-gym/gym/envs/robotics/fetch/pick_and_place.py | YunchuZhang/Visually-Grounded-Library-of-Behaviors | 896afda942dfc04e4aaad2ee751c32df1eb17913 | [
"MIT"
] | null | null | null | import os
from gym import utils
from gym.envs.robotics import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join('fetch', 'pick_and_place.xml')
class FetchPickAndPlaceEnv(fetch_env.FetchEnv, utils.EzPickle):
    """Fetch pick-and-place task.

    Args:
        reward_type: 'sparse' or 'dense' reward (forwarded to FetchEnv).
        xml_path: optional MuJoCo XML path overriding the default model.
        rim_pts, use_bbox_indicator, n_actions, init_info, obs_type:
            forwarded to FetchEnv unchanged.
    """

    def __init__(self, reward_type='sparse', xml_path=None, rim_pts=None, use_bbox_indicator=False,
                 n_actions=4, init_info=None, obs_type='xyz'):
        initial_qpos = {
            'robot0:slide0': 0.405,
            'robot0:slide1': 0.48,
            'robot0:slide2': 0.0,
            'object0:joint': [1.25, 0.53, 0.5, 1., 0., 0., 0.],
        }
        # BUG FIX: the original rebound the module-level MODEL_XML_PATH via
        # `global`, so passing xml_path once silently changed the default
        # model for every subsequently created instance. Use a local
        # override instead.
        model_xml_path = xml_path if xml_path is not None else MODEL_XML_PATH
        fetch_env.FetchEnv.__init__(
            self, model_xml_path, has_object=True, block_gripper=False, n_substeps=20,
            gripper_extra_height=0.3, target_in_the_air=True, target_offset=0.0,
            obj_range=0.15, target_range=0.15, distance_threshold=0.05,
            initial_qpos=initial_qpos, reward_type=reward_type, rim_pts=rim_pts,
            use_bbox_indicator=use_bbox_indicator, n_actions=n_actions, init_info=init_info, obs_type=obs_type)
        utils.EzPickle.__init__(self)
| 41.2 | 111 | 0.666667 | 1,044 | 0.84466 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.123786 |
751a5509067e9ef3020f185d496e96ee2816760b | 1,272 | py | Python | logic_inference_dataset/generate_sample_data.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | logic_inference_dataset/generate_sample_data.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | logic_inference_dataset/generate_sample_data.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates and prints a few examples for illustration purposes.
"""
import random
from absl import app
import rules
import splits
def main(_):
  """Generate a small IID train/test split and print a few examples."""
  random.seed(0)  # fixed seed so the printed examples are reproducible
  rules.precompute_rules()
  (train, test) = splits.generate_training_and_test_sets_iid(
      50, 20, 500, 0.9, answer_at_the_end=False)
  print(f"train: {len(train)}, test: {len(test)}")
  for e in train[:5]:
    print("- Train ---------------")
    print(" inputs: " + e.inputs)
    print(" targets: " + e.targets)
  for e in test[:5]:
    print("- Test ---------------")
    print(" inputs: " + e.inputs)
    print(" targets: " + e.targets)
if __name__ == "__main__":
  # absl.app.run parses command-line flags and then invokes main.
  app.run(main)
| 27.652174 | 74 | 0.680031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 814 | 0.639937 |
751cf1f93872f6c6b2d86dc97e766003d090960e | 5,414 | py | Python | UQ/UQtutorial.py | ISR3D/ISR3D | e4f31a2bddbedb8e2743e088ee24f865bc97ec30 | [
"BSD-3-Clause"
] | null | null | null | UQ/UQtutorial.py | ISR3D/ISR3D | e4f31a2bddbedb8e2743e088ee24f865bc97ec30 | [
"BSD-3-Clause"
] | null | null | null | UQ/UQtutorial.py | ISR3D/ISR3D | e4f31a2bddbedb8e2743e088ee24f865bc97ec30 | [
"BSD-3-Clause"
] | null | null | null | import subprocess
# Best-effort dependency bootstrap: try to import each required package and
# pip-install it when missing.
import importlib

lib_list = ['numpy', 'ymmsl', 'sobol_seq', 'csv', 'seaborn', 'zenodo_get']
for lib_name in lib_list:
    try:
        # BUG FIX: the original did `import lib_name`, which looks for a
        # module literally called "lib_name" and therefore always raised
        # ImportError, reinstalling every package on every run. Import the
        # module named by the variable instead.
        importlib.import_module(lib_name)
    except ImportError:
        print(lib_name, ' Module not installed')
        # The stdlib-style 'csv' requirement is published on PyPI as
        # 'python-csv'; every other entry installs under its own name.
        pip_name = 'python-csv' if lib_name == 'csv' else lib_name
        subprocess.run(['pip', 'install', pip_name])
import numpy as np
import ymmsl
import sobol_seq
import csv
import os
import seaborn as sns
import zenodo_get
# Transform the normalized sample matrix to ranges of uncertain parameters
def dim_transform(sobol_vector, uncertain_list):
    """Rescale each column of ``sobol_vector`` from [0, 1] to the
    [min, max] range of the matching entry in ``uncertain_list``.

    The array is modified in place and also returned for convenience.
    """
    for axis, spec in enumerate(uncertain_list):
        upper = spec.get('max')
        lower = spec.get('min')
        sobol_vector[:, axis] = lower + (upper - lower) * sobol_vector[:, axis]
    return sobol_vector
####################################################################################
##### Sample generation and UQ campaign creation (including instances folder)#######
####################################################################################
# Note:
# This is used to generate UQ samples for only four biological parameters:
# 1) Endothelium endpoint 2) smc max strain 3) balloon extension 4) Fenestration probability
# Naming of Folder and files for samples
# Level 0: UQtest (UQ campaign name)
# Level 1: UQtest/A (sample matrix of sobol sequence)
# Level 2: UQtest/A/A_X where X vary from 1 -> N (N: number of samples)
# Level 3: UQtest/A/A_X/input.ymmsl

### Main function
# Number of samples for UQ
# Note that ISR3D is a computationally intensive application.
# Running 128 instances would need some cluster resources
# You can start with a small number, 16 for instances.
NumSample = 128
# Template path to the ymmsl file (relative path from ISR3D/Result/UQtest/ to ISR3D/UQ/template/input_stage4.ymmsl)
input_path = '../../UQ/template/'
input_ymmsl_filename = 'input_stage4.ymmsl'
# Output directory for the UQ campaign folder and the campaign name
output_path = './'
experiment_name = 'UQtest'
# Read in the data of the template ymmsl file
with open(input_path+input_ymmsl_filename,'r') as f:
    ymmsl_data = ymmsl.load(f)
# Take out the unchanged model part and the need-for-change settings part of the ymmsl model
model = ymmsl_data.model
settings = ymmsl_data.settings
# Set the uncertain parameters and their ranges as a list of dicts
# (each entry: ymmsl setting name plus the [min, max] sampling range)
ymmsl_uncertain_parameters = [
    {
        'name': 'smc.endo_endpoint',
        'min': 10.0,
        'max': 20.0
    },
    {
        'name': 'smc.balloon_extension',
        'min': 0.5,
        'max': 1.5
    },
    {
        'name': 'smc.smc_max_strain',
        'min': 1.2,
        'max': 1.8
    },
    {
        'name': 'smc.fenestration_probability',
        'min': 0.0,
        'max': 0.1
    }]
# Count the total uncertain input dimensions (here 4 parameters)
num_uncer_para = len(ymmsl_uncertain_parameters)
print('Number of uncertain parameter: '+str(num_uncer_para))
# Generate a Sobol sequence in [0, 1), save it, then rescale it to [min, max]
# NOTE(review): A.csv is saved *after* dim_transform, so it holds the rescaled
# values, not the raw unit-cube sequence — confirm that is intended.
A = sobol_seq.i4_sobol_generate(num_uncer_para,NumSample)
A = dim_transform(A,ymmsl_uncertain_parameters)
np.savetxt("A.csv",A)
# Create the campaign directory; warn (but continue) if it already exists.
try:
    os.mkdir(output_path+experiment_name)
except OSError:
    # BUG FIX: '%' binds tighter than '+', so the original formatted only
    # output_path into the message and appended experiment_name afterwards.
    # Parenthesize so the full campaign path is formatted.
    print("Creation of the directory %s failed" % (output_path+experiment_name))
else:
    print("Successfully created the directory %s" % (output_path+experiment_name))
# A: Replace the corresponding value within the settings dict and write one
# input ymmsl file per sample into UQtest/A/A_<n>/.
os.mkdir(output_path+experiment_name+'/A')
checklist = ['A']
for n in range(NumSample):
    sample_path = output_path+experiment_name+'/A'+'/A_'+str(n)
    os.mkdir(sample_path)
    # Generate the ymmsl file: overwrite each uncertain setting with the
    # n-th row of the (already rescaled) sample matrix A.
    num_para = 0
    for para in ymmsl_uncertain_parameters:
        settings[para.get('name')] = float(A[n,num_para])
        num_para = num_para + 1
    # NOTE(review): the same `settings` object is mutated and reused across
    # iterations; this relies on every uncertain key being rewritten each pass.
    config = ymmsl.Configuration(model, settings, None, None)
    with open(sample_path+'/input_stage4.ymmsl', 'w') as f:
        ymmsl.save(config, f)
print('ymmsl input for each UQ instance has been generated')
####################################################################################
##### Run shell script to broadcast other input files to each sample folder#########
####################################################################################
import subprocess  # re-imported here; already imported at the top of the script
# Download other input files from Zenodo (requires network access and wget)
print('Start to download other input files for ISR3D from Zenodo')
subprocess.run(['wget https://zenodo.org/record/4603912/files/stage3.test_vessel.dat'],shell = True)
subprocess.run(['wget https://zenodo.org/record/4603912/files/stage3.test_vessel_nb.dat'],shell = True)
subprocess.run(['wget https://zenodo.org/record/4603912/files/test_vessel_centerline.csv'],shell = True)
print('Start to broadcast the input to each UQ instance directory')
# Path to the broadcast script (relative path from ISR3D/Result/UQtest/ to ISR3D/UQ/function/BCastStage3.sh)
# The script receives the number of samples so it can copy into every A_<n> folder.
pass_arg = str(NumSample)
subprocess.run(['bash','../../UQ/function/BCastStage3.sh', '%s'%pass_arg])
print('Sample generation done')
751dc8bcc74a49ad7a489ad38470f4ef64fc97a5 | 12,715 | py | Python | launch/test/launch/test_logging.py | pal-robotics-forks/launch | 9a0a01924cd88f832c5a50903553ddcec6ef648d | [
"Apache-2.0"
] | null | null | null | launch/test/launch/test_logging.py | pal-robotics-forks/launch | 9a0a01924cd88f832c5a50903553ddcec6ef648d | [
"Apache-2.0"
] | null | null | null | launch/test/launch/test_logging.py | pal-robotics-forks/launch | 9a0a01924cd88f832c5a50903553ddcec6ef648d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the launch.logging module."""
import logging
import os
import pathlib
import re
from unittest import mock
import launch.logging
import pytest
@pytest.fixture
def log_dir(tmpdir_factory):
    """Test fixture that generates a temporary directory for log files.

    Returns the directory as a plain string, as expected by
    ``launch.logging.launch_config.log_dir``.
    """
    return str(tmpdir_factory.mktemp('logs'))
def test_bad_logging_launch_config():
    """Tests that setup throws at bad configuration."""
    launch.logging.reset()
    # A non-existent directory must be rejected.
    with pytest.raises(ValueError):
        launch.logging.launch_config.log_dir = 'not/a/real/dir'
    # '%' is not a valid format style for the screen format.
    with pytest.raises(ValueError):
        launch.logging.launch_config.set_screen_format('default', screen_style='%')
    # '%' is not a valid format style for the log file format either.
    with pytest.raises(ValueError):
        launch.logging.launch_config.set_log_format(log_format='default', log_style='%')
def test_output_loggers_bad_configuration(log_dir):
    """Tests that output loggers setup throws at bad configuration."""
    launch.logging.launch_config.reset()
    launch.logging.launch_config.log_dir = log_dir
    # Unknown configuration alias string.
    with pytest.raises(ValueError):
        launch.logging.get_output_loggers('some-proc', 'not_an_alias')
    # Unknown source name (only stdout/stderr/both are valid keys).
    with pytest.raises(ValueError):
        launch.logging.get_output_loggers('some-proc', {'garbage': {'log'}})
    # Unknown destination name inside an otherwise valid key.
    with pytest.raises(ValueError):
        launch.logging.get_output_loggers('some-proc', {'stdout': {'garbage'}})
@pytest.mark.parametrize('config,checks', [
    ('screen', {'stdout': {'screen'}, 'stderr': {'screen'}}),
    ('log', {'stdout': {'log'}, 'stderr': {'log', 'screen'}}),
    ('both', {'both': {'log', 'screen'}}),
    ('own_log', {
        'stdout': {'own_log'},
        'stderr': {'own_log'},
        'both': {'own_log'},
    }),
    ('full', {
        'stdout': {'log', 'own_log', 'screen'},
        'stderr': {'log', 'own_log', 'screen'},
        'both': {'own_log'},
    }),
    (
        {'stdout': {'screen', 'log'}, 'stderr': {'own_log'}},
        {
            'stdout': {'screen', 'log'},
            'stderr': {'own_log'}
        },
    )
])
def test_output_loggers_configuration(capsys, log_dir, config, checks):
    """Check each output-logger config routes stdout/stderr to the expected sinks.

    ``config`` is the value handed to ``get_output_loggers``; ``checks`` maps
    each source (stdout/stderr/both) to the set of destinations
    (screen/log/own_log) the test expects output to reach.
    """
    # Fill in any sources the parametrization omitted with "no destination".
    checks = {'stdout': set(), 'stderr': set(), 'both': set(), **checks}
    launch.logging.reset()
    launch.logging.launch_config.log_dir = log_dir
    logger = launch.logging.get_logger('some-proc')
    logger.addHandler(launch.logging.launch_config.get_screen_handler())
    logger.addHandler(launch.logging.launch_config.get_log_file_handler())
    logger.setLevel(logging.ERROR)
    stdout_logger, stderr_logger = launch.logging.get_output_loggers('some-proc', config)
    # Emit one record of each kind; 'oops' is below ERROR and must be dropped.
    logger.debug('oops')
    logger.error('baz')
    stdout_logger.info('foo')
    stderr_logger.info('bar')
    # Screen check: pop lines in emission order from the captured stdout.
    capture = capsys.readouterr()
    lines = list(reversed(capture.out.splitlines()))
    assert '[ERROR] [some-proc]: baz' == lines.pop()
    if 'screen' in (checks['stdout'] | checks['both']):
        assert 'foo' == lines.pop()
    if 'screen' in (checks['stderr'] | checks['both']):
        assert 'bar' == lines.pop()
    assert 0 == len(lines)
    assert 0 == len(capture.err)
    # Main log file check: records there carry a leading epoch timestamp.
    launch.logging.launch_config.get_log_file_handler().flush()
    main_log_path = launch.logging.launch_config.get_log_file_path()
    assert os.path.exists(main_log_path)
    assert 0 != os.stat(main_log_path).st_size
    with open(main_log_path, 'r') as f:
        lines = list(reversed(f.readlines()))
        assert re.match(r'[0-9]+\.[0-9]+ \[ERROR\] \[some-proc\]: baz', lines.pop()) is not None
        if 'log' in (checks['stdout'] | checks['both']):
            assert re.match(r'[0-9]+\.[0-9]+ foo', lines.pop()) is not None
        if 'log' in (checks['stderr'] | checks['both']):
            assert re.match(r'[0-9]+\.[0-9]+ bar', lines.pop()) is not None
        assert 0 == len(lines)
    # Per-stream own-log files: present and populated only when configured.
    if 'own_log' in (checks['stdout'] | checks['both']):
        launch.logging.launch_config.get_log_file_handler('some-proc-stdout.log').flush()
        own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stdout.log')
        assert os.path.exists(own_log_path)
        assert 0 != os.stat(own_log_path).st_size
        with open(own_log_path, 'r') as f:
            lines = f.read().splitlines()
        assert 1 == len(lines)
        assert 'foo' == lines[0]
    else:
        own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stdout.log')
        assert (not os.path.exists(own_log_path) or 0 == os.stat(own_log_path).st_size)
    if 'own_log' in (checks['stderr'] | checks['both']):
        launch.logging.launch_config.get_log_file_handler('some-proc-stderr.log').flush()
        own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stderr.log')
        assert os.path.exists(own_log_path)
        assert 0 != os.stat(own_log_path).st_size
        with open(own_log_path, 'r') as f:
            lines = f.read().splitlines()
        assert 1 == len(lines)
        assert 'bar' == lines[0]
    else:
        own_log_path = launch.logging.launch_config.get_log_file_path('some-proc-stderr.log')
        assert (not os.path.exists(own_log_path) or 0 == os.stat(own_log_path).st_size)
    # Combined own-log file gets both streams, stdout first.
    if 'own_log' in checks['both']:
        launch.logging.launch_config.get_log_file_handler('some-proc.log').flush()
        own_log_path = launch.logging.launch_config.get_log_file_path('some-proc.log')
        assert os.path.exists(own_log_path)
        assert 0 != os.stat(own_log_path).st_size
        with open(own_log_path, 'r') as f:
            lines = f.read().splitlines()
        assert 2 == len(lines)
        assert 'foo' == lines[0]
        assert 'bar' == lines[1]
    else:
        own_log_path = launch.logging.launch_config.get_log_file_path('some-proc.log')
        assert (not os.path.exists(own_log_path) or 0 == os.stat(own_log_path).st_size)
def test_screen_default_format_with_timestamps(capsys, log_dir):
    """Test screen logging when using the default logs format with timestamps."""
    launch.logging.reset()
    launch.logging.launch_config.level = logging.DEBUG
    launch.logging.launch_config.log_dir = log_dir
    launch.logging.launch_config.set_screen_format('default_with_timestamp')
    logger = launch.logging.get_logger('some-proc')
    logger.addHandler(launch.logging.launch_config.get_screen_handler())
    assert logger.getEffectiveLevel() == logging.DEBUG
    logger.debug('foo')
    capture = capsys.readouterr()
    lines = capture.out.splitlines()
    assert 1 == len(lines)
    # The timestamped format prefixes an epoch-seconds float to the record.
    assert re.match(r'[0-9]+\.[0-9]+ \[DEBUG\] \[some-proc\]: foo', lines[0]) is not None
    assert 0 == len(capture.err)
def test_screen_default_format(capsys):
    """Test screen logging when using the default logs format."""
    launch.logging.reset()
    logger = launch.logging.get_logger('some-proc')
    logger.addHandler(launch.logging.launch_config.get_screen_handler())
    # After reset the default effective level is INFO.
    assert logger.getEffectiveLevel() == logging.INFO
    logger.info('bar')
    capture = capsys.readouterr()
    lines = capture.out.splitlines()
    assert 1 == len(lines)
    # Default screen format: no timestamp, just level, logger name, message.
    assert '[INFO] [some-proc]: bar' == lines[0]
    assert 0 == len(capture.err)
def test_log_default_format(log_dir):
    """Test logging to the main log file when using the default logs format."""
    launch.logging.reset()
    launch.logging.launch_config.level = logging.WARN
    launch.logging.launch_config.log_dir = log_dir
    logger = launch.logging.get_logger('some-proc')
    logger.addHandler(launch.logging.launch_config.get_log_file_handler())
    assert logger.getEffectiveLevel() == logging.WARN
    logger.error('baz')
    # Flush so the record is on disk before the file is inspected.
    launch.logging.launch_config.get_log_file_handler().flush()
    assert os.path.exists(launch.logging.launch_config.get_log_file_path())
    assert 0 != os.stat(launch.logging.launch_config.get_log_file_path()).st_size
    with open(launch.logging.launch_config.get_log_file_path(), 'r') as f:
        lines = f.readlines()
        assert 1 == len(lines)
        # File records carry a leading epoch-seconds timestamp.
        assert re.match(r'[0-9]+\.[0-9]+ \[ERROR\] \[some-proc\]: baz', lines[0]) is not None
def test_log_handler_factory(log_dir):
    """Test logging using a custom log handler factory.

    Installs a factory that records formatted messages into an in-memory
    list per log file path, instead of writing to disk.
    """
    class TestStreamHandler(launch.logging.handlers.Handler):
        # Minimal handler: append every formatted record to the given list.
        def __init__(self, output):
            super().__init__()
            self._output = output

        def emit(self, record):
            self._output.append(self.format(record))

    import collections
    outputs = collections.defaultdict(list)
    launch.logging.reset()
    launch.logging.launch_config.level = logging.WARN
    launch.logging.launch_config.log_dir = log_dir
    # Each requested log file path gets its own capture list.
    launch.logging.launch_config.log_handler_factory = (
        lambda path, encoding=None: TestStreamHandler(
            output=outputs[path]
        )
    )
    logger = launch.logging.get_logger('some-proc')
    logger.addHandler(launch.logging.launch_config.get_log_file_handler())
    logger.debug('foo')  # below WARN, must be filtered out
    logger.error('baz')
    path = launch.logging.launch_config.get_log_file_path()
    assert path in outputs
    assert len(outputs[path]) == 1
    assert outputs[path][0].endswith('baz')
def fake_make_unique_log_dir(*, base_path):
    """Test double for ``launch.logging._make_unique_log_dir``.

    Reports the base path unchanged and never touches the filesystem.
    """
    return base_path
@mock.patch('launch.logging._make_unique_log_dir', mock.MagicMock(wraps=fake_make_unique_log_dir))
def test_get_logging_directory():
    """Check log directory resolution from ROS_LOG_DIR / ROS_HOME / HOME.

    ``_make_unique_log_dir`` is patched so no directories are created on disk.
    Assigning ``log_dir = None`` forces the config to re-read the environment.
    """
    launch.logging.launch_config.reset()
    os.environ.pop('ROS_LOG_DIR', None)
    os.environ.pop('ROS_HOME', None)
    home = pathlib.Path.home()
    assert str(home)
    # Default case without ROS_LOG_DIR or ROS_HOME being set (but with HOME)
    default_dir = str(home / '.ros/log')
    # This ensures that the launch config will check the environment again
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == default_dir
    # Use $ROS_LOG_DIR if it is set
    my_log_dir_raw = '/my/ros_log_dir'
    my_log_dir = str(pathlib.Path(my_log_dir_raw))
    os.environ['ROS_LOG_DIR'] = my_log_dir
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == my_log_dir
    # Make sure it converts path separators when necessary
    os.environ['ROS_LOG_DIR'] = my_log_dir_raw
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == my_log_dir
    # Setting ROS_HOME won't change anything since ROS_LOG_DIR is used first
    os.environ['ROS_HOME'] = '/this/wont/be/used'
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == my_log_dir
    os.environ.pop('ROS_HOME', None)
    # Empty is considered unset
    os.environ['ROS_LOG_DIR'] = ''
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == default_dir
    # Make sure '~' is expanded to the home directory
    os.environ['ROS_LOG_DIR'] = '~/logdir'
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == str(home / 'logdir')
    os.environ.pop('ROS_LOG_DIR', None)
    # Without ROS_LOG_DIR, use $ROS_HOME/log
    fake_ros_home = home / '.fakeroshome'
    fake_ros_home_log_dir = str(fake_ros_home / 'log')
    os.environ['ROS_HOME'] = str(fake_ros_home)
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == fake_ros_home_log_dir
    # Make sure it converts path separators when necessary
    my_ros_home_raw = '/my/ros/home'
    my_ros_home_log_dir = str(pathlib.Path(my_ros_home_raw) / 'log')
    os.environ['ROS_HOME'] = my_ros_home_raw
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == my_ros_home_log_dir
    # Empty is considered unset
    os.environ['ROS_HOME'] = ''
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == default_dir
    # Make sure '~' is expanded to the home directory
    os.environ['ROS_HOME'] = '~/.fakeroshome'
    launch.logging.launch_config.log_dir = None
    assert launch.logging.launch_config.log_dir == fake_ros_home_log_dir
    os.environ.pop('ROS_HOME', None)
    launch.logging.launch_config.reset()
| 39.243827 | 98 | 0.683366 | 245 | 0.019269 | 0 | 0 | 7,538 | 0.592843 | 0 | 0 | 3,199 | 0.251593 |
751f0dac6fc587c1fb5bfe7b2c73eb70a7dff82d | 690 | py | Python | code/C10/pkg/mysql.py | DataScienceCoding/Python-Web-Crawler | 7348f20b09fc89263ac4a7109b3890c64b687265 | [
"MIT"
] | 2 | 2020-12-17T06:33:48.000Z | 2020-12-26T01:21:13.000Z | code/C10/pkg/mysql.py | DataScienceCoding/Python-Web-Crawler | 7348f20b09fc89263ac4a7109b3890c64b687265 | [
"MIT"
] | null | null | null | code/C10/pkg/mysql.py | DataScienceCoding/Python-Web-Crawler | 7348f20b09fc89263ac4a7109b3890c64b687265 | [
"MIT"
] | null | null | null | import pymysql
from conf.cfg import Conf
class MysqlCli():
    """Thin MySQL client wrapper configured from the project's config file.

    Reads connection options from the ``mysql`` section of the project
    configuration and opens a connection plus a cursor on construction.
    On failure the error is printed and ``self._db`` stays ``None``.
    """

    def __init__(self):
        conf = Conf()
        option = conf.section('mysql')
        # Ensure the attribute always exists so __del__ never raises
        # AttributeError when the connection attempt fails.
        self._db = None
        try:
            self._db = pymysql.connect(host=option['host'],
                                       port=int(option['port']),
                                       user=option['username'],
                                       password=option['password'])
            self.cursor = self._db.cursor()
        except Exception as e:
            # BUG FIX: the original called e.reason(), which does not exist on
            # pymysql exceptions and raised AttributeError inside the handler.
            print(e)

    def __del__(self):
        # Close the connection only if it was actually opened.
        if self._db is not None:
            print('Mysql will be closed.')
            self._db.close()
| 28.75 | 67 | 0.484058 | 646 | 0.936232 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.191304 |
751f12c2fc158ec39592f00d7e74f388ca110773 | 12,621 | py | Python | matrixprofile/io/protobuf/protobuf_utils.py | MORE-EU/matrixprofile | 7c598385f7723f337d7bf7d3f90cffb690c6b0df | [
"Apache-2.0"
] | 262 | 2020-02-28T20:42:27.000Z | 2022-03-30T14:02:28.000Z | matrixprofile/io/protobuf/protobuf_utils.py | MORE-EU/matrixprofile | 7c598385f7723f337d7bf7d3f90cffb690c6b0df | [
"Apache-2.0"
] | 79 | 2020-03-01T01:42:14.000Z | 2022-03-30T07:15:48.000Z | matrixprofile/io/protobuf/protobuf_utils.py | MORE-EU/matrixprofile | 7c598385f7723f337d7bf7d3f90cffb690c6b0df | [
"Apache-2.0"
] | 56 | 2020-03-03T14:56:27.000Z | 2022-03-22T07:18:42.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.proto_messages_pb2 import (
Location, Motif, MPFOutput
)
def get_matrix_attributes(matrix):
    """
    Utility function to extract the rows, cols and flattened array from a
    numpy array so it can be stored in the MPFOutput protobuf message.

    Parameters
    ----------
    matrix : np.ndarray
        The numpy array to extract the attributes from.

    Returns
    -------
    tuple :
        (rows, cols, flattened array), or (None, None, None) when the
        input is not array-like or is empty.
    """
    if not core.is_array_like(matrix) or len(matrix) < 1:
        return None, None, None

    shape = matrix.shape
    n_rows = shape[0]
    # A 1-D array is reported with zero columns.
    n_cols = shape[1] if len(shape) > 1 else 0

    return n_rows, n_cols, matrix.flatten()
def get_windows(profile):
    """
    Utility function to format the windows from a profile structure ensuring
    that the windows are in an array.

    Parameters
    ----------
    profile : dict
        The MatrixProfile or PMP profile.

    Returns
    -------
    list :
        The window(s) in a list.
    """
    if core.is_mp_obj(profile):
        # A MatrixProfile stores a single window under 'w'.
        return [profile.get('w')]
    if core.is_pmp_obj(profile):
        # A PMP already stores a collection of windows.
        return profile.get('windows')
    return []
def get_proto_motif(motif):
    """
    Utility function to convert a motif from a MatrixProfile or PMP structure
    ensuring that it is compatible with the MPFOutput message.

    Note
    ----
    A single dimensional motif location will only have a row index and
    a column index of 0.

    Parameters
    ----------
    motif : dict
        The motif to convert.

    Returns
    -------
    Motif :
        The motif object for MPFOutput message.
    """
    def to_location(index):
        # Build a Location; plain integers map to (row=index, col=0),
        # pairs map to (row, col).
        loc = Location()
        loc.row = 0
        loc.col = 0
        if core.is_array_like(index):
            loc.row = index[0]
            loc.col = index[1]
        else:
            loc.row = index
        return loc

    result = Motif()
    for index in motif['motifs']:
        result.motifs.append(to_location(index))
    for neighbor in motif['neighbors']:
        result.neighbors.append(to_location(neighbor))

    return result
def get_proto_discord(discord):
    """
    Utility function to convert a discord into the MPFOutput message
    format.

    Note
    ----
    A single dimensional discord location will only have a row index and
    a column index of 0.

    Parameters
    ----------
    discord : int or tuple
        The discord with row, col index or single index.

    Returns
    -------
    Location :
        The Location message used in the MPFOutput protobuf message.
    """
    location = Location()
    if core.is_array_like(discord):
        location.row = discord[0]
        location.col = discord[1]
    else:
        # Single-dimensional discord: column defaults to 0.
        location.row = discord
        location.col = 0
    return location
def profile_to_proto(profile):
    """
    Utility function that takes a MatrixProfile or PMP profile data structure
    and converts it to the MPFOutput protobuf message object.

    Parameters
    ----------
    profile : dict
        The profile to convert.

    Returns
    -------
    MPFOutput :
        The MPFOutput protobuf message object.

    Raises
    ------
    ValueError
        When the profile is neither a MatrixProfile nor a PMP structure.
    """
    output = MPFOutput()

    # add higher level attributes that work for PMP and MP
    output.klass = profile.get('class')
    output.algorithm = profile.get('algorithm')
    output.metric = profile.get('metric')
    output.sample_pct = profile.get('sample_pct')

    # add time series data
    ts = profile.get('data').get('ts')
    query = profile.get('data').get('query')
    rows, cols, data = get_matrix_attributes(ts)
    output.ts.rows = rows
    output.ts.cols = cols
    output.ts.data.extend(data)

    # add query data (query is re-read here; the earlier assignment above is
    # redundant but harmless)
    query = profile.get('data').get('query')
    rows, cols, data = get_matrix_attributes(query)
    if rows and cols and core.is_array_like(data):
        output.query.rows = rows
        output.query.cols = cols
        output.query.data.extend(data)

    # add window(s)
    output.windows.extend(get_windows(profile))

    # add motifs
    motifs = profile.get('motifs')
    if not isinstance(motifs, type(None)):
        for motif in motifs:
            output.motifs.append(get_proto_motif(motif))

    # add discords
    discords = profile.get('discords')
    if not isinstance(discords, type(None)):
        for discord in discords:
            output.discords.append(get_proto_discord(discord))

    # add cmp (corrected matrix profile)
    cmp = profile.get('cmp')
    if not isinstance(cmp, type(None)):
        rows, cols, data = get_matrix_attributes(cmp)
        output.cmp.rows = rows
        output.cmp.cols = cols
        output.cmp.data.extend(data)

    # add av (annotation vector)
    av = profile.get('av')
    if not isinstance(av, type(None)):
        rows, cols, data = get_matrix_attributes(av)
        output.av.rows = rows
        output.av.cols = cols
        output.av.data.extend(data)

    # add av_type
    av_type = profile.get('av_type')
    if not isinstance(av_type, type(None)) and len(av_type) > 0:
        output.av_type = av_type

    # add the matrix profile specific attributes
    if core.is_mp_obj(profile):
        output.mp.ez = profile.get('ez')
        output.mp.join = profile.get('join')

        # add mp
        rows, cols, data = get_matrix_attributes(profile.get('mp'))
        output.mp.mp.rows = rows
        output.mp.mp.cols = cols
        output.mp.mp.data.extend(data)

        # add pi (profile index)
        rows, cols, data = get_matrix_attributes(profile.get('pi'))
        output.mp.pi.rows = rows
        output.mp.pi.cols = cols
        output.mp.pi.data.extend(data)

        # add lmp (left matrix profile, optional)
        rows, cols, data = get_matrix_attributes(profile.get('lmp'))
        if rows and cols and core.is_array_like(data):
            output.mp.lmp.rows = rows
            output.mp.lmp.cols = cols
            output.mp.lmp.data.extend(data)

        # add lpi (left profile index, optional)
        rows, cols, data = get_matrix_attributes(profile.get('lpi'))
        if rows and cols and core.is_array_like(data):
            output.mp.lpi.rows = rows
            output.mp.lpi.cols = cols
            output.mp.lpi.data.extend(data)

        # add rmp (right matrix profile, optional)
        rows, cols, data = get_matrix_attributes(profile.get('rmp'))
        if rows and cols and core.is_array_like(data):
            output.mp.rmp.rows = rows
            output.mp.rmp.cols = cols
            output.mp.rmp.data.extend(data)

        # add rpi (right profile index, optional)
        rows, cols, data = get_matrix_attributes(profile.get('rpi'))
        if rows and cols and core.is_array_like(data):
            output.mp.rpi.rows = rows
            output.mp.rpi.cols = cols
            output.mp.rpi.data.extend(data)

    # add the pan matrix profile specific attributes
    elif core.is_pmp_obj(profile):
        # add pmp
        rows, cols, data = get_matrix_attributes(profile.get('pmp'))
        output.pmp.pmp.rows = rows
        output.pmp.pmp.cols = cols
        output.pmp.pmp.data.extend(data)

        # add pmpi
        rows, cols, data = get_matrix_attributes(profile.get('pmpi'))
        output.pmp.pmpi.rows = rows
        output.pmp.pmpi.cols = cols
        output.pmp.pmpi.data.extend(data)
    else:
        raise ValueError('Expecting Pan-MatrixProfile or MatrixProfile!')

    return output
def to_mpf(profile):
    """
    Converts a given profile object into MPF binary file format.

    Parameters
    ----------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.

    Returns
    -------
    str :
        The profile as a binary formatted string.
    """
    return profile_to_proto(profile).SerializeToString()
def from_proto_to_array(value):
    """
    Utility function to convert a protobuf array back into the correct
    dimensions.

    Parameters
    ----------
    value : array_like
        The array to transform.

    Returns
    -------
    np.ndarray :
        The transformed array, or None for a missing/empty message.
    """
    if value is None or len(value.data) < 1:
        return None

    arr = np.array(value.data)
    # cols == 0 marks a 1-D array; anything else is reshaped to 2-D.
    if value.cols > 0:
        arr = arr.reshape((value.rows, value.cols))

    return arr
def discords_from_proto(discords, is_one_dimensional=False):
    """
    Utility function to transform discord locations back to single dimension
    or multi-dimension location.

    Parameter
    ---------
    discords : array_like
        The protobuf formatted array.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    np.ndarray :
        The transformed discord locations.
    """
    if is_one_dimensional:
        locations = [discord.row for discord in discords]
    else:
        locations = [(discord.row, discord.col) for discord in discords]

    return np.array(locations, dtype=int)
def motifs_from_proto(motifs, is_one_dimensional=False):
    """
    Utility function to transform motif locations back to single dimension
    or multi-dimension location.

    Parameter
    ---------
    motifs : array_like
        The protobuf formatted array.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    list :
        The transformed motif locations.
    """
    def unpack(location):
        # 1-D locations are plain row indices; otherwise (row, col) tuples.
        if is_one_dimensional:
            return location.row
        return (location.row, location.col)

    return [
        {
            'motifs': [unpack(loc) for loc in motif.motifs],
            'neighbors': [unpack(nb) for nb in motif.neighbors],
        }
        for motif in motifs
    ]
def from_mpf(profile):
    """
    Converts binary formatted MPFOutput message into a profile data structure.

    Parameters
    ----------
    profile : str
        The profile as a binary formatted MPFOutput message.

    Returns
    -------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.
    """
    obj = MPFOutput()
    obj.ParseFromString(profile)

    out = {}
    is_one_dimensional = False

    # load in all higher level attributes
    out['class'] = obj.klass
    out['algorithm'] = obj.algorithm
    out['metric'] = obj.metric
    out['sample_pct'] = obj.sample_pct
    out['data'] = {
        'ts': from_proto_to_array(obj.ts),
        'query': from_proto_to_array(obj.query)
    }

    if obj.klass == 'MatrixProfile':
        out['mp'] = from_proto_to_array(obj.mp.mp)
        out['pi'] = from_proto_to_array(obj.mp.pi)
        out['lmp'] = from_proto_to_array(obj.mp.lmp)
        out['lpi'] = from_proto_to_array(obj.mp.lpi)
        out['rmp'] = from_proto_to_array(obj.mp.rmp)
        out['rpi'] = from_proto_to_array(obj.mp.rpi)
        out['ez'] = obj.mp.ez
        out['join'] = obj.mp.join
        out['w'] = obj.windows[0]

        # dimensionality of the stored mp decides how motif/discord
        # locations are reconstructed below
        is_one_dimensional = len(out['mp'].shape) == 1
    elif obj.klass == 'PMP':
        out['pmp'] = from_proto_to_array(obj.pmp.pmp)
        out['pmpi'] = from_proto_to_array(obj.pmp.pmpi)
        out['windows'] = np.array(obj.windows)

    # optional fields: only set when present in the message
    if not isinstance(obj.discords, type(None)) and len(obj.discords) > 0:
        out['discords'] = discords_from_proto(
            obj.discords, is_one_dimensional=is_one_dimensional)

    if not isinstance(obj.motifs, type(None)) and len(obj.motifs) > 0:
        out['motifs'] = motifs_from_proto(
            obj.motifs, is_one_dimensional=is_one_dimensional)

    if not isinstance(obj.cmp, type(None)) and len(obj.cmp.data) > 0:
        out['cmp'] = from_proto_to_array(obj.cmp)

    if not isinstance(obj.av, type(None)) and len(obj.av.data) > 0:
        out['av'] = from_proto_to_array(obj.av)

    if not isinstance(obj.av_type, type(None)) and len(obj.av_type) > 0:
        out['av_type'] = obj.av_type

    return out
751f7bec046094aa0251500c529606eae0d8aaaa | 62 | py | Python | projects/thesis/continuous/custom/evaluation/__init__.py | cpark90/rrrcnn | ba66cc391265be76fa3896b66459ff7241b47972 | [
"Apache-2.0"
] | null | null | null | projects/thesis/continuous/custom/evaluation/__init__.py | cpark90/rrrcnn | ba66cc391265be76fa3896b66459ff7241b47972 | [
"Apache-2.0"
] | null | null | null | projects/thesis/continuous/custom/evaluation/__init__.py | cpark90/rrrcnn | ba66cc391265be76fa3896b66459ff7241b47972 | [
"Apache-2.0"
] | null | null | null | from .classification_evaluation import ClassificationEvaluator | 62 | 62 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75216809d842cd4da667e46bd80746c5bf9a0d5c | 4,196 | py | Python | swamp/wrappers/phenixcc.py | rigdenlab/SWAMP | 3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e | [
"BSD-3-Clause"
] | 2 | 2020-02-15T11:06:34.000Z | 2020-04-10T08:48:49.000Z | swamp/wrappers/phenixcc.py | rigdenlab/SWAMP | 3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e | [
"BSD-3-Clause"
] | 15 | 2020-02-04T10:56:07.000Z | 2021-02-12T09:11:03.000Z | swamp/wrappers/phenixcc.py | rigdenlab/SWAMP | 3e93ab27f4acf0124f7cb2d78a151cc3352b9c6e | [
"BSD-3-Clause"
] | 4 | 2020-02-04T13:25:09.000Z | 2022-03-23T13:44:17.000Z | import os
import shutil
from pyjob import cexec
from swamp.wrappers.wrapper import Wrapper
from swamp.parsers import PhenixParser
class PhenixCC(Wrapper):
    """Wrapper around phenix.get_cc_mtz_pdb

    :param str workdir: working directory
    :param str mtzin: file name of the mtz with the phase information to calculate correlation coefficient
    :param str pdbin: pdb file name to calculate correlation coefficient
    :param `~swamp.logger.swamplogger.SwampLogger` logger: logging interface for the wrapper (default None)
    :param bool silent_start: if True, the logger will not display the start banner (default False)
    :ivar bool error: if True an error has occurred along the process
    :ivar str overall_CC: overall correlation coefficient between the phases in the mtz file and the pdb file
    :ivar str local_CC: local correlation coefficient between the phases in the mtz file and the pdb file

    :example:

    >>> from swamp.wrappers import PhenixCC
    >>> my_phenix = PhenixCC('<workdir>', '<pdbin>', '<mtzin>')
    >>> my_phenix.run()
    """

    def __init__(self, workdir, pdbin, mtzin, logger=None, silent_start=False):

        super(PhenixCC, self).__init__(workdir=workdir, logger=logger, silent_start=silent_start)

        # "NA" until get_scores() parses real values out of the program output
        self.overall_CC = "NA"
        self.local_CC = "NA"
        self.pdbin = pdbin
        self.mtzin = mtzin

    @property
    def wrapper_name(self):
        """The name of this `~swamp.wrapper.wrapper.Wrapper` child class (phenix.get_cc_mtz_pdb)"""
        return "phenix.get_cc_mtz_pdb"

    @property
    def source(self):
        """Override :py:attr:`~swamp.wrappers.wrapper.Wrapper.source` to include PHENIX/build/bin instead of CCP4/bin"""
        return os.path.join(os.environ['PHENIX'], 'build', 'bin', self.wrapper_name)

    @property
    def cmd(self):
        """Command to be executed on the shell"""
        return [self.source, self.mtzin, self.pdbin]

    @property
    def keywords(self):
        """No keywords are used through stdin in phenix.get_cc_mtz_pdb"""
        return None

    @property
    def summary_results(self):
        """A summary with the obtained figures of merit"""
        return "Results: Local CC - %s   Overall CC - %s" % (self.local_CC, self.overall_CC)

    def _check_error(self):
        """Check if errors have occurred during execution of :py:attr:`~swamp.wrappers.phenixcc.PhenixCC.cmd`

        A missing log file is taken as evidence that phenix produced no output.
        """
        if not os.path.isfile(self.logfile):
            self.logger.error("No phenix output, aborting!")
            self.error = True

    def _clean_files(self):
        """Remove intermediate files left behind by phenix.get_cc_mtz_pdb"""
        try:
            shutil.rmtree(os.path.join(self.workdir, "temp_dir"))
            os.remove(os.path.join(self.workdir, "cc.log"))
            os.remove(os.path.join(self.workdir, "offset.pdb"))
        except OSError:
            # Best-effort cleanup: a failure here should not abort the run
            self.logger.warning("Unable to clean files after phenix.get_cc! Current wd : %s" % self.workdir)

    def _run(self):
        """Run :py:attr:`~swamp.wrappers.phenixcc.PhenixCC.cmd` and store the results"""

        # Manage workdirs (phenix writes its scratch files into the cwd)
        self.make_workdir()
        current_workdir = os.getcwd()
        os.chdir(self.workdir)

        # Run phenix.get_cc; nonzero exit is tolerated and checked afterwards
        self.logger.debug(" ".join(self.cmd))
        self.logcontents = cexec(self.cmd, permit_nonzero=True)
        self.make_logfile()
        self._clean_files()
        self._check_error()
        if self.error:
            return
        self.get_scores()

        # Return to original working directory
        os.chdir(current_workdir)

    def get_scores(self, logfile=None):
        """Use :py:obj:`~swamp.parsers.phenixparser.PhenixParser` to parse the log file and obtain values for \
        :py:attr:`~swamp.wrappers.phenixcc.PhenixCC.local_CC` and \
        :py:attr:`~swamp.wrappers.phenixcc.PhenixCC.overall_CC`

        :param logfile: Not in use
        """

        parser = PhenixParser(logger=self.logger, stdout=self.logcontents)
        parser.parse()

        if parser.error:
            self.error = True
            self.logger.warning('Previous errors while parsing phenix.get_cc output detected!')
        else:
            self.local_CC, self.overall_CC = parser.summary
| 36.486957 | 120 | 0.656816 | 4,063 | 0.968303 | 0 | 0 | 851 | 0.202812 | 0 | 0 | 2,104 | 0.50143 |
75217fe93a80109a6e221e14b5e96a11ae8b35fe | 89 | py | Python | api/src/app/settings.py | mdcg/fastapi-processes-microservices | cfa2309e0cb6b811525925cfa447c6f6dff37b3a | [
"MIT"
] | null | null | null | api/src/app/settings.py | mdcg/fastapi-processes-microservices | cfa2309e0cb6b811525925cfa447c6f6dff37b3a | [
"MIT"
] | null | null | null | api/src/app/settings.py | mdcg/fastapi-processes-microservices | cfa2309e0cb6b811525925cfa447c6f6dff37b3a | [
"MIT"
] | null | null | null | from decouple import config
MEDIA_PATH = config("MEDIA_PATH", "/home/mdcg/Documents/")
| 17.8 | 58 | 0.752809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.393258 |
75218be058245f88ba545a6dbbbb4d1e406ae82c | 658 | py | Python | HookMandrill2Mongo.py | krisfremen/hookmandrill2mongo | 2533ba8ec3e7d38ecccd0529c1f2e86175233156 | [
"MIT"
] | null | null | null | HookMandrill2Mongo.py | krisfremen/hookmandrill2mongo | 2533ba8ec3e7d38ecccd0529c1f2e86175233156 | [
"MIT"
] | null | null | null | HookMandrill2Mongo.py | krisfremen/hookmandrill2mongo | 2533ba8ec3e7d38ecccd0529c1f2e86175233156 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import *
from pymongo import MongoClient
# "application" alias so WSGI servers (e.g. mod_wsgi) find the app object.
application = app = Flask(__name__)
# MongoDB connection settings: local server, "webhooks" database,
# "mandrill" collection.
dbhost = "localhost"
dbname = "webhooks"
dbcoll = "mandrill"
@app.route('/', methods=['GET', 'POST'])
def hook():
    """Receive a Mandrill webhook and persist each event to MongoDB.

    Mandrill POSTs a form field ``mandrill_events`` holding a JSON array of
    event objects; each event is stored as its own document under the
    ``hook`` key.  Returns "OK" on success or a 400 response whose body is
    the error message.
    """
    try:
        # One client per request; acceptable for low-volume webhook traffic.
        mongo = MongoClient(host=dbhost)
        mandrillhookjson = json.loads(request.form['mandrill_events'])
        for mandrillhook in mandrillhookjson:
            mongo[dbname][dbcoll].insert({"hook": mandrillhook})
        return "OK"
    # "except Exception as err" is valid on Python 2.6+ and Python 3;
    # the previous "except Exception, err" form is Python-2-only syntax.
    except Exception as err:
        return Response(status=400, response=str(err))
if __name__ == '__main__':
app.debug = True
app.run(host="0.0.0.0") | 25.307692 | 70 | 0.642857 | 0 | 0 | 0 | 0 | 386 | 0.586626 | 0 | 0 | 131 | 0.199088 |
7521abe9ff6b8eb7fdbe61e78df34e69a3c17afc | 1,493 | py | Python | dc_interactions/payloads.py | merlinfuchs/discord-interactions | 922f40e6294e6f86ee2bd38b5cc7594ba9e16a57 | [
"MIT"
] | 1 | 2021-01-08T18:22:59.000Z | 2021-01-08T18:22:59.000Z | dc_interactions/payloads.py | merlinfuchs/discord-interactions | 922f40e6294e6f86ee2bd38b5cc7594ba9e16a57 | [
"MIT"
] | null | null | null | dc_interactions/payloads.py | merlinfuchs/discord-interactions | 922f40e6294e6f86ee2bd38b5cc7594ba9e16a57 | [
"MIT"
] | 1 | 2021-03-22T01:41:57.000Z | 2021-03-22T01:41:57.000Z | from enum import IntEnum
# Names exported via ``from dc_interactions.payloads import *``.
# NOTE(review): ResolvedEntities is not listed here — confirm whether that
# omission is intentional.
__all__ = (
    "InteractionType",
    "InteractionPayload",
    "CommandInteractionData",
    "CommandInteractionDataOption",
)
class InteractionType(IntEnum):
    """Discriminator for the ``type`` field of an interaction payload."""
    PING = 1
    APPLICATION_COMMAND = 2
class ResolvedEntities:
    """Container for the ``resolved`` section of an interaction payload.

    Each attribute defaults to an empty dict when its key is absent.
    """

    def __init__(self, data):
        resolve = data.get
        self.users = resolve("users", {})
        self.members = resolve("members", {})
        self.roles = resolve("roles", {})
        self.channels = resolve("channels", {})
class InteractionPayload:
    """Parsed top-level interaction payload received from Discord.

    ``data`` and ``member`` are only set for APPLICATION_COMMAND
    interactions.
    """

    def __init__(self, data):
        # (A leftover debug ``print(data.get("resolved"))`` was removed here.)
        self.id = data["id"]
        self.type = InteractionType(data["type"])
        self.guild_id = data.get("guild_id")
        self.channel_id = data.get("channel_id")
        self.token = data.get("token")
        self.version = data.get("version")
        self.resolved = ResolvedEntities(data.get("resolved", {}))
        if self.type == InteractionType.APPLICATION_COMMAND:
            # Command invocations additionally carry the command data and
            # the invoking guild member.
            self.data = CommandInteractionData(data["data"])
            self.member = data["member"]
class CommandInteractionData:
    """The ``data`` object of an application-command interaction."""

    def __init__(self, data):
        self.id = data["id"]
        self.name = data["name"]
        raw_options = data.get("options", [])
        self.options = [CommandInteractionDataOption(option) for option in raw_options]
class CommandInteractionDataOption:
    """A single (possibly nested) option of a command interaction."""

    def __init__(self, data):
        self.name = data["name"]
        self.value = data.get("value")
        self.options = [
            CommandInteractionDataOption(sub) for sub in data.get("options", [])
        ]
| 28.169811 | 89 | 0.625586 | 1,322 | 0.885466 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.165439 |
75226d063cbc9f7c130c7e84b92141262adc03ae | 293 | py | Python | prepoint/__init__.py | edose/prepoint | 1239ea3962cc33c4d640bb4ba8b1b00e032b74d4 | [
"MIT"
] | null | null | null | prepoint/__init__.py | edose/prepoint | 1239ea3962cc33c4d640bb4ba8b1b00e032b74d4 | [
"MIT"
] | null | null | null | prepoint/__init__.py | edose/prepoint | 1239ea3962cc33c4d640bb4ba8b1b00e032b74d4 | [
"MIT"
] | null | null | null | __author__ = "Eric Dose :: New Mexico Mira Project, Albuquerque"
# PyInstaller (__init__.py file should be in place as peer to .py file to run):
# in Windows Command Prompt (E. Dose dev PC):
# cd c:\Dev\prepoint\prepoint
# C:\Programs\Miniconda\Scripts\pyinstaller app.spec
| 36.625 | 79 | 0.699659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.931741 |
75233bda5c1d82d7661f64415b9128860bbf01e0 | 1,811 | py | Python | module1-web-application-development-with-flask/web_app/app.py | brucebra000/DS-Unit-3-Sprint-3-Productization-and-Cloud | 79517add84597ea8ee03020a610febd990125a77 | [
"MIT"
] | null | null | null | module1-web-application-development-with-flask/web_app/app.py | brucebra000/DS-Unit-3-Sprint-3-Productization-and-Cloud | 79517add84597ea8ee03020a610febd990125a77 | [
"MIT"
] | null | null | null | module1-web-application-development-with-flask/web_app/app.py | brucebra000/DS-Unit-3-Sprint-3-Productization-and-Cloud | 79517add84597ea8ee03020a610febd990125a77 | [
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['CUSTOM_VAR'] = 5
# SQLite database stored in the local file web_app.db.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///web_app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class User(db.Model):
    """A registered user (one-to-many with :class:`Tweet`)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128))
class Tweet(db.Model):
    """A tweet belonging to a :class:`User`."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): no max length declared for status — confirm intended.
    status = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
#Routing
@app.route('/')
def index():
    """Render the homepage template."""
    # (Removed a commented-out "return 'hello world'" left over from early
    # development.)
    return render_template('homepage.html')
@app.route('/about')
def about():
    """Static placeholder 'about' page."""
    return 'about me'
@app.route('/users')
@app.route('/users.json')
def users():
    """Return all users as a JSON array of ``{id, name}`` objects."""
    users = User.query.all()
    # Build plain dicts from the model columns instead of mutating each
    # instance's __dict__: deleting '_sa_instance_state' from the live
    # object corrupts it for the remainder of the SQLAlchemy session.
    users_response = [{'id': u.id, 'name': u.name} for u in users]
    return jsonify(users_response)
@app.route('/users/create', methods=['Post'])
def create_user():
    """Create a user from the posted form's "name" field.

    Responds with a JSON message either confirming the creation or asking
    for the missing name.
    """
    print('Creating a new user...')
    print('Form data:', dict(request.form))
    if 'name' not in request.form:
        return jsonify({'message': 'oops please specify a name'})
    name = request.form['name']
    print(name)
    db.session.add(User(name=name))
    db.session.commit()
    return jsonify({'message': 'created ok', 'name': name})
@app.route('/hello')
def hello(name=None):
    """Greet by the optional "name" query parameter."""
    print('Visiting the hello page')
    print('Request params:', dict(request.args))
    param = request.args.get('name')
    if param is None:
        message = 'hello world'
    else:
        name = param
        message = f'hello, {name}'
    return render_template('hello.html', message=message)
| 24.808219 | 65 | 0.651022 | 274 | 0.151298 | 0 | 0 | 1,210 | 0.668139 | 0 | 0 | 410 | 0.226394 |
75235861d739f4f133107a3c37b9a2690bd13959 | 1,741 | pyde | Python | mode/examples/Topics/Simulate/Spring/Spring.pyde | katnino/processing.py | ff6450a6606908a014ce795340c06c8f6d90023d | [
"Apache-2.0"
] | 4 | 2016-08-09T14:14:36.000Z | 2021-12-10T07:51:35.000Z | mode/examples/Topics/Simulate/Spring/Spring.pyde | katnino/processing.py | ff6450a6606908a014ce795340c06c8f6d90023d | [
"Apache-2.0"
] | null | null | null | mode/examples/Topics/Simulate/Spring/Spring.pyde | katnino/processing.py | ff6450a6606908a014ce795340c06c8f6d90023d | [
"Apache-2.0"
] | null | null | null | """
Spring.
Click, drag, and release the horizontal bar to start the spring.
"""
# Spring drawing constants for top bar
# NOTE(review): "max"/"min" shadow Python builtins, and the handlers below
# assign several of these names — in plain Python those assignments need
# `global` declarations to take effect; confirm against Processing.py
# sketch semantics.
springHeight = 32 # Height
left = 0 # Left position
right = 0 # Right position
max = 200 # Maximum Y value
min = 100 # Minimum Y value
over = False # If mouse over
move = False # If mouse down and over
# Spring simulation constants
M = 0.8 # Mass
K = 0.2 # Spring constant
D = 0.92 # Damping
R = 150 # Rest position
# Spring simulation variables
ps = R # Position
v = 0.0 # Velocity
a = 0 # Acceleration
f = 0 # Force
def setup():
    # Without `global`, these assignments would create function locals and
    # the module-level bar bounds would remain 0 for the whole sketch.
    global left, right
    size(640, 360)
    rectMode(CORNERS)
    noStroke()
    left = width / 2 - 100
    right = width / 2 + 100
def draw():
    # Clear to gray, advance the spring simulation one step, then render it.
    background(102)
    updateSpring()
    drawSpring()
def drawSpring():
    """Render the spring base and the draggable top bar (reads globals only)."""
    # Draw base; its half-width scales with the bar position ps.
    fill(0.2)
    baseWidth = 0.5 * ps + -8
    rect(width / 2 - baseWidth, ps + springHeight,
         width / 2 + baseWidth, height)
    # Set color and draw top bar: highlighted while hovered or dragged.
    if over or move:
        fill(255)
    else:
        fill(204)
    rect(left, ps, right, ps + springHeight)
def updateSpring():
    # These assignments must rebind the module-level simulation state; the
    # missing `global` made `v = D * (v + a)` raise UnboundLocalError (v
    # read before local assignment) on the first frame.
    global ps, v, a, f, over
    # Update the spring position.
    if not move:
        f = -K * (ps - R) # Spring force: f = -ky
        a = f / M # Acceleration: f = ma  ==>  a = f / m
        v = D * (v + a) # Damped velocity
        ps = ps + v # Integrate position
        if abs(v) < 0.1:
            v = 0.0
    # Test if mouse is over the top bar
    over = left < mouseX < right and ps < mouseY < ps + springHeight
    # Set and constrain the position of top bar while dragging.
    # ("min"/"max" here are the sketch's Y-bound constants, not builtins.)
    if move:
        ps = mouseY - springHeight / 2
        ps = constrain(ps, min, max)
def mousePressed():
    # Begin dragging only when the press starts over the top bar; `global`
    # is required for the assignment to reach the module-level flag.
    global move
    if over:
        move = True
def mouseReleased():
    # Release the bar so the spring simulation takes over; `global` is
    # required for the assignment to reach the module-level flag.
    global move
    move = False
| 21.7625 | 68 | 0.570362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 608 | 0.349225 |
7523a0d4955311eb3320311b8ea38e93f8cc74e7 | 10,255 | py | Python | ldbcache.py | Exahilosys/ldbcache | 7fd21b94eac1eaf007952d0e2811adb88cfb2328 | [
"MIT"
] | null | null | null | ldbcache.py | Exahilosys/ldbcache | 7fd21b94eac1eaf007952d0e2811adb88cfb2328 | [
"MIT"
] | null | null | null | ldbcache.py | Exahilosys/ldbcache | 7fd21b94eac1eaf007952d0e2811adb88cfb2328 | [
"MIT"
] | null | null | null | import pathing
import collections
import itertools
import copy
# Public API of this module.  SoftBulkRowCache is defined below and is the
# base of the exported AlikeBulkRowCache, but was previously missing from
# this list.
__all__ = ('Tree', 'Cache', 'Entry', 'EntryCache', 'RowCache', 'BulkRowCache',
           'SoftBulkRowCache', 'AlikeBulkRowCache')
class Tree(dict):
    """
    Simple **BTree** implementation.

    Nodes are dict subclasses; leaves hold arbitrary values.  Missing
    subtrees are created on demand and pruned when they become empty.
    """

    __slots__ = ()

    def _make(self):
        """
        Return constructor arguments for a subtree; called with no
        arguments.  Subclasses override this to propagate per-level state.
        """
        return ()

    def _crawl(self, keys, stop = True, fail = False):
        """
        Walk the path described by ``keys``, yielding every node visited
        (starting with ``self``).

        :param stop: when True, stop at a missing key instead of creating
                     the subtree.
        :param fail: when True (with ``stop``), re-raise the ``KeyError``
                     for a missing key instead of silently returning.
        """
        node = self
        keys = iter(keys)
        while True:
            yield node
            try:
                key = next(keys)
            except StopIteration:
                break
            try:
                node = node[key]
            except KeyError:
                if stop:
                    if fail:
                        raise
                    return
                # Build the missing subtree from the *current* node's class
                # and arguments (previously ``self``'s were used, so
                # per-level state such as a cache's depth never decreased
                # below the first created level).
                args = node._make()
                node[key] = node = node.__class__(*args)

    def select(self, keys):
        """
        Return the value stored at ``keys``; raise :class:`KeyError` if any
        path component is missing.
        """
        (*trees, value) = self._crawl(keys, fail = True)
        return value

    def create(self, keys, value):
        """
        Place ``value`` at ``keys``, creating any missing subtrees.
        """
        (*keys, key) = keys
        (*trees, tree) = self._crawl(keys, stop = False)
        tree[key] = value

    def _brush(self, keys):
        """
        Remove the value at ``keys`` and every subtree along the path that
        is left empty, yielding each removed node (deepest first).
        """
        trees = self._crawl(keys)
        pairs = tuple(zip(trees, keys))
        for (tree, key) in reversed(pairs):
            yield tree.pop(key)
            # Stop pruning at the first ancestor that still has children.
            if tree:
                break

    def delete(self, keys):
        """
        Remove and return the value at ``keys``, pruning empty subtrees.
        """
        (value, *trees) = self._brush(keys)
        return value

    def switch(self, old, new):
        """
        Move the value at ``old`` to ``new``, pruning and creating subtrees
        as needed, and return the moved value.
        """
        node = Tree.delete(self, old)
        Tree.create(self, new, node)
        return node
class Cache(Tree):
    """
    A :class:`.Tree` that declares its expected depth, enabling traversal
    of the floor (deepest) level.
    """

    __slots__ = ('_depth',)

    def __init__(self, depth):
        super().__init__()
        self._depth = depth

    @property
    def depth(self):
        """The expected depth of this cache."""
        return self._depth

    def _make(self):
        # Subtrees sit one level closer to the floor.
        return (self._depth - 1,)

    def traverse(self):
        """Yield ``(keys, value)`` pairs found at exactly the floor depth."""
        yield from pathing.derive(self, min = self._depth, max = self._depth)

    def entries(self):
        """Yield every floor value."""
        for (_keys, value) in self.traverse():
            yield value

    def __repr__(self):
        count = sum(1 for _ in self.entries())
        return f'<Cache({count})>'
class Entry:
    """
    :class:`dict`-like attribute-accessible and read-only representation of
    data.

    .. code-block:: py

        >>> entry = Entry({'name': 'Pup', 'age': 6})
        >>> entry.name
        'Pup'
        >>> list(entry)
        ['name', 'age']
    """

    __slots__ = ('__data__',)

    def __init__(self, data = None, direct = False):
        """
        :param data: mapping to wrap; ``None`` means an empty entry.
        :param direct: when True, adopt ``data`` itself instead of a
                       shallow copy (used internally by :meth:`__copy__`).
        """
        self.__data__ = {} if data is None else data if direct else data.copy()

    def __getitem__(self, key):
        return self.__data__[key]

    def __getattr__(self, key):
        # Fall back to the underlying mapping for attribute access.
        try:
            value = self[key]
        except KeyError as error:
            raise AttributeError(*error.args) from None
        return value

    def __copy__(self):
        data = self.__data__.copy()
        fake = self.__class__(data, True)
        return fake

    def __deepcopy__(self, memo):
        fake = copy.copy(self)
        # Register the copy *before* recursing so shared and cyclic
        # references resolve to the same object (the previous
        # implementation ignored ``memo``, breaking both cases).
        memo[id(self)] = fake
        data = fake.__data__
        for (key, value) in data.items():
            data[key] = copy.deepcopy(value, memo)
        return fake

    def __iter__(self):
        yield from self.__data__.keys()

    def __repr__(self):
        items = self.__data__.items()
        make = '{0}={1}'.format
        show = '|'.join(itertools.starmap(make, items))
        return f'<{self.__class__.__name__}({show})>'
def _create(value):
    """
    Return ``value`` unchanged when it is already an :class:`.Entry`;
    otherwise wrap it in a new one.
    """
    if isinstance(value, Entry):
        return value
    return Entry(value)
def _modify(entry, data):
    """
    Modify an :class:`.Entry` in place by merging ``data`` into its
    underlying mapping.
    """
    entry.__data__.update(data)
class EntryCache(Cache):
    """
    A :class:`.Cache` that stores, creates and modifies :class:`.Entry`
    instances through pluggable ``create``/``modify`` callables.
    """

    __slots__ = ('_manage',)

    _Asset = collections.namedtuple('Asset', 'create modify')

    def __init__(self, depth, create = _create, modify = _modify):
        super().__init__(depth)
        self._manage = self._Asset(create, modify)

    def _make(self, *args, **kwargs):
        # Propagate the managed callables into created subtrees.
        return super()._make() + (self._manage.create, self._manage.modify)

    def create(self, keys, data):
        """
        Create an entry from ``data``, store it at ``keys`` and return it.
        """
        entry = self._manage.create(data)
        super().create(keys, entry)
        return entry

    def modify(self, keys, data):
        """
        Update the entry at ``keys`` with ``data`` and return
        ``(old, new)``, where ``old`` is a deep copy taken beforehand.
        """
        entry = Tree.select(self, keys)
        snapshot = copy.deepcopy(entry)
        self._manage.modify(entry, data)
        return (snapshot, entry)
class RowCache(EntryCache):
    """
    An :class:`.EntryCache` keyed by a sequence of primary-key field names,
    allowing entries to be placed and re-located from their own data.
    """

    __slots__ = ('_primary',)

    def __init__(self, primary):
        """
        :param primary: ordered field names forming the primary key; the
                        cache depth equals ``len(primary)``.
        """
        super().__init__(len(primary))
        self._primary = primary

    @property
    def primary(self):
        """
        Get the primary keys.
        """
        return self._primary

    def _make(self):
        # Subtrees sit one level down, so they are keyed by the *remaining*
        # primary fields.  This used to be ``self._primary[:1]`` (the first
        # field), which only worked for two-level caches because both
        # slices happen to have length 1 there.
        primary = self._primary[1:]
        return (primary,)

    def query(self, data):
        """
        Return the values of ``data``'s primary-key fields, in order.

        Collection stops at the first missing field, so the result is the
        longest available *prefix* of the primary key (possibly empty);
        :meth:`modify` relies on this prefix semantic.
        """
        store = []
        try:
            # list.extend keeps the items consumed before the KeyError,
            # which is what produces the prefix behavior described above.
            store.extend(data[key] for key in self._primary)
        except KeyError:
            pass
        return tuple(store)

    def create(self, data):
        """
        Create an entry and place it at the spot designated by its own
        primary key values.
        """
        keys = self.query(data)
        result = super().create(keys, data)
        return result

    def modify(self, keys, data):
        """
        Modify the entry at ``keys`` and move it when the update changed
        any of its primary key values.
        """
        result = super().modify(keys, data)
        new = self.query(data)
        size = len(new)
        old = keys[:size]
        if not tuple(old) == new:
            super().switch(old, new)
        return result
class BulkRowCache(RowCache):
    """
    Similar to its base, but data inputs should be arrays of data.
    """

    __slots__ = ()

    def create(self, data):
        """
        Create and return all entries.

        :param data: iterable of per-entry data; each item is stored under
                     the spot derived from its own primary key values.
        :return: tuple of the created entries.
        """
        results = []
        # NOTE: ``data`` is deliberately rebound to each item inside the loop.
        for data in data:
            result = super().create(data)
            results.append(result)
        return tuple(results)

    def modify(self, keys, data):
        """
        Modify all entries at ``keys`` and change their spot if necessary.

        The keys don't need to be a full path to specific entries.

        .. tip::

            - Assume two entries exist at ``(0, 1, 2)`` and ``(0, 1, 4)``.
            - Updating ``(0, 1)`` requires ``data`` to be a two-item array.
            - That array should contain the respective data in order.

        ``data`` must therefore be ordered to match the traversal order
        produced by ``pathing.derive`` over the selected subtree.

        :return: list of ``(old, new)`` pairs, one per modified entry.
        """
        node = super().select(keys)
        depth = self._depth - len(keys)
        items = pathing.derive(node, max = depth, min = depth)
        try:
            # Split (subkeys, entry) pairs; ValueError means there were none.
            (subkeys, _entries) = zip(*items)
        except ValueError:
            subkeys = ()
        results = []
        # NOTE(review): returns a list here while create() returns a tuple —
        # confirm no caller relies on the exact sequence type.
        for (subkeys, data) in zip(subkeys, data):
            curkeys = (*keys, *subkeys)
            result = super().modify(curkeys, data)
            results.append(result)
        return results
class SoftBulkRowCache(BulkRowCache):
    """
    Lookup methods return a (possibly empty) array of entries instead of
    raising on a missing path.
    """

    __slots__ = ()

    @classmethod
    def _flat(cls, node):
        """
        Normalise ``node`` into a tuple: a subtree becomes all of its floor
        entries, anything else becomes a single-item tuple.
        """
        if isinstance(node, cls):
            return tuple(node.entries())
        return (node,)

    def select(self, keys):
        """
        Refer to :meth:`.Tree.select`.
        """
        try:
            node = super().select(keys)
        except KeyError:
            return ()
        return self._flat(node)

    def modify(self, keys, data):
        """
        Refer to :meth:`.BulkRowCache.modify`.
        """
        try:
            return super().modify(keys, data)
        except KeyError:
            return ()

    def delete(self, keys):
        """
        Refer to :meth:`.Tree.delete`.
        """
        try:
            node = super().delete(keys)
        except KeyError:
            return ()
        return self._flat(node)
class AlikeBulkRowCache(SoftBulkRowCache):
    """
    Variant whose active methods all accept ``(keys, data)`` so callers can
    invoke create/modify/delete uniformly.
    """

    __slots__ = ()

    def create(self, keys, data):
        """
        ``data`` is used, ``keys`` is ignored.
        Refer to :meth:`.BulkRowCache.create`.
        """
        return super().create(data)

    def modify(self, keys, data):
        """
        Both ``keys`` and ``data`` are used.
        Refer to :meth:`.SoftBulkRowCache.modify`.
        """
        return super().modify(keys, data)

    def delete(self, keys, data):
        """
        ``keys`` is used, ``data`` is ignored.
        Refer to :meth:`.SoftBulkRowCache.delete`.
        """
        return super().delete(keys)
| 19.721154 | 80 | 0.518381 | 9,783 | 0.953974 | 1,376 | 0.134178 | 446 | 0.043491 | 0 | 0 | 3,348 | 0.326475 |
7523dddeccce7c26beff51912adb1dd4fceff950 | 11,210 | py | Python | logging_automation.py | aws-samples/aws-centeralized-logging-with-datadog | f1fb778664915b645a0fec5c56a34654ac761233 | [
"MIT-0"
] | 3 | 2020-02-05T03:52:33.000Z | 2021-10-30T01:41:04.000Z | logging_automation.py | aws-samples/aws-centeralized-logging-with-datadog | f1fb778664915b645a0fec5c56a34654ac761233 | [
"MIT-0"
] | null | null | null | logging_automation.py | aws-samples/aws-centeralized-logging-with-datadog | f1fb778664915b645a0fec5c56a34654ac761233 | [
"MIT-0"
] | 6 | 2018-09-06T05:48:07.000Z | 2021-10-30T01:40:56.000Z | # Copyright 2008-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import boto3
import botocore
import time
import sys
import argparse
import json
import os
import base64
# Decrypt the Datadog API key at import time: DD_KMS_API_KEY holds a
# base64-encoded, KMS-encrypted ciphertext; the decrypted plaintext is
# passed to the member-account CloudFormation stack.
encrypted_token = os.environ['DD_KMS_API_KEY']
ddApiKey = boto3.client('kms').decrypt(CiphertextBlob=base64.b64decode(encrypted_token))['Plaintext']
def lambda_handler(event, context):
    """
    Entry point: create (or reuse) an AWS member account, extend the shared
    security account's bucket/role policies to cover it, then deploy the
    Member.yml CloudFormation stack (with Datadog log forwarding) into it.

    ``event`` keys read here: ``existing_accountid`` (None to create a new
    account), ``account_name``, ``account_email``, ``masteraccount_id``,
    ``securityaccount_id``, ``cloudtrail_bucket``, ``datadogcode_bucket``.
    ``context`` is unused.
    """
    access_to_billing = "DENY"
    if event['existing_accountid'] is None:
        # No account supplied: create one via the Organizations master account.
        print("Creating new account: " + event['account_name'] + " (" + event['account_email'] + ")")
        print("********************************")
        credentials = assume_role(event['masteraccount_id'], 'ST-S-Automation', None)
        account_id = create_account(event['account_name'], event['account_email'], 'OrganizationAccountAccessRole', access_to_billing, credentials)
        print("********************************")
        print("Created acount: " + account_id)
        print("********************************")
    else:
        account_id = event['existing_accountid']
    # Grant the (new or existing) account access to the shared buckets/roles.
    print("Updating Shared Security account policy...")
    credentials = assume_role(event['securityaccount_id'], 'ST-S-Automation', None)
    update_policy(account_id, event['cloudtrail_bucket'], event['datadogcode_bucket'], credentials)
    print("********************************")
    print("Deploying resources from " + 'Member.yml' + " as " + 'Member' + " in " + 'us-east-1')
    # Role-chain: master account -> member account's OrganizationAccountAccessRole.
    mastercredentials = assume_role(event['masteraccount_id'], 'ST-S-Automation', None)
    credentials = assume_role(account_id, 'OrganizationAccountAccessRole', mastercredentials)
    template = get_template('Member.yml')
    stack = deploy_resources(template, 'Member', 'us-east-1', event['cloudtrail_bucket'], event['datadogcode_bucket'], event['securityaccount_id'], ddApiKey, credentials)
    print("********************************")
    print(stack)
    print("********************************")
    print("Resources deployed for account " + account_id)
def assume_role(account_id, account_role, credentials):
    """
    Assume ``account_role`` in ``account_id`` and return the temporary
    credentials, retrying indefinitely (with a 10s pause) on ClientError.

    :param credentials: existing temporary credentials to assume from, or
                        None to use the Lambda's own execution role.
    """
    if credentials is None:
        sts_client = boto3.client('sts')
    else:
        sts_client = boto3.client('sts', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'],)
    role_arn = f'arn:aws:iam::{account_id}:role/{account_role}'
    while True:
        try:
            assumed = sts_client.assume_role(RoleArn=role_arn, RoleSessionName="NewRole")
            return assumed['Credentials']
        except botocore.exceptions.ClientError as e:
            print(e)
            time.sleep(10)
def create_account(account_name, account_email, account_role, access_to_billing, credentials):
    '''
    Create a new AWS account and add it to an organization

    :param account_role: IAM role to pre-provision in the new account.
    :param access_to_billing: "ALLOW" or "DENY" IAM-user billing access.
    :param credentials: temporary credentials for the Organizations master.
    :return: the new account id; exits the process on failure.
    '''
    client = boto3.client('organizations', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'],)
    try:
        create_account_response = client.create_account(Email=account_email, AccountName=account_name, RoleName=account_role, IamUserAccessToBilling=access_to_billing)
    except botocore.exceptions.ClientError as e:
        print(e)
        sys.exit(1)
    time.sleep(10)
    account_status = 'IN_PROGRESS'
    # NOTE(review): after the single initial sleep, this loop polls
    # describe_create_account_status without any delay between iterations —
    # consider sleeping per iteration to avoid hammering/throttling.
    while account_status == 'IN_PROGRESS':
        create_account_status_response = client.describe_create_account_status(CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))
        print("Create account status "+str(create_account_status_response))
        account_status = create_account_status_response.get('CreateAccountStatus').get('State')
    if account_status == 'SUCCEEDED':
        account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')
    elif account_status == 'FAILED':
        print("Account creation failed: " + create_account_status_response.get('CreateAccountStatus').get('FailureReason'))
        sys.exit(1)
    # NOTE(review): root_id is fetched but never used — confirm whether an
    # OU attachment step was intended here.
    root_id = client.list_roots().get('Roots')[0].get('Id')
    return account_id
def update_policy(account_id, cloudtrail_bucket, datadogcode_bucket, credentials):
    """
    Extend three shared-security-account policies to cover ``account_id``:
    the CloudTrail bucket policy, the Datadog Lambda code bucket policy,
    and the LoggingLambdaRole inline AssumeRole policy.

    :param credentials: temporary credentials for the security account.
    """
    s3 = boto3.client('s3', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'],)
    iam = boto3.client('iam', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'],)
    '''
    Update CloudTrail bucket policy
    '''
    # Allow CloudTrail to write the new account's logs under AWSLogs/<id>/.
    cloudtrail_arn = "arn:aws:s3:::" + cloudtrail_bucket +"/AWSLogs/" + account_id + "/*"
    cloudtrail_response = s3.get_bucket_policy(Bucket=cloudtrail_bucket)
    cloudtrailpolicy = json.loads(cloudtrail_response['Policy'])
    for cloudtrail_index in range(len(cloudtrailpolicy['Statement'])):
        if cloudtrailpolicy['Statement'][cloudtrail_index]['Sid'] == 'AWSCloudTrailWrite':
            folder_list = cloudtrailpolicy['Statement'][cloudtrail_index]['Resource']
            folder_list.append(cloudtrail_arn)
            cloudtrailpolicy['Statement'][cloudtrail_index]['Resource'] = folder_list
    s3.put_bucket_policy(Bucket=cloudtrail_bucket, Policy=json.dumps(cloudtrailpolicy))
    '''
    Update Datadog Lambda Code bucket policy
    '''
    # Let the new account read the Datadog forwarder Lambda code bundle.
    newaccount_arn = "arn:aws:iam::" + account_id + ":root"
    datadog_response = s3.get_bucket_policy(Bucket=datadogcode_bucket)
    datadogcodepolicy = json.loads(datadog_response['Policy'])
    datadog_index = 0
    for statement in datadogcodepolicy['Statement']:
        if statement['Sid'] == 'CodeReadAccess':
            account_list = statement['Principal']['AWS']
            account_list.append(newaccount_arn)
            statement['Principal']['AWS'] = account_list
            datadogcodepolicy['Statement'][datadog_index] = statement
        datadog_index += 1
    s3.put_bucket_policy(Bucket=datadogcode_bucket, Policy=json.dumps(datadogcodepolicy))
    '''
    Update LoggingLambdaRole role policy
    '''
    # Permit this Lambda's role to assume ST-S-Automation in the new account.
    account_arn = "arn:aws:iam::" + account_id + ":role/ST-S-Automation"
    assumerole_response = iam.get_role_policy(RoleName='LoggingLambdaRole', PolicyName='AssumeRole')
    assumerole_policy = assumerole_response['PolicyDocument']
    for assumerole_index in range(len(assumerole_policy['Statement'])):
        if assumerole_policy['Statement'][assumerole_index]['Sid'] == 'AWSAssumeRole':
            account_list = assumerole_policy['Statement'][assumerole_index]['Resource']
            account_list.append(account_arn)
            assumerole_policy['Statement'][assumerole_index]['Resource'] = account_list
    iam.put_role_policy(RoleName='LoggingLambdaRole', PolicyName='AssumeRole', PolicyDocument=json.dumps(assumerole_policy))
    print("Policies successfully updated")
def get_template(template_file):
    '''
    Read a CloudFormation template file and return its contents as a string.

    :param template_file: path to the template (e.g. ``Member.yml``)
    '''
    print("Reading resources from " + template_file)
    # Use a context manager so the handle is closed even if read() fails
    # (the previous implementation never closed the file).
    with open(template_file, "r") as f:
        return f.read()
def deploy_resources(template, stack_name, stack_region, cloudtrail_bucket, datadogcode_bucket, securityaccount_id, datadog_apikey, credentials):
    '''
    Create a CloudFormation stack of resources within the new account and
    block until it finishes building.

    :param datadog_apikey: decrypted Datadog API key, passed as a stack
                           parameter.  It must never be printed/logged —
                           the previous implementation leaked it to the
                           CloudWatch logs via a debug print.
    :return: the describe_stacks response for the created stack; exits the
             process if the stack rolls back.
    '''
    datestamp = time.strftime("%d/%m/%Y")
    client = boto3.client('cloudformation',
                          aws_access_key_id=credentials['AccessKeyId'],
                          aws_secret_access_key=credentials['SecretAccessKey'],
                          aws_session_token=credentials['SessionToken'],
                          region_name=stack_region)
    print("Creating stack " + stack_name + " in " + stack_region)
    creating_stack = True
    while creating_stack is True:
        try:
            creating_stack = False
            create_stack_response = client.create_stack(
                StackName=stack_name,
                TemplateBody=template,
                Parameters=[
                    {
                        'ParameterKey' : 'cloudtrailbucket',
                        'ParameterValue' : cloudtrail_bucket
                    },
                    {
                        'ParameterKey' : 'securityaccountid',
                        'ParameterValue' : securityaccount_id
                    },
                    {
                        'ParameterKey' : 'Datadogbucket',
                        'ParameterValue' : datadogcode_bucket
                    },
                    {
                        'ParameterKey' : 'DatadogAPIToken',
                        'ParameterValue' : datadog_apikey
                    }
                ],
                NotificationARNs=[],
                Capabilities=[
                    'CAPABILITY_NAMED_IAM',
                ],
                OnFailure='ROLLBACK',
                Tags=[
                    {
                        'Key': 'ManagedResource',
                        'Value': 'True'
                    },
                    {
                        'Key': 'DeployDate',
                        'Value': datestamp
                    }
                ]
            )
        except botocore.exceptions.ClientError as e:
            # Retry stack creation after a pause (e.g. throttling).
            creating_stack = True
            print(e)
            time.sleep(10)
    stack_building = True
    print("********************************")
    print("Stack creation in process...")
    print("********************************")
    print(create_stack_response)
    # Poll the stack events every 10s until the stack-level event reports
    # either CREATE_COMPLETE or ROLLBACK_COMPLETE.
    while stack_building is True:
        event_list = client.describe_stack_events(StackName=stack_name).get("StackEvents")
        stack_event = event_list[0]

        if (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
                stack_event.get('ResourceStatus') == 'CREATE_COMPLETE'):
            stack_building = False
            print("Stack construction complete.")
        elif (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
                stack_event.get('ResourceStatus') == 'ROLLBACK_COMPLETE'):
            stack_building = False
            print("Stack construction failed.")
            sys.exit(1)
        else:
            print(stack_event)
            print("********************************")
            print("Stack building . . .")
            print("********************************")
            time.sleep(10)
    stack = client.describe_stacks(StackName=stack_name)
    return stack
7523e5848177e8777c98952e7fd199b4781b9cbb | 1,329 | py | Python | setup.py | merwok-forks/shortuuid | 4da632a986c3a43f75c7df64f27a90bbf7ff8039 | [
"BSD-3-Clause"
] | null | null | null | setup.py | merwok-forks/shortuuid | 4da632a986c3a43f75c7df64f27a90bbf7ff8039 | [
"BSD-3-Clause"
] | null | null | null | setup.py | merwok-forks/shortuuid | 4da632a986c3a43f75c7df64f27a90bbf7ff8039 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
from setuptools import setup
from shortuuid import __version__
assert sys.version >= "2.5", "Requires Python v2.5 or above."
# Trove classifiers: license plus the supported Python versions.
classifiers = [
    "License :: OSI Approved :: BSD License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 2.5",
    "Programming Language :: Python :: 2.6",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Topic :: Software Development :: Libraries :: Python Modules",
]
setup(
name="shortuuid",
version=__version__,
author="Stochastic Technologies",
author_email="info@stochastictechnologies.com",
url="https://github.com/stochastic-technologies/shortuuid/",
description="A generator library for concise, " "unambiguous and URL-safe UUIDs.",
long_description="A library that generates short, pretty, "
"unambiguous unique IDs "
"by using an extensive, case-sensitive alphabet and omitting "
"similar-looking letters and numbers.",
license="BSD",
classifiers=classifiers,
packages=["shortuuid"],
test_suite="shortuuid.tests",
tests_require=[],
)
| 31.642857 | 86 | 0.676448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 896 | 0.674191 |
75256c2774647f7511c9c5789e9a1b6ac39eb890 | 661 | py | Python | scripts/tweakstatus.py | willingc/pc-bot | 8a8ba6bc4c1fbb4bd3dbc740509bf30b3077b06c | [
"BSD-3-Clause"
] | 5 | 2015-05-29T04:18:15.000Z | 2020-07-01T00:28:27.000Z | scripts/tweakstatus.py | alex/pc-bot | ff6d9fcb16363c447e9abff1079708f945f795e9 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T03:48:57.000Z | 2022-02-11T03:48:57.000Z | scripts/tweakstatus.py | willingc/pc-bot | 8a8ba6bc4c1fbb4bd3dbc740509bf30b3077b06c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Manually tweak a talk's status, leaving a note about why.
"""
import sys
import argparse
import pycon_bot.mongo
from pycon_bot.models import TalkProposal, Note
# CLI: talk_id (int), new_status (one of TalkProposal.STATUSES), a free-text
# note, and an optional Mongo DSN.
p = argparse.ArgumentParser()
p.add_argument('talk_id', type=int)
p.add_argument('new_status', choices=[c[0] for c in TalkProposal.STATUSES])
p.add_argument('note')
p.add_argument('--dsn')
args = p.parse_args()
# Connect via the --dsn flag or the MONGO_DSN environment variable.
if not pycon_bot.mongo.connect(args.dsn):
    sys.stderr.write("Need to pass --dsn or set env[MONGO_DSN].")
    sys.exit(1)
t = TalkProposal.objects.get(talk_id=args.talk_id)
t.update(
push__notes = Note(text=args.note),
set__status = args.new_status
)
| 25.423077 | 75 | 0.733737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.246596 |
7525706ad54bd4b2a9f0d23e9ee500de4af64324 | 5,034 | py | Python | src/javactl/setting/java_setting.py | mogproject/javactl | 4f933f434695938d0ce5ab02d1b28fec983645b1 | [
"Apache-2.0"
] | 5 | 2015-12-12T06:45:52.000Z | 2018-07-31T14:47:23.000Z | src/javactl/setting/java_setting.py | mogproject/javactl | 4f933f434695938d0ce5ab02d1b28fec983645b1 | [
"Apache-2.0"
] | 18 | 2015-09-13T13:50:28.000Z | 2019-04-13T08:00:58.000Z | src/javactl/setting/java_setting.py | mogproject/javactl | 4f933f434695938d0ce5ab02d1b28fec983645b1 | [
"Apache-2.0"
] | 4 | 2016-03-14T08:43:25.000Z | 2019-04-12T08:10:07.000Z | from __future__ import division, print_function, absolute_import, unicode_literals
import os
from mog_commons.case_class import CaseClass
from mog_commons.functional import omap, oget
class JavaSetting(CaseClass):
    """Immutable description of how to launch a JVM (home path, memory, JMX, props)."""
    class Memory(CaseClass):
        """JVM memory tuning; each field maps one-to-one onto a -X/-XX flag."""
        def __init__(self,
                     java_version,  # only used for the version-dependent assertions below
                     heap_min=None,
                     heap_max=None,
                     perm_min=None,
                     perm_max=None,
                     metaspace_min=None,
                     metaspace_max=None,
                     new_min=None,
                     new_max=None,
                     survivor_ratio=None,
                     target_survivor_ratio=None):
            # constraints: PermGen flags exist only before Java 8, Metaspace flags from Java 8 on
            assert perm_min is None or java_version < 1.8, 'java.memory.perm_min is not applicable to java >= 1.8'
            assert perm_max is None or java_version < 1.8, 'java.memory.perm_max is not applicable to java >= 1.8'
            assert metaspace_min is None or java_version >= 1.8, \
                'java.memory.metaspace_min is not applicable to java < 1.8'
            assert metaspace_max is None or java_version >= 1.8, \
                'java.memory.metaspace_max is not applicable to java < 1.8'
            CaseClass.__init__(
                self,
                ('heap_min', heap_min),
                ('heap_max', heap_max),
                ('perm_min', perm_min),
                ('perm_max', perm_max),
                ('metaspace_min', metaspace_min),
                ('metaspace_max', metaspace_max),
                ('new_min', new_min),
                ('new_max', new_max),
                ('survivor_ratio', survivor_ratio),
                ('target_survivor_ratio', target_survivor_ratio)
            )
        def get_opts(self):
            """Return command-line memory flags for every configured (non-None) field."""
            xs = [
                omap(lambda s: '-Xms%s' % s, self.heap_min),
                omap(lambda s: '-Xmx%s' % s, self.heap_max),
                omap(lambda s: '-XX:PermSize=%s' % s, self.perm_min),
                omap(lambda s: '-XX:MaxPermSize=%s' % s, self.perm_max),
                omap(lambda s: '-XX:MetaspaceSize=%s' % s, self.metaspace_min),
                omap(lambda s: '-XX:MaxMetaspaceSize=%s' % s, self.metaspace_max),
                omap(lambda s: '-Xmn%s' % s, self.new_min),
                omap(lambda s: '-XX:MaxNewSize=%s' % s, self.new_max),
                omap(lambda x: '-XX:SurvivorRatio=%d' % x, self.survivor_ratio),
                omap(lambda x: '-XX:TargetSurvivorRatio=%d' % x, self.target_survivor_ratio),
            ]
            # omap yields None for unset fields; drop those entries.
            return [x for x in xs if x is not None]
    class JMX(CaseClass):
        """Remote-JMX settings; disabled entirely when no port is given."""
        def __init__(self, port=None, ssl=None, authenticate=None):
            CaseClass.__init__(self, ('port', port), ('ssl', ssl), ('authenticate', authenticate))
        def get_opts(self):
            """Return -Dcom.sun.management.jmxremote.* flags, or [] when port is unset."""
            if self.port is None:
                return []
            xs = [
                '-Dcom.sun.management.jmxremote',
                omap(lambda x: '-Dcom.sun.management.jmxremote.port=%d' % x, self.port),
                omap(lambda b: '-Dcom.sun.management.jmxremote.ssl=%s' % JavaSetting.py_to_java_str(b), self.ssl),
                omap(lambda b: '-Dcom.sun.management.jmxremote.authenticate=%s' % JavaSetting.py_to_java_str(b), self.authenticate),
            ]
            return [x for x in xs if x is not None]
    def __init__(self,
                 home=None,
                 version=None,
                 server=None,
                 memory=None,
                 jmx=None,
                 prop=None,
                 option=None):
        """home: absolute JAVA_HOME path (required). version: e.g. 1.8 (required).
        memory/jmx: dicts forwarded to the nested Memory/JMX constructors.
        prop: -D system properties dict. option: extra raw JVM arguments list."""
        # constraints
        assert home is not None and os.path.isabs(home), 'java.home is required and must be an absolute path'
        assert version is not None, 'java.version is required'
        # TODO: check value types and format
        assert prop is None or isinstance(prop, dict), 'java.prop must be a dict'
        assert option is None or isinstance(option, list), 'java.option must be a list'
        CaseClass.__init__(
            self,
            ('home', home),
            ('version', version),
            ('server', server),
            ('memory', JavaSetting.Memory(version, **oget(memory, {}))),
            ('jmx', JavaSetting.JMX(**oget(jmx, {}))),
            ('prop', oget(prop, {})),
            ('option', oget(option, [])),
        )
    def get_executable(self):
        """Path to the java binary under this setting's home directory."""
        return os.path.join(self.home, 'bin', 'java')
    def get_opts(self):
        """All JVM options: -server, memory flags, JMX flags, -D properties, raw options."""
        sv = ['-server'] if self.server else []
        # Properties are sorted so the generated command line is deterministic.
        pr = ['-D%s=%s' % (k, JavaSetting.py_to_java_str(v)) for k, v in sorted(self.prop.items())]
        return sv + self.memory.get_opts() + self.jmx.get_opts() + pr + self.option
    def get_args(self):
        """Full argv for launching the JVM (executable followed by all options)."""
        return [self.get_executable()] + self.get_opts()
    @staticmethod
    def py_to_java_str(value):
        """Convert python data to Java-like string"""
        if isinstance(value, bool):
            # Java expects lowercase true/false.
            return str(value).lower()
        else:
            return str(value)
| 42.302521 | 132 | 0.53973 | 4,846 | 0.962654 | 0 | 0 | 216 | 0.042908 | 0 | 0 | 1,058 | 0.210171 |
7525a7d9252dd56445bf00805d482a8fadc93bb8 | 405 | py | Python | test/test_solidsnake.py | mattcoding4days/MetalGear | 964ca2f7b031d5787b00a3d9ff92a9d79371646d | [
"BSD-2-Clause"
] | 1 | 2020-05-24T17:57:53.000Z | 2020-05-24T17:57:53.000Z | test/test_solidsnake.py | mattcoding4days/MetalGear | 964ca2f7b031d5787b00a3d9ff92a9d79371646d | [
"BSD-2-Clause"
] | null | null | null | test/test_solidsnake.py | mattcoding4days/MetalGear | 964ca2f7b031d5787b00a3d9ff92a9d79371646d | [
"BSD-2-Clause"
] | null | null | null | """
Test code for the MetalGear package
Test solidsnake.py
"""
import unittest
from MetalGear.solidsnake import Snake
class TestSolidSnake(unittest.TestCase):
    """Smoke test for the Snake model's string representation."""

    def test_solidsnake(self):
        """A Snake instance must have a non-empty string form."""
        solid_snake = Snake()
        # Use str() rather than calling __str__ directly (same protocol callers
        # use), and assertTrue so failures are reported through unittest.
        self.assertTrue(str(solid_snake), 'Return should not be empty')
        print(f'{self.test_solidsnake.__name__} => {solid_snake}')


if __name__ == '__main__':
    unittest.main()
| 22.5 | 66 | 0.708642 | 235 | 0.580247 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.37284 |
7529f8e53ff7f49c5aec1728ca3a0893d882a584 | 4,339 | py | Python | prebuilder/systems/cmake.py | prebuilder/prebuilder.py | e5ee9dce0e46a3bc022dfaa0ce9f1be0563e2bdc | [
"Unlicense"
] | 4 | 2019-11-10T19:53:00.000Z | 2020-11-03T00:35:25.000Z | prebuilder/systems/cmake.py | prebuilder/prebuilder.py | e5ee9dce0e46a3bc022dfaa0ce9f1be0563e2bdc | [
"Unlicense"
] | null | null | null | prebuilder/systems/cmake.py | prebuilder/prebuilder.py | e5ee9dce0e46a3bc022dfaa0ce9f1be0563e2bdc | [
"Unlicense"
] | 1 | 2019-11-15T08:49:49.000Z | 2019-11-15T08:49:49.000Z | __all__ = ("CMakeBS", "CMake")
import typing
import sys
from pathlib import Path
import re
import shlex
from pprint import pprint
import os
from collections import OrderedDict, defaultdict
from .ninja import NinjaBS
from ..core.Package import VersionedPackageRef, Package, PackageMetadata, PackageInstalledFiles, getPackageInstalledFilesFromRefAndParent, PackagingSpec, AssociateTearingSpecAndMetadata
from ..core.BuildSystem import BuildSystem
from ..core.BuiltPackage import BuiltPackage
from ..tools import getCfj
from ..importers.cpack import cpackInterpreter
from .. import globalPrefs
from ..styles import styles
class CMakeBS(NinjaBS):
	"""Build-system driver for CMake projects: configure with CMake, build via Ninja, package via CPack metadata."""
	# Presence of this file marks a source tree as a CMake project.
	essentialFiles = ("CMakeLists.txt",)
	@classmethod
	def ripPackageWithCMake(cls, pkgInst, buildDir, firejailCfg, installEnv, componentCMakeId):
		"""Install a single CMake component into pkgInst.root using cmake_install.cmake."""
		# DESTDIR redirects the install into the package's staging root.
		installEnv["DESTDIR"] = str(pkgInst.root)
		# NOTE(review): firejailCfg1 is prepared (apparmor + path whitelist) but the
		# getCfj call below receives **firejailCfg instead -- the hardened copy
		# appears unused; confirm whether firejailCfg1 was intended there.
		firejailCfg1 = type(firejailCfg)(firejailCfg)
		firejailCfg1["apparmor"] = True
		firejailCfg1["paths"].extend((pkgInst.root, buildDir))
		args = ["-DCOMPONENT=" + shlex.quote(str(componentCMakeId)), "-P", "cmake_install.cmake"]
		getCfj(_cwd=buildDir, **firejailCfg).cmake(*args, _cwd=buildDir, _env=installEnv) # IMPORTANT, only relative path to cmake_install.cmake here! Otherwise doesn't work!
		pkgInst.registerPath(pkgInst.root)
	@classmethod
	def __call__(cls, sourceDir: Path, ref: VersionedPackageRef, packagesRootsDir: Path, gnuDirs, buildOptions: typing.Optional[typing.Dict[str, bool]] = None, firejailCfg: typing.List[typing.Any] = (), ripWithCMake: bool = True) -> typing.Iterator[Package]:
		"""Configure, build and package a CMake source tree; yields Package / tearing-spec objects."""
		cmakeCLIOptions = ["-GNinja", "-DCMAKE_BUILD_TYPE=RelWithDebInfo", "-DCMAKE_VERBOSE_MAKEFILE=1"]
		# NOTE(review): this crashes when buildOptions is None (type(None)(None)
		# raises TypeError) despite the Optional default -- callers must pass a dict.
		buildOptions = type(buildOptions)(buildOptions)
		# Map GNU-style install dirs onto CMAKE_INSTALL_* cache variables.
		for k, v in gnuDirs.toGnuArgs().items():
			buildOptions["CMAKE_INSTALL_" + k.upper()] = v
		if buildOptions:
			for k, v in buildOptions.items():
				# Booleans become CMake's ON/OFF tokens.
				if isinstance(v, bool):
					v = "ON" if v else "OFF"
				cmakeCLIOptions.append("=".join(("-D" + k, str(v))))
		buildDir = sourceDir / "build"
		buildDir.mkdir(exist_ok=True)
		# Configure/build run in a looser sandbox (no apparmor) with source and build dirs whitelisted.
		firejailCfgConfigure = type(firejailCfg)(firejailCfg)
		firejailCfgConfigure["paths"].extend((sourceDir, buildDir))
		firejailCfgConfigure["apparmor"] = False
		cfjBuild = getCfj(_cwd=buildDir, **firejailCfgConfigure)
		cmake = cfjBuild.cmake
		ninjaPackagingRes = None
		try:
			if globalPrefs.configure:
				print(styles.operationName("Configuring") + "...")
				print(cmakeCLIOptions)
				cmake(sourceDir, cmakeCLIOptions, _cwd=buildDir)
			# Delegate the actual build to the Ninja build system.
			super().__call__(buildDir, ref=ref, packagesRootsDir=packagesRootsDir, gnuDirs=gnuDirs, buildOptions={}, installRule=None, firejailCfg=firejailCfgConfigure)
			print(styles.operationName("Packaging") + "...")
			installEnv = os.environ.copy()
			tearingSpecs = OrderedDict()
			# Component/package metadata comes from the project's CPack configuration.
			cpackComponents = cpackInterpreter(buildDir)
			print("cpackComponents", cpackComponents)
			def components2Packages(componentz):
				# Convert each CPack component spec into (metadata, installed-files) pairs,
				# optionally installing ("ripping") the component via CMake on the way.
				for cmakeComponentPackageSpec in componentz:
					pkgRef = VersionedPackageRef(cmakeComponentPackageSpec.dic["name"], version=cmakeComponentPackageSpec.dic["version"])
					del cmakeComponentPackageSpec.dic["name"], cmakeComponentPackageSpec.dic["version"]
					metadata = PackageMetadata(pkgRef, **cmakeComponentPackageSpec.dic, packagerSpecific=cmakeComponentPackageSpec.packagerSpecific)
					if ripWithCMake:
						pkgInst = getPackageInstalledFilesFromRefAndParent(pkgRef, packagesRootsDir)
						if globalPrefs.install:
							cls.ripPackageWithCMake(pkgInst, buildDir, firejailCfg, installEnv, cmakeComponentPackageSpec.id)
						yield (metadata, pkgInst)
					else:
						raise NotImplementedError()
			if not len(cpackComponents.sub):
				# Single-component project: everything lands in one package needing tearing.
				metadata, inst = next(components2Packages((cpackComponents.main,)))
				inst.needsTearing = True
				yield AssociateTearingSpecAndMetadata(inst, PackagingSpec(metadata))
			else:
				for metadata, inst in components2Packages(cpackComponents.sub.values()):
					yield Package(metadata, inst)
			if not ripWithCMake:
				return PackagingSpec() # __init__(self, commonMetadata, tearingSpecs: typing.Mapping[typing.Union[VersionedPackageRef, PackageMetadata], "TearingSpec"] = None)
		except Exception as ex:
			# Surface captured subprocess output (sh-style exceptions) before re-raising.
			if hasattr(ex, "stdout") and hasattr(ex, "stderr"):
				print(ex.stdout.decode("utf-8"))
				print(ex.stderr.decode("utf-8"))
			raise
# Module-level singleton (exported via __all__).
CMake = CMakeBS()
| 37.730435 | 255 | 0.751786 | 3,693 | 0.851118 | 3,021 | 0.696243 | 3,625 | 0.835446 | 0 | 0 | 556 | 0.12814 |
752aada457244d235edd0e7ad9d6e9af88e5c545 | 3,640 | py | Python | apache_manager/interactive.py | xolox/python-apache-manager | 83fafc4455a3ddb6f2254993ef5a80e45339249e | [
"MIT"
] | 8 | 2018-05-16T02:34:32.000Z | 2020-12-03T03:44:37.000Z | apache_manager/interactive.py | xolox/python-apache-manager | 83fafc4455a3ddb6f2254993ef5a80e45339249e | [
"MIT"
] | 2 | 2018-11-18T15:04:59.000Z | 2021-05-26T15:32:47.000Z | apache_manager/interactive.py | xolox/python-apache-manager | 83fafc4455a3ddb6f2254993ef5a80e45339249e | [
"MIT"
] | 2 | 2017-02-20T07:45:08.000Z | 2017-06-11T16:52:56.000Z | # Monitor and control Apache web server workers from Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: November 27, 2019
# URL: https://apache-manager.readthedocs.io
"""
A ``top`` like interactive viewer for Apache web server metrics.
The :mod:`~apache_manager.interactive` module implements a ``top`` like
interactive viewer for Apache web server metrics using curses_. It can be
invoked from the command line using ``apache-manager --watch``.
Please note that the functions in this module are not included in the test
suite and are excluded from coverage calculations because:
1. For now this module is just an interesting experiment. It might disappear
completely or I might change it significantly, it all depends on time and
interest. For example it would be cool to have a tool like mytop_ or
innotop_ for Apache workers, but it's going to take time to build something
like that and I have 40+ open source projects and limited spare time, so I'm
not going to commit to anything :-).
2. This is my first time working with Python's :mod:`curses` module (and
curses_ interfaces in general) and it's not yet clear to me how feasible it
is to test an interactive command line interface that's not line based.
.. _curses: https://en.wikipedia.org/wiki/Curses_(programming_library)
.. _innotop: https://github.com/innotop/innotop
.. _mytop: http://jeremy.zawodny.com/mysql/mytop/
"""
# Standard library modules.
import curses
import logging
import sys
import time
# External dependencies.
import coloredlogs
from humanfriendly.terminal import connected_to_terminal, warning
def watch_metrics(manager):
    """Watch Apache web server metrics in a ``top`` like interface."""
    # Bail out early when stdout isn't an interactive terminal.
    if not connected_to_terminal(sys.stdout):
        warning("Error: The 'apache-manager --watch' command requires an interactive terminal!")
        sys.exit(1)
    try:
        curses.wrapper(redraw_loop, manager)
    except KeyboardInterrupt:
        # Control-C simply ends the viewer without a traceback.
        pass
def redraw_loop(screen, manager):
    """The main loop that continuously redraws Apache web server metrics.

    :param screen: The curses window created by :func:`curses.wrapper`.
    :param manager: The object whose metrics are rendered; must provide a
                    ``refresh()`` method and be accepted by ``report_metrics()``.

    Pressing the ``q`` key returns from the loop (ending the viewer).
    """
    # Ugly workaround to avoid circular import errors due to interdependencies
    # between the apache_manager.cli and apache_manager.interactive modules.
    from apache_manager.cli import report_metrics, line_is_heading
    # Hide warnings (they'll mess up the curses layout).
    coloredlogs.set_level(logging.ERROR)
    # Hide the text cursor.
    cursor_mode = curses.curs_set(0)
    # Make Control-C behave normally.
    curses.noraw()
    # Enable non-blocking getch().
    screen.nodelay(True)
    try:
        # Repeat until the user aborts.
        while True:
            lnum = 0
            for line in report_metrics(manager):
                attributes = 0
                # Heading lines are rendered in bold.
                if line_is_heading(line):
                    attributes |= curses.A_BOLD
                screen.addstr(lnum, 0, line, attributes)
                lnum += 1
            # Redraw screen.
            screen.refresh()
            # Wait a while before refreshing the screen, but enable the user to
            # quit in the mean time.
            for i in range(10):
                if screen.getch() == ord('q'):
                    return
                # Don't burn through CPU like crazy :-).
                time.sleep(0.1)
            # Update metrics in next iteration.
            manager.refresh()
            # Clear screen for next iteration.
            screen.erase()
    finally:
        # Restore cursor mode.
        curses.curs_set(cursor_mode)
        # Clean up the screen after we're done.
        screen.erase()
| 37.525773 | 96 | 0.672527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,288 | 0.628571 |
752bc94bdb813f33b60a4fa6280c58bed1b2d03d | 1,803 | py | Python | src/translation.py | Wlodarski/DR-Altimeter | b831a2ce4f05d89fcadba5bab623a39d6527264e | [
"MIT"
] | null | null | null | src/translation.py | Wlodarski/DR-Altimeter | b831a2ce4f05d89fcadba5bab623a39d6527264e | [
"MIT"
] | 6 | 2020-03-28T18:56:47.000Z | 2020-04-06T13:09:55.000Z | src/translation.py | Wlodarski/DR-Altimeter | b831a2ce4f05d89fcadba5bab623a39d6527264e | [
"MIT"
] | null | null | null | #! python3
"""
MIT License
Copyright (c) 2020 Walter Wlodarski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import gettext
from locale import getdefaultlocale
class Translation:
    """Callable gettext wrapper that starts out as an identity function.

    Until set_lang() installs a real catalogue, calling an instance simply
    echoes the message back unchanged, so translation lookups are safe
    before locale initialisation.
    """
    __slots__ = ["gettext"]
    @staticmethod
    def temporary_(message):
        """Identity fallback used before any catalogue is loaded."""
        return message
    def __init__(self):
        # Start with the pass-through translator.
        self.gettext = self.temporary_
    def __call__(self, text):
        """Translate *text* with whichever backend is currently installed."""
        return self.gettext(text)
    def set_lang(self, clp):
        """Install the catalogue chosen on the command line, falling back to the system locale."""
        current_locale, _encoding = getdefaultlocale()
        requested = clp.args.lang
        # An unknown language request degrades to the empty string.
        lang = requested if requested in clp.all_lang else ""
        catalogue = gettext.translation(
            "DR-Altimeter",
            localedir=clp.localedir,
            languages=[lang, current_locale],
            fallback=True,
        )
        catalogue.install()
        self.gettext = catalogue.gettext
        return self.gettext
| 34.673077 | 101 | 0.742651 | 656 | 0.363838 | 0 | 0 | 65 | 0.036051 | 0 | 0 | 1,115 | 0.618414 |
752d3e4ee7475225bb08bd56270853593c9c7737 | 676 | py | Python | config/urls.py | stkrizh/otus-django-hasker | 9692b8060a789b0b66b4cf3591a78e32c8a10380 | [
"MIT"
] | null | null | null | config/urls.py | stkrizh/otus-django-hasker | 9692b8060a789b0b66b4cf3591a78e32c8a10380 | [
"MIT"
] | 10 | 2020-06-05T22:56:30.000Z | 2022-02-10T08:54:18.000Z | config/urls.py | stkrizh/otus-django-hasker | 9692b8060a789b0b66b4cf3591a78e32c8a10380 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from users.views import LogIn, LogOut, Settings, SignUp
# Root URL routing table for the project.
urlpatterns = [
    # Admin site is mounted at a configurable prefix.
    path(f"{settings.ADMIN_URL}/", admin.site.urls),
    path("", include("questions.urls")),
    path("api/v1/", include("api.urls")),
    path("login", LogIn.as_view(), name="login"),
    path("logout", LogOut.as_view(), name="logout"),
    path("settings", Settings.as_view(), name="settings"),
    path("signup", SignUp.as_view(), name="signup"),
]
# Serve uploaded media through Django itself only in development (DEBUG=True).
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 32.190476 | 80 | 0.704142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.18787 |
752f075e4bec9e1141a182be3804295aa84221ba | 1,444 | py | Python | project_folder/project_code/feature_selection/infogain.py | Nvouk/Titanic_Survival_Classifier | 9b5c17a78c873315441a2ec1cb554ba02860b7af | [
"Unlicense"
] | null | null | null | project_folder/project_code/feature_selection/infogain.py | Nvouk/Titanic_Survival_Classifier | 9b5c17a78c873315441a2ec1cb554ba02860b7af | [
"Unlicense"
] | null | null | null | project_folder/project_code/feature_selection/infogain.py | Nvouk/Titanic_Survival_Classifier | 9b5c17a78c873315441a2ec1cb554ba02860b7af | [
"Unlicense"
] | null | null | null | from numpy import *
from math import log
# x: features, y: classes
def infogain(x, y):
    """Return the information gain IG(Y; X_j) of each feature column j.

    IG = H(Y) - H(Y|X), where H(Y|X) = sum_f P(X=f) * H(Y | X=f).

    x: (n_samples, n_features) array of discrete feature values.
    y: (n_samples,) array of class labels.
    Returns a (n_features,) array of information gains.

    Fix over the previous version: each branch's entropy is now weighted by
    P(X=f); the unweighted sum over-counted rare feature values and could
    even make the gain negative.
    """
    info_gains = zeros(x.shape[1])
    # Entropy of the class distribution, H(Y).
    hy = 0.0
    for c in unique(y):
        py = float(sum(y == c)) / len(y)  # P(Y=c)
        hy -= py * log(py, 2)
    # Compute IG for each feature (features are on the columns).
    for col in range(x.shape[1]):
        hyx = 0.0  # conditional entropy H(Y|X)
        for f in unique(x[:, col]):
            yf = y[x[:, col] == f]        # labels of samples where feature == f
            pf = float(len(yf)) / len(y)  # P(X=f): weight of this branch
            for yc in unique(yf):
                pyf = float(sum(yf == yc)) / len(yf)  # P(Y=yc | X=f)
                hyx -= pf * pyf * log(pyf, 2)
        info_gains[col] = hy - hyx
    return info_gains
| 36.1 | 99 | 0.567175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.427978 |
752f4072c4e2f6e85f05845d2b09cee5ab59bc91 | 864 | py | Python | wiki_stocks/get_wiki_sp.py | SamMonk/data-bot | 2311870e993c5c2d1de617d31b3f7a6641da2a9b | [
"MIT"
] | null | null | null | wiki_stocks/get_wiki_sp.py | SamMonk/data-bot | 2311870e993c5c2d1de617d31b3f7a6641da2a9b | [
"MIT"
] | 5 | 2021-03-31T20:06:34.000Z | 2022-03-12T00:58:22.000Z | wiki_stocks/get_wiki_sp.py | SamMonk/data-bot | 2311870e993c5c2d1de617d31b3f7a6641da2a9b | [
"MIT"
] | null | null | null | import bs4 as bs
import json
import requests
def save_sp500_tickers():
    """Scrape the Wikipedia S&P 500 constituents table and dump it to JSON.

    Returns (and writes to wiki_stocks/sp500tickers.json) a dict of the form
    {"stocks": [{"ticker", "sector", "industry", "reports"}, ...]}.
    """
    resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable'})
    tickers = {"stocks":[]}
    # Skip the header row. Column indices assume the table layout at the time
    # of writing (0=ticker, 2=SEC filings link, 3=sector, 4=industry) --
    # TODO confirm; Wikipedia reorders these columns from time to time.
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[0].text.strip()
        sector = row.findAll('td')[3].text.strip()
        industry = row.findAll('td')[4].text.strip()
        print(row.findAll('td')[2])
        reports = row.findAll('td')[2].find('a')['href'].strip()
        tickers['stocks'].append({"ticker":ticker,"sector":sector,"industry":industry,"reports":reports})
        print(ticker)
    # Output path is relative to the CWD the script is launched from.
    with open("wiki_stocks/sp500tickers.json","w") as f:
        json.dump(tickers,f)
    return tickers
#save_sp500_tickers() | 36 | 105 | 0.62037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.274306 |
752f44cd9c7f05513515a5513eb3cb736aef39b7 | 2,309 | py | Python | day/day14part2.py | swemoney/adventofcode2020 | 94c3c032e68b182881dc545771a79fd884b7cf93 | [
"MIT"
] | 1 | 2020-12-07T13:18:09.000Z | 2020-12-07T13:18:09.000Z | day/day14part2.py | swemoney/adventofcode2020 | 94c3c032e68b182881dc545771a79fd884b7cf93 | [
"MIT"
] | null | null | null | day/day14part2.py | swemoney/adventofcode2020 | 94c3c032e68b182881dc545771a79fd884b7cf93 | [
"MIT"
] | null | null | null | from functools import cached_property
from puzzle import Puzzle # pylint: disable=import-error
from itertools import product
from enum import Enum
import re
BIT_LENGTH = 36
class Command(Enum):
    """The two opcodes that appear in the puzzle input."""
    mask = "mask"
    mem = "mem"


class Instruction:
    """One parsed program line: an opcode plus its operands."""

    def __init__(self, command: str, address: str, argument: str):
        self.command = Command(command)
        # Memory writes carry a numeric address; mask lines have none.
        self.address = int(address) if address is not None else None
        self.argument = argument
class Day14Part2:
    """Advent of Code 2020 day 14 part 2: memory-address-decoder bitmask VM."""

    puzzle = None  # injected by the runner; exposes the raw input lines

    def run(self):
        """Parse the program, execute it, and print the sum of all memory values."""
        self.instructions = [self.parse_instruction(line) for line in self.parsed_input]
        self.memory = {0: 0}
        self.mask = "X" * BIT_LENGTH
        self.run_docking_program()
        print(sum(self.memory.values()))

    def run_docking_program(self):
        """Execute every parsed instruction in order."""
        for instr in self.instructions:
            self.execute_instruction(instr)

    def execute_instruction(self, instruction):
        """Apply one instruction: swap in a new mask, or perform a masked write."""
        if instruction.command == Command.mask:
            self.mask = instruction.argument
        elif instruction.command == Command.mem:
            masked = self.apply_mask(self.int_to_bin(instruction.address))
            value = int(instruction.argument)
            # Every floating-bit expansion of the address receives the value.
            for address in self.expanded_floating_bits(masked):
                self.memory[address] = value

    def apply_mask(self, bit_string):
        """Overlay self.mask onto bit_string; a '0' mask bit leaves the address bit alone."""
        overlaid = list(bit_string)
        for pos, mask_bit in enumerate(self.mask):
            if mask_bit != "0":
                overlaid[pos] = mask_bit
        return "".join(overlaid)

    def expanded_floating_bits(self, masked_bits):
        """Expand each 'X' in masked_bits to both 0 and 1; return all resulting addresses as ints."""
        floating = [pos for pos, ch in enumerate(masked_bits) if ch == "X"]
        addresses = []
        for combo in product("01", repeat=len(floating)):
            chars = list(masked_bits)
            for pos, bit in zip(floating, combo):
                chars[pos] = bit
            addresses.append(int("".join(chars), 2))
        return addresses

    def int_to_bin(self, integer):
        """Render an integer as a zero-padded BIT_LENGTH-wide binary string."""
        return bin(int(integer))[2:].zfill(BIT_LENGTH)

    def parse_instruction(self, instruction_string):
        """Split a 'mask = ...' / 'mem[addr] = value' line into an Instruction."""
        parts = re.search(r"^(\w*)\[?(\d+)?\]?\s=\s(.*)$", instruction_string).groups()
        return Instruction(*parts)

    @cached_property
    def parsed_input(self):
        """Input lines with trailing newlines stripped (computed once)."""
        return [line.strip('\n') for line in self.puzzle.input]
| 33.955882 | 116 | 0.637072 | 2,125 | 0.920312 | 0 | 0 | 108 | 0.046773 | 0 | 0 | 93 | 0.040277 |
752fa3c7ccd9fe1e1943f250e56533c498e879e8 | 353 | py | Python | pysaxon/__init__.py | ajelenak/pysaxon | fb0b1f79f67454284f2f5f4d10fdd2af7ef24b35 | [
"CC0-1.0"
] | 31 | 2016-05-31T14:49:25.000Z | 2022-03-11T15:57:46.000Z | pysaxon/__init__.py | ajelenak/pysaxon | fb0b1f79f67454284f2f5f4d10fdd2af7ef24b35 | [
"CC0-1.0"
] | 3 | 2017-10-17T09:20:19.000Z | 2020-04-18T14:51:53.000Z | pysaxon/__init__.py | ajelenak/pysaxon | fb0b1f79f67454284f2f5f4d10fdd2af7ef24b35 | [
"CC0-1.0"
] | 7 | 2017-01-16T13:06:56.000Z | 2022-01-19T02:06:45.000Z | from .version import version as __version__
# Delay importing extension modules till after they are built...
try:
    from .sxn import *
    from . import xdm
    # This SaxonProcessor object is used only to control creation and
    # destruction of the Saxon/C Java VM...
    _sp_init = SaxonProcessor(False, init=True)
except ImportError:
    # The compiled extension is absent before the build step; swallowing the
    # ImportError keeps the package importable (e.g. for reading __version__).
    pass
| 25.214286 | 69 | 0.72238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.475921 |
753208be193d95d4fcfd451b6fb663e48dc739a0 | 3,047 | py | Python | socialnews/news/users.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | 30 | 2015-01-18T16:34:03.000Z | 2021-05-23T20:05:54.000Z | socialnews/news/users.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | null | null | null | socialnews/news/users.py | agiliq/django-socialnews | aa4a1a4a0e3279e6c7999071648ba37c71df9d15 | [
"BSD-3-Clause"
] | 11 | 2015-02-21T10:45:41.000Z | 2021-01-24T21:08:20.000Z | from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from helpers import *
from django.core import serializers
import bforms
import exceptions
from django.conf import settings as settin
from django.contrib import auth
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
import helpers
from django.core.mail import send_mail
from django.template.loader import render_to_string
def user_main(request, username):
    """Paginated list of the links submitted by *username*."""
    user = User.objects.get(username = username)
    if request.user.is_authenticated():
        # presumably annotates each link with the viewer's vote data -- TODO confirm
        links = Link.objects.get_query_set_with_user(request.user).filter(user = user).select_related()
    else:
        links = Link.objects.filter(user = user).select_related()
    links, page_data = get_paged_objects(links, request, defaults.LINKS_PER_PAGE)
    payload = dict(pageuser=user, links=links, page_data=page_data)
    return render(request, payload, 'news/userlinks.html')
def user_comments(request, username):
    """All comments made by *username*, newest first (not paginated)."""
    user = User.objects.get(username = username)
    if request.user.is_authenticated():
        # presumably annotates each comment with the viewer's vote data -- TODO confirm
        comments = Comment.objects.get_query_set_with_user(request.user).filter(user = user).select_related()
    else:
        comments = Comment.objects.filter(user = user).select_related()
    comments = comments.order_by('-created_on')
    payload = dict(pageuser=user, comments=comments)
    return render(request, payload, 'news/usercomments.html')
@login_required
def liked_links(request):
    """The current user's upvoted links."""
    upvoted = LinkVote.objects.get_user_data().filter(
        user=request.user, direction=True).select_related()
    return _user_links(request, upvoted, 'liked')
@login_required
def liked_links_secret(request, username, secret_key):
    """JSON feed of the 10 most recent liked links, gated by a per-user secret key."""
    user = User.objects.get(username = username)
    if not user.get_profile().secret_key == secret_key:
        raise Http404
    # NOTE(review): filters on request.user rather than the looked-up `user`;
    # given the secret-key check above this probably should be `user` -- confirm.
    votes = LinkVote.objects.get_user_data().filter(user = request.user, direction = True).select_related()[:10]
    votes_id = [vote.link_id for vote in votes]
    links = Link.objects.filter(id__in = votes_id)
    return HttpResponse(serializers.serialize('json', links))
@login_required
def disliked_links(request):
    """The current user's downvoted links."""
    downvoted = LinkVote.objects.get_user_data().filter(
        user=request.user, direction=False).select_related()
    return _user_links(request, downvoted, 'disliked')
@login_required
def saved_links(request):
    """Links the current user has saved for later."""
    bookmarks = SavedLink.objects.get_user_data().filter(
        user=request.user).select_related()
    return _user_links(request, bookmarks, 'saved')
def _user_links(request, queryset, page):
    """Shared renderer for the liked/disliked/saved link pages."""
    ordered = queryset.order_by('-created_on')
    objects, page_data = get_paged_objects(ordered, request, defaults.LINKS_PER_PAGE)
    context = dict(objects=objects, page_data=page_data, page=page)
    return render(request, context, 'news/mylinks.html')
| 42.319444 | 113 | 0.739744 | 0 | 0 | 0 | 0 | 1,120 | 0.367575 | 0 | 0 | 120 | 0.039383 |
75329e3c7ac7214627f20aba5ddcea8e9e8d1a46 | 4,375 | py | Python | examples/SIRV_.py | JennyCCDD/epidemix | 30816ada8f376afc59782ee7504373546179fa61 | [
"BSD-2-Clause"
] | 7 | 2020-11-19T02:08:20.000Z | 2020-12-16T02:35:31.000Z | examples/SIRV_.py | JennyCCDD/epidemix | 30816ada8f376afc59782ee7504373546179fa61 | [
"BSD-2-Clause"
] | null | null | null | examples/SIRV_.py | JennyCCDD/epidemix | 30816ada8f376afc59782ee7504373546179fa61 | [
"BSD-2-Clause"
] | 1 | 2021-01-07T14:36:02.000Z | 2021-01-07T14:36:02.000Z | ########################################################################
'''
Author: Kuo Chun-Lin
Email : guojl19@mails.tsinghua.edu.cn
'''
########################################################################
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from tqdm import tqdm
from epidemix.epidemic import EpiModel
from epidemix.equations import SIRV
from epidemix.vaccination import VacciModel
from epidemix.utils.plot import draw_probs, get_neighbor
from epidemix.utils.config import set_params
########################################################################
# Sweep infection rate (beta) x vaccination rate (eps) x initial infections,
# repeating each SIRV simulation on a random graph, and write per-combination
# success rates and mean vaccination counts to CSV files.
num_node = 100
# G = nx.watts_strogatz_graph(num_node, 5, 0.4)
# G = nx.powerlaw_cluster_graph(num_node, 5, 0.4)
G = nx.gnp_random_graph(num_node, 0.08)
days = np.arange(0, 50, 0.1)
colors = ['blue', 'red', 'green', 'orange']
beta_list = np.linspace(0, 1, 21)[9:13]
for b in tqdm(beta_list, total=len(beta_list)):
    b = np.round(b, decimals=2)
    # [0.05, 0.10, 0.15, ..., 0.90, 0.95]
    eps_list = np.linspace(0, 1, 21)[1:-1]
    # eps_list = np.linspace(0, 1, 6)[1:-1]
    success_rates = []
    vacc_avgs = []
    for e in eps_list:  # -----> epsilon settings <----- #
        e = np.round(e, decimals=2)
        success_rate = []
        vacc_avg = []
        for i in range(1, 10):  # -----> initial settings <----- #
        # for i in range(1, 3):
            I0 = int(num_node * 0.05 * i)
            success_list = []
            vacc_list = []
            for _ in range(20):  # -----> repeat how many times <----- #
                # ======================================================
                # I0, R0, V0, beta, gamma, eps
                epi = VacciModel(G, SIRV, num_state=4, params=[
                    I0, 0, 0, b, 0.1, e],
                    vacci_strategy=None, state_colors=colors)
                probs = epi.simulate(days)
                epi.set_propagate(0, 1, neighbor=1, update_state=False)
                epi.set_propagate(0, 3, neighbor=None, update_state=True)
                epi.set_propagate(1, 2, neighbor=None, update_state=False)
                status, _ = epi.propagation()
                # epi.visualize(status, np.arange(0, 64, 4),
                #               figsize=(15, 15), n_row=4, n_col=4)
                # draw_probs(probs, colors)
                # ======================================================
                nei_s = []
                s_trend = []
                # NOTE(review): this loop variable shadows the outer `i`
                # (harmless here since I0 is computed before the loop, but fragile).
                for i in range(len(days)):
                    # Get the total number of I's neighbors which are S state.
                    idx = get_neighbor(
                        status[i]['adj'], status[i]['state'] == 1)
                    i_nei = np.sum(status[i]['state'][idx] == 0)
                    nei_s.append(i_nei)
                    # Get the total number of S under each moment.
                    s_tot = np.sum(status[i]['state'] == 0)
                    s_trend.append(s_tot)
                # Get the moment when there is no S nearby I.
                no_Snei = np.where(np.array(nei_s) == 0.0)[0][0]
                # Get the moment when there is no S in the graph.
                try:
                    no_Sstat = np.where(np.array(s_trend) == 0.0)[0][0]
                # NOTE(review): bare except is meant to catch the IndexError when S
                # never reaches zero; no_Sstat is then an *empty array* and the `<`
                # comparison below relies on numpy's (deprecated) truthiness of
                # empty arrays -- confirm behaviour on the numpy version in use.
                except:
                    no_Sstat = np.where(np.array(s_trend) == 0.0)[0]
                # If nei_s get to 0 faster than s_trend, regard it as a success case.
                success = 1 if no_Snei < no_Sstat else 0
                success_list.append(success)
                # Count how many vacc is implemented.
                vacc_quantity = np.sum(status[no_Snei]['state'] == 3)
                vacc_list.append(vacc_quantity)
            # Get the success rate under diff init case.
            rate = np.sum(success_list) / len(success_list)
            success_rate.append(rate)
            # Get the minimum mean vaccination quantity under diff init cases.
            vacc_avg.append(np.mean(vacc_list))
            print(' >>> finished repeat <<<')
        success_rates.append(success_rate)
        vacc_avgs.append(vacc_avg)
    df_succ = pd.DataFrame(np.array(success_rates).T, columns=eps_list)
    df_succ.to_csv('succ_beta_{}.csv'.format(b))
    df_vacc = pd.DataFrame(np.array(vacc_avgs).T, columns=eps_list)
    df_vacc.to_csv('vacc_beta_{}.csv'.format(b))
| 36.157025 | 85 | 0.501943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,384 | 0.316343 |