hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7f30077f490cc616f7f71217c5e89c086968e6a
| 1,807
|
py
|
Python
|
www/purple_admin/urls.py
|
SubminO/vas
|
3096df12e637fc614d18cb3eef43c18be0775e5c
|
[
"Apache-2.0"
] | null | null | null |
www/purple_admin/urls.py
|
SubminO/vas
|
3096df12e637fc614d18cb3eef43c18be0775e5c
|
[
"Apache-2.0"
] | null | null | null |
www/purple_admin/urls.py
|
SubminO/vas
|
3096df12e637fc614d18cb3eef43c18be0775e5c
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from purple_admin import views
urlpatterns = [
    path('', views.cabinet, name='admin_panel_cabinet'),
    # Admin pages for route names
    path('route_list', views.cabinet_list, {'type': 'route'}, name='admin_panel_route_list'),
    path('route_add', views.cabinet_add, {'type': 'route'}, name='admin_panel_route_add'),
    path('route_edit/<int:pk>/', views.cabinet_edit, {'type': 'route'}, name='admin_panel_route_edit'),
    path('route_delete/<int:pk>/', views.cabinet_delete, {'type': 'route'}, name='admin_panel_route_delete'),
    # Admin pages for stop (platform) names
    path('route_platform_list', views.cabinet_list, {'type': 'route_platform'}, name='admin_panel_route_platform_list'),
    path('route_platform_add', views.cabinet_add, {'type': 'route_platform'}, name='admin_panel_route_platform_add'),
    path('route_platform_edit/<int:pk>/', views.cabinet_edit, {'type': 'route_platform'},
         name='admin_panel_route_platform_edit'),
    path('route_platform_delete/<int:pk>/', views.cabinet_delete, {'type': 'route_platform'},
         name='admin_panel_route_platform_delete'),
    # Add endpoint for platform-type relations (URL name suggests it is called via AJAX)
    path('route_relation_add_ajax', views.cabinet_add, {'type': 'route_platform_type'},
         name='admin_panel_route_platform_type_relation_ajax_add'),
    # Admin pages for vehicles ("ТС" = transport vehicles)
    path('ts_list', views.cabinet_list, {'type': 'ts'}, name='admin_panel_ts_list'),
    path('ts_add', views.cabinet_add, {'type': 'ts'}, name='admin_panel_ts_add'),
    path('ts_edit/<int:pk>/', views.cabinet_edit, {'type': 'ts'}, name='admin_panel_ts_edit'),
    path('ts_delete/<int:pk>/', views.cabinet_delete, {'type': 'ts'}, name='admin_panel_ts_delete'),
    # Admin page for creating a route on the map
    path('map_route_editor_add', views.mapped_route_add, name='admin_panel_mapped_route_add'),
]
| 62.310345
| 120
| 0.716657
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,111
| 0.584122
|
e7f60dd013f54bbf4fa181ff948f295cdc87e462
| 1,893
|
py
|
Python
|
tests/mock_dbcli_config.py
|
bluelabsio/db-facts
|
fc8faa59f450a5cc00a0e50160ca57e47291b375
|
[
"Apache-2.0"
] | 2
|
2020-11-25T20:11:50.000Z
|
2020-12-12T18:39:09.000Z
|
tests/mock_dbcli_config.py
|
bluelabsio/db-facts
|
fc8faa59f450a5cc00a0e50160ca57e47291b375
|
[
"Apache-2.0"
] | 5
|
2020-01-24T15:05:50.000Z
|
2020-02-29T13:34:40.000Z
|
tests/mock_dbcli_config.py
|
bluelabsio/db-facts
|
fc8faa59f450a5cc00a0e50160ca57e47291b375
|
[
"Apache-2.0"
] | 1
|
2021-05-16T17:07:40.000Z
|
2021-05-16T17:07:40.000Z
|
# Mock db-facts configuration fixture used by the tests.
# Layout: 'exports_from' declares named credential-export methods;
# 'dbs' maps database names to the method (plus its parameters) used to
# pull credentials; 'orgs' holds organization metadata.
mock_dbcli_config = {
    'exports_from': {
        'lpass': {
            'pull_lastpass_from': "{{ lastpass_entry }}",
        },
        'lpass_user_and_pass_only': {
            'pull_lastpass_username_password_from': "{{ lastpass_entry }}",
        },
        'my-json-script': {
            'json_script': [
                'some-custom-json-script'
            ]
        },
        # Deliberately declares no export keys — exercises handling of an
        # unsupported/invalid method (referenced by the 'bing' db below).
        'invalid-method': {
        },
    },
    'dbs': {
        'baz': {
            'exports_from': 'my-json-script',
        },
        'bing': {
            'exports_from': 'invalid-method',
        },
        'bazzle': {
            'exports_from': 'lpass',
            'lastpass_entry': 'lpass entry name'
        },
        'bazzle-bing': {
            'exports_from': 'lpass',
            'lastpass_entry': 'different lpass entry name'
        },
        # Same LastPass entry as 'bazzle' — two dbs sharing one credential.
        'frazzle': {
            'exports_from': 'lpass',
            'lastpass_entry': 'lpass entry name'
        },
        'frink': {
            'exports_from': 'lpass_user_and_pass_only',
            'lastpass_entry': 'lpass entry name',
            'jinja_context_name': 'standard',
            'exports': {
                'some_additional': 'export',
                'a_numbered_export': 123
            },
        },
        # No 'exports_from': credentials come entirely from inline exports,
        # with templates rendered via the listed jinja contexts.
        'gaggle': {
            'jinja_context_name': [
                'env',
                'base64',
            ],
            'exports': {
                'type': 'bigquery',
                'protocol': 'bigquery',
                'bq_account': 'bq_itest',
                'bq_service_account_json':
                "{{ env('ITEST_BIGQUERY_SERVICE_ACCOUNT_JSON_BASE64') | b64decode }}",
                'bq_default_project_id': 'bluelabs-tools-dev',
                'bq_default_dataset_id': 'bq_itest',
            },
        },
    },
    'orgs': {
        'myorg': {
            'full_name': 'MyOrg',
        },
    },
}
| 28.253731
| 86
| 0.43159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.513999
|
e7f7aa1ed993e5ba94893e2ddce56e42c0e3c43a
| 586
|
py
|
Python
|
java/test/src/main/resources/test_cross_language_invocation.py
|
hershg/ray
|
a1744f67fe954d8408c5b84e28ecccc130157f8e
|
[
"Apache-2.0"
] | 2
|
2017-12-15T19:36:55.000Z
|
2019-02-24T16:56:06.000Z
|
java/test/src/main/resources/test_cross_language_invocation.py
|
hershg/ray
|
a1744f67fe954d8408c5b84e28ecccc130157f8e
|
[
"Apache-2.0"
] | 4
|
2019-03-04T13:03:24.000Z
|
2019-06-06T11:25:07.000Z
|
java/test/src/main/resources/test_cross_language_invocation.py
|
hershg/ray
|
a1744f67fe954d8408c5b84e28ecccc130157f8e
|
[
"Apache-2.0"
] | 2
|
2017-10-31T23:20:07.000Z
|
2019-11-13T20:16:03.000Z
|
# This file is used by CrossLanguageInvocationTest.java to test cross-language
# invocation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import ray
@ray.remote
def py_func(value):
    """Remote task: echo the bytes payload back with a Python-side prefix."""
    assert isinstance(value, bytes)
    prefix = b"Response from Python: "
    return prefix + value
@ray.remote
class Counter(object):
    """Remote actor that holds a single integer counter."""

    def __init__(self, value):
        # Cross-language callers may pass the initial value as a string.
        self.value = int(value)

    def increase(self, delta):
        """Add *delta* to the counter and return the new total.

        Returned as UTF-8 bytes on Python 3 and a plain str on Python 2,
        so the Java side receives a consistent byte payload.
        """
        self.value += int(delta)
        text = str(self.value)
        if six.PY3:
            return text.encode("utf-8")
        return text
| 21.703704
| 78
| 0.723549
| 229
| 0.390785
| 0
| 0
| 353
| 0.602389
| 0
| 0
| 123
| 0.209898
|
e7f8e7564db7dfcbe99ed0496a94327a80f2134b
| 534
|
py
|
Python
|
game_stats.py
|
DeqianBai/Project-Alien-Invasion
|
3beac9eaba6609b8cecce848269b1ffe7b7bf493
|
[
"Apache-2.0"
] | 4
|
2019-02-25T13:11:30.000Z
|
2019-07-23T11:42:38.000Z
|
game_stats.py
|
DeqianBai/Project-Alien-Invasion
|
3beac9eaba6609b8cecce848269b1ffe7b7bf493
|
[
"Apache-2.0"
] | 1
|
2019-11-22T12:50:01.000Z
|
2019-11-22T12:50:01.000Z
|
game_stats.py
|
DeqianBai/Project-Alien-Invasion
|
3beac9eaba6609b8cecce848269b1ffe7b7bf493
|
[
"Apache-2.0"
] | 3
|
2019-06-13T03:00:50.000Z
|
2020-03-04T08:46:42.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:dabai time:2019/2/24
class GameStats():
    """Track the statistics of an Alien Invasion game session."""

    def __init__(self, ai_settings):
        """Remember the settings object and initialize per-game statistics."""
        self.ai_settings = ai_settings
        self.reset_stats()
        # A fresh game starts in the inactive (not yet playing) state.
        self.game_active = False
        # The high score survives across games: set once here and never
        # touched by reset_stats().
        self.high_score = 0

    def reset_stats(self):
        """Re-initialize every statistic that can change during one game."""
        self.ships_left = self.ai_settings.ship_limit
        self.score = 0
        self.level = 1
| 19.777778
| 52
| 0.567416
| 568
| 0.858006
| 0
| 0
| 0
| 0
| 0
| 0
| 292
| 0.441088
|
e7f94faea0813341ebda497d2d676c1095cd32fd
| 4,464
|
py
|
Python
|
ros/src/tl_detector/light_classification/carla.py
|
xiangjiang/Capstone_1
|
68e6d044041f5759f3596d6d547bd871afb1970b
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/carla.py
|
xiangjiang/Capstone_1
|
68e6d044041f5759f3596d6d547bd871afb1970b
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/carla.py
|
xiangjiang/Capstone_1
|
68e6d044041f5759f3596d6d547bd871afb1970b
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from os import path
import numpy as np
from scipy import misc
from styx_msgs.msg import TrafficLight
import cv2
import rospy
import tensorflow as tf
class CarlaModel(object):
    """Traffic-light detector/classifier backed by a frozen TensorFlow 1.x
    object-detection graph.

    The checkpoint is loaded lazily on the first call to :meth:`predict`.
    Detection runs over a fixed set of image sub-grids; a detected light is
    then classified RED/YELLOW/GREEN by the vertical position of its
    brightest pixels.
    """

    def __init__(self, model_checkpoint):
        """Store configuration; all TF work is deferred to predict()."""
        self.sess = None                  # created lazily in predict()
        self.checkpoint = model_checkpoint
        self.prob_thr = 0.90              # minimum detection confidence
        self.TRAFFIC_LIGHT_CLASS = 10     # detector class id for "traffic light"
        self.image_no = 10000             # running id used to name debug images
        tf.reset_default_graph()

    def predict(self, img):
        """Return the TrafficLight state for *img*.

        img: color image array of shape (H, W, 3). NOTE(review): the
        COLOR_RGB2HSV conversion below assumes RGB channel order — confirm
        against the caller.

        Returns TrafficLight.RED/YELLOW/GREEN, or UNKNOWN when no confident
        detection survives the filters.
        """
        # Lazy one-time load of the frozen graph.
        # FIX: identity comparison `is None` instead of `== None`.
        if self.sess is None:
            gd = tf.GraphDef()
            gd.ParseFromString(tf.gfile.GFile(self.checkpoint, "rb").read())
            tf.import_graph_def(gd, name="object_detection_api")
            self.sess = tf.Session()
            g = tf.get_default_graph()
            self.image = g.get_tensor_by_name("object_detection_api/image_tensor:0")
            self.boxes = g.get_tensor_by_name("object_detection_api/detection_boxes:0")
            self.scores = g.get_tensor_by_name("object_detection_api/detection_scores:0")
            self.classes = g.get_tensor_by_name("object_detection_api/detection_classes:0")
        img_h, img_w = img.shape[:2]
        self.image_no = self.image_no + 1
        cv2.imwrite("full_" + str(self.image_no) + ".png", img)
        # Scan two vertical bands x three horizontal thirds of the image,
        # stopping at the first grid with an acceptable detection.
        for h0 in [img_h // 3, (img_h // 3) - 150]:
            for w0 in [0, img_w // 3, img_w * 2 // 3]:
                grid = img[h0:h0 + img_h // 3 + 50, w0:w0 + img_w // 3, :]  # grid
                pred_boxes, pred_scores, pred_classes = self.sess.run(
                    [self.boxes, self.scores, self.classes],
                    feed_dict={self.image: np.expand_dims(grid, axis=0)})
                pred_boxes = pred_boxes.squeeze()
                pred_scores = pred_scores.squeeze()  # descending order
                pred_classes = pred_classes.squeeze()
                traffic_light = None
                h, w = grid.shape[:2]
                cv2.imwrite("grid_" + str(self.image_no) + "_" + str(h0) + "_" + str(w0) + ".png", grid)
                rospy.loginfo("w,h is %s,%s", h0, w0)
                for i in range(pred_boxes.shape[0]):
                    box = pred_boxes[i]
                    score = pred_scores[i]
                    if score < self.prob_thr:
                        continue
                    if pred_classes[i] != self.TRAFFIC_LIGHT_CLASS:
                        continue
                    # Normalized box layout is [ymin, xmin, ymax, xmax].
                    x0, y0 = box[1] * w, box[0] * h
                    x1, y1 = box[3] * w, box[2] * h
                    x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
                    x_diff = x1 - x0
                    y_diff = y1 - y0
                    xy_ratio = x_diff / float(y_diff)
                    rospy.loginfo("image_no is %s", self.image_no)
                    rospy.loginfo("x,y ratio is %s", xy_ratio)
                    rospy.loginfo("score is %s", score)
                    # Reject boxes that are not clearly taller than wide.
                    if xy_ratio > 0.48:
                        continue
                    # Reject boxes covering a negligible fraction of the grid.
                    area = np.abs((x1 - x0) * (y1 - y0)) / float(w * h)
                    rospy.loginfo("area is %s", area)
                    if area <= 0.001:
                        continue
                    traffic_light = grid[y0:y1, x0:x1]
                    rospy.loginfo("traffic light given")
                    # select first (most confident) acceptable detection
                    break
                if traffic_light is not None:
                    break
            # FIX: the original never broke out of the outer (h0) loop, so a
            # light found in the first band was overwritten when the second
            # band re-initialized traffic_light to None.
            if traffic_light is not None:
                break
        if traffic_light is not None:
            rospy.loginfo("w,h is %s,%s", h0, w0)
            rospy.loginfo("x,y ratio is %s", xy_ratio)
            rospy.loginfo("score is %s", score)
            cv2.imwrite("light_" + str(self.image_no) + ".png", traffic_light)
            # Classify by where the brightest pixels sit vertically within
            # the light: top < 0.4 -> RED, bottom >= 0.55 -> GREEN, else YELLOW.
            brightness = cv2.cvtColor(traffic_light, cv2.COLOR_RGB2HSV)[:, :, -1]
            hs, ws = np.where(brightness >= (brightness.max() - 30))
            hs_mean = hs.mean()
            tl_h = traffic_light.shape[0]
            if hs_mean / tl_h < 0.4:
                rospy.loginfo("image" + str(self.image_no - 1) + " is RED")
                return TrafficLight.RED
            elif hs_mean / tl_h >= 0.55:
                rospy.loginfo("image" + str(self.image_no - 1) + " is GREEN")
                return TrafficLight.GREEN
            else:
                rospy.loginfo("image" + str(self.image_no - 1) + " is YELLOW")
                return TrafficLight.YELLOW
        return TrafficLight.UNKNOWN
| 43.764706
| 114
| 0.533602
| 4,287
| 0.960349
| 0
| 0
| 0
| 0
| 0
| 0
| 614
| 0.137545
|
e7fca0855906e19926ef43a259b033f9d1d6ddb0
| 542
|
py
|
Python
|
transform/indexed_transform.py
|
cviaai/unsupervised-heartbeat-anomaly-detection
|
3586bf505256463c030422607e95e4cee40fa086
|
[
"MIT"
] | 2
|
2020-10-14T05:50:25.000Z
|
2021-05-11T03:42:02.000Z
|
transform/indexed_transform.py
|
cviaai/unsupervised-heartbeat-anomaly-detection
|
3586bf505256463c030422607e95e4cee40fa086
|
[
"MIT"
] | null | null | null |
transform/indexed_transform.py
|
cviaai/unsupervised-heartbeat-anomaly-detection
|
3586bf505256463c030422607e95e4cee40fa086
|
[
"MIT"
] | null | null | null |
from typing import Tuple, List
from transform.transformer import TimeSeriesTransformer
import numpy as np
class IndexedTransformer:
    """Wrap a TimeSeriesTransformer and report each window's source index.

    Transformed row ``i`` corresponds to original-series position
    ``padding + i * step``.
    """

    def __init__(self, transformer: TimeSeriesTransformer, padding: int, step: int):
        self.transformer = transformer
        self.padding = padding
        self.step = step

    def __call__(self, data: np.ndarray) -> Tuple[List[int], np.ndarray]:
        """Transform *data*; return (indices, transformed) in lockstep."""
        transformed = self.transformer(data)
        positions = [self.padding + offset * self.step
                     for offset in range(len(transformed))]
        return positions, transformed
| 30.111111
| 84
| 0.695572
| 432
| 0.797048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e7fca20cce05d1364eee53a17bec476012eb661d
| 2,177
|
py
|
Python
|
dropconnect/combine_pred_mod.py
|
zygmuntz/kaggle-cifar
|
16936af9cf621d668c50491291e042a7849a1ac3
|
[
"BSD-2-Clause"
] | 26
|
2015-01-12T18:00:50.000Z
|
2020-12-19T23:49:16.000Z
|
dropconnect/combine_pred_mod.py
|
zygmuntz/kaggle-cifar
|
16936af9cf621d668c50491291e042a7849a1ac3
|
[
"BSD-2-Clause"
] | null | null | null |
dropconnect/combine_pred_mod.py
|
zygmuntz/kaggle-cifar
|
16936af9cf621d668c50491291e042a7849a1ac3
|
[
"BSD-2-Clause"
] | 26
|
2015-01-10T22:35:01.000Z
|
2020-01-15T08:56:53.000Z
|
#------------------------------------------
# this script combine result of different
# nets and report final result
#------------------------------------------
import sys
import numpy as np
from util import pickle, unpickle
def evaluate_result( result, text ):
# pre-condition check
num_batches = len( result['labels'] )
assert( num_batches == len(result['labels']) )
# compute error
num_cases = 0
num_wrong = 0
for ii in range( num_batches ):
act_index = result['labels'][ii]
num_cases_ii = act_index.shape[0]
assert( num_cases_ii == result['preds'][ii].shape[0] )
num_cases += num_cases_ii
pred_index = np.argmax( result['preds'][ii], 1 )
for jj in range( num_cases_ii ):
if pred_index[jj] != act_index[jj]:
num_wrong += 1
print text + "----Testing Error: %2.4f" % ( 1.0 *num_wrong / num_cases )
return ( 1.0 *num_wrong / num_cases )
def main():
    """Combine the predictions of several nets (pickled result files given
    on the command line), report per-net errors and the ensemble error."""
    num_args = len(sys.argv)
    # load result from file; every CLI argument is one net's result file
    num_nets = num_args - 1
    assert( num_nets > 0 )
    errors = []
    # 0th net's result dict layout:
    #   result['labels'] - per-batch ground-truth index arrays
    #   result['preds']  - per-batch prediction score matrices
    result = unpickle( sys.argv[1] )
    errors.append( evaluate_result( result, sys.argv[1] ) )
    num_batches = len( result['labels'] )
    #import pdb; pdb.set_trace()
    # collect all results, accumulating scores in-place into `result`
    for ii in range( num_nets - 1 ):
        result_ii = unpickle( sys.argv[ii+2] )
        # evaluate result_ii on its own before merging
        errors.append( evaluate_result( result_ii, sys.argv[ii+2] ) )
        # check num of batches is consistent
        # NOTE(review): num_batches_ii is computed but never compared
        # against num_batches — confirm whether a check was intended.
        num_batches_ii = len( result_ii['labels'] )
        for jj in range( num_batches ):
            # check labels are consistent across nets
            assert( np.array_equal(
                result_ii['labels'][jj], result['labels'][jj] ) )
            # accumulate this net's scores into result['preds'][jj]
            result['preds'][jj] += result_ii['preds'][jj]
        pickle( 'combine_result', result )
    # classifier mean/std accuracy across the individual nets
    errors = np.array( errors )
    #import pdb; pdb.set_trace()
    print "mean: " , str(100*np.mean( errors )) , " std: " , str(100*(np.std( errors )))
    # evaluate the combined (summed) predictions
    evaluate_result( result, "After combine" )
if __name__ == "__main__":
main()
| 30.661972
| 87
| 0.592559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 658
| 0.302251
|
e7fcb403c125d5647a5fdcb4339ffbade5bc81e8
| 1,556
|
py
|
Python
|
goless/__init__.py
|
ctismer/goless
|
02168a40902691264b32c7da6f453819ed7a91cf
|
[
"Apache-2.0"
] | 1
|
2015-05-28T03:12:47.000Z
|
2015-05-28T03:12:47.000Z
|
goless/__init__.py
|
ctismer/goless
|
02168a40902691264b32c7da6f453819ed7a91cf
|
[
"Apache-2.0"
] | null | null | null |
goless/__init__.py
|
ctismer/goless
|
02168a40902691264b32c7da6f453819ed7a91cf
|
[
"Apache-2.0"
] | null | null | null |
"""
``goless`` introduces go-like channels and select to Python,
built on top of Stackless Python (and maybe one day gevent).
Use :func:`goless.chan` to create a synchronous or buffered channel.
Use :func:`goless.select` like you would the ``Select`` function in Go's reflect package
(since Python lacks a switch/case statement, replicating Go's select statement syntax
wasn't very effective).
"""
import logging
import sys
import traceback
from .backends import current as _be
# noinspection PyUnresolvedReferences
from .channels import chan, ChannelClosed
# noinspection PyUnresolvedReferences
from .selecting import dcase, rcase, scase, select
# Package version, kept as a tuple plus its dotted-string form.
version_info = 0, 0, 1
version = '.'.join(map(str, version_info))
def on_panic(etype, value, tb):
    """
    Called when there is an unhandled error in a goroutine.

    The default behavior: log the formatted traceback at CRITICAL level,
    then exit the process through the backend.
    """
    formatted = traceback.format_exception(etype, value, tb)
    logging.critical(formatted)
    _be.propagate_exc(SystemExit, 1)
def go(func, *args, **kwargs):
    """
    Run a function in a new tasklet, like a goroutine.

    If the goroutine raises an unhandled exception (*panics*),
    :func:`goless.on_panic` will be called, which by default logs the
    error and exits the process.

    :param args: Positional arguments to ``func``.
    :param kwargs: Keyword arguments to ``func``.
    """
    def run_guarded(target):
        # noinspection PyBroadException
        try:
            target(*args, **kwargs)
        except:
            on_panic(*sys.exc_info())

    _be.start(run_guarded, func)
| 30.509804
| 88
| 0.703728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 958
| 0.615681
|
e7fcf109cce1b1c57ca682a8b6f5606efb8ee46b
| 643
|
py
|
Python
|
data/test1.py
|
moses-alexander/simple-python-parser
|
a15f53a86d61fa5d98f5ade149d8c3a178ebfb50
|
[
"BSD-3-Clause"
] | null | null | null |
data/test1.py
|
moses-alexander/simple-python-parser
|
a15f53a86d61fa5d98f5ade149d8c3a178ebfb50
|
[
"BSD-3-Clause"
] | null | null | null |
data/test1.py
|
moses-alexander/simple-python-parser
|
a15f53a86d61fa5d98f5ade149d8c3a178ebfb50
|
[
"BSD-3-Clause"
] | null | null | null |
1+2
3+5
7+8
6>7
abs(-3)
if 8 < 9: min(3,5)
else 4 < 5: abs(-2)
else 4 > 5: max(3, 7)
round(2.1)
round(3.6)
len("jfdgge")
type(4)
any(1, 3, 4)
any(0.0, 0.0, 0.0)
all("abc", "a")
all(0, 1)
bin(45)
lower("ABC")
upper("abc")
join("abc", "abc")
bool(0)
bool("abc")
ord('r')
chr(100)
str(130)
globals()
help()
hex(15)
oct(27)
pow(4,2)
sum(1,2, 3)
id(4)
id("abc")
not False
none()
none(0)
# breaks here ... for now
b = 1
print("a", b); print();
a = 5
#def append_element(self, val): newest =__Node(val);newestprev = self__trailerprev;self__trailerprevnext = newest;self__trailerprev = newest;newestnext = self__trailer;self__size = self__size + 1;
| 14.613636
| 196
| 0.62986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 273
| 0.424572
|
e7fd1190b6509c18afc6e8dc44570e03220fb1f1
| 235
|
py
|
Python
|
python/funciones2.py
|
Tai-Son/Python-Chile
|
fd3aa28304caa806ee334686adbb029e81514912
|
[
"MIT"
] | null | null | null |
python/funciones2.py
|
Tai-Son/Python-Chile
|
fd3aa28304caa806ee334686adbb029e81514912
|
[
"MIT"
] | null | null | null |
python/funciones2.py
|
Tai-Son/Python-Chile
|
fd3aa28304caa806ee334686adbb029e81514912
|
[
"MIT"
] | null | null | null |
# Practica de funciones
#! /usr/bin/python
# -*- coding: iso-8859-15
def f(x):
    """Evaluate the quadratic 2*x**2 + 1 at x."""
    return 2 * x ** 2 + 1
# Program that uses the function f: read n, then print (i, f(i)) for
# each i in 0..n-1.  The input prompt is user-facing Spanish
# ("Ingrese número" = "Enter a number") and is kept as-is.
n = int(input("Ingrese número: "))
for i in range(n):
    y = f(i)
    print (i,y)
| 13.823529
| 34
| 0.557447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.491525
|
e7fdb3f99099bfa047bbe790a686f91e9a3ed33c
| 1,070
|
py
|
Python
|
setup.py
|
br-g/pyroaman
|
86d9a4771e4e0657c96e1c45dacbbde579e527d9
|
[
"MIT"
] | 2
|
2021-06-16T01:54:36.000Z
|
2021-11-08T13:00:39.000Z
|
setup.py
|
br-g/pyroaman
|
86d9a4771e4e0657c96e1c45dacbbde579e527d9
|
[
"MIT"
] | null | null | null |
setup.py
|
br-g/pyroaman
|
86d9a4771e4e0657c96e1c45dacbbde579e527d9
|
[
"MIT"
] | 1
|
2021-04-24T17:02:26.000Z
|
2021-04-24T17:02:26.000Z
|
from distutils.core import setup
from setuptools import find_packages
# Reuse the README verbatim as the PyPI long description.
with open('README.md', 'r') as fh:
    long_description = fh.read()
# Packaging metadata for pyroaman.
# NOTE(review): 'dataclasses' and 'pathlib' are in the standard library
# on most supported versions (>=3.7 / >=3.4); confirm whether the
# backport pins are still needed given python_requires='>=3.6'.
setup(
    name='pyroaman',
    version='0.1.1',
    license='MIT',
    description='Roam Research with Python',
    author = 'Bruno Godefroy',
    author_email='brgo@mail.com',
    url = 'https://github.com/br-g/pyroaman',
    download_url = 'https://github.com/br-g/pyroaman/archive/v0.1.1.tar.gz',
    keywords = ['Roam Research'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(exclude=['tests']),
    python_requires='>=3.6',
    install_requires=[
        'cached_property',
        'dataclasses',
        'loguru',
        'tqdm',
        'pathlib',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
| 29.722222
| 76
| 0.627103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 500
| 0.46729
|
e7ff7ca7cdc4e23499b3182976ee2bee8f1569cf
| 974
|
py
|
Python
|
pgel_sat.py
|
AndrewIjano/pgel-sat
|
25b6ef5922a9fa79bbcf9896cf9a5eefd9925e45
|
[
"MIT"
] | null | null | null |
pgel_sat.py
|
AndrewIjano/pgel-sat
|
25b6ef5922a9fa79bbcf9896cf9a5eefd9925e45
|
[
"MIT"
] | null | null | null |
pgel_sat.py
|
AndrewIjano/pgel-sat
|
25b6ef5922a9fa79bbcf9896cf9a5eefd9925e45
|
[
"MIT"
] | null | null | null |
import sys
from pgel_sat import ProbabilisticKnowledgeBase, solve
import argparse
def str_lp(lp):
    """Format an LP solution (x, y, cost attributes) as a multi-line string."""
    lines = [
        'lp solution:',
        f'    x: {lp.x}',
        f'    y: {lp.y}',
        f'    cost: {lp.cost}',
    ]
    return '\n'.join(lines)
def main():
    """Entry point: parse the CLI, load the knowledge base, solve, report."""
    args = init_argparse().parse_args()
    kb = ProbabilisticKnowledgeBase.from_file(args.file[0])
    outcome = solve(kb)
    print('is satisfiable:', outcome['satisfiable'])
    print(str_lp(outcome['lp']))
def init_argparse():
    """Build the command-line parser for the PGEL-SAT solver.

    Returns
    -------
    argparse.ArgumentParser
        Parser accepting one positional OWL file path and a -v/--verbose flag.
    """
    parser = argparse.ArgumentParser(
        # FIX: the original implicit string concatenation dropped the space
        # between "algorithm" and "in", producing "algorithmin" in --help.
        description='Computes the Probabilistic SAT algorithm '
                    'in a Probabilistic Graphic EL knowledge base.'
    )
    parser.add_argument(
        'file', nargs=1, type=str,
        help='path of the OWL file with the Probabilistic Graphic EL ontology')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='prints the problem and solution')
    return parser
if __name__ == '__main__':
main()
| 23.190476
| 79
| 0.637577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.339836
|
e7ffb07502a866daacad535d6c162c3df47ed0fa
| 1,075
|
py
|
Python
|
001-050/029-divide-two-integers.py
|
bbram10/leetcode-master
|
565f5f0cb3c9720e59a78ddf2e5e6e829c70bac6
|
[
"MIT"
] | 134
|
2017-01-16T11:17:44.000Z
|
2022-03-16T17:13:26.000Z
|
001-050/029-divide-two-integers.py
|
bbram10/leetcode-master
|
565f5f0cb3c9720e59a78ddf2e5e6e829c70bac6
|
[
"MIT"
] | 1
|
2019-11-18T02:10:51.000Z
|
2019-11-18T02:10:51.000Z
|
001-050/029-divide-two-integers.py
|
bbram10/leetcode-master
|
565f5f0cb3c9720e59a78ddf2e5e6e829c70bac6
|
[
"MIT"
] | 54
|
2017-07-17T01:24:00.000Z
|
2022-02-06T05:28:44.000Z
|
"""
STATEMENT
Divide two integers without using multiplication, division and mod operator.
CLARIFICATIONS
- Do I have to handle 32-bit integer overflow? Yes, return the MAX_INT in that case.
- Can the divisor be zero? Yes, return the MAX_INT.
EXAMPLES
34/3 -> 11
COMMENTS
- This solution is by tusizi in Leetcode (picked up from https://discuss.leetcode.com/topic/8714/clear-python-code)
"""
def divide(dividend, divisor):
    """
    Divide two integers without *, /, or %, truncating toward zero.

    :type dividend: int
    :type divisor: int
    :rtype: int

    Returns INT_MAX for division by zero and for the single 32-bit
    overflow case (INT_MIN / -1), per the problem's clarifications.
    """
    INT_MIN, INT_MAX = -2147483648, 2147483647
    # FIX: guard the special cases before abs().  The original tested
    # `dividend < INT_MIN` *after* abs(), which can never be true (dead
    # code); the correct answer was only produced by the final clamp.
    if divisor == 0 or (dividend == INT_MIN and divisor == -1):
        return INT_MAX
    negative = (dividend < 0) != (divisor < 0)
    dividend, divisor = abs(dividend), abs(divisor)
    quotient = 0
    while dividend >= divisor:
        # Repeatedly subtract doubling multiples of the divisor
        # (shift-subtract long division).
        chunk, multiple = divisor, 1
        while dividend >= chunk:
            dividend -= chunk
            quotient += multiple
            multiple <<= 1
            chunk <<= 1
    if negative:
        quotient = -quotient
    # Clamp into the signed 32-bit range.
    return min(max(INT_MIN, quotient), INT_MAX)
| 27.564103
| 115
| 0.613953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 484
| 0.450233
|
f000259412224e1de269649b857a4c3b51a9f98c
| 1,409
|
py
|
Python
|
lang/langs/deutsch.py
|
SuRoryz/surbot-osu
|
2b27697417d2a69f40937b4c463530f2a6b98166
|
[
"MIT"
] | null | null | null |
lang/langs/deutsch.py
|
SuRoryz/surbot-osu
|
2b27697417d2a69f40937b4c463530f2a6b98166
|
[
"MIT"
] | 1
|
2020-05-01T17:56:27.000Z
|
2020-05-01T17:56:27.000Z
|
lang/langs/deutsch.py
|
SuRoryz/surbot-osu
|
2b27697417d2a69f40937b4c463530f2a6b98166
|
[
"MIT"
] | null | null | null |
class Samples:
    """German (Deutsch) localization: message templates for the osu! bot.

    Each attribute is a ``str.format`` template that the bot fills at
    runtime, so the placeholder order must not be changed.
    """

    def __init__(self):
        # COMMANDS — templates for normal command replies
        self.PP = ('Für [https://osu.ppy.sh/b/{} {} [{}]{}] (OD {}, AR {}, '
                'CS {}, {}★, {}:{}) wirst du {} {}')
        self.PP_FOR = ('| {}pp bekommen für {}% ')
        self.PP_PRED = ('Für [https://osu.ppy.sh/b/{} {} [{}]{}] (OD {}, AR {}, '
                'CS {}, {}★, {}:{}) wirst du {} {} # {}')
        self.PP_PRED_IMPOSSIBLE = ('Unmöglich zu FC für dich')
        self.PP_PRED_FUTURE = ('Es erwarten dich: {}pp')
        self.INFO = ('Sie kannst Quelle und Information '
                '[https://suroryz.github.io/surbot-osu/ hier] finden')
        self.LANG_CHANGED = ('Sprache erfolgreich geändert. '
                'Localizer: some HiNative guy')
        # ERRORS — templates for error replies
        self.ERROR_SYNTAX = ('Sie hast etwas falsches eingegeben. '
                'Kontrollieren Sie die Hilfeseite -> .info')
        self.ERROR_NP_NEED = ('Sie müssen /np vorher verwenden')
        self.ERROR_NO_LANGUAGE = ('Entschuldigung, aber ich kann deine/Ihre Sprache nicht in meiner Datenbank finden. '
                'Versuchen Sie den ISO 639-1 Sprachen-Code zu nutzen. '
                'Wenn Ihre dort nicht vorzufinden ist, können Sie das '
                '[https://suroryz.github.io/surbot-osu/lang/langs hier] melden')
| 44.03125
| 119
| 0.499645
| 1,420
| 0.999296
| 0
| 0
| 0
| 0
| 0
| 0
| 816
| 0.574243
|
f000c275681d6eb860ca8edd89619bd04e3efa9d
| 508
|
py
|
Python
|
conv/setup.py
|
hughpyle/GW-BASIC
|
f0c1ef3c9655b36cd312d18e4620bb076f03afd3
|
[
"MIT"
] | 26
|
2020-05-23T18:09:05.000Z
|
2022-01-30T10:07:04.000Z
|
conv/setup.py
|
hughpyle/GW-BASIC
|
f0c1ef3c9655b36cd312d18e4620bb076f03afd3
|
[
"MIT"
] | 1
|
2020-06-25T06:20:01.000Z
|
2020-06-25T06:20:01.000Z
|
conv/setup.py
|
hughpyle/GW-BASIC
|
f0c1ef3c9655b36cd312d18e4620bb076f03afd3
|
[
"MIT"
] | 4
|
2020-05-23T12:36:44.000Z
|
2022-01-16T00:20:20.000Z
|
from setuptools import setup, find_packages
"""
https://tia.mat.br/posts/2020/06/21/converting-gwbasic-to-z80.html
"""
# Packaging metadata; the actual converter entry point is z80conv.conv:main.
setup(
    name="z80conv",
    version='0.0.1',
    author="lp",
    description="Porting GW-BASIC from 8086 back to the Z80",
    license="GPLv2",
    packages=find_packages(),
    long_description="Porting GW-BASIC from 8086 back to the Z80",
    install_requires=[],
    tests_require=['pytest'],
    entry_points = {
        'console_scripts': ['z80conv=z80conv.conv:main'],
    }
)
| 24.190476
| 66
| 0.661417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.474409
|
f000f73c7ff791dd3f202fae2e9cd2cdf7773f23
| 8,046
|
py
|
Python
|
hera_cc_utils/catalog.py
|
pagano-michael/hera_cc_utils
|
2d61f8ab0bb4d75b9a2e5891450256195851db08
|
[
"MIT"
] | null | null | null |
hera_cc_utils/catalog.py
|
pagano-michael/hera_cc_utils
|
2d61f8ab0bb4d75b9a2e5891450256195851db08
|
[
"MIT"
] | 6
|
2021-09-08T21:28:12.000Z
|
2021-09-15T18:18:33.000Z
|
hera_cc_utils/catalog.py
|
pagano-michael/hera_cc_utils
|
2d61f8ab0bb4d75b9a2e5891450256195851db08
|
[
"MIT"
] | 1
|
2021-12-01T15:29:55.000Z
|
2021-12-01T15:29:55.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 The HERA Collaboration
# Licensed under the MIT License
"""Utilities for dealing with galaxy/QSO catalogs."""
import numpy as np
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord
from .util import deg_per_hr
_xshooter_ref = "https://ui.adsabs.harvard.edu/abs/2020ApJ...905...51S/abstract"
# VIKING
_viking_ref1 = "https://ui.adsabs.harvard.edu/abs/2013ApJ...779...24V/abstract"
_viking_ref2 = "https://ui.adsabs.harvard.edu/abs/2015MNRAS.453.2259V/abstract"
_viking = {
"J2348-3054": {
"ra": "23h48m33.34s",
"dec": "-30d54m10.0s",
"z": 6.886,
"ref": _viking_ref1,
},
"J0109-3047": {
"ra": "01h09m53.13s",
"dec": "-30d47m26.3s",
"z": 6.745,
"ref": _viking_ref1,
},
"J0305-3150": {
"ra": "03h05m16.92s",
"dec": "-31d50m56.0s",
"z": 6.604,
"ref": _viking_ref1,
},
"J0328-3253": {
"ra": "03h28m35.511s",
"dec": "-32d53m22.92s",
"z": 5.860,
"ref": _viking_ref2,
},
"J0046-2837": {
"ra": "00h46m23.645s",
"dec": "-28d37m47.34s",
"z": 5.9926,
"ref": _xshooter_ref,
},
"J2211-3206": {
"ra": "22h11m12.391s",
"dec": "-32d06m12.95s",
"z": 6.3394,
"ref": _xshooter_ref,
},
"J2318-3029": {
"ra": "23h18m33.103s",
"dec": "-30d29m33.36s",
"z": 6.1456,
"ref": _xshooter_ref,
},
"J2348-3054_xshooter": {
"ra": "23h48m33.336s",
"dec": "-30d54m10.24s",
"z": 6.9007,
"ref": _xshooter_ref,
},
}
# Pan-STARRS1
_ps1_ref1 = "https://ui.adsabs.harvard.edu/abs/2014AJ....148...14B/abstract"
_ps1_ref2 = "https://ui.adsabs.harvard.edu/abs/2017ApJ...849...91M/abstract"
_ps1 = {
"PSO 231-20": {"ra": "231.6576", "dec": "-20.8335", "z": 6.5864, "ref": _ps1_ref2},
"PSO J037.9706-28.8389": {
"ra": "02h31m52.96s",
"dec": "-28d50m20.1s",
"z": 5.99,
"ref": _ps1_ref1,
},
"PSO J065.4085-26.9543": {
"ra": "04h21m38.049s",
"dec": "-26d57m15.61s",
"z": 6.1871,
"ref": _xshooter_ref,
},
}
# Banados+ 2016 https://ui.adsabs.harvard.edu/abs/2016ApJS..227...11B/abstract
# has table of all z > 5.6 quasars known at that point (March 2016).
# https://ned.ipac.caltech.edu/inrefcode?search_type=Search&refcode=2016ApJS..227...11B
# VLT ATLAS
# https://ui.adsabs.harvard.edu/abs/2015MNRAS.451L..16C/abstract
_atlas_ref1 = "https://ui.adsabs.harvard.edu/abs/2015MNRAS.451L..16C/abstract"
_atlas_ref2 = "https://ui.adsabs.harvard.edu/abs/2018MNRAS.478.1649C/abstract"
_atlas = {
"J025.6821-33.4627": {
"ra": "025.6821",
"dec": "-33.4627",
"z": 6.31,
"ref": _atlas_ref1,
},
"J332.8017-32.1036": {
"ra": "332.8017",
"dec": "-32.1036",
"z": 6.32,
"ref": _atlas_ref2,
},
}
# VHS-DES
_ps1_vhs_des = "https://ui.adsabs.harvard.edu/abs/2019MNRAS.487.1874R/abstract"
_des = {
"VDES J0020-3653": {
"ra": "00h20m31.47s",
"dec": "-36d53m41.8s",
"z": 6.5864,
"ref": _ps1_vhs_des,
},
}
_yang = "https://ui.adsabs.harvard.edu/abs/2020ApJ...904...26Y/abstract"
_decarli = "https://ui.adsabs.harvard.edu/abs/2018ApJ...854...97D/abstract"
_other = {
"J0142−3327": {"ra": "0142", "dec": "-3327", "z": 6.3379, "ref": _yang},
"J0148−2826": {"ra": "0148", "dec": "-2826", "z": 6.54, "ref": _yang},
"J2002−3013": {"ra": "2002", "dec": "-3013", "z": 6.67, "ref": _yang},
"J2318–3113": {
"ra": "23h18m18.351s",
"dec": "-31d13m46.35s",
"z": 6.444,
"ref": _decarli,
},
}
def _to_decimal(s):
if "." in s:
out = float(s)
elif s[0] == "-":
out = float(s[0:3] + "." + s[3:])
else:
out = float(s[0:2] + "." + s[2:])
return out
# Registry of all QSO catalogs consulted by Catalog.get_all_pos.
# NOTE(review): _des is defined above but not registered here — confirm
# whether its omission is intentional.
_qso_catalogs = {"viking": _viking, "panstarrs": _ps1, "atlas": _atlas, "other": _other}
class Catalog(object):
    """
    Define a class for handling QSO catalogs.

    Parameters
    ----------
    data : str
        The type of data to handle. Right now "qso" is the only allowed value.
    kwargs : dict
        Keyword arguments to save directly on the object.
    """

    def __init__(self, data, **kwargs):
        self.data = data
        self.kwargs = kwargs

    def plot_catalog(
        self, ax=None, zmin=None, num=1, projection="rectilinear", **fig_kwargs
    ):
        """
        Plot a catalog using matplotlib.

        Parameters
        ----------
        ax : matplotlib axis object, optional
            The axes to use for plotting. If None, then a new figure and axis
            will be created.
        zmin : float, optional
            The minimum redshift to use for plotting objects.
        num : int, optional
            The figure number to create if `ax` is not provided.
        projection : str, optional
            The projection to use for plotting.
        kwargs : dict, optional
            Additional kwargs passed to matplotlib.pyplot.figure

        Returns
        -------
        ax : matplotlib axis object
            If `ax` is provided as a parameter, the same axis object. Otherwise,
            a new one.

        Raises
        ------
        NotImplementedError
            Raised if any projection besides "rectilinear" is passed.
        """
        if projection != "rectilinear":
            raise NotImplementedError("Only know rectilinear projection right now!")
        # Setup plot window
        has_ax = True
        if ax is None:
            fig = plt.figure(num=num, **fig_kwargs)
            ax = fig.gca()
            has_ax = False
        # Get all objects in catalog
        # (names is currently unused here — NOTE(review): consider using it
        # to label the plotted points.)
        names, coords = self.get_all_pos(zmin=zmin)
        # Loop over them all and plot. Could do a lot more efficiently if
        # we ever end up with big catalogs.
        for i, coord in enumerate(coords):
            ra, dec, z = coord
            ax.scatter(ra, dec)
        # Only label the axes on a figure this call created itself.
        if not has_ax:
            ax.set_xlabel(r"Right Ascension [hours]", fontsize=24, labelpad=5)
            ax.set_ylabel(r"Declination [deg]", fontsize=24, labelpad=5)
        return ax

    def get_all_pos(self, zmin=None):
        """
        Return a list of (RA, DEC, redshift) for all objects.

        Parameters
        ----------
        zmin : float
            The minimum redshift to include for objects in the catalog.

        Returns
        -------
        names : list of str, shape (n_objects)
            The names of objects in the catalog.
        data : ndarray, shape (n_objects, 3)
            The RA [hour angle], dec [degree], and redshift of the objects.

        Raises
        ------
        ValueError
            This is raised if `self.data` is not "qso", as this is the only type
            of data we know how to handle right now.
        """
        if not self.data.lower().startswith("qso"):
            raise ValueError("Only know how to do QSOs right now.")
        data = []
        names = []
        for cat in _qso_catalogs.keys():
            for element in _qso_catalogs[cat]:
                obj = _qso_catalogs[cat][element]
                if zmin is not None:
                    if obj["z"] < zmin:
                        continue
                if "h" in obj["ra"]:
                    # Sexagesimal strings like "23h48m33.34s": SkyCoord parses
                    # the units directly from the string.
                    kw = {"frame": "icrs"}
                    ra = obj["ra"]
                    dec = obj["dec"]
                else:
                    kw = {"unit": "degree", "frame": "icrs"}
                    # 4-character compact RA strings are converted via
                    # deg_per_hr, i.e. they encode hours; longer strings are
                    # taken as degrees.
                    if len(obj["ra"]) == 4:
                        ra = _to_decimal(obj["ra"]) * deg_per_hr
                    else:
                        ra = _to_decimal(obj["ra"])
                    dec = _to_decimal(obj["dec"])
                coord = SkyCoord(ra, dec, **kw)
                names.append(element)
                data.append((coord.ra.hour, coord.dec.degree, obj["z"]))
        return names, np.array(data)
| 28.83871
| 88
| 0.527716
| 4,001
| 0.496772
| 0
| 0
| 0
| 0
| 0
| 0
| 4,404
| 0.546809
|
f002326f1a28c7e060443caad098a4b8ad312c0c
| 216
|
py
|
Python
|
src/myutils/__init__.py
|
yyHaker/TextClassification
|
dc3c5ffe0731609c8f0c7a18a4daa5f149f83e9f
|
[
"MIT"
] | 3
|
2019-06-08T14:11:56.000Z
|
2020-05-26T15:08:23.000Z
|
src/myutils/__init__.py
|
yyHaker/TextClassification
|
dc3c5ffe0731609c8f0c7a18a4daa5f149f83e9f
|
[
"MIT"
] | null | null | null |
src/myutils/__init__.py
|
yyHaker/TextClassification
|
dc3c5ffe0731609c8f0c7a18a4daa5f149f83e9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding:utf-8
"""
@author: yyhaker
@contact: 572176750@qq.com
@file: __init__.py
@time: 2019/3/9 15:41
"""
# Re-export the public names of the package submodules at package level so
# callers can write `from myutils import <name>` directly.
from .util import *
from .functions import *
from .nn import *
from .attention import *
| 14.4
| 26
| 0.685185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.569444
|
f0023ff5d4658332709a6d9a26c8392cbad88994
| 1,236
|
py
|
Python
|
configs/config.py
|
AcordUch/open-heartmagic
|
aa76b098cc19b2ac6d1bc149461c421fcbbd3301
|
[
"MIT"
] | null | null | null |
configs/config.py
|
AcordUch/open-heartmagic
|
aa76b098cc19b2ac6d1bc149461c421fcbbd3301
|
[
"MIT"
] | null | null | null |
configs/config.py
|
AcordUch/open-heartmagic
|
aa76b098cc19b2ac6d1bc149461c421fcbbd3301
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
from os import path
def create_config() -> None:
    """Write a fresh config.ini populated with placeholder Telegram credentials."""
    section = "Telegram"
    _config.add_section(section)
    defaults = (
        ("api_id", "you api_id here"),
        ("api_hash", "you api_hash here"),
        ("username", "magicBot"),
        ("session_string", "None"),
    )
    for option, value in defaults:
        _config.set(section, option, value)
    with open(_path, "w") as config_file:
        _config.write(config_file)
def write_session_string_in_config(session_string: str) -> None:
    """Persist *session_string* into the [Telegram] section of config.ini."""
    _config.set("Telegram", "session_string", session_string)
    with open(_path, "w") as out_file:
        _config.write(out_file)
# Module-level parser and the path of config.ini, located next to this file.
_config: ConfigParser = ConfigParser()
_path: str = path.join(path.dirname(__file__), "config.ini")
# First run: create a template config, tell the user to fill in the API keys
# (message is in Russian: "config.ini was missing, fill in the api in it"),
# and stop the program.
if not path.exists(_path):
    create_config()
    print("Отсутствовал файл configs.ini файл, заполните api в нём")
    exit()
_config.read(_path)
# Values exposed to the rest of the application.
API_ID = _config['Telegram']['api_id']
API_HASH = _config['Telegram']['api_hash']
USERNAME: str = _config['Telegram']['username']
# SESSION_STRING stays None until a real session string has been stored.
SESSION_STRING = (None
                  if _config['Telegram']['session_string'] == "None" or
                  _config['Telegram']['session_string'] == ""
                  else _config['Telegram']['session_string'])
| 32.526316
| 71
| 0.673139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 424
| 0.334121
|
f0057eec2984c2e77cf59e2e17b878ea511d289d
| 609
|
py
|
Python
|
Python/squirrel-simulation.py
|
xiaohalo/LeetCode
|
68211ba081934b21bb1968046b7e3c1459b3da2d
|
[
"MIT"
] | 9
|
2019-06-30T07:15:18.000Z
|
2022-02-10T20:13:40.000Z
|
Python/squirrel-simulation.py
|
pnandini/LeetCode
|
e746c3298be96dec8e160da9378940568ef631b1
|
[
"MIT"
] | 1
|
2018-07-10T03:28:43.000Z
|
2018-07-10T03:28:43.000Z
|
Python/squirrel-simulation.py
|
pnandini/LeetCode
|
e746c3298be96dec8e160da9378940568ef631b1
|
[
"MIT"
] | 9
|
2019-01-16T22:16:49.000Z
|
2022-02-06T17:33:41.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
    def minDistance(self, height, width, tree, squirrel, nuts):
        """
        :type height: int
        :type width: int
        :type tree: List[int]
        :type squirrel: List[int]
        :type nuts: List[List[int]]
        :rtype: int
        """
        def manhattan(a, b):
            return abs(a[0] - b[0]) + abs(a[1] - b[1])

        # Every nut costs a round trip tree -> nut -> tree, except the very
        # first nut, where the squirrel leg replaces one tree leg; pick the
        # nut that minimizes that swap.
        round_trips = sum(2 * manhattan(nut, tree) for nut in nuts)
        best_swap = min(
            (manhattan(nut, squirrel) - manhattan(nut, tree) for nut in nuts),
            default=float("inf"))
        return round_trips + best_swap
| 26.478261
| 69
| 0.489327
| 579
| 0.950739
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.356322
|
f0068035c6bebf4ad8dfcbde5996ed5461d03f51
| 345
|
py
|
Python
|
scripts/utils/merge.py
|
GabrielTavernini/TelegramMap
|
96879d037a3e65b555a8f13f4f468645a02cf1f2
|
[
"MIT"
] | 3
|
2021-02-19T21:43:49.000Z
|
2022-03-30T07:50:06.000Z
|
scripts/utils/merge.py
|
GabrielTavernini/TelegramMap
|
96879d037a3e65b555a8f13f4f468645a02cf1f2
|
[
"MIT"
] | null | null | null |
scripts/utils/merge.py
|
GabrielTavernini/TelegramMap
|
96879d037a3e65b555a8f13f4f468645a02cf1f2
|
[
"MIT"
] | 2
|
2021-02-20T16:50:48.000Z
|
2022-01-25T15:15:07.000Z
|
import os
import sys

import pandas as pd
from dotenv import load_dotenv

# BUGFIX: `os` is used below (os.getenv) but was never imported, so the
# script crashed with a NameError before reading any data.
load_dotenv()

# Source rows come from the CSV named on the command line; the destination
# catalog is the file referenced by the FILE_PATH environment variable.
src = pd.read_csv(sys.argv[1])
dst = pd.read_csv(os.getenv('FILE_PATH'))

# Stack the two frames, then drop rows whose user already appeared (keeping
# the first, i.e. existing, row) except for the placeholder 'Point' entries,
# and finally drop fully duplicated rows.
fdf = pd.concat([dst, src])
fdf = fdf[~((fdf['user'].duplicated(keep='first')) & (fdf['user'] != 'Point'))]
fdf = fdf[~fdf.duplicated(keep='first')]
fdf.to_csv(os.getenv('FILE_PATH'), index=False)
| 28.75
| 77
| 0.692754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 55
| 0.15942
|
f00829ce69ca21d2a75d867579f5065b5c43824d
| 395
|
py
|
Python
|
lib/locator/location_test.py
|
alt-locator/address-locator-python
|
9f052dc7721223bde926723648790a17b06e9d7a
|
[
"MIT"
] | null | null | null |
lib/locator/location_test.py
|
alt-locator/address-locator-python
|
9f052dc7721223bde926723648790a17b06e9d7a
|
[
"MIT"
] | null | null | null |
lib/locator/location_test.py
|
alt-locator/address-locator-python
|
9f052dc7721223bde926723648790a17b06e9d7a
|
[
"MIT"
] | null | null | null |
import location
import unittest


class LocationTest(unittest.TestCase):
    def testToJson(self):
        """to_json must round-trip the name and the nested local IP address."""
        fixture = location.Location(
            name='foo',
            local_ip_address={'en0': {'local_ip_address': '1.2.3.4'}})
        payload = fixture.to_json()
        self.assertEqual('foo', payload['name'])
        self.assertEqual(
            '1.2.3.4',
            payload['local_ip_address']['en0']['local_ip_address'])
| 30.384615
| 78
| 0.698734
| 360
| 0.911392
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.248101
|
f0089faf3980c65d96a9b87de2dfb4cc044e17a8
| 41,489
|
py
|
Python
|
ProjectiveClusteringCoreset.py
|
muradtuk/ProjectiveClusteringCoresets
|
2dcb59723934dc545da9ff84a1f71eb5e02b49d1
|
[
"MIT"
] | null | null | null |
ProjectiveClusteringCoreset.py
|
muradtuk/ProjectiveClusteringCoresets
|
2dcb59723934dc545da9ff84a1f71eb5e02b49d1
|
[
"MIT"
] | null | null | null |
ProjectiveClusteringCoreset.py
|
muradtuk/ProjectiveClusteringCoresets
|
2dcb59723934dc545da9ff84a1f71eb5e02b49d1
|
[
"MIT"
] | null | null | null |
"""*****************************************************************************************
MIT License
Copyright (c) 2022 Murad Tukan, Xuan Wu, Samson Zhou, Vladimir Braverman, Dan Feldman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
import Utils
from helper_functions import Fast_Caratheodory
import numpy as np
from scipy.optimize import linprog
from numpy import linalg as la
from scipy.linalg import null_space
from numpy.linalg import matrix_rank
from sklearn.decomposition import TruncatedSVD
import time
######################################################## Caratheodory ##################################################
def computeInitialWeightVector(P, p):
    """
    Given a point, solve the linear program dot(P.T, x) = p, sum(x) = 1 where
    x \in [0, \infty)^n and n denotes the number of rows of P.
    :param P: A numpy matrix whose rows are the input points.
    :param p: A numpy array representing a point.
    :return: A numpy array of n non-negative weights with respect to each row of P.
    """
    N = P.shape[0]  # number of rows of P
    # Equality constraints: rows of P reproduce p, and the weights sum to one,
    # i.e. p is a convex combination of the rows.
    Q = P.T
    Q = np.vstack((Q, np.ones((1, N))))
    b = np.hstack((p, 1))
    res = linprog(np.ones((N,)), A_eq=Q, b_eq=b, options={'maxiter': int(1e7), 'tol': 1e-10})
    w = res.x
    # BUGFIX: the original asserted a non-empty tuple, which is always truthy
    # and therefore never fired; assert the reconstruction error itself.
    assert np.linalg.norm(np.dot(P.T, w) - p) <= 1e-9, np.linalg.norm(np.dot(P.T, w) - p)
    return w
def attainCaratheodorySet(P, p):
    """
    Return a set of at most d+1 indices of rows of P whose convex combination
    yields p, where d denotes the dimension of the rows of P. Relies on the
    algorithms implemented by Alaa Maalouf, Ibrahim Jubran and Dan Feldman in
    "Fast and Accurate Least-Mean-Squares Solvers".
    :param P: A numpy matrix whose rows are the input points.
    :param p: A numpy array denoting a point.
    :return: The indices of rows of P of which p is a convex combination.
    """
    d = P.shape[1]
    u = computeInitialWeightVector(P, p)  # initial (possibly dense) weight vector
    if np.count_nonzero(u) > (d + 1):  # too many positive weights: sparsify
        u = Fast_Caratheodory(P, u.flatten(), False)
    # BUGFIX: the original asserted a non-empty tuple (always truthy); assert
    # the reconstruction error itself.
    assert np.linalg.norm(p - np.dot(P.T, u)) <= 1e-9, np.linalg.norm(p - np.dot(P.T, u))
    return np.where(u != 0)[0]
############################################################ AMVEE #####################################################
def isPD(B):
    """Return True when *B* is positive-definite, via a Cholesky attempt."""
    try:
        la.cholesky(B)
    except la.LinAlgError:
        return False
    return True
def nearestPD(A):
    """Find the nearest positive-definite matrix to input.

    A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1],
    which credits [2].

    [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    [2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
    matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
    """
    sym = (A + A.T) / 2
    _, sing_vals, Vt = la.svd(sym)
    polar = np.dot(Vt.T, np.dot(np.diag(sing_vals), Vt))
    candidate = (sym + polar) / 2
    candidate = (candidate + candidate.T) / 2  # enforce exact symmetry
    if isPD(candidate):
        return candidate
    # MATLAB's `chol` accepts matrices with an exactly-zero eigenvalue while
    # numpy's does not, so nudge the spectrum upward by a growing multiple of
    # the most negative real eigenvalue plus a spacing-sized cushion. Both
    # formulations converge in practice.
    spacing = np.spacing(la.norm(A))
    identity = np.eye(A.shape[0])
    k = 1
    while not isPD(candidate):
        min_eig = np.min(np.real(la.eigvals(candidate)))
        candidate += identity * (-min_eig * k ** 2 + spacing)
        k += 1
    return candidate
def computeAxesPoints(E, C):
    """
    Find the vertices of the ellipsoid described by the quadratic form E
    centered at C.
    :param E: A numpy p.s.d. matrix representing the ellipsoid.
    :param C: A numpy array holding the ellipsoid's center.
    :return: A numpy matrix whose rows are the ellipsoid's vertices.
    """
    if not isPD(E):
        E = nearestPD(E)  # repair numerically indefinite input
    # Axis lengths come from the singular values, orientations from V; the
    # resulting vertices are for an origin-centered ellipsoid.
    _, D, V = np.linalg.svd(E, full_matrices=True)
    vertices = np.multiply(1.0 / np.sqrt(D[:, np.newaxis]), V.T)
    center = C.flatten()
    return np.vstack((vertices + center, -vertices + center))
def volumeApproximation(P):
    """
    Implementation of Algorithm 4.1 of "On Khachiyan's Algorithm for the
    Computation of Minimum Volume Enclosing Ellipsoids" by Michael J. Todd and
    E. Alper Yildirim. Computes a set of at most 2*d points used to seed an
    initial ellipsoid.
    :param P: A numpy matrix whose rows are the input points.
    :return: A list of at most 2*d indices of rows of P.
    """
    basis = None
    basis_points = []
    n, d = P.shape  # BUGFIX: original unpacked P itself instead of P.shape
    if n <= 2 * d:
        # fewer points than 2*d: return all indices
        return [i for i in range(n)]
    v = np.random.randn(d)  # start with a random direction
    while np.linalg.matrix_rank(basis) < d:  # until the basis spans REAL^d
        if basis is not None:  # basis directions already exist
            if basis.shape[1] == d:
                # full-width yet rank-deficient basis: numerical instability
                print('Numerical Issues!')
                _, _, V = np.linalg.svd(basis[:, :-1], full_matrices=True)
                return list(range(n))
            orth_basis = null_space(basis.T)  # directions orthogonal to basis
            v = orth_basis[:, 0] if orth_basis.ndim > 1 else orth_basis
        Q = np.dot(P, v.T)  # projection of every point onto v
        if len(basis_points) > 0:
            # mask already-chosen points so they cannot be picked again
            Q[basis_points] = np.nan
        # BUGFIX: originally argmax/argmin were recomputed on the unmasked
        # dot product, ignoring the nan masking above; use Q instead.
        p_alpha = np.nanargmax(Q)  # farthest point along v
        p_beta = np.nanargmin(Q)  # farthest point along -v
        v = np.expand_dims(P[p_beta, :] - P[p_alpha, :], 1)  # their difference
        if basis is None:
            basis = v / np.linalg.norm(v)
        else:
            basis = np.hstack((basis, v / np.linalg.norm(v, 2)))
        basis_points.append(p_alpha)
        basis_points.append(p_beta)
    return basis_points
def computemahalanobisDistance(Q, ellip):
    """
    Compute the Mahalanobis distance q^T * ellip * q for each row q of Q.
    :param Q: A numpy matrix whose rows are points.
    :param ellip: A numpy array representing a p.s.d matrix (an ellipsoid).
    :return: A 1d numpy array with one distance per row of Q.
    """
    transformed = np.dot(Q, ellip)
    return np.sum(transformed * Q, axis=1)
def computeEllipsoid(P, weights):
    """
    Compute the ellipsoid induced by a weight vector over the rows of P.
    :param P: A numpy matrix whose rows are points.
    :param weights: A numpy array of weights with respect to the rows of P.
    :return:
        - A p.s.d. matrix representing the ellipsoid.
        - The center of the ellipsoid.
    """
    # Make sure the weights form a column vector.
    col_weights = weights if weights.ndim > 1 else np.expand_dims(weights, 1)
    center = np.dot(P.T, col_weights)  # weighted mean = ellipsoid center
    d = P.shape[1]
    support = np.where(col_weights.flatten() > 0.0)[0]  # rows with positive weight
    Q = P[support, :]
    pos_weights = col_weights[support, :]
    # Quadratic form: 1/d * (sum_i w_i q_i q_i^T - c c^T)^{-1}
    scatter = np.dot(np.multiply(Q, pos_weights).T, Q)
    outer = np.multiply.outer(center.T.ravel(), center.T.ravel())
    ellipsoid = 1.0 / d * np.linalg.inv(scatter - outer)
    return ellipsoid, center
def enlargeByTol(ellipsoid):
    """
    Scale down the quadratic form so that the ellipsoid it describes grows by
    a factor of (1 + Utils.TOL).
    :param ellipsoid: A numpy matrix representing a p.s.d matrix.
    :return: The enlarged version of the ellipsoid.
    """
    scale = (1 + Utils.TOL) ** 2.0
    return ellipsoid / scale
def getContainedEllipsoid(ellipsoid):
    """
    Dilate the MVEE quadratic form so that the resulting ellipsoid is
    contained in the convex hull of the input points.
    :param ellipsoid: A p.s.d matrix which represents the MVEE of the points.
    :return: The inscribed (shrunken) version of the ellipsoid.
    """
    d = ellipsoid.shape[1]
    return ellipsoid * (d ** 2) * (1 + Utils.TOL) ** 2
def computeEllipInHigherDimension(Q, weights):
    """
    Compute the origin-centered ellipsoid of the lifted ((d+1)-dimensional)
    points, i.e. sum_i w_i q_i q_i^T over rows with positive weight.
    :param Q: A numpy matrix whose rows are the lifted points.
    :param weights: A numpy array of weights with respect to each row of Q.
    :return: The p.s.d matrix of the origin-centered ellipsoid.
    """
    support = np.where(weights > 0.0)[0]  # only rows with positive weight matter
    scaled = np.multiply(Q[support, :], np.expand_dims(np.sqrt(weights[support]), 1))
    # scaled^T scaled == sum_i w_i q_i q_i^T
    return np.dot(scaled.T, scaled)
def optimalityCondition(d, Q, ellip, weights):
    """
    Check whether (ellip, weights) already describes the desired MVEE in the
    sense of the Todd & Yildirim algorithm.
    :param d: The dimension of the original (unlifted) points.
    :param Q: A numpy matrix whose rows are the lifted points.
    :param ellip: A numpy array representing a p.s.d matrix.
    :param weights: A numpy array of weights with respect to the rows of Q.
    :return: (True iff the (1 +/- TOL)-optimality conditions hold, the distances).
    """
    support = np.where(weights > 0)[0]  # points with positive weights
    dists = computemahalanobisDistance(Q, ellip)
    # All points must lie within (1 + TOL)(d + 1), and every support point must
    # be at distance at least (1 - TOL)(d + 1).
    upper_ok = np.all(dists <= (1.0 + Utils.TOL) * (d + 1))
    lower_ok = np.all(dists[support] >= (1.0 - Utils.TOL) * (d + 1))
    return upper_ok and lower_ok, dists
def yilidrimAlgorithm(P):
    """
    Implementation of Algorithm 4.2 of "On Khachiyan's Algorithm for the
    Computation of Minimum Volume Enclosing Ellipsoids" by Michael J. Todd and
    E. Alper Yildirim. Computes the MVEE of the rows of P faster than
    Khachiyan's algorithm.
    :param P: A numpy matrix whose rows are the input points.
    :return: (p.s.d. matrix, center) describing the MVEE of P.
    """
    n, d = P.shape
    Q = np.hstack((P, np.ones((n, 1))))  # lift the points by appending a 1
    chosen_indices = volumeApproximation(P)  # initial support set
    if len(chosen_indices) == n:
        # every point was chosen (numerical instability inside
        # volumeApproximation) -- fall back to Khachiyan's algorithm
        return khachiyanAlgorithm(P)
    weights = np.zeros((n, 1)).flatten()
    weights[chosen_indices] = 1.0 / len(chosen_indices)  # uniform on the support
    ellip = np.linalg.inv(computeEllipInHigherDimension(Q, weights))  # initial ellipsoid
    while True:  # iterate until the optimality conditions hold
        stop_flag, distances = optimalityCondition(d, Q, ellip, weights)
        pos_weights_idx = np.where(weights > 0)[0]
        if stop_flag:  # desired MVEE achieved
            break
        j_plus = np.argmax(distances)  # farthest point from the ellipsoid
        k_plus = distances[j_plus]
        # support point closest to the ellipsoid
        j_minus = pos_weights_idx[np.argmin(distances[pos_weights_idx])]
        k_minus = distances[j_minus]
        eps_plus = k_plus / (d + 1.0) - 1.0
        eps_minus = 1.0 - k_minus / (d + 1.0)
        if eps_plus > eps_minus:  # a far-away point gains weight
            beta_current = (k_plus - d - 1.0) / ((d + 1) * (k_plus - 1.0))
            weights = (1.0 - beta_current) * weights
            weights[j_plus] = weights[j_plus] + beta_current
        else:  # an over-weighted support point loses weight
            beta_current = min((d + 1.0 - k_minus) / ((d + 1.0) * (k_minus - 1.0)),
                               weights[j_minus] / (1 - weights[j_minus]))
            weights = weights * (1 + beta_current)
            weights[j_minus] = weights[j_minus] - beta_current
        weights[weights < 0.0] = 0.0  # clamp numerical negatives to zero
        # BUGFIX: the original dropped the Q argument here, which would raise
        # a TypeError on the first weight update.
        ellip = np.linalg.inv(computeEllipInHigherDimension(Q, weights))
    return computeEllipsoid(P, weights)
def khachiyanAlgorithm(P):
    """
    Compute the MVEE of the rows of P via Khachiyan's algorithm
    (Algorithm 3.1 of Todd & Yildirim).
    :param P: A numpy matrix whose rows are the input points.
    :return: (p.s.d. matrix, center) describing the MVEE of P.
    """
    n, d = P.shape
    u = np.ones((n, 1)) / n  # uniform initial weights
    Q = np.hstack((P, np.ones((n, 1))))  # lifted points
    err = 1
    count = 1  # iteration counter (kept for debugging)
    while err > Utils.TOL:  # until the weight updates become negligible
        X = np.dot(np.multiply(Q, u).T, Q)  # current weighted scatter matrix
        M = computemahalanobisDistance(Q, np.linalg.inv(X))  # distances to ellipsoid
        j = np.argmax(M)  # farthest point
        max_val = M[j]
        step_size = (max_val - d - 1) / ((d + 1) * (max_val - 1))
        new_u = (1 - step_size) * u  # shift weight toward the farthest point
        new_u[j, 0] += step_size
        count += 1
        err = np.linalg.norm(new_u - u)  # magnitude of the weight change
        u = new_u
    return computeEllipsoid(P, u)
def computeMVEE(P, alg_type=1):
    """
    Run the chosen algorithm for computing the MVEE of P.
    :param P: A numpy matrix whose rows are the input points.
    :param alg_type: 1 runs Yildirim's algorithm (faster, default); any other
                     value runs Khachiyan's algorithm (slower yet more
                     numerically stable).
    :return:
        - The inscribed version of the MVEE of P.
        - The center of the MVEE of P.
        - The vertices of the inscribed ellipsoid.
    """
    # (Removed a stale `global ax` declaration: `ax` was never used here.)
    if alg_type == 1:  # Yildirim, chosen by default
        E, C = yilidrimAlgorithm(P)
    else:  # Khachiyan, slower yet more numerically stable
        E, C = khachiyanAlgorithm(P)
    contained_ellipsoid = getContainedEllipsoid(E)  # shrink so it fits inside conv(P)
    return contained_ellipsoid, C, computeAxesPoints(contained_ellipsoid, C)
################################################## ApproximateCenterProblems ###########################################
def computeLINFCoresetKOne(P):
    """
    Compute an L-infinity coreset for the matrix-vector multiplication / dot
    product with respect to the points P, where the last column of P holds the
    original index of each point.
    :param P: A numpy matrix; rows are points, last column is an index.
    :return:
        - C: indices (into the original data) of the coreset points,
        - idx_in_P: the row indices of the coreset points in P,
        - an upper bound on the approximation error of the coreset.
    """
    # NOTE: removed the unused `global max_time`, the dead `start_time`
    # measurement, and the deprecated np.int alias (removed in NumPy 1.24).
    r = matrix_rank(P[:, :-1])  # dimension of the span of the points
    d = P.shape[1]
    if r < d - 1:  # the points lie in a proper subspace of REAL^(d-1)
        svd = TruncatedSVD(n_components=r)
        Q = svd.fit_transform(P[:, :-1])  # project onto the spanning subspace
        Q = np.hstack((Q, np.expand_dims(P[:, -1], 1)))  # re-attach the index column
    else:
        Q = P
    if r > 1:  # the projected points are not on a line
        if Q.shape[1] - 1 >= Q.shape[0]:
            # fewer points than dimensions: everything is in the coreset
            return Q, np.arange(Q.shape[0]).astype(int), Utils.UPPER_BOUND(r)
        _, _, S = computeMVEE(Q[:, :-1], alg_type=0)  # vertices of the MVEE of Q
    else:
        # 1-dimensional span: the two extreme points suffice
        idx_in_P = np.unique([np.argmin(Q[:, :-1]).astype(int),
                              np.argmax(Q[:, :-1]).astype(int)]).tolist()
        return Q[idx_in_P], idx_in_P, Utils.UPPER_BOUND(r)
    idx_in_P = np.empty((2 * (Utils.J + 1) ** 2,)).astype(int)
    C = np.empty((2 * (Utils.J + 1) ** 2,)).astype(int)
    idx = 0
    for q in S:  # each boundary point along the axes of the MVEE of Q
        # get <= d+1 rows of Q whose convex combination yields q
        K = attainCaratheodorySet(Q[:, :-1], q)
        idx_in_P[idx:idx + K.shape[0]] = K.astype(int)  # coreset row indices in Q
        C[idx:idx + K.shape[0]] = Q[idx_in_P[idx:idx + K.shape[0]], -1].astype(int)
        idx += K.shape[0]
    return np.unique(C[:idx]), np.unique(idx_in_P[:idx]), Utils.UPPER_BOUND(r)
####################################################### Bicriteria #####################################################
def attainClosestPointsToSubspaces(P, W, flats, indices):
    """
    Return the closest half of the points (among P[indices]) to the given flats.
    :param P: A numpy matrix whose rows are points.
    :param W: A numpy array of weights per row of P (unused here; kept for
              interface parity).
    :param flats: In the non-accelerated mode, a list of flats where each flat
                  is an (orthogonal matrix, translation vector) pair; in
                  accelerated mode, a single such pair.
    :param indices: A numpy array of row indices into P.
    :return: A list with the indices of the closest N/2 points.
    """
    dists = np.empty((P[indices, :].shape[0], ))
    N = indices.shape[0]
    if not Utils.ACCELERATE_BICRETERIA:
        # distance of each candidate point to its nearest flat
        for i in range(N):
            dists[i] = np.min([
                Utils.computeDistanceToSubspace(P[np.array([indices[i]]), :], flats[j][0], flats[j][1])
                for j in range(len(flats))])
    else:
        dists = Utils.computeDistanceToSubspace(P[indices, :], flats[0], flats[1])
        # NOTE(review): this branch returns positions within `indices`, while
        # the branch below returns the index values themselves -- confirm that
        # callers expect this asymmetry.
        idxs = np.argpartition(dists, N // 2)[:N // 2]
        return idxs.tolist()
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int instead.
    return np.array(indices)[np.argsort(dists).astype(int)[:int(N / 2)]].tolist()
def sortDistancesToSubspace(P, X, v, points_indices):
    """
    Sort point indices by ascending distance to the flat (X, v).
    :param P: A numpy matrix whose rows are points.
    :param X: An orthogonal matrix whose span is a subspace.
    :param v: A numpy array denoting a translation vector.
    :param points_indices: A numpy array of indices selecting a subset of rows of P.
    :return: points_indices sorted by distance to the flat (X, v).
    """
    # distance between the selected points and the flat (X, v)
    dists = Utils.computeDistanceToSubspace(P[points_indices, :], X, v)
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int instead.
    return np.array(points_indices)[np.argsort(dists).astype(int)].tolist()
def computeSubOptimalFlat(P, weights):
    """
    Compute the j-flat best fitting P under the l2^2 loss, via the SVD of the
    centered points.
    :param P: A numpy matrix whose rows are points.
    :param weights: A numpy array of weights with respect to each row of P.
    :return: (orthogonal matrix spanning the flat, translation vector).
    """
    translation = np.average(P, axis=0, weights=weights)  # weighted centroid
    model = TruncatedSVD(algorithm='randomized', n_iter=1, n_components=Utils.J)
    model.fit(P - translation)
    return model.components_, translation
def clusterIdxsBasedOnKSubspaces(P, B):
    """
    Partition the rows of P into the clusters induced by a list of flats.
    :param P: A numpy matrix whose rows are the points to cluster.
    :param B: A list of flats, each an (orthogonal matrix, translation vector) pair.
    :return: A numpy array where entry i holds the index of the flat in B
             closest to row i of P.
    """
    n = P.shape[0]
    idxs = np.arange(n)  # indices of all points
    # NOTE(review): B holds (matrix, vector) pairs, so np.array(B) presumably
    # builds an object/ragged array and apply_along_axis feeds one flat at a
    # time to the lambda -- confirm this holds for the flat shapes in use.
    centers = np.array(B)  # a numpy array of the flats
    dists = np.apply_along_axis(lambda x: Utils.computeDistanceToSubspace(P[idxs, :], x[0], x[1]), 1, centers)  # one row of distances per flat
    idxs = np.argmin(dists, axis=0)  # closest flat per point
    return idxs  # index of the closest flat for each row of P
def addFlats(P, W, S, B):
    """
    Append to B every flat spanned by one point from each of the j+1 subsets.
    :param P: A numpy matrix whose rows are points.
    :param W: A numpy array of weights per row of P.
    :param S: A list of j+1 arrays of point indices.
    :param B: The list of flats to extend (mutated in place).
    :return: (numpy array of the positions of the newly added flats in B, B).
    """
    per_subset = [np.arange(s.shape[0]) for s in S]
    grid = np.meshgrid(*per_subset)  # every ordered choice of j+1 points
    grid = np.array([g.flatten() for g in grid])
    first_new = len(B)
    for col in range(grid.shape[1]):
        rows = [S[j][grid[j, col]][0] for j in range(grid.shape[0])]
        B.append(computeSubOptimalFlat(P[rows, :], W[rows]))
    return np.arange(first_new, len(B)), B
def computeBicriteria(P, W):
    """
    Implementation of Algorithm Approx-k-j-Flats(P, k, j) from "Bi-criteria
    Linear-time Approximations for Generalized k-Mean/Median/Center". Returns
    a (2^j, O(log(n) * (jk)^O(j)))-approximation for the (k, j)-projective
    clustering problem under the l2^2 loss function.
    :param P: A numpy matrix whose rows are the input points.
    :param W: A numpy array of weights with respect to the rows of P.
    :return: A list of flats attaining the bicriteria approximation.
    """
    n = P.shape[0]
    Q = np.arange(0, n, 1)  # indices of the not-yet-covered points
    t = 1  # round counter
    B = []  # accumulated flats
    tol_sample_size = Utils.K * (Utils.J + 1)  # stop once this few points remain
    sample_size = (lambda t: int(np.ceil(Utils.K * (Utils.J + 1) * (2 + np.log(Utils.J + 1) +
                                                                    np.log(Utils.K) +
                                                                    min(t, np.log(np.log(n)))))))
    while np.size(Q) >= tol_sample_size:  # until only a handful of points remain
        S = []
        for i in range(0, Utils.J+1):  # sample j+1 subsets i.i.d. from the remaining points
            random_sample = np.random.choice(Q, size=sample_size(t))
            S.append(random_sample[:, np.newaxis])
        if not Utils.ACCELERATE_BICRETERIA:
            # NOTE(review): addFlats returns a (new-flat-positions, B) tuple,
            # which is then passed as `flats` to attainClosestPointsToSubspaces
            # below -- verify this matches what that function expects.
            F = addFlats(P, W, S, B)
        else:
            S = np.unique(np.vstack(S).flatten())
            F = computeSubOptimalFlat(P[S, :], W[S])
            B.append(F)
        # Drop the half of the remaining points best served by the new flat(s).
        sorted_indices = attainClosestPointsToSubspaces(P, W, F, Q)
        Q = np.delete(Q, sorted_indices)
        t += 1
    # Cover the leftover points with one final flat (or set of flats).
    if not Utils.ACCELERATE_BICRETERIA:
        _, B = addFlats(P, W, [Q for i in range(Utils.J + 1)], B)
    else:
        F = computeSubOptimalFlat(P[Q.flatten(), :], W[Q.flatten()])
        B.append(F)
    return B
################################################### L1Coreset ##########################################################
def applyBiCriterea(P, W):
    """
    Run the bicriteria approximation and cluster the points by its flats.
    :param P: A numpy matrix whose rows are the input points.
    :param W: A numpy array of weights with respect to the rows of P.
    :return:
        - B: O((jk)^{j+1}) j-flats attaining a 2^j approximation to the
          optimal (k, j)-projective clustering of P.
        - idxs: per-point index of the assigned flat in B.
    """
    flats = computeBicriteria(P, W)  # flats from the bicriteria algorithm
    assignment = clusterIdxsBasedOnKSubspaces(P, flats)  # best flat per point
    return flats, assignment
def initializeSens(P, B, idxs):
    """
    Initialize the additive sensitivity term of each point using the bicriteria output: for
    each point, the distance to its assigned flat in B divided by the total such distance over
    its cluster, scaled by 2^j.

    :param P: A numpy matrix whose rows are points; the last column is the concatenated
              original row index.
    :param B: A set of flats where each flat is represented by an orthogonal matrix and a translation vector.
    :param idxs: A numpy array which represents the clustering which B imposes on P.
    :return: A numpy array with the additive sensitivity term of every point in P.
             (Note: despite earlier documentation, this function does return a value.)
    """
    centers_idxs = np.unique(idxs)  # the distinct cluster labels imposed by B
    sensitivity_additive_term = np.zeros((P.shape[0], ))
    for center_idx in centers_idxs:  # go over each cluster of points from P
        cluster_per_center = np.where(idxs == center_idx)[0]  # get all points in certain cluster
        # compute the distance of each point in the cluster to its respective flat
        # (last column of P is the index column, hence the :-1 slice)
        cost_per_point_in_cluster = Utils.computeDistanceToSubspace(P[cluster_per_center, :-1],
                                                                    B[center_idx][0], B[center_idx][1])
        # set the sensitivity to the distance of each point from its respected flat divided by the
        # total distance between cluster points and the respected flat; nan_to_num guards the
        # all-zero-distance cluster (0/0) case
        sensitivity_additive_term[cluster_per_center] = 2 ** Utils.J * \
                                                        np.nan_to_num(cost_per_point_in_cluster /
                                                                      np.sum(cost_per_point_in_cluster))
    return sensitivity_additive_term
def Level(P, k, V, desired_eps=0.01):
    """
    An implementation of Algorithm 7 of "Coresets for Gaussian Mixture Models of Any shapes" by Zahi
    Kfir and Dan Feldman.

    :param P: A numpy matrix whose rows are points; the last column is the concatenated
              original row index of each point.
    :param k: The number of $j$-subspaces which defines the (k,j)-projective clustering problem.
    :param V: A numpy matrix whose rows span the current affine subspace of the recursion.
    :param desired_eps: An approximation error, default 0.01. (Currently unused in the body.)
    :return: A numpy array of row indices into P of the selected points (union of the direct
             selections C and the recursive selections D).
    """
    t = V.shape[0]  # numnber of points in V
    d = P.shape[1] - 1  # exclude last entry of each point for it is the concatenated index
    # C marks points selected directly at this level; D marks points selected by recursion.
    C = np.zeros((P.shape[0], ), dtype="bool")
    D = np.zeros((P.shape[0], ), dtype="bool")
    # Recursion base case: nothing to select.
    if k <= 1 or t-1 >= Utils.J:
        return np.array([])
    A, v = Utils.computeAffineSpan(V)
    dists_from_P_to_A = Utils.computeDistanceToSubspace(P[:, :-1], A.T, v)
    # d_0 is the smallest strictly-positive distance to the affine span (1e-11 acts as a zero tolerance).
    non_zero_idxs = np.where(dists_from_P_to_A > 1e-11)[0]
    d_0 = 0 if len(non_zero_idxs) < 1 else np.min(dists_from_P_to_A[non_zero_idxs])
    c = 1 / d ** (1.5 * (d + 1))
    M = np.max(np.abs(P[:, :-1]))
    on_j_subspace = np.where(dists_from_P_to_A <= 1e-11)[0]
    B = [[]]
    # B[0] holds the points lying (numerically) on the affine span itself.
    if on_j_subspace.size != 0:
        B[0] = P[on_j_subspace, :]
        if B[0].shape[0] >= Utils.J ** (2 * k):
            # Large enough batch: recurse on a (k-1) L_inf coreset of it.
            indices_in_B = B[0][:, -1]
            Q = np.hstack((B[0][:,:-1], np.arange(B[0].shape[0])[:, np.newaxis]))
            temp = computeLInfCoreset(B[0], k-1)
            C[indices_in_B[temp].astype(np.int)] = True
        else:
            # Small batch: take all of it.
            C[B[0][:, -1].astype(np.int)] = True
    if d_0 > 0:
        # Partition the remaining points into exponential distance shells [2^(i-1)*d_0, 2^i*d_0].
        for i in range(1, int(np.ceil(8 * np.log(M) + np.log(1.0/c)) + 1)):
            B.append(P[np.where(np.logical_and(2 ** (i-1) * d_0 <= dists_from_P_to_A,
                                               dists_from_P_to_A <= 2 ** i * d_0))[0], :])
            if len(B[i]) > 0:
                if len(B[i]) >= Utils.J ** (2 * k):
                    # Large shell: keep a (k-1) L_inf coreset of the shell.
                    indices_B = B[i][:, -1]
                    Q_B = np.hstack((B[i][:, :-1], np.arange(B[i].shape[0])[:, np.newaxis]))
                    temp = computeLInfCoreset(Q_B, k-1)
                    if temp.size > 0:
                        C[indices_B[temp].astype(np.int)] = True
                else:
                    # Small shell: keep all of it, and recurse on every point of it below.
                    C[B[i][:, -1].astype(np.int)] = True
                    temp = np.arange(B[i].shape[0]).astype(np.int)
                # Stack every non-empty batch collected so far and re-index its rows.
                list_of_coresets = [x for x in B if len(x) > 0]
                Q = np.vstack(list_of_coresets)
                indices_Q = Q[:, -1]
                Q = np.hstack((Q[:, :-1], np.arange(Q.shape[0])[:, np.newaxis]))
                if temp.size > 0:
                    # Recurse with the affine span extended by each selected point.
                    for point in B[i][temp, :]:
                        indices = Level(Q, k-1, np.vstack((V, point[np.newaxis, :-1])))
                        if indices.size > 0:
                            D[indices_Q[indices].astype(np.int)] = True
    # np.add on two boolean masks acts as elementwise OR.
    return np.where(np.add(C, D))[0]
def computeLInfCoreset(P, k):
    """
    Main L_\infty coreset method: for k == 1 it runs the fast L_\infty coreset algorithm
    directly; for k > 1 it runs a recursive algorithm (via `Level`) for the
    (k,j)-projective clustering problem.

    A variant of Algorithm 6 of "Coresets for Gaussian Mixture Models of Any shapes" by Zahi
    Kfir and Dan Feldman.

    :param P: A numpy matrix whose rows are points; the last column is the concatenated
              original row index.
    :param k: The number of $j$-subspaces which defines the (k,j)-projective clustering problem.
    :return: A numpy array of row indices into P forming an L_\infty coreset for the
             (k,j)-projective clustering problem (not a PointSet object).
    """
    C = []
    if k == 1:  # base case: plain subspace clustering problem
        _, idxs_in_Q, upper_bound = computeLINFCoresetKOne(P)  # Compute our L_\infty coreset for P
        return idxs_in_Q
    elif k < 1:  # degenerate request: empty coreset
        return np.array([])
    else:  # If k > 1
        temp = computeLInfCoreset(P, k-1)  # call recursively till k == 1
        # C is a boolean mask over the original row indices (stored in the last column of P).
        C = np.zeros((P.shape[0], ), dtype="bool")
        C[P[temp, -1].astype(np.int)] = True
        for p in P[temp, :]:  # for each point in the (k-1) coreset
            # Lift the (k-1,j) coreset to a (k,j) coreset via the Level recursion seeded at p.
            recursive_core = Level(P, k, p[np.newaxis, :-1])
            if recursive_core.size > 0:  # if the recursive coreset is not empty
                C[P[recursive_core, -1].astype(np.int)] = True
                # Early exit once every point has been selected.
                if np.where(C == False)[0].size < 1:
                    return np.where(C)[0]
        return np.where(C)[0]  # return a L_\infty coreset for (k,j)-projective clustering problem
def computeSensitivityPerCluster(P):
    """
    Bound the sensitivity of every point in a single cluster by repeatedly peeling off
    L_\infty coresets: points removed in the i-th round receive the bound
    upper_bound / (i + 1), so later (easier) points receive smaller bounds.

    :param P: A numpy matrix of cluster points; the last column holds each point's
              original row index in the full point set.
    :return: A numpy matrix with two columns: [sensitivity bound, original point index].
    """
    sensitivity = np.ones((P.shape[0], )) * np.inf
    i = 0
    upper_bound = Utils.determineUpperBound()  # upper bound on the approximation which the L_\infty
    # coreset attains
    Q = np.hstack((P[:, :-1], np.arange(P.shape[0])[:, np.newaxis]))
    while Q.shape[0] > 2 * Q.shape[1]:  # run till you have at most 2*j points
        orig_idx_in_Q = Q[:, -1]
        idxs_of_P = computeLInfCoreset(np.hstack((Q[:, :-1], np.arange(Q.shape[0])[:, np.newaxis])), Utils.K)  # compute L_\infty coreset
        # Invariant check: a point must not be assigned a (finite) bound twice.
        if np.any(np.logical_not(np.isinf(sensitivity[orig_idx_in_Q[idxs_of_P].astype(np.int)]))):  # used for debugging
            raise ValueError('A crucial Bug!')
        sensitivity[orig_idx_in_Q[idxs_of_P].astype(np.int)] = upper_bound / (i + 1)  # bound the sensitivity of each point in Q_P
        # NOTE(review): debug leftover — prints instead of raising/logging on NaN; confirm intent.
        if np.isnan(np.sum(sensitivity)):
            print('HOLD ON!')
        remaining_idxs = Utils.attainAllButSpecifiedIndices(Q, orig_idx_in_Q[idxs_of_P].astype(np.int))  # get all points in cluster which
        # are not in Q_P
        idxs_in_Q = np.where(remaining_idxs)[0]  # get indices in cluster which are not in Q_P
        Q = Q[idxs_in_Q, :]  # update cluster to exclude current L_\infty coreset
        # NOTE(review): progress print left in library code — consider logging instead.
        print('Batch {} has finished'.format(i))
        i += 1  # count number of L_\infty coreset rounds for this cluster
    remaining_idxs_per_cluster = Q[:, -1].astype(np.int)  # all of the remaining (at most 2*j) points
    sensitivity[remaining_idxs_per_cluster] = upper_bound / (i if i > 0 else i + 1)  # give them the lowest bound
    return np.hstack((sensitivity[:, np.newaxis], P[:, -1][:, np.newaxis]))
def computeSensitivity(P, W):
    """
    Compute the sensitivity of each point using a reduction from L_\infty to L_1.

    :param P: A numpy matrix whose rows are the input points.
    :param W: A numpy array of weights, one entry per row of P.
    :return: A numpy array with the sensitivity of each point in P.
    """
    # Append the original row index to each point so identities survive the clustering below.
    P = np.hstack((P, np.arange(P.shape[0])[:, np.newaxis]))
    # Attain a set of flats giving a 2^j approximation to the optimal solution,
    # and the cluster label of each point.
    B, idxs = applyBiCriterea(P[:, :-1], W)
    sensitivity_additive_term = initializeSens(P, B, idxs)  # initialize the sensitivities
    unique_center_idxs = np.unique(idxs)
    sensitivity = np.empty((P.shape[0], ))
    clusters = [np.where(idxs == idx)[0] for idx in unique_center_idxs]
    # Project each cluster onto its flat's coordinate system; the last column keeps
    # the original row index. Note: B is indexed positionally, matching the order of
    # unique_center_idxs (unchanged from the original implementation).
    Qs = [np.hstack(((P[cluster, :-1] - B[i][1]).dot(B[i][0].T),
                     P[cluster, -1][:, np.newaxis]))
          for i, cluster in enumerate(clusters)]
    # Reduce each cluster independently; scatter results back by original index.
    for Q in Qs:
        s = computeSensitivityPerCluster(Q)
        sensitivity[s[:, -1].astype(np.int)] = s[:, 0]
    # NOTE(review): initializeSens already scales its result by 2 ** Utils.J, so this
    # multiplication squares the 2^j factor — confirm the double scaling is intended.
    sensitivity += 2 ** Utils.J * sensitivity_additive_term  # add the additive term for the sensitivity
    return sensitivity
if __name__ == '__main__':
    # Smoke-test driver: random points with an appended index column and unit weights.
    num_points, dim = 10000, 5
    P = np.random.randn(num_points, dim)
    P = np.hstack((P, np.arange(num_points)[:, np.newaxis]))
    W = np.ones((P.shape[0], ))
    s = computeSensitivity(P, W)
| 49.157583
| 139
| 0.576803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21,725
| 0.523355
|
f00939c44715cbb46e21a3b0bd4e2b066d1b7f29
| 2,549
|
py
|
Python
|
extras/pyrepl/console.py
|
dillionhacker/python222
|
205414c33fba8166167fd8a6a03eda1a68f16316
|
[
"Apache-2.0"
] | 1
|
2019-05-27T00:58:46.000Z
|
2019-05-27T00:58:46.000Z
|
extras/pyrepl/console.py
|
tuankien2601/python222
|
205414c33fba8166167fd8a6a03eda1a68f16316
|
[
"Apache-2.0"
] | null | null | null |
extras/pyrepl/console.py
|
tuankien2601/python222
|
205414c33fba8166167fd8a6a03eda1a68f16316
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2000-2004 Michael Hudson mwh@python.net
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
class Event:
    """A single console event.

    `evt' names the kind of event ('key' or somesuch), `data' carries its
    payload, and `raw' holds the raw input characters that produced it.
    """

    def __init__(self, evt, data, raw=''):
        self.evt = evt
        self.data = data
        self.raw = raw

    def __repr__(self):
        # `raw' is deliberately omitted from the repr.
        return 'Event({!r}, {!r})'.format(self.evt, self.data)
class Console:
    """Abstract base class for console backends.

    Concrete subclasses are expected to provide the attributes:
        screen, height, width
    Every method below is a no-op stub meant to be overridden.
    """
    def refresh(self, screen, xy):
        """Redraw the display so it shows `screen' with the cursor at `xy'."""
        pass
    def prepare(self):
        """Put the console into the state needed for reading events."""
        pass
    def restore(self):
        """Undo the effects of prepare()."""
        pass
    def move_cursor(self, x, y):
        """Move the cursor to position (x, y)."""
        pass
    def set_cursor_vis(self, vis):
        """Show or hide the cursor according to `vis'."""
        pass
    def getheightwidth(self):
        """Return (height, width) where height and width are the height
        and width of the terminal window in characters."""
        pass
    def get_event(self, block=1):
        """Return an Event instance.  Returns None if |block| is false
        and there is no event pending, otherwise waits for the
        completion of an event."""
        pass
    def beep(self):
        """Emit an audible (or visual) bell."""
        pass
    def clear(self):
        """Wipe the screen"""
        pass
    def finish(self):
        """Move the cursor to the end of the display and otherwise get
        ready for end.  XXX could be merged with restore?  Hmm."""
        pass
    def flushoutput(self):
        """Flush all output to the screen (assuming there's some
        buffering going on somewhere)."""
        pass
    def forgetinput(self):
        """Forget all pending, but not yet processed input."""
        pass
    def getpending(self):
        """Return the characters that have been typed but not yet
        processed."""
        pass
    def wait(self):
        """Wait for an event."""
        pass
| 27.117021
| 71
| 0.634759
| 1,666
| 0.65359
| 0
| 0
| 0
| 0
| 0
| 0
| 1,667
| 0.653982
|
f009b3d518e1b8520f28ad27fc966139292e346f
| 15,818
|
py
|
Python
|
robotpy_build/hooks_datacfg.py
|
ConnectionMaster/robotpy-build
|
9571a84fdd6268be5e945b31ea8929d84355071a
|
[
"BSD-3-Clause"
] | null | null | null |
robotpy_build/hooks_datacfg.py
|
ConnectionMaster/robotpy-build
|
9571a84fdd6268be5e945b31ea8929d84355071a
|
[
"BSD-3-Clause"
] | null | null | null |
robotpy_build/hooks_datacfg.py
|
ConnectionMaster/robotpy-build
|
9571a84fdd6268be5e945b31ea8929d84355071a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Defines data that is consumed by the header2whatever hooks/templates
# to modify the generated files
#
import enum
from typing import Dict, List, Tuple, Optional
from pydantic import validator
from .util import Model, _generating_documentation
class ParamData(Model):
    """Various ways to modify a single parameter of a wrapped function.

    Used as the value type of :attr:`FunctionData.param_override`, keyed by
    the C++ parameter name.
    """

    #: Set parameter name to this
    name: Optional[str] = None

    #: Change C++ type emitted
    x_type: Optional[str] = None

    #: Default value for parameter
    default: Optional[str] = None

    #: Disables a default cast caused by ``default_arg_cast``
    disable_type_caster_default_cast: bool = False

    #: Force this to be an 'out' parameter
    #:
    #: .. seealso:: :ref:`autowrap_out_params`
    #:
    force_out: bool = False

    #: Force an array size
    array_size: Optional[int] = None

    #: Ignore this parameter
    ignore: bool = False
class BufferType(str, enum.Enum):
    """Which kind of Python buffer object a wrapped buffer parameter accepts."""

    #: The buffer must indicate that it is readable (such as bytes, or bytearray)
    IN = "in"

    #: The buffer must indicate that it is writeable (such as a bytearray)
    OUT = "out"

    #: The buffer must indicate that it readable or writeable (such as a bytearray)
    INOUT = "inout"
class BufferData(Model):
    """Maps a C++ (pointer, length) parameter pair onto a Python buffer argument."""

    #: Indicates what type of python buffer is required
    type: BufferType

    #: Name of C++ parameter that the buffer will use
    src: str

    #: Name of the C++ length parameter. An out-only parameter, it will be set
    #: to the size of the python buffer, and will be returned so the caller can
    #: determine how many bytes were written
    len: str

    #: If specified, the minimum size of the python buffer
    minsz: Optional[int] = None
class ReturnValuePolicy(enum.Enum):
    """
    Mirrors pybind11's return value policies.  See the
    `pybind11 documentation <https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies>`_
    for what each of these values mean.
    """

    TAKE_OWNERSHIP = "take_ownership"
    COPY = "copy"
    MOVE = "move"
    REFERENCE = "reference"
    REFERENCE_INTERNAL = "reference_internal"
    AUTOMATIC = "automatic"
    AUTOMATIC_REFERENCE = "automatic_reference"
class FunctionData(Model):
    """
    Customize the way the autogenerator binds a function.

    .. code-block:: yaml

        functions:
          # for non-overloaded functions, just specify the name + customizations
          name_of_non_overloaded_fn:
            # add customizations for function here

          # For overloaded functions, specify the name, but each overload
          # separately
          my_overloaded_fn:
            overloads:
              int, int:
                # customizations for `my_overloaded_fn(int, int)`
              int, int, int:
                # customizations for `my_overloaded_fn(int, int, int)`
    """

    #: If True, don't wrap this
    ignore: bool = False

    #: If True, don't wrap this, but provide a pure virtual implementation
    ignore_pure: bool = False

    #: Generate this in an `#ifdef`
    ifdef: Optional[str] = None

    #: Generate this in an `#ifndef`
    ifndef: Optional[str] = None

    #: Use this code instead of the generated code
    cpp_code: Optional[str] = None

    #: Docstring for the function, will attempt to convert Doxygen docs if omitted
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring for the function
    doc_append: Optional[str] = None

    #: If True, prepends an underscore to the python name
    internal: bool = False

    #: Use this to set the name of the function as exposed to python
    rename: Optional[str] = None

    #: Mechanism to override individual parameters
    param_override: Dict[str, ParamData] = {}

    #: If specified, put the function in a sub.pack.age
    subpackage: Optional[str] = None

    #: By default, robotpy-build will release the GIL whenever a wrapped
    #: function is called.
    no_release_gil: Optional[bool] = None

    buffers: List[BufferData] = []

    #: Per-overload customizations, keyed by the comma-separated parameter types.
    overloads: Dict[str, "FunctionData"] = {}

    #: Adds py::keep_alive<x,y> to the function. Overrides automatic
    #: keepalive support, which retains references passed to constructors.
    #: https://pybind11.readthedocs.io/en/stable/advanced/functions.html#keep-alive
    keepalive: Optional[List[Tuple[int, int]]] = None

    #: https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
    return_value_policy: ReturnValuePolicy = ReturnValuePolicy.AUTOMATIC

    #: If this is a function template, this is a list of instantiations
    #: that you wish to provide. This is a list of lists, where the inner
    #: list is the template parameters for that function
    template_impls: Optional[List[List[str]]] = None

    #: Specify a transformation lambda to be used when this virtual function
    #: is called from C++. This inline code should be a lambda that has the same
    #: arguments as the original C++ virtual function, except the first argument
    #: will be a py::function with the python overload
    #:
    #: cpp_code should also be specified for this to be useful
    #:
    #: For example, to transform a function that takes an iostream into a function
    #: that returns a string:
    #:
    #: .. code-block:: yaml
    #:
    #:    cpp_code: |
    #:      [](MyClass* self) {
    #:         return "string";
    #:      }
    #:    virtual_xform: |
    #:      [](py::function fn, MyClass* self, std::iostream &is) {
    #:         std::string d = py::cast(fn());
    #:         is << d;
    #:      }
    #:
    virtual_xform: Optional[str] = None

    @validator("overloads", pre=True)
    def validate_overloads(cls, value):
        """Treat a YAML overload key with an empty body as a default FunctionData."""
        for k, v in value.items():
            if v is None:
                value[k] = FunctionData()
        return value
# Resolve the self-referential string annotation in FunctionData.overloads
# ("FunctionData" above). NOTE(review): skipped while generating documentation —
# presumably the docs build needs the annotation left unevaluated; confirm
# against .util._generating_documentation.
if not _generating_documentation:
    FunctionData.update_forward_refs()
class PropAccess(enum.Enum):
    """How generated Python bindings may access a wrapped attribute."""

    #: Determine read/read-write automatically:
    #:
    #: * If a struct/union, default to readwrite
    #: * If a class, default to readwrite if a basic type that isn't a
    #:   reference, otherwise default to readonly
    AUTOMATIC = "auto"

    #: Allow python users access to the value, but ensure it can't
    #: change. This is useful for properties that are defined directly
    #: in the class
    READONLY = "readonly"

    #: Allows python users to read/write the value
    READWRITE = "readwrite"
class PropData(Model):
    """Customizations for a wrapped class attribute / property."""

    #: If set to True, this property is not made available to python
    ignore: bool = False

    #: Set the python name of this property to the specified string
    #: (explicit ``= None`` default added for consistency with every other
    #: Optional field in this module; pydantic already treated the bare
    #: Optional annotation as defaulting to None, so behavior is unchanged)
    rename: Optional[str] = None

    #: Python code access to this property
    access: PropAccess = PropAccess.AUTOMATIC

    #: Docstring for the property (only available on class properties)
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None
class EnumValue(Model):
    """Customizations for a single value of a wrapped C++ enum."""

    #: If set to True, this property is not made available to python
    ignore: bool = False

    #: Set the python name of this enum value to the specified string
    rename: Optional[str] = None

    #: Docstring for the enum value
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None
class EnumData(Model):
    """Customizations for a wrapped C++ enum and its values."""

    #: Set your own docstring for the enum
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None

    #: If set to True, this property is not made available to python
    ignore: bool = False

    #: Set the python name of this enum to the specified string
    rename: Optional[str] = None

    #: Prefix stripped from the enum's value names (see generator usage)
    value_prefix: Optional[str] = None

    #: If specified, put the enum in a sub.pack.age (ignored for
    #: enums that are part of classes)
    subpackage: Optional[str] = None

    #: Per-value customizations, keyed by the C++ value name
    values: Dict[str, EnumValue] = {}
class ClassData(Model):
    """Customizations for a wrapped C++ class or struct."""

    #: Docstring for the class
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring
    doc_append: Optional[str] = None

    #: If set to True, this class is not wrapped
    ignore: bool = False

    #: Base classes to omit from the generated bindings
    ignored_bases: List[str] = []

    #: Specify fully qualified names for the bases
    base_qualnames: Dict[str, str] = {}

    #: Per-attribute customizations, keyed by attribute name
    attributes: Dict[str, PropData] = {}

    #: Per-enum customizations, keyed by enum name
    enums: Dict[str, EnumData] = {}

    #: Per-method customizations, keyed by method name
    methods: Dict[str, FunctionData] = {}

    is_polymorphic: bool = False
    force_no_trampoline: bool = False
    force_no_default_constructor: bool = False

    #: pybind11 will detect multiple inheritance automatically if a
    #: class directly derives from multiple classes. However,
    #: If the class derives from classes that participate in multiple
    #: inheritance, pybind11 won't detect it automatically, so this
    #: flag is needed.
    force_multiple_inheritance: bool = False

    #: If there are circular dependencies, this will help you resolve them
    #: manually. TODO: make it so we don't need this
    force_depends: List[str] = []

    #: Use this to bring in type casters for a particular type that may have
    #: been hidden (for example, with a typedef or definition in another file),
    #: instead of explicitly including the header. This should be the full
    #: namespace of the type.
    force_type_casters: List[str] = []

    #: If the object shouldn't be deleted by pybind11, use this. Disables
    #: implicit constructors.
    nodelete: bool = False

    #: Set the python name of the class to this
    rename: Optional[str] = None

    #: This is deprecated and has no effect
    shared_ptr: bool = True

    #: If specified, put the class in a sub.pack.age. Ignored
    #: for functions attached to a class. When template parameters
    #: are used, must define subpackage on template instances
    #: instead
    subpackage: Optional[str] = None

    #: Extra 'using' directives to insert into the trampoline and the
    #: wrapping scope
    typealias: List[str] = []

    #: Extra constexpr to insert into the trampoline and wrapping scopes
    constants: List[str] = []

    #: If this is a template class, a list of the parameters if it can't
    #: be autodetected (currently can't autodetect). If there is no space
    #: in the parameter, then it is assumed to be a 'typename', otherwise
    #: the parameter is split by space and the first item is the type and
    #: the second parameter is the name (useful for integral templates)
    template_params: Optional[List[str]] = None

    #: If this is a template class, the specified C++ code is inserted
    #: into the template definition
    template_inline_code: str = ""

    #: If this class has an associated trampoline, add this code inline at
    #: the bottom of the trampoline class. This is rarely useful.
    trampoline_inline_code: Optional[str] = None

    @validator("attributes", pre=True)
    def validate_attributes(cls, value):
        """Treat a YAML attribute key with an empty body as a default PropData."""
        for k, v in value.items():
            if v is None:
                value[k] = PropData()
        return value

    @validator("enums", pre=True)
    def validate_enums(cls, value):
        """Treat a YAML enum key with an empty body as a default EnumData."""
        for k, v in value.items():
            if v is None:
                value[k] = EnumData()
        return value

    @validator("methods", pre=True)
    def validate_methods(cls, value):
        """Treat a YAML method key with an empty body as a default FunctionData."""
        for k, v in value.items():
            if v is None:
                value[k] = FunctionData()
        return value
class TemplateData(Model):
    """
    Instantiates a template as a python type. To customize the class,
    add it to the ``classes`` key and specify the template type.

    Code to be wrapped:

    .. code-block:: c++

        template <typename T>
        class MyClass {};

    To bind ``MyClass<int>`` as the python class ``MyIntClass``, add this
    to your YAML:

    .. code-block:: yaml

        classes:
          MyClass:
            template_params:
            - T

        templates:
          MyIntClass:
            qualname: MyClass
            params:
            - int
    """

    #: Fully qualified name of instantiated class
    qualname: str

    #: Template parameters to use
    params: List[str]

    #: If specified, put the template instantiation in a sub.pack.age
    subpackage: Optional[str] = None

    #: Set the docstring for the template instance
    doc: Optional[str] = None

    #: Text to append to the (autoconverted) docstring for the template instance
    doc_append: Optional[str] = None
class HooksDataYaml(Model):
    """
    Format of the file in [tool.robotpy-build.wrappers."PACKAGENAME"]
    generation_data
    """

    #: Prefixes stripped from wrapped names (see generator usage)
    strip_prefixes: List[str] = []

    #: Adds ``#include <FILENAME>`` directives to the top of the autogenerated
    #: C++ file, after autodetected include dependencies are inserted.
    extra_includes: List[str] = []

    #: Adds ``#include <FILENAME>`` directives after robotpy_build.h is
    #: included, but before any autodetected include dependencies. Only use
    #: this when dealing with broken headers.
    extra_includes_first: List[str] = []

    #: Specify raw C++ code that will be inserted at the end of the
    #: autogenerated file, inside a function. This is useful for extending
    #: your classes or providing other customizations. The following C++
    #: variables are available:
    #:
    #: * ``m`` is the ``py::module`` instance
    #: * ``cls_CLASSNAME`` are ``py::class`` instances
    #: * ... lots of other things too
    #:
    #: The trampoline class (useful for accessing protected items) is available
    #: at ``{CLASSNAME}_Trampoline``
    #:
    #: To see the full list, run a build and look at the generated code at
    #: ``build/*/gensrc/**/*.cpp``
    #:
    #: Recommend that you use the YAML multiline syntax to specify it:
    #:
    #: .. code-block:: yaml
    #:
    #:    inline_code: |
    #:      cls_CLASSNAME.def("get42", []() { return 42; });
    inline_code: Optional[str] = None

    #: Key is the attribute (variable) name
    #:
    #: .. code-block:: yaml
    #:
    #:    attributes:
    #:      my_variable:
    #:        # customizations here, see PropData
    #:
    attributes: Dict[str, PropData] = {}

    #: Key is the class name
    #:
    #: .. code-block:: yaml
    #:
    #:    classes:
    #:      CLASSNAME:
    #:        # customizations here, see ClassData
    #:
    classes: Dict[str, ClassData] = {}

    #: Key is the function name
    #:
    #: .. code-block:: yaml
    #:
    #:    functions:
    #:      fn_name:
    #:        # customizations here, see FunctionData
    #:
    functions: Dict[str, FunctionData] = {}

    #: Key is the enum name, for enums at global scope
    #:
    #: .. code-block:: yaml
    #:
    #:    enums:
    #:      MyEnum:
    #:        # customizations here, see EnumData
    #:
    enums: Dict[str, EnumData] = {}

    #: Instantiates a template. Key is the name to give to the Python type.
    #:
    #: .. code-block:: yaml
    #:
    #:    templates:
    #:      ClassName:
    #:        # customizations here, see TemplateData
    #:
    templates: Dict[str, TemplateData] = {}

    @validator("attributes", pre=True)
    def validate_attributes(cls, value):
        """Treat a YAML attribute key with an empty body as a default PropData."""
        for k, v in value.items():
            if v is None:
                value[k] = PropData()
        return value

    @validator("classes", pre=True)
    def validate_classes(cls, value):
        """Treat a YAML class key with an empty body as a default ClassData."""
        for k, v in value.items():
            if v is None:
                value[k] = ClassData()
        return value

    @validator("enums", pre=True)
    def validate_enums(cls, value):
        """Treat a YAML enum key with an empty body as a default EnumData."""
        for k, v in value.items():
            if v is None:
                value[k] = EnumData()
        return value

    @validator("functions", pre=True)
    def validate_functions(cls, value):
        """Treat a YAML function key with an empty body as a default FunctionData."""
        for k, v in value.items():
            if v is None:
                value[k] = FunctionData()
        return value
| 29.845283
| 123
| 0.637249
| 15,457
| 0.977178
| 0
| 0
| 1,537
| 0.097168
| 0
| 0
| 9,593
| 0.606461
|
f009d6a3b56d42edfcb8bf537787593ecb613a4c
| 27,482
|
py
|
Python
|
src/auspex/qubit/qubit_exp.py
|
minhhaiphys/Auspex
|
3b9480120f0cdaf8a1e890a59e0e45e0fab5f1dd
|
[
"Apache-2.0"
] | null | null | null |
src/auspex/qubit/qubit_exp.py
|
minhhaiphys/Auspex
|
3b9480120f0cdaf8a1e890a59e0e45e0fab5f1dd
|
[
"Apache-2.0"
] | null | null | null |
src/auspex/qubit/qubit_exp.py
|
minhhaiphys/Auspex
|
3b9480120f0cdaf8a1e890a59e0e45e0fab5f1dd
|
[
"Apache-2.0"
] | null | null | null |
from auspex.log import logger
from auspex.experiment import Experiment, FloatParameter
from auspex.stream import DataStream, DataAxis, SweepAxis, DataStreamDescriptor, InputConnector, OutputConnector
import auspex.instruments
import auspex.filters
import bbndb
import numpy as np
import sys
import os
if sys.platform == 'win32' or 'NOFORKING' in os.environ:
from threading import Thread as Process
from threading import Event
else:
from multiprocessing import Process
from multiprocessing import Event
from multiprocessing import Value
from . import pipeline
import time
import datetime
import json
# Proxy filter classes ordered from rawest to most-processed stage of a
# measurement stream.
stream_hierarchy = [
    bbndb.auspex.Demodulate,
    bbndb.auspex.Integrate,
    bbndb.auspex.Average,
    bbndb.auspex.OutputProxy
]
# Maps each bbndb proxy filter class to the concrete auspex filter that implements it.
filter_map = {
    bbndb.auspex.Demodulate: auspex.filters.Channelizer,
    bbndb.auspex.Average: auspex.filters.Averager,
    bbndb.auspex.Integrate: auspex.filters.KernelIntegrator,
    bbndb.auspex.Write: auspex.filters.WriteToFile,
    bbndb.auspex.Buffer: auspex.filters.DataBuffer,
    bbndb.auspex.Display: auspex.filters.Plotter,
    bbndb.auspex.FidelityKernel: auspex.filters.SingleShotMeasurement
}
# Maps a digitizer model name to its stream-selector filter class.
stream_sel_map = {
    'X6-1000M': auspex.filters.X6StreamSelector,
    'AlazarATS9870': auspex.filters.AlazarStreamSelector
}
# Maps an instrument model name (as stored in the channel library) to its
# auspex instrument driver class.
instrument_map = {
    'DigitalAttenuator': auspex.instruments.DigitalAttenuator,
    'X6-1000M': auspex.instruments.X6,
    'AlazarATS9870': auspex.instruments.AlazarATS9870,
    'APS2': auspex.instruments.APS2,
    'TDM': auspex.instruments.TDM,
    'APS': auspex.instruments.APS,
    'HolzworthHS9000': auspex.instruments.HolzworthHS9000,
    'Labbrick': auspex.instruments.Labbrick,
    'AgilentN5183A': auspex.instruments.AgilentN5183A,
    'BNC845': auspex.instruments.BNC845,
    'SpectrumAnalyzer': auspex.instruments.SpectrumAnalyzer,
    'YokogawaGS200': auspex.instruments.YokogawaGS200
}
class QubitExperiment(Experiment):
"""Create an `Experiment` with specialized config and run methods for qubit experiments.
Parameters:
meta_file (string)
The filename of the QGL metainfo (*.json) corresponding to the desired
experiment.
averages (int)
The number of shots to take. Results are only actually averaged
if an `Averager` node is present in the processing pipeline.
exp_name (string)
Name of experiment. Used by any writers in pipeline to pick a data container name.
kwargs
Additional keyword arguments passed to the base Auspex `Experiment`
class.
Returns:
experiment instance (`Experiment`)
Returns the initialized Auspex `Experiment`.
Examples:
Creating a simple experiment.
>>> mf = RabiAmp(q1, [-1,0,1])
>>> exp = QubitExperiment(mf, averages=500)
"""
    def __init__(self, meta_file, averages=100, exp_name=None, **kwargs):
        """Initialize the experiment and build its graph from the QGL meta file.

        See the class docstring for parameter descriptions. Raises if no
        PipelineManager has been created, since create_from_meta reads the
        manager's session and measurement graph.
        """
        super(QubitExperiment, self).__init__(**kwargs)
        # A pipeline manager must exist before construction.
        if not pipeline.pipelineMgr:
            raise Exception("Could not find pipeline manager, have you declared one using PipelineManager()?")
        # NOTE(review): presumably continuous-wave mode; toggled externally — confirm.
        self.cw_mode = False
        self.add_date = True # add date to data files?
        self.name = exp_name
        self.outputs_by_qubit = {}
        self.progressbars = None
        # Heavy lifting: parse meta file, load channel library, build filter graph.
        self.create_from_meta(meta_file, averages)
def create_from_meta(self, meta_file, averages):
"""Method called during creation. Implementing a subclass of `QubitExperiment` this method
may be overridden to provide additional functionality. However, this is a complex method, and
it is recommended that the user instead override the `modify_graph` method to provide
custom subclass behavior.
"""
try:
with open(meta_file, 'r') as FID:
meta_info = json.load(FID)
except:
raise Exception(f"Could note process meta info from file {meta_file}")
# Load ChannelLibrary and database information
db_provider = meta_info['database_info']['db_provider']
db_resource_name = meta_info['database_info']['db_resource_name']
library_name = meta_info['database_info']['library_name']
library_id = meta_info['database_info']['library_id']
# Load the channel library by ID
sess = pipeline.pipelineMgr.session
self.chan_db = sess.query(bbndb.qgl.ChannelDatabase).filter_by(id=library_id).first()
all_channels = self.chan_db.channels
all_generators = self.chan_db.generators
all_transmitters = self.chan_db.transmitters
all_receivers = self.chan_db.receivers
all_transceivers = self.chan_db.transceivers
all_qubits = [c for c in all_channels if isinstance(c, bbndb.qgl.Qubit)]
all_measurements = [c for c in all_channels if isinstance(c, bbndb.qgl.Measurement)]
# Restrict to current qubits, channels, etc. involved in this actual experiment
self.controlled_qubits = [c for c in self.chan_db.channels if c.label in meta_info["qubits"]]
self.measurements = [c for c in self.chan_db.channels if c.label in meta_info["measurements"]]
self.measured_qubits = [c for c in self.chan_db.channels if "M-"+c.label in meta_info["measurements"]]
self.phys_chans = list(set([e.phys_chan for e in self.controlled_qubits + self.measurements]))
self.transmitters = list(set([e.phys_chan.transmitter for e in self.controlled_qubits + self.measurements]))
self.receiver_chans = list(set([e.receiver_chan for e in self.measurements]))
self.receivers = list(set([e.receiver_chan.receiver for e in self.measurements]))
self.generators = list(set([q.phys_chan.generator for q in self.measured_qubits + self.controlled_qubits + self.measurements if q.phys_chan.generator]))
self.qubits_by_name = {q.label: q for q in self.measured_qubits + self.controlled_qubits}
# Load the relevant stream selectors from the pipeline.
self.stream_selectors = pipeline.pipelineMgr.get_current_stream_selectors()
if len(self.stream_selectors) == 0:
raise Exception("No filter pipeline has been created. You can try running the create_default_pipeline() method of the Pipeline Manager")
self.stream_selectors = [s for s in self.stream_selectors if s.qubit_name in self.qubits_by_name.keys()]
# Locate transmitters relying on processors
self.transceivers = list(set([t.transceiver for t in self.transmitters + self.receivers if t.transceiver]))
self.processors = list(set([p for t in self.transceivers for p in t.processors]))
# Determine if the digitizer trigger lives on another transmitter that isn't included already
self.transmitters = list(set([mq.measure_chan.trig_chan.phys_chan.transmitter for mq in self.measured_qubits] + self.transmitters))
# The exception being any instruments that are declared as standalone
self.all_standalone = [i for i in self.chan_db.all_instruments() if i.standalone and i not in self.transmitters + self.receivers + self.generators]
# In case we need to access more detailed foundational information
self.factory = self
# If no pipeline is defined, assumed we want to generate it automatically
if not pipeline.pipelineMgr.meas_graph:
raise Exception("No pipeline has been created, do so automatically using exp_factory.create_default_pipeline()")
#self.create_default_pipeline(self.measured_qubits)
# Add the waveform file info to the qubits
for awg in self.transmitters:
awg.sequence_file = meta_info['instruments'][awg.label]
# Construct the DataAxis from the meta_info
desc = meta_info["axis_descriptor"]
data_axis = desc[0] # Data will always be the first axis
# ovverride data axis with repeated number of segments
if hasattr(self, "repeats") and self.repeats is not None:
data_axis['points'] = np.tile(data_axis['points'], self.repeats)
# Search for calibration axis, i.e., metadata
axis_names = [d['name'] for d in desc]
if 'calibration' in axis_names:
meta_axis = desc[axis_names.index('calibration')]
# There should be metadata for each cal describing what it is
if len(desc)>1:
metadata = ['data']*len(data_axis['points']) + meta_axis['points']
# Pad the data axis with dummy equidistant x-points for the extra calibration points
avg_step = (data_axis['points'][-1] - data_axis['points'][0])/(len(data_axis['points'])-1)
points = np.append(data_axis['points'], data_axis['points'][-1] + (np.arange(len(meta_axis['points']))+1)*avg_step)
else:
metadata = meta_axis['points'] # data may consist of calibration points only
points = np.arange(len(metadata)) # dummy axis for plotting purposes
# If there's only one segment we can ignore this axis
if len(points) > 1:
self.segment_axis = DataAxis(data_axis['name'], points, unit=data_axis['unit'], metadata=metadata)
else:
# No calibration data, just add a segment axis as long as there is more than one segment
if len(data_axis['points']) > 1:
self.segment_axis = DataAxis(data_axis['name'], data_axis['points'], unit=data_axis['unit'])
# Build a mapping of qubits to self.receivers, construct qubit proxies
# We map by the unique database ID since that is much safer
receiver_chans_by_qubit_label = {}
for m in self.measurements:
q = [c for c in self.chan_db.channels if c.label==m.label[2:]][0]
receiver_chans_by_qubit_label[q.label] = m.receiver_chan
# Now a pipeline exists, so we create Auspex filters from the proxy filters in the db
self.proxy_to_filter = {}
self.filters = []
self.connector_by_sel = {}
self.chan_to_dig = {}
self.chan_to_oc = {}
self.qubit_to_dig = {}
self.qubits_by_output = {}
self.proxy_name_to_instrument = {}
# Create microwave sources and receiver instruments from the database objects.
# We configure the self.receivers later after adding channels.
self.instrument_proxies = self.generators + self.receivers + self.transmitters + self.all_standalone + self.processors
self.instruments = []
for instrument in self.instrument_proxies:
instr = instrument_map[instrument.model](instrument.address, instrument.label) # Instantiate
# For easy lookup
instr.proxy_obj = instrument
instrument.instr = instr # This shouldn't be relied upon
self.proxy_name_to_instrument[instrument.label] = instr
# Add to the experiment's instrument list
self._instruments[instrument.label] = instr
self.instruments.append(instr)
# Add to class dictionary for convenience
if not hasattr(self, instrument.label):
setattr(self, instrument.label, instr)
for mq in self.measured_qubits:
# Stream selectors from the pipeline database:
# These contain all information except for the physical channel
mq_stream_sels = [s for s in self.stream_selectors if s.qubit_name == mq.label]
# The receiver channel only specifies the physical channel
rcv = receiver_chans_by_qubit_label[mq.label]
# Create the auspex stream selectors
dig = rcv.receiver # The digitizer instrument in the database
for mq_stream_sel in mq_stream_sels:
auspex_stream_sel = stream_sel_map[dig.model](name=f"{rcv.label}-{mq_stream_sel.stream_type}-stream_sel")
mq_stream_sel.channel = rcv.channel
auspex_stream_sel.configure_with_proxy(mq_stream_sel)
auspex_stream_sel.receiver = auspex_stream_sel.proxy = mq_stream_sel
# Construct the channel from the receiver channel
channel = auspex_stream_sel.get_channel(mq_stream_sel)
# Manually set the physical channel
channel.phys_channel = rcv.channel
# Get the base descriptor from the channel
descriptor = auspex_stream_sel.get_descriptor(mq_stream_sel, rcv)
# Update the descriptor based on the number of segments
# The segment axis should already be defined if the sequence
# is greater than length 1
if hasattr(self, "segment_axis"):
descriptor.add_axis(self.segment_axis)
# Add averaging if necessary
if averages > 1:
descriptor.add_axis(DataAxis("averages", range(averages)))
# Add the output connectors to the experiment and set their base descriptor
self.connector_by_sel[mq_stream_sel] = self.add_connector(mq_stream_sel)
self.connector_by_sel[mq_stream_sel].set_descriptor(descriptor)
# Add the channel to the instrument
dig.instr.add_channel(channel)
self.chan_to_dig[channel] = dig.instr
self.chan_to_oc [channel] = self.connector_by_sel[mq_stream_sel]
self.qubit_to_dig[mq.id] = dig
# Find the number of self.measurements
segments_per_dig = {receiver_chan.receiver: meta_info["receivers"][receiver_chan.label] for receiver_chan in self.receiver_chans
if receiver_chan.label in meta_info["receivers"].keys()}
# Configure receiver instruments from the database objects
# this must be done after adding channels.
for dig in self.receivers:
dig.number_averages = averages
dig.number_waveforms = 1
dig.number_segments = segments_per_dig[dig]
dig.instr.proxy_obj = dig
# Restrict the graph to the relevant qubits
self.measured_qubit_names = [q.label for q in self.measured_qubits]
pipeline.pipelineMgr.session.commit()
# Any modifications to be done by subclasses, just a passthrough here
self.modified_graph = self.modify_graph(pipeline.pipelineMgr.meas_graph)
# Compartmentalize the instantiation
self.instantiate_filters(self.modified_graph)
    def instantiate_filters(self, graph):
        """Create concrete Auspex filters from the proxy filters in ``graph`` and wire them up.

        Args:
            graph: A networkx graph whose nodes carry a ``node_obj`` attribute
                (bbndb filter/stream-selector proxies) and whose edges carry
                ``connector_out``/``connector_in`` names.

        Side effects: populates ``self.filters``, ``self.proxy_to_filter``,
        ``self.qubits_by_output`` and calls ``self.set_graph``.
        """
        # Configure the individual filter nodes: only nodes belonging to a
        # measured qubit are instantiated.
        for _, dat in graph.nodes(data=True):
            node = dat['node_obj']
            if isinstance(node, bbndb.auspex.FilterProxy):
                if node.qubit_name in self.measured_qubit_names:
                    # filter_map translates a proxy class to an Auspex filter class.
                    new_filt = filter_map[type(node)]()
                    new_filt.configure_with_proxy(node)
                    new_filt.proxy = node
                    self.filters.append(new_filt)
                    self.proxy_to_filter[node] = new_filt
                    if isinstance(node, bbndb.auspex.OutputProxy):
                        self.qubits_by_output[new_filt] = node.qubit_name
        # Connect the filters together
        graph_edges = []
        # Flush any pending pipeline DB changes before reading edge attributes.
        pipeline.pipelineMgr.session.commit()
        for l1, l2 in graph.edges():
            node1, node2 = graph.nodes[l1]['node_obj'], graph.nodes[l2]['node_obj']
            if node1.qubit_name in self.measured_qubit_names and node2.qubit_name in self.measured_qubit_names:
                # Source is either a filter (use its output connector) or a
                # stream selector (use the experiment-level connector).
                # NOTE(review): if node1 is neither type, `oc` is unbound and a
                # NameError follows — presumably the graph never contains such
                # edges; verify upstream.
                if isinstance(node1, bbndb.auspex.FilterProxy):
                    filt1 = self.proxy_to_filter[node1]
                    oc = filt1.output_connectors[graph[l1][l2]["connector_out"]]
                elif isinstance(node1, bbndb.auspex.StreamSelect):
                    oc = self.connector_by_sel[node1]
                filt2 = self.proxy_to_filter[node2]
                ic = filt2.input_connectors[graph[l1][l2]["connector_in"]]
                graph_edges.append([oc, ic])
        # Define the experiment graph
        self.set_graph(graph_edges)
def modify_graph(self, graph):
"""Method called near the end of `create_from_meta` to allow custom manipulation of the filter
pipeline. For example, `CalibrationExperiment` implements a version of `modify_graph` that
selectively removes portions of the graph and creates buffers as needed to perform the desired
calibrations on specific qubits.
"""
return graph
def set_fake_data(self, digitizer_proxy, ideal_data, increment=False, random_mag=0.1):
"""Enabled and use the fake data interface for digitizers in order that auspex can
be run without hardware.
Parameters:
digitizer_proxy (bbndb `Receiver` instance)
The digitizer instrument proxy to be used for fake data generation.
ideal_data (numpy array)
The actual data to be used. If `increment` is False, a 1D array with a single value
per segment is used. The digitizer drivers automatical convert to a integrated, demodulated,
or raw signal depending on the stream type being used. If `increment` is True, then this may be a
2D array, which is incremented through to emulate sweeps such a qubit measurement frequency sweep.
increment (boolean)
Whether or not to step through a 2D data array after to incorporate extra sweeps. The behavior is
defined above.
Examples:
Make sure to set auspex dummy mode at import time.
>>> import auspex.config as config
>>> config.auspex_dummy_mode = True
>>> # Configure channels and pipelines here
>>> amps = np.linspace(-1,1,51)
>>> exp = QubitExperiment(RabiAmp(q1,amps),averages=50)
>>> exp.set_fake_data(digitizer_1, np.cos(np.linspace(0, 2*np.pi,51)))
>>> exp.run_sweeps()
"""
auspex_instr = self.proxy_name_to_instrument[digitizer_proxy.label]
auspex_instr.ideal_data = ideal_data
auspex_instr.increment_ideal_data = increment
auspex_instr.gen_fake_data = True
auspex_instr.fake_data_random_mag = random_mag
def clear_fake_data(self, digitizer_proxy):
"""Disable using fake data interface for a digitizer. Take note that dummy mode may
still be active.
Parameters:
digitizer_proxy (bbndb `Receiver` instance)
The digitizer instrument proxy to be used for fake data generation.
"""
auspex_instr = self.proxy_name_to_instrument[digitizer_proxy.label]
auspex_instr.ideal_data = ideal_data
auspex_instr.gen_fake_data = False
def add_connector(self, stream_selector):
name = stream_selector.qubit_name+'-'+stream_selector.stream_type
logger.debug(f"Adding {name} output connector to experiment.")
oc = OutputConnector(name=name, parent=self)
self.output_connectors[name] = oc
setattr(self, name, oc)
return oc
def init_instruments(self):
for name, instr in self._instruments.items():
instr.configure_with_proxy(instr.proxy_obj)
self.digitizers = [v for _, v in self._instruments.items() if "Digitizer" in v.instrument_type]
self.awgs = [v for _, v in self._instruments.items() if "AWG" in v.instrument_type]
# Swap the master AWG so it is last in the list
try:
master_awg_idx = next(ct for ct,awg in enumerate(self.awgs) if awg.master)
self.awgs[-1], self.awgs[master_awg_idx] = self.awgs[master_awg_idx], self.awgs[-1]
except:
logger.warning("No AWG is specified as the master.")
for gen_proxy in self.generators:
gen_proxy.instr.output = True
# Start socket listening processes, store as keys in a dictionary with exit commands as values
self.dig_listeners = {}
ready = Value('i', 0)
self.dig_run = Event()
self.dig_exit = Event()
for chan, dig in self.chan_to_dig.items():
socket = dig.get_socket(chan)
oc = self.chan_to_oc[chan]
p = Process(target=dig.receive_data, args=(chan, oc, self.dig_exit, ready, self.dig_run))
self.dig_listeners[p] = self.dig_exit
assert None not in self.dig_listeners.keys()
for listener in self.dig_listeners.keys():
listener.start()
while ready.value < len(self.chan_to_dig):
time.sleep(0.3)
if self.cw_mode:
for awg in self.awgs:
awg.run()
def add_instrument_sweep(self, instrument_name, attribute, values, channel=None):
param = FloatParameter() # Create the parameter
param.name = f"{instrument_name} {attribute} {channel}"
instr = self._instruments[instrument_name]
def method(value, channel=channel, instr=instr, prop=attribute):
if channel:
getattr(instr, "set_"+prop)(channel, value)
else:
getattr(instr, "set_"+prop)(value)
param.assign_method(method)
self.add_sweep(param, values) # Create the requested sweep on this parameter
def add_qubit_sweep(self, qubit, measure_or_control, attribute, values):
"""
Add a *ParameterSweep* to the experiment. Users specify a qubit property that auspex
will try to link back to the relevant instrument. For example::
exp = QubitExpFactory.create(PulsedSpec(q1))
self.add_qubit_sweep(q1, "measure", "frequency", np.linspace(6e9, 6.5e9, 500))
self.run_sweeps()
"""
param = FloatParameter() # Create the parameter
param.name = f"{qubit.label} {measure_or_control} {attribute}"
if measure_or_control not in ["measure", "control"]:
raise ValueError(f"Cannot add sweep for something other than measure or control properties of {qubit}")
if measure_or_control == "measure":
logger.debug(f"Sweeping {qubit} measurement")
thing = list(filter(lambda m: m.label=="M-"+qubit.label, self.measurements))
if len(thing) > 1:
raise ValueError(f"Found more than one measurement for {qubit}")
thing = thing[0]
elif measure_or_control == "control":
logger.debug(f"Sweeping {qubit} control")
thing = qubit
if thing.phys_chan.generator and attribute=="frequency":
# Mixed up to final frequency
name = thing.phys_chan.generator.label
instr = list(filter(lambda x: x.name == name, self._instruments.values()))[0]
method = None
else:
# Direct synthesis
name, chan = thing.phys_chan.label.split("-")
instr = self._instruments[name] #list(filter(lambda x: x.name == name, self._instruments.values()))[0]
def method(value, channel=chan, instr=instr, prop=attribute):
# e.g. keysight.set_amplitude("ch1", 0.5)
getattr(instr, "set_"+prop)(chan, value)
if method:
# Custom method
param.assign_method(method)
else:
# Get method by name
if hasattr(instr, "set_"+attribute):
param.assign_method(getattr(instr, "set_"+attribute)) # Couple the parameter to the instrument
param.add_post_push_hook(lambda: time.sleep(0.05))
else:
raise ValueError("The instrument {} has no method {}".format(name, "set_"+attribute))
# param.instr_tree = [instr.name, attribute] #TODO: extend tree to endpoint
self.add_sweep(param, values) # Create the requested sweep on this parameter
def add_avg_sweep(self, num_averages):
param = IntParameter()
param.name = "sw_avg"
setattr(self, param.name, param)
self._parameters[param.name] = param
self.add_sweep(param, range(num_averages))
    def shutdown_instruments(self):
        """Stop all hardware, disable outputs, disconnect drivers and tear down listener processes.

        Order matters: AWGs and digitizers are stopped before generator outputs
        are disabled and drivers disconnected; listener processes are signalled
        to exit last and terminated if they fail to join within 2 s.
        """
        # remove socket listeners
        logger.debug("Shutting down instruments")
        for awg in self.awgs:
            awg.stop()
        for dig in self.digitizers:
            dig.stop()
        for gen_proxy in self.generators:
            gen_proxy.instr.output = False
        for instr in self.instruments:
            instr.disconnect()
        # Ask every listener process to exit, then join with a timeout.
        self.dig_exit.set()
        for listener in self.dig_listeners:
            listener.join(2)
            if listener.is_alive():
                logger.info(f"Terminating listener {listener} aggressively")
                listener.terminate()
            del listener
        # Force a collection so dead Process objects release their resources now.
        import gc
        gc.collect()
    def final_init(self):
        """Finish initialization, then index Write/Buffer output filters by qubit label.

        ``self.outputs_by_qubit`` maps each measured qubit's label to the list
        of instantiated output filters (Writers/Buffers) whose proxy belongs to
        that qubit, to make fetching data easier later.
        """
        super(QubitExperiment, self).final_init()
        # In order to fetch data more easily later
        self.outputs_by_qubit = {q.label: [self.proxy_to_filter[dat['node_obj']] for f,dat in self.modified_graph.nodes(data=True) if (isinstance(dat['node_obj'], (bbndb.auspex.Write, bbndb.auspex.Buffer,)) and q.label in dat['node_obj'].qubit_name)] for q in self.measured_qubits}
def init_progress_bars(self):
""" initialize the progress bars."""
from auspex.config import isnotebook
if isnotebook():
from ipywidgets import IntProgress, VBox
from IPython.display import display
ocs = list(self.output_connectors.values())
self.progressbars = {}
if len(ocs)>0:
for oc in ocs:
self.progressbars[oc] = IntProgress(min=0, max=oc.output_streams[0].descriptor.num_points(), bar_style='success',
description=f'Digitizer Data {oc.name}:', style={'description_width': 'initial'})
for axis in self.sweeper.axes:
self.progressbars[axis] = IntProgress(min=0, max=axis.num_points(),
description=f'{axis.name}:', style={'description_width': 'initial'})
display(VBox(list(self.progressbars.values())))
    def run(self):
        """Execute one acquisition: arm digitizers, run AWGs, wait, then stop everything.

        In CW mode the AWGs are assumed to already be running (started in
        init_instruments) and are left running afterwards.
        """
        # Begin acquisition before enabling the AWGs
        for dig in self.digitizers:
            dig.acquire()
            # Timestamp used by the drivers to detect stalled acquisitions.
            dig.last_timestamp.value = datetime.datetime.now().timestamp()
        # Set flag to enable acquisition process
        self.dig_run.set()
        # Start the AWGs
        if not self.cw_mode:
            for awg in self.awgs:
                awg.run()
        # Wait for all of the acquisitions to complete
        timeout = 2
        for dig in self.digitizers:
            dig.wait_for_acquisition(self.dig_run, timeout=timeout, ocs=list(self.chan_to_oc.values()), progressbars=self.progressbars)
        # Bring everything to a stop
        for dig in self.digitizers:
            dig.stop()
        # Pause the receiver processes so they don't time out
        self.dig_run.clear()
        # Stop the AWGs
        if not self.cw_mode:
            for awg in self.awgs:
                awg.stop()
| 47.138937
| 282
| 0.640965
| 25,586
| 0.931009
| 0
| 0
| 0
| 0
| 0
| 0
| 9,146
| 0.3328
|
f00b1f413db4083c2b4c12dfb8af15b799f387ae
| 2,288
|
py
|
Python
|
mtconnect/mtconnect_ros_bridge/scripts/closedoor.py
|
mtconnect/ros_bridge
|
b578e8c3edca83ea0de8ed15aff0f7733dd23e04
|
[
"Apache-2.0"
] | 5
|
2015-04-30T21:51:46.000Z
|
2019-03-18T06:24:38.000Z
|
mtconnect/mtconnect_ros_bridge/scripts/closedoor.py
|
CubeSpawn/ros_bridge
|
b578e8c3edca83ea0de8ed15aff0f7733dd23e04
|
[
"Apache-2.0"
] | null | null | null |
mtconnect/mtconnect_ros_bridge/scripts/closedoor.py
|
CubeSpawn/ros_bridge
|
b578e8c3edca83ea0de8ed15aff0f7733dd23e04
|
[
"Apache-2.0"
] | 4
|
2016-02-21T20:04:31.000Z
|
2021-01-04T13:48:41.000Z
|
#! /usr/bin/env python
"""
Copyright 2013 Southwest Research Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import roslib; roslib.load_manifest('mtconnect_msgs')
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the messages used by the material_load action.
import mtconnect_msgs.msg
def close_door_client():
    """Send a single CLOSED goal to the CloseDoor action server and wait for the result.

    Connects a SimpleActionClient to the 'CloseDoorClient' server, dispatches
    a CloseDoorGoal with close_door='CLOSED', blocks until completion and logs
    the server's result. Returns None.
    """
    rospy.loginfo('Launched CloseDoor Action CLient')
    # Connect to the CloseDoor action server.
    action_client = actionlib.SimpleActionClient('CloseDoorClient', mtconnect_msgs.msg.CloseDoorAction)
    rospy.loginfo('Waiting for Generic Action Server')
    action_client.wait_for_server()
    rospy.loginfo('Generic Action Server Activated')
    # Build and dispatch the goal.
    goal = mtconnect_msgs.msg.CloseDoorGoal()
    goal.close_door = 'CLOSED'
    rospy.loginfo('Sending the goal')
    action_client.send_goal(goal)
    # Block until the server reports completion, then log the outcome.
    rospy.loginfo('Waiting for result')
    action_client.wait_for_result()
    result = action_client.get_result()  # result must be a string
    rospy.loginfo('Returning the result --> %s' % result)
    return
if __name__ == '__main__':
    try:
        # Initializes a rospy node so that the SimpleActionClient can
        # publish and subscribe over ROS.
        rospy.init_node('CloseDoorActionClient')
        result = close_door_client()
        rospy.loginfo('Action Result --> %s' % result)
    except rospy.ROSInterruptException:
        # BUG FIX (portability): was a Python-2-only print statement; the
        # single-argument call form is valid in both Python 2 and 3.
        print('program interrupted before completion')
| 34.666667
| 96
| 0.723776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,499
| 0.655157
|
f00bfebe8f465035bb8191daaed17fe817eb4bdf
| 4,112
|
py
|
Python
|
cte/__main__.py
|
iqbal-lab-org/covid-truth-eval
|
a11125538699f21a5483f15bd5aac952340d3797
|
[
"MIT"
] | 1
|
2022-01-21T11:54:21.000Z
|
2022-01-21T11:54:21.000Z
|
cte/__main__.py
|
iqbal-lab-org/covid-truth-eval
|
a11125538699f21a5483f15bd5aac952340d3797
|
[
"MIT"
] | null | null | null |
cte/__main__.py
|
iqbal-lab-org/covid-truth-eval
|
a11125538699f21a5483f15bd5aac952340d3797
|
[
"MIT"
] | 1
|
2022-03-21T09:48:32.000Z
|
2022-03-21T09:48:32.000Z
|
#!/usr/bin/env python3
import argparse
import logging
import sys
import cte
def main(args=None):
    """Entry point for the ``cte`` command-line tool.

    Builds the argument parser (``eval_one_run`` and ``eval_runs``
    subcommands), configures logging, validates option combinations and
    dispatches to the selected task's ``run`` function.

    Args:
        args: Optional list of command-line tokens. When None, ``sys.argv``
            is used (argparse default).
    """
    parser = argparse.ArgumentParser(
        prog="cte", usage="cte <command> <options>", description="cte: test-covid-eval"
    )
    parser.add_argument("--version", action="version", version=cte.__version__)
    subparsers = parser.add_subparsers(title="Available commands", help="", metavar="")
    # ----------- general options common to all tasks ------------------------
    common_parser = argparse.ArgumentParser(add_help=False)
    common_parser.add_argument(
        "--debug",
        help="More verbose logging, and less file cleaning",
        action="store_true",
    )
    common_parser.add_argument(
        "--ref_fasta",
        help="Reference FASTA file. Default is to use covid MN908947.3 [%(default)s]",
        default=cte.built_in_data.COVID_REF,
        metavar="FILENAME",
    )
    common_parser.add_argument(
        "--force",
        action="store_true",
        help="Overwrite output directory if it already exists",
    )
    common_parser.add_argument(
        "--outdir",
        required=True,
        help="REQUIRED. Output directory (will be created, or see --force)",
        metavar="FILENAME",
    )
    # ------------------------ eval_one_run -----------------------------------
    subparser_eval_one_run = subparsers.add_parser(
        "eval_one_run",
        parents=[common_parser],
        help="Evaluate one run",
        usage="cte eval_one_run [options] <truth files options> --outdir out --fasta_to_eval to_eval.fa --primers name",
        description="Evaluate one consensus sequence",
    )
    subparser_eval_one_run.add_argument(
        "--truth_vcf",
        help="Truth VCF file (with respect to the reference genome). If not provided, must provide --truth_fasta. If this option and --truth_fasta is used, then only dropped amplicon entries are used from the truth_vcf file",
        metavar="FILENAME",
    )
    subparser_eval_one_run.add_argument(
        "--truth_fasta",
        help="Truth FASTA file. If not provided, must provide --truth_vcf",
        metavar="FILENAME",
    )
    subparser_eval_one_run.add_argument(
        "--fasta_to_eval",
        required=True,
        help="REQUIRED. FASTA file of consensus sequence to be evaluated",
        metavar="FILENAME",
    )
    scheme_names = ",".join(cte.built_in_data.COVID_SCHEME_NAMES)
    subparser_eval_one_run.add_argument(
        "--primers",
        required=True,
        help=f"REQUIRED. TSV file of primers (in 'viridian_workflow' format). Or use a built-in scheme by providing one of these names: {scheme_names}",
        metavar="SCHEME_NAME/FILENAME",
    )
    subparser_eval_one_run.set_defaults(func=cte.tasks.eval_one_run.run)
    # ------------------------ eval_runs --------------------------------------
    subparser_eval_runs = subparsers.add_parser(
        "eval_runs",
        parents=[common_parser],
        help="Evaluate multiple consensus sequences",
        usage="cte eval_runs [options] --outdir out manifest.tsv",
        description="Evaluate multiple consensus sequences",
    )
    subparser_eval_runs.add_argument(
        "manifest_tsv",
        help="TSV file containing files to be evaluated",
    )
    subparser_eval_runs.set_defaults(func=cte.tasks.eval_runs.run)
    # BUG FIX: the `args` parameter was accepted but ignored (parse_args() was
    # called with no arguments), so programmatic invocation always re-parsed
    # sys.argv. Passing args through preserves the default behavior when None.
    args = parser.parse_args(args)
    logging.basicConfig(
        format="[%(asctime)s cte %(levelname)s] %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
    log = logging.getLogger()
    if hasattr(args, "debug") and args.debug:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    if hasattr(args, "func"):
        # eval_one_run needs at least one source of truth.
        if (
            args.func == cte.tasks.eval_one_run.run
            and args.truth_fasta is None
            and args.truth_vcf is None
        ):
            print(
                "Must use --truth_fasta or --truth_vcf. Cannot continue",
                file=sys.stderr,
            )
            sys.exit(1)
        args.func(args)
    else:
        # No subcommand given: show usage instead of failing.
        parser.print_help()
# Entry point when executed as a script (e.g. `python -m cte`).
if __name__ == "__main__":
    main()
| 34.266667
| 225
| 0.614543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,747
| 0.424854
|
f00d8a2ff37a2b007fa4edfda74f6d8657793532
| 3,684
|
py
|
Python
|
piton/lib/inquirer/questions.py
|
piton-package-manager/PPM
|
19015b76184befe1e2daa63189a13b039787868d
|
[
"MIT"
] | 19
|
2016-04-08T04:00:07.000Z
|
2021-11-12T19:36:56.000Z
|
piton/lib/inquirer/questions.py
|
LookLikeAPro/PPM
|
19015b76184befe1e2daa63189a13b039787868d
|
[
"MIT"
] | 9
|
2017-01-03T13:39:47.000Z
|
2022-01-15T20:38:20.000Z
|
piton/lib/inquirer/questions.py
|
LookLikeAPro/PPM
|
19015b76184befe1e2daa63189a13b039787868d
|
[
"MIT"
] | 6
|
2017-04-01T03:38:45.000Z
|
2021-05-06T11:25:31.000Z
|
# -*- coding: utf-8 -*-
"""
Module that implements the questions types
"""
import json
from . import errors
def question_factory(kind, *args, **kwargs):
    """Instantiate the Question subclass whose ``kind`` attribute matches.

    Remaining positional/keyword arguments are forwarded to the constructor.

    :raises errors.UnknownQuestionTypeError: if ``kind`` is not recognized.
    """
    known_types = (Text, Password, Confirm, List, Checkbox)
    for question_cls in known_types:
        if question_cls.kind == kind:
            return question_cls(*args, **kwargs)
    raise errors.UnknownQuestionTypeError()
def load_from_dict(question_dict):
    """Build a single Question from a dict.

    The dict must supply 'name' and 'kind'; any remaining keys are passed
    through to the question constructor as keyword arguments.

    :return: The Question object with associated data.
    :return type: Question
    """
    return question_factory(**question_dict)
def load_from_list(question_list):
    """Build Questions from a list of dicts.

    Each dict requires the keys 'name' and 'kind'.

    :return: A list of Question objects with associated data.
    :return type: List
    """
    return list(map(load_from_dict, question_list))
def load_from_json(question_json):
    """Load Questions from a JSON string.

    :return: A list of Question objects with associated data if the JSON
             contains a list, or a Question if the JSON contains a dict.
    :return type: List or Dict
    :raises TypeError: if the JSON top level is neither a list nor a dict.
    """
    data = json.loads(question_json)
    if isinstance(data, list):
        return load_from_list(data)
    if isinstance(data, dict):
        return load_from_dict(data)
    # BUG FIX: the original passed the format string and the type as two
    # separate TypeError arguments (logging-style), so the '%s' was never
    # interpolated into the message.
    raise TypeError(
        'Json contained a %s variable when a dict or list was expected'
        % type(data))
class TaggedValue(object):
    """A choice whose display label differs from its underlying value."""
    def __init__(self, label, value):
        self.label = label  # text shown to the user
        self.value = value  # value returned as the answer
    def __str__(self):
        return self.label
    def __repr__(self):
        return self.value
    def __eq__(self, other):
        # BUG FIX: the original defined only __cmp__, which Python 3 ignores
        # entirely, so equality silently fell back to identity. Compare by
        # value, and let a TaggedValue also equal its raw value.
        if isinstance(other, TaggedValue):
            return self.value == other.value
        return self.value == other
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Defining __eq__ disables the inherited hash; keep instances hashable.
        return hash(self.value)
class Question(object):
    """Base class for all question types.

    Several constructor arguments (message, choices, default, ignore,
    validate) may be literal values, '{placeholder}' format strings, or
    callables taking the answers dict — all are resolved lazily through
    ``_solve`` against ``self.answers``.
    """
    kind = 'base question'
    def __init__(self,
                 name,
                 message='',
                 choices=None,
                 default=None,
                 ignore=False,
                 validate=True):
        self.name = name
        self._message = message
        self._choices = choices or []
        self._default = default
        self._ignore = ignore
        self._validate = validate
        # Answers gathered so far; used to resolve dynamic properties.
        self.answers = {}
    @property
    def ignore(self):
        """Whether this question should be skipped (resolved to bool)."""
        return bool(self._solve(self._ignore))
    @property
    def message(self):
        """The prompt text, with any placeholders resolved."""
        return self._solve(self._message)
    @property
    def default(self):
        """The resolved default answer."""
        return self._solve(self._default)
    @property
    def choices_generator(self):
        """Yield choices, wrapping (label, value) pairs in TaggedValue."""
        for choice in self._solve(self._choices):
            yield (
                TaggedValue(*choice)
                if isinstance(choice, tuple) and len(choice) == 2
                else choice
            )
    @property
    def choices(self):
        """Materialized list of choices."""
        return list(self.choices_generator)
    def validate(self, current):
        """Raise ValidationError unless the validator accepts ``current``.

        Any exception raised by a callable validator is treated as a
        validation failure rather than propagated.
        """
        try:
            if self._solve(self._validate, current):
                return
        except Exception:
            pass
        raise errors.ValidationError(current)
    def _solve(self, prop, *args, **kwargs):
        # Callable: invoke with the answers dict (plus extra args).
        if callable(prop):
            return prop(self.answers, *args, **kwargs)
        # String: treat as a format template over the answers.
        if isinstance(prop, str):
            return prop.format(**self.answers)
        # Anything else is a literal.
        return prop
class Text(Question):
    """Free-form text question."""
    kind = 'text'
class Password(Question):
    """Text question whose input is masked."""
    kind = 'password'
class Confirm(Question):
    """Yes/no question; the answer is a boolean (default False)."""
    kind = 'confirm'
    def __init__(self, name, default=False, **kwargs):
        super(Confirm, self).__init__(name, default=default, **kwargs)
class List(Question):
    """Select exactly one entry from the choices."""
    kind = 'list'
class Checkbox(Question):
    """Select any number of entries from the choices."""
    kind = 'checkbox'
| 24.236842
| 72
| 0.604777
| 2,216
| 0.60152
| 243
| 0.065961
| 559
| 0.151737
| 0
| 0
| 797
| 0.216341
|
f00ff90a15569e736314d9e7505d121e6996f894
| 4,216
|
py
|
Python
|
json_replacer.py
|
MrMusicMan/json-item-replacer
|
04362b5e5ecf3cf9dd12ef3e72a7a1474a5239fa
|
[
"Apache-2.0"
] | null | null | null |
json_replacer.py
|
MrMusicMan/json-item-replacer
|
04362b5e5ecf3cf9dd12ef3e72a7a1474a5239fa
|
[
"Apache-2.0"
] | null | null | null |
json_replacer.py
|
MrMusicMan/json-item-replacer
|
04362b5e5ecf3cf9dd12ef3e72a7a1474a5239fa
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import string
from tkinter import filedialog, simpledialog
from tkinter import *
class CsvImporter(object):
    """Read a translations CSV into per-language dictionaries.

    The first column holds the lookup key; each remaining column is one
    language, named in the header row. After ``import_csv``:
      * ``self.languages`` is the list of language column names, and
      * ``self.csv_data`` maps language -> {key: translated value}.
    """
    def __init__(self):
        self.csv_data = None
        self.languages = []
    def import_csv(self, csv_filename):
        """Parse ``csv_filename`` and return the language dictionaries."""
        # Local import: the module header does not import csv.
        import csv
        with open(csv_filename, 'r') as file:
            self.csv_data = {}
            # BUG FIX/robustness: use csv.reader instead of line.split(','),
            # so quoted fields containing commas parse correctly. Also avoids
            # the original's shadowing of the loop variable `key`.
            for row_idx, row in enumerate(csv.reader(file)):
                line_items = [cell.strip() for cell in row]
                if row_idx == 0:
                    # Header row: every column after the key names a language.
                    self.languages = line_items[1:]
                    for language in self.languages:
                        self.csv_data[language] = {}
                else:
                    # Populate each language's dictionary.
                    for col_idx, language in enumerate(self.languages):
                        try:
                            # Key from first column, value from matching column.
                            self.csv_data[language][line_items[0]] = \
                                line_items[col_idx + 1]
                        except IndexError:
                            # Rows may legally omit trailing columns.
                            pass
        return self.csv_data
class JsonEditor(object):
    """Load, save and in-place edit JSON-compatible structures."""
    def import_json(self, json_filename):
        """Parse ``json_filename`` and return the resulting object."""
        with open(json_filename) as file:
            json_data = json.load(file)
        return json_data
    def export_new_json(self, output_filename, json_data):
        """Serialize ``json_data`` to ``output_filename``."""
        # BUG FIX: the original used open()/close() without try/finally, so an
        # error during serialization leaked the file handle. The context
        # manager guarantees closure.
        with open(output_filename, "w") as f:
            f.write(json.dumps(json_data))
        return
    def update_json(self, input_json, target_key, target_value, update_value):
        """Recursively replace values equal to ``target_value`` under ``target_key``.

        NOTE: this mutates ``input_json`` IN PLACE and returns the same object
        (the original comment claimed it duplicated the input, but it only
        aliased it — callers in this file rely on the in-place behavior).
        A falsy ``update_value`` disables replacement.
        """
        output_json = input_json  # alias: edits are intentionally in place
        if isinstance(input_json, dict):
            for key, value in input_json.items():
                if key == target_key and value == target_value and update_value:
                    output_json[key] = update_value
                # Recurse into nested lists/dictionaries.
                self.update_json(input_json[key], target_key, target_value, update_value)
        elif isinstance(input_json, list):
            for entity in input_json:
                self.update_json(entity, target_key, target_value, update_value)
        return output_json
# Interactive driver: prompts for a translations CSV, a master JSON file,
# the key whose values should be replaced, and an output base name; then
# writes one translated JSON file per language column in the CSV.
if __name__ == '__main__':
    root = Tk()
    root.csv_filename = filedialog.askopenfilename(
        title="Select CSV file with translations",
        filetypes=(("CSV Files", "*.csv"),)
    )
    root.json_filename = filedialog.askopenfilename(
        title="Select master JSON file to build tranlated JSON files",
        filetypes=(("JSON Files","*.json"),("All Files", "*.*"))
    )
    target_key = simpledialog.askstring(
        "Input",
        "What is the target key for the values we are replacing?",
        initialvalue="title"
    )
    base_output_filename = simpledialog.askstring(
        "Input",
        "What would you like the base file to be named?"
    )
    # Import CSV.
    csv = CsvImporter()
    csv_data = csv.import_csv(root.csv_filename)
    # Import JSON.
    make_json = JsonEditor()
    # Make changes per language.
    for language in csv_data:
        # Reload the master JSON so each language starts from the original,
        # then apply every key -> translated-value replacement in place.
        input_json = make_json.import_json(root.json_filename)
        for key, value in csv_data[language].items():
            updated_json = make_json.update_json(input_json, target_key, key, value)
        # Create filename per language.
        language_filename = base_output_filename + "_" + language + ".json"
        made_json = make_json.export_new_json(language_filename, updated_json)
    # Finished.
    print("Success!")
| 34.842975
| 89
| 0.57851
| 2,773
| 0.657732
| 0
| 0
| 0
| 0
| 0
| 0
| 1,005
| 0.238378
|
f01114fcd31b24a944a91cf16636601c7b3cffa8
| 6,134
|
py
|
Python
|
src/func.py
|
yygr/datascience_utility
|
aa6aa37508e46ab3568805dd1bb514ef10652240
|
[
"MIT"
] | null | null | null |
src/func.py
|
yygr/datascience_utility
|
aa6aa37508e46ab3568805dd1bb514ef10652240
|
[
"MIT"
] | null | null | null |
src/func.py
|
yygr/datascience_utility
|
aa6aa37508e46ab3568805dd1bb514ef10652240
|
[
"MIT"
] | null | null | null |
from pdb import set_trace
from time import time
import matplotlib.pyplot as plt
import numpy as np
from numpy import random
from scipy.stats import chi2
import renom as rm
class Enc(rm.Model):
    """Encoder head for a VAE: maps a backbone's features to latent statistics.

    Wraps a user-supplied feature extractor ``pre`` and adds two Dense heads
    producing ``zm`` and ``zlv`` — presumably the latent mean and
    log-variance consumed by VAE below (TODO confirm against renom usage).
    """
    def __init__(
        self, pre, latent_dim,
        output_act = None,
    ):
        self.pre = pre                    # backbone feature extractor
        self.latent_dim = latent_dim      # size of the latent space
        self.zm_ = rm.Dense(latent_dim)   # head producing zm
        self.zlv_ = rm.Dense(latent_dim)  # head producing zlv
        self.output_act = output_act      # optional activation on both heads
    def forward(self, x):
        """Run the backbone and both heads; stores zm/zlv on self, returns zm."""
        hidden = self.pre(x)
        self.zm = self.zm_(hidden)
        self.zlv = self.zlv_(hidden)
        if self.output_act:
            self.zm = self.output_act(self.zm)
            self.zlv = self.output_act(self.zlv)
        return self.zm
class VAE(rm.Model):
    """Variational autoencoder assembled from an Enc encoder and a decoder.

    ``forward`` performs the reparameterization trick, decodes, and stores the
    reconstruction error, KL term and total loss on the instance.
    """
    def __init__(
        self,
        enc,
        dec,
        latent_dim,
        batch_size = None,
        sigma = 1.
    ):
        self.latent_dim = latent_dim
        self.enc = enc
        self.dec = dec
        self.batch_size = batch_size  # NOTE(review): stored but unused here
        self.sigma = sigma            # scale of the sampling noise
    def forward(self, x, eps=1e-3):
        # NOTE(review): `eps` is accepted but unused in this body.
        nb = len(x)
        self.enc(x)
        # Reparameterization: z = mu + exp(logvar/2) * noise.
        e = np.random.randn(nb, self.latent_dim)*self.sigma
        self.z = self.enc.zm + rm.exp(self.enc.zlv/2)*e
        self.decd = self.dec(self.z)
        self.reconE = rm.mean_squared_error(self.decd, x)
        # KL divergence of N(zm, exp(zlv)) from N(0, I), averaged over batch.
        self.kl_loss = - 0.5 * rm.sum(
            1 + self.enc.zlv - self.enc.zm**2 -rm.exp(self.enc.zlv)
        )/nb
        self.vae_loss = self.kl_loss + self.reconE
        return self.decd
class Mahalanobis():
    def __init__(self, data, label):
        """Fit per-class Gaussian statistics for Mahalanobis-distance scoring.

        Args:
            data: 2D array, rows are samples, columns are features.
            label: 1D integer array of class labels, one per row of data.

        Computes per-class means (self.mu) and covariance matrices (self.cov),
        then the training-set distances via comp_dist.
        """
        self.i_max = label.max() + 1
        self.labels = np.unique(label)
        self.d = data.shape[-1]
        # Per-class mean vectors.
        self.mu = np.array([
            data[np.where(label==x)[0]].mean(0) for x in self.labels])
        # Per-class covariance matrices (np.cov expects features in rows).
        self.cov = np.array([
            np.cov(data[np.where(label==x)[0]].T) for x in self.labels])
        print('Computing Dist')
        s = time()
        self.comp_dist(data, label)
        print(' {}sec'.format(time() - s))
def stat(self):
print('latent dimention = {}'.format(self.d))
print('{} classifier'.format(self.i_max))
def a(self, x, i):
temp = x-self.mu[i]
#return np.dot(np.dot(temp, np.linalg.inv(self.cov[i])), temp.T)
return np.dot(temp, np.linalg.solve(self.cov[i], temp.T))
def al(self, x):
return [self.a(x, i) for i in range(self.i_max)]
def comp_dist(self, data, label):
dist = []
if 0:
for x in self.labels:
sub = data[np.where(label==x)[0]]
dist.append(np.array([self.al(x) for x in sub]))
#dist.append(np.diagonal(np.dot(np.dot(sub,self.cov[i]),sub.T)))
else:
for x in self.labels:
sub = data[np.where(label==x)[0]]
sub_dist = []
for i, y in enumerate(self.labels):
temp = sub - self.mu[i]
sub_dist.append(np.diag(
np.dot(temp,
np.linalg.sove(self.cov[i], temp.T))
))
self.dist = np.array(dist)
def get_dist(self, data):
res = np.zeros((len(data), self.i_max))
for i in range(self.i_max):
temp = data - self.mu[i]
res[:,i] = np.diag(
np.dot(temp,
np.linalg.solve(self.cov[i], temp.T))
)
return res
#return np.array([self.al(x) for x in data])
def gamma(self,n):
return np.prod(np.arange(1,n))
def chi_squared(self, u, k, s):
a = 2*s
b = k//2
t = u/a
v = np.power(t,b-1)*np.exp(-t)/a/self.gamma(b)
return v
def comp_th(self, th):
assert th <= 1, "{}:th must be lower than 1 or equal to 1".format(th)
dth = 1 - th
return chi2.isf(dth, self.d)
def get_ths(self, ths):
ths_ = np.sort(ths)
acc = 0
split = 1e6
maxv = 100
delta = maxv/split
athl = []
ath = 0
pre = 0
for dth in ths_:
while acc < dth:
check_value = '\r{}'.format(acc)
sys.stdout.write(check_value)
sys.stdout.flush()
acc += self.chi_squared(ath, self.d, 1) * delta
ath += delta
athl.append(ath)
print('')
return np.array(athl)
def set_th(self, th=0.001):
th = self.comp_th(th)
self.th = th
def predict(self, data, th=None):
res = self.get_dist(data)
if th is None:
return res / self.th
return res / th
def predicts(self, data, ths):
temp = self.get_dist(data)
res = []
for th in ths:
res.append(temp/th)
return np.array(res)
def predict_prob(self, data):
res = self.get_dist(data)
prob_all = []
for item in res:
subprob = []
for i, x in enumerate(item):
distance = self.cumprob[i][0]
prob = self.cumprob[i][1]
if distance[-1] < x:
subprob.append(prob[-1])
else:
subprob.append(prob[np.argmax(distance>x)-1])
prob_all.append(np.array(subprob))
return res/self.th, np.array(prob_all)
def comp_cummlative_probability(self, bins=100):
cumprob = []
for i in range(self.dist.shape[0]):
hist, x = np.histogram(np.sort(self.dist[i][:,i]), bins)
cum_hist = np.array([hist[:j].sum() for j,_ in enumerate(hist)])
cum_hist = 1 - cum_hist/cum_hist.max().astype('float')
cumprob.append((x[:-1],cum_hist))
self.cumprob = np.array(cumprob)
| 31.137056
| 80
| 0.5
| 5,954
| 0.970655
| 0
| 0
| 0
| 0
| 0
| 0
| 472
| 0.076948
|
f0113aeb5d7960eefb66a0247171970b6a1b3515
| 2,245
|
py
|
Python
|
portality/cms/implied_attr_list.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
portality/cms/implied_attr_list.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
portality/cms/implied_attr_list.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
import markdown
import re
from markdown.extensions import attr_list
def makeExtension(**kwargs): # pragma: no cover
    """Entry point used by Python-Markdown to instantiate the extension."""
    return ImpliedAttrListExtension(**kwargs)
class ImpliedAttrListExtension(markdown.Extension):
    """Extension for attaching `attr_list` entries to implied elements. Specifically: lists and tables"""
    def extendMarkdown(self, md: markdown.Markdown, *args, **kwargs):
        # Register both halves of the feature under the same name.
        # NOTE(review): priority 100 presumably runs these ahead of the stock
        # processors -- confirm against the markdown registry documentation.
        md.preprocessors.register(ImpliedAttrListPreprocessor(md), "implied_attr_list", 100)
        md.treeprocessors.register(ImpliedAttrListTreeprocessor(md), 'implied_attr_list', 100)
        md.registerExtension(self)
class ImpliedAttrListPreprocessor(markdown.preprocessors.Preprocessor):
    def run(self, lines):
        """
        Insert a blank line after every stand-alone attr_list declaration.

        This lets the following list/table render normally; the attr_list text
        ends up inside its own paragraph, which the Treeprocessor below then
        consumes and removes.
        """
        out = []
        for current in lines:
            matched = re.fullmatch(ImpliedAttrListTreeprocessor.BASE_RE, current)
            out.extend([current, ""] if matched else [current])
        return out
class ImpliedAttrListTreeprocessor(attr_list.AttrListTreeprocessor):
    def run(self, doc):
        """
        Iterate through the doc, locating <p> tags that contain ONLY the syntax for attr_lists.
        Once one is found, the value is applied to the next element in the iteration of the doc, and the
        <p> tag is removed
        :param doc:
        :return:
        """
        holdover = None  # attr_list text waiting to be applied to the next element
        removes = []     # the <p> carrier elements to delete afterwards
        for elem in doc.iter():
            if holdover is not None:
                self.assign_attrs(elem, holdover)
                holdover = None
            if elem.tag in ["p"] and elem.text is not None:
                m = re.fullmatch(self.BASE_RE, elem.text)
                if m:
                    # Remember the attr_list body; it applies to the element
                    # that follows this paragraph in document order.
                    holdover = m.group(1)
                    removes.append(elem)
        if len(removes) > 0:
            # ElementTree has no parent pointers; build a child->parent map once.
            parent_map = {c: p for p in doc.iter() for c in p}
            for r in removes:
                parent = parent_map[r]
                parent.remove(r)
| 34.538462
| 117
| 0.629399
| 2,072
| 0.92294
| 0
| 0
| 0
| 0
| 0
| 0
| 738
| 0.328731
|
f0123837d9cb8c6159b0ec92e3dc57d8e6054cf3
| 704
|
py
|
Python
|
services/web/apps/main/pool/views.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
services/web/apps/main/pool/views.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
services/web/apps/main/pool/views.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# main.pool application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.main.models.pool import Pool
from noc.core.translation import ugettext as _
class PoolApplication(ExtDocApplication):
    """
    Admin UI application exposing main.Pool objects (title "Pool",
    shown under the Setup > Pools menu).
    """

    title = _("Pool")
    menu = [_("Setup"), _("Pools")]
    model = Pool
    glyph = "database"  # icon shown in the UI
    default_ordering = ["name"]
| 28.16
| 71
| 0.473011
| 209
| 0.296875
| 0
| 0
| 0
| 0
| 0
| 0
| 406
| 0.576705
|
f012b80503a597191471f367c16412e1f714452d
| 2,396
|
py
|
Python
|
new_corpus/_sympy.py
|
y-akinobu/multiese
|
e28e6424b9714c5f145f438c8502c4194b70fe25
|
[
"MIT"
] | null | null | null |
new_corpus/_sympy.py
|
y-akinobu/multiese
|
e28e6424b9714c5f145f438c8502c4194b70fe25
|
[
"MIT"
] | null | null | null |
new_corpus/_sympy.py
|
y-akinobu/multiese
|
e28e6424b9714c5f145f438c8502c4194b70fe25
|
[
"MIT"
] | null | null | null |
# NL-to-code training corpus for sympy.  Each snippet is followed by a bare
# triple-quoted string of @test/@alt/@prefix annotations and Japanese phrase
# templates consumed by the corpus tooling -- those strings are DATA, not
# docstrings, and are kept byte-identical (including the commented-out
# entries at the bottom).
# NOTE(review): `sympy.symbol` is not a real sympy attribute (Symbol/symbols
# are) and `sympy.sympify` parses rather than simplifies (`sympy.simplify`
# does) -- possibly intentional corpus artifacts; confirm before fixing.
import sympy
'''
@test($$;type(sympy))
@alt(シンボル|記号|変数)
@alt(数式として、|)
[代数計算|シンボル計算|数式処理][|ライブラリ]を使う
'''
s = 'z'
sympy.symbol(s)
'''
@test(sympy=missing;$$)
数式として、sをシンボルに変換する
'''
z = sympy.symbol(s)
'''
@test(sympy=missing;$$;z)
@prefix(z;[変数|式])
数式として、sを[シンボル|変数][に|化]して、zにする
'''
e = e2 = sympy.symbol(s)
n = 2
e.subs(z, n)
'''
@test(e=missing;e2='e2';z='x';$$)
@prefix(e;式)
数式として、eのzにnを代入する
'''
e.subs(z, e2)
'''
@test(e=missing;e2='e2';z='x';$$)
数式として、eのzにe2を代入する
数式として、eのzをe2で置き換える
'''
sympy.expand(e)
'''
@test(sympy=missing;e='e';$$)
数式として、eを展開する
数式として、eの展開を行う
'''
sympy.factor(e)
'''
@test(sympy=missing;e='e';$$)
数式として、eを因数分解する
数式として、eの因数分解を行う
'''
sympy.sympify(e)
'''
@test(sympy=missing;e='e';$$)
数式として、eを簡単[に|化]する
数式として、eを簡略[に|化]する
数式として、eの簡[略|単]化を行う
'''
sympy.apart(e)
'''
@test(sympy=missing;e='e';$$)
数式として、eを部分分数[に|として]展開する
数式として、eの部分分数化を行う
'''
sympy.solve(e)
'''
@test(sympy=missing;e='e';$$)
数式として、方程e[=0|][を解く|の解を求める]
'''
sympy.solve(e, z)
'''
@test(sympy=missing;e='e';z='x';$$)
数式として、方程e[=0|]のzの解を求める
'''
sympy.solve([e, e2])
'''
@test(sympy=missing;e='e';e2='e2';$$)
数式として、連立方程e[=0|], e2[|=0]の解を求める
'''
sympy.limit(e, z, 0)
'''
@test(sympy=missing;e='e';z='x';$$)
@alt(とき|時|場合|際)
zが0に近づくとき[の|、]eの極限値を求める
'''
sympy.limit(e, z, oo)
'''
@test(sympy=missing;e='e';z='x';oo='oo';$$)
zが無限大に近づくとき[の|、]eの極限値を求める
'''
sympy.limit(e, z, -oo)
'''
@test(sympy=missing;e='e';z='x';oo=0;$$)
zがマイナス無限大に近づくとき[の|、]eの極限値を求める
'''
sympy.diff(e)
'''
@test(sympy=missing;e='e';z='x';$$)
数式として、eを微分する
数式として、eの微分を求める
'''
sympy.diff(e, z)
'''
@test(sympy=missing;e='e';z='x';$$)
数式として、zについてeの微分を[行う|求める]
数式として、eのzを微分する
'''
sympy.diff(e, z, n)
'''
@test(sympy=missing;e='e';z='x';$$)
数式として、{eを|zについて}n階微分する
数式として、eの[zについての|]n階微分を[求める|行う]
'''
sympy.integrate(e)
'''
@test(sympy=missing;e='e';z='x';$$)
数式として、eを積分する
数式として、eの[積分|インテグラル]を[求める|行う]
'''
sympy.integrate(e, z)
'''
@test(sympy=missing;e='e';z='x';$$)
数式として、zについてeを積分する
数式として、zについてeの[積分|インテグラル]を[求める|行う]
'''
float(e)
'''
@test(sympy=missing;e='3.14159';z='x';$$)
数式として、eの数値[を求める|]
数式として、eを数値計算する
数式として、eを[数値|浮動小数点数]に変換する
'''
__X__ = e
sympy.sqrt(__X__)
'''
@test(sympy=missing;e='e';z='x';$$)
@X(e;z)
@Y(e;z)
数式として、__Y__の平方根を求める
'''
# sympy.E**(sympy.I * sympy.pi) == -1
# '''
# 数式として、オイラーの等式を使う
# '''
# sympy.summation(e, (z, 1, N))
# '''
# @test(import sympy;z,N=sympy.Symbol('z N');e=z**2;$$)
# 数式として、eの総和[|を求める]
# '''
| 14.261905
| 55
| 0.604758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,134
| 0.867183
|
f013b73782802e7be9ad94ff6ab1e1a0a57d6410
| 1,224
|
py
|
Python
|
saleor/app/tests/test_models.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/app/tests/test_models.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/app/tests/test_models.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
from ...app.models import App
from ...webhook.event_types import WebhookEventType
def test_qs_for_event_type(payment_app):
    """A fully configured payment app is returned for PAYMENT_AUTHORIZE."""
    result = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
    assert len(result) == 1
    assert result[0] == payment_app
def test_qs_for_event_type_no_payment_permissions(payment_app):
    """An app stripped of its payment permission is filtered out."""
    payment_app.permissions.first().delete()
    result = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
    assert len(result) == 0
def test_qs_for_event_type_inactive_app(payment_app):
    """A deactivated app is filtered out."""
    payment_app.is_active = False
    payment_app.save()
    result = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
    assert len(result) == 0
def test_qs_for_event_type_no_webhook_event(payment_app):
    """An app whose webhook no longer subscribes to the event is filtered out."""
    hook = payment_app.webhooks.first()
    subscription = hook.events.filter(
        event_type=WebhookEventType.PAYMENT_AUTHORIZE
    ).first()
    subscription.delete()
    result = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
    assert len(result) == 0
def test_qs_for_event_type_inactive_webhook(payment_app):
    """An app whose webhook is disabled is filtered out."""
    hook = payment_app.webhooks.first()
    hook.is_active = False
    hook.save()
    result = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
    assert len(result) == 0
| 32.210526
| 88
| 0.768791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f01546244daef76f91454218d243e57cff9b2fef
| 113
|
py
|
Python
|
feast/DetectionModules/__init__.py
|
ChandlerKemp/FEAST_PtE
|
9551824932379149dd6bc9135cfac6edf60c40c8
|
[
"MIT"
] | 3
|
2020-04-21T18:59:01.000Z
|
2021-01-14T22:56:17.000Z
|
feast/DetectionModules/__init__.py
|
ChandlerKemp/FEAST_PtE
|
9551824932379149dd6bc9135cfac6edf60c40c8
|
[
"MIT"
] | null | null | null |
feast/DetectionModules/__init__.py
|
ChandlerKemp/FEAST_PtE
|
9551824932379149dd6bc9135cfac6edf60c40c8
|
[
"MIT"
] | null | null | null |
from . import null
from . import abstract_detection_method
from . import tech_detect
from . import tiered_detect
| 22.6
| 39
| 0.823009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f015bf5e2e71b04cd941a3ba7f14c687b44c2b00
| 263
|
py
|
Python
|
apps/transactions/__init__.py
|
lsdlab/djshop_toturial
|
6d450225cc05e6a1ecd161de2b522e1af0b68cc0
|
[
"MIT"
] | null | null | null |
apps/transactions/__init__.py
|
lsdlab/djshop_toturial
|
6d450225cc05e6a1ecd161de2b522e1af0b68cc0
|
[
"MIT"
] | 6
|
2020-06-07T15:18:58.000Z
|
2021-09-22T19:07:33.000Z
|
apps/transactions/__init__.py
|
lsdlab/djshop_toturial
|
6d450225cc05e6a1ecd161de2b522e1af0b68cc0
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TransactionsConfig(AppConfig):
    """Django app configuration for the transactions app."""
    name = 'apps.transactions'
    verbose_name = "Transactions"
    def ready(self):
        # Import for side effects: registers the app's signal handlers
        # once Django has finished loading the app registry.
        import apps.transactions.signals
# Points Django at the config class above.
# NOTE(review): presumably kept for pre-3.2 Django compatibility -- confirm.
default_app_config = 'apps.transactions.TransactionsConfig'
| 20.230769
| 59
| 0.752852
| 164
| 0.623574
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.269962
|
f01636a07a87cf93e98d3a0d5e5e79dd6e4913ce
| 1,260
|
py
|
Python
|
8/code.py
|
DeclanOGorman/AdventofCode2021
|
71a25327d5ab1f88124d09ec8ef853610cbff8ef
|
[
"MIT"
] | null | null | null |
8/code.py
|
DeclanOGorman/AdventofCode2021
|
71a25327d5ab1f88124d09ec8ef853610cbff8ef
|
[
"MIT"
] | null | null | null |
8/code.py
|
DeclanOGorman/AdventofCode2021
|
71a25327d5ab1f88124d09ec8ef853610cbff8ef
|
[
"MIT"
] | null | null | null |
# Parse each puzzle line "patterns | outputs" into two token lists.
# NOTE(review): `input` shadows the builtin of the same name.
with open('./8/input_a.txt', 'r') as f:
    input = [[a.strip().split(' | ')[0].split(' '), a.strip().split(' | ')[1].split(' ')] for a in f]
# Part A: digits 1, 4, 7, 8 are the only ones using 2, 4, 3 or 7 segments,
# so they can be counted from token length alone.
num = sum([sum([1 if len(a) in {2,3,4,7} else 0 for a in o[1]]) for o in input ])
print(f'Part A: Number of 1,4,7 or 8s in output - {num}')
def getoutput(i):
    """Deduce the scrambled-wiring digit patterns for one entry and return
    its decoded four-digit output value as an int.

    ``i`` is a pair: (ten unique signal patterns, four output patterns).
    """
    patterns, outputs = i[0], i[1]
    # Digits with a unique segment count are identified directly.
    one = next(set(p) for p in patterns if len(p) == 2)
    four = next(set(p) for p in patterns if len(p) == 4)
    seven = next(set(p) for p in patterns if len(p) == 3)
    eight = next(set(p) for p in patterns if len(p) == 7)
    # Six-segment digits: 9 contains 4; 0 contains 1 (and isn't 9); 6 is left.
    six_seg = [set(p) for p in patterns if len(p) == 6]
    nine = next(s for s in six_seg if four <= s)
    zero = next(s for s in six_seg if one <= s and s != nine)
    six = next(s for s in six_seg if s != nine and s != zero)
    # Five-segment digits: 3 contains 1; 5 contains (4 minus 1); 2 is left.
    five_seg = [set(p) for p in patterns if len(p) == 5]
    three = next(s for s in five_seg if one <= s)
    five = next(s for s in five_seg if (four - one) <= s and s != three)
    two = next(s for s in five_seg if s != three and s != five)
    decode = [zero, one, two, three, four, five, six, seven, eight, nine]
    return int(''.join(str(decode.index(set(o))) for o in outputs))
print(f'Part B: total output sum value - {sum([getoutput(a) for a in input])}')
| 57.272727
| 118
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.150794
|
f0172d0fc69d85a2da2f03f4a401ed701e820bb2
| 6,144
|
py
|
Python
|
pythonium/orders/galaxy.py
|
cacrespo/pythonium
|
74cc5d4333212adfb6eedade8fcd8dfe86d221d5
|
[
"MIT"
] | null | null | null |
pythonium/orders/galaxy.py
|
cacrespo/pythonium
|
74cc5d4333212adfb6eedade8fcd8dfe86d221d5
|
[
"MIT"
] | null | null | null |
pythonium/orders/galaxy.py
|
cacrespo/pythonium
|
74cc5d4333212adfb6eedade8fcd8dfe86d221d5
|
[
"MIT"
] | null | null | null |
import logging
from itertools import groupby
import attr
import numpy as np
from ..explosion import Explosion
from .core import GalaxyOrder
logger = logging.getLogger("game")
@attr.s()
class ProduceResources(GalaxyOrder):
    """Galaxy order that applies each occupied planet's per-turn resource
    deltas (happypoints, megacredits, pythonium, clans) and logs every
    non-zero change.

    Refactor: the original repeated the same read-delta/apply/log stanza
    four times; it is now a single data-driven loop with identical
    behavior, including log messages and `extra` keys.
    """

    name = "produce_resources"

    # (log message, planet attribute); the per-turn delta attribute is the
    # same name prefixed with "d" (e.g. planet.dclans -> planet.clans).
    _RESOURCES = (
        ("Happypoints change", "happypoints"),
        ("Megacredits change", "megacredits"),
        ("Pythonium change", "pythonium"),
        ("Population change", "clans"),
    )

    def execute(self) -> None:
        for planet in self.galaxy.get_ocuped_planets():
            self._produce_resources(planet)

    def _produce_resources(self, planet):
        # Order matters only in that it matches the original logging order.
        for message, resource in self._RESOURCES:
            delta = getattr(planet, "d" + resource)
            if not delta:
                continue
            setattr(planet, resource, getattr(planet, resource) + delta)
            logger.info(
                message,
                extra={
                    "turn": self.galaxy.turn,
                    "player": planet.player,
                    "planet": planet.id,
                    "d" + resource: delta,
                    resource: getattr(planet, resource),
                },
            )
@attr.s()
class ResolveShipsConflicts(GalaxyOrder):
    """Galaxy order that resolves ship-vs-ship battles.

    ``tenacity`` is the standard deviation of the randomized score each
    player draws; larger values make battle outcomes noisier.
    """
    name = "resolve_ships_conflicts"
    tenacity: float = attr.ib()
    def execute(self) -> None:
        # Resolve every conflict spot, then purge the losers' ships.
        ships_in_conflict = self.galaxy.get_ships_conflicts()
        for ships in ships_in_conflict:
            self._resolve_ships_conflicts(ships)
        self.galaxy.remove_destroyed_ships()
    def _compute_winner(self, ships, total_attack):
        """
        Due to the randomness of the fighting process, this method is not tested
        """
        # NOTE(review): itertools.groupby only groups *consecutive* equal
        # keys, so this assumes ``ships`` arrives sorted by player --
        # confirm upstream.
        groups = groupby(ships, lambda s: s.player)
        max_score = 0
        winner = None
        for player, player_ships in groups:
            player_attack = sum((s.attack for s in player_ships))
            attack_fraction = player_attack / total_attack
            # Compute score probability distribution
            shape = 100 * attack_fraction
            score = np.random.normal(shape, self.tenacity)
            logger.info(
                "Score in conflict",
                extra={
                    "turn": self.galaxy.turn,
                    "player": player,
                    "player_attack": player_attack,
                    "attack_fraction": attack_fraction,
                    "score": score,
                },
            )
            if score > max_score:
                winner = player
                max_score = score
        logger.info(
            "Conflict resolved",
            extra={
                "turn": self.galaxy.turn,
                "winner": winner,
                "max_score": max_score,
                "total_attack": total_attack,
                "total_ships": len(ships),
            },
        )
        return winner
    def _resolve_ships_conflicts(self, ships):
        """Determine the battle's winner and destroy everyone else's ships."""
        total_attack = sum(s.attack for s in ships)
        winner = self._compute_winner(ships, total_attack)
        # Destroy defeated ships
        for ship in ships:
            if ship.player == winner:
                continue
            logger.info(
                "Explosion",
                extra={
                    "turn": self.galaxy.turn,
                    "player": ship.player,
                    "ship": ship.id,
                    "ship_type": ship.type.name,
                    "position": ship.position,
                },
            )
            self.galaxy.explosions.append(
                Explosion(
                    ship=ship,
                    ships_involved=len(ships),
                    total_attack=total_attack,
                )
            )
@attr.s()
class ResolvePlanetsConflicts(GalaxyOrder):
    """Galaxy order that lets a single surviving attacker conquer a planet.

    Expects ship-vs-ship conflicts to have been resolved already, so at most
    one enemy player remains in orbit.
    """
    name = "resolve_planets_conflicts"
    def execute(self) -> None:
        planets_in_conflict = self.galaxy.get_planets_conflicts()
        for planet, ships in planets_in_conflict:
            # Ships with no attack power cannot conquer anything.
            if not sum((s.attack for s in ships)):
                continue
            self._resolve_planets_conflicts(planet, ships)
    def _resolve_planets_conflicts(self, planet, ships):
        # Every orbiting player other than the planet's owner.
        enemies = {s.player for s in ships if s.player != planet.player}
        if not enemies:
            raise ValueError(
                "Ok, I don't know what's going on. This is not a conflict."
            )
        if len(enemies) != 1:
            raise ValueError(
                "Run :meth:`resolve_ships_to_ship_conflict` first"
            )
        winner = enemies.pop()
        # If is not of his own, the winner conquer the planet.
        if planet.player != winner:
            logger.info(
                "Planet conquered by force",
                extra={
                    "turn": self.galaxy.turn,
                    "player": winner,
                    "planet": planet.id,
                    "clans": planet.clans,
                },
            )
            # Conquest resets the planet: population drops to one clan and
            # all infrastructure/taxes are wiped.
            planet.player = winner
            planet.clans = 1
            planet.mines = 0
            planet.taxes = 0
| 30.415842
| 80
| 0.495605
| 5,927
| 0.964681
| 0
| 0
| 5,957
| 0.969564
| 0
| 0
| 908
| 0.147786
|
f0178f93e06a5ab22b51ea951cf67bdba0d3c339
| 59
|
py
|
Python
|
pdip/processing/factories/__init__.py
|
ahmetcagriakca/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | 2
|
2021-12-09T21:07:46.000Z
|
2021-12-11T22:18:01.000Z
|
pdip/processing/factories/__init__.py
|
PythonDataIntegrator/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | null | null | null |
pdip/processing/factories/__init__.py
|
PythonDataIntegrator/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | 3
|
2021-11-15T00:47:00.000Z
|
2021-12-17T11:35:45.000Z
|
from .process_manager_factory import ProcessManagerFactory
| 29.5
| 58
| 0.915254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f01853fdef99763aa76db241019fe3f05895618d
| 4,221
|
py
|
Python
|
assets/src/ba_data/python/ba/_analytics.py
|
SahandAslani/ballistica
|
7e3814cd2a1920ea8f5820cb1cdbb4dc5420d30e
|
[
"MIT"
] | 2
|
2020-07-02T22:18:58.000Z
|
2020-07-02T22:19:49.000Z
|
assets/src/ba_data/python/ba/_analytics.py
|
MalTarDesigns/ballistica
|
c38ae5c39b3cc7985be166a959245ca060d3bf31
|
[
"MIT"
] | null | null | null |
assets/src/ba_data/python/ba/_analytics.py
|
MalTarDesigns/ballistica
|
c38ae5c39b3cc7985be166a959245ca060d3bf31
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to analytics."""
from __future__ import annotations
from typing import TYPE_CHECKING
import _ba
if TYPE_CHECKING:
pass
def game_begin_analytics() -> None:
    """Update analytics events for the start of a game."""
    # pylint: disable=too-many-branches
    # pylint: disable=cyclic-import
    # Imports are local to avoid circular imports at module load time.
    from ba._dualteamsession import DualTeamSession
    from ba._freeforallsession import FreeForAllSession
    from ba._coopsession import CoopSession
    from ba._gameactivity import GameActivity
    activity = _ba.getactivity(False)
    session = _ba.getsession(False)
    # Fail gracefully if we didn't cleanly get a session and game activity.
    if not activity or not session or not isinstance(activity, GameActivity):
        return
    # Each session type sets its own screen name and buckets the human
    # player count into coarse analytics bins.
    if isinstance(session, CoopSession):
        campaign = session.campaign
        assert campaign is not None
        _ba.set_analytics_screen(
            'Coop Game: ' + campaign.name + ' ' +
            campaign.getlevel(_ba.app.coop_session_args['level']).name)
        _ba.increment_analytics_count('Co-op round start')
        if len(activity.players) == 1:
            _ba.increment_analytics_count('Co-op round start 1 human player')
        elif len(activity.players) == 2:
            _ba.increment_analytics_count('Co-op round start 2 human players')
        elif len(activity.players) == 3:
            _ba.increment_analytics_count('Co-op round start 3 human players')
        elif len(activity.players) >= 4:
            _ba.increment_analytics_count('Co-op round start 4+ human players')
    elif isinstance(session, DualTeamSession):
        _ba.set_analytics_screen('Teams Game: ' + activity.getname())
        _ba.increment_analytics_count('Teams round start')
        if len(activity.players) == 1:
            _ba.increment_analytics_count('Teams round start 1 human player')
        elif 1 < len(activity.players) < 8:
            _ba.increment_analytics_count('Teams round start ' +
                                          str(len(activity.players)) +
                                          ' human players')
        elif len(activity.players) >= 8:
            _ba.increment_analytics_count('Teams round start 8+ human players')
    elif isinstance(session, FreeForAllSession):
        _ba.set_analytics_screen('FreeForAll Game: ' + activity.getname())
        _ba.increment_analytics_count('Free-for-all round start')
        if len(activity.players) == 1:
            _ba.increment_analytics_count(
                'Free-for-all round start 1 human player')
        elif 1 < len(activity.players) < 8:
            _ba.increment_analytics_count('Free-for-all round start ' +
                                          str(len(activity.players)) +
                                          ' human players')
        elif len(activity.players) >= 8:
            _ba.increment_analytics_count(
                'Free-for-all round start 8+ human players')
    # For some analytics tracking on the c layer.
    _ba.reset_game_activity_tracking()
| 45.880435
| 79
| 0.664298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,928
| 0.456764
|
f01944d27e76d31f7d24bb6d6aee8d5e5c5f6995
| 10,940
|
py
|
Python
|
todo_app/display.py
|
WeaverDyl/python-todo
|
80c533b79c6170ba9ba4923ba78f4900fece8339
|
[
"MIT"
] | 3
|
2020-01-16T09:39:11.000Z
|
2021-11-15T08:38:52.000Z
|
todo_app/display.py
|
WeaverDyl/python-todo
|
80c533b79c6170ba9ba4923ba78f4900fece8339
|
[
"MIT"
] | null | null | null |
todo_app/display.py
|
WeaverDyl/python-todo
|
80c533b79c6170ba9ba4923ba78f4900fece8339
|
[
"MIT"
] | null | null | null |
import os
import math
import shutil
import textwrap
from datetime import datetime
from terminaltables import AsciiTable
class Display:
    """Terminal front-end for the todo app: colored messages, ASCII task
    tables, and interactive prompts.

    Fixes vs. original: removed an extraneous ``f`` prefix on the
    placeholder-free ``'just now'`` literal and simplified
    ``check_table_fit`` to a single boolean expression.
    """

    def __init__(self):
        # ANSI escape sequences keyed by color/effect name.
        self.colors = {
            'RED': '\033[38;5;196m',
            'ORANGE': '\033[38;5;220m',
            'GREEN': '\033[38;5;46m',
            'BOLD': '\u001b[1m',
            'UNDERLINE': '\u001b[4m',
            'RESET': '\033[0m'
        }
        # Defines where to insert newlines in case of
        # situations where one task has some long columns
        self.max_col_widths = {
            'ID': 4,
            'Added': 10,
            'Title': 30,
            'Description': 30,
            'Due': 10,
            'Finished': 1
        }

    def color_message(self, message, *args):
        """ Sets a message to be a specific color from the colors dict before resetting """
        args_list = [str(color) for color in args]
        colors = ''.join([self.colors[i] for i in args_list])
        return ''.join([colors, message, self.colors['RESET']])

    def print_error(self, message):
        """ Prints a message in bold, red characters """
        print(self.color_message(message, 'BOLD', 'RED'))

    def print_success(self, message):
        """ Prints a message in bold, green characters """
        print(self.color_message(message, 'BOLD', 'GREEN'))

    def print_message(self, message):
        """ Prints a message in bold characters """
        print(self.color_message(message, 'BOLD'))

    @staticmethod
    def clear_terminal():
        """ Clears a terminal to prepare for output """
        os.system('cls' if os.name == 'nt' else 'clear')

    def print_welcome(self):
        """ Prints a simple welcome message. """
        Display.clear_terminal()
        self.print_message('Welcome to python-todo!\n')

    def print_commands(self):
        """ Prints a list of available commands to run the program with.
            Shown when the user has an empty task list """
        commands = [[self.color_message(i, 'BOLD') for i in ['Commands', 'Description']],
                    ['-a/--add', 'Add a new element to a task list'],
                    ['-r/--remove', 'Remove an element from a task list'],
                    ['-f/--finish', 'Finish a task in a task list'],
                    ['-u/--unfinish', 'Unfinish a task in a task list'],
                    ['-c/--change', 'Change parts of an existing task'],
                    ['-v/--view', 'View the whole task list']]
        table_data = commands
        table = AsciiTable(table_data)
        table.inner_row_border = True
        if not self.check_table_fit(table):
            # Table too wide for the terminal: fall back to a plain hint.
            self.print_message('Try adding a task to your list! just call `python-todo -a`')
        else:
            self.print_message('Try adding a task to your list! Here\'s the available commands:')
            print(table.table)

    @staticmethod
    def check_table_fit(table):
        """ Returns true if a terminaltable will fit within the width of
            the current terminal """
        return table.table_width <= shutil.get_terminal_size().columns

    def format_row(self, tasks):
        """ Performs formatting tasks such as changing task completions from (0,1) to (X/✓) """
        formatted_tasks = []
        for task in tasks:
            # Format specific columns
            title = task['Title']
            description = task['Description']
            timestamp = task['Added']
            finished = task['Finished?']
            due = task['Due']
            formatted_timestamp = self.format_time(timestamp)
            formatted_finished = self.color_message('✓', 'GREEN', 'BOLD') if finished == 1 else self.color_message('X', 'BOLD', 'RED')
            formatted_due = self.format_due_date(due, finished)
            # Wrap long lines in the title or description
            formatted_title = self.format_long_lines(title, 'Title')
            formatted_description = self.format_long_lines(description, 'Description')
            task['Title'] = formatted_title
            task['Description'] = formatted_description
            task['Added'] = formatted_timestamp
            task['Finished?'] = formatted_finished
            task['Due'] = formatted_due
            formatted_tasks.append(task)
        return formatted_tasks

    @staticmethod
    def format_time(timestamp):
        """ Returns a nice timestamp telling the user how old a task is.
            Returns strings such as '1d ago' """
        timestamp_datetime = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')
        curr_time = datetime.now()
        total_time_diff = curr_time - timestamp_datetime
        # Time Constants
        SECONDS_IN_MIN = 60
        SECONDS_IN_HOUR = 3600
        SECONDS_IN_DAY = 86400
        SECONDS_IN_WEEK = 604800
        SECONDS_IN_MONTH = 2592000
        SECONDS_IN_YEAR = 31536000
        # Print out formatted time difference
        if total_time_diff.total_seconds() < 10:
            return 'just now'
        if total_time_diff.total_seconds() < SECONDS_IN_MIN:
            seconds_passed = math.floor(total_time_diff.total_seconds())
            return f'{seconds_passed}s ago'
        if total_time_diff.total_seconds() < SECONDS_IN_HOUR:
            minutes_passed = math.floor(total_time_diff.total_seconds() / SECONDS_IN_MIN)
            return f'{minutes_passed}m ago'
        if total_time_diff.total_seconds() < SECONDS_IN_DAY:
            hours_passed = math.floor(total_time_diff.total_seconds() / SECONDS_IN_HOUR)
            return f'{hours_passed}h ago'
        if total_time_diff.total_seconds() < SECONDS_IN_WEEK:
            days_passed = math.floor(total_time_diff.total_seconds() / SECONDS_IN_DAY)
            return f'{days_passed}d ago'
        if total_time_diff.total_seconds() < SECONDS_IN_MONTH:
            weeks_passed = math.floor(total_time_diff.total_seconds() / SECONDS_IN_WEEK)
            return f'{weeks_passed}w ago'
        if total_time_diff.total_seconds() < SECONDS_IN_YEAR:
            months_passed = math.floor(total_time_diff.total_seconds() / SECONDS_IN_MONTH)
            return f'{months_passed}mo ago'
        years_passed = math.floor(total_time_diff.total_seconds() / SECONDS_IN_YEAR)
        return f'{years_passed}yr ago'

    @staticmethod
    def validate_date(date_str):
        """ Ensures that the date given is in an acceptable format """
        for date_format in ('%m/%d/%Y', '%m-%d-%Y'):
            try:
                if datetime.strptime(date_str, date_format):
                    return True
            except ValueError:
                pass
        return False

    def format_due_date(self, due_date, finished):
        """ Formats the due date column to be colored based on how close
            the task is to its due date. (Red = overdue, etc...)"""
        # Don't format tasks that don't have a due date or are finished
        if due_date == '' or finished == 1:
            return due_date
        curr_time = datetime.now()
        try:
            due_date_time = datetime.strptime(due_date, '%m-%d-%Y')
        except ValueError:
            due_date_time = datetime.strptime(due_date, '%m/%d/%Y')
        time_until_due = due_date_time - curr_time
        SECONDS_IN_DAY = 86400
        # Overdue tasks are colored red
        if int(time_until_due.total_seconds()) < 0:
            return self.color_message(due_date, 'RED', 'BOLD')
        # Tasks due in 24 hours or less are colored orange
        if int(time_until_due.total_seconds()) < SECONDS_IN_DAY:
            return self.color_message(due_date, 'ORANGE', 'BOLD')
        return due_date

    def format_long_lines(self, long_text, element):
        """ Wraps a long column value at the column's configured max width """
        wrapper = textwrap.TextWrapper(width=self.max_col_widths[element])
        return '\n'.join(wrapper.wrap(text=long_text))

    def print_task_list_formatted(self, rows):
        """ Prints each formatted task to the terminal in the form
            of a table """
        header = [self.color_message(i, 'BOLD') for i in ['ID', 'Added', 'Title', 'Description', 'Due', 'Finished?']]
        table_data = [task.values() for task in rows]
        table_data.insert(0, header)  # The column headers are the first element of the list
        table = AsciiTable(table_data)  # Create the table -- but test width before printing
        table.inner_row_border = True  # Separates each task
        if not self.check_table_fit(table):
            max_width_table = table.table_width
            term_width = shutil.get_terminal_size().columns
            self.print_message(f'The task list has a width of {max_width_table} and cannot fit in the terminal of width {term_width}.')
            return
        # The table fits and we can print it
        self.print_message('Here are your current tasks:')
        print(table.table)

    # Methods for ADDING tasks
    def ask_user_title(self):
        """ Asks the user for the title of the task """
        title = ''
        while title == '':
            title = input(self.color_message('Give your task a name: ', 'BOLD'))
            if title == '':
                self.print_error('The title can\'t be an empty string!')
        return title

    def ask_user_description(self):
        """ Gets an optional description from the user """
        description = input(self.color_message('Optionally, give your task a description: ', 'BOLD'))
        return description

    def ask_user_due(self):
        """ Gets an optional due date for the task from the user """
        date = ''
        asked = False
        while not asked or not self.validate_date(date):
            date = input(self.color_message('Optionally, give your task a due date (\'mm/dd/yyyy\' or \'mm-dd-yyyy\'): ', 'BOLD'))
            asked = True
            if date == '':
                return date
            if not self.validate_date(date):
                self.print_error('That\'s not a valid date format!')
        return date

    def ask_user_finished(self):
        """ Asks a user if a task is finished """
        valid_responses = {
            'yes': True,
            'y': True,
            'no': False,
            'n': False
        }
        default_resp = False
        while True:
            user_resp = input(self.color_message('Is the task already finished? (y/N): ', 'BOLD')).lower()
            if user_resp in valid_responses:
                return valid_responses[user_resp]
            if user_resp == '':
                return default_resp
            self.print_error('That\'s not a valid answer! Answer (y/N).')

    def ask_user_id(self, action):
        """ Ask the user for a task ID to remove/finish/unfinish/update """
        row_id = input(self.color_message(f'What task would you like to {action}? (Enter an ID or `-1` to cancel): ', 'BOLD'))
        return row_id
| 39.927007
| 135
| 0.599543
| 10,822
| 0.988852
| 0
| 0
| 2,798
| 0.255665
| 0
| 0
| 3,524
| 0.322003
|
f019487c4d2bfcb30f0598d1b5c51468e7c7807d
| 797
|
py
|
Python
|
linked_list/adding_nodes_value/test.py
|
Shawn-Ng/algorithms-test
|
1ca740d288b9b3fee580f1ac557a1c1b17ea33b1
|
[
"BSD-2-Clause"
] | null | null | null |
linked_list/adding_nodes_value/test.py
|
Shawn-Ng/algorithms-test
|
1ca740d288b9b3fee580f1ac557a1c1b17ea33b1
|
[
"BSD-2-Clause"
] | 1
|
2018-01-12T18:56:58.000Z
|
2018-01-13T01:14:51.000Z
|
linked_list/adding_nodes_value/test.py
|
Shawn-Ng/algorithms
|
1ca740d288b9b3fee580f1ac557a1c1b17ea33b1
|
[
"BSD-2-Clause"
] | null | null | null |
class Node:
    """A singly linked list node holding one decimal digit."""

    def __init__(self, data):
        self.data = data
        self.next = None


def sumLinkedListNodes(list1, list2):
    """Interpret each list as a number (most-significant digit first), add
    them, and return ``{"head": Node}`` for the digits of the sum."""

    def chain_to_int(node):
        # Concatenate the digits along the chain, then parse as one integer.
        text = ''
        while node:
            text += str(node.data)
            node = node.next
        return int(text)

    total_digits = str(chain_to_int(list1) + chain_to_int(list2))
    head = Node(int(total_digits[0]))
    tail = head
    for ch in total_digits[1:]:
        tail.next = Node(int(ch))
        tail = tail.next
    # Preserve the original contract: the result is wrapped in a dict.
    return {"head": head}
def _build_list(digits):
    # Chain the digits into Nodes, head first.
    head = Node(digits[0])
    cur = head
    for d in digits[1:]:
        cur.next = Node(d)
        cur = cur.next
    return head

list1 = _build_list([5, 6, 3])  # 563
list2 = _build_list([8, 4, 2])  # 842
# Result (1405) is discarded, exactly as in the original script.
sumLinkedListNodes(list1, list2)
| 18.97619
| 46
| 0.599749
| 91
| 0.114178
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.020075
|
f0199c2ddd6cf1a82c3279d8fee04fa2d5d2f015
| 3,674
|
py
|
Python
|
env2048.py
|
qhduan/rl-2048
|
9730d366625ac7ffdd8875586ffbb8615468f110
|
[
"MIT"
] | 3
|
2022-02-10T02:19:58.000Z
|
2022-03-06T14:39:20.000Z
|
env2048.py
|
qhduan/rl-2048
|
9730d366625ac7ffdd8875586ffbb8615468f110
|
[
"MIT"
] | null | null | null |
env2048.py
|
qhduan/rl-2048
|
9730d366625ac7ffdd8875586ffbb8615468f110
|
[
"MIT"
] | null | null | null |
import logic
import numpy as np
import gym
# Maps the discrete gym action index to the move name understood by the
# `logic` module.
ACTION_MAP = {
    0: 'up',
    1: 'down',
    2: 'left',
    3: 'right'
}
class Env2048(gym.Env):
    """Gym environment wrapping the 2048 puzzle from the `logic` module.

    Observations are one-hot encoded boards of shape (n, n, 2**n), scaled
    to {0, 255} uint8. Actions are the four moves (up/down/left/right),
    given either as an int index into ACTION_MAP or as the string itself.
    """

    metadata = {'render.modes': ['human']}

    def __init__(self, n=4, max_idle=100, seed=None):
        """
        Args:
            n: board side length (the classic game uses 4).
            max_idle: episode aborts after this many steps without a
                board-changing move.
            seed: optional RNG seed for reproducibility.
        """
        super(Env2048, self).__init__()
        self.n = n
        self.max_idle = max_idle
        self.action_map = ACTION_MAP
        # up, down, left, right
        self.action_space = gym.spaces.Discrete(4)
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(self.n, self.n, 2 ** n), dtype=np.uint8)
        self.eye = np.eye(2 ** n)  # lookup table for one-hot encoding
        self.reward_range = (float('-inf'), float('inf'))
        if seed is not None:
            self.seed(seed)

    def seed(self, seed):
        """Seed numpy's global RNG (tile placement in `logic` presumably
        uses it — confirm against the logic module)."""
        np.random.seed(seed)

    def reset(self):
        """Start a fresh game and return the initial observation."""
        self.matrix = logic.new_game(self.n)
        self.reward_i = self.i = 0
        self.total_reward = 0
        return self.obs

    @property
    def obs(self):
        """One-hot encoded board as an (n, n, 2**n) uint8 array in {0, 255}."""
        m = np.array(self.matrix)
        m = np.clip(m, 1, float('inf'))  # from 0, 2, 4, 8, ... to 1, 2, 4, 8
        m = np.log2(m).astype(np.int64)  # from 1, 2, 4, ..., 2048 to 0, 1, 2, ..., 11
        m = self.eye[m]
        m = m * 255
        m = m.astype(np.uint8)
        return m

    def step(self, action):
        """Apply one move and return (obs, reward, done, info).

        BUG FIX: the original used two independent `if` statements when
        normalizing `action`, so a *valid string* action fell through to the
        error branch and hit a bare `raise` (RuntimeError). The int check is
        now an `elif`, and the error branch raises a proper TypeError.
        """
        if isinstance(action, str) and action in ('up', 'down', 'left', 'right'):
            pass  # already a direction name
        elif isinstance(action, (int, np.int64, np.int32)):
            action = self.action_map[int(action)]
        else:
            raise TypeError('unsupported action: {!r}'.format(action))
        old_score = np.sort(np.array(self.matrix).flatten())[::-1]
        old_matrix = str(self.matrix)
        if action == 'up':
            self.matrix, updated = logic.up(self.matrix)
        elif action == 'down':
            self.matrix, updated = logic.down(self.matrix)
        elif action == 'left':
            self.matrix, updated = logic.left(self.matrix)
        elif action == 'right':
            self.matrix, updated = logic.right(self.matrix)
        new_matrix = str(self.matrix)
        new_score = np.sort(np.array(self.matrix).flatten())[::-1]
        # Reward: summed growth of tiles that did not shrink, scaled by 4.
        reward = np.sum((new_score - old_score) * (new_score >= old_score)) * 4
        reward = float(reward)
        self.total_reward += reward
        self.i += 1
        if updated:  # the board changed, so spawn a new tile
            self.matrix = logic.add_two(self.matrix)
            if logic.game_state(self.matrix) == 'win':
                print('you win')
                return self.obs, 10000.0, True, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
            elif logic.game_state(self.matrix) == 'lose':
                return self.obs, 100.0, True, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
        idle = old_matrix == new_matrix
        if idle:
            reward = -1  # discourage moves that change nothing
        else:
            self.reward_i = self.i  # remember the last productive step
        if self.i - self.reward_i > self.max_idle:
            # Too many fruitless steps in a row: abort the episode.
            return self.obs, -100, True, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
        return self.obs, reward, False, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}

    def render(self, mode='human'):
        """Rendering is a no-op; inspect `self.matrix` directly instead."""
        pass

    def close(self):
        pass
def main():
    """Smoke-test the environment by playing up to 1000 random moves."""
    env = Env2048()
    print(env.reset())
    for _ in range(1000):
        move = np.random.choice(['right', 'left', 'up', 'down'])
        obs, reward, done, info = env.step(move)
        print(obs)
        print(reward, done, info)
        if done:
            break


if __name__ == '__main__':
    main()
| 29.15873
| 107
| 0.531301
| 3,238
| 0.879891
| 0
| 0
| 342
| 0.092935
| 0
| 0
| 365
| 0.099185
|
f019f56e66a32402b7c9862f91bbe2284661cc13
| 1,697
|
py
|
Python
|
users/views.py
|
Paulwamaria/instagram
|
546c5472bbebd868e647fd600519a91ccfc47054
|
[
"MIT"
] | null | null | null |
users/views.py
|
Paulwamaria/instagram
|
546c5472bbebd868e647fd600519a91ccfc47054
|
[
"MIT"
] | 4
|
2020-06-05T23:46:45.000Z
|
2021-06-10T19:06:27.000Z
|
users/views.py
|
Paulwamaria/instagram
|
546c5472bbebd868e647fd600519a91ccfc47054
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,redirect
from django.contrib.auth.decorators import login_required
from .forms import InstaRegistrationForm, UserUpdateForm, ProfileUpdateForm
from django.views.generic import DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from .models import Profile
def register(request):
    """Handle user sign-up.

    GET renders an empty registration form; POST validates it, creates the
    account, flashes a success message and redirects to 'home'.
    """
    if request.method == 'POST':
        form = InstaRegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            # Only the username is needed for the flash message. The original
            # also extracted email/fullName/password1/password2 into unused
            # locals; those dead reads are removed.
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account created for {username}')
            return redirect('home')
    else:
        form = InstaRegistrationForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Show and update the logged-in user's account and profile.

    POST saves both the user form and the profile form when both validate;
    GET (or an invalid POST) re-renders the forms.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        # BUG FIX: `u_form.is_valid` was referenced without calling it. A bound
        # method is always truthy, so an invalid user form was saved anyway.
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form,
    }
    return render(request, 'users/profile.html', context)
class ProfileDetailView(LoginRequiredMixin, DetailView):
    # Read-only detail page for a single Profile; requires an authenticated user.
    model = Profile
| 33.27451
| 94
| 0.675899
| 76
| 0.044785
| 0
| 0
| 618
| 0.364172
| 0
| 0
| 163
| 0.096052
|
f01a75f5202b2a67529c1984f10926191041214e
| 9,865
|
py
|
Python
|
1D_CNN.py
|
alex386/EEGPatternRecognition
|
d84085880baa9172a7cfd73b2737b93472394f3e
|
[
"MIT"
] | null | null | null |
1D_CNN.py
|
alex386/EEGPatternRecognition
|
d84085880baa9172a7cfd73b2737b93472394f3e
|
[
"MIT"
] | null | null | null |
1D_CNN.py
|
alex386/EEGPatternRecognition
|
d84085880baa9172a7cfd73b2737b93472394f3e
|
[
"MIT"
] | 1
|
2019-02-25T18:24:37.000Z
|
2019-02-25T18:24:37.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 12:55:47 2018
@name: CSVMachLearn.py
@description: 1D CNN using CSV vector for machine learning
@author: Aleksander Dawid
"""
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from sklearn.decomposition import PCA
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow import set_random_seed
tf.enable_eager_execution()
set_random_seed(0)
nrds='S0'  # subject/session identifier embedded in every data path below
#==============================================================================
# Global parameters
#==============================================================================
# NOTE(review): hard-coded Windows paths — adjust per machine before running.
total_dataset_fp="D:\\AI_experiments\\CSV\\"+nrds+"\\DAT"+nrds+".csv"
pathlog="D:\\AI_experiments\\CSV\\"+nrds+"\\"+nrds+"pub.log"
pathimg="D:\\AI_experiments\\CSV\\"+nrds+"\\IMG"
num_epochs = 1001 # number of epochs
lrate=2e-5 # learning rate
test_procent=0.2 # procentage of test_dataset
learn_batch_size=32 # batch size
print("Local copy of the dataset file: {}".format(total_dataset_fp))
print("TensorFlow version: {}".format(tf.VERSION))
print("Eager execution: {}".format(tf.executing_eagerly()))
#==============================================================================
# Methods
#==============================================================================
def ChangeBatchSize(dataset, bsize):
    """Re-batch `dataset` into batches of `bsize` by flattening it first."""
    flat = dataset.apply(tf.data.experimental.unbatch())
    return flat.batch(batch_size=bsize)
def pack_features_vector(features, labels):
    """Pack the per-column feature dict into a single 2-D tensor."""
    packed = tf.stack(list(features.values()), axis=1)
    return packed, labels
# Read the CSV header to learn how many feature columns each row carries.
with open(total_dataset_fp) as f:
    content = f.readlines()
grup=content[0].split(',')
print(grup[1])
f_size=int(grup[1])-1 #number of points in data vector
print("Vector size: "+str(f_size))
# Convolution stack hyper-parameters: filter counts / kernel sizes per layer,
# consumed by create_model() below.
filtr1=32
filtr_size1=5
filtr2=32
filtr_size2=5
filtr3=64
filtr_size3=5
filtr4=64
filtr_size4=4
DenseLast=4096
# NOTE(review): filtr5/filtr_size5 are defined but not referenced anywhere
# in this file — possibly leftovers from an experiment.
filtr5=512
filtr_size5=5
def create_model():
    """Build and compile the 1-D CNN.

    Architecture: four Conv1D+MaxPool stages, a global max-pool, two dense
    layers and a 3-way logit output (one logit per class in class_names).
    """
    model = tf.keras.models.Sequential([
    tf.keras.layers.Reshape((f_size,1), input_shape=(None,f_size),name='x'),
    tf.keras.layers.Conv1D(filters=filtr1,kernel_size=filtr_size1,strides=1, kernel_initializer='random_uniform',activation=tf.nn.relu,padding='same',name='Conv1'),
    tf.keras.layers.MaxPooling1D(pool_size=filtr_size1, strides=2, padding='same', name='pool1'),
    tf.keras.layers.Conv1D(filters=filtr2,kernel_size=filtr_size2,strides=1, padding='same',name='Conv2',activation=tf.nn.relu, kernel_initializer='random_uniform'),
    tf.keras.layers.MaxPooling1D(pool_size=filtr_size2, strides=2, padding='same', name='pool2'),
    tf.keras.layers.Conv1D(filters=filtr3,kernel_size=filtr_size3,strides=1, padding='same',name='Conv3',activation=tf.nn.relu, kernel_initializer='random_uniform'),
    tf.keras.layers.MaxPooling1D(pool_size=filtr_size3, strides=2, padding='same', name='pool3'),
    tf.keras.layers.Conv1D(filters=filtr4,kernel_size=filtr_size4,strides=1, padding='same',name='Conv4',activation=tf.nn.relu, kernel_initializer='random_uniform'),
    tf.keras.layers.MaxPooling1D(pool_size=filtr_size4, strides=2, padding='same', name='pool4'),
    tf.keras.layers.GlobalMaxPool1D(), #size of last filter
    tf.keras.layers.Dense(DenseLast, activation=tf.nn.relu,name='fir'),  # input shape required
    tf.keras.layers.Dense(256, activation=tf.nn.relu,name='mod_up'),
    tf.keras.layers.Dense(3,name='y_pred'), #output layer
    ])
    # Compiled here for completeness; the training loop below uses a manual
    # GradientTape optimizer rather than model.fit().
    model.compile(optimizer=tf.train.AdamOptimizer(),
              loss=tf.keras.losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
    return model
def loss(model, x, y):
    """Sparse softmax cross-entropy between labels `y` and the model logits."""
    logits = model(x)
    return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)
def grad(model, inputs, targets):
    """Return the loss and its gradients w.r.t. the model's trainable variables."""
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets)
    return loss_value, tape.gradient(loss_value, model.trainable_variables)
mapcolor=['red','green','blue']  # scatter-plot color per class index
# column order in CSV file
column_names = []
for a in range(0,f_size):
    column_names.append(str(a))
column_names.append('signal')  # last column holds the class label
print(len(column_names))
feature_names = column_names[:-1]
label_name = column_names[-1]
#class_names = ['Left','Right','NONE']
class_names = ['LIP','JAW','NONE']
# One huge batch so the whole CSV is pulled in a single iterator step below.
batch_size = 200000
#train_dataset = tf.data.experimental.make_csv_dataset(
#    total_dataset_fp,
#    batch_size,
#    column_names=column_names,
#    label_name=label_name,
#    num_epochs=1,
#    shuffle=False)
#train_dataset = train_dataset.map(pack_features_vector)
total_dataset = tf.data.experimental.make_csv_dataset(
    total_dataset_fp,
    batch_size,
    column_names=column_names,
    label_name=label_name,
    num_epochs=1,
    shuffle=True)
# Peek at the first (only) batch just to learn the total sample count.
features, labels = next(iter(total_dataset))
setsize=float(str(labels.shape[0]))
ts_size=setsize*test_procent
tr_size=setsize-ts_size
print("Total_CSV_size: "+str(setsize) )
print("Train_size: "+str(tr_size) )
print("Test_size: "+str(ts_size) )
total_dataset = total_dataset.map(pack_features_vector)
# Re-batching to tr_size yields two batches: train first, then the remainder.
total_dataset=ChangeBatchSize(total_dataset,tr_size)
#==============================================================================
#Split dataset into train_dataset and test_dataset.
#==============================================================================
# First batch -> (k1, l1) = training split; any later batch -> (k2, l2) = test
# split. NOTE(review): assumes exactly two batches come out of the re-batching.
i=0
for (parts, labels) in total_dataset:
    if(i==0):
        k1 = parts
        l1 = labels
    else:
        k2 = parts
        l2 = labels
    i=i+1
train_dataset = tf.data.Dataset.from_tensors((k1, l1))
train_dataset = ChangeBatchSize(train_dataset,learn_batch_size)
test_dataset = tf.data.Dataset.from_tensors((k2, l2))
test_dataset = ChangeBatchSize(test_dataset,ts_size)
#==============================================================================
# Create model object
#==============================================================================
model=create_model()
model.summary()
optimizer = tf.train.AdamOptimizer(learning_rate=lrate)
global_step = tf.train.get_or_create_global_step()
# Legend proxies for the PCA scatter plots saved during evaluation.
legend_elements = [Line2D([0], [0], marker='o', color='w', label=class_names[0],markerfacecolor='r', markersize=10),
                   Line2D([0], [0], marker='o', color='w', label=class_names[1],markerfacecolor='g', markersize=10),
                   Line2D([0], [0], marker='o', color='w', label=class_names[2],markerfacecolor='b', markersize=10)]
# keep results for plotting
train_loss_results = []
train_accuracy_results = []
np.set_printoptions(threshold=np.nan)
#==============================================================================
# Make machine learning process
#==============================================================================
old_loss=1000  # sentinel: first epoch always "improves"
for epoch in range(num_epochs):
    epoch_loss_avg = tfe.metrics.Mean()
    epoch_accuracy = tfe.metrics.Accuracy()
    # Training loop - using batches of 32
    for x, y in train_dataset:
        # Optimize the model
        #print(str(type(x)))
        #print(str(x.shape))
        loss_value, grads = grad(model, x, y)
        optimizer.apply_gradients(zip(grads, model.variables),
                                  global_step)
        # Track progress
        epoch_loss_avg(loss_value)  # add current batch loss
        # compare predicted label to actual label
        epoch_accuracy(tf.argmax(model(x), axis=1, output_type=tf.int32), y)
    # end epoch
    train_loss_results.append(epoch_loss_avg.result())
    train_accuracy_results.append(epoch_accuracy.result())
    # Every 5th epoch: evaluate on the test split, save a PCA scatter plot of
    # the logits, log to file, and stop early if the loss got worse.
    if epoch % 5 == 0:
        test_accuracy = tfe.metrics.Accuracy()
        for (x, y) in test_dataset:
            logits = model(x)
            prediction = tf.argmax(logits, axis=1, output_type=tf.int32)
            test_accuracy(prediction, y)
        # X/Y come from the *last* test batch only (one batch in practice).
        X=logits.numpy()
        Y=y.numpy()
        # NOTE(review): this bare PCA(...) call discards its result — likely a
        # leftover; the fit_transform on the next line does the actual work.
        PCA(copy=True, iterated_power='auto', n_components=2, random_state=None, svd_solver='auto', tol=0.0, whiten=False)
        X = PCA(n_components=2).fit_transform(X)
        arrcolor = []
        for cl in Y:
            arrcolor.append(mapcolor[cl])
        plt.scatter(X[:, 0], X[:, 1], s=40, c=arrcolor)
        #plt.show()
        imgfile="{:s}\\epoch{:03d}.png".format(pathimg,epoch)
        plt.title("{:.3%}".format(test_accuracy.result()))
        plt.legend(handles=legend_elements, loc='upper right')
        plt.savefig(imgfile)
        plt.close()
        new_loss=epoch_loss_avg.result()
        accur=epoch_accuracy.result()
        test_acc=test_accuracy.result()
        msg="Epoch {:03d}: Loss: {:.6f}, Accuracy: {:.3%}, Test: {:.3%}".format(epoch,new_loss,accur,test_acc)
        msg2 = "{0} {1:.6f} {2:.6f} {3:.6f} \n".format(epoch,accur,test_acc,new_loss)
        print(msg)
        # Early stop: quit as soon as the epoch loss stops decreasing.
        if new_loss>old_loss:
            break
        file = open(pathlog,"a");
        file.write(msg2)
        file.close();
        old_loss=epoch_loss_avg.result()
#==============================================================================
# Save trained model to disk
#==============================================================================
# Re-compile so the Keras save path has optimizer/loss metadata attached.
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss=tf.keras.losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
filepath="csvsignal.h5"
tf.keras.models.save_model(
    model,
    filepath,
    overwrite=True,
    include_optimizer=True
)
print("Model csvsignal.h5 saved to disk")
| 32.557756
| 166
| 0.604055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,858
| 0.289711
|
f01db4ce612793fa6669b67b17c501ac73c893ec
| 6,037
|
py
|
Python
|
eslearn/machine_learning/classfication/el_classify_sensitive_person_test.py
|
dongmengshi/easylearn
|
df528aaa69c3cf61f5459a04671642eb49421dfb
|
[
"MIT"
] | null | null | null |
eslearn/machine_learning/classfication/el_classify_sensitive_person_test.py
|
dongmengshi/easylearn
|
df528aaa69c3cf61f5459a04671642eb49421dfb
|
[
"MIT"
] | null | null | null |
eslearn/machine_learning/classfication/el_classify_sensitive_person_test.py
|
dongmengshi/easylearn
|
df528aaa69c3cf61f5459a04671642eb49421dfb
|
[
"MIT"
] | 1
|
2021-01-11T08:21:35.000Z
|
2021-01-11T08:21:35.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 2020/03/16
Feature selection: Relief-based feature selection algorithm.
------
@author: LI Chao
"""
import numpy as np
from sklearn import preprocessing
import os
from sklearn.externals import joblib
from el_classify_sensitive_person_train_validation import ClassifyFourKindOfPersonTrain
from eslearn.utils.lc_evaluation_model_performances import eval_performance
class ClassifyFourKindOfPersonTest():
    """
    Test a previously trained classifier for 2-class sensitive-person identification.

    Parameters
    ----------
    data_test_file: path str
        Path of the test feature matrix (.npy).
    label_test_file: path str
        Path of the test labels (.npy).
    data_train_file: path str
        Path of the training feature matrix (.npy); needed for age encoding.
    models_path: path str
        Directory holding the saved feature-selection/classification models.
    path_out :
        Path to load the feature mask from and save results to.
    is_feature_selection : bool
        if perfrome feature selection.
    is_showfig_finally: bool
        If show figure after all iteration finished.

    Returns
    -------
    Save all classification results and figures to local disk.
    """

    # NOTE(review): this class names its instance parameter `selftest`
    # instead of the conventional `self`; behavior is identical.
    def __init__(selftest,
                 data_test_file=None,
                 label_test_file=None,
                 data_train_file=None,
                 models_path=None,
                 path_out=None,
                 is_feature_selection=False,
                 is_showfig_finally=True):

        selftest.data_test_file = data_test_file
        selftest.label_test_file = label_test_file
        selftest.data_train_file = data_train_file
        selftest.path_out = path_out
        selftest.models_path = models_path
        selftest.is_feature_selection = is_feature_selection
        selftest.is_showfig_finally = is_showfig_finally

    def main_function(selftest):
        """Run the full test pipeline: load artifacts, encode age, select
        features, predict, evaluate, and persist results and figures."""
        print('Training model and testing...\n')

        # load data and mask
        mask_lassocv = joblib.load(os.path.join(selftest.path_out, 'mask_selected_features_lassocv.pkl'))
        # NOTE(review): model_feature_selection is loaded but never used below.
        model_feature_selection = joblib.load(os.path.join(selftest.models_path, 'model_feature_selection.pkl'))
        model_classification = joblib.load(os.path.join(selftest.models_path, 'model_classification.pkl'))
        feature_test, selftest.label_test, feature_train = selftest._load_data()

        # Age encoding: column 2 is re-encoded using the training distribution.
        feature_test[:,2] = ClassifyFourKindOfPersonTrain().age_encodeing(feature_train[:,2], feature_test[:,2])

        # Feature selection
        if selftest.is_feature_selection:
            feature_test = feature_test[:, mask_lassocv != 0]

        # Testting
        selftest.prediction, selftest.decision = selftest.testing(model_classification, feature_test)

        # Evaluating classification performances
        selftest.accuracy, selftest.sensitivity, selftest.specificity, selftest.AUC = eval_performance(selftest.label_test, selftest.prediction, selftest.decision,
                                                 accuracy_kfold=None, sensitivity_kfold=None, specificity_kfold=None, AUC_kfold=None,
                                                 verbose=1, is_showfig=0)

        # Save results and fig to local path
        selftest.save_results()
        selftest.save_fig()

        print("--" * 10 + "Done!" + "--" * 10 )
        return selftest

    def _load_data(selftest):
        """
        Load data
        """
        # Returns (test features, test labels, train features) as numpy arrays.
        data_test = np.load(selftest.data_test_file)
        label_test = np.load(selftest.label_test_file)
        data_train = np.load(selftest.data_train_file)
        return data_test, label_test, data_train

    def testing(selftest, model, test_X):
        # Hard predictions plus the decision-function scores used for the ROC.
        predict = model.predict(test_X)
        decision = model.decision_function(test_X)
        return predict, decision

    def save_results(selftest):
        # Save performances and others
        import pandas as pd
        performances_to_save = np.array([selftest.accuracy, selftest.sensitivity, selftest.specificity, selftest.AUC]).reshape(1,4)
        de_pred_label_to_save = np.vstack([selftest.decision.T, selftest.prediction.T, selftest.label_test.T]).T

        performances_to_save = pd.DataFrame(performances_to_save, columns=[['Accuracy','Sensitivity', 'Specificity', 'AUC']])
        de_pred_label_to_save = pd.DataFrame(de_pred_label_to_save, columns=[['Decision','Prediction', 'Sorted_Real_Label']])

        performances_to_save.to_csv(os.path.join(selftest.path_out, 'test_Performances.txt'), index=False, header=True)
        de_pred_label_to_save.to_csv(os.path.join(selftest.path_out, 'test_Decision_prediction_label.txt'), index=False, header=True)

    def save_fig(selftest):
        # Save ROC and Classification 2D figure
        acc, sens, spec, auc = eval_performance(selftest.label_test, selftest.prediction, selftest.decision,
                                                selftest.accuracy, selftest.sensitivity, selftest.specificity, selftest.AUC,
                                                verbose=0, is_showfig=selftest.is_showfig_finally, is_savefig=1,
                                                out_name=os.path.join(selftest.path_out, 'Classification_performances_test.pdf'),
                                                legend1='Healthy', legend2='Unhealthy')
#
if __name__ == '__main__':
    # =============================================================================
    # All inputs
    # NOTE(review): data_file is defined but never passed to the class below.
    data_file = r'D:\workstation_b\Fundation\给黎超.xlsx'
    path_out = r'D:\workstation_b\Fundation'
    models_path = r'D:\workstation_b\Fundation'
    # =============================================================================

    # Instantiate with pre-split .npy artifacts and run the full test pipeline.
    selftest = ClassifyFourKindOfPersonTest(data_test_file=r'D:\workstation_b\Fundation\feature_test.npy',
                                            label_test_file=r'D:\workstation_b\Fundation\label_test.npy',
                                            data_train_file=r'D:\workstation_b\Fundation\feature_train.npy',
                                            path_out=path_out,
                                            models_path=models_path,
                                            is_feature_selection=1)
    selftest.main_function()
| 41.349315
| 164
| 0.630777
| 4,708
| 0.779083
| 0
| 0
| 0
| 0
| 0
| 0
| 1,700
| 0.281317
|
f01e36c7e52b2f29e3153f9812f722135e5763dd
| 2,483
|
py
|
Python
|
Curso em Video/D_045.py
|
tonmarcondes/UNIVESP
|
a66a623d4811e8f3f9e2999f09e38a4470035ae2
|
[
"MIT"
] | null | null | null |
Curso em Video/D_045.py
|
tonmarcondes/UNIVESP
|
a66a623d4811e8f3f9e2999f09e38a4470035ae2
|
[
"MIT"
] | null | null | null |
Curso em Video/D_045.py
|
tonmarcondes/UNIVESP
|
a66a623d4811e8f3f9e2999f09e38a4470035ae2
|
[
"MIT"
] | null | null | null |
import random

# ANSI escape sequences used to colorize terminal output.
cor = {
    'fim':'\033[m',
    'amarelo':'\033[1;033m',
    'vermelho':'\033[1;031m',
    'vermelhof':'\033[7;031m',
    'azul':'\033[1;034m',
    'verde':'\033[1;32m',
    'verdef':'\033[7;32m',
    'branco':'\033[1;030m'
}

# Menu: 1 = rock (PEDRA), 2 = paper (PAPEL), 3 = scissors (TESOURA).
print('''
Escolha uma das opções abaixo:
\t {}1{} {}PEDRA{}:
\t {}2{} {}PAPEL{}:
\t {}3{} {}TESOURA{}:'''.format(
    cor['vermelho'], cor['fim'], cor['azul'], cor['fim'],
    cor['vermelho'], cor['fim'], cor['azul'], cor['fim'],
    cor['vermelho'], cor['fim'], cor['azul'], cor['fim']
))
eu = int(input('\t '))
if eu == 1:
    me = 'PEDRA'
elif eu == 2:
    me = 'PAPEL'
else:
    me = 'TESOURA'

# The computer's move is the first element of a shuffled list.
pc = ['PEDRA', 'PAPEL', 'TESOURA']
random.shuffle(pc)

def _placar():
    # Print both players' moves on one line.
    print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))

if eu < 1 or eu > 3:
    print('\n\t\t{}ESCOLHA UM VALOR VÁLIDO{}\n'.format(cor['vermelho'], cor['fim']))
elif me == pc[0]:
    _placar()
    print('{} EMPATE, JOGUE OUTRA VEZ {}\n'.format(cor['vermelhof'], cor['fim']))
elif eu == 1 and pc[0] == 'PAPEL':
    _placar()
    print('PAPEL {}EMBRULHA{} PEDRA\n'.format(cor['amarelo'], cor['fim']))
elif eu == 1 and pc[0] == 'TESOURA':
    # BUG FIX: this branch previously repeated the PAPEL condition, so a
    # rock-vs-scissors round fell through to the final `else` and printed
    # the wrong outcome ("TESOURA CORTA PAPEL").
    _placar()
    print('PEDRA {}QUEBRA{} TESOURA\n'.format(cor['amarelo'], cor['fim']))
elif eu == 2 and pc[0] == 'PEDRA':
    _placar()
    print('PAPEL {}EMBRULHA{} PEDRA\n'.format(cor['amarelo'], cor['fim']))
elif eu == 2 and pc[0] == 'TESOURA':
    _placar()
    print('TESOURA {}CORTA{} PAPEL\n'.format(cor['amarelo'], cor['fim']))
elif eu == 3 and pc[0] == 'PEDRA':
    _placar()
    print('PEDRA {}QUEBRA{} TESOURA\n'.format(cor['amarelo'], cor['fim']))
else:
    _placar()
    print('TESOURA {}CORTA{} PAPEL\n'.format(cor['amarelo'], cor['fim']))
| 42.084746
| 114
| 0.515103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,200
| 0.482703
|
f01e8e597dc20bba7caf3b9b0fddc57695c216de
| 5,316
|
py
|
Python
|
train.py
|
ThiruRJST/Deformed-Yolo
|
c9eb4e8c090dff0e9fc4f8652897ff2c59dce889
|
[
"MIT"
] | 1
|
2021-09-10T17:20:09.000Z
|
2021-09-10T17:20:09.000Z
|
train.py
|
ThiruRJST/Deformed-Yolo
|
c9eb4e8c090dff0e9fc4f8652897ff2c59dce889
|
[
"MIT"
] | 1
|
2021-09-10T17:19:54.000Z
|
2021-09-11T08:17:14.000Z
|
wandb/run-20210904_163431-3lkn6hoe/files/code/train.py
|
ThiruRJST/Deformed-Yolo
|
c9eb4e8c090dff0e9fc4f8652897ff2c59dce889
|
[
"MIT"
] | null | null | null |
from pandas.core.algorithms import mode
import torch
import torch.nn as nn
from albumentations import Compose,Resize,Normalize
from albumentations.pytorch import ToTensorV2
import wandb
import time
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from torch.cuda.amp import autocast,GradScaler
import os
import numpy as np
from tqdm import tqdm
from callbacks import EarlyStopping
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import cv2
import torch.nn.functional as F
import random
from build_model import Deformed_Darknet53
# Fix every RNG for reproducibility.
torch.manual_seed(2021)
np.random.seed(2021)
random.seed(2021)
# NOTE(review): benchmark=True and deterministic=True together is unusual —
# benchmark autotuning can undermine the determinism requested on the next line.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True

DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
TOTAL_EPOCHS = 100
scaler = GradScaler()          # AMP gradient scaler shared by train_loop
early_stop = EarlyStopping()   # callback that flags when validation stalls

wandb.init(project='deformed-darknet',entity='tensorthug',name='new-darknet-256x256_32')

print("***** Loading the Model in {} *****".format(DEVICE))
Model = Deformed_Darknet53().to(DEVICE)
print("Model Shipped to {}".format(DEVICE))

# Dataframe with 'Paths', 'Labels' and 'folds' columns, consumed by dog_cat.
data = pd.read_csv("data.csv")

train_loss_fn = nn.BCEWithLogitsLoss()
val_loss_fn = nn.BCEWithLogitsLoss()
# NOTE(review): this rebinding shadows the `torch.optim as optim` module
# alias imported at the top of the file.
optim = torch.optim.Adam(Model.parameters())
wandb.watch(Model)
class dog_cat(Dataset):
    """Binary image dataset driven by a dataframe with columns
    'Paths', 'Labels' and 'folds'.

    mode == "train" keeps every row whose fold differs from `folds`;
    any other mode keeps only the rows of that fold (validation split).
    """

    def __init__(self, df, mode="train", folds=0, transforms=None):
        super(dog_cat, self).__init__()
        self.df = df
        self.mode = mode
        self.folds = folds
        self.transforms = transforms  # albumentations-style: f(image=...)['image']
        if self.mode == "train":
            self.data = self.df[self.df.folds != self.folds].reset_index(drop=True)
        else:
            self.data = self.df[self.df.folds == self.folds].reset_index(drop=True)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img = cv2.imread(self.data.loc[idx, "Paths"])
        label = self.data.loc[idx, 'Labels']
        if self.transforms is not None:
            img = self.transforms(image=img)['image']
        # BUG FIX: when no transforms were supplied, the original returned the
        # undefined name `image`, raising NameError; return the raw image.
        return img, label
def train_loop(epoch,dataloader,model,loss_fn,optim,device=DEVICE):
    """One training epoch with mixed precision.

    Returns (mean loss, mean accuracy) over the epoch and logs both to wandb.
    NOTE(review): the `device` parameter is accepted but the body uses the
    global DEVICE — confirm intent before relying on it.
    """
    model.train()
    epoch_loss = 0
    epoch_acc = 0
    #start_time = time.time()
    pbar = tqdm(enumerate(dataloader),total=len(dataloader))
    for i,(img,label) in pbar:
        optim.zero_grad()
        img = img.to(DEVICE).float()
        label = label.to(DEVICE).float()
        #LOAD_TIME = time.time() - start_time
        # Forward pass under autocast; backward/step go through the scaler.
        with autocast():
            yhat = model(img)
            #Loss Calculation
            train_loss = loss_fn(input = yhat.flatten(), target = label)
            # Binary prediction: sigmoid over the logit, thresholded at 0.5.
            out = (yhat.flatten().sigmoid() > 0.5).float()
            correct = (label == out).float().sum()
        scaler.scale(train_loss).backward()
        scaler.step(optim)
        scaler.update()
        epoch_loss += train_loss.item()
        epoch_acc += correct.item() / out.shape[0]
    train_epoch_loss = epoch_loss / len(dataloader)
    train_epoch_acc = epoch_acc / len(dataloader)
    wandb.log({"Training_Loss":train_epoch_loss})
    wandb.log({"Training_Acc":train_epoch_acc})
    #print(f"Epoch:{epoch}/{TOTAL_EPOCHS} Epoch Loss:{epoch_loss / len(dataloader):.4f} Epoch Acc:{epoch_acc / len(dataloader):.4f}")
    return train_epoch_loss,train_epoch_acc
def val_loop(epoch, dataloader, model, loss_fn, device=DEVICE):
    """Evaluate `model` for one epoch without gradients.

    Returns (mean loss, mean accuracy) over the dataloader and logs both
    to wandb under "Val_Loss" / "Val_Acc".
    """
    model.eval()
    val_epoch_loss = 0
    val_epoch_acc = 0
    pbar = tqdm(enumerate(dataloader), total=len(dataloader))
    with torch.no_grad():
        for i, (img, label) in pbar:
            img = img.to(device).float()
            label = label.to(device).float()
            yhat = model(img)
            val_loss = loss_fn(input=yhat.flatten(), target=label)
            # Binary prediction: sigmoid over the logit, thresholded at 0.5.
            out = (yhat.flatten().sigmoid() > 0.5).float()
            correct = (label == out).float().sum()
            val_epoch_loss += val_loss.item()
            val_epoch_acc += correct.item() / out.shape[0]
    val_lossd = val_epoch_loss / len(dataloader)
    val_accd = val_epoch_acc / len(dataloader)
    wandb.log({"Val_Loss": val_lossd, "Epoch": epoch})
    # BUG FIX: the original logged `val_accd / len(dataloader)`, dividing the
    # already-averaged accuracy a second time and under-reporting it.
    wandb.log({"Val_Acc": val_accd, "Epoch": epoch})
    return val_lossd, val_accd
if __name__ == "__main__":
train_per_epoch_loss,train_per_epoch_acc = [],[]
val_per_epoch_loss,val_per_epoch_acc = [],[]
train = dog_cat(data,transforms=Compose([Resize(256,256),Normalize(),ToTensorV2()]))
val = dog_cat(data,mode='val',transforms=Compose([Resize(256,256),Normalize(),ToTensorV2()]))
train_load = DataLoader(train,batch_size=32,shuffle=True,num_workers=4)
val_load = DataLoader(val,batch_size=32,num_workers=4)
for e in range(TOTAL_EPOCHS):
train_loss,train_acc = train_loop(e,train_load,Model,train_loss_fn,optim)
val_loss,val_acc = val_loop(e,val_load,Model,val_loss_fn)
train_per_epoch_loss.append(train_loss)
train_per_epoch_acc.append(train_acc)
val_per_epoch_loss.append(val_loss)
val_per_epoch_acc.append(val_acc)
print(f"TrainLoss:{train_loss:.4f} TrainAcc:{train_acc:.4f}")
print(f"ValLoss:{val_loss:.4f} ValAcc:{val_acc:.4f}")
early_stop(Model,val_loss)
if early_stop.early_stop:
break
| 29.04918
| 133
| 0.659518
| 799
| 0.150301
| 0
| 0
| 0
| 0
| 0
| 0
| 556
| 0.10459
|
f01e97fde7da87878e9d54736f7cb227db681497
| 257
|
py
|
Python
|
test/test_encoder.py
|
mickey9910326/py-asa-loader
|
75852a4c633f34a67f5de2b2a807d2d40ce423bf
|
[
"MIT"
] | null | null | null |
test/test_encoder.py
|
mickey9910326/py-asa-loader
|
75852a4c633f34a67f5de2b2a807d2d40ce423bf
|
[
"MIT"
] | null | null | null |
test/test_encoder.py
|
mickey9910326/py-asa-loader
|
75852a4c633f34a67f5de2b2a807d2d40ce423bf
|
[
"MIT"
] | null | null | null |
import conftest
from asaprog import pac_encode
from asaprog.util import *
if __name__ == "__main__":
pac = {
'command': asaProgCommand.CHK_DEVICE.value,
'data': b'test'
}
res = pac_encode(pac)
print(res)
print(res[-1])
| 18.357143
| 51
| 0.626459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.124514
|
f01f136d0d4a9137fd6a7ceea105c26d2d1478ac
| 1,098
|
py
|
Python
|
tests/controllers/controller_with_throttling.py
|
DmitryKhursevich/winter
|
9f3bf462f963059bab1f1bbb309ca57f8a43b46f
|
[
"MIT"
] | 1
|
2020-10-26T09:48:05.000Z
|
2020-10-26T09:48:05.000Z
|
tests/controllers/controller_with_throttling.py
|
mikhaillazko/winter
|
cd4f11aaf28d500aabb59cec369817bfdb5c2fc1
|
[
"MIT"
] | null | null | null |
tests/controllers/controller_with_throttling.py
|
mikhaillazko/winter
|
cd4f11aaf28d500aabb59cec369817bfdb5c2fc1
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
import winter.web
from winter.web import ExceptionHandler
from winter.web.exceptions import ThrottleException
class CustomThrottleExceptionHandler(ExceptionHandler):
    """Handles ThrottleException with HTTP 429 and a custom message body."""

    @winter.response_status(HTTPStatus.TOO_MANY_REQUESTS)
    def handle(self, exception: ThrottleException) -> str:
        return 'custom throttle exception'
@winter.route_get('with-throttling/')
@winter.web.no_authentication
class ControllerWithThrottling:
    """Test controller exercising the '5/s' throttling decorator in
    several combinations (throttled, unthrottled, custom handler)."""

    @winter.route_get()
    @winter.web.throttling('5/s')
    def simple_method(self) -> int:
        return 1

    @winter.route_post()
    def simple_post_method(self) -> int:
        # POST route without a throttling decorator.
        return 1

    @winter.route_get('same/')
    @winter.web.throttling('5/s')
    def same_simple_method(self) -> int:
        # Same rate limit as simple_method but a distinct route.
        return 1

    @winter.route_get('without-throttling/')
    def method_without_throttling(self):
        pass

    @winter.route_get('custom-handler/')
    @winter.web.throttling('5/s')
    @winter.throws(ThrottleException, CustomThrottleExceptionHandler)
    def simple_method_with_custom_handler(self) -> int:
        return 1
| 26.780488
| 69
| 0.721311
| 885
| 0.806011
| 0
| 0
| 893
| 0.813297
| 0
| 0
| 105
| 0.095628
|
f020207356e26d12c8db3a4bedd4f52a81d8f981
| 269
|
py
|
Python
|
appwebshare/files.py
|
cvakiitho/Webshare-download-manager
|
4c79242d6a8562b269ee69a9096b7158e9f6c3c0
|
[
"MIT"
] | 3
|
2015-02-06T11:22:58.000Z
|
2019-08-14T21:25:29.000Z
|
appwebshare/files.py
|
cvakiitho/Webshare-download-manager
|
4c79242d6a8562b269ee69a9096b7158e9f6c3c0
|
[
"MIT"
] | 2
|
2015-02-04T11:45:51.000Z
|
2015-03-04T22:01:11.000Z
|
appwebshare/files.py
|
cvakiitho/Webshare-download-manager
|
4c79242d6a8562b269ee69a9096b7158e9f6c3c0
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF=8 -*-
__author__ = 'Tomas Hartmann'
import glob
from appwebshare.scripts import config
def get_file_list():
    """Return the files in the configured download directory, with the
    directory prefix stripped from each path."""
    return [
        path.replace(config.DIR, "")
        for path in glob.glob(config.DIR + '*.*')
    ]
| 26.9
| 53
| 0.66171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.171004
|
f0229b401abe3feee370d9a51bbc8c817449f9e9
| 1,132
|
py
|
Python
|
tests/checkout_four_sdk_test.py
|
riaz-bordie-cko/checkout-sdk-python
|
d9bc073306c1a98544c326be693ed722576ea895
|
[
"MIT"
] | null | null | null |
tests/checkout_four_sdk_test.py
|
riaz-bordie-cko/checkout-sdk-python
|
d9bc073306c1a98544c326be693ed722576ea895
|
[
"MIT"
] | null | null | null |
tests/checkout_four_sdk_test.py
|
riaz-bordie-cko/checkout-sdk-python
|
d9bc073306c1a98544c326be693ed722576ea895
|
[
"MIT"
] | null | null | null |
import pytest
import checkout_sdk
from checkout_sdk.environment import Environment
from checkout_sdk.exception import CheckoutArgumentException
def test_should_create_four_sdk():
    """Building FourSdk clients for sandbox and production should succeed."""
    (checkout_sdk.FourSdk()
        .secret_key('sk_sbox_m73dzbpy7cf3gfd46xr4yj5xo4e')
        .public_key('pk_sbox_pkhpdtvmkgf7hdnpwnbhw7r2uic')
        .environment(Environment.sandbox())
        .build())

    sdk = (checkout_sdk.FourSdk()
           .secret_key('sk_m73dzbpy7cf3gfd46xr4yj5xo4e')
           .public_key('pk_pkhpdtvmkgf7hdnpwnbhw7r2uic')
           .environment(Environment.production())
           .build())

    assert sdk is not None
    assert sdk.tokens is not None
def test_should_fail_create_four_sdk():
    """Malformed keys must raise CheckoutArgumentException."""
    with pytest.raises(CheckoutArgumentException):
        (checkout_sdk.FourSdk()
            .secret_key('sk_sbox_m73dzbpy7c-f3gfd46xr4yj5xo4e')
            .environment(Environment.sandbox())
            .build())

    with pytest.raises(CheckoutArgumentException):
        (checkout_sdk.FourSdk()
            .public_key('pk_sbox_pkh')
            .environment(Environment.sandbox())
            .build())
| 30.594595
| 65
| 0.682862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.166961
|
f022af95545ca83849a19b9cfbeb75f2ed9c4fd0
| 181
|
py
|
Python
|
transitfeed_web/__init__.py
|
ed-g/transitfeed_web
|
1e9be7152823641c450612b27cace99a1efe0b4f
|
[
"Apache-2.0"
] | null | null | null |
transitfeed_web/__init__.py
|
ed-g/transitfeed_web
|
1e9be7152823641c450612b27cace99a1efe0b4f
|
[
"Apache-2.0"
] | null | null | null |
transitfeed_web/__init__.py
|
ed-g/transitfeed_web
|
1e9be7152823641c450612b27cace99a1efe0b4f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
import sys
import transitfeed
import run_transitfeed_web_server
import util
def main():
    """Entry point placeholder: print a greeting."""
    print("Hello, world.")


if __name__ == '__main__':
    main()
| 12.928571
| 33
| 0.707182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.259669
|
f0238d97d920682e53df77bf6d0427a081fe7819
| 7,980
|
py
|
Python
|
untiler/__init__.py
|
waissbluth/untiler
|
866b3096196ac340597f77fbf5f2ce899e58238e
|
[
"MIT"
] | 37
|
2015-10-06T16:41:18.000Z
|
2022-03-22T14:52:13.000Z
|
untiler/__init__.py
|
waissbluth/untiler
|
866b3096196ac340597f77fbf5f2ce899e58238e
|
[
"MIT"
] | 18
|
2015-09-02T21:13:44.000Z
|
2021-01-04T15:46:04.000Z
|
untiler/__init__.py
|
waissbluth/untiler
|
866b3096196ac340597f77fbf5f2ce899e58238e
|
[
"MIT"
] | 8
|
2017-04-12T01:22:36.000Z
|
2021-08-17T04:10:46.000Z
|
#!/usr/bin/env python
from __future__ import with_statement
from __future__ import print_function
from __future__ import division
import os
from multiprocessing import Pool
import click
import mercantile as merc
import numpy as np
import rasterio
from rasterio import Affine
from rasterio.warp import reproject
try:
from rasterio.warp import RESAMPLING as Resampling # pre-1.0
except ImportError:
from rasterio.warp import Resampling
import untiler.scripts.tile_utils as tile_utils
def make_affine(height, width, ul, lr):
    """
    Build the affine transform for a height x width tile whose corner
    coordinates run from ul (upper-left) to lr (lower-right).
    """
    x_step = (ul[0] - lr[0]) / width
    y_step = (ul[1] - lr[1]) / height
    return Affine(-x_step, 0.0, ul[0],
                  0.0, -y_step, ul[1])
def affaux(up):
    """Return a unit affine and its `up`-scaled counterpart (y axis flipped)."""
    base = Affine(1, 0, 0, 0, -1, 0)
    scaled = Affine(up, 0, 0, 0, -up, 0)
    return base, scaled
def upsample(rgb, up, fr, to):
    """
    Upsample a (bands, h, w) array by integer factor `up` with bilinear
    resampling; fr/to are the source/destination affines (see affaux).
    Both CRS are EPSG:3857, so this is a pure resolution change.
    """
    up_rgb = np.empty((rgb.shape[0], rgb.shape[1] * up, rgb.shape[2] * up), dtype=rgb.dtype)
    reproject(
        rgb, up_rgb,
        src_transform=fr,
        dst_transform=to,
        src_crs="EPSG:3857",
        dst_crs="EPSG:3857",
        resampling=Resampling.bilinear)
    return up_rgb
def make_src_meta(bounds, size, creation_opts=None):
    """
    Create rasterio-style metadata for a square output tile.

    Arguments:
        bounds: object with west/north/east/south attributes (degrees).
        size: tile edge length in pixels (the tile is size x size).
        creation_opts: optional dict of creation options that override
            the defaults below (e.g. {'compress': 'LZW'}).

    Returns:
        dict suitable for rasterio.open(..., **meta).
    """
    # Bug fix: the original used a mutable default (`creation_opts={}`),
    # which is shared across calls; use None as the sentinel instead.
    if creation_opts is None:
        creation_opts = {}
    ul = merc.xy(bounds.west, bounds.north)
    lr = merc.xy(bounds.east, bounds.south)
    aff = make_affine(size, size, ul, lr)
    ## default values
    src_meta = {
        'driver': 'GTiff',
        'height': size,
        'width': size,
        'count': 4,
        'dtype': np.uint8,
        'affine': aff,
        "crs": 'EPSG:3857',
        'compress': 'JPEG',
        'tiled': True,
        'blockxsize': 256,
        'blockysize': 256
    }
    # Caller-supplied options override the defaults.
    src_meta.update(creation_opts)
    return src_meta
def make_window(x, y, xmin, ymin, windowsize):
    """
    Build a ((row_start, row_stop), (col_start, col_stop)) window for
    writing child tile (x, y) into a parent whose origin tile is
    (xmin, ymin), each child tile being windowsize pixels square.

    Raises ValueError when (x, y) lies before the origin.
    """
    if x < xmin or y < ymin:
        raise ValueError("Indices can't be smaller than origin")
    row_start = (y - ymin) * windowsize
    col_start = (x - xmin) * windowsize
    return (
        (row_start, row_start + windowsize),
        (col_start, col_start + windowsize),
    )
# Per-process settings dict; populated in each pool worker by global_setup().
globalArgs = None
def make_image_array(imdata, outputSize):
    """
    Normalize a raster band array to 4 bands (R, G, B, A).

    Arguments:
        imdata: numpy array shaped (bands, width, height).
        outputSize: edge length used when an opaque alpha band must be
            synthesized (assumed to match imdata's spatial size --
            TODO confirm for non-square tiles).

    Returns:
        (4, width, height) array; band i is imdata[i % bands], so
        1-band (grayscale) and 3-band inputs are both handled.
    """
    # Fix: removed the original `try/except Exception as e: raise e`
    # wrapper -- it re-raised unconditionally, only truncating tracebacks.
    depth, width, height = imdata.shape
    if depth == 4:
        alpha = imdata[3]
    else:
        # No alpha band supplied: make the tile fully opaque.
        alpha = np.zeros((outputSize, outputSize), dtype=np.uint8) + 255
    # The modulo lets a single grayscale band fill all three color bands.
    return np.array([
        imdata[0 % depth, :, :],
        imdata[1 % depth, :, :],
        imdata[2 % depth, :, :],
        alpha
    ])
def load_image_data(imdata, outputSize):
    """
    Reshape flat per-pixel data of shape (outputSize**2, depth) into a
    (outputSize, outputSize, depth) uint8 image.

    Returns (image, imsize, depth); raises ValueError when the pixel
    count does not match outputSize squared.
    """
    imsize, depth = imdata.shape
    if int(np.sqrt(imsize)) != outputSize:
        raise ValueError("Output size of %s ** 2 does not equal %s" % (outputSize, imsize))
    image = imdata.reshape(outputSize, outputSize, depth).astype(np.uint8)
    return image, imsize, depth
def global_setup(inputDir, args):
    """Pool initializer: stash the per-run settings dict in the
    module-global globalArgs for the worker process."""
    global globalArgs
    globalArgs = args
def logwriter(openLogFile, writeObj):
    """Write writeObj as a line to openLogFile; no-op when the file is falsy."""
    if not openLogFile:
        return
    print(writeObj, file=openLogFile)
def streaming_tile_worker(data):
    """
    Pool worker: assemble one composite scene tif from its child tiles.

    `data` carries the composite tile (x, y, z), the max-zoom tile list
    and, optionally, a coarser fill-zoom cover; settings come from the
    module-global globalArgs set by global_setup().
    """
    # Output edge length: child tiles per side times tile resolution.
    size = 2 ** (data['zMax'] - globalArgs['compositezoom']) * globalArgs['tileResolution']
    out_meta = make_src_meta(merc.bounds(data['x'], data['y'], data['z']), size, globalArgs['creation_opts'])
    z, x, y = [int(i) for i in (data['z'], data['x'], data['y'])]
    filename = globalArgs['sceneTemplate'] % (z, x, y)
    subtiler = tile_utils.TileUtils()
    log = 'FILE: %s\n' % filename
    try:
        with rasterio.open(filename, 'w', **out_meta) as dst:
            if data['zMaxCov']:
                superTiles = subtiler.get_super_tiles(data['zMaxTiles'], data['zMaxCov'])
                fillbaseX, fillbaseY = subtiler.get_sub_base_zoom(data['x'], data['y'], data['z'], data['zMaxCov'])
                ## fill thresh == the number of sub tiles that would need to occur in a fill tile to not fill (eg completely covered)
                fThresh = 4 ** (data['zMax'] - data['zMaxCov'])
                fDiff = 2 ** (data['zMax'] - data['zMaxCov'])
                toFaux, frFaux = affaux(fDiff)
                if not globalArgs['no_fill']:
                    print('filling')
                    ## Read and write the fill tiles first
                    for t in subtiler.get_fill_super_tiles(superTiles, data['maxCovTiles'], fThresh):
                        z, x, y = [int(i) for i in t]
                        path = globalArgs['readTemplate'] % (z, x, y)
                        log += '%s %s %s\n' % (z, x, y)
                        with rasterio.open(path) as src:
                            imdata = src.read()
                        # Upsample the coarse fill tile to max-zoom resolution.
                        imdata = make_image_array(imdata, globalArgs['tileResolution'])
                        imdata = upsample(imdata, fDiff, frFaux, toFaux)
                        window = make_window(x, y, fillbaseX, fillbaseY, globalArgs['tileResolution'] * fDiff)
                        dst.write(imdata, window=window)
            # Write the full-resolution max-zoom tiles over the fill.
            baseX, baseY = subtiler.get_sub_base_zoom(data['x'], data['y'], data['z'], data['zMax'])
            for t in data['zMaxTiles']:
                z, x, y = [int(i) for i in t]
                path = globalArgs['readTemplate'] % (z, x, y)
                log += '%s %s %s\n' % (z, x, y)
                with rasterio.open(path) as src:
                    imdata = src.read()
                imdata = make_image_array(imdata, globalArgs['tileResolution'])
                window = make_window(x, y, baseX, baseY, globalArgs['tileResolution'])
                dst.write(imdata, window=window)
        if globalArgs['logdir']:
            with open(os.path.join(globalArgs['logdir'], '%s.log' % os.path.basename(filename)), 'w') as logger:
                logwriter(logger, log)
        return filename
    except Exception as e:
        # NOTE(review): `path` is unbound if the failure happens before
        # any tile was read -- this echo can itself raise NameError.
        click.echo("%s errored" % (path), err=True)
        raise e
def inspect_dir(inputDir, zoom, read_template):
    """
    Print the (x, y, z) of every tile found under inputDir whose path
    matches read_template, filtered through select_tiles at `zoom`.
    """
    tiler = tile_utils.TileUtils()
    allFiles = tiler.search_dir(inputDir)
    template, readTemplate, separator = tile_utils.parse_template("%s/%s" % (inputDir, read_template))
    allTiles = np.array([i for i in tiler.get_tiles(allFiles, template, separator)])
    allTiles, _, _, _, _ = tiler.select_tiles(allTiles, zoom)
    for t in allTiles:
        z, x, y = t
        click.echo([x, y, z])
def stream_dir(inputDir, outputDir, compositezoom, maxzoom, logdir, read_template, scene_template, workers, creation_opts, no_fill, tile_resolution=256):
    """
    Untile a directory of map tiles into composite scene tifs.

    Discovers tiles under inputDir via read_template, optionally caps
    them at maxzoom, groups them into composites at compositezoom, and
    fans the per-scene assembly out to a worker pool (one output tif per
    composite, named by scene_template under outputDir).

    Raises ValueError when no tiles match the template or the maxzoom
    filter empties the set.
    """
    tiler = tile_utils.TileUtils()
    allFiles = tiler.search_dir(inputDir)
    template, readTemplate, separator = tile_utils.parse_template("%s/%s" % (inputDir, read_template))
    allTiles = np.array([i for i in tiler.get_tiles(allFiles, template, separator)])
    if allTiles.shape[0] == 0 or allTiles.shape[1] != 3:
        raise ValueError("No tiles were found for that template")
    if maxzoom:
        allTiles = tiler.filter_tiles(allTiles, maxzoom)
        if allTiles.shape[0] == 0:
            raise ValueError("No tiles were found below that maxzoom")
    _, sceneTemplate, _ = tile_utils.parse_template("%s/%s" % (outputDir, scene_template))
    # Workers receive their settings via the global_setup initializer.
    pool = Pool(workers, global_setup, (inputDir, {
        'maxzoom': maxzoom,
        'readTemplate': readTemplate,
        'outputDir': outputDir,
        'tileResolution': tile_resolution,
        'compositezoom': compositezoom,
        'fileTemplate': '%s/%s_%s_%s_%s.tif',
        'sceneTemplate': sceneTemplate,
        'logdir': logdir,
        'creation_opts': creation_opts,
        'no_fill': no_fill
    }))
    superTiles = tiler.get_super_tiles(allTiles, compositezoom)
    # Echo each finished scene filename as workers complete.
    for p in pool.imap_unordered(streaming_tile_worker, tiler.get_sub_tiles(allTiles, superTiles)):
        click.echo(p)
    pool.close()
    pool.join()
if __name__ == "__main__":
    # NOTE(review): both calls are missing their required arguments, so
    # running this module directly raises TypeError -- confirm whether a
    # CLI entry point was intended here.
    stream_dir()
    inspect_dir()
| 30.113208
| 153
| 0.590977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,250
| 0.156642
|
f023dd97d1d559d5d0d17b6855fef5c568625d43
| 236
|
py
|
Python
|
loadenv.py
|
Natsu-dev/otenki
|
d962d44737a68a4751fd58051a670be4ecf852ce
|
[
"MIT"
] | null | null | null |
loadenv.py
|
Natsu-dev/otenki
|
d962d44737a68a4751fd58051a670be4ecf852ce
|
[
"MIT"
] | null | null | null |
loadenv.py
|
Natsu-dev/otenki
|
d962d44737a68a4751fd58051a670be4ecf852ce
|
[
"MIT"
] | null | null | null |
import os
from os.path import join, dirname
from dotenv import load_dotenv

# First pass: load a .env found via python-dotenv's default search.
load_dotenv(verbose=True)
# Second pass: explicitly load the .env that sits next to this file.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(verbose=True, dotenv_path=dotenv_path)

# Discord bot token read from the environment (None when unset).
TOKEN = os.getenv('DISCORD_TOKEN')
| 21.454545
| 50
| 0.792373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.088983
|
f024f2d1468cd63a89d1e5336dc2508a4542b04f
| 1,476
|
py
|
Python
|
Stack/10-stack-special-design-and-implement.py
|
mahmutcankurt/DataStructures_Python
|
bfb81e3530b535c4e48c07548dc4a4f9a648bab2
|
[
"MIT"
] | 1
|
2022-01-25T22:17:55.000Z
|
2022-01-25T22:17:55.000Z
|
Stack/10-stack-special-design-and-implement.py
|
mahmutcankurt/DataStructures_Python
|
bfb81e3530b535c4e48c07548dc4a4f9a648bab2
|
[
"MIT"
] | null | null | null |
Stack/10-stack-special-design-and-implement.py
|
mahmutcankurt/DataStructures_Python
|
bfb81e3530b535c4e48c07548dc4a4f9a648bab2
|
[
"MIT"
] | null | null | null |
class Stack:
    """Capacity-limited LIFO stack backed by a Python list (max 100 items)."""

    def __init__(self):
        self.array = []
        self.top = -1
        self.max = 100

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return self.top == -1

    def isFull(self):
        """Return True when the stack is at capacity."""
        return self.top == self.max - 1

    def push(self, data):
        """Push data; print a warning and drop it when full."""
        if self.isFull():
            print("Stack Overflow")
            return
        self.top += 1
        self.array.append(data)

    def pop(self):
        """Pop and return the top element; print a warning and return
        None when empty."""
        if self.isEmpty():
            print("Stack Underflow")
            return
        self.top -= 1
        return self.array.pop()
class SpecialStack(Stack):
    """
    Stack that also reports the current minimum in O(1) via an auxiliary
    stack (self.Min) whose top always mirrors the minimum of the main
    stack's contents.
    """

    def __init__(self):
        super().__init__()
        self.Min = Stack()

    def push(self, x):
        """Push x and keep the auxiliary min stack in sync."""
        # Bug fix: the original tested `if(self.isEmpty)` -- a bound
        # method reference, which is always truthy -- so the else branch
        # never ran and getMin() returned the most recent push, not the
        # minimum.
        if self.isEmpty():
            super().push(x)
            self.Min.push(x)
        else:
            super().push(x)
            y = self.Min.pop()
            self.Min.push(y)
            # Mirror the smaller of the new value and the previous min.
            if x <= y:
                self.Min.push(x)
            else:
                self.Min.push(y)

    def pop(self):
        """Pop from both stacks so Min keeps tracking the remainder."""
        x = super().pop()
        self.Min.pop()
        return x

    def getMin(self):
        """Peek the auxiliary stack: the current minimum."""
        x = self.Min.pop()
        self.Min.push(x)
        return x
if __name__ == "__main__":
    # Demo: push values, then query the tracked minimum after each change.
    s = SpecialStack()
    s.push(10)
    s.push(20)
    s.push(30)
    print(s.getMin())
    s.push(5)
    print(s.getMin())
| 20.219178
| 36
| 0.443767
| 1,319
| 0.893631
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.029133
|
f02506946a855a60b83d59b8fe69069f7a64c710
| 1,316
|
py
|
Python
|
fork_process/dataPreprocess/data_extraction_2.py
|
JianboTang/modified_GroundHog
|
cc511a146a51b42fdfb2b2c045205cca6ab306b7
|
[
"BSD-3-Clause"
] | null | null | null |
fork_process/dataPreprocess/data_extraction_2.py
|
JianboTang/modified_GroundHog
|
cc511a146a51b42fdfb2b2c045205cca6ab306b7
|
[
"BSD-3-Clause"
] | null | null | null |
fork_process/dataPreprocess/data_extraction_2.py
|
JianboTang/modified_GroundHog
|
cc511a146a51b42fdfb2b2c045205cca6ab306b7
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy
import pickle
# Paired input corpora: one post / comment per line.
readfile1 = open('intermediate_data/post_1.txt','r');
readfile2 = open('intermediate_data/cmnt_1.txt','r');
# Output: the two pickled character-frequency dictionaries.
writefile = open('intermediate_data/dictionary.pkl','w');
#writefile1 = open('intermediate_data/post_2.txt','w');
#writefile2 = open('intermediate_data/cmnt_2.txt','w');
def staticDict(dictionary, lline):
    """Accumulate occurrence counts of lline's items into dictionary
    (mutated in place) and return it."""
    for token in lline:
        dictionary[token] = dictionary.get(token, 0) + 1
    return dictionary
def preprocess(line):
    """Decode a UTF-8 byte line, drop all spaces, and remove the final
    character (the trailing newline). Returns the character list."""
    decoded = line.decode("utf-8")
    chars = [ch for ch in decoded if ch != u' ']
    del chars[-1]
    return chars
def dictPrint(dictionary):
    """Print each key and its count, one per line (Python 2 print statement)."""
    for x in dictionary:
        print x," : ",dictionary[x];
def main(count):
    """Tally character frequencies over up to `count` post/comment line
    pairs, print both tallies, and pickle them to the module-level
    writefile (Python 2 script)."""
    dict1 = {};
    dict2 = {};
    i = 0;
    while i < count:
        line1 = readfile1.readline();
        line2 = readfile2.readline();
        # Stop as soon as either corpus is exhausted.
        if not line1 or not line2:
            print "touch the end of file"
            break;
        lline1 = preprocess(line1);
        lline2 = preprocess(line2);
        dict1 = staticDict(dict1,lline1);
        dict2 = staticDict(dict2,lline2);
        i += 1;
    print "print the first dictionary"
    dictPrint(dict1);
    print "print the second dictionary"
    dictPrint(dict2);
    # Two sequential pickle records in the same file: posts, then comments.
    pickle.dump(dict1,writefile);
    pickle.dump(dict2,writefile);
if __name__ == '__main__':
    # Process up to one million line pairs.
    main(1000000);
| 25.803922
| 57
| 0.660334
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 319
| 0.242401
|
f027e6207f84d89378cfacc9c580753614b7155a
| 4,245
|
py
|
Python
|
visualization.py
|
Tommy-Johannessen/MovementRecognition
|
be84d7d014a272987dd20d03194336a9244eb900
|
[
"MIT"
] | null | null | null |
visualization.py
|
Tommy-Johannessen/MovementRecognition
|
be84d7d014a272987dd20d03194336a9244eb900
|
[
"MIT"
] | null | null | null |
visualization.py
|
Tommy-Johannessen/MovementRecognition
|
be84d7d014a272987dd20d03194336a9244eb900
|
[
"MIT"
] | 1
|
2019-02-13T12:42:39.000Z
|
2019-02-13T12:42:39.000Z
|
import itertools
import os
from collections import defaultdict
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
from matplotlib.ticker import FuncFormatter
import pickle
import os
import numpy as np
def calculate_cm(pred_vals, true_vals, classes):
    """
    Build a confusion matrix and per-class metrics.

    Arguments:
        pred_vals: sequence of predicted class indices.
        true_vals: sequence of ground-truth class indices (same length).
        classes: sequence of class labels; only its length is used.

    Returns:
        (recall, precision, f1, cm): per-class metric lists and the
        confusion matrix ndarray (rows = truth, cols = prediction).

    Raises:
        ValueError: when pred_vals and true_vals differ in length.
    """
    if len(pred_vals) != len(true_vals):
        raise ValueError("Dimensions do not match")
    n_classes = len(classes)
    d = [[0 for _ in range(n_classes)] for _ in range(n_classes)]
    for guess, ground_truth in zip(pred_vals, true_vals):
        d[ground_truth][guess] += 1
    d = np.asarray(d)
    recall = []
    precison = []
    f1 = []
    # Recall per true class (row); precision per predicted class (column).
    for index, values in enumerate(d):
        recall.append(0 if sum(values) == 0 else values[index] / sum(values))
    for index, values in enumerate(d.transpose()):
        precison.append(0 if sum(values) == 0 else values[index] / sum(values))
    # Bug fix: F1 is the harmonic mean of precision and recall; the
    # original computed the arithmetic mean (r + p) / 2. Guard the
    # r + p == 0 case to avoid division by zero.
    for r, p in zip(recall, precison):
        f1.append(0 if (r + p) == 0 else 2 * r * p / (r + p))
    return recall, precison, f1, d
def plot_confusion_matrix(cm, classes, path, name, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    The figure is saved to path + name.
    """
    # Ensure the output directory exists before saving.
    if not os.path.exists(path):
        os.makedirs(path)
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.figure(figsize=(12, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate every cell; white text on dark cells for contrast.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    # NOTE(review): assumes `path` ends with a separator -- confirm callers.
    plt.savefig(path + name)
    plt.clf()
    plt.close()
def plot_data_distribution(filename, move_type='basic', is_sliding_window=False):
    """
    Bar-plot the number of data elements per movement category from a
    pickled dataset and save the figure under figures/data_distribution.

    NOTE(review): both the pickle path and the figure name contain
    literal '(unknown)' placeholders and `filename` only feeds the plot
    title -- this looks like redacted or broken path logic; confirm the
    intended file naming.
    """
    image_folder = os.path.join('figures', 'data_distribution')
    figure_name = f'{move_type}_(unknown).png'
    data_folder = 'data/processed_data'
    movement_type = f'{move_type}_movement'
    pickle_file = os.path.join(data_folder, movement_type, f'(unknown).p')
    with open(pickle_file, 'rb') as bin_file:
        data = pickle.load(bin_file)
    x_labels = []
    y_labels = []
    if is_sliding_window:
        # Sliding-window pickles store (windows, categories); regroup the
        # windows under their category to match the dict layout below.
        sliding_windows, categories = data
        data = defaultdict(list)
        for category, sliding_window in zip(categories, sliding_windows):
            data[category].append([sliding_window.tolist()])
    for category, data_lists in data.items():
        data_points_count = 0
        for data_list in data_lists:
            data_points_count += len(data_list)
        x_labels.append(category)
        y_labels.append(data_points_count)
    x_labels = np.arange(len(x_labels))
    fig, ax = plt.subplots()
    # Thousands separators on the y axis.
    formatter = FuncFormatter(lambda x, p: format(int(x), ','))
    ax.yaxis.set_major_formatter(formatter)
    plt.title(f'Data distribution for {move_type} {filename.split("_")[0]} {filename.split("_")[1]}')
    plt.ylabel('Number of data elements')
    plt.xlabel('Movement Categories')
    plt.bar(x_labels, y_labels)
    plt.xticks(x_labels)
    plt.tight_layout()
    plt.savefig(os.path.join(image_folder, figure_name))
    plt.clf()
    plt.close()
if __name__ == '__main__':
    search_folder = 'data/processed_data'
    for folder in os.listdir(search_folder):
        if folder == 'custom_movement':
            for file in os.listdir(os.path.join(search_folder, folder)):
                # Derive plot name, movement type, and sliding-window flag
                # from the data file's name.
                plot_data_distribution(file.split('.')[0],
                                       folder.split('_')[0],
                                       True if 'sliding_window' in file else False)
        else:
            print(f'Image created for {folder} at an earlier stage')
| 30.321429
| 113
| 0.640047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 785
| 0.184923
|
f028c9418a1b88c255939fa631a2c379765ba1a6
| 7,188
|
py
|
Python
|
raw-myo-plot/Extract_Features.py
|
rjweld21/prostheticClinic
|
1e1ab314fc31d85f455bd7a7868e1269f2808b50
|
[
"MIT"
] | null | null | null |
raw-myo-plot/Extract_Features.py
|
rjweld21/prostheticClinic
|
1e1ab314fc31d85f455bd7a7868e1269f2808b50
|
[
"MIT"
] | null | null | null |
raw-myo-plot/Extract_Features.py
|
rjweld21/prostheticClinic
|
1e1ab314fc31d85f455bd7a7868e1269f2808b50
|
[
"MIT"
] | 1
|
2018-12-13T22:19:55.000Z
|
2018-12-13T22:19:55.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 1 12:54:07 2018
@author: bsala_000
"""
import os
import numpy as np
import pandas as pd
def getFilename(RECORDS_DIR='myo_data'):
    """
    Interactively pick a CSV from RECORDS_DIR by index.

    Lists the directory's files with indices, then prompts until a valid
    index is entered. Returns the joined path, or '' when the directory
    is empty.
    """
    if len(os.listdir(RECORDS_DIR)):
        print('Records found:')
        for i, f in enumerate(sorted(set(os.listdir(RECORDS_DIR)))):
            print(i, ':', f)
        f = input('Enter number of file to load: ')
        fileFound = False
        while not fileFound:
            try:
                i = int(f)
                f = sorted(set(os.listdir(RECORDS_DIR)))[i]
                fileFound = True
            except (ValueError, IndexError):
                # Fixes vs the original: a bare `except` swallowed
                # everything (including KeyboardInterrupt), and the loop
                # never re-prompted, so one bad entry spun forever.
                print('Incorrect input... Must be number listed above.')
                f = input('Enter number of file to load: ')
        return os.path.join(RECORDS_DIR, f)
    else:
        print('No records found.')
        return ''
def print_record_features(data):
    """
    Print every extracted feature (RMS, variance, MAV, zero crossings,
    waveform length) for the given EMG dataframe.

    :data: - Dataframe of loaded EMG data from CSV
    """
    for unlabeled in (get_RMS(data), get_Var(data), get_MAV(data)):
        print(unlabeled)
    print('Zero Crossings:\n', get_zero_crossings(data))
    print('Waveform Lengths:\n', get_waveform_length(data))
def get_record_features(data, savefile=False):
    """
    Extract all features from an EMG dataframe.

    INPUT
        :data: - Dataframe of loaded EMG data from CSV
        :savefile: - optional CSV filename; when truthy the features are
            also written via save_features().
    OUTPUT
        :features: - dict mapping short names ('rms', 'var', 'mav',
            'zc', 'wfl') to the corresponding feature result.
    """
    extractors = {
        'rms': get_RMS,
        'var': get_Var,
        'mav': get_MAV,
        'zc': get_zero_crossings,
        'wfl': get_waveform_length,
    }
    features = {name: extract(data) for name, extract in extractors.items()}
    if savefile:
        save_features(features, savefile)
    return features
def save_features(data_dict, f):
    """
    Save extracted features to a CSV file, one row per feature, tagged
    with the feature's name in a 'feature' column.

    INPUTs
        :data_dict: - dict of feature name -> pandas Series or one-row
            DataFrame, like the dict from get_record_features()
        :f: - CSV filename (or writable buffer) to save features to
    """
    # Bug fixes vs the original: it indexed data_dict[0] (features are
    # keyed by name, not position), tagged rows with the *builtin* `id`
    # instead of the feature name, and passed the nonexistent
    # `ignore_axis` keyword to pd.concat (the keyword is ignore_index).
    rows = []
    for name, feature in data_dict.items():
        # Series results (RMS/Var/MAV/WFL) become a one-row frame;
        # DataFrame results (zero crossings) are copied as-is.
        row = feature.to_frame().T if isinstance(feature, pd.Series) else feature.copy()
        row = row.reset_index(drop=True)
        row.loc[0, 'feature'] = name
        rows.append(row)
    out = pd.concat(rows, ignore_index=True)
    out.to_csv(f, index=False)
def get_RMS(data):
    """
    Root-mean-square of each dataframe column.

    INPUT
        :data: - Dataframe of loaded EMG data from CSV
    OUTPUT
        Series of per-column RMS values.
    """
    sample_count = data.shape[0]
    mean_square = (data ** 2).sum(axis=0) / sample_count
    return np.sqrt(mean_square)
def get_Var(data):
    """
    Population variance (ddof=0) of each dataframe column.

    INPUT
        :data: - Dataframe of loaded EMG data from CSV
    OUTPUT
        Series of per-column variances.
    """
    std_per_column = np.std(data, axis=0)
    return std_per_column ** 2
def get_MAV(data):
    """
    Mean absolute value of each dataframe column.

    INPUT
        :data: - Dataframe of loaded EMG data from CSV
    OUTPUT
        Series of per-column MAVs.
    """
    rectified = np.abs(data)
    return np.mean(rectified, axis=0)
def get_zero_crossing_matlab():
    """
    DEPRECIATED - See get_zero_crossings() function below
    Function to get number of zero crossings feature by using MATLAB script
    """
    # NOTE(review): `eng` (a MATLAB engine handle) is not defined in this
    # module -- calling this raises NameError; kept only for history.
    Z = eng.Zero_Crossing()
    return np.array(Z).astype(int)
def get_zero_crossings(data):
    """
    Count sign changes (zero crossings) in each column.

    INPUT
        :data: - Dataframe of loaded EMG data from CSV
    OUTPUT
        One-row dataframe with the per-column crossing counts.
    """
    # Fix: the original wrote counts via chained indexing
    # (crossings[col].loc[0] = ...), which raises SettingWithCopyWarning
    # and silently fails to write under pandas copy-on-write. Build the
    # frame from the computed counts instead.
    counts = {
        col: len(np.where(np.diff(np.sign(data[col])))[0])
        for col in list(data)
    }
    return pd.DataFrame(counts, index=[0])
def get_waveform_length_matlab():
    """
    DEPRECIATED
    Use function below instead (get_waveform_length)
    """
    # NOTE(review): `eng` (a MATLAB engine handle) is not defined in this
    # module -- calling this raises NameError; kept only for history.
    W = eng.Waveform_Length()
    return np.array(W).astype(int)
def get_waveform_length(data):
    """
    Waveform length (sum of absolute sample-to-sample differences) of
    each dataframe column.

    INPUT
        :data: - Dataframe of loaded EMG data from CSV
    OUTPUT
        Series of per-column waveform-length sums.
    """
    step_sizes = data.diff().abs()
    return step_sizes.sum(axis=0)
def get_all_files(dir):
    """Interactively dispatch to explicit file selection (0) or
    regex-based selection (1); any other answer yields []."""
    c = input('Select specific files (enter 0) or select based on regex (enter 1)? ')
    if c == '0':
        return select_batch(dir)
    if c == '1':
        return regex_batch(dir)
    return []
def select_batch(dir):
    """
    Interactively choose files from `dir` by printed index (comma
    separated, or -1 for all).

    Returns the chosen paths, skipping files that are already snipped
    (name contains '_snipped') or that have a '<name>_snipped.csv'
    sibling on disk.
    """
    print('\n' + '=' * 40 + '\nSELECT FILES\n')
    # Hoisted: the sorted listing was rebuilt on every loop iteration.
    listing = sorted(set(os.listdir(dir)))
    for i, f in enumerate(listing):
        print(i, ':', f)
    c = input('\nInput number choices separated by commas or -1 for all: ')
    c = c.replace(' ', '').split(',')
    if c[0] == '-1':
        c = range(len(os.listdir(dir)))
    files = []
    for e in c:
        try:
            # Bug fix: the original validated int(c[0]) here, so a
            # non-numeric entry later in the list slipped through and
            # raised an uncaught ValueError at the indexing step below.
            index = int(e)
        except ValueError:
            print('Could not use %s as index. Not an integer.' % e)
            continue
        f = listing[index]
        alternate_file = os.path.join(dir, '.'.join(f.split('.')[:-1]) + '_snipped.csv')
        if '_snipped' in f or os.path.exists(alternate_file):
            continue
        files.append(os.path.join(dir, f))
    return files
def regex_batch(dir):
    """Placeholder: regex-based selection is not implemented; prints a
    notice and returns an empty list."""
    notice = ('Regex functionality not complete yet, please '
              'select by specific files for now... Exiting...')
    print(notice)
    return []
if __name__ == '__main__':
    # Batch mode walks the selection flow; test mode extracts features
    # from one known record.
    c = input('Batch or test (b/t): ')
    if c == 'b':
        c = input('Select specific files (enter 0) or select based on regex (enter 1)? ')
        if c == '0':
            select_batch('myo_data')
        elif c == '1':
            regex_batch('myo_data')
    elif c == 't':
        f = os.path.join('myo_data','myo_record_0.csv')
        df = pd.read_csv(f)
        get_record_features(df)
| 28.86747
| 89
| 0.569839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,628
| 0.50473
|
f029112ff9d652c6d8e36f9059cb703264d4ebbd
| 739
|
py
|
Python
|
Hartree-Fock_H2/utils.py
|
WonhoZhung/CH502
|
c64a174fe7218e6e86c84c73e6df441fb5074211
|
[
"MIT"
] | null | null | null |
Hartree-Fock_H2/utils.py
|
WonhoZhung/CH502
|
c64a174fe7218e6e86c84c73e6df441fb5074211
|
[
"MIT"
] | null | null | null |
Hartree-Fock_H2/utils.py
|
WonhoZhung/CH502
|
c64a174fe7218e6e86c84c73e6df441fb5074211
|
[
"MIT"
] | null | null | null |
#----------------------------------------------------------------------
# Basis Set Exchange
# Version v0.8.13
# https://www.basissetexchange.org
#----------------------------------------------------------------------
#   Basis set: STO-3G
# Description: STO-3G Minimal Basis (3 functions/AO)
#        Role: orbital
#     Version: 1  (Data from Gaussian09)
#----------------------------------------------------------------------
# BASIS "ao basis" PRINT
# #BASIS SET: (3s) -> [1s]
# H    S
#       0.3425250914E+01       0.1543289673E+00
#       0.6239137298E+00       0.5353281423E+00
#       0.1688554040E+00       0.4446345422E+00
# END
# Hydrogen 1s STO-3G primitives taken from the table above:
# A_LIST holds the Gaussian exponents, D_LIST the matching contraction
# coefficients.
A_LIST = [3.425250914 , 0.6239137298, 0.1688554040]
D_LIST = [0.1543289673, 0.5353281423, 0.4446345422]
| 35.190476
| 71
| 0.460081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 617
| 0.834912
|
f02b9d8204d4475686faf005be7403e65cc83f6f
| 115
|
py
|
Python
|
focus_testmap.py
|
Driftwood2D/blue
|
02b5bd3dbbc8fe2836f88c8beaa0955344cc7998
|
[
"MIT"
] | null | null | null |
focus_testmap.py
|
Driftwood2D/blue
|
02b5bd3dbbc8fe2836f88c8beaa0955344cc7998
|
[
"MIT"
] | null | null | null |
focus_testmap.py
|
Driftwood2D/blue
|
02b5bd3dbbc8fe2836f88c8beaa0955344cc7998
|
[
"MIT"
] | null | null | null |
def setup():
    # Insert light.
    # Arguments appear to be: lightmap image, layer, x, y, width,
    # height, RGBA color string -- TODO confirm against the Driftwood
    # light-manager API.
    Driftwood.light.insert("lightmap_circle1.png", 2, 64, 60, 56, 56, "22FF66DD")
| 28.75
| 81
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.408696
|
f02ceba7181acc45bf9bae1d138dd71123a318a6
| 422
|
py
|
Python
|
my_wallet/apiv1/permissions.py
|
ibolorino/wallet_backend
|
20c80e419eaef6b0577ca45ff35bf4eb9501e3a3
|
[
"MIT"
] | null | null | null |
my_wallet/apiv1/permissions.py
|
ibolorino/wallet_backend
|
20c80e419eaef6b0577ca45ff35bf4eb9501e3a3
|
[
"MIT"
] | null | null | null |
my_wallet/apiv1/permissions.py
|
ibolorino/wallet_backend
|
20c80e419eaef6b0577ca45ff35bf4eb9501e3a3
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class IsAadminOrReadOnly(permissions.BasePermission):
    """
    The request is authenticated as a admin, or is a read-only request.

    NOTE: the class name's 'Aadmin' typo is preserved because callers
    import it by this name.
    """

    def has_permission(self, request, view):
        user = request.user
        authenticated_read = bool(
            user and user.is_authenticated and request.method in permissions.SAFE_METHODS
        )
        is_admin = bool(user and user.is_staff)
        return authenticated_read or is_admin
| 35.166667
| 110
| 0.699052
| 382
| 0.905213
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.196682
|
f02d76a5fd8b5ecfd2e0de43f20b301ddaf039ba
| 2,294
|
py
|
Python
|
automate_insurance_pricing/preprocessing/descriptive_functions.py
|
nassmim/automate-insurance-pricing-nezz
|
7a1cc48be9fb78bdadbbf7616fb01d4d6429e06c
|
[
"MIT"
] | 2
|
2021-11-09T15:47:22.000Z
|
2021-11-14T13:54:56.000Z
|
automate_insurance_pricing/preprocessing/descriptive_functions.py
|
nassmim/automate-insurance-pricing-nezz
|
7a1cc48be9fb78bdadbbf7616fb01d4d6429e06c
|
[
"MIT"
] | null | null | null |
automate_insurance_pricing/preprocessing/descriptive_functions.py
|
nassmim/automate-insurance-pricing-nezz
|
7a1cc48be9fb78bdadbbf7616fb01d4d6429e06c
|
[
"MIT"
] | 1
|
2021-07-09T04:12:57.000Z
|
2021-07-09T04:12:57.000Z
|
import pandas as pd
def derive_termination_rate_year(df, start_business_year, extraction_year, main_column_contract_date, policy_id_column_name, column_to_sum_name):
    """Derives the contracts termination rates per year
    Arguments --> the dataframe, the business starting year, the extraction year
                the contracts start date and policy ids and the cancellation columns names
    Returns --> a dictionnary with the termination rates per year and the overall one
                (keyed 'weighted_average', weighted by each year's premium)
    """
    # Policies written in the first business year (one row per policy id).
    df_previous_year = df[df[main_column_contract_date].dt.year == start_business_year].drop_duplicates(subset=policy_id_column_name, keep='first')
    policies_previous_year = df_previous_year[policy_id_column_name]
    termination_rates = {}
    gwp_year = df_previous_year[column_to_sum_name].sum()
    total_gwp = gwp_year
    weighted_rates = 0
    for year in range(start_business_year+1, extraction_year+1):
        df_next_year = df[df[main_column_contract_date].dt.year == year].drop_duplicates(subset=policy_id_column_name, keep='first')
        policies_next_year = df_next_year[policy_id_column_name]
        # Policies from last year's cohort that are still present this year.
        policies_from_previous_year = df_next_year[df_next_year[policy_id_column_name].isin(policies_previous_year)]
        termination_rate = (len(policies_previous_year) - len(policies_from_previous_year)) / len(policies_previous_year)
        termination_rates[year-1] = termination_rate
        # Weight each year's rate by that year's premium volume before
        # rolling the cohort forward.
        weighted_rates += termination_rate * gwp_year
        gwp_year = df_next_year[column_to_sum_name].sum()
        total_gwp += gwp_year
        policies_previous_year = policies_next_year
    termination_rates['weighted_average'] = weighted_rates / total_gwp
    return termination_rates
def create_df_unique_values(df, features):
    """
    Count unique values per feature (mainly useful for categorical columns).

    Arguments --> the dataframe and the list of features (either a list or a string)
    Returns --> a new df with 'feature' and 'number_of_uniques' columns
                (reset_index keeps the old index as an 'index' column)
    """
    unique_counts = df[features].nunique().values
    summary = pd.DataFrame({'feature': features, 'number_of_uniques': unique_counts})
    return summary.reset_index()
| 46.816327
| 148
| 0.735397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 702
| 0.306016
|
f02d80a4afeebaf1a2e3f75631b09c3fc74059e3
| 2,538
|
py
|
Python
|
src/flask_easy/auth.py
|
Josephmaclean/flask-easy
|
64cb647b0dbcd031cb8d27cc60889e50c959e1ca
|
[
"MIT"
] | 1
|
2021-12-30T12:25:05.000Z
|
2021-12-30T12:25:05.000Z
|
src/flask_easy/auth.py
|
Josephmaclean/flask-easy
|
64cb647b0dbcd031cb8d27cc60889e50c959e1ca
|
[
"MIT"
] | null | null | null |
src/flask_easy/auth.py
|
Josephmaclean/flask-easy
|
64cb647b0dbcd031cb8d27cc60889e50c959e1ca
|
[
"MIT"
] | null | null | null |
"""
auth.py
Author: Joseph Maclean Arhin
"""
import os
import inspect
from functools import wraps
import jwt
from flask import request
from jwt.exceptions import ExpiredSignatureError, InvalidTokenError, PyJWTError
from .exc import Unauthorized, ExpiredTokenException, OperationError
def auth_required(other_roles=None):
    """Decorator factory enforcing JWT-based authorization on a view.

    :param other_roles: optional ``|``-separated string of extra role names
        that are also allowed to call the wrapped view.
    :return: a decorator that validates the ``Authorization`` header, decodes
        the bearer token and checks the caller's realm roles before invoking
        the view. Raises ``Unauthorized``, ``ExpiredTokenException`` or
        ``OperationError`` on failure.
    """

    def authorize_user(func):
        """
        A wrapper to authorize an action using
        :param func: {function}` the function to wrap around
        :return:
        """

        @wraps(func)
        def view_wrapper(*args, **kwargs):
            authorization_header = request.headers.get("Authorization")
            if not authorization_header:
                raise Unauthorized("Missing authentication token")

            # A header like "Bearer" (no token) used to raise an uncaught
            # IndexError (HTTP 500); reject it explicitly instead.
            header_parts = authorization_header.split()
            if len(header_parts) < 2:
                raise Unauthorized("Missing authentication token")
            token = header_parts[1]
            try:
                key = os.getenv("JWT_SECRET")  # noqa E501
                payload = jwt.decode(
                    token, key=key, algorithms=["HS256", "RS256"]
                )  # noqa E501

                # Get realm roles from payload. Default to an empty mapping /
                # list so a token without a "realm_access" claim fails
                # authorization cleanly instead of raising an uncaught
                # AttributeError.
                available_roles = (payload.get("realm_access") or {}).get(
                    "roles", []
                )

                # Append service name to function name to form role
                # generated_role = service_name + "_" + func.__name__
                generated_role = "s"

                authorized_roles = []
                if other_roles:
                    authorized_roles = other_roles.split("|")

                authorized_roles.append(generated_role)
                if is_authorized(authorized_roles, available_roles):
                    # Inject the caller's identity only when the view asks
                    # for it via a "user_id" parameter.
                    if "user_id" in inspect.getfullargspec(func).args:
                        kwargs["user_id"] = payload.get(
                            "preferred_username"
                        )  # noqa E501
                    return func(*args, **kwargs)
            except ExpiredSignatureError as error:
                raise ExpiredTokenException("Token Expired") from error
            except InvalidTokenError as error:
                raise OperationError("Invalid Token") from error
            except PyJWTError as error:
                raise OperationError("Error decoding token") from error

            # Authenticated but not authorized for this view.
            raise Unauthorized(status_code=403)

        return view_wrapper

    return authorize_user
def is_authorized(access_roles, available_roles):
    """Return True when at least one requested role is available."""
    return any(role in available_roles for role in access_roles)
| 32.126582
| 79
| 0.593775
| 0
| 0
| 0
| 0
| 1,719
| 0.677305
| 0
| 0
| 618
| 0.243499
|
f02f263b4792b69303bcdec39c484284dc805802
| 1,221
|
py
|
Python
|
src/prefect/engine/result_handlers/secret_result_handler.py
|
trapped/prefect
|
128f11570c35e7156d65ba65fdcbc1f4ccd7c2b7
|
[
"Apache-2.0"
] | 1
|
2019-12-20T07:43:55.000Z
|
2019-12-20T07:43:55.000Z
|
src/prefect/engine/result_handlers/secret_result_handler.py
|
trapped/prefect
|
128f11570c35e7156d65ba65fdcbc1f4ccd7c2b7
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/engine/result_handlers/secret_result_handler.py
|
trapped/prefect
|
128f11570c35e7156d65ba65fdcbc1f4ccd7c2b7
|
[
"Apache-2.0"
] | null | null | null |
import json
from typing import Any
import prefect
from prefect.engine.result_handlers import ResultHandler
class SecretResultHandler(ResultHandler):
"""
Hook for storing and retrieving sensitive task results from a Secret store. Only intended to be used
for Secret tasks.
Args:
- secret_task (Task): the Secret Task that this result handler will be used for
"""
def __init__(self, secret_task: "prefect.tasks.secrets.Secret") -> None:
self.secret_task = secret_task
super().__init__()
def read(self, name: str) -> Any:
"""
Read a secret from a provided name with the provided Secret class;
this method actually retrieves the secret from the Secret store.
Args:
- name (str): the name of the secret to retrieve
Returns:
- Any: the deserialized result
"""
return self.secret_task.run() # type: ignore
def write(self, result: Any) -> str:
"""
Returns the name of the secret.
Args:
- result (Any): the result to write
Returns:
- str: the JSON representation of the result
"""
return self.secret_task.name
| 27.133333
| 104
| 0.626536
| 1,110
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 774
| 0.633907
|
f02f4e1f7df53040bb2247eb8bc8db48f7b3454e
| 9,283
|
py
|
Python
|
hnn_core/tests/test_dipole.py
|
mkhalil8/hnn-core
|
a761e248ddf360710dd60638269f70361f5d6cb3
|
[
"BSD-3-Clause"
] | null | null | null |
hnn_core/tests/test_dipole.py
|
mkhalil8/hnn-core
|
a761e248ddf360710dd60638269f70361f5d6cb3
|
[
"BSD-3-Clause"
] | null | null | null |
hnn_core/tests/test_dipole.py
|
mkhalil8/hnn-core
|
a761e248ddf360710dd60638269f70361f5d6cb3
|
[
"BSD-3-Clause"
] | null | null | null |
import os.path as op
from urllib.request import urlretrieve
import matplotlib
import numpy as np
from numpy.testing import assert_allclose
import pytest
import hnn_core
from hnn_core import read_params, read_dipole, average_dipoles
from hnn_core import Network, jones_2009_model
from hnn_core.viz import plot_dipole
from hnn_core.dipole import Dipole, simulate_dipole, _rmse
from hnn_core.parallel_backends import requires_mpi4py, requires_psutil
matplotlib.use('agg')
def test_dipole(tmpdir, run_hnn_core_fixture):
    """Test dipole object.

    Covers scaling/smoothing, file round-trips, averaging (including the
    degenerate single-dipole and zero-dipole cases), experimental dipoles,
    and the deprecated postproc pathway.
    """
    hnn_core_root = op.dirname(hnn_core.__file__)
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    dpl_out_fname = tmpdir.join('dpl1.txt')
    params = read_params(params_fname)
    # Random dipole data; renormalize/convert as the simulation pipeline does.
    times = np.arange(0, 6000 * params['dt'], params['dt'])
    data = np.random.random((6000, 3))
    dipole = Dipole(times, data)
    dipole._baseline_renormalize(params['N_pyr_x'], params['N_pyr_y'])
    dipole._convert_fAm_to_nAm()
    # test smoothing and scaling
    dipole_raw = dipole.copy()
    dipole.scale(params['dipole_scalefctr'])
    dipole.smooth(window_len=params['dipole_smooth_win'])
    # Smoothing + scaling must change the data in place...
    with pytest.raises(AssertionError):
        assert_allclose(dipole.data['agg'], dipole_raw.data['agg'])
    # ...and must equal scale-factor times the smoothed raw data.
    assert_allclose(dipole.data['agg'],
                    (params['dipole_scalefctr'] * dipole_raw.smooth(
                        params['dipole_smooth_win']).data['agg']))
    dipole.plot(show=False)
    plot_dipole([dipole, dipole], show=False)
    # Test IO
    dipole.write(dpl_out_fname)
    dipole_read = read_dipole(dpl_out_fname)
    assert_allclose(dipole_read.times, dipole.times, rtol=0, atol=0.00051)
    for dpl_key in dipole.data.keys():
        assert_allclose(dipole_read.data[dpl_key],
                        dipole.data[dpl_key], rtol=0, atol=0.000051)
    # average two identical dipole objects
    dipole_avg = average_dipoles([dipole, dipole_read])
    for dpl_key in dipole_avg.data.keys():
        assert_allclose(dipole_read.data[dpl_key],
                        dipole_avg.data[dpl_key], rtol=0, atol=0.000051)
    # Averaging an already-averaged dipole again must be rejected.
    with pytest.raises(ValueError, match="Dipole at index 0 was already an "
                       "average of 2 trials"):
        dipole_avg = average_dipoles([dipole_avg, dipole_read])
    # average an n_of_1 dipole list
    single_dpl_avg = average_dipoles([dipole])
    for dpl_key in single_dpl_avg.data.keys():
        assert_allclose(
            dipole_read.data[dpl_key],
            single_dpl_avg.data[dpl_key],
            rtol=0,
            atol=0.000051)
    # average dipole list with one dipole object and a zero dipole object
    n_times = len(dipole_read.data['agg'])
    dpl_null = Dipole(np.zeros(n_times, ), np.zeros((n_times, 3)))
    dpl_1 = [dipole, dpl_null]
    dpl_avg = average_dipoles(dpl_1)
    for dpl_key in dpl_avg.data.keys():
        # Averaging with a zero dipole halves every channel.
        assert_allclose(dpl_1[0].data[dpl_key] / 2., dpl_avg.data[dpl_key])
    # Test experimental dipole
    dipole_exp = Dipole(times, data[:, 1])
    dipole_exp.write(dpl_out_fname)
    dipole_exp_read = read_dipole(dpl_out_fname)
    assert_allclose(dipole_exp.data['agg'], dipole_exp_read.data['agg'],
                    rtol=1e-2)
    dipole_exp_avg = average_dipoles([dipole_exp, dipole_exp])
    assert_allclose(dipole_exp.data['agg'], dipole_exp_avg.data['agg'])
    # XXX all below to be deprecated in 0.3
    dpls_raw, net = run_hnn_core_fixture(backend='joblib', n_jobs=1,
                                         reduced=True, record_isoma=True,
                                         record_vsoma=True)
    # test deprecation of postproc
    with pytest.warns(DeprecationWarning,
                      match='The postproc-argument is deprecated'):
        dpls, _ = run_hnn_core_fixture(backend='joblib', n_jobs=1,
                                       reduced=True, record_isoma=True,
                                       record_vsoma=True, postproc=True)
    with pytest.raises(AssertionError):
        assert_allclose(dpls[0].data['agg'], dpls_raw[0].data['agg'])
    # Manual post-processing of the raw dipole must reproduce postproc=True.
    dpls_raw[0]._post_proc(net._params['dipole_smooth_win'],
                           net._params['dipole_scalefctr'])
    assert_allclose(dpls_raw[0].data['agg'], dpls[0].data['agg'])
def test_dipole_simulation():
    """Test data produced from simulate_dipole() call.

    Checks argument validation, that Network.copy() strips simulation
    artifacts, Dipole.copy() fidelity, and the no-connections warning path.
    """
    hnn_core_root = op.dirname(hnn_core.__file__)
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)
    # Shrink the network and drives so the simulation stays fast.
    params.update({'N_pyr_x': 3,
                   'N_pyr_y': 3,
                   'dipole_smooth_win': 5,
                   't_evprox_1': 5,
                   't_evdist_1': 10,
                   't_evprox_2': 20})
    net = jones_2009_model(params, add_drives_from_params=True)
    # Invalid arguments must be rejected before any simulation runs.
    with pytest.raises(ValueError, match="Invalid number of simulations: 0"):
        simulate_dipole(net, tstop=25., n_trials=0)
    with pytest.raises(TypeError, match="record_vsoma must be bool, got int"):
        simulate_dipole(net, tstop=25., n_trials=1, record_vsoma=0)
    with pytest.raises(TypeError, match="record_isoma must be bool, got int"):
        simulate_dipole(net, tstop=25., n_trials=1, record_vsoma=False,
                        record_isoma=0)
    # test Network.copy() returns 'bare' network after simulating
    dpl = simulate_dipole(net, tstop=25., n_trials=1)[0]
    net_copy = net.copy()
    assert len(net_copy.external_drives['evprox1']['events']) == 0
    # test that Dipole.copy() returns the expected exact copy
    assert_allclose(dpl.data['agg'], dpl.copy().data['agg'])
    with pytest.warns(UserWarning, match='No connections'):
        net = Network(params)
        # warning triggered on simulate_dipole()
        simulate_dipole(net, tstop=0.1, n_trials=1)
        # Smoke test for raster plot with no spikes
        net.cell_response.plot_spikes_raster()
@requires_mpi4py
@requires_psutil
def test_cell_response_backends(run_hnn_core_fixture):
    """Test cell_response outputs across backends.

    Runs the same reduced simulation with the joblib and MPI backends and
    checks that recorded voltages/currents, spike timing, and drive event
    times agree.
    """
    # reduced simulation has n_trials=2
    trial_idx, n_trials, gid = 0, 2, 7
    _, joblib_net = run_hnn_core_fixture(backend='joblib', n_jobs=1,
                                         reduced=True, record_isoma=True,
                                         record_vsoma=True)
    _, mpi_net = run_hnn_core_fixture(backend='mpi', n_procs=2, reduced=True,
                                      record_isoma=True, record_vsoma=True)
    n_times = len(joblib_net.cell_response.times)
    # Recorded traces must exist per trial and span the full time axis.
    assert len(joblib_net.cell_response.vsoma) == n_trials
    assert len(joblib_net.cell_response.isoma) == n_trials
    assert len(joblib_net.cell_response.vsoma[trial_idx][gid]) == n_times
    assert len(joblib_net.cell_response.isoma[
               trial_idx][gid]['soma_gabaa']) == n_times
    assert len(mpi_net.cell_response.vsoma) == n_trials
    assert len(mpi_net.cell_response.isoma) == n_trials
    assert len(mpi_net.cell_response.vsoma[trial_idx][gid]) == n_times
    assert len(mpi_net.cell_response.isoma[
               trial_idx][gid]['soma_gabaa']) == n_times
    # The two backends must produce identical recordings.
    assert mpi_net.cell_response.vsoma == joblib_net.cell_response.vsoma
    assert mpi_net.cell_response.isoma == joblib_net.cell_response.isoma
    # Test if spike time falls within depolarization window above v_thresh
    v_thresh = 0.0
    times = np.array(joblib_net.cell_response.times)
    spike_times = np.array(joblib_net.cell_response.spike_times[trial_idx])
    spike_gids = np.array(joblib_net.cell_response.spike_gids[trial_idx])
    vsoma = np.array(joblib_net.cell_response.vsoma[trial_idx][gid])
    v_mask = vsoma > v_thresh
    assert np.all([spike_times[spike_gids == gid] > times[v_mask][0],
                   spike_times[spike_gids == gid] < times[v_mask][-1]])
    # test that event times before and after simulation are the same
    for drive_name, drive in joblib_net.external_drives.items():
        gid_ran = joblib_net.gid_ranges[drive_name]
        for idx_drive, event_times in enumerate(drive['events'][trial_idx]):
            # Collect the simulated spike times emitted by this drive cell.
            net_ets = [spike_times[i] for i, g in enumerate(spike_gids) if
                       g == gid_ran[idx_drive]]
            assert_allclose(np.array(event_times), np.array(net_ets))
def test_rmse():
    """Test to check RMSE calculation.

    Downloads a reference MEG trace (cached on disk after the first run),
    builds a second dipole offset by a known constant, and verifies that
    _rmse recovers exactly that offset.
    """
    data_url = ('https://raw.githubusercontent.com/jonescompneurolab/hnn/'
                'master/data/MEG_detection_data/yes_trial_S1_ERP_all_avg.txt')
    if not op.exists('yes_trial_S1_ERP_all_avg.txt'):
        urlretrieve(data_url, 'yes_trial_S1_ERP_all_avg.txt')
    extdata = np.loadtxt('yes_trial_S1_ERP_all_avg.txt')
    # Replicate the single measured channel across all three dipole columns.
    exp_dpl = Dipole(times=extdata[:, 0],
                     data=np.c_[extdata[:, 1], extdata[:, 1], extdata[:, 1]])
    hnn_core_root = op.join(op.dirname(hnn_core.__file__))
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)
    expected_rmse = 0.1
    # A dipole shifted by a constant has RMSE equal to that constant.
    test_dpl = Dipole(times=extdata[:, 0],
                      data=np.c_[extdata[:, 1] + expected_rmse,
                                 extdata[:, 1] + expected_rmse,
                                 extdata[:, 1] + expected_rmse])
    avg_rmse = _rmse(test_dpl, exp_dpl, tstop=params['tstop'])
    assert_allclose(avg_rmse, expected_rmse)
| 43.378505
| 78
| 0.660885
| 0
| 0
| 0
| 0
| 2,354
| 0.253582
| 0
| 0
| 1,675
| 0.180437
|
f02fc9e2410362e641030d8eb9da915829910a4c
| 1,280
|
py
|
Python
|
setup.py
|
creeston/chinese
|
44317b8aa9b909eda9cf3008f6bd0cf4d92f228c
|
[
"MIT"
] | 15
|
2018-11-15T16:54:41.000Z
|
2022-01-12T00:53:10.000Z
|
setup.py
|
creeston/chinese
|
44317b8aa9b909eda9cf3008f6bd0cf4d92f228c
|
[
"MIT"
] | 1
|
2021-05-19T04:01:21.000Z
|
2021-05-19T04:01:21.000Z
|
setup.py
|
creeston/chinese
|
44317b8aa9b909eda9cf3008f6bd0cf4d92f228c
|
[
"MIT"
] | 5
|
2019-03-01T09:30:34.000Z
|
2022-03-07T19:25:40.000Z
|
from setuptools import setup, find_packages

# Read the long description explicitly as UTF-8 so the build does not
# depend on the platform's default locale encoding.
with open('docs/README-rst', encoding='utf-8') as f:
    desc = f.read()

setup(
    name='chinese',
    version='0.2.1',
    license='MIT',
    url='https://github.com/morinokami/chinese',
    keywords=['Chinese', 'text analysis'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Education',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    description='Chinese text analyzer',
    long_description=desc,
    author='Shinya Fujino',
    author_email='shf0811@gmail.com',
    packages=find_packages(where='src'),
    package_dir={'chinese': 'src/chinese'},
    package_data={'chinese': ['data/cedict.pickle', 'data/dict.txt.big']},
    include_package_data=True,
    install_requires=['jieba', 'pynlpir'],
)
| 32
| 74
| 0.613281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 739
| 0.577344
|
f03149894a1a1db841d1f4b4176a844bc1ba3dd2
| 2,880
|
py
|
Python
|
glacis_core/api/get_keys.py
|
ImperiumSec/glacis_core
|
b9dd0ad0f92dfd89c8ee1791c03ee1a8c6e93500
|
[
"MIT"
] | null | null | null |
glacis_core/api/get_keys.py
|
ImperiumSec/glacis_core
|
b9dd0ad0f92dfd89c8ee1791c03ee1a8c6e93500
|
[
"MIT"
] | null | null | null |
glacis_core/api/get_keys.py
|
ImperiumSec/glacis_core
|
b9dd0ad0f92dfd89c8ee1791c03ee1a8c6e93500
|
[
"MIT"
] | null | null | null |
from ..models import EntityOnServer, AccessToken, Organisation, Server, ServerUser, Key, KeyFetchEvent, AuditNote, AuditEvent, LoginAttempt
from django.template import Context, Template
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from uuid import uuid4
from datetime import datetime
import json
@csrf_exempt
def get_keys(request):
    """
    Get Keys API - used in conjunction with the v2.1 client

    Validates the access token, records a LoginAttempt audit entry, then
    resolves which (if any) SSH key is authorized for the requested user on
    the requested server. Responds with the rendered public key, or an empty
    body when no key is authorized.
    """
    # input is something similar to:
    # {
    #     "server_id":
    #     "access_token": {
    #         id:""
    #         value:""
    #     },
    #     username: ""
    #     origin_ip: ""
    #     key_fp: ""
    #     key_type: ""
    # }
    data = json.loads(request.body)

    # 1. Decide if acceptable request
    token = AccessToken.get_validated_token(data["access_token"]["id"], data["access_token"]["value"])
    # Validate access_token
    # FIXME: refactor all this code to prevent data leakage through errors
    server = Server.objects.filter(active=True).filter(org=token.org).filter(public_id=data["server_id"]).get()

    # Record the login attempt for auditing regardless of the outcome.
    la = LoginAttempt()
    la.username = data['username']
    la.key_fp = data['key_fp']
    la.remote_ip = data['origin_ip']
    la.server_ip = request.META['REMOTE_ADDR']
    la.public_id = str(uuid4())
    la.server = server
    la.audit_type = AuditEvent.TYPE_KEYFETCH
    la.audit_status = AuditEvent.STATUS_OPEN
    la.reported_at = datetime.now()
    la.save()

    # 2. pull key data
    key = None
    server_user = ServerUser.objects.filter(server=server).filter(name=data["username"])

    cont = True
    # A QuerySet never compares equal to the integer 0, so the former
    # `server_user == 0` check could never fire; test emptiness explicitly.
    if not server_user.exists():
        # login attempt for an unknown user on this server
        cont = False

    # `.get()` raises DoesNotExist instead of returning 0, so the former
    # `target_key == 0` check was dead code; handle the exception instead.
    target_key = None
    try:
        target_key = Key.objects.get(key_fingerprint=data["key_fp"])
    except Key.DoesNotExist:
        cont = False

    if cont:
        # look for EntityOnServer to match
        try:
            target_eos = EntityOnServer.objects.filter(server_user=server_user).filter(named_key=target_key).get()
            #print("EOS %s" % target_eos )
            key = target_key
        except Exception:
            # FIXME: do a nicer exception
            # No named-key match; fall back to matching by the key's owner.
            targets = EntityOnServer.objects.filter(server_user=server_user).filter(entity=target_key.owner)
            if len(targets) > 0:
                key = target_key
            else:
                raise Exception("Boom")
        else:
            # Named-key match found: re-resolve the key through its owner and
            # re-check that it is active and the fingerprint still matches.
            key = Key.objects.filter(owner=target_eos.entity).filter(id=target_key.id).get()
            if key.active and key.key_fingerprint == data["key_fp"]:
                pass
            else:
                key = None

    # Key should now be a Key object
    #print ("--> %s" % key)
    output = ""
    if key:
        sub_template = Template("ssh-rsa {{ key.key }}")
        c = Context({"key":key})
        output = sub_template.render(c)
    return HttpResponse(output)
| 27.692308
| 139
| 0.613194
| 0
| 0
| 0
| 0
| 2,536
| 0.880556
| 0
| 0
| 704
| 0.244444
|
f031c64cd48b598cd3b616708c05819e454b8bc1
| 2,870
|
py
|
Python
|
core/translator.py
|
bfu4/mdis
|
fac5ec078ffeaa9339df4b31b9b71140563f4f14
|
[
"MIT"
] | 13
|
2021-05-17T06:38:50.000Z
|
2022-03-27T15:39:57.000Z
|
core/translator.py
|
bfu4/mdis
|
fac5ec078ffeaa9339df4b31b9b71140563f4f14
|
[
"MIT"
] | null | null | null |
core/translator.py
|
bfu4/mdis
|
fac5ec078ffeaa9339df4b31b9b71140563f4f14
|
[
"MIT"
] | null | null | null |
from typing import List
from parser import parse_bytes, split_bytes_from_lines, get_bytes, parse_instruction_set, wrap_parsed_set
from reader import dump_file_hex_with_locs
class Translator:
    """Translates *.mpy files into hex dumps and opcode listings."""

    def __init__(self, file: str):
        """
        Create a new translator.
        :param file: location of the file to translate
        """
        self.file = file

    def get_file_hex(self):
        """
        Produce a full hex dump of the file, annotated with locations.
        :return: the dump as produced by the reader module
        """
        return dump_file_hex_with_locs(self.file)

    def get_file_hex_at(self, _from: str, _to: str):
        """
        Produce a byte dump restricted to an address range.
        :param _from: start address
        :param _to: end address
        :return: bytes between address {_from} and address {_to}
        """
        return parse_bytes(self.get_file_hex(), _from, _to)

    def get_file(self):
        """
        Accessor for the file name.
        :return: the file name
        """
        return self.file

    def get_magic(self) -> str:
        """
        Extract the magic number (first eight byte tokens of the dump).
        :return: the magic number as a string
        """
        first_line = self.get_all_bytes()[0]
        return "".join(first_line[:8])

    def get_all_bytes(self):
        """
        Collect every byte of the file from the hex dump.
        :return: all of the bytes
        """
        dump_lines = self.get_file_hex().split("\n")
        return get_bytes(dump_lines)

    def get_split_bytes(self) -> List[List[str]]:
        """
        Collect the file's bytes grouped per dump line.
        :return: bytes in list-of-lists form
        """
        per_line = split_bytes_from_lines(self.get_all_bytes())
        per_line[0] = per_line[0][4:]
        return per_line

    def get_bytes_at(self, _from: str, _to: str) -> List[List[str]]:
        """
        Collect the bytes between two addresses, grouped per dump line.
        :param _from: start address
        :param _to: end address
        :return: bytes
        """
        return split_bytes_from_lines(self.get_file_hex_at(_from, _to))

    def get_instruction_set(self) -> List[str]:
        """
        Decode the file's instruction set.
        :return: the wrapped, parsed instruction set
        """
        per_line = self.get_split_bytes()
        # offset of 8, start at first BC_BASE_RESERVED
        first_with_offset = per_line[0][4:]
        flat_bytes = self.__flatten([first_with_offset, per_line[1]])
        parsed = parse_instruction_set(flat_bytes)
        return wrap_parsed_set(parsed)

    def get_instructions_at(self, _from: str, _to: str) -> List[str]:
        """
        Decode the instructions between two addresses.
        :param _from: start address
        :param _to: end address
        :return: instructions
        """
        flat_bytes = self.__flatten(self.get_bytes_at(_from, _to))
        parsed = parse_instruction_set(flat_bytes)
        return wrap_parsed_set(parsed)

    def __flatten(self, _list):
        # Method instead of a lambda due to E731; flattens one level.
        flat = []
        for sub in _list:
            flat.extend(sub)
        return flat
| 28.7
| 105
| 0.591289
| 2,693
| 0.938328
| 0
| 0
| 0
| 0
| 0
| 0
| 1,210
| 0.421603
|
f0339846cad63a7692947f289af6990dc4271899
| 3,987
|
py
|
Python
|
easyp2p/p2p_signals.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 4
|
2019-07-18T10:58:28.000Z
|
2021-11-18T16:57:45.000Z
|
easyp2p/p2p_signals.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 1
|
2019-07-05T09:21:47.000Z
|
2019-07-05T09:21:47.000Z
|
easyp2p/p2p_signals.py
|
Ceystyle/easyp2p
|
99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc
|
[
"MIT"
] | 2
|
2019-07-05T08:56:34.000Z
|
2020-06-09T10:03:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider
"""Module implementing Signals for communicating with the GUI."""
from functools import wraps
import logging
from PyQt5.QtCore import QObject, pyqtSignal
class Signals(QObject):
    """Class for signal communication between worker classes and GUI."""

    # Qt signals: progress-bar tick, progress text (message, is_error),
    # abort request, credential request (platform name), and credential
    # delivery (username, password).
    update_progress_bar = pyqtSignal()
    add_progress_text = pyqtSignal(str, bool)
    abort_signal = pyqtSignal()
    get_credentials = pyqtSignal(str)
    send_credentials = pyqtSignal(str, str)

    def __init__(self):
        super().__init__()
        # Abort flag checked by the update_progress decorator before each call.
        self.abort = False
        self.abort_signal.connect(self.abort_evaluation)
        # Tracks whether connect_signals() has linked us to another instance.
        self.connected = False
        self.logger = logging.getLogger('easyp2p.p2p_signals.Signals')
        self.logger.debug('Created Signals instance.')

    def update_progress(self, func):
        """Decorator for updating progress text and progress bar.

        Aborts with PlatformFailedError when the abort flag is set or the
        wrapped call raises RuntimeError; downgrades RuntimeWarning to a
        progress message and a None result. The progress bar is ticked in
        every case (finally).
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                if self.abort:
                    raise RuntimeError('Abort by user')
                result = func(*args, **kwargs)
            except RuntimeError as err:
                self.logger.exception('RuntimeError in update_progress')
                self.add_progress_text.emit(str(err), True)
                raise PlatformFailedError from err
            except RuntimeWarning as err:
                self.logger.warning(
                    'RuntimeWarning in update_progress', exc_info=True)
                self.add_progress_text.emit(str(err), True)
                result = None
            finally:
                self.update_progress_bar.emit()
            return result
        return wrapper

    def watch_errors(self, func):
        """Decorator for emitting error messages to the progress window.

        Same error handling as update_progress, but without the abort check
        and without ticking the progress bar.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
            except RuntimeError as err:
                self.logger.exception('RuntimeError in watch_errors.')
                self.add_progress_text.emit(str(err), True)
                raise PlatformFailedError from err
            except RuntimeWarning as err:
                self.logger.warning(str(err))
                self.add_progress_text.emit(str(err), True)
                result = None
            return result
        return wrapper

    def connect_signals(self, other: 'Signals') -> None:
        """
        Helper method for connecting signals of different classes.

        Args:
            other: Signals instance of another class.
        """
        self.logger.debug('Connecting signals.')
        self.update_progress_bar.connect(other.update_progress_bar)
        self.add_progress_text.connect(other.add_progress_text)
        self.get_credentials.connect(other.get_credentials)
        # Credentials flow in the opposite direction: other -> self.
        other.send_credentials.connect(self.send_credentials)
        self.connected = True
        self.logger.debug('Connecting signals successful.')

    def disconnect_signals(self) -> None:
        """
        Disconnect signals. Ignore error if they were not connected or if
        disconnecting fails.
        """
        if not self.connected:
            return
        self.logger.debug('Disconnecting signals.')
        for signal in [
                self.add_progress_text, self.get_credentials,
                self.update_progress_bar]:
            try:
                signal.disconnect()
            except TypeError:
                # Qt raises TypeError when the signal has no connections.
                self.logger.exception(
                    'Disconnecting signal %s failed.', str(signal))
            else:
                self.logger.debug('Signal %s disconnected.', str(signal))
        self.connected = False

    def abort_evaluation(self):
        """Set the abort flag to True."""
        self.logger.debug('Aborting evaluation.')
        self.abort = True
class PlatformFailedError(Exception):
    """Raised when the evaluation of a P2P platform fails."""
| 34.37069
| 75
| 0.605719
| 3,754
| 0.94156
| 0
| 0
| 1,302
| 0.326561
| 0
| 0
| 1,026
| 0.257336
|
f033f0846a998f9a5ac92cbb40712c19a572ab8c
| 623
|
py
|
Python
|
extra_tests/ctypes_tests/test_unions.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 333
|
2015-08-08T18:03:38.000Z
|
2022-03-22T18:13:12.000Z
|
extra_tests/ctypes_tests/test_unions.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 7
|
2020-02-16T16:49:05.000Z
|
2021-11-26T09:00:56.000Z
|
extra_tests/ctypes_tests/test_unions.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
import sys
from ctypes import *
def test_getattr():
    """Writing the int member must alias the char member's first byte."""
    class Stuff(Union):
        _fields_ = [('x', c_char), ('y', c_int)]

    stuff = Stuff()
    stuff.y = ord('x') | (ord('z') << 24)
    # Which byte of the int overlaps the char depends on endianness.
    expected = b'x' if sys.byteorder == 'little' else b'z'
    assert stuff.x == expected
def test_union_of_structures():
    """Structure members of a union share the same storage."""
    class Stuff(Structure):
        _fields_ = [('x', c_int)]

    class Stuff2(Structure):
        _fields_ = [('x', c_int)]

    class UnionofStuff(Union):
        _fields_ = [('one', Stuff), ('two', Stuff2)]

    combo = UnionofStuff()
    combo.one.x = 3
    # Writing through one member is visible through the other.
    assert combo.two.x == 3
| 21.482759
| 48
| 0.523274
| 282
| 0.452648
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.070626
|
f0348185cb88efdb34b5de39fe352d2ee65ecef9
| 13,977
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpmib.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpmib.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpmib.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpmib(base_resource) :
""" Configuration for SNMP mib resource. """
def __init__(self) :
self._contact = None
self._name = None
self._location = None
self._customid = None
self._ownernode = None
self._sysdesc = None
self._sysuptime = None
self._sysservices = None
self._sysoid = None
self.___count = None
@property
def contact(self) :
r"""Name of the administrator for this Citrix ADC. Along with the name, you can include information on how to contact this person, such as a phone number or an email address. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the information includes one or more spaces, enclose it in double or single quotation marks (for example, "my contact" or 'my contact').<br/>Default value: "WebMaster (default)"<br/>Minimum length = 1.
"""
try :
return self._contact
except Exception as e:
raise e
@contact.setter
def contact(self, contact) :
r"""Name of the administrator for this Citrix ADC. Along with the name, you can include information on how to contact this person, such as a phone number or an email address. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the information includes one or more spaces, enclose it in double or single quotation marks (for example, "my contact" or 'my contact').<br/>Default value: "WebMaster (default)"<br/>Minimum length = 1
"""
try :
self._contact = contact
except Exception as e:
raise e
@property
def name(self) :
r"""Name for this Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a name that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my name" or 'my name').<br/>Default value: "NetScaler"<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for this Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a name that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my name" or 'my name').<br/>Default value: "NetScaler"<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def location(self) :
r"""Physical location of the Citrix ADC. For example, you can specify building name, lab number, and rack number. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the location includes one or more spaces, enclose it in double or single quotation marks (for example, "my location" or 'my location').<br/>Default value: "POP (default)"<br/>Minimum length = 1.
"""
try :
return self._location
except Exception as e:
raise e
@location.setter
def location(self, location) :
r"""Physical location of the Citrix ADC. For example, you can specify building name, lab number, and rack number. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the location includes one or more spaces, enclose it in double or single quotation marks (for example, "my location" or 'my location').<br/>Default value: "POP (default)"<br/>Minimum length = 1
"""
try :
self._location = location
except Exception as e:
raise e
@property
def customid(self) :
r"""Custom identification number for the Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a custom identification that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the ID includes one or more spaces, enclose it in double or single quotation marks (for example, "my ID" or 'my ID').<br/>Default value: "Default"<br/>Minimum length = 1.
"""
try :
return self._customid
except Exception as e:
raise e
@customid.setter
def customid(self, customid) :
r"""Custom identification number for the Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a custom identification that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the ID includes one or more spaces, enclose it in double or single quotation marks (for example, "my ID" or 'my ID').<br/>Default value: "Default"<br/>Minimum length = 1
"""
try :
self._customid = customid
except Exception as e:
raise e
@property
def ownernode(self) :
r"""ID of the cluster node for which we are setting the mib. This is a mandatory argument to set snmp mib on CLIP.<br/>Default value: -1<br/>Maximum length = 31.
"""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
r"""ID of the cluster node for which we are setting the mib. This is a mandatory argument to set snmp mib on CLIP.<br/>Default value: -1<br/>Maximum length = 31
"""
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def sysdesc(self) :
r"""The description of the system.
"""
try :
return self._sysdesc
except Exception as e:
raise e
@property
def sysuptime(self) :
r"""The UP time of the system in 100th of a second.
"""
try :
return self._sysuptime
except Exception as e:
raise e
@property
def sysservices(self) :
r"""The services offered by the system.
"""
try :
return self._sysservices
except Exception as e:
raise e
@property
def sysoid(self) :
r"""The OID of the system's management system.
"""
try :
return self._sysoid
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(snmpmib_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.snmpmib
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.ownernode is not None :
return str(self.ownernode)
return None
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = snmpmib()
updateresource.contact = resource.contact
updateresource.name = resource.name
updateresource.location = resource.location
updateresource.customid = resource.customid
updateresource.ownernode = resource.ownernode
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update snmpmib.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ snmpmib() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_update_parameters(resource[i])
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of snmpmib resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = snmpmib()
if type(resource) != type(unsetresource):
unsetresource.ownernode = resource
else :
unsetresource.ownernode = resource.ownernode
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ snmpmib() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ownernode = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ snmpmib() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].ownernode = resource[i].ownernode
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the snmpmib resources that are configured on netscaler (on ncore deployment).
"""
try :
if not name :
obj = snmpmib()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_cluster(cls, client, name="", option_="") :
r""" Use this API to fetch all the snmpmib resources that are configured on netscaler.
"""
try :
if not name :
obj = snmpmib()
response = obj.get_resources(client, option_)
else :
if type(name) is not list :
if type(name) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name)))
obj = snmpmib()
obj.ownernode = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
if type(name[0]) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
response = [snmpmib() for _ in range(len(name))]
obj = [snmpmib() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = snmpmib()
obj[i].ownernode = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of snmpmib resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = snmpmib()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the snmpmib resources configured on NetScaler.
"""
try :
obj = snmpmib()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of snmpmib resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = snmpmib()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class snmpmib_response(base_response) :
    """Container for the NITRO API response to a snmpmib request."""
    def __init__(self, length=1) :
        # Pre-populate with `length` empty resource objects for the parser to
        # fill.  (The generated code assigned `self.snmpmib = []` first and
        # immediately overwrote it — the dead assignment is removed.)
        self.snmpmib = [snmpmib() for _ in range(length)]
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
| 37.980978
| 387
| 0.701867
| 12,959
| 0.927166
| 0
| 0
| 11,423
| 0.817271
| 0
| 0
| 7,096
| 0.507691
|
f034b8b6b6d0852450c50577d53070c406d80750
| 770
|
py
|
Python
|
lambda-archive/lambda-functions/codebreaker-update-testcaseCount/lambda_function.py
|
singaporezoo/codebreaker-official
|
1fe5792f1c36f922abd0836d8dcb42d271a9323d
|
[
"MIT"
] | 11
|
2021-09-19T06:32:44.000Z
|
2022-03-14T19:09:46.000Z
|
lambda-archive/lambda-functions/codebreaker-update-testcaseCount/lambda_function.py
|
singaporezoo/codebreaker-official
|
1fe5792f1c36f922abd0836d8dcb42d271a9323d
|
[
"MIT"
] | null | null | null |
lambda-archive/lambda-functions/codebreaker-update-testcaseCount/lambda_function.py
|
singaporezoo/codebreaker-official
|
1fe5792f1c36f922abd0836d8dcb42d271a9323d
|
[
"MIT"
] | 1
|
2022-03-02T13:27:27.000Z
|
2022-03-02T13:27:27.000Z
|
import json
import boto3 # Amazon S3 client library
# Module-level AWS handles: created once per Lambda container and reused
# across invocations of lambda_handler below.
s3 = boto3.resource('s3')
dynamodb = boto3.resource('dynamodb')
# Table keyed by problemName; testcaseCount is written by lambda_handler.
problems_table = dynamodb.Table('codebreaker-problems')
# Bucket holding the test data files, laid out under "<problemName>/".
bucket = s3.Bucket('codebreaker-testdata')
def lambda_handler(event, context):
    """Count the S3 test-data objects for a problem and store the testcase count.

    event: Lambda event dict; must carry 'problemName'.
    context: Lambda context (unused).
    Returns a dict with statusCode and the raw object count.

    The stored count is the object count divided by two — presumably each
    testcase is an input/output file pair; TODO confirm against the uploader.
    """
    problemName = event['problemName']
    # sum() over the paginated collection replaces the manual counter loop.
    testcaseCount = sum(1 for _ in bucket.objects.filter(Prefix="{0}/".format(problemName)))
    print(testcaseCount)
    problems_table.update_item(
        Key = {'problemName':problemName},
        # Plain string: the expression has no interpolation, so the original
        # f-string prefix was misleading (same runtime value).
        UpdateExpression = 'set #b=:a',
        ExpressionAttributeValues={':a':int(testcaseCount/2)},
        ExpressionAttributeNames={'#b':'testcaseCount'}
    )
    return {
        'statusCode': 200,
        'testcaseCount':testcaseCount
    }
| 28.518519
| 72
| 0.672727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.231169
|
f0351c577a324e005d0a9c1acdf54fc0b4f28867
| 2,003
|
py
|
Python
|
bin/varipack.py
|
angrydill/ItsyBitser
|
bf9689136748bef3d022aa7529b4529e610abbf7
|
[
"MIT"
] | null | null | null |
bin/varipack.py
|
angrydill/ItsyBitser
|
bf9689136748bef3d022aa7529b4529e610abbf7
|
[
"MIT"
] | 1
|
2021-04-26T15:31:50.000Z
|
2021-04-26T15:31:50.000Z
|
bin/varipack.py
|
angrydill/ItsyBitser
|
bf9689136748bef3d022aa7529b4529e610abbf7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" Packs/unpacks Hextream content to/from the Varipacker format """
import sys
import argparse
from itsybitser import hextream, varipacker
def main():
    """ Program entry point """
    def build_parser():
        # CLI definition kept in one place; exactly one of -p/-u is required.
        parser = argparse.ArgumentParser(
            description="Packs/unpacks Hextream content to/from the Varipacker format"
        )
        commands = parser.add_mutually_exclusive_group(required=True)
        commands.add_argument("-p", "--pack", action="store_true",
                              help="Pack Hextream content into Varipacker format")
        commands.add_argument("-u", "--unpack", action="store_true",
                              help="Unpack Varipacker content into Hextream format")
        parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding="UTF-8"),
                            help="Name of file with content to be packed/unpacked",
                            default=sys.stdin)
        parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding="UTF-8"),
                            help="Name of file in which to write packed/unpacked content",
                            default=sys.stdout)
        parser.add_argument("-c", "--comment", type=str,
                            help="Prepend the output with specified comment string")
        parser.add_argument("-n", "--omit-newline", action="store_true",
                            help="The ending newline character(s) will be omitted from the output")
        return parser

    args = build_parser().parse_args()
    source_content = args.infile.read()
    if args.pack:
        output_content = varipacker.encode(hextream.decode(source_content))
    else:
        output_content = hextream.encode(varipacker.decode(varipacker.distill(source_content)))
    if args.comment:
        args.outfile.write("# {}\n".format(args.comment))
    args.outfile.write(output_content)
    if not args.omit_newline:
        args.outfile.write("\n")

if __name__ == "__main__":
    main()
| 41.729167
| 95
| 0.642536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 655
| 0.327009
|
f035d65dd23cce88533c77e43cfdaf49e3f5a500
| 1,665
|
py
|
Python
|
reobject/models/model.py
|
agusmakmun/reobject
|
a7689bbb37f021c6c8ea72d6984513adec0d8a17
|
[
"Apache-2.0"
] | 92
|
2017-02-08T21:51:03.000Z
|
2021-05-27T22:58:07.000Z
|
reobject/models/model.py
|
agusmakmun/reobject
|
a7689bbb37f021c6c8ea72d6984513adec0d8a17
|
[
"Apache-2.0"
] | 15
|
2017-01-27T22:54:42.000Z
|
2021-05-27T01:31:58.000Z
|
reobject/models/model.py
|
agusmakmun/reobject
|
a7689bbb37f021c6c8ea72d6984513adec0d8a17
|
[
"Apache-2.0"
] | 7
|
2017-02-08T22:00:41.000Z
|
2021-05-26T00:26:44.000Z
|
import attr
from reobject.models.manager import ManagerDescriptor, RelatedManagerDescriptor
from reobject.models.store import Store, ModelStoreMapping
class ModelBase(type):
    """
    Metaclass for all models, used to attach the objects class attribute
    to the model instance at runtime.
    """
    def __new__(cls, name, bases, attrs):
        # Every model class gets its own manager descriptor.
        attrs['objects'] = ManagerDescriptor()
        new_cls = super(ModelBase, cls).__new__(cls, name, bases, attrs)
        decorated = attr.s(new_cls)
        # Direct subclasses of Model get a backing object store registered.
        if 'Model' in [base.__name__ for base in bases]:
            ModelStoreMapping[decorated.__name__] = Store()
        return decorated
class Model(object, metaclass=ModelBase):
    def __attrs_post_init__(self):
        pass

    def __new__(cls, *args, **kwargs):
        instance = super(Model, cls).__new__(cls)
        # Attach a reverse manager (e.g. "book_set") to every related target.
        reverse_name = cls.__name__.lower() + '_set'
        for field in attr.fields(cls):
            related = field.metadata.get('related')
            if related:
                setattr(related['target'], reverse_name,
                        RelatedManagerDescriptor(model=cls))
        return cls.objects.add(instance)

    @property
    def id(self) -> int:
        """Return a unique integer identifier for the object."""
        return id(self)

    @property
    def pk(self) -> int:
        """Return a unique integer identifier for the object.

        Alias of the id property.
        """
        return self.id

    def delete(self) -> None:
        type(self).objects._delete(self)

    @property
    def _attrs(self):
        return set(self.__dict__.keys()) | {'id'}
| 24.850746
| 79
| 0.581381
| 1,507
| 0.905105
| 0
| 0
| 399
| 0.23964
| 0
| 0
| 359
| 0.215616
|
f035dcaa83c43d11bb11c5eb08b76bef164f27d0
| 11,525
|
py
|
Python
|
label.py
|
winstonwzhang/osumapper
|
e773b45650f8a013de48ff169a93ea1745c6f931
|
[
"Apache-2.0"
] | null | null | null |
label.py
|
winstonwzhang/osumapper
|
e773b45650f8a013de48ff169a93ea1745c6f931
|
[
"Apache-2.0"
] | null | null | null |
label.py
|
winstonwzhang/osumapper
|
e773b45650f8a013de48ff169a93ea1745c6f931
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import sys
import pdb
import numpy as np
from math import log, e
from scipy.signal import find_peaks
import word
from utils import *
# hit object subword integer representations from word.py
h_int = word.obj_str2int[word.HITCIRCLE]       # hit circle
e_int = word.obj_str2int[word.EMPTY]           # no object on this tick
sb_int = word.obj_str2int[word.SLIDER_BEGIN]   # slider head
sc_int = word.obj_str2int[word.SLIDER_CENTER]  # slider body tick
se_int = word.obj_str2int[word.SLIDER_END]     # slider tail
b_int = word.obj_str2int[word.BREAK]           # break section
# movement direction codes (compass directions)
SW_int = word.dir_str2int[word.SW]
SE_int = word.dir_str2int[word.SE]
NW_int = word.dir_str2int[word.NW]
NE_int = word.dir_str2int[word.NE]
W_int = word.dir_str2int[word.W]
E_int = word.dir_str2int[word.E]
N_int = word.dir_str2int[word.N]
# cursor velocity codes, slowest to fastest of the ones used here
crawl_int = word.vel_str2int[word.CRAWL]
slow_int = word.vel_str2int[word.SLOW]
med_int = word.vel_str2int[word.MED]
def loadModelPred(arr_file, sec_len):
    '''
    Load the per-bin hit-object predictions saved by the model.

    arr_file: path to numpy array predictions saved by model
    sec_len: length of original song in seconds
    Returns (label_arr, crop_sec), where crop_sec is the cropped audio
    length in seconds implied by the spectrogram hop size (512 @ 16 kHz).
    '''
    # process label arr (from model prediction)
    label_arr = np.load(arr_file)
    # Removed the unused `num_bins = len(label_arr)` local.
    # cropped audio length (from spectrogram calculations) in seconds
    crop_sec = int(np.floor(sec_len*16000/512)*512)/16000
    return label_arr, crop_sec
def setSlider(tick_obj, word_arr, ss, se):
    """Mark ticks ss..se as one slider and set its direction/velocity codes.

    tick_obj: per-tick hit-object codes (mutated in place)
    word_arr: per-tick (object, direction, velocity) codes (mutated in place)
    ss, se: slider start and end tick indices
    """
    tick_obj[ss] = sb_int
    tick_obj[se] = se_int
    # Random upward-ish direction; single np.random call, same as before.
    slider_dir = np.random.choice(np.array([NW_int, N_int, NE_int]))
    if se - ss > 1:
        # Multi-tick slider: fill body ticks and crawl through them.
        tick_obj[ss + 1:se] = sc_int
        word_arr[ss:se + 1, 1] = slider_dir
        word_arr[ss + 1:se, 2] = crawl_int
    else:
        word_arr[se, 1] = slider_dir
    word_arr[se, 2] = med_int
def getTickBins(tarr, bin_in_sec, N):
    """
    Convert tick times (ms) to spectrogram time-bin indices, dropping
    indices that fall past the end of the spectrogram.

    tarr: tick times in ms (numpy array)
    bin_in_sec: number of spectrogram bins per second
    N: total number of bins
    """
    # shift four bins into the future due to spectrogram window being 4 hop lengths (2048/512)
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and produces the same integer dtype cast.
    tbi = np.around((tarr/1000) * bin_in_sec, decimals=0).astype(int) + 4
    tbi = np.delete(tbi, np.where(tbi >= N))
    return tbi
def getBestShifts(time_bpm, ms_ticks, label_arr, bin_in_sec, N, crop_sec):
    """
    For each uninherited timing section, find the millisecond shift in
    [-200, 200] that best aligns the section's ticks with the model's
    hit-object predictions.

    time_bpm: list of [offset, bpm, meter] per uninherited timing section
    ms_ticks: tick times in ms
    label_arr: per-bin hit-object probabilities
    bin_in_sec: spectrogram bins per second
    N: number of spectrogram bins
    crop_sec: cropped audio length in seconds
    Returns a list with the best shift (ms) for each section.
    """
    # modify offsets to match
    best_shifts = []
    for i, section in enumerate(time_bpm):
        # Section range runs to the next section's offset (or the song end).
        if i == len(time_bpm)-1:
            rg = (section[0], crop_sec*1000)
        else:
            rg = (section[0], time_bpm[i+1][0])
        section_ticks = ms_ticks[np.bitwise_and(ms_ticks >= rg[0], ms_ticks < rg[1])]
        # range of ms shifts to search over
        check_range = np.arange(-200, 201)
        check_sum = np.zeros(check_range.shape)
        for ci, cdiff in enumerate(check_range):
            new_ticks = section_ticks+cdiff
            new_ticks = new_ticks[new_ticks >= 0]
            tbi = getTickBins(new_ticks, bin_in_sec, N)
            check_sum[ci] = label_arr[tbi].sum()
        # smooth sums to find peak of curves
        sumsmooth = smooth(check_sum, win_size=31, method="sg")
        # BUG FIX: only `find_peaks` is imported at the top of the file
        # (`from scipy.signal import find_peaks`); the previous
        # `scipy.signal.find_peaks` only worked if `scipy` happened to leak
        # in via `from utils import *`.  Also drop the unused peak dict.
        pkidx, _ = find_peaks(sumsmooth, prominence=np.ptp(sumsmooth)/10)
        pkshifts = check_range[pkidx]
        # Prefer the detected peak closest to zero shift.
        best_shift = check_range[pkidx[np.argmin(np.abs(pkshifts))]]
        choice = 0
        if choice == 1:
            # Debug visualization, disabled by the flag above.
            import matplotlib.pyplot as plt
            plt.plot(check_range, check_sum)
            plt.plot(check_range, sumsmooth)
            plt.vlines(best_shift, np.min(sumsmooth), np.max(sumsmooth),
                       alpha=0.5, color='r', linestyle='--', label='best shift')
            print('best tick shift: ', best_shift)
        best_shifts.append(best_shift)
    return best_shifts
def label2Array(label_arr, tick_arr, time_bpm, wav_len):
    '''
    Turn per-bin hit-object probabilities into a per-tick "word" array.

    label_arr: array Nx1 with values [0,1] indicating probability of hit object
    tick_arr: ticks in ms
    time_bpm: list of lists, with each element list containing
        [offset, bpm, meter] for each uninherited timing section
    wav_len: length of cropped audio from spectrogram in seconds

    Returns word_arr of shape (num ticks, 3): column 0 hit-object code,
    column 1 direction code, column 2 velocity code (uint8 codes from
    word.py).  Uses np.random, so the output is non-deterministic.
    '''
    ticks = np.copy(tick_arr)
    # set all objects greater than threshold to 3 (hitcircle) for now
    labels = np.copy(label_arr)
    thresh = 0.1
    labels[labels > thresh] = h_int
    labels[labels <= thresh] = e_int
    labels = labels.astype(np.uint8)
    objs = labels == h_int
    N = len(labels)
    bin_len = wav_len / (N-1)  # length of each time bin in seconds
    bin_in_sec = 1 / bin_len  # number of bins in every second
    # convert ticks (ms) to time bin indices
    #tick_diff = np.diff(ticks)
    # only keep tick idx with difference > bin length
    #kept_ticks = np.where(tick_diff > round(bin_len*1000))[0]
    #kept_ticks = ticks[kept_ticks]
    # search for best model predictions for given timing ticks
    # (choice is a hard-coded debug switch; the else branch skips alignment)
    choice = 1
    if choice == 1:
        best_shifts = getBestShifts(time_bpm, ticks, label_arr, bin_in_sec, N, wav_len)
        # Apply each section's best shift to the ticks in that section.
        for i, section in enumerate(time_bpm):
            if i == len(time_bpm)-1:
                rg = (section[0], wav_len*1000)
            else:
                rg = (section[0], time_bpm[i+1][0])
            section_idx = np.bitwise_and(ticks >= rg[0], ticks < rg[1])
            ticks[section_idx] = ticks[section_idx] + best_shifts[i]
        tbi = getTickBins(ticks, bin_in_sec, N)
    else:
        tbi = getTickBins(ticks, bin_in_sec, N)
    # if too many hit objects, increase threshold
    while objs[tbi].sum() > len(objs)/4:
        if thresh >= 0.95:
            break
        thresh += 0.05
        labels[label_arr > thresh] = h_int
        labels[label_arr <= thresh] = e_int
        objs = labels == h_int
    # if too few hit objects, decrease threshold
    while objs[tbi].sum() < len(objs)/10:
        if thresh <= 0.05:
            break
        thresh -= 0.05
        labels[label_arr > thresh] = h_int
        labels[label_arr <= thresh] = e_int
        objs = labels == h_int
    # get final hit objects for each tick
    tick_obj = labels[tbi]
    # initialize word array: default direction E, default velocity slow
    word_arr = np.zeros((len(tick_obj), 3)).astype(np.uint8)
    word_arr[:,0] = tick_obj.flatten()
    word_arr[:,1] = E_int
    word_arr[:,2] = slow_int
    # now look for potential slider starts and ends
    # two consecutive hitcircles: slider start and end
    hits = np.copy(tick_obj).flatten()
    # object index
    h_idx = np.where(hits == h_int)[0]
    # object mask
    h_mask = np.zeros(h_idx.shape, dtype=bool)
    # difference in ticks between each hit object
    diff = np.diff(h_idx)
    ### JUMPS: find distribution of tick differences (exclude high tick diff)
    diff_dist, _ = np.histogram(diff[diff<10], bins=np.arange(11))
    # most common tick difference is assumed to be the
    # base time difference between hitcircle jumps
    # exclude 1 tick difference
    jump_diff = np.argmax(diff_dist[2:])+2
    # mask of all ticks with constant base tick diff
    base_mask = diff == jump_diff
    # jump sections have constant base tick diff for longer than 4 objects
    jump_starts = []
    jump_mask = np.copy(h_mask)
    jump_areas = pattwhere_sequ([True,True,True], base_mask)
    if jump_areas.any():
        jump_idx_list = consecutive(jump_areas)
        # store starting tick idx and length of every jump section (hit circles > 4)
        for jump_idx in jump_idx_list:
            sec_len = len(jump_idx)
            # section length + 2 from the extension of [True,True,True] window
            tup = (jump_idx[0], sec_len+2)
            jump_starts.append(tup)
            jump_mask[tup[0]:tup[0]+tup[1]+1] = True
    # jumps should have changing direction (either SW or SE)
    # jumps should have medium velocity
    for jtup in jump_starts:
        s_hidx = jtup[0]
        s_hlen = jtup[1]
        # first hitcircle in jump section won't change velocity
        jump_idx = h_idx[s_hidx+1:s_hidx+s_hlen+1]
        word_arr[jump_idx,1] = np.random.choice(np.array([W_int, E_int, SW_int, SE_int]))
        word_arr[jump_idx,2] = med_int
        # break up long jump sections with sliders
        limit = np.random.randint(6,11)
        if s_hlen > limit:
            num_breaks = s_hlen // limit
            ss_idx = np.arange(s_hidx+limit, s_hidx+s_hlen, limit)
            # NOTE(review): h_idx[ss+1] can index past the end for a jump
            # section ending at the last hit object — confirm upstream bounds.
            for ss in ss_idx:
                setSlider(tick_obj, word_arr, h_idx[ss], h_idx[ss+1])
    ### STREAMS: store starting tick idx and length of every stream (> 3 consec hitcircles)
    stream_starts = []
    stream_mask = np.copy(h_mask)
    # find all occurrences of two consecutive hitcircles
    twos = pattwhere_sequ([h_int, h_int], hits)
    if twos.any():
        # 2 consecutive twos = 3 hitcircles, 3 consec twos = 4 hitcircles, etc
        twos_idx_list = consecutive(twos)
        for twos_idx in twos_idx_list:
            tup_num = len(twos_idx)
            # >= 3 consec hitcircles (stream)
            if tup_num > 1:
                # store tuple (stream starting tick index, stream length in ticks)
                tup = (twos_idx[0], tup_num+1)
                stream_starts.append(tup)
                stream_obj_mask = np.bitwise_and(h_idx >= tup[0], h_idx < tup[0]+tup[1])
                stream_mask[stream_obj_mask] = True
    # streams should have a constant direction (either NW or NE)
    # streams should have 'c' velocity (unless spaced streams are wanted)
    for stup in stream_starts:
        sidx = stup[0]
        slen = stup[1]
        # first hitcircle in stream won't change direction or velocity
        word_arr[sidx+1:sidx+slen,1] = np.random.choice(np.array([NW_int, N_int, NE_int]))
        word_arr[sidx+1:sidx+slen,2] = np.random.choice(np.array([crawl_int, slow_int]))
    ### SLIDERS: hit objects not belonging to jump or stream sections
    slider_mask = ~(jump_mask | stream_mask)
    slider_idx = h_idx[slider_mask]
    slider_diff = np.diff(slider_idx)
    # gaps less than threshold (10 ticks) can be made sliders
    obj_avail = np.where(slider_diff < 11)[0]
    # use every other obj as slider start
    slider_starts = slider_idx[obj_avail[::3]]
    slider_ends = slider_idx[obj_avail[::3]+1]
    # sliders should have a changing direction (NW, N, NE)
    # slider centers should have 'c' velocity
    # slider ends should have 'm' slider velocity
    for ss, se in zip(slider_starts, slider_ends):
        setSlider(tick_obj, word_arr, ss, se)
    ### remove lone objects
    #circle_idx = tick_obj == 3
    #for ci in circle_idx:
    #    print(ci)
    # final update to word_arr (setSlider mutated tick_obj above)
    word_arr[:,0] = tick_obj.flatten()
    # visualize word_arr
    #import matplotlib.pyplot as plt
    #plt.plot(word_arr[:,0])
    #plt.plot(word_arr[:,1])
    #plt.plot(word_arr[:,2])
    #plt.show()
    #pdb.set_trace()
    return word_arr
def array2Label(tick_arr, arr, wav_len, num_bins):
    '''
    Scatter per-tick word information back into a per-spectrogram-bin array.

    tick_arr: ticks in ms
    arr: [num ticks] x 3 array with hitobject, direction, and velocity information
    wav_len: length of cropped audio from spectrogram in seconds
    num_bins: number of bins in spectrogram
    Returns a (num_bins, 3) float array with arr rows placed at tick bins.
    '''
    N = num_bins
    bin_len = wav_len / (num_bins - 1)  # length of each time bin in seconds
    bin_in_sec = 1 / bin_len  # number of bins in every second
    labels = np.zeros((N, 3))
    # convert ticks (ms) to time bin indices
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement.
    tbi = np.floor((tick_arr / 1000) * bin_in_sec).astype(int)
    # shift four bins into the future due to spectrogram window being 4 hop lengths (2048/512)
    tbi = tbi + 4
    # hit object classes
    labels[tbi, :] = arr
    return labels
| 35.791925
| 98
| 0.632278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,704
| 0.321388
|
f03610bfed11de35610fed3ea3d3b041e18742ab
| 8,982
|
py
|
Python
|
bbp/tests/AcceptTests.py
|
kevinmilner/bbp
|
d9ba291b123be4e85f76317ef23600a339b2354d
|
[
"Apache-2.0"
] | null | null | null |
bbp/tests/AcceptTests.py
|
kevinmilner/bbp
|
d9ba291b123be4e85f76317ef23600a339b2354d
|
[
"Apache-2.0"
] | null | null | null |
bbp/tests/AcceptTests.py
|
kevinmilner/bbp
|
d9ba291b123be4e85f76317ef23600a339b2354d
|
[
"Apache-2.0"
] | 1
|
2018-11-12T23:10:02.000Z
|
2018-11-12T23:10:02.000Z
|
#!/usr/bin/env python
"""
Southern California Earthquake Center Broadband Platform
Copyright 2010-2017 Southern California Earthquake Center
These are acceptance tests for the broadband platforms
$Id: AcceptTests.py 1795 2017-02-09 16:23:34Z fsilva $
"""
from __future__ import division, print_function
# Import Python modules
import os
import new
import sys
import shutil
import optparse
import unittest
# Import Broadband modules
import bband_utils
import seqnum
import cmp_bbp
from install_cfg import InstallCfg
def find_tests(test, rerun):
    """
    Search the accept_inputs directory for .xml test descriptions and
    attach one generated test_* method per file to BBPAcceptanceTests.

    test: substring filter on test file names (None selects every test)
    rerun: when True, remove the resume file so everything runs again
    """
    install = InstallCfg()
    resume = True
    accept_test_inputs = "accept_inputs"
    accept_test_refs = "accept_refs"
    input_dir = os.path.join(install.A_TEST_REF_DIR, accept_test_inputs)
    if not os.path.exists(input_dir):
        # These are expected to be in the dist
        print("Acceptance test inputs dir %s does not exist, aborting" %
              (input_dir))
        sys.exit()
    # Create list of test XML files
    files = os.listdir(input_dir)
    wfext = ".xml"
    # First we find all the tests
    test_files = []
    for testfile in files:
        if testfile.endswith(wfext):
            # Don't add SDSU tests on Mac OS X
            if sys.platform == 'darwin' and testfile.find("SDSU") >= 0:
                # Only announce the skip when the file matches the filter.
                if test is None or (test is not None and testfile.find(test) >= 0):
                    print("*** Mac OS X detected: skipping test %s." %
                          (testfile))
                continue
            if test is None:
                test_files.append(testfile)
            else:
                if testfile.find(test) >= 0:
                    test_files.append(testfile)
    resume_file = os.path.join(install.A_OUT_LOG_DIR, "resume.txt")
    resume_list = ""
    # NOTE(review): this raises OSError when the resume file does not exist
    # yet — confirm whether a first-run -r invocation is expected to work.
    if rerun:
        os.remove(resume_file)
    # Check for already completed tests if not rerunning
    if resume == True and rerun == False:
        if os.path.exists(resume_file):
            resume_fp = open(resume_file, 'r')
            resume_list = resume_fp.read().splitlines()
            completed_test_count = len(resume_list)
            print("==> Completed Tests : %d" % (completed_test_count))
            resume_fp.close()
            if ((test is None) and
                (completed_test_count >= len(test_files))):
                print("All the acceptance tests have passed previously!")
                # NOTE(review): raw_input is Python 2 only (input() in Py3).
                proceed = raw_input("Would you like to re-run "
                                    "all the acceptance tests? (y/n)")
                if str.lower(proceed) == 'y':
                    os.remove(resume_file)
                    resume_list = ""
                else:
                    sys.exit(0)
    # Create unittest test case for each file
    for xml_file in test_files:
        # Skip test if we ran it already
        if xml_file in resume_list:
            print("==> Skipping %s" % (xml_file))
            continue
        file_base = xml_file[0:xml_file.find(wfext)]
        # pieces = file_base.split('-')
        # Adjust tolerance depending on test mode
        tolerance = 0.03
        #This defines a method that we're going to add to the
        #BBPAcceptanceTests class. The keyword binding has to
        #be done b/c Python is storing pointers to 'file' and 'file_base'
        #so w/o the keywords, 'file' and 'file_base' in the function will
        #point to the final values
        def permutation_test(self, file_base=file_base, xml_file=xml_file):
            # Run the platform on this XML and diff every output against refs.
            input_dir = os.path.join(self.install.A_TEST_REF_DIR,
                                     accept_test_inputs)
            log_dir = os.path.join(self.install.A_OUT_LOG_DIR,
                                   "acceptance_test_logs")
            sim_id = int(seqnum.get_seq_num())
            self.file_base = file_base
            self.log_file = os.path.join(log_dir, "%s.log" % (self.file_base))
            self.input_file = os.path.join(input_dir, xml_file)
            cmd = ("%s/run_bbp.py -x %s -s %d -l %s" %
                   (self.install.A_COMP_DIR,
                    self.input_file, sim_id, self.log_file))
            rc = bband_utils.runprog(cmd, False)
            self.failIf(rc != 0, "Acceptance test failed to execute")
            ref_file_dir = os.path.join(self.install.A_TEST_REF_DIR,
                                        accept_test_refs,
                                        self.file_base)
            agree = True
            for ref_file in os.listdir(ref_file_dir):
                if os.path.isfile(os.path.join(ref_file_dir, ref_file)):
                    test_file = os.path.join(self.install.A_OUT_DATA_DIR,
                                             str(sim_id),
                                             ("%d.%s" % (sim_id, ref_file)))
                    a_ref_file = os.path.join(ref_file_dir, ref_file)
                    compare_result = cmp_bbp.cmp_bbp(a_ref_file, test_file,
                                                     tolerance=tolerance)
                    errmsg = ("Output file "
                              "%s does not match reference file: %s" %
                              (test_file, a_ref_file))
                    self.failIf(compare_result != 0, errmsg)
                    if compare_result != 0:
                        agree = False
            if agree == True:
                # Write success to the resume file
                resume_fp = open(os.path.join(install.A_OUT_LOG_DIR,
                                              "resume.txt"), 'a')
                resume_fp.write("%s\n" % xml_file)
                resume_fp.flush()
                resume_fp.close()
            sys.stdout.flush()
            sys.stderr.flush()
        # We create a method object which is an instance method for
        # BBPAcceptanceTests which executes the code in
        # testPermutation
        # NOTE(review): the `new` module (and unittest's failIf above) are
        # Python 2 only; use types.MethodType / assertNotEqual under Py3.
        method = new.instancemethod(permutation_test,
                                    None, BBPAcceptanceTests)
        # We give the method a new name in BBPAcceptanceTests
        # which contains the xml file being run
        setattr(BBPAcceptanceTests, "test_%s" % file_base, method)
class BBPAcceptanceTests(unittest.TestCase):
    """Container class: test_* methods are attached dynamically by find_tests()."""

    # Reference inputs copied into the user run directory before each test.
    # Replaces four copy-pasted if/copy blocks with one data-driven loop.
    _INPUT_FILES = ["northridge_3_sta.stl",
                    "northridge_eq_gp.src",
                    "northridge_eq_ucsb.src",
                    "northridge_eq_song.src"]

    def setUp(self):
        """Copy reference input files into the run dir and create the log dir."""
        self.install = InstallCfg()
        accept_test_inputs = "accept_inputs"
        self.resume = True
        run_dir = self.install.A_USER_DATA_DIR
        # Create run directory, in case it doesn't exist
        bband_utils.mkdirs([run_dir], print_cmd=False)
        for fname in self._INPUT_FILES:
            if not os.path.exists(os.path.join(run_dir, fname)):
                src_path = os.path.join(self.install.A_TEST_REF_DIR,
                                        accept_test_inputs, fname)
                shutil.copy2(src_path, run_dir)
        log_dir = os.path.join(self.install.A_OUT_LOG_DIR,
                               "acceptance_test_logs")
        if not os.path.exists(log_dir):
            bband_utils.mkdirs([log_dir])
if __name__ == '__main__':
    # Parse command-line options
    parser = optparse.OptionParser()
    parser.add_option("-t", "--test", dest="test",
                      help="Execute specific test", metavar="TEST")
    parser.add_option("-r", "--rerun", action="store_true", dest="rerun",
                      help="Rerun tests already completed")
    (options, args) = parser.parse_args()
    # optparse leaves unset options as None, so these collapse to simple forms.
    test = options.test
    rerun = options.rerun is not None
    find_tests(test, rerun)
    suite = unittest.TestLoader().loadTestsFromTestCase(BBPAcceptanceTests)
    print("==> Number of tests to run: %d" % suite.countTestCases())
    unittest.TextTestRunner(verbosity=2).run(suite)
| 40.098214
| 83
| 0.554888
| 1,865
| 0.207637
| 0
| 0
| 0
| 0
| 0
| 0
| 2,197
| 0.2446
|
f038a863268c516819dbf950a745c99c6fc026b5
| 6,375
|
py
|
Python
|
tutorials/03-advanced/image_captioning/model.py
|
xuwangyin/pytorch-tutorial
|
d6a29c19288c817432b3b101765596e037e01989
|
[
"MIT"
] | 11
|
2017-08-20T18:12:34.000Z
|
2020-03-18T18:03:16.000Z
|
tutorials/03-advanced/image_captioning/model.py
|
xuwangyin/pytorch-tutorial
|
d6a29c19288c817432b3b101765596e037e01989
|
[
"MIT"
] | null | null | null |
tutorials/03-advanced/image_captioning/model.py
|
xuwangyin/pytorch-tutorial
|
d6a29c19288c817432b3b101765596e037e01989
|
[
"MIT"
] | 5
|
2017-08-10T05:15:37.000Z
|
2021-12-01T08:23:30.000Z
|
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.autograd import Variable
class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        """Load the pretrained ResNet-152 and replace top fc layer."""
        super(EncoderCNN, self).__init__()
        backbone = models.resnet152(pretrained=True)
        # Drop the final fc layer; keep everything up to the pooled features.
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])
        self.linear = nn.Linear(backbone.fc.in_features, embed_size)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
        self.init_weights()

    def init_weights(self):
        """Initialize the weights of the projection layer."""
        self.linear.weight.data.normal_(0.0, 0.02)
        self.linear.bias.data.fill_(0)

    def forward(self, images):
        """Extract the image feature vectors."""
        cnn_out = self.resnet(images)
        # Detach from the CNN graph (legacy Variable API), then project.
        cnn_out = Variable(cnn_out.data)
        flattened = cnn_out.view(cnn_out.size(0), -1)
        return self.bn(self.linear(flattened))
class LayoutEncoder(nn.Module):
    def __init__(self, layout_encoding_size: int, hidden_size: int, vocab_size: int, num_layers: int):
        """Set the hyper-parameters and build the layers."""
        super(LayoutEncoder, self).__init__()
        # Object-label embedding into the layout-encoding space.
        self.label_encoder = nn.Embedding(vocab_size, layout_encoding_size)
        # Projects a 4-value location vector (presumably a bounding box —
        # TODO confirm) into the same space so the two can be summed.
        self.location_encoder = nn.Linear(4, layout_encoding_size)
        self.lstm = nn.LSTM(layout_encoding_size, hidden_size, num_layers, batch_first=True)
        self.init_weights()
def init_weights(self):
"""Initialize weights."""
self.label_encoder.weight.data.uniform_(-0.1, 0.1)
self.location_encoder.weight.data.uniform_(-0.1, 0.1)
self.location_encoder.bias.data.fill_(0)
def forward(self, label_seqs, location_seqs, lengths):
# sort label sequences and location sequences in batch dimension according to length
batch_idx = sorted(range(len(lengths)), key=lambda k: lengths[k], reverse=True)
reverse_batch_idx = torch.LongTensor([batch_idx.index(i) for i in range(len(batch_idx))])
lens_sorted = sorted(lengths, reverse=True)
label_seqs_sorted = torch.index_select(label_seqs, 0, torch.LongTensor(batch_idx))
location_seqs_sorted = torch.index_select(location_seqs, 0, torch.LongTensor(batch_idx))
# assert torch.equal(torch.index_select(label_seqs_sorted, 0, reverse_batch_idx), label_seqs)
# assert torch.equal(torch.index_select(location_seqs_sorted, 0, reverse_batch_idx), location_seqs)
if torch.cuda.is_available():
reverse_batch_idx = reverse_batch_idx.cuda()
label_seqs_sorted = label_seqs_sorted.cuda()
location_seqs_sorted = location_seqs_sorted.cuda()
# create Variables
label_seqs_sorted_var = Variable(label_seqs_sorted, requires_grad=False)
location_seqs_sorted_var = Variable(location_seqs_sorted, requires_grad=False)
# encode label sequences
label_encoding = self.label_encoder(label_seqs_sorted_var)
# encode location sequences
location_seqs_sorted_var = location_seqs_sorted_var.view(-1, 4)
location_encoding = self.location_encoder(location_seqs_sorted_var)
location_encoding = location_encoding.view(label_encoding.size(0), -1, location_encoding.size(1))
# layout encoding - batch_size x max_seq_len x embed_size
layout_encoding = label_encoding + location_encoding
packed = pack(layout_encoding, lens_sorted, batch_first=True)
hiddens, _ = self.lstm(packed)
# unpack hiddens and get last hidden vector
hiddens_unpack = unpack(hiddens, batch_first=True)[0] # batch_size x max_seq_len x embed_size
last_hidden_idx = torch.zeros(hiddens_unpack.size(0), 1, hiddens_unpack.size(2)).long()
for i in range(hiddens_unpack.size(0)):
last_hidden_idx[i, 0, :] = lens_sorted[i] - 1
if torch.cuda.is_available():
last_hidden_idx = last_hidden_idx.cuda()
last_hidden = torch.gather(hiddens_unpack, 1, Variable(last_hidden_idx, requires_grad=False)) # batch_size x 1 x embed_size
last_hidden = torch.squeeze(last_hidden, 1) # batch_size x embed_size
# convert back to original batch order
last_hidden = torch.index_select(last_hidden, 0, Variable(reverse_batch_idx, requires_grad=False))
return last_hidden
class DecoderRNN(nn.Module):
    # Caption decoder: the image feature vector is fed as the first LSTM
    # input, followed by the embedded ground-truth tokens (teacher forcing).
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        """Set the hyper-parameters and build the layers."""
        super(DecoderRNN, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weights()
    def init_weights(self):
        """Initialize weights."""
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.linear.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)
    def forward(self, features, captions, lengths):
        """Decode image feature vectors and generates captions."""
        embeddings = self.embed(captions)
        # Prepend the image feature as the first time step.
        embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
        packed = pack(embeddings, lengths, batch_first=True)
        hiddens, _ = self.lstm(packed)
        # hiddens[0] is the packed data tensor; project to vocabulary logits.
        outputs = self.linear(hiddens[0])
        return outputs
    def sample(self, features, states=None):
        """Samples captions for given image features (Greedy search)."""
        sampled_ids = []
        inputs = features.unsqueeze(1)
        for i in range(20):                                         # maximum sampling length
            hiddens, states = self.lstm(inputs, states)          # (batch_size, 1, hidden_size), 
            outputs = self.linear(hiddens.squeeze(1))            # (batch_size, vocab_size)
            predicted = outputs.max(1)[1]
            sampled_ids.append(predicted)
            # NOTE(review): embed(predicted) and the cat(dim=1) below assume
            # max() keeps a trailing dim of size 1 (old PyTorch semantics)
            # -- verify against the installed torch version.
            inputs = self.embed(predicted)
        sampled_ids = torch.cat(sampled_ids, 1)                  # (batch_size, 20)
        return sampled_ids.squeeze()
| 47.574627
| 132
| 0.674039
| 6,131
| 0.961725
| 0
| 0
| 0
| 0
| 0
| 0
| 1,111
| 0.174275
|
f03ab886461270d772569e4546b232254bbdaeb6
| 3,525
|
py
|
Python
|
.ipynb_checkpoints/main2-checkpoint.py
|
jcus/python-challenge
|
8e00b7ae932e970a98c419e5b49fc7a0dfc3eac5
|
[
"RSA-MD"
] | null | null | null |
.ipynb_checkpoints/main2-checkpoint.py
|
jcus/python-challenge
|
8e00b7ae932e970a98c419e5b49fc7a0dfc3eac5
|
[
"RSA-MD"
] | null | null | null |
.ipynb_checkpoints/main2-checkpoint.py
|
jcus/python-challenge
|
8e00b7ae932e970a98c419e5b49fc7a0dfc3eac5
|
[
"RSA-MD"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "001887f2",
"metadata": {},
"outputs": [],
"source": [
"# import os modules to create path across operating system to load csv file\n",
"import os\n",
"# module for reading csv files\n",
"import csv"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77c0f7d8",
"metadata": {},
"outputs": [],
"source": [
"# read csv data and load to budgetDB\n",
"csvpath = os.path.join(\"Resources\",\"budget_data.csv\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b2da0e1e",
"metadata": {},
"outputs": [],
"source": [
"# creat a txt file to hold the analysis\n",
"outputfile = os.path.join(\"Analysis\",\"budget_analysis.txt\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f3c0fd89",
"metadata": {},
"outputs": [],
"source": [
"# set var and initialize to zero\n",
"totalMonths = 0 \n",
"totalBudget = 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f807576",
"metadata": {},
"outputs": [],
"source": [
"# set list to store all of the monthly changes\n",
"monthChange = [] \n",
"months = []"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad264653",
"metadata": {},
"outputs": [],
"source": [
"# use csvreader object to import the csv library with csvreader object\n",
"with open(csvpath, newline = \"\") as csvfile:\n",
"# # create a csv reader object\n",
" csvreader = csv.reader(csvfile, delimiter=\",\")\n",
" \n",
" # skip the first row since it has all of the column information\n",
" #next(csvreader)\n",
" \n",
"#header: date, profit/losses\n",
"print(csvreader)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27fc81c1",
"metadata": {},
"outputs": [],
"source": [
"for p in csvreader:\n",
" print(\"date: \" + p[0])\n",
" print(\"profit: \" + p[1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "83749f03",
"metadata": {},
"outputs": [],
"source": [
"# read the header row\n",
"header = next(csvreader)\n",
"print(f\"csv header:{header}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3b441a20",
"metadata": {},
"outputs": [],
"source": [
"# move to the next row (first row)\n",
"firstRow = next(csvreader)\n",
"totalMonths = (len(f\"[csvfile.index(months)][csvfile]\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a815e200",
"metadata": {},
"outputs": [],
"source": [
"output = (\n",
" f\"Financial Anaylsis \\n\"\n",
" f\"------------------------- \\n\"\n",
" f\"Total Months: {totalMonths} \\n\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6bf35c14",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| 21.759259
| 84
| 0.508936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,464
| 0.699007
|
f03ac5d7659f10dfb37b2809abf402ff19dbec0f
| 338
|
py
|
Python
|
Algorithms/Problems/MaximumSubarray/tests/maximum_substring_naive_test.py
|
Nalhin/AlgorithmsAndDataStructures
|
2d2c87d0572e107c993c3c8866b8beefd4d22082
|
[
"MIT"
] | 1
|
2021-11-16T13:02:25.000Z
|
2021-11-16T13:02:25.000Z
|
Algorithms/Problems/MaximumSubarray/tests/maximum_substring_naive_test.py
|
Nalhin/AlgorithmsAndDataStructures
|
2d2c87d0572e107c993c3c8866b8beefd4d22082
|
[
"MIT"
] | null | null | null |
Algorithms/Problems/MaximumSubarray/tests/maximum_substring_naive_test.py
|
Nalhin/AlgorithmsAndDataStructures
|
2d2c87d0572e107c993c3c8866b8beefd4d22082
|
[
"MIT"
] | null | null | null |
from Algorithms.Problems.MaximumSubarray.maximum_substring_naive import (
maximum_subarray_naive,
)
class TestMaximumSubarrayNaive:
    def test_returns_maximum_subarray(self):
        """The naive solver must return the contiguous run with the largest sum."""
        numbers = [1, -2, 3, 10, -5, 14]
        outcome = maximum_subarray_naive(numbers)
        assert outcome == [3, 10, -5, 14]
| 24.142857
| 73
| 0.683432
| 231
| 0.683432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f03b44b7bd155b16cda8a428f739db53cb3a8257
| 138
|
py
|
Python
|
helios/workflows/__init__.py
|
thiagosfs/helios-server
|
1616f742c0d3ab8833aab4cfbcc45d9818c68716
|
[
"Apache-2.0"
] | 525
|
2015-01-04T11:51:26.000Z
|
2022-03-31T17:15:20.000Z
|
helios/workflows/__init__.py
|
thiagosfs/helios-server
|
1616f742c0d3ab8833aab4cfbcc45d9818c68716
|
[
"Apache-2.0"
] | 238
|
2015-01-02T17:50:37.000Z
|
2022-02-09T16:39:49.000Z
|
helios/workflows/__init__.py
|
thiagosfs/helios-server
|
1616f742c0d3ab8833aab4cfbcc45d9818c68716
|
[
"Apache-2.0"
] | 238
|
2015-01-05T23:09:20.000Z
|
2022-03-21T16:47:33.000Z
|
"""
Helios Election Workflows
"""
from helios.datatypes import LDObjectContainer
class WorkflowObject(LDObjectContainer):
    # Marker base class for workflow data types; inherits all behaviour
    # from LDObjectContainer and currently adds nothing of its own.
    pass
| 13.8
| 46
| 0.76087
| 49
| 0.355072
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.23913
|
f03bb71c1a6622417a2b810cfaa5541f5b608f38
| 2,210
|
py
|
Python
|
tests/test_searcher.py
|
dbvirus/searcher
|
f4c2036da0a822bc9e408dcda10462d2d4335f03
|
[
"MIT"
] | null | null | null |
tests/test_searcher.py
|
dbvirus/searcher
|
f4c2036da0a822bc9e408dcda10462d2d4335f03
|
[
"MIT"
] | null | null | null |
tests/test_searcher.py
|
dbvirus/searcher
|
f4c2036da0a822bc9e408dcda10462d2d4335f03
|
[
"MIT"
] | null | null | null |
"""
Unit tests for the searcher module. Those tests mock the Entrez class
and do not make any sort of HTTP request.
"""
# pylint: disable=redefined-outer-name
import io
from pathlib import Path
from Bio import Entrez
from dbvirus_searcher import Searcher
def test_searcher_initialization(searcher):
    """Check the fixture's default ``db`` and an explicitly supplied one."""
    assert isinstance(searcher, Searcher)
    assert searcher.db == "sra"
    custom = Searcher("another@test.com", db="other_db")
    assert custom.db == "other_db"
def test_searcher_searches_sra(searcher: Searcher, mocker):
    """``search`` must delegate to ``Entrez.esearch`` with the right arguments."""
    # esearch must return a file-like object, so hand it a JSON buffer.
    mocker.patch("Bio.Entrez.esearch")
    Entrez.esearch.return_value = io.StringIO("{}")
    query = '"Homo sapiens"[Organism]'
    searcher.search(query)
    # pylint: disable=no-member
    Entrez.esearch.assert_called_with("sra", query, retmax=10, retmode="json")
def test_searcher_configurer_entrez():
    """Constructing a Searcher must configure Entrez's e-mail and API key."""
    Searcher(email="test@test.com", api_key="3141516")
    assert (Entrez.email, Entrez.api_key) == ("test@test.com", "3141516")
def test_searcher_returns_dictionary(searcher: Searcher, mocker):
    """``search`` must hand back a parsed (dict) SRA result set."""
    mocker.patch("Bio.Entrez.esearch")
    Entrez.esearch.return_value = io.StringIO("{}")
    outcome = searcher.search("Human", max_results=3)
    assert isinstance(outcome, dict)
def test_fetch_result(searcher: Searcher, mocker):
    """Given an Entrez UID, ``fetch`` must acquire and parse the record."""
    mocker.patch("Bio.Entrez.efetch")
    # Serve a canned XML response stored next to this test file.
    sample = Path(__file__).parent.absolute().joinpath("sample_efetch_result.xml")
    Entrez.efetch.return_value = open(sample)
    data = searcher.fetch("8801091")
    # pylint: disable=no-member
    Entrez.efetch.assert_called()
    assert data
    assert isinstance(data, dict)
| 27.283951
| 78
| 0.698643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,020
| 0.461538
|
f03d4c226a3b3aa190f45b9620b4a20bd1deafdc
| 2,296
|
py
|
Python
|
isso/tests/test_html.py
|
Nildeala/isso
|
661f2a68813e6ba5c234c9b84f440681712cdcef
|
[
"MIT"
] | 1
|
2017-08-24T21:10:01.000Z
|
2017-08-24T21:10:01.000Z
|
isso/tests/test_html.py
|
Nildeala/isso
|
661f2a68813e6ba5c234c9b84f440681712cdcef
|
[
"MIT"
] | null | null | null |
isso/tests/test_html.py
|
Nildeala/isso
|
661f2a68813e6ba5c234c9b84f440681712cdcef
|
[
"MIT"
] | null | null | null |
try:
import unittest2 as unittest
except ImportError:
import unittest
from isso.core import Config
from isso.utils import html
class TestHTML(unittest.TestCase):
    # Exercises isso's Markdown rendering and HTML sanitization helpers.
    def test_markdown(self):
        # Plain Markdown conversion with no extra extensions enabled.
        convert = html.Markdown(extensions=())
        examples = [
            ("*Ohai!*", "<p><em>Ohai!</em></p>"),
            ("<em>Hi</em>", "<p><em>Hi</em></p>"),
            ("http://example.org/", '<p>http://example.org/</p>')]
        for (input, expected) in examples:
            self.assertEqual(convert(input), expected)
    def test_markdown_extensions(self):
        # strikethrough and superscript are optional Markdown extensions.
        convert = html.Markdown(extensions=("strikethrough", "superscript"))
        examples = [
            ("~~strike~~ through", "<p><del>strike</del> through</p>"),
            ("sup^(script)", "<p>sup<sup>script</sup></p>")]
        for (input, expected) in examples:
            self.assertEqual(convert(input), expected)
    @unittest.skipIf(html.html5lib_version == "0.95", "backport")
    def test_sanitizer(self):
        # With empty whitelists, tags/attributes are stripped but text kept.
        sanitizer = html.Sanitizer(elements=[], attributes=[])
        examples = [
            ('Look: <img src="..." />', 'Look: '),
            ('<a href="http://example.org/">Ha</a>', '<a href="http://example.org/">Ha</a>'),
            ('<a href="sms:+1234567890">Ha</a>', '<a>Ha</a>'),
            ('<p style="visibility: hidden;">Test</p>', '<p>Test</p>'),
            ('<script>alert("Onoe")</script>', 'alert("Onoe")')]
        for (input, expected) in examples:
            self.assertEqual(html.sanitize(sanitizer, input), expected)
    @unittest.skipIf(html.html5lib_version == "0.95", "backport")
    def test_sanitizer_extensions(self):
        # Whitelisting img/src keeps images while scripts are still removed.
        sanitizer = html.Sanitizer(elements=["img"], attributes=["src"])
        examples = [
            ('<img src="cat.gif" />', '<img src="cat.gif">'),
            ('<script src="doge.js"></script>', '')]
        for (input, expected) in examples:
            self.assertEqual(html.sanitize(sanitizer, input), expected)
    def test_render(self):
        # End-to-end: the Markup renderer linkifies http URLs; sms: URIs
        # stay plain text.
        conf = Config.load(None).section("markup")
        renderer = html.Markup(conf).render
        self.assertEqual(renderer("http://example.org/ and sms:+1234567890"),
                         '<p><a href="http://example.org/">http://example.org/</a> and sms:+1234567890</p>')
| 37.639344
| 108
| 0.562718
| 2,155
| 0.938589
| 0
| 0
| 1,063
| 0.462979
| 0
| 0
| 746
| 0.324913
|
f03f530e1dc98bcc4544ec79667fa0181fc768d4
| 373
|
py
|
Python
|
tests/data_elements/test_other_double.py
|
GalBenZvi/dicom_parser
|
fc3e892ebf99c4e5d62cb5e7de7df341baf445fe
|
[
"MIT"
] | 11
|
2020-08-08T21:41:54.000Z
|
2021-07-27T12:48:31.000Z
|
tests/data_elements/test_other_double.py
|
GalBenZvi/dicom_parser
|
fc3e892ebf99c4e5d62cb5e7de7df341baf445fe
|
[
"MIT"
] | 45
|
2020-03-03T14:32:16.000Z
|
2021-07-30T16:42:17.000Z
|
tests/data_elements/test_other_double.py
|
GalBenZvi/dicom_parser
|
fc3e892ebf99c4e5d62cb5e7de7df341baf445fe
|
[
"MIT"
] | 6
|
2021-10-19T09:19:22.000Z
|
2022-03-13T19:26:10.000Z
|
"""
Definition of the :class:`OtherDoubleTestCase` class.
"""
from dicom_parser.data_elements.other_double import OtherDouble
from tests.test_data_element import DataElementTestCase
class OtherDoubleTestCase(DataElementTestCase):
    """
    Tests for the
    :class:`~dicom_parser.data_elements.other_double.OtherDouble`
    class.
    """
    # Tells the shared DataElementTestCase machinery which data-element
    # class this test case exercises.
    TEST_CLASS = OtherDouble
| 23.3125
| 65
| 0.766756
| 188
| 0.504021
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.447721
|
f03ff005925224be3fdb7edb21130977774e1f37
| 14,317
|
py
|
Python
|
acme_compact.py
|
felixfontein/acme-compact
|
922df35fc70e6f157a51d572a02c12fa34caaa35
|
[
"MIT"
] | 5
|
2015-12-19T20:09:53.000Z
|
2017-02-06T08:13:27.000Z
|
acme_compact.py
|
felixfontein/acme-compact
|
922df35fc70e6f157a51d572a02c12fa34caaa35
|
[
"MIT"
] | null | null | null |
acme_compact.py
|
felixfontein/acme-compact
|
922df35fc70e6f157a51d572a02c12fa34caaa35
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Command line interface for the compact ACME library."""
import acme_lib
import argparse
import sys
import textwrap
def _gen_account_key(account_key, key_length, algorithm):
    """Create a fresh account private key and store it at *account_key*."""
    new_key = acme_lib.create_key(key_length=key_length, algorithm=algorithm)
    acme_lib.write_file(account_key, new_key)
def _gen_cert_key(key, key_length, algorithm):
    """Create a fresh certificate private key and store it at *key*."""
    new_key = acme_lib.create_key(key_length=key_length, algorithm=algorithm)
    acme_lib.write_file(key, new_key)
def _gen_csr(domains, key, csr, must_staple):
    """Generate a CSR for the comma-separated *domains* and store it at *csr*.

    A matching OpenSSL config is written next to the CSR (same name with a
    .cnf suffix).
    """
    stem = csr[:-4] if csr.endswith('.csr') else csr
    config_filename = stem + '.cnf'
    sys.stderr.write('Writing OpenSSL config to {0}.\n'.format(config_filename))
    the_csr = acme_lib.generate_csr(key, config_filename, domains.split(','), must_staple=must_staple)
    acme_lib.write_file(csr, the_csr)
def _print_csr(csr):
    """Print the CSR at *csr* in human-readable form on stdout."""
    text = acme_lib.get_csr_as_text(csr)
    sys.stdout.write(text + '\n')
def _get_root(root_url, cert):
    """Download the CA root certificate; print it, or store it at *cert*."""
    pem = acme_lib.download_certificate(root_url)
    if cert is None:
        sys.stdout.write(pem + '\n')
        return
    acme_lib.write_file(cert, pem + '\n')
    sys.stderr.write("Stored root certificate at '{0}'.\n".format(cert))
def _get_intermediate(intermediate_url, cert):
    """Download the CA intermediate certificate; print it, or store it at *cert*."""
    pem = acme_lib.download_certificate(intermediate_url)
    if cert is None:
        sys.stdout.write(pem + '\n')
        return
    acme_lib.write_file(cert, pem + '\n')
    sys.stderr.write("Stored intermediate certificate at '{0}'.\n".format(cert))
def _get_certificate(account_key, csr, acme_dir, CA, cert, email):
    # Full single-machine flow: obtain challenges from the CA, publish them
    # under acme_dir, have the CA verify them, then store or print the signed
    # certificate. Challenge files are removed again even on failure.
    sys.stderr.write("Preparing challenges...")
    state = acme_lib.get_challenges(account_key, csr, CA, email_address=email)
    sys.stderr.write(" ok\n")
    try:
        sys.stderr.write("Writing and verifying challenges...")
        acme_lib.write_challenges(state, acme_dir)
        # Local self-check before telling the CA to look.
        acme_lib.verify_challenges(state)
        sys.stderr.write(" ok\n")
        sys.stderr.write("Notifying CA of challenges...")
        acme_lib.notify_challenges(state)
        sys.stderr.write(" ok\n")
        sys.stderr.write("Verifying domains...\n")
        result = acme_lib.check_challenges(state, csr, lambda domain: sys.stderr.write("Verified domain {0}!\n".format(domain)))
        sys.stderr.write("Certificate is signed!\n")
        if cert is None:
            sys.stdout.write(result)
        else:
            acme_lib.write_file(cert, result)
            sys.stderr.write("Stored certificate at '{0}'.\n".format(cert))
    finally:
        # Always clean the challenge files out of the webroot.
        acme_lib.remove_challenges(state, acme_dir)
def _get_certificate_part1(statefile, account_key, csr, acme_dir, CA, email):
    """First half of the split flow: prepare challenges and persist the state.

    The written challenge files must then be uploaded to the web server
    before running part 2.
    """
    sys.stderr.write("Preparing challenges...")
    challenge_state = acme_lib.get_challenges(account_key, csr, CA, email_address=email)
    sys.stderr.write(" ok\n")
    sys.stderr.write("Writing challenges...")
    acme_lib.write_challenges(challenge_state, acme_dir)
    sys.stderr.write(" ok\n")
    sys.stderr.write("Serializing state...")
    serialized = acme_lib.serialize_state(challenge_state)
    with open(statefile, "w") as sf:
        sf.write(serialized)
    sys.stderr.write(" ok\n")
def _get_certificate_part2(statefile, csr, cert):
    # Second half of the split flow: reload the state saved by part 1, have
    # the CA verify the (already uploaded) challenges, and store or print the
    # signed certificate. Assumes the challenge files are reachable by the CA.
    sys.stderr.write("Deserializing state...")
    with open(statefile, "r") as sf:
        state = acme_lib.deserialize_state(sf.read())
    sys.stderr.write(" ok\n")
    sys.stderr.write("Verifying challenges...")
    acme_lib.verify_challenges(state)
    sys.stderr.write(" ok\n")
    sys.stderr.write("Notifying CA of challenges...")
    acme_lib.notify_challenges(state)
    sys.stderr.write(" ok\n")
    sys.stderr.write("Verifying domains...\n")
    result = acme_lib.check_challenges(state, csr, lambda domain: sys.stderr.write("Verified domain {0}!\n".format(domain)))
    sys.stderr.write("Certificate is signed!\n")
    if cert is None:
        sys.stdout.write(result)
    else:
        acme_lib.write_file(cert, result)
        sys.stderr.write("Stored certificate at '{0}'.\n".format(cert))
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from
Let's Encrypt using the ACME protocol. It can both be run from the server
and from another machine (when splitting the process up in two steps).
The script needs to have access to your private account key, so PLEASE READ
THROUGH IT! It's only 265+569 lines (including docstrings), so it won't
take too long.
===Example Usage: Creating Letsencrypt account key, private key for certificate and CSR===
python acme_compact.py gen-account-key --account-key /path/to/account.key
python acme_compact.py gen-key --key /path/to/domain.key
python acme_compact.py gen-csr --key /path/to/domain.key --csr /path/to/domain.csr --domains example.com,www.example.com
===================
Note that the email address does not have to be specified.
Also note that by default, RSA keys are generated. If you want ECC keys,
please specify "--algorithm <alg>" with <alg> being "p-256" or "p-384".
===Example Usage: Creating certifiate from CSR on server===
python acme_compact.py get-certificate --account-key /path/to/account.key --email mail@example.com --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ --cert /path/to/signed.crt 2>> /var/log/acme_compact.log
===================
===Example Usage: Creating certifiate from CSR from another machine===
python acme_compact.py get-certificate-part-1 --account-key /path/to/account.key --email mail@example.com --csr /path/to/domain.csr --statefile /path/to/state.json --acme-dir /tmp/acme-challenge/ 2>> /var/log/acme_compact.log
... copy files from /tmp/acme-challenge/ into /usr/share/nginx/html/.well-known/acme-challenge/ on the web server ...
python acme_compact.py get-certificate-part-2 --csr /path/to/domain.csr --statefile /path/to/state.json --cert /path/to/signed.crt 2>> /var/log/acme_compact.log
===================
===Example Usage: Combining signed certificate with intermediate certificate===
python acme_compact.py get-intermediate --cert /path/to/domain-intermediate.crt
cat /path/to/signed.crt /path/to/domain-intermediate.crt > /path/to/signed-with-intermediate.crt
===================
""")
)
commands = {
'gen-account-key': {
'help': 'Generates an account key.',
'requires': ["account_key"],
'optional': ["key_length", "algorithm"],
'command': _gen_account_key,
},
'gen-key': {
'help': 'Generates a certificate key.',
'requires': ["key"],
'optional': ["key_length", "algorithm"],
'command': _gen_cert_key,
},
'gen-csr': {
'help': 'Generates a certificate signing request (CSR). Under *nix, use /dev/stdin after --key to provide key via stdin.',
'requires': ["domains", "key", "csr"],
'optional': ["must_staple"],
'command': _gen_csr,
},
'print-csr': {
'help': 'Prints the given certificate signing request (CSR) in human-readable form.',
'requires': ["csr"],
'optional': [],
'command': _print_csr,
},
'get-root': {
'help': 'Retrieves the root certificate from the CA server and prints it to stdout (if --cert is not specified).',
'requires': [],
'optional': ["root_url", "cert"],
'command': _get_root,
},
'get-intermediate': {
'help': 'Retrieves the intermediate certificate from the CA server and prints it to stdout (if --cert is not specified).',
'requires': [],
'optional': ["intermediate_url", "cert"],
'command': _get_intermediate,
},
'get-certificate': {
'help': 'Given a CSR and an account key, retrieves a certificate and prints it to stdout (if --cert is not specified).',
'requires': ["account_key", "csr", "acme_dir"],
'optional': ["CA", "cert", "email"],
'command': _get_certificate,
},
'get-certificate-part-1': {
'help': 'Given a CSR and an account key, prepares retrieving a certificate. The generated challenge files must be manually uploaded to their respective positions.',
'requires': ["account_key", "csr", "acme_dir", "statefile"],
'optional': ["CA", "email"],
'command': _get_certificate_part1,
},
'get-certificate-part-2': {
'help': 'Assuming that get-certificate-part-1 ran through and the challenges were uploaded, retrieves a certificate and prints it to stdout (if --cert is not specified).',
'requires': ["csr", "statefile"],
'optional': ["cert"],
'command': _get_certificate_part2,
},
}
parser.add_argument("command", type=str, nargs='?', help="must be one of {0}".format(', '.join('"{0}"'.format(command) for command in sorted(commands.keys()))))
parser.add_argument("--account-key", required=False, help="path to your Let's Encrypt account private key")
parser.add_argument("--algorithm", required=False, default="rsa", help="the algorithm to use (rsa, ...)") # FIXME
parser.add_argument("--key-length", type=int, default=4096, required=False, help="key length for private keys")
parser.add_argument("--key", required=False, help="path to your certificate's private key")
parser.add_argument("--csr", required=False, help="path to your certificate signing request")
parser.add_argument("--acme-dir", required=False, help="path to the .well-known/acme-challenge/ directory")
parser.add_argument("--CA", required=False, default=None, help="CA to use (default: {0})".format(acme_lib.default_ca))
parser.add_argument("--use-staging-CA", required=False, default=False, action='store_true', help="Use Let's Encrypt staging CA")
parser.add_argument("--statefile", required=False, default=None, help="state file for two-part run")
parser.add_argument("-d", "--domains", required=False, default=None, help="a comma-separated list of domain names")
parser.add_argument("--cert", required=False, help="file name to store certificate into (otherwise it is printed on stdout)")
parser.add_argument("--email", required=False, help="email address (will be associated with account)")
parser.add_argument("--intermediate-url", required=False, default=acme_lib.default_intermediate_url, help="URL for the intermediate certificate (default: {0})".format(acme_lib.default_intermediate_url))
parser.add_argument("--root-url", required=False, default=acme_lib.default_root_url, help="URL for the root certificate (default: {0})".format(acme_lib.default_root_url))
parser.add_argument("--must-staple", required=False, default=False, action='store_true', help="request must staple extension for certificate")
args = parser.parse_args()
if args.command is None:
sys.stderr.write("Command must be one of {1}. More information on the available commands:\n\n".format(args.command, ', '.join('"{0}"'.format(command) for command in sorted(commands.keys()))))
for command in sorted(commands.keys()):
cmd = commands[command]
sys.stderr.write(' {0}:\n'.format(command))
sys.stderr.write('{0}\n'.format(textwrap.indent(cmd['help'], prefix=' ')))
if cmd['requires']:
sys.stderr.write(' Mandatory options: {0}\n'.format(', '.join(['--{0}'.format(opt.replace('_', '-')) for opt in cmd['requires']])))
if cmd['optional']:
sys.stderr.write(' Optional options: {0}\n'.format(', '.join(['--{0}'.format(opt.replace('_', '-')) for opt in cmd['optional']])))
sys.exit(-1)
elif args.command not in commands:
sys.stderr.write("Unknown command '{0}'! Command must be one of {1}.\n".format(args.command, ', '.join('"{0}"'.format(command) for command in sorted(commands.keys()))))
sys.exit(-1)
else:
cmd = commands[args.command]
accepted = set()
values = {}
if args.__dict__['use_staging_CA']:
if args.__dict__['CA'] is not None:
sys.stderr.write("Cannot specify both '--use-staging-CA' and provide '--CA'!\n")
sys.exit(-1)
args.__dict__['CA'] = acme_lib.staging_ca
for req in cmd['requires']:
accepted.add(req)
if args.__dict__[req] is None:
sys.stderr.write("Command '{0}' requires that option '{1}' is set!\n".format(args.command, req))
sys.exit(-1)
values[req] = args.__dict__[req]
for opt in cmd['optional']:
accepted.add(opt)
values[opt] = args.__dict__[opt]
for opt in args.__dict__:
if opt == 'command':
continue
if args.__dict__[opt] is not parser.get_default(opt):
if opt not in accepted:
sys.stderr.write("Warning: option '{0}' is ignored for this command.\n".format(opt))
if 'CA' in values and values['CA'] is None:
values['CA'] = acme_lib.default_ca
cmd['command'](**values)
except Exception as e:
sys.stderr.write("Error occured: {0}\n".format(str(e)))
sys.exit(-2)
| 53.823308
| 258
| 0.605155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,442
| 0.449955
|
f040ef34e6f7f28b762c5ac7fa85d111d72daca8
| 1,530
|
py
|
Python
|
cdk/consoleme_ecs_service/nested_stacks/vpc_stack.py
|
avishayil/consoleme-ecs-service
|
357f290c23fb74c6752961a4a4582e4cbab54e0a
|
[
"MIT"
] | 2
|
2021-06-19T04:28:43.000Z
|
2021-06-19T06:12:25.000Z
|
cdk/consoleme_ecs_service/nested_stacks/vpc_stack.py
|
avishayil/consoleme-ecs-service
|
357f290c23fb74c6752961a4a4582e4cbab54e0a
|
[
"MIT"
] | 10
|
2021-06-19T08:12:41.000Z
|
2021-06-20T22:00:34.000Z
|
cdk/consoleme_ecs_service/nested_stacks/vpc_stack.py
|
avishayil/consoleme-ecs-service
|
357f290c23fb74c6752961a4a4582e4cbab54e0a
|
[
"MIT"
] | null | null | null |
"""
VPC stack for running ConsoleMe on ECS
"""
import urllib.request
from aws_cdk import (
aws_ec2 as ec2,
core as cdk
)
class VPCStack(cdk.NestedStack):
    """
    VPC stack for running ConsoleMe on ECS
    """
    def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # VPC and security groups
        vpc = ec2.Vpc(
            self, 'Vpc',
            max_azs=2
        )
        consoleme_sg = ec2.SecurityGroup(
            self,
            'LBSG',
            vpc=vpc,
            description='Consoleme ECS service load balancer security group',
            allow_all_outbound=True
        )
        # Open ingress to the deploying computer public IP
        # NOTE: this performs an HTTP request at synth time, so synthesis
        # requires outbound internet access.
        my_ip_cidr = urllib.request.urlopen(
            'http://checkip.amazonaws.com').read().decode('utf-8').strip() + '/32'
        consoleme_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4(cidr_ip=my_ip_cidr),
            connection=ec2.Port.tcp(port=443),
            description='Allow HTTPS traffic'
        )
        redis_sg = ec2.SecurityGroup(
            self,
            'ECSG',
            vpc=vpc,
            description='Consoleme Redis security group',
            allow_all_outbound=True
        )
        # Only the ConsoleMe containers may reach Redis (port 6379).
        redis_sg.connections.allow_from(consoleme_sg, port_range=ec2.Port.tcp(
            port=6379), description='Allow ingress from ConsoleMe containers')
        # Exposed to the parent stack for wiring into other nested stacks.
        self.vpc = vpc
        self.redis_sg = redis_sg
        self.consoleme_sg = consoleme_sg
| 25.081967
| 82
| 0.578431
| 1,396
| 0.912418
| 0
| 0
| 0
| 0
| 0
| 0
| 380
| 0.248366
|
f0425b1ddda33471bcd698350aad4a8f84b9b335
| 1,837
|
py
|
Python
|
mnt/us/kapps/apps/gallery/gallery.py
|
PhilippMundhenk/kapps
|
eed07669d8554393bfbd40acd8d255475e90b88e
|
[
"MIT"
] | 1
|
2021-11-19T08:40:44.000Z
|
2021-11-19T08:40:44.000Z
|
mnt/us/kapps/apps/gallery/gallery.py
|
PhilippMundhenk/kapps
|
eed07669d8554393bfbd40acd8d255475e90b88e
|
[
"MIT"
] | null | null | null |
mnt/us/kapps/apps/gallery/gallery.py
|
PhilippMundhenk/kapps
|
eed07669d8554393bfbd40acd8d255475e90b88e
|
[
"MIT"
] | null | null | null |
from core.kapp import Kapp
from core.httpResponse import HTTPResponse
from core.Kcommand import Kcommand
import uuid
import os
class GetImage(Kcommand):
    # Command that serves a raw image file.
    # Random per-process identifier shared by all GetImage instances.
    getImageHash = str(uuid.uuid4())
    def __init__(self):
        super(GetImage, self).__init__(
            "GetImage", self.getImageHash)
class ViewImage(Kcommand):
    # Command that shows an image wrapped in the HTML viewer template.
    # Random per-process identifier shared by all ViewImage instances.
    viewImageHash = str(uuid.uuid4())
    def __init__(self):
        super(ViewImage, self).__init__(
            "ViewImage", self.viewImageHash)
class GalleryApp(Kapp):
    # Kindle app that lists the images under /mnt/us/images/ and serves them.
    name = "Gallery"
    def getImageCallback(self, kcommand):
        # Return the raw image file named by the "path" parameter.
        with open(kcommand.getParameter("path"), 'r') as file:
            return HTTPResponse(content=file.read())
    def viewImageCallback(self, kcommand):
        # Wrap the image in the image.html template, linking to the raw-image
        # command with the same parameters.
        cmd = GetImage()
        cmd.params = dict(kcommand.params)
        return HTTPResponse(content=self.getRes("image.html").replace("$IMAGE$", "<img style=\"width:100%;\" src=" + cmd.toURL() + " />"))
    def homeCallback(self, kcommand):
        # Build one HTML table row per file found in the fixed images dir.
        path = "/mnt/us/images/"
        files = os.listdir(path)
        paths = [os.path.join(path, basename) for basename in files]
        text = ""
        for p in paths:
            text = text + "<tr><td>"
            imageURL = ViewImage().setParameter("path", p).toURL()
            text = text + "<a href=\"" + \
                imageURL + "\">" + p.replace(path, "") + "</a>"
            text = text + "</td></tr>"
        return HTTPResponse(content=self.getRes("imageList.html").replace("$IMAGES$", text))
    def iconCallback(self, kcommand):
        # App icon shown by the launcher.
        return HTTPResponse(content=self.getRes("icon.png"))
def register(appID, appPath, ctx):
    """Instantiate the gallery app and wire up its command callbacks."""
    print("register " + GalleryApp.name)
    gallery = GalleryApp(appID, appPath, ctx)
    gallery.subscribe(GetImage(), gallery.getImageCallback)
    gallery.subscribe(ViewImage(), gallery.viewImageCallback)
    return gallery
| 30.114754
| 138
| 0.619488
| 1,459
| 0.79423
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.118127
|
f042f18d33f05c333a291d256763c607089f137e
| 214
|
py
|
Python
|
answers/easy/single-number.py
|
kigawas/lintcode-python
|
c07177a9969abb3860c6c599fe1e4d8be9dd762e
|
[
"Apache-2.0"
] | 1
|
2017-11-01T15:00:02.000Z
|
2017-11-01T15:00:02.000Z
|
answers/easy/single-number.py
|
kigawas/lintcode-python
|
c07177a9969abb3860c6c599fe1e4d8be9dd762e
|
[
"Apache-2.0"
] | null | null | null |
answers/easy/single-number.py
|
kigawas/lintcode-python
|
c07177a9969abb3860c6c599fe1e4d8be9dd762e
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
"""
@param A : an integer array
@return : a integer
"""
def singleNumber(self, A):
# write your code here
return reduce(lambda x, y: x ^ y, A) if A != [] else 0
| 21.4
| 62
| 0.537383
| 213
| 0.995327
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.415888
|
f043daf48c42d7f929f4d25cb52dddbc3fe2c981
| 2,356
|
py
|
Python
|
adding/adding_task.py
|
tk-rusch/coRNN
|
afd81744d108a2d623761b635b4ba56770d9e05d
|
[
"MIT"
] | 24
|
2020-10-06T22:25:39.000Z
|
2021-11-28T09:33:30.000Z
|
adding/adding_task.py
|
tk-rusch/coRNN
|
afd81744d108a2d623761b635b4ba56770d9e05d
|
[
"MIT"
] | 2
|
2020-12-02T16:44:10.000Z
|
2021-08-20T11:59:49.000Z
|
adding/adding_task.py
|
tk-rusch/coRNN
|
afd81744d108a2d623761b635b4ba56770d9e05d
|
[
"MIT"
] | 5
|
2020-10-20T13:54:59.000Z
|
2021-09-23T06:21:49.000Z
|
from torch import nn, optim
import torch
import model
import torch.nn.utils
import utils
import argparse
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='training parameters')
parser.add_argument('--n_hid', type=int, default=128,
help='hidden size of recurrent net')
parser.add_argument('--T', type=int, default=100,
help='length of sequences')
parser.add_argument('--max_steps', type=int, default=60000,
help='max learning steps')
parser.add_argument('--log_interval', type=int, default=100,
help='log interval')
parser.add_argument('--batch', type=int, default=50,
help='batch size')
parser.add_argument('--batch_test', type=int, default=1000,
help='size of test set')
parser.add_argument('--lr', type=float, default=2e-2,
help='learning rate')
parser.add_argument('--dt',type=float, default=6e-2,
help='step size <dt> of the coRNN')
parser.add_argument('--gamma',type=float, default=66,
help='y controle parameter <gamma> of the coRNN')
parser.add_argument('--epsilon',type=float, default = 15,
help='z controle parameter <epsilon> of the coRNN')
args = parser.parse_args()
n_inp = 2
n_out = 1
model = model.coRNN(n_inp, args.n_hid, n_out, args.dt, args.gamma, args.epsilon).to(device)
objective = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def test():
model.eval()
with torch.no_grad():
data, label = utils.get_batch(args.T, args.batch_test)
label = label.unsqueeze(1)
out = model(data.to(device))
loss = objective(out, label.to(device))
return loss.item()
def train():
test_mse = []
for i in range(args.max_steps):
data, label = utils.get_batch(args.T,args.batch)
label = label.unsqueeze(1)
optimizer.zero_grad()
out = model(data.to(device))
loss = objective(out, label.to(device))
loss.backward()
optimizer.step()
if(i%100==0 and i!=0):
mse_error = test()
print('Test MSE: {:.6f}'.format(mse_error))
test_mse.append(mse_error)
model.train()
if __name__ == '__main__':
train()
| 31.837838
| 91
| 0.617997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 405
| 0.171902
|
f04422d2ac94f3225baf51c6adc62d54d1588ade
| 4,904
|
py
|
Python
|
notebooks/KJIO/kapi_io.py
|
Eclasik/kinetica-jupyterlab
|
c94b7e2e182e500e1c34ccef3af146a9c0a21bd6
|
[
"Xnet",
"X11",
"CECILL-B"
] | 2
|
2019-11-24T23:49:20.000Z
|
2021-09-19T23:05:01.000Z
|
notebooks/KJIO/kapi_io.py
|
Eclasik/kinetica-jupyterlab
|
c94b7e2e182e500e1c34ccef3af146a9c0a21bd6
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
notebooks/KJIO/kapi_io.py
|
Eclasik/kinetica-jupyterlab
|
c94b7e2e182e500e1c34ccef3af146a9c0a21bd6
|
[
"Xnet",
"X11",
"CECILL-B"
] | 3
|
2019-11-24T23:49:39.000Z
|
2021-02-10T17:49:16.000Z
|
# File: kapi_io.py
# Purpose: I/O of between dataframes and Kinetica with native API.
# Author: Chad Juliano
# Date: 07/20/2018
###############################################################################
import numpy as np
import pandas as pd
import gpudb
import sys
KDBC = gpudb.GPUdb(encoding='BINARY', host='127.0.0.1', port='9191')
KAPI_TYPE_MAP = { 'int64' : gpudb.GPUdbRecordColumn._ColumnType.LONG,
'int32' : gpudb.GPUdbRecordColumn._ColumnType.INT,
'int16' : gpudb.GPUdbRecordColumn._ColumnType.INT,
'float64' : gpudb.GPUdbRecordColumn._ColumnType.DOUBLE,
'float32' : gpudb.GPUdbRecordColumn._ColumnType.FLOAT,
'object' : gpudb.GPUdbRecordColumn._ColumnType.STRING }
def get_coldef(_col_name, _np_dtype, _col_props):
"""Convert a Numpy type to Kinetica type."""
if(str(_np_dtype) not in KAPI_TYPE_MAP):
raise Exception('Type not supported: {}'.format(_np_dtype))
_k_type = KAPI_TYPE_MAP[str(_np_dtype)]
_k_properties = []
if(_col_name in _col_props):
_k_properties = _col_props[_col_name]
if(_k_type == gpudb.GPUdbRecordColumn._ColumnType.STRING and len(_k_properties) == 0):
_k_properties.append(gpudb.GPUdbColumnProperty.CHAR16)
return gpudb.GPUdbRecordColumn(_col_name, _k_type, _k_properties)
def save_df(_df, _table_name, _schema, _kdbc=KDBC, _col_props={}, _is_replicated=False):
"""Save a Dataframe to a Kinetica table."""
# Should index be used to create a column?
_use_index = (_df.index.name != None)
# Construct the type to use for creating the table.
_result_type = []
if(_use_index):
_idx_type = get_coldef(_df.index.name, _df.index.dtype, _col_props)
_idx_type.column_properties.append('shard_key')
_result_type.append(_idx_type)
for _idx in range(_df.columns.size):
_col_name = _df.columns[_idx]
_dtype = _df.dtypes[_idx]
_result_type.append(get_coldef(_col_name, _dtype, _col_props))
print('Dropping table: <{}>'.format(_table_name))
_kdbc.clear_table(_table_name, options={ 'no_error_if_not_exists':'true' })
_print_replicated = ''
if(_is_replicated):
_print_replicated = 'replicated '
print('Creating {} table: <{}>'.format(_print_replicated, _table_name))
for _idx, _coldef in enumerate(_result_type):
print('Column {}: <{}> ({}) {}'.format(_idx, _coldef.name, _coldef.column_type, _coldef.column_properties))
#_is_replicated = 'false'
_type_obj = gpudb.GPUdbRecordType(columns=_result_type, label=_table_name)
_result_table = gpudb.GPUdbTable(db=_kdbc, _type=_type_obj, name=_table_name,
options={'collection_name': _schema,
'is_replicated': _is_replicated} )
# Convert to records so we can preserve the column dtypes
_insert_records = _df.to_records(index=_use_index)
# Call item() so the types are converted to python native types
_insert_rows = [ list(x.item()) for x in _insert_records ]
if(len(_insert_rows) > 0):
_result_table.insert_records(_insert_rows)
print('Inserted rows into <{}.{}>: {}'.format(_schema, _table_name, len(_insert_rows)))
def load_df(_input_table, _kdbc=KDBC):
"""Load a dataframe from a Kinetica table."""
_table = gpudb.GPUdbTable(_type=None, name=_input_table , db=_kdbc)
_type = _table.get_table_type()
_columns = [_col.name for _col in _type.columns]
#print('Getting records from <{}>'.format(_input_table), end='', flush=True)
sys.stdout.write('Getting {} records from <{}>'.format(_table.count, _input_table))
BATCH_SIZE=10000
_offset = 0
_table_df = pd.DataFrame()
while True:
_response = _kdbc.get_records(table_name=_input_table,
offset=_offset, limit=BATCH_SIZE)
check_response(_response)
_res_decoded = gpudb.GPUdbRecord.decode_binary_data(
_response['type_schema'],
_response['records_binary'])
# print something to show we are working
#print('.', end='', flush=True)
sys.stdout.write('.')
_offset += len(_res_decoded)
_table_df = _table_df.append(_res_decoded)
if _response['has_more_records'] == False:
break;
# reorder dataframe columns
_table_df = _table_df[_columns]
print('')
print('Records Retrieved: {}'.format(_table_df.shape))
return _table_df
def check_response(_response):
_status = _response['status_info']['status']
if(_status != 'OK'):
_message = _response['status_info']['message']
raise Exception('[%s]: %s' % (_status, _message))
return _response
| 35.79562
| 115
| 0.636419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,197
| 0.244086
|
f0466389a763d8464eb0947ea583db3f0c84a014
| 2,581
|
py
|
Python
|
batch/batch/driver/k8s_cache.py
|
MariusDanner/hail
|
5ca0305f8243b5888931b1afaa1fbfb617dee097
|
[
"MIT"
] | 2
|
2020-12-15T21:20:24.000Z
|
2020-12-21T19:46:26.000Z
|
batch/batch/driver/k8s_cache.py
|
MariusDanner/hail
|
5ca0305f8243b5888931b1afaa1fbfb617dee097
|
[
"MIT"
] | 3
|
2017-06-16T18:10:45.000Z
|
2017-07-21T17:44:13.000Z
|
batch/batch/driver/k8s_cache.py
|
MariusDanner/hail
|
5ca0305f8243b5888931b1afaa1fbfb617dee097
|
[
"MIT"
] | 2
|
2020-07-28T18:55:19.000Z
|
2020-10-19T16:43:03.000Z
|
import time
import asyncio
import sortedcontainers
from hailtop.utils import retry_transient_errors
class K8sCache:
def __init__(self, client, refresh_time, max_size=100):
self.client = client
self.refresh_time = refresh_time
self.max_size = max_size
self.secrets = {}
self.secret_ids = sortedcontainers.SortedSet(
key=lambda id: self.secrets[id][1])
self.secret_locks = {}
self.service_accounts = {}
self.service_account_ids = sortedcontainers.SortedSet(
key=lambda id: self.service_accounts[id][1])
self.service_account_locks = {}
async def read_secret(self, name, namespace, timeout):
id = (name, namespace)
lock = self.secret_locks.get(id)
if lock is None:
lock = asyncio.Lock()
self.secret_locks[id] = lock
async with lock:
secret, time_updated = self.secrets.get(id, (None, None))
if time_updated and time.time() < time_updated + self.refresh_time:
return secret
if len(self.secrets) == self.max_size:
head_id = self.secret_ids.pop(0)
del self.secrets[head_id]
secret = await retry_transient_errors(
self.client.read_namespaced_secret,
name,
namespace,
_request_timeout=timeout)
self.secrets[id] = (secret, time.time())
self.secret_ids.add(id)
del self.secret_locks[id]
return secret
async def read_service_account(self, name, namespace, timeout):
id = (name, namespace)
lock = self.service_account_locks.get(id)
if lock is None:
lock = asyncio.Lock()
self.service_account_locks[id] = lock
async with lock:
sa, time_updated = self.service_accounts.get(id, (None, None))
if time_updated and time.time() < time_updated + self.refresh_time:
return sa
if len(self.service_accounts) == self.max_size:
head_id = self.service_account_ids.pop(0)
del self.service_accounts[head_id]
sa = await retry_transient_errors(
self.client.read_namespaced_service_account,
name,
namespace,
_request_timeout=timeout)
self.service_accounts[id] = (sa, time.time())
self.service_account_ids.add(id)
del self.service_account_locks[id]
return sa
| 31.864198
| 79
| 0.588532
| 2,477
| 0.959706
| 0
| 0
| 0
| 0
| 1,931
| 0.74816
| 0
| 0
|
f046760db9f9c57e0de347811b277f149a454916
| 49
|
py
|
Python
|
pluploader/upm/exceptions.py
|
craftamap/pluploader
|
c44e683282abb6fba8ced156aa807a66736a4ca1
|
[
"Apache-2.0"
] | 12
|
2020-04-09T12:50:23.000Z
|
2020-10-30T14:43:40.000Z
|
pluploader/upm/exceptions.py
|
livelyapps/pluploader
|
39f2f50ba9625c038cdb1f5a7ecf2ad64da5577c
|
[
"Apache-2.0"
] | 40
|
2020-04-12T15:25:46.000Z
|
2021-06-04T19:47:44.000Z
|
pluploader/upm/exceptions.py
|
craftamap/pluploader
|
c44e683282abb6fba8ced156aa807a66736a4ca1
|
[
"Apache-2.0"
] | 2
|
2020-09-16T14:07:49.000Z
|
2020-10-30T14:45:07.000Z
|
class UploadFailedException(Exception):
pass
| 16.333333
| 39
| 0.795918
| 48
| 0.979592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f0468dc014eafb01e69ffc6248b5e5de49dc570a
| 3,376
|
py
|
Python
|
tools/nntool/interpreter/commands/imageformat.py
|
gemenerik/gap_sdk
|
afae64d239db6d73f79c90c2ca2c832b6361f109
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/interpreter/commands/imageformat.py
|
gemenerik/gap_sdk
|
afae64d239db6d73f79c90c2ca2c832b6361f109
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/interpreter/commands/imageformat.py
|
gemenerik/gap_sdk
|
afae64d239db6d73f79c90c2ca2c832b6361f109
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
from copy import deepcopy
from cmd2 import Cmd2ArgumentParser, with_argparser
from interpreter.nntool_shell_base import NNToolShellBase
from quantization.qtype import QType
from utils.node_id import NodeId
from graph.types import ImageFormatParameters, NNEdge, TransposeParameters
from graph.manipulations.formatter import insert_formatter, remove_formatter
class ImageFormatCommand(NNToolShellBase):
def inputs_choices(self):
if self.G is None:
return []
return [node.name for node in self.G.inputs()]
def format_choices(self):
return [fmt.lower() for fmt in ImageFormatParameters.FORMAT_CHANGES] + ['none']
def norm_choices(self):
return [fmt.lower() for fmt in ImageFormatParameters.NORMALIZATIONS] + ['none']
# IMAGEFORMAT COMMAND
parser_imageformat = Cmd2ArgumentParser(
"inserts image format node into graphs")
parser_imageformat.add_argument('input_node',
choices_method=inputs_choices,
help='input node name to format')
parser_imageformat.add_argument('image_formatter',
choices_method=format_choices,
help='input node name to format')
parser_imageformat.add_argument('image_normalizer',
choices_method=norm_choices,
help='input node name to format')
@with_argparser(parser_imageformat)
def do_imageformat(self, args: argparse.Namespace):
""" Add or modify image format options."""
self._check_graph()
if args.input_node not in self.G:
self.perror("input node not found")
return
input_node = self.G[args.input_node]
out_edges = self.G.out_edges(input_node.name)
if len(out_edges) == 1 and isinstance(out_edges[0].to_node, ImageFormatParameters):
remove_formatter(self.G, out_edges[0].to_node)
self.G.add_dimensions()
self.pfeedback(f'removed image formatter {out_edges[0].to_node.name}')
return
if args.image_formatter == "none" and args.image_normalizer == "none":
self.pfeedback("no formatting set")
self.G.add_dimensions()
return
insert_formatter(self.G, input_node,
args.image_formatter, args.image_normalizer)
self.G.add_dimensions()
self.pfeedback(f'inserted image formatter after node {input_node.name} with'
f'format {args.image_formatter} and normalization {args.image_normalizer}')
| 43.844156
| 98
| 0.672393
| 2,289
| 0.678021
| 0
| 0
| 1,180
| 0.349526
| 0
| 0
| 1,176
| 0.348341
|
f0476c6057fe6e189aeed8a5c7b88b67234d582d
| 78
|
py
|
Python
|
svae/__init__.py
|
APodolskiy/SentenceVAE
|
afe82504922de700810b24638f7df0dbf1d8fa11
|
[
"MIT"
] | null | null | null |
svae/__init__.py
|
APodolskiy/SentenceVAE
|
afe82504922de700810b24638f7df0dbf1d8fa11
|
[
"MIT"
] | null | null | null |
svae/__init__.py
|
APodolskiy/SentenceVAE
|
afe82504922de700810b24638f7df0dbf1d8fa11
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
RNN_TYPES = {
'lstm': nn.LSTM,
'gru': nn.GRU
}
| 11.142857
| 21
| 0.564103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.141026
|
f04859b27ee91e595f5a5127a619b6f6d8f15b47
| 5,391
|
py
|
Python
|
extract_embeddings.py
|
Artem531/opencv-face-recognition-with-YOLOv3
|
53a93711a079ea3739cab068aeaf5c684f6e53c4
|
[
"MIT"
] | null | null | null |
extract_embeddings.py
|
Artem531/opencv-face-recognition-with-YOLOv3
|
53a93711a079ea3739cab068aeaf5c684f6e53c4
|
[
"MIT"
] | null | null | null |
extract_embeddings.py
|
Artem531/opencv-face-recognition-with-YOLOv3
|
53a93711a079ea3739cab068aeaf5c684f6e53c4
|
[
"MIT"
] | null | null | null |
# USAGE
# python extract_embeddings.py --dataset dataset --embeddings output/embeddings.pickle \
# --detector face_detection_model --embedding-model openface_nn4.small2.v1.t7
# import the necessary packages
from imutils.face_utils import FaceAligner
from imutils import paths
import numpy as np
import argparse
import imutils
import pickle
import cv2
import os
import dlib
from PIL import Image
from yolo import YOLO, detect_video
from yolo3.utils import letterbox_image
from keras import backend as K
def detect_image(self, image):
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
#print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
return out_boxes, out_scores, out_classes
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--dataset", required=True,
help="path to input directory of faces + images")
ap.add_argument("-e", "--embeddings", required=True,
help="path to output serialized db of facial embeddings")
ap.add_argument("-m", "--embedding-model", required=True,
help="path to OpenCV's deep learning face embedding model")
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
args = vars(ap.parse_args())
# load our serialized face detector from disk
print("[INFO] loading face detector...")
predictor = dlib.shape_predictor(args["shape_predictor"])
#detector = dlib.get_frontal_face_detector()
detector = YOLO()
# load our serialized face embedding model from disk
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize our lists of extracted facial embeddings and
# corresponding people names
knownEmbeddings = []
knownNames = []
# initialize the total number of faces processed
total = 0
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
# extract the person name from the image path
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
# load the image, resize it to have a width of 800 pixels (while
# maintaining the aspect ratio), and then grab the image
# dimensions
image = cv2.imread(imagePath)
image = imutils.resize(image, width=800)
#try to rise resolution
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#image = blurred
#clahe = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8,8))
#image = clahe.apply(image)
#image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(h, w) = image.shape[:2]
# we're making the assumption that each image has only ONE
# face, so find the bounding box with the largest probability
#align_faces
fa = FaceAligner(predictor, desiredFaceWidth=256)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#rects = detector(gray, 2)
rects = []
out_boxes, out_scores, out_classes = detect_image(detector, Image.fromarray(image))
for i, c in reversed(list(enumerate(out_classes))):
(x, y, x1, y1) = out_boxes[i]
w = abs(x - x1)
h = abs(y - y1)
startX = int(min(x1, x))
endX = startX + w
startY = int(min(y1, y))
endY = startY + h
left, right, bottom, top = startX, endX, endY, startY
rect = dlib.rectangle(int(top), int(left), int(bottom) , int(right))
rects.append(rect)
for rect in rects:
faceAligned = fa.align(image, gray, rect)
print(faceAligned)
cv2.imshow("Aligned", np.asarray(faceAligned))
cv2.waitKey(0)
face = faceAligned
(fH, fW) = face.shape[:2]
# ensure the face width and height are sufficiently large
if fW < 20 or fH < 20:
continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
(96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# add the name of the person + corresponding face
# embedding to their respective lists
knownNames.append(name)
knownEmbeddings.append(vec.flatten())
total += 1
# dump the facial embeddings + names to disk
print("[INFO] serializing {} encodings...".format(total))
data = {"embeddings": knownEmbeddings, "names": knownNames}
f = open(args["embeddings"], "wb")
f.write(pickle.dumps(data))
f.close()
| 32.475904
| 88
| 0.708959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,227
| 0.413096
|
f04885c174b83de4f873553053e8d7a0c7d4a2dc
| 5,628
|
py
|
Python
|
alf/algorithms/diayn_algorithm.py
|
runjerry/alf
|
7e83a29a3102ff04a6ce2c3105ae36f28b090e65
|
[
"Apache-2.0"
] | 1
|
2021-03-22T10:53:55.000Z
|
2021-03-22T10:53:55.000Z
|
alf/algorithms/diayn_algorithm.py
|
Haichao-Zhang/alf
|
38a3621337a030f74bb3944d7695e7642e777e10
|
[
"Apache-2.0"
] | null | null | null |
alf/algorithms/diayn_algorithm.py
|
Haichao-Zhang/alf
|
38a3621337a030f74bb3944d7695e7642e777e10
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import gin.tf
import tensorflow as tf
from tf_agents.networks.network import Network
import tf_agents.specs.tensor_spec as tensor_spec
from alf.algorithms.algorithm import Algorithm, AlgorithmStep, LossInfo
from alf.utils.normalizers import ScalarAdaptiveNormalizer
from alf.utils.encoding_network import EncodingNetwork
from alf.data_structures import StepType, ActionTimeStep
DIAYNInfo = namedtuple("DIAYNInfo", ["reward", "loss"])
@gin.configurable
class DIAYNAlgorithm(Algorithm):
"""Diversity is All You Need Module
This module learns a set of skill-conditional policies in an unsupervised
way. See Eysenbach et al "Diversity is All You Need: Learning Diverse Skills
without a Reward Function" for more details.
"""
def __init__(self,
num_of_skills,
feature_spec,
hidden_size=256,
reward_adapt_speed=8.0,
encoding_net: Network = None,
discriminator_net: Network = None,
name="DIAYNAlgorithm"):
"""Create a DIAYNAlgorithm.
Args:
num_of_skills (int): number of skills
hidden_size (int|tuple): size of hidden layer(s).
If discriminator_net is None, a default discriminator_net
with this hidden_size will be used.
reward_adapt_speed (float): how fast to adapt the reward normalizer.
rouphly speaking, the statistics for the normalization is
calculated mostly based on the most recent T/speed samples,
where T is the total number of samples.
encoding_net (Network): network for encoding observation into a
latent feature specified by feature_spec. Its input is the same
as the input of this algorithm.
discriminator_net (Network): network for predicting the skill labels
based on the observation.
"""
skill_spec = tf.TensorSpec((num_of_skills, ))
super().__init__(train_state_spec=skill_spec, name=name)
flat_feature_spec = tf.nest.flatten(feature_spec)
assert len(flat_feature_spec
) == 1, "DIAYNAlgorithm doesn't support nested feature_spec"
self._num_skills = num_of_skills
self._encoding_net = encoding_net
if isinstance(hidden_size, int):
hidden_size = (hidden_size, )
if discriminator_net is None:
discriminator_net = EncodingNetwork(
name="discriminator_net",
input_tensor_spec=feature_spec,
fc_layer_params=hidden_size,
last_layer_size=self._num_skills,
last_kernel_initializer=tf.initializers.Zeros())
self._discriminator_net = discriminator_net
self._reward_normalizer = ScalarAdaptiveNormalizer(
speed=reward_adapt_speed)
def train_step(self,
time_step: ActionTimeStep,
state,
calc_intrinsic_reward=True):
"""
Args:
time_step (ActionTimeStep): input time_step data, where the
observation is skill-augmened observation
state (Tensor): state for DIAYN (previous skill)
calc_intrinsic_reward (bool): if False, only return the losses
Returns:
TrainStep:
outputs: empty tuple ()
state: skill
info (DIAYNInfo):
"""
observations_aug = time_step.observation
step_type = time_step.step_type
observation, skill = observations_aug
prev_skill = state
if self._encoding_net is not None:
feature, _ = self._encoding_net(observation)
skill_pred, _ = self._discriminator_net(inputs=feature)
skill_discriminate_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=prev_skill, logits=skill_pred)
valid_masks = tf.cast(
tf.not_equal(step_type, StepType.FIRST), tf.float32)
skill_discriminate_loss = skill_discriminate_loss * valid_masks
intrinsic_reward = ()
if calc_intrinsic_reward:
# use negative cross-entropy as reward
# neglect neg-prior term as it is constant
intrinsic_reward = tf.stop_gradient(-skill_discriminate_loss)
intrinsic_reward = self._reward_normalizer.normalize(
intrinsic_reward)
return AlgorithmStep(
outputs=(),
state=skill,
info=DIAYNInfo(
reward=intrinsic_reward,
loss=LossInfo(
loss=skill_discriminate_loss,
extra=dict(
skill_discriminate_loss=skill_discriminate_loss))))
def calc_loss(self, info: DIAYNInfo):
loss = tf.nest.map_structure(tf.reduce_mean, info.loss)
return LossInfo(scalar_loss=loss.loss, extra=loss.extra)
| 38.813793
| 80
| 0.648543
| 4,527
| 0.804371
| 0
| 0
| 4,545
| 0.807569
| 0
| 0
| 2,378
| 0.42253
|
f04907145e0f5329d91ce6e7245421c294f51891
| 16,781
|
py
|
Python
|
src/genie/libs/parser/linux/route.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/linux/route.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/linux/route.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
"""route.py
Linux parsers for the following commands:
* route
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
from netaddr import IPAddress, IPNetwork
# =======================================================
# Schema for 'route'
# =======================================================
class RouteSchema(MetaParser):
"""Schema for route"""
# Destination Gateway Genmask Flags Metric Ref Use Iface
# 0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 wlo1
schema = {
'routes': {
Any(): { # 'destination'
'mask': {
Any(): {
'nexthop': {
Any(): { # index: 1, 2, 3, etc
'interface': str,
Optional('flags'): str,
Optional('gateway'): str,
Optional('metric'): int,
Optional('ref'): int,
Optional('use'): int,
Optional('scope'): str,
Optional('proto'): str,
Optional('src'): str,
Optional('broadcast'): bool,
Optional('table'): str,
Optional('local'): bool
}
}
}
}
}
}
}
# =======================================================
# Parser for 'route'
# =======================================================
class Route(RouteSchema):
"""Parser for
* route
* route -4 -n
* route -4n
* route -n4
* route -n -4
"""
cli_command = ['route', 'route {flag}']
def cli(self, flag=None, output=None):
if output is None:
cmd = self.cli_command[0]
if flag in ['-4 -n', '-4n', '-n4']:
command = self.cli_command[1].replace('{flag}', flag)
out = self.device.execute(cmd)
else:
out = output
# Destination Gateway Genmask Flags Metric Ref Use Iface
# 192.168.1.0 0.0.0.0 255.255.255.0 U 600 0 0 wlo1
p1 = re.compile(r'(?P<destination>[a-z0-9\.\:]+)'
' +(?P<gateway>[a-z0-9\.\:_]+)'
' +(?P<mask>[a-z0-9\.\:]+)'
' +(?P<flags>[a-zA-Z]+)'
' +(?P<metric>(\d+))'
' +(?P<ref>(\d+))'
' +(?P<use>(\d+))'
' +(?P<interface>\S+)'
)
# Initializes the Python dictionary variable
parsed_dict = {}
# Defines the "for" loop, to pattern match each line of output
for line in out.splitlines():
line = line.strip()
# 192.168.1.0 0.0.0.0 255.255.255.0 U 600 0 0 wlo1
m = p1.match(line)
if m:
if 'routes' not in parsed_dict:
parsed_dict.setdefault('routes', {})
group = m.groupdict()
destination = group['destination']
mask = group['mask']
index_dict = {}
for str_k in ['interface', 'flags', 'gateway']:
index_dict[str_k] = group[str_k]
for int_k in ['metric', 'ref', 'use']:
index_dict[int_k] = int(group[int_k])
if destination in parsed_dict['routes']:
if mask in parsed_dict['routes'][destination]['mask']:
parsed_dict['routes'][destination]['mask'][mask].\
setdefault('nexthop', {index+1: index_dict})
else:
index = 1
parsed_dict['routes'][destination]['mask'].\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
else:
index = 1
parsed_dict['routes'].setdefault(destination, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
continue
return parsed_dict
# =======================================================
# Parser for 'netstat -rn'
# =======================================================
class ShowNetworkStatusRoute(Route, RouteSchema):
"""Parser for
* netstat -rn
"""
cli_command = ['netstat -rn']
def cli(self, output=None):
if output is None:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
return super().cli(output=out)
# =====================================================
# Parser for ip route show table all
# =====================================================
class IpRouteShowTableAll(RouteSchema):
"""
Parser for
* ip route show table all
"""
cli_command = ['ip route show table all']
def cli(self, output=None):
if output is None:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# default via 192.168.1.1 dev enp7s0 proto dhcp metric 100
p1 = re.compile(r'default via (?P<gateway>[a-z0-9\.\:]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>[a-z]+)'
' metric (?P<metric>[\d]+)'
)
# 169.254.0.0/16 dev enp7s0 scope link metric 1000
p2 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' scope (?P<scope>\w+)'
' metric (?P<metric>[\d]+)'
)
# 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
p3 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
)
# 172.18.0.0/16 dev br-d19b23fac393 proto kernel scope link src 172.18.0.1 linkdown
p4 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
' linkdown '
)
# 192.168.1.0/24 dev enp7s0 proto kernel scope link src 192.168.1.212 metric 100
p5 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
' metric (?P<metric>[\d]+)'
)
# broadcast 127.0.0.0 dev lo table local proto kernel scope link src 127.0.0.1
p6 = re.compile(r'broadcast (?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' table (?P<table>\w+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
)
# local 10.233.44.70 dev kube-ipvs0 table local proto kernel scope host src 10.233.44.70
p7 = re.compile(r'local (?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' table (?P<table>\w+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
)
# Initializes the Python dictionary variable
parsed_dict = {}
# Defines the "for" loop, to pattern match each line of output
for line in out.splitlines():
line = line.strip()
# default via 192.168.1.1 dev enp7s0 proto dhcp metric 100
m = p1.match(line)
if m:
if 'routes' not in parsed_dict:
parsed_dict.setdefault('routes', {})
group = m.groupdict()
gateway = group['gateway']
interface = group['device']
metric = int(group['metric'])
if gateway:
parsed_dict['routes'] = { '0.0.0.0': {
'mask': {
'0.0.0.0': {
'nexthop': {
1:{
'gateway': gateway,
'interface': interface,
'metric': metric
}
}
}
}
}
}
# 169.254.0.0/16 dev enp7s0 scope link metric 1000
m = p2.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
metric = int(group['metric'])
scope = group['scope']
index_dict = {'interface' : interface,
'scope' : scope,
'metric': metric
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
m = p3.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# 172.18.0.0/16 dev br-d19b23fac393 proto kernel scope link src 172.18.0.1 linkdown
m = p4.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# 192.168.1.0/24 dev enp7s0 proto kernel scope link src 192.168.1.212 metric 100
m = p5.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
metric = group['metric']
src = group['src']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src,
'metric': metric
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# broadcast 127.0.0.0 dev lo table local proto kernel scope link src 127.0.0.1
m = p6.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
table = group['table']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src,
'broadcast': True,
'table': table
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# local 10.233.44.70 dev kube-ipvs0 table local proto kernel scope host src 10.233.44.70
m = p7.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
table = group['table']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src,
'local': True,
'table': table
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
return parsed_dict
| 38.052154
| 100
| 0.362255
| 15,950
| 0.95048
| 0
| 0
| 0
| 0
| 0
| 0
| 4,904
| 0.292235
|
f04ab36ef3e94f8716214625f760733bb0b62c82
| 1,437
|
py
|
Python
|
chapter-7/chassis/demo.py
|
wallacei/microservices-in-action-copy
|
f9840464a1f9ec40622989e9e5377742246244f3
|
[
"MIT"
] | 115
|
2017-11-06T08:12:07.000Z
|
2022-02-25T09:56:59.000Z
|
chapter-7/chassis/demo.py
|
wallacei/microservices-in-action-copy
|
f9840464a1f9ec40622989e9e5377742246244f3
|
[
"MIT"
] | 12
|
2017-08-05T14:51:35.000Z
|
2020-12-01T11:05:14.000Z
|
chapter-7/chassis/demo.py
|
wallacei/microservices-in-action-copy
|
f9840464a1f9ec40622989e9e5377742246244f3
|
[
"MIT"
] | 82
|
2017-08-05T09:41:12.000Z
|
2022-02-18T00:57:39.000Z
|
import json
import datetime
import requests
from nameko.web.handlers import http
from nameko.timer import timer
from statsd import StatsClient
from circuitbreaker import circuit
class DemoChassisService:
    """Demo nameko service showcasing "service chassis" features.

    Every HTTP endpoint is instrumented with a StatsD timer, and the
    endpoints that can fail are wrapped in a circuit breaker.
    """

    name = "demo_chassis_service"

    # StatsD client shared by the @statsd.timer decorators below; metrics
    # are emitted under the 'simplebank-demo' prefix via UDP.
    statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')

    @http('GET', '/health')
    @statsd.timer('health')
    def health(self, _request):
        """Liveness probe: return the current UTC timestamp as JSON."""
        return json.dumps({'ok': datetime.datetime.utcnow().__str__()})

    @http('GET', '/external')
    @circuit(failure_threshold=5, expected_exception=ConnectionError)
    @statsd.timer('external')
    def external_request(self, _request):
        """Call an external API, guarded by a circuit breaker.

        The breaker opens after 5 consecutive ConnectionErrors, shielding
        callers from a flapping downstream dependency.
        """
        response = requests.get('https://jsonplaceholder.typicode.com/posts/1')
        return json.dumps({'code': response.status_code, 'body': response.text})

    @http('GET', '/error')
    @circuit(failure_threshold=5, expected_exception=ZeroDivisionError)
    @statsd.timer('http_error')
    def error_http_request(self, _request):
        """Deliberately failing endpoint used to demo the circuit breaker.

        ``1 / 0`` raises ZeroDivisionError on every call, which is exactly
        the exception the breaker above is configured to count.
        """
        # FIX: nameko @http entrypoints always pass the request object to the
        # handler; the original signature omitted it, so every call failed
        # with a TypeError instead of the intended ZeroDivisionError (which
        # the circuit breaker would then never see).
        return json.dumps({1 / 0})
class HealthCheckService:
    """Timer-driven nameko service that polls the demo service's /health."""

    name = "health_check_service"

    # Same StatsD endpoint and prefix as DemoChassisService.
    statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')

    @timer(interval=10)
    @statsd.timer('check_demo_service')
    def check_demo_service(self):
        """Every 10 seconds, GET the health endpoint and print the result."""
        reply = requests.get('http://0.0.0.0:8000/health')
        report = "DemoChassisService HEALTH CHECK: status_code {}, response: {}".format(
            reply.status_code, reply.text)
        print(report)
| 31.933333
| 85
| 0.701461
| 1,253
| 0.871955
| 0
| 0
| 969
| 0.674322
| 0
| 0
| 346
| 0.240779
|
f04b25d10196843175ed158d8658c6dd85f4722b
| 2,009
|
py
|
Python
|
src/autodoc/python/rst/base/block_quote.py
|
LudditeLabs/autodoc-tool
|
b4ae7e3b61907e7e9c3a1b534fce055e5860ffab
|
[
"Apache-2.0"
] | null | null | null |
src/autodoc/python/rst/base/block_quote.py
|
LudditeLabs/autodoc-tool
|
b4ae7e3b61907e7e9c3a1b534fce055e5860ffab
|
[
"Apache-2.0"
] | null | null | null |
src/autodoc/python/rst/base/block_quote.py
|
LudditeLabs/autodoc-tool
|
b4ae7e3b61907e7e9c3a1b534fce055e5860ffab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Block Quotes
------------
A text block that is indented relative to the preceding text, without
preceding markup indicating it to be a block quote, is a block quote.
All markup processing (for body elements and inline markup) continues
within the block quote. A block quote may end with an attribution: a text
block beginning with "--", "---", or a true em-dash, flush left within
the block quote, which names the source of the quoted material.
Syntax diagram:
+------------------------------+
| (current level of |
| indentation) |
+------------------------------+
+---------------------------+
| block quote |
| (body elements)+ |
| |
| -- attribution text |
| (optional) |
+---------------------------+
http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#block-quotes
"""
class BlockQuoteMixin:
    """Translator mixin that renders block quote nodes as indented blocks."""

    def visit_block_quote(self, node):
        """Open an indented block with one blank line above and below."""
        quote_indent = self.options['indent']
        self.open_block(indent=quote_indent, top_margin=1, bottom_margin=1)

    def depart_block_quote(self, node):
        """Close the block opened in :meth:`visit_block_quote`."""
        self.close_block()

    def visit_attribution(self, node):
        """Prefix the attribution text with the conventional '-- ' marker."""
        self.block.add_text(u'-- ')

    def depart_attribution(self, node):
        """Nothing to clean up after an attribution."""
        pass
| 34.637931
| 79
| 0.621702
| 369
| 0.183673
| 0
| 0
| 0
| 0
| 0
| 0
| 1,636
| 0.814335
|
f04b2efa0372d0580af551921d46f98895a3f1a0
| 11,037
|
py
|
Python
|
userdocker/subcommands/run.py
|
jsteffen/userdocker
|
eb3b6a2421ca392ec4485744244d913e51687040
|
[
"MIT"
] | null | null | null |
userdocker/subcommands/run.py
|
jsteffen/userdocker
|
eb3b6a2421ca392ec4485744244d913e51687040
|
[
"MIT"
] | null | null | null |
userdocker/subcommands/run.py
|
jsteffen/userdocker
|
eb3b6a2421ca392ec4485744244d913e51687040
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import re
from .. import __version__
from ..config import ALLOWED_IMAGE_REGEXPS
from ..config import ALLOWED_PORT_MAPPINGS
from ..config import CAPS_ADD
from ..config import CAPS_DROP
from ..config import ENV_VARS
from ..config import ENV_VARS_EXT
from ..config import NV_ALLOW_OWN_GPU_REUSE
from ..config import NV_ALLOWED_GPUS
from ..config import NV_DEFAULT_GPU_COUNT_RESERVATION
from ..config import NV_MAX_GPU_COUNT_RESERVATION
from ..config import PROBE_USED_MOUNTS
from ..config import RUN_PULL
from ..config import USER_IN_CONTAINER
from ..config import VOLUME_MOUNTS_ALWAYS
from ..config import VOLUME_MOUNTS_AVAILABLE
from ..config import VOLUME_MOUNTS_DEFAULT
from ..config import gid
from ..config import gids
from ..config import uid
from ..config import user_name
from ..helpers.cmd import init_cmd
from ..helpers.exceptions import UserDockerException
from ..helpers.execute import exec_cmd
from ..helpers.execute import exit_exec_cmd
from ..helpers.logger import logger
from ..helpers.nvidia import nvidia_get_available_gpus
from ..helpers.parser import init_subcommand_parser
def parser_run(parser):
    """Register the ``run`` subcommand and all of its arguments.

    The available options depend on the admin configuration: the volume
    flag only appears when any mounts are configured, and the publish flag
    only when port mappings are allowed.
    """
    sp = init_subcommand_parser(parser, 'run')

    sp.add_argument(
        "--no-default-mounts",
        action="store_true",
        help="does not automatically add default mounts",
    )

    # Assemble the -v/--volume help text from the admin-configured mount
    # lists; sections for empty lists are omitted.
    sections = (
        ('Admin enforced: %s.', VOLUME_MOUNTS_ALWAYS),
        ('Default: %s.', VOLUME_MOUNTS_DEFAULT),
        ('Available: %s.', VOLUME_MOUNTS_AVAILABLE),
    )
    mounts_help = [
        template % ', '.join(mounts)
        for template, mounts in sections if mounts
    ]
    if mounts_help:
        sp.add_argument(
            "-v", "--volume",
            action="append",
            dest="volumes",
            default=[],
            help="user specified volume mounts (can be given multiple times). "
                 "%s" % " ".join(mounts_help),
        )

    sp.add_argument(
        "--entrypoint",
        help="Overwrite the default ENTRYPOINT of the image",
    )
    sp.add_argument(
        "-w", "--workdir",
        help="Working directory inside the container",
    )

    if ALLOWED_PORT_MAPPINGS:
        sp.add_argument(
            "-p", "--publish",
            action="append",
            dest="port_mappings",
            default=[],
            help="Publish a container's ports to the host (see docker help). "
                 "Allowed: " + ', '.join(ALLOWED_PORT_MAPPINGS),
        )

    sp.add_argument(
        "image",
        help="the image to run. Allowed: " + ', '.join(ALLOWED_IMAGE_REGEXPS),
    )
    sp.add_argument(
        "image_args",
        nargs=argparse.REMAINDER,
        help="arguments passed to the image",
    )
def prepare_nvidia_docker_run(args):
    """Arbitrate GPU access before running via nvidia-docker.

    If the user set ``NV_GPU``, validate it against the admin policy
    (allowed GPU indices, maximum count, current availability). Otherwise
    pick the admin-default number of available GPUs and export the choice
    through ``os.environ['NV_GPU']`` so the subsequent nvidia-docker
    invocation inherits it.

    :param args: parsed CLI namespace; only ``args.executor_path`` is read.
    :raises UserDockerException: on any policy violation or when not enough
        GPUs are currently available.
    """
    # mainly handles GPU arbitration via ENV var for nvidia-docker
    # note that these are ENV vars for the command, not the container
    if os.getenv('NV_HOST'):
        raise UserDockerException('ERROR: NV_HOST env var not supported yet')
    # admin may disable GPU usage entirely by leaving NV_ALLOWED_GPUS empty
    if not NV_ALLOWED_GPUS:
        raise UserDockerException(
            "ERROR: No GPUs available due to admin setting."
        )
    nv_gpus = os.getenv('NV_GPU', '')
    if nv_gpus:
        # the user has set NV_GPU, just check if it's ok
        nv_gpus = [g.strip() for g in nv_gpus.split(',')]
        try:
            # only numeric GPU indices are accepted (no UUID notation)
            nv_gpus = [int(gpu) for gpu in nv_gpus]
        except ValueError as e:
            raise UserDockerException(
                "ERROR: Can't parse NV_GPU, use index notation: %s" % e
            )
        # NV_ALLOWED_GPUS is either the sentinel string 'ALL' or a
        # collection of permitted GPU indices
        if not (
                NV_ALLOWED_GPUS == 'ALL'
                or all(gpu in NV_ALLOWED_GPUS for gpu in nv_gpus)):
            raise UserDockerException(
                "ERROR: Access to at least one specified NV_GPU denied by "
                "admin. Available GPUs: %r" % (NV_ALLOWED_GPUS,)
            )
        # check if in bounds (and MAX >= 0; a negative MAX means unlimited)
        if 0 <= NV_MAX_GPU_COUNT_RESERVATION < len(nv_gpus):
            raise UserDockerException(
                "ERROR: Number of requested GPUs > %d (admin limit)" % (
                    NV_MAX_GPU_COUNT_RESERVATION,)
            )
        # check if available; own_gpus are GPUs this user already reserved
        gpus_available, own_gpus = nvidia_get_available_gpus(args.executor_path)
        if NV_ALLOW_OWN_GPU_REUSE:
            # the user may re-request GPUs already assigned to their own runs
            gpus_available.extend(own_gpus)
        for g in nv_gpus:
            if g not in gpus_available:
                msg = (
                    'ERROR: GPU %d is currently not available!\nUse:\n'
                    '"sudo userdocker ps --gpu-free" to find available GPUs.\n'
                    '"sudo userdocker ps --gpu-used" and "nvidia-smi" to see '
                    'status.' % g
                )
                if NV_ALLOW_OWN_GPU_REUSE and own_gpus:
                    msg += '\n"sudo userdocker ps --gpu-used-mine to show own' \
                           '(reusable) GPUs.'
                raise UserDockerException(msg)
    else:
        # NV_GPU wasn't set, use admin defaults, tell user
        gpu_default = NV_DEFAULT_GPU_COUNT_RESERVATION
        logger.info(
            "NV_GPU environment variable not set, trying to acquire admin "
            "default of %d GPUs" % gpu_default
        )
        gpus_available, own_gpus = nvidia_get_available_gpus(args.executor_path)
        # greedily take the first N available GPUs
        gpus = gpus_available[:gpu_default]
        if len(gpus) < gpu_default:
            msg = (
                'Could not find %d available GPU(s)!\nUse:\n'
                '"sudo userdocker ps --gpu-used" and "nvidia-smi" to see '
                'status.' % gpu_default
            )
            if NV_ALLOW_OWN_GPU_REUSE and own_gpus:
                msg += '\n You can set NV_GPU to reuse a GPU you have already' \
                       ' reserved.'
            raise UserDockerException(msg)
        # export the selection for the nvidia-docker wrapper to pick up
        gpu_env = ",".join([str(g) for g in gpus])
        logger.info("Setting NV_GPU=%s" % gpu_env)
        os.environ['NV_GPU'] = gpu_env
def exec_cmd_run(args):
    """Assemble and exec the final (nvidia-)docker ``run`` command.

    Enforces the admin policy on port mappings, volume mounts and image
    names, injects user identity and marker environment variables, applies
    capability/workdir/entrypoint settings, honors the ``RUN_PULL`` policy,
    and finally replaces the current process with the assembled command via
    ``exit_exec_cmd`` (does not return on success).

    :param args: namespace produced by the ``run`` sub-parser.
    :raises UserDockerException: on any policy violation.
    """
    cmd = init_cmd(args)
    # check port mappings: each requested mapping must match at least one
    # of the admin-supplied regexp patterns
    for pm in getattr(args, 'port_mappings', []):
        for pm_pattern in ALLOWED_PORT_MAPPINGS:
            if re.match(pm_pattern, pm):
                cmd += ['-p', pm]
                break
        else:
            raise UserDockerException(
                "ERROR: given port mapping not allowed: %s" % pm
            )
    # check mounts: start with the admin-enforced ones, add defaults unless
    # suppressed, then validate each user-requested mount
    mounts = []
    mounts_available = \
        VOLUME_MOUNTS_ALWAYS + VOLUME_MOUNTS_DEFAULT + VOLUME_MOUNTS_AVAILABLE
    mounts += VOLUME_MOUNTS_ALWAYS
    if not args.no_default_mounts:
        mounts += VOLUME_MOUNTS_DEFAULT
    for user_mount in getattr(args, 'volumes', []):
        if user_mount in mounts:
            continue
        if user_mount in mounts_available:
            mounts += [user_mount]
            continue
        # literal matches didn't work, check if the user appended a 'ro' flag
        # to an otherwise allowed host:container mount
        if len(user_mount.split(':')) == 3:
            host_path, container_path, flag = user_mount.split(':')
            if flag == 'ro':
                st = ':'.join([host_path, container_path])
                if st in mounts:
                    # upgrade mount to include ro flag
                    idx = mounts.index(st)
                    mounts[idx] = user_mount
                    continue
                if st in mounts_available:
                    mounts += [user_mount]
                    continue
        # allow potential unspecified container_path mounts: an entry like
        # "/host/path:" in the config permits any container target
        host_path = user_mount.split(':')[0] + ':'
        if host_path in mounts_available:
            mounts += [user_mount]
            continue
        raise UserDockerException(
            "ERROR: given mount not allowed: %s" % user_mount
        )
    # all host-side mount paths must exist before docker sees them
    mount_host_paths = [m.split(':')[0] for m in mounts]
    for ms in mount_host_paths:
        if not os.path.exists(ms):
            raise UserDockerException(
                "ERROR: mount can't be found: %s" % ms
            )
        if PROBE_USED_MOUNTS and os.path.isdir(ms):
            # touch the dir (e.g. to trigger automounts) before the run;
            # NOTE(review): presumably that's the purpose — confirm with admin docs
            os.listdir(ms)
    for mount in mounts:
        if ':' not in mount:
            raise UserDockerException(
                "ERROR: anonymous mounts currently not supported: %s" % mount
            )
        cmd += ["-v", mount]
    if args.executor == 'nvidia-docker':
        # performs GPU arbitration and may set os.environ['NV_GPU']
        prepare_nvidia_docker_run(args)
    # environment variables injected into the container: admin-configured
    # ones plus markers identifying the userdocker version and invoking user
    env_vars = ENV_VARS + ENV_VARS_EXT.get(args.executor, [])
    env_vars += [
        "USERDOCKER=%s" % __version__,
        "USERDOCKER_USER=%s" % user_name,
        "USERDOCKER_UID=%d" % uid,
    ]
    if args.executor == 'nvidia-docker':
        # remember which GPU was assigned to the container for ps --gpu-used
        env_vars += [
            "USERDOCKER_NV_GPU=%s" % os.environ['NV_GPU']
        ]
    for env_var in env_vars:
        cmd += ['-e', env_var]
    if USER_IN_CONTAINER:
        # run as the invoking user and add their supplementary groups
        # (system groups below gid 1000 and the primary group are skipped)
        cmd += ["-u", "%d:%d" % (uid, gid)]
        for _g in gids:
            if _g < 1000 or _g == gid:
                continue
            cmd += ["--group-add", "%d" % (_g)]
    for cap_drop in CAPS_DROP:
        cmd += ["--cap-drop=%s" % cap_drop]
    for cap_add in CAPS_ADD:
        cmd += ["--cap-add=%s" % cap_add]
    if args.workdir:
        cmd += ["-w", args.workdir]
    if args.entrypoint:
        cmd += ["--entrypoint", args.entrypoint]
    # additional injection protection, deactivated for now due to nvidia-docker
    # unability to handle this
    # cmd.append("--")
    img = args.image
    if ":" not in img and "@" not in img:
        # user didn't explicitly set a tag or digest, append ":latest"
        img += ":latest"
    # the (tag-qualified) image name must match an allowed regexp, if any
    if ALLOWED_IMAGE_REGEXPS:
        for air in ALLOWED_IMAGE_REGEXPS:
            if re.match(air, img):
                break
        else:
            raise UserDockerException(
                "ERROR: image %s not in allowed image regexps: %s" % (
                    img, ALLOWED_IMAGE_REGEXPS))
    # pull policy: "default" delegates to docker run, "always" pulls first,
    # "never" requires the image to already exist locally
    if RUN_PULL == "default":
        # just let `docker run` do its thing
        pass
    elif RUN_PULL == "always":
        # pull image
        exec_cmd(
            [args.executor_path, 'pull', img],
            dry_run=args.dry_run,
            loglvl=logging.DEBUG,
        )
    elif RUN_PULL == "never":
        # check if image is available locally ('images -q' prints its id)
        tmp = exec_cmd(
            [args.executor_path, 'images', '-q', img],
            return_status=False,
            loglvl=logging.DEBUG,
        )
        if not tmp:
            raise UserDockerException(
                "ERROR: you can only use locally available images, but %s could"
                " not be found locally" % img
            )
    else:
        raise UserDockerException(
            "ERROR: RUN_PULL config variable not expected range, contact admin"
        )
    cmd.append(img)
    cmd.extend(args.image_args)
    # replaces the current process; on success this call does not return
    exit_exec_cmd(cmd, dry_run=args.dry_run)
| 33.144144
| 80
| 0.581317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,934
| 0.265833
|