hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9f4fae4aa81279897b72d2373678cf498b7c183a | 1,397 | py | Python | util.py | atush-dhakal/GT-MW-Notifier | 3873e30c772c5885ca8935665cc22966353264c6 | [
"MIT"
] | 1 | 2022-02-18T05:10:41.000Z | 2022-02-18T05:10:41.000Z | util.py | atush-dhakal/GT-MW-Notifier | 3873e30c772c5885ca8935665cc22966353264c6 | [
"MIT"
] | null | null | null | util.py | atush-dhakal/GT-MW-Notifier | 3873e30c772c5885ca8935665cc22966353264c6 | [
"MIT"
] | 2 | 2021-01-04T13:58:28.000Z | 2021-12-31T22:16:58.000Z | import requests
import configparser
from email_validator import validate_email, EmailNotValidError
from mailchimp import OnCampusJobList
import email_notifier
import groupme_bot
# Application configuration is read once at import time from config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
# Section with the Google reCAPTCHA credentials (expects RECAPTCHA_SECRET_KEY).
google_config = config['GOOGLE']
def is_valid_recaptcha(recaptcha_response) -> bool:
    """Verify a Google reCAPTCHA response token with Google's siteverify API.

    :param recaptcha_response: token produced by the reCAPTCHA widget
    :return: True when Google confirms the token, False otherwise (including
             on HTTP errors, network failures, or a malformed response body)
    """
    request_url = 'https://www.google.com/recaptcha/api/siteverify'
    verification_data = {
        'secret': google_config['RECAPTCHA_SECRET_KEY'],
        'response': recaptcha_response
    }
    try:
        # A timeout keeps a slow or unreachable verification endpoint from
        # hanging the caller indefinitely.
        response = requests.post(request_url, data=verification_data, timeout=10)
    except requests.RequestException:
        # Network problems count as "not verified" rather than crashing.
        return False
    if response.status_code == 200:
        # 'success' may be absent on a malformed response; treat as failure.
        return bool(response.json().get('success', False))
    return False
def is_valid_email(email) -> bool:
    """Return True when *email* is syntactically valid, False otherwise.

    Wraps email_validator.validate_email, converting its exception into a
    boolean so callers never have to handle EmailNotValidError themselves.
    """
    try:
        validate_email(email)
        return True
    except EmailNotValidError:
        # The exception detail is not needed; only validity matters here.
        return False
def add_email_subscriber(new_email_subscriber):
    """Subscribe an address to the on-campus job mailing list.

    Registers the address with the Mailchimp list, announces the signup in
    GroupMe, and best-effort sends a welcome email. A welcome-email failure
    must not undo the subscription, so it is reported and swallowed.
    """
    custom_list = OnCampusJobList()
    custom_list.add_list_member(new_email_subscriber)
    groupme_bot.send_message("We just got a new subscriber, my dudes!")
    try:
        # send welcome message for new subscribers. We don't want to send welcome message to existing user
        email_notifier.send_welcome_message(new_email_subscriber)
    except Exception as e:
        groupme_bot.send_message("Oops, there was a failure on sending the welcome email")
        print(e)
| 29.723404 | 106 | 0.730136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.224052 |
9f4fb4aa07286decc81511426c6486ee3fe2e5ca | 4,704 | py | Python | datagokr/kma/VilageFcstInfoService_2_0.py | uujei/datagokr | 308f5151f819010f2c4e174a6ef84d83d3bea922 | [
"MIT"
] | null | null | null | datagokr/kma/VilageFcstInfoService_2_0.py | uujei/datagokr | 308f5151f819010f2c4e174a6ef84d83d3bea922 | [
"MIT"
] | null | null | null | datagokr/kma/VilageFcstInfoService_2_0.py | uujei/datagokr | 308f5151f819010f2c4e174a6ef84d83d3bea922 | [
"MIT"
] | null | null | null | import logging
import os
from datetime import datetime
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field, HttpUrl, SecretStr, ValidationError
from ..DataGoKr import DataGoKr
# logging
# NOTE(review): getLogger(__file__) names this logger after the file path;
# the usual convention is getLogger(__name__) -- confirm before changing.
logger = logging.getLogger(__file__)
# debug only
# KMA service key comes from the environment; None when the variable is unset.
KMA_API_KEY = os.getenv("KMA_API_KEY")
################################################################################
# Types
################################################################################
# (Type)
class DataType(str, Enum):
    """Response data format accepted by the service."""
    # Only JSON Available yet
    json = "JSON"
# (Type)
class VilageFcstVersionFtype(str, Enum):
    """File-type codes accepted by the getFcstVersion endpoint."""
    ODAM = "ODAM"
    VSRT = "VSRT"
    SHRT = "SHRT"
################################################################################
# [Abstract] Abstract for VilageFcst
################################################################################
class VilageFcstInfo(DataGoKr):
    """Shared base for the VilageFcstInfoService_2.0 endpoint bindings."""
    __version__ = "2.0"
    baseUrl: HttpUrl = "http://apis.data.go.kr/1360000/VilageFcstInfoService_2.0"
    dataType: Optional[DataType] = "JSON"  # Only JSON available yet.
    # NOTE(review): value is None when the KMA_API_KEY env var is unset --
    # confirm the declared str type is intended in that case.
    serviceKey: str = KMA_API_KEY
################################################################################
# [API] 초단기 실황 UltraSrtNcst
################################################################################
# Output Model
class UltraSrtNcstModel(BaseModel):
    """Record model for getUltraSrtNcst responses (see UltraSrtNcst below)."""
    baseDate: str
    baseTime: str
    T1H: Optional[float]  # 10 decimal
    RN1: Optional[str]  # 8 code
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    REH: Optional[int]  # 8 int
    PTY: Optional[int]  # 4 code
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal
# API
class UltraSrtNcst(VilageFcstInfo):
    """API binding for the getUltraSrtNcst route."""
    __RecordModel__ = UltraSrtNcstModel
    __index_names__ = None
    __key_name__ = "category"
    __value_name__ = "obsrValue"
    route: str = "getUltraSrtNcst"
    # default_factory evaluates per instance; the previous plain default froze
    # the date at import time and went stale in a long-running process.
    base_date: str = Field(default_factory=lambda: datetime.now().strftime("%Y%m%d"))
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118
################################################################################
# [API] 초단기 예보 UltraSrtFcst
################################################################################
# Output Model
class UltraSrtFcstModel(BaseModel):
    """Record model for getUltraSrtFcst responses (see UltraSrtFcst below)."""
    baseDate: str
    baseTime: str
    fcstDate: str
    fcstTime: str
    T1H: Optional[float]  # 10 decimal
    RN1: Optional[str]  # 8 code
    SKY: Optional[int]  # 4 code
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    REH: Optional[int]  # 8 int
    PTY: Optional[int]  # 4 code
    LGT: Optional[str]  # 4 code
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal
# API
class UltraSrtFcst(VilageFcstInfo):
    """API binding for the getUltraSrtFcst route."""
    __RecordModel__ = UltraSrtFcstModel
    __index_names__ = ["fcstDate", "fcstTime"]
    __key_name__ = "category"
    __value_name__ = "fcstValue"
    route: str = "getUltraSrtFcst"
    # default_factory evaluates per instance; the previous plain default froze
    # the date at import time and went stale in a long-running process.
    base_date: str = Field(default_factory=lambda: datetime.now().strftime("%Y%m%d"))
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118
################################################################################
# [API] 단기 예보 VilageFcst
################################################################################
# Output Model
class VilageFcstModel(BaseModel):
    """Record model for getVilageFcst responses (see VilageFcst below)."""
    baseDate: str
    baseTime: str
    fcstDate: str
    fcstTime: str
    POP: Optional[int]  # 8 int
    PTY: Optional[int]  # 4 code
    PCP: Optional[str]  # 8 code
    REH: Optional[int]  # 8 int
    SNO: Optional[str]  # 8 code
    SKY: Optional[int]  # 4 code
    TMP: Optional[float]  # 10 decimal
    TMN: Optional[float]  # 10 decimal
    TMX: Optional[float]  # 10 decimal
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    WAV: Optional[float]  # 8 int
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal
# API
class VilageFcst(VilageFcstInfo):
    """API binding for the getVilageFcst route."""
    __RecordModel__ = VilageFcstModel
    __index_names__ = ["fcstDate", "fcstTime"]
    __key_name__ = "category"
    __value_name__ = "fcstValue"
    route: str = "getVilageFcst"
    # default_factory evaluates per instance; the previous plain default froze
    # the date at import time and went stale in a long-running process.
    base_date: str = Field(default_factory=lambda: datetime.now().strftime("%Y%m%d"))
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118
################################################################################
# [API] 단기예보 수치모델 버전
################################################################################
# Output Model
class VilageFcstVersion(BaseModel):
    """Record model for getFcstVersion responses."""
    # NOTE(review): this name is reused by the API class defined right below,
    # which shadows this model at module level (the API class still captures
    # it via __RecordModel__ before the rebinding happens). Consider renaming
    # to VilageFcstVersionModel to match the other *Model classes.
    filetype: VilageFcstVersionFtype
    version: str
# API
class VilageFcstVersion(VilageFcstInfo):
    """API binding for the getFcstVersion route."""
    # Resolves to the record model of the same name defined above: the class
    # statement has not rebound the module-level name yet while this body runs.
    __RecordModel__ = VilageFcstVersion
    route: str = "getFcstVersion"
    ftype: VilageFcstVersionFtype = "ODAM"
    # default_factory evaluates per instance; the previous plain default froze
    # the datetime at import time and went stale in a long-running process.
    basedatetime: str = Field(default_factory=lambda: datetime.now().strftime("%Y%m%d0800"))
| 28.682927 | 81 | 0.528699 | 3,135 | 0.659722 | 0 | 0 | 0 | 0 | 0 | 0 | 1,955 | 0.411406 |
9f4ff94e02e03949fdcaa970d232b5cc43d0775f | 3,135 | py | Python | tests/test_io.py | OpenPIV/pivpy | 4df90b696d3a2ec53e42e222de1ec8125305b309 | [
"BSD-3-Clause"
] | 8 | 2020-04-20T11:45:59.000Z | 2022-02-04T17:02:32.000Z | tests/test_io.py | alexlib/pivpy | f731fe4148a0f1f9e60e11c19e3b8fa74792d58f | [
"BSD-3-Clause"
] | 17 | 2019-07-20T14:47:49.000Z | 2021-06-14T07:58:06.000Z | tests/test_io.py | alexlib/pivpy | f731fe4148a0f1f9e60e11c19e3b8fa74792d58f | [
"BSD-3-Clause"
] | 6 | 2015-07-02T11:41:06.000Z | 2021-03-20T10:24:12.000Z | import numpy as np
from pivpy import io, pivpy
import matplotlib.pyplot as plt
import os
import pkg_resources as pkg
# Shared fixtures: the packaged sample-data directory and one Insight .vec file.
path = pkg.resource_filename("pivpy", "data")
fname = os.path.join(path, "Insight", "Run000002.T000.D000.P000.H001.L.vec")
# def test_get_dt():
#     """ test if we get correct delta t """
#     _, _, _, _,dt,_ = io.parse_header(os.path.join(path,fname))
#     assert dt == 2000.
def test_get_frame():
    """Verify parse_header extracts the frame number from several name styles."""
    cases = [
        ("day2/day2a005003.T000.D000.P003.H001.L.vec", 5003),
        ("Insight/Run000002.T000.D000.P000.H001.L.vec", 2),
        ("openpiv/exp1_001_b.vec", 1),
        ("openpiv/exp1_001_b.txt", 1),
    ]
    for relative_name, expected_frame in cases:
        header = io.parse_header(os.path.join(path, relative_name))
        # The frame number is the last element of the parsed header tuple.
        assert header[-1] == expected_frame
def test_get_units():
    """Verify length/velocity/time units for the supported file flavours."""
    # Insight .vec expressed in metric units
    length_units, velocity_units, time_units = io.get_units(fname)
    assert (length_units, velocity_units, time_units) == ("mm", "m/s", "s")
    # .vec expressed in pixels per frame interval
    length_units, velocity_units, time_units = io.get_units(
        os.path.join(path, "day2/day2a005000.T000.D000.P003.H001.L.vec")
    )
    assert (length_units, velocity_units, time_units) == ("pixel", "pixel/dt", "dt")
    # OpenPIV .vec: only the length unit is asserted here
    length_units, _, _ = io.get_units(
        os.path.join(path, "openpiv/exp1_001_b.vec")
    )
    assert length_units == "pix"
def test_load_vec():
    """Load a single Insight .vec file and sanity-check the dataset."""
    insight_name = "Run000001.T000.D000.P000.H001.L.vec"
    dataset = io.load_vec(os.path.join(path, "Insight", insight_name))
    assert "t" in dataset.dims
    assert dataset["u"].shape == (63, 63, 1)
    assert dataset["u"][0, 0, 0] == 0.0
    assert np.allclose(dataset.coords["x"][0], 0.31248)
def test_load_vc7():
    """Load a .VC7 file and sanity-check shape, values and coordinates."""
    dataset = io.load_vc7(os.path.join(path, "VC7/2Ca.VC7"))
    assert dataset["u"].shape == (57, 43, 1)
    assert np.allclose(dataset.u.values[0, 0], -0.04354814)
    assert np.allclose(dataset.coords["x"][-1], 193.313795)
def test_loadopenpivtxt():
    """Smoke-test loading an OpenPIV .txt file.

    The original only checked that loading does not raise; assert the loader
    actually produced an object as a minimal regression guard.
    """
    data = io.load_vec(os.path.join(path, "openpiv", "exp1_001_b.txt"))
    assert data is not None
def test_load_directory():
    """Load whole directories of vector files and check the time coordinate.

    The original stored meaningful paths in ``_``; they are given real names.
    """
    insight_dir = pkg.resource_filename("pivpy", "data/Insight")
    data = io.load_directory(insight_dir, basename="Run*", ext=".vec")
    assert np.allclose(data["t"], [0, 1, 2, 3, 4])
    vc7_dir = pkg.resource_filename("pivpy", "data/VC7")
    data = io.load_directory(vc7_dir, basename="2*", ext=".VC7")
    assert np.allclose(data["t"], [0, 1])
    data = io.load_directory(
        path=os.path.join(path, "urban_canopy"), basename="B*", ext=".vc7"
    )
    assert np.allclose(data["t"], [0, 1, 2, 3, 4])
def test_create_sample_field():
    """Check frame numbering and grid dimensions of the synthetic field."""
    numbered_field = io.create_sample_field(frame=3)
    assert numbered_field["t"] == 3
    sized_field = io.create_sample_field(rows=3, cols=7)
    assert sized_field["t"] == 0
    assert sized_field.x.shape[0] == 7
    assert sized_field.y.shape[0] == 3
def test_create_sample_dataset(n=3):
    """Check that the sample dataset has n frames numbered 0..n-1.

    The original asserted the literal 3, which silently breaks if the test is
    ever run with a different n; assert against the parameter instead.
    """
    data = io.create_sample_dataset(n=n)
    assert data.dims["t"] == n
    assert np.allclose(data["t"], np.arange(n))
| 29.299065 | 76 | 0.620415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.246252 |
9f513a1848907a84153b93f6379a41b3912f5550 | 1,807 | py | Python | test_hig.py | xiaohan2012/lst | 793944d1dd8235adbe2f651270ab12e46ff8f6f7 | [
"MIT"
] | 1 | 2016-07-05T13:10:27.000Z | 2016-07-05T13:10:27.000Z | test_hig.py | xiaohan2012/lst | 793944d1dd8235adbe2f651270ab12e46ff8f6f7 | [
"MIT"
] | null | null | null | test_hig.py | xiaohan2012/lst | 793944d1dd8235adbe2f651270ab12e46ff8f6f7 | [
"MIT"
] | null | null | null | import unittest
import networkx as nx
from nose.tools import assert_equal, assert_raises, \
assert_true
from .util import json_load
from .test_util import make_path
from hig import construct_hig_from_interactions
from interactions import InteractionsUtil as IU
class HIGTest(unittest.TestCase):
    """Tests for construct_hig_from_interactions and analyses on its graph."""

    def setUp(self):
        # Cleaned interaction records loaded from the bundled Enron sample.
        self.interactions = IU.clean_interactions(
            json_load(
                make_path('test/data/enron_test.json')
            )
        )

    def test_construct_hig(self):
        hig = construct_hig_from_interactions(
            self.interactions
        )
        a, b, c, d, e, f = ('A', 'B', 'C', 'D', 'E', 'F')
        # list(range(...)) keeps the concatenation valid on Python 3, where
        # range no longer returns a list.
        assert_equal(
            sorted(
                list(range(1, 7)) +
                [a, b, c, d, e, f, 'XXX']
            ),
            sorted(hig.nodes()))
        # Was a bare Python 2 `print` statement (a syntax error on Python 3);
        # parenthesized so the file parses under both interpreters.
        print(hig.edges())
        assert_equal(
            sorted(
                [(a, 1), (1, b), (1, c), (1, d),
                 (a, 2), (2, f),
                 (d, 3), (3, e),
                 (a, 4), (4, b),
                 (d, 5), (5, f),
                 (6, u'XXX'), (u'XXX', 6)
                 ]),
            sorted(hig.edges())
        )

    def test_construct_hig_interacting_ids(self):
        # A message whose sender is also one of its recipients is rejected.
        self.interactions.append({'sender_id': 1,
                                  'recipient_ids': [1],
                                  'message_id': 7})
        assert_raises(ValueError,
                      construct_hig_from_interactions,
                      self.interactions)

    def test_pagerank_on_hig(self):
        pr = nx.pagerank(
            construct_hig_from_interactions(self.interactions)
        )
        # On this sample 'A' must rank below F, B, C and D.
        assert_true(pr['A'] < pr['F'])
        assert_true(pr['A'] < pr['B'])
        assert_true(pr['A'] < pr['C'])
        assert_true(pr['A'] < pr['D'])
9f517ecb4978d7abbb693bb67e1f4a9ac780ee1f | 5,608 | py | Python | euler_angle_visualization/lib/TaitBryanRotation.py | klosteraner/Euler-Angles-Visual | f9e1f41ca75912b21572d11e0d14cbd4bb82aa52 | [
"MIT"
] | null | null | null | euler_angle_visualization/lib/TaitBryanRotation.py | klosteraner/Euler-Angles-Visual | f9e1f41ca75912b21572d11e0d14cbd4bb82aa52 | [
"MIT"
] | null | null | null | euler_angle_visualization/lib/TaitBryanRotation.py | klosteraner/Euler-Angles-Visual | f9e1f41ca75912b21572d11e0d14cbd4bb82aa52 | [
"MIT"
] | null | null | null | from traits.api import HasTraits, Bool, Enum, List, Str
from numpy import array, cos, sin
class ElementalRotationDefinition(HasTraits):
    '''
    A definition of an elemental rotation and its angle's name
    '''
    # Human-readable name of the rotation angle (e.g. "Yaw").
    angle_name = Str("undefined angle")
    # Axis the elemental rotation turns around.
    axis = Enum('around_x', 'around_y', 'around_z')
    # True when a positive angle means a clockwise camera-system rotation.
    isClockwiseCameraSystemRotation = Bool(False)
class TaitBryanAnglesDefinition(HasTraits):
    '''
    Tait-Bryan angle rotations are defined by three rotation angles around
    the x,y & z-axis.
    The resulting rotation will be different according to
    1. The order in which the rotations are applied
    2. The rotation direction (clockwise vs. counter-clockwise)
    '''
    # Elemental rotations listed in the order they are applied (first first).
    angles_in_order_applied = List(ElementalRotationDefinition)
def angles_yaw_pitch_roll():
    '''
    Returns a definition of the "Yaw, Pitch, Roll" Tait-Bryan angles set widespread in aerospace applications.
    '''
    definition = TaitBryanAnglesDefinition()
    # Application order: roll first, then pitch, then yaw, all counter-clockwise.
    definition.angles_in_order_applied.extend([
        ElementalRotationDefinition(angle_name="Roll", axis='around_x', isClockwiseCameraSystemRotation=False),
        ElementalRotationDefinition(angle_name="Pitch", axis='around_y', isClockwiseCameraSystemRotation=False),
        ElementalRotationDefinition(angle_name="Yaw", axis='around_z', isClockwiseCameraSystemRotation=False),
    ])
    return definition
def angles_pix4d_omega_phi_kappa():
    '''
    Returns a definition of the "Omega, Phi, Kappa" Tait-Bryan angles set used by pix4d.
    '''
    definition = TaitBryanAnglesDefinition()
    # Application order: kappa first, then phi, then omega, all counter-clockwise.
    ordered_angles = (("Kappa", 'around_z'), ("Phi", 'around_y'), ("Omega", 'around_x'))
    for name, rotation_axis in ordered_angles:
        definition.angles_in_order_applied.append(
            ElementalRotationDefinition(angle_name=name, axis=rotation_axis,
                                        isClockwiseCameraSystemRotation=False))
    return definition
def camera_to_world_rotation_around_x(cc_angle = 0):
    '''
    Build the rotation matrix mapping camera coordinates to world coordinates
    for a counter-clockwise rotation of the camera (system) around its x axis.
    (With the camera system held fixed, the same matrix would transform
    points clockwise around that axis.)
    '''
    c = cos(cc_angle)
    s = sin(cc_angle)
    return array([[1., 0., 0.],
                  [0., c, -s],
                  [0., s, c]])
def camera_to_world_rotation_around_y(cc_angle = 0):
    '''
    Build the rotation matrix mapping camera coordinates to world coordinates
    for a counter-clockwise rotation of the camera (system) around its y axis.
    (With the camera system held fixed, the same matrix would transform
    points clockwise around that axis.)
    '''
    c = cos(cc_angle)
    s = sin(cc_angle)
    return array([[c, 0., s],
                  [0., 1., 0.],
                  [-s, 0., c]])
def camera_to_world_rotation_around_z(cc_angle = 0):
    '''
    Build the rotation matrix mapping camera coordinates to world coordinates
    for a counter-clockwise rotation of the camera (system) around its z axis.
    (With the camera system held fixed, the same matrix would transform
    points clockwise around that axis.)
    '''
    c = cos(cc_angle)
    s = sin(cc_angle)
    return array([[c, -s, 0.],
                  [s, c, 0.],
                  [0., 0., 1.]])
def world_angle(angle, world_axis):
    '''
    Correct the angle's sign for world axes defined opposite to the mayavi
    world system ('Down', 'West', 'South'); other axes leave it unchanged.
    '''
    return -angle if world_axis in ('Down', 'West', 'South') else angle
def elemental_rotation(angle_and_definition, worldsystem):
    '''
    Build the elemental camera-to-world rotation matrix for one euler angle
    and its definition, within the given world system.
    '''
    angle, definition = angle_and_definition
    # A clockwise camera-system rotation is the negated counter-clockwise one.
    cc_angle = -angle if definition.isClockwiseCameraSystemRotation else angle
    if definition.axis == 'around_x':
        return camera_to_world_rotation_around_x(world_angle(cc_angle, worldsystem.x_axis))
    if definition.axis == 'around_y':
        return camera_to_world_rotation_around_y(world_angle(cc_angle, worldsystem.y_axis))
    if definition.axis == 'around_z':
        return camera_to_world_rotation_around_z(world_angle(cc_angle, worldsystem.z_axis))
def camera_to_world_rotation_matrix(first_angle_and_definition,
                                    second_angle_and_definition,
                                    last_angle_and_definition,
                                    world_system):
    '''
    Compose the camera-to-world rotation matrix from three Tait-Bryan angles
    and their definitions.
    Note: matrices apply right-to-left, so the first rotation sits rightmost.
    '''
    rotation_first = elemental_rotation(first_angle_and_definition, world_system)
    rotation_second = elemental_rotation(second_angle_and_definition, world_system)
    rotation_last = elemental_rotation(last_angle_and_definition, world_system)
    return rotation_last.dot(rotation_second).dot(rotation_first)
| 38.675862 | 112 | 0.694009 | 660 | 0.117689 | 0 | 0 | 0 | 0 | 0 | 0 | 2,335 | 0.416369 |
9f521de84b0ac5792cc9124444676f4fcf2028af | 854 | py | Python | w2020/w9/w9_19/sort.py | abrance/mine | d4067bf6fb158ebaea3eb7a516ae372dcb8cf419 | [
"MIT"
] | null | null | null | w2020/w9/w9_19/sort.py | abrance/mine | d4067bf6fb158ebaea3eb7a516ae372dcb8cf419 | [
"MIT"
] | null | null | null | w2020/w9/w9_19/sort.py | abrance/mine | d4067bf6fb158ebaea3eb7a516ae372dcb8cf419 | [
"MIT"
] | null | null | null |
def merge_sort(ls: list):
    """Sort a list with a top-down two-way merge sort and return a new list.

    The input list is not modified (inputs of length <= 1 are returned as-is).
    The merge is stable: on ties the element from the left half comes first.
    """
    length = len(ls)
    if length <= 1:
        return ls
    mid = length // 2
    left = merge_sort(ls[:mid])
    right = merge_sort(ls[mid:])
    return _merge(left, right)


def _merge(left: list, right: list) -> list:
    """Merge two sorted lists into one sorted list in linear time."""
    merged = []
    i = j = 0
    # Walk both halves with index pointers instead of repeatedly re-slicing,
    # which made the original merge quadratic. The original's trailing
    # `raise Exception` branch was unreachable and has been dropped.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # Exactly one half can still have elements; append whatever remains.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Sample input used by the self-check below.
l1 = [1, -1, 31, 23, 9, 4, 2, 3]


def main():
    """Sort the sample list and verify the result against the builtin sorted()."""
    ours = merge_sort(l1)
    print(ours)
    reference = sorted(l1)
    assert ours == reference


if __name__ == '__main__':
    main()
| 18.565217 | 48 | 0.522248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.076484 |
9f5431df273285b9e2904f4a66e99d7fa60a4ed4 | 4,952 | py | Python | training.py | oakwood44267/scooter | 11e6de1d3216b87e6a42bcb6de4a2ad1affabf49 | [
"BSD-2-Clause"
] | 4 | 2020-03-04T23:43:55.000Z | 2021-05-24T05:23:26.000Z | training.py | oakwood44267/scooter | 11e6de1d3216b87e6a42bcb6de4a2ad1affabf49 | [
"BSD-2-Clause"
] | null | null | null | training.py | oakwood44267/scooter | 11e6de1d3216b87e6a42bcb6de4a2ad1affabf49 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
import pandas as pd
import numpy as np
from glob import glob
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical
# Create Techie Pizza's machine learning model.
# Achieves over 99% accuracy on verification data.
def techie_pizza_model(trainX, trainy):
    """Build and compile Techie Pizza's 1-D convolutional classifier."""
    n_timesteps, n_features = trainX.shape[1], trainX.shape[2]
    n_outputs = trainy.shape[1]
    model = Sequential()
    # Input to model
    model.add(Input(shape=(n_timesteps, n_features)))
    ############################################################################
    # Three convolutional stages: (filters, kernel_size, pool_size, dropout)
    conv_stages = [(16, 12, 12, 0.15), (16, 4, 3, 0.25), (24, 4, 3, 0.25)]
    for filters, kernel, pool, drop in conv_stages:
        model.add(Conv1D(filters=filters, kernel_size=kernel, activation='relu'))
        model.add(MaxPooling1D(pool))
        model.add(Dropout(drop))
    # Dense layer
    model.add(Flatten())
    model.add(Dense(60, activation='relu'))
    model.add(Dropout(0.25))
    ############################################################################
    # Final layer that predicts which category
    model.add(Dense(n_outputs, activation='softmax'))
    # Build the model. It is evaluated based on how strongly it predicts the
    # correct category.
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
# fit and evaluate a model
# from https://machinelearningmastery.com/how-to-develop-rnn-models-for-human-activity-recognition-time-series-classification/
def evaluate_model(trainX, trainy, testX, testy):
    """Train our_model on the training split and score it on the test split.

    Side effects: appends the predicted and true class indices (argmax of the
    one-hot rows) for the test split to the module-level arrays ``predy`` and
    ``comparey``, used later for the aggregate classification report.

    Returns:
        (loss, accuracy) from Keras' evaluate on the test split.
    """
    model = our_model(trainX, trainy)
    # Train network
    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose,
              validation_data=(testX, testy),
              callbacks=[EarlyStopping(monitor='loss', patience=patience, restore_best_weights=True)])
    # Use trained network to predict hold-back data
    predY = model.predict(testX)
    global predy, comparey
    predy = np.append(predy, np.argmax(predY, axis=1))
    comparey = np.append(comparey, np.argmax(testy, axis=1))
    # Also use Kera's evaluation function
    loss, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return (loss, accuracy)
# run an experiment
# from https://machinelearningmastery.com/how-to-develop-rnn-models-for-human-activity-recognition-time-series-classification/
def run_experiment(X, y):
    """Repeat train/evaluate cycles with a shifting split seed and summarize."""
    scores = []
    losses = []
    seed = 42
    # Repeat experiment
    for repeat_index in range(repeats):
        # Hold back 20% of the data to measure accuracy
        trainX, testX, trainy, testy = train_test_split(X, y, test_size=0.20, random_state=seed)
        loss, score = evaluate_model(trainX, trainy, testX, testy)
        score *= 100.0
        print('>#%d: %.3f' % (repeat_index + 1, score))
        scores.append(score)
        losses.append(loss)
        seed += 1
    # Summarize results across experiments
    print(scores)
    print(losses)
    mean_score, std_score = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (mean_score, std_score))
# from Jupyter notebook written by team
def load_file(filename):
    """Read one sensor CSV and return a DataFrame without the time column."""
    column_names = ['time', 'rotx', 'roty', 'rotz', 'accelx', 'accely', 'accelz']
    frame = pd.read_csv(filename, names=column_names)
    # Drop fields we do not care about
    return frame.drop(columns='time')
# from Jupyter notebook written by team
def load_label(path):
    """Load every file matching the glob pattern *path* as a numpy array."""
    return [load_file(name).values for name in glob(path)]
# Written by Mr. Lyle to format data appropriately for Keras
def load_labels(paths):
    """Load samples for each labelled path and build one-hot targets.

    :param paths: glob patterns, one per class; a pattern's position in the
                  list is its class label
    :return: (samples as a numpy array, one-hot encoded targets)
    """
    loaded = []
    targets = []
    # enumerate replaces the hand-maintained label counter of the original.
    for label, path in enumerate(paths):
        chunk = load_label(path)
        loaded.extend(chunk)
        targets.extend(len(chunk) * [label])
    return np.asarray(loaded), to_categorical(targets)
# Module-level experiment configuration and accumulators.
our_model = techie_pizza_model
predy = np.empty(0)     # predicted class indices accumulated across repeats
comparey = np.empty(0)  # matching true class indices
epochs = 500
patience = 60
verbose = 1
batch_size = 32
repeats = 14
# Load labelled data
categories = ['Sidewalk', 'Street', 'Standing']
X, y = load_labels(['sidewalk/good/*', 'street/good/*', 'standing/good/*'])
# Shuffle the data
X, y = shuffle(X, y, random_state=640)
run_experiment(X, y)
print('Classification Report')
# Keyword arguments: recent scikit-learn releases reject `labels` and
# `target_names` passed positionally.
print(classification_report(comparey, predy, labels=np.arange(len(categories)),
                            target_names=categories, digits=3))
cm = confusion_matrix(comparey, predy)
print(cm)
| 28.959064 | 126 | 0.668821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,484 | 0.299677 |
9f577d4e6078ddc55f14433fb0ba6b1f3afe187f | 3,468 | py | Python | test/test_files/pylops/pytests/test_sliding.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 2 | 2022-02-15T20:30:57.000Z | 2022-03-17T00:50:37.000Z | test/test_files/pylops/pytests/test_sliding.py | SoftwareUnderstanding/code_inspector | a820b5a7bb18f5df9c3e79346108d8280b20c39a | [
"BSD-3-Clause"
] | 101 | 2021-06-09T14:19:59.000Z | 2022-01-24T13:24:39.000Z | test/test_files/pylops/pytests/test_sliding.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 1 | 2021-09-22T06:59:32.000Z | 2021-09-22T06:59:32.000Z | import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylops.utils import dottest
from pylops import LinearOperator
from pylops.basicoperators import MatrixMult
from pylops.signalprocessing import Sliding1D, Sliding2D, Sliding3D
# Window-size/overlap/taper configurations exercised by the parametrized tests.
par1 = {'ny': 6, 'nx': 7, 'nt': 10,
        'npy': 15, 'nwiny': 5, 'novery': 0, 'winsy': 3,
        'npx': 10, 'nwinx': 5, 'noverx': 0, 'winsx': 2,
        'tapertype': None}  # no overlap, no taper
par2 = {'ny': 6, 'nx': 7, 'nt': 10,
        'npy': 15, 'nwiny': 5, 'novery': 0, 'winsy': 3,
        'npx': 10, 'nwinx': 5, 'noverx': 0, 'winsx': 2,
        'tapertype': 'hanning'}  # no overlap, with taper
par3 = {'ny': 6, 'nx': 7, 'nt': 10,
        'npy': 15, 'nwiny': 7, 'novery': 3, 'winsy': 3,
        'npx': 10, 'nwinx': 4, 'noverx': 2, 'winsx': 4,
        'tapertype': None}  # overlap, no taper
par4 = {'ny': 6, 'nx': 7, 'nt': 10,
        'npy': 15, 'nwiny': 7, 'novery': 3, 'winsy': 3,
        'npx': 10, 'nwinx': 4, 'noverx': 2, 'winsx': 4,
        'tapertype': 'hanning'}  # overlap, with taper
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Sliding1D(par):
    """Dot-test and inverse for Sliding1D operator
    """
    window_op = MatrixMult(np.ones((par['nwiny'], par['ny'])))
    sliding = Sliding1D(window_op, dim=par['ny'] * par['winsy'],
                        dimd=par['npy'],
                        nwin=par['nwiny'], nover=par['novery'],
                        tapertype=par['tapertype'])
    assert dottest(sliding, par['npy'], par['ny'] * par['winsy'])
    model = np.ones(par['ny'] * par['winsy'])
    data = sliding * model.flatten()
    recovered = LinearOperator(sliding) / data
    assert_array_almost_equal(model.flatten(), recovered)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Sliding2D(par):
    """Dot-test and inverse for Sliding2D operator
    """
    window_op = MatrixMult(np.ones((par['nwiny'] * par['nt'], par['ny'] * par['nt'])))
    sliding = Sliding2D(window_op, dims=(par['ny'] * par['winsy'], par['nt']),
                        dimsd=(par['npy'], par['nt']),
                        nwin=par['nwiny'], nover=par['novery'],
                        tapertype=par['tapertype'])
    assert dottest(sliding, par['npy'] * par['nt'],
                   par['ny'] * par['nt'] * par['winsy'])
    model = np.ones((par['ny'] * par['winsy'], par['nt']))
    data = sliding * model.flatten()
    recovered = LinearOperator(sliding) / data
    assert_array_almost_equal(model.flatten(), recovered)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Sliding3D(par):
    """Dot-test and inverse for Sliding3D operator
    """
    window_op = MatrixMult(np.ones((par['nwiny'] * par['nwinx'] * par['nt'],
                                    par['ny'] * par['nx'] * par['nt'])))
    sliding = Sliding3D(window_op,
                        dims=(par['ny'] * par['winsy'],
                              par['nx'] * par['winsx'], par['nt']),
                        dimsd=(par['npy'], par['npx'], par['nt']),
                        nwin=(par['nwiny'], par['nwinx']),
                        nover=(par['novery'], par['noverx']),
                        nop=(par['ny'], par['nx']),
                        tapertype=par['tapertype'])
    assert dottest(sliding, par['npy'] * par['npx'] * par['nt'],
                   par['ny'] * par['nx'] * par['nt'] * par['winsy'] * par['winsx'])
    model = np.ones((par['ny'] * par['nx'] * par['winsy'] * par['winsx'], par['nt']))
    data = sliding * model.flatten()
    recovered = LinearOperator(sliding) / data
    assert_array_almost_equal(model.flatten(), recovered)
| 39.409091 | 79 | 0.520185 | 0 | 0 | 0 | 0 | 2,390 | 0.689158 | 0 | 0 | 962 | 0.277393 |
9f57dc161a4c2f57109ec9b96b98cf65e6091f2e | 6,203 | py | Python | tests/binding/test_binders_errors.py | jimcarreer/dinao | ab9514d81ffff2d3e8717f26403c3b73f388739d | [
"MIT"
] | 1 | 2021-01-27T08:09:31.000Z | 2021-01-27T08:09:31.000Z | tests/binding/test_binders_errors.py | jimcarreer/dinao | ab9514d81ffff2d3e8717f26403c3b73f388739d | [
"MIT"
] | 34 | 2020-12-18T19:40:26.000Z | 2022-03-05T13:06:37.000Z | tests/binding/test_binders_errors.py | jimcarreer/dinao | ab9514d81ffff2d3e8717f26403c3b73f388739d | [
"MIT"
] | 1 | 2021-01-27T08:12:15.000Z | 2021-01-27T08:12:15.000Z | """Tests various errors that can be thrown by binding."""
from typing import Generator, List, Tuple, Union
from dinao.backend import Connection
from dinao.binding import FunctionBinder
from dinao.binding.binders import BoundedGeneratingQuery
from dinao.binding.errors import (
BadReturnTypeError,
CannotInferMappingError,
FunctionAlreadyBoundError,
MissingTemplateArgumentError,
MultipleConnectionArgumentError,
NoPoolSetError,
PoolAlreadySetError,
TemplateError,
)
from dinao.binding.templating import Template
import pytest
from tests.binding.mocks import MockConnection, MockConnectionPool
def test_cannot_infer_generic(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function to typed generics raises an error."""
    binder, _ = binder_and_pool
    expected = "Unable to determine mapper for typing.Union"
    with pytest.raises(CannotInferMappingError, match=expected):

        @binder.query("SELECT * FROM table")
        def union_returning_query() -> Union[str, int]:
            pass  # pragma: no cover
def test_cannot_infer_nested_generic(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function to typed generics as row types raises."""
    binder, _ = binder_and_pool
    expected = "Unable to determine row mapper for typing.List\\[str\\]"
    with pytest.raises(CannotInferMappingError, match=expected):

        @binder.query("SELECT * FROM table")
        def nested_generic_row_query() -> List[List[str]]:
            pass  # pragma: no cover
def test_binding_generator_throws(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a generator with send and return types specified raises.

    Only the yield type may be declared for a generating query. The unused
    ``pool`` binding of the original is discarded explicitly.
    """
    binder, _ = binder_and_pool
    with pytest.raises(CannotInferMappingError, match="Only yield_type"):

        @binder.query("SELECT some_num FROM table LIMIT 3")
        def generating_query_bad() -> Generator[int, int, int]:
            pass  # pragma: no cover
def test_bounded_generating_query_throws(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that BoundedGeneratingQuery raises if not bound to a generator.

    The unused ``pool`` binding of the original is discarded explicitly.
    """
    binder, _ = binder_and_pool

    def not_a_generator() -> int:
        pass  # pragma: no cover

    with pytest.raises(BadReturnTypeError, match="Expected results type to be Generator"):
        BoundedGeneratingQuery(binder, Template("SELECT * FROM table"), not_a_generator)
def test_binder_execute_bad_type(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function specifying an invalid return type for execution raises an exception.

    Execute bindings report an affected-row count, so only ``None`` or
    ``int`` return annotations are valid.
    """
    binder, _ = binder_and_pool
    with pytest.raises(BadReturnTypeError, match="can only return None or int"):
        @binder.execute("INSERT INTO TABLE (#{arg1})")
        def should_raise(arg1: str) -> List:
            pass # pragma: no cover
def test_binder_raises_for_template(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that a bad template causes an error at binding time.

    The unterminated ``#{arg1`` placeholder must raise TemplateError when
    the decorator parses the SQL template.
    """
    binder, _ = binder_and_pool
    with pytest.raises(TemplateError, match="#{arg1"):
        @binder.execute("INSERT INTO table #{arg1")
        def should_raise_0(arg1: str) -> int:
            pass # pragma: no cover
def test_double_binding_raises(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests that binding a function more than once results in an error.

    Covers all three stacked-decorator combinations: execute-on-execute,
    execute-on-query and execute-on-transaction.
    """
    binder, _ = binder_and_pool
    match = "has already been bounded by"
    with pytest.raises(FunctionAlreadyBoundError, match=match):
        @binder.execute("UPDATE table SET col = #{arg1}")
        @binder.execute("INSERT INTO TABLE (#{arg1})")
        def should_raise_1(arg1: str):
            pass # pragma: no cover
    with pytest.raises(FunctionAlreadyBoundError, match=match):
        @binder.execute("UPDATE table SET col = #{arg1}")
        @binder.query("SELECT * FROM table WHERE col = #{arg1})")
        def should_raise_2(arg1: str):
            pass # pragma: no cover
    with pytest.raises(FunctionAlreadyBoundError, match=match):
        @binder.execute("UPDATE table SET col = #{arg1}")
        @binder.transaction()
        def should_raise_3(arg1: str):
            pass # pragma: no cover
def test_args_mismatch_raises(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests an error is raised if a template is bound to a function without a matching argument.

    The template references ``#{arg}`` but the function only accepts
    ``some_arg``; binding must fail rather than defer to call time.
    """
    binder, _ = binder_and_pool
    with pytest.raises(MissingTemplateArgumentError, match="specified in template but is not an argument of"):
        @binder.execute("INSERT INTO table (#{arg})")
        def should_raise_4(some_arg: str):
            pass # pragma: no cover
def test_binder_raises_for_no_pool():
    """Tests an error is raised when a bind has no pool but an operation requiring one is performed.

    Uses a fresh FunctionBinder (no fixture) so no pool is ever attached;
    both calling a bound function and opening a connection must raise.
    """
    binder = FunctionBinder()
    @binder.execute("INSERT INTO table (#{arg})")
    def test_bound_execute(arg: str):
        pass # pragma: no cover
    with pytest.raises(NoPoolSetError, match="No connection pool"):
        test_bound_execute("testing")
    with pytest.raises(NoPoolSetError, match="No connection pool"):
        with binder.connection() as cnx: # noqa: F841
            pass # pragma: no cover
def test_binder_raises_for_pool_set_twice(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests an error is raised when a binder has its pool set twice.

    The fixture already attached a pool, so assigning a second one must
    raise PoolAlreadySetError.
    """
    binder, _ = binder_and_pool
    pool = MockConnectionPool([])
    with pytest.raises(PoolAlreadySetError, match="only be set once"):
        binder.pool = pool
def test_binder_raises_for_double_connection_arg(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
    """Tests an error is raised when a bound function specifies it would like more than one connection.

    Two parameters annotated with connection types (base class and mock
    subclass) are ambiguous and must be rejected at binding time.
    """
    binder, _ = binder_and_pool
    with pytest.raises(MultipleConnectionArgumentError, match="Connection argument specified multiple times for"):
        @binder.transaction()
        def should_raise_5(cnx1: Connection, cnx2: MockConnection):
            pass # pragma: no cover
| 38.055215 | 114 | 0.715138 | 0 | 0 | 0 | 0 | 1,562 | 0.251814 | 0 | 0 | 1,986 | 0.320168 |
9f5811036c606f5ac57993aea9de0c63592c971b | 1,773 | py | Python | gcsa/serializers/base_serializer.py | DirkReiners/Google-Calendar-Simple-API | b2d3e3e2ab1a8230301d97281346f8f95d762588 | [
"MIT"
] | null | null | null | gcsa/serializers/base_serializer.py | DirkReiners/Google-Calendar-Simple-API | b2d3e3e2ab1a8230301d97281346f8f95d762588 | [
"MIT"
] | null | null | null | gcsa/serializers/base_serializer.py | DirkReiners/Google-Calendar-Simple-API | b2d3e3e2ab1a8230301d97281346f8f95d762588 | [
"MIT"
] | null | null | null | import json
class BaseSerializer:
    """Base class for (de)serializing a domain object to/from JSON.

    Subclasses must set ``type_`` to the class being serialized and
    implement the static ``to_json``/``to_object`` converters. The
    constructor accepts the domain object itself, a JSON string, or an
    already-parsed dict.
    """

    # The domain class this serializer handles; must be set by subclasses
    # (enforced in __init_subclass__).
    type_ = None

    def __init__(self, obj):
        """Normalize *obj* (type_ instance, JSON str, or dict) into ``self.data``."""
        if isinstance(obj, self.type_):
            self.data = self.to_json(obj)
        elif isinstance(obj, str):
            self.data = json.loads(obj)
        elif isinstance(obj, dict):
            self.data = obj
        else:
            raise TypeError('The "{}" object must be {}, str or dict, not {!r}.'
                            .format(self.type_.__name__.lower(), self.type_.__name__, obj.__class__.__name__))

    def get_object(self):
        """Return the deserialized domain object built from ``self.data``."""
        return self.to_object(self.data)

    def get_json(self):
        """Return the underlying JSON-compatible dict."""
        return self.data

    @staticmethod
    def to_json(obj):
        """Convert a domain object to a JSON-compatible dict (subclass hook)."""
        # Fix: ``raise NotImplemented`` raised a TypeError (NotImplemented is
        # not an exception); NotImplementedError is the correct signal.
        raise NotImplementedError

    @staticmethod
    def to_object(json_):
        """Build a domain object from a JSON-compatible dict (subclass hook)."""
        raise NotImplementedError

    @staticmethod
    def assure_dict(json_):
        """Return *json_* as a dict, parsing it first when given a JSON string."""
        if not isinstance(json_, (str, dict)):
            raise TypeError('The json object must be str or dict, not {!r}'.format(json_.__class__.__name__))
        if isinstance(json_, str):
            return json.loads(json_)
        else:
            return json_

    def __init_subclass__(cls, **kwargs):
        """Checks that "type_" is defined and that name of the argument in subclasses __init__ method is the name of
        the "type_" in lowercase. It assures that error in __init__ function of BaseSerializer has a correct message.
        """
        super().__init_subclass__(**kwargs)
        if cls.type_ is None:
            raise AssertionError('Subclass of BaseSerializer has to define class "type_" that is being serialized.')
        if cls.__init__.__code__.co_varnames != ('self', cls.type_.__name__.lower()):
            raise AssertionError('Argument of the __init__ method has to have a name "{}".'
                                 .format(cls.type_.__name__.lower()))
9f582bd96f276e9ca75965135dcc5b33474b1f4e | 1,078 | py | Python | scripts/quest/q30006s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/quest/q30006s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/quest/q30006s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # [Root Abyss] Guardians of the World Tree
# Quest cutscene script ([Root Abyss] Guardians of the World Tree).
# Plays a dialogue between the player and the Mysterious Girl, then starts
# the parent quest and warps the player into the quest instance.
# NOTE(review): ``sm`` (script manager) and ``parentID`` are presumably
# injected by the game's scripting engine — not defined in this file.
MYSTERIOUS_GIRL = 1064001  # npc Id of the Mysterious Girl
sm.removeEscapeButton()
sm.lockInGameUI(True)  # prevent the player from interacting during the cutscene
sm.setPlayerAsSpeaker()
sm.sendNext("We need to find those baddies if we want to get you out of here.")
sm.setSpeakerID(MYSTERIOUS_GIRL)
sm.sendNext("But... they all left")
sm.setPlayerAsSpeaker()
sm.sendNext("They had to have left some clues behind. "
            "What about those weird doors over there?")
sm.setSpeakerID(MYSTERIOUS_GIRL)
sm.sendNext("They showed up when the bad guys left, but I can't get through them.")
sm.setPlayerAsSpeaker()
sm.sendNext("Then that sounds like a good place to start. Maybe I should-")
sm.setSpeakerID(MYSTERIOUS_GIRL)
sm.sendNext("Y-you're glowing!")
# Repeat the aura effect 3 times, every 2450 ms, starting immediately.
sm.invokeAtFixedRate(0, 2450, 3, "showEffect", "Effect/Direction11.img/effect/Aura/0", 3, 0)
sm.setPlayerAsSpeaker()
sm.sendNext("Ah! What is this?! Don't let it take all my fr00dz!!")
sm.setSpeakerID(MYSTERIOUS_GIRL)
sm.sendNext("#h0#!!!")
sm.startQuest(parentID)
sm.lockInGameUI(False)  # restore UI before leaving the map
sm.warpInstanceIn(910700300, 0) # Fake Vellum Cave for QuestLine
9f58b4292a34cd1938789d7e2e28f7021f2ddc09 | 80 | py | Python | nfbackend/env_vars.py | neuralfoo/neuralfoo | d40388cd506d612ceee9ae10d92985c794249c65 | [
"MIT"
] | 2 | 2022-01-06T08:18:29.000Z | 2022-01-06T08:19:09.000Z | nfbackend/env_vars.py | neuralfoo/neuralfoo | d40388cd506d612ceee9ae10d92985c794249c65 | [
"MIT"
] | null | null | null | nfbackend/env_vars.py | neuralfoo/neuralfoo | d40388cd506d612ceee9ae10d92985c794249c65 | [
"MIT"
] | null | null | null | seaweedfs_master = "http://localhost:9333" #do not add slash to end (after port) | 80 | 80 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.75 |
9f5970de9e5e48b628139ce7eea832df4e02442b | 3,462 | py | Python | test.py | sasha42/c3lingo-stats | a2729405a0397a7072b1d6b66d9562b67502ec68 | [
"MIT"
] | 2 | 2019-12-30T08:17:57.000Z | 2019-12-30T16:38:10.000Z | test.py | sasha42/c3lingo-stats | a2729405a0397a7072b1d6b66d9562b67502ec68 | [
"MIT"
] | 1 | 2019-01-08T19:12:32.000Z | 2019-01-08T20:19:58.000Z | test.py | sasha42/c3lingo-stats | a2729405a0397a7072b1d6b66d9562b67502ec68 | [
"MIT"
] | 1 | 2019-12-30T14:43:41.000Z | 2019-12-30T14:43:41.000Z | #!/usr/bin/env python3
import unittest
from textwrap import dedent
from datetime import timedelta
from parse import parse_block, TranslationShift
class TestTranslationShift(unittest.TestCase):
    """Unit tests for the TranslationShift value object."""

    def test_eq(self):
        """Equality holds iff both name and language match."""
        self.assertEqual(TranslationShift('name', 'lang'), TranslationShift('name', 'lang'))
        self.assertNotEqual(TranslationShift('name', 'lang'), TranslationShift('anonther_name', 'lang'))
        self.assertNotEqual(TranslationShift('name', 'lang'), TranslationShift('name', 'anonther_lang'))
class TestParseBlock(unittest.TestCase):
    """Tests for parse_block against sample shift-planning text blocks."""

    def test_simple(self):
        """A well-formed block yields language, room, duration, speakers and shifts."""
        result = parse_block(dedent("""
        #1
        [de] 11:00 +00:30, Adams
        Opening Event
        rufus, rixx
        Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9985.html
        Slides (if available): https://speakers.c3lingo.org/talks/15f4e5c5-40e1-4c73-8da0-4cc2a773ab13/
        → en: waffle, simplysaym, sirenensang
        → fr: informancer, ironic, yann0u
        """))
        self.assertEqual(result.language, 'de')
        self.assertEqual(result.room, 'Adams')
        self.assertEqual(result.duration, timedelta(hours=0, minutes=30))
        self.assertEqual(result.title, 'Opening Event')
        self.assertEqual(result.speakers, ['rufus', 'rixx'])
        self.assertEqual(result.fahrplan, 'https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9985.html')
        self.assertEqual(result.translation_shifts, [
            TranslationShift('waffle', 'en', result),
            TranslationShift('simplysaym', 'en', result),
            TranslationShift('sirenensang', 'en', result),
            TranslationShift('informancer', 'fr', result),
            TranslationShift('ironic', 'fr', result),
            TranslationShift('yann0u', 'fr', result),
        ])

    def test_notes(self):
        """
        Test that notes and parenthetical stuff inside the shift assignments is stripped out
        as much as possible
        """
        result = parse_block(dedent("""
        #31
        [de] 18:50 +01:00, Borg
        "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute
        Caro Keller (NSU-Watch)
        Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html
        Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/
        → en: tr1 (note), (foo) tr2
        → fr: tr3 – yay!
        → gsw: (reservation), (another one) , (never mind me)
        """))
        # Only real translator names survive; pure-parenthetical entries vanish.
        self.assertEqual(result.translation_shifts, [
            TranslationShift('tr1', 'en', result),
            TranslationShift('tr2', 'en', result),
            TranslationShift('tr3', 'fr', result),
        ])

    def test_trailing_comma(self):
        """
        Test that trailing commas don't cause trouble
        """
        result = parse_block(dedent("""
        #31
        [de] 18:50 +01:00, Borg
        "Das ist mir nicht erinnerlich." − Der NSU-Komplex heute
        Caro Keller (NSU-Watch)
        Fahrplan: https://fahrplan.events.ccc.de/congress/2018/Fahrplan/events/9766.html
        Slides (if available): https://speakers.c3lingo.org/talks/a12d17e9-3758-4fa0-b612-0c6ba22ea773/
        → en: tr1, tr2,
        """))
        self.assertEqual(result.translation_shifts, [
            TranslationShift('tr1', 'en', result),
            TranslationShift('tr2', 'en', result),
        ])
if __name__ == '__main__':
unittest.main()
| 39.793103 | 115 | 0.62825 | 3,279 | 0.942241 | 0 | 0 | 0 | 0 | 0 | 0 | 1,814 | 0.521264 |
9f5af16633fdf3d87fecf487badb15528c65c1ec | 5,018 | py | Python | tests/oleform/test_basic.py | tirkarthi/oletools | 8b97e45bffe22f18363fd58431bbae4613296103 | [
"BSD-2-Clause"
] | 1 | 2020-12-05T10:30:06.000Z | 2020-12-05T10:30:06.000Z | tests/oleform/test_basic.py | tirkarthi/oletools | 8b97e45bffe22f18363fd58431bbae4613296103 | [
"BSD-2-Clause"
] | null | null | null | tests/oleform/test_basic.py | tirkarthi/oletools | 8b97e45bffe22f18363fd58431bbae4613296103 | [
"BSD-2-Clause"
] | null | null | null | """ Test oleform basic functionality """
import unittest
from os.path import join
import sys
# Directory with test data, independent of current working directory
from tests.test_utils import DATA_BASE_DIR
if sys.version_info[0] <= 2:
from oletools.olevba import VBA_Parser
else:
from oletools.olevba3 import VBA_Parser
SAMPLES = [('oleform-PR314.docm', [('word/vbaProject.bin', u'UserFormTEST1', {'caption': 'Label1-test', 'control_tip_text': None, 'name': 'Label1', 'value': None, 'tag': 'l\x18sdf', 'ClsidCacheIndex': 21, 'id': 1, 'tabindex': 0}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'TextBox1', 'value': 'heyhey', 'tag': '', 'ClsidCacheIndex': 23, 'id': 2, 'tabindex': 1}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'ComboBox1', 'value': 'none dd', 'tag': '', 'ClsidCacheIndex': 25, 'id': 3, 'tabindex': 2}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'CheckBox1', 'value': '1', 'tag': '', 'ClsidCacheIndex': 26, 'id': 5, 'tabindex': 4}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'OptionButton1', 'value': '0', 'tag': '', 'ClsidCacheIndex': 27, 'id': 6, 'tabindex': 5}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'ToggleButton1', 'value': '0', 'tag': '', 'ClsidCacheIndex': 28, 'id': 7, 'tabindex': 6}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'Frame1', 'value': None, 'tag': '', 'ClsidCacheIndex': 14, 'id': 8, 'tabindex': 7}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'TabStrip1', 'value': None, 'tag': '', 'ClsidCacheIndex': 18, 'id': 10, 'tabindex': 8}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'CommandButton1', 'value': None, 'tag': '', 'ClsidCacheIndex': 17, 'id': 9, 'tabindex': 9}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'MultiPage1', 'value': None, 'tag': '', 'ClsidCacheIndex': 57, 'id': 12, 'tabindex': 10}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'ScrollBar1', 
'value': None, 'tag': '', 'ClsidCacheIndex': 47, 'id': 16, 'tabindex': 11}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'SpinButton1', 'value': None, 'tag': '', 'ClsidCacheIndex': 16, 'id': 17, 'tabindex': 12}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'Image1', 'value': None, 'tag': '', 'ClsidCacheIndex': 12, 'id': 18, 'tabindex': 13}), ('word/vbaProject.bin', u'UserFormTEST1', {'caption': None, 'control_tip_text': None, 'name': 'ListBox1', 'value': '', 'tag': '', 'ClsidCacheIndex': 24, 'id': 4, 'tabindex': 3}), ('word/vbaProject.bin', u'UserFormTEST1/i08', {'caption': None, 'control_tip_text': None, 'name': 'TextBox2', 'value': 'abcd', 'tag': '', 'ClsidCacheIndex': 23, 'id': 20, 'tabindex': 0}), ('word/vbaProject.bin', u'UserFormTEST1/i12', {'caption': None, 'control_tip_text': None, 'name': '', 'value': None, 'tag': '', 'ClsidCacheIndex': 18, 'id': 13, 'tabindex': 2}), ('word/vbaProject.bin', u'UserFormTEST1/i12', {'caption': None, 'control_tip_text': None, 'name': 'Page1', 'value': None, 'tag': '', 'ClsidCacheIndex': 7, 'id': 14, 'tabindex': 0}), ('word/vbaProject.bin', u'UserFormTEST1/i12', {'caption': None, 'control_tip_text': None, 'name': 'Page2', 'value': None, 'tag': '', 'ClsidCacheIndex': 7, 'id': 15, 'tabindex': 1}), ('word/vbaProject.bin', u'UserFormTEST1/i12/i14', {'caption': None, 'control_tip_text': None, 'name': 'TextBox3', 'value': 'last one', 'tag': '', 'ClsidCacheIndex': 23, 'id': 24, 'tabindex': 0}), ('word/vbaProject.bin', u'UserFormTest2', {'caption': 'Label1', 'control_tip_text': None, 'name': 'Label1', 'value': None, 'tag': '', 'ClsidCacheIndex': 21, 'id': 1, 'tabindex': 0}), ('word/vbaProject.bin', u'UserFormTest2', {'caption': 'Label2', 'control_tip_text': None, 'name': 'Label2', 'value': None, 'tag': '', 'ClsidCacheIndex': 21, 'id': 2, 'tabindex': 1}), ('word/vbaProject.bin', u'UserFormTest2', {'caption': None, 'control_tip_text': 
None, 'name': 'TextBox1', 'value': '&\xe9"\'', 'tag': '', 'ClsidCacheIndex': 23, 'id': 3, 'tabindex': 2})])]
class TestOleForm(unittest.TestCase):
    """Checks extracted form strings of sample files against expected values in SAMPLES."""

    def test_samples(self):
        """Parse each sample document and compare extended form strings."""
        if sys.version_info[0] > 2:
            # Unfortunately, olevba3 doesn't have extract_form_strings_extended
            return
        for sample, expected_result in SAMPLES:
            full_name = join(DATA_BASE_DIR, 'oleform', sample)
            parser = VBA_Parser(full_name)
            variables = list(parser.extract_form_strings_extended())
            self.assertEqual(variables, expected_result)
# just in case somebody calls this file as a script
if __name__ == '__main__':
unittest.main()
| 147.588235 | 4,099 | 0.646273 | 483 | 0.096253 | 0 | 0 | 0 | 0 | 0 | 0 | 3,132 | 0.624153 |
9f5b476a26d33e866185f1fc2f1015578a813124 | 593 | py | Python | base/shortener/admin.py | elijah74/django-url-shortener | 8934b46539748957b4be46945bea159640911434 | [
"BSD-3-Clause"
] | null | null | null | base/shortener/admin.py | elijah74/django-url-shortener | 8934b46539748957b4be46945bea159640911434 | [
"BSD-3-Clause"
] | null | null | null | base/shortener/admin.py | elijah74/django-url-shortener | 8934b46539748957b4be46945bea159640911434 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Shortener
@admin.register(Shortener)
class ShortenerAdmin(admin.ModelAdmin):
    """Admin configuration for the Shortener model."""

    list_display = ('id', 'short_url', 'link_url', 'status', 'created')
    fields = ('short_url', 'link_url', 'status', 'created', 'modified')
    readonly_fields = ('short_url', 'created', 'modified')

    def save_formset(self, request, form, formset, change):
        """Strip trailing whitespace from each link URL before saving.

        Bug fix: ``formset.save(commit=False)`` returns a *list* of unsaved
        instances. The original code assigned ``link_url`` on the list
        itself (an AttributeError at runtime) and never saved the
        instances. Per the Django docs, with ``commit=False`` we must save
        each instance, delete ``formset.deleted_objects`` ourselves, and
        then call ``save_m2m()``.
        """
        instances = formset.save(commit=False)
        for instance in instances:
            instance.link_url = instance.link_url.rstrip()
            instance.save()
        for obj in formset.deleted_objects:
            obj.delete()
        formset.save_m2m()
9f5cd4fcdfedb1f66c3f0409b1f89c49107cd3d7 | 6,484 | py | Python | terraform/stacks/analytics/cloud_sniper_beaconing_detection/cloud_sniper_beaconing_detection.py | houey/cloud-sniper | b0ac98eddc0b2da0f37c70926e2cef897283d787 | [
"MIT"
] | 160 | 2019-09-27T18:02:03.000Z | 2022-03-15T23:46:40.000Z | terraform/analytics/cloud_sniper_beaconing_detection/cloud_sniper_beaconing_detection.py | rckasa/cloud-sniper | cf9433622497360a6f2fb6a0d924d99f7a8387d2 | [
"MIT"
] | 2 | 2019-10-21T13:30:17.000Z | 2019-10-30T00:09:11.000Z | terraform/analytics/cloud_sniper_beaconing_detection/cloud_sniper_beaconing_detection.py | rckasa/cloud-sniper | cf9433622497360a6f2fb6a0d924d99f7a8387d2 | [
"MIT"
] | 31 | 2019-10-19T18:10:23.000Z | 2022-02-28T14:13:19.000Z | import os
import ipaddress
import numpy as np
import pandas as pd
import datetime
import boto3
import gzip
import json
from signal_processing import signalProcess
BUCKET_NAME = os.environ.get("BUCKET_NAME", None)
VPC_FLOW_LOGS_PATH = os.environ.get("VPC_FLOW_LOGS_PATH", None)
FINDINGS_PATH = os.environ.get("FINDINGS_PATH", None)
TMP_DOWNLOAD_DIR = "/tmp/s3_download"
FLOW_COLUMNS = [
"date",
"version",
"account-id",
"interface-id",
"srcaddr",
"dstaddr",
"srcport",
"dstport",
"protocol",
"packets",
"bytes",
"start",
"end",
"action",
"log-status",
]
def cloud_sniper_beaconing_detection(event, context):
    """Lambda entry point: detect beaconing traffic in VPC flow logs.

    Pipeline: download flow logs from S3, clean/convert the records, keep
    only suspicious low-port traffic from private sources, drop unfrequent
    conversations, run period detection, then write findings back to S3.

    Parameters
    ----------
    event, context : object
        Standard AWS Lambda invocation arguments (unused here).

    Returns
    -------
    list of dict
        ECS-style findings enriched with constant account metadata.
    """
    bucket_name = BUCKET_NAME
    vpc_flow_logs_path = VPC_FLOW_LOGS_PATH
    findings_path = FINDINGS_PATH
    df = load_data(bucket_name, vpc_flow_logs_path)
    print(f"Number of raw records: {len(df.index)}")
    # Version and account id are the same for every record of one log set,
    # so the first row is representative.
    version = df.version.iloc[0]  # constant
    account_id = df["account-id"].iloc[0]  # constant
    df = filter_format_data(df)
    print(f"Number of records after filtering missing data: {len(df.index)}")
    df = sort_data(df)
    print(f"Number of records after filtering by time: {len(df.index)}")
    df = filter_useless_data(df)
    print(f"Number of records after filtering by port: {len(df.index)}")
    df = filter_unfrequent_data(df)
    print(f"Number of records after filtering unfrequent: {len(df.index)}")
    res = find_beacons(df)
    # Constant enrichment applied to every finding before persisting.
    new_fields = {
        "hits": "",
        "cloud.provider": "aws",
        "event.type": "beaconing",
        "cloud.account.name": "",
        "interface.vpc.id": "",
        "protocol": "",
        "version": version,
        "cloud.account.id": account_id,
    }
    list(map(lambda x: x.update(new_fields), res))
    print(f"Result: {res}")
    save_results(bucket_name, findings_path, res)
    return res
def load_data(s3_bucket, s3_vpc_flow_logs_path):
    """Download VPC flow log files from S3 and parse them into a DataFrame.

    Every ``.log``/``.log.gz`` object under the given prefix is downloaded
    to TMP_DOWNLOAD_DIR, the header line of each file is skipped, and the
    space-separated records are collected.

    Parameters
    ----------
    s3_bucket : str
        Name of the S3 bucket holding the flow logs.
    s3_vpc_flow_logs_path : str
        Key prefix under which the log files live (slashes normalized).

    Returns
    -------
    pandas.DataFrame
        Columns follow FLOW_COLUMNS; the leading ``date`` column is only
        present (and immediately dropped) when the files carry it.

    NOTE(review): files persist in TMP_DOWNLOAD_DIR between warm Lambda
    invocations, so records from earlier runs may be re-read — confirm.
    """
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(name=s3_bucket)
    prefix = s3_vpc_flow_logs_path
    # Normalize the prefix: no leading slash, exactly one trailing slash.
    if prefix.startswith("/"):
        prefix = prefix[1:]
    if not prefix.endswith("/"):
        prefix += "/"
    if not os.path.exists(TMP_DOWNLOAD_DIR):
        os.mkdir(TMP_DOWNLOAD_DIR)
    # Download each log object under a zero-padded local name so that the
    # later sorted() pass preserves S3 listing order.
    for i, s3_file_obj in enumerate(bucket.objects.filter(Prefix=prefix)):
        if s3_file_obj.key.endswith(".log.gz"):
            extension = "log.gz"
        elif s3_file_obj.key.endswith(".log"):
            extension = "log"
        else:
            continue
        bucket.download_file(s3_file_obj.key,
                             TMP_DOWNLOAD_DIR + "/%06d" % i + "." + extension)
    data = []
    for fname in sorted(os.listdir(TMP_DOWNLOAD_DIR)):
        if fname.endswith(".log.gz"):
            open_ = gzip.open
            decode = True  # gzip yields bytes; decode to text before splitting
        elif fname.endswith(".log"):
            open_ = open
            decode = False
        else:
            continue
        with open_(os.path.join(TMP_DOWNLOAD_DIR, fname), 'r') as fd:
            first_line = True
            for line in fd:
                # The first line of each file is the column header row.
                if first_line:
                    first_line = False
                    continue
                if decode:
                    line = line.decode("utf-8").strip().split(" ")
                else:
                    line = line.strip().split(" ")
                data.append(line)
    if data and (len(data[0]) == len(FLOW_COLUMNS)):
        df = pd.DataFrame(data, columns=FLOW_COLUMNS)
        df.drop(['date'], axis=1, inplace=True)
    else:
        df = pd.DataFrame(data, columns=FLOW_COLUMNS[1:])
    return df
def filter_format_data(df):
    """Drop rows with missing addresses and cast numeric flow columns.

    Rows whose source or destination address is the placeholder ``"-"``
    are removed, the constant ``version`` and unused ``srcport`` columns
    are dropped, remaining ``"-"`` placeholders become NaN, and the
    numeric columns are converted from strings.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw flow records as produced by ``load_data`` (string dtype).

    Returns
    -------
    pandas.DataFrame
        Cleaned frame with numeric dtypes for the listed columns.
    """
    numeric_columns = ["dstport", "protocol", "packets", "bytes", "start", "end"]
    # .copy() avoids mutating a view of the caller's frame (the original
    # did an inplace drop on a filtered slice, a SettingWithCopy hazard).
    df = df[(df.srcaddr != "-") & (df.dstaddr != "-")].copy()
    df.drop(["version", "srcport"], axis=1, inplace=True)
    # Fix: this replace was accidentally duplicated in the original.
    df = df.replace("-", np.nan)
    df[numeric_columns] = df[numeric_columns].apply(pd.to_numeric)
    return df
def sort_data(df):
    """Order flow records chronologically by their epoch ``start`` field.

    Adds a ``datetime`` column derived from ``start`` (seconds since the
    epoch) and returns the frame sorted ascending on it, with ``datetime``
    restored as a regular column.
    """
    df['datetime'] = pd.to_datetime(df.start, unit='s')
    # TODO: should we process just the last hours?
    indexed = df.set_index('datetime')
    indexed.sort_index(inplace=True)
    return indexed.reset_index(level=0)
def filter_useless_data(df):
    """Keep only flows from private source IPs to low, non-NTP ports.

    A row survives when the source address is private, the destination
    port is <= 1024, and the destination port is not 123 (NTP).
    """
    # An empty frame has no columns to index; return it untouched.
    if df.empty:
        return df
    keep = (
        df.srcaddr.map(lambda addr: ipaddress.ip_address(addr).is_private)
        & (df.dstport <= 1024)
        & (df.dstport != 123)
    )
    return df[keep]
def filter_unfrequent_data(df):
    """Discard (src, dst, port) conversations seen fewer than 6 times."""
    frequent = (
        df.groupby(["srcaddr", "dstaddr", "dstport"])
        .filter(lambda conversation: len(conversation) >= 6)
    )
    # Mirrors the original: index level 0 is promoted into a column.
    return frequent.reset_index(level=0)
def find_beacons(df):
    """Detect periodic (beacon-like) conversations in the flow records.

    Groups the flows by (srcaddr, dstaddr, dstport), feeds each
    conversation's byte counts and timestamps into ``signalProcess``, and
    reports every conversation for which a primary period was detected.

    Parameters
    ----------
    df : pandas.DataFrame
        Flow records with ``srcaddr``, ``dstaddr``, ``dstport``, ``bytes``
        and ``datetime`` columns.

    Returns
    -------
    list of dict
        One ECS-style finding per periodic conversation.
    """
    res = []
    time_fmt = "%Y-%m-%dT%H:%M:%S.%f"
    groups = df.groupby(["srcaddr", "dstaddr", "dstport"])
    data_in = {
        "data": {},
        "time": {}
    }
    for (srcaddr, dstaddr, port), traffic in groups:
        k = (srcaddr, dstaddr, port)
        data_in["data"][k] = traffic.bytes
        data_in["time"][k] = traffic.datetime
    lrner = signalProcess(data_in, options_in=None)
    output = lrner.getPrimaryPeriods()
    for (srcaddr, dstaddr, port) in output["powers"]:
        if output["powers"][(srcaddr, dstaddr, port)][0] is not None:
            # Fix: bind the conversation key *before* any use. The original
            # printed data_in["time"][k] here with a stale ``k`` left over
            # from the previous loop (a debug leftover), which reported the
            # wrong conversation's timestamps.
            k = (srcaddr, dstaddr, port)
            # Truncate microseconds to milliseconds and mark as UTC ("Z").
            start_time = data_in["time"][k].iloc[0].strftime(time_fmt)[:-3] + 'Z'
            end_time = data_in["time"][k].iloc[-1].strftime(time_fmt)[:-3] + 'Z'
            res.append({
                "source.ip": srcaddr,
                "destination.ip": dstaddr,
                "destination.port": int(port),
                "timestamp": start_time,
                "event.end": end_time,
                "event.start": start_time
            })
    return res
def save_results(bucket_name, findings_path, res):
    """Persist findings to S3 as a timestamped JSON document.

    The findings list is serialized and written to
    ``<findings_path>/beaconing_detection_<YYYYmmdd_HHMMSS>.json`` in
    ``bucket_name``; slashes on the path are normalized first.
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    prefix = findings_path
    if prefix.startswith("/"):
        prefix = prefix[1:]
    if prefix.endswith("/"):
        prefix = prefix[:-1]
    key = f"{prefix}/beaconing_detection_{stamp}.json"
    payload = bytes(json.dumps(res).encode('UTF-8'))
    bucket = boto3.resource('s3').Bucket(name=bucket_name)
    bucket.Object(key=key).put(Body=payload)
if __name__ == "__main__":
print(json.dumps(cloud_sniper_beaconing_detection(None, None), indent=4))
| 30.87619 | 81 | 0.593461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,395 | 0.215145 |
9f5fffd14d557fb85e919eb6a0d6e8b847206c01 | 2,062 | py | Python | examples/KEPLER/Pretrain/convert.py | MichalPitr/KEPLER | 8692bb3d1e11673eb7b19a4175827024e916e55e | [
"MIT"
] | 98 | 2020-12-26T04:47:03.000Z | 2022-03-31T06:06:53.000Z | examples/KEPLER/Pretrain/convert.py | MichalPitr/KEPLER | 8692bb3d1e11673eb7b19a4175827024e916e55e | [
"MIT"
] | 15 | 2020-11-30T09:57:09.000Z | 2022-03-30T13:56:43.000Z | examples/KEPLER/Pretrain/convert.py | MichalPitr/KEPLER | 8692bb3d1e11673eb7b19a4175827024e916e55e | [
"MIT"
] | 12 | 2021-01-15T04:27:20.000Z | 2022-03-27T04:05:38.000Z | import argparse
import json
# Command-line interface: paths of the raw entity-description / triple
# files and where to write the converted, renumbered outputs.
parser = argparse.ArgumentParser()
parser.add_argument("--text", type=str, help="path to original text file")
parser.add_argument("--train", type=str, help="path to original training data file")
parser.add_argument("--valid", type=str, help="path to original validation data file")
parser.add_argument("--converted_text", type=str, default="Qdesc.txt", help="path to converted text file")
parser.add_argument("--converted_train", type=str, default="train.txt", help="path to converted training file")
parser.add_argument("--converted_valid", type=str, default="valid.txt", help="path to converted validation file")
if __name__=='__main__':
    args = parser.parse_args()
    Qid={} #Entity to id (line number in the description file)
    Pid={} #Relation to id
    def getNum(s):
        # Strip the leading letter of a Wikidata identifier ("Q42" -> 42).
        return int(s[1:])
    # Pass 1: write out entity descriptions, assigning each kept entity a
    # sequential id equal to its line number in the converted file.
    # NOTE(review): entities with a numeric id > 1000 are skipped —
    # presumably to keep this a small demo subset; confirm.
    with open(args.text, "r") as fin:
        with open(args.converted_text, "w") as fout:
            lines = fin.readlines()
            Cnt=0
            for idx, line in enumerate(lines):
                data = line.split('\t')
                assert len(data) >= 2
                assert data[0].startswith('Q')
                desc = '\t'.join(data[1:]).strip()
                if getNum(data[0])>1000:
                    continue
                fout.write(desc+"\n")
                Qid[data[0]] = Cnt#idx
                Cnt+=1
    # Pass 2: rewrite (head, relation, tail) triples using the numeric ids
    # assigned above; relations are numbered on first appearance.
    def convert_triples(inFile, outFile):
        with open(inFile, "r") as fin:
            with open(outFile, "w") as fout:
                lines = fin.readlines()
                for line in lines:
                    data = line.strip().split('\t')
                    assert len(data) == 3
                    # Drop triples touching entities outside the kept subset.
                    if getNum(data[0])>1000 or getNum(data[2]) > 1000:
                        continue
                    if data[1] not in Pid:
                        Pid[data[1]] = len(Pid)
                    fout.write("%d %d %d\n"%(Qid[data[0]], Pid[data[1]], Qid[data[2]]))
    convert_triples(args.train, args.converted_train)
    convert_triples(args.valid, args.converted_valid)
| 44.826087 | 113 | 0.56644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 439 | 0.2129 |
9f60d2f486b09e2eedf0913467b709bee5e50f98 | 4,219 | py | Python | mabel/data/formats/dictset/group_by.py | mabel-dev/mabel | ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a | [
"Apache-2.0"
] | null | null | null | mabel/data/formats/dictset/group_by.py | mabel-dev/mabel | ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a | [
"Apache-2.0"
] | 287 | 2021-05-14T21:25:26.000Z | 2022-03-30T12:02:51.000Z | mabel/data/formats/dictset/group_by.py | mabel-dev/mabel | ee1fdfcfe5fb87d2c5ce4f24b4b7113478ba1b8a | [
"Apache-2.0"
] | 1 | 2021-04-29T18:18:20.000Z | 2021-04-29T18:18:20.000Z | from typing import Callable, Iterator
from mabel.data.formats import dictset as ds
class Groups:

    __slots__ = "_groups"

    def __init__(self, dictset: Iterator, column: str, dedupe: bool = False):
        """
        Group By functionality for Iterables of Dictionaries

        Parameters:
            dictset: Iterable of dictionaries
                The dataset to perform the Group By on
            column: string
                The name of the field to group by
            dedupe: bool
                Remove duplicate values from the groups

        Returns:
            Groups

        Warning:
            The 'Groups' object holds the entire dataset in memory so is
            unsuitable for large datasets.
        """
        groups: dict = {}
        if dedupe:
            dictset = ds.drop_duplicates(dictset)
        for item in dictset:
            # Copy so the grouping key can be popped without mutating the
            # caller's records.
            my_item = item.copy()
            key = my_item.pop(column, None)
            if key not in groups:
                groups[key] = []
            groups[key].append(my_item)
        if dedupe:
            # Deduplicate within each group via frozenset-of-items keys.
            for group in groups:
                groups[group] = {
                    frozenset(item.items()): item for item in groups[group]
                }.values()
        self._groups = groups

    def count(self, group=None):
        """
        Count the number of items in groups

        Parameters:
            group: string (optional)
                If provided, return the count of just this group

        Returns:
            if a group is provided, return an integer
            if no group is provided, return a dictionary
        """
        if group is None:
            return {key: len(members) for key, members in self._groups.items()}
        members = self._groups.get(group)
        # Unknown groups count as zero (the original swallowed the
        # IndexError from an empty list .pop() to the same effect).
        return len(members) if members is not None else 0

    def aggregate(self, column, method):
        """
        Applies an aggregation function by group.

        Parameters:
            column: string
                The name of the field to aggregate on
            method: callable
                The function to aggregate with

        Returns:
            dictionary

        Examples:
            maxes = grouped.aggregate('age', max)
            means = grouped.aggregate('age', maths.mean)
        """
        response = {}
        for key, items in self._groups.items():
            # None values are excluded so aggregates like max()/sum() work.
            values = [
                item.get(column) for item in items if item.get(column) is not None
            ]
            response[key] = method(values)
        return response

    def apply(self, method: Callable):
        """
        Apply a function to all groups

        Parameters:
            method: callable
                The function to apply to the groups

        Returns:
            dictionary
        """
        return {key: method(items) for key, items in self._groups.items()}

    def __len__(self):
        """
        Returns the number of groups in the set.
        """
        return len(self._groups)

    def __repr__(self):
        """
        Returns the group names
        """
        return f"Group of {len(self)} items"

    def __getitem__(self, item):
        """
        Selector access to groups, e.g. Groups["Group Name"]

        Note that Groups["Group 1", "Group 2"] creates a group with just
        those items
        """
        if isinstance(item, (tuple, list)):
            newg = Groups([], None)
            for entry in item:
                if entry in self._groups:
                    # BUG FIX: the original did newg._groups[entry].append(...)
                    # which always raised KeyError because the key never
                    # existed in the fresh (empty) Groups instance.
                    newg._groups[entry] = self._groups[entry]
            return newg
        else:
            return SubGroup(self._groups.get(item))
class SubGroup:
    """A thin wrapper around the list of records belonging to one group."""

    __slots__ = "values"

    def __init__(self, values):
        # Normalize falsy inputs (None / empty) to an empty list.
        self.values = values if values else []

    def __getitem__(self, item):
        """
        Column access on the grouped records.

        A tuple of column names yields a list of projected records; a
        single column name yields the list of values for that column.
        """
        if not isinstance(item, tuple):
            return ds.extract_column(self.values, item)
        return list(ds.select_from(self.values, columns=item))

    def __len__(self):
        return len(self.values)

    def __repr__(self):
        return "SubGroup of {} items".format(len(self))
9f64787f57a24c0791b4e390dd80153d89d86f8d | 12,181 | py | Python | geonet/region.py | bbengfort/geonet | 05b96f44834bdb39444ad73d7a6c863f3d55326f | [
"MIT"
] | 1 | 2018-03-28T15:12:28.000Z | 2018-03-28T15:12:28.000Z | geonet/region.py | bbengfort/geonet | 05b96f44834bdb39444ad73d7a6c863f3d55326f | [
"MIT"
] | 1 | 2018-01-26T15:21:13.000Z | 2018-02-02T22:04:59.000Z | geonet/region.py | bbengfort/geonet | 05b96f44834bdb39444ad73d7a6c863f3d55326f | [
"MIT"
] | null | null | null | # geonet.region
# Interface for managing region objects
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Wed Jan 17 06:49:35 2018 -0500
#
# ID: region.py [] benjamin@bengfort.com $
"""
Interface for managing region objects
"""
##########################################################################
## Imports
##########################################################################
import os
import json
from geonet.ec2 import connect
from geonet.config import settings
from geonet.config import USERDATA
from geonet.utils.async import wait
from geonet.utils.serialize import Encoder
from geonet.base import Collection, Resource
from geonet.utils.timez import utcnow, parse_datetime
from geonet.ec2 import KeyPairs, SecurityGroups, Images
from geonet.ec2 import Instances, Volumes, LaunchTemplates
from geonet.ec2 import PlacementGroups
from geonet.zone import AvailabilityZones
from operator import itemgetter
REGIONDATA = os.path.join(USERDATA, "regions.json")
##########################################################################
## Helper Function
##########################################################################
def parse_region(region, lookup=True):
    """
    Parses the region from the given input.

    Parameters
    ----------
    region : object
        Parses a region according to the input type as follows:

        - basestring: returns a region from name
        - Region: pass-through so that region classes are returned
        - dict: instantiates a new region with the data

    lookup : bool, default=True
        If the region is a basestring or a Region class, all other information
        about the region is populated from disk.

    Returns
    -------
    region : Region
        The region object, either fully or sparsely populated

    raises : TypeError
        If an unknown region type is passed in
    """
    # Region instance: optionally refresh it from the on-disk fixtures.
    if isinstance(region, Region):
        if lookup:
            return Region.from_name(str(region))
        return region

    # Plain name (NOTE(review): `basestring` makes this Python 2 only).
    if isinstance(region, basestring):
        if lookup:
            return Region.from_name(region)
        return Region({"RegionName": region})

    # Raw dict of region data (boto response element or fixture entry).
    if isinstance(region, dict):
        return Region(region)

    raise TypeError("unparseable region type: {}".format(type(region)))
##########################################################################
## Region object and collection
##########################################################################
class Region(Resource):
    """
    A description of a region with extra information stored in config. Expects
    to be initialized from JSON data, an element of a boto response or data
    loaded from disk. This element should be a dictionary only.
    """

    # Keys the underlying Resource dict must / may contain.
    REQUIRED_KEYS = ('RegionName',)
    EXTRA_KEYS = ('LocaleName',)

    @staticmethod
    def from_name(name):
        """
        Load a region from a region name by loading all the region fixtures
        and finding the one that matches the RegionName or LocaleName keys.
        Raises a LookupError if the region could not be found.
        """
        regions = Regions.load()
        region = regions.find(name)
        if region is None:
            raise LookupError("no region named '{}' found".format(name))
        return region

    def __init__(self, *args, **kwargs):
        # An EC2 connection may be injected; otherwise it is created lazily
        # by the `conn` property.
        self._conn = kwargs.pop('conn', None)
        super(Region, self).__init__(*args, **kwargs)

    def __repr__(self):
        return "Region {} ({})".format(self, self.locale)

    def __str__(self):
        return self["RegionName"]

    def __hash__(self):
        # Hash on the region name so regions can be used as dict/set keys.
        return hash(str(self))

    @property
    def name(self):
        """Human-friendly name: the locale if set, else the region name."""
        if 'LocaleName' in self and self['LocaleName']:
            return self['LocaleName']
        return str(self)

    @property
    def locale(self):
        """
        Returns the locale name of the region
        """
        if 'LocaleName' in self and self['LocaleName']:
            return self['LocaleName']

        # Make it up: e.g. "us-east-1" -> "US East 1"
        parts = self['RegionName'].split('-')
        parts[0] = parts[0].upper()
        parts[1] = parts[1].title()
        return " ".join(parts)

    @property
    def conn(self):
        """Lazily created EC2 connection, retried up to 4 times."""
        if self._conn is None:
            for tries in range(4):
                try:
                    self._conn = connect(self)
                    break
                except:
                    # NOTE(review): bare except retries on *any* error
                    # (including KeyboardInterrupt); consider narrowing.
                    if tries >= 3: raise
                    self._conn = None
        return self._conn

    def is_configured(self):
        """
        Returns true if the region is configured in the settings
        """
        return self["RegionName"] in settings.regions

    def zones(self, **kwargs):
        """
        Describe the availability zones in the region and their state
        """
        resp = self.conn.describe_availability_zones(**kwargs)
        return AvailabilityZones(resp['AvailabilityZones'], region=self)

    def instances(self, **kwargs):
        """
        Returns all instances associated with the region
        """
        # TODO: validate response
        resp = self.conn.describe_instances(**kwargs)

        # Flatten reservations, tagging each instance with its reservation id.
        instances = []
        for reservation in resp['Reservations']:
            for instance in reservation['Instances']:
                instance['ReservationId'] = reservation['ReservationId']
                instances.append(instance)

        return Instances(instances, region=self)

    def volumes(self, **kwargs):
        """
        Returns all volumes associated with the region
        """
        # TODO: validate response
        resp = self.conn.describe_volumes(**kwargs)
        return Volumes(resp['Volumes'], region=self)

    def key_pairs(self, **kwargs):
        """
        Returns the keys associated with the region.
        """
        # TODO: validate response
        resp = self.conn.describe_key_pairs(**kwargs)
        return KeyPairs(resp['KeyPairs'], region=self)

    def launch_templates(self, **kwargs):
        """
        Returns the launch templates associated with the region.
        """
        # TODO: validate response
        resp = self.conn.describe_launch_templates(**kwargs)
        return LaunchTemplates(resp['LaunchTemplates'], region=self)

    def images(self, **kwargs):
        """
        Returns the images associated with the region. By default this filters
        the images that belong to the owner id set in the configuration file,
        otherwise this will take a really long time and return many results.
        """
        if 'Filters' not in kwargs and settings.aws.aws_owner_id:
            kwargs['Filters'] = [{
                'Name': 'owner-id',
                'Values': [settings.aws.aws_owner_id]
            }]

        # TODO: validate response
        resp = self.conn.describe_images(**kwargs)
        return Images(resp['Images'], region=self)

    def security_groups(self, **kwargs):
        """
        Returns the security groups associated with the region.
        """
        # TODO: validate response
        resp = self.conn.describe_security_groups(**kwargs)
        return SecurityGroups(resp['SecurityGroups'], region=self)

    def placement_groups(self, **kwargs):
        """
        Returns the placement groups associated with the region.
        """
        resp = self.conn.describe_placement_groups(**kwargs)
        return PlacementGroups(resp["PlacementGroups"], region=self)
class Regions(Collection):
    """
    A collection of region objects with extra information stored in config.
    Expects to be initialized from JSON data, either a response from boto
    describe_regions or loading the data from disk in a saved location.
    """

    RESOURCE = Region

    @classmethod
    def load(klass, path=REGIONDATA):
        """
        Load the region data from a path on disk.
        """
        # No cached file yet: fall back to the configured region names only.
        if not os.path.exists(path):
            return klass([
                {'RegionName': region} for region in settings.regions
            ])

        with open(path, 'r') as f:
            data = json.load(f)
            updated = parse_datetime(data["updated"])
            return klass(data["regions"], updated=updated)

    @classmethod
    def load_active(klass, **kwargs):
        """
        Loads regions and filters active ones.
        """
        return klass([
            region for region in klass.load(**kwargs)
            if region.is_configured()
        ])

    @classmethod
    def fetch(klass, conn):
        """
        Fetch the region data from EC2 with the given connection
        """
        # TODO: validate response
        resp = conn.describe_regions()
        return klass(resp["Regions"])

    def dump(self, path=REGIONDATA):
        """
        Dump the regions to the specified path on disk
        """
        data = {
            "updated": utcnow(),
            "regions": list(self),
        }

        with open(path, 'w') as f:
            json.dump(data, f, cls=Encoder, indent=2)

    def sortby(self, key, reverse=False):
        """
        Sort the regions in place by the specified key
        """
        self.items.sort(key=itemgetter(key), reverse=reverse)

    def find(self, name):
        """
        Find a region by RegionName or by LocaleName. Returns None if no key
        with the specified name could be found. Is case sensitive.
        """
        for region in self:
            if name == region["RegionName"] or name == region.locale:
                return region
        return None

    def zones(self, **kwargs):
        """
        Returns a collection of availability zones across all regions.
        """
        # wait() fans the per-region calls out in parallel.
        return AvailabilityZones.collect(
            wait((region.zones for region in self), kwargs=kwargs)
        )

    def instances(self, status=False, **kwargs):
        """
        Returns a collection of instances across all regions. If status is
        True then the status for all instances are also collected.
        """
        instances = wait((region.instances for region in self), kwargs=kwargs)

        if status:
            wait((instance.update_statuses for instance in instances))

        return Instances.collect(instances)

    def volumes(self, **kwargs):
        """
        Returns a collection of volumes across all regions.
        """
        return Volumes.collect(
            wait((region.volumes for region in self), kwargs=kwargs)
        )

    def key_pairs(self, **kwargs):
        """
        Returns the keys associated with all regions.
        """
        return KeyPairs.collect(
            wait((region.key_pairs for region in self), kwargs=kwargs)
        )

    def launch_templates(self, **kwargs):
        """
        Returns the launch templates associated with all regions.
        """
        return LaunchTemplates.collect(
            wait((region.launch_templates for region in self), kwargs=kwargs)
        )

    def images(self, **kwargs):
        """
        Returns the images associated with all regions. By default this filters
        the images that belong to the owner id set in the configuration file,
        otherwise this will take a really long time and return many results.
        """
        return Images.collect(
            wait((region.images for region in self), kwargs=kwargs)
        )

    def security_groups(self, **kwargs):
        """
        Returns the security groups associated with all regions.
        """
        return SecurityGroups.collect(
            wait((region.security_groups for region in self), kwargs=kwargs)
        )

    def placement_groups(self, **kwargs):
        """
        Returns the placement groups associated with all regions.
        """
        return PlacementGroups.collect(
            wait((region.placement_groups for region in self), kwargs=kwargs)
        )
if __name__ == '__main__':
    # Ad-hoc smoke test: dump the instances of the default region as JSON.
    from geonet.utils.serialize import to_json

    regions = Regions.load()
    region = regions[settings.aws.aws_region]
    # print(to_json(region.key_pairs(), indent=2))
    print(to_json(region.instances(), indent=2))
    # print(to_json(region.images(), indent=2))
    # print(to_json(region.security_groups(), indent=2))
| 31.233333 | 78 | 0.588868 | 9,331 | 0.766029 | 0 | 0 | 2,287 | 0.187751 | 0 | 0 | 5,292 | 0.434447 |
9f671a95487f003d950ed13b08b34375df5b9270 | 21 | py | Python | src/kdenlive_tools/__main__.py | kdeldycke/kdenlive-tools | 442fd45f6df473e15d20a67fe7feaf3b9f93acda | [
"BSD-2-Clause"
] | 5 | 2017-02-01T08:36:06.000Z | 2021-08-20T16:41:33.000Z | src/kdenlive_tools/__main__.py | kdeldycke/kdenlive-tools | 442fd45f6df473e15d20a67fe7feaf3b9f93acda | [
"BSD-2-Clause"
] | 1 | 2015-06-30T12:53:31.000Z | 2015-06-30T12:53:31.000Z | src/kdenlive_tools/__main__.py | kdeldycke/kdenlive-tools | 442fd45f6df473e15d20a67fe7feaf3b9f93acda | [
"BSD-2-Clause"
] | 1 | 2015-05-26T07:11:16.000Z | 2015-05-26T07:11:16.000Z | import cli
cli.cli()
| 7 | 10 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f6739c4bea6fd5cebb646ce1b393c5ad315b2ef | 1,539 | py | Python | tests/test_webframe.py | vmiklos/osm-gimmisn | 361a3bec83feaa49a1b0a1e4b4bca3ab61e57225 | [
"MIT"
] | 8 | 2019-05-25T10:56:41.000Z | 2021-09-07T11:17:33.000Z | tests/test_webframe.py | vmiklos/osm-gimmisn | 361a3bec83feaa49a1b0a1e4b4bca3ab61e57225 | [
"MIT"
] | 290 | 2019-05-02T21:07:42.000Z | 2022-01-28T09:36:51.000Z | tests/test_webframe.py | vmiklos/osm-gimmisn | 361a3bec83feaa49a1b0a1e4b4bca3ab61e57225 | [
"MIT"
] | 504 | 2019-05-02T20:50:54.000Z | 2021-05-09T12:26:18.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2019 Miklos Vajna and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_webframe module covers the webframe module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import traceback
import unittest
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse # noqa: F401
class TestHandleException(unittest.TestCase):
    """Tests handle_exception()."""
    def test_happy(self) -> None:
        """Tests the happy path."""
        environ = {
            "PATH_INFO": "/"
        }

        def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
            # handle_exception() must report an internal server error page.
            self.assertTrue(status.startswith("500"))
            header_dict = dict(response_headers)
            self.assertEqual(header_dict["Content-type"], "text/html; charset=utf-8")

        try:
            int("a")
        # pylint: disable=broad-except
        except Exception:
            # Feed the live traceback to handle_exception() and check that
            # the rendered body mentions the raised exception type.
            callback = cast('StartResponse', start_response)
            status, headers, data = webframe.handle_exception(environ, traceback.format_exc())
            callback(status, headers)
            self.assertTrue(data)
            output = data.decode('utf-8')
            self.assertIn("ValueError", output)
            return
        self.fail()


if __name__ == '__main__':
    unittest.main()
| 29.596154 | 94 | 0.648473 | 947 | 0.615335 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.324886 |
9f6a5e9db62958c0cbf9484b9d16be6014ffce1e | 504 | py | Python | Python Snippets with Documentation/05 Data Structures/11 List Comprehensions.py | AhmedRaja1/Python-Beginner-s-Starter-Kit | 285cfbeb7207e6531954f21cae3a062f977ee5a0 | [
"MIT"
] | 1 | 2021-09-27T16:47:25.000Z | 2021-09-27T16:47:25.000Z | Python Snippets with Documentation/05 Data Structures/11 List Comprehensions.py | AhmedRaja1/Python-Beginner-s-Starter-Kit | 285cfbeb7207e6531954f21cae3a062f977ee5a0 | [
"MIT"
] | null | null | null | Python Snippets with Documentation/05 Data Structures/11 List Comprehensions.py | AhmedRaja1/Python-Beginner-s-Starter-Kit | 285cfbeb7207e6531954f21cae3a062f977ee5a0 | [
"MIT"
] | 1 | 2021-09-27T16:47:33.000Z | 2021-09-27T16:47:33.000Z | # 11 List Comprehensions
products = [
("Product1", 15),
("Product2", 50),
("Product3", 5)
]
print(products)
# prices = list(map(lambda item: item[1], products))
# print(prices)
prices = [item[1] for item in products] # With list comprehensions we can achive the same result with a clenaer code
print(prices)
# filtered_price = list(filter(lambda item: item[1] >= 10, products))
# print(filtered_price)
filtered_price = [item for item in products if item[1] >= 10]
print(filtered_price) | 22.909091 | 116 | 0.69246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.573413 |
9f6ba376e6b11128b8c918a4e5e647fb6f6188f8 | 1,092 | py | Python | wes_service/util.py | DailyDreaming/dev-workflow-service | 753c38655c96f20a9d187b507f55fd097d6900ab | [
"Apache-2.0"
] | null | null | null | wes_service/util.py | DailyDreaming/dev-workflow-service | 753c38655c96f20a9d187b507f55fd097d6900ab | [
"Apache-2.0"
] | null | null | null | wes_service/util.py | DailyDreaming/dev-workflow-service | 753c38655c96f20a9d187b507f55fd097d6900ab | [
"Apache-2.0"
] | 1 | 2018-07-19T00:16:57.000Z | 2018-07-19T00:16:57.000Z | from six import itervalues
def visit(d, op):
    """Recursively call op(d) for all list subelements and dictionary 'values' that d may have.

    *op* is applied to *d* itself first, then depth-first to every list item
    or dict value (dict keys are not visited). Scalar leaves only get op().
    """
    op(d)
    if isinstance(d, list):
        for i in d:
            visit(i, op)
    elif isinstance(d, dict):
        # dict.values() iterates fine on both Python 2 and 3, so the six
        # helper (itervalues) is unnecessary here.
        for i in d.values():
            visit(i, op)
class WESBackend(object):
    """Stores and retrieves options. Intended to be inherited."""

    def __init__(self, opts):
        """Split each 'key=value' option string and store the pairs in order."""
        pairs = []
        for option in (opts if opts else []):
            key, value = option.split("=", 1)
            pairs.append((key, value))
        self.pairs = pairs

    def getopt(self, p, default=None):
        """Return the first stored value whose key matches *p*, or *default*."""
        for key, value in self.pairs:
            if key == p:
                return value
        return default

    def getoptlist(self, p):
        """Return every stored value whose key matches *p*, in insertion order."""
        return [value for key, value in self.pairs if key == p]
| 28.736842 | 98 | 0.543956 | 773 | 0.707875 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.312271 |
9f6d489b1bec4ea8deaa1ee79593777477246ffb | 1,081 | py | Python | ss/projectilesystem.py | slode/spaceshooter | 8892f78b2e0a009ebefeddbaa848208a37068ce7 | [
"MIT"
] | null | null | null | ss/projectilesystem.py | slode/spaceshooter | 8892f78b2e0a009ebefeddbaa848208a37068ce7 | [
"MIT"
] | 3 | 2019-06-27T12:14:07.000Z | 2019-06-27T12:15:03.000Z | ss/projectilesystem.py | slode/spaceshooter | 8892f78b2e0a009ebefeddbaa848208a37068ce7 | [
"MIT"
] | null | null | null | from triton.ecs import System
from .events import *
from .components import *
class ProjectileSystem(System):
    """ECS system that fires rockets and ticks down weapon cooldowns."""

    def initialize(self):
        # Both handlers run on every simulation tick.
        self.on(TickEvent, self.handle_cooldowns)
        self.on(TickEvent, self.on_shootevent)

    def handle_cooldowns(self, event):
        # Read the frame delta (milliseconds, presumably -- note the /1000
        # below; TODO confirm) from the GameState, then decay every weapon's
        # countdown accordingly.
        for e, (g,) in self.registry.get_components(
                GameState):
            dt = g.dt

            for e, (p, w) in self.registry.get_components(
                    Position, Weapon):
                if w.countdown > 0:
                    w.countdown -= dt/1000

    def on_shootevent(self, event):
        # Entities that are shooting with a ready weapon spawn a rocket at
        # their own position and restart the weapon cooldown.
        for e, [s, p, w, c] in self.registry.get_components(Shooting, Position, Weapon, Collidable):
            if w.countdown <= 0:
                w.countdown = w.cooldown
                self.registry.add_entity(
                    Position(x=p.x, y=p.y),
                    Velocity(),
                    Health(),
                    UpAccel(),
                    Renderable(),
                    Collidable(team=c.team),
                    Animatable("rocket"))
| 31.794118 | 100 | 0.509713 | 1,000 | 0.925069 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.007401 |
9f6e916f9895a09483d063aa2baf23c896ad2327 | 1,988 | py | Python | List/ArrayBaseList.py | alstn2468/Python_Data_Structure | 7a092c2b7a3ac5fbc735b51ce8d639ed7c2ecfce | [
"MIT"
] | 2 | 2020-08-10T09:31:13.000Z | 2020-08-11T07:03:18.000Z | List/ArrayBaseList.py | SikSo1897/Python_Data_Structure | 7a092c2b7a3ac5fbc735b51ce8d639ed7c2ecfce | [
"MIT"
] | null | null | null | List/ArrayBaseList.py | SikSo1897/Python_Data_Structure | 7a092c2b7a3ac5fbc735b51ce8d639ed7c2ecfce | [
"MIT"
] | 4 | 2019-03-19T06:59:01.000Z | 2020-09-02T04:38:22.000Z | # ArrayBaseList.py
# Array-based list class
class ArrayBaseList:
    """List wrapper backed by a Python list with an explicit element count."""

    def __init__(self):
        self.list = []
        self.count = 0

    def add(self, data):
        """Append *data* at the end of the list."""
        self.list.append(data)
        self.count += 1

    def search(self, data):
        """Return every index whose stored element equals *data*."""
        return [pos for pos, stored in enumerate(self.list) if stored == data]

    def get(self, index):
        """Return the element at *index*; raise IndexError when out of range."""
        if not 0 <= index < self.count:
            raise IndexError
        return self.list[index]

    def pop(self):
        """Remove and return the most recently added element."""
        last = self.list[self.count - 1]
        self.remove(self.count - 1)
        return last

    def remove(self, index):
        """Delete the element at *index* by shifting the tail one slot left."""
        for pos in range(index, self.count - 1):
            self.list[pos] = self.list[pos + 1]
        del self.list[self.count - 1]
        self.count -= 1

    def display(self):
        """Print all elements on one line, separated by single spaces."""
        for pos in range(self.count):
            print(str(self.list[pos]) + ' ', end="")
if __name__ == '__main__' :
    # Create an array-based list
    list = ArrayBaseList()

    # Insert data into the list
    list.add(10), list.add(20)
    list.add(30), list.add(40)
    list.add(50), list.add(60)

    # Print the list contents
    print('Add Data')
    list.display()
    print()
    '''
    Add Data
    10 20 30 40 50 60
    '''

    # Remove and print the most recently stored element
    print('pop : ' + str(list.pop())) # pop : 60

    # Print the list contents after the pop
    print('After Pop')
    list.display()
    print()
    '''
    After Pop
    10 20 30 40 50
    '''

    # Delete the element at index 2
    list.remove(2)

    # Print the list contents after the removal
    print('After Remove 2nd index item')
    list.display()
    print()
    '''
    After Remove 2nd index item
    10 20 40 50
    '''

    # Look up the element at index 2
    print('Get list[2] : ' + str(list.get(2))) # Get list[2] : 40

    # Search for the value 40
    print('Item(40) in list : ' + str(list.search(40)) + ' index') # Item(40) in list : [2] index
9f6fd9b78959476700ebfd2a923895af3dc7f59d | 250 | py | Python | gui/examples/hello_0.py | t20100/silx-training | 409656479c7fdc9f1e895c6f3f0530c7eb89cbc1 | [
"CC-BY-4.0"
] | 7 | 2017-05-02T10:03:12.000Z | 2021-06-28T14:11:32.000Z | gui/examples/hello_0.py | t20100/silx-training | 409656479c7fdc9f1e895c6f3f0530c7eb89cbc1 | [
"CC-BY-4.0"
] | 23 | 2016-11-21T17:55:11.000Z | 2021-11-24T13:43:13.000Z | gui/examples/hello_0.py | t20100/silx-training | 409656479c7fdc9f1e895c6f3f0530c7eb89cbc1 | [
"CC-BY-4.0"
] | 13 | 2016-11-17T10:47:22.000Z | 2022-02-07T09:38:47.000Z | from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow
app = QApplication([])
main_window = QMainWindow()
first_widget = QLabel('hello world !!!', parent=main_window)
main_window.setCentralWidget(first_widget)
main_window.show()
app.exec_() | 25 | 61 | 0.788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.068 |
9f6ff1cce926c42a5b3e96edd3fb7ad902b5329d | 1,429 | py | Python | day_09/main.py | 7Rocky/AoC-2021 | ed8e94255d6d935e3fcde4363361fb7c2a8eccba | [
"0BSD"
] | 1 | 2021-12-07T08:03:51.000Z | 2021-12-07T08:03:51.000Z | day_09/main.py | 7Rocky/AoC-2021 | ed8e94255d6d935e3fcde4363361fb7c2a8eccba | [
"0BSD"
] | null | null | null | day_09/main.py | 7Rocky/AoC-2021 | ed8e94255d6d935e3fcde4363361fb7c2a8eccba | [
"0BSD"
] | null | null | null | from functools import reduce
def bfs(root, points):
    """Breadth-first flood fill of a basin starting at *root* = (x, y).

    *points* is a grid indexed as points[y][x] whose border (and basin walls)
    are 9s. Returns the number of reachable non-9 cells, counting the root.
    """
    from collections import deque  # local import keeps the module header untouched

    queue = deque([root])
    visited_states = {root}
    basin_size = 1

    while queue:
        # popleft() is O(1); the original `queue = queue[1:]` re-sliced the
        # whole list on every pop, making the walk accidentally quadratic.
        i, j = queue.popleft()

        for n in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if n not in visited_states and points[n[1]][n[0]] != 9:
                basin_size += 1
                queue.append(n)
                visited_states.add(n)

    return basin_size
def main():
    """Solve Advent of Code 2021 day 9 from input.txt (both parts)."""
    # Pad the height map with a border of 9s so neighbour lookups in bfs()
    # never fall off the grid.
    points = [[9]]

    with open('input.txt') as f:
        for line in f.readlines():
            points.append([9] + list(map(int, list(line.strip()))) + [9])

    points[0] = [9] * len(points[1])
    points.append(points[0])

    size = (len(points[0]), len(points))
    low_points, basin_sizes = [], []

    for j in range(1, size[1] - 1):
        for i in range(1, size[0] - 1):
            # A low point is strictly smaller than all four neighbours.
            ps = [points[j - 1][i], points[j + 1][i],
                  points[j][i - 1], points[j][i + 1]]

            if all(map(lambda p, i=i, j=j: points[j][i] < p, ps)):
                low_points.append(points[j][i])
                basin_sizes.append(bfs((i, j), points))

    basin_sizes.sort()
    # Part 2: product of the three largest basin sizes.
    basins_prod = reduce(lambda x, y: x * y, basin_sizes[-3:], 1)

    print(f'Risk of low points (1): { sum(low_points) + len(low_points) }')
    print(f'Product of three largest basins (2): { basins_prod }')


if __name__ == '__main__':
    main()
| 25.517857 | 75 | 0.505948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.097971 |
9f7018c0a4594a3537d02ec23d95b4ae17544c4d | 1,101 | py | Python | kolibri/logger/test/factory_logger.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | [
"MIT"
] | null | null | null | kolibri/logger/test/factory_logger.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | [
"MIT"
] | 2 | 2017-02-08T00:22:04.000Z | 2017-06-12T20:27:44.000Z | kolibri/logger/test/factory_logger.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | [
"MIT"
] | null | null | null | import datetime
import factory
import uuid
from kolibri.auth.test.test_api import FacilityUserFactory
from .. import models
class ContentSessionLogFactory(factory.DjangoModelFactory):
    """Builds ContentSessionLog fixtures for tests."""
    class Meta:
        model = models.ContentSessionLog

    user = factory.SubFactory(FacilityUserFactory)
    # NOTE(review): uuid4()/now() run once at class-definition time, so every
    # instance from these factories shares the same ids/timestamp -- confirm
    # this is intentional (factory.LazyFunction would give fresh values).
    content_id = uuid.uuid4().hex
    channel_id = uuid.uuid4().hex
    start_timestamp = datetime.datetime.now()


class ContentSummaryLogFactory(factory.DjangoModelFactory):
    """Builds ContentSummaryLog fixtures for tests."""
    class Meta:
        model = models.ContentSummaryLog

    user = factory.SubFactory(FacilityUserFactory)
    content_id = uuid.uuid4().hex
    channel_id = uuid.uuid4().hex
    start_timestamp = datetime.datetime.now()


class ContentRatingLogFactory(factory.DjangoModelFactory):
    """Builds ContentRatingLog fixtures for tests."""
    class Meta:
        model = models.ContentRatingLog

    user = factory.SubFactory(FacilityUserFactory)
    content_id = uuid.uuid4().hex
    channel_id = uuid.uuid4().hex


class UserSessionLogFactory(factory.DjangoModelFactory):
    """Builds UserSessionLog fixtures for tests."""
    class Meta:
        model = models.UserSessionLog

    user = factory.SubFactory(FacilityUserFactory)
| 23.425532 | 59 | 0.745686 | 964 | 0.875568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f71a197572466c48094262987b091e399a3df17 | 237 | py | Python | lists/urls.py | Tawakalt/todo_list | 184293acf62771f60c6fdc46271634ae89684775 | [
"MIT"
] | null | null | null | lists/urls.py | Tawakalt/todo_list | 184293acf62771f60c6fdc46271634ae89684775 | [
"MIT"
] | 5 | 2020-06-06T01:03:12.000Z | 2022-02-10T10:01:49.000Z | lists/urls.py | Tawakalt/todo_list | 184293acf62771f60c6fdc46271634ae89684775 | [
"MIT"
] | 1 | 2020-01-20T12:44:56.000Z | 2020-01-20T12:44:56.000Z | from django.urls import path
from . import views
urlpatterns = [
path('new', views.new_list, name='new_list'),
path('<list_id>/', views.view_list, name='view_list'),
path('users/<email>/', views.my_lists, name='my_lists'),
] | 29.625 | 60 | 0.670886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.270042 |
9f73832f769c299fec9835e66ca79ecdd0318b41 | 1,268 | py | Python | populate_chijri.py | J471/Telegram-ID_AzanBot | e48bff507ade5e811a6bbac2bfe8615448b09e83 | [
"MIT"
] | 2 | 2021-01-27T10:38:25.000Z | 2021-06-14T18:54:23.000Z | populate_chijri.py | J471/Telegram-ID_AzanBot | e48bff507ade5e811a6bbac2bfe8615448b09e83 | [
"MIT"
] | 2 | 2021-01-27T10:38:07.000Z | 2021-06-14T18:51:19.000Z | populate_chijri.py | J471/Telegram-ID_AzanBot | e48bff507ade5e811a6bbac2bfe8615448b09e83 | [
"MIT"
] | 2 | 2020-05-09T23:38:57.000Z | 2021-01-27T10:38:31.000Z | #!/usr/bin/env python
import logging
from pymongo import MongoClient
from credentials import DBNAME, DBUSER, DBPASS, DBAUTH
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

# MongoDB connection
# NOTE(review): the client connects and authenticates at import time, so any
# import of this module requires a reachable MongoDB instance.
client = MongoClient()
db = client[DBNAME]
db.authenticate(DBUSER, DBPASS, source=DBAUTH)
# Twelve identical month records (_id 1..12) with all flags cleared.
# f29flg/f30flg presumably mark forced 29/30-day months and fadjst a
# day-count adjustment -- confirm against the bot's calendar logic.
CHIJRI_CONTENTS = [
    {"_id": month, "f29flg": False, "f30flg": False, "fadjst": 0}
    for month in range(1, 13)
]
def main():
    """Insert the twelve month records into the 'chijri' collection."""
    db.chijri.insert_many(CHIJRI_CONTENTS)


if __name__ == '__main__':
    main()
9f738ea756e75f0ed0084df2282be6af1ddfc79e | 12,206 | py | Python | sources/iam_online.py | X-rayLaser/keras-auto-hwr | 67cfc0209045b1e211f0491b0199cb9d6811bfd0 | [
"MIT"
] | null | null | null | sources/iam_online.py | X-rayLaser/keras-auto-hwr | 67cfc0209045b1e211f0491b0199cb9d6811bfd0 | [
"MIT"
] | 2 | 2020-01-04T09:03:31.000Z | 2021-05-10T18:29:41.000Z | sources/iam_online.py | X-rayLaser/keras-auto-hwr | 67cfc0209045b1e211f0491b0199cb9d6811bfd0 | [
"MIT"
] | null | null | null | import os
import random
from xml.etree import ElementTree as ET
import numpy as np
from PIL import Image
from PIL.ImageDraw import ImageDraw
from sources import BaseSource
from sources.preloaded import PreLoadedSource
class OnlineSource(BaseSource):
    """Iterates the IAM-OnDB corpus as (line strokes, transcription) pairs.

    *data_root* is expected to contain the standard IAM-OnDB layout with
    'lineStrokes-all/lineStrokes' and 'original-xml-all/original' trees.
    """

    def __init__(self, data_root):
        self._root = data_root
        self._strokes_root = os.path.join(self._root, 'lineStrokes-all',
                                          'lineStrokes')

    def transcription_paths(self):
        """Yield the path of every transcription XML, two directories deep."""
        labels_root = os.path.join(self._root, 'original-xml-all', 'original')
        for dirname1 in os.listdir(labels_root):
            path1 = os.path.join(labels_root, dirname1)
            for dirname2 in os.listdir(path1):
                path2 = os.path.join(path1, dirname2)
                for transcription_file in os.listdir(path2):
                    transcription_path = os.path.join(path2, transcription_file)
                    yield transcription_path

    def get_strokes_path(self, file_id):
        """Map a line id to its stroke XML path under the strokes root.

        A trailing letter on the second id component is dropped to form the
        sub-folder name; otherwise the component is used as-is.
        """
        path_components = file_id.split('-')

        if path_components[1][-1].isalpha():
            subfolder = path_components[0] + '-' + path_components[1][:-1]
        else:
            subfolder = path_components[0] + '-' + path_components[1]

        stroke_path = os.path.join(
            self._strokes_root, path_components[0],
            subfolder,
            file_id + '.xml'
        )
        return stroke_path

    def get_transcriptions(self, random_order=False):
        """Yield Transcription objects, skipping files without one."""
        for transcription_path in self.transcription_paths():
            try:
                transcription = Transcription(transcription_path, random_order)
                yield transcription
            except MissingTranscriptionException:
                continue

    def get_sequences(self):
        """Yield (strokes, text) for every line whose stroke file exists."""
        for transcription in self.get_transcriptions():
            for file_id, true_text in transcription.text_lines():
                stroke_path = self.get_strokes_path(file_id)

                try:
                    stroke_line = StrokeLine(stroke_path)
                except StrokesNotFoundException:
                    continue

                line_strokes = stroke_line.strokes()
                yield line_strokes, true_text

    def __len__(self):
        # O(corpus): walks every sequence just to count them.
        counter = 0
        for _ in self.get_sequences():
            counter += 1
        return counter
class LinesSource(BaseSource):
    """Transparent wrapper exposing another source's line sequences."""

    def __init__(self, source):
        self._source = source

    def get_sequences(self):
        """Delegate straight to the wrapped source."""
        for sequence in self._source.get_sequences():
            yield sequence

    def __len__(self):
        """Length of the wrapped source."""
        return len(self._source)
def fetch_strokes(source, num_strokes):
    """Collect up to *num_strokes* normalized stroke sequences from *source*.

    Strokes that cannot be normalized (degenerate bounding box) are skipped.
    Returns a PreLoadedSource pairing each stroke with an empty transcription.
    """
    cropped_strokes = []
    dummy_out = []

    for strokes, text in source.get_sequences():
        for stroke in strokes:
            # >= (was >) so the result never exceeds num_strokes; the old
            # check allowed num_strokes + 1 items, which contradicted
            # StrokesSource.__len__ reporting num_strokes.
            if len(cropped_strokes) >= num_strokes:
                return PreLoadedSource(cropped_strokes, dummy_out)

            try:
                deltas = stroke.stroke_to_points()
            except BadStrokeException:
                continue

            cropped_strokes.append(deltas)
            dummy_out.append('')

    return PreLoadedSource(cropped_strokes, dummy_out)
class StrokesSource(BaseSource):
    """Pre-fetches up to *num_strokes* individual strokes from *source*."""

    def __init__(self, source, num_strokes):
        self._source = source
        self._num_strokes = num_strokes
        # Eagerly materialized so iteration below is cheap and repeatable.
        self._preloaded = fetch_strokes(source, num_strokes)

    def get_sequences(self):
        for line in self._preloaded.get_sequences():
            yield line

    def __len__(self):
        # NOTE(review): reports the requested count even when the source
        # yielded fewer strokes -- confirm callers tolerate that.
        return self._num_strokes
class WordsSource(BaseSource):
    """Splits each line of strokes into per-word stroke groups.

    A gap between consecutive strokes larger than 1.5 standard deviations of
    all inter-stroke gaps is treated as a word boundary.
    """

    def __init__(self, source):
        self._source = source

    def _distance_deviation(self, strokes):
        """Standard deviation of horizontal gaps between consecutive strokes."""
        delayed_strokes = strokes[1:]

        distances = []
        for i in range(len(delayed_strokes)):
            next_stroke = delayed_strokes[i]
            stroke = strokes[i]
            distances.append(next_stroke.horizontal_distance(stroke))

        return np.std(distances)

    def _word_strokes(self, strokes):
        """Yield consecutive runs of strokes, one run per word.

        Fixes the original implementation, which returned immediately after
        the first yield (the reset below it was unreachable dead code) and
        therefore never produced any word beyond the first, nor the final one.
        """
        if len(strokes) == 0:
            return

        sd = self._distance_deviation(strokes)

        prev_stroke = strokes[0]
        word_strokes = [prev_stroke]
        for stroke in strokes[1:]:
            if stroke.horizontal_distance(prev_stroke) > 1.5 * sd:
                yield word_strokes
                word_strokes = []
            word_strokes.append(stroke)
            prev_stroke = stroke

        if word_strokes:
            yield word_strokes

    def get_sequences(self):
        """Yield (word strokes, word text), pairing groups with split text."""
        for strokes, transcription in self._source.get_sequences():
            word_transcriptions = transcription.split(' ')

            for i, word_strokes in enumerate(self._word_strokes(strokes)):
                if i < len(word_transcriptions):
                    yield word_strokes, word_transcriptions[i]

    def __len__(self):
        # NOTE(review): length is unknown without consuming the source;
        # len() on this object raises TypeError (returns None).
        pass
class RandomOrderSource(OnlineSource):
    """Like OnlineSource, but yields lines in randomized order.

    Transcription files are shuffled, then lines are drawn by picking a
    random per-file generator each step, interleaving files' lines.
    """

    def transcription_paths(self):
        all_paths = []
        for path in super().transcription_paths():
            all_paths.append(path)

        random.shuffle(all_paths)

        for path in all_paths:
            yield path

    def make_generator(self, transcription_path):
        """Generator over (file_id, text) lines of one transcription file."""
        try:
            transcription = Transcription(transcription_path, random_order=True)
            for line in transcription.text_lines():
                yield line
        except MissingTranscriptionException:
            pass

    def get_sequences(self):
        """Yield (points, text); NOTE: raw points, not the base class's Stroke lists."""
        line_iterators = []
        for path in self.transcription_paths():
            line_iterators.append(self.make_generator(path))

        while len(line_iterators) > 0:
            # Draw the next line from a randomly chosen file; exhausted
            # generators are dropped from the pool.
            gen = random.choice(line_iterators)

            try:
                file_id, true_text = next(gen)
            except StopIteration:
                line_iterators.remove(gen)
                continue

            stroke_path = self.get_strokes_path(file_id)

            try:
                stroke_line = StrokeLine(stroke_path)
            except StrokesNotFoundException:
                continue

            line_points = stroke_line.points()
            yield line_points, true_text
class StrokesNotFoundException(Exception):
    """Raised when a line's stroke XML file does not exist on disk."""
    pass
class Stroke:
    """A single pen stroke: an ordered list of sampled points.

    Points may be (x, y) pairs or (x, y, t) triples -- StrokeLine.strokes()
    builds triples -- so only the first two components are ever accessed.
    """

    def __init__(self, points):
        self.points = points

    def left_most_x(self):
        """Smallest x coordinate over all points."""
        # p[0] instead of tuple unpacking: the original `for x, y in points`
        # raised ValueError on the (x, y, t) triples actually produced.
        return min(p[0] for p in self.points)

    def right_most_x(self):
        """Largest x coordinate over all points (the original used min)."""
        return max(p[0] for p in self.points)

    def horizontal_distance(self, stroke):
        """Gap between this stroke's left edge and *stroke*'s right edge."""
        return self.left_most_x() - stroke.right_most_x()

    def stroke_to_points(self):
        """Normalize the stroke into a pen-up-framed list of (dx, dy) deltas.

        Coordinates are shifted to the bounding-box origin and scaled by its
        longer side; a leading and trailing (0, 0) delta frame the stroke.
        Raises BadStrokeException for degenerate (sub-pixel) strokes.
        """
        xs = [p[0] for p in self.points]
        ys = [p[1] for p in self.points]
        min_x, max_x = min(xs), max(xs)
        min_y, max_y = min(ys), max(ys)

        width = max_x - min_x
        height = max_y - min_y
        # Epsilon avoids division by zero for straight lines.
        max_side = max(width, height) + 0.00001

        if max_side < 1:
            print('BAD STROKE')
            raise BadStrokeException()

        points = [(0, 0)]
        for p in self.points:
            x = (p[0] - min_x) / max_side
            y = (p[1] - min_y) / max_side
            points.append((x, y))

        prev_point = (0, 0)
        deltas = []
        for x, y in points:
            deltas.append((x - prev_point[0], y - prev_point[1]))
            prev_point = (x, y)

        deltas.append((0, 0))
        return deltas
class BadStrokeException(Exception):
    """Raised when a stroke is too small to normalize (bbox side < 1)."""
    pass
class StrokeLine:
    """Parses one IAM-OnDB stroke XML file (a single handwritten line)."""

    def __init__(self, xml_path):
        if not os.path.isfile(xml_path):
            raise StrokesNotFoundException()

        tree = ET.parse(xml_path)
        root = tree.getroot()

        board_info = list(root.iterfind('WhiteboardDescription'))[0]
        location = list(board_info.iterfind('SensorLocation'))[0]
        # Coordinate shifting below assumes a top-left sensor origin.
        assert location.attrib['corner'] == 'top_left'

        diagonally_opposite = list(board_info.iterfind('DiagonallyOppositeCoords'))[0]
        vertically_opposite = list(board_info.iterfind('VerticallyOppositeCoords'))[0]
        horizontally_opposite = list(board_info.iterfind('HorizontallyOppositeCoords'))[0]

        x1 = int(diagonally_opposite.attrib['x'])
        y1 = int(diagonally_opposite.attrib['y'])
        x2 = int(vertically_opposite.attrib['x'])
        y2 = int(vertically_opposite.attrib['y'])
        x3 = int(horizontally_opposite.attrib['x'])
        y3 = int(horizontally_opposite.attrib['y'])

        # Bounding box of the writing area across the three corner markers.
        x_from = min(x1, x2, x3)
        x_to = max(x1, x2, x3)
        y_from = min(y1, y2, y3)
        y_to = max(y1, y2, y3)
        height = y_to - y_from + 1
        width = x_to - x_from + 1

        self._x_from = x_from
        self._y_from = y_from
        self._x_max = x_to
        self._y_max = y_to
        self._height = height
        self._width = width
        self._root = root

    def points_generator(self):
        """Yield (x, y, previous point or None) for every sample, origin-shifted."""
        stroke_set = list(self._root.iterfind('StrokeSet'))[0]

        for stroke in stroke_set:
            assert stroke.attrib['colour'] == 'black'

            prev_point = None
            for point in stroke:
                y = int(point.attrib['y']) - self._y_from
                x = int(point.attrib['x']) - self._x_from
                yield x, y, prev_point
                prev_point = (x, y)

    def points(self):
        """All points of the line as (x, y) pairs."""
        return [(x, y) for x, y, prev in self.points_generator()]

    def strokes(self):
        """The line as a list of Stroke objects built from (x, y, time) triples."""
        stroke_set = list(self._root.iterfind('StrokeSet'))[0]

        res = []
        for stroke_tag in stroke_set:
            assert stroke_tag.attrib['colour'] == 'black'

            points = []
            for point in stroke_tag:
                y = int(point.attrib['y']) - self._y_from
                x = int(point.attrib['x']) - self._x_from
                t = float(point.attrib['time'])
                points.append((x, y, t))

            res.append(Stroke(points))

        return res

    def heights(self):
        """The y coordinate of every point, in sample order."""
        v = []
        for x, y, prev in self.points_generator():
            v.append(y)
        return v

    def to_image(self):
        """Rasterize the line into a grayscale PIL image (white ink on black)."""
        a = np.zeros((self._height, self._width), dtype=np.uint8)
        im = Image.fromarray(a, mode='L')
        canvas = ImageDraw(im, mode='L')

        for x, y, prev_point in self.points_generator():
            if prev_point:
                canvas.line((prev_point, (x, y)), width=12, fill=255)

        return im
class Transcription:
    """Reads the ground-truth text lines from a handwriting-session XML file.

    Each TextLine element carries an ``id`` attribute, but that attribute is
    occasionally wrong in the corpus; the line id is therefore re-derived from
    the ids of the line's Word children (majority vote) and a warning is
    printed when the two disagree.
    """

    def __init__(self, path, random_order=False):
        tree = ET.parse(path)
        root = tree.getroot()
        transcription_tag = list(root.iterfind('Transcription'))
        if len(transcription_tag) == 0:
            raise MissingTranscriptionException()
        self._transcription = transcription_tag[0]
        self._random_order = random_order
        self._path = path
        self._lines = None  # lazy cache of (line_id, text) pairs

    def _fetch_lines(self):
        """Parse (and cache) the (line_id, text) pairs for every TextLine."""
        if self._lines is None:
            self._lines = []
            for line in self._transcription.iterfind('TextLine'):
                text = line.attrib['text']
                file_id = line.attrib['id']
                second_opinion = self.id_from_words(line)
                if file_id != second_opinion:
                    print('ATTENTION: id attribute "file_id" on line does not match the one on words: {} vs {}'.format(file_id, second_opinion))
                # Trust the majority-vote id derived from the words.
                self._lines.append((second_opinion, text))
        if self._random_order:
            # Intentionally outside the cache-fill branch: the cached list is
            # reshuffled on every call, matching the original behavior.
            random.shuffle(self._lines)
        return self._lines

    def id_from_words(self, line):
        """Derive the line id as the most common 3-component prefix of word ids.

        Returns None when the line has no Word children.
        """
        id_counts = {}
        for word in line.iterfind('Word'):
            components = word.attrib['id'].split('-')
            line_id = '-'.join(components[:3])
            id_counts[line_id] = id_counts.get(line_id, 0) + 1
        if not id_counts:
            return None
        # max() returns the first maximal key in insertion order, matching the
        # original "strictly greater wins" tie-break.
        return max(id_counts, key=id_counts.get)

    def text_lines(self):
        """Yield (line_id, text) pairs for every transcribed line."""
        for line in self._fetch_lines():
            yield line
class MissingTranscriptionException(Exception):
    """Raised when a session XML file contains no <Transcription> element."""
9f73b74bec263fb02cc230f60b05601fb2257463 | 6,329 | py | Python | mission_control/navigator_missions/navigator_missions/start_gate_marshall.py | MarshallRawson/NaviGator | 1f12dccb1f51a5f2c1250ed58b955da34c4b081f | [
"MIT"
] | null | null | null | mission_control/navigator_missions/navigator_missions/start_gate_marshall.py | MarshallRawson/NaviGator | 1f12dccb1f51a5f2c1250ed58b955da34c4b081f | [
"MIT"
] | null | null | null | mission_control/navigator_missions/navigator_missions/start_gate_marshall.py | MarshallRawson/NaviGator | 1f12dccb1f51a5f2c1250ed58b955da34c4b081f | [
"MIT"
] | 1 | 2020-04-26T17:31:01.000Z | 2020-04-26T17:31:01.000Z | #!/usr/bin/env python
from __future__ import division
import txros
import numpy as np
import mil_tools
from mil_misc_tools.text_effects import fprint
from navigator import Navigator
import math
from twisted.internet import defer
from mil_tools import rosmsg_to_numpy
from mil_misc_tools import ThrowingArgumentParser
___author___ = "Marshall Rawson"
#This mission takes care of the second part of the qualifier objective
class StartGateMarshall(Navigator):
#this method gets the location of the nearest black totem and the scan the code platform
#from the PCODAR database
#runs the scan the code preception script eventually
def get_scan_the_code(self):
#currently hard coded, no STC that works yet :/
return False
#returns the xy of target totem and unit vector from target to non target totem
@txros.util.cancellableInlineCallbacks
def get_bouy_go_round_target(self):
return_array = []
#gets the xy and state of the scan the code from the database
scan_the_code = np.array([])
res = yield self.database_query('stc_platform')
#makes sure that only 1 scan the code exists
assert len(res.objects) == 1
#raises error if the scan the code platform is nto
if not res.found:
raise TaskException(query + ' not found in object database')
point = rosmsg_to_numpy(res.objects[0].pose.position)[:2]
#runs the function that retrives/runs the scan the code state True for circle scan
#the code, False for circle the black totem
scan_the_code = point
return_array.append(scan_the_code)
#print scan_the_code
#this portion of the method gets the location of the nearest black totem
#gets all of the black totems from the database
num_of_black_totems = 1
black_totems = yield self.database_query('totem_black')
black_totems_poses = []
for i in black_totems.objects:
point = rosmsg_to_numpy(i.pose.position)[:2]
black_totems_poses.append(point)
#the follwing determins which is the closest
#i wish python had a do while loop
closest = black_totems_poses[0]
dist = ((black_totems_poses[0][0]-self.pose[0][0])**2)+((black_totems_poses[0][1]-self.pose[0][1])**2)
j=0 #an index for populating the dist_temp array
while j < len(black_totems_poses):
dist_temp = ((black_totems_poses[j][0]-self.pose[0][0])**2)+((black_totems_poses[j][1]-self.pose[0][1])**2)
if dist_temp < dist:
dist = dist_temp
closest = black_totems[j]
j+=1
#closest now has the position of the closest black totem
#closest is a np array
return_array.append(closest)
#returnValue has the scan the code and closest black totem location
defer.returnValue(return_array)
@txros.util.cancellableInlineCallbacks
def bouy_go_round(self):
TOTEM_MARGIN = 6 #m, distance to pass behind the totem
start_pose = self.pose[0][:2]
locations = yield self.get_bouy_go_round_target()
#target contains xy of target totem and unit vector from target to non target totem
scan_the_code = locations[0]
black_totem = locations[1]
#an ENU vector from the scan_the_code to start pose of magnitude TOTEM_MARGIN (N=0)
stc_waypoint = np.append((((start_pose-scan_the_code)/np.linalg.norm(start_pose-scan_the_code))*TOTEM_MARGIN)+scan_the_code, 0)
#go to the end of that vector and look at the scan_the_code platform
yield self.move.set_position(stc_waypoint).look_at(np.append(scan_the_code, 0)).go()
#determine weather or not to circle the stc platform
if self.get_scan_the_code() == True:
#turn 90deg to the left so we cirlce prograde
yield self.move.yaw_left(math.pi/2).go()
#we cirlce clock-wise .75 revolutions
circle = self.move.circle_point([scan_the_code[0], scan_the_code[1], 0], "cw", .75)
elif self.get_scan_the_code() == False:
#an ENU vector from black_totem to self.pose of magnitude TOTEM_MARGIN (N=0)
black_totem_waypoint = np.append(((((self.pose[0][:2]-black_totem[:2])/np.linalg.norm(self.pose[0][:2]-black_totem[:2]))*TOTEM_MARGIN)+black_totem[:2]),0)
yield self.move.set_position(black_totem_waypoint).look_at(np.append(black_totem[:2], 0)).go()
#turn 90deg to the right so we cirlce prograde
yield self.move.yaw_right(math.pi/2).go()
#we cirlce counter clock-wise .5 revolutions
circle = self.move.circle_point([black_totem[0], black_totem[1], 0], "ccw", .5)
yield circle.go()
#go bakc to where was dropped off to listen for hydrophones
yield self.move.set_position(np.append(start_pose,0)).go()
@txros.util.cancellableInlineCallbacks
def go_around_black_totem(self):
TOTEM_MARGIN = 6 #m, distance to pass behind the totem
start_pose = self.pose[0][:2]
locations = yield self.get_bouy_go_round_target()
waypoint = np.append((((start_pose-locations[1])/np.linalg.norm(start_pose-locations[1]))*TOTEM_MARGIN)+locations[1], 0)
yield self.move.set_position(waypoint).look_at(np.append(locations[1], 0)).go()
circle = self.move.circle_point([locations[1][0], locations[1][1], 0], "ccw", 1)
yield circle.go()
yield self.move.yaw_right(math.pi*2).go()
@classmethod
def decode_parameters(cls, parameters):
argv = parameters.split()
return cls.parser.parse_args(argv)
@classmethod
def init(cls):
parser = ThrowingArgumentParser(description='start gate marshall',
usage='''start gate marshall''')
parser.add_argument('-q', '--quals', action='store_true',
help='set for quals')
cls.parser = parser
@txros.util.cancellableInlineCallbacks
def run (self, parameters):
if parameters.quals:
yield self.go_around_black_totem()
else:
yield self.bouy_go_round() | 41.638158 | 166 | 0.652552 | 5,907 | 0.933323 | 4,819 | 0.761416 | 5,448 | 0.860799 | 0 | 0 | 1,912 | 0.302101 |
9f73dd661d58f6bc6a5bf932909ee30f8b1fd07a | 8,205 | py | Python | misc bots/mtg info bot.py | b9king/Discord-Bots | e6b08eeeb8de0952726883cfce0717d4866eacc9 | [
"MIT"
] | null | null | null | misc bots/mtg info bot.py | b9king/Discord-Bots | e6b08eeeb8de0952726883cfce0717d4866eacc9 | [
"MIT"
] | null | null | null | misc bots/mtg info bot.py | b9king/Discord-Bots | e6b08eeeb8de0952726883cfce0717d4866eacc9 | [
"MIT"
] | null | null | null |
import discord
import random
from asd import *
from mtgsdk import Card
from mtgsdk import Set
from mtgsdk import Type
from mtgsdk import Supertype
from mtgsdk import Subtype
from mtgsdk import Changelog
client=discord.Client()
@client.event
async def on_ready():
    """Log the bot's identity once the gateway connection is established."""
    for item in ('logged in as', client.user.name, client.user.id, '-----'):
        print(item)
@client.event
async def on_message(message):
    """Handle chat commands.

    Supported commands:
      * "(debug 124) <text>" - set the bot's presence text (debug helper).
      * mentioning the bot   - send the help/credits embed.
      * "~Price <card name>" - send pricing info via the cardPrice helper.
      * "~Card <card name>"  - look the card up through the mtg-sdk and send
                               a detailed embed.
    """
    if message.content.startswith("(debug 124)"):
        x = message.content.replace("(debug 124)","")
        await client.change_presence(status=discord.Status.online, activity=discord.Game(x))
    elif message.content == "<@630330114605056011>":
        command1 = "**~Jesus**"
        Helpmessage = """
**Thanks for adding me to {}**!
***Description***
Hello my child, I shall share images of myself with you if you call upon me using my command.
***Commands***
{}
***Support The Creator***
Please support the creator by sharing me to other servers using the following link:
https://discordapp.com/api/oauth2/authorize?client_id=630330114605056011&permissions=0&scope=bot
or through the following links.
-<:patreon:630306170791395348> https://www.patreon.com/b9king
-<:paypal:630306883105849354> https://www.paypal.com/paypalme2/b9king
Or you can visit him here: https://benignking.xyz :heart:
""".format(message.guild.name,command1)
        embed=discord.Embed(title="", url="https://www.patreon.com/b9king", description= Helpmessage, color=0x00ffff)
        embed.set_thumbnail(url= message.mentions[0].avatar_url)
        await message.channel.send(embed=embed)
    #_________________________________________________________________
    #__________________MTG CARD INFO__________________________________
    if message.content.startswith("~Price"):
        card_name = message.content.replace("~Price ","")
        x = cardPrice(card_name)
        prices = "".join(price_line + "\n" for price_line in x["prices"])
        embed=discord.Embed(title= x["name"], description = prices , color=0x746A69)
        embed.set_thumbnail(url= "http://www.manaleak.com/mtguk/files/2012/12/mtg-money.jpg")
        await message.channel.send(embed=embed)
    if message.content.startswith("~Card "):
        await message.channel.send("I'm working on it")
        card_name = message.content.replace("~Card ", "")
        cards = Card.where(name=card_name).all()
        # FIX: the original indexed cards[0] unconditionally and crashed with
        # IndexError when the search returned no results.
        if not cards:
            await message.channel.send("No cards found.")
            return
        card = cards[0]
        color = {"White" : 0xd2d2c1, "Green" : 0x008000, "Black" : 0x000000 , "Red" : 0xff0000 , "Blue" : 0x0080c0 , "None" : 0x808080}
        # FIX: colors may be None as well as empty; truthiness covers both
        # (the original called len() on it and crashed on None).
        c = card.colors[0] if card.colors else "None"
        manas = { "9" : "<:9_:549923808560021505>", "8" : "<:8_:549923808291848194>", "7" : "<:7_:549923035289878558>", "6" : "<:6_:549923035294072847>", "5" : "<:5_:549923035289878548>", "4" : "<:4_:549923035302592512>", "3" : "<:3_:549923035486879744>", "2" : "<:2_:549923035298136104>", "1" : "<:1_:549923035306655744>", "0" : "<:0_:549923035998715914>","X" : "<:x_:549923035382022145>","W" : "<:White:549913967762341888>","B" : "<:Black:549911363607199754>","U" : "<:Blue:549913606347816961>","R" : "<:Red:549910969149816842>","G" : "<:Green:549911048925347850>"}
        # Build the mana-cost emoji string from the raw "{2}{W}{W}" cost.
        cost = ""
        if card.mana_cost is not None:
            for i in card.mana_cost:
                if i in manas:
                    cost = cost + manas[i]
        # Map "{X}"-style symbols in rules text to the same custom emoji,
        # plus the tap and generic-colorless symbols (same emoji set the
        # original applied via a long chain of .replace() calls).
        symbol_emoji = {'{' + symbol + '}': emoji for symbol, emoji in manas.items()}
        symbol_emoji["{C}"] = "<:1_:549923035306655744>"
        symbol_emoji["{T}"] = "<:tap:549932371806388259>"
        # FIX: card.text may be None (e.g. vanilla creatures); the original
        # called .replace() on it and crashed.
        desc = card.text or ""
        for symbol, emoji in symbol_emoji.items():
            desc = desc.replace(symbol, emoji)
        embed = discord.Embed(
            title = card.name + " " + cost,
            # FIX: .get() avoids a KeyError for colors outside the palette.
            colour = color.get(c, color["None"])
        )
        # FIX: legalities/rulings can be None; iterate an empty list instead.
        legal = ""
        for entry in (card.legalities or []):
            legal = legal + entry["format"] + " : "
            legal = legal + ("✅" if entry["legality"] == "Legal" else "🚫")
            legal = legal + "\n"
        rules = ""
        for ruling in (card.rulings or []):
            rules = rules + ruling["date"] + " : " + ruling["text"] + "\n"
        embed.set_image(url = card.image_url) #card picture
        # Only add fields whose values are non-empty (Discord rejects empty
        # field values).
        if desc:
            embed.add_field(name = "Text", value = desc, inline = False)
        if card.flavor != None:
            embed.add_field(name = "Flavor", value = card.flavor, inline = False)
        if card.power != None:
            embed.add_field(name = "Power/Toughness", value = card.power + "/" + card.toughness , inline = False)
        if card.type != None:
            embed.add_field(name = "Type", value = card.type , inline = False)
        if card.set_name != None:
            embed.add_field(name = "Set", value = card.set_name, inline = False)
        if card.rarity != None:
            embed.add_field(name = "Rarity", value = card.rarity, inline = False)
        if card.loyalty != None:
            embed.add_field(name = "Loyalty", value = card.loyalty, inline = False)
        if card.artist != None:
            embed.add_field(name = "Artist", value = card.artist, inline = False)
        if card.variations != None:
            embed.add_field(name = "Variations", value = card.variations, inline = False)
        if card.release_date != None:
            embed.add_field(name = "Release Date", value = card.release_date, inline = False)
        if rules != None and len(rules) != 0:
            embed.add_field(name = "RULINGS", value = rules, inline = False)
        if legal:
            embed.add_field(name = "Legalities", value = legal , inline = False)
        embed.set_footer(text="Coded by : B9king", icon_url="https://cdn6.aptoide.com/imgs/8/2/3/823240ba13a239948950f78f38b1f1d9_icon.png?w=256") #My credit
        await message.channel.send(embed=embed)
client.run('NjM1NTk0MzE3MDAzNjIwMzYy.XazWfA.7KMudkJo_OD7R9UpAt1Rv55ipCs') | 42.076923 | 569 | 0.553199 | 0 | 0 | 0 | 0 | 7,872 | 0.958831 | 7,842 | 0.955177 | 2,978 | 0.362728 |
9f74f5e7280e8a768862ffba117b0d9c83db850f | 543 | py | Python | docs/webflask/quiz_orm/quiz2_sa/app.py | damiankarol7/python101 | 1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b | [
"MIT"
] | 44 | 2015-02-11T19:10:37.000Z | 2021-11-11T09:45:43.000Z | docs/webflask/quiz_orm/quiz2_sa/app.py | damiankarol7/python101 | 1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b | [
"MIT"
] | 9 | 2015-02-06T21:26:25.000Z | 2022-03-31T10:44:22.000Z | docs/webflask/quiz_orm/quiz2_sa/app.py | damiankarol7/python101 | 1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b | [
"MIT"
] | 172 | 2015-06-13T07:16:24.000Z | 2022-03-30T20:41:11.000Z | # -*- coding: utf-8 -*-
# quiz-orm/app.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)

# application configuration
app.config.update(dict(
    SECRET_KEY='bardzosekretnawartosc',  # NOTE(review): hard-coded secret key — load from the environment in production
    DATABASE=os.path.join(app.root_path, 'quiz.db'),
    SQLALCHEMY_DATABASE_URI='sqlite:///' +
    os.path.join(app.root_path, 'quiz.db'),
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    TYTUL='Quiz ORM SQLAlchemy'
))

# create the database instance used by the models
baza = SQLAlchemy(app)
| 24.681818 | 67 | 0.699816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.343119 |
9f751bbb91c8500eae6e90b0594cc10254454ae4 | 100 | py | Python | web/cache.py | therealplato/vim-awesome | 54e33073f4d470230e4578239ceb8f203f5f6613 | [
"MIT"
] | 1,379 | 2016-04-10T06:35:30.000Z | 2022-03-31T16:07:38.000Z | web/cache.py | therealplato/vim-awesome | 54e33073f4d470230e4578239ceb8f203f5f6613 | [
"MIT"
] | 119 | 2016-04-10T04:26:48.000Z | 2022-03-25T15:10:06.000Z | web/cache.py | therealplato/vim-awesome | 54e33073f4d470230e4578239ceb8f203f5f6613 | [
"MIT"
] | 83 | 2016-04-10T11:32:03.000Z | 2022-03-16T01:56:35.000Z | """Module to hold the instance of the cache"""
from flask.ext.cache import Cache
cache = Cache()
| 14.285714 | 46 | 0.71 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.46 |
9f75493254c668925a5805aa8390f3fd04f12976 | 2,143 | py | Python | ontologyExtraction/testOntologyExtractionPerformance.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | 6 | 2017-07-18T15:28:33.000Z | 2020-03-03T14:45:45.000Z | ontologyExtraction/testOntologyExtractionPerformance.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | null | null | null | ontologyExtraction/testOntologyExtractionPerformance.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | 3 | 2017-09-09T00:36:48.000Z | 2020-03-03T14:45:49.000Z | #!/usr/bin/env python3
import datetime
import json
import loadPath # Adds the project path.
import linkograph.labels as llabels
import linkograph.linkoCreate as llinkoCreate
import ontologyExtraction as oe
import os
import sys
def print_usage():
    """Print the command-line usage string for this script."""
    print("usage: {} <JSON commands filename> ...".format(sys.argv[0]))
def bulk_transform(method):
    """Label every session file named on argv and turn its linkograph into an ontology.

    method: 0 -> simple extraction, 1 -> threshold extraction (anything else
    just prints a warning for that session).
    """
    for session_filename in sys.argv[1:]:
        #########################
        # create input linkograph
        #########################
        # FIX: use context managers so the file handles are closed even when
        # json.load or the labeler raises (the originals were left open).
        with open("abstraction.json", "r") as label_rules:
            labeler = llabels.Labeler(json.load(label_rules))
        with open(session_filename, "r") as commands:
            json_commands = json.load(commands)
        labeled = labeler.labelCommands(json_commands, "NoLabel")
        llabels.writeLabelsToJsonFile(labeled, "labeled.json")
        with open("ontology.json", "r") as ontology_file, \
                open("labeled.json", "r") as inv_labeling_file:
            lg = llinkoCreate.createLinko(json.load(inv_labeling_file), json.load(ontology_file))
        ##################################
        # transform linkograph to ontology
        ##################################
        # The result is intentionally discarded: this script only measures
        # how long each extraction method takes.
        if 0 == method:
            extracted_ontology = oe.simple_lg_to_ontology(lg)
        elif 1 == method:
            extracted_ontology = oe.threshold_lg_to_ontology(lg)
        else:
            print("unknown method:", method)
        #########
        # cleanup
        #########
        # Remove the temporary label file created above.
        os.remove("labeled.json")
if "__main__" == __name__:
if 2 > len(sys.argv):
print_usage()
exit()
simple_start_datetime = datetime.datetime.now()
bulk_transform(0)
simple_end_datetime = datetime.datetime.now()
bulk_transform(1)
threshold_end_datetime = datetime.datetime.now()
simple_run_datetime = simple_end_datetime - simple_start_datetime
threshold_run_datetime = threshold_end_datetime - simple_end_datetime
print("simple run time:", simple_run_datetime)
print("threshold run time:", threshold_run_datetime)
| 28.573333 | 93 | 0.619692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.209986 |
9f7671b22d25aa69840f2f73e18739e85cea6408 | 1,523 | py | Python | app/v2/template/post_template.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | 51 | 2016-04-03T23:36:17.000Z | 2022-03-21T20:04:52.000Z | app/v2/template/post_template.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | 1,335 | 2015-12-15T14:28:50.000Z | 2022-03-30T16:24:27.000Z | app/v2/template/post_template.py | alphagov/notify-notifications-api | e604385e0cf4c2ab8c6451b7120ceb196cce21b5 | [
"MIT"
] | 30 | 2016-01-08T19:05:32.000Z | 2021-12-20T16:37:23.000Z | from flask import jsonify, request
from app import authenticated_service
from app.dao import templates_dao
from app.schema_validation import validate
from app.v2.errors import BadRequestError
from app.v2.template import v2_template_blueprint
from app.v2.template.template_schemas import (
create_post_template_preview_response,
post_template_preview_request,
)
from app.v2.utils import get_valid_json
@v2_template_blueprint.route("/<template_id>/preview", methods=['POST'])
def post_template_preview(template_id):
    """Render a preview of a template with the supplied personalisation values."""
    # An empty request body is allowed: it means the template has no
    # placeholders to fill in.
    raw_body = request.get_data(as_text=True)
    payload = get_valid_json() if raw_body else {}
    payload['id'] = template_id
    data = validate(payload, post_template_preview_request)

    template = templates_dao.dao_get_template_by_id_and_service_id(
        template_id, authenticated_service.id)
    template_object = template._as_utils_template_with_personalisation(
        data.get('personalisation'))

    # Reject the request if any placeholder is left unfilled.
    check_placeholders(template_object)

    return jsonify(create_post_template_preview_response(
        template=template, template_object=template_object)), 200
def check_placeholders(template_object):
    """Raise BadRequestError when any personalisation placeholder is missing."""
    if not template_object.missing_data:
        return
    message = 'Missing personalisation: {}'.format(", ".join(template_object.missing_data))
    raise BadRequestError(message=message, fields=[{'template': message}])
9f76e0fb3dc684661b0f1ce53cf8d60b14cc6636 | 357 | py | Python | mdm_inventory/clients/graphql/query.py | TeamWalls/mdm-backend-django | 4e23f9abc8531eb786d5e6cf958c9ffa8acd6b1d | [
"MIT"
] | null | null | null | mdm_inventory/clients/graphql/query.py | TeamWalls/mdm-backend-django | 4e23f9abc8531eb786d5e6cf958c9ffa8acd6b1d | [
"MIT"
] | null | null | null | mdm_inventory/clients/graphql/query.py | TeamWalls/mdm-backend-django | 4e23f9abc8531eb786d5e6cf958c9ffa8acd6b1d | [
"MIT"
] | null | null | null | import graphene
from graphene_django import DjangoObjectType
#models
from mdm_inventory.clients.models import Client
#types
from mdm_inventory.clients.graphql.types import ClientType
class QueryClient(graphene.ObjectType):
    """GraphQL query root exposing the list of clients."""

    # List of ClientType records returned by resolve_clients below.
    clients = graphene.List(ClientType)

    def resolve_clients(root, info):
        # Only active clients are exposed through the API.
        return Client.objects.filter(is_active=True)
| 23.8 | 58 | 0.803922 | 170 | 0.47619 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.036415 |
9f778f6dd25faa594cf09e778443b905c376ef79 | 4,923 | py | Python | bnn_vi/data_sampling.py | Daniil-Selikhanovych/bnn-vi | 6788edc1438c66609abca249e33a81da7a0ff1a2 | [
"MIT"
] | 3 | 2020-06-13T22:40:38.000Z | 2020-10-23T17:34:25.000Z | bnn_vi/data_sampling.py | Daniil-Selikhanovych/bnn-vi | 6788edc1438c66609abca249e33a81da7a0ff1a2 | [
"MIT"
] | null | null | null | bnn_vi/data_sampling.py | Daniil-Selikhanovych/bnn-vi | 6788edc1438c66609abca249e33a81da7a0ff1a2 | [
"MIT"
] | 1 | 2022-02-23T18:10:07.000Z | 2022-02-23T18:10:07.000Z | import torch
from torch.utils.data import Dataset, DataLoader
from torch.distributions.multivariate_normal import MultivariateNormal
import numpy as np
from tqdm import tqdm
import random
def get_rotation(theta):
    """Return the 2x2 rotation matrix for an angle given in degrees."""
    rad = np.radians(theta)
    cos_t = np.cos(rad)
    sin_t = np.sin(rad)
    return np.array([[cos_t, -sin_t],
                     [sin_t, cos_t]])
class CircleDataset(Dataset):
    """Gaussian-blob classification dataset with centers arranged on a circle.

    n_centers cluster centers are placed evenly on a circle of the given
    radius (optionally with one center at the origin when include_zero is
    True).  Each sample is drawn from an isotropic Gaussian of scale `sigma`
    around one uniformly-chosen center; its target is a one-vs-rest encoding
    (+target_label for the true class, -target_label elsewhere) perturbed by
    Gaussian noise of scale `ysigma`.
    """

    def __init__(self, n_samples, n_centers=9, sigma=0.1, ysigma=0.01, include_zero=True,
                 target_label=1., seed=None, radius=1.):
        super().__init__()
        # FIX: idiomatic identity check (original used `seed != None`).
        if seed is not None:
            random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
        self.include_zero = include_zero
        self.nus = []  # cluster centers
        if include_zero:
            self.nus.append(torch.zeros(2))
        self.sigma = sigma
        self.ysigma = ysigma
        self.radius = radius
        # Remaining centers are spaced evenly around the circle.
        for i in range(n_centers - include_zero):
            R = get_rotation(i * 360 / (n_centers - include_zero))
            self.nus.append(torch.tensor([radius, 0] @ R, dtype=torch.float))
        # Assign each sample to a center uniformly at random.
        classes = torch.multinomial(torch.ones(n_centers), n_samples,
                                    replacement=True)
        data = []
        target = []
        for i in range(n_centers):
            # FIX: convert the count to a plain int once; passing a 0-dim
            # tensor as a size to torch.full/torch.randn relies on
            # implementation-specific behavior.
            n_samples_class = torch.sum(classes == i).item()
            if n_samples_class == 0:
                continue
            dist = MultivariateNormal(self.nus[i],
                                      torch.eye(2) * sigma ** 2)
            data.append(dist.sample([n_samples_class]))
            # One-vs-rest target encoding with additive label noise.
            enc = torch.full((n_samples_class, n_centers), -target_label)
            enc[:, i] = target_label
            target.append(enc + ysigma * torch.randn(n_samples_class)[:, None])
        self.data = torch.cat(data).float()
        self.target = torch.cat(target).float()

    def __getitem__(self, idx):
        # (2-d input point, n_centers-dim noisy one-vs-rest target)
        return self.data[idx], self.target[idx]

    def __len__(self):
        return self.data.shape[0]
def gaussian_sampler_2d(gaussian_center, cov_matrix):
    """Build a multivariate normal distribution with the given mean and covariance."""
    return MultivariateNormal(gaussian_center, cov_matrix)
def gaussian_data_sampling(gaussian_center, cov_matrix, data_num, seed=None):
    """Draw data_num samples from N(gaussian_center, cov_matrix), optionally seeding all RNGs."""
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
    # The one-line sampler helper is inlined here; behavior is unchanged.
    distribution = MultivariateNormal(gaussian_center, cov_matrix)
    return distribution.sample(sample_shape=torch.Size([data_num]))
def gaussian_mixture_data_sampling(centers, cov_matrix, data_num, seed=None, device=None):
    """Draw data_num samples from an equal-weight Gaussian mixture.

    Each sample picks one row of `centers` uniformly at random and draws a
    single point from N(center, cov_matrix).  The per-draw RNG sequence is
    identical to the original implementation, so seeded output is unchanged.

    Note: `device` is accepted for interface compatibility but unused, as in
    the original implementation.
    """
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
    # FIX: the original crashed with IndexError for data_num == 0.
    if data_num == 0:
        return torch.empty((0,) + tuple(centers.shape[1:]))
    index_to_choice = np.random.randint(centers.shape[0], size=data_num)
    # FIX: collect the per-sample draws and concatenate once at the end
    # instead of calling torch.cat inside the loop (quadratic in data_num).
    samples = [
        MultivariateNormal(centers[idx], cov_matrix).sample(sample_shape=torch.Size([1]))
        for idx in index_to_choice
    ]
    return torch.cat(samples, 0)
def model_1d(data):
    """Ground-truth 1-D regression target: sin(12x) + 0.66 * cos(25x) + 3."""
    oscillation = torch.sin(12 * data)
    ripple = 0.66 * torch.cos(25 * data)
    return oscillation + ripple + 3
def noise_labels_model(real_labels, sigma_noise, seed=None):
    """Add i.i.d. N(0, sigma_noise^2) noise to a 1-D tensor of labels."""
    # Standard normal; scaled by sigma_noise below.
    standard_normal = torch.distributions.Normal(0., 1.)
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
    noise = standard_normal.rsample([real_labels.shape[0]])
    return real_labels + noise * sigma_noise
def get_sample_regression(n_samples, noise=0.1, seed=42):
    """
    Returns (x_train, y_train), (x_true, y_true)
    """
    # Training inputs: 1-D Gaussian mixture centered at +-1/sqrt(2).
    gaussian_centers = torch.Tensor([[-1.0 / (2 ** 0.5)], [1.0 / (2 ** 0.5)]])
    cov_matrix_default = 0.01 * torch.eye(1)
    data_1d = gaussian_mixture_data_sampling(gaussian_centers,
                                             cov_matrix_default,
                                             n_samples,
                                             seed)
    # Noisy training targets from the ground-truth model.
    clean_targets = model_1d(data_1d[:, 0])
    noisy_targets = noise_labels_model(clean_targets,
                                       sigma_noise=noise,
                                       seed=seed).reshape((clean_targets.shape[0], 1))
    # Dense grid of the true function for plotting/evaluation.
    grid = torch.linspace(-1, 1, steps=1000)
    true_on_grid = model_1d(grid)
    return (data_1d[:, 0], noisy_targets[:, 0]), (grid, true_on_grid)
9f785e07c83c19573eabf2bf59f8baedfd273e7b | 125 | py | Python | damgard_jurik/__init__.py | NCGThompson/damgard-jurik | 5471ec2eb098381dd4dc37fac6b041a010290960 | [
"MIT"
] | 7 | 2019-05-14T02:41:44.000Z | 2022-01-11T17:22:06.000Z | damgard_jurik/__init__.py | NCGThompson/damgard-jurik | 5471ec2eb098381dd4dc37fac6b041a010290960 | [
"MIT"
] | 5 | 2019-06-14T07:56:20.000Z | 2021-04-30T03:46:32.000Z | damgard_jurik/__init__.py | NCGThompson/damgard-jurik | 5471ec2eb098381dd4dc37fac6b041a010290960 | [
"MIT"
] | 2 | 2021-01-08T11:19:10.000Z | 2021-04-27T15:22:47.000Z | #!/usr/bin/env python3
from damgard_jurik.crypto import EncryptedNumber, PrivateKeyRing, PrivateKeyShare, PublicKey, keygen
| 31.25 | 100 | 0.832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.176 |
9f7916fb0cf96de2e606af6f56ed3dba57c56995 | 2,387 | py | Python | src/todo/service/sqlalchemy_todo_service.py | MarkStefanovic/todo-api | fb6198511712df853e693787839533f0c9956178 | [
"MIT"
] | null | null | null | src/todo/service/sqlalchemy_todo_service.py | MarkStefanovic/todo-api | fb6198511712df853e693787839533f0c9956178 | [
"MIT"
] | null | null | null | src/todo/service/sqlalchemy_todo_service.py | MarkStefanovic/todo-api | fb6198511712df853e693787839533f0c9956178 | [
"MIT"
] | null | null | null | import datetime
import typing
from src import core
from src.todo import domain, adapter
__all__ = ("SqlAlchemyTodoService",)
class SqlAlchemyTodoService(domain.TodoService):
    """TodoService implementation backed by a SQLAlchemy unit of work."""

    def __init__(self, /, uow: core.SqlAlchemyUnitOfWork):
        self._uow = uow

    def all(self, /, user_id: int) -> typing.List[domain.Todo]:
        """Return every todo belonging to the given user."""
        with self._uow:
            return self._repo.all(user_id)

    def add_todo(self, *, user_id: int, todo: domain.Todo) -> domain.Todo:
        """Persist a new todo for the user and return the stored entity."""
        with self._uow:
            new_todo = self._repo.add(user_id=user_id, item=todo)
            self._uow.commit()
            return new_todo

    def delete_todo(self, *, user_id: int, todo_id: int) -> None:
        """Remove the todo with the given id for the user."""
        with self._uow:
            self._repo.remove(user_id=user_id, item_id=todo_id)
            self._uow.commit()

    def get_by_id(self, *, user_id: int, todo_id: int) -> typing.Optional[domain.Todo]:
        """Fetch one todo by id, enforcing ownership.

        Raises:
            ValueError: if todo_id is not positive.
            core.exception.AuthException: if the todo was not returned for
                this user (missing or owned by someone else).
        """
        # FIX: validate with a real exception; the original used `assert`,
        # which is silently stripped when Python runs with -O.
        if todo_id <= 0:
            raise ValueError(f"Todo id values should be positive, but got {todo_id!r}.")
        with self._uow:
            todo = self._repo.get_by_id(user_id=user_id, todo_id=todo_id)
            if todo and todo.user_id == user_id:
                return todo
            else:
                raise core.exception.AuthException("Todo belongs to another user")

    def get_current_todos(
        self,
        *,
        user_id: int,
        category: str,
        today: typing.Optional[datetime.date] = None,
    ) -> typing.List[domain.Todo]:
        """Return the user's todos in `category` that should display today.

        FIX: `today` now defaults to the current date at call time; the
        original default `datetime.date.today()` was evaluated once at import
        and went stale in a long-running process.
        """
        if today is None:
            today = datetime.date.today()
        return [
            todo
            for todo in self.all(user_id)
            if todo.display(today) and todo.category == category
        ]

    def get_todos_completed_today(
        self, *, user_id: int, today: typing.Optional[datetime.date] = None
    ) -> typing.List[domain.Todo]:
        """Return the user's todos completed on `today` (default: current date at call time)."""
        if today is None:
            today = datetime.date.today()
        return [todo for todo in self.all(user_id) if todo.date_completed == today]

    def mark_complete(self, *, user_id: int, todo_id: int) -> None:
        """Mark the given todo as completed."""
        with self._uow:
            self._repo.mark_completed(user_id=user_id, item_id=todo_id)
            self._uow.commit()

    def update_todo(self, *, user_id: int, todo: domain.Todo) -> domain.Todo:
        """Persist changes to an existing todo and return the updated entity."""
        with self._uow:
            updated_todo = self._repo.update(user_id=user_id, item=todo)
            self._uow.commit()
            return updated_todo

    @property
    def _repo(self) -> domain.TodoRepository:
        # Fresh repository bound to the unit of work's current session.
        return adapter.SqlAlchemyTodoRepository(self._uow.session)
9f79347db3c45d34cb02e6bd2e8159dde0e90c77 | 4,013 | py | Python | tests/parties/participants/test_view.py | PartyGwam/api | f580e29762990eabdb3bb5e317dee22c6c441696 | [
"MIT"
] | 1 | 2018-06-24T08:10:12.000Z | 2018-06-24T08:10:12.000Z | tests/parties/participants/test_view.py | PartyGwam/api | f580e29762990eabdb3bb5e317dee22c6c441696 | [
"MIT"
] | 48 | 2018-06-24T12:30:15.000Z | 2022-01-13T00:48:24.000Z | tests/parties/participants/test_view.py | PartyGwam/api | f580e29762990eabdb3bb5e317dee22c6c441696 | [
"MIT"
] | null | null | null | import datetime
from django.test import TestCase
from django.utils import timezone
from rest_framework import status
from rest_framework.test import APIRequestFactory, force_authenticate
from api.parties.participants.views import ParticipantsAPIView
from apps.parties.models import Party
from apps.users.models import User
class ParticipantAPIViewTest(TestCase):
    def setUp(self):
        """Create 6 users and 5 parties.

        parties[i] is owned by users[i], starts in (i+1)*10 days and holds
        i+2 people; users[0..i-1] are then added as extra participants, so
        parties[i] ends up with i+1 participants including the owner.
        """
        self.today = timezone.now()
        self.users = [
            User.objects.create_user(
                email='sample{}@gmail.com'.format(i + 1),
                password='sample_password{}'.format(i + 1),
                username='샘플 유저 {}'.format(i + 1)
            )
            for i in range(6)
        ]
        self.parties = [
            Party.objects.create_party(
                owner=self.users[i].profile,
                title='파티 제목 {}'.format(i + 1),
                place='파티 장소 {}'.format(i + 1),
                start_time=self.today + datetime.timedelta(days=(i + 1) * 10),
                max_people=(i + 2),
            )
            for i in range(5)
        ]
        # Add users[0..i-1] to parties[i] (parties[0] keeps only its owner).
        for i in range(1, 5):
            for j in range(i):
                self.parties[i].add_participants(self.users[j].profile)
        self.factory = APIRequestFactory()
        self.view = ParticipantsAPIView.as_view()
def _get_request_path(self, party_slug):
return '/api/parties/{}/participants'.format(party_slug)
def test_get_participants(self):
for i in range(5):
slug = self.parties[i].slug
path = self._get_request_path(slug)
request = self.factory.get(path)
force_authenticate(request, self.users[i])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['participants']), i + 1)
def test_participate(self):
slug = self.parties[0].slug
path = self._get_request_path(slug)
request = self.factory.post(path)
force_authenticate(request, self.users[1])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_participate_when_full(self):
self.parties[0].add_participants(self.users[1].profile)
slug = self.parties[0].slug
path = self._get_request_path(slug)
request = self.factory.post(path)
force_authenticate(request, self.users[2])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_participate_again(self):
slug = self.parties[0].slug
path = self._get_request_path(slug)
request = self.factory.post(path)
force_authenticate(request, self.users[0])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cancel_participate(self):
slug = self.parties[0].slug
path = self._get_request_path(slug)
request = self.factory.delete(path)
force_authenticate(request, self.users[0])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_cancel_participate_when_party_owner(self):
slug = self.parties[1].slug
path = self._get_request_path(slug)
request = self.factory.delete(path)
force_authenticate(request, self.users[1])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cancel_participate_when_not_participating(self):
slug = self.parties[0].slug
path = self._get_request_path(slug)
request = self.factory.delete(path)
force_authenticate(request, self.users[1])
response = self.view(request, slug=slug)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 34.008475 | 78 | 0.642911 | 3,710 | 0.918999 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.033936 |
9f7961daf22e80aa849ae0a67694bcc8b1f9979d | 348 | py | Python | setup.py | anthonyvallee/bettejpeg | eaa809f4d07d85274cd4ee4671352ff069f94307 | [
"Apache-2.0"
] | 1 | 2020-03-29T13:12:32.000Z | 2020-03-29T13:12:32.000Z | setup.py | anthonyvallee/bettejpeg | eaa809f4d07d85274cd4ee4671352ff069f94307 | [
"Apache-2.0"
] | 2 | 2016-10-23T21:15:52.000Z | 2016-12-08T07:25:07.000Z | setup.py | RentAPlace/python-betterjpeg | eaa809f4d07d85274cd4ee4671352ff069f94307 | [
"Apache-2.0"
] | null | null | null | from setuptools import (find_packages, setup)
from rap import betterjpeg
setup(
name=betterjpeg.__pkgname__,
description=betterjpeg.__description__,
version=betterjpeg.__version__,
packages=["rap.betterjpeg"],
entry_points="""
[console_scripts]
betterjpeg=rap.betterjpeg.betterjpeg:cli
"""
)
| 23.2 | 49 | 0.686782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.301724 |
9f7a4be8ce759b7e547712d816275d3d49b73d19 | 3,450 | py | Python | engine/db/org/db_org_parameter.py | datapunk2078/torro_community | 97a97c9d089b0a7b47ccdc28e4e077da36d4b85c | [
"MIT"
] | null | null | null | engine/db/org/db_org_parameter.py | datapunk2078/torro_community | 97a97c9d089b0a7b47ccdc28e4e077da36d4b85c | [
"MIT"
] | null | null | null | engine/db/org/db_org_parameter.py | datapunk2078/torro_community | 97a97c9d089b0a7b47ccdc28e4e077da36d4b85c | [
"MIT"
] | null | null | null |
class orgApiPara:
    """Request/response parameter schemas for the organization (LDAP) API.

    Each attribute maps a field name to ``{"type": <python type>,
    "default": <default value>}``.

    Fix: the two ``*_request`` attributes previously ended with a trailing
    comma after the closing brace, which made them one-element TUPLES
    containing a dict rather than dicts — inconsistent with the
    ``*_response`` attributes.  The commas are removed so all four schemas
    are plain dicts.  (Key name "use_sll" is kept as-is — likely a typo for
    "use_ssl", but it is part of the wire interface.)
    """

    # POST body schema for creating an org / LDAP configuration.
    setOrg_POST_request = {
        "host": {"type": str, "default": ''},
        "port": {"type": int, "default": 636},
        "cer_path": {"type": str, "default": ''},
        "use_sll": {"type": bool, "default": True},
        "admin": {"type": str, "default": ''},
        "admin_pwd": {"type": str, "default": ''},
        "admin_group": {"type": str, "default": ''},
        "base_group": {"type": str, "default": ''},
        "org_name": {"type": str, "default": ''},
        "des": {"type": str, "default": ''},
        "search_base": {"type": str, "default": ''},
    }

    # POST body schema for updating an existing org (adds "id").
    updateOrg_POST_request = {
        "id": {"type": int, "default": -1},
        "host": {"type": str, "default": ''},
        "port": {"type": int, "default": 636},
        "cer_path": {"type": str, "default": ''},
        "use_sll": {"type": bool, "default": True},
        "admin": {"type": str, "default": ''},
        "admin_pwd": {"type": str, "default": ''},
        "admin_group": {"type": str, "default": ''},
        "base_group": {"type": str, "default": ''},
        "org_name": {"type": str, "default": ''},
        "des": {"type": str, "default": ''},
        "search_base": {"type": str, "default": ''},
    }

    # Response schema for setOrg (adds the generated ldap/org ids).
    setOrg_POST_response = {
        "ldap_id": {"type": int, "default": -1},
        "org_id": {"type": int, "default": -1},
        "host": {"type": str, "default": ''},
        "port": {"type": int, "default": 636},
        "cer_path": {"type": str, "default": ''},
        "use_sll": {"type": bool, "default": True},
        "admin": {"type": str, "default": ''},
        "admin_pwd": {"type": str, "default": ''},
        "admin_group": {"type": str, "default": ''},
        "base_group": {"type": str, "default": ''},
        "org_name": {"type": str, "default": ''},
        "des": {"type": str, "default": ''},
        "search_base": {"type": str, "default": ''},
    }

    # Response schema for updateOrg.
    updateOrg_POST_response = {
        "ldap_id": {"type": int, "default": -1},
        "org_id": {"type": int, "default": -1},
        "host": {"type": str, "default": ''},
        "port": {"type": int, "default": 636},
        "use_sll": {"type": bool, "default": True},
        "cer_path": {"type": str, "default": ''},
        "admin": {"type": str, "default": ''},
        "admin_pwd": {"type": str, "default": ''},
        "admin_group": {"type": str, "default": ''},
        "base_group": {"type": str, "default": ''},
        "org_name": {"type": str, "default": ''},
        "des": {"type": str, "default": ''},
        "search_base": {"type": str, "default": ''},
    }
9f7b07ec93646ee4b6b36fc42d1cca113c97fedc | 72 | py | Python | TimeSeriesAnalysisWithPython-master/SciPyTimeSeries/snippets/custom_rolling.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | 2 | 2021-01-09T15:57:26.000Z | 2021-11-29T01:44:21.000Z | TimeSeriesAnalysisWithPython-master/SciPyTimeSeries/snippets/custom_rolling.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | 5 | 2019-11-15T02:00:26.000Z | 2021-01-06T04:26:40.000Z | TimeSeriesAnalysisWithPython-master/SciPyTimeSeries/snippets/custom_rolling.py | sunny2309/scipy_conf_notebooks | 30a85d5137db95e01461ad21519bc1bdf294044b | [
"MIT"
] | null | null | null | df.rolling(window = 10, center = False).apply(lambda x: x[1]/x[2])[1:10] | 72 | 72 | 0.652778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f7be079aecb0c7f1007be53df1d11761f197a8b | 1,653 | py | Python | CovarianceMatrix.py | Roundthecorner/CovarianceMatrix | 33c740b127b3307cc78c2558279a92605a759cd9 | [
"MIT"
] | null | null | null | CovarianceMatrix.py | Roundthecorner/CovarianceMatrix | 33c740b127b3307cc78c2558279a92605a759cd9 | [
"MIT"
] | null | null | null | CovarianceMatrix.py | Roundthecorner/CovarianceMatrix | 33c740b127b3307cc78c2558279a92605a759cd9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 23 11:28:30 2020
@author: rener
"""
import numpy as np
import pandas as pd
import os
from datetime import date
import time
import sys
# Work relative to the script's own directory so 'Stocks/' resolves correctly.
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
#%% For the various companies we have data going back differently far.
#
#
# Load every per-symbol CSV in Stocks/ into a list of DataFrames.
frames=[]
for file in os.listdir('Stocks'):
    frames.append(
        pd.read_csv('Stocks/' +file,index_col=0))
# For the various companies we have data going back differently far.
# So there is a decision to make: We could look for the shortest
# available timeseries, and trim all other datasets to the same length.
# But then whenever we compute a covariance for two longer datasets
# we will not use all available information.
# So we only trim every pair in the covariance computing function.
df=pd.concat(frames)
# Add column with Estimated Average of the day (mean of open/high/low/close).
df['EstAvg'] = df[['open','high','low','close']].apply(np.mean,axis=1)
df.to_csv('fulltable.csv')
#%%
# One column per symbol; symbols without data never appear as columns.
pivot = df.pivot(columns = 'symbol', values = 'EstAvg')
# Note that we are taking the symbols from the Pivot Table.
# This is the case, because when the Alphavantage API does not give
# us a dataset for some symbol, it does not appear in the pivot table,
# so we avoid a Key Error.
symbols = pivot.columns
# Next we initialize an 'empty' dataframe, and start filling it.
CovMatrix = pd.DataFrame(index=symbols,columns=symbols)
#%%
def covariance(a, b):
    """Population covariance of two series, trimming missing pairs.

    Fixes two defects in the original one-liner:
    * Misplaced parentheses computed ``mean(a - mean(a) * (b - mean(b)))``
      instead of ``mean((a - mean(a)) * (b - mean(b)))``.
    * The comments above promise that each pair is trimmed to its common
      support, but NaNs (rows where one series has no data) were fed
      straight into the mean; rows where either value is NaN are now
      dropped before computing the covariance.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    valid = ~(np.isnan(a) | np.isnan(b))
    a, b = a[valid], b[valid]
    return np.mean((a - np.mean(a)) * (b - np.mean(b)))
# Fill the (symmetric) covariance matrix one cell at a time.
for col in CovMatrix:
    for row in CovMatrix.index:
        # The original used chained assignment (CovMatrix[row][col] = ...),
        # which writes to the cell at column `row`, index `col` via an
        # intermediate Series and is not guaranteed to write through in
        # modern pandas.  .loc[col, row] targets the same cell directly.
        CovMatrix.loc[col, row] = covariance(pivot[row], pivot[col])
| 25.828125 | 71 | 0.715064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 965 | 0.583787 |
9f7d06f6be365e2194cba4024a296f967397ecca | 434 | py | Python | source/online_catalog_scraper/online_scraper.py | kevinliang43/ProjectDB | 6ffd435cb01658e16f0da272bb3f8ec2faeef73c | [
"Apache-2.0"
] | null | null | null | source/online_catalog_scraper/online_scraper.py | kevinliang43/ProjectDB | 6ffd435cb01658e16f0da272bb3f8ec2faeef73c | [
"Apache-2.0"
] | null | null | null | source/online_catalog_scraper/online_scraper.py | kevinliang43/ProjectDB | 6ffd435cb01658e16f0da272bb3f8ec2faeef73c | [
"Apache-2.0"
] | null | null | null | import bs4
import urllib
from base_online_scraper import base_online_scraper as scraper
# Catalog root and the A-Z course-description index page.
BASE_URL = 'http://catalog.northeastern.edu'
INITIAL_PATH = '/course-descriptions/'
# NOTE(review): urllib.urlopen is Python 2 API — presumably this script
# targets Python 2; confirm before porting.
fp = urllib.urlopen(BASE_URL + INITIAL_PATH)
soup = bs4.BeautifulSoup(fp, 'lxml')
# All subject links in the "atozindex" navigation block.
nav_menu = soup.find("div", {"id": "atozindex"}).find_all('a', href=True)
# Rebinds the imported class name to an instance; reused for every subject.
scraper = scraper('')
for a in nav_menu:
    scraper.url = BASE_URL + a['href']
    scraper.scrape()
| 24.111111 | 73 | 0.723502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.214286 |
9f7e2802591fddf966e77ff0e8ee36df43d62e23 | 2,343 | py | Python | dummyapi/prediction.py | BW-Saltiest-Hacker-News-Trolls/flask_backend | 66e7abed5753478f0e93d7e20e5406da04abf294 | [
"MIT"
] | null | null | null | dummyapi/prediction.py | BW-Saltiest-Hacker-News-Trolls/flask_backend | 66e7abed5753478f0e93d7e20e5406da04abf294 | [
"MIT"
] | 1 | 2020-09-26T00:39:58.000Z | 2020-09-26T00:39:58.000Z | dummyapi/prediction.py | crsanderford/unit3-build-dummy-api | 66e7abed5753478f0e93d7e20e5406da04abf294 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import re
import spacy
import string
import sklearn
from html.parser import HTMLParser
from joblib import load
import tensorflow as tf
# --- Model artifacts loaded once at import time ---
nlp = spacy.load('en_core_web_sm')
tfidf = load('tfidf.joblib')  # fitted TF-IDF vectorizer
wordlist = load('wordlist.joblib')  # feature names/order the model expects
tf.keras.backend.clear_session()
model = tf.keras.models.load_model('simple_nn.h5')
# TF1-style setup: pre-build the predict function and capture the default
# graph — presumably so predictions can run from worker threads via
# `with graph.as_default()` (see model_predict); confirm TF1.x is in use.
model._make_predict_function()
graph = tf.get_default_graph()
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps only the text nodes of a document.

    Feed it markup with ``feed()``, then call ``get_data()`` to get the
    collected text chunks joined by single spaces.
    """

    def __init__(self):
        super().__init__()
        self.reset()
        self._text_chunks = []

    def handle_data(self, data):
        # Invoked by HTMLParser for every run of raw text between tags.
        self._text_chunks.append(data)

    def get_data(self):
        # Space-join the chunks, matching the original behavior exactly.
        return ' '.join(self._text_chunks)
def strip_tags(html):
    """Return the text content of *html* with all markup removed."""
    parser = MLStripper()
    parser.feed(html)
    return parser.get_data()
def preprocess(text):
    """Clean one raw text input.

    Pipeline: strip HTML tags, collapse runs of whitespace to single
    spaces, lowercase, delete all punctuation, and trim the ends.
    """
    cleaned = strip_tags(text)
    cleaned = re.sub('\s+', ' ', cleaned).lower()
    cleaned = str(re.sub('[{}]'.format(string.punctuation), '', cleaned))
    return cleaned.strip()
def vectorize(text):
    """TF-IDF vectorize one (already preprocessed) text.

    Tokenizes and lemmatizes with the module-level spaCy pipeline, applies
    the fitted TF-IDF transform, and returns a one-row DataFrame restricted
    to the feature columns in ``wordlist`` (the order the model expects).
    """
    doc = nlp.tokenizer(text)
    lemmatized = ' '.join(token.lemma_ for token in doc)
    dense = tfidf.transform([lemmatized]).todense()
    frame = pd.DataFrame(dense, columns=tfidf.get_feature_names())
    return frame[wordlist]
def model_predict(input):
    """Run the loaded Keras model inside the captured TF default graph.

    Needed in TF1.x so predictions work outside the thread that loaded the
    model.  `input` shadows the builtin, but renaming would change the
    call interface, so it is kept.
    """
    global graph
    with graph.as_default():
        output = model.predict(input)
    return output
def model_predict_single(text):
    """Clean, vectorize, and score a single text; return a numpy float.

    Fix: previously this called ``model.predict`` directly, bypassing the
    ``with graph.as_default()`` context that ``model_predict`` provides
    (and that ``model_predict_df`` already uses) — under TF1.x that can
    fail off the loading thread.  It now routes through ``model_predict``
    with the vectorized frame, consistent with ``model_predict_df``.
    """
    text = preprocess(text)
    predicted = np.float64(model_predict(vectorize(text))[0][0])
    return predicted
def model_predict_df(df_in):
    """Clean and vectorize every row of *df_in* and return predictions.

    *df_in* must have a ``'text'`` column; returns a Series named
    ``'model_output'`` with one prediction per row.

    Fix: the original hard-coded ``X_pred.shape = (9970)``, which only
    worked for one specific dataset size; the prediction array is now
    flattened to 1-D regardless of input length.
    """
    df = df_in.copy()
    df['clean_text'] = df['text'].apply(preprocess)
    X_test = df['clean_text'].copy()
    X_test = X_test.apply(nlp.tokenizer)
    X_test = X_test.apply(lambda x: [token.lemma_ for token in x])
    X_vec = tfidf.transform(X_test.astype(str))
    X_vec_frame = pd.DataFrame(X_vec.todense(), columns=tfidf.get_feature_names())
    X_vec_frame = X_vec_frame[wordlist]
    X_pred = model_predict(X_vec_frame)
    # Flatten (n, 1) model output to (n,) for any n.
    X_pred = np.asarray(X_pred).reshape(-1)
    model_pred = pd.Series(data=X_pred, name='model_output')
    return model_pred
| 27.892857 | 92 | 0.674776 | 238 | 0.101579 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.157917 |
9f80aa8cd5c991f5a585c0a271b19cf8e97f19c9 | 141 | py | Python | lit/fields/text.py | velvetkeyboard/py-lit | 2bdc722e251d2c53ed19ad0e82e2447d9cdda8f9 | [
"Unlicense"
] | null | null | null | lit/fields/text.py | velvetkeyboard/py-lit | 2bdc722e251d2c53ed19ad0e82e2447d9cdda8f9 | [
"Unlicense"
] | null | null | null | lit/fields/text.py | velvetkeyboard/py-lit | 2bdc722e251d2c53ed19ad0e82e2447d9cdda8f9 | [
"Unlicense"
] | null | null | null | from lit.fields.base import Field
from lit.fields.base import TextType
class TextField(Field):
    """Field subclass for free-form text columns."""
    # SQL column type emitted for this field.
    sql_type = TextType()
    # Python type that values of this field map to.
    py_type = str
| 17.625 | 36 | 0.737589 | 67 | 0.475177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f81339892faac8ee15eada26003419db9fd29d9 | 1,763 | py | Python | src/evidently/model_profile/sections/cat_target_drift_profile_section.py | jenoOvchi/evidently | 6ca36d633ee258442410ef47a219ff40b8a5097b | [
"Apache-2.0"
] | null | null | null | src/evidently/model_profile/sections/cat_target_drift_profile_section.py | jenoOvchi/evidently | 6ca36d633ee258442410ef47a219ff40b8a5097b | [
"Apache-2.0"
] | null | null | null | src/evidently/model_profile/sections/cat_target_drift_profile_section.py | jenoOvchi/evidently | 6ca36d633ee258442410ef47a219ff40b8a5097b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Type
from evidently.analyzers.base_analyzer import Analyzer
from evidently.analyzers.cat_target_drift_analyzer import CatTargetDriftAnalyzer
from evidently.model_profile.sections.base_profile_section import ProfileSection
class CatTargetDriftProfileSection(ProfileSection):
    """Profile section reporting categorical target/prediction drift."""

    def part_id(self) -> str:
        return 'cat_target_drift'

    def __init__(self) -> None:
        super().__init__()
        self.analyzers_types = [CatTargetDriftAnalyzer]
        self._result = None

    def analyzers(self) -> Iterable[Type[Analyzer]]:
        return self.analyzers_types

    def calculate(self, reference_data, current_data, column_mapping, analyzers_results) -> None:
        """Assemble the section payload from the analyzer's results."""
        result = CatTargetDriftAnalyzer.get_results(analyzers_results)
        result_json: Dict[str, Any] = result.columns.as_dict()
        metrics: Dict[str, Any] = {}
        result_json['metrics'] = metrics
        # Either metric group is included only when the analyzer produced it.
        if result.target_metrics:
            metrics['target_name'] = result.target_metrics.column_name
            metrics['target_type'] = 'cat'
            metrics['target_drift'] = result.target_metrics.drift
        if result.prediction_metrics:
            metrics['prediction_name'] = result.prediction_metrics.column_name
            metrics['prediction_type'] = 'cat'
            metrics['prediction_drift'] = result.prediction_metrics.drift
        self._result = {
            'name': self.part_id(),
            'datetime': str(datetime.now()),
            'data': result_json
        }

    def get_results(self) -> Optional[dict]:
        return self._result
| 36.729167 | 97 | 0.697674 | 1,385 | 0.785593 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.116279 |
9f8168ed8352cd8534f1cb89d1dc8013ebfa7064 | 5,719 | py | Python | stores/apps/preferences/forms.py | diassor/CollectorCity-Market-Place | 892ad220b8cf1c0fc7433f625213fe61729522b2 | [
"Apache-2.0"
] | 135 | 2015-03-19T13:28:18.000Z | 2022-03-27T06:41:42.000Z | stores/apps/preferences/forms.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | null | null | null | stores/apps/preferences/forms.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | 83 | 2015-01-30T01:00:15.000Z | 2022-03-08T17:25:10.000Z | import re, logging
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from django.contrib.localflavor.us.forms import USStateSelect,\
USPhoneNumberField
from models import Preference, ShippingWeight, ShippingPrice, ShippingItem, TaxState, DnsShop, EmailNotification
from preferences.models import ShopPolicies
from auth.models import User
from users.models import Profile
class GeneralPreferenceForm(ModelForm):
    """General store settings: name plus optional contact email and phone."""
    email = forms.EmailField(required=False)
    phone = USPhoneNumberField(required=False)
    class Meta:
        model = Preference
        fields = ['name_store', 'email', 'phone']
class ProfileForm(ModelForm):
    """Address form for a user profile with per-field presence checks."""

    state = forms.CharField(widget=USStateSelect)

    class Meta:
        model = Profile
        fields = ['street_address', 'zip', 'city', 'state', 'country', ]

    def clean_zip(self):
        value = self.cleaned_data.get("zip", "")
        if not value.strip():
            raise forms.ValidationError("Zip is a required field.")
        # Accept 5-digit or ZIP+4 formats.
        if re.match("[0-9]{5}(-[0-9]{4})?$", value) is None:
            raise forms.ValidationError("Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX")
        return value

    def clean_country(self):
        value = self.cleaned_data.get("country", "")
        if not value.strip():
            raise forms.ValidationError("Country is a required field.")
        return value

    def clean_street_address(self):
        value = self.cleaned_data.get("street_address", "")
        if not value.strip():
            raise forms.ValidationError("Street is a required field.")
        return value

    def clean_city(self):
        value = self.cleaned_data.get("city", "")
        if not value.strip():
            raise forms.ValidationError("City is a required field.")
        return value
class TaxesPreferenceForm(ModelForm):
    """Store-wide tax toggles (same-state taxation, tax on shipping)."""
    class Meta:
        model = Preference
        fields = ['taxes_same_state_store', 'taxes_to_shipping_fees']
class TaxStateForm(ModelForm):
    """Create-form for a per-state tax rate, scoped to one shop."""

    #state = forms.CharField(widget=USStateSelect)
    tax = forms.DecimalField(help_text=_("Enter a state tax rate number (between 1 and 100)"))

    class Meta:
        model = TaxState
        exclude = ['shop']

    def __init__(self, shop, *args, ** kwargs):
        # Remember the owning shop so clean_state can check uniqueness.
        self.shop = shop
        super(TaxStateForm, self).__init__(*args, ** kwargs)

    def clean_state(self):
        state = self.cleaned_data['state']
        try:
            TaxState.objects.get(shop=self.shop, state=state)
        except TaxState.DoesNotExist:
            # No existing tax for this state/shop pair: value is acceptable.
            return state
        raise forms.ValidationError(_("A tax for state %s already exists." % state))

    def clean_tax(self):
        tax = self.cleaned_data['tax']
        if tax < 0:
            raise forms.ValidationError(_("A tax has to be more or equal 0%"))
        if tax > 100:
            raise forms.ValidationError(_("A tax has to be less than 100%"))
        return tax
class TaxStateEditForm(ModelForm):
    """Edit-form for an existing state tax; shop and state are fixed."""

    class Meta:
        model = TaxState
        exclude = ['shop', 'state']

    def __init__(self, shop, *args, ** kwargs):
        self.shop = shop
        super(TaxStateEditForm, self).__init__(*args, ** kwargs)

    def clean_tax(self):
        # Same 0..100 bounds as TaxStateForm.clean_tax.
        tax = self.cleaned_data['tax']
        if tax < 0:
            raise forms.ValidationError(_("A tax has to be more or equal 0%"))
        if tax > 100:
            raise forms.ValidationError(_("A tax has to be less than 100%"))
        return tax
class AuctionsPreferenceForm(ModelForm):
    """Auction behavior settings (session/open auctions, durations, auto-extend)."""
    class Meta:
        model = Preference
        fields = ['allow_sessions', 'allow_open_auctions', 'default_days', 'open_auto_extend', 'session_auto_extend']
class DnsShopForm(ModelForm):
    """Form that attaches a DNS name to a shop, enforcing global uniqueness."""

    class Meta:
        model = DnsShop
        exclude = ['shop']

    def clean_dns(self):
        dns = self.cleaned_data['dns']
        try:
            DnsShop.objects.get(dns=dns)
        except DnsShop.DoesNotExist:
            # DNS name is free to use.
            return dns
        raise forms.ValidationError(_("A shop with that dns already exists."))
class ShippingWeightForm(ModelForm):
    """Weight-based shipping tier; the shop is assigned by the view."""
    class Meta:
        model = ShippingWeight
        exclude = ['shop']
class ShippingPriceForm(ModelForm):
    """Price-based shipping tier; the shop is assigned by the view."""
    class Meta:
        model = ShippingPrice
        exclude = ['shop']
class ShippingItemForm(ModelForm):
    """Per-item shipping fee; the shop is assigned by the view."""
    class Meta:
        model = ShippingItem
        exclude = ['shop']
class EmailNotificationForm(ModelForm):
    """Edit the subject/body of a store email notification template."""
    class Meta:
        model = EmailNotification
        fields = ['subject', 'body']
class ShopPoliciesForm(ModelForm):
    """Edit the shop's refund, privacy, and terms-of-service texts."""
    class Meta:
        model = ShopPolicies
        fields = ['refund_policy', 'privacy_policy', 'terms_of_service']
class MarketingForm(ModelForm):
    """Marketing settings: validates the Google Analytics account number."""

    class Meta:
        model = Preference
        fields = ['google_analytics_account_number']

    def clean_google_analytics_account_number(self):
        # Expected shape: two word chars, dash, 4-8 digits, dash, one digit
        # (e.g. "UA-12345678-1").
        value = self.cleaned_data['google_analytics_account_number']
        if re.match(r"^\w{2}\-\d{4,8}\-\d$", value) is None:
            raise forms.ValidationError('Invalid analitycs account number')
        return value
class UsernameChangeForm(forms.ModelForm):
    """Form allowing a user to change only their username."""

    username = forms.RegexField(
        label=_("Username"), max_length=30, regex=r'^\w+$',
        help_text=_("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
        error_message=_("This value must contain only letters, numbers and underscores."))

    class Meta:
        model = User
        fields = ['username']
9f827ccaec697c2953b95793dc0577ac42b9d164 | 306 | py | Python | cmake_tidy/utils/app_configuration/__init__.py | MaciejPatro/cmake-tidy | ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a | [
"MIT"
] | 16 | 2020-05-16T17:20:00.000Z | 2022-02-14T12:08:41.000Z | cmake_tidy/utils/app_configuration/__init__.py | MaciejPatro/cmake-tidy | ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a | [
"MIT"
] | 19 | 2020-05-18T06:17:42.000Z | 2020-08-11T07:15:11.000Z | cmake_tidy/utils/app_configuration/__init__.py | MaciejPatro/cmake-tidy | ddab3d9c6dd1a6c9cfa47bff5a9f120defea9e6a | [
"MIT"
] | null | null | null | ###############################################################################
# Copyright Maciej Patro (maciej.patro@gmail.com)
# MIT License
###############################################################################
from cmake_tidy.utils.app_configuration.configuration import ConfigurationError
| 38.25 | 79 | 0.398693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.718954 |
9f8409e156458ec7719d06965f21cf2369d9c6f6 | 6,331 | py | Python | testing/logging_tests.py | nanome-ai/nanome-plugin-api | f2ce6a5e3123ee7449a90c2659f3891124289f4a | [
"MIT"
] | null | null | null | testing/logging_tests.py | nanome-ai/nanome-plugin-api | f2ce6a5e3123ee7449a90c2659f3891124289f4a | [
"MIT"
] | null | null | null | testing/logging_tests.py | nanome-ai/nanome-plugin-api | f2ce6a5e3123ee7449a90c2659f3891124289f4a | [
"MIT"
] | null | null | null | import logging
import sys
import unittest
from nanome import Plugin, PluginInstance
from nanome.util import Logs
if sys.version_info.major >= 3:
from unittest.mock import MagicMock, patch
else:
# Python 2.7 way of getting magicmock. Requires pip install mock
from mock import MagicMock, patch
class LoggingTestCase(unittest.TestCase):
    """Tests for plugin log routing: NTS forwarding, log file, and console.

    The @patch decorators stub the plugin's network/event-loop internals so
    Plugin.run() returns without real I/O; mock arguments are injected
    bottom-up (innermost decorator first).
    """
    def setUp(self):
        self.plugin = Plugin('Test Plugin', 'Unit Test Plugin')
        self.plugin.set_plugin_class(PluginInstance)
        self.host = 'anyhost'
        self.port = 8000
        self.key = ''
        # Make it so that Logs logged in this module are handled the same as "nanome"
        testing_logger = logging.getLogger('logging_tests')
        nanome_logger = logging.getLogger('nanome')
        testing_logger.handlers = nanome_logger.handlers
        testing_logger.setLevel(logging.DEBUG)
    @classmethod
    def tearDownClass(cls):
        # Make sure remote logging always off after test.
        # Without this teardown, logging configs persist to tests run after this.
        super(LoggingTestCase, cls).tearDownClass()
        nanome_logger = logging.getLogger("nanome")
        testing_logger = logging.getLogger('logging_tests')
        nanome_logger.handlers = []
        testing_logger.handlers = []
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance')
    @patch('nanome._internal.logs.NTSLoggingHandler.handle')
    def test_nts_handler_called(self, handle_mock, netinstance_mock, loop_mock):
        """Assert logs get forwarded to NTS."""
        remote_logging = "True"
        testargs = [
            'run.py',
            '--remote-logging', remote_logging
        ]
        with patch.object(sys, 'argv', testargs):
            self.plugin.run(self.host, self.port, self.key)
            # Write log, and make sure NTSLogging Handler called.
            Logs.message('This should be forwarded to NTS.')
            handle_mock.assert_called()
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance')
    @patch('nanome._internal.logs.NTSLoggingHandler.handle')
    def test_nts_handler_not_called(self, handle_mock, netinstance_mock, loop_mock):
        """Assert logs don't get forwarded to NTS if remote-logging is False."""
        remote_logging = False
        testargs = [
            'run.py',
            '--remote-logging', remote_logging
        ]
        with patch.object(sys, 'argv', testargs):
            self.plugin.run(self.host, self.port, self.key)
            # Write log, and make sure NTSLogging Handler not called.
            Logs.message('This should not be forwarded to NTS.')
            # log_file_handler should be called, but set to NullHandler
            # NOTE(review): despite the method name, this checks the handler
            # TYPE rather than asserting handle_mock was not called.
            nts_handler = self.plugin._logs_manager.nts_handler
            self.assertTrue(isinstance(nts_handler, logging.NullHandler))
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance')
    def test_file_handler_called(self, netinstance_mock, loop_mock):
        """Assert if write_log_file is True, the log_file_handler is utilized."""
        write_log_file = "True"
        testargs = [
            'run.py',
            '--write-log-file', write_log_file,
        ]
        with patch.object(sys, 'argv', testargs):
            self.plugin.run(self.host, self.port, self.key)
            # Write log, and make sure log_file_handler is called.
            self.plugin._logs_manager.log_file_handler.handle = MagicMock()
            Logs.message('Log file handler should be called.')
            self.plugin._logs_manager.log_file_handler.handle.assert_called()
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance')
    def test_file_handler_not_called(self, netinstance_mock, loop_mock):
        """Assert if write_log_file is False, the log_file_handler is not utilized."""
        write_log_file = False
        testargs = [
            'run.py',
            '--write-log-file', write_log_file,
        ]
        with patch.object(sys, 'argv', testargs):
            self.plugin.run(self.host, self.port, self.key)
            self.plugin._logs_manager.log_file_handler.handle = MagicMock()
            Logs.message('Log file should not be called')
            # NOTE(review): no assertion here — the mock's assert_not_called()
            # is never invoked, so this test cannot fail; confirm intent.
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance.connect')
    @patch('nanome._internal._plugin.Network._NetInstance.send')
    def test_nts_logger_handler(self, send_mock, connect_mock, loop_mock):
        """Ensure NTSLoggingHandler.handle() triggers a network request."""
        with patch.object(sys, 'argv', ['run.py', '--remote-logging', 'True']):
            self.plugin.run(self.host, self.port, self.key)
            send_mock.assert_called()
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance.connect')
    @patch('nanome._internal._plugin.Network._NetInstance.send')
    def test_log_types(self, send_mock, connect_mock, loop_mock):
        """Smoke test: every log level can be emitted without error."""
        with patch.object(sys, 'argv', ['run.py', '-v', '--remote-logging', 'True', '--write-log-file', 'True']):
            self.plugin.run(self.host, self.port, self.key)
            Logs.warning("This is a warning")
            Logs.error("This is an error")
            Logs.debug("This is a debug message")
            Logs.message("This is a regular message")
    @patch('nanome._internal._plugin._Plugin._loop')
    @patch('nanome._internal._plugin.Network._NetInstance')
    def test_console_handler_called(self, netinstance_mock, loop_mock):
        """Assert logs are always logged to the console."""
        testargs = [
            'run.py',
            '--remote-logging', False,
            '--write-log-file', False
        ]
        with patch.object(sys, 'argv', testargs):
            self.plugin.run(self.host, self.port, self.key)
            console_handler = self.plugin._logs_manager.console_handler
            with patch.object(console_handler, 'handle') as handle_mock:
                Logs.message("Should be printed to console")
                handle_mock.assert_called()
| 42.206667 | 114 | 0.648555 | 6,006 | 0.948665 | 0 | 0 | 5,367 | 0.847733 | 0 | 0 | 2,347 | 0.370716 |
9f848cb711d62302aee8c7ee856899de4de8f868 | 2,416 | py | Python | pytest_timeout.py | big91987/pytest-timeout | 54a0a51bfc6e6c3eb99c3d33e27a6d33bf34b20e | [
"MIT"
] | null | null | null | pytest_timeout.py | big91987/pytest-timeout | 54a0a51bfc6e6c3eb99c3d33e27a6d33bf34b20e | [
"MIT"
] | null | null | null | pytest_timeout.py | big91987/pytest-timeout | 54a0a51bfc6e6c3eb99c3d33e27a6d33bf34b20e | [
"MIT"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://www.mozilla.org/en-US/MPL/2.0/.
import warnings
from unittest import TestCase
import pytest
def pytest_addoption(parser):
    """Register the repeat-plugin command-line options with pytest."""
    parser.addoption(
        '--count',
        action='store',
        default=1,
        type=int,
        help='Number of times to repeat each test')
    parser.addoption(
        '--repeat-scope',
        action='store',
        default='function',
        type=str,
        choices=('function', 'class', 'module', 'session'),
        help='Scope for repeating tests')
    # Note: this option uses underscores, unlike the dash-separated options
    # above; renaming it would break existing invocations.
    parser.addoption(
        '--ignore_repeat_mark',
        action='store_true',
        default=False,
        help='ignore repeat mark if set')
def pytest_configure(config):
    """Register the `repeat` marker so --strict-markers runs accept it."""
    config.addinivalue_line(
        'markers',
        'repeat(n): run the given test function `n` times.')
class UnexpectedError(Exception):
    """Raised when the repeat fixture is used in an unsupported way."""
    pass
@pytest.fixture
def __pytest_repeat_step_number(request):
    """Expose the current repetition index (set by pytest_generate_tests)."""
    marker = request.node.get_closest_marker("repeat")
    # A `repeat` marker takes precedence over the --count CLI option.
    count = marker and marker.args[0] or request.config.option.count
    if count > 1:
        try:
            return request.param
        except AttributeError:
            # request.param is missing when parametrization did not happen —
            # e.g. unittest.TestCase subclasses cannot be parametrized.
            if issubclass(request.cls, TestCase):
                warnings.warn(
                    "Repeating unittest class tests not supported")
            else:
                raise UnexpectedError(
                    "This call couldn't work with pytest-repeat. "
                    "Please consider raising an issue with your usage.")
@pytest.hookimpl(trylast=True)
def pytest_generate_tests(metafunc):
    """Parametrize the test to run `count` times (CLI option or marker)."""
    options = metafunc.config.option
    count = options.count
    marker = metafunc.definition.get_closest_marker('repeat')
    # The marker's count wins over --count unless --ignore_repeat_mark is set.
    if marker is not None and not options.ignore_repeat_mark:
        count = int(marker.args[0])
    if count <= 1:
        return
    metafunc.fixturenames.append("__pytest_repeat_step_number")

    def make_progress_id(i, n=count):
        # Test-id suffix like "3-10" for the third of ten repetitions.
        return '{0}-{1}'.format(i + 1, n)

    metafunc.parametrize(
        '__pytest_repeat_step_number',
        range(count),
        indirect=True,
        ids=make_progress_id,
        scope=options.repeat_scope
    )
| 28.093023 | 72 | 0.615066 | 42 | 0.017384 | 0 | 0 | 1,392 | 0.576159 | 0 | 0 | 701 | 0.290149 |
9f875156ef7feb3f17f69d0658641a08a2dd7647 | 2,439 | py | Python | examples/test_Confined.py | pompiduskus/pybox2d | 4393bc93df4828267d2143327abd76de6f146750 | [
"Zlib"
] | null | null | null | examples/test_Confined.py | pompiduskus/pybox2d | 4393bc93df4828267d2143327abd76de6f146750 | [
"Zlib"
] | null | null | null | examples/test_Confined.py | pompiduskus/pybox2d | 4393bc93df4828267d2143327abd76de6f146750 | [
"Zlib"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version by Ken Lauer / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from framework import *
from random import random
class Confined (Framework):
    """Box2D demo: circles packed inside a closed, zero-gravity box."""
    name="Confined space"
    description="Press c to create a circle"
    def __init__(self):
        super(Confined, self).__init__()
        # The ground: four static edges forming a 20x20 closed box.
        ground = self.world.CreateStaticBody(
            shapes=[
                b2EdgeShape(vertices=[(-10, 0),( 10, 0)]),
                b2EdgeShape(vertices=[(-10, 0),(-10, 20)]),
                b2EdgeShape(vertices=[( 10, 0),( 10, 20)]),
                b2EdgeShape(vertices=[(-10, 20),( 10, 20)]),
                ]
            )
        # The bodies: a 5x5 grid of circles; the 0.01*i term offsets each
        # row slightly so columns are not perfectly stacked.
        self.radius = radius = 0.5
        columnCount=5
        rowCount=5
        for j in range(columnCount):
            for i in range(rowCount):
                self.CreateCircle( (-10+(2.1*j+1+0.01*i)*radius, (2*i+1)*radius) )
        self.world.gravity = (0,0)
    def CreateCircle(self, pos):
        """Spawn one dynamic circle body of self.radius at *pos*."""
        fixture=b2FixtureDef(shape=b2CircleShape(radius=self.radius, p=(0,0)), density=1, friction=0.1)
        self.world.CreateDynamicBody(
            position=pos,
            fixtures=fixture
            )
    def Keyboard(self, key):
        """On 'c', drop a new circle at a random spot near the bottom."""
        if key == Keys.K_c:
            self.CreateCircle( (2.0*random()-1.0, self.radius*(1.0+random())) )
if __name__=="__main__":
    # Hand control to the framework's interactive main loop for this testbed.
    main(Confined)
| 38.109375 | 105 | 0.603116 | 1,305 | 0.535055 | 0 | 0 | 0 | 0 | 0 | 0 | 1,088 | 0.446084 |
9f88301771a145190370ef5763a0c5c2c1e81530 | 26,827 | py | Python | pystella/step.py | zachjweiner/pystella | 2d994d1b9f3d2a39a41bbb821fa37fafec699e0c | [
"MIT"
] | 14 | 2019-10-16T15:08:44.000Z | 2022-02-06T10:26:11.000Z | pystella/step.py | zachjweiner/pystella | 2d994d1b9f3d2a39a41bbb821fa37fafec699e0c | [
"MIT"
] | null | null | null | pystella/step.py | zachjweiner/pystella | 2d994d1b9f3d2a39a41bbb821fa37fafec699e0c | [
"MIT"
] | 2 | 2021-04-13T09:32:55.000Z | 2021-08-15T13:16:05.000Z | __copyright__ = "Copyright (C) 2019 Zachary J Weiner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import loopy as lp
from pystella.field import Field, index_fields
from pystella.elementwise import ElementWiseMap
from pymbolic import var
from pymbolic.primitives import Subscript, Variable
__doc__ = """
.. currentmodule:: pystella.step
.. autoclass:: Stepper
.. currentmodule:: pystella
Low-storage Runge-Kutta methods
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. currentmodule:: pystella.step
.. autoclass:: LowStorageRKStepper
.. currentmodule:: pystella
.. autoclass:: LowStorageRK54
.. autoclass:: LowStorageRK3Williamson
.. autoclass:: LowStorageRK3Inhomogeneous
.. autoclass:: LowStorageRK3SSP
Classical Runge-Kutta methods
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"Classical" Runge-Kutta methods are also implemented, though are not recommended
over the low-storage methods above.
.. currentmodule:: pystella.step
.. autoclass:: RungeKuttaStepper
.. currentmodule:: pystella
.. autoclass:: RungeKutta4
.. autoclass:: RungeKutta3SSP
.. autoclass:: RungeKutta3Heun
.. autoclass:: RungeKutta3Nystrom
.. autoclass:: RungeKutta3Ralston
.. autoclass:: RungeKutta2Midpoint
.. autoclass:: RungeKutta2Ralston
"""
class Stepper:
    """
    The base class for time steppers, with no implementation of a particular time
    stepper.

    :arg input: May be one of the following:

        * a :class:`dict` whose values represent the right-hand side
          of the ODEs to solve, i.e., `(key, value)` pairs corresponding to
          :math:`(y, f)` such that

          .. math::

              \\frac{\\mathrm{d} y}{\\mathrm{d} t} = f,

          where :math:`f` is an arbitrary function of kernel data.
          Both keys and values must be :mod:`pymbolic` expressions.

        * a :class:`~pystella.Sector`. In this case, the right-hand side
          dictionary will be obtained from :attr:`~pystella.Sector.rhs_dict`.

        * a :class:`list` of :class:`~pystella.Sector`\\ s. In this case,
          the input obtained from each :class:`~pystella.Sector`
          (as described above) will be combined.

    The following keyword arguments are recognized:

    :arg MapKernel: The kernel class which each substep/stage will be an
        instance of---i.e., one of :class:`~pystella.ElementWiseMap` or its
        subclasses. Defaults to :class:`~pystella.ElementWiseMap`.

    :arg dt: A :class:`float` fixing the value of the timestep interval.
        Defaults to *None*, in which case it is not fixed at kernel creation.

    :raises TypeError: if *input* is not a :class:`~pystella.Sector`, a
        :class:`list` of them, or a :class:`dict`.

    The remaining arguments are passed to :meth:`MapKernel` for
    each substep of the timestepper (i.e., see the documentation of
    :class:`~pystella.ElementWiseMap`).

    .. automethod:: __call__

    .. attribute:: num_stages

        The number of substeps/stages per timestep.

    .. attribute:: expected_order

        The expected convergence order of *global* error, i.e.
        :math:`n` such that the global error is :math:`\\mathcal{O}(\\Delta t^n)`.

    .. attribute:: num_unknowns

        The number of unknown degrees of freedom which are evolved.
    """

    num_stages = None
    expected_order = None
    num_copies = None

    def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
        # Subclasses construct and return the list of per-stage kernels here.
        raise NotImplementedError

    def __init__(self, input, MapKernel=ElementWiseMap, **kwargs):
        single_stage = kwargs.pop("single_stage", True)

        from pystella import Sector
        if isinstance(input, Sector):
            self.rhs_dict = input.rhs_dict
        elif isinstance(input, list):
            # merge the rhs_dict's of all supplied Sectors
            self.rhs_dict = dict(i for s in input for i in s.rhs_dict.items())
        elif isinstance(input, dict):
            self.rhs_dict = input
        else:
            # Previously an unsupported type fell through silently, producing
            # a confusing AttributeError on self.rhs_dict further below.
            raise TypeError(
                "input must be a Sector, a list of Sectors, or a dict, "
                f"not {type(input).__name__}")

        # Multi-copy (classical RK) steppers index unknowns with an extra
        # leading axis of length num_copies.
        if not single_stage:
            prepend_with = (self.num_copies,)
        else:
            prepend_with = None

        args = kwargs.pop("args", [...])
        args = args + [lp.ValueArg("dt")]
        from pystella import get_field_args
        inferred_args = get_field_args(self.rhs_dict, prepend_with=prepend_with)
        from pystella.elementwise import append_new_args
        self.args = append_new_args(args, inferred_args)

        dt = kwargs.pop("dt", None)
        fixed_parameters = kwargs.pop("fixed_parameters", dict())
        if dt is not None:
            fixed_parameters.update(dict(dt=dt))

        self.num_unknowns = len(self.rhs_dict.keys())
        # Forward MapKernel explicitly: it was previously accepted by this
        # signature but never passed on, so make_steps always used the default.
        self.steps = self.make_steps(MapKernel=MapKernel, **kwargs,
                                     fixed_parameters=fixed_parameters)

    def __call__(self, stage, queue=None, **kwargs):
        """
        Calls substep/stage ``stage`` (:attr:`steps[stage]`) of the timestepper,
        i.e., :func:`pystella.ElementWiseMap.__call__` for the kernel for
        substep/stage ``stage``.

        :arg stage: The substep/stage of time timestepper to call.

        :returns: The :class:`pyopencl.Event` associated with the kernel invocation.
        """

        evt, _ = self.steps[stage](queue, **kwargs)
        return evt
class RungeKuttaStepper(Stepper):
    """
    The base implementation of classical, explicit Runge-Kutta time steppers,
    which operate by storing and operating on multiple copies of each unknown
    array. Subclasses must provide an implementation of :meth:`step_statements`
    which returns a key-value pair implementing a specific substep of the
    particular timestepper.

    .. warning::

        To minimize the required storage per unknown (i.e., number of
        temporaries), the implementation of most subclasses overwrite arrays that
        are being read as input to compute right-hand sides. This means that any
        non-local (stencil-type) operations must be precomputed and cached
        *globally* (unless otherwise noted).

    :raises ValueError: if the keys of :attr:`rhs_dict` are not
        :class:`~pystella.Field`\\ s (or :class:`pymbolic.primitives.Subscript`\\ s
        thereof). This is required for :meth:`make_steps` to be able to prepend
        unknown arrays' subscripts with the index corresponding to the temporary
        storage axis.
    """

    def __init__(self, input, **kwargs):
        # single_stage=False tells Stepper to prepend a copy axis of length
        # num_copies to each unknown's kernel argument.
        super().__init__(input, single_stage=False, **kwargs)

    def step_statements(self, stage, f, dt, rhs):
        # Subclasses return {lhs: expression} assignments for this stage.
        raise NotImplementedError

    def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
        # Symbolic placeholders: rhs[i] holds the i-th right-hand side value,
        # dt the timestep, and q the copy index read by the rhs expressions.
        rhs = var("rhs")
        dt = var("dt")
        q = var("q")
        fixed_parameters = kwargs.pop("fixed_parameters", dict())

        # Temporary instructions evaluating each rhs expression on copy q.
        rhs_statements = {rhs[i]: index_fields(value, prepend_with=(q,))
                          for i, value in enumerate(self.rhs_dict.values())}

        steps = []
        for stage in range(self.num_stages):
            RK_dict = {}
            for i, f in enumerate(self.rhs_dict.keys()):
                # ensure that key is either a Field or a Subscript of a Field
                # so that index_fields can prepend the q index
                key_has_field = False
                if isinstance(f, Field):
                    key_has_field = True
                elif isinstance(f, Subscript):
                    if isinstance(f.aggregate, Field):
                        key_has_field = True

                if not key_has_field:
                    raise ValueError("rhs_dict keys must be Field instances")

                statements = self.step_statements(stage, f, dt, rhs[i])
                for k, v in statements.items():
                    RK_dict[k] = v

            # Stage 0 reads the "live" copy (q=0); later stages read the
            # intermediate copy (q=1).
            fixed_parameters.update(q=0 if stage == 0 else 1)
            # Statement ordering is controlled by construction order above,
            # so loopy's access-order check is disabled.
            options = lp.Options(enforce_variable_access_ordered="no_check")
            step = MapKernel(RK_dict, tmp_instructions=rhs_statements,
                             args=self.args, **kwargs, options=options,
                             fixed_parameters=fixed_parameters)
            steps.append(step)

        return steps
class RungeKutta4(RungeKuttaStepper):
    """The classical, four-stage, fourth-order Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length three.
    """
    num_stages = 4
    expected_order = 4
    num_copies = 3

    def step_statements(self, stage, f, dt, rhs):
        # Views of f on each of the three temporary-storage copies:
        # f0 holds the step's initial state, f1 the stage input, f2 the
        # running accumulator for the final combination.
        f0, f1, f2 = (index_fields(f, prepend_with=(q,)) for q in range(3))
        if stage == 0:
            return {f1: f0 + dt/2 * rhs, f2: f0 + dt/6 * rhs}
        if stage == 1:
            return {f1: f0 + dt/2 * rhs, f2: f2 + dt/3 * rhs}
        if stage == 2:
            return {f1: f0 + dt * rhs, f2: f2 + dt/3 * rhs}
        if stage == 3:
            return {f0: f2 + dt/6 * rhs}
class RungeKutta3Heun(RungeKuttaStepper):
    """Heun's three-stage, third-order Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length three.
    """
    num_stages = 3
    expected_order = 3
    num_copies = 3

    def step_statements(self, stage, f, dt, rhs):
        # f0: initial state, f1: stage input, f2: accumulator.
        f0, f1, f2 = (index_fields(f, prepend_with=(q,)) for q in range(3))
        if stage == 0:
            return {f1: f0 + dt/3 * rhs, f2: f0 + dt/4 * rhs}
        if stage == 1:
            return {f1: f0 + dt*2/3 * rhs}
        if stage == 2:
            return {f0: f2 + dt*3/4 * rhs}
class RungeKutta3Nystrom(RungeKuttaStepper):
    """Nystrom's three-stage, third-order Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length three.
    """
    num_stages = 3
    expected_order = 3
    num_copies = 3

    def step_statements(self, stage, f, dt, rhs):
        # f0: initial state, f1: stage input, f2: accumulator.
        f0, f1, f2 = (index_fields(f, prepend_with=(q,)) for q in range(3))
        if stage == 0:
            return {f1: f0 + dt*2/3 * rhs, f2: f0 + dt*2/8 * rhs}
        if stage == 1:
            return {f1: f0 + dt*2/3 * rhs, f2: f2 + dt*3/8 * rhs}
        if stage == 2:
            return {f0: f2 + dt*3/8 * rhs}
class RungeKutta3Ralston(RungeKuttaStepper):
    """Ralston's three-stage, third-order Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length three.
    """
    num_stages = 3
    expected_order = 3
    num_copies = 3

    def step_statements(self, stage, f, dt, rhs):
        # f0: initial state, f1: stage input, f2: accumulator.
        f0, f1, f2 = (index_fields(f, prepend_with=(q,)) for q in range(3))
        if stage == 0:
            return {f1: f0 + dt/2 * rhs, f2: f0 + dt*2/9 * rhs}
        if stage == 1:
            return {f1: f0 + dt*3/4 * rhs, f2: f2 + dt*1/3 * rhs}
        if stage == 2:
            return {f0: f2 + dt*4/9 * rhs}
class RungeKutta3SSP(RungeKuttaStepper):
    """A three-stage, third-order strong-stability preserving Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length two.
    """
    num_stages = 3
    expected_order = 3
    num_copies = 2

    def step_statements(self, stage, f, dt, rhs):
        # Only the first two copies are referenced (num_copies = 2); the
        # third view is constructed purely for uniformity with siblings.
        f0, f1, _ = (index_fields(f, prepend_with=(q,)) for q in range(3))
        if stage == 0:
            return {f1: f0 + dt * rhs}
        if stage == 1:
            return {f1: 3/4 * f0 + 1/4 * f1 + dt/4 * rhs}
        if stage == 2:
            return {f0: 1/3 * f0 + 2/3 * f1 + dt*2/3 * rhs}
class RungeKutta2Midpoint(RungeKuttaStepper):
    """The "midpoint" method, a two-stage, second-order Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length two.
    Note that right-hand side operations *can* safely involve non-local
    computations of unknown arrays for this method.
    """
    num_stages = 2
    expected_order = 2
    num_copies = 2

    def step_statements(self, stage, f, dt, rhs):
        f0, f1 = (index_fields(f, prepend_with=(q,)) for q in range(2))
        if stage == 0:
            return {f1: f0 + dt/2 * rhs}
        if stage == 1:
            return {f0: f0 + dt * rhs}
# possible order reduction
class RungeKutta2Heun(RungeKuttaStepper):
    """Heun's two-stage, second-order Runge-Kutta method.

    Flagged upstream as subject to possible order reduction.
    Requires unknown arrays to have temporary storage axes of length two.
    """
    num_stages = 2
    expected_order = 2
    num_copies = 2

    def step_statements(self, stage, f, dt, rhs):
        f0, f1 = (index_fields(f, prepend_with=(q,)) for q in range(2))
        if stage == 0:
            # Assignment order matters: f1 must read f0 before f0 is updated.
            return {f1: f0 + dt * rhs, f0: f0 + dt/2 * rhs}
        if stage == 1:
            return {f0: f0 + dt/2 * rhs}
class RungeKutta2Ralston(RungeKuttaStepper):
    """Ralstons's two-stage, second-order Runge-Kutta method.

    Requires unknown arrays to have temporary storage axes of length two.
    """
    num_stages = 2
    expected_order = 2
    num_copies = 2

    def step_statements(self, stage, f, dt, rhs):
        f0, f1 = (index_fields(f, prepend_with=(q,)) for q in range(2))
        if stage == 0:
            # Assignment order matters: f1 must read f0 before f0 is updated.
            return {f1: f0 + dt*2/3 * rhs, f0: f0 + dt/4 * rhs}
        if stage == 1:
            return {f0: f0 + dt*3/4 * rhs}
def get_name(expr):
    """Return the bare string name underlying *expr*.

    Recursively unwraps :class:`~pystella.Field` and
    :class:`pymbolic.primitives.Subscript` wrappers until a
    :class:`pymbolic.primitives.Variable` or plain :class:`str` is reached.

    :raises TypeError: for unsupported expression types. (Previously this
        fell through and implicitly returned *None*, deferring the failure
        to a confusing concatenation error in :func:`gen_tmp_name`.)
    """
    # str is checked first; it is disjoint from the pymbolic types, so the
    # reordering does not change which branch any input takes.
    if isinstance(expr, str):
        return expr
    elif isinstance(expr, Field):
        return get_name(expr.child)
    elif isinstance(expr, Subscript):
        return get_name(expr.aggregate)
    elif isinstance(expr, Variable):
        return expr.name
    raise TypeError(f"cannot extract a name from {type(expr).__name__}")
def gen_tmp_name(expr, prefix="_", suffix="_tmp"):
    """Mangle *expr*'s underlying name into a temporary-array name,
    wrapping it with *prefix* and *suffix*."""
    return f"{prefix}{get_name(expr)}{suffix}"
def copy_and_rename(expr):
    """Return a structural copy of *expr* whose underlying variable name is
    replaced by the mangled name from :func:`gen_tmp_name`.

    Field and Subscript wrappers are rebuilt around a renamed child; a
    Variable or str is renamed directly.

    :raises TypeError: for unsupported expression types. (Previously this
        fell through and implicitly returned *None*.)
    """
    if isinstance(expr, Field):
        return expr.copy(child=copy_and_rename(expr.child))
    elif isinstance(expr, Subscript):
        return Subscript(copy_and_rename(expr.aggregate), expr.index)
    elif isinstance(expr, Variable):
        return Variable(gen_tmp_name(expr))
    elif isinstance(expr, str):
        return gen_tmp_name(expr)
    raise TypeError(f"cannot rename expression of type {type(expr).__name__}")
class LowStorageRKStepper(Stepper):
    """
    The base implementation of low-storage, explicit Runge-Kutta time steppers,
    which operate by storing and operating on a single copy of each unknown array,
    plus an auxiliary temporary array.

    The substeps are expressed in a standard form, drawing coefficients from
    a subclass's provided values of :attr:`_A`, :attr:`_B`, and :attr:`_C`.

    Allocation of the auxiliary arrays is handled internally by:

    .. automethod:: get_tmp_arrays_like

    :meth:`get_tmp_arrays_like` is called the first time
    :meth:`__call__` is called, with the result stored in the attribute
    :attr:`tmp_arrays`.
    These arrays must not be modified between substages of a single timestep,
    but may be safely modified in between timesteps.

    .. versionchanged:: 2020.2

        Auxiliary arrays handled internally by :meth:`get_tmp_arrays_like`.
        Previously, manual allocation (and passing) of a single temporary
        array ``k_tmp`` was required.
    """

    # 2N low-storage coefficient tables, supplied by subclasses.
    _A = []
    _B = []
    _C = []

    def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
        # One temporary ("k") array per unknown, named by mangling the
        # unknown's own name.
        tmp_arrays = [copy_and_rename(key) for key in self.rhs_dict.keys()]
        self.dof_names = {get_name(key) for key in self.rhs_dict.keys()}

        rhs_statements = {var(gen_tmp_name(key, suffix=f"_rhs_{i}")): val
                          for i, (key, val) in enumerate(self.rhs_dict.items())}

        steps = []
        for stage in range(self.num_stages):
            RK_dict = {}
            for i, (f, k) in enumerate(zip(self.rhs_dict.keys(), tmp_arrays)):
                rhs = var(gen_tmp_name(f, suffix=f"_rhs_{i}"))
                # Standard 2N update: k = A[s]*k + dt*rhs; f = f + B[s]*k.
                RK_dict[k] = self._A[stage] * k + var("dt") * rhs
                RK_dict[f] = f + self._B[stage] * k

            step = MapKernel(RK_dict, tmp_instructions=rhs_statements,
                             args=self.args, **kwargs)
            steps.append(step)

        return steps

    def get_tmp_arrays_like(self, **kwargs):
        """
        Allocates required temporary arrays matching those passed via keyword.

        :returns: A :class:`dict` of named arrays, suitable for passing via
            dictionary expansion.

        .. versionadded:: 2020.2
        """
        # Hoisted out of the per-unknown loop: importing once is sufficient.
        import pyopencl.array as cla

        tmp_arrays = {}
        for name in self.dof_names:
            f = kwargs[name]
            tmp_name = gen_tmp_name(name)
            if isinstance(f, cla.Array):
                tmp_arrays[tmp_name] = cla.empty_like(f)
            elif isinstance(f, np.ndarray):
                tmp_arrays[tmp_name] = np.empty_like(f)
            else:
                raise ValueError(f"Could not generate tmp array for {f}"
                                 f"of type {type(f)}")
            # k must start at zero so the first stage's A*k term vanishes.
            tmp_arrays[tmp_name][...] = 0.

        return tmp_arrays

    def __init__(self, *args, **kwargs):
        # Per-instance dict (was a class-level attribute): a shared mutable
        # class attribute would leak lazily-allocated arrays across instances.
        self.tmp_arrays = {}
        super().__init__(*args, **kwargs)
        for step in self.steps:
            step.knl = lp.add_inames_for_unused_hw_axes(step.knl)

    def __call__(self, stage, *, queue=None, **kwargs):
        # Lazily allocate the auxiliary arrays on first invocation, matching
        # the dtype/shape of the unknowns actually passed in.
        if len(self.tmp_arrays) == 0:
            self.tmp_arrays = self.get_tmp_arrays_like(**kwargs)

        return super().__call__(stage, queue=queue, **kwargs, **self.tmp_arrays)
class LowStorageRK54(LowStorageRKStepper):
    """
    A five-stage, fourth-order, low-storage Runge-Kutta method.

    See
    Carpenter, M.H., and Kennedy, C.A., Fourth-order-2N-storage
    Runge-Kutta schemes, NASA Langley Tech Report TM 109112, 1994
    """
    num_stages = 5
    expected_order = 4

    # 2N coefficients: _A multiplies the running k-array, _B scales the
    # update to the unknown, _C are the substage times (fractions of dt).
    _A = [
        0,
        -567301805773 / 1357537059087,
        -2404267990393 / 2016746695238,
        -3550918686646 / 2091501179385,
        -1275806237668 / 842570457699,
    ]

    _B = [
        1432997174477 / 9575080441755,
        5161836677717 / 13612068292357,
        1720146321549 / 2090206949498,
        3134564353537 / 4481467310338,
        2277821191437 / 14882151754819,
    ]

    _C = [
        0,
        1432997174477 / 9575080441755,
        2526269341429 / 6820363962896,
        2006345519317 / 3224310063776,
        2802321613138 / 2924317926251,
    ]
class LowStorageRK144(LowStorageRKStepper):
    """
    A 14-stage, fourth-order low-storage Runge-Kutta method optimized for elliptic
    stability regions.

    See
    Niegemann, Jens & Diehl, Richard & Busch, Kurt. (2012). Efficient low-storage
    Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
    364-372. 10.1016/j.jcp.2011.09.003.
    """
    num_stages = 14
    expected_order = 4

    # 2N coefficients (see LowStorageRKStepper): _A multiplies the running
    # k-array, _B scales the unknown's update, _C are the substage times.
    _A = [
        0,
        -0.7188012108672410,
        -0.7785331173421570,
        -0.0053282796654044,
        -0.8552979934029281,
        -3.9564138245774565,
        -1.5780575380587385,
        -2.0837094552574054,
        -0.7483334182761610,
        -0.7032861106563359,
        0.0013917096117681,
        -0.0932075369637460,
        -0.9514200470875948,
        -7.1151571693922548
    ]

    _B = [
        0.0367762454319673,
        0.3136296607553959,
        0.1531848691869027,
        0.0030097086818182,
        0.3326293790646110,
        0.2440251405350864,
        0.3718879239592277,
        0.6204126221582444,
        0.1524043173028741,
        0.0760894927419266,
        0.0077604214040978,
        0.0024647284755382,
        0.0780348340049386,
        5.5059777270269628
    ]

    _C = [
        0,
        0.0367762454319673,
        0.1249685262725025,
        0.2446177702277698,
        0.2476149531070420,
        0.2969311120382472,
        0.3978149645802642,
        0.5270854589440328,
        0.6981269994175695,
        0.8190890835352128,
        0.8527059887098624,
        0.8604711817462826,
        0.8627060376969976,
        0.8734213127600976
    ]
class LowStorageRK134(LowStorageRKStepper):
    """
    A 13-stage, fourth-order low-storage Runge-Kutta method optimized for circular
    stability regions.

    See
    Niegemann, Jens & Diehl, Richard & Busch, Kurt. (2012). Efficient low-storage
    Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
    364-372. 10.1016/j.jcp.2011.09.003.
    """
    num_stages = 13
    expected_order = 4

    # NOTE(review): the _A entries here are positive, unlike LowStorageRK54
    # and LowStorageRK144 (and this class is omitted from all_steppers) —
    # verify the signs against Niegemann et al. (2012) before relying on it.
    _A = [
        0,
        0.6160178650170565,
        0.4449487060774118,
        1.0952033345276178,
        1.2256030785959187,
        0.2740182222332805,
        0.0411952089052647,
        0.179708489915356,
        1.1771530652064288,
        0.4078831463120878,
        0.8295636426191777,
        4.789597058425229,
        0.6606671432964504
    ]

    _B = [
        0.0271990297818803,
        0.1772488819905108,
        0.0378528418949694,
        0.6086431830142991,
        0.21543139743161,
        0.2066152563885843,
        0.0415864076069797,
        0.0219891884310925,
        0.9893081222650993,
        0.0063199019859826,
        0.3749640721105318,
        1.6080235151003195,
        0.0961209123818189
    ]

    _C = [
        0,
        0.0271990297818803,
        0.0952594339119365,
        0.1266450286591127,
        0.1825883045699772,
        0.3737511439063931,
        0.5301279418422206,
        0.5704177433952291,
        0.5885784947099155,
        0.6160769826246714,
        0.6223252334314046,
        0.6897593128753419,
        0.9126827615920843
    ]
class LowStorageRK124(LowStorageRKStepper):
    """
    A 12-stage, fourth-order low-storage Runge-Kutta method optimized for inviscid
    problems.

    See
    Niegemann, Jens & Diehl, Richard & Busch, Kurt. (2012). Efficient low-storage
    Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
    364-372. 10.1016/j.jcp.2011.09.003.
    """
    num_stages = 12
    expected_order = 4

    # NOTE(review): _A entries are positive, unlike LowStorageRK54/RK144
    # (and this class is omitted from all_steppers) — verify the signs
    # against Niegemann et al. (2012) before relying on it.
    _A = [
        0,
        0.0923311242368072,
        0.9441056581158819,
        4.327127324757639,
        2.155777132902607,
        0.9770727190189062,
        0.7581835342571139,
        1.79775254708255,
        2.691566797270077,
        4.646679896026814,
        0.1539613783825189,
        0.5943293901830616
    ]

    _B = [
        0.0650008435125904,
        0.0161459902249842,
        0.5758627178358159,
        0.1649758848361671,
        0.3934619494248182,
        0.0443509641602719,
        0.2074504268408778,
        0.6914247433015102,
        0.3766646883450449,
        0.0757190350155483,
        0.2027862031054088,
        0.2167029365631842
    ]

    _C = [
        0,
        0.0650008435125904,
        0.0796560563081853,
        0.1620416710085376,
        0.2248877362907778,
        0.2952293985641261,
        0.3318332506149405,
        0.4094724050198658,
        0.6356954475753369,
        0.6806551557645497,
        0.714377371241835,
        0.9032588871651854,
    ]
class LowStorageRK3Williamson(LowStorageRKStepper):
    """
    A three-stage, third-order, low-storage Runge-Kutta method.

    See
    Williamson, J. H., Low-storage Runge-Kutta schemes,
    J. Comput. Phys., 35, 48-56, 1980
    """
    num_stages = 3
    expected_order = 3

    # 2N coefficients: _A multiplies the running k-array, _B the update,
    # _C are the substage times.
    _A = [0, -5/9, -153/128]
    _B = [1/3, 15/16, 8/15]
    _C = [0, 4/9, 15/32]
class LowStorageRK3Inhomogeneous(LowStorageRKStepper):
    """
    A three-stage, third-order, low-storage Runge-Kutta method.
    """
    num_stages = 3
    expected_order = 3

    # 2N coefficients (see LowStorageRKStepper for the update form).
    _A = [0, -17/32, -32/27]
    _B = [1/4, 8/9, 3/4]
    _C = [0, 15/32, 4/9]
# possible order reduction
class LowStorageRK3Symmetric(LowStorageRKStepper):
    """A three-stage, third-order, low-storage Runge-Kutta method.

    Flagged in-source as subject to possible order reduction; omitted from
    ``all_steppers``.
    """
    num_stages = 3
    expected_order = 3

    _A = [0, -2/3, -1]
    _B = [1/3, 1, 1/2]
    _C = [0, 1/3, 2/3]
# possible order reduction
class LowStorageRK3PredictorCorrector(LowStorageRKStepper):
    """A three-stage, third-order, low-storage predictor-corrector method.

    Flagged in-source as subject to possible order reduction; omitted from
    ``all_steppers``.
    """
    num_stages = 3
    expected_order = 3

    _A = [0, -1/4, -4/3]
    _B = [1/2, 2/3, 1/2]
    _C = [0, 1/2, 1]
# Closed-form coefficients for the three-stage SSP low-storage scheme used
# by LowStorageRK3SSP below, parameterized by the free substage time c2.
# The names B1-B3 / A2-A3 are referenced by that class; do not rename.
c2 = .924574
# z1..z6 are intermediate quantities of the closed-form solution.
z1 = np.sqrt(36 * c2**4 + 36 * c2**3 - 135 * c2**2 + 84 * c2 - 12)
z2 = 2 * c2**2 + c2 - 2
z3 = 12 * c2**4 - 18 * c2**3 + 18 * c2**2 - 11 * c2 + 2
z4 = 36 * c2**4 - 36 * c2**3 + 13 * c2**2 - 8 * c2 + 4
z5 = 69 * c2**3 - 62 * c2**2 + 28 * c2 - 8
z6 = 34 * c2**4 - 46 * c2**3 + 34 * c2**2 - 13 * c2 + 2
# _B (update) coefficients for the three stages.
B1 = c2
B2 = ((12 * c2 * (c2 - 1) * (3 * z2 - z1) - (3 * z2 - z1)**2)
      / (144 * c2 * (3 * c2 - 2) * (c2 - 1)**2))
B3 = (- 24 * (3 * c2 - 2) * (c2 - 1)**2
      / ((3 * z2 - z1)**2 - 12 * c2 * (c2 - 1) * (3 * z2 - z1)))
# _A (k-recurrence) coefficients for the second and third stages.
A2 = ((- z1 * (6 * c2**2 - 4 * c2 + 1) + 3 * z3)
      / ((2 * c2 + 1) * z1 - 3 * (c2 + 2) * (2 * c2 - 1)**2))
A3 = ((- z4 * z1 + 108 * (2 * c2 - 1) * c2**5 - 3 * (2 * c2 - 1) * z5)
      / (24 * z1 * c2 * (c2 - 1)**4 + 72 * c2 * z6 + 72 * c2**6 * (2 * c2 - 13)))
class LowStorageRK3SSP(LowStorageRKStepper):
    """
    A three-stage, third-order, strong-stability preserving, low-storage
    Runge-Kutta method.
    """
    num_stages = 3
    expected_order = 3

    # Coefficients are computed in closed form at module level (c2, z1..z6,
    # A2/A3, B1-B3 above).
    _A = [0, A2, A3]
    _B = [B1, B2, B3]
    _C = [0, B1, B1 + B2 * (A2 + 1)]
# Public list of recommended steppers.
# NOTE(review): RungeKutta2Heun, LowStorageRK134, LowStorageRK124,
# LowStorageRK3Symmetric and LowStorageRK3PredictorCorrector are omitted
# (several are marked "possible order reduction" above) — confirm intent.
all_steppers = [RungeKutta4, RungeKutta3SSP, RungeKutta3Heun, RungeKutta3Nystrom,
                RungeKutta3Ralston, RungeKutta2Midpoint,
                RungeKutta2Ralston, LowStorageRK54, LowStorageRK144,
                LowStorageRK3Williamson, LowStorageRK3Inhomogeneous,
                LowStorageRK3SSP]
| 31.413349 | 85 | 0.586871 | 22,399 | 0.834942 | 0 | 0 | 0 | 0 | 0 | 0 | 9,854 | 0.367317 |
9f8be4596b5e31deb9da06daf1c9c42045dcf57e | 4,121 | py | Python | identity-mapper.py | jamesbak/databox-adls-loader | 1189ba399f63d34d797f51b3a9e655101587d798 | [
"Apache-2.0"
] | 2 | 2020-06-01T14:06:37.000Z | 2021-12-06T17:59:53.000Z | identity-mapper.py | Azure/databox-adls-loader | 24cfb4cb68cd121b0d0907cbabf18bfdaa370ab1 | [
"MIT"
] | null | null | null | identity-mapper.py | Azure/databox-adls-loader | 24cfb4cb68cd121b0d0907cbabf18bfdaa370ab1 | [
"MIT"
] | 3 | 2021-01-08T11:02:29.000Z | 2022-03-22T07:59:05.000Z | #!/usr/bin/env python
import requests
import sys, subprocess, datetime, json, itertools, os.path, threading, argparse, logging
from adls_copy_utils import AdlsCopyUtils
log = logging.getLogger(__name__)
def update_files_owners(account, container, sas_token, work_queue):
    """Worker loop: pull blob entries off *work_queue*, remap their owner and
    group through the operator-supplied map, and write the merged metadata
    back to blob storage in a single PUT per blob.

    NOTE(review): ``identity_map`` is consumed as a module global assigned in
    ``__main__`` — confirm it is always loaded before worker threads start.
    """
    # threading.current_thread() replaces the deprecated currentThread().
    log = logging.getLogger(threading.current_thread().name)
    log.debug("Thread starting: %d", threading.current_thread().ident)
    while not work_queue.isDone():
        blob = work_queue.nextItem()
        if blob:
            # Translate owner and group via the identity map.
            blob["permissions"]["owner"] = AdlsCopyUtils.lookupIdentity(
                AdlsCopyUtils.IDENTITY_USER, blob["permissions"]["owner"],
                identity_map)
            blob["permissions"]["group"] = AdlsCopyUtils.lookupIdentity(
                AdlsCopyUtils.IDENTITY_GROUP, blob["permissions"]["group"],
                identity_map)
            # Merge the updated information into the other metadata properties,
            # so that we can update in 1 call
            blob["metadata"][AdlsCopyUtils.METDATA_PERMISSIONS] = json.dumps(
                blob["permissions"])
            if blob["is_folder"]:
                blob["metadata"][AdlsCopyUtils.METADATA_ISFOLDER] = "true"
            url = "http://{0}.blob.core.windows.net/{1}/{2}?comp=metadata&{3}".format(
                account, container, blob["name"], sas_token)
            log.debug(url)
            # Base headers plus one x-ms-meta-* header per metadata entry.
            metadata_headers = {
                "x-ms-version": "2018-03-28",
                "x-ms-date": datetime.datetime.utcnow().strftime(
                    "%a, %d %b %Y %H:%M:%S GMT")
            }
            metadata_headers.update({"x-ms-meta-" + name: value
                                     for (name, value) in blob["metadata"].items()})
            with requests.put(url, headers=metadata_headers) as response:
                if not response:
                    log.warning("Failed to set metadata on file: %s. Error: %s", url, response.text)
                else:
                    # NOTE(review): failed items are never marked done, which
                    # may be a deliberate retry mechanism or may hang the
                    # queue — confirm against AdlsCopyUtils.processWorkQueue.
                    work_queue.itemDone()
                    log.debug("Updated ownership for %s", blob["name"])
    log.debug("Thread ending")
if __name__ == '__main__':
    # Parse the shared copy-tool arguments plus the map-generation flag.
    parser = AdlsCopyUtils.createCommandArgsParser("Remaps identities on HDFS sourced data")
    parser.add_argument('-g', '--generate-identity-map', action='store_true', help="Specify this flag to generate a based identity mapping file using the unique identities in the source account. The identity map will be written to the file specified by the --identity-map argument.")
    args = parser.parse_known_args()[0]

    AdlsCopyUtils.configureLogging(args.log_config, args.log_level, args.log_file)

    print("Remapping identities for file owners in account: " + args.source_account)

    # Acquire SAS token, so that we don't have to sign each request (construct as string as Python 2.7 on linux doesn't marshall the args correctly with shell=True)
    sas_token = AdlsCopyUtils.getSasToken(args.source_account, args.source_key)

    # Get the full account list
    inventory = AdlsCopyUtils.getSourceFileList(args.source_account, args.source_key, args.source_container, args.prefix)

    if args.generate_identity_map:
        log.info("Generating identity map from source account to file: " + args.identity_map)
        # Collect every distinct owner and group present in the inventory.
        unique_users = set([x["permissions"]["owner"] for x in inventory])
        unique_groups = set([x["permissions"]["group"] for x in inventory])
        # One skeleton entry per identity; 'target' is left blank for the
        # operator to fill in before running the actual remapping pass.
        identities = [{
                "type": identity_type["type"],
                "source": identity,
                "target": ""
            }
            for identity_type in [{"type": AdlsCopyUtils.IDENTITY_USER, "identities": unique_users}, {"type": AdlsCopyUtils.IDENTITY_GROUP, "identities": unique_groups}]
            for identity in identity_type["identities"]]
        with open(args.identity_map, "w+") as f:
            json.dump(identities, f)
    else:
        # Load identity map
        # NOTE(review): identity_map is read as a module global by
        # update_files_owners — keep the name if refactoring.
        identity_map = AdlsCopyUtils.loadIdentityMap(args.identity_map)

        # Fire up the processing in args.max_parallelism threads, co-ordinated via a thread-safe queue
        AdlsCopyUtils.processWorkQueue(update_files_owners, [args.source_account, args.source_container, sas_token], inventory, args.max_parallelism)

    print("All work processed. Exiting")
| 58.871429 | 283 | 0.676777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,401 | 0.339966 |
9f914c2903091304cb9fd3f8a798da2d8ded6800 | 10,822 | py | Python | code2.0/models.py | Mulns/Whale-Identification | 26db3b0ce3f2f4a9539965e2632acb6fe07156e8 | [
"MIT"
] | 2 | 2019-01-14T14:59:06.000Z | 2019-01-17T02:15:29.000Z | code2.0/models.py | Mulns/Whale-Identification | 26db3b0ce3f2f4a9539965e2632acb6fe07156e8 | [
"MIT"
] | null | null | null | code2.0/models.py | Mulns/Whale-Identification | 26db3b0ce3f2f4a9539965e2632acb6fe07156e8 | [
"MIT"
] | 2 | 2019-01-13T12:30:15.000Z | 2019-01-13T14:42:40.000Z | from tensorflow import keras
from util import ArcfaceLoss, inference_loss, identity_loss
import numpy as np
import os
import wn
# Global hyper-parameters shared by the models below.
weight_decay = 5e-4  # NOTE(review): not referenced in this file — presumably used by backbones; confirm
H, W, C = (150, 300, 3)  # input image height, width, channels
nb_classes = 5004  # number of identity classes (presumably whale IDs — confirm)
lambda_c = 0.2  # default weight of the center loss term (CenterLossNet)
lr = 6e-4  # default optimizer learning rate
feature_size = 512  # embedding dimensionality produced by the backbone
final_active = 'sigmoid' # for siamese net
# labels: not one-hot
class BaseModel(object):
    """Common training scaffolding for the models in this file.

    Resolves the checkpoint path and TensorBoard log directory under
    *workspace* and provides a shared ``fit`` loop with checkpointing.
    """

    def __init__(self, workspace, model_name):
        """
        :param workspace: root directory holding ``weights/`` and ``logs/``.
        :param model_name: basename for the weights file and log directory.
        """
        self.weights_path = os.path.join(workspace, "weights",
                                         "%s.h5" % model_name)
        self.log_dir = os.path.join(workspace, "logs", model_name)
        # makedirs (rather than a single mkdir of *workspace*): the Keras
        # ModelCheckpoint callback does not create missing parent directories,
        # so the weights/ and logs/ subdirectories must exist before training.
        # exist_ok also makes re-runs and nested workspace paths safe.
        os.makedirs(os.path.dirname(self.weights_path), exist_ok=True)
        os.makedirs(self.log_dir, exist_ok=True)

    def create_model(self):
        # Subclasses override this to build (and optionally compile)
        # self.model.
        self.model = None

    def fit(self, train_gen, valid_gen, batch_size, nb_epochs):
        """Train ``self.model`` with ``fit_generator``, checkpointing the
        best-val_loss weights and logging to TensorBoard.

        NOTE(review): *batch_size* is not forwarded anywhere — the
        TensorBoard callback hard-codes batch_size=10; confirm whether it
        should use the argument instead.
        """
        callback_list = [
            # Keep only the best weights as measured by validation loss.
            keras.callbacks.ModelCheckpoint(
                self.weights_path,
                monitor="val_loss",
                save_best_only=True,
                mode='min',
                save_weights_only=True,
                verbose=2),
            keras.callbacks.TensorBoard(
                log_dir=self.log_dir,
                histogram_freq=10,
                write_grads=False,
                write_graph=False,
                write_images=False,
                batch_size=10)
        ]
        self.model.fit_generator(
            generator=train_gen,
            steps_per_epoch=2000,
            epochs=nb_epochs,
            verbose=1,
            callbacks=callback_list,
            validation_data=valid_gen,
            validation_steps=20,
            max_queue_size=6,
            workers=6,
            use_multiprocessing=True,
            shuffle=True)
class ArcFace(BaseModel):
    """ArcFace-style classifier: backbone embedding followed by additive
    angular-margin logits (ArcfaceLoss). The gradient flows through the
    in-graph "softmax_loss" output; the softmax "prediction" output has
    loss weight 0 and is used only for the accuracy metric.
    """

    def __init__(self, backbone, workspace, model_name):
        # backbone: callable mapping an image tensor to an embedding tensor.
        super(ArcFace, self).__init__(workspace, model_name)
        self.backbone = backbone
        # Currently unused here: the weight-norm optimizer path below is
        # commented out.
        self.weight_norm = False

    def create_model(self,
                     _compile=True,
                     load_weights=False,
                     weights_path=None):
        """Build ``self.model`` taking [images, labels] and producing
        [prediction, loss]; optionally compile and/or load weights.
        Returns ``self`` for chaining.
        """
        images = keras.layers.Input(shape=(H, W, C))
        labels_ = keras.layers.Input(shape=(1, ), dtype='int32')
        # Drop the trailing singleton axis: (batch, 1) -> (batch,).
        labels = keras.layers.Lambda(
            lambda x: keras.backend.squeeze(x, axis=1),
            name="squeeze")(labels_)
        embedding = self.backbone(images)
        # Margin-penalized logits (m: angular margin, s: scale).
        logit = ArcfaceLoss(
            nb_classes, m=0.5, s=64., name="arcface_loss")([embedding, labels])
        # print(keras.backend.int_shape(logit))
        pred = keras.layers.Softmax(name="prediction")(logit)
        # print(keras.backend.int_shape(pred))
        # Cross-entropy computed inside the graph from logits + labels.
        loss = keras.layers.Lambda(
            inference_loss, name="softmax_loss")([logit, labels])
        self.model = keras.Model(
            inputs=[images, labels_], outputs=[pred, loss])
        if _compile:
            # compilation
            # if self.weight_norm:
            #     opt = wn.AdamWithWeightnorm(lr=self.learning_rate)
            # else:
            #     opt = keras.optimizers.Adam(
            #         lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
            opt = keras.optimizers.SGD(lr=1e-4, momentum=0.9, decay=0.)
            # loss_weights=[0, 1]: only the in-graph softmax_loss trains the
            # net; identity_loss simply passes that output through.
            self.model.compile(
                optimizer=opt,
                loss=["categorical_crossentropy", identity_loss],
                metrics=dict(prediction="accuracy"),
                loss_weights=[0, 1])
            # if self.weight_norm and self.num_init_batches:
            #     model_weightnorm_init(self.model, tr_gen, self.num_init_batches)
        if load_weights:
            weights_path = self.weights_path if weights_path is None else weights_path
            self.model.load_weights(weights_path, by_name=True)
        return self
class CenterLossNet(BaseModel):
    """Softmax classifier with an auxiliary center loss: a learnable center
    per class (Embedding layer) is pulled toward each sample's embedding via
    a squared-L2 penalty weighted by lambda_c.
    """

    def __init__(self, backbone, workspace, model_name):
        # backbone: callable mapping an image tensor to an embedding tensor.
        super(CenterLossNet, self).__init__(workspace, model_name)
        self.backbone = backbone

    def create_model(self,
                     _compile=True,
                     use_weightnorm=False,
                     database_init=False,
                     data=None,
                     lambda_c=0.2,
                     load_weights=False,
                     weights_path=None):
        """Build ``self.model`` taking [images, labels] and producing
        [prediction, l2_loss]; optionally compile and/or load weights.
        Returns ``self`` for chaining.
        """
        images = keras.layers.Input(shape=(H, W, C))
        labels = keras.layers.Input(shape=(1, ), dtype='int32')
        # labels = keras.layers.Lambda(
        #     lambda x: keras.backend.squeeze(x, axis=1), name="squeeze")(labels_)
        # Learnable per-class centers, looked up by integer label.
        centers = keras.layers.Embedding(nb_classes, feature_size)(labels)
        embedding = self.backbone(images)
        # Squared L2 distance between the embedding and its class center;
        # x[1][:, 0] drops the length-1 sequence axis from the Embedding.
        l2_loss = keras.layers.Lambda(
            lambda x: keras.backend.sum(
                keras.backend.square(x[0] - x[1][:, 0]), 1, keepdims=True),
            name='l2_loss')([embedding, centers])
        out = keras.layers.Dense(
            nb_classes, activation="softmax", name="prediction")(embedding)
        self.model = keras.Model(
            inputs=[images, labels], outputs=[out, l2_loss])
        if _compile:
            # compilation
            if use_weightnorm:
                opt = wn.AdamWithWeightnorm(lr=lr)
            else:
                opt = keras.optimizers.Adam(
                    lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
            # opt = keras.optimizers.SGD(lr=1e-4, momentum=0.9, decay=0.)
            # NOTE(review): the module header says labels are not one-hot,
            # but the prediction head uses "categorical_crossentropy" with
            # weight 1 — confirm the generator one-hot encodes targets or
            # that sparse_categorical_crossentropy is intended.
            self.model.compile(
                optimizer=opt,
                loss=["categorical_crossentropy", identity_loss],
                metrics=dict(prediction="accuracy"),
                loss_weights=[1, lambda_c])
            if use_weightnorm and database_init:
                wn.data_based_init(self.model, data)
        if load_weights:
            weights_path = self.weights_path if weights_path is None else weights_path
            self.model.load_weights(weights_path, by_name=True)
        return self

    def get_embedding(self, ):
        # Sub-model from the image input to the pre-softmax embedding (the
        # input of the "prediction" Dense layer). Relies on Keras' automatic
        # layer names ("input_1") — fragile if the graph changes.
        return keras.Model(self.model.get_layer("input_1").output,
                           self.model.get_layer("prediction").input)

    def get_centers(self, ):
        # (N,) integar numpy array
        # Sub-model mapping integer labels to their learned center vectors.
        return keras.Model(
            self.model.get_layer("input_2").input,
            self.model.get_layer("embedding").output)
class Siamese(BaseModel):
    """Siamese network scoring a pair of images with a shared, frozen backbone.

    Four element-wise combinations of the two embeddings feed a small
    per-feature network whose weighted sum is the final match score.
    NOTE(review): relies on module-level globals (H, W, C, feature_size, lr,
    final_active, wn) defined elsewhere in this file.
    """

    def __init__(self, backbone, workspace, model_name):
        super(Siamese, self).__init__(workspace, model_name)
        self.backbone = backbone

    def create_branch(self, ):
        """Wrap the shared backbone as a standalone feature extractor."""
        init = keras.layers.Input((H, W, C))
        out = self.backbone(init)
        return keras.Model(init, out)

    def create_model(self,
                     _compile=True,
                     use_weightnorm=False,
                     database_init=False,
                     data=None,
                     lambda_c=0.2,
                     load_weights=False,
                     weights_path=None):
        """Assemble (and optionally compile / warm-start) the siamese model.

        Returns self so calls can be chained.
        """
        i1 = keras.layers.Input((H, W, C))
        i2 = keras.layers.Input((H, W, C))
        branch = self.create_branch()
        branch.trainable = False  # backbone weights stay fixed
        xa_inp = branch(i1)
        xb_inp = branch(i2)
        # Four symmetric combinations of the two embeddings.
        x1 = keras.layers.Lambda(lambda x: x[0] * x[1])([xa_inp, xb_inp])
        x2 = keras.layers.Lambda(lambda x: x[0] + x[1])([xa_inp, xb_inp])
        x3 = keras.layers.Lambda(lambda x: keras.backend.abs(x[0] - x[1]))(
            [xa_inp, xb_inp])
        x4 = keras.layers.Lambda(lambda x: keras.backend.square(x))(x3)
        x = keras.layers.Concatenate()([x1, x2, x3, x4])
        x = keras.layers.Reshape((4, feature_size, 1), name='reshape1')(x)
        # Per feature NN with shared weight is implemented using CONV2D with appropriate stride.
        x = keras.layers.Conv2D(
            32, (4, 1), activation='relu', padding='valid')(x)
        x = keras.layers.Reshape((feature_size, 32, 1))(x)
        x = keras.layers.Conv2D(
            1, (1, 32), activation='linear', padding='valid')(x)
        x = keras.layers.Flatten(name='flatten')(x)
        # Weighted sum implemented as a Dense layer.
        score = keras.layers.Dense(
            1, use_bias=True, activation=final_active,
            name='weighted-average')(x)
        self.model = keras.Model([i1, i2], score)
        if _compile:
            # compilation
            if use_weightnorm:
                opt = wn.AdamWithWeightnorm(lr=lr)
            else:
                opt = keras.optimizers.Adam(
                    lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
            # opt = keras.optimizers.SGD(lr=1e-4, momentum=0.9, decay=0.)
            # BUG FIX: the model's only output is named 'weighted-average'.
            # The original metrics dict keyed on a non-existent 'prediction'
            # output and used the layer name as the metric name; track
            # accuracy on the real output instead.
            self.model.compile(
                optimizer=opt,
                loss=["binary_crossentropy"],
                metrics={"weighted-average": "accuracy"},
                loss_weights=[1])
        if use_weightnorm and database_init:
            wn.data_based_init(self.model, data)
        if load_weights:
            # Explicit path wins; otherwise fall back to the workspace default.
            weights_path = self.weights_path if weights_path is None else weights_path
            self.model.load_weights(weights_path, by_name=True)
        return self
if __name__ == "__main__":
    from backbones import Vgg16, resnet50, siamese
    from data import rgb2ycbcr, ImageDataLabelGenerator

    # Augmented training generator; 10% of the data is held out for validation.
    train_data_gen = ImageDataLabelGenerator(
        samplewise_center=True,
        samplewise_std_normalization=True,
        zca_whitening=False,
        zca_epsilon=1e-6,
        rotation_range=16,
        width_shift_range=0.2,
        height_shift_range=0.1,
        zoom_range=0.2,
        fill_mode='reflect',
        horizontal_flip=True,
        vertical_flip=False,
        preprocessing_function=rgb2ycbcr,
        rescale=1. / 255,
        validation_split=0.1)
    train_gen = train_data_gen.flow_from_directory(
        "../Dataset/Train",
        target_size=(H, W),
        color_mode="rgb",
        class_mode="sparse",
        batch_size=16,
        shuffle=True,
        interpolation="bicubic",
        subset='training')
    valid_gen = train_data_gen.flow_from_directory(
        "../Dataset/Train",
        target_size=(H, W),
        color_mode='rgb',
        class_mode='sparse',
        batch_size=10,
        subset="validation",
        shuffle=True,
        interpolation="bicubic")
    # Warm-start the siamese net from CenterLossNet weights (matched by layer
    # name via by_name=True inside create_model).
    # NOTE(review): `lambda_c` is not bound in this scope -- presumably a
    # module-level constant defined earlier in the file; confirm.
    model = Siamese(siamese, "./trainSpace/", "SiameseNet").create_model(
        _compile=True,
        use_weightnorm=False,
        database_init=False,
        load_weights=True,
        weights_path="./trainSpace/weights/CenterLossNet.h5",
        lambda_c=lambda_c)
    # model.fit(train_gen, valid_gen, batch_size=10, nb_epochs=10000)
| 36.315436 | 96 | 0.570597 | 9,014 | 0.832933 | 0 | 0 | 0 | 0 | 0 | 0 | 1,679 | 0.155147 |
9f9188caeea1a7ed3cadeea7fb8917d2d8703de3 | 102 | py | Python | esercizi/stringaWhile.py | gdv/python-alfabetizzazione | d87561222de8a230db11d8529c49cf1702aec326 | [
"MIT"
] | null | null | null | esercizi/stringaWhile.py | gdv/python-alfabetizzazione | d87561222de8a230db11d8529c49cf1702aec326 | [
"MIT"
] | null | null | null | esercizi/stringaWhile.py | gdv/python-alfabetizzazione | d87561222de8a230db11d8529c49cf1702aec326 | [
"MIT"
] | 1 | 2019-03-26T11:14:33.000Z | 2019-03-26T11:14:33.000Z | stringa = 'corso di Informatica'
i = 0
while (i < len(stringa)):
print stringa[i]
i = i + 1
| 12.75 | 32 | 0.578431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.215686 |
9f91c475aa5095f37f59eced571f625fdafc3e96 | 1,383 | py | Python | usbmon/capture/tests/test_usbpcap.py | Flameeyes/usbmon-tools | f373e12da338135b14bcd4bec2e04ec60b27014b | [
"Unlicense",
"Apache-2.0",
"MIT"
] | 17 | 2019-02-12T00:24:55.000Z | 2022-01-18T18:07:06.000Z | usbmon/capture/tests/test_usbpcap.py | Flameeyes/usbmon-tools | f373e12da338135b14bcd4bec2e04ec60b27014b | [
"Unlicense",
"Apache-2.0",
"MIT"
] | 3 | 2019-02-12T16:52:04.000Z | 2021-05-14T17:41:17.000Z | usbmon/capture/tests/test_usbpcap.py | Flameeyes/usbmon-tools | f373e12da338135b14bcd4bec2e04ec60b27014b | [
"Unlicense",
"Apache-2.0",
"MIT"
] | 1 | 2020-04-10T09:56:04.000Z | 2020-04-10T09:56:04.000Z | # python
#
# Copyright 2020 The usbmon-tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
"""Tests for usbmon.capture.usbpcap."""
import os
from absl.testing import absltest
import usbmon.addresses
import usbmon.pcapng
class TestUsbpcap(absltest.TestCase):
    """Smoke test: parse a usbpcap capture into a usbmon session."""

    def test_parse(self):
        # The fixture lives in the repository's shared testdata directory.
        capture_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../../../testdata/usbpcap1.pcap",
        )
        session = usbmon.pcapng.parse_file(capture_path)

        self.assertLen(list(session), 498)
        self.assertLen(session.device_descriptors, 1)
        (device_descriptor,) = session.device_descriptors.values()
        self.assertEqual(
            device_descriptor.address, usbmon.addresses.DeviceAddress(1, 1)
        )
        self.assertEqual(device_descriptor.vendor_id, 0x0627)
| 30.733333 | 75 | 0.695589 | 608 | 0.439624 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.502531 |
9f92fb8ab712b3379f7765bfad5d1727d68d45f0 | 4,055 | py | Python | 10 - SQLAlchemy/app.py | cce24/Homework | 75e2d4a61712d2fba1fdfe75ab4d273a4d24d98b | [
"MIT"
] | null | null | null | 10 - SQLAlchemy/app.py | cce24/Homework | 75e2d4a61712d2fba1fdfe75ab4d273a4d24d98b | [
"MIT"
] | null | null | null | 10 - SQLAlchemy/app.py | cce24/Homework | 75e2d4a61712d2fba1fdfe75ab4d273a4d24d98b | [
"MIT"
] | null | null | null | #Import Dependencies
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#Set Engine
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={'check_same_thread': False})
#Set Base
Base = automap_base()
Base.prepare(engine, reflect=True)
#Examine Base Keys
Base.classes.keys()
#Use Keys
Measurement = Base.classes.measurement
Station = Base.classes.station
#Bind Session to Python App and Database
session = Session(engine)
#Set Flask Weather App
app = Flask(__name__)
#Set Routes
@app.route("/")
def home():
    """List every available API route as a simple HTML string."""
    routes = [
        "/api/v1.0/precipitaton",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/<startDate>",
        "/api/v1.0//api/v1.0/<startDate>/<endDate>",
    ]
    return "Available Routes:<br/>" + "<br/>".join(routes) + "<br/>"
# Most recent measurement date in the dataset ('YYYY-MM-DD' string -> datetime).
last_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
last_date = list(np.ravel(last_date))[0]
last_date = dt.datetime.strptime(last_date, '%Y-%m-%d')
# Split the last date into its components.
last_year = int(dt.datetime.strftime(last_date, '%Y'))
last_month = int(dt.datetime.strftime(last_date, '%m'))
last_day = int(dt.datetime.strftime(last_date, '%d'))
# Date string exactly 365 days before the last measurement; used by the
# precipitation and tobs routes to select the final year of data.
prev_year = dt.date(last_year, last_month, last_day) - dt.timedelta(days=365)
prev_year = dt.datetime.strftime(prev_year, '%Y-%m-%d')
@app.route("/api/v1.0/precipitaton")
def precipitation():
    """Return the last year of precipitation readings as JSON."""
    query = (session.query(Measurement.date, Measurement.prcp, Measurement.station)
             .filter(Measurement.date > prev_year)
             .order_by(Measurement.date))
    records = [{row.date: row.prcp, "Station": row.station} for row in query.all()]
    return jsonify(records)
@app.route("/api/v1.0/stations")
def stations():
    """Return the list of station names as JSON."""
    names = session.query(Station.name).all()
    return jsonify(list(np.ravel(names)))
@app.route("/api/v1.0/tobs")
def tobs():
    """Return the last year of temperature observations as JSON."""
    query = (session.query(Measurement.date, Measurement.tobs, Measurement.station)
             .filter(Measurement.date > prev_year)
             .order_by(Measurement.date))
    records = [{row.date: row.tobs, "Station": row.station} for row in query.all()]
    return jsonify(records)
@app.route('/api/v1.0/<startDate>')
def start(startDate):
    """Return min/avg/max temperature per date from startDate onward."""
    columns = [Measurement.date, func.min(Measurement.tobs),
               func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    rows = (session.query(*columns)
            .filter(func.strftime("%Y-%m-%d", Measurement.date) >= startDate)
            .group_by(Measurement.date)
            .all())
    payload = [{"Date": day, "Low Temp": low, "Avg Temp": avg, "High Temp": high}
               for day, low, avg, high in rows]
    return jsonify(payload)
@app.route('/api/v1.0/<startDate>/<endDate>')
def startEnd(startDate, endDate):
    """Return min/avg/max temperature per date between startDate and endDate."""
    columns = [Measurement.date, func.min(Measurement.tobs),
               func.avg(Measurement.tobs), func.max(Measurement.tobs)]
    rows = (session.query(*columns)
            .filter(func.strftime("%Y-%m-%d", Measurement.date) >= startDate)
            .filter(func.strftime("%Y-%m-%d", Measurement.date) <= endDate)
            .group_by(Measurement.date)
            .all())
    payload = [{"Date": day, "Low Temp": low, "Avg Temp": avg, "High Temp": high}
               for day, low, avg, high in rows]
    return jsonify(payload)
if __name__ == "__main__":
    # Development server only; do not use debug=True in production.
    app.run(debug=True)
9f93b5cf954b2bbc0f4e0034fb5c50850b8a9553 | 1,539 | py | Python | DLmodules/usermessage.py | egg0001/telegram-e-hentaiDL-bot | 83200df4c9d35abaf308fc27b2f83d6452f86555 | [
"WTFPL"
] | 7 | 2019-04-07T06:42:09.000Z | 2021-01-11T15:58:28.000Z | DLmodules/usermessage.py | egg0001/telegram-e-hentaiDL-bot | 83200df4c9d35abaf308fc27b2f83d6452f86555 | [
"WTFPL"
] | 1 | 2020-02-24T09:16:54.000Z | 2020-03-07T20:37:15.000Z | DLmodules/usermessage.py | egg0001/telegram-e-hentaiDL-bot | 83200df4c9d35abaf308fc27b2f83d6452f86555 | [
"WTFPL"
] | 2 | 2020-02-03T14:16:55.000Z | 2020-07-09T04:49:01.000Z | #!/usr/bin/python3
#-----------------bot session-------------------
UserCancel = 'You have cancel the process.'
welcomeMessage = ('User identity conformed, please input gallery urls ' +
'and use space to separate them'
)
denyMessage = 'You are not the admin of this bot, conversation end.'
urlComform = ('Received {0} gallery url(s). \nNow begin to download the content. ' +
'Once the download completed, you will receive a report.'
)
magnetLinkConform = ('Received a magnetLink. Now send it to bittorrent client')
magnetResultMessage = ('Torrent {0} has been added bittorrent client.')
urlNotFound = 'Could not find any gallery url, please check and re-input.'
gidError = 'Encountered an error gid: {0}'
#------------------magnet download session---------------------
emptyFileListError = ('Could not retrive file list for this link, maybe still '+
'downloading meta data.')
#-----------------dloptgenerate session---------------------
userCookiesFormError = 'userCookies form error, please check the config file.'
#-----------------managgessiongen session-------------------
usercookiesEXHError = 'This cookies could not access EXH'
#-----------------ehlogin session---------------------------
ehloginError = 'username or password error, please check.'
exhError = 'This username could not access exhentai.'
#------------------download session------------------------
galleryError = 'Gallery does not contain any page, maybe deleted.'
| 34.2 | 84 | 0.59974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,164 | 0.756335 |
9f948b4087a6e7652b5aab9c2862aa71cb819a4e | 1,923 | py | Python | ver1/cnn_crawler.py | euihyeonmoon/RiskManagement | 2555a966ca9472314bee1e0085f5d5bebd4445b2 | [
"MIT"
] | null | null | null | ver1/cnn_crawler.py | euihyeonmoon/RiskManagement | 2555a966ca9472314bee1e0085f5d5bebd4445b2 | [
"MIT"
] | null | null | null | ver1/cnn_crawler.py | euihyeonmoon/RiskManagement | 2555a966ca9472314bee1e0085f5d5bebd4445b2 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import pandas as pd
url_list = []
title_list = []
summary_list = []
date_list = []
# source_list = []
url = 'https://edition.cnn.com/search?size=30&q=terror&sort=newest'
# keyword_list = ['terror']
# for keyword in keyword_list:
# # set the url
# url = 'https://www.google.com/search?q=' + keyword + '&tbm=nws&tbs=qdr:m'
# url_list.append(url)
# print(url_list)
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-logging"])
driver = webdriver.Chrome(options=options)
driver.get(url)
# Scrape one CNN search-results page with selenium, then advance to the next.
def cnn_crawl():
    """Append this page's titles/summaries/dates to the module-level lists,
    click "next page", and return the (shared, cumulative) three lists.
    """
    time.sleep(2)  # let the page render
    # get the title
    title = driver.find_elements_by_css_selector(".cnn-search__result-headline")
    for _ in title:
        title_list.append(_.text)
    # get the summary
    summary = driver.find_elements_by_css_selector('.cnn-search__result-body')
    for _ in summary:
        summary_list.append(_.text)
    # get the date
    date = driver.find_elements_by_css_selector('.cnn-search__result-publish-date')
    for _ in date:
        date_list.append(_.text)
    print('Crawling...')
    # click the next page (the last matching arrow is the right-hand one)
    next_button = driver.find_elements_by_css_selector('.pagination-arrow.pagination-arrow-right.cnnSearchPageLink.text-active')
    next_button = next_button[-1]
    next_button.click()
    time.sleep(10)  # wait for the next page to load
    return title_list, summary_list, date_list
# Crawl the configured number of pages, then persist the results.
# BUG FIX: the original called cnn_crawl() three separate times per iteration
# (once for each returned list), which clicked "next page" three times and
# collected mismatched title/summary/date data.  One call, tuple-unpacked.
for i in range(1, 2):
    title_list, summary_list, date_list = cnn_crawl()
driver.close()
data = pd.DataFrame({'Title': title_list, 'Summary': summary_list, 'Date': date_list})
print(data)
data.to_excel('cnn.xlsx', index=False)
print("Crawling is Done")
| 29.136364 | 129 | 0.680187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 718 | 0.373375 |
9f94cec9c4b9cf4433cbe35153ebec1175aa197a | 1,964 | py | Python | tests/ec2/test_ec2_clients.py | paulhutchings/beartype-boto3-example | d69298d9444d578799e2a17cb63de11474b2278a | [
"MIT"
] | 3 | 2021-11-16T06:21:11.000Z | 2021-11-22T08:59:11.000Z | tests/ec2/test_ec2_clients.py | paulhutchings/beartype-boto3-example | d69298d9444d578799e2a17cb63de11474b2278a | [
"MIT"
] | 9 | 2021-11-19T03:29:00.000Z | 2021-12-30T23:54:47.000Z | tests/ec2/test_ec2_clients.py | paulhutchings/beartype-boto3-example | d69298d9444d578799e2a17cb63de11474b2278a | [
"MIT"
] | null | null | null | import pytest
from bearboto3.ec2 import (
EC2Client,
EC2ServiceResource,
)
from beartype import beartype
from beartype.roar import (
BeartypeCallHintPepParamException,
BeartypeCallHintPepReturnException,
BeartypeDecorHintPep484585Exception,
)
# ============================
# EC2Client
# ============================
def test_ec2_client_arg_pass(gen_ec2_client):
    """An EC2 client is accepted by a parameter annotated as EC2Client."""
    @beartype
    def consume(client: EC2Client):
        pass

    consume(gen_ec2_client)  # must not raise
def test_ec2_client_arg_fail(gen_s3_client):
    """An S3 client passed where EC2Client is expected raises a beartype error."""
    with pytest.raises(BeartypeCallHintPepParamException):
        @beartype
        def consume(client: EC2Client):
            pass

        consume(gen_s3_client)
def test_ec2_client_return_pass(gen_ec2_client):
    """Returning an EC2 client from a function annotated -> EC2Client passes."""
    @beartype
    def produce() -> EC2Client:
        return gen_ec2_client

    produce()  # must not raise
def test_ec2_client_return_fail(gen_s3_client):
    """Returning an S3 client from a -> EC2Client function raises a beartype error."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):
        @beartype
        def produce() -> EC2Client:
            return gen_s3_client

        produce()
# ============================
# EC2ServiceResource
# ============================
def test_ec2_resource_arg_pass(gen_ec2_resource):
    """An EC2 resource is accepted by a parameter annotated as EC2ServiceResource."""
    @beartype
    def consume(resource: EC2ServiceResource):
        pass

    consume(gen_ec2_resource)  # must not raise
def test_ec2_resource_arg_fail(gen_s3_resource):
    """An S3 resource passed where EC2ServiceResource is expected raises."""
    with pytest.raises(BeartypeCallHintPepParamException):
        @beartype
        def consume(resource: EC2ServiceResource):
            pass

        consume(gen_s3_resource)
def test_ec2_resource_return_pass(gen_ec2_resource):
    """Returning an EC2 resource from a -> EC2ServiceResource function passes."""
    @beartype
    def produce() -> EC2ServiceResource:
        return gen_ec2_resource

    produce()  # must not raise
def test_ec2_resource_return_fail(gen_s3_resource):
    """Returning an S3 resource from a -> EC2ServiceResource function raises."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):
        @beartype
        def produce() -> EC2ServiceResource:
            return gen_s3_resource

        produce()
| 20.040816 | 81 | 0.658859 | 0 | 0 | 0 | 0 | 558 | 0.284114 | 0 | 0 | 151 | 0.076884 |
9f952aae828334e0efd2771e093d18d52c4ad4a7 | 230 | py | Python | solutions/130_solution_05.py | UFResearchComputing/py4ai | db7f80614f26274ec18556d56ea9f549c463165a | [
"CC-BY-4.0"
] | null | null | null | solutions/130_solution_05.py | UFResearchComputing/py4ai | db7f80614f26274ec18556d56ea9f549c463165a | [
"CC-BY-4.0"
] | null | null | null | solutions/130_solution_05.py | UFResearchComputing/py4ai | db7f80614f26274ec18556d56ea9f549c463165a | [
"CC-BY-4.0"
] | 1 | 2021-04-27T09:50:54.000Z | 2021-04-27T09:50:54.000Z | def first_negative(values):
for v in values:
if v<0:
return v
# If an empty list is passed to this function, it returns `None`:
my_list = []
print(first_negative(my_list)
| 23 | 66 | 0.552174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.286957 |
9f95821914a38c9e5fa3635e4ff975b40a81a782 | 680 | py | Python | .config/qtile/modules/core/screens.py | jx11r/dotfiles | fcc925dd184892198deae60e4113c215fccfa7a9 | [
"MIT"
] | 7 | 2021-09-11T00:27:23.000Z | 2022-01-26T12:50:31.000Z | .config/qtile/modules/core/screens.py | jx11r/dotfiles | fcc925dd184892198deae60e4113c215fccfa7a9 | [
"MIT"
] | null | null | null | .config/qtile/modules/core/screens.py | jx11r/dotfiles | fcc925dd184892198deae60e4113c215fccfa7a9 | [
"MIT"
] | 2 | 2022-01-09T16:51:41.000Z | 2022-01-26T21:35:05.000Z | # --==[ Screens ]==--
from libqtile import bar
from libqtile.config import Screen
from ..utils.settings import wallpaper
from ..extras.widgets import widgets
screens = [
Screen(
wallpaper = wallpaper,
wallpaper_mode = 'fill',
top = bar.Bar(
# Source in widgets.py
widgets,
# Bar Size
20,
# Background Color
background = "#00000000",
# Margin
margin = [9, 15, 0, 15],
# Transparency
opacity = 1,
),
),
Screen(
wallpaper = wallpaper,
wallpaper_mode = 'fill',
# top = bar.Bar()
),
]
| 18.378378 | 38 | 0.489706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.195588 |
9f9586cf1080091750ae0d67e05db74d5efd5f68 | 1,942 | py | Python | MovieSerieTorrent/renamer.py | JonathanPetit/Parser-Renamer-torrentfile | 3193b0544348e17c527f921e34283eb7ea66b01b | [
"MIT"
] | 7 | 2016-04-21T21:16:29.000Z | 2021-11-20T13:16:21.000Z | MovieSerieTorrent/renamer.py | JonathanPetit/Parser-Renamer-torrentfile | 3193b0544348e17c527f921e34283eb7ea66b01b | [
"MIT"
] | 5 | 2016-03-03T23:41:08.000Z | 2016-03-05T13:11:34.000Z | MovieSerieTorrent/renamer.py | JonathanPetit/Parser-Renamer-torrentfile | 3193b0544348e17c527f921e34283eb7ea66b01b | [
"MIT"
] | 3 | 2019-04-03T19:00:42.000Z | 2021-06-08T17:03:06.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage:
>>> from renamer import Renamer
>>> Renamer().rename(file)
"""
import os
from fuzzywuzzy import fuzz
try:
from parser import Parser
except:
from .parser import Parser
class Renamer:
def __init__(self):
self.infos = None
self.excess = None
self.parse_file = None
self.rename_file = []
self.compteur = 0
self.filename = None
def extract(self, files):
self.parse_file = Parser().parse(files)
self.infos = self.parse_file[0]
self.excess = self.parse_file[1]
if self.infos['type'] == 'serie':
self.rename_file = ['{title}', ' {season}{episode} ', '-{languages}-', '{quality}', '.{extension}']
return self.rename_file
else:
self.rename_file = ['{title}', ' {Part}', ' ({year})', '-{languages}-', '{quality}', '.{extension}']
return self.rename_file
def preview(self, files):
self.rename_file = self.extract(files)
# Build liste for filename
for elements in self.rename_file:
try:
self.rename_file[self.compteur] = self.rename_file[self.compteur].format(**self.infos)
except KeyError:
self.rename_file[self.compteur] = ''
self.compteur += 1
# Build filename
for element in self.rename_file:
if element == '':
self.rename_file.remove('')
# Rename
self.filename = ''.join(self.rename_file)
return self.filename
def renaming(self, path, filename):
filename = self.preview(filename)
for element in os.listdir(path):
if fuzz.token_set_ratio(filename, element) == 100:
path_file = os.path.join(path, element)
target = os.path.join(path, filename)
os.rename(path_file, target)
| 28.558824 | 112 | 0.564882 | 1,690 | 0.870237 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.175592 |
9f9602b2224dec858d62feca228f9bec5c6a8d6a | 2,320 | py | Python | tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py | sswapnil2/transform | 54561ddb357ef752153dd569aad7cc2651b38ac2 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py | sswapnil2/transform | 54561ddb357ef752153dd569aad7cc2651b38ac2 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py | sswapnil2/transform | 54561ddb357ef752153dd569aad7cc2651b38ac2 | [
"Apache-2.0"
] | 1 | 2020-04-07T23:48:26.000Z | 2020-04-07T23:48:26.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_metadata_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# GOOGLE-INITIALIZATION
import apache_beam as beam
from tensorflow_transform.beam.tft_beam_io import beam_metadata_io
from tensorflow_transform.beam.tft_beam_io import test_metadata
from tensorflow_transform.tf_metadata import metadata_io
import unittest
from tensorflow.python.framework import test_util
class BeamMetadataIoTest(test_util.TensorFlowTestCase):
    """Round-trip tests for the WriteMetadata PTransform."""

    def testWriteMetadataNonDeferred(self):
        # Write metadata to disk using WriteMetadata PTransform.
        with beam.Pipeline() as pipeline:
            path = self.get_temp_dir()
            _ = (test_metadata.COMPLETE_METADATA
                 | beam_metadata_io.WriteMetadata(path, pipeline))
        # Load from disk and check that it is as expected.
        metadata = metadata_io.read_metadata(path)
        self.assertEqual(metadata, test_metadata.COMPLETE_METADATA)

    def testWriteMetadataDeferred(self):
        # Write metadata to disk using WriteMetadata PTransform, combining
        # incomplete metadata with (deferred) complete metadata.
        with beam.Pipeline() as pipeline:
            path = self.get_temp_dir()
            deferred_metadata = pipeline | 'CreateDeferredMetadata' >> beam.Create(
                [test_metadata.COMPLETE_METADATA])
            metadata = beam_metadata_io.BeamDatasetMetadata(
                test_metadata.INCOMPLETE_METADATA, deferred_metadata)
            _ = metadata | beam_metadata_io.WriteMetadata(path, pipeline)
        # Load from disk and check that it is as expected.
        metadata = metadata_io.read_metadata(path)
        self.assertEqual(metadata, test_metadata.COMPLETE_METADATA)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 36.825397 | 77 | 0.771983 | 1,221 | 0.526293 | 0 | 0 | 0 | 0 | 0 | 0 | 951 | 0.409914 |
9f97eda032d59779e4adc2cc5c75750de6d24a2c | 723 | py | Python | intermol/forces/torsion_torsion_CMAP.py | jpthompson17/InterMol | 9289ea770bc2d069482bf5fa74dceea4c14d0e4b | [
"MIT"
] | null | null | null | intermol/forces/torsion_torsion_CMAP.py | jpthompson17/InterMol | 9289ea770bc2d069482bf5fa74dceea4c14d0e4b | [
"MIT"
] | null | null | null | intermol/forces/torsion_torsion_CMAP.py | jpthompson17/InterMol | 9289ea770bc2d069482bf5fa74dceea4c14d0e4b | [
"MIT"
] | null | null | null | from intermol.decorators import *
class TorsionTorsionCMAP(object):
    """Torsion-torsion cross term over eight atoms.

    Presumably a CMAP-style correction (per the class name), with `type`
    selecting the interaction type and `chart` the correction chart --
    TODO confirm against the force-field documentation.
    """
    @accepts_compatible_units(None, None, None, None, None, None, None, None,
                              None, None)
    def __init__(self, atom1, atom2, atom3, atom4, atom5, atom6, atom7, atom8, type, chart):
        """Store the eight atom references plus the type and chart indices."""
        self.type = type
        self.atom1 = atom1
        self.atom2 = atom2
        self.atom3 = atom3
        self.atom4 = atom4
        self.atom5 = atom5
        self.atom6 = atom6
        self.atom7 = atom7
        self.atom8 = atom8
        self.chart = chart

    def getparameters(self):
        # Return all constructor arguments, atoms first, as a single tuple.
        return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.atom6, self.atom7, self.atom8, self.type, self.chart)
| 30.125 | 134 | 0.605809 | 687 | 0.950207 | 0 | 0 | 483 | 0.66805 | 0 | 0 | 15 | 0.020747 |
9f986ec10f3e78183b662ab2929af55d8a60b9a2 | 3,671 | py | Python | opentech/apply/review/tests/test_models.py | JakabGy/hypha | 32634080ba1cb369f07f27f6616041e4eca8dbf2 | [
"BSD-3-Clause"
] | null | null | null | opentech/apply/review/tests/test_models.py | JakabGy/hypha | 32634080ba1cb369f07f27f6616041e4eca8dbf2 | [
"BSD-3-Clause"
] | null | null | null | opentech/apply/review/tests/test_models.py | JakabGy/hypha | 32634080ba1cb369f07f27f6616041e4eca8dbf2 | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from opentech.apply.funds.tests.factories import ApplicationSubmissionFactory
from .factories import ReviewFactory, ReviewOpinionFactory
from ..options import MAYBE, NO, YES
class TestReviewQueryset(TestCase):
    """Exercises reviews.recommendation() aggregation.

    Per the cases below: unanimous YES -> YES, unanimous default (NO) -> NO,
    and any mixture -- including a disagreeing opinion on an otherwise
    unanimous set -- collapses to MAYBE.
    """

    def test_reviews_yes(self):
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewFactory(recommendation_yes=True, submission=submission)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, YES)

    def test_reviews_no(self):
        submission = ApplicationSubmissionFactory()
        ReviewFactory(submission=submission)
        ReviewFactory(submission=submission)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, NO)

    def test_reviews_maybe(self):
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_maybe=True, submission=submission)
        ReviewFactory(recommendation_maybe=True, submission=submission)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, MAYBE)

    def test_reviews_mixed(self):
        # One YES plus one default review is not unanimous -> MAYBE.
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewFactory(submission=submission)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, MAYBE)

    def test_review_yes_opinion_agree(self):
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, YES)

    def test_review_yes_opinion_disagree(self):
        # A disagreeing opinion downgrades a unanimous YES to MAYBE.
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewOpinionFactory(review=review, opinion_disagree=True)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, MAYBE)

    def test_review_no_opinion_agree(self):
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, NO)

    def test_review_no_opinion_disagree(self):
        # A disagreeing opinion downgrades a unanimous NO to MAYBE.
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(submission=submission)
        ReviewOpinionFactory(review=review, opinion_disagree=True)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, MAYBE)

    def test_review_not_all_opinion(self):
        # An opinion on only one of several agreeing reviews keeps YES.
        submission = ApplicationSubmissionFactory()
        ReviewFactory(recommendation_yes=True, submission=submission)
        review = ReviewFactory(recommendation_yes=True, submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, YES)

    def test_review_yes_mixed_opinion(self):
        # Conflicting opinions on the same review -> MAYBE.
        submission = ApplicationSubmissionFactory()
        review = ReviewFactory(submission=submission)
        ReviewOpinionFactory(review=review, opinion_agree=True)
        ReviewOpinionFactory(review=review, opinion_disagree=True)
        recommendation = submission.reviews.recommendation()
        self.assertEqual(recommendation, MAYBE)
| 45.8875 | 78 | 0.752928 | 3,460 | 0.942522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9f99023ca596a6f107186bc87fd64e3695056346 | 3,677 | py | Python | FPLbot/starting_eleven.py | amosbastian/FantasyPL_bot | e07a501c4f3168f3fe07cf270fc5d938b18a8789 | [
"MIT"
] | 54 | 2019-03-02T16:48:53.000Z | 2022-03-23T10:24:11.000Z | FPLbot/starting_eleven.py | amosbastian/FantasyPL_bot | e07a501c4f3168f3fe07cf270fc5d938b18a8789 | [
"MIT"
] | 7 | 2019-02-12T23:26:40.000Z | 2021-11-19T09:39:52.000Z | FPLbot/starting_eleven.py | amosbastian/FantasyPL_bot | e07a501c4f3168f3fe07cf270fc5d938b18a8789 | [
"MIT"
] | 13 | 2019-02-11T15:58:54.000Z | 2021-10-09T07:42:23.000Z | import asyncio
import json
import os
from datetime import datetime, timedelta
import aiohttp
import tweepy
from dateutil.parser import parse
from fpl import FPL, utils
from pymongo import MongoClient
from constants import lineup_markers, twitter_usernames
# Module-level handles: this script's directory and the MongoDB store.
dirname = os.path.dirname(os.path.realpath(__file__))
client = MongoClient()  # default localhost connection
database = client.team_news
def short_name_converter(team_id):
    """Converts a team's ID to their short name.

    IDs 1-20 map to the 2020/21 Premier League club codes; None maps to
    None.  Any other value raises KeyError, matching dict-lookup semantics.
    """
    club_codes = [
        "ARS", "AVL", "BHA", "BUR", "CHE", "CRY", "EVE", "FUL",
        "LEI", "LEE", "LIV", "MCI", "MUN", "NEW", "SHU", "SOU",
        "TOT", "WBA", "WHU", "WOL",
    ]
    lookup = {number: code for number, code in enumerate(club_codes, start=1)}
    lookup[None] = None
    return lookup[team_id]
async def get_current_fixtures():
    """Return this gameweek's fixtures whose team-news time is roughly now.

    NOTE(review): assumes fixture.team_news_time is a timezone-aware datetime
    comparable to local datetime.now() once tzinfo is stripped -- confirm
    against the fpl library's fixture model.
    """
    async with aiohttp.ClientSession() as session:
        fpl = FPL(session)
        current_gameweek = await utils.get_current_gameweek(session)
        fixtures = await fpl.get_fixtures_by_gameweek(current_gameweek)
    # Keep only fixtures whose team-news time is within +/- 2 minutes of now.
    min_range = timedelta(minutes=2)
    return [fixture for fixture in fixtures
            if fixture.team_news_time.replace(tzinfo=None) - min_range <
            datetime.now() <
            fixture.team_news_time.replace(tzinfo=None) + min_range]
def is_new_lineup(fixture_id, team_id):
    """True when no lineup for this fixture/team pair is stored yet."""
    selector = {"fixture_id": fixture_id, "team_id": team_id}
    return database.lineup.count_documents(selector) < 1
def add_lineup_to_database(fixture_id, team_id, url):
    """Upsert the lineup image URL for a fixture/team pair.

    BUG FIX: this is a module-level function, so the original `self.database`
    raised NameError; it now uses the module-level `database` handle.
    """
    database.lineup.update_one(
        {"fixture_id": fixture_id},
        {"$set": {"fixture_id": fixture_id,
                  "team_id": team_id,
                  "url": url}},
        upsert=True
    )
def lineup_handler(team_id, team_short_name, opponent_id):
    """Scan a club's three most recent tweets for a lineup announcement image.

    BUG FIX: the original mixed up `status_split` and `split_status`, which
    raised NameError before any marker could match.
    NOTE(review): `api` and `fixture` are not defined in this scope (api is
    local to main(), fixture is a loop variable there); confirm the intended
    wiring with the caller -- as written these resolve only if set as module
    globals elsewhere.
    """
    team_name = twitter_usernames[team_short_name]
    for status in api.user_timeline(screen_name=team_name,
                                    tweet_mode="extended",
                                    count=3):
        status_split = status.full_text.lower().replace("-", " ").split()
        for marker in lineup_markers:
            # Markers are matched against consecutive word pairs of the tweet.
            if marker in list(zip(status_split, status_split[1:])):
                if "media" not in status.entities:
                    continue
                media = status.entities["media"][0]
                media_url = media["media_url_https"]
                if is_new_lineup(fixture.id, team_id):
                    add_lineup_to_database(fixture.id, team_id, media_url)
                return
async def main(config):
    """Authenticate with Twitter, then check both clubs of every fixture
    whose team-news window is open for freshly announced lineups.

    :param config: dict with the four Twitter API credential keys
    """
    auth = tweepy.OAuthHandler(config["CONSUMER_API_KEY"],
                               config["CONSUMER_API_SECRET_KEY"])
    auth.set_access_token(config["ACCESS_TOKEN"],
                          config["ACCESS_TOKEN_SECRET"])
    # NOTE(review): `api` is a local here, but lineup_handler() references a
    # module-level `api` — confirm how it is meant to be shared.
    api = tweepy.API(auth)

    current_fixtures = await get_current_fixtures()
    # NOTE(review): `images_urls` is never populated or used.
    images_urls = []
    for fixture in current_fixtures:
        team_h_short = short_name_converter(fixture.team_h)
        team_a_short = short_name_converter(fixture.team_a)
        lineup_handler(fixture.team_h, team_h_short, fixture.team_a)
        lineup_handler(fixture.team_a, team_a_short, fixture.team_h)
if __name__ == "__main__":
    # BUG FIX: `json` and `asyncio` are used only here and were never
    # imported anywhere in the module, so this branch raised NameError.
    import asyncio
    import json

    with open(f"{dirname}/../twitter_config.json") as file:
        config = json.loads(file.read())
    try:
        asyncio.run(main(config))
    except AttributeError:
        # asyncio.run() is Python 3.7+; fall back to a manual event loop.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(main(config))
        loop.close()
| 30.139344 | 74 | 0.598858 | 0 | 0 | 0 | 0 | 0 | 0 | 1,158 | 0.314931 | 382 | 0.103889 |
9f993c36fec75e6aca1229cd209317b6f68a3df3 | 4,519 | py | Python | inputoutput/getters.py | den1den/web-inf-ret-ml | 3530a60905769f504207702d91d540d4ac610fb3 | [
"MIT"
] | null | null | null | inputoutput/getters.py | den1den/web-inf-ret-ml | 3530a60905769f504207702d91d540d4ac610fb3 | [
"MIT"
] | null | null | null | inputoutput/getters.py | den1den/web-inf-ret-ml | 3530a60905769f504207702d91d540d4ac610fb3 | [
"MIT"
] | null | null | null | import os
from datetime import date, timedelta
from config import config
from inputoutput.readers import CSVInputReader, InputReader, Input2000Reader
from models.article import Article
from models.tuser import TUser
from models.tweet import Tweet
TWEETS_DIR = os.path.join(config.PCLOUD_DIR, 'tweets')
TWEET_USERS_DIR = os.path.join(config.PCLOUD_DIR, 'users')
ARTICLES_DIR = os.path.join(config.PCLOUD_DIR, 'preprocessed_articles', 'sander_results')
ARTICLES_DIR_BY_DATE = os.path.join(config.PCLOUD_DIR, 'preprocessed_articles', 'sander_results_bydate')
def update_tweets_cache(start_date: date, end_date: date, tweets_cache: dict):
    """Keep ``tweets_cache`` in sync with the inclusive date range.

    Cached days outside the range are evicted; missing days are loaded via
    :func:`get_tweets` (only non-empty days end up cached). Mutates
    ``tweets_cache`` in place and prints a summary line.
    """
    one_day = timedelta(days=1)
    wanted = []
    day = start_date
    while day <= end_date:
        wanted.append(day.strftime('tweets_%Y_%m_%d'))
        day += one_day

    # Evict cached days that fell out of the requested window.
    stale = [key for key in tweets_cache if key not in wanted]
    for key in stale:
        del tweets_cache[key]

    # Load days that are not cached yet; the counter counts load attempts.
    added = 0
    for key in wanted:
        if key in tweets_cache:
            continue
        tweets = get_tweets(filename_prefix=key)
        if len(tweets) > 0:
            tweets_cache[key] = tweets
        added += 1

    print("update_tweets_cache(removed=%d, added=%d, size=%d)" % (len(stale), added, len(tweets_cache)))
def get_tweets_count_by_date(start_date: date, end_date: date):
    """Sum the tweet counts of every day in the inclusive date range."""
    total = 0
    current = start_date
    step = timedelta(days=1)
    while current <= end_date:
        total += get_tweet_count(filename_prefix=current.strftime('tweets_%Y_%m_%d'))
        current += step
    return total
def get_tweets_by_date(start_date: date, end_date: date):
    """Concatenate the tweets of every day in the inclusive date range."""
    collected = []
    current = start_date
    step = timedelta(days=1)
    while current <= end_date:
        collected.extend(get_tweets(filename_prefix=current.strftime('tweets_%Y_%m_%d')))
        current += step
    return collected
def get_tweet_count(file_offset=0, dir_path=TWEETS_DIR, filename_prefix=''):
    """
    Count the tweets stored across CSV files.

    Counts whole files with the batched Input2000Reader, then re-reads the
    last file with a plain CSVInputReader to get its exact row count.
    see input.read_json_array_from_files()
    :rtype: int
    """
    from preprocessing.tweet_preprocessor import TweetPreprocessor
    r = Input2000Reader(dir_path, TweetPreprocessor.TWEET_COLUMNS, file_offset=file_offset,
                        filename_prefix=filename_prefix)
    all = r.count_all(to_tweet)
    if all == 0:
        return 0
    # Discount the count attributed to the last file; presumably each file
    # holds up to 20000 rows — TODO confirm against Input2000Reader.
    all -= 20000 - 1
    # really read the last
    # next_file = "%s%d%s" % (r.current_file[:-5], int(r.current_file[-5]) + 1, r.current_file[-4:])
    r = CSVInputReader(dir_path, TweetPreprocessor.TWEET_COLUMNS, filename_prefix=os.path.basename(r.current_file))
    all += len(r.read_all(to_tweet))
    return all
def get_tweets(tweets_n=None, file_offset=0, dir_path=TWEETS_DIR, filename_prefix=''):
    """
    Read in tweets from files.

    :param tweets_n: maximum number of tweets to read (None reads all)
    :param file_offset: number of files to skip before reading
    see input.read_json_array_from_files()
    :rtype: [Tweet]
    """
    from preprocessing.tweet_preprocessor import TweetPreprocessor
    r = CSVInputReader(dir_path, TweetPreprocessor.TWEET_COLUMNS, file_offset=file_offset,
                       filename_prefix=filename_prefix)
    return r.read_all(to_tweet, item_count=tweets_n)
def to_tweet(preprocessed_data):
    """This actually creates the Tweet objects."""
    return Tweet(preprocessed_data)
def get_tusers(users_n=None, file_offset=0, dir_path=TWEET_USERS_DIR, filename_prefix=''):
    """
    Read in twitter user accounts from files.

    :param users_n: maximum number of users to read (None reads all)
    see input.read_json_array_from_files()
    :rtype: [TUser]
    """
    r = InputReader(dir_path, file_offset=file_offset, filename_prefix=filename_prefix)
    return r.read_all(to_tuser, item_count=users_n)
def to_tuser(preprocessed_data):
    """This actually creates the TUser objects."""
    return TUser(preprocessed_data)
def get_articles(articles_n=None, file_offset=0, dir_path=ARTICLES_DIR, filename_prefix=''):
    """
    Read in preprocessed news articles from files.

    :param articles_n: maximum number of articles to read (None reads all)
    see input.read_json_array_from_files()
    :rtype: [Article]
    """
    from preprocessing.article_preprocessor import ArticlePreprocessor
    r = CSVInputReader(dir_path, ArticlePreprocessor.ARTICLE_COLUMNS, file_offset=file_offset, filename_prefix=filename_prefix)
    return r.read_all(to_article, item_count=articles_n)
def get_articles_by_date(articles_n=None, file_offset=0, dir_path=ARTICLES_DIR_BY_DATE, filename_prefix=''):
    """Same as :func:`get_articles`, but defaulting to the by-date folder."""
    return get_articles(
        articles_n=articles_n,
        file_offset=file_offset,
        dir_path=dir_path,
        filename_prefix=filename_prefix,
    )
def to_article(preprocessed_data):
    """This actually creates the Article objects."""
    return Article(preprocessed_data)
| 35.865079 | 127 | 0.714317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 960 | 0.212436 |
9f9a26e3d51680625b1f6c1a4e6e1570c2817e7d | 6,905 | py | Python | src/Config.py | jthomas03/gpSTS | 1f9e5b90cc7e502248c5d3a28552bc4693e21279 | [
"MIT"
] | null | null | null | src/Config.py | jthomas03/gpSTS | 1f9e5b90cc7e502248c5d3a28552bc4693e21279 | [
"MIT"
] | null | null | null | src/Config.py | jthomas03/gpSTS | 1f9e5b90cc7e502248c5d3a28552bc4693e21279 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# John C. Thomas 2021 gpSTS
###########################################
###Configuration File######################
###for gpSTS steering of experiments######
###########################################
import os
import numpy as np
from gpsts.NanonisInterface.nanonis_interface import Nanonis
from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo
from gpsts.NanonisInterface.kernel import kernel_l2
import json
###############################
###Initialize##################
###############################
nanonis_config = {
"Nanonis_Settings": {
"File": "gpSTSinit",
"ExperimentName": "Test Out",
"Version": "0.0.1",
"ImageStart": "test_img001.sxm",
"FolderLocation": "C:\\gpSTS\\src\\",
"DataLocation": "C:\\gpSTS\\src\\data\\",
"Channel": "Z",
"ImDirection": "forward",
"SpectralRange": [-1,1],
"NumSpectralPoints": 1200,
"Center_Point": [174,34],
"Search_Window": 40,
"Feature_Window": 20,
"ScanCurrent": 30e-12,
"SpecCurrent": 200e-12,
"STSbias": "Bias calc (V)",
"STSsignal": "Current (A)"
},
"Neural_Network": {
"TrainingPath": "C:\\gpSTS\\src\\train\\",
"EpochNumber": 2,
"ClassNumber": 4,
"LearningRate": 0.001,
"BatchSizeTrain": 5,
"BatchSizeVal": 1,
"BatchSizeTest": 1
}
}
with open('data/'+str(nanonis_config['Nanonis_Settings']['File'])+'.json','w') as fil:
json.dump(nanonis_config, fil, sort_keys = True, indent = 4, ensure_ascii = False)
Vals = ScanData()
Vals.update_file_info(nanonis_config['Nanonis_Settings']['FolderLocation'],
nanonis_config['Nanonis_Settings']['ImageStart'], nanonis_config['Nanonis_Settings']['Channel'],
nanonis_config['Nanonis_Settings']['ImDirection'])
Vals.update_search_conditions(nanonis_config['Nanonis_Settings']['Center_Point'],
nanonis_config['Nanonis_Settings']['Search_Window'],nanonis_config['Nanonis_Settings']['Feature_Window'],
nanonis_config['Nanonis_Settings']['SpectralRange'])
fil_path, imfile, channel, imdirection = Vals.get_file_info()
try:
imoff, impix, imsize = Nanonis.readheader(fil_path+'data'+'\\',imfile)
except Exception as e:
print('Error. Please save '+str(imfile)+' within '+str(fil_path)+'data\\')
raise e
Vals.update_scan_conditions(imoff, impix, imsize)
imdirectory = fil_path+'data'+'\\'+'impath'
if not os.path.exists(imdirectory):
os.makedirs(imdirectory)
datadirectory = fil_path+'data'
if not os.path.exists(datadirectory):
os.makedirs(datadirectory)
def return_scandata():
    """Accessor for the module-level ScanData instance."""
    return Vals
spec_counter = SpecCounter()
spec_counter.update_maxcnt(10)
def return_cnt():
    """Accessor for the module-level SpecCounter instance."""
    return spec_counter
recorded_points = PointList()
def return_pntlist():
    """Accessor for the module-level PointList of recorded points."""
    return recorded_points
imout = Nanonis.readimage(fil_path+'data'+'\\'+imfile,channel,imdirection)
current_image = ImageInfo(imout)
def return_image():
    """Accessor for the module-level ImageInfo of the current scan image."""
    return current_image
Nanonis.sxm_plot(imout,imdirectory,'current',recorded_points.get_list())
center_point, search_window, feature_window, spec_range = Vals.get_search_conditions()
imx1, imx2 = int((center_point[0]-(feature_window/2))), int((center_point[0]+(feature_window/2)))
imy1, imy2 = int((center_point[1]-(feature_window/2))), int((center_point[1]+(feature_window/2)))
imtrack = imout[imx1:imx2,imy1:imy2]
Nanonis.sxm_plot(imtrack,imdirectory,'feature',recorded_points.get_list())
###############################
###General#####################
###############################
from controls import perform_NanonisExp_BiasSpec, perform_experiment_overlap2
from gpsts.NanonisInterface.graph import plot_2d_function
parameters = {
"x1": {
"element interval": [1,int(impix[0][0])],
},
"x2": {
"element interval": [1,int(impix[0][0])],
},
}
###acquisition functions###
def my_ac_func(x,obj):
    """Acquisition function handed to the GP driver: steers the experiment
    by posterior covariance (pure exploration).
    """
    mean = obj.posterior_mean(x)["f(x)"]
    cov = obj.posterior_covariance(x)["v(x)"]
    sig = obj.shannon_information_gain(x)["sig"]
    ucb = mean + 3.0 * np.sqrt(cov)
    # NOTE(review): mean/sig/ucb are computed but unused — only the
    # covariance is returned. Presumably kept as alternative acquisition
    # strategies; confirm before removing.
    return cov
gaussian_processes = {
"model_1": {
"kernel function": kernel_l2,
"hyperparameters": [1.0,1.0,1.0],
"hyperparameter bounds": [[1.0,100.0],[0.10,100.0],[0.10,100.0]],
"input hyper parameters": [1.0,1.0,1.0],
"output hyper parameters": [1.0],
"input hyper parameter bounds": [[0.01,1000000.0],[0.01,10.0],[0.01,10.0]],
"output hyper parameter bounds":[[0.9,1.1]],
"number of returns": 1,
"dimensionality of return": 1,
"variance optimization tolerance": 0.001,
"adjust optimization threshold": [True,0.1],
"steering mode": "covariance",
"run function in every iteration": None,
"data acquisition function": perform_NanonisExp_BiasSpec,
"acquisition function": my_ac_func,
"objective function": None,
"mean function": None,
"cost function": None,
"cost update function": None,
"cost function parameters": {"offset": 10,"slope":2.0},
"cost function optimization bounds": [[0.0,10.0],[0.0,10.0]],
"cost optimization chance" : 0.1,
"plot function": plot_2d_function,
"acquisition function optimization tolerance": 0.001
},
}
compute_device = "cpu"
sparse = False
compute_inverse = False
initial_likelihood_optimization_method = "global"
training_dask_client = False
prediction_dask_client = False
likelihood_optimization_tolerance = 1e-12
likelihood_optimization_max_iter = 200
automatic_signal_variance_range_determination = True
acquisition_function_optimization_method = "global"
chance_for_local_acquisition_function_optimization = 0.5
acquisition_function_optimization_population_size = 20
acquisition_function_optimization_max_iter = 20
global_likelihood_optimization_at = [200]
hgdl_likelihood_optimization_at = []
local_likelihood_optimization_at = []
breaking_error = 1e-18
########################################
###Variance Optimization################
########################################
objective_function_optimization_population_size = 20
likelihood_optimization_population_size = 20
number_of_suggested_measurements = 1
########################################
###Computation Parameters###############
########################################
global_kernel_optimization_frequency = 0.2
local_kernel_optimization_frequency = 0.5
gpu_acceleration = False
rank_n_update = [False,0.2]
gp_system_solver = "inv" # "inv", "cg" or "minres"
switch_system_solver_to_after = [True, "cg", 5000]
###############################
###DATA ACQUISITION############
###############################
initial_data_set_size = 1
max_number_of_measurements = 10
#####################################################################
###############END###################################################
#####################################################################
| 38.149171 | 109 | 0.629978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,620 | 0.379435 |
9f9abf9bd659e3c5cd886d27c3067e12da0ea781 | 490 | py | Python | src/basic/check_type.py | xxzhwx/hello-python | 83bb01c146049d3c7f7fa9ed007abee054d004ef | [
"MIT"
] | null | null | null | src/basic/check_type.py | xxzhwx/hello-python | 83bb01c146049d3c7f7fa9ed007abee054d004ef | [
"MIT"
] | null | null | null | src/basic/check_type.py | xxzhwx/hello-python | 83bb01c146049d3c7f7fa9ed007abee054d004ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
@author: xxzhwx
'''
from types import IntType
def is_int_type(num):
    """Exact type check: True only for plain ints (bool is excluded).

    BUG FIX: the original compared against ``types.IntType``, which exists
    only in Python 2 (the import fails on Python 3). Comparing against the
    builtin ``int`` is equivalent on both versions.
    """
    # Identity comparison of the type object, so subclasses such as bool
    # do NOT count as int here (unlike isinstance).
    return type(num) is int
def is_int_typeX(num):
    """isinstance-based check: True for ints and int subclasses (incl. bool)."""
    return isinstance(num, int)
# Usage
print(is_int_type(1))
print(is_int_type(1.0))
print(is_int_type(''))
print(is_int_typeX(1))
print(is_int_typeX(1.0))
print(is_int_typeX('')) | 15.3125 | 28 | 0.606122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.247012 |
9f9cd1ac2346cdc01917506089aa45b428ec2b93 | 3,883 | py | Python | hwtLib/handshaked/ramAsHs_test.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | null | null | null | hwtLib/handshaked/ramAsHs_test.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | null | null | null | hwtLib/handshaked/ramAsHs_test.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.hdl.constants import NOP, READ, WRITE
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.simulator.simTestCase import SingleUnitSimTestCase
from hwtLib.common_nonstd_interfaces.addr_data_hs import AddrDataHs
from hwtLib.handshaked.ramAsHs import RamAsHs, RamHsR
from hwtLib.mem.ram import RamSingleClock
from hwtSimApi.constants import CLK_PERIOD
from hwtSimApi.triggers import Timer
class RamWithHs(RamAsHs):
    """RamAsHs adapter wired to a real RamSingleClock; used as the DUT
    in the test cases below."""

    def _declr(self):
        addClkRstn(self)
        with self._paramsShared():
            if self.HAS_R:
                self.r = RamHsR()
            if self.HAS_W:
                self.w = AddrDataHs()
            self.conv = RamAsHs()
        r = self.ram = RamSingleClock()
        # A read-only RAM must come pre-initialised with data; a write-only
        # RAM needs no init data but must expose a write port.
        if not self.HAS_W:
            assert self.INIT_DATA is not None
            assert self.HAS_R
            r.PORT_CNT = (READ,)
        elif not self.HAS_R:
            assert self.HAS_W
            r.PORT_CNT = (WRITE,)

    def _impl(self):
        propagateClkRstn(self)
        # Connect the handshaked ports to the converter and the converter
        # to the RAM's single port.
        if self.HAS_R:
            self.conv.r(self.r)
        if self.HAS_W:
            self.conv.w(self.w)
        self.ram.port[0](self.conv.ram)
class RamAsHs_R_only_TC(SingleUnitSimTestCase):
    """Tests for a read-only RAM (16b data, 8b address) whose cells are
    pre-filled with MAGIC + address."""

    @classmethod
    def getUnit(cls):
        u = cls.u = RamWithHs()
        u.DATA_WIDTH = 16
        u.ADDR_WIDTH = 8
        u.HAS_W = False
        ITEMS = cls.ITEMS = 2 ** u.ADDR_WIDTH
        cls.MAGIC = 99
        # Each cell holds MAGIC + its own address (mod ITEMS).
        u.INIT_DATA = tuple(cls.MAGIC + (i % ITEMS) for i in range(ITEMS))
        return cls.u

    def test_read(self, N=100, randomized=True,):
        """Issue N wrapping sequential reads and compare with INIT_DATA."""
        u = self.u
        t = (10 + N) * CLK_PERIOD
        if randomized:
            self.randomize(u.r.addr)
            self.randomize(u.r.data)
            # Randomised handshakes need more simulation time.
            t *= 3

        # Expected data: INIT_DATA repeated and truncated to N items.
        ref = []
        _N = N
        ITEMS = self.ITEMS
        while True:
            if _N > ITEMS:
                _N -= ITEMS
                ref.extend(u.INIT_DATA)
            else:
                ref.extend(u.INIT_DATA[:_N])
                break

        u.r.addr._ag.data.extend((i % ITEMS) for i in range(N))
        self.runSim(t)
        self.assertValSequenceEqual(u.r.data._ag.data, ref)
class RamAsHs_TC(SingleUnitSimTestCase):
    """Tests for a RAM with both handshaked read and write ports
    (32b data, 8b address)."""

    @classmethod
    def getUnit(cls):
        u = cls.u = RamWithHs()
        u.DATA_WIDTH = 32
        u.ADDR_WIDTH = 8
        return cls.u

    def test_nop(self):
        """No stimulus -> no read data may appear."""
        self.runSim(10 * CLK_PERIOD)
        self.assertEmpty(self.u.r.data._ag.data)

    def test_writeAndRead(self, N=10):
        """Write N items, then read them back from the same addresses."""
        u = self.u
        MAGIC = 87
        u.w._ag.data.extend([(25 + i, MAGIC + i)
                             for i in range(N)])
        # Delay the reads (NOPs) until the writes have surely completed.
        u.r.addr._ag.data.extend([NOP for _ in range(N + 2)] + [25 + i for i in range(N)])
        self.runSim((10 + 2 * N) * CLK_PERIOD)
        self.assertValSequenceEqual(u.r.data._ag.data, [ MAGIC + i for i in range(N)])

    def test_writeAndRead_randomized(self, N=10):
        """Like test_writeAndRead, but with randomised handshakes; reads
        are issued only after the write agent's queue has drained."""
        u = self.u
        MAGIC = 87
        self.randomize(u.w)
        self.randomize(u.r)
        u.w._ag.data.extend([(25 + i, MAGIC + i)
                             for i in range(N)])

        def read():
            # Wait until all writes were consumed, plus a safety margin.
            while u.w._ag.data:
                yield Timer(3 * CLK_PERIOD)
            yield Timer(5 * CLK_PERIOD)
            u.r.addr._ag.data.extend([25 + i for i in range(N)])

        self.procs.append(read())
        self.runSim((8 + N) * 3 * CLK_PERIOD)
        self.assertValSequenceEqual(u.r.data._ag.data, [ MAGIC + i for i in range(N)])
RamAsHs_TCs = [RamAsHs_TC, RamAsHs_R_only_TC]
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(RamAsHs_TC('test_writeAndRead_randomized'))
for tc in RamAsHs_TCs:
suite.addTest(unittest.makeSuite(tc))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| 28.343066 | 90 | 0.5658 | 3,075 | 0.791913 | 609 | 0.156837 | 444 | 0.114345 | 0 | 0 | 114 | 0.029359 |
9f9d44c4798bcd931e4faeef1271674a40360f6f | 1,195 | py | Python | src/zenml/integrations/vertex/constants.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/vertex/constants.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/vertex/constants.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from google.cloud.aiplatform_v1.types.job_state import JobState
# Suffix appended to the region to form the Vertex AI API endpoint host.
VERTEX_ENDPOINT_SUFFIX = "-aiplatform.googleapis.com"
# Seconds to wait between job-status polls.
POLLING_INTERVAL_IN_SECONDS = 30
# Connection errors tolerated before giving up.
CONNECTION_ERROR_RETRY_LIMIT = 5

# Private aliases for the terminal Vertex AI job states.
_VERTEX_JOB_STATE_SUCCEEDED = JobState.JOB_STATE_SUCCEEDED
_VERTEX_JOB_STATE_FAILED = JobState.JOB_STATE_FAILED
_VERTEX_JOB_STATE_CANCELLED = JobState.JOB_STATE_CANCELLED

# States in which a job has finished (successfully or not).
VERTEX_JOB_STATES_COMPLETED = (
    _VERTEX_JOB_STATE_SUCCEEDED,
    _VERTEX_JOB_STATE_FAILED,
    _VERTEX_JOB_STATE_CANCELLED,
)
# States in which a job finished unsuccessfully.
VERTEX_JOB_STATES_FAILED = (
    _VERTEX_JOB_STATE_FAILED,
    _VERTEX_JOB_STATE_CANCELLED,
)
| 36.212121 | 70 | 0.796653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.525523 |
9f9f37cbe8abc431337c426fe206756a84c0965f | 2,226 | py | Python | backend/src/controllers/base_controller.py | tmdt-buw/gideon-ts | b839672fcc19f13562f6da23e6407fff0b18d3ec | [
"MIT"
] | null | null | null | backend/src/controllers/base_controller.py | tmdt-buw/gideon-ts | b839672fcc19f13562f6da23e6407fff0b18d3ec | [
"MIT"
] | null | null | null | backend/src/controllers/base_controller.py | tmdt-buw/gideon-ts | b839672fcc19f13562f6da23e6407fff0b18d3ec | [
"MIT"
] | null | null | null | from typing import Generic, List, Optional, Type, TypeVar, Union, Dict, Any
from uuid import UUID
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from sqlalchemy.orm import Session
from src.db.sqlalchemy.database import Base
ModelType = TypeVar("ModelType", bound=Base)
CreateModelType = TypeVar("CreateModelType", bound=BaseModel)
UpdateSchemaType = TypeVar("UpdateSchemaType", bound=BaseModel)
class ControllerBase(Generic[ModelType, CreateModelType, UpdateSchemaType]):
    def __init__(self, model: Type[ModelType]):
        """
        CRUD object with default methods to Create, Read, Update, Delete (CRUD).

        :param model: the SQLAlchemy model class this controller manages
        """
        self.model = model

    def get(self, db: Session, uuid: UUID) -> Optional[ModelType]:
        """Return the row with the given id, or None when absent."""
        return db.query(self.model).filter(self.model.id == uuid).first()

    def get_all(self, db: Session, skip: int = 0, limit: int = 1000) -> List[ModelType]:
        """Return up to ``limit`` rows, skipping the first ``skip``."""
        return db.query(self.model).offset(skip).limit(limit).all()

    def create(self, db: Session, *, create: CreateModelType) -> ModelType:
        """Insert a new row built from the (pydantic) create schema."""
        create_obj = jsonable_encoder(create)
        db_obj = self.model(**create_obj)  # type: ignore
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj

    def update(self, db: Session, *, db_obj: ModelType, update_obj: Union[UpdateSchemaType, Dict[str, Any]]) -> ModelType:
        """Apply the fields of ``update_obj`` (schema or plain dict) to ``db_obj``.

        When a schema is given, only fields explicitly set on it are applied
        (``exclude_unset=True``); fields unknown to the model are ignored.
        """
        obj_data = jsonable_encoder(db_obj)
        if isinstance(update_obj, dict):
            update_data = update_obj
        else:
            update_data = update_obj.dict(exclude_unset=True)
        for field in obj_data:
            if field in update_data:
                setattr(db_obj, field, update_data[field])
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj

    def remove(self, db: Session, uuid: UUID) -> ModelType:
        """Delete and return the row; raises HTTP 404 when missing."""
        obj = self.get_or_error(db=db, uuid=uuid)
        db.delete(obj)
        db.commit()
        return obj

    def get_or_error(self, db: Session, uuid: UUID) -> ModelType:
        """Return the row or raise ``HTTPException(404)``."""
        obj = self.get(db=db, uuid=uuid)
        if not obj:
            raise HTTPException(status_code=404, detail="Not found.")
        return obj
| 35.333333 | 122 | 0.656783 | 1,761 | 0.791105 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.075472 |
9f9fb5269f268dd9c05b46524e7bd7c7ed4f68af | 2,982 | py | Python | models/squeezenet.py | LEE-SEON-WOO/network-slimming | c1de69d670f82e4eb00e7fd1583c849fbb0e222f | [
"MIT"
] | null | null | null | models/squeezenet.py | LEE-SEON-WOO/network-slimming | c1de69d670f82e4eb00e7fd1583c849fbb0e222f | [
"MIT"
] | null | null | null | models/squeezenet.py | LEE-SEON-WOO/network-slimming | c1de69d670f82e4eb00e7fd1583c849fbb0e222f | [
"MIT"
] | null | null | null | """squeezenet in pytorch
[1] Song Han, Jeff Pool, John Tran, William J. Dally
squeezenet: Learning both Weights and Connections for Efficient Neural Networks
https://arxiv.org/abs/1506.02626
"""
import torch
import torch.nn as nn
from .channel_selection import channel_selection
class Fire(nn.Module):
    """SqueezeNet "fire" module: a 1x1 squeeze conv followed by parallel
    1x1 and 3x3 expand convs whose outputs are concatenated channel-wise.

    Every conv is followed by BatchNorm and a channel_selection layer so
    channels can be pruned by network slimming.
    """

    def __init__(self, in_channel, out_channel, squzee_channel):

        super().__init__()
        self.squeeze = nn.Sequential(
            nn.Conv2d(in_channel, squzee_channel, 1),
            nn.BatchNorm2d(squzee_channel),
            channel_selection(squzee_channel),
            nn.ReLU(inplace=True)
        )

        # Each expand branch produces half of out_channel.
        self.expand_1x1 = nn.Sequential(
            nn.Conv2d(squzee_channel, int(out_channel / 2), 1),
            nn.BatchNorm2d(int(out_channel / 2)),
            channel_selection(int(out_channel / 2)),
            nn.ReLU(inplace=True)
        )

        self.expand_3x3 = nn.Sequential(
            nn.Conv2d(squzee_channel, int(out_channel / 2), 3, padding=1),
            nn.BatchNorm2d(int(out_channel / 2)),
            channel_selection(int(out_channel / 2)),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):

        x = self.squeeze(x)
        # Concatenate both expand branches along the channel dimension.
        x = torch.cat([
            self.expand_1x1(x),
            self.expand_3x3(x)
        ], 1)

        return x
class SqueezeNet(nn.Module):
    """SqueezeNet with simple bypass connections."""

    def __init__(self, dataset='cifar10', depth=0):
        """Build the network for the given dataset.

        :param dataset: 'cifar10', 'cifar100' or 'fer2013'; selects the
            number of classes and input channels.
        :param depth: unused, kept for signature compatibility with the
            other model builders in this package.
        :raises ValueError: for an unknown dataset name. (BUG FIX: the
            original fell through silently and crashed later with
            NameError on ``class_num``.)
        """
        super().__init__()
        if dataset == 'cifar10':
            class_num = 10
            channel_size = 3
        elif dataset == 'cifar100':
            class_num = 100
            channel_size = 3
        elif dataset == 'fer2013':
            class_num = 7
            channel_size = 1
        else:
            raise ValueError("unsupported dataset: %r" % (dataset,))

        self.stem = nn.Sequential(
            nn.Conv2d(channel_size, 96, 3, padding=1),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2)
        )

        self.fire2 = Fire(96, 128, 16)
        self.fire3 = Fire(128, 128, 16)
        self.fire4 = Fire(128, 256, 32)
        self.fire5 = Fire(256, 256, 32)
        self.fire6 = Fire(256, 384, 48)
        self.fire7 = Fire(384, 384, 48)
        self.fire8 = Fire(384, 512, 64)
        self.fire9 = Fire(512, 512, 64)

        # 1x1 conv maps 512 channels to class scores; the global average
        # pool reduces each score map to a single value per class.
        self.conv10 = nn.Conv2d(512, class_num, 1)
        self.avg = nn.AdaptiveAvgPool2d(1)
        self.maxpool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        x = self.stem(x)

        # "Simple bypass" skip connections around fire3, fire5 and fire7.
        f2 = self.fire2(x)
        f3 = self.fire3(f2) + f2
        f4 = self.fire4(f3)
        f4 = self.maxpool(f4)

        f5 = self.fire5(f4) + f4
        f6 = self.fire6(f5)
        f7 = self.fire7(f6) + f6
        f8 = self.fire8(f7)
        f8 = self.maxpool(f8)

        f9 = self.fire9(f8)
        c10 = self.conv10(f9)

        x = self.avg(c10)
        x = x.view(x.size(0), -1)

        return x
def squeezenet(dataset='cifar10', depth=0):
    """Factory matching the other model constructors in this package.

    BUG FIX: the original silently dropped the ``depth`` argument; it is
    now forwarded (SqueezeNet currently ignores it, so behaviour for
    existing callers is unchanged, but the call contract is honest).
    """
    return SqueezeNet(dataset=dataset, depth=depth)
| 26.864865 | 83 | 0.54896 | 2,602 | 0.872569 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.096244 |
9fa0c5b4929e45001fc240b68a55e0ebf452f161 | 749 | py | Python | mrjob/yelp_ratings_per_business.py | davelester/Yelp-Rating-and-Review-Trends | 652adc91f800270db29441a2420eecf9999d4a13 | [
"MIT"
] | 1 | 2021-04-21T12:07:58.000Z | 2021-04-21T12:07:58.000Z | mrjob/yelp_ratings_per_business.py | davelester/Yelp-Rating-and-Review-Trends | 652adc91f800270db29441a2420eecf9999d4a13 | [
"MIT"
] | null | null | null | mrjob/yelp_ratings_per_business.py | davelester/Yelp-Rating-and-Review-Trends | 652adc91f800270db29441a2420eecf9999d4a13 | [
"MIT"
] | 4 | 2015-01-30T12:04:06.000Z | 2016-09-29T02:25:55.000Z | """
Output a list of star ratings for each business ID
"""
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
from itertools import izip
class MRRatingsPerBusinesses(MRJob):
    """Two-step MRJob that emits, per business ID, the list of its star
    ratings from Yelp review records."""
    INPUT_PROTOCOL = JSONValueProtocol

    def mapper(self, _, review):
        # Only records of type 'review' carry a star rating.
        if review['type'] == 'review':
            biz_id = review['business_id']
            rating = review['stars']
            date = review['date']
            yield biz_id, (rating, date)

    def reducer(self, biz_id, value):
        # Unzip the (rating, date) pairs; the dates are discarded here.
        # NOTE(review): izip is Python 2 only; on Python 3 this needs the
        # builtin zip.
        ratings, dates = izip(*value)
        for rating in ratings:
            yield biz_id, rating

    def finale(self, key, value):
        # Second step: collapse the per-business rating stream into a list.
        yield key, list(value)

    def steps(self):
        return [self.mr(self.mapper, self.reducer),
                self.mr(reducer=self.finale)]
if __name__ == '__main__':
MRRatingsPerBusinesses.run() | 22.029412 | 50 | 0.711615 | 529 | 0.706275 | 349 | 0.465955 | 0 | 0 | 0 | 0 | 109 | 0.145527 |
9fa210e32e4bc5f34c60bcf1b43a2bfbc28d9212 | 9,937 | py | Python | web/datasets/tasks.py | andressadotpy/maria-quiteria | eb0dae395d2eb12b354aedb50810419d3b512875 | [
"MIT"
] | 151 | 2019-11-10T02:18:25.000Z | 2022-01-18T14:28:25.000Z | web/datasets/tasks.py | andressadotpy/maria-quiteria | eb0dae395d2eb12b354aedb50810419d3b512875 | [
"MIT"
] | 202 | 2019-11-09T16:27:19.000Z | 2022-03-22T12:41:27.000Z | web/datasets/tasks.py | andressadotpy/maria-quiteria | eb0dae395d2eb12b354aedb50810419d3b512875 | [
"MIT"
] | 69 | 2020-02-05T01:33:35.000Z | 2022-03-30T10:39:27.000Z | from datetime import datetime
from logging import info
from pathlib import Path
from typing import List
import requests
from celery import shared_task
from django.conf import settings
from django.contrib.admin.options import get_content_type_for_model
from requests import HTTPError
from tika import parser
from web.datasets.adapters import (
to_citycouncil_bid,
to_citycouncil_contract,
to_citycouncil_expense,
to_citycouncil_revenue,
)
from web.datasets.models import (
CityCouncilBid,
CityCouncilContract,
CityCouncilExpense,
CityCouncilRevenue,
File,
SyncInformation,
)
from web.datasets.services import get_s3_client
client = get_s3_client(settings)
class WebserviceException(Exception):
    """Raised when the city council webservice reports an error payload."""
    pass
@shared_task
def content_from_file(file_pk=None, path=None, keep_file=True):
    """Extract text content from a file with Apache Tika.

    Either ``file_pk`` (a File row — its content is cached on the model and
    the payload is downloaded from S3) or ``path`` (a local file) must be
    given.

    :param keep_file: when False, the local file is deleted after parsing;
        forced to False for S3 downloads.
    :return: the extracted text, or None when the path does not exist
    """
    if not any([file_pk, path]):
        raise Exception("Ou `file_pk` ou `path` devem ser informados.")
    a_file = None
    if file_pk:
        a_file = File.objects.get(pk=file_pk)

        # Content already extracted earlier — return the cached value.
        if a_file.content is not None:
            return a_file.content

        path = client.download_file(a_file.s3_file_path)
        keep_file = False

    if not Path(path).exists():
        info(f"Arquivo {path} não encontrado.")
        return

    raw = parser.from_file(path)
    if not keep_file:
        Path(path).unlink()

    if a_file:
        # Cache the extracted content on the model so future calls are free.
        a_file.content = raw["content"] or ""
        a_file.save()
        return a_file.content

    return raw["content"]
@shared_task
def backup_file(file_id):
    """Upload a File's payload to S3 and store the resulting URL and path.

    Skips files that already have a backup (``s3_url`` set) or that have
    neither a source URL nor a local copy.

    :return: the S3 URL, or None when nothing was uploaded
    """
    try:
        file_obj = File.objects.get(pk=file_id, s3_url__isnull=True)
    except File.DoesNotExist:
        info(f"O arquivo ({file_id}) não existe ou já possui backup.")
        return

    if not file_obj.url and not file_obj.local_path:
        info(f"O arquivo ({file_id}) não tem URL ou não existe localmente.")
        return

    model_name = file_obj.content_object._meta.model_name
    # S3 prefix layout: <model>/<year>/<month>/<day>/
    relative_file_path = (
        f"{model_name}/{file_obj.created_at.year}/"
        f"{file_obj.created_at.month}/{file_obj.created_at.day}/"
    )
    # Prefer the local copy; fall back to fetching from the source URL.
    location = file_obj.local_path or file_obj.url
    s3_url, s3_file_path = client.upload_file(
        location, relative_file_path, prefix=file_obj.checksum
    )
    file_obj.s3_file_path = s3_file_path
    file_obj.s3_url = s3_url
    file_obj.save()
    return s3_url
@shared_task
def get_city_council_updates(formatted_date):
    """Request the day's updates from the city council webservice.

    A SyncInformation row records whether the sync succeeded and stores the
    raw response payload.

    :param formatted_date: the target date formatted as ``YYYY-MM-DD``
    :raises HTTPError: when the webservice answers with an HTTP error status
    :raises WebserviceException: when the payload itself reports an error
    :return: the decoded JSON payload
    """
    target_date = datetime.strptime(formatted_date, "%Y-%m-%d").date()
    sync_info, _ = SyncInformation.objects.get_or_create(
        date=target_date, source="camara", defaults={"succeed": False}
    )
    response = requests.get(
        settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT,
        params={
            "data": formatted_date,  # formato aaaa-mm-dd
            "token": settings.CITY_COUNCIL_WEBSERVICE_TOKEN,
        },
        headers={"User-Agent": "Maria Quitéria"},
    )
    try:
        response.raise_for_status()
        sync_info.succeed = True
    except HTTPError:
        sync_info.succeed = False
        sync_info.save()
        # BUG FIX: the original did `raise HTTPError`, raising a new empty
        # exception and discarding the caught one (status code, response).
        # A bare `raise` re-raises the original exception intact.
        raise

    response = response.json()
    sync_info.response = response
    if response.get("erro"):
        sync_info.succeed = False
        sync_info.save()
        raise WebserviceException(response["erro"])

    sync_info.save()
    return response
@shared_task(ignore_result=True)
def distribute_city_council_objects_to_sync(payload):
    """Fan the webservice payload out into one task per record.

    The city council webservice returns a list of actions (insert, update,
    delete) and the records affected by each. Every record is dispatched as
    its own task so that failures can be handled independently. Deletions
    are batched: the whole record list goes to a single task.
    """
    action_methods = {
        "inclusoesContrato": add_citycouncil_contract,
        "alteracoesContrato": update_citycouncil_contract,
        "exclusoesContrato": remove_citycouncil_contract,
        "inclusoesLicitacao": add_citycouncil_bid,
        "alteracoesLicitacao": update_citycouncil_bid,
        "exclusoesLicitacao": remove_citycouncil_bid,
        "inclusoesReceita": add_citycouncil_revenue,
        "alteracoesReceita": update_citycouncil_revenue,
        "exclusoesReceita": remove_citycouncil_revenue,
        "inclusoesDespesa": add_citycouncil_expense,
        "alteracoesDespesa": update_citycouncil_expense,
        "exclusoesDespesa": remove_citycouncil_expense,
    }
    for action_name, records in payload.items():
        info(f"{action_name}: {len(records)} registros")
        task = action_methods.get(action_name)
        if action_name.startswith("exclusoes"):
            # Deletion tasks accept the full list of records at once.
            task.delay(records)
        else:
            for record in records:
                task.delay(record)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def save_citycouncil_files(files, object, url_key):
    """Persist the files attached to a webservice record, linking each one
    to ``object`` via its content type.

    :param files: list of dicts from the webservice payload (may be falsy)
    :param object: the model instance the files belong to
    :param url_key: dict key that holds each file's URL/path
    """
    if not files:
        return
    content_type = get_content_type_for_model(object)
    # Imported locally (as in the original) — presumably to avoid a
    # circular import with the management command module.
    from web.datasets.management.commands._file import save_file

    # CLEANUP: the original re-checked `if files:` here, which is always
    # true after the early return above.
    for file_ in files:
        save_file(file_[url_key], content_type, object.pk)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_bid(record):
    """Create a CityCouncilBid from a webservice record (idempotent via
    get_or_create on the external code) and save its attached files."""
    new_item = to_citycouncil_bid(record)
    new_item["crawled_at"] = datetime.now()
    new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
    bid, _ = CityCouncilBid.objects.get_or_create(
        external_code=new_item["external_code"], defaults=new_item
    )
    save_citycouncil_files(record.get("arquivos"), bid, "caminhoArqLic")
    return bid
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_bid(record):
    """Update an existing CityCouncilBid from a webservice record and
    (re)save its attached files.

    Raises CityCouncilBid.DoesNotExist when the bid was never synced.
    """
    bid = CityCouncilBid.objects.get(external_code=record["codLic"])
    updated_item = to_citycouncil_bid(record)
    for key, value in updated_item.items():
        setattr(bid, key, value)
    bid.save()
    save_citycouncil_files(record.get("arquivos"), bid, "caminhoArqLic")
    return bid
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_bid(records: List[dict]):
    """Soft-delete bids: flag them as excluded instead of deleting rows."""
    to_be_removed = [record["codLic"] for record in records]
    CityCouncilBid.objects.filter(external_code__in=to_be_removed).update(excluded=True)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_contract(record):
    """Create a CityCouncilContract from a webservice record (idempotent via
    get_or_create on the external code) and save its attached files."""
    new_item = to_citycouncil_contract(record)
    new_item["crawled_at"] = datetime.now()
    new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
    contract, _ = CityCouncilContract.objects.get_or_create(
        external_code=new_item["external_code"], defaults=new_item
    )
    save_citycouncil_files(record.get("arquivos"), contract, "caminho")
    return contract
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_contract(record):
    """Update an existing CityCouncilContract from a webservice record and
    (re)save its attached files.

    Raises CityCouncilContract.DoesNotExist when it was never synced.
    """
    contract = CityCouncilContract.objects.get(external_code=record["codCon"])
    updated_item = to_citycouncil_contract(record)
    for key, value in updated_item.items():
        setattr(contract, key, value)
    contract.save()
    save_citycouncil_files(record.get("arquivos"), contract, "caminho")
    return contract
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_contract(records: List[dict]):
to_be_removed = [record["codCon"] for record in records]
CityCouncilContract.objects.filter(external_code__in=to_be_removed).update(
excluded=True
)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_revenue(record):
new_item = to_citycouncil_revenue(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
revenue, _ = CityCouncilRevenue.objects.get_or_create(
external_code=new_item["external_code"], defaults=new_item
)
return revenue
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_revenue(record):
revenue = CityCouncilRevenue.objects.get(external_code=record["codLinha"])
updated_item = to_citycouncil_revenue(record)
for key, value in updated_item.items():
setattr(revenue, key, value)
revenue.save()
return revenue
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_revenue(records: List[dict]):
to_be_removed = [record["codLinha"] for record in records]
CityCouncilRevenue.objects.filter(external_code__in=to_be_removed).update(
excluded=True
)
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def add_citycouncil_expense(record):
new_item = to_citycouncil_expense(record)
new_item["crawled_at"] = datetime.now()
new_item["crawled_from"] = settings.CITY_COUNCIL_WEBSERVICE_ENDPOINT
expense, _ = CityCouncilExpense.objects.get_or_create(
external_file_code=new_item["external_file_code"],
external_file_line=new_item["external_file_line"],
number=new_item["number"],
phase=new_item["phase"],
defaults=new_item,
)
return expense
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def update_citycouncil_expense(record):
expense = CityCouncilExpense.objects.get(
external_file_code=record["codArquivo"],
external_file_line=record["codLinha"],
)
updated_item = to_citycouncil_expense(record)
for key, value in updated_item.items():
setattr(expense, key, value)
expense.save()
return expense
@shared_task(retry_kwargs={"max_retries": 1}, ignore_result=True)
def remove_citycouncil_expense(records: List[dict]):
for record in records:
CityCouncilExpense.objects.filter(
external_file_code=record["codigo"], external_file_line=record["linha"]
).update(excluded=True)
| 32.903974 | 88 | 0.717118 | 46 | 0.004621 | 0 | 0 | 9,158 | 0.91994 | 0 | 0 | 1,652 | 0.165947 |
9fa2b6471bd6d79dfad1b744ce99d19125228b13 | 32 | py | Python | stubbs/defs/ustr.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | stubbs/defs/ustr.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | stubbs/defs/ustr.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | from ...hek.defs.ustr import *
| 16 | 31 | 0.65625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9fa2e9ad7d1fd21c4c1944a6f5f15060207c18a9 | 674 | py | Python | src/json_helper/json_to_pandas.py | AidanFarhi/PythonFundamentals.Labs.PipModule | f3461576b34c5a830577d36057e17538f3b3e328 | [
"RSA-MD"
] | null | null | null | src/json_helper/json_to_pandas.py | AidanFarhi/PythonFundamentals.Labs.PipModule | f3461576b34c5a830577d36057e17538f3b3e328 | [
"RSA-MD"
] | null | null | null | src/json_helper/json_to_pandas.py | AidanFarhi/PythonFundamentals.Labs.PipModule | f3461576b34c5a830577d36057e17538f3b3e328 | [
"RSA-MD"
] | null | null | null | import json
import pandas as pd
import os
def read_json(file_path):
file = open(file_path, 'r')
data = json.load(file)
file.close()
return data
def read_all_json_files(JSON_ROOT):
df = pd.DataFrame() # create empty data frame
for file_name in os.listdir(JSON_ROOT): # iterate through directory
if file_name.find('daily') == -1: # check that file has 'locations' in it
continue
file_path = 'data/daily_summaries/' + file_name # create full path to pass to read_json function
data = read_json(file_path) # get data from file
df = df.append(data['results']) # append data to DataFrame
return df
| 30.636364 | 105 | 0.664688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.336795 |
9fa41e66ab11d94e02e55a4155db8605b9a147d4 | 2,685 | py | Python | shrike/pipeline/telemetry_utils.py | Anbang-Hu/shrike | 78189984c85696a9a9feaadb72aa471cf2409796 | [
"MIT"
] | 27 | 2021-05-27T00:01:24.000Z | 2022-01-30T19:55:24.000Z | shrike/pipeline/telemetry_utils.py | Anbang-Hu/shrike | 78189984c85696a9a9feaadb72aa471cf2409796 | [
"MIT"
] | 284 | 2021-05-12T22:26:41.000Z | 2022-02-23T21:18:34.000Z | shrike/pipeline/telemetry_utils.py | Anbang-Hu/shrike | 78189984c85696a9a9feaadb72aa471cf2409796 | [
"MIT"
] | 5 | 2021-06-02T04:51:47.000Z | 2021-12-20T17:07:41.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from opencensus.ext.azure.log_exporter import AzureLogHandler
log = logging.getLogger(__name__)
class TelemetryLogger:
"""Utils class for opencensus azure monitor"""
def __init__(
self, enable_telemetry=True, instrumentation_key=None, level=logging.INFO
):
self.logger = logging.getLogger("telemetry_logger")
self.logger.setLevel(level)
self.enable_telemetry = enable_telemetry
# Why is it okay to include this key directly in the source code?
# For any client-side tool, there is a fundamental problem with protecting instrumentation
# keys. You want the published tool to be able to collect telemetry, but the only way
# it can do this is if it has some kind of instrumentation key.
#
# For an authoritative example, the dotnet CLI contains their telemetry key in a
# public GitHub repository:
# https://github.com/dotnet/cli/blob/master/src/dotnet/Telemetry/Telemetry.cs
#
# The underlying Azure resource is called `aml1p-ml-tooling`.
self.instrumentation_key = (
"aaefce9e-d109-4fac-bb9f-8277c68e91ac"
if instrumentation_key is None
else instrumentation_key
)
handler = AzureLogHandler(
connection_string=f"InstrumentationKey={self.instrumentation_key}"
)
handler.add_telemetry_processor(self.scrubber_function)
self.logger.addHandler(handler)
def log_trace(self, message, properties={}, level=logging.INFO):
if self.enable_telemetry:
try:
if level == logging.INFO:
self.logger.info(message, extra=properties)
elif level == logging.WARNING:
self.logger.warning(message, extra=properties)
elif level == logging.ERROR:
self.logger.error(message, extra=properties)
elif level == logging.CRITICAL:
self.logger.critical(message, extra=properties)
else:
log.error("The logging level is not expected!")
except Exception as ex:
log.warning("Send telemetry exception: %s", str(ex))
else:
log.info(
"Sending trace log messages to application insight has been disabled."
)
# Callback function to scrub some columns
def scrubber_function(self, envelope):
envelope.tags["ai.cloud.roleInstance"] = "cloud_RoleInstance_Scrubbed"
envelope.tags["ai.location.ip"] = "IP_Scrubbed"
| 41.953125 | 98 | 0.639851 | 2,496 | 0.929609 | 0 | 0 | 0 | 0 | 0 | 0 | 1,029 | 0.38324 |
9fa5fa2df40b29c75a161e9790ab053c973c7300 | 3,569 | py | Python | langsense/core.py | sneub/langsense | 7e194582f19bbd2b7c93f8a1ef5d96d4d9f5ae73 | [
"Apache-2.0"
] | null | null | null | langsense/core.py | sneub/langsense | 7e194582f19bbd2b7c93f8a1ef5d96d4d9f5ae73 | [
"Apache-2.0"
] | null | null | null | langsense/core.py | sneub/langsense | 7e194582f19bbd2b7c93f8a1ef5d96d4d9f5ae73 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from . import ruleset
import re
import operator
class LangSense(object):
def detect(self, string, country_hint=None):
if type(string) == str:
text = string.decode('utf-8').lower()
else:
text = string.lower()
shortlist_char = self._char_shortlist(text)
shortlist_rules = self._rule_shortlist(text)
shortlist_words = self._word_shortlist(text)
shortlist_segments = self._segment_shortlist(text)
result = {l: 0 for l in ruleset.RuleSet.language_list}
for l in ruleset.RuleSet.language_list:
try:
result[l] += (shortlist_char[l] * ruleset.RuleSet.score_weights['char'])
except Exception, _:
pass
try:
result[l] += (shortlist_rules[l] * ruleset.RuleSet.score_weights['rule'])
except Exception, _:
pass
try:
result[l] += (shortlist_words[l] * ruleset.RuleSet.score_weights['word'])
except Exception, _:
pass
try:
result[l] += (shortlist_segments[l] * ruleset.RuleSet.score_weights['segment'])
except Exception, _:
pass
sum_scores = sum([v for _, v in result.iteritems()])
result = {k: float(v)/sum_scores for k, v in result.iteritems() if v > 0}
if country_hint:
country_hint = country_hint.decode('utf-8').upper()
lang = ruleset.RuleSet.country_langs[country_hint]
if lang in result:
result[lang] *= ruleset.RuleSet.score_weights['hint']
result = list(reversed(sorted(result.items(), key=operator.itemgetter(1))))
return result
def _char_shortlist(self, string):
lang_shortlist = []
for c in string:
lang_shortlist = lang_shortlist + [(c, l) for (l, a) in ruleset.RuleSet.alphabets.iteritems() if c in a]
langs = set([i[1] for i in lang_shortlist])
master_langs = []
bad_langs = []
scores = {l: len(string) for l in langs}
for l in langs:
for c, _ in lang_shortlist:
if c not in ruleset.RuleSet.alphabets[l]:
bad_langs.append(l)
scores[l] -= 1
master_langs.append(l)
scores = {k: s/len(string) for k, s in scores.iteritems()}
return {k: v for k, v in scores.iteritems() if v > 0}
def _rule_shortlist(self, string):
lang_shortlist = []
scores = {}
for lang, rules in ruleset.RuleSet.word_rules.iteritems():
for rule in rules:
if re.search(rule, string):
lang_shortlist.append(lang)
if lang in scores:
scores[lang] += 1
else:
scores[lang] = 1
return {k: scores[k] for k in list(set(lang_shortlist))}
def _segment_shortlist(self, string):
lang_shortlist = []
scores = {}
for lang, segments in ruleset.RuleSet.word_segments.iteritems():
for segment in segments:
if re.search(r'\w'+ segment + r'\b', string) or re.search(r'\b'+ segment + r'\w', string) or re.search(r'\w'+ segment + r'\w', string):
lang_shortlist.append(lang)
if lang in scores:
scores[lang] += 1
else:
scores[lang] = 1
return {k: scores[k] for k in list(set(lang_shortlist))}
def _word_shortlist(self, string):
lang_shortlist = []
scores = {}
for lang, words in ruleset.RuleSet.words.iteritems():
for word in words:
if re.search(r'\b'+ word + r'\b', string):
lang_shortlist.append(lang)
if lang in scores:
scores[lang] += 1
else:
scores[lang] = 1
return {k: scores[k] for k in list(set(lang_shortlist))}
| 27.666667 | 143 | 0.609695 | 3,493 | 0.978706 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.030821 |
9fa75ad8fd9d506ae24046c05570702c53d633a2 | 11,695 | py | Python | preprocess.py | sdc17/NaivePinYin | 30301de4289d022160e121338c1b9e337f6ed5af | [
"MIT"
] | null | null | null | preprocess.py | sdc17/NaivePinYin | 30301de4289d022160e121338c1b9e337f6ed5af | [
"MIT"
] | null | null | null | preprocess.py | sdc17/NaivePinYin | 30301de4289d022160e121338c1b9e337f6ed5af | [
"MIT"
] | null | null | null | #-*- coding : utf-8-*-
import os
import json
import yaml
import glob
import jieba
import pickle
import datetime
import argparse
from collections import Counter
from concurrent.futures import ProcessPoolExecutor, Executor, as_completed
def task_one_gram(news):
gram1 = {}
with open('./training/pinyin_table/一二级汉字表.txt', encoding='gbk') as f:
keys = list(f.read().replace('\n', ''))
for key in keys:
gram1[key] = 0
for line in open(news, 'r', encoding='gbk', errors='ignore'):
try:
content = json.loads(line)
except:
continue
info = content['html'] + content['title']
word_dict = Counter(info)
for key, value in word_dict.items():
if gram1.get(key) != None:
gram1[key] += value
return gram1
def one_gram():
news = glob.glob('./training/sina_news_gbk/*.txt')
gram1 = {}
with ProcessPoolExecutor() as executor:
for results in executor.map(task_one_gram, news):
for key, value in results.items():
gram1[key] = gram1.get(key, 0) + value
with open('./data/1gram.pkl', 'wb') as f:
pickle.dump(gram1, f)
# with open('./data/1gram.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(gram1, f, default_flow_style=False, allow_unicode=True)
def task_two_gram(news):
gram1 = {}
gram2 = {}
cnt_s = 0
cnt_t = 0
with open('./data/1gram.pkl','rb') as f:
gram1 = pickle.load(f)
for line in open(news, 'r', encoding='gbk', errors='ignore'):
try:
content = json.loads(line)
except:
continue
info = content['html'] + content['title']
info_len = len(info)
for i in range(info_len - 1):
key = None
if i == 0 and gram1.get(info[0]):
key = ''.join(['s', info[0]])
cnt_s += 1
elif i > 0 and gram1.get(info[i]) and not gram1.get(info[i - 1]):
key = ''.join(['s', info[i]])
cnt_s += 1
elif gram1.get(info[i]) and not gram1.get(info[i + 1]):
key = ''.join([info[i], 't'])
cnt_t += 1
elif i == info_len - 2 and gram1.get(info[i + 1]):
key = ''.join([info[i + 1], 't'])
cnt_t += 1
if key:
gram2[key] = gram2.get(key, 0) + 1
if gram1.get(info[i]) and gram1.get(info[i + 1]):
key = ''.join([info[i], info[i + 1]])
gram2[key] = gram2.get(key, 0) + 1
return (gram2, cnt_s, cnt_t)
def two_gram():
news = glob.glob('./training/sina_news_gbk/*.txt')
gram1 = {}
gram2 = {}
cnt_s = 0
cnt_t = 0
with ProcessPoolExecutor() as executor:
for results in executor.map(task_two_gram, news):
for key, value in results[0].items():
gram2[key] = gram2.get(key, 0) + value
cnt_s += results[1]
cnt_t += results[2]
with open('./data/1gram.pkl','rb') as f:
gram1 = pickle.load(f)
gram1['s'] = cnt_s
gram1['t'] = cnt_t
with open('./data/1gram.pkl', 'wb') as f:
pickle.dump(gram1, f)
# with open('./data/1gram.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(gram1, f, default_flow_style=False, allow_unicode=True)
with open('./data/2gram.pkl', 'wb') as f:
pickle.dump(gram2, f)
# with open('./data/2gram.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(gram2, f, default_flow_style=False, allow_unicode=True)
def task_three_gram(news):
gram1 = {}
gram3 = {}
cnt_s = 0
cnt_t = 0
with open('./data/1gram.pkl','rb') as f:
gram1 = pickle.load(f)
for line in open(news, 'r', encoding='gbk', errors='ignore'):
try:
content = json.loads(line)
except:
continue
info = content['html'] + content['title']
info_len = len(info)
for i in range(info_len - 2):
key = ''
if i == 0 and gram1.get(info[0]) and gram1.get(info[1]):
key = ''.join(['s', info[0], info[1]])
cnt_s += 1
elif i > 1 and gram1.get(info[i]) and gram1.get(info[i - 1]) and not gram1.get(info[i - 2]):
key = ''.join(['s', info[i - 1], info[i]])
cnt_s += 1
elif gram1.get(info[i]) and gram1.get(info[i + 1]) and not gram1.get(info[i + 2]):
key = ''.join([info[i], info[i + 1], 't'])
cnt_t += 1
elif i == info_len - 3 and gram1.get(info[i + 1]) and gram1.get(info[i + 2]):
key = ''.join([info[i + 1], info[i + 2], 't'])
cnt_t += 1
if key:
gram3[key] = gram3.get(key, 0) + 1
if gram1.get(info[i]) and gram1.get(info[i + 1]) and gram1.get(info[i + 2]):
key = ''.join([info[i], info[i + 1], info[i + 2]])
gram3[key] = gram3.get(key, 0) + 1
return (gram3, cnt_s, cnt_t)
def three_gram(rank):
news = glob.glob('./training/sina_news_gbk/*.txt')
gram1 = {}
gram3 = {}
cnt_s = 0
cnt_t = 0
with ProcessPoolExecutor() as executor:
for results in executor.map(task_three_gram, news):
for key, value in results[0].items():
gram3[key] = gram3.get(key, 0) + value
cnt_s += results[1]
cnt_t += results[2]
with open('./data/1gram.pkl','rb') as f:
gram1 = pickle.load(f)
gram1['s'] = cnt_s
gram1['t'] = cnt_t
with open('./data/1gram.pkl', 'wb') as f:
pickle.dump(gram1, f)
# with open('./data/1gram.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(gram1, f, default_flow_style=False, allow_unicode=True)
# Select top rank% features for valid features
gram3 = dict(sorted(gram3.items(), key = lambda x: x[1], reverse = True)[:int(len(gram3)*rank)])
if rank == 1.0:
with open('./data/3gram_whole.pkl', 'wb') as f:
pickle.dump(gram3, f)
# with open('./data/3gram_whole.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(gram3, f, default_flow_style=False, allow_unicode=True)
else:
with open('./data/3gram.pkl', 'wb') as f:
pickle.dump(gram3, f)
# with open('./data/3gram.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(gram3, f, default_flow_style=False, allow_unicode=True)
def task_one_word(news):
word1 = {}
gram1 = {}
with open('./data/1gram.pkl','rb') as f:
gram1 = pickle.load(f)
for line in open(news, 'r', encoding='gbk', errors='ignore'):
try:
content = json.loads(line)
except:
continue
info = content['html'] + content['title']
seg = jieba.lcut(info, cut_all=False, HMM=True)
word_dict = Counter(seg)
for key, value in word_dict.items():
valid = True
for i in key:
if not gram1.get(i) or i == 's' or i == 't':
valid = False
break
if valid:
word1[key] = word1.get(key, 0) + value
return word1
def one_word():
news = glob.glob('./training/sina_news_gbk/*.txt')
word1 = {}
with ProcessPoolExecutor() as executor:
for results in executor.map(task_one_word, news):
for key, value in results.items():
word1[key] = word1.get(key, 0) + value
with open('./data/1word.pkl', 'wb') as f:
pickle.dump(word1, f)
# with open('./data/1word.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(word1, f, default_flow_style=False, allow_unicode=True)
def task_two_word(news):
word1 = {}
word2 = {}
cnt_s = 0
cnt_t = 0
with open('./data/1word.pkl','rb') as f:
word1 = pickle.load(f)
for line in open(news, 'r', encoding='gbk', errors='ignore'):
try:
content = json.loads(line)
except:
continue
info = content['html'] + content['title']
info_len = len(info)
seg = jieba.lcut(info, cut_all=False, HMM=True)
# seg = [x for x in _seg if word1.get(x)]
seg_len = len(seg)
for i in range(seg_len - 1):
key = None
if i == 0 and word1.get(seg[0]):
key = ''.join(['s_', seg[0]])
cnt_s += 1
elif i > 0 and word1.get(seg[i]) and not word1.get(seg[i - 1]):
key = ''.join(['s_', seg[i]])
cnt_s += 1
elif word1.get(seg[i]) and not word1.get(seg[i + 1]):
key = ''.join([seg[i], '_t'])
cnt_t += 1
elif i == seg_len - 2 and word1.get(seg[i + 1]):
key = ''.join([seg[i + 1], '_t'])
cnt_t += 1
if key:
word2[key] = word2.get(key, 0) + 1
if word1.get(seg[i]) and word1.get(seg[i + 1]):
key = ''.join([seg[i], '_', seg[i + 1]])
word2[key] = word2.get(key, 0) + 1
return (word2, cnt_s, cnt_t)
def two_word(rank):
news = glob.glob('./training/sina_news_gbk/*.txt')
word1 = {}
word2 = {}
cnt_s = 0
cnt_t = 0
with ProcessPoolExecutor() as executor:
for results in executor.map(task_two_word, news):
for key, value in results[0].items():
word2[key] = word2.get(key, 0) + value
cnt_s += results[1]
cnt_t += results[2]
with open('./data/1word.pkl','rb') as f:
word1 = pickle.load(f)
word1['s'] = cnt_s
word1['t'] = cnt_t
with open('./data/1word.pkl', 'wb') as f:
pickle.dump(word1, f)
# with open('./data/1word.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(word1, f, default_flow_style=False, allow_unicode=True)
# Select top rank% features for valid features
word2 = dict(sorted(word2.items(), key = lambda x: x[1], reverse = True)[:int(len(word2)*rank)])
if rank == 1.0:
with open('./data/2word_whole.pkl', 'wb') as f:
pickle.dump(word2, f)
# with open('./data/2word_whole.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(word2, f, default_flow_style=False, allow_unicode=True)
else:
with open('./data/2word.pkl', 'wb') as f:
pickle.dump(word2, f)
# with open('./data/2word.yaml', 'w', encoding='utf-8') as f:
# yaml.dump(word2, f, default_flow_style=False, allow_unicode=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default='3c', type=str, choices=['2c', '3c', '2w'], help="Available models")
parser.add_argument("--rank_3c", default=0.2, type=float, help="Select rank percent of chars data for predicting")
parser.add_argument("--rank_2w", default=0.25, type=float, help="Select rank percent of words data for predicting")
args = parser.parse_args()
print('===> Preprocessing')
start = datetime.datetime.now()
if args.model_type == '2c':
print("Model type: binary char")
one_gram()
two_gram()
elif args.model_type == '3c':
print("Model type: ternary char")
one_gram()
three_gram(rank=args.rank_3c)
elif args.model_type == '2w':
print("Model type: binary word")
jieba.enable_parallel()
# one_word()
two_word(rank=args.rank_2w)
end = datetime.datetime.now()
print('Time cost: {}'.format(end -start))
print('===> Completed!')
print('-' * 20)
| 33.319088 | 119 | 0.524583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,671 | 0.228154 |
9fa815038e88bc8c7182ffc9b58dd48bcbb7159c | 329 | py | Python | src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | ChaseKnowlden/vscode-jupyter | 9bdaf87f0b6dcd717c508e9023350499a6093f97 | [
"MIT"
] | 2,461 | 2016-01-21T16:40:43.000Z | 2022-03-31T12:01:55.000Z | src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | ChaseKnowlden/vscode-jupyter | 9bdaf87f0b6dcd717c508e9023350499a6093f97 | [
"MIT"
] | 12,536 | 2019-05-06T21:26:14.000Z | 2022-03-31T23:06:48.000Z | src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | vasili8m/vscode-python | 846eee870e8b7bab38172600836faedb5fb80166 | [
"MIT"
] | 871 | 2019-05-15T13:43:55.000Z | 2022-03-31T03:04:35.000Z | import sys
import time
def main():
sys.stdout.write('this is stdout')
sys.stdout.flush()
sys.stderr.write('this is stderr')
sys.stderr.flush()
# Give the debugger some time to add a breakpoint.
time.sleep(5)
for i in range(1):
time.sleep(0.5)
pass
print('this is print')
main()
| 18.277778 | 54 | 0.613982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.294833 |
9fa8a760a99df59cb027859fabf342dae3bf5734 | 2,985 | py | Python | rbb_tools/src/rbb_tools/simenvs/test.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 55 | 2019-05-09T06:43:05.000Z | 2021-12-08T05:56:43.000Z | rbb_tools/src/rbb_tools/simenvs/test.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 5 | 2019-09-08T15:33:28.000Z | 2021-04-17T17:30:53.000Z | rbb_tools/src/rbb_tools/simenvs/test.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 16 | 2019-08-08T07:15:35.000Z | 2021-12-07T15:34:41.000Z | # AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <hhendrik@ethz.ch>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import yaml
from rbb_tools.simenvs.environment import SimulationEnvironment
class TestSimulationEnvironment(SimulationEnvironment):
def __init__(self, env_config, sim_config, output_dir, tmp_dir):
super(TestSimulationEnvironment, self).__init__(env_config, sim_config, output_dir, tmp_dir)
self._fail = True
if 'fail' in sim_config:
self._fail = sim_config["fail"]
def prepare(self):
logging.info("TestSimulationEnvironment.prepare()")
return True
def simulate(self):
logging.info("TestSimulationEnvironment.simulate()")
output_file = {
'title': "TestSimulationEnvironment",
'repetitions': {
'Test run 1': {
'bag': None,
'pass': True,
'duration': 1.0,
'results': {"some-result": "good"}
},
'Test run 2': {
'bag': 'missing-bag.bag',
'pass': not self._fail,
'duration': 1.0,
'results': {"some-result": "bad"}
},
'Test run 3': {
'bag': 'bag.bag',
'pass': True,
'duration': 1.0,
'results': {"some-result": "this one has a bag"}
}
}
}
with open(self._output_dir + "/output.yaml", 'w') as f:
yaml.safe_dump(output_file, f, default_flow_style=False)
with open(self._output_dir + "/bag.bag", 'w') as f:
for x in range(1024):
f.write("THIS IS A FAKE ROSBAG \n")
return True
def clean(self):
logging.info("TestSimulationEnvironment.clean()")
environment = TestSimulationEnvironment
| 34.709302 | 100 | 0.61206 | 1,701 | 0.569849 | 0 | 0 | 0 | 0 | 0 | 0 | 1,570 | 0.525963 |
9fa9b9a2508774b622073aec9a477c8353758d80 | 1,188 | py | Python | tests/test_data_models.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | tests/test_data_models.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | tests/test_data_models.py | haihabi/GenerativeCRB | d53c01bec7214bb087fbe17dba241e12eb60858e | [
"MIT"
] | null | null | null | import unittest
import torch
import data_model as dm
import normflowpy as nf
from experiments import constants
def generate_model_dict():
return {constants.DIM: 4,
constants.THETA_MIN: 0.3,
constants.THETA_MAX: 10,
constants.SIGMA_N: 0.1,
}
class FlowToCRBTest(unittest.TestCase):
def run_model_check(self, model_enum):
model = dm.get_model(model_enum, generate_model_dict())
self.assertTrue(str(4) in model.name) # Check that dim size in model name
self.assertTrue(model.__class__.__name__ in model.name) # Check that class name is in model
flow = model.get_optimal_model()
self.assertTrue(isinstance(flow, nf.NormalizingFlowModel))
crb_value = model.crb(2.0)
self.assertTrue(isinstance(crb_value, torch.Tensor))
def test_flow_linear(self):
self.run_model_check(dm.ModelType.Linear)
def test_gaussian_variance(self):
self.run_model_check(dm.ModelType.GaussianVariance)
def test_mult_1_3(self):
self.run_model_check(dm.ModelType.Pow1Div3Gaussian)
def test_mult_3(self):
self.run_model_check(dm.ModelType.Pow3Gaussian)
| 32.108108 | 100 | 0.70202 | 890 | 0.749158 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.058923 |
9fa9e56b663a8787279c9b910d19d9a32f11bba2 | 15,310 | py | Python | neutronclient/tests/unit/test_auth.py | asadoughi/python-neutronclient | 2026ffdc6c82135b64b3ccd569a4a442e170ca53 | [
"Apache-2.0"
] | 1 | 2016-07-16T09:36:02.000Z | 2016-07-16T09:36:02.000Z | neutronclient/tests/unit/test_auth.py | ntt-sic/python-neutronclient | a8c5c650ea7d51461c6311fb477b733b2f7c36a0 | [
"Apache-2.0"
] | null | null | null | neutronclient/tests/unit/test_auth.py | ntt-sic/python-neutronclient | a8c5c650ea7d51461c6311fb477b733b2f7c36a0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 NEC Corporation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import copy
import httplib2
import json
import uuid
import mox
import testtools
from neutronclient import client
from neutronclient.common import exceptions
from neutronclient.common import utils
# Canned identity values shared by every test case below.
USERNAME = 'testuser'
TENANT_NAME = 'testtenant'
TENANT_ID = 'testtenantid'
PASSWORD = 'password'
AUTH_URL = 'authurl'
ENDPOINT_URL = 'localurl'
ENDPOINT_OVERRIDE = 'otherurl'
TOKEN = 'tokentoken'
REGION = 'RegionTest'

# Minimal keystone "POST /tokens" response body: one token plus a service
# catalog containing a single 'network' service whose admin/internal/public
# endpoints all point at ENDPOINT_URL in REGION.
KS_TOKEN_RESULT = {
    'access': {
        'token': {'id': TOKEN,
                  'expires': '2012-08-11T07:49:01Z',
                  'tenant': {'id': str(uuid.uuid1())}},
        'user': {'id': str(uuid.uuid1())},
        'serviceCatalog': [
            {'endpoints_links': [],
             'endpoints': [{'adminURL': ENDPOINT_URL,
                            'internalURL': ENDPOINT_URL,
                            'publicURL': ENDPOINT_URL,
                            'region': REGION}],
             'type': 'network',
             'name': 'Neutron Service'}
        ]
    }
}

# Canned "GET /tokens/<token>/endpoints" response body listing the single
# 'network' endpoint.
ENDPOINTS_RESULT = {
    'endpoints': [{
        'type': 'network',
        'name': 'Neutron Service',
        'region': REGION,
        'adminURL': ENDPOINT_URL,
        'internalURL': ENDPOINT_URL,
        'publicURL': ENDPOINT_URL
    }]
}
class CLITestAuthKeystone(testtools.TestCase):
# Auth Body expected when using tenant name
auth_type = 'tenantName'
def setUp(self):
"""Prepare the test environment."""
super(CLITestAuthKeystone, self).setUp()
self.mox = mox.Mox()
self.client = client.HTTPClient(username=USERNAME,
tenant_name=TENANT_NAME,
password=PASSWORD,
auth_url=AUTH_URL,
region_name=REGION)
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
def test_get_token(self):
self.mox.StubOutWithMock(self.client, "request")
res200 = self.mox.CreateMock(httplib2.Response)
res200.status = 200
self.client.request(
AUTH_URL + '/tokens', 'POST',
body=mox.StrContains(self.auth_type), headers=mox.IsA(dict)
).AndReturn((res200, json.dumps(KS_TOKEN_RESULT)))
self.client.request(
mox.StrContains(ENDPOINT_URL + '/resource'), 'GET',
headers=mox.ContainsKeyValue('X-Auth-Token', TOKEN)
).AndReturn((res200, ''))
self.mox.ReplayAll()
self.client.do_request('/resource', 'GET')
self.assertEqual(self.client.endpoint_url, ENDPOINT_URL)
self.assertEqual(self.client.auth_token, TOKEN)
def test_refresh_token(self):
self.mox.StubOutWithMock(self.client, "request")
self.client.auth_token = TOKEN
self.client.endpoint_url = ENDPOINT_URL
res200 = self.mox.CreateMock(httplib2.Response)
res200.status = 200
res401 = self.mox.CreateMock(httplib2.Response)
res401.status = 401
# If a token is expired, neutron server retruns 401
self.client.request(
mox.StrContains(ENDPOINT_URL + '/resource'), 'GET',
headers=mox.ContainsKeyValue('X-Auth-Token', TOKEN)
).AndReturn((res401, ''))
self.client.request(
AUTH_URL + '/tokens', 'POST',
body=mox.IsA(str), headers=mox.IsA(dict)
).AndReturn((res200, json.dumps(KS_TOKEN_RESULT)))
self.client.request(
mox.StrContains(ENDPOINT_URL + '/resource'), 'GET',
headers=mox.ContainsKeyValue('X-Auth-Token', TOKEN)
).AndReturn((res200, ''))
self.mox.ReplayAll()
self.client.do_request('/resource', 'GET')
def test_get_endpoint_url(self):
self.mox.StubOutWithMock(self.client, "request")
self.client.auth_token = TOKEN
res200 = self.mox.CreateMock(httplib2.Response)
res200.status = 200
self.client.request(
mox.StrContains(AUTH_URL + '/tokens/%s/endpoints' % TOKEN), 'GET',
headers=mox.IsA(dict)
).AndReturn((res200, json.dumps(ENDPOINTS_RESULT)))
self.client.request(
mox.StrContains(ENDPOINT_URL + '/resource'), 'GET',
headers=mox.ContainsKeyValue('X-Auth-Token', TOKEN)
).AndReturn((res200, ''))
self.mox.ReplayAll()
self.client.do_request('/resource', 'GET')
def test_use_given_endpoint_url(self):
self.client = client.HTTPClient(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION,
endpoint_url=ENDPOINT_OVERRIDE)
self.assertEqual(self.client.endpoint_url, ENDPOINT_OVERRIDE)
self.mox.StubOutWithMock(self.client, "request")
self.client.auth_token = TOKEN
res200 = self.mox.CreateMock(httplib2.Response)
res200.status = 200
self.client.request(
mox.StrContains(ENDPOINT_OVERRIDE + '/resource'), 'GET',
headers=mox.ContainsKeyValue('X-Auth-Token', TOKEN)
).AndReturn((res200, ''))
self.mox.ReplayAll()
self.client.do_request('/resource', 'GET')
self.assertEqual(self.client.endpoint_url, ENDPOINT_OVERRIDE)
def test_get_endpoint_url_other(self):
self.client = client.HTTPClient(
username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
auth_url=AUTH_URL, region_name=REGION, endpoint_type='otherURL')
self.mox.StubOutWithMock(self.client, "request")
self.client.auth_token = TOKEN
res200 = self.mox.CreateMock(httplib2.Response)
res200.status = 200
self.client.request(
mox.StrContains(AUTH_URL + '/tokens/%s/endpoints' % TOKEN), 'GET',
headers=mox.IsA(dict)
).AndReturn((res200, json.dumps(ENDPOINTS_RESULT)))
self.mox.ReplayAll()
self.assertRaises(exceptions.EndpointTypeNotFound,
self.client.do_request,
'/resource',
'GET')
    def test_get_endpoint_url_failed(self):
        """A 401 from the endpoint lookup triggers re-authentication, after
        which the resource request proceeds with a fresh token."""
        self.mox.StubOutWithMock(self.client, "request")
        self.client.auth_token = TOKEN
        res200 = self.mox.CreateMock(httplib2.Response)
        res200.status = 200
        res401 = self.mox.CreateMock(httplib2.Response)
        res401.status = 401
        # Endpoint lookup is rejected (stale token).
        self.client.request(
            mox.StrContains(AUTH_URL + '/tokens/%s/endpoints' % TOKEN), 'GET',
            headers=mox.IsA(dict)
        ).AndReturn((res401, ''))
        # The client then re-authenticates against keystone ...
        self.client.request(
            AUTH_URL + '/tokens', 'POST',
            body=mox.IsA(str), headers=mox.IsA(dict)
        ).AndReturn((res200, json.dumps(KS_TOKEN_RESULT)))
        # ... and performs the resource request successfully.
        self.client.request(
            mox.StrContains(ENDPOINT_URL + '/resource'), 'GET',
            headers=mox.ContainsKeyValue('X-Auth-Token', TOKEN)
        ).AndReturn((res200, ''))
        self.mox.ReplayAll()
        self.client.do_request('/resource', 'GET')
    def test_url_for(self):
        """ServiceCatalog.url_for honours the requested endpoint_type and
        falls back to publicURL when none is specified."""
        resources = copy.deepcopy(KS_TOKEN_RESULT)
        endpoints = resources['access']['serviceCatalog'][0]['endpoints'][0]
        endpoints['publicURL'] = 'public'
        endpoints['internalURL'] = 'internal'
        endpoints['adminURL'] = 'admin'
        catalog = client.ServiceCatalog(resources)
        # endpoint_type not specified: publicURL is the default.
        url = catalog.url_for(attr='region',
                              filter_value=REGION)
        self.assertEqual('public', url)
        # endpoint type specified (3 cases)
        url = catalog.url_for(attr='region',
                              filter_value=REGION,
                              endpoint_type='adminURL')
        self.assertEqual('admin', url)
        url = catalog.url_for(attr='region',
                              filter_value=REGION,
                              endpoint_type='publicURL')
        self.assertEqual('public', url)
        url = catalog.url_for(attr='region',
                              filter_value=REGION,
                              endpoint_type='internalURL')
        self.assertEqual('internal', url)
        # endpoint_type requested does not exist.
        self.assertRaises(exceptions.EndpointTypeNotFound,
                          catalog.url_for,
                          attr='region',
                          filter_value=REGION,
                          endpoint_type='privateURL')
    def test_url_for_only_public_url(self):
        """url_for when the service catalog only has publicURL: public is
        returned both when asked for explicitly and as the default."""
        resources = copy.deepcopy(KS_TOKEN_RESULT)
        # NOTE(review): the catalog is built *before* 'resources' is mutated
        # below; this only works if ServiceCatalog keeps a reference to the
        # dict rather than copying it -- confirm against ServiceCatalog.
        catalog = client.ServiceCatalog(resources)
        # Strip every endpoint except publicURL.
        endpoints = resources['access']['serviceCatalog'][0]['endpoints'][0]
        del endpoints['internalURL']
        del endpoints['adminURL']
        endpoints['publicURL'] = 'public'
        # Use publicURL when specified explicitly.
        url = catalog.url_for(attr='region',
                              filter_value=REGION,
                              endpoint_type='publicURL')
        self.assertEqual('public', url)
        # publicURL is also used when no endpoint_type is given.
        url = catalog.url_for(attr='region',
                              filter_value=REGION)
        self.assertEqual('public', url)
    def test_url_for_only_admin_url(self):
        """url_for when the service catalog only has adminURL: admin is
        returned when requested explicitly, but the default lookup fails."""
        resources = copy.deepcopy(KS_TOKEN_RESULT)
        # NOTE(review): the catalog is built *before* 'resources' is mutated
        # below; this relies on ServiceCatalog holding a reference to the
        # dict rather than copying it -- confirm against ServiceCatalog.
        catalog = client.ServiceCatalog(resources)
        endpoints = resources['access']['serviceCatalog'][0]['endpoints'][0]
        del endpoints['internalURL']
        del endpoints['publicURL']
        endpoints['adminURL'] = 'admin'
        # Use adminURL when specified explicitly.
        url = catalog.url_for(attr='region',
                              filter_value=REGION,
                              endpoint_type='adminURL')
        self.assertEqual('admin', url)
        # But not when nothing is specified.
        self.assertRaises(exceptions.EndpointTypeNotFound,
                          catalog.url_for,
                          attr='region',
                          filter_value=REGION)
    def test_endpoint_type(self):
        """_extract_service_catalog selects the endpoint matching the
        client's endpoint_type, defaulting to publicURL, and raises for an
        unknown type."""
        resources = copy.deepcopy(KS_TOKEN_RESULT)
        endpoints = resources['access']['serviceCatalog'][0]['endpoints'][0]
        endpoints['internalURL'] = 'internal'
        endpoints['adminURL'] = 'admin'
        endpoints['publicURL'] = 'public'
        # Test default behavior is to choose public.
        self.client = client.HTTPClient(
            username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
            auth_url=AUTH_URL, region_name=REGION)
        self.client._extract_service_catalog(resources)
        self.assertEqual(self.client.endpoint_url, 'public')
        # Test admin url
        self.client = client.HTTPClient(
            username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
            auth_url=AUTH_URL, region_name=REGION, endpoint_type='adminURL')
        self.client._extract_service_catalog(resources)
        self.assertEqual(self.client.endpoint_url, 'admin')
        # Test public url
        self.client = client.HTTPClient(
            username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
            auth_url=AUTH_URL, region_name=REGION, endpoint_type='publicURL')
        self.client._extract_service_catalog(resources)
        self.assertEqual(self.client.endpoint_url, 'public')
        # Test internal url
        self.client = client.HTTPClient(
            username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
            auth_url=AUTH_URL, region_name=REGION, endpoint_type='internalURL')
        self.client._extract_service_catalog(resources)
        self.assertEqual(self.client.endpoint_url, 'internal')
        # Test url that isn't found in the service catalog
        self.client = client.HTTPClient(
            username=USERNAME, tenant_name=TENANT_NAME, password=PASSWORD,
            auth_url=AUTH_URL, region_name=REGION, endpoint_type='privateURL')
        self.assertRaises(exceptions.EndpointTypeNotFound,
                          self.client._extract_service_catalog,
                          resources)
    def test_strip_credentials_from_log(self):
        """The body passed to http_log_req has the password REDACTED, while
        the body actually sent on the wire keeps the clear-text password."""
        def verify_no_credentials(kwargs):
            # Logged request: marker present, real password absent.
            return ('REDACTED' in kwargs['body']) and (
                self.client.password not in kwargs['body'])
        def verify_credentials(body):
            # Wire request: real password present, no marker.
            return 'REDACTED' not in body and self.client.password in body
        self.mox.StubOutWithMock(self.client, "request")
        self.mox.StubOutWithMock(utils, "http_log_req")
        res200 = self.mox.CreateMock(httplib2.Response)
        res200.status = 200
        utils.http_log_req(mox.IgnoreArg(), mox.IgnoreArg(), mox.Func(
            verify_no_credentials))
        self.client.request(
            mox.IsA(str), mox.IsA(str), body=mox.Func(verify_credentials),
            headers=mox.IgnoreArg()
        ).AndReturn((res200, json.dumps(KS_TOKEN_RESULT)))
        utils.http_log_req(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.client.request(
            mox.IsA(str), mox.IsA(str), headers=mox.IsA(dict)
        ).AndReturn((res200, ''))
        self.mox.ReplayAll()
        self.client.do_request('/resource', 'GET')
class CLITestAuthKeystoneWithId(CLITestAuthKeystone):
    """Re-run the keystone auth tests with a client built from a tenant id
    instead of a tenant name."""
    # Auth body expected when authenticating by tenant id.
    auth_type = 'tenantId'
    def setUp(self):
        """Prepare the test environment."""
        super(CLITestAuthKeystoneWithId, self).setUp()
        self.client = client.HTTPClient(username=USERNAME,
                                        tenant_id=TENANT_ID,
                                        password=PASSWORD,
                                        auth_url=AUTH_URL,
                                        region_name=REGION)
class CLITestAuthKeystoneWithIdandName(CLITestAuthKeystone):
    """Re-run the keystone auth tests with a client given both a tenant id
    and a tenant name; auth_type stays 'tenantId'."""
    # Auth body expected when a tenant id is supplied (even alongside a
    # tenant name).
    auth_type = 'tenantId'
    def setUp(self):
        """Prepare the test environment."""
        super(CLITestAuthKeystoneWithIdandName, self).setUp()
        self.client = client.HTTPClient(username=USERNAME,
                                        tenant_id=TENANT_ID,
                                        tenant_name=TENANT_NAME,
                                        password=PASSWORD,
                                        auth_url=AUTH_URL,
                                        region_name=REGION)
| 37.52451 | 79 | 0.602482 | 13,374 | 0.873547 | 0 | 0 | 0 | 0 | 0 | 0 | 3,120 | 0.203788 |
9fa9e701108d419a34d4509a9212f620fff5c5c8 | 8,532 | py | Python | src/airobot/ee_tool/robotiq2f140_pybullet.py | weiqiao/airobot | 5f9c6f239a16f31f8923131e144b360f71a89a93 | [
"MIT"
] | null | null | null | src/airobot/ee_tool/robotiq2f140_pybullet.py | weiqiao/airobot | 5f9c6f239a16f31f8923131e144b360f71a89a93 | [
"MIT"
] | null | null | null | src/airobot/ee_tool/robotiq2f140_pybullet.py | weiqiao/airobot | 5f9c6f239a16f31f8923131e144b360f71a89a93 | [
"MIT"
] | null | null | null | import threading
import time
import airobot.utils.common as arutil
from airobot.ee_tool.ee import EndEffectorTool
from airobot.utils.arm_util import wait_to_reach_jnt_goal
class Robotiq2F140Pybullet(EndEffectorTool):
    """
    Class for interfacing with a Robotiq 2F140 gripper when
    it is attached to a UR5e arm in pybullet.

    Args:
        cfgs (YACS CfgNode): configurations for the gripper.
        pb_client (BulletClient): pybullet client.

    Attributes:
        cfgs (YACS CfgNode): configurations for the gripper.
        gripper_close_angle (float): position value corresponding to the
            fully closed position of the gripper.
        gripper_open_angle (float): position value corresponding to the
            fully open position of the gripper.
        jnt_names (list): names of the gripper joints.
        gripper_jnt_ids (list): pybullet joint ids of the gripper joints.
        robot_id (int): robot id in Pybullet.
        jnt_to_id (dict): mapping from the joint name to joint id.
    """
    def __init__(self, cfgs, pb_client):
        self._pb = pb_client
        super(Robotiq2F140Pybullet, self).__init__(cfgs=cfgs)
        # Sign coefficients applied to the first (driving) joint's position
        # to obtain the target positions of the follower joints.
        self._gripper_mimic_coeff = [1, -1, 1, -1, -1, 1]
        # First entry is the driving joint; the rest mimic it.
        self.jnt_names = [
            'finger_joint', 'left_inner_knuckle_joint',
            'left_inner_finger_joint', 'right_outer_knuckle_joint',
            'right_inner_knuckle_joint', 'right_inner_finger_joint',
        ]
        self._max_torque = 5.0
        self.gripper_close_angle = self.cfgs.EETOOL.CLOSE_ANGLE
        self.gripper_open_angle = self.cfgs.EETOOL.OPEN_ANGLE
        # Whether the background mimic thread has already been started.
        self._mthread_started = False
        self.deactivate()
    def feed_robot_info(self, robot_id, jnt_to_id):
        """
        Setup the gripper, pass the robot info from the arm to the gripper.

        Args:
            robot_id (int): robot id in Pybullet.
            jnt_to_id (dict): mapping from the joint name to joint id.
        """
        self.robot_id = robot_id
        self.jnt_to_id = jnt_to_id
        self.gripper_jnt_ids = [
            self.jnt_to_id[jnt] for jnt in self.jnt_names
        ]
        # Increase finger-pad friction so grasped objects do not slip.
        self._pb.changeDynamics(self.robot_id,
                                self.jnt_to_id['left_inner_finger_pad_joint'],
                                lateralFriction=2.0,
                                spinningFriction=1.0,
                                rollingFriction=1.0)
        self._pb.changeDynamics(self.robot_id,
                                self.jnt_to_id['right_inner_finger_pad_joint'],
                                lateralFriction=2.0,
                                spinningFriction=1.0,
                                rollingFriction=1.0)
        # if the gripper has been activated once,
        # the following code is used to prevent starting
        # a new thread after the arm reset if a thread has been started
        if not self._mthread_started:
            self._mthread_started = True
            # Background thread that keeps the follower joints mirroring the
            # driving joint (daemonized so it dies with the process).
            self._th_gripper = threading.Thread(target=self._th_mimic_gripper)
            self._th_gripper.daemon = True
            self._th_gripper.start()
        else:
            # Thread already running; nothing else to do.
            return
    def open(self, wait=True):
        """
        Open the gripper.

        Args:
            wait (bool): wait until the gripper reaches the open position
                (only effective in pybullet realtime mode).

        Returns:
            bool: return if the action is successful or not.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        success = self.set_pos(self.gripper_open_angle,
                               wait=wait)
        return success
    def close(self, wait=True):
        """
        Close the gripper.

        Args:
            wait (bool): wait until the gripper reaches the closed position
                (only effective in pybullet realtime mode).

        Returns:
            bool: return if the action is successful or not.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        success = self.set_pos(self.gripper_close_angle,
                               wait=wait)
        return success
    def set_pos(self, pos, wait=True):
        """
        Set the gripper position.

        Args:
            pos (float): joint position (clamped to the open/close range).
            wait (bool): wait until the joint position is set
                to the target position.

        Returns:
            bool: A boolean variable representing if the action is
            successful at the moment when the function exits.
        """
        joint_name = self.jnt_names[0]
        # Clamp the command to the physically valid range.
        tgt_pos = arutil.clamp(pos,
                               self.gripper_open_angle,
                               self.gripper_close_angle)
        jnt_id = self.jnt_to_id[joint_name]
        self._pb.setJointMotorControl2(self.robot_id,
                                       jnt_id,
                                       self._pb.POSITION_CONTROL,
                                       targetPosition=tgt_pos,
                                       force=self._max_torque)
        # In stepped (non-realtime) mode the mimic thread does not act, so
        # the follower joints are updated synchronously here.
        if not self._pb.in_realtime_mode():
            self._set_rest_joints(tgt_pos)
        success = False
        if self._pb.in_realtime_mode() and wait:
            success = wait_to_reach_jnt_goal(
                tgt_pos,
                get_func=self.get_pos,
                joint_name=joint_name,
                get_func_derv=self.get_vel,
                timeout=self.cfgs.ARM.TIMEOUT_LIMIT,
                max_error=self.cfgs.ARM.MAX_JOINT_ERROR
            )
        return success
    def get_pos(self):
        """
        Return the joint position of the gripper (driving joint only).

        Returns:
            float: joint position.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        jnt_id = self.jnt_to_id[self.jnt_names[0]]
        pos = self._pb.getJointState(self.robot_id, jnt_id)[0]
        return pos
    def get_vel(self):
        """
        Return the joint velocity of the gripper (driving joint only).

        Returns:
            float: joint velocity.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        jnt_id = self.jnt_to_id[self.jnt_names[0]]
        vel = self._pb.getJointState(self.robot_id, jnt_id)[1]
        return vel
    def disable_gripper_self_collision(self):
        """
        Disable collision checking between every pair of gripper joints
        in Pybullet.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        for i in range(len(self.jnt_names)):
            for j in range(i + 1, len(self.jnt_names)):
                jnt_idx1 = self.jnt_to_id[self.jnt_names[i]]
                jnt_idx2 = self.jnt_to_id[self.jnt_names[j]]
                self._pb.setCollisionFilterPair(self.robot_id,
                                                self.robot_id,
                                                jnt_idx1,
                                                jnt_idx2,
                                                enableCollision=0)
    def _mimic_gripper(self, joint_val):
        """
        Given the value for the first joint,
        mimic the joint values for the rest joints.

        Returns:
            list: position values for all gripper joints, with the mimic
            sign coefficients applied to the followers.
        """
        jnt_vals = [joint_val]
        for i in range(1, len(self.jnt_names)):
            jnt_vals.append(joint_val * self._gripper_mimic_coeff[i])
        return jnt_vals
    def _th_mimic_gripper(self):
        """
        Thread body: make all the other joints of the gripper
        follow the motion of the first joint of the gripper.
        Runs forever; only acts when the gripper is activated and the
        simulation is in realtime mode.
        """
        while True:
            if self._is_activated and self._pb.in_realtime_mode():
                self._set_rest_joints()
            time.sleep(0.005)
    def _set_rest_joints(self, gripper_pos=None):
        # Command every follower joint to mirror the driving joint.  When
        # gripper_pos is None the current driving-joint position is read
        # from the simulation.
        max_torq = self._max_torque
        max_torques = [max_torq] * (len(self.jnt_names) - 1)
        if gripper_pos is None:
            gripper_pos = self.get_pos()
        gripper_poss = self._mimic_gripper(gripper_pos)[1:]
        gripper_vels = [0.0] * len(max_torques)
        self._pb.setJointMotorControlArray(self.robot_id,
                                           self.gripper_jnt_ids[1:],
                                           self._pb.POSITION_CONTROL,
                                           targetPositions=gripper_poss,
                                           targetVelocities=gripper_vels,
                                           forces=max_torques)
    def deactivate(self):
        """
        Deactivate the gripper.
        """
        self._is_activated = False
    def activate(self):
        """
        Activate the gripper.
        """
        self._is_activated = True
| 36.306383 | 79 | 0.557782 | 8,356 | 0.979372 | 0 | 0 | 0 | 0 | 0 | 0 | 2,792 | 0.327239 |
9fabd95aa01b07fe66b5dd8128304fe6e2bfee3f | 4,907 | py | Python | schrodinger2D.py | frostburn/quantum-pde | 69603d11ad7aadfbd0b2ab57bfb65b8963582df3 | [
"MIT"
] | null | null | null | schrodinger2D.py | frostburn/quantum-pde | 69603d11ad7aadfbd0b2ab57bfb65b8963582df3 | [
"MIT"
] | null | null | null | schrodinger2D.py | frostburn/quantum-pde | 69603d11ad7aadfbd0b2ab57bfb65b8963582df3 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division
import argparse
import os
import sys
from matplotlib.animation import FuncAnimation
from pylab import *
from flow import schrodinger_flow_2D
from util import normalize_2D, advance_pde
from lattice import make_lattice_2D, make_border_wall_2D, make_periodic_2D, RESOLUTIONS
from episodes import EPISODES
if __name__ == '__main__':
    # Command-line driver: simulate one episode of the 2D Schrödinger
    # equation and either animate it live or dump raw frames to a folder.
    parser = argparse.ArgumentParser(description=u'Dump frames of 2D Schrödinger equation to a folder')
    parser.add_argument('episode', help='Episode name. One of {}'.format(EPISODES.keys()))
    parser.add_argument('--folder', help='Folder to dump raw frames into')
    parser.add_argument('--resolution', help='Screen resolution. One of {}'.format(RESOLUTIONS.keys()), default='160p')
    parser.add_argument('--animate', help='Animate instead of dumping frames', action='store_true')
    parser.add_argument('--num_frames', help='Number of frames to dump', type=int, default=600)
    args = parser.parse_args()
    if args.folder and args.animate:
        print('Animation is not supported while dumping to disk')
        sys.exit()
    if args.folder and not args.animate and not os.path.isdir(args.folder):
        print("Target folder doesn't exist")
        sys.exit()
    episode = EPISODES[args.episode](args.resolution)
    if isinstance(episode, tuple):
        dx, screen, psi, potential, episode_length = episode
    else:
        # At module level locals() is globals(), so these calls inject the
        # episode dict entries (dx, screen, psi, potential, ...) as global
        # names and supply defaults for 'measurements' and 'dt'.
        locals().update(episode)
        locals().setdefault("measurements", {})
        locals().setdefault("dt", 0.004 * dx)
    # NOTE(review): in the tuple branch above neither 'measurements' nor
    # 'dt' is defined before being used below -- presumably only dict-style
    # episodes take that path; confirm against EPISODES.
    t = 0
    # Pick the largest dt <= dt_frame / ticks so an integer number of
    # simulation ticks fits exactly in one output frame.
    dt_frame = episode_length / args.num_frames
    ticks_per_frame = 1
    while dt_frame > dt * ticks_per_frame:
        ticks_per_frame += 1
    dt = dt_frame / ticks_per_frame
    start = datetime.datetime.now()
    print("Rendering episode '{}'".format(args.episode))
    print("Resolution = {}".format(args.resolution))
    print("Episode length = {}".format(episode_length))
    print("Delta t = {}".format(dt))
    print("Ticks per frame = {}".format(ticks_per_frame))
    print("Start time = {}".format(start))
    def step():
        """Advance the wave function by one output frame and apply any
        measurements whose scheduled time has passed."""
        global psi, t
        for j in range(ticks_per_frame):
            # The PDE integrator returns the interior patch; the 4-cell
            # border is left untouched (boundary region).
            patch = advance_pde(t, psi, potential, dt, dx, schrodinger_flow_2D, dimensions=2)
            psi[4:-4, 4:-4] = patch
            t += dt
        for measurement_t in list(measurements.keys()):
            if t >= measurement_t:
                measurement = measurements.pop(measurement_t)
                mask = measurement["mask"]
                if measurement["forced"]:
                    # Forced measurement: always collapse onto the mask.
                    psi *= mask
                else:
                    # Probabilistic collapse: project onto the mask with
                    # probability equal to the mask's probability mass.
                    psi = normalize_2D(psi, dx)
                    prob = (abs(psi * mask)**2*dx*dx).sum()
                    if prob > rand():
                        psi *= mask
                    else:
                        psi *= 1-mask
                psi = normalize_2D(psi, dx)
    if args.animate:
        # Live preview with matplotlib; nothing is written to disk.
        fig, ax = subplots()
        prob = abs(psi)**2
        impsi = imshow(prob[screen], vmin=0, vmax=0.1*prob.max())
        def init():
            return impsi,
        def update(frame):
            global psi, t
            if frame == 0:
                print("t = {}, Energy = {}".format(t, (dx*dx*abs(schrodinger_flow_2D(0, psi, potential, dx))**2).sum()))
            step()
            prob = abs(psi)**2
            impsi.set_data(prob[screen])
            return impsi,
        ani = FuncAnimation(fig, update, frames=range(100), init_func=init, blit=True, repeat=True, interval=1)
        show()
    else:
        # Dump mode: full-state checkpoints every 1000 frames, screen-sized
        # raw frames every frame.
        checkpoint_path = os.path.join(args.folder, 'checkpoint')
        if not os.path.isdir(checkpoint_path):
            os.mkdir(checkpoint_path)
        raw_path = os.path.join(args.folder, 'raw')
        if not os.path.isdir(raw_path):
            os.mkdir(raw_path)
        for frame in range(args.num_frames):
            if frame % 100 == 0:
                # Periodic progress report with an ETA estimate.
                print("t = {}, Energy = {}".format(t, (dx*dx*abs(schrodinger_flow_2D(0, psi, potential, dx))**2).sum()))
                if t > 0:
                    now = datetime.datetime.now()
                    duration = now - start
                    fraction_complete = t / episode_length
                    fraction_remaining = 1 - fraction_complete
                    remaining = datetime.timedelta(seconds=(duration.total_seconds() / fraction_complete * fraction_remaining))
                    eta = now + remaining
                    print("ETA = {}; {} left".format(eta, remaining))
                print("")
            if frame % 1000 == 0:
                with open(os.path.join(checkpoint_path, "frame{:05}.dat".format(frame)), "wb") as f:
                    save(f, psi)
            with open(os.path.join(raw_path, "frame{:05}.dat".format(frame)), "wb") as f:
                save(f, psi[screen])
            step()
            # NOTE(review): this increment has no effect -- the for
            # statement rebinds 'frame' on every iteration.
            frame += 1
| 37.746154 | 127 | 0.573263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 649 | 0.132233 |
9fac1f748968d9d8f776e72e18bfa9ae886fb300 | 3,170 | py | Python | tests/numpy_unit_testing/test_function_binary_operator_true_divide.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
] | 20 | 2019-12-02T11:49:12.000Z | 2021-12-24T19:34:32.000Z | tests/numpy_unit_testing/test_function_binary_operator_true_divide.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
] | null | null | null | tests/numpy_unit_testing/test_function_binary_operator_true_divide.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
] | 5 | 2019-12-02T12:16:22.000Z | 2021-10-22T02:27:47.000Z | import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionTruedivideTest(unittest.TestCase):
    """Checks that dolphindb_numpy.true_divide matches numpy.true_divide for
    scalars, lists, arrays, and orca Series/DataFrames."""
    @classmethod
    def setUpClass(cls):
        # connect to a DolphinDB server
        orca.connect(HOST, PORT, "admin", "123456")
    def test_function_math_binary_true_divide_scalar(self):
        # Complex, float, negative, integer and zero numerators/denominators.
        self.assertEqual(dnp.true_divide(1.2 + 1j, 1.2 - 1j), np.true_divide(1.2 + 1j, 1.2 - 1j))
        self.assertEqual(dnp.true_divide(0.5, 9), np.true_divide(0.5, 9))
        self.assertEqual(dnp.true_divide(-1, 8.5), np.true_divide(-1, 8.5))
        self.assertEqual(dnp.true_divide(1, 4), np.true_divide(1, 4))
        self.assertEqual(dnp.true_divide(1, -5), np.true_divide(1, -5))
        self.assertEqual(dnp.true_divide(0, 9), np.true_divide(0, 9))
        # nan propagates through both implementations.
        self.assertEqual(dnp.isnan(dnp.true_divide(dnp.nan, -5)), True)
        self.assertEqual(np.isnan(np.true_divide(dnp.nan, -5)), True)
    def test_function_math_binary_true_divide_list(self):
        lst1 = [1, 2, 3]
        lst2 = [4, 6, 9]
        assert_array_equal(dnp.true_divide(lst1, lst2), np.true_divide(lst1, lst2))
    def test_function_math_binary_true_divide_array_with_scalar(self):
        npa = np.array([1, 2, 3])
        dnpa = dnp.array([1, 2, 3])
        assert_array_equal(dnp.true_divide(dnpa, 1), np.true_divide(npa, 1))
        assert_array_equal(dnp.true_divide(dnpa, dnp.nan), np.true_divide(npa, np.nan))
        assert_array_equal(dnp.true_divide(1, dnpa), np.true_divide(1, npa))
    def test_function_math_binary_true_divide_array_with_array(self):
        npa1 = np.array([1, 2, 3])
        npa2 = np.array([4, 6, 9])
        dnpa1 = dnp.array([1, 2, 3])
        dnpa2 = dnp.array([4, 6, 9])
        assert_array_equal(dnp.true_divide(dnpa1, dnpa2), np.true_divide(npa1, npa2))
    def test_function_math_binary_true_divide_array_with_array_param_out(self):
        # Both implementations must honour the 'out' parameter.
        npa1 = np.array([1, 2, 3])
        npa2 = np.array([4, 6, 9])
        npa = np.zeros(shape=(1, 3))
        dnpa1 = dnp.array([1, 2, 3])
        dnpa2 = dnp.array([4, 6, 9])
        dnpa = dnp.zeros(shape=(1, 3))
        np.true_divide(npa1, npa2, out=npa)
        dnp.true_divide(dnpa1, dnpa2, out=dnpa)
        assert_array_equal(dnpa, npa)
    def test_function_math_binary_true_divide_array_with_series(self):
        npa = np.array([1, 2, 3])
        dnpa = dnp.array([1, 2, 3])
        ps = pd.Series([4, 6, 9])
        os = orca.Series([4, 6, 9])
        assert_series_equal(dnp.true_divide(dnpa, os).to_pandas(), np.true_divide(npa, ps))
        assert_series_equal(dnp.true_divide(os, dnpa).to_pandas(), np.true_divide(ps, npa))
    def test_function_math_binary_true_divide_array_with_dataframe(self):
        npa = np.array([1, 2, 3])
        dnpa = dnp.array([1, 2, 3])
        pdf = pd.DataFrame({'A': [4, 6, 9]})
        odf = orca.DataFrame({'A': [4, 6, 9]})
        # TODO: orca true_divide bug
        # assert_frame_equal(odf.true_divide(dnpa, axis=0).to_pandas(), pdf.true_divide(npa, axis=0))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 39.625 | 101 | 0.643218 | 2,930 | 0.92429 | 0 | 0 | 129 | 0.040694 | 0 | 0 | 183 | 0.057729 |
9fade505bc662862b1aaa9416970953c55df7076 | 3,217 | py | Python | apps/loader/utils.py | PremierLangage/premierlangage | 7134a2aadffee2bf264abee6c4b23ea33f1b390b | [
"CECILL-B"
] | 8 | 2019-01-30T13:51:59.000Z | 2022-01-08T03:26:53.000Z | apps/loader/utils.py | PremierLangage/premierlangage | 7134a2aadffee2bf264abee6c4b23ea33f1b390b | [
"CECILL-B"
] | 286 | 2019-01-18T21:35:51.000Z | 2022-03-24T18:53:59.000Z | apps/loader/utils.py | PremierLangage/premierlangage | 7134a2aadffee2bf264abee6c4b23ea33f1b390b | [
"CECILL-B"
] | 4 | 2019-02-11T13:38:30.000Z | 2021-03-02T20:59:00.000Z | import os
from django.conf import settings
def get_location(directory, path, current="", parser=None):
    """Resolve ``path`` to a tuple ``(directory_name, normalized_path)``.

    Supported path forms:
        * ``lib:/abs/path`` -- absolute path inside another library.
        * ``/abs/path``     -- absolute path inside ``directory`` (falls
          back to scanning the other libraries when not found there).
        * ``~/rel/path``    -- path relative to the directory root.
        * ``rel/path``      -- path relative to ``current``.

    Args:
        directory: [Directory] Directory containing the currently parsed
            file; its ``root`` and ``name`` attributes are used.
        path: [str] Path to the file needed.
        current: [str] Current position relative to directory.
        parser: unused by this function; kept for caller compatibility.

    Raises:
        SyntaxError: if a library is given but the path after ':' isn't
            absolute, or if the library name is an integer.
        FileNotFoundError: if either the library or the file does not
            exist."""
    if ':' in path:  # Relative to a library
        lib, path = path.split(':')
        if lib.isdigit():
            raise SyntaxError("Library's name cannot be an integer")
        if not path.startswith('/'):
            raise SyntaxError("Syntax Error (path after ':' must be absolute)")
        path = path[1:]
        absolute = os.path.join(settings.FILEBROWSER_ROOT, lib)
        if not os.path.isdir(absolute):
            raise FileNotFoundError("Library '%s' does not exists" % lib)
        absolute = os.path.join(absolute, path)
        if not os.path.isfile(absolute):
            raise FileNotFoundError("File '%s' does not exists in library '%s'" % (path, lib))
        return lib, os.path.normpath(path)
    if path.startswith('/'):
        path = path[1:]
        absolute = os.path.join(directory.root, path)
        if not os.path.isfile(absolute):
            # Not in this directory: scan the other libraries (everything at
            # the filebrowser root except the user's home).
            for lib in [i for i in os.listdir(settings.FILEBROWSER_ROOT) if
                        i != settings.HOME]:  # pragma: no cover
                absolute = os.path.join(settings.FILEBROWSER_ROOT, lib, path)
                if os.path.isfile(absolute):
                    return lib, path
            raise FileNotFoundError("File '%s' does not exist" % path)
        return directory.name, os.path.normpath(path)
    if path.startswith('~/'):  # Relative to user's home
        path = path[2:]
        absolute = os.path.join(directory.root, path)
        if not os.path.isfile(absolute):
            raise FileNotFoundError("File '%s' does not exists" % path)
        return directory.name, os.path.normpath(path)
    # Relative to current file
    absolute = os.path.join(directory.root, current, path)
    if not os.path.isfile(absolute):
        raise FileNotFoundError("File '%s' does not exists" % path)
    return directory.name, os.path.normpath(os.path.join(current, path))
def extends_dict(target, source):
    """Recursively merge ``source`` into ``target`` without overwriting.

    Keys missing from ``target`` are copied from ``source``.  When a key
    exists in both and the current value is a dict, the merge recurses;
    when it is a list, the source items are appended.  Any other existing
    value is left untouched.

    Args:
        target (dict): dictionary updated in place.
        source (dict): dictionary providing default/extra entries.

    Returns:
        dict: ``target``, for call chaining.
    """
    for key, value in source.items():
        if key not in target:
            target[key] = value
        # isinstance instead of `type(...) is ...` so dict/list subclasses
        # (e.g. OrderedDict) are merged the same way as their base type.
        elif isinstance(target[key], dict):
            extends_dict(target[key], value)
        elif isinstance(target[key], list):
            target[key] += value
    return target
def displayed_path(path):
    """Return ``path`` relative to the filebrowser root, mapping a numeric
    top-level directory (a user id) to the label 'home'."""
    stripped = path.replace(settings.FILEBROWSER_ROOT, '')
    segments = [segment for segment in stripped.split('/') if segment]
    head, tail = segments[0], segments[1:]
    if head.isdigit():
        head = 'home'
    return os.path.join(head, *tail)
| 37.406977 | 100 | 0.594032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,047 | 0.325459 |
9fb1e69711f393a902cc733c79510a11dd6b8db4 | 7,309 | py | Python | backend/venv/Lib/site-packages/github/tools/template.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | backend/venv/Lib/site-packages/github/tools/template.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | backend/venv/Lib/site-packages/github/tools/template.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | """
:Description: PasteScript Template to generate a GitHub hosted python package.
Let you set the package name, a one line description, the Licence (support
GPL, LGPL, AGPL and BSD - GPLv3 by default) and the author name, email and
organisation variables::
paster create -t gh_package <project name>
.. note::
The default author name and email variables are the ones set with
git-config::
git config --global user.name "Damien Lebrun"
git config --global user.email dinoboff@hotmail.com
The result::
<project name>/
docs/
source/
_static
_templates/
conf.py
index.rst
src/
<package name>/
__init__.py
support-files/
.gitignore
bootstrap.py
LICENCE
MANIFEST.in
pavement.py
README.rst
setup.cfg
* <project name>/pavement.py is the paver configuration file. All the setuptools
tasks are available with paver. Paver make the creation of of new task easy.
See `paver documentation <http://www.blueskyonmars.com/projects/paver/>`_
for more details::
paver paverdocs
* <project name>/src contain your package.
* <project name>/docs/source/ will contains your documentation source. conf.py
is Sphinx' configuration file.
Check `Sphinx' documentation <http://sphinx.pocoo.org/>`_ for more details.
.. note::
The version number, the project name and author name(s) are set in
``pavement.py`` and shared with ``docs/source/conf.py``.
However licence and copyright information are hard coded into ``LICENCE``,
``pavement.py``, ``docs/source/conf`` and ``src/<package>/__init__.py``.
"""
import getpass
import os
from datetime import date

from git import Git
from paste.script.templates import Template
from paste.script.templates import var
YEAR = date.today().year
LICENCE_HEADER = """%(description)s
Copyright (c) %(year)s, %(author)s
All rights reserved.
"""
GPL = """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU%(gpl_type)s General Public License as published by
the Free Software Foundation, either version %(gpl_version)s of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU%(gpl_type)s General Public License for more details.
You should have received a copy of the GNU%(gpl_type)s General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
BSD = """
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the %(org)s nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Default author name/email come from the user's git configuration.  When
# git has no user.name set, fall back to the OS account name; getpass.getuser
# is used instead of os.getlogin because the latter raises OSError when the
# process has no controlling terminal (cron, CI, some IDEs).
DEFAULT_NAME = Git(os.getcwd()).config(
    'user.name', with_exceptions=False).strip()
DEFAULT_NAME = DEFAULT_NAME or getpass.getuser()
DEFAULT_EMAIL = Git(os.getcwd()).config(
    'user.email', with_exceptions=False).strip()
class GithubTemplate(Template):
    """Paver template for a GitHub hosted Python package."""
    _template_dir = 'tmpl/gh'
    summary = ("A basic layout for project hosted on GitHub "
               "and managed with Paver")
    use_cheetah = True
    vars = [
        var('package', 'The package contained',
            default='example'),
        var('description',
            'One-line description of the package',
            default='<On-line description>'),
        var('licence',
            'package licence - GPLv2/GPLv3/LGPLv2/LGPLv3/AGPLv3/BSD',
            default='GPLv3'),
        var('author', 'Author name', DEFAULT_NAME),
        var('author_email', 'Author email', DEFAULT_EMAIL),
        var('org', 'Organisation name - for licence.',
            default='<Organisation>'),
    ]
    # (gpl_type, gpl_version, canonical licence name) per GPL-family
    # licence; anything not listed (including 'GPLv3') falls back to plain
    # GPL version 3, matching the previous if/elif chain's default branch.
    _GPL_VARIANTS = {
        'GPLv2': ('', '2', 'GPLv2'),
        'LGPLv2': (' Lesser', '2', 'LGPLv2'),
        'LGPLv3': (' Lesser', '3', 'LGPLv3'),
        'AGPLv3': (' Affero', '3', 'AGPLv3'),
    }
    def check_vars(self, vars, command):
        """
        Reset the package variable in interactive mode so that project and
        package names can be different (GitHub and Python
        have different restrictions on names).
        """
        if not command.options.no_interactive and \
           not hasattr(command, '_deleted_once'):
            del vars['package']
            command._deleted_once = True
        return Template.check_vars(self, vars, command)
    def pre(self, command, output_dir, vars):
        """
        Set extra template variables:

        * "year", current year.
        * "gitignore", set to ".gitignore".
        * "licence_body", licence notice of the package.
        * "gpl_type"/"gpl_version", filled in for GPL-family licences.
        """
        vars['year'] = YEAR
        vars['gitignore'] = '.gitignore'
        vars['licence_body'] = ''
        vars['gpl_type'] = ''
        vars['gpl_version'] = ''
        licence = vars.get('licence')
        if not licence:
            return
        if licence == 'BSD':
            licence_tmpl = BSD
        else:
            # Table-driven lookup replaces the repetitive if/elif chain
            # (which also redundantly reassigned vars['licence'] to itself).
            gpl_type, gpl_version, canonical = self._GPL_VARIANTS.get(
                licence, ('', '3', 'GPL'))
            vars['gpl_type'] = gpl_type
            vars['gpl_version'] = gpl_version
            vars['licence'] = canonical
            licence_tmpl = GPL
        vars['licence_body'] = (LICENCE_HEADER + licence_tmpl) % vars
9fb248fe5801f1909b00e7b87eb821f0157d90d8 | 26,158 | py | Python | app.py | cwh32/DiffCapAnalyzer | 882f838c6e5451620cea386f060fd7a384d50e41 | [
"MIT"
] | null | null | null | app.py | cwh32/DiffCapAnalyzer | 882f838c6e5451620cea386f060fd7a384d50e41 | [
"MIT"
] | null | null | null | app.py | cwh32/DiffCapAnalyzer | 882f838c6e5451620cea386f060fd7a384d50e41 | [
"MIT"
] | null | null | null | import ast
import base64
import dash
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_table as dt
import io
import json
from lmfit.model import load_modelresult
from lmfit.model import save_modelresult
import numpy as np
import os
import pandas as pd
import plotly
import urllib
import urllib.parse
from diffcapanalyzer.app_helper_functions import parse_contents
from diffcapanalyzer.app_helper_functions import pop_with_db
from diffcapanalyzer.chachifuncs import col_variables
from diffcapanalyzer.databasewrappers import get_db_filenames, get_filename_pref
from diffcapanalyzer.databasefuncs import get_file_from_database
from diffcapanalyzer.descriptors import generate_model
from diffcapanalyzer.descriptors import get_model_dfs
# Paths to the working database (uploads land here) and the read-only
# example database shipped with the project.
database = "data/databases/dQdV.db"
init_db = "data/databases/init_database.db"
# Fail fast if the bundled example database is missing. A plain raise is used
# instead of `assert` so the check still runs under `python -O`.
if not os.path.exists(init_db):
    raise FileNotFoundError(init_db)
app = dash.Dash(__name__,
                external_stylesheets=["https://codepen.io/chriddyp/pen/bWLwgP.css",
                                      {'href': "https://codepen.io/chriddyp/pen/bWLwgP.css",
                                       'rel': 'stylesheet'}])
##########################################
# App Layout
##########################################
# NOTE(review): the trailing comma below makes `Introduction` a 1-tuple
# containing the Markdown component, and it does not appear to be referenced
# by app.layout — confirm before removing or fixing.
Introduction = dcc.Markdown('''
# dQ/dV
## Interface for visualizing battery cycle data
#'''),
# Full page layout. Structure, top to bottom: header, data-selection/upload
# row, cycle slider, raw-vs-smoothed plot, descriptor/model-tuning section,
# CSV download links, and a hidden dummy div.
app.layout = html.Div([
    # --- Page header ---
    html.Div([
        html.H1('Differential Capacity Analyzer')
    ]),
    # --- Data selection (left) and upload (right) row ---
    html.Div([
        html.Div([html.H5('Choose existing data: '),
                  html.Div([html.Div([html.H6('Here are the files currently available in the database: ')],
                                     style={'width': '90%', 'textAlign': 'left', 'margin-left': '50px'}),
                            html.Div([dcc.Dropdown(id='available-data',
                                                   options=[{'label': 'options',
                                                             'value': 'options'}])],
                                     style={'width': '80%',
                                            'vertical-align': 'center',
                                            'margin-left': '50px'}),
                            html.Div(id='gen-desc-confirmation')])], style={'width': '43%', 'display': 'inline-block', 'vertical-align': 'top'}),
        html.Div([html.H5('Or load in your own data: '),
                  html.Div([html.Div([html.H6('1. Input your datatype: ')], style={'width': '40%', 'textAlign': 'left', 'display': 'inline-block', 'margin-left': '50px'}),
                            html.Div([dcc.Dropdown(id='input-datatype',
                                                   options=[{'label': 'Arbin', 'value': 'ARBIN'}, {
                                                       'label': 'MACCOR', 'value': 'MACCOR'}],
                                                   placeholder='datatype')],
                                     style={'width': '40%', 'vertical-align': 'top', 'display': 'inline-block',
                                            'margin-right': '10px', 'margin-left': '10px'})],
                  ),
        html.Div([html.Div([html.H6('2. Upload your data: ')], style={'width': '40%', 'textAlign': 'left', 'display': 'inline-block', 'margin-left': '50px'}),
                  html.Div([dcc.Upload(
                      id='upload-data',
                      children=html.Div([
                          'Drag and Drop or ',
                          html.A('Select Files')]),
                      style={
                          'width': '98%',
                          'height': '60px',
                          'lineHeight': '60px',
                          'borderWidth': '1px',
                          'borderStyle': 'dashed',
                          'borderRadius': '5px',
                          'textAlign': 'center',
                          # 'margin': '10px'
                      },
                      multiple=False)], style={'width': '40.2%', 'display': 'inline-block', 'margin-right': '10px', 'margin-left': '10px'}),
                  html.Div([dcc.Loading(id="loading-1",
                                        children=[
                                            html.Div(id='output-data-upload')],
                                        type="default")],
                           style={'margin-left': '50px',
                                  'font-size': '20px'}),
                  html.Div(['Note: data will be saved in database with the original filename.'],
                           style={'margin-left': '50px', 'font-style': 'italic'}),
                  html.Div(['Once data is uploaded, select the new data from the dropdown menu to the left (refresh the page if necessary).'],
                           style={'margin-left': '50px', 'font-style': 'italic'})],
                 )], style={'width': '55%', 'display': 'inline-block'}),
    ]),
    # --- Cycle slider and show/hide-model toggle ---
    html.Div([
        html.Div([html.H6('Cycle Number')], style={'textAlign': 'left'}),
        dcc.Loading(id="loading-2", children=[dcc.Slider(
            id='cycle--slider',
            min=0,
            max=15,
            value=15,
            step=1,
            included=True,
            marks={str(each): str(each) for each in range(15)},
        )]),
        html.Div([html.Br()], style={
            'width': '89%', 'display': 'inline-block'}),
        html.Br(),
        html.Div([dcc.RadioItems(id='show-model-fig1', options=[{'label': 'Show Model', 'value': 'showmodel'},
                                                                {'label': 'Hide Model',
                                                                    'value': 'hidemodel'},
                                                                ], labelStyle={'display': 'inline-block'}, value='showmodel')], style={'width': '10%', 'textAlign': 'left', 'display': 'inline-block'}),
    ],
        style={
        'width': '98%',
        'margin': '10px',
    }
    ),
    # --- Raw vs smoothed dQ/dV plot (one cycle) ---
    html.Div([
        dcc.Graph(id='charge-graph'),
    ],
        style={
        'columnCount': 1,
        'width': '98%',
        'height': '80%',
    }
    ),
    # --- Descriptor explorer (left) and model-tuning controls (right) ---
    html.Div([
        html.Div([
            html.H4(['Explore Descriptors']),
            html.Div([
                html.Div(
                    ['Specify charge/discharge, locations/areas/heights, and peak number(s).'],
                    style={
                        'width': '75%',
                        'font-style': 'italic'}),
                dcc.RadioItems(id='cd-to-plot', options=[
                    {'label': '(+) dQ/dV', 'value': 'c-'},
                    {'label': '(-) dQ/dV', 'value': 'd-'}
                ],
                    value='c-', labelStyle={'display': 'inline-block'}),
                dcc.RadioItems(id='desc-to-plot', options=[
                    {'label': 'Peak Locations', 'value': 'sortedloc-'},
                    {'label': 'Peak Areas', 'value': 'sortedarea-'},
                    {'label': 'Peak Height', 'value': 'sortedactheight-'},
                ],
                    value='sortedloc-', labelStyle={'display': 'inline-block'}),
                html.Div([dcc.Checklist(id='desc-peaknum-to-plot',
                                        options=[
                                            {'label': 'Peak 1', 'value': '1'},
                                            {'label': 'Peak 2', 'value': '2'},
                                            {'label': 'Peak 3', 'value': '3'},
                                            {'label': 'Peak 4', 'value': '4'},
                                            {'label': 'Peak 5', 'value': '5'},
                                            {'label': 'Peak 6', 'value': '6'},
                                            {'label': 'Peak 7', 'value': '7'},
                                            {'label': 'Peak 8', 'value': '8'},
                                            {'label': 'Peak 9', 'value': '9'},
                                            {'label': 'Peak 10', 'value': '10'},
                                        ],
                                        value=['1'], labelStyle={'display': 'inline-block'})], style={'width': '80%'}),
            ]),
            html.Br(),
            html.Br(),
        ], style={'display': 'inline-block', 'width': '49%', 'margin': '10px'}),
        html.Div([
            html.H4(['Update Model']),
            html.Div(
                ['New Peak Detection Threshold (default is 0.7, must be between 0 and 1): ']),
            html.Div([dcc.Input(id='new-peak-threshold',
                                placeholder='threshold for peak detection')]),
            html.Div(['After updating the threshold, you can update the preview of the model' +
                      ' and then update the database once the model appears to be optimal.'], style={'font-style': 'italic'}),
            html.Div(id='update-model-ans'),
            html.Button('Update Preview of Model', id='update-model-button'),
            html.Button(
                'Update Model in Database',
                id='update-model-indb-button'),
            html.Div([dcc.Checklist(id='show-gauss',
                                    options=[
                                        {'label': 'Show Guassian Baseline',
                                         'value': 'show'},
                                    ],
                                    value=['show'])]),
        ], style={'display': 'inline-block', 'width': '49%'}),
        html.Div([
            dcc.Graph(id='model-graph'),
        ], style={
            'columnCount': 1,
            'width': '98%',
            'height': '80%',
        }
        ),
    ]),
    # --- CSV download links (populated by the update_link_* callbacks) ---
    html.Div([
        html.H4('Download Files'),
        html.A(
            'Download Peak Descriptors CSV',
            download="descriptors.csv",
            id='my-link-1'),
        html.Br(),
        html.A(
            'Download Cleaned Cycles CSV',
            download="cleaned_cycles.csv",
            id='my-link-2'),
        html.Br(),
        html.A(
            'Download Model Points CSV',
            download="model_points.csv",
            id='my-link-3'),
    ],
        style={
        'width': '98%',
        'margin': '10px'
    },
    ),
    # Hidden div (unused placeholder / dummy callback target).
    html.Div(id='hidden-div', style={'display': 'none'})
])
##########################################
# Interactive Parts
##########################################
@app.callback(Output('output-data-upload', 'children'),
              [Input('upload-data', 'contents'),
               Input('upload-data', 'filename'),
               Input('input-datatype', 'value')])
def update_output(contents, filename, value):
    """Decode an uploaded file and hand it to the parser.

    `contents` is the base64 data-URI from dcc.Upload, `filename` its original
    name, and `value` the selected datatype (ARBIN/MACCOR).
    """
    # Savitzky-Golay smoothing parameters; fixed for now but could become
    # user inputs eventually.
    windowlength = 9
    polyorder = 3
    if contents is None:
        return html.Div(
            ['No file has been uploaded, or the file uploaded was empty.'])
    try:
        _content_type, content_string = contents.split(',')
        decoded = base64.b64decode(content_string)
        return html.Div([parse_contents(decoded,
                                        filename,
                                        value,
                                        database,
                                        windowlength,
                                        polyorder)])
    except Exception as e:
        # Surface the problem in the UI instead of crashing the callback.
        return html.Div(['There was a problem uploading that file: ' + str(e)])
@app.callback(Output('available-data', 'options'),
              [Input('output-data-upload', 'children')])
def update_dropdown(children):
    """Refresh the data-selection dropdown from the files stored in the database."""
    return [{'label': name, 'value': name} for name in get_db_filenames(database)]
@app.callback(Output('update-model-ans', 'children'),
              [Input('available-data', 'value'),
               Input('update-model-indb-button', 'n_clicks'),
               Input('new-peak-threshold', 'value')])
def update_model_indb(filename, n_clicks, new_peak_thresh):
    """Regenerate the peak model in the database once the button is clicked.

    Until the button has been pressed (n_clicks is None) only a status
    message is returned. The unused int_list_c/int_list_d locals from the
    original were removed.
    """
    if n_clicks is None:
        return html.Div(['Model has not been updated yet.'])
    cleanset_name = filename.split('.')[0] + 'CleanSet'
    df_clean = get_file_from_database(cleanset_name, database)
    feedback_str = generate_model(
        df_clean, filename, new_peak_thresh, database)
    return html.Div([feedback_str])
@app.callback(  # update slider
    Output('cycle--slider', 'max'),
    [Input('available-data', 'value')])
def update_slider_max(filename):
    """Set the slider maximum to the highest cycle index of the selected data."""
    if filename is None:
        # No selection yet: fall back to the bundled example data set.
        filename = 'ExampleData'
        database_sel = init_db
    else:
        database_sel = database
    data, raw_data = pop_with_db(filename, database_sel)
    datatype = data['datatype'].iloc[0]
    (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col,
     char_cap_col, charge_or_discharge) = col_variables(datatype)
    # Use the datatype-specific cycle-index column; the original computed it
    # and then hard-coded 'Cycle_Index' anyway.
    return data[cycle_ind_col].max()
@app.callback(
    Output('cycle--slider', 'marks'),
    [Input('available-data', 'value')])
def update_slider_marks(filename):
    """Label the slider with one mark per cycle present in the selected data."""
    if filename is None:
        # No selection yet: fall back to the bundled example data set.
        filename = 'ExampleData'
        database_sel = init_db
    else:
        database_sel = database
    data, raw_data = pop_with_db(filename, database_sel)
    return {str(each): str(each) for each in data['Cycle_Index'].unique()}
@app.callback(
    Output('cycle--slider', 'value'),
    [Input('available-data', 'value')])
def update_slider_value(filename):
    """Reset the slider to the lowest cycle index of the selected data set."""
    if filename is None:
        # No selection yet: fall back to the bundled example data set.
        filename = 'ExampleData'
        database_sel = init_db
    else:
        database_sel = database
    data, raw_data = pop_with_db(filename, database_sel)
    datatype = data['datatype'].iloc[0]
    (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col,
     char_cap_col, charge_or_discharge) = col_variables(datatype)
    # Use the datatype-specific cycle-index column; the original computed it
    # and then hard-coded 'Cycle_Index' anyway.
    return data[cycle_ind_col].min()
@app.callback(
    Output('charge-graph', 'figure'),
    [Input('cycle--slider', 'value'),
     Input('available-data', 'value'),
     Input('show-model-fig1', 'value')]
)
def update_figure1(selected_step, filename, showmodel):
    """Build the two-panel dQ/dV figure for the cycle chosen on the slider.

    Left panel: raw dQ/dV. Right panel: smoothed dQ/dV, with the fitted
    model overlaid when `showmodel == 'showmodel'`.
    """
    fig = plotly.subplots.make_subplots(
        rows=1, cols=2,
        subplot_titles=('Raw Cycle', 'Smoothed Cycle'),
        shared_xaxes=True)
    marker = {'color': ['#0074D9']}
    if filename is None or filename == 'options':
        # No real selection yet: fall back to the bundled example data set.
        filename = 'ExampleData'
        database_sel = init_db
    else:
        database_sel = database
    data, raw_data = pop_with_db(filename, database_sel)
    datatype = data['datatype'].iloc[0]
    (cycle_ind_col, data_point_col, volt_col, curr_col,
     dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)
    modset_name = filename.split('.')[0] + '-ModPoints'
    df_model = get_file_from_database(modset_name, database_sel)
    if df_model is not None:
        filt_mod = df_model[df_model[cycle_ind_col] == selected_step]
    if data is not None:
        filtered_data = data[data[cycle_ind_col] == selected_step]
    if raw_data is not None:
        raw_filtered_data = raw_data[raw_data[cycle_ind_col] == selected_step]
    # NOTE(review): the loop below assumes `filtered_data` (and, when used,
    # `raw_filtered_data` / `filt_mod`) were bound above; if pop_with_db ever
    # returned data=None this would raise NameError — confirm upstream
    # guarantees data is not None.
    for i in filtered_data[cycle_ind_col].unique():
        if data is not None:
            dff = filtered_data[filtered_data[cycle_ind_col] == i]
        if raw_data is not None:
            dff_raw = raw_filtered_data[raw_filtered_data[cycle_ind_col] == i]
        if df_model is not None:
            dff_mod = filt_mod[filt_mod[cycle_ind_col] == i]
        if data is not None:
            # Smoothed trace goes to the right-hand subplot.
            fig.append_trace({
                'x': dff[volt_col],
                'y': dff['Smoothed_dQ/dV'],
                'type': 'scatter',
                'marker': marker,
                'name': 'Smoothed Data'
            }, 1, 2)
        if raw_data is not None:
            # Raw trace goes to the left-hand subplot.
            fig.append_trace({
                'x': dff_raw[volt_col],
                'y': dff_raw['dQ/dV'],
                'type': 'scatter',
                'marker': marker,
                'name': 'Raw Data'
            }, 1, 1)
        if df_model is not None and showmodel == 'showmodel':
            # Model overlay on top of the smoothed data.
            fig.append_trace({
                'x': dff_mod[volt_col],
                'y': dff_mod['Model'],
                'type': 'scatter',
                'name': 'Model'
            }, 1, 2)
    fig['layout']['showlegend'] = False
    fig['layout']['xaxis1'].update(title='Voltage (V)')
    fig['layout']['xaxis2'].update(title='Voltage (V)')
    fig['layout']['yaxis1'].update(title='dQ/dV')
    fig['layout']['yaxis2'].update(title='dQ/dV')
    fig['layout']['height'] = 600
    fig['layout']['margin'] = {
        'l': 40,
        'r': 10,
        't': 60,
        'b': 200
    }
    return fig
@app.callback(
    Output('model-graph', 'figure'),
    [
        Input('available-data', 'value'),
        Input('new-peak-threshold', 'value'),
        Input('update-model-button', 'n_clicks'),
        Input('show-gauss', 'value'),
        Input('desc-to-plot', 'value'),
        Input('cd-to-plot', 'value'),
        Input('desc-peaknum-to-plot', 'value')]
)
def update_figure2(
        filename,
        peak_thresh,
        n_clicks,
        show_gauss,
        desc_to_plot,
        cd_to_plot,
        peaknum_to_plot):
    """Preview the peak model on a sample cycle before updating the database.

    Left subplot: the selected descriptor series per cycle. Right subplot:
    the smoothed dQ/dV of a middle cycle with the (possibly re-fitted) model
    and, optionally, the underlying charge/discharge Gaussian baselines.
    """
    if filename is None:
        # No selection yet: fall back to the bundled example data set.
        filename = 'ExampleData'
        database_sel = init_db
    else:
        database_sel = database
    data, raw_data = pop_with_db(filename, database_sel)
    datatype = data['datatype'].iloc[0]
    (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col,
     char_cap_col, charge_or_discharge) = col_variables(datatype)
    selected_step = round(data[cycle_ind_col].max() / 2) + 1
    # select a cycle in the middle of the set
    dff_data = data[data[cycle_ind_col] == selected_step]
    if len(data[cycle_ind_col].unique()) > 1:
        # Longest cycle (ignoring cycle 1), used by the model fitter below.
        lenmax = max([len(data[data[cycle_ind_col] == cyc])
                      for cyc in data[cycle_ind_col].unique() if cyc != 1])
    else:
        lenmax = len(data)
    dff_raw = raw_data[raw_data[cycle_ind_col] == selected_step]
    peak_vals_df = get_file_from_database(
        filename.split('.')[0] + '-descriptors', database_sel)
    fig = plotly.subplots.make_subplots(
        rows=1, cols=2, subplot_titles=(
            'Descriptors', 'Example Data for Model Tuning (Cycle ' + str(
                int(selected_step)) + ')'), shared_xaxes=True)
    marker = {'color': ['#0074D9']}
    if peak_vals_df is not None:
        if n_clicks is not None:
            # if the user has hit the update-model-button - remake model
            new_df_mody, model_c_vals, model_d_vals, peak_heights_c, peak_heights_d = get_model_dfs(
                dff_data, datatype, selected_step, lenmax, peak_thresh)
            dff_mod = new_df_mody
            # Underlying Gaussian baseline parameters, charge side.
            c_sigma = model_c_vals['base_sigma']
            c_center = model_c_vals['base_center']
            c_amplitude = model_c_vals['base_amplitude']
            c_fwhm = model_c_vals['base_fwhm']
            c_height = model_c_vals['base_height']
            # Underlying Gaussian baseline parameters, discharge side.
            d_sigma = model_d_vals['base_sigma']
            d_center = model_d_vals['base_center']
            d_amplitude = model_d_vals['base_amplitude']
            d_fwhm = model_d_vals['base_fwhm']
            d_height = model_d_vals['base_height']
        else:
            # if user hasn't pushed the button, populate with original model
            # from database
            modset_name = filename.split('.')[0] + '-ModPoints'
            df_model = get_file_from_database(modset_name, database_sel)
            dff_mod = df_model[df_model[cycle_ind_col] == selected_step]
            filtpeakvals = peak_vals_df[peak_vals_df['c_cycle_number']
                                        == selected_step]
            filtpeakvals = filtpeakvals.reset_index(drop=True)
            # grab values for the underlying gaussian in the charge:
            try:
                c_sigma = filtpeakvals['c_gauss_sigma'].iloc[0]
                c_center = filtpeakvals['c_gauss_center'].iloc[0]
                c_amplitude = filtpeakvals['c_gauss_amplitude'].iloc[0]
                c_fwhm = filtpeakvals['c_gauss_fwhm'].iloc[0]
                c_height = filtpeakvals['c_gauss_height'].iloc[0]
            except BaseException:
                # there may not be a model
                pass
            # grab values for the underlying discharge gaussian:
            try:
                d_sigma = filtpeakvals['d_gauss_sigma'].iloc[0]
                d_center = filtpeakvals['d_gauss_center'].iloc[0]
                d_amplitude = filtpeakvals['d_gauss_amplitude'].iloc[0]
                d_fwhm = filtpeakvals['d_gauss_fwhm'].iloc[0]
                d_height = filtpeakvals['d_gauss_height'].iloc[0]
            except BaseException:
                pass
        # Smoothed data for the sample cycle (right subplot).
        fig.append_trace({
            'x': dff_data[volt_col],
            'y': dff_data['Smoothed_dQ/dV'],
            'type': 'scatter',
            'marker': marker,
            'name': 'Smoothed Data'
        }, 1, 2)
        # One descriptor trace per requested peak number (left subplot).
        # Column names are assembled as e.g. 'sortedloc-' + 'c-' + '1'.
        if len(peaknum_to_plot) > 0:
            for value in peaknum_to_plot:
                try:
                    fig.append_trace({
                        'x': peak_vals_df['c_cycle_number'],
                        'y': peak_vals_df[str(''.join(desc_to_plot)) + str(''.join(cd_to_plot)) + value],
                        'type': 'scatter',
                        'marker': marker,
                        'name': value
                    }, 1, 1)
                except KeyError as e:
                    # Requested peak does not exist for this data set; skip it.
                    None
        fig.append_trace({
            'x': dff_mod[volt_col],
            'y': dff_mod['Model'],
            'type': 'scatter',
            'name': 'Model of One Cycle'
        }, 1, 2)
        # add if checkbox is selected to show polynomial baseline
        if 'show' in show_gauss:
            try:
                # Charge-side Gaussian baseline evaluated over the voltage axis.
                fig.append_trace({
                    'x': dff_mod[volt_col],
                    'y': ((c_amplitude / (c_sigma * ((2 * 3.14159)**0.5))) * np.exp((-(dff_mod[volt_col] - c_center)**2) / (2 * c_sigma**2))),
                    'type': 'scatter',
                    'name': 'Charge Gaussian Baseline'  # plot the poly
                }, 1, 2)
            except BaseException:
                pass
            # add the plot of the discharge guassian:
            try:
                fig.append_trace({
                    'x': dff_mod[volt_col],
                    'y': -((d_amplitude / (d_sigma * ((2 * 3.14159)**0.5))) * np.exp((-(dff_mod[volt_col] - d_center)**2) / (2 * d_sigma**2))),
                    'type': 'scatter',
                    'name': 'Discharge Gaussian Baseline'  # plot the poly
                }, 1, 2)
            except BaseException:
                pass
    fig['layout']['showlegend'] = True
    fig['layout']['xaxis1'].update(title='Cycle Number')
    fig['layout']['xaxis2'].update(title='Voltage (V)')
    fig['layout']['yaxis1'].update(title='Descriptor Value')
    fig['layout']['yaxis2'].update(
        title='dQ/dV',
        range=[
            dff_data['Smoothed_dQ/dV'].min(),
            dff_data['Smoothed_dQ/dV'].max()])
    fig['layout']['height'] = 600
    fig['layout']['margin'] = {
        'l': 40,
        'r': 10,
        't': 60,
        'b': 200
    }
    return fig
@app.callback(Output('my-link-1', 'href'),
[Input('available-data', 'value')])
def update_link_1(value):
    """Build the data-URI download link for the descriptors CSV.

    Returns an empty CSV when no file is selected or the file has no
    descriptors table. (The original referenced csv_string outside the
    `if value is not None` guard, raising UnboundLocalError for value=None.)
    """
    if value is not None:
        peak_vals_df = get_file_from_database(
            value.split('.')[0] + '-descriptors', database)
    else:
        peak_vals_df = None
    if peak_vals_df is not None:
        csv_string = peak_vals_df.to_csv(index=False, encoding='utf-8')
    else:
        # return an empty dataframe
        csv_string = pd.DataFrame().to_csv(index=False, encoding='utf-8')
    csv_string = "data:text/csv;charset=utf-8," + \
        urllib.parse.quote(csv_string)
    return csv_string
@app.callback(Output('my-link-2', 'href'),
[Input('available-data', 'value')])
def update_link_2(value):
    """Build the data-URI download link for the cleaned-cycles CSV.

    Returns an empty CSV when no file is selected or the file has no clean
    set. (The original referenced csv_string outside the
    `if value is not None` guard, raising UnboundLocalError for value=None.)
    """
    if value is not None:
        clean_set_df = get_file_from_database(
            value.split('.')[0] + 'CleanSet', database)
    else:
        clean_set_df = None
    if clean_set_df is not None:
        csv_string = clean_set_df.to_csv(index=False, encoding='utf-8')
    else:
        # return an empty dataframe
        csv_string = pd.DataFrame().to_csv(index=False, encoding='utf-8')
    csv_string = "data:text/csv;charset=utf-8," + \
        urllib.parse.quote(csv_string)
    return csv_string
@app.callback(Output('my-link-3', 'href'),
[Input('available-data', 'value')])
def update_link_3(value):
    """Build the data-URI download link for the model-points CSV.

    Returns an empty CSV when no file is selected or the file has no model
    points. (The original referenced csv_string outside the
    `if value is not None` guard, raising UnboundLocalError for value=None.)
    """
    if value is not None:
        mod_points_df = get_file_from_database(
            value.split('.')[0] + '-ModPoints', database)
    else:
        mod_points_df = None
    if mod_points_df is not None:
        csv_string = mod_points_df.to_csv(index=False, encoding='utf-8')
    else:
        # return an empty dataframe
        csv_string = pd.DataFrame().to_csv(index=False, encoding='utf-8')
    csv_string = "data:text/csv;charset=utf-8," + \
        urllib.parse.quote(csv_string)
    return csv_string
##########################################
# Customize CSS
##########################################
# app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
# app.scripts.config.serve_locally = False
# Run the Dash development server when executed as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
| 40.935837 | 200 | 0.502523 | 0 | 0 | 0 | 0 | 15,050 | 0.57535 | 0 | 0 | 7,377 | 0.282017 |
9fb43f7e2af262b4eec008e1b1fa5323d82b1b1a | 252 | py | Python | Python_Advanced_Softuni/Tuples_And_Sets_Excercise/venv/sets_of_elements.py | borisboychev/SoftUni | 22062312f08e29a1d85377a6d41ef74966d37e99 | [
"MIT"
] | 1 | 2020-12-14T23:25:19.000Z | 2020-12-14T23:25:19.000Z | Python_Advanced_Softuni/Tuples_And_Sets_Excercise/venv/sets_of_elements.py | borisboychev/SoftUni | 22062312f08e29a1d85377a6d41ef74966d37e99 | [
"MIT"
] | null | null | null | Python_Advanced_Softuni/Tuples_And_Sets_Excercise/venv/sets_of_elements.py | borisboychev/SoftUni | 22062312f08e29a1d85377a6d41ef74966d37e99 | [
"MIT"
] | null | null | null | (n,m) = [int(x) for x in input().split()]
# Read n integers into set_n and m integers into set_m (one per line), then
# print every value that appears in both sets. The original computed an
# unused `loop_range = n + m` and abused a list comprehension for printing.
set_n = set()
set_m = set()
for _ in range(n):
    set_n.add(int(input()))
for _ in range(m):
    set_m.add(int(input()))
for common_element in set_n.intersection(set_m):
    print(common_element)
| 18 | 41 | 0.611111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9fb58ec6926f4ee6f24f0c39bf1b0ebd934bf3b3 | 1,284 | py | Python | netconf-cisco.py | Raul-Flores/Network-programmability-examples | e540b050b89da167b84f415565b75313605e01b2 | [
"Apache-2.0"
] | 2 | 2020-01-09T18:32:37.000Z | 2020-01-09T18:32:42.000Z | netconf-cisco.py | Raul-Flores/Network-programmability-examples | e540b050b89da167b84f415565b75313605e01b2 | [
"Apache-2.0"
] | null | null | null | netconf-cisco.py | Raul-Flores/Network-programmability-examples | e540b050b89da167b84f415565b75313605e01b2 | [
"Apache-2.0"
] | null | null | null | from ncclient import manager
from xml.dom import minidom
import xmltodict
# NETCONF demo: connect to the Cisco IOS-XE always-on sandbox and dump the
# running interface configuration as pretty XML and as a Python dict.
# (Local names now say "cisco" — the original called them "huawei" even
# though the target has always been the Cisco sandbox.)
cisco_device = {'address': 'ios-xe-mgmt-latest.cisco.com',
                'netconf_port': 10000, 'username': 'developer', 'password': 'C1sco12345'}
cisco_manager = manager.connect(host=cisco_device["address"],
                                port=cisco_device["netconf_port"],
                                username=cisco_device["username"],
                                password=cisco_device["password"],
                                device_params={'name': 'iosxe'},
                                hostkey_verify=False)
# Subtree filter: every <interface> under the ietf-interfaces model.
filter_Interfaces = """
<filter>
    <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
        <interface>
        </interface>
    </interfaces>
</filter>
"""
# For any interface
get_interfaces_xml = cisco_manager.get_config('running', filter_Interfaces).xml
xml_pretty = minidom.parseString(get_interfaces_xml)
print("Interfaces en XML format")
print("#" * 100)
print(xml_pretty.toprettyxml(indent="  "))
xml_to_dict_general = xmltodict.parse(get_interfaces_xml)
print("#" * 100)
print("Extraer todas las interfaces ")
interfaces = xml_to_dict_general['rpc-reply']['data']['interfaces']['interface']
# xmltodict returns a single dict (not a list) when exactly one <interface>
# element comes back; normalise so the loop works in both cases instead of
# iterating over the dict's keys.
if not isinstance(interfaces, list):
    interfaces = [interfaces]
for interface in interfaces:
    print(interface['name'])
print("#" * 100)
# To inspect the session status or server capabilities:
# print(cisco_manager.connected)
# for capability in cisco_manager.server_capabilities:
#     print(capability)
9fb70e248cc42f7e6de7af2a2f074492332a1224 | 10,337 | py | Python | pizza.py | purplefrizzel/PizzaTill | 2beedcb7ed7e72af813a01b1d5396c149acefce3 | [
"MIT"
] | null | null | null | pizza.py | purplefrizzel/PizzaTill | 2beedcb7ed7e72af813a01b1d5396c149acefce3 | [
"MIT"
] | null | null | null | pizza.py | purplefrizzel/PizzaTill | 2beedcb7ed7e72af813a01b1d5396c149acefce3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Pizza Till
# Lewis Shaw
import os
import sys
import time
import re
# Main-loop flag: set to False by exit() to stop the UI loop at the bottom
# of the file.
isProgramRuning = True
# Whether the start-up banner has already been printed (see program()).
welcomeMessageDisplay = False
# Index of the menu to re-render on the next pass of the main loop.
lastShownMenu = 0
# The in-progress order: a list of {"size": ..., "addedTopping": ...} dicts.
order = { "pizzas": [] }
# Customer record filled in by the enter_customer_* prompt functions.
customer = { "customerName": None, "customerPhoneNumber": None, "customerAddress": { "postcode": None, "houseNumber": None } }
class TooManyPizzasError(Exception):
    """Raised when an order already holds the maximum of six pizzas.

    Inherits from Exception rather than BaseException so it is not grouped
    with system-exiting exceptions such as KeyboardInterrupt/SystemExit.
    """
class OrderIsNotValidError(Exception):
    """Raised when an order is completed without pizzas or customer details.

    Inherits from Exception rather than BaseException so it is not grouped
    with system-exiting exceptions such as KeyboardInterrupt/SystemExit.
    """
def is_order_valid() -> bool:
    """Return True when the order has at least one pizza and a customer name.

    Uses `is not None` and container truthiness instead of the original
    `len(...) > 0 and ... != None` comparison.
    """
    pizzas = order["pizzas"]
    return bool(pizzas) and customer["customerName"] is not None
def cancel_order():
    """Reset the in-progress order and customer record to their empty state."""
    global order, customer
    order = {"pizzas": []}
    customer = {
        "customerName": None,
        "customerPhoneNumber": None,
        "customerAddress": {"postcode": None, "houseNumber": None},
    }
def new_line():
    """Emit a single blank line."""
    print()

def print_title(title: str):
    """Print a blank line, the section title, then a 32-dot underline."""
    new_line()
    print(title)
    print(32 * ".")
def enter_customer_name():
    """Prompt until a purely alphabetic first name is entered, then store it.

    Loops instead of recursing so repeated bad input cannot exhaust the
    stack. str.isalpha() is False for the empty string, so the original's
    separate length check was unreachable and has been dropped.
    """
    while True:
        customer_name = input("Enter customer's first name: ").strip(" ")
        if customer_name.isalpha():
            customer["customerName"] = customer_name
            return
        handle_error("Please enter an correct value.")
def enter_customer_phone_number():
    """Prompt until an 11-digit phone number is entered, then store it.

    The original accepted any 11 characters (letters included) despite the
    "UK phone number" error message; digits are now required as well.
    Loops instead of recursing so bad input cannot exhaust the stack.
    """
    while True:
        phone_number = input("Enter customer's phone number: ").strip(" ")
        if len(phone_number) == 11 and phone_number.isdigit():
            customer["customerPhoneNumber"] = phone_number
            return
        handle_error("Please enter an correct UK phone number.")
def enter_customer_address():
    """Prompt for "houseNumber,postcode", validate both parts, and store them.

    Fixes two bugs in the original: the house number was read from
    customerAddress[0] (the first *character* of the raw input, so "32,..."
    stored "3"), and the postcode check used a JavaScript-style regex literal
    ("/.../i") compared against False — re.search returns None (never False)
    on a miss, so no postcode was ever rejected.
    """
    try:
        customer_address = str(input("Enter customer's house number and postcode (seperate by a comma): "))
        address_parts = customer_address.split(",")
        house_number = str(address_parts[0]).strip(" ")
        postcode = str(address_parts[1]).strip(" ")
        if not house_number.isnumeric():
            raise ValueError
        # UK postcode shape: 1-2 letters, digit, optional alnum, optional
        # space, digit, 2 letters (case-insensitive).
        if re.fullmatch(r"[A-Za-z]{1,2}\d[A-Za-z\d]?\s*\d[A-Za-z]{2}", postcode) is None:
            raise ValueError
        customer["customerAddress"] = {"houseNumber": house_number, "postcode": postcode}
    except ValueError:
        handle_error("Please enter an correct house number and UK postcode.")
        enter_customer_address()
    except IndexError:
        handle_error("Please seperate the address details by comma like this 32,PC11 4RT.")
        enter_customer_address()
def customer_details():
    """Print the currently stored customer record, one field per line."""
    new_line()
    for label, key in (("Customer Name:", "customerName"),
                       ("Customer Phone Number:", "customerPhoneNumber"),
                       ("Customer Address:", "customerAddress")):
        print(label, customer[key])
def customer_details_menu():
    """Walk through entering the customer details, show them, and confirm.

    On "yes" the order menu (1) is shown next; otherwise this menu (2) is
    re-entered so the details can be corrected.
    """
    global customer
    print_title("Enter Customer Details")
    new_line()
    for prompt in (enter_customer_name,
                   enter_customer_phone_number,
                   enter_customer_address):
        prompt()
    customer_details()
    new_line()
    answer = input("Is this data correct? ").lower()
    clear_screen()
    showMenus(1 if answer == "yes" else 2)
def complete_order_menu():
    """Print an itemised receipt for the current order and ask to confirm it.

    Fixes two pricing bugs in the original:
    - the 10% discount for subtotals of £20+ was printed but never applied
      (the intended line was left commented out, and `/ 0.1` would have
      multiplied by 10 anyway);
    - pizzas with no extra toppings (addedTopping is None or 0) fell into
      the catch-all branch and were charged the 4+-topping price of £2.50.
    """
    print_title("Complete Order")
    new_line()
    new_line()
    smallPizza = 0
    mediumPizza = 0
    largePizza = 0
    extraToppingsCharge = 0
    for pizza in order["pizzas"]:
        # Tally pizza counts by size.
        if pizza["size"] == "small":
            smallPizza += 1
        elif pizza["size"] == "medium":
            mediumPizza += 1
        elif pizza["size"] == "large":
            largePizza += 1
        # Extra-topping charge: 1 -> £0.75, 2 -> £1.35, 3 -> £2.00,
        # 4 or more -> £2.50, none -> free.
        toppings = pizza["addedTopping"]
        if toppings == 1:
            extraToppingsCharge += 0.75
        elif toppings == 2:
            extraToppingsCharge += 1.35
        elif toppings == 3:
            extraToppingsCharge += 2.00
        elif toppings is not None and toppings >= 4:
            extraToppingsCharge += 2.50
    smallPizzaCost = round(smallPizza * 3.25, 2)
    mediumPizzaCost = round(mediumPizza * 5.50, 2)
    largePizzaCost = round(largePizza * 7.15, 2)
    extraToppingsCharge = round(extraToppingsCharge, 2)
    subtotal = round(smallPizzaCost + mediumPizzaCost + largePizzaCost + extraToppingsCharge, 2)
    print("_" * 38)
    print("{:>15} {:>10} {:>10}".format("Qty", "Pizza", "Amount"))
    if smallPizza != 0:
        print("{:>15} {:>10} £{:>10}".format(smallPizza, "Small Pizza", smallPizzaCost))
    if mediumPizza != 0:
        print("{:>15} {:>10} £{:>10}".format(mediumPizza, "Medium Pizza", mediumPizzaCost))
    if largePizza != 0:
        print("{:>15} {:>10} £{:>10}".format(largePizza, "Large Pizza", largePizzaCost))
    print("_" * 38)
    if extraToppingsCharge != 0:
        print("{:>15} £{:>20}".format("Extra Toppings Charge", extraToppingsCharge))
    if subtotal >= round(20.0, 2):
        # Apply the advertised 10% discount (previously only printed).
        subtotal = round(subtotal * 0.9, 2)
        print("{:>15} £{:>20}".format("Discount", "10%"))
    print("{:>15} £{:>20}".format("Delivery Charge", "2.50"))
    print("{:>15} £{:>20}".format("Subtotal", round(subtotal, 2)))
    print("_" * 38)
    total = round(subtotal + 2.5, 2)
    print("{:>15} £{:>20}".format("Total", total))
    print("_" * 38)
    new_line()
    new_line()
    corret = input("Is this corret? ").lower()
    if corret == "yes":
        # Order confirmed: clear it and return to the main menu.
        cancel_order()
        clear_screen()
        showMenus(0)
    else:
        clear_screen()
        showMenus(1)
def add_pizza_menu():
    """Show the pizza-size menu and append the chosen pizza to the order.

    The option prompt is now read inside the try block, so non-numeric input
    is reported via handle_error; in the original, int(input(...)) ran before
    the try, leaving its own ValueError handler unreachable for that case.
    """
    print_title("Select a Pizza Size")
    print("1. Small - £3.25")
    print("2. Medium - £5.50")
    print("3. Large - £7.15")
    print("4. Done?")
    new_line()
    try:
        option = int(input("Please select an option: "))
        # A 6-pizza cap: with 6 pizzas already in the order, only "Done" (4)
        # is allowed.
        if len(order["pizzas"]) > 5 and option != 4:
            raise TooManyPizzasError
        elif option == 1:
            order["pizzas"].append({"size": "small", "addedTopping": None})
        elif option == 2:
            order["pizzas"].append({"size": "medium", "addedTopping": None})
        elif option == 3:
            order["pizzas"].append({"size": "large", "addedTopping": None})
        elif option == 4:
            clear_screen()
            showMenus(1)
        else:
            handle_error(str(option) + " is not a correct option.")
    except ValueError:
        handle_error("Please enter an correct option.")
    except TooManyPizzasError:
        handle_error("You can only order a max of 6 pizzas at one time.")
def pizza_toppings_menu():
    """List the ordered pizzas and set the extra-toppings count on one of them.

    Fixes two issues: choosing "Done" (7) now returns instead of falling
    through to the index validation below, and the bounds check uses
    `pizza >= len(pizzas)` (plus a negative-index guard) so an out-of-range
    choice shows an error message instead of raising an uncaught IndexError.
    """
    print_title("Add Toppings to Order")
    new_line()
    pizzas = order["pizzas"]
    for pizza_index, pizza_entry in enumerate(pizzas):
        print(pizza_index, pizza_entry)
    print("7: Done?")
    try:
        new_line()
        pizza = int(input("Select a pizza to add toppings to: "))
        if pizza == 7:
            clear_screen()
            showMenus(1)
            return
        if pizza < 0 or pizza >= len(pizzas):
            raise ValueError
        new_line()
        toppings = int(input("How many toppings would you like: "))
        pizzas[pizza]["addedTopping"] = toppings
        clear_screen()
    except ValueError:
        handle_error("Please enter an correct pizza.")
def order_pizza_menu():
    """Top-level ordering menu: dispatch to the sub-menus for one order."""
    print_title("Order Pizza")
    print("1. Customer details")
    print("2. Add pizza to order")
    print("3. Add extra toppings to order")
    print("4. Complete order")
    print("5. Cancel")
    new_line()
    try:
        option = int(input("Select an option > "))
        if option == 1:
            # First visit: collect details; afterwards just display them.
            if customer["customerName"] is None:
                clear_screen()
                showMenus(2)
            else:
                customer_details()
        elif option in (2, 3):
            # 2 -> add-pizza menu (3), 3 -> toppings menu (4).
            clear_screen()
            showMenus(option + 1)
        elif option == 4:
            if not is_order_valid():
                raise OrderIsNotValidError
            clear_screen()
            showMenus(5)
        elif option == 5:
            cancel_order()
            clear_screen()
            showMenus(0)
        else:
            handle_error(str(option) + " is not a correct option.")
    except ValueError:
        handle_error("Please enter an correct option.")
    except OrderIsNotValidError:
        handle_error("The order is not valid, please check the order.")
def welcome_message(username: str):
    """Print the start-up banner and greet *username*."""
    banner_width = 32
    print(" Pizza till")
    print(f" Welcome, {username}")
    print("." * banner_width)
    new_line()
def main_menu():
    """Show the top-level menu: start an order or exit the program.

    The original declared `global isShowingMainMenu` without ever reading or
    writing it here; the dead declaration has been removed.
    """
    print_title("Main Menu")
    print("1. Create an order")
    print("2. Exit")
    new_line()
    try:
        option = int(input("Select an option > "))
        if option == 1:
            clear_screen()
            showMenus(1)
        elif option == 2:
            # Calls this module's exit(), which stops the loop and the process.
            exit(0)
        else:
            handle_error(str(option) + " is not a correct option.")
    except ValueError:
        handle_error("Please enter an correct option.")
def handle_error(error):
    """Clear the screen, show *error* (or a generic message), then pad output."""
    clear_screen()
    message = "There was an unknown error." if error in (None, "") else error
    print(message)
    new_line()
def program():
    """Render one pass of the UI: banner on first run, then the active menu."""
    global isShowingMainMenu
    global welcomeMessageDisplay
    if not welcomeMessageDisplay:
        welcome_message("Lewis")
        welcomeMessageDisplay = True
    isShowingMainMenu = True
    showMenus(lastShownMenu)
def showMenus(index: int):
    """Record *index* as the last shown menu and render it.

    Unknown indices are ignored, matching the original if/elif chain.
    """
    global lastShownMenu
    menu_handlers = {
        0: main_menu,
        1: order_pizza_menu,
        2: customer_details_menu,
        3: add_pizza_menu,
        4: pizza_toppings_menu,
        5: complete_order_menu,
    }
    handler = menu_handlers.get(index)
    if handler is not None:
        lastShownMenu = index
        handler()
def exit(code: int):
    """Stop the menu loop and terminate the process with exit status *code*.

    NOTE(review): this deliberately shadows the builtin exit(); callers in
    this file (main_menu) rely on the name. sys.exit() ends the process, so
    clearing isProgramRuning mainly matters if SystemExit is ever caught.
    """
    global isProgramRuning
    isProgramRuning = False
    print("Exiting...")
    time.sleep(1)
    sys.exit(code)
def clear_screen():
    """Clear the terminal, using 'cls' on Windows and 'clear' elsewhere."""
    command = "cls" if os.name == "nt" else "clear"
    _ = os.system(command)
# Main event loop: re-render the most recently shown menu until exit()
# clears the flag (exit() also terminates the process via sys.exit).
while isProgramRuning:
    program()
9fb86bc2021d88c9c656e9537d5af6bffae02181 | 4,294 | py | Python | code/lib.py | Pendra89/geom2020 | afcb176a2840eca4aa985a67c43773d6cc548069 | [
"MIT"
] | 1 | 2021-11-17T11:47:35.000Z | 2021-11-17T11:47:35.000Z | code/lib.py | Pendra89/geom2020 | afcb176a2840eca4aa985a67c43773d6cc548069 | [
"MIT"
] | null | null | null | code/lib.py | Pendra89/geom2020 | afcb176a2840eca4aa985a67c43773d6cc548069 | [
"MIT"
] | 1 | 2021-02-23T22:46:25.000Z | 2021-02-23T22:46:25.000Z | import numpy as np
from numpy import array
from numpy.linalg import det
from numpy.linalg import matrix_rank
from numpy.linalg import solve
"""
*** remember the following useful tools***
from numpy import transpose
from numpy import dot
from numpy import argmax
from numpy import abs
from numpy.linalg import eig
from numpy.linalg import inv
"""
def linear_equations(matrix, vector) -> array:
    """Solve the linear system `matrix @ x = vector` and report its nature.

    :param matrix: coefficient matrix (2-D numpy array)
    :param vector: vector of constant terms
    :return: the solution array for a unique solution, the Gauss-reduced
        augmented matrix when the user asks for it in the
        infinitely-many-solutions case, otherwise None.

    Uses the Rouche-Capelli criterion: comparing rank(A) with rank([A|b])
    decides consistency, and a unique solution requires rank(A) equal to the
    number of *unknowns* (columns). The original compared against len(matrix)
    — the number of rows — which mis-handles non-square systems; it also
    carried doctests whose expected output did not match the actual prints.
    """
    B = np.c_[matrix, vector]
    rank_A = matrix_rank(matrix)
    rank_B = matrix_rank(B)
    n_unknowns = matrix.shape[1]
    if rank_A == rank_B:
        if rank_A == n_unknowns:
            if matrix.shape[0] == matrix.shape[1]:
                solution = solve(matrix, vector)
            else:
                # Consistent over-determined system: least squares recovers
                # the exact (unique) solution.
                solution = np.linalg.lstsq(matrix, vector, rcond=None)[0]
            print(f'\n The system has a single unique solution.\n {solution}\n ')
            return solution
        else:
            print('\n The system has infinitely many solutions. \n')
            if input('Do you want the matrix after the gauss_elimination elimination? [y/n]\n') == 'y':
                S = gauss_elimination(B)
                print(S)
                return S
    else:
        print('\n The system has no solution.\n')
        return None
# esercizio 2
def linear_dependence(matrix: array) -> int:
    """Report whether the row vectors of *matrix* are linearly independent.

    :param matrix: matrix whose rows are the vectors to test
    :return: the number of linearly independent vectors (the matrix rank)
    """
    rank = matrix_rank(matrix)
    n_vectors = matrix.shape[0]
    if rank == n_vectors:
        print('The vectors are linearly independents')
        return rank
    print(f'The vectors are linearly dependents and only {rank} of them are linearly independents')
    if input('Do you want the matrix after the gauss_elimination elimination? [y/n]\n') == 'y':
        print(gauss_elimination(matrix))
    return rank
# esercizio3
def cartesian_representation_line(vec_1: np.array, vec_2: np.array, type: int = 1) -> None:
    """
    Print the parametric (cartesian) representation of a line.

    :param vec_1: a point on the line
    :param vec_2: direction vector (type = 0) or a second point (type = 1)
    :param type: switches between "two points" (truthy, default) and
                 "point plus direction" (falsy) input modes
    :return: None (one coordinate equation is printed per line)
    """
    # NOTE(review): the parameter name ``type`` shadows the builtin; it is
    # kept unchanged for backward compatibility with keyword callers.
    if type:
        # Two points given: the direction is their difference.
        # Rebinds the local name only; the caller's array is not mutated.
        vec_2 = vec_2 - vec_1
    # One equation per coordinate: x_i = point_i + direction_i * t
    for i, (point_i, direction_i) in enumerate(zip(vec_1, vec_2), start=1):
        print(f' x_{i} = {point_i} + {direction_i}t')
    return None
def gauss_elimination(matrix) -> np.array:
    """
    Row-reduce *matrix* via the Gauss(-Jordan) elimination process.

    :param matrix: any matrix-like array
    :return: the reduced row echelon form of the matrix, as a numpy array
    """
    # sympy is imported lazily so the rest of the module works without it.
    from sympy import Matrix
    reduced_form, _pivot_columns = Matrix(matrix).rref()
    return np.array(reduced_form)
def conic_section_classification(coeff: list) -> None:
    """
    Classify a conic section from the coefficients of its general equation.

    :param coeff: coefficients [A, B, C, D, E, F] of
        A x^2 + B xy + C y^2 + D x + E y + F = 0
    :return: None (the classification is printed)
    """
    A, B, C, D, E, F = coeff
    # Symmetric matrix of the conic; its rank, together with the determinant
    # of the top-left 2x2 minor, determines the conic type.
    # BUG FIX: the original bottom row used D instead of D / 2, breaking the
    # symmetry of the matrix and therefore the rank-based classification
    # (e.g. the point conic (x-1)^2 + y^2 = 0 was reported as an ellipse).
    M = array([[A, B / 2, D / 2],
               [B / 2, C, E / 2],
               [D / 2, E / 2, F]])
    rank = matrix_rank(M)
    # remember that we have a finite precision on floats, for this reason we consider 1e-09 as tolerance
    d = det(M[:2, :2])
    if rank == 3:
        # Non-degenerate conic: the sign of the 2x2 minor decides the type.
        if d > 1e-09:
            print('This conic section is an ellipse')
        elif d < -1e-09:
            print('This conic section is a hyperbola')
        else:
            print('This conic section is a parabola')
    elif rank == 2:
        print('This conic section is a degenerate conic, ', end="")
        if d > 1e-09:
            print('in particular we have one point')
        elif d < -1e-09:
            print('in particular we have two incident lines')
        else:
            print('in particular we have two parallel lines')
    else:
        print('This conic section is a degenerate conic, in particular we have two coincident lines')
    return None
| 27.703226 | 108 | 0.619935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,441 | 0.568468 |
9fb8870f39230a47bbcccb28e26862e42ee85468 | 2,520 | py | Python | text/symbols.py | roedoejet/FastSpeech2 | a2560cdaf6d6e08e40e59bdc790bf7758987a990 | [
"MIT"
] | 7 | 2021-11-29T06:53:12.000Z | 2022-02-15T18:56:06.000Z | text/symbols.py | roedoejet/FastSpeech2 | a2560cdaf6d6e08e40e59bdc790bf7758987a990 | [
"MIT"
] | null | null | null | text/symbols.py | roedoejet/FastSpeech2 | a2560cdaf6d6e08e40e59bdc790bf7758987a990 | [
"MIT"
] | 2 | 2021-06-10T16:43:06.000Z | 2022-02-05T10:28:24.000Z | """ from https://github.com/keithito/tacotron """
"""
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
For the three languages in my dissertation, Gitksan (git), Kanien'kéha (moh), & SENĆOŦEN (str), mappings were used from the g2p library. If the language is supported by g2p,
it's recommended to add it in the same fashion as below
"""
from text import cmudict, pinyin
from g2p.mappings.langs import MAPPINGS_AVAILABLE
from g2p import make_g2p
from nltk.tokenize import RegexpTokenizer
from unicodedata import normalize
from epitran.flite import Flite
# Flite-backed ARPAbet -> IPA map for English; empty entries are dropped.
FLITE = Flite()
ARPA_IPA = [x for x in FLITE.arpa_map.values() if x]
# Core symbol inventory shared by all languages.
_pad = "_"
_punctuation = "!(),.;? ':"
# Mohawk punctuation excludes the apostrophe/colon used elsewhere.
_moh_punctuation = "!(),.;? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
# Silence markers (plain and "@"-prefixed variants).
_silences = ["sp", "spn", "sil", "@sp", "@spn", "@sil"]
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
# _arpabet = ["@" + s for s in cmudict.valid_symbols]
_arpabet = [s for s in cmudict.valid_symbols]
_pinyin = ["@" + s for s in pinyin.valid_symbols]
# Per-language g2p transducers: "norm" maps to a normalized orthography,
# "ipa" maps to IPA (git = Gitksan, moh = Kanien'kéha, str = SENĆOŦEN).
MAPPINGS = {
    "git": {"norm": make_g2p("git", "git-equiv"), "ipa": make_g2p("git", "git-ipa")},
    "moh": {"norm": make_g2p("moh", "moh-equiv"), "ipa": make_g2p("moh", "moh-ipa")},
    "str": {"norm": make_g2p("str", "str-equiv"), "ipa": make_g2p("str", "str-ipa")},
}
# Per-language IPA symbol inventories, collected from the output side of the
# final transducer in each "ipa" mapping and NFC-normalized.
# NOTE(review): this reaches into g2p private internals (``_transducers``) —
# fragile across g2p versions; verify when upgrading the dependency.
IPA = {
    k: [
        normalize("NFC", c)
        for cs in [x for x in MAPPINGS[k]["ipa"]._transducers[-1].mapping.mapping]
        for c in cs["out"].split()
    ]
    for k in MAPPINGS.keys()
}
# Regex tokenizers over each language's IPA inventory; symbols are sorted
# longest-first so multi-character symbols are matched greedily.
TOKENIZERS = {
    k: RegexpTokenizer("|".join(sorted(v, key=lambda x: len(x), reverse=True)))
    for k, v in IPA.items()
}
# Base (non-phonetic) symbol sets; Mohawk uses its own punctuation.
BASE_SYMBOLS = [_pad] + list(_special) + list(_punctuation) + _silences
MOH_BASE_SYMBOLS = [_pad] + list(_special) + list(_moh_punctuation) + _silences
# SYMBOLS = {k: v + BASE_SYMBOLS for k, v in IPA.items()} # moh uses different base
# Final per-language symbol tables consumed by the text front end.
SYMBOLS = {}
SYMBOLS["moh"] = MOH_BASE_SYMBOLS + IPA["moh"]
SYMBOLS["git"] = BASE_SYMBOLS + IPA["git"]
SYMBOLS["str"] = BASE_SYMBOLS + IPA["str"]
SYMBOLS["eng"] = BASE_SYMBOLS + ARPA_IPA + _arpabet
# # Export all symbols:
# Flat character inventory (letters + ARPAbet + base symbols); pinyin and
# Mohawk-specific entries are intentionally commented out.
CHARS = (
    [_pad]
    + list(_special)
    + list(_punctuation)
    # + _moh_ipa
    # + _moh_orth
    + list(_letters)
    + _arpabet
    # + _pinyin
    + _silences
)
| 32.727273 | 192 | 0.667857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,142 | 0.452636 |