hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c307067321c98df8a703698a1402c3ece87867d3 | 596 | py | Python | Primer3.py | ZyryanovAV/lb10 | 8fd9708a0b6ae72fe2e65ab1a22495b51f81803f | [
"MIT"
] | null | null | null | Primer3.py | ZyryanovAV/lb10 | 8fd9708a0b6ae72fe2e65ab1a22495b51f81803f | [
"MIT"
] | null | null | null | Primer3.py | ZyryanovAV/lb10 | 8fd9708a0b6ae72fe2e65ab1a22495b51f81803f | [
"MIT"
] | null | null | null | #!/usr/bin/evn python3
# -*- config: utf-8 -*-
# Решите следующую задачу: напишите функцию, которая считывает с клавиатуры числа и
# перемножает их до тех пор, пока не будет введен 0. Функция должна возвращать
# полученное произведение. Вызовите функцию и выведите на экран результат ее работы.
if __name__ == '__main__':
prod = composition()
print(prod)
| 22.923077 | 84 | 0.600671 | #!/usr/bin/evn python3
# -*- coding: utf-8 -*-
# Решите следующую задачу: напишите функцию, которая считывает с клавиатуры числа и
# перемножает их до тех пор, пока не будет введен 0. Функция должна возвращать
# полученное произведение. Вызовите функцию и выведите на экран результат ее работы.
def composition():
    """Read integers from the keyboard and multiply them together until a
    0 is entered; return the accumulated product.

    The task comment above requires the function to *return* the product
    of everything typed before the terminating 0.  The previous version
    reset the product on every iteration, printed intermediate values and
    returned None.

    Returns:
        int: product of all numbers entered before 0 (1 if 0 comes first).
    """
    product = 1
    while True:
        value = int(input('number: '))
        if value == 0:
            break
        product *= value
    return product
# Script entry point: run composition() interactively and print its result.
if __name__ == '__main__':
    prod = composition()
    print(prod)
| 206 | 0 | 23 |
bb811c228423ac83c7ebb23ca24b08e2c438f774 | 2,235 | py | Python | analysis/migrations/0017_auto_20200521_1740.py | bizeasy17/investtrack | 3840948896573f3906a5df80ea80859a492f4133 | [
"MIT"
] | null | null | null | analysis/migrations/0017_auto_20200521_1740.py | bizeasy17/investtrack | 3840948896573f3906a5df80ea80859a492f4133 | [
"MIT"
] | 3 | 2021-07-15T13:23:28.000Z | 2021-12-09T03:32:16.000Z | analysis/migrations/0017_auto_20200521_1740.py | bizeasy17/investtrack | 3840948896573f3906a5df80ea80859a492f4133 | [
"MIT"
] | 1 | 2021-08-19T14:42:59.000Z | 2021-08-19T14:42:59.000Z | # Generated by Django 3.0.2 on 2020-05-21 09:40
from django.db import migrations, models
| 37.881356 | 196 | 0.595973 | # Generated by Django 3.0.2 on 2020-05-21 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.0.2, 2020-05-21).

    Adds eight nullable timestamp bookkeeping columns to
    ``StockStrategyTestLog`` (mark/update times for critical points,
    expected-percentage marks, history downloads and low/high marks) and
    rewrites the ``applied_period`` choices on ``TradeStrategyStat``.
    Auto-generated file: do not hand-edit operations after it has been
    applied to a database.
    """
    dependencies = [
        ('analysis', '0016_auto_20200521_1511'),
    ]
    operations = [
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='cp_marked_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='临界点标记时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='cp_update_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='临界点更新时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='exppct_mark_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='预期涨幅标记时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='exppct_mark_update_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='预期涨幅更新时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='hist_download_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='下载时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='hist_update_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='下载更新时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='lhpct_mark_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='高低点标记时间?'),
        ),
        migrations.AddField(
            model_name='stockstrategytestlog',
            name='lhpct_update_dt',
            field=models.DateTimeField(blank=True, null=True, verbose_name='高低点更新时间?'),
        ),
        migrations.AlterField(
            model_name='tradestrategystat',
            name='applied_period',
            field=models.CharField(blank=True, choices=[('60', '60分钟'), ('mm', '月线'), ('wk', '周线'), ('15', '15分钟'), ('30', '30分钟'), ('dd', '日线')], default='60', max_length=2, verbose_name='应用周期'),
        ),
    ]
| 0 | 2,277 | 23 |
d91ae631739647d1352e6ac2bd006d83a4e6e228 | 5,753 | py | Python | decisiontelecom/viber.py | IT-DecisionTelecom/DecisionTelecom-Python | 00c9cdb36fe39ce59d01603b4512210e89257249 | [
"MIT"
] | null | null | null | decisiontelecom/viber.py | IT-DecisionTelecom/DecisionTelecom-Python | 00c9cdb36fe39ce59d01603b4512210e89257249 | [
"MIT"
] | null | null | null | decisiontelecom/viber.py | IT-DecisionTelecom/DecisionTelecom-Python | 00c9cdb36fe39ce59d01603b4512210e89257249 | [
"MIT"
] | null | null | null | import base64
import enum
import json
import requests
class ViberMessageType(enum.IntEnum):
"""Represents Viber message type"""
TextOnly = 106
TextImageButton = 108
TextOnly2Way = 206
TextImageButton2Way = 208
class ViberMessageSourceType(enum.IntEnum):
"""Represents Viber message source type"""
Promotional = 1
Transactional = 2
class ViberMessageStatus(enum.IntEnum):
"""Represents Viber message status"""
Sent = 0
Delivered = 1
Error = 2
Rejected = 3
Undelivered = 4
Pending = 5
Unknown = 20
class ViberError(Exception):
"""Represents Viber error"""
def __init__(self, name, message, code, status) -> None:
"""Initializes ViberError object
Args:
name (string): Error name
message (string): Error message
code (int): Error code
status (int): Error status
"""
super().__init__()
self.name = name
self.message = message
self.code = code
self.status = status
class ViberMessage:
"""Represents Viber message"""
def __init__(self, sender, receiver, message_type, text, source_type, image_url=None, button_caption=None, button_action=None,
callback_url=None, validity_period=None):
"""Initializes ViberMessage object
Args:
sender (string): Message sender (from whom message is sent)
receiver (string): Message receiver (to whom message is sent)
message_type (ViberMessageType): Message type
text (string): Message body
source_type (ViberMessageSourceType): Message sending procedure
image_url (string, optional): Image URL for promotional message with button caption and button action. Defaults to None.
button_caption (string, optional): Button caption. Defaults to None.
button_action (string, optional): URL for transition when the button is pressed. Defaults to None.
callback_url (string, optional): URL for message status callback. Defaults to None.
validity_period (int, optional): Life time of a message (in seconds). Defaults to None.
"""
self.sender = sender
self.receiver = receiver
self.message_type = message_type
self.text = text
self.image_url = image_url
self.button_caption = button_caption
self.button_action = button_action
self.source_type = source_type
self.callback_url = callback_url
self.validity_period = validity_period
class ViberMessageReceipt:
"""Represents Viber message receipt (Id and status of the particular Viber message)"""
def __init__(self, message_id, status) -> None:
"""Initializes ViberMessageReceipt object
Args:
message_id (int): Viber message Id
status (ViberMessageStatus): Viber message status
"""
self.message_id = message_id
self.status = ViberMessageStatus(status)
class ViberClient:
"""Client to work with Viber messages"""
def __init__(self, api_key) -> None:
"""Initializes ViberClient object
Args:
api_key (string): User access key
"""
self.api_key = api_key
def send_message(self, message) -> int:
"""Sends Viber message
Args:
message (ViberMessage): Viber message to send
Returns:
int: Id of the sent Viber message
Raises:
ViberError: If specific Viber error occurred
"""
request = message.toJSON()
return self.__make_http_request("send-viber", request, ok_response_func)
def get_message_status(self, message_id) -> ViberMessageReceipt:
"""Returns Viber message status
Args:
message_id (int): Id of the Viber message (sent in the last 5 days)
Returns:
ViberMessageReceipt: Viber message receipt object
Raises:
ViberError: If specific Viber error occurred
"""
request = json.dumps({"message_id": message_id})
return self.__make_http_request("receive-viber", request, ok_response_func)
| 33.447674 | 132 | 0.642621 | import base64
import enum
import json
import requests
class ViberMessageType(enum.IntEnum):
    """Represents Viber message type.

    Values are the numeric message-type codes sent to the DecisionTelecom
    API; the ``2Way`` variants presumably allow the recipient to reply --
    confirm against the API documentation.
    """
    TextOnly = 106
    TextImageButton = 108
    TextOnly2Way = 206
    TextImageButton2Way = 208
class ViberMessageSourceType(enum.IntEnum):
    """Represents Viber message source type -- the sending procedure the
    API applies to the message: promotional (1) or transactional (2)."""
    Promotional = 1
    Transactional = 2
class ViberMessageStatus(enum.IntEnum):
    """Represents Viber message status.

    Numeric delivery-status codes as reported by the API; ``Unknown`` (20)
    is presumably the catch-all for unrecognized states -- confirm with the
    API documentation.
    """
    Sent = 0
    Delivered = 1
    Error = 2
    Rejected = 3
    Undelivered = 4
    Pending = 5
    Unknown = 20
class ViberError(Exception):
    """Error reported by the Viber messaging API.

    Carries the four fields of the service's error payload as attributes:
    ``name``, ``message``, ``code`` and ``status``.
    """

    def __init__(self, name, message, code, status) -> None:
        """Build an error from the fields of an API error response.

        Args:
            name (string): Error name
            message (string): Error message
            code (int): Error code
            status (int): Error status
        """
        super().__init__()
        # Keep the raw payload fields verbatim for callers to inspect.
        self.name, self.message = name, message
        self.code, self.status = code, status
class ViberMessage:
    """A Viber message to be submitted through the DecisionTelecom API."""

    def __init__(self, sender, receiver, message_type, text, source_type, image_url=None, button_caption=None, button_action=None,
                 callback_url=None, validity_period=None):
        """Build a message.

        Args:
            sender (string): Message sender (from whom message is sent)
            receiver (string): Message receiver (to whom message is sent)
            message_type (ViberMessageType): Message type
            text (string): Message body
            source_type (ViberMessageSourceType): Message sending procedure
            image_url (string, optional): Image URL for promotional messages. Defaults to None.
            button_caption (string, optional): Button caption. Defaults to None.
            button_action (string, optional): URL opened when the button is pressed. Defaults to None.
            callback_url (string, optional): URL for message status callbacks. Defaults to None.
            validity_period (int, optional): Message lifetime in seconds. Defaults to None.
        """
        # Assignment order is preserved deliberately: toJSON() serializes
        # __dict__ in insertion order.
        self.sender = sender
        self.receiver = receiver
        self.message_type = message_type
        self.text = text
        self.image_url = image_url
        self.button_caption = button_caption
        self.button_action = button_action
        self.source_type = source_type
        self.callback_url = callback_url
        self.validity_period = validity_period

    def toJSON(self):
        """Serialize to the JSON request body expected by the API.

        A few attributes are renamed on the wire (sender -> source_addr,
        receiver -> destination_addr, image_url -> image); all other
        attributes keep their Python names.
        """
        wire_names = {
            "sender": "source_addr",
            "receiver": "destination_addr",
            "image_url": "image",
        }
        payload = {}
        for attr, value in self.__dict__.items():
            payload[wire_names.get(attr, attr)] = value
        return json.dumps(payload)
class ViberMessageReceipt:
    """Represents Viber message receipt (Id and status of the particular Viber message)"""
    def __init__(self, message_id, status) -> None:
        """Initializes ViberMessageReceipt object

        Args:
            message_id (int): Viber message Id
            status (ViberMessageStatus): Viber message status; a plain int
                status code is accepted and coerced -- note that
                ViberMessageStatus(status) raises ValueError for codes that
                are not members of the enum.
        """
        self.message_id = message_id
        self.status = ViberMessageStatus(status)
class ViberClient:
    """Client to work with Viber messages.

    Thin wrapper over the web.it-decision.com v1 HTTP API; every request is
    authenticated with HTTP Basic auth built from ``api_key``.
    """
    def __init__(self, api_key) -> None:
        """Initializes ViberClient object

        Args:
            api_key (string): User access key
        """
        self.api_key = api_key
    def send_message(self, message) -> int:
        """Sends Viber message

        Args:
            message (ViberMessage): Viber message to send

        Returns:
            int: Id of the sent Viber message

        Raises:
            ViberError: If specific Viber error occurred
        """
        # Success handler: the API answers with {"message_id": <int>}.
        def ok_response_func(response_body):
            return int(json.loads(response_body)["message_id"])
        request = message.toJSON()
        return self.__make_http_request("send-viber", request, ok_response_func)
    def get_message_status(self, message_id) -> ViberMessageReceipt:
        """Returns Viber message status

        Args:
            message_id (int): Id of the Viber message (sent in the last 5 days)

        Returns:
            ViberMessageReceipt: Viber message receipt object

        Raises:
            ViberError: If specific Viber error occurred
        """
        # Success handler: response body carries the receipt fields directly.
        def ok_response_func(response_body):
            deserialized_json = json.loads(response_body)
            return ViberMessageReceipt(**deserialized_json)
        request = json.dumps({"message_id": message_id})
        return self.__make_http_request("receive-viber", request, ok_response_func)
    def __make_http_request(self, url, request, ok_response_func):
        # Shared POST helper: sends `request` (a pre-serialized JSON string)
        # to the named endpoint and funnels the body through ok_response_func.
        BASE_URL = "https://web.it-decision.com/v1/api"
        full_url = "{base_url}/{url}".format(base_url=BASE_URL, url=url)
        headers = {
            "Authorization": "Basic " + base64.b64encode(self.api_key.encode()).decode(),
            "Content-Type": "application/json",
            "Accept": "application/json"}
        response = requests.post(full_url, data=request, headers=headers)
        # Raise exception for unsuccessful response status codes
        response.raise_for_status()
        # If response contains "name", "message", "code" and "status" words, treat it as a ViberError
        # NOTE(review): substring sniffing is fragile -- any legitimate payload
        # containing all four words would be misclassified; confirm against the
        # API's documented error format.
        if "name" in response.text and "message" in response.text and "code" in response.text and "status" in response.text:
            deserialized_json = json.loads(response.text)
            raise ViberError(**deserialized_json)
        return ok_response_func(response.text)
| 1,419 | 0 | 114 |
e95c33c75d2540b5b561e775969eeb9bfadf4f14 | 4,574 | py | Python | src/controller_pid_with_anti_windup.py | 30sectomars/psas_testbot | 06954927c1d11be2e49359515c0b8f57f6960fd5 | [
"MIT"
] | 1 | 2020-02-26T07:29:17.000Z | 2020-02-26T07:29:17.000Z | src/controller_pid_with_anti_windup.py | 30sectomars/psas_testbot | 06954927c1d11be2e49359515c0b8f57f6960fd5 | [
"MIT"
] | null | null | null | src/controller_pid_with_anti_windup.py | 30sectomars/psas_testbot | 06954927c1d11be2e49359515c0b8f57f6960fd5 | [
"MIT"
] | 1 | 2020-02-26T07:25:46.000Z | 2020-02-26T07:25:46.000Z | #!/usr/bin/env python2
# Python libs
import math
# Ros libsSIMULATION:
import rospy
# Ros messages
from std_msgs.msg import Float64
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist
#Gravity
G = 9.81
FILTER_SIZE = 20
# IMU offset in real world
if rospy.has_param('/use_simulation'):
SIMULATION = rospy.get_param('/use_simulation')
if SIMULATION:
OFFSET_Y = 0.0
else:
OFFSET_Y = 0.134
else:
SIMULATION = False
OFFSET_Y = 0.134
# get v_max
if rospy.has_param('/v_max'):
V_MAX = rospy.get_param('/v_max')
else:
V_MAX = 0.05
# get loop rate in hz
if rospy.has_param('/loop_rate_in_hz'):
LOOP_RATE_IN_HZ = rospy.get_param('/loop_rate_in_hz')
else:
LOOP_RATE_IN_HZ = 100
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass | 25.270718 | 88 | 0.688894 | #!/usr/bin/env python2
# Python libs
import math
# Ros libs
import rospy
# Ros messages
from std_msgs.msg import Float64
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist
# Gravity [m/s^2]; accel_y/G is fed to asin() in the controller, so it is
# used as a dimensionless ratio.
G = 9.81
# Number of samples in the moving-average filter over the measured angle.
FILTER_SIZE = 20
# IMU offset in real world -- constant subtracted from asin(accel_y/G), i.e.
# a mounting offset in radians; zero when running in simulation.
if rospy.has_param('/use_simulation'):
    SIMULATION = rospy.get_param('/use_simulation')
    if SIMULATION:
        OFFSET_Y = 0.0
    else:
        OFFSET_Y = 0.134
else:
    SIMULATION = False
    OFFSET_Y = 0.134
# get v_max (commanded forward speed, published as Twist.linear.x)
if rospy.has_param('/v_max'):
    V_MAX = rospy.get_param('/v_max')
else:
    V_MAX = 0.05
# get loop rate in hz (control loop frequency; also fixes the PID dt)
if rospy.has_param('/loop_rate_in_hz'):
    LOOP_RATE_IN_HZ = rospy.get_param('/loop_rate_in_hz')
else:
    LOOP_RATE_IN_HZ = 100
class Controller:
    """PID controller with back-calculation anti-windup for the testbot.

    Subscribes to an IMU topic (simulated or real), low-pass filters the
    angle derived from lateral acceleration, runs one PID step per loop in
    control(), and publishes the steering command plus debug signals in
    publish_all().
    """
    def __init__(self):
        # --- controller state ---
        self.connected = False  # becomes True after the first IMU message
        self.gyro_x = 0.0
        self.gyro_y = 0.0
        self.gyro_z = 0.0
        self.accel_x = 0.0
        self.accel_y = 0.0
        self.accel_z = 0.0
        self.ref = 0.0          # setpoint for the filtered angle y
        self.e_sum = 0.0        # accumulated error (published only; see NOTE in control())
        self.e = [0.0, 0.0]     # e[0]=current error, e[1]=previous error
        self.y = 0.0            # moving-average of y_list
        self.y_list = [0.0] * FILTER_SIZE
        self.u_pre = 0.0        # unsaturated controller output
        self.u = [0.0, 0.0, 0.0]  # history of final outputs, newest first
        self.diff_u = 0.0       # saturation excess fed back by anti-windup
        self.umax = 0.116       # actuator saturation limits
        self.umin = -0.116
        self.Kp = 4.0
        self.Ki = 0.1
        self.Kd = 0.5
        self.dt = 1.0 / LOOP_RATE_IN_HZ
        self.delta1 = 0.0       # steering angle command [deg]
        if SIMULATION:
            self.imu_sub = rospy.Subscriber('/imu', Imu, self.imu_callback)
        else:
            self.imu_sub = rospy.Subscriber('/testbot/imu', Float32MultiArray, self.imu_callback)
        self.delta1_pub = rospy.Publisher('/testbot/delta1', Float64, queue_size=10)
        self.e_pub = rospy.Publisher('/controller/e', Float64, queue_size=10)
        self.y_avg_pub = rospy.Publisher('/controller/y_avg', Float64, queue_size=10)
        self.y_pub = rospy.Publisher('/controller/y', Float64, queue_size=10)
        self.u_pub = rospy.Publisher('/controller/u', Float64, queue_size=10)
        self.u_pre_pub = rospy.Publisher('/controller/u_pre', Float64, queue_size=10)
        # NOTE(review): duplicate of the u_pub assignment two lines up -- the
        # first publisher object is discarded.
        self.u_pub = rospy.Publisher('/controller/u', Float64, queue_size=10)
        self.diff_u_pub = rospy.Publisher('/controller/diff_u', Float64, queue_size=10)
        self.e_sum_pub = rospy.Publisher('/controller/e_sum', Float64, queue_size=10)
        self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        rospy.on_shutdown(self.shutdown)
    def control(self):
        """Run one PID step: update error history, apply anti-windup,
        refresh the filtered measurement and compute delta1."""
        self.diff_u = 0.0
        self.y = sum(self.y_list)/len(self.y_list)
        # insert new error in list and pop oldest value
        self.e.insert(0, self.ref - self.y)
        del self.e[-1]
        self.e_sum += self.e[0]
        # NOTE(review): I_anteil restarts at 0 every cycle and e_sum is never
        # used in the control law, so the integral term is non-zero only via
        # the back-calculation branch below -- verify this is intended.
        I_anteil = 0.0
        D_anteil = (self.e[0] - self.e[1]) / self.dt
        self.u_pre = self.Kp * self.e[0] + self.Ki * I_anteil + self.Kd * D_anteil
        if self.u_pre > self.umax:
            self.diff_u = self.umax - self.u_pre
        if self.u_pre < self.umin:
            self.diff_u = self.umin - self.u_pre
        # Back-calculation anti-windup: recompute the integral share from the
        # saturation excess.
        if self.diff_u != 0:
            I_anteil = (1.0 / self.Ki) * self.diff_u + self.e[0]
        # NOTE(review): bitwise & on booleans -- works here but 'and' is the
        # conventional spelling.
        if (self.accel_y/G <= 1.0) & (self.accel_y/G > -1.0) & self.connected:
            self.y_list.insert(0, math.asin(self.accel_y/G) - OFFSET_Y)
            del self.y_list[-1]
        self.u.insert(0,self.Kp * self.e[0] + self.Ki * I_anteil + self.Kd * D_anteil)
        del self.u[-1]
        # NOTE(review): tan() of a scaled output, converted to degrees --
        # possibly atan() was intended here; confirm the steering model.
        self.delta1 = -math.tan(0.015 / V_MAX * self.u[0]) * 180 / math.pi
        if SIMULATION:
            self.delta1 = -self.delta1
    def publish_all(self):
        """Publish debug signals and the Twist velocity/steering command."""
        #self.delta1_pub.publish(self.delta1)
        self.e_pub.publish(self.e[0])
        self.y_pub.publish(self.y_list[0])
        self.y_avg_pub.publish(self.y)
        self.u_pre_pub.publish(self.u_pre)
        self.u_pub.publish(self.u[0])
        self.diff_u_pub.publish(self.diff_u)
        self.e_sum_pub.publish(self.e_sum)
        msg = Twist()
        msg.linear.x = V_MAX
        msg.angular.z = self.delta1
        self.vel_pub.publish(msg)
    def imu_callback(self, msg):
        """Store the latest IMU sample; sign conventions differ between the
        simulated Imu message and the real Float32MultiArray layout."""
        self.connected = True
        if SIMULATION:
            self.gyro_x = msg.angular_velocity.x
            self.gyro_y = -msg.angular_velocity.y
            self.gyro_z = -msg.angular_velocity.z
            self.accel_x = msg.linear_acceleration.x
            self.accel_y = -msg.linear_acceleration.y
            self.accel_z = -msg.linear_acceleration.z
        else:
            # Real robot packs [gyro_x, gyro_y, gyro_z, accel_x, accel_y, accel_z].
            self.gyro_x = msg.data[0]
            self.gyro_y = msg.data[1]
            self.gyro_z = msg.data[2]
            self.accel_x = msg.data[3]
            self.accel_y = msg.data[4]
            self.accel_z = msg.data[5]
    def shutdown(self):
        """Stop the robot by commanding zero velocity and steering."""
        msg = Twist()
        msg.linear.x = 0.0
        msg.angular.z = 0.0
        self.vel_pub.publish(msg)
        #rospy.loginfo("Controller is shut down")
def talker():
    """Start the 'controller' ROS node and run the control/publish loop at
    LOOP_RATE_IN_HZ until rospy signals shutdown."""
    rospy.init_node('controller', anonymous=True)
    ctrl = Controller()
    rate = rospy.Rate(LOOP_RATE_IN_HZ)
    while not rospy.is_shutdown():
        ctrl.control()
        ctrl.publish_all()
        rate.sleep()
# Entry point: run the node; swallow the interrupt rospy raises on shutdown.
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException: pass
4a6e54b88c6c32b5dea57fa6fb826e8eda65c050 | 7,002 | py | Python | kitsune/groups/tests/test_views.py | turtleloveshoes/kitsune | 7e5524644eab7f608a44c44c63d242cda3aef7f0 | [
"BSD-3-Clause"
] | 1 | 2015-03-09T05:48:58.000Z | 2015-03-09T05:48:58.000Z | kitsune/groups/tests/test_views.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | 2 | 2015-01-16T19:47:25.000Z | 2015-01-16T19:49:09.000Z | kitsune/groups/tests/test_views.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | null | null | null | import os
from django.core.files import File
from nose.tools import eq_
from kitsune.groups.models import GroupProfile
from kitsune.groups.tests import group_profile
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import user, group, add_permission
| 39.559322 | 78 | 0.634105 | import os
from django.core.files import File
from nose.tools import eq_
from kitsune.groups.models import GroupProfile
from kitsune.groups.tests import group_profile
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import user, group, add_permission
class EditGroupProfileTests(TestCase):
    """Access control for the group-profile edit view (groups.edit)."""
    def setUp(self):
        super(EditGroupProfileTests, self).setUp()
        self.user = user(save=True)
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def _verify_get_and_post(self):
        """Shared check: GET renders (200) and POST saves then redirects (302)."""
        slug = self.group_profile.slug
        # Verify GET
        r = self.client.get(reverse('groups.edit', args=[slug]), follow=True)
        eq_(r.status_code, 200)
        # Verify POST
        r = self.client.post(reverse('groups.edit', locale='en-US',
                                     args=[slug]),
                             {'information': '=new info='})
        eq_(r.status_code, 302)
        gp = GroupProfile.objects.get(slug=slug)
        eq_(gp.information, '=new info=')
    def test_edit_with_perm(self):
        """A user with change_groupprofile may edit any group profile."""
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self._verify_get_and_post()
    def test_edit_as_leader(self):
        """A group leader may edit the profile without the model permission."""
        self.group_profile.leaders.add(self.user)
        self._verify_get_and_post()
    def test_edit_without_perm(self):
        """Without permission or leadership, both GET and POST return 403."""
        slug = self.group_profile.slug
        # Try GET
        r = self.client.get(reverse('groups.edit', args=[slug]), follow=True)
        eq_(r.status_code, 403)
        # Try POST
        r = self.client.post(reverse('groups.edit', locale='en-US',
                                     args=[slug]),
                             {'information': '=new info='})
        eq_(r.status_code, 403)
class EditAvatarTests(TestCase):
    """Upload/delete flows for the group avatar views."""
    def setUp(self):
        super(EditAvatarTests, self).setUp()
        self.user = user(save=True)
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def tearDown(self):
        # Remove the uploaded file so test runs don't leak media on disk.
        if self.group_profile.avatar:
            self.group_profile.avatar.delete()
        super(EditAvatarTests, self).tearDown()
    def test_upload_avatar(self):
        """Upload a group avatar."""
        # Seed an existing avatar so we can verify it gets replaced on disk.
        with open('kitsune/upload/tests/media/test.jpg') as f:
            self.group_profile.avatar.save('test_old.jpg', File(f), save=True)
        assert self.group_profile.avatar.name.endswith('92b516.jpg')
        old_path = self.group_profile.avatar.path
        assert os.path.exists(old_path), 'Old avatar is not in place.'
        url = reverse('groups.edit_avatar', locale='en-US',
                      args=[self.group_profile.slug])
        with open('kitsune/upload/tests/media/test.jpg') as f:
            r = self.client.post(url, {'avatar': f})
        eq_(302, r.status_code)
        url = reverse('groups.profile', args=[self.group_profile.slug])
        eq_('http://testserver/en-US' + url, r['location'])
        assert not os.path.exists(old_path), 'Old avatar was not removed.'
    def test_delete_avatar(self):
        """Delete a group avatar."""
        # Reuse the upload test to get an avatar in place first.
        self.test_upload_avatar()
        url = reverse('groups.delete_avatar', locale='en-US',
                      args=[self.group_profile.slug])
        r = self.client.get(url)
        eq_(200, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        url = reverse('groups.profile', args=[self.group_profile.slug])
        eq_('http://testserver/en-US' + url, r['location'])
        gp = GroupProfile.objects.get(slug=self.group_profile.slug)
        eq_('', gp.avatar.name)
class AddRemoveMemberTests(TestCase):
    """Adding and removing group members via the member-management views."""
    def setUp(self):
        super(AddRemoveMemberTests, self).setUp()
        self.user = user(save=True)
        self.member = user(save=True)
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def test_add_member(self):
        """GET is rejected (405); POST adds the user and redirects (302)."""
        url = reverse('groups.add_member', locale='en-US',
                      args=[self.group_profile.slug])
        r = self.client.get(url)
        eq_(405, r.status_code)
        r = self.client.post(url, {'users': self.member.username})
        eq_(302, r.status_code)
        assert self.member in self.group_profile.group.user_set.all()
    def test_remove_member(self):
        """GET shows a confirmation (200); POST removes the user (302)."""
        self.member.groups.add(self.group_profile.group)
        url = reverse('groups.remove_member', locale='en-US',
                      args=[self.group_profile.slug, self.member.id])
        r = self.client.get(url)
        eq_(200, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        assert self.member not in self.group_profile.group.user_set.all()
class AddRemoveLeaderTests(TestCase):
    """Adding and removing group leaders via the leader-management views."""
    def setUp(self):
        super(AddRemoveLeaderTests, self).setUp()
        self.user = user(save=True)
        add_permission(self.user, GroupProfile, 'change_groupprofile')
        self.leader = user(save=True)
        self.group_profile = group_profile(group=group(save=True), save=True)
        self.client.login(username=self.user.username, password='testpass')
    def test_add_leader(self):
        """GET is rejected (405); POST adds the leader and redirects (302)."""
        url = reverse('groups.add_leader', locale='en-US',
                      args=[self.group_profile.slug])
        r = self.client.get(url)
        eq_(405, r.status_code)
        r = self.client.post(url, {'users': self.leader.username})
        eq_(302, r.status_code)
        assert self.leader in self.group_profile.leaders.all()
    def test_remove_member(self):
        """Removing a leader via groups.remove_leader.

        NOTE(review): method name says 'member' but it exercises the
        remove_leader view -- presumably a copy/paste slip.
        """
        self.group_profile.leaders.add(self.leader)
        url = reverse('groups.remove_leader', locale='en-US',
                      args=[self.group_profile.slug, self.leader.id])
        r = self.client.get(url)
        eq_(200, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        assert self.leader not in self.group_profile.leaders.all()
class JoinContributorsTests(TestCase):
    """The self-service 'join contributors' view."""
    def setUp(self):
        super(JoinContributorsTests, self).setUp()
        self.user = user(save=True)
        self.client.login(username=self.user.username, password='testpass')
        group(name='Contributors', save=True)
    def test_join_contributors(self):
        """GET is rejected (405); POST joins the group and redirects to next."""
        # NOTE(review): 'next' shadows the builtin; harmless in this scope.
        next = reverse('groups.list')
        url = reverse('groups.join_contributors', locale='en-US')
        url = urlparams(url, next=next)
        r = self.client.get(url)
        eq_(405, r.status_code)
        r = self.client.post(url)
        eq_(302, r.status_code)
        eq_('http://testserver%s' % next, r['location'])
        assert self.user.groups.filter(name='Contributors').exists()
| 4,586 | 1,591 | 462 |
36e0f9dd4baba21bf27274894f46b27586544485 | 3,673 | py | Python | scrapper/scrapper_last_years.py | MicaelMCarvalho/autarquicasportugaldata | 889b754df6b3f4901ff4154d949a38563666fa9c | [
"MIT"
] | null | null | null | scrapper/scrapper_last_years.py | MicaelMCarvalho/autarquicasportugaldata | 889b754df6b3f4901ff4154d949a38563666fa9c | [
"MIT"
] | null | null | null | scrapper/scrapper_last_years.py | MicaelMCarvalho/autarquicasportugaldata | 889b754df6b3f4901ff4154d949a38563666fa9c | [
"MIT"
] | null | null | null | #! /usr/bin/python
"""
Entry point for scrapper module to be used in 2017 and 2013
in this module it will be defined all the logic behind the data scrapping from the website(s)
"""
import requests
import json
from .filter import Filter
from .data_transform import transform
| 40.811111 | 169 | 0.62973 | #! /usr/bin/python
"""
Entry point for scrapper module to be used in 2017 and 2013
in this module it will be defined all the logic behind the data scrapping from the website(s)
"""
import requests
import json
from .filter import Filter
from .data_transform import transform
class scrapper:
    """Scraper for Portuguese local-election (autarquicas) results.

    Configured from a list of per-year dicts (keys: 'year', 'url',
    'url_votes', 'url_territorykey', 'main_territorykey'); downloads
    candidate lists, territory keys and vote counts, merges them via
    Filter, and writes one JSON file per year.

    NOTE(review): class name is lowercase, breaking PEP 8's PascalCase
    convention; renaming would change the public interface, so left as-is.
    """
    def __init__(self, elections):
        self.url = {}
        self.url_votes = {}
        self.url_territorykey = {}
        self.main_territorykey = {}
        self.year = []
        # Index every per-election URL template by its year.
        for item in elections:
            self.url[item['year']] = item['url']
            self.url_votes[item['year']] = item['url_votes']
            self.url_territorykey[item['year']] = item['url_territorykey']
            self.main_territorykey[item['year']] = item['main_territorykey']
            self.year.append(item['year'])
    def _save_to_file(self, data, finename):
        # NOTE(review): parameter name 'finename' is a typo for 'filename',
        # and open() has no explicit encoding= -- with ensure_ascii=False the
        # output depends on the platform default encoding.
        with open(finename ,'w') as f:
            json.dump(data, f, sort_keys=True, indent=4, ensure_ascii=False)
    def start_scrapper(self):
        """Full pipeline per year: fetch, organize, merge votes, save, export."""
        sort_out = Filter()
        for year in self.year:
            print(' ++++++++++ Starting Year %s ++++++++++' % (year))
            location_keys = self.get_location_key(year)
            data = self.iterateUrl(year)
            data = sort_out.get_organized_data(data, year)
            data = self.add_votes(data, location_keys, year)
            self._save_to_file(data, 'autarquicas_%s.json' % (year))
            transform.data_format_to_pandas(data, year)
    def iterateUrl(self, year):
        """Page through the candidates endpoint, collecting every
        electionCandidates entry until numberOfPages is reached (max 99)."""
        dictAllInfo = {"candidate":[]}
        for page in range(1, 100):
            print(self.url[year] % (page))
            response = requests.get(self.url[year] % (page))
            data = response.json()
            maxPageNum = data['numberOfPages']
            for candidate in data['electionCandidates']:
                dictAllInfo['candidate'].append(candidate)
            if page == maxPageNum:
                break
        return dictAllInfo
    def get_location_key(self, year):
        """Build {district_name: {'territoryKey': ..., 'towns': {town: key}}}
        by walking the territory-key endpoint one level deep."""
        print(self.url_territorykey[year] % (self.main_territorykey[year]))
        response = requests.get(self.url_territorykey[year] % (self.main_territorykey[year]))
        data = response.json()
        dict_locations = {}
        for elem in data:
            response = requests.get(self.url_territorykey[year] % (elem['territoryKey']))
            towns = response.json()
            dict_locations[elem['name']] = {}
            dict_locations[elem['name']]['territoryKey'] = elem['territoryKey']
            dict_locations[elem['name']]['towns'] = {}
            for town in towns:
                dict_locations[elem['name']]['towns'][town['name']] = town['territoryKey']
        return dict_locations
    def get_votes(self, location_key, year):
        """Fetch the 'currentResults' block for one territory key."""
        response = requests.get(self.url_votes[year] % (location_key))
        votes = response.json()
        return(votes['currentResults'])
    def add_votes(self, data, location_keys, year):
        """Merge downloaded vote counts into the organized candidate data,
        returning {district: {town: merged}}.

        NOTE(review): the 'location_keys' parameter is unused here -- keys
        are taken from data[district][town]['territoryKey'] instead.
        """
        new_data = {}
        for district in data:
            district_name = str(district)
            new_data[district_name] = {}
            for town in data[district]:
                new_data[district_name][str(data[district][town]['territoryName'])] = {}
                votes = self.get_votes(data[district][town]['territoryKey'], year)
                print('\n\n\nSTART MERGE: DISTRICT: ', data[district][town]['parentTerritoryName'], ' County:', data[district][town]['territoryName'])
                #new_data[district_name][data[district][town]['territoryName']]['candidates'] = Filter.merge_votes_with_candidates(data[district][town]['candidates'], votes)
                new_data[district_name][data[district][town]['territoryName']] = Filter.merge_votes_with_candidates(data[district][town], votes)
        return new_data
| 3,173 | -6 | 217 |
8e22d3c579a5a54095efdc59417909a548eea279 | 6,442 | py | Python | Codes/strings.py | shreyansh26/Malware-Classification-Project | ae467d3c5073c3090ad6e8f408ee103fcb7f19a4 | [
"MIT"
] | 5 | 2019-04-12T18:13:23.000Z | 2022-01-27T16:23:02.000Z | Codes/strings.py | shreyansh26/Malware-Classification-Project | ae467d3c5073c3090ad6e8f408ee103fcb7f19a4 | [
"MIT"
] | null | null | null | Codes/strings.py | shreyansh26/Malware-Classification-Project | ae467d3c5073c3090ad6e8f408ee103fcb7f19a4 | [
"MIT"
] | 2 | 2019-04-12T18:13:22.000Z | 2021-11-09T00:56:39.000Z | import numpy as np
from numba.decorators import jit, autojit
import hickle
import os, gzip
binary_search_numba = autojit(binary_search, nopython=True)
ex_numba = autojit(extract, nopython=True)
| 34.449198 | 105 | 0.532754 | import numpy as np
from numba.decorators import jit, autojit
import hickle
import os, gzip
def binary_search(a, x):
    """Locate x in the sorted 1-D array a.

    Classic half-open-interval binary search over a[0:a.shape[0]]; returns
    the index of x, or -1 when x is absent.  Uses only plain arithmetic and
    comparisons so it stays compilable by numba in nopython mode.
    """
    left = 0
    right = a.shape[0]
    while left < right:
        middle = left + (right - left) // 2
        value = a[middle]
        if value == x:
            return middle
        if value < x:
            left = middle + 1
        else:
            right = middle
    return -1
# JIT-compile the search with numba in nopython mode.
# NOTE(review): numba.decorators.autojit was removed in modern numba; the
# current spelling is numba.jit(nopython=True) / numba.njit.
binary_search_numba = autojit(binary_search, nopython=True)
def extract(all_elems_codes, out, ascii_list):
MAX_STR = out.shape[0]
cur_num_str = 0
i = all_elems_codes.shape[0] - 1
state = 0
cur_end = -1
min_length = 4
count_one = 0
count_two = 0
count_three = 0
while i >= 1:
if all_elems_codes[i] == 0:
if (state == 1):
if (cur_end - i - 1 >= min_length):
out[cur_num_str, 0] = i + 1
out[cur_num_str, 1] = cur_end
cur_num_str += 1
elif (cur_end - i - 1 == 1):
count_one += 1
elif (cur_end - i - 1 == 2):
count_two += 1
elif (cur_end - i - 1 == 3):
count_three += 1
state = 1
cur_end = i
else:
if binary_search_numba(ascii_list, all_elems_codes[i]) == -1:
if (state == 1):
state = 0
if (cur_end - i - 1 >= min_length):
out[cur_num_str, 0] = i + 1
out[cur_num_str, 1] = cur_end
cur_num_str += 1
elif (cur_end - i - 1 == 1):
count_one += 1
elif (cur_end - i - 1 == 2):
count_two += 1
elif (cur_end - i - 1 == 3):
count_three += 1
i -= 1
if cur_num_str == MAX_STR:
break
return cur_num_str, count_one, count_two, count_three
ex_numba = autojit(extract, nopython=True)
def get_dict():
d = {format(key, '02X'): key for key in range(256)}
d['??'] = 256
return d
def get_strings(byte_data):
text = byte_data
name = ''
lines = ''.join(text).split('\n')
all_elems_codes = []
convert_dict = get_dict()
ascii_list = np.array(list(range(32, 127)) + [13, 10])
ascii_list.sort()
for l in lines:
elems = l.split(' ')
all_elems_codes.extend([convert_dict[x] for x in elems[1:]])
all_elems_codes = np.array(all_elems_codes)
out_ = np.zeros([15000, 2], dtype=np.int64)
m,count_one,count_two, count_three = ex_numba(all_elems_codes, out_, ascii_list)
string_total_len = np.sum(out_[:,1] - out_[:,0]) + count_one + count_two + count_three
string_ratio = float(string_total_len)/len(all_elems_codes)
strings = []
for i in range(m):
strings.extend(
[''.join([chr(x) for x in all_elems_codes[out_[i, 0]:out_[i, 1]]])])
return [name, strings, [count_one,count_two,count_three,string_total_len,string_ratio]]
def extract_length(data):
another_f = np.vstack([x[2] for x in data])
len_arrays = [np.array([len(y) for y in x[1]] + [0]+[10000]) for x in data]
bincounts = [ np.bincount(arr) for arr in len_arrays]
counts = np.concatenate([another_f[:,:3], np.vstack([ arr[4:100] for arr in bincounts])],axis = 1)
counts_0_10 = np.sum(counts[:,0:10],axis = 1)[:,None]
counts_10_30 = np.sum(counts[:,10:30],axis = 1)[:,None]
counts_30_60 = np.sum(counts[:,30:60],axis = 1)[:,None]
counts_60_90 = np.sum(counts[:,60:90],axis = 1)[:,None]
counts_0_100 = np.sum(counts[:,0:100],axis = 1)[:,None]
counts_100_150 = [np.sum(arr[100:150]) for arr in bincounts]
counts_150_250 = [np.sum(arr[150:250]) for arr in bincounts]
counts_250_400 = [np.sum(arr[250:450]) for arr in bincounts]
counts_400_600 = [np.sum(arr[400:600]) for arr in bincounts]
counts_600_900 = [np.sum(arr[600:900]) for arr in bincounts]
counts_900_1300 = [np.sum(arr[900:1300]) for arr in bincounts]
counts_1300_2000 = [np.sum(arr[1300:2000]) for arr in bincounts]
counts_2000_3000 = [np.sum(arr[2000:3000]) for arr in bincounts]
counts_3000_6000 = [np.sum(arr[3000:6000]) for arr in bincounts]
counts_6000_15000 = [np.sum(arr[6000:15000]) for arr in bincounts]
med = np.array([np.median([len(y) for y in x[1]] + [0]) for x in data ])[:,None]
mean = np.array([np.mean([len(y) for y in x[1]] + [0]) for x in data ])[:,None]
var = np.array([np.var([len(y) for y in x[1]] + [0]) for x in data ])[:,None]
feats = np.concatenate([np.vstack(counts),
counts_0_10,
counts_10_30,
counts_30_60,
counts_60_90,
counts_0_100,
np.array(counts_100_150)[:,None],
np.array(counts_150_250)[:,None],
np.array(counts_250_400)[:,None],
np.array(counts_400_600)[:,None],
np.array(counts_600_900)[:,None],
np.array(counts_900_1300)[:,None],
np.array(counts_1300_2000)[:,None],
np.array(counts_2000_3000)[:,None],
np.array(counts_3000_6000)[:,None],
np.array(counts_6000_15000)[:,None],
another_f[:,3:]
],axis = 1)
return feats
def dump_names(strings_feats_dir):
n = ['string_len_counts_' + str(x) for x in range(1,100)] + [
'string_len_counts_0_10',
'string_len_counts_10_30',
'string_len_counts_30_60',
'string_len_counts_60_90',
'string_len_counts_0_100',
'string_len_counts_100_150',
'string_len_counts_150_250',
'string_len_counts_250_400',
'string_len_counts_400_600',
'string_len_counts_600_900',
'string_len_counts_900_1300',
'string_len_counts_1300_2000',
'string_len_counts_2000_3000',
'string_len_counts_3000_6000',
'string_len_counts_6000_15000',
'string_total_len',
'string_ratio'
]
hickle.dump(n,os.path.join(strings_feats_dir,'strings_feats_names')) | 6,103 | 0 | 138 |
3838554b908b79fecc82cf82f1f36a72cafd1cea | 809 | py | Python | setup.py | thautwarm/fix-author | d5cfe9906c1099de038a2681c8b72a6a71c0eae8 | [
"MIT"
] | 3 | 2018-09-07T06:58:42.000Z | 2018-09-13T04:59:30.000Z | setup.py | thautwarm/fix-author | d5cfe9906c1099de038a2681c8b72a6a71c0eae8 | [
"MIT"
] | 2 | 2018-09-13T04:41:19.000Z | 2020-10-12T04:33:24.000Z | setup.py | thautwarm/fix-author | d5cfe9906c1099de038a2681c8b72a6a71c0eae8 | [
"MIT"
] | 2 | 2018-09-10T05:50:24.000Z | 2018-09-13T04:59:33.000Z | from setuptools import setup
setup(
name='fix-author',
version='1.1',
packages=['fix_author'],
install_requires=['rbnf', 'wisepy'],
license='MIT',
author='thautwarm',
keywords='git commit, fix author',
description='fix author info in git commits',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
python_requires='>=3.6.0',
url='https://github.com/thautwarm/fix-author',
author_email='twshere@outlook.com',
platforms='any',
entry_points={'console_scripts': ['fix-author=fix_author.cli:main']},
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython'
],
zip_safe=False)
| 32.36 | 73 | 0.651422 | from setuptools import setup
setup(
name='fix-author',
version='1.1',
packages=['fix_author'],
install_requires=['rbnf', 'wisepy'],
license='MIT',
author='thautwarm',
keywords='git commit, fix author',
description='fix author info in git commits',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
python_requires='>=3.6.0',
url='https://github.com/thautwarm/fix-author',
author_email='twshere@outlook.com',
platforms='any',
entry_points={'console_scripts': ['fix-author=fix_author.cli:main']},
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython'
],
zip_safe=False)
| 0 | 0 | 0 |
5c267dae4db1390916c741691a80668fa63bf5fe | 3,102 | py | Python | onem_details.py | In-finite/NaturalDisasters | 031d50e21ff2e8d1559eb1545e11e8f95143fe53 | [
"MIT"
] | 2 | 2019-03-13T16:55:39.000Z | 2019-04-19T03:53:09.000Z | onem_details.py | In-finite/NaturalDisasters | 031d50e21ff2e8d1559eb1545e11e8f95143fe53 | [
"MIT"
] | 2 | 2019-02-09T17:48:13.000Z | 2019-02-10T05:48:55.000Z | onem_details.py | In-finite/NaturalDisasters | 031d50e21ff2e8d1559eb1545e11e8f95143fe53 | [
"MIT"
] | 2 | 2018-12-24T16:59:21.000Z | 2019-07-02T04:12:33.000Z | import csv
import base64
import pandas as pd
import datetime as dt
from realtime_details import (extract_places_regions, radius_multiplier)
logo_image = 'cartoon-globe.png'
en_logo = base64.b64encode(open(logo_image, 'rb').read())
entire_month = pd.read_csv('https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.csv')
def extract_month_values():
'''
Takes the entire data in a list -> [ [], [], [] ]
Parameters : `None`
Return : `list`
'''
all_month = entire_month.copy()
time = pd.to_datetime(all_month['time'])
all_month['time'] = time
fields = [field for field in all_month]
month_values = all_month.values
return fields, month_values
def csv_feature_extraction(year, month, day):
'''
Considers the data which only meet the criteria, year, month, value
Parameters : `year`, `month`, `day`
Return : `list`
'''
fields, month_values = extract_month_values()
extraction = [fields]
for vals in month_values:
if vals[0].year == year and vals[0].month == month and vals[0].day == day:
if vals[4] >= 4.5: # magnitude > 1
extraction.append(vals)
return extraction
def day_wise_extraction(year, month, day):
'''
Writes the data which is selected as per the input into a CSV file.
Parameters : `year`, `month`, `day`
Return : `pandas DataFrame`
'''
extraction = csv_feature_extraction(year, month, day)
with open('month_day.csv', 'w') as extract:
writer = csv.writer(extract)
writer.writerows(extraction)
def get_dates_sorted():
'''
Sort the dates
Parameters : `None`
Return : `list`
'''
_, month_values = extract_month_values()
all_dates = []
for each_date in month_values:
all_dates.append(str(each_date[0].date()))
timestamps = sorted(list(set(all_dates)))
return timestamps
timestamps = get_dates_sorted()
date_start = dt.datetime.strptime(timestamps[0], '%Y-%m-%d')
date_end = dt.datetime.strptime(timestamps[len(timestamps)-1], '%Y-%m-%d')
def place_wise_extraction(place_name):
'''
This function is useful for plotting as per the place name chosen.
Parameters : `place_name` --> Alaska, Japan ...
Return : `pandas DataFrame`
'''
all_month = entire_month.copy()
all_places = all_month['place'].tolist()
u_regions, _, _ = extract_places_regions(all_places) # specific last name
if place_name in u_regions:
entire_place = all_month[all_month['place'].str.contains(place_name)]
return entire_place
else:
entire_world = all_month[all_month['mag'] > 1]
return entire_world
def history_eq(eq_some, zoom_value):
'''
This function basically reduces redundancy.
Parameters : `eq_some`, `zoom_value`
Return : `tuple`
'''
lats = eq_some['latitude'].tolist()
lons = eq_some['longitude'].tolist()
places = eq_some['place'].tolist()
mags = ['Magnitude : ' + str(i) for i in eq_some['mag']]
mag_size = [float(i) * radius_multiplier['outer'] for i in eq_some['mag']]
depths = ['Depth : ' + str(i) for i in eq_some['depth']]
info = [places[i] + '<br>' + mags[i] + '<br>' + depths[i] for i in range(len(places))]
zooming = zoom_value
return lats, lons, places, mags, mag_size, depths, info, zooming
| 27.696429 | 101 | 0.705029 | import csv
import base64
import pandas as pd
import datetime as dt
from realtime_details import (extract_places_regions, radius_multiplier)
logo_image = 'cartoon-globe.png'
en_logo = base64.b64encode(open(logo_image, 'rb').read())
entire_month = pd.read_csv('https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.csv')
def extract_month_values():
'''
Takes the entire data in a list -> [ [], [], [] ]
Parameters : `None`
Return : `list`
'''
all_month = entire_month.copy()
time = pd.to_datetime(all_month['time'])
all_month['time'] = time
fields = [field for field in all_month]
month_values = all_month.values
return fields, month_values
def csv_feature_extraction(year, month, day):
'''
Considers the data which only meet the criteria, year, month, value
Parameters : `year`, `month`, `day`
Return : `list`
'''
fields, month_values = extract_month_values()
extraction = [fields]
for vals in month_values:
if vals[0].year == year and vals[0].month == month and vals[0].day == day:
if vals[4] >= 4.5: # magnitude > 1
extraction.append(vals)
return extraction
def day_wise_extraction(year, month, day):
'''
Writes the data which is selected as per the input into a CSV file.
Parameters : `year`, `month`, `day`
Return : `pandas DataFrame`
'''
extraction = csv_feature_extraction(year, month, day)
with open('month_day.csv', 'w') as extract:
writer = csv.writer(extract)
writer.writerows(extraction)
def get_dates_sorted():
'''
Sort the dates
Parameters : `None`
Return : `list`
'''
_, month_values = extract_month_values()
all_dates = []
for each_date in month_values:
all_dates.append(str(each_date[0].date()))
timestamps = sorted(list(set(all_dates)))
return timestamps
timestamps = get_dates_sorted()
date_start = dt.datetime.strptime(timestamps[0], '%Y-%m-%d')
date_end = dt.datetime.strptime(timestamps[len(timestamps)-1], '%Y-%m-%d')
def place_wise_extraction(place_name):
'''
This function is useful for plotting as per the place name chosen.
Parameters : `place_name` --> Alaska, Japan ...
Return : `pandas DataFrame`
'''
all_month = entire_month.copy()
all_places = all_month['place'].tolist()
u_regions, _, _ = extract_places_regions(all_places) # specific last name
if place_name in u_regions:
entire_place = all_month[all_month['place'].str.contains(place_name)]
return entire_place
else:
entire_world = all_month[all_month['mag'] > 1]
return entire_world
def history_eq(eq_some, zoom_value):
'''
This function basically reduces redundancy.
Parameters : `eq_some`, `zoom_value`
Return : `tuple`
'''
lats = eq_some['latitude'].tolist()
lons = eq_some['longitude'].tolist()
places = eq_some['place'].tolist()
mags = ['Magnitude : ' + str(i) for i in eq_some['mag']]
mag_size = [float(i) * radius_multiplier['outer'] for i in eq_some['mag']]
depths = ['Depth : ' + str(i) for i in eq_some['depth']]
info = [places[i] + '<br>' + mags[i] + '<br>' + depths[i] for i in range(len(places))]
zooming = zoom_value
return lats, lons, places, mags, mag_size, depths, info, zooming
| 0 | 0 | 0 |
4df5523457e630581a1069e4f7d2dc62993f436a | 751 | py | Python | pynapl/Util.py | Dyalog/pynapl | 8b17bceda64b182cf89f4c9b7b77580ec9daf2ed | [
"MIT"
] | 38 | 2017-12-26T08:21:46.000Z | 2022-03-24T21:30:23.000Z | pynapl/Util.py | Dyalog/pynapl | 8b17bceda64b182cf89f4c9b7b77580ec9daf2ed | [
"MIT"
] | 15 | 2018-02-18T08:03:15.000Z | 2022-03-13T17:38:19.000Z | pynapl/Util.py | Dyalog/pynapl | 8b17bceda64b182cf89f4c9b7b77580ec9daf2ed | [
"MIT"
] | 10 | 2018-02-18T07:53:09.000Z | 2022-03-11T13:40:35.000Z | # Utility functions
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from functools import reduce
import operator
def product(seq):
"""The product of a sequence of numbers"""
return reduce(operator.__mul__, seq, 1)
def scan_reverse(f, arr):
"""Scan over a list in reverse, using a function"""
r=list(arr)
for i in reversed(range(len(r))[1:]):
r[i-1] = f(r[i-1],r[i])
return r
def extend(arr,length):
"""Extend a list APL-style"""
if len(arr) >= length: return arr[:length]
else:
r=arr[:]
while length-len(r) >= len(arr):
r.extend(arr)
else:
r.extend(arr[:length-len(r)])
return r
| 22.088235 | 55 | 0.621838 | # Utility functions
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from functools import reduce
import operator
def product(seq):
"""The product of a sequence of numbers"""
return reduce(operator.__mul__, seq, 1)
def scan_reverse(f, arr):
"""Scan over a list in reverse, using a function"""
r=list(arr)
for i in reversed(range(len(r))[1:]):
r[i-1] = f(r[i-1],r[i])
return r
def extend(arr,length):
"""Extend a list APL-style"""
if len(arr) >= length: return arr[:length]
else:
r=arr[:]
while length-len(r) >= len(arr):
r.extend(arr)
else:
r.extend(arr[:length-len(r)])
return r
| 0 | 0 | 0 |
83dabb736d1f87e4dc57532d8e843328a964e148 | 1,271 | py | Python | djangocms_misc/basic/middleware/redirect_subpage.py | bnzk/djangocms-tools | 6e5702594a7cd8c87b92ed46e27a72ff09257fd5 | [
"MIT"
] | 2 | 2016-09-23T14:15:35.000Z | 2016-10-13T07:10:05.000Z | djangocms_misc/basic/middleware/redirect_subpage.py | bnzk/djangocms-tools | 6e5702594a7cd8c87b92ed46e27a72ff09257fd5 | [
"MIT"
] | 28 | 2017-06-16T09:41:55.000Z | 2022-02-08T15:50:04.000Z | djangocms_misc/basic/middleware/redirect_subpage.py | benzkji/djangocms-tools | 6e5702594a7cd8c87b92ed46e27a72ff09257fd5 | [
"MIT"
] | 1 | 2017-04-04T12:16:50.000Z | 2017-04-04T12:16:50.000Z | from django.shortcuts import redirect
| 41 | 107 | 0.637293 | from django.shortcuts import redirect
class RedirectFirstSubpageMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
def process_view(self, request, view_func, view_args, view_kwargs):
if getattr(request, 'current_page', None):
the_page = request.current_page
the_redirect = the_page.get_redirect()
# some more checks if in a cms view!
if view_func.__name__ == 'details' and "slug" in view_kwargs and the_redirect == "/firstchild":
if getattr(request.current_page, 'get_child_pages', None):
subpages = request.current_page.get_child_pages()
else:
subpages = request.current_page.children.all()
if len(subpages):
return redirect(subpages[0].get_absolute_url(), permanent=True)
return None
| 1,104 | 24 | 104 |
f713bd985d707ee952fc4906911d895395ad2c03 | 2,325 | py | Python | google-cloud-sdk/lib/surface/runtime_config/configs/list.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/runtime_config/configs/list.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/surface/runtime_config/configs/list.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The configs list command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.runtime_config import util
from googlecloudsdk.calliope import base
class List(base.ListCommand):
"""List runtime-config resources within the current project.
This command lists runtime-config resources for the current project.
"""
DEFAULT_PAGE_SIZE = 100
detailed_help = {
'EXAMPLES': """\
To list all runtime-config resources for the current project, run:
$ {command}
The --filter parameter can be used to filter results based on content.
For example, to list all runtime-config resources with names that
begin with 'foo', run:
$ {command} --filter 'name=foo*'
""",
}
@staticmethod
def Run(self, args):
"""Run 'runtime-configs list'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Yields:
The list of runtime-config resources.
Raises:
HttpException: An http error response was received while executing api
request.
"""
config_client = util.ConfigClient()
messages = util.Messages()
project = util.Project()
request = messages.RuntimeconfigProjectsConfigsListRequest(
parent=util.ProjectPath(project),
)
page_size = args.page_size or self.DEFAULT_PAGE_SIZE
results = list_pager.YieldFromList(
config_client, request, field='configs',
batch_size_attribute='pageSize', limit=args.limit,
batch_size=page_size,
)
for result in results:
yield util.FormatConfig(result)
| 28.703704 | 80 | 0.695914 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The configs list command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.runtime_config import util
from googlecloudsdk.calliope import base
class List(base.ListCommand):
"""List runtime-config resources within the current project.
This command lists runtime-config resources for the current project.
"""
DEFAULT_PAGE_SIZE = 100
detailed_help = {
'EXAMPLES': """\
To list all runtime-config resources for the current project, run:
$ {command}
The --filter parameter can be used to filter results based on content.
For example, to list all runtime-config resources with names that
begin with 'foo', run:
$ {command} --filter 'name=foo*'
""",
}
@staticmethod
def Args(parser):
parser.display_info.AddFormat('table(name, description)')
def Run(self, args):
"""Run 'runtime-configs list'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Yields:
The list of runtime-config resources.
Raises:
HttpException: An http error response was received while executing api
request.
"""
config_client = util.ConfigClient()
messages = util.Messages()
project = util.Project()
request = messages.RuntimeconfigProjectsConfigsListRequest(
parent=util.ProjectPath(project),
)
page_size = args.page_size or self.DEFAULT_PAGE_SIZE
results = list_pager.YieldFromList(
config_client, request, field='configs',
batch_size_attribute='pageSize', limit=args.limit,
batch_size=page_size,
)
for result in results:
yield util.FormatConfig(result)
| 58 | 0 | 24 |
57e12c78020bb13b6ec8ef9b93d238d3185368bf | 171 | py | Python | symengine/sympy_compat.py | Midnighter/symengine.py | 7b158d20013c91d229fd574ca68e6c47e3568b37 | [
"MIT"
] | 133 | 2015-10-10T06:04:37.000Z | 2022-03-23T21:20:51.000Z | symengine/sympy_compat.py | Midnighter/symengine.py | 7b158d20013c91d229fd574ca68e6c47e3568b37 | [
"MIT"
] | 318 | 2015-08-24T16:36:35.000Z | 2022-03-31T04:17:30.000Z | symengine/sympy_compat.py | Midnighter/symengine.py | 7b158d20013c91d229fd574ca68e6c47e3568b37 | [
"MIT"
] | 62 | 2015-08-24T16:13:15.000Z | 2022-01-02T01:39:17.000Z | import warnings
warnings.warn("sympy_compat module is deprecated. Use `import symengine` instead", DeprecationWarning,
stacklevel=2)
from symengine import *
| 34.2 | 102 | 0.754386 | import warnings
warnings.warn("sympy_compat module is deprecated. Use `import symengine` instead", DeprecationWarning,
stacklevel=2)
from symengine import *
| 0 | 0 | 0 |
fbff9b3497de83af65b26c058ce9f084fbd8204b | 386 | py | Python | extension/httpfs/httpfs_config.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | [
"MIT"
] | 2,816 | 2018-06-26T18:52:52.000Z | 2021-04-06T10:39:15.000Z | extension/httpfs/httpfs_config.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | [
"MIT"
] | 1,310 | 2021-04-06T16:04:52.000Z | 2022-03-31T13:52:53.000Z | extension/httpfs/httpfs_config.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | [
"MIT"
] | 270 | 2021-04-09T06:18:28.000Z | 2022-03-31T11:55:37.000Z | import os
# list all include directories
include_directories = [os.path.sep.join(x.split('/')) for x in ['extension/httpfs/include', 'third_party/picohash', 'third_party/httplib']]
# source files
source_files = [os.path.sep.join(x.split('/')) for x in ['extension/httpfs/crypto.cpp', 'extension/httpfs/httpfs.cpp', 'extension/httpfs/httpfs-extension.cpp', 'extension/httpfs/s3fs.cpp']]
| 64.333333 | 189 | 0.743523 | import os
# list all include directories
include_directories = [os.path.sep.join(x.split('/')) for x in ['extension/httpfs/include', 'third_party/picohash', 'third_party/httplib']]
# source files
source_files = [os.path.sep.join(x.split('/')) for x in ['extension/httpfs/crypto.cpp', 'extension/httpfs/httpfs.cpp', 'extension/httpfs/httpfs-extension.cpp', 'extension/httpfs/s3fs.cpp']]
| 0 | 0 | 0 |
96ef26a9a0782458325c1472bf2516e8184ef8e3 | 6,987 | py | Python | infra_macros/macro_lib/convert/container_image/compiler/dep_graph.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | infra_macros/macro_lib/convert/container_image/compiler/dep_graph.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | infra_macros/macro_lib/convert/container_image/compiler/dep_graph.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
'''
To start, read the docblock of `provides.py`. The code in this file verifies
that a set of Items can be correctly installed (all requirements are
satisfied, etc). It then computes an installation order such that every
Item is installed only after all of the Items that match its Requires have
already been installed. This is known as dependency order or topological
sort.
'''
from collections import namedtuple
# To build the item-to-item dependency graph, we need to first build up a
# complete mapping of {path, {items, requiring, it}}. To validate that
# every requirement is satisfied, it is similarly useful to have access to a
# mapping of {path, {what, it, provides}}. Lastly, we have to
# simultaneously examine a single item's requires() and provides() for the
# purposes of sanity checks.
#
# To avoid re-evaluating ImageItem.{provides,requires}(), we'll just store
# everything in these data structures:
ItemProv = namedtuple('ItemProv', ['provides', 'item'])
# NB: since the item is part of the tuple, we'll store identical
# requirements that come from multiple items multiple times. This is OK.
ItemReq = namedtuple('ItemReq', ['requires', 'item'])
ItemReqsProvs = namedtuple('ItemReqsProvs', ['item_provs', 'item_reqs'])
class ValidatedReqsProvs:
'''
Given a set of Items (see the docblocks of `item.py` and `provides.py`),
computes {'path': {ItemReqProv{}, ...}} so that we can build the
DependencyGraph for these Items. In the process validates that:
- No one item provides or requires the same path twice,
- Each path is provided by at most one item (could be relaxed later),
- Every Requires is matched by a Provides at that path.
'''
@staticmethod
@staticmethod
class DependencyGraph:
'''
Given an iterable of ImageItems, validates their requires / provides
structures, and populates indexes describing dependencies between items.
The indexes make it easy to topologically sort the items.
'''
| 42.865031 | 79 | 0.642908 | #!/usr/bin/env python3
'''
To start, read the docblock of `provides.py`. The code in this file verifies
that a set of Items can be correctly installed (all requirements are
satisfied, etc). It then computes an installation order such that every
Item is installed only after all of the Items that match its Requires have
already been installed. This is known as dependency order or topological
sort.
'''
from collections import namedtuple
# To build the item-to-item dependency graph, we need to first build up a
# complete mapping of {path, {items, requiring, it}}. To validate that
# every requirement is satisfied, it is similarly useful to have access to a
# mapping of {path, {what, it, provides}}. Lastly, we have to
# simultaneously examine a single item's requires() and provides() for the
# purposes of sanity checks.
#
# To avoid re-evaluating ImageItem.{provides,requires}(), we'll just store
# everything in these data structures:
ItemProv = namedtuple('ItemProv', ['provides', 'item'])
# NB: since the item is part of the tuple, we'll store identical
# requirements that come from multiple items multiple times. This is OK.
ItemReq = namedtuple('ItemReq', ['requires', 'item'])
ItemReqsProvs = namedtuple('ItemReqsProvs', ['item_provs', 'item_reqs'])
class ValidatedReqsProvs:
'''
Given a set of Items (see the docblocks of `item.py` and `provides.py`),
computes {'path': {ItemReqProv{}, ...}} so that we can build the
DependencyGraph for these Items. In the process validates that:
- No one item provides or requires the same path twice,
- Each path is provided by at most one item (could be relaxed later),
- Every Requires is matched by a Provides at that path.
'''
def __init__(self, items):
self.path_to_reqs_provs = {}
for item in items:
path_to_req_or_prov = {} # Checks req/prov are sane within an item
for req in item.requires():
self._add_to_map(
path_to_req_or_prov, req, item,
add_to_map_fn=self._add_to_req_map,
)
for prov in item.provides():
self._add_to_map(
path_to_req_or_prov, prov, item,
add_to_map_fn=self._add_to_prov_map,
)
# Validate that all requirements are satisfied.
for path, reqs_provs in self.path_to_reqs_provs.items():
for item_req in reqs_provs.item_reqs:
for item_prov in reqs_provs.item_provs:
if item_prov.provides.matches(
self.path_to_reqs_provs, item_req.requires
):
break
else:
raise RuntimeError(
'At {}: nothing in {} matches the requirement {}'
.format(path, reqs_provs.item_provs, item_req)
)
@staticmethod
def _add_to_req_map(reqs_provs, req, item):
reqs_provs.item_reqs.add(ItemReq(requires=req, item=item))
@staticmethod
def _add_to_prov_map(reqs_provs, prov, item):
# I see no reason to allow provides-provides collisions.
if len(reqs_provs.item_provs):
raise RuntimeError(
f'Both {reqs_provs.item_provs} and {prov} from {item} provide '
'the same path'
)
reqs_provs.item_provs.add(ItemProv(provides=prov, item=item))
def _add_to_map(
self, path_to_req_or_prov, req_or_prov, item, add_to_map_fn
):
# One ImageItem should not emit provides / requires clauses that
# collide on the path. Such duplication can always be avoided by
# the item not emitting the "requires" clause that it knows it
# provides. Failing to enforce this invariant would make it easy to
# bloat dependency graphs unnecessarily.
other = path_to_req_or_prov.get(req_or_prov.path)
assert other is None, 'Same path in {}, {}'.format(req_or_prov, other)
path_to_req_or_prov[req_or_prov.path] = req_or_prov
add_to_map_fn(
self.path_to_reqs_provs.setdefault(
req_or_prov.path,
ItemReqsProvs(item_provs=set(), item_reqs=set()),
),
req_or_prov,
item
)
class DependencyGraph:
'''
Given an iterable of ImageItems, validates their requires / provides
structures, and populates indexes describing dependencies between items.
The indexes make it easy to topologically sort the items.
'''
def __init__(self, items):
# Without deduping, dependency diamonds would cause a lot of
# redundant work below. Below, we also rely on mutating this set.
items = set(items)
# An item is only added here if it requires at least one other item,
# otherwise it goes in `.items_without_predecessors`.
self.item_to_predecessors = {} # {item: {items, it, requires}}
self.predecessor_to_items = {} # {item: {items, requiring, it}}
# For each path, treat items that provide something at that path as
# predecessors of items that require something at the path.
for _path, rp in ValidatedReqsProvs(items).path_to_reqs_provs.items():
for item_prov in rp.item_provs:
requiring_items = self.predecessor_to_items.setdefault(
item_prov.item, set()
)
for item_req in rp.item_reqs:
requiring_items.add(item_req.item)
self.item_to_predecessors.setdefault(
item_req.item, set()
).add(item_prov.item)
# We own `items`, so reuse this set to find dependency-less items.
items.difference_update(self.item_to_predecessors.keys())
self.items_without_predecessors = items
def dependency_order_items(items):
dg = DependencyGraph(items)
while dg.items_without_predecessors:
# "Install" an item that has no unsatisfied dependencies.
item = dg.items_without_predecessors.pop()
yield item
# All items, which had `item` was a dependency, must have their
# "predecessors" sets updated
for requiring_item in dg.predecessor_to_items[item]:
predecessors = dg.item_to_predecessors[requiring_item]
predecessors.remove(item)
if not predecessors:
dg.items_without_predecessors.add(requiring_item)
del dg.item_to_predecessors[requiring_item] # Won't be used.
# We won't need this value again, and this lets us detect cycles.
del dg.predecessor_to_items[item]
# Initially, every item was indexed here. If there's anything left over,
# we must have a cycle. Future: print a cycle to simplify debugging.
assert not dg.predecessor_to_items, \
'Cycle in {}'.format(dg.predecessor_to_items)
| 4,813 | 0 | 155 |
46b627b97e720aa977f3c5bcb153120c1579cf5b | 744 | py | Python | comply/rules/experimental/symbol_used.py | jhauberg/comply | 0461ab96b85a1f368839aae8a5029ece3a5e4ed8 | [
"MIT"
] | null | null | null | comply/rules/experimental/symbol_used.py | jhauberg/comply | 0461ab96b85a1f368839aae8a5029ece3a5e4ed8 | [
"MIT"
] | 1 | 2018-11-02T11:55:12.000Z | 2018-11-02T11:55:12.000Z | comply/rules/experimental/symbol_used.py | jhauberg/comply | 0461ab96b85a1f368839aae8a5029ece3a5e4ed8 | [
"MIT"
] | null | null | null | # coding=utf-8
from comply.rules.rule import *
class SymbolUsed(Rule):
""" Always list used symbols as needed/required.<br/><br/>**_Not implemented._**
If your code is using a symbol, but not explicitly telling where it got it from, you might have
a hard time figuring out just how far your code reaches out.
<br/><br/>
See <tt>require-symbols</tt>.
"""
@property
@property
| 24 | 99 | 0.591398 | # coding=utf-8
from comply.rules.rule import *
class SymbolUsed(Rule):
    """ Always list used symbols as needed/required.<br/><br/>**_Not implemented._**
    If your code is using a symbol, but not explicitly telling where it got it from, you might have
    a hard time figuring out just how far your code reaches out.
    <br/><br/>
    See <tt>require-symbols</tt>.
    """

    def __init__(self):
        # Identical rule metadata to the original; only call style differs.
        super().__init__(
            name='symbol-used',
            description="Used symbol '{symbol}' not listed as needed",
            suggestion="Add symbol '{symbol}' to list.")

    @property
    def triggers(self):
        # No trigger patterns: the rule is not implemented yet.
        return []

    @property
    def nontriggers(self):
        # No non-trigger patterns either, for the same reason.
        return []
| 253 | 0 | 79 |
14823eda1caec298020912ea790d52c54899a162 | 128 | py | Python | reviewboard/reviews/evolutions/__init__.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | 1 | 2019-01-16T11:59:40.000Z | 2019-01-16T11:59:40.000Z | reviewboard/reviews/evolutions/__init__.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | null | null | null | reviewboard/reviews/evolutions/__init__.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | null | null | null | SEQUENCE = [
'change_descriptions',
'last_review_timestamp',
'shipit_count',
'default_reviewer_repositories',
]
| 18.285714 | 36 | 0.695313 | SEQUENCE = [
'change_descriptions',
'last_review_timestamp',
'shipit_count',
'default_reviewer_repositories',
]
| 0 | 0 | 0 |
011857a5ac9a97988abf67e805a38fce9cb2cd87 | 657 | py | Python | django_db_constraints/apps.py | rrauenza/django-db-constraints | 68c154c7ce13ca66dc7ccef0378e30ae59a583cd | [
"MIT"
] | 27 | 2017-08-04T14:25:57.000Z | 2019-02-14T21:57:03.000Z | django_db_constraints/apps.py | rrauenza/django-db-constraints | 68c154c7ce13ca66dc7ccef0378e30ae59a583cd | [
"MIT"
] | 6 | 2017-10-28T15:12:18.000Z | 2018-12-27T17:16:32.000Z | django_db_constraints/apps.py | rapilabs/django-db-constraints | b4308ef4b239a94ea9c6ace301daad0084912ac9 | [
"MIT"
] | 4 | 2017-12-14T21:37:35.000Z | 2018-07-09T09:05:10.000Z | from django.apps import AppConfig
from django.db.migrations import state
from django.db.models import options
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('db_constraints',)
state.DEFAULT_NAMES = options.DEFAULT_NAMES
| 36.5 | 85 | 0.802131 | from django.apps import AppConfig
from django.db.migrations import state
from django.db.models import options
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('db_constraints',)
state.DEFAULT_NAMES = options.DEFAULT_NAMES
class DjangoDbConstraintsConfig(AppConfig):
    """App config that swaps Django's migration autodetector for one aware of
    the custom ``db_constraints`` model option (registered at module level)."""
    name = 'django_db_constraints'
    def ready(self):
        """Monkey-patch the makemigrations/migrate commands at app startup.

        Imports are local so the Django command modules are only loaded once
        app loading has reached ``ready()``, not at module import time.
        """
        from django.core.management.commands import makemigrations, migrate  # noqa
        from .autodetector import MigrationAutodetectorWithDbConstraints  # noqa
        makemigrations.MigrationAutodetector = MigrationAutodetectorWithDbConstraints
        migrate.MigrationAutodetector = MigrationAutodetectorWithDbConstraints
| 326 | 84 | 23 |
667ad8868b2bc263f679704dd0506b629528a854 | 6,354 | py | Python | steeve/fine_tune.py | RandLive/Avito-Demand-Prediction-Challenge | eb2955c6cb799907071d8bbf7b31b73b163c604f | [
"MIT"
] | null | null | null | steeve/fine_tune.py | RandLive/Avito-Demand-Prediction-Challenge | eb2955c6cb799907071d8bbf7b31b73b163c604f | [
"MIT"
] | null | null | null | steeve/fine_tune.py | RandLive/Avito-Demand-Prediction-Challenge | eb2955c6cb799907071d8bbf7b31b73b163c604f | [
"MIT"
] | null | null | null | import pickle
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from ImageGenerator import *
from sklearn.model_selection import KFold
from keras.applications import VGG16
from keras.applications.resnet50 import ResNet50
from keras.layers import Input, Dropout, Dense, concatenate, CuDNNGRU, Embedding, Flatten, Activation, BatchNormalization, PReLU
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import keras.backend as K
from tqdm import tqdm
from nltk import ngrams
from keras.backend.tensorflow_backend import set_session
from sklearn.metrics import mean_squared_error
import os
import tensorflow as tf
from keras import models
from keras import layers
from keras import optimizers
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
train_dir = '../input/train_jpg/data/competition_files/train_jpg_ds/'
test_dir = '../input/test_jpg/data/competition_files/test_jpg_ds/'
# restrict gpu usage
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
import pickle
with open('../input/train_ridge.p', 'rb') as f:
train = pickle.load(f)
with open('../input/test_ridge.p', 'rb') as f:
test = pickle.load(f)
# train = train.iloc[:10000]
nfolds=10
fname='vgg_base'
epochs= 30
model = get_model()
val_predict = train_bagging(train, train.deal_probability.values, nfolds)
# print(f"model list length: {len(model_list)}")
# fname = 'des_word_svd_200_char_svd_1000_title_200_resnet50_500_lgb_1fold'
print('storing test prediction', flush=True)
for index in tqdm(range(nfold)):
model_path = f'../weights/{fname}_fold{index}.hdf5'
model.load_weights(model_path)
if index == 0:
y_pred = model.predict(x_test)
else:
y_pred *= model.predict(x_test)
# y_pred += model.predict(x_test)
y_pred = np.clip(y_pred, 0, 1)
y_pred = y_pred **( 1.0/ (nfold))
print('storing test prediction', flush=True)
sub = pd.read_csv('../input/sample_submission.csv')
sub['deal_probability'] = y_pred
sub['deal_probability'].clip(0.0, 1.0, inplace=True)
sub.to_csv(f'../output/{fname}_test.csv', index=False)
print('storing oof prediction', flush=True)
train_data = pd.read_csv('../input/train.csv.zip')
label = ['deal_probability']
train_user_ids = train_data.user_id.values
train_item_ids = train_data.item_id.values
train_item_ids = train_item_ids.reshape(len(train_item_ids), 1)
train_user_ids = train_user_ids.reshape(len(train_user_ids), 1)
val_predicts = pd.DataFrame(data=val_predict, columns= label)
val_predicts['user_id'] = train_user_ids
val_predicts['item_id'] = train_item_ids
val_predicts.to_csv(f'../output/{fname}_train.csv', index=False)
| 33.97861 | 128 | 0.690746 | import pickle
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from ImageGenerator import *
from sklearn.model_selection import KFold
from keras.applications import VGG16
from keras.applications.resnet50 import ResNet50
from keras.layers import Input, Dropout, Dense, concatenate, CuDNNGRU, Embedding, Flatten, Activation, BatchNormalization, PReLU
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import keras.backend as K
from tqdm import tqdm
from nltk import ngrams
from keras.backend.tensorflow_backend import set_session
from sklearn.metrics import mean_squared_error
import os
import tensorflow as tf
from keras import models
from keras import layers
from keras import optimizers
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
train_dir = '../input/train_jpg/data/competition_files/train_jpg_ds/'
test_dir = '../input/test_jpg/data/competition_files/test_jpg_ds/'
# restrict gpu usage
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
def get_model():
    """Build and compile the transfer-learning regressor.

    Returns a Keras Sequential model: a frozen ImageNet-pretrained VGG16
    convolutional base (160x160x3 input), then BatchNorm, Flatten, a 256-unit
    ReLU layer with dropout, and a single sigmoid output, compiled with MSE
    loss and SGD (lr=1e-4, momentum=0.9).
    """
    # Convolutional base: VGG16 without its dense head.
    vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(160, 160, 3))
    # Freeze the ENTIRE base. (The original comment claimed the last 4 layers
    # stayed trainable, but this loop freezes every layer -- the comment was
    # wrong, not the code.)
    for layer in vgg_conv.layers:
        layer.trainable = False
    model = models.Sequential()
    model.add(vgg_conv)
    model.add(BatchNormalization())
    # New trainable head.
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    # NOTE: the original created an unused Adam optimizer here; compilation
    # always used SGD, so the dead assignment was removed.
    model.compile(loss="mse", optimizer=optimizers.SGD(lr=1e-4, momentum=0.9))
    return model
def train_bagging(X, y, fold_count):
    """K-fold training loop for the image model; returns out-of-fold predictions.

    NOTE(review): relies on several module-level globals (`train_dir`, `fname`,
    `epochs`, `x_test`) -- confirm they exist before calling. As written,
    `gc` is used but never imported and `rmse_list` is appended to although
    its initialization is commented out, so any code past the first fold
    would raise NameError; the `exit()` below masks this by stopping early.
    """
    kf = KFold(n_splits=fold_count, random_state=42, shuffle=True)
    # skf = StratifiedKFold(n_splits=fold_count, random_state=None, shuffle=False)
    fold_id = -1
    # model_list = []
    val_predict= np.zeros(y.shape)
    # rmse_list = []
    for train_index, test_index in kf.split(y):
        fold_id +=1
        # NOTE(review): terminates the whole process after fold 0 --
        # presumably a debugging shortcut; confirm before relying on full CV.
        if fold_id >= 1: exit()
        print(f'fold number: {fold_id}', flush=True)
        # x_train, x_val = X[train_index], X[test_index]
        # print(X.head())
        # print(X.index)
        x_train, x_val = X.iloc[train_index], X.iloc[test_index]
        y_train, y_val = y[train_index], y[test_index]
        # In-place set_index mutates the fold slices taken from the caller's frame.
        x_train.set_index('item_id', inplace=True)
        x_val.set_index('item_id', inplace=True)
        train_item_ids = x_train.index
        val_item_ids = x_val.index
        train_image_ids = x_train.image
        val_image_ids = x_val.image
        train_labels = x_train.deal_probability
        val_labels = x_val.deal_probability
        # print(val_labels)
        train_gen = ImageGenerator(train_dir, train_item_ids, train_image_ids, train_labels)
        val_gen = ImageGenerator(train_dir, val_item_ids, val_image_ids, val_labels)
        model_path = f'../weights/{fname}_fold{fold_id}.hdf5'
        model = get_model()
        early= EarlyStopping(monitor='val_loss', patience=3, verbose=0, mode='auto')
        checkpoint = ModelCheckpoint(model_path, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
        # rlrop = ReduceLROnPlateau(monitor='val_loss',mode='auto',patience=2,verbose=1,factor=0.1,cooldown=0,min_lr=1e-6)
        callbacks = [early, checkpoint]
        model.fit_generator(train_gen, validation_data=val_gen, callbacks=callbacks, epochs=epochs, verbose=1)
        # Reload the best checkpointed weights before predicting.
        model.load_weights(model_path)
        y_pred = model.predict(x_val)
        val_predict[test_index] = y_pred[:,0]
        rmse = mean_squared_error(y_val, y_pred) ** 0.5
        train_rmse = mean_squared_error(model.predict(x_train), y_train) ** 0.5
        print(f'train_rmse {train_rmse}')
        print(f'rmse: {rmse}')
        # NOTE(review): `x_test` is not defined in this module -- verify it is
        # created before this function runs.
        y_pred = model.predict(x_test)
        sub = pd.read_csv('../input/sample_submission.csv')
        sub['deal_probability'] = y_pred
        sub['deal_probability'].clip(0.0, 1.0, inplace=True)
        sub.to_csv(f'../output/{fname}_test_fold{fold_id}.csv', index=False)
        del model
        gc.collect()
        rmse_list.append(rmse)
        # model_list.append(model)
    print(f'rmse score avg: {np.mean(rmse_list)}', flush=True)
    return val_predict
# --- Script entry: load features, train per-fold models, write submissions. ---
import pickle
with open('../input/train_ridge.p', 'rb') as f:
    train = pickle.load(f)
with open('../input/test_ridge.p', 'rb') as f:
    test = pickle.load(f)
# train = train.iloc[:10000]
nfolds=10
fname='vgg_base'
epochs= 30
model = get_model()
val_predict = train_bagging(train, train.deal_probability.values, nfolds)
# print(f"model list length: {len(model_list)}")
# fname = 'des_word_svd_200_char_svd_1000_title_200_resnet50_500_lgb_1fold'
print('storing test prediction', flush=True)
# NOTE(review): `nfold` is undefined -- the variable defined above is
# `nfolds` -- and `x_test` is never built in this script, so this loop
# would raise NameError as written.
for index in tqdm(range(nfold)):
    model_path = f'../weights/{fname}_fold{index}.hdf5'
    model.load_weights(model_path)
    if index == 0:
        y_pred = model.predict(x_test)
    else:
        y_pred *= model.predict(x_test)
        # y_pred += model.predict(x_test)
# Geometric mean of the per-fold predictions, clipped into [0, 1] first.
y_pred = np.clip(y_pred, 0, 1)
y_pred = y_pred **( 1.0/ (nfold))
print('storing test prediction', flush=True)
sub = pd.read_csv('../input/sample_submission.csv')
sub['deal_probability'] = y_pred
sub['deal_probability'].clip(0.0, 1.0, inplace=True)
sub.to_csv(f'../output/{fname}_test.csv', index=False)
print('storing oof prediction', flush=True)
# Attach user/item ids to the out-of-fold predictions for later stacking.
train_data = pd.read_csv('../input/train.csv.zip')
label = ['deal_probability']
train_user_ids = train_data.user_id.values
train_item_ids = train_data.item_id.values
train_item_ids = train_item_ids.reshape(len(train_item_ids), 1)
train_user_ids = train_user_ids.reshape(len(train_user_ids), 1)
val_predicts = pd.DataFrame(data=val_predict, columns= label)
val_predicts['user_id'] = train_user_ids
val_predicts['item_id'] = train_item_ids
val_predicts.to_csv(f'../output/{fname}_train.csv', index=False)
| 3,470 | 0 | 46 |
d130ceb73855155fd1cebc90aa55172fad5a0ce7 | 850 | py | Python | esphome/components/mcp4728/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/mcp4728/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/mcp4728/__init__.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c
from esphome.const import CONF_ID
CODEOWNERS = ["@berfenger"]
DEPENDENCIES = ["i2c"]
MULTI_CONF = True
CONF_STORE_IN_EEPROM = "store_in_eeprom"
mcp4728_ns = cg.esphome_ns.namespace("mcp4728")
MCP4728Component = mcp4728_ns.class_("MCP4728Component", cg.Component, i2c.I2CDevice)
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(MCP4728Component),
cv.Optional(CONF_STORE_IN_EEPROM, default=False): cv.boolean,
}
)
.extend(cv.COMPONENT_SCHEMA)
.extend(i2c.i2c_device_schema(0x60))
)
| 28.333333 | 85 | 0.732941 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c
from esphome.const import CONF_ID
# ESPHome integration metadata for the MCP4728 component.
CODEOWNERS = ["@berfenger"]
DEPENDENCIES = ["i2c"]
MULTI_CONF = True
CONF_STORE_IN_EEPROM = "store_in_eeprom"
# C++ namespace/class bindings used by code generation.
mcp4728_ns = cg.esphome_ns.namespace("mcp4728")
MCP4728Component = mcp4728_ns.class_("MCP4728Component", cg.Component, i2c.I2CDevice)
# Validation schema: component id, optional EEPROM persistence flag, plus
# standard component and I2C device options (default address 0x60).
CONFIG_SCHEMA = (
    cv.Schema(
        {
            cv.GenerateID(): cv.declare_id(MCP4728Component),
            cv.Optional(CONF_STORE_IN_EEPROM, default=False): cv.boolean,
        }
    )
    .extend(cv.COMPONENT_SCHEMA)
    .extend(i2c.i2c_device_schema(0x60))
)
)
async def to_code(config):
    """Generate the C++ setup code for a validated MCP4728 config block."""
    component = cg.new_Pvariable(config[CONF_ID], config[CONF_STORE_IN_EEPROM])
    await cg.register_component(component, config)
    await i2c.register_i2c_device(component, config)
| 171 | 0 | 23 |
41263a9f7b9abe8325cf669faf4ca22f8cd8fa9a | 1,004 | py | Python | pyp/src/main.py | sebwink/learn-rabbitmq | 66081cb9f7bf0adf6d9d9dfd60a497ac80dd2941 | [
"MIT"
] | null | null | null | pyp/src/main.py | sebwink/learn-rabbitmq | 66081cb9f7bf0adf6d9d9dfd60a497ac80dd2941 | [
"MIT"
] | null | null | null | pyp/src/main.py | sebwink/learn-rabbitmq | 66081cb9f7bf0adf6d9d9dfd60a497ac80dd2941 | [
"MIT"
] | null | null | null | import os
import time
import signal
import pika
INTERVAL = int(os.getenv('PYP_INTERVAL', 5))
RABBITMQ_HOST = os.getenv('PYP_RABBITMQ_HOST', 'rabbitmq')
RABBITMQ_VHOST = os.getenv('PYP_RABBITMQ_VHOST')
RABBITMQ_USER = os.getenv('PYP_RABBITMQ_USER')
RABBITMQ_PASS = os.getenv('PYP_RABBITMQ_PASS')
if __name__ == '__main__':
credentials = pika.PlainCredentials(
RABBITMQ_USER,
RABBITMQ_PASS,
)
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=RABBITMQ_HOST,
credentials=credentials,
virtual_host=RABBITMQ_VHOST,
)
)
signal.signal(
signal.SIGTERM,
lambda s, f: connection.close(),
)
channel = connection.channel()
channel.queue_declare(queue='hello')
while True:
time.sleep(INTERVAL)
print(' [x] Sending message.')
channel.basic_publish(
exchange='',
routing_key='hello',
body='Hello World!',
)
| 25.1 | 58 | 0.633466 | import os
import time
import signal
import pika
INTERVAL = int(os.getenv('PYP_INTERVAL', 5))
RABBITMQ_HOST = os.getenv('PYP_RABBITMQ_HOST', 'rabbitmq')
RABBITMQ_VHOST = os.getenv('PYP_RABBITMQ_VHOST')
RABBITMQ_USER = os.getenv('PYP_RABBITMQ_USER')
RABBITMQ_PASS = os.getenv('PYP_RABBITMQ_PASS')
if __name__ == '__main__':
credentials = pika.PlainCredentials(
RABBITMQ_USER,
RABBITMQ_PASS,
)
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=RABBITMQ_HOST,
credentials=credentials,
virtual_host=RABBITMQ_VHOST,
)
)
signal.signal(
signal.SIGTERM,
lambda s, f: connection.close(),
)
channel = connection.channel()
channel.queue_declare(queue='hello')
while True:
time.sleep(INTERVAL)
print(' [x] Sending message.')
channel.basic_publish(
exchange='',
routing_key='hello',
body='Hello World!',
)
| 0 | 0 | 0 |
425af5ebcd09541ac6a3c4123c4422a749d94979 | 1,975 | py | Python | Movement-Transfer/3.4_Pipe_Determine_Diameter.py | Daz-Riza-Seriog/Transport_Phenomena | 822b89556fa56ef57494a318cbb03524e3a4d237 | [
"MIT"
] | 4 | 2021-03-19T00:15:20.000Z | 2021-11-17T11:32:28.000Z | Movement-Transfer/3.4_Pipe_Determine_Diameter.py | Daz-Riza-Seriog/Transport_Phenomena | 822b89556fa56ef57494a318cbb03524e3a4d237 | [
"MIT"
] | null | null | null | Movement-Transfer/3.4_Pipe_Determine_Diameter.py | Daz-Riza-Seriog/Transport_Phenomena | 822b89556fa56ef57494a318cbb03524e3a4d237 | [
"MIT"
] | 1 | 2021-03-22T23:26:50.000Z | 2021-03-22T23:26:50.000Z | # Code made for Sergio Andrés Díaz Ariza
# 29 July 2021
# License MIT
# Transport Phenomena: Pipe find Diameter
from scipy.optimize import minimize
import seaborn as sns
import numpy as np
import time
start_time = time.time()
sns.set()
# Optimice the function for T, and assign constraints to resolve for Rmin,E_cons,C1,C2
Opt = Optimice()
constraint_equal = {'type': 'eq', 'fun': Opt.objective_Colebrook}
constraint_equal1 = {'type': 'eq', 'fun': Opt.constraint_D_eq_f}
constraint_equal2 = {'type': 'eq', 'fun': Opt.constraint_Vavg_eq_D}
constraint = [constraint_equal, constraint_equal1, constraint_equal2]
x0 = [0.5, 1, 1.5]
sol = minimize(Opt.objective_Colebrook, x0, method='SLSQP', constraints=constraint, options={'maxiter': 1000})
print(sol)
print("\nDarcy factor :\t", sol.x[0])
print("\nDiameter:\t", sol.x[1], "[m]")
print("\nVelocity Average:\t", sol.x[2], "[m/s]")
print("\n--- %s seconds ---" % (time.time() - start_time))
| 29.477612 | 110 | 0.575696 | # Code made for Sergio Andrés Díaz Ariza
# 29 July 2021
# License MIT
# Transport Phenomena: Pipe find Diameter
from scipy.optimize import minimize
import seaborn as sns
import numpy as np
import time
start_time = time.time()
sns.set()
# Optimice the function for T, and assign constraints to resolve for Rmin,E_cons,C1,C2
class Optimice:
    """Objective and equality constraints for sizing a pipe diameter.

    The decision vector is x = [darcy_factor, diameter_m, avg_velocity_m_s].
    All physical parameters (roughness, length, viscosity, head drop, flow
    rate) are fixed constants inside each method.
    """

    def objective_Colebrook(self, x):
        """Residual of the Colebrook equation; zero when x is consistent."""
        eps = 2.6e-4  # Roughness [m]
        L = 1200  # Length of pipe [m]
        niu = 1.3e-7  # Cinematic Viscosity [m^2/s]
        DP = 2  # Head Drop [m]
        V = 0.55  # Caudal [m^3/s]
        darcy = x[0]  # Darcy factor
        diameter = x[1]  # Diameter
        v_avg = x[2]  # Velocity Average
        roughness_term = (eps / (darcy * L * (v_avg ** 2) / DP * 2)) / 3.7
        reynolds_term = 2.51 / ((V * diameter / niu) * np.sqrt(darcy))
        return (1 / np.sqrt(darcy)) + (2.0 * np.log10(roughness_term + reynolds_term))

    def constraint_D_eq_f(self, x):
        """Ties the diameter to the head-loss relation; zero when satisfied."""
        L = 1200  # Length of pipe [m]
        DP = 2  # Head Drop [m]
        darcy = x[0]  # Darcy factor
        diameter = x[1]  # Diameter
        v_avg = x[2]  # Velocity Average
        return diameter - (darcy * (L * (v_avg ** 2) / DP * 2))

    def constraint_Vavg_eq_D(self, x):
        """Continuity: average velocity equals flow rate over pipe area."""
        V = 0.55  # Caudal [m^3/s]
        diameter = x[1]  # Diameter
        v_avg = x[2]  # Velocity Average
        return v_avg - (4 * V / (np.pi * (diameter ** 2)))
Opt = Optimice()
constraint_equal = {'type': 'eq', 'fun': Opt.objective_Colebrook}
constraint_equal1 = {'type': 'eq', 'fun': Opt.constraint_D_eq_f}
constraint_equal2 = {'type': 'eq', 'fun': Opt.constraint_Vavg_eq_D}
constraint = [constraint_equal, constraint_equal1, constraint_equal2]
x0 = [0.5, 1, 1.5]
sol = minimize(Opt.objective_Colebrook, x0, method='SLSQP', constraints=constraint, options={'maxiter': 1000})
print(sol)
print("\nDarcy factor :\t", sol.x[0])
print("\nDiameter:\t", sol.x[1], "[m]")
print("\nVelocity Average:\t", sol.x[2], "[m/s]")
print("\n--- %s seconds ---" % (time.time() - start_time))
| 932 | -6 | 102 |
e8d15d0e8366eac5cc666ee0aff35e1329cd94dc | 2,722 | py | Python | noisemaker/scripts/mood.py | aayars/py-noisemaker | 4e27f536632ade583eb0110aaaa9e19c59355ba6 | [
"Apache-2.0"
] | 106 | 2017-03-25T23:14:55.000Z | 2022-01-11T04:18:14.000Z | noisemaker/scripts/mood.py | aayars/py-noisemaker | 4e27f536632ade583eb0110aaaa9e19c59355ba6 | [
"Apache-2.0"
] | 32 | 2020-06-03T05:40:06.000Z | 2022-03-31T13:00:56.000Z | noisemaker/scripts/mood.py | aayars/py-noisemaker | 4e27f536632ade583eb0110aaaa9e19c59355ba6 | [
"Apache-2.0"
] | 10 | 2018-12-03T19:23:56.000Z | 2021-01-13T17:55:04.000Z | import os
import random
from PIL import Image, ImageDraw, ImageFont
import click
import textwrap
@click.command()
@click.option('--filename', type=click.Path(dir_okay=False), required=True)
@click.option('--text', type=str, required=True)
@click.option('--font', type=str, default='LiberationSans-Bold')
@click.option('--font-size', type=int, default=42)
@click.option('--color', is_flag=True)
@click.option('--no-rect', is_flag=True)
@click.option('--wrap-width', type=int, default=42)
@click.option('--bottom', is_flag=True)
@click.option('--right', is_flag=True)
@click.option('--invert', is_flag=True)
| 29.912088 | 156 | 0.628949 | import os
import random
from PIL import Image, ImageDraw, ImageFont
import click
import textwrap
def mood_text(input_filename, text, font='LiberationSans-Bold', font_size=42, fill=None, rect=True, wrap_width=42, bottom=False, right=False, invert=False):
    """Draw word-wrapped `text` onto the image at `input_filename`, in place.

    The image file is overwritten with the annotated version. `fill` defaults
    to opaque white (or black when `invert` is set); `rect` draws a translucent
    band behind the text block; `bottom`/`right` control placement (otherwise
    the text is centered on that axis).
    """
    if fill is None:
        if invert:
            fill = (0, 0, 0, 0)
        else:
            fill = (255, 255, 255, 255)
    image = Image.open(input_filename).convert('RGB')
    input_width, input_height = image.size
    # Fonts are loaded from ~/.noisemaker/fonts/<name>.ttf.
    font_path = os.path.join(os.path.expanduser('~'), '.noisemaker', 'fonts', '{}.ttf'.format(font))
    font = ImageFont.truetype(font_path, font_size)  # rebinds `font` to the loaded face
    draw = ImageDraw.Draw(image, 'RGBA')
    padding = 6
    lines = textwrap.wrap(text, width=wrap_width)
    # Total height of the wrapped block, including per-line padding.
    text_height = sum(draw.textsize(line, font=font)[1] + padding for line in lines)
    text_y = input_height - text_height
    if bottom:
        text_y -= padding
    else:
        text_y /= 2  # vertically centered
    if invert:
        shadow_color = (255, 255, 255, 128)
    else:
        shadow_color = (0, 0, 0, 128)
    if rect:
        # Translucent full-width band behind the whole text block.
        draw.rectangle(((0, text_y - padding), (input_width, text_y + text_height + padding)), fill=shadow_color)
    for i, line in enumerate(textwrap.wrap(text, width=wrap_width)):
        line_w, line_h = draw.textsize(line, font=font)
        text_x = input_width - line_w
        if right:
            text_x -= padding + 4
        else:
            text_x /= 2  # horizontally centered
        # 1px-offset drop shadow first, then the line itself.
        draw.text((text_x + 1, text_y + 1), line, font=font, fill=shadow_color)
        draw.text((text_x, text_y), line, font=font, fill=fill)
        text_y += line_h + padding
    image.save(input_filename)
@click.command()
@click.option('--filename', type=click.Path(dir_okay=False), required=True)
@click.option('--text', type=str, required=True)
@click.option('--font', type=str, default='LiberationSans-Bold')
@click.option('--font-size', type=int, default=42)
@click.option('--color', is_flag=True)
@click.option('--no-rect', is_flag=True)
@click.option('--wrap-width', type=int, default=42)
@click.option('--bottom', is_flag=True)
@click.option('--right', is_flag=True)
@click.option('--invert', is_flag=True)
def main(filename, text, font, font_size, color, no_rect, wrap_width, bottom, right, invert):
    """CLI wrapper: choose a fill color and delegate to mood_text()."""
    # --color picks a random fill: dark tones when inverted, light otherwise.
    if color:
        if invert:
            fill = (random.randint(0, 128), random.randint(0, 128), random.randint(0, 128), 255)
        else:
            fill = (random.randint(128, 255), random.randint(128, 255), random.randint(128, 255), 255)
    else:
        if invert:
            fill = (0, 0, 0, 0)
        else:
            fill = (255, 255, 255, 255)
    mood_text(filename, text, font, font_size, fill, not no_rect, wrap_width, bottom, right, invert)
| 2,065 | 0 | 45 |
455a0e23caf1744de30265eeaa683fe70252a833 | 4,302 | py | Python | ckanext-hdx_users/ckanext/hdx_users/tests/test_notifications/test_quarantine_notifications.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 58 | 2015-01-11T09:05:15.000Z | 2022-03-17T23:44:07.000Z | ckanext-hdx_users/ckanext/hdx_users/tests/test_notifications/test_quarantine_notifications.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 1,467 | 2015-01-01T16:47:44.000Z | 2022-02-28T16:51:20.000Z | ckanext-hdx_users/ckanext/hdx_users/tests/test_notifications/test_quarantine_notifications.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 17 | 2015-05-06T14:04:21.000Z | 2021-11-11T19:58:16.000Z | import pytest
import ckan.tests.factories as factories
import ckan.plugins.toolkit as tk
import ckan.authz as authz
import ckan.model as model
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
from ckanext.hdx_org_group.helpers.static_lists import ORGANIZATION_TYPE_LIST
from ckanext.hdx_users.helpers.notifications_dao import QuarantinedDatasetsDao
from ckanext.hdx_users.helpers.notification_service import QuarantinedDatasetsService, \
SysadminQuarantinedDatasetsService
config = tk.config
NotAuthorized = tk.NotAuthorized
_get_action = tk.get_action
| 40.205607 | 117 | 0.643887 | import pytest
import ckan.tests.factories as factories
import ckan.plugins.toolkit as tk
import ckan.authz as authz
import ckan.model as model
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
from ckanext.hdx_org_group.helpers.static_lists import ORGANIZATION_TYPE_LIST
from ckanext.hdx_users.helpers.notifications_dao import QuarantinedDatasetsDao
from ckanext.hdx_users.helpers.notification_service import QuarantinedDatasetsService, \
SysadminQuarantinedDatasetsService
config = tk.config
NotAuthorized = tk.NotAuthorized
_get_action = tk.get_action
class TestQuarantineNotifications(hdx_test_base.HdxBaseTest):
    """Integration test: quarantining a resource produces notifications for
    both the dataset's editor and sysadmins, with the right `for_sysadmin`
    flag on each."""
    EDITOR_USER = 'editor_user'
    SYSADMIN_USER = 'testsysadmin'
    PACKAGE_ID = 'test_dataset_4_quarantine'
    RESOURCE_ID = None  # set in setup_class from the created dataset
    @classmethod
    def setup_class(cls):
        """Create the editor user, an organization, and a one-resource dataset."""
        super(TestQuarantineNotifications, cls).setup_class()
        factories.User(name=cls.EDITOR_USER, email='quarantine_user@hdx.hdxtest.org')
        factories.Organization(
            name='org_name_4_quarantine',
            title='ORG NAME FOR QUARANTINE',
            users=[
                {'name': cls.EDITOR_USER, 'capacity': 'editor'},
            ],
            hdx_org_type=ORGANIZATION_TYPE_LIST[0][1],
            org_url='https://hdx.hdxtest.org/'
        )
        package = {
            "package_creator": "test function",
            "private": False,
            "dataset_date": "[1960-01-01 TO 2012-12-31]",
            "caveats": "These are the caveats",
            "license_other": "TEST OTHER LICENSE",
            "methodology": "This is a test methodology",
            "dataset_source": "Test data",
            "license_id": "hdx-other",
            "name": cls.PACKAGE_ID,
            "notes": "This is a test dataset",
            "title": "Test Dataset for Quarantine",
            "owner_org": "org_name_4_quarantine",
            "groups": [{"name": "roger"}],
            "resources": [
                {
                    'package_id': 'test_private_dataset_1',
                    'url': config.get('ckan.site_url', '') + '/storage/f/test_folder/hdx_test.csv',
                    'resource_type': 'file.upload',
                    'format': 'CSV',
                    'name': 'hdx_test.csv'
                }
            ]
        }
        context = {'model': model, 'session': model.Session, 'user': cls.EDITOR_USER}
        dataset_dict = _get_action('package_create')(context, package)
        cls.RESOURCE_ID = dataset_dict['resources'][0]['id']
    @staticmethod
    def __get_quarantine_service(username):
        """Build the notification service appropriate for `username`'s role."""
        userobj = model.User.get(username)
        is_sysadmin = authz.is_sysadmin(username)
        quarantined_datasets_dao = QuarantinedDatasetsDao(model, userobj, is_sysadmin)
        quarantine_service = SysadminQuarantinedDatasetsService (quarantined_datasets_dao, username) if is_sysadmin \
            else QuarantinedDatasetsService(quarantined_datasets_dao, username)
        return quarantine_service
    @staticmethod
    def __hdx_qa_resource_patch(package_id, resource_id, key, new_value, username):
        """Patch a QA field on a resource as `username`, swallowing
        NotAuthorized, then return the (possibly unchanged) dataset dict."""
        try:
            _get_action('hdx_qa_resource_patch')(
                {
                    'model': model, 'session': model.Session,
                    'user': username,
                },
                {'id': resource_id, key: new_value}
            )
        except NotAuthorized as e:
            pass
        return _get_action('package_show')({}, {'id': package_id})
    def test_quarantine(self):
        """Quarantine the resource as sysadmin, then check both audiences see it."""
        self.__hdx_qa_resource_patch(self.PACKAGE_ID, self.RESOURCE_ID, 'in_quarantine', True, self.SYSADMIN_USER)
        quarantine_service = self.__get_quarantine_service(self.EDITOR_USER)
        notifications_list = quarantine_service.get_quarantined_datasets_info()
        assert len(notifications_list) == 1
        assert notifications_list[0]['dataset'].get('name') == self.PACKAGE_ID
        assert not notifications_list[0]['for_sysadmin']
        quarantine_service = self.__get_quarantine_service(self.SYSADMIN_USER)
        notifications_list = quarantine_service.get_quarantined_datasets_info()
        assert len(notifications_list) == 1
        assert notifications_list[0]['dataset'].get('name') == self.PACKAGE_ID
        assert notifications_list[0]['for_sysadmin']
| 3,368 | 337 | 23 |
f10ce62c92d2f1ce391099973a1871a6f92c0754 | 250 | py | Python | tests/conftest.py | npc-engine/npc-engine | 0047794e96369c23515f794a1e77009c516a382c | [
"MIT"
] | 12 | 2021-11-10T21:03:19.000Z | 2022-03-21T21:55:34.000Z | tests/conftest.py | npc-engine/npc-engine | 0047794e96369c23515f794a1e77009c516a382c | [
"MIT"
] | 1 | 2021-12-05T14:51:44.000Z | 2021-12-05T14:51:44.000Z | tests/conftest.py | npc-engine/npc-engine | 0047794e96369c23515f794a1e77009c516a382c | [
"MIT"
] | null | null | null | import pytest
import os
@pytest.fixture(scope="session", autouse=True)
| 25 | 74 | 0.688 | import pytest
import os
@pytest.fixture(scope="session", autouse=True)
def execute_before_any_test():
os.environ["NPC_ENGINE_MODELS_PATH"] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "resources", "models"
)
| 149 | 0 | 23 |
3145d8af6223c9b649df7ba108395627938871ea | 1,021 | py | Python | examples/udp_client/udp_server.py | alanbarr/ChibiOS_CC3000_SPI | 3f970a678a2524b8f427510b878e49b6f9965ccb | [
"BSD-3-Clause"
] | null | null | null | examples/udp_client/udp_server.py | alanbarr/ChibiOS_CC3000_SPI | 3f970a678a2524b8f427510b878e49b6f9965ccb | [
"BSD-3-Clause"
] | null | null | null | examples/udp_client/udp_server.py | alanbarr/ChibiOS_CC3000_SPI | 3f970a678a2524b8f427510b878e49b6f9965ccb | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
#
# Simple UDP server companion for udp_client.c
# This expects to receive a particular message from the CC3000. Upon each
# receipt it will respond with its own message.
import socket
UDP_IP = "10.0.0.1"
UDP_PORT = 44444
MSG_EXP = "Hello World from CC3000"
MSG_EXP_BYTES = MSG_EXP.encode()
MSG_TX = "Hello CC3000"
MSG_TX_BYTES = MSG_TX.encode()
print("Creating socket...")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Created!")
print("Binding to:", UDP_IP, ":", UDP_PORT)
sock.bind((UDP_IP, UDP_PORT))
print("Bound!")
while True:
data_bytes, (src_ip, src_port) = sock.recvfrom(256)
data = data_bytes.decode()
print("Message Received:")
print("data is: ", data)
print("src_ip is: ", src_ip)
print("src_port is: ", src_port)
if data != MSG_EXP:
print("Message text was not as expected.")
continue
else:
print("Sending Reply...")
sock.sendto(MSG_TX_BYTES, (src_ip, src_port))
print("Sent!")
| 22.688889 | 74 | 0.666014 | #! /usr/bin/env python3
#
# Simple UDP server companion for udp_client.c
# This expects to receive a particular message from the CC3000. Upon each
# receipt it will respond with its own message.
import socket
UDP_IP = "10.0.0.1"
UDP_PORT = 44444
MSG_EXP = "Hello World from CC3000"
MSG_EXP_BYTES = MSG_EXP.encode()
MSG_TX = "Hello CC3000"
MSG_TX_BYTES = MSG_TX.encode()
print("Creating socket...")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Created!")
print("Binding to:", UDP_IP, ":", UDP_PORT)
sock.bind((UDP_IP, UDP_PORT))
print("Bound!")
while True:
data_bytes, (src_ip, src_port) = sock.recvfrom(256)
data = data_bytes.decode()
print("Message Received:")
print("data is: ", data)
print("src_ip is: ", src_ip)
print("src_port is: ", src_port)
if data != MSG_EXP:
print("Message text was not as expected.")
continue
else:
print("Sending Reply...")
sock.sendto(MSG_TX_BYTES, (src_ip, src_port))
print("Sent!")
| 0 | 0 | 0 |
63fd18ac0d007edf8ab4f465e7b5114985675ab5 | 1,100 | py | Python | constructure/tests/utilities/test_utilities.py | lilyminium/constructure | db3a9e0afb2e98e451959fb62009a733a2cac546 | [
"MIT"
] | 5 | 2021-01-25T17:51:44.000Z | 2021-05-08T00:08:21.000Z | constructure/tests/utilities/test_utilities.py | lilyminium/constructure | db3a9e0afb2e98e451959fb62009a733a2cac546 | [
"MIT"
] | 12 | 2021-01-28T17:38:47.000Z | 2021-04-29T22:18:17.000Z | constructure/tests/utilities/test_utilities.py | lilyminium/constructure | db3a9e0afb2e98e451959fb62009a733a2cac546 | [
"MIT"
] | 1 | 2021-04-14T13:50:50.000Z | 2021-04-14T13:50:50.000Z | import pytest
from constructure.utilities import MissingOptionalDependency, requires_package
from constructure.utilities.utilities import _CONDA_INSTALLATION_COMMANDS
| 25 | 79 | 0.722727 | import pytest
from constructure.utilities import MissingOptionalDependency, requires_package
from constructure.utilities.utilities import _CONDA_INSTALLATION_COMMANDS
def test_requires_package_found():
    """A decorated function runs normally when its dependency is importable."""

    @requires_package("constructure")
    def answer():
        return 42

    assert answer() == 42
def test_requires_package_unknown_missing():
    """Calling through the decorator raises when the dependency is absent."""

    @requires_package("fake-package-42")
    def needs_missing_package():
        pass

    with pytest.raises(MissingOptionalDependency) as error_info:
        needs_missing_package()

    message = str(error_info.value)
    assert "The required fake-package-42 module could not be imported." in message
def test_requires_package_known_missing(monkeypatch):
    """For known packages the error suggests the matching conda command."""
    monkeypatch.setitem(
        _CONDA_INSTALLATION_COMMANDS, "fake-package-42", "conda install ..."
    )

    @requires_package("fake-package-42")
    def needs_missing_package():
        pass

    with pytest.raises(MissingOptionalDependency) as error_info:
        needs_missing_package()

    message = str(error_info.value)
    assert "Try installing the package by running `conda install ...`" in message
| 860 | 0 | 69 |
950a2b77dba242372c468aa4bb240ce4dbc548dd | 501 | py | Python | setup.py | developmentseed/cogeo-watchbot-light | c82d55a61a2d8ebfb87aceae1847c0af822ebabe | [
"MIT"
] | 9 | 2019-10-09T11:28:38.000Z | 2020-12-04T16:05:21.000Z | setup.py | developmentseed/cogeo-watchbot-light | c82d55a61a2d8ebfb87aceae1847c0af822ebabe | [
"MIT"
] | 5 | 2019-12-13T19:27:02.000Z | 2020-06-22T19:53:17.000Z | setup.py | developmentseed/cogeo-watchbot-light | c82d55a61a2d8ebfb87aceae1847c0af822ebabe | [
"MIT"
] | null | null | null | """Setup."""
from setuptools import setup, find_packages

# Runtime requirements; rio-cogeo is pinned to the 2.0 alpha series.
inst_reqs = ["rio-cogeo~=2.0a4", "rasterio[s3]~=1.1", "requests"]
# Extras installable with `pip install .[test]`.
extra_reqs = {"test": ["pytest", "pytest-cov"]}
setup(
    name="app",
    version="0.0.2",
    description=u"cogeo watchbot",
    python_requires=">=3",
    keywords="AWS-Lambda Python",
    packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
    include_package_data=True,
    zip_safe=False,
    install_requires=inst_reqs,
    extras_require=extra_reqs,
)
| 25.05 | 70 | 0.666667 | """Setup."""
from setuptools import setup, find_packages

# Runtime requirements; rio-cogeo is pinned to the 2.0 alpha series.
inst_reqs = ["rio-cogeo~=2.0a4", "rasterio[s3]~=1.1", "requests"]
# Extras installable with `pip install .[test]`.
extra_reqs = {"test": ["pytest", "pytest-cov"]}
setup(
    name="app",
    version="0.0.2",
    description=u"cogeo watchbot",
    python_requires=">=3",
    keywords="AWS-Lambda Python",
    packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
    include_package_data=True,
    zip_safe=False,
    install_requires=inst_reqs,
    extras_require=extra_reqs,
)
| 0 | 0 | 0 |
1775adced6d88e1e0e716c60de2336221c7c37fe | 516 | py | Python | app/main.py | michaldev/fastapi-async-mongodb | f8f42c73b5c3cfff6de0258618aa28189d2e0afe | [
"MIT"
] | 38 | 2020-10-05T05:32:03.000Z | 2022-03-22T00:02:53.000Z | app/main.py | michaldev/fastapi-async-mongodb | f8f42c73b5c3cfff6de0258618aa28189d2e0afe | [
"MIT"
] | null | null | null | app/main.py | michaldev/fastapi-async-mongodb | f8f42c73b5c3cfff6de0258618aa28189d2e0afe | [
"MIT"
] | 10 | 2021-01-07T14:42:59.000Z | 2022-03-27T09:59:35.000Z | import uvicorn
from fastapi import FastAPI
from app.config import get_config
from app.db import db
from app.rest import posts
app = FastAPI(title="Async FastAPI")
app.include_router(posts.router, prefix='/api/posts')
@app.on_event("startup")
@app.on_event("shutdown")
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| 19.846154 | 53 | 0.736434 | import uvicorn
from fastapi import FastAPI
from app.config import get_config
from app.db import db
from app.rest import posts
app = FastAPI(title="Async FastAPI")
# Mount the posts endpoints under /api/posts.
app.include_router(posts.router, prefix='/api/posts')


@app.on_event("startup")
async def startup():
    # Open the database connection before the app starts serving requests.
    config = get_config()
    await db.connect_to_database(path=config.db_path)


@app.on_event("shutdown")
async def shutdown():
    # Close the database connection when the app stops.
    await db.close_database_connection()


if __name__ == "__main__":
    # Development entry point: run an embedded uvicorn server.
    uvicorn.run(app, host="0.0.0.0", port=8000)
| 120 | 0 | 44 |
e6b632df80f4da61b64e92cf276e22ea6f9f94a1 | 4,134 | py | Python | zaqar/tests/base.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | zaqar/tests/base.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | zaqar/tests/base.py | mail2nsrajesh/zaqar | a68a03a228732050b33c2a7f35d1caa9f3467718 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fixtures
from oslo_config import cfg
from oslo_log import log
from osprofiler import opts
import testtools
from zaqar.common import configs
from zaqar.tests import helpers
class TestBase(testtools.TestCase):
    """Child class of testtools.TestCase for testing Zaqar.

    Inherit from this and write your test methods. If the child class defines
    a prepare(self) method, this method will be called before executing each
    test method.
    """

    # Name of the conf file a subclass wants loaded; None means an empty
    # in-memory configuration is used instead.
    config_file = None

    @classmethod
    def conf_path(cls, filename):
        """Returns the full path to the specified Zaqar conf file.

        :param filename: Name of the conf file to find (e.g.,
                         'wsgi_memory.conf')
        """
        # A path that already exists is returned untouched; otherwise the
        # file is looked up in the configured test-configs directory.
        if os.path.exists(filename):
            return filename

        return os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], filename)

    @classmethod
    def load_conf(cls, filename):
        """Loads `filename` configuration file.

        :param filename: Name of the conf file to find (e.g.,
                         'wsgi_memory.conf')

        :returns: Project's config object.
        """
        conf = cfg.ConfigOpts()
        log.register_options(conf)
        conf(args=[], default_config_files=[cls.conf_path(filename)])
        return conf

    def config(self, group=None, **kw):
        """Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        for k, v in kw.items():
            self.conf.set_override(k, v, group)
| 35.333333 | 77 | 0.64925 | # Copyright (c) 2013 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fixtures
from oslo_config import cfg
from oslo_log import log
from osprofiler import opts
import testtools
from zaqar.common import configs
from zaqar.tests import helpers
class TestBase(testtools.TestCase):
    """Child class of testtools.TestCase for testing Zaqar.

    Inherit from this and write your test methods. If the child class defines
    a prepare(self) method, this method will be called before executing each
    test method.
    """

    # Name of the conf file a subclass wants loaded; None means an empty
    # in-memory configuration is used instead.
    config_file = None

    def setUp(self):
        super(TestBase, self).setUp()

        self.useFixture(fixtures.FakeLogger('zaqar'))

        # Optionally capture stdout/stderr, mirroring the standard
        # OpenStack test environment variables.
        if os.environ.get('OS_STDOUT_CAPTURE') is not None:
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if os.environ.get('OS_STDERR_CAPTURE') is not None:
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        if self.config_file:
            self.config_file = helpers.override_mongo_conf(
                self.config_file, self)
            self.conf = self.load_conf(self.config_file)
        else:
            self.conf = cfg.ConfigOpts()
            self.conf.register_opts(configs._GENERAL_OPTIONS)
            self.conf.register_opts(configs._DRIVER_OPTIONS,
                                    group=configs._DRIVER_GROUP)
            # Fix: _NOTIFICATION_OPTIONS was registered twice; the duplicate
            # (a harmless no-op in oslo.config) has been removed.
            self.conf.register_opts(configs._NOTIFICATION_OPTIONS,
                                    group=configs._NOTIFICATION_GROUP)
            self.conf.register_opts(configs._SIGNED_URL_OPTIONS,
                                    group=configs._SIGNED_URL_GROUP)
            opts.set_defaults(self.conf)
            self.conf.register_opts(configs._PROFILER_OPTIONS,
                                    group=configs._PROFILER_GROUP)

        self.mongodb_url = os.environ.get('ZAQAR_TEST_MONGODB_URL',
                                          'mongodb://127.0.0.1:27017')

    @classmethod
    def conf_path(cls, filename):
        """Returns the full path to the specified Zaqar conf file.

        :param filename: Name of the conf file to find (e.g.,
                         'wsgi_memory.conf')
        """
        if os.path.exists(filename):
            return filename

        return os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], filename)

    @classmethod
    def load_conf(cls, filename):
        """Loads `filename` configuration file.

        :param filename: Name of the conf file to find (e.g.,
                         'wsgi_memory.conf')

        :returns: Project's config object.
        """
        conf = cfg.ConfigOpts()
        log.register_options(conf)
        conf(args=[], default_config_files=[cls.conf_path(filename)])
        return conf

    def config(self, group=None, **kw):
        """Override some configuration values.

        The keyword arguments are the names of configuration options to
        override and their values.

        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.

        All overrides are automatically cleared at the end of the current
        test by the tearDown() method.
        """
        for k, v in kw.items():
            self.conf.set_override(k, v, group)

    def _my_dir(self):
        # Absolute directory of this source file.
        return os.path.abspath(os.path.dirname(__file__))
| 1,666 | 0 | 54 |
e7aa246d4bb2851366daaf5f91a5fe555ce9c5c2 | 692 | py | Python | pyalp/gs_interface/generate_certificates.py | Mause/pyalp | fb0f723070e11f8c9ed57e2475eb963599f442a6 | [
"MIT"
] | null | null | null | pyalp/gs_interface/generate_certificates.py | Mause/pyalp | fb0f723070e11f8c9ed57e2475eb963599f442a6 | [
"MIT"
] | 2 | 2021-06-08T19:32:48.000Z | 2022-03-11T23:17:45.000Z | pyalp/gs_interface/generate_certificates.py | Mause/pyalp | fb0f723070e11f8c9ed57e2475eb963599f442a6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Generate client and server CURVE certificate files then move them into the
appropriate store directory, private_keys or public_keys. The certificates
generated by this script are used by the stonehouse and ironhouse examples.
In practice this would be done by hand or some out-of-band process.
Author: Chris Laws
"""
import zmq.auth
from __init__ import KEYS_DIR
def generate_certificates():
    ''' Generate client and server CURVE certificate files'''
    # create new keys in certificates dir (one key pair each for the
    # "server" and "client" identities, written into KEYS_DIR)
    zmq.auth.create_certificates(KEYS_DIR, "server")
    zmq.auth.create_certificates(KEYS_DIR, "client")


if __name__ == '__main__':
    generate_certificates()
| 25.62963 | 75 | 0.765896 | #!/usr/bin/env python
"""
Generate client and server CURVE certificate files then move them into the
appropriate store directory, private_keys or public_keys. The certificates
generated by this script are used by the stonehouse and ironhouse examples.
In practice this would be done by hand or some out-of-band process.
Author: Chris Laws
"""
import zmq.auth
from __init__ import KEYS_DIR
def generate_certificates():
    ''' Generate client and server CURVE certificate files'''
    # create new keys in certificates dir (one key pair each for the
    # "server" and "client" identities, written into KEYS_DIR)
    zmq.auth.create_certificates(KEYS_DIR, "server")
    zmq.auth.create_certificates(KEYS_DIR, "client")


if __name__ == '__main__':
    generate_certificates()
| 0 | 0 | 0 |
b441f1b2db0b3be859ab9f14e874f3a4b4bba3d9 | 2,799 | py | Python | auth_client.py | varajala/flask-auth-server | 5159c75a7b2de87b1ae84cd24d5a4d91f924eca7 | [
"BSD-3-Clause"
] | 1 | 2021-12-20T11:37:31.000Z | 2021-12-20T11:37:31.000Z | auth_client.py | varajala/flask-auth-server | 5159c75a7b2de87b1ae84cd24d5a4d91f924eca7 | [
"BSD-3-Clause"
] | null | null | null | auth_client.py | varajala/flask-auth-server | 5159c75a7b2de87b1ae84cd24d5a4d91f924eca7 | [
"BSD-3-Clause"
] | null | null | null | import sys
import zlib
import base64
import requests
import auth_server.jwt as jwt
from json import dumps as json_dumps
from json import loads as json_loads
| 31.1 | 130 | 0.690604 | import sys
import zlib
import base64
import requests
import auth_server.jwt as jwt
from json import dumps as json_dumps
from json import loads as json_loads
def b64url_decode(input_: bytes) -> bytes:
    """Decode URL-safe base64 whose trailing '=' padding may have been stripped."""
    remainder = len(input_) % 4
    if remainder:
        input_ = input_ + b'=' * (4 - remainder)
    return base64.urlsafe_b64decode(input_)
def decode_flask_session_cookie(cookie: str) -> dict:
    """Decode the JSON payload of a Flask session cookie (signature ignored).

    A leading '.' marks a zlib-compressed payload (itsdangerous convention);
    the first dot-separated field holds the URL-safe-base64 payload.
    """
    compressed = cookie.startswith('.')
    if compressed:
        cookie = cookie[1:]
    payload = b64url_decode(cookie.split('.')[0].encode())
    if compressed:
        payload = zlib.decompress(payload)
    return json_loads(payload.decode("utf-8"))
def register_user(url: str, email: str, password: str) -> int:
    """POST a registration request (password doubles as its confirmation);
    returns the HTTP status code."""
    payload = dict(email=email, password=password, password_confirm=password)
    response = requests.post(url, json=payload, allow_redirects=False)
    return response.status_code
def verify_user(url: str, json_data: dict) -> int:
    """POST a verification payload; returns the HTTP status code."""
    return requests.post(url, json=json_data, allow_redirects=False).status_code
def login_user(url: str, json_data: dict) -> dict:
    """POST login credentials and decode the tokens the server returns.

    The access token arrives in the Authorization header; the refresh token
    is stored inside the Flask session cookie. Both JWTs are split into
    (header, payload, signature) dicts. Returns a dict with the status code,
    redirect Location, decoded tokens and the raw/decoded session cookie.
    Raises if the Authorization header or session cookie is missing.
    """
    response = requests.post(url, json = json_data, allow_redirects = False)
    redirect_location = response.headers.get('Location')
    auth_header = response.headers.get('Authorization')
    # Drop the auth-scheme prefix, keeping only the token string.
    _, access_token_str = auth_header.split(' ')
    header, payload, signature = jwt.decode(access_token_str)
    access_token = dict(
        header = header,
        payload = payload,
        signature = signature
    )
    raw_session_cookie = response.cookies['session']
    session_cookie = decode_flask_session_cookie(raw_session_cookie)
    # The refresh token is a JWT embedded in the session cookie payload.
    header, payload, signature = jwt.decode(session_cookie['refresh_token'])
    refresh_token = dict(
        header = header,
        payload = payload,
        signature = signature
    )
    return dict(
        statuscode = response.status_code,
        redirect_location = redirect_location,
        access_token = access_token,
        refresh_token = refresh_token,
        session_cookie = session_cookie,
        raw_session_cookie = raw_session_cookie
    )
def refresh_access_token(url: str, raw_session_cookie: object) -> dict:
    """Exchange the session cookie (which carries the refresh token) for a
    new access token.

    Sends the raw Flask session cookie with the POST and decodes the JWT
    returned in the Authorization header. Returns a dict with the status
    code, the redirect Location and the decoded access token.
    """
    response = requests.post(url, allow_redirects = False, cookies=dict(session=raw_session_cookie))
    redirect_location = response.headers.get('Location')
    auth_header = response.headers.get('Authorization')
    # Drop the auth-scheme prefix, keeping only the token string.
    header, auth_token_str = auth_header.split(' ')
    header, payload, signature = jwt.decode(auth_token_str)
    access_token = dict(
        header = header,
        payload = payload,
        signature = signature
    )
    return dict(
        statuscode = response.status_code,
        redirect_location = redirect_location,
        access_token = access_token
    )
| 2,498 | 0 | 138 |
726705a7f06e2df8fa684ea8c2a5debc89802e47 | 572 | py | Python | Desafios/desafio-52.py | marielitonmb/Curso-Python3 | 26215c47c4d1eadf940b8024305b7e9ff600883b | [
"MIT"
] | null | null | null | Desafios/desafio-52.py | marielitonmb/Curso-Python3 | 26215c47c4d1eadf940b8024305b7e9ff600883b | [
"MIT"
] | null | null | null | Desafios/desafio-52.py | marielitonmb/Curso-Python3 | 26215c47c4d1eadf940b8024305b7e9ff600883b | [
"MIT"
] | null | null | null | # Aula 13 - Desafio 52: Numeros primos
# Read an integer and report whether or not it is prime.
num = int(input('Digite um numero: '))
primo = 0  # counts the divisors of num found so far
for n in range(1, num+1):
    if num % n == 0:
        primo += 1  # n divides num exactly
        print('\033[1;32m', end=' ')  # green: divisor
    else:
        print('\033[m', end=' ')  # default colour: not a divisor
    print(f'{n}\033[m ', end='')
print()
# A prime number has exactly two divisors: 1 and itself.
if primo == 2:
    print(f'\nLogo \033[1m{num}\033[m \033[4mEH NUMERO PRIMO\033[m pois soh eh divisivel por {primo} numeros')
else:
    print(f'Logo \033[1m{num}\033[m \033[4mNAO EH NUMERO PRIMO\033[m pois eh divisiel por {primo} numeros')
| 30.105263 | 110 | 0.603147 | # Aula 13 - Desafio 52: Numeros primos
# Read an integer and report whether or not it is prime.
num = int(input('Digite um numero: '))
primo = 0  # counts the divisors of num found so far
for n in range(1, num+1):
    if num % n == 0:
        primo += 1  # n divides num exactly
        print('\033[1;32m', end=' ')  # green: divisor
    else:
        print('\033[m', end=' ')  # default colour: not a divisor
    print(f'{n}\033[m ', end='')
print()
# A prime number has exactly two divisors: 1 and itself.
if primo == 2:
    print(f'\nLogo \033[1m{num}\033[m \033[4mEH NUMERO PRIMO\033[m pois soh eh divisivel por {primo} numeros')
else:
    print(f'Logo \033[1m{num}\033[m \033[4mNAO EH NUMERO PRIMO\033[m pois eh divisiel por {primo} numeros')
| 0 | 0 | 0 |
20c4455caf2671c77f8d0f3f923f72f466e70630 | 27,563 | py | Python | [archived]/mcmt-tracking-python/mcmt-tracking-python/mcmt-tracking-python/multi-cam/utility/object_tracking_util.py | sieniven/spot-it-3d | 7c149c5ede1c72fd0178dd76e1b96bb9d6ecdcf5 | [
"Apache-2.0"
] | 8 | 2021-04-26T15:05:45.000Z | 2021-09-18T17:56:29.000Z | [archived]/mcmt-tracking-python/mcmt-tracking-python/mcmt-tracking-python/multi-cam/utility/object_tracking_util.py | sieniven/spot-it-3d | 7c149c5ede1c72fd0178dd76e1b96bb9d6ecdcf5 | [
"Apache-2.0"
] | 1 | 2021-07-28T06:54:26.000Z | 2021-07-28T06:54:26.000Z | [archived]/mcmt-tracking-python/mcmt-tracking-python/mcmt-tracking-python/multi-cam/utility/object_tracking_util.py | sieniven/spot-it-3d | 7c149c5ede1c72fd0178dd76e1b96bb9d6ecdcf5 | [
"Apache-2.0"
] | 1 | 2021-11-12T14:08:21.000Z | 2021-11-12T14:08:21.000Z | import cv2
import math
import numpy as np
from filterpy.kalman import KalmanFilter
from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment
# local imported codes
from automatic_brightness import average_brightness, average_brightness_hsv
import parameters as parm
# Dilates the image multiple times to get of noise in order to get a single large contour for each background object
# Identify background objects by their shape (non-circular)
# Creates a copy of the input image which has the background contour filled in
# Returns the filled image which has the background elements filled in
# Take in the original frame, and return two masked images: One contains the sky while the other contains non-sky components
# This is for situations where there is bright sunlight reflecting off the drone, causing it to blend into sky
# Increasing contrast of the whole image will detect drone but cause false positives in the background
# Hence the sky must be extracted before a localised contrast increase can be applied to it
# The sky is extracted by converting the image from RGB to HSV and applying thresholding + morphological operations
# Create VideoCapture object to extract frames from,
# background subtractor object and blob detector objects for object detection
# and VideoWriters for output videos
# Apply image masks to prepare frame for blob detection
# Masks: 1) Increased contrast and brightness to fade out the sky and make objects stand out
# 2) Background subtractor to remove the stationary background (Converts frame to a binary image)
# 3) Further background subtraction by means of contouring around non-circular objects
# 4) Dilation to fill holes in detected drones
# 5) Inversion to make the foreground black for the blob detector to identify foreground objects
# Perform the blob detection on the masked image
# Return detected blob centroids as well as size
# Adjust contrast and brightness of image to make foreground stand out more
# alpha used to adjust contrast, where alpha < 1 reduces contrast and alpha > 1 increases it
# beta used to increase brightness, scale of (-255 to 255) ? Needs confirmation
# formula is im_out = alpha * im_in + beta
# Therefore to change brightness before contrast, we need to do alpha = 1 first
# Assigns detections to tracks using Munkre's Algorithm with cost based on euclidean distance,
# with detections being located too far from existing tracks being designated as unassigned detections
# and tracks without any nearby detections being designated as unassigned tracks
# Using the coordinates of valid assignments which correspond to the detection and track indices,
# update the track with the matched detection
# Existing tracks without a matching detection are aged and considered invisible for the frame
# If any track has been invisible for too long, or generated by a flash, it will be removed from the list of tracks
# Detections not assigned an existing track are given their own track, initialized with the location of the detection
# for single camera detection
# for multi camera detection | 45.037582 | 138 | 0.66143 | import cv2
import math
import numpy as np
from filterpy.kalman import KalmanFilter
from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment
# local imported codes
from automatic_brightness import average_brightness, average_brightness_hsv
import parameters as parm
class Camera:
    """Per-camera state: the cv2.VideoCapture plus the background subtractor,
    blob detector and active track list used by the tracker.

    scale_factor scales detection parameters relative to an 848x480 frame.
    """

    def __init__(self, index, fps):
        self.index = index
        self.cap = cv2.VideoCapture(self.index)
        self.frame_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.fps = fps
        # Diagonal length relative to the 848x480 reference resolution.
        self.scale_factor = math.sqrt(self.frame_w ** 2 + self.frame_h ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
        self.aspect_ratio = self.frame_w / self.frame_h
        # Exposed as an attribute so callers can tell whether frames must be
        # downsampled (original kept this as an unused local).
        self.downsample = False
        # Cap the processing resolution at a 1080p pixel count.
        if self.frame_w * self.frame_h > 1920 * 1080:
            self.downsample = True
            self.frame_w = 1920
            # Bug fix: original read the undefined name `aspect_ratio`,
            # raising NameError whenever this branch was taken.
            self.frame_h = int(1920 / self.aspect_ratio)
            self.scale_factor = math.sqrt(self.frame_w ** 2 + self.frame_h ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
        self.fgbg, self.detector = setup_system_objects(self.scale_factor)
        self.tracks = []
        self.origin = np.array([0, 0])
        # Track ids start at 1000 for this camera.
        self.next_id = 1000
        self.dead_tracks = []
class Track:
    """State for one tracked object: a Kalman filter, visibility counters and
    an optional secondary OpenCV appearance tracker (KCF or CSRT)."""

    def __init__(self, track_id, size):
        self.id = track_id
        self.size = size
        # Constant Velocity Model: state (x, y, vx, vy), measurement (x, y).
        self.kalmanFilter = KalmanFilter(dim_x=4, dim_z=2)
        # # Constant Acceleration Model
        # self.kalmanFilter = KalmanFilter(dim_x=6, dim_z=2)
        self.age = 1
        self.totalVisibleCount = 1
        self.consecutiveInvisibleCount = 0
        self.goodtrack = False
        # Secondary appearance tracker selected by parm.SECONDARY_FILTER:
        # 1 -> KCF, 2 -> CSRT, anything else -> none.
        if parm.SECONDARY_FILTER == 1:
            self.tracker = cv2.TrackerKCF_create()
        elif parm.SECONDARY_FILTER == 2:
            self.tracker = cv2.TrackerCSRT_create()
        else:
            self.tracker = None
        # Latest bounding box from the secondary tracker (x, y, w, h).
        self.box = np.zeros(4)
        self.outOfSync = False
# Dilates the image multiple times to get of noise in order to get a single large contour for each background object
# Identify background objects by their shape (non-circular)
# Creates a copy of the input image which has the background contour filled in
# Returns the filled image which has the background elements filled in
def imopen(im_in, kernel_size, iterations=1):
    """Morphologically open `im_in` with a square all-ones kernel."""
    structuring_element = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.morphologyEx(im_in, cv2.MORPH_OPEN, structuring_element,
                            iterations=iterations)
def scalar_to_rgb(scalar_value, max_value):
    """Map a scalar in [0, max_value] onto an RGB rainbow ramp.

    max_value maps to red and 0 to magenta, passing through yellow, green,
    cyan and blue in between.
    """
    fraction = scalar_value / max_value
    position = (1 - fraction) * 5
    segment = math.floor(position)
    ramp = math.floor(255 * (position - segment))
    if segment == 0:
        return 255, ramp, 0
    if segment == 1:
        return 255, 255, 0
    if segment == 2:
        return 0, 255, ramp
    if segment == 3:
        return 0, 255, 255
    if segment == 4:
        return ramp, 0, 255
    return 255, 0, 255
# Take in the original frame, and return two masked images: One contains the sky while the other contains non-sky components
# This is for situations where there is bright sunlight reflecting off the drone, causing it to blend into sky
# Increasing contrast of the whole image will detect drone but cause false positives in the background
# Hence the sky must be extracted before a localised contrast increase can be applied to it
# The sky is extracted by converting the image from RGB to HSV and applying thresholding + morphological operations
def extract_sky(frame):
    """Split a BGR frame into its sky and non-sky components.

    Pixels with HSV value >= parm.SKY_THRES are treated as sky; the sky mask
    is morphologically opened to drop small bright patches that could be
    mistaken for drones. Returns (sky, non_sky) as masked BGR images.
    """
    # Convert image from RGB to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Threshold the HSV image to extract the sky. A clear, sunlit sky has high V value (200 - 255)
    lower = np.array([0, 0, parm.SKY_THRES])
    upper = np.array([180, 255, 255])
    sky = cv2.inRange(hsv, lower, upper)
    # Also extract the non-sky component
    lower = np.array([0, 0, 0])
    upper = np.array([180, 255, parm.SKY_THRES])
    non_sky = cv2.inRange(hsv, lower, upper)
    # Morphologically open the image (erosion followed by dilation) to remove small patches of sky among the background
    # These small patches of sky may be mistaken for drones if not removed
    kernel = np.ones((5, 5), np.uint8)
    sky = cv2.morphologyEx(sky, cv2.MORPH_OPEN, kernel, iterations=parm.DILATION_ITER)
    # Retrieve original RGB images with filtered sky using bitwise and
    sky = cv2.bitwise_and(frame, frame, mask=sky)
    non_sky = cv2.bitwise_and(frame, frame, mask=non_sky)
    return sky, non_sky
def remove_ground(im_in, dilation_iterations, background_contour_circularity, frame, index):
    """Black out large, non-circular (background/ground) regions of a binary mask.

    The mask is dilated so nearby foreground pixels merge into single
    contours; contours whose circularity is <= the threshold are treated as
    background and filled with 0 in a copy of the input mask.

    frame and index are only needed for the (disabled) debug visualisation.
    """
    kernel_dilation = np.ones((5, 5), np.uint8)
    # Number of iterations determines how close objects need to be to be considered background
    dilated = cv2.dilate(im_in, kernel_dilation, iterations=dilation_iterations)
    contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    background_contours = []
    for contour in contours:
        # Identify background from foreground by the circularity of their dilated contours
        circularity = 4 * math.pi * cv2.contourArea(contour) / (cv2.arcLength(contour, True) ** 2)
        if circularity <= background_contour_circularity:
            background_contours.append(contour)
    # Debug visualisation (disabled): the original copied the whole frame and
    # drew the background contours on every call even though the result was
    # never displayed — wasted work in a per-frame hot path.
    # im_debug = frame.copy()
    # cv2.drawContours(im_debug, background_contours, -1, (0, 255, 0), 3)
    # imshow_resized('Remove Ground' + str(index), im_debug)
    im_out = im_in.copy()
    cv2.drawContours(im_out, background_contours, -1, 0, -1)
    return im_out
def imshow_resized(window_name, img):
    """Display `img` in a window 600 px wide, preserving its aspect ratio."""
    aspect = img.shape[1] / img.shape[0]
    resized = cv2.resize(img, (int(600), int(600 / aspect)),
                         interpolation=cv2.INTER_CUBIC)
    cv2.imshow(window_name, resized)
def downsample_image(img):
    """Resize `img` to 1920 px wide, preserving its aspect ratio."""
    aspect = img.shape[1] / img.shape[0]
    return cv2.resize(img, (int(1920), int(1920 / aspect)),
                      interpolation=cv2.INTER_CUBIC)
# Create VideoCapture object to extract frames from,
# background subtractor object and blob detector objects for object detection
# and VideoWriters for output videos
def setup_system_objects(scale_factor):
    """Create the MOG2 background subtractor and blob detector for one camera.

    Thresholds are scaled by scale_factor (frame diagonal relative to
    848x480); the MOG2 history length comes from parm. Returns
    (fgbg, detector).
    """
    # Background subtractor works by subtracting the history from the current frame.
    # Furthermore this model already includes gaussian blur and morphological transformations.
    # varThreshold affects the spottiness of the image. The lower it is, the more smaller spots.
    # The larger it is, these spots will combine into large foreground areas
    # fgbg = cv2.createBackgroundSubtractorMOG2(history=int(10*FPS), varThreshold=64*SCALE_FACTOR,
    #                                           detectShadows=False)
    # A lower varThreshold results in more noise which is beneficial to ground subtraction (but detrimental if you want
    # detections closer to the ground as there is more noise
    fgbg = cv2.createBackgroundSubtractorMOG2(history=int(parm.FGBG_HISTORY * parm.VIDEO_FPS), varThreshold= 4 / scale_factor,
                                              detectShadows=False)
    # Background ratio represents the fraction of the history a frame must be present
    # to be considered part of the background
    # eg. history is 5s, background ratio is 0.1, frames present for 0.5s will be considered background
    fgbg.setBackgroundRatio(parm.BACKGROUND_RATIO)
    fgbg.setNMixtures(parm.NMIXTURES)
    params = cv2.SimpleBlobDetector_Params()
    # params.filterByArea = True
    # params.minArea = 1
    # params.maxArea = 1000
    params.filterByConvexity = False
    params.filterByCircularity = False
    detector = cv2.SimpleBlobDetector_create(params)
    return fgbg, detector
# Apply image masks to prepare frame for blob detection
# Masks: 1) Increased contrast and brightness to fade out the sky and make objects stand out
# 2) Background subtractor to remove the stationary background (Converts frame to a binary image)
# 3) Further background subtraction by means of contouring around non-circular objects
# 4) Dilation to fill holes in detected drones
# 5) Inversion to make the foreground black for the blob detector to identify foreground objects
# Perform the blob detection on the masked image
# Return detected blob centroids as well as size
# Adjust contrast and brightness of image to make foreground stand out more
# alpha used to adjust contrast, where alpha < 1 reduces contrast and alpha > 1 increases it
# beta used to increase brightness, scale of (-255 to 255) ? Needs confirmation
# formula is im_out = alpha * im_in + beta
# Therefore to change brightness before contrast, we need to do alpha = 1 first
def detect_objects(frame, mask, fgbg, detector, origin, index, scale_factor):
    """Detect small moving objects (drones) in one frame via blob detection.

    Pipeline: brightness/contrast compensation (with localised sky-contrast
    boost under bright sunlight), MOG2 background subtraction, ground
    removal, dilation, inversion, then SimpleBlobDetector. Returns
    (centroids, sizes, masked): centroids are offset by `origin`; masked is
    the final binary image. NOTE(review): the cv2.imshow calls here look
    like debug output left enabled — confirm before shipping.
    """
    if average_brightness_hsv(16, frame, mask) > parm.BRIGHTNESS_THRES:
        # If sun compensation is required, extract the sky and apply localised contrast increase to it
        # And then restore the non-sky (i.e. treeline) back into the image to avoid losing data
        masked, non_sky = extract_sky(frame)
        masked = cv2.convertScaleAbs(masked, alpha=2, beta=0)
        masked = cv2.add(masked, non_sky)
    else:
        masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    imshow_resized("pre-backhground subtraction", masked)
    # Normalise overall brightness towards a fixed target before subtraction.
    masked = cv2.convertScaleAbs(masked, alpha=1, beta=256 - average_brightness(16, frame, mask) + parm.BRIGHTNESS_GAIN)
    # masked = cv2.convertScaleAbs(masked, alpha=2, beta=128)
    # masked = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
    # masked = threshold_rgb(frame)
    # Subtract Background
    # Learning rate affects how often the model is updated
    # High values > 0.5 tend to lead to patchy output
    # Found that 0.1 - 0.3 is a good range
    masked = fgbg.apply(masked, learningRate=parm.FGBG_LEARNING_RATE)
    masked = remove_ground(masked, int(13 / (2.26 / scale_factor)), 0.5, frame, index)
    cv2.imshow("after remove ground", masked)
    # Morphological Transforms
    # Close to remove black spots
    # masked = imclose(masked, 3, 1)
    # Open to remove white holes
    # masked = imopen(masked, 3, 2)
    # masked = imfill(masked)
    kernel_dilation = np.ones((5, 5), np.uint8)
    masked = cv2.dilate(masked, kernel_dilation, iterations=parm.DILATION_ITER)
    # Apply foreground mask (dilated) to the image and perform detection on that
    # masked = cv2.bitwise_and(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), masked)
    # Invert frame such that black pixels are foreground
    masked = cv2.bitwise_not(masked)
    cv2.imshow("after dilation again and inversion", masked)
    # Blob detection
    keypoints = detector.detect(masked)
    n_keypoints = len(keypoints)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i in range(n_keypoints):
        centroids[i] = keypoints[i].pt
        centroids[i] += origin
        sizes[i] = keypoints[i].size
    return centroids, sizes, masked
def detect_objects_large(frame, mask, fgbg, detector, origin, scale_factor):
    """Detect larger moving objects using contour moments instead of blobs.

    Applies brightness compensation, MOG2 subtraction, opening and dilation,
    then takes each external contour's centroid (image moments) and bounding
    box. Returns (centroids, sizes, masked); centroids are offset by
    `origin`. NOTE(review): a degenerate contour with zero area would make
    M['m00'] == 0 and raise ZeroDivisionError — confirm this cannot occur
    after the morphological opening.
    """
    masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    masked = cv2.convertScaleAbs(masked, alpha=1, beta=256 - average_brightness(16, frame, mask) + gain)
    # learningRate=-1 lets OpenCV pick the rate automatically from history.
    masked = fgbg.apply(masked, learningRate=-1)
    kernel = np.ones((5, 5), np.uint8)
    # Remove Noise
    masked = cv2.morphologyEx(masked, cv2.MORPH_OPEN, kernel, iterations=int(1))
    masked = cv2.dilate(masked, kernel, iterations=int(4 * scale_factor))
    contours, hierarchy = cv2.findContours(masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    n_keypoints = len(contours)
    centroids = np.zeros((n_keypoints, 2))
    sizes = np.zeros((n_keypoints, 2))
    for i, contour in enumerate(contours):
        # Centroid from image moments: (m10/m00, m01/m00).
        M = cv2.moments(contour)
        centroids[i] = [int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])]
        centroids[i] += origin
        x, y, w, h = cv2.boundingRect(contour)
        sizes[i] = (w, h)
    return centroids, sizes, masked
def predict_new_locations_of_tracks(tracks, frame, fps):
  """Advances each track's Kalman filter and refreshes secondary trackers.

  Every track gets one Kalman prediction step per frame. Tracks that are old
  enough (at least 1 second worth of frames, but never fewer than 30) and
  carry a secondary OpenCV tracker additionally have their bounding box
  refreshed from that tracker; on tracker failure the previous box is kept.

  Args:
    tracks: list of Track objects exposing .kalmanFilter, .age, .tracker
      and .box.
    frame: current video frame handed to the secondary tracker.
    fps: float, video frame rate used to convert the age threshold from
      seconds into frames.
  """
  for track in tracks:
    track.kalmanFilter.predict()
    if track.age >= max(1.0 * fps, 30) and track.tracker is not None:
      ok, box = track.tracker.update(frame)
      if ok:
        # Tracking success; a failed update leaves track.box untouched.
        track.box = box
# Assigns detections to tracks using Munkre's Algorithm with cost based on euclidean distance,
# with detections being located too far from existing tracks being designated as unassigned detections
# and tracks without any nearby detections being designated as unassigned tracks
def detection_to_track_assignment(tracks, centroids, cost_of_non_assignment):
  """Matches detections to existing tracks with the Hungarian algorithm.

  Args:
    tracks: list of Track objects whose .kalmanFilter.x holds the predicted
      [x, y, ...] state.
    centroids: iterable of (x, y) detection centroids.
    cost_of_non_assignment: float, cost of leaving a track or detection
      unmatched; a detection further than this from every track will be
      matched to a dummy track instead (and later spawn a new track).

  Returns:
    A 3-tuple (assignments, unassigned_tracks, unassigned_detections), each
    an array of (row, col) index pairs into the padded cost matrix, where
    rows index tracks and cols index detections.
  """
  m, n = len(tracks), len(centroids)
  k, l = min(m, n), max(m, n)
  # Square cost matrix padded with dummy rows/columns so that every real
  # track and every real detection can always be matched to something.
  cost = np.zeros((k + l, k + l))
  # Top-left m x n corner: euclidean distance of every detection from each
  # track's predicted location.
  for i, track in enumerate(tracks):
    track_location = track.kalmanFilter.x[:2]
    cost[i, :n] = np.array(
        [distance.euclidean(track_location, centroid) for centroid in centroids])
  # Top-right corner: dummy detections. A track matched here had no nearby
  # detection and is later treated as invisible for this frame.
  cost[:m, n:] = np.ones((m, m)) * cost_of_non_assignment
  # Bottom-left corner: dummy tracks. A detection matched here had no nearby
  # track and later generates a new track.
  cost[m:, :n] = np.ones((n, n)) * cost_of_non_assignment
  # Bottom-right corner stays 0 so excess dummies always pair with each other.
  row_ind, col_ind = linear_sum_assignment(cost)
  assignments_all = np.column_stack((row_ind, col_ind))
  # Real track matched to real detection -> valid assignment.
  assignments = assignments_all[(assignments_all < [m, n]).all(axis=1)]
  # Real track matched to a dummy detection -> unassigned track.
  unassigned_tracks = assignments_all[
      (assignments_all >= [0, n]).all(axis=1) & (assignments_all < [m, k + l]).all(axis=1)]
  # Real detection matched to a dummy track -> unassigned detection.
  unassigned_detections = assignments_all[
      (assignments_all >= [m, 0]).all(axis=1) & (assignments_all < [k + l, n]).all(axis=1)]
  return assignments, unassigned_tracks, unassigned_detections
# Using the coordinates of valid assignments which correspond to the detection and track indices,
# update the track with the matched detection
def update_assigned_tracks(assignments, tracks, centroids, sizes, frame):
  """Updates every matched track with its assigned detection.

  For each (track_idx, detection_idx) pair this corrects the track's Kalman
  filter with the measured centroid, manages the secondary tracker lifecycle
  (initializing it one frame before it becomes active, then cross-checking
  the detection against the tracker's box), and refreshes the track's
  bookkeeping counters.

  Args:
    assignments: iterable of (track index, detection index) pairs.
    tracks: list of Track objects, mutated in place.
    centroids: array of detection centroids.
    sizes: array of detection (w, h) sizes.
    frame: current video frame used to initialize the secondary tracker.
  """
  for assignment in assignments:
    track_idx = assignment[0]
    detection_idx = assignment[1]
    centroid = centroids[detection_idx]
    size = sizes[detection_idx]
    track = tracks[track_idx]
    # Kalman correction step with the measured position.
    track.kalmanFilter.update(centroid)
    if track.tracker is not None:
      # One frame before the secondary tracker becomes active, seed it with
      # a bounding box centered on the current detection.
      if track.age == max(parm.SEC_FILTER_DELAY * parm.VIDEO_FPS, 30) - 1:
        track.box = (centroid[0] - (size[0] / 2), centroid[1] - (size[1] / 2), size[0], size[1])
        track.tracker.init(frame, track.box)
      if track.age >= max(parm.SEC_FILTER_DELAY * parm.VIDEO_FPS, 30):
        # Flag the track as out of sync when the detection falls outside an
        # enlarged window around the tracker's box in BOTH axes (one box
        # width/height beyond each side).
        track.outOfSync = (centroid[0] < track.box[0] - (1 * track.box[2]) or centroid[0] > track.box[0] + (2 * track.box[2])) \
                           and (centroid[1] < track.box[1] - (1 * track.box[3]) or centroid[1] > track.box[1] + (2 * track.box[3]))
        # cv2.putText(frame, "Separation detected", (100,160), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
    # # Adaptive filtering
    # # If the residual is too large, increase the process noise
    # Q_scale_factor = 100.
    # y, S = kf.y, kf.S  # Residual and Measurement covariance
    # # Square and normalize the residual
    # eps = np.dot(y.T, np.linalg.inv(S)).dot(y)
    # kf.Q *= eps * 10.
    track.size = size
    track.age += 1
    track.totalVisibleCount += 1
    track.consecutiveInvisibleCount = 0
# Existing tracks without a matching detection are aged and considered invisible for the frame
def update_unassigned_tracks(unassigned_tracks, tracks):
  """Ages every track that received no detection on this frame.

  Args:
    unassigned_tracks: iterable of (track index, _) pairs from the
      assignment step; only the first element is used.
    tracks: list of Track objects, mutated in place.
  """
  for pair in unassigned_tracks:
    unmatched = tracks[pair[0]]
    unmatched.age += 1
    unmatched.consecutiveInvisibleCount += 1
# If any track has been invisible for too long, or generated by a flash, it will be removed from the list of tracks
def get_lost_tracks(tracks):
  """Returns the subset of tracks that should be culled.

  A track is considered lost when it is (a) still young but rarely visible,
  which suggests it was generated by noise, (b) invisible for too many
  consecutive frames to be located reliably, or (c) flagged as out of sync
  with its secondary tracker.

  Args:
    tracks: list of Track objects to examine.

  Returns:
    A list of the Track objects to remove.
  """
  invisible_limit = parm.CONSECUTIVE_THRESH * parm.VIDEO_FPS
  min_age = parm.AGE_THRESH * parm.VIDEO_FPS
  lost = []
  for candidate in tracks:
    visibility = candidate.totalVisibleCount / candidate.age
    noisy = candidate.age < min_age and visibility < parm.VISIBILITY_RATIO
    stale = candidate.consecutiveInvisibleCount >= invisible_limit
    if noisy or stale or candidate.outOfSync:
      lost.append(candidate)
  return lost
def delete_lost_tracks(tracks, tracks_to_be_removed):
  """Returns the track list with every lost track filtered out.

  Args:
    tracks: list of Track objects.
    tracks_to_be_removed: list of Track objects to drop.

  Returns:
    A new list without the removed tracks; when either input list is empty
    the original list is returned unchanged.
  """
  if not tracks or not tracks_to_be_removed:
    return tracks
  return [kept for kept in tracks if kept not in tracks_to_be_removed]
# Detections not assigned an existing track are given their own track, initialized with the location of the detection
def create_new_tracks(unassigned_detections, next_id, tracks, centroids, sizes):
  """Creates a fresh track for every detection that matched no track.

  Each new track gets a constant-velocity Kalman filter (state
  [x, y, vx, vy], dt = 1) initialized at the detection's centroid with zero
  velocity.

  Args:
    unassigned_detections: iterable of (row, col) pairs from the assignment
      step; the column indexes the detection.
    next_id: int, id to give the first newly created track.
    tracks: list of Track objects, extended in place.
    centroids: array of detection centroids.
    sizes: array of detection (w, h) sizes.

  Returns:
    The next unused track id after all new tracks were created.
  """
  for detection in unassigned_detections:
    idx = detection[1]
    centroid = centroids[idx]
    size = sizes[idx]
    new_track = Track(next_id, size)
    kf = new_track.kalmanFilter
    # Initial state: detection position, zero velocity.
    kf.x = [centroid[0], centroid[1], 0, 0]
    # State transition matrix for constant velocity with dt = 1.
    kf.F = np.array([[1., 0, 1, 0],
                     [0, 1, 0, 1],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]])
    # Measurement function: only position is observed.
    kf.H = np.array([[1., 0, 0, 0],
                     [0, 1, 0, 0]])
    # Initial state covariance (position looser than velocity).
    kf.P = np.diag([200., 200, 50, 50])
    # Process (motion) noise.
    kf.Q = np.diag([100., 100, 25, 25])
    # Measurement noise.
    kf.R = 100
    tracks.append(new_track)
    next_id += 1
  return next_id
def filter_tracks(frame, masked, tracks, origin):
  """Selects reliable tracks for output and draws them on both images.

  A track is reported when it is older than the age threshold, has been
  visible often enough, and has not been invisible for more than 5
  consecutive frames (a requirement for re-identification). Reported tracks
  are drawn as rectangles: green when currently visible, red otherwise.

  Args:
    frame: BGR frame to annotate in place.
    masked: BGR debug image to annotate in place.
    tracks: list of Track objects to filter.
    origin: (x, y) offset subtracted to convert global coordinates back to
      this frame's local coordinates for drawing.

  Returns:
    A tuple (good_tracks, frame) where good_tracks is a list of
    [id, age, size, (x, y)] entries in global coordinates.
  """
  # Minimum number of frames to remove noise seems to be somewhere in the range of 30
  # Actually, I feel having both might be redundant together with the deletion criteria
  min_track_age = max(parm.AGE_THRESH * parm.VIDEO_FPS, 30) # seconds * FPS to give number of frames in seconds
  # This has to be less than or equal to the minimum age or it make the minimum age redundant
  min_visible_count = max(parm.VISIBILITY_THRESH * parm.VIDEO_FPS, 30)
  good_tracks = []
  if len(tracks) != 0:
    for track in tracks:
      if track.age > min_track_age and track.totalVisibleCount > min_visible_count:
        centroid = track.kalmanFilter.x[:2]
        size = track.size
        # requirement for track to be considered in re-identification
        # note that no. of frames being too small may lead to loss of continuous tracking,
        # due to reidentification.py -> line 250
        if track.consecutiveInvisibleCount <= 5:
          track.goodtrack = True
          good_tracks.append([track.id, track.age, size, (centroid[0], centroid[1])])
        # Re-read the position and shift into local coordinates for drawing.
        centroid = track.kalmanFilter.x[:2] - origin
        # Display filtered tracks
        rect_top_left = (int(centroid[0] - size[0] / 2), int(centroid[1] - size[1] / 2))
        rect_bottom_right = (int(centroid[0] + size[0] / 2), int(centroid[1] + size[1] / 2))
        colour = (0, 255, 0) if track.consecutiveInvisibleCount == 0 else (0, 0, 255)
        thickness = 1
        cv2.rectangle(frame, rect_top_left, rect_bottom_right, colour, thickness)
        cv2.rectangle(masked, rect_top_left, rect_bottom_right, colour, thickness)
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 0.5
        # cv2.putText(frame, str(track.id), (rect_bottom_right[0], rect_top_left[1]),
        #             font, font_scale, colour, thickness, cv2.LINE_AA)
        # cv2.putText(masked, str(track.id), (rect_bottom_right[0], rect_top_left[1]),
        #             font, font_scale, colour, thickness, cv2.LINE_AA)
  return good_tracks, frame
# for single camera detection
def single_cam_detector(tracks, next_id, index, fgbg, detector, fps, frame_width, frame_height, scale_factor, origin, frame):
  """Runs one full detect / assign / track update cycle on a single frame.

  Args:
    tracks: list of Track objects carried over from the previous frame.
    next_id: int, next unused track id.
    index: camera index forwarded to detect_objects.
    fgbg: OpenCV background subtractor.
    detector: OpenCV blob detector.
    fps: float, video frame rate.
    frame_width: int, frame width in pixels.
    frame_height: int, frame height in pixels.
    scale_factor: float, resolution scaling factor.
    origin: (x, y) global offset of this camera's frame.
    frame: the current frame.

  Returns:
    A tuple (good_tracks, tracks, next_id, frame).
  """
  full_mask = np.full((frame_height, frame_width), 255, dtype=np.uint8)
  centroids, sizes, masked = detect_objects(frame, full_mask, fgbg, detector,
                                            origin, index, scale_factor)
  predict_new_locations_of_tracks(tracks, frame, fps)
  assignments, invisible, fresh = detection_to_track_assignment(
      tracks, centroids, 10 * scale_factor)
  update_assigned_tracks(assignments, tracks, centroids, sizes, frame)
  update_unassigned_tracks(invisible, tracks)
  removed = get_lost_tracks(tracks)
  tracks = delete_lost_tracks(tracks, removed)
  next_id = create_new_tracks(fresh, next_id, tracks, centroids, sizes)
  masked = cv2.cvtColor(masked, cv2.COLOR_GRAY2BGR)
  good_tracks, frame = filter_tracks(frame, masked, tracks, origin)
  return good_tracks, tracks, next_id, frame
# for multi camera detection
def multi_cam_detector(camera, frame):
  """Runs one detect / assign / track update cycle for one camera object.

  Identical pipeline to single_cam_detector, but all per-camera state
  (background subtractor, detector, track list, id counter, dead-track log)
  lives on the camera object and is updated in place.

  Args:
    camera: object carrying per-camera state (frame_w, frame_h, fgbg,
      detector, origin, index, scale_factor, fps, tracks, next_id,
      dead_tracks).
    frame: the current frame captured by this camera.

  Returns:
    A tuple (good_tracks, frame) of filtered tracks and the annotated frame.
  """
  full_mask = np.full((camera.frame_h, camera.frame_w), 255, dtype=np.uint8)
  centroids, sizes, masked = detect_objects(frame, full_mask, camera.fgbg,
                                            camera.detector, camera.origin,
                                            camera.index, camera.scale_factor)
  predict_new_locations_of_tracks(camera.tracks, frame, camera.fps)
  assignments, invisible, fresh = detection_to_track_assignment(
      camera.tracks, centroids, 10 * camera.scale_factor)
  update_assigned_tracks(assignments, camera.tracks, centroids, sizes, frame)
  update_unassigned_tracks(invisible, camera.tracks)
  removed = get_lost_tracks(camera.tracks)
  camera.tracks = delete_lost_tracks(camera.tracks, removed)
  # Remember the ids of good tracks that died so they can be re-identified.
  for gone in removed:
    if gone.goodtrack:
      camera.dead_tracks.append(gone.id)
  camera.next_id = create_new_tracks(fresh, camera.next_id, camera.tracks,
                                     centroids, sizes)
  masked = cv2.cvtColor(masked, cv2.COLOR_GRAY2BGR)
  good_tracks, frame = filter_tracks(frame, masked, camera.tracks, camera.origin)
  cv2.imshow(f"Masked {camera.index}", masked)
  return good_tracks, frame
66b3f0cd23c8683df2151d4c248e0bbbe7d3b840 | 1,347 | py | Python | day08-numpy-array-boardcast/index.py | edgardeng/python-data-science-days | 726451c827da502b585605f2ada1160817d25479 | [
"MIT"
] | 1 | 2019-04-28T03:37:33.000Z | 2019-04-28T03:37:33.000Z | day08-numpy-array-boardcast/index.py | edgardeng/python-data-science-days | 726451c827da502b585605f2ada1160817d25479 | [
"MIT"
] | null | null | null | day08-numpy-array-boardcast/index.py | edgardeng/python-data-science-days | 726451c827da502b585605f2ada1160817d25479 | [
"MIT"
] | null | null | null | import numpy as np
if __name__ == '__main__':
print('Numpy Version', np.__version__)
# broadcast_operate()
broadcast_operate_example()
| 25.415094 | 89 | 0.521901 | import numpy as np
def broadcast_operate():
a = np.array([0, 1, 2])
b = np.array([5, 5, 5])
print('a:', a)
print('b:', b)
print('a + b = ', a + b)
print('a + 5 =', a + 5)
c = np.ones((3, 3))
print('c:', c)
print('c + a', c + a)
d = np.arange(3)
e = np.arange(3)[:, np.newaxis]
print('d = ', d)
print('e = ', e)
print('d + e = ', d + e)
def broadcast_operate_example():
# adding a two-dimensional array to a one-dimensional array:
a = np.ones((2, 3))
b = np.arange(3)
print('a + b = ', a + b)
# both arrays need to be broadcast:
a2 = np.arange(3).reshape((3, 1))
b2 = np.arange(3)
print('a2 + b2 = ', a2 + b2)
# the two arrays are not compatible:
a3 = np.ones((3, 2))
b3 = np.arange(3)
# print('a3 + b3 = ', a3 + b3) # ValueError: operands could not be broadcast
b4 = b3[:, np.newaxis]
print('a3 + b4 = ', a3 + b4)
np.logaddexp(a3, b4) # logaddexp(a, b) function, which computes log(exp(a) + exp(b))
# define a function $z = f(x, y)
x = np.linspace(0, 5, 50)
y = np.linspace(0, 5, 50)[:, np.newaxis]
z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)
print('z = ', z)
if __name__ == '__main__':
    # Script entry point: report the NumPy version, then run the
    # broadcasting walkthrough.
    print('Numpy Version', np.__version__)
    # broadcast_operate()
    broadcast_operate_example()
| 1,150 | 0 | 46 |
9c24386a63bd7c851cb9a1a3e2d69ba717705743 | 34,589 | py | Python | mobly/base_instrumentation_test.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 532 | 2016-11-07T22:01:00.000Z | 2022-03-30T17:11:40.000Z | mobly/base_instrumentation_test.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 528 | 2016-11-22T01:42:19.000Z | 2022-03-24T02:27:15.000Z | mobly/base_instrumentation_test.py | booneng/mobly | 539788309c7631c20fa5381937e10f9cd997e2d0 | [
"Apache-2.0"
] | 169 | 2016-11-18T15:12:26.000Z | 2022-03-24T01:22:08.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from enum import Enum
from mobly import base_test
from mobly import records
from mobly import signals
from mobly import utils
class _InstrumentationStructurePrefixes:
"""Class containing prefixes that structure insturmentation output.
Android instrumentation generally follows the following format:
.. code-block:: none
INSTRUMENTATION_STATUS: ...
...
INSTRUMENTATION_STATUS: ...
INSTRUMENTATION_STATUS_CODE: ...
INSTRUMENTATION_STATUS: ...
...
INSTRUMENTATION_STATUS: ...
INSTRUMENTATION_STATUS_CODE: ...
...
INSTRUMENTATION_RESULT: ...
...
INSTRUMENTATION_RESULT: ...
...
INSTRUMENTATION_CODE: ...
This means that these prefixes can be used to guide parsing
the output of the instrumentation command into the different
instrumetnation test methods.
Refer to the following Android Framework package for more details:
.. code-block:: none
com.android.commands.am.AM
"""
STATUS = 'INSTRUMENTATION_STATUS:'
STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE:'
RESULT = 'INSTRUMENTATION_RESULT:'
CODE = 'INSTRUMENTATION_CODE:'
FAILED = 'INSTRUMENTATION_FAILED:'
class _InstrumentationKnownStatusKeys:
"""Commonly used keys used in instrumentation output for listing
instrumentation test method result properties.
An instrumenation status line usually contains a key-value pair such as
the following:
.. code-block:: none
INSTRUMENTATION_STATUS: <key>=<value>
Some of these key-value pairs are very common and represent test case
properties. This mapping is used to handle each of the corresponding
key-value pairs different than less important key-value pairs.
Refer to the following Android Framework packages for more details:
.. code-block:: none
android.app.Instrumentation
android.support.test.internal.runner.listener.InstrumentationResultPrinter
TODO: Convert android.support.* to androidx.*,
(https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
"""
CLASS = 'class'
ERROR = 'Error'
STACK = 'stack'
TEST = 'test'
STREAM = 'stream'
class _InstrumentationStatusCodes:
"""A mapping of instrumentation status codes to test method results.
When instrumentation runs, at various points output is created in a series
of blocks that terminate as follows:
.. code-block:: none
INSTRUMENTATION_STATUS_CODE: 1
These blocks typically have several status keys in them, and they indicate
the progression of a particular instrumentation test method. When the
corresponding instrumentation test method finishes, there is generally a
line which includes a status code that gives thes the test result.
The UNKNOWN status code is not an actual status code and is only used to
represent that a status code has not yet been read for an instrumentation
block.
Refer to the following Android Framework package for more details:
.. code-block:: none
android.support.test.internal.runner.listener.InstrumentationResultPrinter
TODO: Convert android.support.* to androidx.*,
(https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
"""
UNKNOWN = None
OK = '0'
START = '1'
IN_PROGRESS = '2'
ERROR = '-1'
FAILURE = '-2'
IGNORED = '-3'
ASSUMPTION_FAILURE = '-4'
class _InstrumentationStatusCodeCategories:
  """Groups instrumentation status codes into result categories.

  Aside from TIMING (codes emitted while a test method is still running),
  these categories roughly map to Mobly signals and determine how a test
  method's result is recorded.
  """

  TIMING = [
      _InstrumentationStatusCodes.START,
      _InstrumentationStatusCodes.IN_PROGRESS,
  ]
  PASS = [
      _InstrumentationStatusCodes.OK,
  ]
  FAIL = [
      _InstrumentationStatusCodes.ERROR,
      _InstrumentationStatusCodes.FAILURE,
  ]
  SKIPPED = [
      _InstrumentationStatusCodes.IGNORED,
      _InstrumentationStatusCodes.ASSUMPTION_FAILURE,
  ]
class _InstrumentationKnownResultKeys:
"""Commonly used keys for outputting instrumentation errors.
When instrumentation finishes running all of the instrumentation test
methods, a result line will appear as follows:
.. code-block:: none
INSTRUMENTATION_RESULT:
If something wrong happened during the instrumentation run such as an
application under test crash, the line will appear similarly as thus:
.. code-block:: none
INSTRUMENTATION_RESULT: shortMsg=Process crashed.
Since these keys indicate that something wrong has happened to the
instrumentation run, they should be checked for explicitly.
Refer to the following documentation page for more information:
.. code-block:: none
https://developer.android.com/reference/android/app/ActivityManager.ProcessErrorStateInfo.html
"""
LONGMSG = 'longMsg'
SHORTMSG = 'shortMsg'
class _InstrumentationResultSignals:
"""Instrumenttion result block strings for signalling run completion.
The final section of the instrumentation output generally follows this
format:
.. code-block:: none
INSTRUMENTATION_RESULT: stream=
...
INSTRUMENTATION_CODE -1
Inside of the ellipsed section, one of these signaling strings should be
present. If they are not present, this usually means that the
instrumentation run has failed in someway such as a crash. Because the
final instrumentation block simply summarizes information, simply roughly
checking for a particilar string should be sufficient to check to a proper
run completion as the contents of the instrumentation result block don't
really matter.
Refer to the following JUnit package for more details:
.. code-block:: none
junit.textui.ResultPrinter
"""
FAIL = 'FAILURES!!!'
PASS = 'OK ('
class _InstrumentationBlockStates(Enum):
"""States used for determing what the parser is currently parsing.
The parse always starts and ends a block in the UNKNOWN state, which is
used to indicate that either a method or a result block (matching the
METHOD and RESULT states respectively) are valid follow ups, which means
that parser should be checking for a structure prefix that indicates which
of those two states it should transition to. If the parser is in the
METHOD state, then the parser will be parsing input into test methods.
Otherwise, the parse can simply concatenate all the input to check for
some final run completion signals.
"""
UNKNOWN = 0
METHOD = 1
RESULT = 2
class _InstrumentationBlock:
  """Container class for parsed instrumentation output for instrumentation
  test methods.
  Instrumentation test methods typically follow the following format:
  .. code-block:: none
    INSTRUMENTATION_STATUS: <key>=<value>
    ...
    INSTRUMENTATION_STATUS: <key>=<value>
    INSTRUMENTATION_STATUS_CODE: <status code #>
  The main issue with parsing this however is that the key-value pairs can
  span multiple lines such as this:
  .. code-block:: none
    INSTRUMENTATION_STATUS: stream=
    Error in ...
    ...
  Or, such as this:
  .. code-block:: none
    INSTRUMENTATION_STATUS: stack=...
    ...
  Because these keys are potentially very long, constant string concatenation
  is potentially inefficient. Instead, this class builds up a buffer to store
  the raw output until it is processed into an actual test result by the
  _InstrumentationBlockFormatter class.
  Additionally, this class also serves to store the parser state, which
  means that the BaseInstrumentationTestClass does not need to keep any
  potentially volatile instrumentation related state, so multiple
  instrumentation runs should have completely separate parsing states.
  This class is also used for storing result blocks although very little
  needs to be done for those.
  Attributes:
    begin_time: string, optional timestamp for when the test corresponding
      to the instrumentation block began.
    current_key: string, the current key that is being parsed, default to
      _InstrumentationKnownStatusKeys.STREAM.
    error_message: string, an error message indicating that something
      unexpected happened during a instrumentatoin test method.
    known_keys: dict, well known keys that are handled uniquely.
    prefix: string, a prefix to add to the class name of the
      instrumentation test methods.
    previous_instrumentation_block: _InstrumentationBlock, the last parsed
      instrumentation block.
    state: _InstrumentationBlockStates, the current state of the parser.
    status_code: string, the state code for an instrumentation method
      block.
    unknown_keys: dict, arbitrary keys that are handled generically.
  """

  # NOTE(review): the attributes documented above appear to be set up by an
  # __init__ that is not part of this excerpt — confirm against the full
  # class definition before relying on default values.

  @property
  def is_empty(self):
    """Determines whether or not anything has been parsed with this
    instrumentation block.
    Returns:
      A boolean indicating whether or not this instrumentation block
      has parsed and contains any output.
    """
    return self._empty

  def set_error_message(self, error_message):
    """Sets an error message on an instrumentation block.
    This method is used exclusively to indicate that a test method failed
    to complete, which is usually cause by a crash of some sort such that
    the test method is marked as error instead of ignored.
    Args:
      error_message: string, an error message to be added to the
        TestResultRecord to explain that something wrong happened.
    """
    self._empty = False
    self.error_message = error_message

  def _remove_structure_prefix(self, prefix, line):
    """Helper function for removing the structure prefix for parsing.
    Args:
      prefix: string, a _InstrumentationStructurePrefixes to remove from
        the raw output.
      line: string, the raw line from the instrumentation output.
    Returns:
      A string containing a key value pair descripting some property
      of the current instrumentation test method.
    """
    return line[len(prefix):].strip()

  def set_status_code(self, status_code_line):
    """Sets the status code for the instrumentation test method, used in
    determining the test result.
    Args:
      status_code_line: string, the raw instrumentation output line that
        contains the status code of the instrumentation block.
    """
    self._empty = False
    self.status_code = self._remove_structure_prefix(
        _InstrumentationStructurePrefixes.STATUS_CODE,
        status_code_line,
    )
    # A START code marks the beginning of a test method, so stamp the time.
    if self.status_code == _InstrumentationStatusCodes.START:
      self.begin_time = utils.get_current_epoch_time()

  def set_key(self, structure_prefix, key_line):
    """Sets the current key for the instrumentation block.
    For unknown keys, the key is added to the value list in order to
    better contextualize the value in the output.
    Args:
      structure_prefix: string, the structure prefix that was matched
        and that needs to be removed.
      key_line: string, the raw instrumentation output line that contains
        the key-value pair.
    """
    self._empty = False
    key_value = self._remove_structure_prefix(
        structure_prefix,
        key_line,
    )
    if '=' in key_value:
      # Split only on the first '=' so values may themselves contain '='.
      (key, value) = key_value.split('=', 1)
      self.current_key = key
      if key in self.known_keys:
        self.known_keys[key].append(value)
      else:
        self.unknown_keys[key].append(key_value)

  def add_value(self, line):
    """Adds unstructured or multi-line value output to the current parsed
    instrumentation block for outputting later.
    Usually, this will add extra lines to the value list for the current
    key-value pair. However, sometimes, such as when instrumentation
    failed to start, output does not follow the structured prefix format.
    In this case, adding all of the output is still useful so that a user
    can debug the issue.
    Args:
      line: string, the raw instrumentation line to append to the value
        list.
    """
    # Don't count whitespace only lines.
    if line.strip():
      self._empty = False
    if self.current_key in self.known_keys:
      self.known_keys[self.current_key].append(line)
    else:
      self.unknown_keys[self.current_key].append(line)

  def transition_state(self, new_state):
    """Transitions or sets the current instrumentation block to the new
    parser state.
    Args:
      new_state: _InstrumentationBlockStates, the state that the parser
        should transition to.
    Returns:
      A new instrumentation block set to the new state, representing
      the start of parsing a new instrumentation test method.
      Alternatively, if the current instrumentation block represents the
      start of parsing a new instrumentation block (state UNKNOWN), then
      this returns the current instrumentation block set to the now
      known parsing state.
    """
    if self.state == _InstrumentationBlockStates.UNKNOWN:
      self.state = new_state
      return self
    else:
      next_block = _InstrumentationBlock(
          state=new_state,
          prefix=self.prefix,
          previous_instrumentation_block=self,
      )
      # Carry the start timestamp forward while the method is still in
      # progress so the eventual record reflects when it actually began.
      if self.status_code in _InstrumentationStatusCodeCategories.TIMING:
        next_block.begin_time = self.begin_time
      return next_block
class _InstrumentationBlockFormatter:
  """Takes an instrumentation block and converts it into a Mobly test
  result.
  """

  # Fallback test-method name when the 'test' status key was never parsed.
  DEFAULT_INSTRUMENTATION_METHOD_NAME = 'instrumentation_method'

  # NOTE(review): the _prefix/_known_keys/_unknown_keys/_status_code/
  # _begin_time/_error_message attributes used below appear to be copied
  # from an _InstrumentationBlock in an __init__ that is not part of this
  # excerpt — confirm against the full class definition.

  def _get_name(self):
    """Gets the method name of the test method for the instrumentation
    method block.
    Returns:
      A string containing the name of the instrumentation test method's
      test or a default name if no name was parsed.
    """
    if self._known_keys[_InstrumentationKnownStatusKeys.TEST]:
      return self._known_keys[_InstrumentationKnownStatusKeys.TEST]
    else:
      return self.DEFAULT_INSTRUMENTATION_METHOD_NAME

  def _get_class(self):
    """Gets the class name of the test method for the instrumentation
    method block.
    Returns:
      A string containing the class name of the instrumentation test
      method's test or empty string if no name was parsed. If a prefix
      was specified, then the prefix will be prepended to the class
      name.
    """
    class_parts = [
        self._prefix, self._known_keys[_InstrumentationKnownStatusKeys.CLASS]
    ]
    # filter(None, ...) drops missing parts so the join degrades gracefully.
    return '.'.join(filter(None, class_parts))

  def _get_full_name(self):
    """Gets the qualified name of the test method corresponding to the
    instrumentation block.
    Returns:
      A string containing the fully qualified name of the
      instrumentation test method. If parts are missing, then degrades
      steadily.
    """
    full_name_parts = [self._get_class(), self._get_name()]
    return '#'.join(filter(None, full_name_parts))

  def _get_details(self):
    """Gets the output for the detail section of the TestResultRecord.
    Returns:
      A string to set for a TestResultRecord's details.
    """
    detail_parts = [self._get_full_name(), self._error_message]
    return '\n'.join(filter(None, detail_parts))

  def _get_extras(self):
    """Gets the output for the extras section of the TestResultRecord.
    Returns:
      A string to set for a TestResultRecord's extras.
    """
    # Add empty line to start key-value pairs on a new line.
    extra_parts = ['']
    for value in self._unknown_keys.values():
      extra_parts.append(value)
    extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STREAM])
    extra_parts.append(
        self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])
    extra_parts.append(
        self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])
    extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.ERROR])
    # Avoid duplicating the stack trace when it is already embedded in the
    # stream output.
    if self._known_keys[
        _InstrumentationKnownStatusKeys.STACK] not in self._known_keys[
            _InstrumentationKnownStatusKeys.STREAM]:
      extra_parts.append(
          self._known_keys[_InstrumentationKnownStatusKeys.STACK])
    return '\n'.join(filter(None, extra_parts))

  def _is_failed(self):
    """Determines if the test corresponding to the instrumentation block
    failed.
    This method can not be used to tell if a test method passed and
    should not be used for such a purpose.
    Returns:
      A boolean indicating if the test method failed.
    """
    if self._status_code in _InstrumentationStatusCodeCategories.FAIL:
      return True
    elif (self._known_keys[_InstrumentationKnownStatusKeys.STACK] and
          self._status_code != _InstrumentationStatusCodes.ASSUMPTION_FAILURE):
      return True
    elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:
      return True
    elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:
      return True
    elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:
      return True
    else:
      return False

  def create_test_record(self, mobly_test_class):
    """Creates a TestResultRecord for the instrumentation block.
    Args:
      mobly_test_class: string, the name of the Mobly test case
        executing the instrumentation run.
    Returns:
      A TestResultRecord with an appropriate signals exception
      representing the instrumentation test method's result status.
    """
    details = self._get_details()
    extras = self._get_extras()
    tr_record = records.TestResultRecord(
        t_name=self._get_full_name(),
        t_class=mobly_test_class,
    )
    if self._begin_time:
      tr_record.begin_time = self._begin_time
    # Failure checks take precedence over the raw status-code category.
    if self._is_failed():
      tr_record.test_fail(e=signals.TestFailure(details=details, extras=extras))
    elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:
      tr_record.test_skip(e=signals.TestSkip(details=details, extras=extras))
    elif self._status_code in _InstrumentationStatusCodeCategories.PASS:
      tr_record.test_pass(e=signals.TestPass(details=details, extras=extras))
    elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:
      # A block stuck in a timing state never finished: record an error if
      # there is an explanation, otherwise emit no record at all.
      if self._error_message:
        tr_record.test_error(
            e=signals.TestError(details=details, extras=extras))
      else:
        tr_record = None
    else:
      tr_record.test_error(e=signals.TestError(details=details, extras=extras))
    if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:
      tr_record.termination_signal.stacktrace = self._known_keys[
          _InstrumentationKnownStatusKeys.STACK]
    return tr_record

  def has_completed_result_block_format(self, error_message):
    """Checks the instrumentation result block for a signal indicating
    normal completion.
    Args:
      error_message: string, the error message to give if the
        instrumentation run did not complete successfully.
    Returns:
      A boolean indicating whether or not the instrumentation run passed
      or failed overall.
    Raises:
      signals.TestError: Error raised if the instrumentation run did not
        complete because of a crash or some other issue.
    """
    extras = self._get_extras()
    if _InstrumentationResultSignals.PASS in extras:
      return True
    elif _InstrumentationResultSignals.FAIL in extras:
      return False
    else:
      # Neither JUnit summary string present: the run ended abnormally.
      raise signals.TestError(details=error_message, extras=extras)
class InstrumentationTestMixin:
  """A mixin for Mobly test classes to inherit from for instrumentation tests.

  This class should be used in a subclass of both BaseTestClass and this class
  in order to provide instrumentation test capabilities. This mixin is
  explicitly for the case where the underlying BaseTestClass cannot be
  replaced with BaseInstrumentationTestClass. In general, prefer using
  BaseInstrumentationTestClass instead.

  Attributes:
    DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
      instrumentation params contained within user params.
    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
      message to set if something has prevented something in the
      instrumentation test run from completing properly.
  """

  DEFAULT_INSTRUMENTATION_OPTION_PREFIX = 'instrumentation_option_'
  DEFAULT_INSTRUMENTATION_ERROR_MESSAGE = ('instrumentation run exited '
                                           'unexpectedly')

  def _previous_block_never_completed(self, current_block, previous_block,
                                      new_state):
    """Checks if the previous instrumentation method block completed.

    Args:
      current_block: _InstrumentationBlock, the current instrumentation
        block to check for being a different instrumentation test
        method.
      previous_block: _InstrumentationBlock, the previous
        instrumentation block to check for an incomplete status.
      new_state: _InstrumentationBlockStates, the next state for the
        parser, used to check for the instrumentation run ending
        with an incomplete test.

    Returns:
      A boolean indicating whether the previous instrumentation block
      completed executing.
    """
    if previous_block:
      # A block stuck in a timing status (START/IN_PROGRESS) that is
      # followed by a new test or the result block never finished.
      previously_timing_block = (previous_block.status_code
                                 in _InstrumentationStatusCodeCategories.TIMING)
      currently_new_block = (current_block.status_code
                             == _InstrumentationStatusCodes.START or
                             new_state == _InstrumentationBlockStates.RESULT)
      return all([previously_timing_block, currently_new_block])
    else:
      return False

  def _create_formatters(self, instrumentation_block, new_state):
    """Creates the _InstrumentationBlockFormatters for outputting the
    instrumentation method blocks that have finished parsing.

    Args:
      instrumentation_block: _InstrumentationBlock, the current
        instrumentation method block to create formatters based upon.
      new_state: _InstrumentationBlockState, the next state that the
        parser will transition to.

    Returns:
      A list of the formatters that need to create and add
      TestResultRecords to the test results.
    """
    formatters = []
    if self._previous_block_never_completed(
        current_block=instrumentation_block,
        previous_block=instrumentation_block.previous_instrumentation_block,
        new_state=new_state):
      # Mark the incomplete previous block as an error so it is not
      # silently dropped from the results.
      instrumentation_block.previous_instrumentation_block.set_error_message(
          self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
      formatters.append(
          _InstrumentationBlockFormatter(
              instrumentation_block.previous_instrumentation_block))
    if not instrumentation_block.is_empty:
      formatters.append(_InstrumentationBlockFormatter(instrumentation_block))
    return formatters

  def _transition_instrumentation_block(
      self,
      instrumentation_block,
      new_state=_InstrumentationBlockStates.UNKNOWN):
    """Transitions and finishes the current instrumentation block.

    Args:
      instrumentation_block: _InstrumentationBlock, the current
        instrumentation block to finish.
      new_state: _InstrumentationBlockState, the next state for the
        parser to transition to.

    Returns:
      The new instrumentation block to use for storing parsed
      instrumentation output.
    """
    formatters = self._create_formatters(instrumentation_block, new_state)
    for formatter in formatters:
      test_record = formatter.create_test_record(self.TAG)
      # create_test_record returns None for timing-only blocks.
      if test_record:
        self.results.add_record(test_record)
        self.summary_writer.dump(test_record.to_dict(),
                                 records.TestSummaryEntryType.RECORD)
    return instrumentation_block.transition_state(new_state=new_state)

  def _parse_method_block_line(self, instrumentation_block, line):
    """Parses the instrumentation method block's line.

    Args:
      instrumentation_block: _InstrumentationBlock, the current
        instrumentation method block.
      line: string, the raw instrumentation output line to parse.

    Returns:
      The next instrumentation block, which should be used to continue
      parsing instrumentation output.
    """
    if line.startswith(_InstrumentationStructurePrefixes.STATUS):
      instrumentation_block.set_key(_InstrumentationStructurePrefixes.STATUS,
                                    line)
      return instrumentation_block
    elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):
      # A status code terminates the current method block.
      instrumentation_block.set_status_code(line)
      return self._transition_instrumentation_block(instrumentation_block)
    elif line.startswith(_InstrumentationStructurePrefixes.RESULT):
      # Unexpected transition from method block -> result block.
      instrumentation_block.set_key(_InstrumentationStructurePrefixes.RESULT,
                                    line)
      return self._parse_result_line(
          self._transition_instrumentation_block(
              instrumentation_block,
              new_state=_InstrumentationBlockStates.RESULT,
          ),
          line,
      )
    else:
      # Continuation of a multi-line value for the current key.
      instrumentation_block.add_value(line)
      return instrumentation_block

  def _parse_result_block_line(self, instrumentation_block, line):
    """Parses the instrumentation result block's line.

    Args:
      instrumentation_block: _InstrumentationBlock, the instrumentation
        result block for the instrumentation run.
      line: string, the raw instrumentation output to add to the
        instrumentation result block's _InstrumentationBlock object.

    Returns:
      The instrumentation result block for the instrumentation run.
    """
    instrumentation_block.add_value(line)
    return instrumentation_block

  def _parse_unknown_block_line(self, instrumentation_block, line):
    """Parses a line from the instrumentation output from the UNKNOWN
    parser state.

    Args:
      instrumentation_block: _InstrumentationBlock, the current
        instrumentation block, where the correct categorization is not
        yet known.
      line: string, the raw instrumentation output line to be used to
        determine the correct categorization.

    Returns:
      The next instrumentation block to continue parsing with. Usually,
      this is the same instrumentation block but with the state
      transitioned appropriately.
    """
    if line.startswith(_InstrumentationStructurePrefixes.STATUS):
      return self._parse_method_block_line(
          self._transition_instrumentation_block(
              instrumentation_block,
              new_state=_InstrumentationBlockStates.METHOD,
          ),
          line,
      )
    elif (line.startswith(_InstrumentationStructurePrefixes.RESULT) or
          _InstrumentationStructurePrefixes.FAILED in line):
      return self._parse_result_block_line(
          self._transition_instrumentation_block(
              instrumentation_block,
              new_state=_InstrumentationBlockStates.RESULT,
          ),
          line,
      )
    else:
      # This would only really execute if instrumentation failed to start.
      instrumentation_block.add_value(line)
      return instrumentation_block

  def _parse_line(self, instrumentation_block, line):
    """Parses an arbitrary line from the instrumentation output based upon
    the current parser state.

    Args:
      instrumentation_block: _InstrumentationBlock, an instrumentation
        block with any of the possible parser states.
      line: string, the raw instrumentation output line to parse
        appropriately.

    Returns:
      The next instrumentation block to continue parsing with.
    """
    if instrumentation_block.state == _InstrumentationBlockStates.METHOD:
      return self._parse_method_block_line(instrumentation_block, line)
    elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:
      return self._parse_result_block_line(instrumentation_block, line)
    else:
      return self._parse_unknown_block_line(instrumentation_block, line)

  def _finish_parsing(self, instrumentation_block):
    """Finishes parsing the instrumentation result block for the final
    instrumentation run status.

    Args:
      instrumentation_block: _InstrumentationBlock, the instrumentation
        result block for the instrumentation run. Potentially, this
        could actually be a method block if the instrumentation output
        is malformed.

    Returns:
      A boolean indicating whether the instrumentation run completed
      with all the tests passing.

    Raises:
      signals.TestError: Error raised if the instrumentation failed to
        complete with either a pass or fail status.
    """
    formatter = _InstrumentationBlockFormatter(instrumentation_block)
    return formatter.has_completed_result_block_format(
        self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)

  def parse_instrumentation_options(self, parameters=None):
    """Returns the options for the instrumentation test from user_params.

    By default, this method assumes that the correct instrumentation options
    all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX.

    Args:
      parameters: dict, the key value pairs representing an assortment
        of parameters including instrumentation options. Usually,
        this argument will be from self.user_params.

    Returns:
      A dictionary of options/parameters for the instrumentation test,
      with the prefix stripped from each key.
    """
    if parameters is None:
      return {}
    filtered_parameters = {}
    for parameter_key, parameter_value in parameters.items():
      if parameter_key.startswith(self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):
        option_key = parameter_key[len(self.
                                       DEFAULT_INSTRUMENTATION_OPTION_PREFIX):]
        filtered_parameters[option_key] = parameter_value
    return filtered_parameters

  def run_instrumentation_test(self,
                               device,
                               package,
                               options=None,
                               prefix=None,
                               runner=None):
    """Runs instrumentation tests on a device and creates test records.

    Args:
      device: AndroidDevice, the device to run instrumentation tests on.
      package: string, the package name of the instrumentation tests.
      options: dict, Instrumentation options for the instrumentation
        tests.
      prefix: string, an optional prefix for parser output for
        distinguishing between instrumentation test runs.
      runner: string, the runner to use for the instrumentation package,
        default to DEFAULT_INSTRUMENTATION_RUNNER.

    Returns:
      A boolean indicating whether or not all the instrumentation test
      methods passed.

    Raises:
      TestError if the instrumentation run crashed or if parsing the
      output failed.
    """
    # Dictionary hack to allow overwriting the instrumentation_block in the
    # parse_instrumentation closure.
    instrumentation_block = [_InstrumentationBlock(prefix=prefix)]

    def parse_instrumentation(raw_line):
      # adb delivers raw bytes; decode and strip the trailing newline
      # before feeding the line into the parser state machine.
      line = raw_line.rstrip().decode('utf-8')
      logging.info(line)
      instrumentation_block[0] = self._parse_line(instrumentation_block[0],
                                                  line)

    device.adb.instrument(package=package,
                          options=options,
                          runner=runner,
                          handler=parse_instrumentation)
    return self._finish_parsing(instrumentation_block[0])
class BaseInstrumentationTestClass(InstrumentationTestMixin,
                                   base_test.BaseTestClass):
  """Base class for all instrumentation test classes to inherit from.

  This class extends the BaseTestClass to add functionality to run and parse
  the output of instrumentation runs. All behavior comes from the
  InstrumentationTestMixin; this class exists so tests can inherit from a
  single base instead of combining the mixin with BaseTestClass themselves.

  Attributes:
    DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
      instrumentation params contained within user params.
    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
      message to set if something has prevented something in the
      instrumentation test run from completing properly.
  """
| 35.658763 | 98 | 0.726358 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from enum import Enum
from mobly import base_test
from mobly import records
from mobly import signals
from mobly import utils
class _InstrumentationStructurePrefixes:
"""Class containing prefixes that structure insturmentation output.
Android instrumentation generally follows the following format:
.. code-block:: none
INSTRUMENTATION_STATUS: ...
...
INSTRUMENTATION_STATUS: ...
INSTRUMENTATION_STATUS_CODE: ...
INSTRUMENTATION_STATUS: ...
...
INSTRUMENTATION_STATUS: ...
INSTRUMENTATION_STATUS_CODE: ...
...
INSTRUMENTATION_RESULT: ...
...
INSTRUMENTATION_RESULT: ...
...
INSTRUMENTATION_CODE: ...
This means that these prefixes can be used to guide parsing
the output of the instrumentation command into the different
instrumetnation test methods.
Refer to the following Android Framework package for more details:
.. code-block:: none
com.android.commands.am.AM
"""
STATUS = 'INSTRUMENTATION_STATUS:'
STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE:'
RESULT = 'INSTRUMENTATION_RESULT:'
CODE = 'INSTRUMENTATION_CODE:'
FAILED = 'INSTRUMENTATION_FAILED:'
class _InstrumentationKnownStatusKeys:
"""Commonly used keys used in instrumentation output for listing
instrumentation test method result properties.
An instrumenation status line usually contains a key-value pair such as
the following:
.. code-block:: none
INSTRUMENTATION_STATUS: <key>=<value>
Some of these key-value pairs are very common and represent test case
properties. This mapping is used to handle each of the corresponding
key-value pairs different than less important key-value pairs.
Refer to the following Android Framework packages for more details:
.. code-block:: none
android.app.Instrumentation
android.support.test.internal.runner.listener.InstrumentationResultPrinter
TODO: Convert android.support.* to androidx.*,
(https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
"""
CLASS = 'class'
ERROR = 'Error'
STACK = 'stack'
TEST = 'test'
STREAM = 'stream'
class _InstrumentationStatusCodes:
"""A mapping of instrumentation status codes to test method results.
When instrumentation runs, at various points output is created in a series
of blocks that terminate as follows:
.. code-block:: none
INSTRUMENTATION_STATUS_CODE: 1
These blocks typically have several status keys in them, and they indicate
the progression of a particular instrumentation test method. When the
corresponding instrumentation test method finishes, there is generally a
line which includes a status code that gives thes the test result.
The UNKNOWN status code is not an actual status code and is only used to
represent that a status code has not yet been read for an instrumentation
block.
Refer to the following Android Framework package for more details:
.. code-block:: none
android.support.test.internal.runner.listener.InstrumentationResultPrinter
TODO: Convert android.support.* to androidx.*,
(https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
"""
UNKNOWN = None
OK = '0'
START = '1'
IN_PROGRESS = '2'
ERROR = '-1'
FAILURE = '-2'
IGNORED = '-3'
ASSUMPTION_FAILURE = '-4'
class _InstrumentationStatusCodeCategories:
"""A mapping of instrumentation test method results to categories.
Aside from the TIMING category, these categories roughly map to Mobly
signals and are used for determining how a particular instrumentation test
method gets recorded.
"""
TIMING = [
_InstrumentationStatusCodes.START,
_InstrumentationStatusCodes.IN_PROGRESS,
]
PASS = [
_InstrumentationStatusCodes.OK,
]
FAIL = [
_InstrumentationStatusCodes.ERROR,
_InstrumentationStatusCodes.FAILURE,
]
SKIPPED = [
_InstrumentationStatusCodes.IGNORED,
_InstrumentationStatusCodes.ASSUMPTION_FAILURE,
]
class _InstrumentationKnownResultKeys:
"""Commonly used keys for outputting instrumentation errors.
When instrumentation finishes running all of the instrumentation test
methods, a result line will appear as follows:
.. code-block:: none
INSTRUMENTATION_RESULT:
If something wrong happened during the instrumentation run such as an
application under test crash, the line will appear similarly as thus:
.. code-block:: none
INSTRUMENTATION_RESULT: shortMsg=Process crashed.
Since these keys indicate that something wrong has happened to the
instrumentation run, they should be checked for explicitly.
Refer to the following documentation page for more information:
.. code-block:: none
https://developer.android.com/reference/android/app/ActivityManager.ProcessErrorStateInfo.html
"""
LONGMSG = 'longMsg'
SHORTMSG = 'shortMsg'
class _InstrumentationResultSignals:
"""Instrumenttion result block strings for signalling run completion.
The final section of the instrumentation output generally follows this
format:
.. code-block:: none
INSTRUMENTATION_RESULT: stream=
...
INSTRUMENTATION_CODE -1
Inside of the ellipsed section, one of these signaling strings should be
present. If they are not present, this usually means that the
instrumentation run has failed in someway such as a crash. Because the
final instrumentation block simply summarizes information, simply roughly
checking for a particilar string should be sufficient to check to a proper
run completion as the contents of the instrumentation result block don't
really matter.
Refer to the following JUnit package for more details:
.. code-block:: none
junit.textui.ResultPrinter
"""
FAIL = 'FAILURES!!!'
PASS = 'OK ('
class _InstrumentationBlockStates(Enum):
"""States used for determing what the parser is currently parsing.
The parse always starts and ends a block in the UNKNOWN state, which is
used to indicate that either a method or a result block (matching the
METHOD and RESULT states respectively) are valid follow ups, which means
that parser should be checking for a structure prefix that indicates which
of those two states it should transition to. If the parser is in the
METHOD state, then the parser will be parsing input into test methods.
Otherwise, the parse can simply concatenate all the input to check for
some final run completion signals.
"""
UNKNOWN = 0
METHOD = 1
RESULT = 2
class _InstrumentationBlock:
"""Container class for parsed instrumentation output for instrumentation
test methods.
Instrumentation test methods typically follow the follwoing format:
.. code-block:: none
INSTRUMENTATION_STATUS: <key>=<value>
...
INSTRUMENTATION_STATUS: <key>=<value>
INSTRUMENTATION_STATUS_CODE: <status code #>
The main issue with parsing this however is that the key-value pairs can
span multiple lines such as this:
.. code-block:: none
INSTRUMENTATION_STATUS: stream=
Error in ...
...
Or, such as this:
.. code-block:: none
INSTRUMENTATION_STATUS: stack=...
...
Because these keys are poentially very long, constant string contatention
is potentially inefficent. Instead, this class builds up a buffer to store
the raw output until it is processed into an actual test result by the
_InstrumentationBlockFormatter class.
Additionally, this class also serves to store the parser state, which
means that the BaseInstrumentationTestClass does not need to keep any
potentially volatile instrumentation related state, so multiple
instrumentation runs should have completely separate parsing states.
This class is also used for storing result blocks although very little
needs to be done for those.
Attributes:
begin_time: string, optional timestamp for when the test corresponding
to the instrumentation block began.
current_key: string, the current key that is being parsed, default to
_InstrumentationKnownStatusKeys.STREAM.
error_message: string, an error message indicating that something
unexpected happened during a instrumentatoin test method.
known_keys: dict, well known keys that are handled uniquely.
prefix: string, a prefix to add to the class name of the
instrumentation test methods.
previous_instrumentation_block: _InstrumentationBlock, the last parsed
instrumentation block.
state: _InstrumentationBlockStates, the current state of the parser.
status_code: string, the state code for an instrumentation method
block.
unknown_keys: dict, arbitrary keys that are handled generically.
"""
def __init__(self,
state=_InstrumentationBlockStates.UNKNOWN,
prefix=None,
previous_instrumentation_block=None):
self.state = state
self.prefix = prefix
self.previous_instrumentation_block = previous_instrumentation_block
if previous_instrumentation_block:
# The parser never needs lookback for two previous blocks,
# so unset to allow previous blocks to get garbage collected.
previous_instrumentation_block.previous_instrumentation_block = None
self._empty = True
self.error_message = ''
self.status_code = _InstrumentationStatusCodes.UNKNOWN
self.current_key = _InstrumentationKnownStatusKeys.STREAM
self.known_keys = {
_InstrumentationKnownStatusKeys.STREAM: [],
_InstrumentationKnownStatusKeys.CLASS: [],
_InstrumentationKnownStatusKeys.ERROR: [],
_InstrumentationKnownStatusKeys.STACK: [],
_InstrumentationKnownStatusKeys.TEST: [],
_InstrumentationKnownResultKeys.LONGMSG: [],
_InstrumentationKnownResultKeys.SHORTMSG: [],
}
self.unknown_keys = defaultdict(list)
self.begin_time = None
@property
def is_empty(self):
"""Deteremines whether or not anything has been parsed with this
instrumentation block.
Returns:
A boolean indicating whether or not the this instrumentation block
has parsed and contains any output.
"""
return self._empty
def set_error_message(self, error_message):
"""Sets an error message on an instrumentation block.
This method is used exclusively to indicate that a test method failed
to complete, which is usually cause by a crash of some sort such that
the test method is marked as error instead of ignored.
Args:
error_message: string, an error message to be added to the
TestResultRecord to explain that something wrong happened.
"""
self._empty = False
self.error_message = error_message
def _remove_structure_prefix(self, prefix, line):
"""Helper function for removing the structure prefix for parsing.
Args:
prefix: string, a _InstrumentationStructurePrefixes to remove from
the raw output.
line: string, the raw line from the instrumentation output.
Returns:
A string containing a key value pair descripting some property
of the current instrumentation test method.
"""
return line[len(prefix):].strip()
def set_status_code(self, status_code_line):
"""Sets the status code for the instrumentation test method, used in
determining the test result.
Args:
status_code_line: string, the raw instrumentation output line that
contains the status code of the instrumentation block.
"""
self._empty = False
self.status_code = self._remove_structure_prefix(
_InstrumentationStructurePrefixes.STATUS_CODE,
status_code_line,
)
if self.status_code == _InstrumentationStatusCodes.START:
self.begin_time = utils.get_current_epoch_time()
def set_key(self, structure_prefix, key_line):
"""Sets the current key for the instrumentation block.
For unknown keys, the key is added to the value list in order to
better contextualize the value in the output.
Args:
structure_prefix: string, the structure prefix that was matched
and that needs to be removed.
key_line: string, the raw instrumentation output line that contains
the key-value pair.
"""
self._empty = False
key_value = self._remove_structure_prefix(
structure_prefix,
key_line,
)
if '=' in key_value:
(key, value) = key_value.split('=', 1)
self.current_key = key
if key in self.known_keys:
self.known_keys[key].append(value)
else:
self.unknown_keys[key].append(key_value)
def add_value(self, line):
"""Adds unstructured or multi-line value output to the current parsed
instrumentation block for outputting later.
Usually, this will add extra lines to the value list for the current
key-value pair. However, sometimes, such as when instrumentation
failed to start, output does not follow the structured prefix format.
In this case, adding all of the output is still useful so that a user
can debug the issue.
Args:
line: string, the raw instrumentation line to append to the value
list.
"""
# Don't count whitespace only lines.
if line.strip():
self._empty = False
if self.current_key in self.known_keys:
self.known_keys[self.current_key].append(line)
else:
self.unknown_keys[self.current_key].append(line)
def transition_state(self, new_state):
"""Transitions or sets the current instrumentation block to the new
parser state.
Args:
new_state: _InstrumentationBlockStates, the state that the parser
should transition to.
Returns:
A new instrumentation block set to the new state, representing
the start of parsing a new instrumentation test method.
Alternatively, if the current instrumentation block represents the
start of parsing a new instrumentation block (state UNKNOWN), then
this returns the current instrumentation block set to the now
known parsing state.
"""
if self.state == _InstrumentationBlockStates.UNKNOWN:
self.state = new_state
return self
else:
next_block = _InstrumentationBlock(
state=new_state,
prefix=self.prefix,
previous_instrumentation_block=self,
)
if self.status_code in _InstrumentationStatusCodeCategories.TIMING:
next_block.begin_time = self.begin_time
return next_block
class _InstrumentationBlockFormatter:
"""Takes an instrumentation block and converts it into a Mobly test
result.
"""
DEFAULT_INSTRUMENTATION_METHOD_NAME = 'instrumentation_method'
def __init__(self, instrumentation_block):
self._prefix = instrumentation_block.prefix
self._status_code = instrumentation_block.status_code
self._error_message = instrumentation_block.error_message
self._known_keys = {}
self._unknown_keys = {}
for key, value in instrumentation_block.known_keys.items():
self._known_keys[key] = '\n'.join(
instrumentation_block.known_keys[key]).rstrip()
for key, value in instrumentation_block.unknown_keys.items():
self._unknown_keys[key] = '\n'.join(
instrumentation_block.unknown_keys[key]).rstrip()
self._begin_time = instrumentation_block.begin_time
def _get_name(self):
"""Gets the method name of the test method for the instrumentation
method block.
Returns:
A string containing the name of the instrumentation test method's
test or a default name if no name was parsed.
"""
if self._known_keys[_InstrumentationKnownStatusKeys.TEST]:
return self._known_keys[_InstrumentationKnownStatusKeys.TEST]
else:
return self.DEFAULT_INSTRUMENTATION_METHOD_NAME
def _get_class(self):
"""Gets the class name of the test method for the instrumentation
method block.
Returns:
A string containing the class name of the instrumentation test
method's test or empty string if no name was parsed. If a prefix
was specified, then the prefix will be prepended to the class
name.
"""
class_parts = [
self._prefix, self._known_keys[_InstrumentationKnownStatusKeys.CLASS]
]
return '.'.join(filter(None, class_parts))
def _get_full_name(self):
"""Gets the qualified name of the test method corresponding to the
instrumentation block.
Returns:
A string containing the fully qualified name of the
instrumentation test method. If parts are missing, then degrades
steadily.
"""
full_name_parts = [self._get_class(), self._get_name()]
return '#'.join(filter(None, full_name_parts))
def _get_details(self):
"""Gets the output for the detail section of the TestResultRecord.
Returns:
A string to set for a TestResultRecord's details.
"""
detail_parts = [self._get_full_name(), self._error_message]
return '\n'.join(filter(None, detail_parts))
def _get_extras(self):
"""Gets the output for the extras section of the TestResultRecord.
Returns:
A string to set for a TestResultRecord's extras.
"""
# Add empty line to start key-value pairs on a new line.
extra_parts = ['']
for value in self._unknown_keys.values():
extra_parts.append(value)
extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STREAM])
extra_parts.append(
self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])
extra_parts.append(
self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])
extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.ERROR])
if self._known_keys[
_InstrumentationKnownStatusKeys.STACK] not in self._known_keys[
_InstrumentationKnownStatusKeys.STREAM]:
extra_parts.append(
self._known_keys[_InstrumentationKnownStatusKeys.STACK])
return '\n'.join(filter(None, extra_parts))
def _is_failed(self):
"""Determines if the test corresponding to the instrumentation block
failed.
This method can not be used to tell if a test method passed and
should not be used for such a purpose.
Returns:
A boolean indicating if the test method failed.
"""
if self._status_code in _InstrumentationStatusCodeCategories.FAIL:
return True
elif (self._known_keys[_InstrumentationKnownStatusKeys.STACK] and
self._status_code != _InstrumentationStatusCodes.ASSUMPTION_FAILURE):
return True
elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:
return True
elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:
return True
elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:
return True
else:
return False
def create_test_record(self, mobly_test_class):
"""Creates a TestResultRecord for the instrumentation block.
Args:
mobly_test_class: string, the name of the Mobly test case
executing the instrumentation run.
Returns:
A TestResultRecord with an appropriate signals exception
representing the instrumentation test method's result status.
"""
details = self._get_details()
extras = self._get_extras()
tr_record = records.TestResultRecord(
t_name=self._get_full_name(),
t_class=mobly_test_class,
)
if self._begin_time:
tr_record.begin_time = self._begin_time
if self._is_failed():
tr_record.test_fail(e=signals.TestFailure(details=details, extras=extras))
elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:
tr_record.test_skip(e=signals.TestSkip(details=details, extras=extras))
elif self._status_code in _InstrumentationStatusCodeCategories.PASS:
tr_record.test_pass(e=signals.TestPass(details=details, extras=extras))
elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:
if self._error_message:
tr_record.test_error(
e=signals.TestError(details=details, extras=extras))
else:
tr_record = None
else:
tr_record.test_error(e=signals.TestError(details=details, extras=extras))
if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:
tr_record.termination_signal.stacktrace = self._known_keys[
_InstrumentationKnownStatusKeys.STACK]
return tr_record
def has_completed_result_block_format(self, error_message):
"""Checks the instrumentation result block for a signal indicating
normal completion.
Args:
error_message: string, the error message to give if the
instrumentation run did not complete successfully.-
Returns:
A boolean indicating whether or not the instrumentation run passed
or failed overall.
Raises:
signals.TestError: Error raised if the instrumentation run did not
complete because of a crash or some other issue.
"""
extras = self._get_extras()
if _InstrumentationResultSignals.PASS in extras:
return True
elif _InstrumentationResultSignals.FAIL in extras:
return False
else:
raise signals.TestError(details=error_message, extras=extras)
class InstrumentationTestMixin:
    """A mixin for Mobly test classes to inherit from for instrumentation tests.

    This class should be used in a subclass of both BaseTestClass and this class
    in order to provide instrumentation test capabilities. This mixin is
    explicitly for the case where the underlying BaseTestClass cannot be
    replaced with BaseInstrumentationTestClass. In general, prefer using
    BaseInstrumentationTestClass instead.

    Attributes:
        DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
            instrumentation params contained within user params.
        DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
            message to set if something has prevented the instrumentation
            test run from completing properly.
    """
    DEFAULT_INSTRUMENTATION_OPTION_PREFIX = 'instrumentation_option_'
    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE = ('instrumentation run exited '
                                             'unexpectedly')

    def _previous_block_never_completed(self, current_block, previous_block,
                                        new_state):
        """Checks if the previous instrumentation method block completed.

        Args:
            current_block: _InstrumentationBlock, the current instrumentation
                block to check for being a different instrumentation test
                method.
            previous_block: _InstrumentationBlock, the previous
                instrumentation block to check for an incomplete status.
            new_state: _InstrumentationBlockStates, the next state for the
                parser, used to check for the instrumentation run ending
                with an incomplete test.

        Returns:
            A boolean indicating whether the previous instrumentation block
            completed executing.
        """
        if previous_block:
            # The previous block never completed if it is still in an
            # in-progress ("timing") status while the parser is already moving
            # on to a new test method or to the final result block.
            previously_timing_block = (previous_block.status_code
                                       in _InstrumentationStatusCodeCategories.TIMING)
            currently_new_block = (current_block.status_code
                                   == _InstrumentationStatusCodes.START or
                                   new_state == _InstrumentationBlockStates.RESULT)
            return all([previously_timing_block, currently_new_block])
        else:
            return False

    def _create_formatters(self, instrumentation_block, new_state):
        """Creates the _InstrumentationBlockFormatters for outputting the
        instrumentation method blocks that have finished parsing.

        Args:
            instrumentation_block: _InstrumentationBlock, the current
                instrumentation method block to create formatters based upon.
            new_state: _InstrumentationBlockState, the next state that the
                parser will transition to.

        Returns:
            A list of the formatters that need to create and add
            TestResultRecords to the test results.
        """
        formatters = []
        if self._previous_block_never_completed(
                current_block=instrumentation_block,
                previous_block=instrumentation_block.previous_instrumentation_block,
                new_state=new_state):
            # Mark the interrupted block with the default error message so it
            # gets reported as an error instead of being silently dropped.
            instrumentation_block.previous_instrumentation_block.set_error_message(
                self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
            formatters.append(
                _InstrumentationBlockFormatter(
                    instrumentation_block.previous_instrumentation_block))
        if not instrumentation_block.is_empty:
            formatters.append(_InstrumentationBlockFormatter(instrumentation_block))
        return formatters

    def _transition_instrumentation_block(
            self,
            instrumentation_block,
            new_state=_InstrumentationBlockStates.UNKNOWN):
        """Transitions and finishes the current instrumentation block.

        Args:
            instrumentation_block: _InstrumentationBlock, the current
                instrumentation block to finish.
            new_state: _InstrumentationBlockState, the next state for the
                parser to transition to.

        Returns:
            The new instrumentation block to use for storing parsed
            instrumentation output.
        """
        formatters = self._create_formatters(instrumentation_block, new_state)
        for formatter in formatters:
            test_record = formatter.create_test_record(self.TAG)
            # create_test_record may return None (nothing to report).
            if test_record:
                self.results.add_record(test_record)
                self.summary_writer.dump(test_record.to_dict(),
                                         records.TestSummaryEntryType.RECORD)
        return instrumentation_block.transition_state(new_state=new_state)

    def _parse_method_block_line(self, instrumentation_block, line):
        """Parses the instrumentation method block's line.

        Args:
            instrumentation_block: _InstrumentationBlock, the current
                instrumentation method block.
            line: string, the raw instrumentation output line to parse.

        Returns:
            The next instrumentation block, which should be used to continue
            parsing instrumentation output.
        """
        if line.startswith(_InstrumentationStructurePrefixes.STATUS):
            instrumentation_block.set_key(_InstrumentationStructurePrefixes.STATUS,
                                          line)
            return instrumentation_block
        elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):
            # A status code line terminates the current method block.
            instrumentation_block.set_status_code(line)
            return self._transition_instrumentation_block(instrumentation_block)
        elif line.startswith(_InstrumentationStructurePrefixes.RESULT):
            # Unexpected transition from method block -> result block
            instrumentation_block.set_key(_InstrumentationStructurePrefixes.RESULT,
                                          line)
            # NOTE(review): `_parse_result_line` is not defined on this mixin
            # (only `_parse_result_block_line` is), so this branch would raise
            # AttributeError if ever hit -- confirm the intended method name.
            return self._parse_result_line(
                self._transition_instrumentation_block(
                    instrumentation_block,
                    new_state=_InstrumentationBlockStates.RESULT,
                ),
                line,
            )
        else:
            instrumentation_block.add_value(line)
            return instrumentation_block

    def _parse_result_block_line(self, instrumentation_block, line):
        """Parses the instrumentation result block's line.

        Args:
            instrumentation_block: _InstrumentationBlock, the instrumentation
                result block for the instrumentation run.
            line: string, the raw instrumentation output to add to the
                instrumentation result block's _InstrumentationResultBlock
                object.

        Returns:
            The instrumentation result block for the instrumentation run.
        """
        instrumentation_block.add_value(line)
        return instrumentation_block

    def _parse_unknown_block_line(self, instrumentation_block, line):
        """Parses a line from the instrumentation output from the UNKNOWN
        parser state.

        Args:
            instrumentation_block: _InstrumentationBlock, the current
                instrumentation block, where the correct categorization is
                not yet known.
            line: string, the raw instrumentation output line to be used to
                determine the correct categorization.

        Returns:
            The next instrumentation block to continue parsing with. Usually,
            this is the same instrumentation block but with the state
            transitioned appropriately.
        """
        if line.startswith(_InstrumentationStructurePrefixes.STATUS):
            return self._parse_method_block_line(
                self._transition_instrumentation_block(
                    instrumentation_block,
                    new_state=_InstrumentationBlockStates.METHOD,
                ),
                line,
            )
        elif (line.startswith(_InstrumentationStructurePrefixes.RESULT) or
              _InstrumentationStructurePrefixes.FAILED in line):
            return self._parse_result_block_line(
                self._transition_instrumentation_block(
                    instrumentation_block,
                    new_state=_InstrumentationBlockStates.RESULT,
                ),
                line,
            )
        else:
            # This would only really execute if instrumentation failed to start.
            instrumentation_block.add_value(line)
            return instrumentation_block

    def _parse_line(self, instrumentation_block, line):
        """Parses an arbitrary line from the instrumentation output based upon
        the current parser state.

        Args:
            instrumentation_block: _InstrumentationBlock, an instrumentation
                block with any of the possible parser states.
            line: string, the raw instrumentation output line to parse
                appropriately.

        Returns:
            The next instrumentation block to continue parsing with.
        """
        if instrumentation_block.state == _InstrumentationBlockStates.METHOD:
            return self._parse_method_block_line(instrumentation_block, line)
        elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:
            return self._parse_result_block_line(instrumentation_block, line)
        else:
            return self._parse_unknown_block_line(instrumentation_block, line)

    def _finish_parsing(self, instrumentation_block):
        """Finishes parsing the instrumentation result block for the final
        instrumentation run status.

        Args:
            instrumentation_block: _InstrumentationBlock, the instrumentation
                result block for the instrumentation run. Potentially, this
                could actually be a method block if the instrumentation
                output is malformed.

        Returns:
            A boolean indicating whether the instrumentation run completed
            with all the tests passing.

        Raises:
            signals.TestError: Error raised if the instrumentation failed to
                complete with either a pass or fail status.
        """
        formatter = _InstrumentationBlockFormatter(instrumentation_block)
        return formatter.has_completed_result_block_format(
            self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)

    def parse_instrumentation_options(self, parameters=None):
        """Returns the options for the instrumentation test from user_params.

        By default, this method assumes that the correct instrumentation
        options all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX.

        Args:
            parameters: dict, the key value pairs representing an assortment
                of parameters including instrumentation options. Usually,
                this argument will be from self.user_params.

        Returns:
            A dictionary of options/parameters for the instrumentation test.
        """
        if parameters is None:
            return {}
        filtered_parameters = {}
        for parameter_key, parameter_value in parameters.items():
            if parameter_key.startswith(self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):
                # Strip the prefix so only the bare option name remains.
                option_key = parameter_key[len(self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):]
                filtered_parameters[option_key] = parameter_value
        return filtered_parameters

    def run_instrumentation_test(self,
                                 device,
                                 package,
                                 options=None,
                                 prefix=None,
                                 runner=None):
        """Runs instrumentation tests on a device and creates test records.

        Args:
            device: AndroidDevice, the device to run instrumentation tests on.
            package: string, the package name of the instrumentation tests.
            options: dict, Instrumentation options for the instrumentation
                tests.
            prefix: string, an optional prefix for parser output for
                distinguishing between instrumentation test runs.
            runner: string, the runner to use for the instrumentation package,
                default to DEFAULT_INSTRUMENTATION_RUNNER.

        Returns:
            A boolean indicating whether or not all the instrumentation test
            methods passed.

        Raises:
            TestError if the instrumentation run crashed or if parsing the
            output failed.
        """
        # Single-element-list hack to allow rebinding the current
        # instrumentation_block from inside the parse_instrumentation closure.
        instrumentation_block = [_InstrumentationBlock(prefix=prefix)]

        def parse_instrumentation(raw_line):
            # raw_line is bytes from adb; decode and feed it to the parser.
            line = raw_line.rstrip().decode('utf-8')
            logging.info(line)
            instrumentation_block[0] = self._parse_line(instrumentation_block[0],
                                                        line)

        device.adb.instrument(package=package,
                              options=options,
                              runner=runner,
                              handler=parse_instrumentation)
        return self._finish_parsing(instrumentation_block[0])
class BaseInstrumentationTestClass(InstrumentationTestMixin,
                                   base_test.BaseTestClass):
    """Base class for all instrumentation test classes to inherit from.

    This class extends the BaseTestClass to add functionality to run and parse
    the output of instrumentation runs.

    Attributes:
        DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
            instrumentation params contained within user params.
        DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
            message to set if something has prevented the instrumentation
            test run from completing properly.
    """
| 1,990 | 0 | 77 |
b011b34bc49f7b31bd89f3addb4cbcefa9643f84 | 1,847 | py | Python | augmentor/product_fun.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | augmentor/product_fun.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | augmentor/product_fun.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | from functools import singledispatch
import streamlit as st
from sagas.ofbiz.entities import MetaEntity
from sagas.ofbiz.services import OfService
from sagas.ofbiz.entities import OfEntity as e, format
from sagas.ofbiz.services import OfService as s, oc
from datetime import datetime
# product("GZ-2002", 'price')
# product(dt('2013-07-04 00:00:00'), "Test_product_A")
@singledispatch
@product.register(str)
@product.register(datetime)
exports={product}
| 30.278689 | 112 | 0.651326 | from functools import singledispatch
import streamlit as st
from sagas.ofbiz.entities import MetaEntity
from sagas.ofbiz.services import OfService
from sagas.ofbiz.entities import OfEntity as e, format
from sagas.ofbiz.services import OfService as s, oc
from datetime import datetime
# product("GZ-2002", 'price')
# product(dt('2013-07-04 00:00:00'), "Test_product_A")
@singledispatch
def product(arg, prop, verbose=False):
    """Dispatch product queries on the type of the first argument.

    Concrete behavior is provided by the registered overloads: a ``str``
    product id (property lookup) and a ``datetime`` (price-from-date
    lookup).

    Args:
        arg: the dispatch key (see registered overloads).
        prop: the product property name, or the product id for the
            datetime overload.
        verbose: whether to emit extra diagnostics.

    Raises:
        NotImplementedError: when no overload is registered for type(arg).
    """
    # Include the offending type in the message to aid debugging.
    raise NotImplementedError('Unsupported type: %s' % type(arg).__name__)
def product_price(id):
    """Look up a product record and render its calculated prices in streamlit."""
    # Use a local name other than `product` to avoid shadowing the
    # module-level singledispatch function.
    rec = MetaEntity("Product").record(id)
    ok, ret = OfService().calculateProductPrice(product=rec)
    st.markdown(f"The **default** price is `{ret['defaultPrice']}`, the **list** price is `{ret['listPrice']}`")
def output_rec(rec, show_null=True):
    """Render a record's key/value pairs as a streamlit table.

    Args:
        rec: mapping of field name -> value to display.
        show_null: when False, entries whose value is None are omitted.
    """
    import sagas
    # (Removed the unused `table_header` local from the original.)
    table_data = [(k, v) for k, v in rec.items()
                  if show_null or v is not None]
    st.table(sagas.to_df(table_data, ['key', 'val']))
def price_from_date(id, dt):
    """Fetch and display the AVERAGE_COST component price effective from ``dt``."""
    from_ts = oc.j.Timestamp.valueOf(str(dt))
    props = e().getProductPrice(
        productId=id,
        productPriceTypeId='AVERAGE_COST',
        productPricePurposeId='COMPONENT_PRICE',
        productStoreGroupId='Test_group',
        currencyUomId='USD',
        fromDate=from_ts,
    )
    output_rec(props, False)
@product.register(str)
def _(arg, prop, verbose=False):
    """str overload: look up a property of the product with id ``arg``."""
    st.write(".. argument is of type ", type(arg))
    if prop == 'price':
        product_price(arg)
        return
    st.error(f'No such prop {prop}')
@product.register(datetime)
def _(arg, product_id, verbose=False):
    """datetime overload: show the price for ``product_id`` effective at ``arg``."""
    st.write(".. argument is of type ", type(arg))
    price_from_date(product_id, arg)

# Names exported by this module.
exports={product}
| 1,249 | 0 | 135 |
9d01fa892ae94aaae599cea331e5dce4d19d76ec | 883 | py | Python | functional_tests/test_layout_and_styling.py | cdcarter/pup-gets-it-done | c539907ee128d8aefb478035f3a3ba3d3bcf7817 | [
"BSD-3-Clause"
] | null | null | null | functional_tests/test_layout_and_styling.py | cdcarter/pup-gets-it-done | c539907ee128d8aefb478035f3a3ba3d3bcf7817 | [
"BSD-3-Clause"
] | null | null | null | functional_tests/test_layout_and_styling.py | cdcarter/pup-gets-it-done | c539907ee128d8aefb478035f3a3ba3d3bcf7817 | [
"BSD-3-Clause"
] | null | null | null | """ Functional tests for the Obey simple list app """
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
    """Tests of the layout and styling of the lists app."""

    def _assert_input_centered(self):
        # The input box should sit in the middle of the 1024px-wide window
        # (centre x == 512), within a 5px tolerance.
        box = self.get_item_input_box()
        self.assertAlmostEqual(
            box.location['x'] + box.size['width'] / 2,
            512,
            delta=5
        )

    def test_layout_and_styling(self):
        """The home page looks roughly like what we expect it to."""
        self.browser.get(self.server_url)
        self.browser.set_window_size(1024, 768)
        self._assert_input_centered()
        # The input box stays centered after submitting an item.
        self._type_and_submit_item('Learn python')
        self._wait_for_row_in_list_table('1: Learn python')
        self._assert_input_centered()
| 28.483871 | 64 | 0.614949 | """ Functional tests for the Obey simple list app """
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
    """ Tests of the layout and styling of the lists app."""

    def test_layout_and_styling(self):
        """ The home page looks roughly what we expect it to """
        self.browser.get(self.server_url)
        self.browser.set_window_size(1024, 768)
        # The new-item input box should be horizontally centered: the window
        # is 1024px wide, so its midpoint must be at x == 512 (+/- 5px).
        inputbox = self.get_item_input_box()
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=5
        )
        # The input box stays centered after submitting an item.
        self._type_and_submit_item('Learn python')
        self._wait_for_row_in_list_table('1: Learn python')
        inputbox = self.get_item_input_box()
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=5
        )
| 0 | 0 | 0 |
9a18ea64965476bbdd95bfa372c7c1b5f688b1fe | 2,701 | py | Python | tests/test_config.py | alblue/adfs-aws-login | b695bfe58e13584b0c40b9314a3833c9a5944a12 | [
"Apache-2.0"
] | 3 | 2020-03-19T16:27:38.000Z | 2021-05-12T17:36:31.000Z | tests/test_config.py | alblue/adfs-aws-login | b695bfe58e13584b0c40b9314a3833c9a5944a12 | [
"Apache-2.0"
] | 6 | 2020-11-09T10:11:43.000Z | 2021-08-16T06:13:54.000Z | tests/test_config.py | alblue/adfs-aws-login | b695bfe58e13584b0c40b9314a3833c9a5944a12 | [
"Apache-2.0"
] | 1 | 2021-04-20T13:25:42.000Z | 2021-04-20T13:25:42.000Z | from adfs_aws_login import conf
import pytest
import argparse
try:
# For Python 3.5 and later
import configparser
except ImportError:
# Fall back to Python 2
import ConfigParser as configparser # noqa: F401
args = {
"profile": "test-profile",
"user": "test@example.com",
"no_prompt": False,
"duration": None,
"role": None,
}
params = {
"adfs_role_arn": "arn:aws:iam::123456789012:role/test_role",
"adfs_login_url": "https://testauthority",
"adfs_default_username": "test@example.com",
}
sections = {"profile test-profile": params}
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 30.348315 | 86 | 0.724917 | from adfs_aws_login import conf
import pytest
import argparse
try:
# For Python 3.5 and later
import configparser
except ImportError:
# Fall back to Python 2
import ConfigParser as configparser # noqa: F401
# Canned CLI arguments, shaped like the Namespace argparse would produce.
args = {
    "profile": "test-profile",
    "user": "test@example.com",
    "no_prompt": False,
    "duration": None,
    "role": None,
}

# Canned values for the matching AWS-config profile section.
params = {
    "adfs_role_arn": "arn:aws:iam::123456789012:role/test_role",
    "adfs_login_url": "https://testauthority",
    "adfs_default_username": "test@example.com",
}

# Full config layout, keyed by section name.
sections = {"profile test-profile": params}
def test_init_no_profile_found(args_patched):
    """conf.init() must abort with exit code 1 when no profile section exists.

    Only argparse is patched here, so the real (empty) AWS config is read
    and the requested profile is missing.
    """
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        conf.init()  # return value was unused in the original; dropped
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1
    assert args_patched.call_count == 1
def test_init(args_patched, aws_config_patched):
    """Happy path: conf.init() picks up values from the patched AWS config."""
    config = conf.init()
    # NOTE(review): the two comparisons below are bare expressions, not
    # assertions -- they can never fail. They were presumably meant to be
    # `assert mock.call_count == 1`; confirm intent before changing, since
    # the actual call counts may differ.
    for mock in aws_config_patched:
        mock.call_count == 1
    args_patched.call_count == 1
    _verify_config(args, params)
def test_init_missing_login_url(args_patched, aws_config_patched_without_login_url):
    """conf.init() must abort with exit code 1 when adfs_login_url is missing."""
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        config = conf.init()
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1
    # NOTE(review): the comparisons below are bare expressions (missing
    # `assert`) and therefore no-ops; confirm intent before changing.
    for mock in aws_config_patched_without_login_url:
        mock.call_count == 1
    args_patched.call_count == 1
def _verify_config(args, params):
    """Assert that conf.init() output matches the given args/params.

    Note: the parameters deliberately shadow the module-level ``args`` and
    ``params`` constants that callers pass in.
    """
    config = conf.init()
    expected_profile = args["profile"]
    assert config.PROFILE == expected_profile
    assert config.CONFIG_PROFILE == "profile {}".format(expected_profile)
    assert config.ROLE_ARN == params["adfs_role_arn"]
    assert config.ADFS_LOGIN_URL == params["adfs_login_url"]
    assert config.NO_PROMPT == args["no_prompt"]
@pytest.fixture
def args_patched(mocker):
    """Patch argparse so parse_args() returns the canned CLI arguments."""
    namespace = argparse.Namespace(**args)
    return mocker.patch(
        "argparse.ArgumentParser.parse_args", return_value=namespace
    )
@pytest.fixture
def aws_config_patched(mocker):
    """Patch configparser so reads yield the canned profile parameters.

    Returns the three mocks (__getitem__, read, has_section) in that order.
    """
    return (
        mocker.patch("configparser.ConfigParser.__getitem__", return_value=params),
        mocker.patch("configparser.ConfigParser.read", return_value=None),
        mocker.patch("configparser.ConfigParser.has_section", return_value=True),
    )
@pytest.fixture
def aws_config_patched_without_login_url(mocker):
    """Like aws_config_patched, but with adfs_login_url absent from the section."""
    incomplete_params = dict(params)
    del incomplete_params["adfs_login_url"]
    return (
        mocker.patch(
            "configparser.ConfigParser.__getitem__", return_value=incomplete_params
        ),
        mocker.patch("configparser.ConfigParser.read", return_value=None),
        mocker.patch("configparser.ConfigParser.has_section", return_value=True),
    )
| 1,901 | 0 | 158 |
f1d94cbd727b3be87094ad92b2a89e814c061bbf | 1,522 | py | Python | scripts/trifingerpro_model_test.py | compsciencelab/trifinger_simulation | ddd93c0b370072d706d85a6d1567f49a4de7d5c6 | [
"BSD-3-Clause"
] | 25 | 2020-08-15T12:11:10.000Z | 2022-03-18T12:43:49.000Z | scripts/trifingerpro_model_test.py | compsciencelab/trifinger_simulation | ddd93c0b370072d706d85a6d1567f49a4de7d5c6 | [
"BSD-3-Clause"
] | 12 | 2020-08-14T09:39:05.000Z | 2021-12-15T16:26:53.000Z | scripts/trifingerpro_model_test.py | compsciencelab/trifinger_simulation | ddd93c0b370072d706d85a6d1567f49a4de7d5c6 | [
"BSD-3-Clause"
] | 10 | 2020-08-17T12:13:29.000Z | 2022-02-01T18:28:05.000Z | #!/usr/bin/env python3
"""Script for testing the TriFingerPro model."""
import time
import pybullet
from trifinger_simulation import (
sim_finger,
visual_objects,
)
if __name__ == "__main__":
main()
| 23.78125 | 70 | 0.59724 | #!/usr/bin/env python3
"""Script for testing the TriFingerPro model."""
import time
import pybullet
from trifinger_simulation import (
sim_finger,
visual_objects,
)
def visualize_collisions(sim):
    """Place a red cube marker at every penetrating contact on the finger.

    Returns the list of created markers so the caller can keep them alive
    (and discard them on the next step).
    """
    contacts = pybullet.getContactPoints(
        bodyA=sim.finger_id,
        physicsClientId=sim._pybullet_client_id,
    )
    markers = []
    for contact in contacts:
        penetration = contact[8]  # contact distance; negative => penetration
        if penetration >= 0:
            continue
        markers.append(
            visual_objects.CubeMarker(
                width=0.01,
                position=contact[5],
                orientation=(0, 0, 0, 1),
                color=(1, 0, 0, 1),
            )
        )
    return markers
def main():
    """Run the TriFingerPro simulation forever, visualizing collisions."""
    time_step = 0.001

    finger = sim_finger.SimFinger(
        finger_type="trifingerpro",
        time_step=time_step,
        enable_visualization=True,
    )
    finger.reset_finger_positions_and_velocities([0.0, 0.9, -1.7] * 3)

    markers = []
    while True:
        # Hold the initial joint position.
        # (torque control alternative: finger.Action(torque=[0.3, 0.3, -0.2] * 3))
        action = finger.Action(position=[0.0, 0.9, -1.7] * 3)
        t = finger.append_desired_action(action)
        finger.get_observation(t)

        # Drop last step's markers and create fresh ones for this step.
        for m in markers:
            del m
        markers = visualize_collisions(finger)

        time.sleep(time_step)


if __name__ == "__main__":
    main()
| 1,261 | 0 | 46 |
555637fb90fa7fb22547cda74044acf3370a37fa | 1,108 | py | Python | admin/nodes/serializers.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | admin/nodes/serializers.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | admin/nodes/serializers.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | from website.project.model import User
from website.util.permissions import reduce_permissions
from admin.users.serializers import serialize_simple_node
| 29.945946 | 81 | 0.644404 | from website.project.model import User
from website.util.permissions import reduce_permissions
from admin.users.serializers import serialize_simple_node
def serialize_node(node):
    """Flatten a node into a plain dict for the admin UI.

    Python 2 code (uses dict.iteritems, so map() returns a list here).
    """
    embargo = node.embargo
    if embargo is not None:
        # Expose only the end date of an active embargo.
        embargo = node.embargo.end_date
    return {
        'id': node._id,
        'title': node.title,
        'public': node.is_public,
        'parent': node.parent_id,
        'root': node.root._id,
        'is_registration': node.is_registration,
        'date_created': node.date_created,
        'withdrawn': node.is_retracted,
        'embargo': embargo,
        # permissions maps user id -> permission list; each pair is
        # serialized into a small user summary dict.
        'contributors': map(serialize_simple_user,
                            node.permissions.iteritems()),
        'children': map(serialize_simple_node, node.nodes),
        'deleted': node.is_deleted,
        'pending_registration': node.is_pending_registration,
    }
def serialize_simple_user(user_info):
    """Serialize a (user_id, permissions) pair into a small summary dict."""
    user_id, permissions = user_info
    user = User.load(user_id)
    return {
        'id': user._id,
        'name': user.fullname,
        'permission': reduce_permissions(permissions) if permissions else None,
    }
| 906 | 0 | 46 |
f92ba0edeb8000836bfcc4ba69fb51075129f123 | 442 | py | Python | neighbourhood/migrations/0004_alter_occurrence_to_happen_at.py | Ken-mbira/THE_WATCH | a6bfb65b2f134adf3b2e584ea8ebfc79588ef0b5 | [
"MIT"
] | null | null | null | neighbourhood/migrations/0004_alter_occurrence_to_happen_at.py | Ken-mbira/THE_WATCH | a6bfb65b2f134adf3b2e584ea8ebfc79588ef0b5 | [
"MIT"
] | null | null | null | neighbourhood/migrations/0004_alter_occurrence_to_happen_at.py | Ken-mbira/THE_WATCH | a6bfb65b2f134adf3b2e584ea8ebfc79588ef0b5 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-02 17:04
from django.db import migrations, models
| 23.263158 | 89 | 0.628959 | # Generated by Django 3.2.8 on 2021-11-02 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('neighbourhood', '0003_alter_business_name'),
]
operations = [
migrations.AlterField(
model_name='occurrence',
name='to_happen_at',
field=models.DateField(blank=True, null=True, verbose_name='scheduled time'),
),
]
| 0 | 328 | 23 |
a5e2af4e1191c0d12090ee91cfe05134db34f108 | 151 | py | Python | applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/unit_tests.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | 20 | 2017-07-03T19:09:09.000Z | 2021-09-10T02:53:56.000Z | applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/unit_tests.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | null | null | null | applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/unit_tests.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
] | 9 | 2017-09-17T02:05:06.000Z | 2020-01-31T00:12:01.000Z | #!/usr/bin/python
from BoostBuild import Tester
# Drive Boost.Build's own unit tests; these do not need a compiler toolset.
t = Tester(pass_toolset=0)
t.run_build_system(extra_args="--debug --build-system=test")
t.cleanup()  # remove the temporary test workspace
| 16.777778 | 60 | 0.748344 | #!/usr/bin/python
from BoostBuild import Tester
# Drive Boost.Build's own unit tests; these do not need a compiler toolset.
t = Tester(pass_toolset=0)
t.run_build_system(extra_args="--debug --build-system=test")
t.cleanup()  # remove the temporary test workspace
| 0 | 0 | 0 |
6b146d7f310ae52413f1189415f9b41b8ca69906 | 4,035 | py | Python | cookietemple/lint/lint.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
] | null | null | null | cookietemple/lint/lint.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
] | null | null | null | cookietemple/lint/lint.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
] | null | null | null | import logging
import sys
from pathlib import Path
from typing import Any, Optional, Union
from ruamel.yaml import YAML
from cookietemple.lint.domains.cli import CliJavaLint, CliPythonLint
from cookietemple.lint.domains.gui import GuiJavaLint
from cookietemple.lint.domains.lib import LibCppLint
from cookietemple.lint.domains.pub import PubLatexLint
from cookietemple.lint.domains.web import WebWebsitePythonLint
from cookietemple.lint.template_linter import TemplateLinter
from cookietemple.util.rich import console
log = logging.getLogger(__name__)
def lint_project(project_dir: str, skip_external: bool, is_create: bool = False) -> Optional[TemplateLinter]:
    """
    Verifies the integrity of a project to best coding and practices.
    Runs a set of general linting functions, which all templates share and afterwards runs template specific linting functions.
    All results are collected and presented to the user.

    :param project_dir: The path to the .cookietemple.yml file.
    :param skip_external: Whether to skip external linters such as autopep8
    :param is_create: Whether linting is called during project creation
    :return: The linter object when linting aborts on an AssertionError;
        ``None`` when linting ran to completion (exits the process on
        unknown handles or failed checks).
    """
    # Detect which template the project is based on
    template_handle = get_template_handle(project_dir)
    log.debug(f"Detected handle {template_handle}")

    # Maps each supported template handle to its dedicated linter class.
    switcher = {
        "cli-python": CliPythonLint,
        "cli-java": CliJavaLint,
        "web-website-python": WebWebsitePythonLint,
        "gui-java": GuiJavaLint,
        "lib-cpp": LibCppLint,
        "pub-thesis-latex": PubLatexLint,
    }
    try:
        # switcher.get() yields None for unknown handles; calling None raises
        # TypeError, which is treated as "no linter found" below.
        lint_obj: Union[TemplateLinter, Any] = switcher.get(template_handle)(project_dir)  # type: ignore
    except TypeError:
        console.print(f"[bold red]Unable to find linter for handle {template_handle}! Aborting...")
        sys.exit(1)

    # Run the linting tests
    try:
        # Disable the generic check-files pass for templates where it does
        # not apply.
        disable_check_files_templates = ["pub-thesis-latex"]
        if template_handle in disable_check_files_templates:
            disable_check_files = True
        else:
            disable_check_files = False

        # Run non project specific linting
        log.debug("Running general linting.")
        console.print("[bold blue]Running general linting")
        lint_obj.lint_project(
            super(lint_obj.__class__, lint_obj), custom_check_files=disable_check_files, is_subclass_calling=False
        )
        # Run the project specific linting
        log.debug(f"Running linting of {template_handle}")
        console.print(f"[bold blue]Running {template_handle} linting")

        # for every python project that is created, autopep8 will run one time;
        # when linting an existing python cookietemple project, autopep8 is optional,
        # since (for example) it messes up Jinja syntax (if included in project)
        if "python" in template_handle:
            lint_obj.lint(is_create, skip_external)  # type: ignore
        else:
            lint_obj.lint(skip_external)  # type: ignore
    except AssertionError as e:
        console.print(f"[bold red]Critical error: {e}")
        console.print("[bold red] Stopping tests...")
        return lint_obj

    # Print the results
    lint_obj.print_results()

    # Exit code
    if len(lint_obj.failed) > 0:
        console.print(f"[bold red] {len(lint_obj.failed)} tests failed! Exiting with non-zero error code.")
        sys.exit(1)
    return None
def get_template_handle(dot_cookietemple_path: str = ".cookietemple.yml") -> str:
    """
    Reads the .cookietemple file and extracts the template handle.

    Exits the process when the file does not exist.

    :param dot_cookietemple_path: path to the directory containing the .cookietemple file
    :return: found template handle
    """
    config_path = Path(dot_cookietemple_path) / ".cookietemple.yml"
    if not config_path.exists():
        console.print("[bold red].cookietemple.yml not found. Is this a cookietemple project?")
        sys.exit(1)

    return YAML(typ="safe").load(config_path)["template_handle"]
| 38.798077 | 127 | 0.705576 | import logging
import sys
from pathlib import Path
from typing import Any, Optional, Union
from ruamel.yaml import YAML
from cookietemple.lint.domains.cli import CliJavaLint, CliPythonLint
from cookietemple.lint.domains.gui import GuiJavaLint
from cookietemple.lint.domains.lib import LibCppLint
from cookietemple.lint.domains.pub import PubLatexLint
from cookietemple.lint.domains.web import WebWebsitePythonLint
from cookietemple.lint.template_linter import TemplateLinter
from cookietemple.util.rich import console
log = logging.getLogger(__name__)
def lint_project(project_dir: str, skip_external: bool, is_create: bool = False) -> Optional[TemplateLinter]:
    """
    Verifies the integrity of a project to best coding and practices.
    Runs a set of general linting functions, which all templates share and afterwards runs template specific linting functions.
    All results are collected and presented to the user.

    :param project_dir: The path to the .cookietemple.yml file.
    :param skip_external: Whether to skip external linters such as autopep8
    :param is_create: Whether linting is called during project creation
    :return: The linter object when linting aborts on an AssertionError;
        ``None`` when linting ran to completion (exits the process on
        unknown handles or failed checks).
    """
    # Detect which template the project is based on
    template_handle = get_template_handle(project_dir)
    log.debug(f"Detected handle {template_handle}")

    # Maps each supported template handle to its dedicated linter class.
    switcher = {
        "cli-python": CliPythonLint,
        "cli-java": CliJavaLint,
        "web-website-python": WebWebsitePythonLint,
        "gui-java": GuiJavaLint,
        "lib-cpp": LibCppLint,
        "pub-thesis-latex": PubLatexLint,
    }
    try:
        # switcher.get() yields None for unknown handles; calling None raises
        # TypeError, which is treated as "no linter found" below.
        lint_obj: Union[TemplateLinter, Any] = switcher.get(template_handle)(project_dir)  # type: ignore
    except TypeError:
        console.print(f"[bold red]Unable to find linter for handle {template_handle}! Aborting...")
        sys.exit(1)

    # Run the linting tests
    try:
        # Disable the generic check-files pass for templates where it does
        # not apply.
        disable_check_files_templates = ["pub-thesis-latex"]
        if template_handle in disable_check_files_templates:
            disable_check_files = True
        else:
            disable_check_files = False

        # Run non project specific linting
        log.debug("Running general linting.")
        console.print("[bold blue]Running general linting")
        lint_obj.lint_project(
            super(lint_obj.__class__, lint_obj), custom_check_files=disable_check_files, is_subclass_calling=False
        )
        # Run the project specific linting
        log.debug(f"Running linting of {template_handle}")
        console.print(f"[bold blue]Running {template_handle} linting")

        # for every python project that is created, autopep8 will run one time;
        # when linting an existing python cookietemple project, autopep8 is optional,
        # since (for example) it messes up Jinja syntax (if included in project)
        if "python" in template_handle:
            lint_obj.lint(is_create, skip_external)  # type: ignore
        else:
            lint_obj.lint(skip_external)  # type: ignore
    except AssertionError as e:
        console.print(f"[bold red]Critical error: {e}")
        console.print("[bold red] Stopping tests...")
        return lint_obj

    # Print the results
    lint_obj.print_results()

    # Exit code
    if len(lint_obj.failed) > 0:
        console.print(f"[bold red] {len(lint_obj.failed)} tests failed! Exiting with non-zero error code.")
        sys.exit(1)
    return None
def get_template_handle(dot_cookietemple_path: str = ".cookietemple.yml") -> str:
    """
    Reads the .cookietemple file and extracts the template handle.

    Exits the process when the file does not exist.

    :param dot_cookietemple_path: path to the directory containing the .cookietemple file
    :return: found template handle
    """
    config_path = Path(dot_cookietemple_path) / ".cookietemple.yml"
    if not config_path.exists():
        console.print("[bold red].cookietemple.yml not found. Is this a cookietemple project?")
        sys.exit(1)

    return YAML(typ="safe").load(config_path)["template_handle"]
| 0 | 0 | 0 |
9eb1b8391304cc3a6d032bc90cae6eac481ffd10 | 204 | py | Python | celery_study/tasks.py | RicardoScofileld/MyCode | 24a34a48304de7f13f20436839f481b2c9d3921d | [
"MIT"
] | null | null | null | celery_study/tasks.py | RicardoScofileld/MyCode | 24a34a48304de7f13f20436839f481b2c9d3921d | [
"MIT"
] | null | null | null | celery_study/tasks.py | RicardoScofileld/MyCode | 24a34a48304de7f13f20436839f481b2c9d3921d | [
"MIT"
] | null | null | null | from celery import Celery
app = Celery('tasks', broker='redis://localhost:6379/0', backend='redis://localhost:6379/1')
@app.task
| 17 | 92 | 0.637255 | from celery import Celery
app = Celery('tasks', broker='redis://localhost:6379/0', backend='redis://localhost:6379/1')
@app.task
def add(x, y):
print('beagin running .....', x, y)
return x+y
| 48 | 0 | 22 |
86a50e7003181e9d1d40bdceb66cb7ee740c36df | 8,407 | py | Python | interaction3/arrays/foldable_constant_spiral.py | bdshieh/interaction3 | b44c390045cf3b594125e90d2f2f4f617bc2433b | [
"MIT"
] | 2 | 2020-07-08T14:42:52.000Z | 2022-03-13T05:25:55.000Z | interaction3/arrays/foldable_constant_spiral.py | bdshieh/interaction3 | b44c390045cf3b594125e90d2f2f4f617bc2433b | [
"MIT"
] | null | null | null | interaction3/arrays/foldable_constant_spiral.py | bdshieh/interaction3 | b44c390045cf3b594125e90d2f2f4f617bc2433b | [
"MIT"
] | null | null | null | ## interaction3 / arrays / foldable_constant_spiral.py
import numpy as np
from interaction3.abstract import *
from interaction3 import util
# default parameters
defaults = {}
# membrane properties
defaults['length'] = [35e-6, 35e-6]
defaults['electrode'] = [35e-6, 35e-6]
defaults['nnodes'] = [9, 9]
defaults['thickness'] = [2.2e-6,]
defaults['density'] = [2040,]
defaults['y_modulus'] = [110e9,]
defaults['p_ratio'] = [0.22,]
defaults['isolation'] = 200e-9
defaults['permittivity'] = 6.3
defaults['gap'] = 50e-9
defaults['att_mech'] = 3000
defaults['ndiv'] = [2, 2]
# array properties
defaults['mempitch'] = [45e-6, 45e-6]
defaults['nmem'] = [2, 2]
defaults['nelem'] = 489
defaults['edge_buffer'] = 60e-6 # accounts for 20um dicing tolerance
defaults['taper_radius'] = 3.63e-3 # controls size of spiral
defaults['assert_radius'] = 3.75e-3 - 40e-6
# array pane vertices, hard-coded
_vertices0 = [[-3.75e-3, -3.75e-3, 0],
[-3.75e-3, 3.75e-3, 0],
[-1.25e-3, 3.75e-3, 0],
[-1.25e-3, -3.75e-3, 0]]
_vertices1 = [[-1.25e-3, -3.75e-3, 0],
[-1.25e-3, 3.75e-3, 0],
[1.25e-3, 3.75e-3, 0],
[1.25e-3, -3.75e-3, 0]]
_vertices2 = [[1.25e-3, -3.75e-3, 0],
[1.25e-3, 3.75e-3, 0],
[3.75e-3, 3.75e-3, 0],
[3.75e-3, -3.75e-3, 0]]
## COMMAND LINE INTERFACE ##
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--nmem', nargs=2, type=int)
parser.add_argument('--mempitch', nargs=2, type=float)
parser.add_argument('--length', nargs=2, type=float)
parser.add_argument('--electrode', nargs=2, type=float)
parser.add_argument('--nelem', type=int)
parser.add_argument('-d', '--dump', nargs='?', default=None)
parser.set_defaults(**defaults)
args = vars(parser.parse_args())
filename = args.pop('dump')
spec = create(**args)
print(spec)
print('Total number of channels ->', sum(get_channel_count(spec)))
print('Number of transmit channels ->', sum(get_channel_count(spec, kind='tx')))
print('Number of receive channels ->', sum(get_channel_count(spec, kind='rx')))
print('Number of transmit/receive channels ->', sum(get_channel_count(spec, kind='both')))
if filename is not None:
dump(spec, filename, mode='w')
from matplotlib import pyplot as plt
pos = np.concatenate(get_membrane_positions_from_array(spec), axis=0)
plt.plot(pos[:, 0], pos[:, 1], '.')
plt.gca().set_aspect('equal')
plt.gca().axvline(-1.25e-3)
plt.gca().axvline(1.25e-3)
plt.gca().axvline(-3.75e-3)
plt.gca().axvline(3.75e-3)
plt.gca().axhline(-3.75e-3)
plt.gca().axhline(3.75e-3)
plt.gca().add_patch(plt.Circle(radius=defaults['assert_radius'], xy=(0,0), fill=None))
plt.show() | 32.210728 | 116 | 0.594267 | ## interaction3 / arrays / foldable_constant_spiral.py
import numpy as np
from interaction3.abstract import *
from interaction3 import util
# default parameters
defaults = {}
# membrane properties
defaults['length'] = [35e-6, 35e-6]
defaults['electrode'] = [35e-6, 35e-6]
defaults['nnodes'] = [9, 9]
defaults['thickness'] = [2.2e-6,]
defaults['density'] = [2040,]
defaults['y_modulus'] = [110e9,]
defaults['p_ratio'] = [0.22,]
defaults['isolation'] = 200e-9
defaults['permittivity'] = 6.3
defaults['gap'] = 50e-9
defaults['att_mech'] = 3000
defaults['ndiv'] = [2, 2]
# array properties
defaults['mempitch'] = [45e-6, 45e-6]
defaults['nmem'] = [2, 2]
defaults['nelem'] = 489
defaults['edge_buffer'] = 60e-6 # accounts for 20um dicing tolerance
defaults['taper_radius'] = 3.63e-3 # controls size of spiral
defaults['assert_radius'] = 3.75e-3 - 40e-6
# array pane vertices, hard-coded
_vertices0 = [[-3.75e-3, -3.75e-3, 0],
[-3.75e-3, 3.75e-3, 0],
[-1.25e-3, 3.75e-3, 0],
[-1.25e-3, -3.75e-3, 0]]
_vertices1 = [[-1.25e-3, -3.75e-3, 0],
[-1.25e-3, 3.75e-3, 0],
[1.25e-3, 3.75e-3, 0],
[1.25e-3, -3.75e-3, 0]]
_vertices2 = [[1.25e-3, -3.75e-3, 0],
[1.25e-3, 3.75e-3, 0],
[3.75e-3, 3.75e-3, 0],
[3.75e-3, -3.75e-3, 0]]
def create(**kwargs):
    """
    Build the three panes of a foldable CMUT array with elements laid out
    on a golden-angle (Fermat) spiral of constant density.

    Any keyword not supplied falls back to the module-level ``defaults``
    dict; see that dict for parameter meanings and units.

    :return: tuple ``(array0, array1, array2)`` — left, center and right
        panes, each an abstract ``Array`` with its own rotation origin.
    :raises AssertionError: if any channel falls outside ``assert_radius``
        (checked by ``_assert_radius_rule`` at the end).
    """
    # set defaults if not in kwargs:
    for k, v in defaults.items():
        kwargs.setdefault(k, v)
    nelem = kwargs['nelem']
    nmem_x, nmem_y = kwargs['nmem']
    mempitch_x, mempitch_y = kwargs['mempitch']
    length_x, length_y = kwargs['length']
    electrode_x, electrode_y = kwargs['electrode']
    nnodes_x, nnodes_y = kwargs['nnodes']
    ndiv_x, ndiv_y = kwargs['ndiv']
    edge_buffer = kwargs['edge_buffer']
    taper_radius = kwargs['taper_radius']
    assert_radius = kwargs['assert_radius']
    # calculated parameters
    # golden angle in radians; successive elements are rotated by this angle
    gr = np.pi * (np.sqrt(5) - 1)
    # membrane properties, flattened into the kwargs shape SquareCmutMembrane expects
    mem_properties = dict()
    mem_properties['length_x'] = length_x
    mem_properties['length_y'] = length_y
    mem_properties['electrode_x'] = electrode_x
    mem_properties['electrode_y'] = electrode_y
    mem_properties['y_modulus'] = kwargs['y_modulus']
    mem_properties['p_ratio'] = kwargs['p_ratio']
    mem_properties['isolation'] = kwargs['isolation']
    mem_properties['permittivity'] = kwargs['permittivity']
    mem_properties['gap'] = kwargs['gap']
    mem_properties['nnodes_x'] = nnodes_x
    mem_properties['nnodes_y'] = nnodes_y
    mem_properties['thickness'] = kwargs['thickness']
    mem_properties['density'] = kwargs['density']
    mem_properties['att_mech'] = kwargs['att_mech']
    mem_properties['ndiv_x'] = ndiv_x
    mem_properties['ndiv_y'] = ndiv_y
    # calculate membrane positions (grid of nmem_x * nmem_y, centered on the element)
    xx, yy, zz = np.meshgrid(np.linspace(0, (nmem_x - 1) * mempitch_x, nmem_x),
                             np.linspace(0, (nmem_y - 1) * mempitch_y, nmem_y),
                             0)
    mem_pos = np.c_[xx.ravel(), yy.ravel(), zz.ravel()] - [(nmem_x - 1) * mempitch_x / 2,
                                                           (nmem_y - 1) * mempitch_y / 2,
                                                           0]
    # place elements on the spiral, skipping candidates that come too close
    # to a pane edge; loop until exactly nelem elements have been accepted
    elem_pos = []
    n = 0
    while True:
        if len(elem_pos) == nelem:
            break
        # Fermat spiral: radius grows with sqrt(index) -> constant areal density
        r = taper_radius * np.sqrt((n + 1) / nelem)
        theta = (n + 1) * gr
        xx = r * np.sin(theta)
        yy = r * np.cos(theta)
        zz = 0
        n += 1
        if _check_for_edge_collision([xx, yy, zz], _vertices0, edge_buffer):
            continue
        elif _check_for_edge_collision([xx, yy, zz], _vertices1, edge_buffer):
            continue
        elif _check_for_edge_collision([xx, yy, zz], _vertices2, edge_buffer):
            continue
        else:
            elem_pos.append([xx, yy, zz])
    elem_pos = np.array(elem_pos)
    # create arrays, bounding box and rotation points are hard-coded
    # pane 0 (left): elements inside _vertices0's bounding box
    x0, y0, _ = _vertices0[0]
    x1, y1, _ = _vertices0[2]
    xx, yy, zz = elem_pos.T
    mask = np.logical_and(np.logical_and(np.logical_and(xx >= x0, xx < x1), yy >= y0), yy < y1)
    array0 = _construct_array(0, np.array([-1.25e-3, 0, 0]), _vertices0, elem_pos[mask, :], mem_pos, mem_properties)
    # pane 1 (center)
    x0, y0, _ = _vertices1[0]
    x1, y1, _ = _vertices1[2]
    xx, yy, zz = elem_pos.T
    mask = np.logical_and(np.logical_and(np.logical_and(xx >= x0, xx < x1), yy >= y0), yy < y1)
    array1 = _construct_array(1, np.array([0, 0, 0]), _vertices1, elem_pos[mask, :], mem_pos, mem_properties)
    # pane 2 (right)
    x0, y0, _ = _vertices2[0]
    x1, y1, _ = _vertices2[2]
    xx, yy, zz = elem_pos.T
    mask = np.logical_and(np.logical_and(np.logical_and(xx >= x0, xx < x1), yy >= y0), yy < y1)
    array2 = _construct_array(2, np.array([1.25e-3, 0, 0]), _vertices2, elem_pos[mask, :], mem_pos, mem_properties)
    # sanity check: all channels must fit inside the assert radius
    _assert_radius_rule(assert_radius, array0, array1, array2)
    return array0, array1, array2
def _assert_radius_rule(radius, *arrays):
    """Assert that every channel of *arrays* lies within *radius* of the origin."""
    channel_positions = np.concatenate(get_channel_positions_from_array(arrays), axis=0)
    distances_from_origin = util.distance(channel_positions, [0, 0, 0])
    assert np.all(distances_from_origin <= radius)
def _check_for_edge_collision(pos, vertices, edge_buffer):
x, y, z = pos
x0, y0, _ = vertices[0]
x1, y1, _ = vertices[2]
if (abs(x - x0) >= edge_buffer and abs(x - x1) >= edge_buffer
and abs(y - y0) >= edge_buffer and abs(y - y1) >= edge_buffer):
return False
return True
def _construct_array(id, rotation_origin, vertices, elem_pos, mem_pos, mem_properties):
    """
    Assemble one abstract ``Array`` pane from element positions.

    Each element position in *elem_pos* becomes one transmit/receive channel
    containing a single element, which in turn contains one membrane per
    offset in *mem_pos* (membrane position = element position + offset).

    NOTE(review): the parameter name ``id`` shadows the builtin; left
    unchanged here to preserve the keyword interface.
    """
    if rotation_origin is None:
        rotation_origin = np.array([0,0,0])
    # construct channels
    channels = []
    mem_counter = 0
    elem_counter = 0
    ch_counter = 0
    for e_pos in elem_pos:
        membranes = []
        elements = []
        for m_pos in mem_pos:
            # construct membrane
            m = SquareCmutMembrane(**mem_properties)
            m['id'] = mem_counter
            m['position'] = (e_pos + m_pos).tolist()
            membranes.append(m)
            mem_counter += 1
        # construct element (its position is refined from its membranes below)
        elem = Element(id=elem_counter,
                       position=e_pos.tolist(),
                       membranes=membranes)
        element_position_from_membranes(elem)
        elements.append(elem)
        elem_counter += 1
        # construct channel (one element per channel, both tx and rx)
        ch = Channel(id=ch_counter,
                     kind='both',
                     position=e_pos.tolist(),
                     elements=elements,
                     dc_bias=0,
                     active=True,
                     delay=0)
        channels.append(ch)
        ch_counter += 1
    # construct array
    array = Array(id=id,
                  channels=channels,
                  rotation_origin=rotation_origin.tolist(),
                  vertices=vertices)
    array_position_from_vertices(array)
    return array
## COMMAND LINE INTERFACE ##
if __name__ == '__main__':
    import argparse

    # CLI: override a subset of the defaults and optionally dump the spec.
    parser = argparse.ArgumentParser()
    parser.add_argument('--nmem', nargs=2, type=int)
    parser.add_argument('--mempitch', nargs=2, type=float)
    parser.add_argument('--length', nargs=2, type=float)
    parser.add_argument('--electrode', nargs=2, type=float)
    parser.add_argument('--nelem', type=int)
    parser.add_argument('-d', '--dump', nargs='?', default=None)
    parser.set_defaults(**defaults)

    args = vars(parser.parse_args())
    filename = args.pop('dump')
    spec = create(**args)

    # report channel counts per kind
    print(spec)
    print('Total number of channels ->', sum(get_channel_count(spec)))
    print('Number of transmit channels ->', sum(get_channel_count(spec, kind='tx')))
    print('Number of receive channels ->', sum(get_channel_count(spec, kind='rx')))
    print('Number of transmit/receive channels ->', sum(get_channel_count(spec, kind='both')))
    if filename is not None:
        dump(spec, filename, mode='w')

    # quick visual check: membranes, pane boundaries and the assert radius
    from matplotlib import pyplot as plt
    pos = np.concatenate(get_membrane_positions_from_array(spec), axis=0)
    plt.plot(pos[:, 0], pos[:, 1], '.')
    plt.gca().set_aspect('equal')
    plt.gca().axvline(-1.25e-3)
    plt.gca().axvline(1.25e-3)
    plt.gca().axvline(-3.75e-3)
    plt.gca().axvline(3.75e-3)
    plt.gca().axhline(-3.75e-3)
    plt.gca().axhline(3.75e-3)
    plt.gca().add_patch(plt.Circle(radius=defaults['assert_radius'], xy=(0,0), fill=None))
    plt.show()
904219956df115e3ae89ae5b78c930e163e97040 | 3,775 | py | Python | src/levitas/middleware/dynSiteMiddleware.py | tobi-weber/levitas | b14fb4135839611ace652b9f43cbe5a7fa5e3b66 | [
"Apache-2.0"
] | 1 | 2018-02-27T00:28:29.000Z | 2018-02-27T00:28:29.000Z | src/levitas/middleware/dynSiteMiddleware.py | tobi-weber/levitas | b14fb4135839611ace652b9f43cbe5a7fa5e3b66 | [
"Apache-2.0"
] | null | null | null | src/levitas/middleware/dynSiteMiddleware.py | tobi-weber/levitas | b14fb4135839611ace652b9f43cbe5a7fa5e3b66 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Tobias Weber <tobi-weber@gmx.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from . import Middleware
log = logging.getLogger("levitas.middleware.dynSiteMiddleware")
class DynSiteMiddleware(Middleware):
"""
class MySite(object):
def index(self):
return "Hello World"
Example settings entry:
urls = [(r"^/(.*)$", DynSiteMiddleware, MySite)]
"""
| 31.722689 | 74 | 0.501192 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Tobias Weber <tobi-weber@gmx.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from . import Middleware
log = logging.getLogger("levitas.middleware.dynSiteMiddleware")
class PostFile(object):
    """A file uploaded via an HTTP POST request.

    Bug fix: the original assigned the (Python 2) builtin ``file`` instead of
    the ``f`` parameter, which raises NameError on Python 3 and never stored
    the actual file object.
    """

    def __init__(self, filename, f, mtype, mtype_options):
        self.filename = filename          # client-side file name
        self.file = f                     # file-like object with the uploaded data
        self.type = mtype                 # content type, e.g. "image/png"
        self.type_options = mtype_options  # extra content-type parameters
class DynSiteMiddleware(Middleware):
    """
    Middleware that dispatches URL path components to methods of a
    user-supplied site class.

    class MySite(object):
        def index(self):
            return "Hello World"

    Example settings entry:
    urls = [(r"^/(.*)$", DynSiteMiddleware, MySite)]
    """

    # NOTE(review): mutable default arguments ([] and {}) are shared across
    # instances; harmless as long as callers never mutate them — verify.
    def __init__(self, dynsite_class,
                       dynsite_args=[],
                       dynsite_kwargs={}):
        Middleware.__init__(self)
        self._dynsite_class = dynsite_class      # site class instantiated per request
        self._dynsite_args = dynsite_args        # positional args for the site constructor
        self._dynsite_kwargs = dynsite_kwargs    # keyword args for the site constructor

    def get(self):
        """Handle a GET request: query parameters become keyword arguments."""
        kwargs = {}
        if self.request_data is not None:
            for k, v in self.request_data.items():
                # single-valued parameters are unwrapped from their list
                if len(v) == 1:
                    kwargs[k] = v[0]
                else:
                    kwargs[k] = v
        return self._callDynSite(kwargs)

    def post(self):
        """Handle a POST request: form fields become keyword arguments,
        uploaded files are wrapped in PostFile objects."""
        kwargs = {}
        if self.request_data is not None:
            data = self.request_data
            for k in data.keys():
                d = data[k]
                if d.filename is None:
                    # plain form field
                    kwargs[k] = d.value
                else:
                    # file upload
                    kwargs[d.name] = PostFile(d.filename,
                                              d.file,
                                              d.type,
                                              d.type_options)
        return self._callDynSite(kwargs)

    def _callDynSite(self, kwargs):
        """Resolve the request path to a site method and call it.

        Path components joined with "_" form candidate method names, longest
        prefix first; remaining components are passed as positional args.
        Falls back to ``index`` with all components as args, then to a 404.
        Exceptions raised by the site method produce a 500 response.
        """
        comps = []
        for g in self.url_groups():
            comps.extend(g.split("/"))
        comps = [comp for comp in comps if comp]  # drop empty components
        if not len(comps):
            comps.append("index")
        site = self._dynsite_class(*self._dynsite_args,
                                   **self._dynsite_kwargs)
        log.debug("Path components: %s" % ", ".join(comps))
        for i in range(len(comps) + 1):  # @UnusedVariable
            if i < len(comps):
                # e.g. comps=["a","b"] tries "a" (args ["b"]) then "a_b" (args [])
                m = "_".join(comps[:i + 1])
                args = comps[i + 1:]
            else:
                # last resort: index(<all components>)
                m = "index"
                args = comps
            if hasattr(site, m):
                log.debug("Call '%s' method '%s' with args '%s'"
                          % (self._dynsite_class.__name__,
                             m,
                             str(args)))
                try:
                    return getattr(site, m)(*args, **kwargs)
                except Exception as err:
                    log.error(str(err), exc_info=True)
                    return self.responseError(500, str(err))
        msg = "%s cannot handle args %s" % (self._dynsite_class.__name__,
                                            str(comps))
        log.error(msg)
        return self.responseError(404, msg)
| 2,551 | 2 | 186 |
8a5dcd4f0018bd656389a3c66bcb756ef4013866 | 2,794 | py | Python | src/KTS/cpd_auto.py | StevRamos/video_summarization | 051632fd9e5ad94dd4a2b2bb31ea928f7269c1ac | [
"MIT"
] | 3 | 2021-11-09T03:05:52.000Z | 2022-03-17T08:37:45.000Z | src/KTS/cpd_auto.py | StevRamos/video_summarization | 051632fd9e5ad94dd4a2b2bb31ea928f7269c1ac | [
"MIT"
] | 3 | 2021-11-04T03:14:06.000Z | 2022-01-13T21:00:51.000Z | src/KTS/cpd_auto.py | StevRamos/video_summarization | 051632fd9e5ad94dd4a2b2bb31ea928f7269c1ac | [
"MIT"
] | 1 | 2021-12-05T19:12:45.000Z | 2021-12-05T19:12:45.000Z | import numpy as np
from .cpd_nonlin import cpd_nonlin
def cpd_auto(K, ncp, vmax, desc_rate=1, **kwargs):
"""Main interface
Detect change points automatically selecting their number
K - kernel between each pair of frames in video
ncp - maximum ncp
vmax - special parameter
Optional arguments:
lmin - minimum segment length
lmax - maximum segment length
desc_rate - rate of descriptor sampling (vmax always corresponds to 1x)
Note:
- cps are always calculated in subsampled coordinates irrespective to
desc_rate
- lmin and m should be in agreement
---
Returns: (cps, costs)
cps - best selected change-points
costs - costs for 0,1,2,...,m change-points
Memory requirement: ~ (3*N*N + N*ncp)*4 bytes ~= 16 * N^2 bytes
That is 1,6 Gb for the N=10000.
"""
m = ncp
(_, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)
#(cps, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)
N = K.shape[0]
N2 = N*desc_rate # length of the video before subsampling
penalties = np.zeros(m+1)
# Prevent division by zero (in case of 0 changes)
ncp = np.arange(1, m+1)
penalties[1:] = (vmax*ncp/(2.0*N2))*(np.log(float(N2)/ncp)+1)
costs = scores/float(N) + penalties
m_best = np.argmin(costs)
(cps, scores2) = cpd_nonlin(K, m_best, **kwargs)
return (cps, costs)
# ------------------------------------------------------------------------------
# Extra functions (currently not used)
def estimate_vmax(K_stable):
"""K_stable - kernel between all frames of a stable segment"""
n = K_stable.shape[0]
vmax = np.trace(centering(K_stable)/n)
return vmax
def centering(K):
"""Apply kernel centering"""
mean_rows = np.mean(K, 1)[:, np.newaxis]
return K - mean_rows - mean_rows.T + np.mean(mean_rows)
def eval_score(K, cps):
""" Evaluate unnormalized empirical score
(sum of kernelized scatters) for the given change-points """
N = K.shape[0]
cps = [0] + list(cps) + [N]
V1 = 0
V2 = 0
for i in range(len(cps)-1):
K_sub = K[cps[i]:cps[i+1], :][:, cps[i]:cps[i+1]]
V1 += np.sum(np.diag(K_sub))
V2 += np.sum(K_sub) / float(cps[i+1] - cps[i])
return (V1 - V2)
def eval_cost(K, cps, score, vmax):
""" Evaluate cost function for automatic number of change points selection
K - kernel between all frames
cps - selected change-points
score - unnormalized empirical score (sum of kernelized scatters)
vmax - vmax parameter"""
N = K.shape[0]
penalty = (vmax*len(cps)/(2.0*N))*(np.log(float(N)/len(cps))+1)
return score/float(N) + penalty
| 31.393258 | 80 | 0.590193 | import numpy as np
from .cpd_nonlin import cpd_nonlin
def cpd_auto(K, ncp, vmax, desc_rate=1, **kwargs):
    """Main interface

    Detect change points automatically selecting their number
        K       - kernel between each pair of frames in video
        ncp     - maximum ncp
        vmax    - special parameter
    Optional arguments:
        lmin     - minimum segment length
        lmax     - maximum segment length
        desc_rate - rate of descriptor sampling (vmax always corresponds to 1x)
    Note:
        - cps are always calculated in subsampled coordinates irrespective to
            desc_rate
        - lmin and m should be in agreement
    ---
    Returns: (cps, costs)
        cps - best selected change-points
        costs - costs for 0,1,2,...,m change-points
    Memory requirement: ~ (3*N*N + N*ncp)*4 bytes ~= 16 * N^2 bytes
    That is 1,6 Gb for the N=10000.
    """
    m = ncp
    # first pass: only the per-m scores are needed, so skip backtracking
    (_, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)
    #(cps, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)
    N = K.shape[0]
    N2 = N*desc_rate  # length of the video before subsampling
    # BIC-like penalty, one entry per candidate number of change points
    penalties = np.zeros(m+1)
    # Prevent division by zero (in case of 0 changes)
    # NOTE: ncp is rebound here from a scalar to the array 1..m
    ncp = np.arange(1, m+1)
    penalties[1:] = (vmax*ncp/(2.0*N2))*(np.log(float(N2)/ncp)+1)
    costs = scores/float(N) + penalties
    m_best = np.argmin(costs)
    # second pass with backtracking to recover the actual change points;
    # scores2 is intentionally unused
    (cps, scores2) = cpd_nonlin(K, m_best, **kwargs)
    return (cps, costs)
# ------------------------------------------------------------------------------
# Extra functions (currently not used)
def estimate_vmax(K_stable):
    """K_stable - kernel between all frames of a stable segment.

    Returns trace(centered kernel) / n, an estimate of the vmax parameter.
    """
    n_frames = K_stable.shape[0]
    return np.trace(centering(K_stable) / n_frames)
def centering(K):
    """Apply kernel centering: K - row means - column means + grand mean."""
    row_means = K.mean(axis=1, keepdims=True)
    return K - row_means - row_means.T + row_means.mean()
def eval_score(K, cps):
    """ Evaluate unnormalized empirical score
    (sum of kernelized scatters) for the given change-points """
    N = K.shape[0]
    boundaries = [0] + list(cps) + [N]
    diag_total = 0
    norm_total = 0
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        segment = K[start:end, start:end]
        diag_total += np.sum(np.diag(segment))
        norm_total += np.sum(segment) / float(end - start)
    return diag_total - norm_total
def eval_cost(K, cps, score, vmax):
    """ Evaluate cost function for automatic number of change points selection
    K - kernel between all frames
    cps - selected change-points
    score - unnormalized empirical score (sum of kernelized scatters)
    vmax - vmax parameter"""
    N = K.shape[0]
    n_cps = len(cps)
    penalty = (vmax * n_cps / (2.0 * N)) * (np.log(float(N) / n_cps) + 1)
    return score / float(N) + penalty
return score/float(N) + penalty
| 0 | 0 | 0 |
9b07006b1d80f12d48ad69748404d400d204cc1f | 13,610 | py | Python | scripts/printingValidation/ImgToGcode/image_to_gcode.py | Air-Factories-2-0/af2-hyperledger | 7aeeb831cf03fdf7fe64f9500da17c02688a0886 | [
"Apache-2.0"
] | null | null | null | scripts/printingValidation/ImgToGcode/image_to_gcode.py | Air-Factories-2-0/af2-hyperledger | 7aeeb831cf03fdf7fe64f9500da17c02688a0886 | [
"Apache-2.0"
] | null | null | null | scripts/printingValidation/ImgToGcode/image_to_gcode.py | Air-Factories-2-0/af2-hyperledger | 7aeeb831cf03fdf7fe64f9500da17c02688a0886 | [
"Apache-2.0"
] | 1 | 2022-02-03T09:38:16.000Z | 2022-02-03T09:38:16.000Z | import numpy as np
from scipy import ndimage
import imageio
from PIL import Image, ImageFilter
import argparse
import constants
if __name__ == "__main__":
main() | 35.442708 | 251 | 0.683982 | import numpy as np
from scipy import ndimage
import imageio
from PIL import Image, ImageFilter
import argparse
import constants
class CircularRange:
    """A half-open index range [begin, end) on a circular array, tagged with
    the boolean edge-value shared by all elements in the range."""

    def __init__(self, begin, end, value):
        self.begin = begin
        self.end = end
        self.value = value

    def __repr__(self):
        return f"[{self.begin},{self.end})->{self.value}"

    def halfway(self):
        """Index of the midpoint of the range (truncated toward zero)."""
        return int((self.begin + self.end) / 2)
class Graph:
    """Undirected graph of 2D points used to trace edge paths as G-code.

    Node coordinates are image coordinates (x = row, y = column); when
    emitted they are mapped to plotter coordinates as (y, -x).
    """

    class Node:
        def __init__(self, point, index):
            self.x, self.y = point
            self.index = index
            # neighbor index -> bool: True once the connection has been
            # written out during gcode generation
            self.connections = {}

        def __repr__(self):
            return f"({self.y},{-self.x})"

        def _addConnection(self, to):
            self.connections[to] = False  # i.e. not already used in gcode generation

        def toDotFormat(self):
            # emit each undirected edge only once (lower index side)
            return (f"{self.index} [pos=\"{self.y},{-self.x}!\", label=\"{self.index}\\n{self.x},{self.y}\"]\n" +
                "".join(f"{self.index}--{conn}\n" for conn in self.connections if self.index < conn))

    def __init__(self):
        self.nodes = []

    def __getitem__(self, index):
        return self.nodes[index]

    def __repr__(self):
        return repr(self.nodes)

    def addNode(self, point):
        """Append a node at *point* and return its index."""
        index = len(self.nodes)
        self.nodes.append(Graph.Node(point, index))
        return index

    def addConnection(self, a, b):
        """Add an undirected, not-yet-used connection between nodes a and b."""
        self.nodes[a]._addConnection(b)
        self.nodes[b]._addConnection(a)

    def distance(self, a, b):
        """Euclidean distance between nodes a and b."""
        return np.hypot(self[a].x-self[b].x, self[a].y-self[b].y)

    def areConnectedWithin(self, a, b, maxDistance):
        """True if a path from a to b exists within the given distance budget.

        NOTE(review): the recursive step subtracts distance(conn, b) — the
        distance from the neighbor to the *target* — rather than the length
        of the traversed edge; confirm this heuristic is intended.
        """
        if maxDistance < 0:
            return False
        elif a == b:
            return True
        else:
            return any(
                self.areConnectedWithin(conn, b, maxDistance - self.distance(conn, b))
                for conn in self[a].connections)

    def saveAsDotFile(self, f):
        """Write the graph to *f* in Graphviz DOT format (pinned positions)."""
        f.write("graph G {\nnode [shape=plaintext];\n")
        for node in self.nodes:
            f.write(node.toDotFormat())
        f.write("}\n")

    def saveAsGcodeFile(self, f):
        """Write all graph edges to *f* as G-code (G0 = travel, G1 = draw)."""
        ### First follow all paths that have a start/end node (i.e. are not cycles)
        # The next chosen starting node is the closest to the current position
        def pathGcode(i, insidePath):
            # greedily follow the first unused connection, marking it used
            # on both endpoints; returns the index of the path's last node
            f.write(f"G{1 if insidePath else 0} X{self[i].y} Y{-self[i].x}\n")
            for connTo, alreadyUsed in self[i].connections.items():
                if not alreadyUsed:
                    self[i].connections[connTo] = True
                    self[connTo].connections[i] = True
                    return pathGcode(connTo, True)
            return i

        # nodes with zero or odd degree must start/end a non-cyclic path
        possibleStartingNodes = set()
        for i in range(len(self.nodes)):
            if len(self[i].connections) == 0 or len(self[i].connections) % 2 == 1:
                possibleStartingNodes.add(i)

        if len(possibleStartingNodes) != 0:
            node = next(iter(possibleStartingNodes))  # first element
            while 1:
                possibleStartingNodes.remove(node)
                pathEndNode = pathGcode(node, False)
                if len(self[node].connections) == 0:
                    # isolated node: draw it as a single point
                    assert pathEndNode == node
                    f.write(f"G1 X{self[node].y} Y{-self[node].x}\n")
                else:
                    possibleStartingNodes.remove(pathEndNode)
                if len(possibleStartingNodes) == 0:
                    break
                # pick the remaining start node closest to the current position
                minDistanceSoFar = np.inf
                for nextNode in possibleStartingNodes:
                    distance = self.distance(pathEndNode, nextNode)
                    if distance < minDistanceSoFar:
                        minDistanceSoFar = distance
                        node = nextNode

        ### Then pick the node closest to the current position that still has unused/available connections
        # That node must belong to a cycle, because otherwise it would have been used above
        # TODO improve by finding Eulerian cycles
        cycleNodes = set()
        for i in range(len(self.nodes)):
            someConnectionsAvailable = False
            for _, alreadyUsed in self[i].connections.items():
                if not alreadyUsed:
                    someConnectionsAvailable = True
                    break
            if someConnectionsAvailable:
                cycleNodes.add(i)

        def cyclePathGcode(i, insidePath):
            # follow exactly one unused connection; a node whose last unused
            # connection is consumed here is removed from cycleNodes
            f.write(f"G{1 if insidePath else 0} X{self[i].y} Y{-self[i].x}\n")
            foundConnections = 0
            for connTo, alreadyUsed in self[i].connections.items():
                if not alreadyUsed:
                    if foundConnections == 0:
                        self[i].connections[connTo] = True
                        self[connTo].connections[i] = True
                        cyclePathGcode(connTo, True)
                    foundConnections += 1
                    if foundConnections > 1:
                        break
            if foundConnections == 1:
                cycleNodes.remove(i)

        if len(cycleNodes) != 0:
            node = next(iter(cycleNodes))  # first element
            while 1:
                # since every node has an even number of connections, ANY path starting from it
                # must complete at the same place (see Eulerian paths/cycles properties)
                cyclePathGcode(node, False)
                if len(cycleNodes) == 0:
                    break
                pathEndNode = node
                minDistanceSoFar = np.inf
                # NOTE(review): this iterates possibleStartingNodes, which is
                # empty by now, so `node` is never updated — likely should
                # iterate cycleNodes instead; verify (risk of infinite loop).
                for nextNode in possibleStartingNodes:
                    distance = self.distance(pathEndNode, nextNode)
                    if distance < minDistanceSoFar:
                        minDistanceSoFar = distance
                        node = nextNode
class EdgesToGcode:
    """Convert a boolean edge matrix into a Graph of connected key points.

    Works by sampling edge pixels along growing circumferences (precomputed
    in ``constants.circumferences``) around seed points, turning the samples
    into angular ranges, and propagating nodes toward the middle of each
    edge-valued range.
    """

    def __init__(self, edges):
        self.edges = edges                                          # bool matrix: True = edge pixel
        self.ownerNode = np.full(np.shape(edges), -1, dtype=int)    # node index owning each pixel, -1 = unclaimed
        self.xSize, self.ySize = np.shape(edges)
        self.graph = Graph()

    def getCircularArray(self, center, r, smallerArray = None):
        """Sample the edge matrix along the radius-*r* circumference around
        *center*; *smallerArray* (samples at radius r-1) masks out regions
        that were not connected at the smaller radius."""
        circumferenceSize = len(constants.circumferences[r])
        circularArray = np.zeros(circumferenceSize, dtype=bool)
        if smallerArray is None:
            smallerArray = np.ones(1, dtype=bool)
        smallerSize = np.shape(smallerArray)[0]
        smallerToCurrentRatio = smallerSize / circumferenceSize
        for i in range(circumferenceSize):
            x = center[0] + constants.circumferences[r][i][0]
            y = center[1] + constants.circumferences[r][i][1]
            if x not in range(self.xSize) or y not in range(self.ySize):
                circularArray[i] = False  # consider pixels outside of the image as not-edges
            else:
                # map index i to the corresponding position on the smaller circumference
                iSmaller = i * smallerToCurrentRatio
                a, b = int(np.floor(iSmaller)), int(np.ceil(iSmaller))
                if smallerArray[a] == False and (b not in range(smallerSize) or smallerArray[b] == False):
                    circularArray[i] = False  # do not take into consideration not connected regions (roughly)
                else:
                    circularArray[i] = self.edges[x, y]
        return circularArray

    def toCircularRanges(self, circularArray):
        """Compress a boolean circular array into CircularRange runs,
        merging the last run into the first when they wrap around."""
        ranges = []
        circumferenceSize = np.shape(circularArray)[0]
        lastValue, lastValueIndex = circularArray[0], 0
        for i in range(1, circumferenceSize):
            if circularArray[i] != lastValue:
                ranges.append(CircularRange(lastValueIndex, i, lastValue))
                lastValue, lastValueIndex = circularArray[i], i
        ranges.append(CircularRange(lastValueIndex, circumferenceSize, lastValue))
        if len(ranges) > 1 and ranges[-1].value == ranges[0].value:
            # wrap-around: extend the first range backwards (begin may go negative)
            ranges[0].begin = ranges[-1].begin - circumferenceSize
            ranges.pop()  # the last range is now contained in the first one
        return ranges

    def getNextPoints(self, point):
        """
        Returns the radius of the circle used to identify the points and
        the points toward which propagate, in a tuple `(radius, [point0, point1, ...])`
        """
        bestRadius = 0
        circularArray = self.getCircularArray(point, 0)
        allRanges = [self.toCircularRanges(circularArray)]
        for radius in range(1, len(constants.circumferences)):
            circularArray = self.getCircularArray(point, radius, circularArray)
            allRanges.append(self.toCircularRanges(circularArray))
            # prefer the radius that splits the circumference into the most ranges
            if len(allRanges[radius]) > len(allRanges[bestRadius]):
                bestRadius = radius
            if len(allRanges[bestRadius]) >= 4 and len(allRanges[-2]) >= len(allRanges[-1]):
                # two consecutive circular arrays with the same or decreasing number>=4 of ranges
                break
            elif len(allRanges[radius]) == 2 and radius > 1:
                edge = 0 if allRanges[radius][0].value == True else 1
                if allRanges[radius][edge].end-allRanges[radius][edge].begin < len(constants.circumferences[radius]) / 4:
                    # only two ranges but the edge range is small (1/4 of the circumference)
                    if bestRadius == 1:
                        bestRadius = 2
                    break
            elif len(allRanges[radius]) == 1 and allRanges[radius][0].value == False:
                # this is a point-shaped edge not surrounded by any edges
                break
        if bestRadius == 0:
            return 0, []
        # one candidate point per edge-valued range: the pixel at its midpoint
        circularRanges = allRanges[bestRadius]
        points = []
        for circularRange in circularRanges:
            if circularRange.value == True:
                circumferenceIndex = circularRange.halfway()
                x = point[0] + constants.circumferences[bestRadius][circumferenceIndex][0]
                y = point[1] + constants.circumferences[bestRadius][circumferenceIndex][1]
                if x in range(self.xSize) and y in range(self.ySize) and self.ownerNode[x, y] == -1:
                    points.append((x,y))
        return bestRadius, points

    def propagate(self, point, currentNodeIndex):
        """Claim the pixels around *point* for node *currentNodeIndex*, link
        to any previously-claimed nodes met, and recurse on the next points."""
        radius, nextPoints = self.getNextPoints(point)
        # depth first search to set the owner of all reachable connected pixels
        # without an owner and find connected nodes
        allConnectedNodes = set()
        def setSeenDFS(x, y):
            if (x in range(self.xSize) and y in range(self.ySize)
                    and np.hypot(x-point[0], y-point[1]) <= radius + 0.5
                    and self.edges[x, y] == True and self.ownerNode[x, y] != currentNodeIndex):
                if self.ownerNode[x, y] != -1:
                    allConnectedNodes.add(self.ownerNode[x, y])
                self.ownerNode[x, y] = currentNodeIndex  # index of just added node
                setSeenDFS(x+1, y)
                setSeenDFS(x-1, y)
                setSeenDFS(x, y+1)
                setSeenDFS(x, y-1)
        self.ownerNode[point] = -1  # reset to allow DFS to start
        setSeenDFS(*point)
        # connect to neighboring nodes unless a short path already exists
        # (the constant 11 is the distance budget for areConnectedWithin)
        for nodeIndex in allConnectedNodes:
            if not self.graph.areConnectedWithin(currentNodeIndex, nodeIndex, 11):
                self.graph.addConnection(currentNodeIndex, nodeIndex)
        validNextPoints = []
        for nextPoint in nextPoints:
            if self.ownerNode[nextPoint] == currentNodeIndex:
                # only if this point belongs to the current node after the DFS,
                # which means it is reachable and connected
                validNextPoints.append(nextPoint)
        for nextPoint in validNextPoints:
            nodeIndex = self.graph.addNode(nextPoint)
            self.graph.addConnection(currentNodeIndex, nodeIndex)
            self.propagate(nextPoint, nodeIndex)
        self.ownerNode[point] = currentNodeIndex

    def addNodeAndPropagate(self, point):
        """Create a node at *point* and propagate outward from it."""
        nodeIndex = self.graph.addNode(point)
        self.propagate(point, nodeIndex)

    def buildGraph(self):
        """Scan the whole edge matrix, seeding propagation from every
        still-unclaimed edge pixel; returns the resulting Graph."""
        for point in np.ndindex(np.shape(self.edges)):
            if self.edges[point] == True and self.ownerNode[point] == -1:
                radius, nextPoints = self.getNextPoints(point)
                if radius == 0:
                    # isolated/point-shaped edge: the pixel itself is the node
                    self.addNodeAndPropagate(point)
                else:
                    for nextPoint in nextPoints:
                        if self.ownerNode[nextPoint] == -1:
                            self.addNodeAndPropagate(nextPoint)
        return self.graph
def sobel(image):
    """Run Sobel edge detection over an RGB(A) image.

    Applies the Sobel operator along both axes, combines the gradients with
    the Euclidean norm, rescales the peak to 255 and crops a 2-pixel border
    (where the filter response depends on the image boundary).

    Fix: a uniform image previously caused a 0/0 division (NaNs and an
    undefined NaN->uint8 cast); the normalization is now guarded.

    :param image: array-like of shape (h, w, channels), values 0-255
    :return: uint8 array of shape (h-4, w-4, 3) with edge magnitudes
    """
    image = np.array(image, dtype=float)
    image /= 255.0
    Gx = ndimage.sobel(image, axis=0)
    Gy = ndimage.sobel(image, axis=1)
    res = np.hypot(Gx, Gy)
    peak = np.max(res)
    if peak > 0:  # uniform images have no gradient; skip 0/0 normalization
        res /= peak
    res = np.array(res * 255, dtype=np.uint8)
    return res[2:-2, 2:-2, 0:3]
def convertToBinaryEdges(edges, threshold):
    """
    Collapse a multi-channel edge image into a boolean edge mask.

    A pixel is an edge when its strongest RGB channel reaches *threshold*;
    if an alpha channel is present, pixels whose alpha is below *threshold*
    are forced to non-edges.
    """
    result = edges[:, :, :3].max(axis=2) >= threshold
    hasAlphaChannel = np.shape(edges)[2] > 3
    if hasAlphaChannel:
        result[edges[:, :, 3] < threshold] = False
    return result
def parseArgs(namespace):
    """Parse command-line arguments into *namespace* (reads sys.argv).

    Exits via argparse.error() on invalid --edges mode or --threshold value.
    ``fromfile_prefix_chars="@"`` allows reading arguments from a file.
    """
    argParser = argparse.ArgumentParser(fromfile_prefix_chars="@",
        description="Detects the edges of an image and converts them to 2D gcode that can be printed by a plotter")

    argParser.add_argument_group("Data options")
    argParser.add_argument("-i", "--input", type=argparse.FileType('br'), required=True, metavar="FILE",
        help="Image to convert to gcode; all formats supported by the Python imageio library are supported")
    argParser.add_argument("-o", "--output", type=argparse.FileType('w'), required=True, metavar="FILE",
        help="File in which to save the gcode result")
    argParser.add_argument("--dot-output", type=argparse.FileType('w'), metavar="FILE",
        help="Optional file in which to save the graph (in DOT format) generated during an intermediary step of gcode generation")
    argParser.add_argument("-e", "--edges", type=str, metavar="MODE",
        help="Consider the input file already as an edges matrix, not as an image of which to detect the edges. MODE should be either `white` or `black`, that is the color of the edges in the image. The image should only be made of white or black pixels.")
    argParser.add_argument("-t", "--threshold", type=int, default=32, metavar="VALUE",
        help="The threshold in range (0,255) above which to consider a pixel as part of an edge (after Sobel was applied to the image or on reading the edges from file with the --edges option)")

    argParser.parse_args(namespace=namespace)

    # post-parse validation (argparse cannot express these constraints directly)
    if namespace.edges is not None and namespace.edges not in ["white", "black"]:
        argParser.error("mode for --edges should be `white` or `black`")
    if namespace.threshold <= 0 or namespace.threshold >= 255:
        argParser.error("value for --threshold should be in range (0,255)")
def main():
    """Command line entry point: read an image, emit gcode (and optional DOT graph)."""
    class Options: pass
    parseArgs(Options)
    image = imageio.imread(Options.input)
    if Options.edges is None:
        # No precomputed edge matrix was supplied: detect edges ourselves.
        edges = sobel(image)
    else:
        # Input already is an edge matrix; invert when edges are black on white.
        edges = np.invert(image) if Options.edges == "black" else image
    binary = convertToBinaryEdges(edges, Options.threshold)
    converter = EdgesToGcode(binary)
    converter.buildGraph()
    if Options.dot_output is not None:
        converter.graph.saveAsDotFile(Options.dot_output)
    converter.graph.saveAsGcodeFile(Options.output)
def extractGCODE(input, output, threshold):
    """Convert the image at path `input` to gcode written to path `output`.

    Programmatic counterpart of `main()`: runs Sobel edge detection,
    binarizes the result with `threshold` and writes the generated gcode.

    Args:
        input: path of the source image file.
        output: path of the gcode file to create/overwrite.
        threshold: value above which a pixel counts as part of an edge.
    """
    # `with` blocks fix the original's leaked file handles (files were
    # opened inline and never closed).
    with open(input, "br") as imageFile:
        image = imageio.imread(imageFile)
    edges = convertToBinaryEdges(sobel(image), threshold)
    converter = EdgesToGcode(edges)
    converter.buildGraph()
    with open(output, "w") as gcodeFile:
        converter.graph.saveAsGcodeFile(gcodeFile)
if __name__ == "__main__":
main() | 10,823 | 2,335 | 275 |
bd8093d54c94a05bd5e38d58809ee53b1784c27a | 664 | py | Python | app/__main__.py | jieggii/giving-tuesday-bot | f27d143d2f24b81c9121ae0852d3f73a5897b165 | [
"MIT"
] | 1 | 2021-11-18T04:27:19.000Z | 2021-11-18T04:27:19.000Z | app/__main__.py | jieggii/giving-tuesday-bot | f27d143d2f24b81c9121ae0852d3f73a5897b165 | [
"MIT"
] | null | null | null | app/__main__.py | jieggii/giving-tuesday-bot | f27d143d2f24b81c9121ae0852d3f73a5897b165 | [
"MIT"
] | null | null | null | import asyncio
import logging
import uvloop
from vkwave.bots import SimpleLongPollBot
from vkwave.bots.core.dispatching import filters
from app import db
from app.config import config
from app.routers import home, registration
# Log at INFO so the bot's lifecycle events are visible on the console.
logging.basicConfig(level=logging.INFO)
# Replace asyncio's default event loop policy with uvloop.
uvloop.install()
loop = asyncio.get_event_loop()
# Initialise the database before the bot starts handling events.
loop.run_until_complete(db.init())
bot = SimpleLongPollBot(config.TOKENS, config.GROUP_ID)
# Only react to incoming messages ("message_new" long-poll events).
bot.router.registrar.add_default_filter(filters.EventTypeFilter("message_new"))
bot.dispatcher.add_router(registration.router)
bot.dispatcher.add_router(home.router)
try:
    # Blocks forever; per-update handler errors are swallowed (ignore_errors=True).
    bot.run_forever(ignore_errors=True)
except KeyboardInterrupt:
    # Exit cleanly on Ctrl-C.
    exit()
| 22.133333 | 79 | 0.817771 | import asyncio
import logging
import uvloop
from vkwave.bots import SimpleLongPollBot
from vkwave.bots.core.dispatching import filters
from app import db
from app.config import config
from app.routers import home, registration
logging.basicConfig(level=logging.INFO)
uvloop.install()
loop = asyncio.get_event_loop()
loop.run_until_complete(db.init())
bot = SimpleLongPollBot(config.TOKENS, config.GROUP_ID)
bot.router.registrar.add_default_filter(filters.EventTypeFilter("message_new"))
bot.dispatcher.add_router(registration.router)
bot.dispatcher.add_router(home.router)
try:
bot.run_forever(ignore_errors=True)
except KeyboardInterrupt:
exit()
| 0 | 0 | 0 |
e276bed2b32ad9523745939da058b699a52bc734 | 158 | py | Python | 2-farm/lessons/2-detect-soil-moisture/code/pi/soil-moisture-sensor/app.py | yash7raut/IoT-For-Beginners | 074f4880e655f19008f2fa9dfca03e18f94cf441 | [
"MIT"
] | 9,718 | 2021-03-17T12:14:37.000Z | 2022-03-31T21:34:50.000Z | 2-farm/lessons/2-detect-soil-moisture/code/pi/soil-moisture-sensor/app.py | bennice/IoT-For-Beginners | caaca7b5b6dac7298d72c3bfa802fda4c3a49e29 | [
"MIT"
] | 123 | 2021-05-17T17:24:15.000Z | 2022-03-04T06:58:47.000Z | 2-farm/lessons/2-detect-soil-moisture/code/pi/soil-moisture-sensor/app.py | bennice/IoT-For-Beginners | caaca7b5b6dac7298d72c3bfa802fda4c3a49e29 | [
"MIT"
] | 1,115 | 2021-07-08T13:56:20.000Z | 2022-03-31T22:54:25.000Z | import time
from grove.adc import ADC
adc = ADC()
while True:
soil_moisture = adc.read(0)
print("Soil moisture:", soil_moisture)
time.sleep(10) | 15.8 | 42 | 0.683544 | import time
from grove.adc import ADC
adc = ADC()
while True:
soil_moisture = adc.read(0)
print("Soil moisture:", soil_moisture)
time.sleep(10) | 0 | 0 | 0 |
ef87f6a9cd2bd86af931087588bbeeed87223d62 | 5,137 | py | Python | hmm/opt_helpers.py | donlelef/see-you-outside | f98955599443aa63c90147caedb76905cbe8fee0 | [
"MIT"
] | 5 | 2020-04-05T10:13:30.000Z | 2021-01-02T14:44:22.000Z | hmm/opt_helpers.py | donlelef/see-you-outside | f98955599443aa63c90147caedb76905cbe8fee0 | [
"MIT"
] | null | null | null | hmm/opt_helpers.py | donlelef/see-you-outside | f98955599443aa63c90147caedb76905cbe8fee0 | [
"MIT"
] | null | null | null | import casadi as cs
# plt.figure(1)
# plt.clf()
# plt.plot(sol.value(k))
# plt.figure(2)
# plt.clf()
# plt.plot(sol.value(eps_soft))
# plt.figure(3)
# plt.clf()
# plt.plot(sol.value(x)[3,:],label='infected')
# plt.plot(sol.value(x)[4,:],label='hospitalized')
# plt.plot(sol.value(x)[5,:],label='death')
# plt.legend()
# plt.show()
#pd.DataFrame(sol.value(x), index=['S','E','A','I','H','D','R']).to_csv('For_Emanuele.csv')
| 33.357143 | 134 | 0.576796 | import casadi as cs
def f(x, params):
    """Return 2 - exp(-params['eps'] * x), a saturating response in x."""
    eps = params['eps']
    return 2 - cs.exp(-eps * x)
def infect_rate(x, k, params):
    """Per-step probability that a susceptible individual becomes infected.

    Args:
        x: state vector; x[2] is the asymptomatic fraction, x[3] the
           infected fraction.
        k: average number of contacts per step (the control variable).
        params: model constants; uses 'beta_a', 'beta_i' (infectivities)
            and 'C' (contact rate).

    Returns:
        1 - (1-beta_a)^(k*C*x_a) * (1-beta_i)^(k*C*x_i): the chance of at
        least one infecting contact with an A or I individual.
    """
    # NOTE(review): the original computed z = 1/f(n_eff/s, params) but never
    # used it; that dead, side-effect-free computation has been removed.
    x_a = x[2]
    x_i = x[3]
    exposure = k * params['C']
    p_avoid = cs.power(1 - params['beta_a'], exposure * x_a) * cs.power(1 - params['beta_i'], exposure * x_i)
    return 1 - p_avoid
def calculate_trans(x, params,k, t):
    """Assemble the 7x7 one-step transition matrix T so that x_{t+1} = T @ x_t.

    Compartments follow the S,E,A,I,H,D,R ordering used for the CSV export
    elsewhere in this module (NOTE(review): the initial-state comments in
    opt_strategy label indices 5/6 the other way round -- verify).

    Args:
        x: current state vector of 7 compartment fractions; only A (x[2])
           and I (x[3]) enter the infection rate.
        params: epidemiological constants (eta, alpha, mu, gamma, phi, w, xi, ...).
        k: average contact number at this step (casadi decision variable).
        t: time index; currently unused (kept for the commented debug prints).

    Returns:
        A casadi MX 7x7 matrix of transition probabilities.
    """
    # evolve one step
    Gamma = infect_rate(x, k, params)  # chance a susceptible gets exposed this step
    # print(t)
    # print(params['tr'])
    trans = cs.MX.zeros(7,7)
    trans[0,0] = (1-Gamma)  # S stays susceptible
    trans[1,0] = Gamma  # S -> E
    trans[1,1] = 1-params['eta']
    trans[2,1] = params['eta']  # E -> A
    trans[2,2] = 1-params['alpha']
    trans[3,2] = params['alpha']  # A -> I
    trans[3,3] = 1-params['mu']
    trans[4,3] = params['mu']*params['gamma']  # I -> H (hospital/ICU)
    trans[4,4] = params['w']*(1-params['phi']) + (1-params['w'])*(1-params['xi'])
    trans[5,4] = params['w']*params['phi']  # H -> absorbing state 5 (death, per dynamics)
    trans[5,5] = 1
    trans[6,3] = params['mu']*(1-params['gamma'])  # I -> absorbing state 6 (recovery, per dynamics)
    trans[6,4] = (1-params['w'])*params['xi']
    trans[6,6] = 1
    # trans = [[(1-Gamma), 0, 0, 0, 0, 0, 0],
    #           [Gamma, 1-params['eta'], 0, 0, 0, 0, 0],
    #           [0, params['eta'], 1-params['alpha'], 0, 0, 0, 0 ],
    #           [0, 0, params['alpha'], 1-params['mu'], 0, 0, 0 ],
    #           [0, 0, 0, params['mu']*params['gamma'], params['w']*(1-params['phi']) + (1-params['w'])*(1-params['xi']), 0, 0],
    #           [0, 0, 0, 0, params['w']*params['phi'], 1, 0],
    #           [0, 0, 0, params['mu']*(1-params['gamma']), (1-params['w'])*params['xi'], 0, 1]]
    return trans
def opt_strategy (weight_eps, bed_ratio,weight_goout,initial_infect = 0.2):
    """Solve a T=100-step optimal lockdown problem for the compartment model.

    The control k[t] (average contacts per step, constrained to [1, 13.3])
    trades off the economic cost of lockdown against hospital overload and
    deaths; solved with CasADi/IPOPT using multiple shooting.

    Args:
        weight_eps: weight in [0,1] on hospital overload (soft-constraint
            violations) and on deaths.
        bed_ratio: hospital beds per person (capacity on compartment H).
        weight_goout: weight in [0,1] on keeping contacts high (economy).
        initial_infect: initially affected fraction, split across E/A/I/H.

    Returns:
        (opti, x, k): the casadi Opti object, the solved 7x(T+1) state
        trajectory, and the solved 1xT contact-rate control.
    """
    # args:
    # weight_eps: weights on overloading hospital: [0,1]
    # bed_ratio: bed per person
    # weight_goout: weights on going out/ economy: [0,1]
    # initial state: SIR, d not consider because control cannot change it
    params = {}
    params['mobility'] = 0 # only one region
    params['eta'] = 1/2.34 # from exposed to asymptomatic
    params['alpha'] = 1/2.86 # from asymptomatic to infected
    params['mu'] = 1/3.2 # prob leaving infected
    params['gamma'] = 0.13 # conditional prob to icu
    params['phi'] = 1/7.0 # death rate (inverse of time in icu)
    params['w'] = 0.2 # prob death
    params['xi'] = 0.1 # prob recover from ICU
    params['beta_a'] = 0.07 # infectivity of asymptomatic
    params['beta_i'] = 0.07 # infectivity of infected
    params['k'] = 13.3 # average number of contact
    params['C'] = 0.721 # contact rate
    params['eps'] = 0.01 # density factor
    params['sigma']= 2.5 # household size
    params['n_eff'] = 8570000 # effecitve population
    params['s'] = 39133 # area of the region
    eps_penalty = weight_eps*1e5 # penalty parameter for soft constraints,upper bound 1e5
    lockdown_penalty = weight_goout*8e-2 # upper bound 8e-2
    death_penalty = weight_eps*5e3 # upper bound 5e3
    bed_per_person = bed_ratio # upper bound 5e-2
    final_infect_penalty = 5e6
    opti = cs.Opti()
    T = 100 # horizon
    x = opti.variable(7,T+1)  # state trajectory, one column per step
    k = opti.variable(1,T)  # control: average contacts per step
    eps_soft = opti.variable(1,T)  # soft-constraint slack on hospital capacity
    loss = opti.variable(1,T+1)  # running cost accumulator
    x_init = opti.parameter(7,1)
    # boundery condition
    # NOTE(review): this pins loss[1] rather than loss[0] -- confirm intended.
    opti.subject_to(loss[1]==0)
    # multiple shooting (dynamics)
    for i in range(T):
        trans = calculate_trans(x[:,i], params,k[i], i)
        opti.subject_to(x[:,i+1]==trans@x[:,i])
        #opti.subject_to(loss[i+1]==loss[i]-k[i])#**2+10000*(x[3,i]+x[5,i])**2)
        # Running cost: lockdown severity (quadratic in reduced contacts)
        # plus linear penalty on hospital-capacity slack.
        opti.subject_to(loss[i+1]==loss[i]+lockdown_penalty*(params['k']-k[i])**2+eps_penalty*(eps_soft[i]))
        # control constraints
        opti.subject_to(k[i]<=params['k'])
        opti.subject_to(k[i]>=1)
        opti.subject_to(eps_soft[i]>=0)
        opti.subject_to(eps_soft[i]<=0.1) # reasonable upper bound on available beds
        #opti.subject_to(x[4,i]<=0.01)
        # Hospitalized fraction must fit in the (softly relaxed) bed capacity.
        opti.subject_to(x[4,i]<=bed_per_person + eps_soft[i])
        # initialization of value
        opti.set_initial(eps_soft[i],0.1)
        opti.set_initial(k[i],1)
    # boundary conditions
    opti.subject_to(x[:,0]==x_init)
    opti.subject_to(k[0]==1)
    # Terminal objective: accumulated running cost plus quadratic penalties on
    # the final x[6] and hospitalized x[4] fractions.
    opti.minimize(loss[-1]+death_penalty*x[6,T]*x[6,T]+final_infect_penalty*x[4,T]*x[4,T])
    p_opts = {"expand":True}
    s_opts = {"max_iter": 1e4}
    opti.solver('ipopt',p_opts,s_opts)
    # initial state
    # NOTE(review): these r/d labels disagree with the transition dynamics in
    # calculate_trans (index 5 is fed by the death branch there) -- verify.
    temp = cs.DM(7,1)
    temp[0] = 1-initial_infect # s
    temp[1] = 0.5*initial_infect # e
    temp[2] = 0.4*initial_infect # a
    temp[3] = 0.09*initial_infect # i
    temp[4] = 0.01*initial_infect # h
    temp[5] = 0.0 # r
    temp[6] = 0.0 # d
    opti.set_value(x_init,temp)
    sol = opti.solve()
    return opti, opti.value(x),opti.value(k)
# plt.figure(1)
# plt.clf()
# plt.plot(sol.value(k))
# plt.figure(2)
# plt.clf()
# plt.plot(sol.value(eps_soft))
# plt.figure(3)
# plt.clf()
# plt.plot(sol.value(x)[3,:],label='infected')
# plt.plot(sol.value(x)[4,:],label='hospitalized')
# plt.plot(sol.value(x)[5,:],label='death')
# plt.legend()
# plt.show()
#pd.DataFrame(sol.value(x), index=['S','E','A','I','H','D','R']).to_csv('For_Emanuele.csv')
| 4,608 | 0 | 92 |
a70c9efcd91641f8773a1d95390445eb3c8fc362 | 10,971 | py | Python | evaluation/finetune.py | MosyMosy/cellemnet | 59a3b6f2acc1397a95ee704c3e31c916c47f4b92 | [
"BSD-3-Clause"
] | 12 | 2020-12-16T15:01:30.000Z | 2022-03-06T12:29:48.000Z | evaluation/finetune.py | MosyMosy/cellemnet | 59a3b6f2acc1397a95ee704c3e31c916c47f4b92 | [
"BSD-3-Clause"
] | null | null | null | evaluation/finetune.py | MosyMosy/cellemnet | 59a3b6f2acc1397a95ee704c3e31c916c47f4b92 | [
"BSD-3-Clause"
] | 1 | 2021-08-31T16:17:20.000Z | 2021-08-31T16:17:20.000Z | import os, sys, argparse, mlflow, yaml
import numpy as np
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from albumentations import (
Compose, PadIfNeeded, Normalize, HorizontalFlip, VerticalFlip, RandomBrightnessContrast,
CropNonEmptyMaskIfExists, GaussNoise, RandomResizedCrop, Rotate, GaussianBlur
)
from albumentations.pytorch import ToTensorV2
from resources.data import SegmentationData, FactorResize
from resources.train_utils import Trainer
from resources.utils import load_pretrained_state_for_unet, moco_to_unet_prefixes
# Maps augmentation names (as they appear in the YAML config's
# 'augmentations' list) to the corresponding albumentations classes, so
# transforms can be specified by name and instantiated with config kwargs.
augmentation_dict = {
    'PadIfNeeded': PadIfNeeded, 'HorizontalFlip': HorizontalFlip, 'VerticalFlip': VerticalFlip,
    'RandomBrightnessContrast': RandomBrightnessContrast, 'CropNonEmptyMaskIfExists': CropNonEmptyMaskIfExists,
    'GaussNoise': GaussNoise, 'RandomResizedCrop': RandomResizedCrop, 'Rotate': Rotate,
    'GaussianBlur': GaussianBlur
}
if __name__ == "__main__":
if 'snakemake' in globals():
args = snakemake_args()
else:
args = parse_args()
#set manual seed to ensure we always start with the same model parameters
torch.manual_seed(42)
with open(args['config'], 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['config_file'] = args['config']
#overwrite the model_dir, pretraining, iterations, or finetuning layer
if args['md'] is not None:
config['model_dir'] = args['md']
if args['pf'] is not None:
config['pretraining'] = args['pf']
if args['n'] is not None:
config['iters'] = args['n']
if args['ft'] is not None:
config['finetune_layer'] = args['ft']
experiment = config['experiment_name']
pretraining = config['pretraining']
#if we're working with MoCo pretrained weights
#then we'll have to download them separately from the
#built-in pytorch function
if pretraining in ['imagenet_mocov2', 'cellemnet_mocov2']:
#this loads the state dict and adds the prefix "encoder."
#to the keys such that they match those in the UNet model
#it
state_dict, norms = load_pretrained_state_for_unet(config['encoder'], pretraining)
if norms == None:
gray_channels = 3
normalize = Normalize() #default is ImageNet means and standard deviations
else:
gray_channels = 1
normalize = Normalize(mean=norms[0], std=norms[1])
#create the Unet model and load the pretrained weights
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
msg = model.load_state_dict(state_dict, strict=False)
elif pretraining == 'imagenet_supervised':
#create the UNet with imagenet supervised weights which are
#automatically downloaded through smp
model = smp.Unet(config['encoder'], encoder_weights='imagenet', classes=config['num_classes'])
gray_channels = 3
normalize = Normalize() #default is ImageNet means and standard deviations
elif os.path.isfile(pretraining):
#it's also possible to directly pass a .pth file as the
#pretrained weights. In which case we assume that they
#were generated by the train_mocov2.py script and load them accordingly
checkpoint = torch.load(pretraining, map_location='cpu')
state_dict, norms = checkpoint['state_dict'], checkpoint['norms']
state_dict = moco_to_unet_prefixes(state_dict)
gray_channels = 1
normalize = Normalize(mean=norms[0], std=norms[1])
#create the Unet model and load the pretrained weights
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
msg = model.load_state_dict(state_dict, strict=False)
print(f'Successfully loaded parameters from {pretraining}')
else: #random initialization
print('No pretraining found. Using randomly initialized weights!')
gray_channels = 1
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
#use the norms defined for the dataset in the config file
normalize = Normalize(**config['norms'])
#importantly, we want to store the mean and std that we're
#using for training with theses weights. this eliminates
#any confusion during inference.
config['training_norms'] = [normalize.mean, normalize.std]
#freeze all encoder layers to start and only open
#them when specified
for param in model.encoder.parameters():
param.requires_grad = False
#unfreeze layers based on the finetune_layer argument
finetune_layer = config['finetune_layer']
encoder_groups = [mod[1] for mod in model.encoder.named_children()]
if finetune_layer != 'none':
#this indices should work for any ResNet model, but were specifically
#chosen for ResNet50
layer_index = {'all': 0, 'layer1': 4, 'layer2': 5, 'layer3': 6, 'layer4': 7}
start_layer = layer_index[finetune_layer]
#always finetune from the start layer to the last layer in the resnet
for group in encoder_groups[start_layer:]:
for param in group.parameters():
param.requires_grad = True
#in the MoCo paper, the authors suggest making the parameters
#in BatchNorm layers trainable to help account for the smaller
#magnitudes of weights that typically occur with unsupervised
#pretraining. we haven't found this to be beneficial for the
#OneCycle LR policy, it might be for other lr policies though.
if config['unfreeze_encoder_bn']:
#this makes all the batchnorm layers in the encoder trainable
model.encoder.apply(unfreeze_encoder_bn)
#print out the number of trainable parameters in the whole model
#unfreeze_encoder_bn adds about 50k more
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print(f'Using model with {params} trainable parameters!')
#construct the set of augmentations from config
dataset_augs = []
for aug_params in config['augmentations']:
aug_name = aug_params['aug']
#lookup aug_name and replace it with the
#correct augmentation class
aug = augmentation_dict[aug_name]
#delete the aug key and then the remaining
#dictionary items are kwargs
del aug_params['aug']
dataset_augs.append(aug(**aug_params))
#unpack the list of dataset specific augmentations
#into Compose, and then add normalization and tensor
#conversion, which apply universally
augs = Compose([
*dataset_augs,
normalize,
ToTensorV2()
])
#create the segmentation data for training
data_dir = config['data_dir']
train_dir = 'train/'
bsz = config['bsz']
trn_data = SegmentationData(os.path.join(data_dir, train_dir), tfs=augs, gray_channels=gray_channels,
segmentation_classes=config['num_classes'])
config['n_images'] = len(trn_data.fnames)
#create the dataloader
#NOTE: if using CPU, the pin_memory argument must be set to False
#In the future, we may add a "cpu" argument to the config; we expect
#that most users will have access to a GPU though.
train = DataLoader(trn_data, batch_size=bsz, shuffle=True, pin_memory=True, drop_last=True, num_workers=config['jobs'])
#check for a validation directory and use it if it exists
#if not, then we don't use any validation data
val_dir = 'valid/'
if os.path.isdir(os.path.join(data_dir, val_dir)):
#eval_augs are always the same.
#since we ultimately want to run our model on
#full size images and not cropped patches, we use
#FactorResize. This is a custom augmentation that
#simply resizes the image to the nearest multiple
#of 32 (which is necessary to work with the UNet model).
#if working with very large images that don't fit in memory
#it could be swapped out for a CenterCrop. the results will
#be less reflective of performance in the test case however.
eval_augs = Compose([
FactorResize(32),
normalize,
ToTensorV2()
])
val_data = SegmentationData(os.path.join(data_dir, val_dir), tfs=eval_augs, gray_channels=gray_channels,
segmentation_classes=config['num_classes'])
#using a batch size of 1 means that we report a per-image IoU score
valid = DataLoader(val_data, batch_size=1, shuffle=False, pin_memory=True, num_workers=config['jobs'])
else:
valid = None
#create model path ahead of time so that
#we don't try to save to a directory that doesn't
#exist later on
model_dir = config['model_dir']
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
#train the model using the parameters in the config file
#TODO: add a progress bar option to config
trainer = Trainer(config, model, train, valid)
trainer.train() | 44.597561 | 123 | 0.676511 | import os, sys, argparse, mlflow, yaml
import numpy as np
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from albumentations import (
Compose, PadIfNeeded, Normalize, HorizontalFlip, VerticalFlip, RandomBrightnessContrast,
CropNonEmptyMaskIfExists, GaussNoise, RandomResizedCrop, Rotate, GaussianBlur
)
from albumentations.pytorch import ToTensorV2
from resources.data import SegmentationData, FactorResize
from resources.train_utils import Trainer
from resources.utils import load_pretrained_state_for_unet, moco_to_unet_prefixes
augmentation_dict = {
'PadIfNeeded': PadIfNeeded, 'HorizontalFlip': HorizontalFlip, 'VerticalFlip': VerticalFlip,
'RandomBrightnessContrast': RandomBrightnessContrast, 'CropNonEmptyMaskIfExists': CropNonEmptyMaskIfExists,
'GaussNoise': GaussNoise, 'RandomResizedCrop': RandomResizedCrop, 'Rotate': Rotate,
'GaussianBlur': GaussianBlur
}
def parse_args():
    """Parse the finetuning command line and return the options as a dict.

    `config` is the required config yaml path; -md/-pf/-n/-ft optionally
    override values that normally live in the config file (handy when the
    scripts are driven by Snakemake).
    """
    cli = argparse.ArgumentParser(description='Runs finetuning on 2d segmentation data')
    cli.add_argument('config', type=str, metavar='pretraining', help='Path to a config yaml file')
    # Optional overrides of config-file values: (flag, type, dest, metavar, choices, help).
    overrides = [
        ('-md', str, 'md', 'model_dir', None, 'Directory in which to save models'),
        ('-pf', str, 'pf', 'pretraining_file', None, 'Path to a pretrained state_dict'),
        ('-n', int, 'n', 'iters', None, 'Number of training iterations'),
        ('-ft', str, 'ft', 'finetune_layer',
         ['all', 'layer4', 'layer3', 'layer2', 'layer1', 'none'],
         'ResNet encoder layers to finetune'),
    ]
    for flag, typ, dest, metavar, choices, help_text in overrides:
        kwargs = dict(type=typ, dest=dest, metavar=metavar, help=help_text)
        if choices is not None:
            kwargs['choices'] = choices
        cli.add_argument(flag, **kwargs)
    return vars(cli.parse_args())
def snakemake_args():
    """Derive the args dict from the `snakemake` object injected at runtime.

    NOTE(review): `snakemake` is a global that Snakemake provides when this
    script runs inside a rule; it is undefined otherwise. `vars()` returns
    snakemake.params' own __dict__, so the `del` below mutates it in place.
    """
    params = vars(snakemake.params)
    # The rule's first input file is the config yaml.
    params['config'] = snakemake.input[0]
    del params['_names']
    return params
if __name__ == "__main__":
if 'snakemake' in globals():
args = snakemake_args()
else:
args = parse_args()
#set manual seed to ensure we always start with the same model parameters
torch.manual_seed(42)
with open(args['config'], 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['config_file'] = args['config']
#overwrite the model_dir, pretraining, iterations, or finetuning layer
if args['md'] is not None:
config['model_dir'] = args['md']
if args['pf'] is not None:
config['pretraining'] = args['pf']
if args['n'] is not None:
config['iters'] = args['n']
if args['ft'] is not None:
config['finetune_layer'] = args['ft']
experiment = config['experiment_name']
pretraining = config['pretraining']
#if we're working with MoCo pretrained weights
#then we'll have to download them separately from the
#built-in pytorch function
if pretraining in ['imagenet_mocov2', 'cellemnet_mocov2']:
#this loads the state dict and adds the prefix "encoder."
#to the keys such that they match those in the UNet model
#it
state_dict, norms = load_pretrained_state_for_unet(config['encoder'], pretraining)
if norms == None:
gray_channels = 3
normalize = Normalize() #default is ImageNet means and standard deviations
else:
gray_channels = 1
normalize = Normalize(mean=norms[0], std=norms[1])
#create the Unet model and load the pretrained weights
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
msg = model.load_state_dict(state_dict, strict=False)
elif pretraining == 'imagenet_supervised':
#create the UNet with imagenet supervised weights which are
#automatically downloaded through smp
model = smp.Unet(config['encoder'], encoder_weights='imagenet', classes=config['num_classes'])
gray_channels = 3
normalize = Normalize() #default is ImageNet means and standard deviations
elif os.path.isfile(pretraining):
#it's also possible to directly pass a .pth file as the
#pretrained weights. In which case we assume that they
#were generated by the train_mocov2.py script and load them accordingly
checkpoint = torch.load(pretraining, map_location='cpu')
state_dict, norms = checkpoint['state_dict'], checkpoint['norms']
state_dict = moco_to_unet_prefixes(state_dict)
gray_channels = 1
normalize = Normalize(mean=norms[0], std=norms[1])
#create the Unet model and load the pretrained weights
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
msg = model.load_state_dict(state_dict, strict=False)
print(f'Successfully loaded parameters from {pretraining}')
else: #random initialization
print('No pretraining found. Using randomly initialized weights!')
gray_channels = 1
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
#use the norms defined for the dataset in the config file
normalize = Normalize(**config['norms'])
#importantly, we want to store the mean and std that we're
#using for training with theses weights. this eliminates
#any confusion during inference.
config['training_norms'] = [normalize.mean, normalize.std]
#freeze all encoder layers to start and only open
#them when specified
for param in model.encoder.parameters():
param.requires_grad = False
#unfreeze layers based on the finetune_layer argument
finetune_layer = config['finetune_layer']
encoder_groups = [mod[1] for mod in model.encoder.named_children()]
if finetune_layer != 'none':
#this indices should work for any ResNet model, but were specifically
#chosen for ResNet50
layer_index = {'all': 0, 'layer1': 4, 'layer2': 5, 'layer3': 6, 'layer4': 7}
start_layer = layer_index[finetune_layer]
#always finetune from the start layer to the last layer in the resnet
for group in encoder_groups[start_layer:]:
for param in group.parameters():
param.requires_grad = True
#in the MoCo paper, the authors suggest making the parameters
#in BatchNorm layers trainable to help account for the smaller
#magnitudes of weights that typically occur with unsupervised
#pretraining. we haven't found this to be beneficial for the
#OneCycle LR policy, it might be for other lr policies though.
if config['unfreeze_encoder_bn']:
def unfreeze_encoder_bn(module):
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
for param in module.parameters():
param.requires_grad = True
#this makes all the batchnorm layers in the encoder trainable
model.encoder.apply(unfreeze_encoder_bn)
#print out the number of trainable parameters in the whole model
#unfreeze_encoder_bn adds about 50k more
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print(f'Using model with {params} trainable parameters!')
#construct the set of augmentations from config
dataset_augs = []
for aug_params in config['augmentations']:
aug_name = aug_params['aug']
#lookup aug_name and replace it with the
#correct augmentation class
aug = augmentation_dict[aug_name]
#delete the aug key and then the remaining
#dictionary items are kwargs
del aug_params['aug']
dataset_augs.append(aug(**aug_params))
#unpack the list of dataset specific augmentations
#into Compose, and then add normalization and tensor
#conversion, which apply universally
augs = Compose([
*dataset_augs,
normalize,
ToTensorV2()
])
#create the segmentation data for training
data_dir = config['data_dir']
train_dir = 'train/'
bsz = config['bsz']
trn_data = SegmentationData(os.path.join(data_dir, train_dir), tfs=augs, gray_channels=gray_channels,
segmentation_classes=config['num_classes'])
config['n_images'] = len(trn_data.fnames)
#create the dataloader
#NOTE: if using CPU, the pin_memory argument must be set to False
#In the future, we may add a "cpu" argument to the config; we expect
#that most users will have access to a GPU though.
train = DataLoader(trn_data, batch_size=bsz, shuffle=True, pin_memory=True, drop_last=True, num_workers=config['jobs'])
#check for a validation directory and use it if it exists
#if not, then we don't use any validation data
val_dir = 'valid/'
if os.path.isdir(os.path.join(data_dir, val_dir)):
#eval_augs are always the same.
#since we ultimately want to run our model on
#full size images and not cropped patches, we use
#FactorResize. This is a custom augmentation that
#simply resizes the image to the nearest multiple
#of 32 (which is necessary to work with the UNet model).
#if working with very large images that don't fit in memory
#it could be swapped out for a CenterCrop. the results will
#be less reflective of performance in the test case however.
eval_augs = Compose([
FactorResize(32),
normalize,
ToTensorV2()
])
val_data = SegmentationData(os.path.join(data_dir, val_dir), tfs=eval_augs, gray_channels=gray_channels,
segmentation_classes=config['num_classes'])
#using a batch size of 1 means that we report a per-image IoU score
valid = DataLoader(val_data, batch_size=1, shuffle=False, pin_memory=True, num_workers=config['jobs'])
else:
valid = None
#create model path ahead of time so that
#we don't try to save to a directory that doesn't
#exist later on
model_dir = config['model_dir']
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
#train the model using the parameters in the config file
#TODO: add a progress bar option to config
trainer = Trainer(config, model, train, valid)
trainer.train() | 1,500 | 0 | 76 |
d9b900ac0011d88c2286031f282911a8b10c74f9 | 321 | py | Python | tests/v7/exemplar_generators/__init__.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | 1 | 2019-03-07T19:58:45.000Z | 2019-03-07T19:58:45.000Z | tests/v7/exemplar_generators/__init__.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | 9 | 2017-10-04T15:08:53.000Z | 2021-02-02T21:51:41.000Z | tests/v7/exemplar_generators/__init__.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | null | null | null | from .exemplar_primitive_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .exemplar_derived_generators import EXEMPLAR_DERIVED_GENERATORS
from .exemplar_custom_generators import EXEMPLAR_CUSTOM_GENERATORS
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
| 53.5 | 110 | 0.919003 | from .exemplar_primitive_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .exemplar_derived_generators import EXEMPLAR_DERIVED_GENERATORS
from .exemplar_custom_generators import EXEMPLAR_CUSTOM_GENERATORS
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
| 0 | 0 | 0 |
6a619faf0375516724dca16a64221e0c2c63b51e | 10,349 | py | Python | parser.py | LiXianyao/ace2005-preprocessing | 49e2d6c45d68b51568a2e234f0dd66dd74d01006 | [
"MIT"
] | null | null | null | parser.py | LiXianyao/ace2005-preprocessing | 49e2d6c45d68b51568a2e234f0dd66dd74d01006 | [
"MIT"
] | null | null | null | parser.py | LiXianyao/ace2005-preprocessing | 49e2d6c45d68b51568a2e234f0dd66dd74d01006 | [
"MIT"
] | null | null | null | from xml.etree import ElementTree
from bs4 import BeautifulSoup
import nltk
import json
import re
if __name__ == '__main__':
# parser = Parser('./data/ace_2005_td_v7/data/English/un/fp2/alt.gossip.celebrities_20041118.2331')
parser = Parser('./data/ace_2005_td_v7/data/English/un/timex2norm/alt.corel_20041228.0503')
data = parser.get_data()
with open('./output/debug.json', 'w') as f:
json.dump(data, f, indent=2)
# index = parser.sgm_text.find("Diego Garcia")
# print('index :', index)
# print(parser.sgm_text[1918 - 30:])
| 38.615672 | 124 | 0.519857 | from xml.etree import ElementTree
from bs4 import BeautifulSoup
import nltk
import json
import re
class Parser:
    def __init__(self, path, withValue):
        """Parse one ACE 2005 document given its path prefix (no extension).

        Reads `path + '.apf.xml'` (annotations) and `path + '.sgm'` (text),
        then repairs the annotation character offsets against the sgm text.

        Args:
            path: document path without the .apf.xml / .sgm extension.
            withValue: when True, also collect ACE 'value' and 'timex2'
                annotations (see parse_xml).
        """
        self.path = path
        self.entity_mentions = []
        self.event_mentions = []
        self.sentences = []  # NOTE(review): not populated anywhere visible here -- verify
        self.withValue = withValue
        print("ACE Value and Time are include?: {}".format(withValue))
        self.sgm_text = ''
        # The two empty lists above are immediately replaced by parsed mentions.
        self.entity_mentions, self.event_mentions = self.parse_xml(path + '.apf.xml')
        self.sents_with_pos = self.parse_sgm(path + '.sgm')
        self.fix_wrong_position()
@staticmethod
def clean_text(text):
return text.replace('\n', ' ')
    def get_data(self):
        """Assemble per-sentence training items from the parsed document.

        For each tokenized sentence, collects the entity and event mentions
        whose character spans fall entirely inside the sentence span.

        Returns:
            A list of dicts with keys 'sentence', 'position',
            'golden-entity-mentions' and 'golden-event-mentions'.
        """
        data = []
        for sent in self.sents_with_pos:
            item = dict()
            item['sentence'] = self.clean_text(sent['text'])
            # NOTE(review): this aliases sent['position']; the += below
            # mutates the shared list (text_position sees the shift too).
            item['position'] = sent['position']
            text_position = sent['position']
            # Shift the start index past the leading whitespace of the
            # cleaned sentence so positions match the stripped text.
            for i, s in enumerate(item['sentence']):
                if s != ' ':
                    item['position'][0] += i
                    break
            item['sentence'] = item['sentence'].strip()
            entity_map = dict()
            item['golden-entity-mentions'] = []
            item['golden-event-mentions'] = []
            # Keep only entity mentions fully contained in this sentence.
            for entity_mention in self.entity_mentions:
                entity_position = entity_mention['position']
                if text_position[0] <= entity_position[0] and entity_position[1] <= text_position[1]:
                    item['golden-entity-mentions'].append({
                        'text': self.clean_text(entity_mention['text']),
                        'position': entity_position,
                        'entity-type': entity_mention['entity-type']
                    })
                    entity_map[entity_mention['entity-id']] = entity_mention
            # Keep only events whose trigger lies in this sentence.
            for event_mention in self.event_mentions:
                event_position = event_mention['trigger']['position']
                if text_position[0] <= event_position[0] and event_position[1] <= text_position[1]:
                    event_arguments = []
                    for argument in event_mention['arguments']:
                        try:
                            entity_type = entity_map[argument['entity-id']]['entity-type']
                        except KeyError:
                            # The argument's entity belongs to another sentence.
                            print('[Warning] The entity in the other sentence is mentioned. This argument will be ignored.')
                            continue
                        event_arguments.append({
                            'role': argument['role'],
                            'position': argument['position'],
                            'entity-type': entity_type,
                            'text': self.clean_text(argument['text']),
                        })
                    item['golden-event-mentions'].append({
                        'trigger': event_mention['trigger'],
                        'arguments': event_arguments,
                        'position': event_position,
                        'event_type': event_mention['event_type'],
                    })
            data.append(item)
        return data
def find_correct_offset(self, sgm_text, start_index, text):
offset = 0
for i in range(0, 70):
for j in [-1, 1]:
offset = i * j
if sgm_text[start_index + offset:start_index + offset + len(text)] == text:
return offset
print('[Warning] fail to find offset! (start_index: {}, text: {}, path: {})'.format(start_index, text, self.path))
return offset
def fix_wrong_position(self):
for entity_mention in self.entity_mentions:
offset = self.find_correct_offset(
sgm_text=self.sgm_text,
start_index=entity_mention['position'][0],
text=entity_mention['text'])
entity_mention['position'][0] += offset
entity_mention['position'][1] += offset
for event_mention in self.event_mentions:
offset1 = self.find_correct_offset(
sgm_text=self.sgm_text,
start_index=event_mention['trigger']['position'][0],
text=event_mention['trigger']['text'])
event_mention['trigger']['position'][0] += offset1
event_mention['trigger']['position'][1] += offset1
for argument in event_mention['arguments']:
offset2 = self.find_correct_offset(
sgm_text=self.sgm_text,
start_index=argument['position'][0],
text=argument['text'])
argument['position'][0] += offset2
argument['position'][1] += offset2
def parse_sgm(self, sgm_path):
with open(sgm_path, 'r') as f:
soup = BeautifulSoup(f.read(), features='html.parser')
self.sgm_text = soup.text
doc_type = soup.doc.doctype.text.strip()
def remove_tags(selector):
tags = soup.findAll(selector)
for tag in tags:
tag.extract()
remove_tags('datetime')
if doc_type == 'WEB TEXT':
remove_tags('poster')
remove_tags('postdate')
remove_tags('subject')
elif doc_type in ['CONVERSATION', 'STORY']:
remove_tags('speaker')
try:
remove_tags('headline')
remove_tags('endtime')
except:
pass
sents = []
converted_text = soup.text
for sent in nltk.sent_tokenize(converted_text):
sents.extend(sent.split('\n\n'))
sents = list(filter(lambda x: len(x) > 5, sents))
sents = sents[1:]
sents_with_pos = []
last_pos = 0
for sent in sents:
pos = self.sgm_text.find(sent, last_pos)
last_pos = pos
sents_with_pos.append({
'text': sent,
'position': [pos, pos + len(sent)]
})
return sents_with_pos
def parse_xml(self, xml_path):
entity_mentions, event_mentions = [], []
tree = ElementTree.parse(xml_path)
root = tree.getroot()
for child in root[0]:
if child.tag == 'entity':
entity_mentions.extend(self.parse_entity_tag(child))
elif self.withValue and child.tag in ['value', 'timex2']:
entity_mentions.extend(self.parse_value_timex_tag(child))
elif child.tag == 'event':
event_mentions.extend(self.parse_event_tag(child))
return entity_mentions, event_mentions
@staticmethod
def parse_entity_tag(node):
entity_mentions = []
for child in node:
if child.tag != 'entity_mention':
continue
extent = child[0]
charset = extent[0]
entity_mention = dict()
entity_mention['entity-id'] = child.attrib['ID']
entity_mention['entity-type'] = '{}:{}'.format(node.attrib['TYPE'], node.attrib['SUBTYPE'])
entity_mention['text'] = charset.text
entity_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
entity_mentions.append(entity_mention)
return entity_mentions
    @staticmethod
    def parse_event_tag(node):
        """Extract all <event_mention> children of an <event> node.

        Each mention carries the event type ("TYPE:SUBTYPE"), the sentence
        scope text/position, the trigger, and the argument list.
        """
        event_mentions = []
        for child in node:
            if child.tag == 'event_mention':
                event_mention = dict()
                event_mention['event_type'] = '{}:{}'.format(node.attrib['TYPE'], node.attrib['SUBTYPE'])
                event_mention['arguments'] = []
                for child2 in child:
                    # Sentence-level scope of the mention.
                    if child2.tag == 'ldc_scope':
                        charset = child2[0]
                        event_mention['text'] = charset.text
                        event_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
                    # The trigger word/phrase.
                    if child2.tag == 'anchor':
                        charset = child2[0]
                        event_mention['trigger'] = {
                            'text': charset.text,
                            'position': [int(charset.attrib['START']), int(charset.attrib['END'])],
                        }
                    # One argument per <event_mention_argument>.
                    if child2.tag == 'event_mention_argument':
                        extent = child2[0]
                        charset = extent[0]
                        event_mention['arguments'].append({
                            'text': charset.text,
                            'position': [int(charset.attrib['START']), int(charset.attrib['END'])],
                            'role': child2.attrib['ROLE'],
                            'entity-id': child2.attrib['REFID'],
                        })
                event_mentions.append(event_mention)
        return event_mentions
@staticmethod
def parse_value_timex_tag(node):
entity_mentions = []
for child in node:
extent = child[0]
charset = extent[0]
entity_mention = dict()
entity_mention['entity-id'] = child.attrib['ID']
if 'TYPE' in node.attrib:
entity_mention['entity-type'] = node.attrib['TYPE']
if 'SUBTYPE' in node.attrib:
entity_mention['entity-type'] += ':{}'.format(node.attrib['SUBTYPE'])
if child.tag == 'timex2_mention':
entity_mention['entity-type'] = 'TIM:time'
entity_mention['text'] = charset.text
entity_mention['position'] = [int(charset.attrib['START']), int(charset.attrib['END'])]
entity_mentions.append(entity_mention)
return entity_mentions
if __name__ == '__main__':
    # Smoke test: parse a single ACE 2005 document and dump the extracted
    # sentences/mentions to a JSON file for inspection.
    # parser = Parser('./data/ace_2005_td_v7/data/English/un/fp2/alt.gossip.celebrities_20041118.2331')
    parser = Parser('./data/ace_2005_td_v7/data/English/un/timex2norm/alt.corel_20041228.0503')
    data = parser.get_data()
    with open('./output/debug.json', 'w') as f:
        json.dump(data, f, indent=2)
    # index = parser.sgm_text.find("Diego Garcia")
    # print('index :', index)
    # print(parser.sgm_text[1918 - 30:])
| 9,428 | 333 | 23 |
0f6b82b2722b8631ecff0cbbd5cb14bc65c289af | 2,347 | py | Python | test/test_tecio.py | flying-tiger/aero_util | 78cb761fa3fd838dcc4786fcc6b7b9b92299c4b7 | [
"MIT"
] | null | null | null | test/test_tecio.py | flying-tiger/aero_util | 78cb761fa3fd838dcc4786fcc6b7b9b92299c4b7 | [
"MIT"
] | null | null | null | test/test_tecio.py | flying-tiger/aero_util | 78cb761fa3fd838dcc4786fcc6b7b9b92299c4b7 | [
"MIT"
] | null | null | null | import io
import unittest
from aero_util.tecio import *
from . import common
| 41.910714 | 80 | 0.491265 | import io
import unittest
from aero_util.tecio import *
from . import common
class TestTecIO(unittest.TestCase):
    """Unit tests for `read_dat` (Tecplot ASCII reader) from aero_util.tecio.

    Fixture files live under `common.data_dir`; zone variables are exposed
    as numpy-compatible arrays keyed by variable name.
    """
    def test_simple_example(self):
        ''' Test that we can read a simple Tecplot *.dat file '''
        data = read_dat(common.data_dir/'example1.dat')
        self.assertEqual(len(data), 1)
        self.assertEqual(set(data[0].keys()), {"X", "Y"})
        self.assertTrue(all(np.equal(data[0]['X'], [1., 2., 2., 1.])))
        self.assertTrue(all(np.equal(data[0]['Y'], [1., 1., 2., 2.])))
    def test_blayer_example(self):
        ''' Test that we can read a BLAYER output file '''
        data = read_dat(common.data_dir/'blayer2d.dat')
        self.assertEqual(len(data), 1)
        # Spot-check the first six values of two columns.
        self.assertTrue(all(np.equal(data[0]['xw (m)'][0:6],[
            1.000000000E-30,
            9.521124155E-06,
            4.759823346E-05,
            1.237390843E-04,
            2.379385096E-04,
            3.902034650E-04,
        ])))
        self.assertTrue(all(np.equal(data[0]['pw (Pa)'][0:6],[
            3.044047349E+02,
            3.044047349E+02,
            3.044855657E+02,
            3.041873005E+02,
            3.037223769E+02,
            3.031704390E+02,
        ])))
        # Every expected variable name must be present.
        self.assertEqual(set(data[0].keys()), {
            "xw (m)", "yw (m)", "running length (m)", "rhow (kg/m^3)",
            "pw (Pa)", "Tw (K)", "Tvw (K)", "Hw (J/kg)", "muw (Pa.s)", "n2w",
            "o2w", "now", "no+w", "n2+w", "o2+w", "nw", "ow", "n+w", "o+w",
            "ew", "qw (W/m^2)", "qvw (W/m^2)", "tauwx (Pa)", "tauwy (Pa)",
            "kappaw (W/m.K)", "rhoe (kg/m^3)", "pe (Pa)", "Te (K)", "Tve (K)",
            "He (J/kg)", "ue (m/s)", "ve (m/s)", "Me", "mue (Pa.s)", "n2e",
            "o2e", "noe", "no+e", "n2+e", "o2+e", "ne", "oe", "n+e", "o+e",
            "ee", "delta (m)", "deltastar (m)", "theta (m)", "Re-ue",
            "CH (kg/m^2.s)", "kappae (W/m.K)", "roughness (m)", "rhok (kg/m^3)",
            "velk (m/s)", "muk (Pa.s)", "Re-kk",
        })
    def test_cube_grid(self):
        ''' Verify reading blocked, multi-zone, 2D data file '''
        data = read_dat(common.data_dir/'cube.dat')
        self.assertEqual(len(data),6)
        # Zone arrays are 2-D (tuple indexing below).
        self.assertAlmostEqual(data[3]['x'][8,3], -0.30)
        self.assertAlmostEqual(data[3]['y'][8,3], 0.50)
        self.assertAlmostEqual(data[3]['z'][8,3], 0.20)
cba83a2c1a0ebe0d05c5cb83974bb9b6654b020a | 12,720 | py | Python | lib/scribbles.py | masadcv/ECONet-MONAILabel | 284c83bf9f772932df2e1e39a9bddc0ecee514e2 | [
"Apache-2.0"
] | 4 | 2022-03-17T22:07:13.000Z | 2022-03-27T22:02:53.000Z | lib/scribbles.py | masadcv/ECONet-MONAILabel | 284c83bf9f772932df2e1e39a9bddc0ecee514e2 | [
"Apache-2.0"
] | null | null | null | lib/scribbles.py | masadcv/ECONet-MONAILabel | 284c83bf9f772932df2e1e39a9bddc0ecee514e2 | [
"Apache-2.0"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
from monai.transforms import (Compose, EnsureChannelFirstd, LoadImaged,
ScaleIntensityRanged, Spacingd)
from monailabel.interfaces.tasks.infer import InferTask, InferType
from monailabel.scribbles.transforms import AddBackgroundScribblesFromROId
from monailabel.transform.post import BoundingBoxd, Restored
from lib.transforms import (AddBackgroundScribblesFromROIWithDropfracd,
ApplyGaussianSmoothing, ApplyGraphCutOptimisationd,
MakeLikelihoodFromScribblesDybaORFd,
MakeLikelihoodFromScribblesECONetd,
MakeLikelihoodFromScribblesGMMd,
MakeLikelihoodFromScribblesHistogramd, Timeit)
class ECONetPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Efficient Convolutional Online Likelihood Network (ECONet) based Online Likelihood training and inference method for
COVID-19 lung lesion segmentation based on the following paper:
Asad, Muhammad, Lucas Fidon, and Tom Vercauteren. "" ECONet: Efficient Convolutional Online Likelihood Network
for Scribble-based Interactive Segmentation."
To be reviewed (preprint: https://arxiv.org/pdf/2201.04584.pdf).
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is learned and inferred using ECONet method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
This also implements variations of ECONet with hand-crafted features, referred as ECONet-Haar-Like in the paper.
"""
class DybaORFPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Dynamically Balanced Online Random Forest (DybaORF) based Online Likelihood training and inference method for
COVID-19 lung lesion segmentation based on the following paper:
Wang, Guotai, et al. "Dynamically balanced online random forests for interactive scribble-based segmentation."
International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2016.
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is learned and inferred using DybaORF-Haar-Like method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
"""
class GMMPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Gaussian Mixture Model (GMM) based Online Likelihood generation method for COVID-19 lung lesion segmentation based on the following paper:
Rother, Carsten, Vladimir Kolmogorov, and Andrew Blake. "" GrabCut" interactive foreground extraction using iterated graph cuts."
ACM transactions on graphics (TOG) 23.3 (2004): 309-314.
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is generated using GMM method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
"""
class HistogramPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Histogram-based Online Likelihood generation method for COVID-19 lung lesion segmentation based on the following paper:
Boykov, Yuri Y., and M-P. Jolly. "Interactive graph cuts for optimal boundary & region segmentation of objects in ND images."
Proceedings eighth IEEE international conference on computer vision. ICCV 2001. Vol. 1. IEEE, 2001.
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is generated using histogram method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
"""
| 35.333333 | 150 | 0.58011 | import logging
logger = logging.getLogger(__name__)
from monai.transforms import (Compose, EnsureChannelFirstd, LoadImaged,
ScaleIntensityRanged, Spacingd)
from monailabel.interfaces.tasks.infer import InferTask, InferType
from monailabel.scribbles.transforms import AddBackgroundScribblesFromROId
from monailabel.transform.post import BoundingBoxd, Restored
from lib.transforms import (AddBackgroundScribblesFromROIWithDropfracd,
ApplyGaussianSmoothing, ApplyGraphCutOptimisationd,
MakeLikelihoodFromScribblesDybaORFd,
MakeLikelihoodFromScribblesECONetd,
MakeLikelihoodFromScribblesGMMd,
MakeLikelihoodFromScribblesHistogramd, Timeit)
class MyLikelihoodBasedSegmentor(InferTask):
    """Shared InferTask base for scribble-driven likelihood segmentors.

    Subclasses supply an `inferer()` that turns user scribbles into a
    likelihood volume stored under the "prob" key; this base class provides
    the common pre-processing (scribble augmentation, resampling, intensity
    scaling, Gaussian smoothing) and the GraphCut-based post-processing.
    """
    def __init__(
        self,
        dimension=3,
        description="Generic base class for constructing online likelihood based segmentors",
        intensity_range=(-1000, 400, 0.0, 1.0, True),  # (a_min, a_max, b_min, b_max, clip) for ScaleIntensityRanged
        pix_dim=(2.0, 2.0, 2.0),
        lamda=5.0,  # GraphCut pairwise weight
        sigma=0.1,  # GraphCut pairwise sigma
        config=None,
    ):
        super().__init__(
            path=None,
            network=None,
            labels="region 7",
            type=InferType.SCRIBBLES,
            dimension=dimension,
            description=description,
            config=config,
        )
        self.intensity_range = intensity_range
        self.pix_dim = pix_dim
        self.lamda = lamda
        self.sigma = sigma
    def pre_transforms(self):
        """Load, augment scribbles, resample, normalise and smooth the image."""
        return [
            LoadImaged(keys=["image", "label"]),
            EnsureChannelFirstd(keys=["image", "label"]),
            # AddBackgroundScribblesFromROId(
            AddBackgroundScribblesFromROIWithDropfracd(
                scribbles="label", scribbles_bg_label=2, scribbles_fg_label=3, drop_frac=0.98
            ),
            Spacingd(
                keys=["image", "label"],
                pixdim=self.pix_dim,
                mode=["bilinear", "nearest"],
            ),
            ScaleIntensityRanged(
                keys="image",
                a_min=self.intensity_range[0],
                a_max=self.intensity_range[1],
                b_min=self.intensity_range[2],
                b_max=self.intensity_range[3],
                clip=self.intensity_range[4],
            ),
            ApplyGaussianSmoothing(
                image="image",
                kernel_size=3,
                sigma=1.0,
                device="cuda",
            ),
        ]
    def post_transforms(self):
        """Regularise the likelihood ("prob") with GraphCut, then restore geometry."""
        return [
            ApplyGraphCutOptimisationd(
                unary="prob",
                pairwise="image",
                post_proc_label="pred",
                lamda=self.lamda,
                sigma=self.sigma,
            ),
            Timeit(),
            Restored(keys="pred", ref_image="image"),
            BoundingBoxd(keys="pred", result="result", bbox="bbox"),
        ]
class ECONetPlusGraphCut(MyLikelihoodBasedSegmentor):
    """
    Defines Efficient Convolutional Online Likelihood Network (ECONet) based Online Likelihood training and inference method for
    COVID-19 lung lesion segmentation based on the following paper:
    Asad, Muhammad, Lucas Fidon, and Tom Vercauteren. "" ECONet: Efficient Convolutional Online Likelihood Network
    for Scribble-based Interactive Segmentation."
    To be reviewed (preprint: https://arxiv.org/pdf/2201.04584.pdf).
    This task takes as input 1) original image volume and 2) scribbles from user
    indicating foreground and background regions. A likelihood volume is learned and inferred using ECONet method.
    numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
    and pairwise is the original input volume.
    This also implements variations of ECONet with hand-crafted features, referred as ECONet-Haar-Like in the paper.
    """
    def __init__(
        self,
        dimension=3,
        description="Online likelihood inference with ECONet for COVID-19 lung lesion segmentation",
        intensity_range=(-1000, 400, 0.0, 1.0, True),
        pix_dim=(2.0, 2.0, 2.0),
        lamda=5.0,
        sigma=0.1,
        model="FEAT",
        loss="CE",
        epochs=200,
        lr=0.01,
        lr_step=None,  # defaults to [0.7]; was a mutable default argument
        dropout=0.3,
        hidden_layers=None,  # defaults to [32, 16]; was a mutable default argument
        kernel_size=7,
        num_filters=128,
        train_feat=True,
        model_path=None,
        config=None,
    ):
        super().__init__(
            dimension=dimension,
            description=description,
            intensity_range=intensity_range,
            pix_dim=pix_dim,
            lamda=lamda,
            sigma=sigma,
            config=config,
        )
        self.model = model
        self.loss = loss
        self.epochs = epochs
        self.lr = lr
        # Bug fix: `lr_step=[0.7]` / `hidden_layers=[32, 16]` were mutable
        # default arguments shared across all instances; use the None-sentinel
        # pattern instead (same effective defaults).
        self.lr_step = [0.7] if lr_step is None else lr_step
        self.dropout = dropout
        self.hidden_layers = [32, 16] if hidden_layers is None else hidden_layers
        self.kernel_size = kernel_size
        self.num_filters = num_filters
        self.train_feat = train_feat
        self.model_path = model_path
    def inferer(self):
        """Train/apply ECONet on the scribbles and emit the likelihood as "prob"."""
        return Compose(
            [
                Timeit(),
                MakeLikelihoodFromScribblesECONetd(
                    image="image",
                    scribbles="label",
                    post_proc_label="prob",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                    model=self.model,
                    loss=self.loss,
                    epochs=self.epochs,
                    lr=self.lr,
                    lr_step=self.lr_step,
                    dropout=self.dropout,
                    hidden_layers=self.hidden_layers,
                    kernel_size=self.kernel_size,
                    num_filters=self.num_filters,
                    train_feat=self.train_feat,
                    use_argmax=False,
                    model_path=self.model_path,
                    use_amp=False,
                    device="cuda",
                ),
                Timeit(),
            ]
        )
class DybaORFPlusGraphCut(MyLikelihoodBasedSegmentor):
    """
    Defines Dynamically Balanced Online Random Forest (DybaORF) based Online Likelihood training and inference method for
    COVID-19 lung lesion segmentation based on the following paper:
    Wang, Guotai, et al. "Dynamically balanced online random forests for interactive scribble-based segmentation."
    International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2016.
    This task takes as input 1) original image volume and 2) scribbles from user
    indicating foreground and background regions. A likelihood volume is learned and inferred using DybaORF-Haar-Like method.
    numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
    and pairwise is the original input volume.
    """
    def __init__(
        self,
        dimension=3,
        description="Online likelihood inference with DybaORF-Haar for COVID-19 lung lesion segmentation",
        intensity_range=(-1000, 400, 0.0, 1.0, True),
        pix_dim=(2.0, 2.0, 2.0),
        lamda=5.0,
        sigma=0.1,
        kernel_size=9,  # Haar-like feature window size
        criterion="entropy",  # random-forest split criterion
        num_trees=50,
        max_tree_depth=20,
        min_samples_split=6,
        model_path=None,
        config=None,
    ):
        super().__init__(
            dimension=dimension,
            description=description,
            intensity_range=intensity_range,
            pix_dim=pix_dim,
            lamda=lamda,
            sigma=sigma,
            config=config,
        )
        self.kernel_size = kernel_size
        self.criterion = criterion
        self.num_trees = num_trees
        self.max_tree_depth = max_tree_depth
        self.min_samples_split = min_samples_split
        self.model_path = model_path
    def inferer(self):
        """Fit/apply DybaORF on the scribbles and emit the likelihood as "prob"."""
        return Compose(
            [
                Timeit(),
                MakeLikelihoodFromScribblesDybaORFd(
                    image="image",
                    scribbles="label",
                    post_proc_label="prob",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                    kernel_size=self.kernel_size,
                    criterion=self.criterion,
                    num_trees=self.num_trees,
                    max_tree_depth=self.max_tree_depth,
                    min_samples_split=self.min_samples_split,
                    use_argmax=False,
                    model_path=self.model_path,
                    device="cuda",
                ),
                Timeit(),
            ]
        )
class GMMPlusGraphCut(MyLikelihoodBasedSegmentor):
    """
    Defines Gaussian Mixture Model (GMM) based Online Likelihood generation method for COVID-19 lung lesion segmentation based on the following paper:
    Rother, Carsten, Vladimir Kolmogorov, and Andrew Blake. "" GrabCut" interactive foreground extraction using iterated graph cuts."
    ACM transactions on graphics (TOG) 23.3 (2004): 309-314.
    This task takes as input 1) original image volume and 2) scribbles from user
    indicating foreground and background regions. A likelihood volume is generated using GMM method.
    numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
    and pairwise is the original input volume.
    """
    def __init__(
        self,
        dimension=3,
        description="Online likelihood generation using GMM for COVID-19 lung lesion segmentation",
        intensity_range=(-1000, 400, 0.0, 1.0, True),
        pix_dim=(2.0, 2.0, 2.0),
        lamda=5.0,
        sigma=0.1,
        mixture_size=20,  # number of Gaussian components in the mixture
        config=None,
    ):
        super().__init__(
            dimension=dimension,
            description=description,
            intensity_range=intensity_range,
            pix_dim=pix_dim,
            lamda=lamda,
            sigma=sigma,
            config=config,
        )
        self.mixture_size = mixture_size
    def inferer(self):
        """Fit/apply the GMM on the scribbles and emit the likelihood as "prob"."""
        return Compose(
            [
                Timeit(),
                MakeLikelihoodFromScribblesGMMd(
                    image="image",
                    scribbles="label",
                    post_proc_label="prob",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                    mixture_size=self.mixture_size,
                ),
                Timeit(),
            ]
        )
class HistogramPlusGraphCut(MyLikelihoodBasedSegmentor):
    """
    Defines Histogram-based Online Likelihood generation method for COVID-19 lung lesion segmentation based on the following paper:
    Boykov, Yuri Y., and M-P. Jolly. "Interactive graph cuts for optimal boundary & region segmentation of objects in ND images."
    Proceedings eighth IEEE international conference on computer vision. ICCV 2001. Vol. 1. IEEE, 2001.
    This task takes as input 1) original image volume and 2) scribbles from user
    indicating foreground and background regions. A likelihood volume is generated using histogram method.
    numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
    and pairwise is the original input volume.
    """
    def __init__(
        self,
        dimension=3,
        description="Online likelihood generation using Histogram for COVID-19 lung lesion segmentation",
        intensity_range=(-1000, 400, 0.0, 1.0, True),
        pix_dim=(2.0, 2.0, 2.0),
        lamda=5.0,
        sigma=0.1,
        alpha_bg=1,  # background histogram smoothing weight
        alpha_fg=1,  # foreground histogram smoothing weight
        bins=128,  # number of intensity histogram bins
        config=None,
    ):
        super().__init__(
            dimension=dimension,
            description=description,
            intensity_range=intensity_range,
            pix_dim=pix_dim,
            lamda=lamda,
            sigma=sigma,
            config=config,
        )
        self.alpha_bg = alpha_bg
        self.alpha_fg = alpha_fg
        self.bins = bins
    def inferer(self):
        """Build the intensity histograms from scribbles and emit "prob"."""
        return Compose(
            [
                Timeit(),
                MakeLikelihoodFromScribblesHistogramd(
                    image="image",
                    scribbles="label",
                    post_proc_label="prob",
                    scribbles_bg_label=2,
                    scribbles_fg_label=3,
                    normalise=True,
                    alpha_bg=self.alpha_bg,
                    alpha_fg=self.alpha_fg,
                    bins=self.bins,
                ),
                Timeit(),
            ]
        )
| 8,158 | 23 | 319 |
b64b561ae483be24d4de3f2656163e6911f143ea | 6,040 | py | Python | code/chapter4/q4-37-1.py | Starrynightzyq/SEU-NumericalAnalysis-Exercises | 7004b86cb8c1ced70567c2fbadac366bc9cee8bd | [
"MIT"
] | null | null | null | code/chapter4/q4-37-1.py | Starrynightzyq/SEU-NumericalAnalysis-Exercises | 7004b86cb8c1ced70567c2fbadac366bc9cee8bd | [
"MIT"
] | null | null | null | code/chapter4/q4-37-1.py | Starrynightzyq/SEU-NumericalAnalysis-Exercises | 7004b86cb8c1ced70567c2fbadac366bc9cee8bd | [
"MIT"
] | null | null | null | '''
Author: zyq
Date: 2020-11-30 17:19:51
LastEditTime: 2020-12-09 17:24:59
LastEditors: Please set LastEditors
Description: 数值分析上机题 课本 P195 37题 3次样条插值
FilePath: /code/chapter4/q4-37-1.py
'''
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
import sys, os
'''
description:
param {*} x n+1 个插值点
param {*} y n+1 个插值点
return {*} n
'''
'''
description: 求三次样条差值的 4n 个方程
param: {x[0,n], y[0,n]} n+1 个插值点
param: Type 三次样条边界条件 1 or 2 or 3
return {A, B} [a0 b0 c0 d0 a1 b1 c1 d1 ... a(n-1) b(n-1) c(n-1) d(n-1)] = [B] 形式的方程组
'''
"""
功能:根据所给参数,计算三次函数的函数值:
参数:OriginalInterval为原始x的区间, parameters为二次函数的系数,x为自变量
返回值:为函数的因变量
"""
"""
功能:将函数绘制成图像
参数:data_x,data_y为离散的点.new_data_x,new_data_y为由拉格朗日插值函数计算的值。x为函数的预测值。
返回值:空
"""
if __name__ == "__main__":
# 获取当前文件路径
current_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(current_path), '../')))
# print(sys.path)
# 调用 chapter3 中的列主元高斯消去法
from chapter3.q3 import MGauss_Caculate
main()
| 26.964286 | 164 | 0.527815 | '''
Author: zyq
Date: 2020-11-30 17:19:51
LastEditTime: 2020-12-09 17:24:59
LastEditors: Please set LastEditors
Description: 数值分析上机题 课本 P195 37题 3次样条插值
FilePath: /code/chapter4/q4-37-1.py
'''
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
import sys, os
'''
description:
param {*} x n+1 个插值点
param {*} y n+1 个插值点
return {*} n
'''
def Prejudgment(x, y):
    """Check that x and y have equal length and return n = len(x) - 1.

    n is the number of spline sub-intervals; the process exits if the two
    point lists do not match in length.
    """
    if len(x) != len(y):
        print('x 与 y 长度不相等')
        sys.exit()
    return len(x) - 1
'''
description: 求三次样条差值的 4n 个方程
param: {x[0,n], y[0,n]} n+1 个插值点
param: Type 三次样条边界条件 1 or 2 or 3
return {A, B} [a0 b0 c0 d0 a1 b1 c1 d1 ... a(n-1) b(n-1) c(n-1) d(n-1)] = [B] 形式的方程组
'''
def calculateEquationParameters(x, y, Type=1, dy0=0, dyn=0):
    """Build the 4n x 4n linear system for cubic spline interpolation.

    Unknowns are [a0 b0 c0 d0 ... a(n-1) b(n-1) c(n-1) d(n-1)], one cubic
    S_i(x) = a x^3 + b x^2 + c x + d per interval.  Type selects the boundary
    condition: 1 = clamped (first derivatives dy0/dyn), 2 = natural
    (S'' = 0 at both ends), 3 = periodic (not implemented here).
    Returns (parameterA, parameterB) with A a list of row lists and B the
    right-hand side.
    """
    n = Prejudgment(x, y)
    parameterA = []
    parameterB = []
    # S_i(x_i) = y_i
    # S_i(x_{i+1}) = y_{i+1}
    # 0 <= i <= n-1
    for i in range(0, n):
        # S_i(x_i) = y_i
        data = np.zeros(n*4)
        data[i*4] = pow(x[i], 3)
        data[i*4+1] = pow(x[i], 2)
        data[i*4+2] = x[i]
        data[i*4+3] = 1
        parameterA.append(data.tolist())
        parameterB.append(y[i])
        # S_i(x_{i+1}) = y_{i+1}
        data1 = np.zeros(n*4)
        data1[i*4] = pow(x[(i+1)], 3)
        data1[i*4+1] = pow(x[(i+1)], 2)
        data1[i*4+2] = x[(i+1)]
        data1[i*4+3] = 1
        parameterA.append(data1.tolist())
        parameterB.append(y[i+1])
    # S'_i(x_{i+1}) = S'_{i+1}(x_{i+1})
    # 0 <= i <= n-2
    for i in range(0, n-1):
        data = np.zeros(n*4)
        data[i*4] = 3 * pow(x[i+1], 2)
        data[i*4+1] = 2 * x[i+1]
        data[i*4+2] = 1
        data[(i+1)*4] = -3 * pow(x[i+1], 2)
        data[(i+1)*4+1] = -2 * x[i+1]
        data[(i+1)*4+2] = -1
        parameterA.append(data.tolist())
        parameterB.append(0)
    # S''_i(x_{i+1}) = S''_{i+1}(x_{i+1})
    # 0 <= i <= n-2
    for i in range(0, n-1):
        data = np.zeros(n*4)
        data[i*4] = 6 * x[i+1]
        data[i*4+1] = 2
        data[(i+1)*4] = -6 * x[i+1]
        data[(i+1)*4+1] = -2
        parameterA.append(data.tolist())
        parameterB.append(0)
    if Type == 1:
        # Clamped spline: prescribe the end-point first derivatives.
        # S'_0(x_0) = y'_0
        data = np.zeros(n*4)
        data[0] = 3 * pow(x[0], 2)
        data[1] = 2 * x[0]
        data[2] = 1
        parameterA.append(data.tolist())
        parameterB.append(dy0)
        # S'_{n-1}(x_n) = y'_n
        data = np.zeros(n*4)
        data[(n-1)*4] = 3 * pow(x[n], 2)
        data[(n-1)*4+1] = 2 * x[n]
        data[(n-1)*4+2] = 1
        parameterA.append(data.tolist())
        parameterB.append(dyn)
    elif Type == 2:
        # Natural spline: zero curvature at both ends.
        # S''(a) = S''(b) = 0
        # S''_0(x_0) = 0
        data = np.zeros(n*4)
        data[0] = 6 * x[0]
        data[1] = 2
        parameterA.append(data.tolist())
        parameterB.append(0)
        # S''_{n-1}(x_n) = 0
        data = np.zeros(n*4)
        data[(n-1)*4] = 6 * x[n]
        data[(n-1)*4+1] = 2
        parameterA.append(data.tolist())
        parameterB.append(0)
    elif Type == 3:
        # Periodic boundary conditions are not implemented: the returned
        # system is short of two equations in this case.
        # S'(a) = S'(b) and # S''(a) = S''(b)
        pass
    else:
        print('Error! Unknown "Type" Value!')
    return parameterA, parameterB
"""
功能:根据所给参数,计算三次函数的函数值:
参数:OriginalInterval为原始x的区间, parameters为二次函数的系数,x为自变量
返回值:为函数的因变量
"""
def calculate(OriginalInterval, paremeters, x):
    """Evaluate the piecewise cubic at each point of `x`.

    `paremeters` holds 4 coefficients (a, b, c, d) per interval; points left
    of the first knot use the first piece and points right of the last knot
    use the last piece (extrapolation by clamping).
    Returns the list of function values.
    """
    n = int(len(paremeters) / 4)

    def locate(value):
        # Index of the cubic piece to use for `value`.
        if value <= OriginalInterval[0]:
            return 0
        if value >= OriginalInterval[-1]:
            return n - 1
        for k in range(n):
            if OriginalInterval[k] <= value < OriginalInterval[k + 1]:
                return k
        return 0

    result = []
    for data_x in x:
        base = locate(data_x) * 4
        a, b, c, d = paremeters[base], paremeters[base + 1], paremeters[base + 2], paremeters[base + 3]
        result.append(a * data_x * data_x * data_x + b * data_x * data_x + c * data_x + d)
    return result
"""
功能:将函数绘制成图像
参数:data_x,data_y为离散的点.new_data_x,new_data_y为由拉格朗日插值函数计算的值。x为函数的预测值。
返回值:空
"""
def Draw(data_x,data_y,new_data_x,new_data_y, title):
    """Plot the fitted spline curve against the discrete data points.

    Saves the figure as '<title>.png' next to this file (dpi=300) and then
    shows it.  Labels/titles are in Chinese ("拟合曲线" = fitted curve,
    "离散数据" = discrete data, "三次样条函数" = cubic spline function).
    """
    plt.plot(new_data_x, new_data_y, label="拟合曲线", color="black")
    plt.scatter(data_x,data_y, label="离散数据",color="red")
    # Configure matplotlib to render CJK characters correctly.
    mpl.rcParams['font.sans-serif'] = ['SimHei']
    mpl.rcParams['axes.unicode_minus'] = False
    plt.title("三次样条函数")
    plt.legend(loc="upper left")
    plt.savefig(os.path.join(os.path.dirname(os.path.abspath(__file__)), title+'.png'), dpi=300)
    plt.show()
def PrintS(parameterX):
    """Print the spline coefficients, one '&'-separated row per interval.

    The output is formatted for pasting into a LaTeX table ('\\\\' row
    terminators, '%.6g' precision).
    """
    n = int(len(parameterX)/4)
    print('S(x) = ')
    # for i in range(0, n):
    # print("{0}x^3 + {1}x^2 + {2}x + {3}".format(parameterX[i*4], parameterX[i*4+1], parameterX[i*4+2], parameterX[i*4+3]))
    # print('\n\n')
    for i in range(0,n):
        print("%.6g & %.6g & %.6g & %.6g \\\\" % (parameterX[i*4], parameterX[i*4+1], parameterX[i*4+2], parameterX[i*4+3]))
    print('\n\n')
def main():
    """Solve exercise P195 #37: clamped cubic spline through 11 data points.

    Builds the 4n x 4n system, solves it with column-pivot Gaussian
    elimination (MGauss_Caculate, imported in the __main__ guard), prints the
    coefficients, plots the spline, and prints sampled values at midpoints.
    """
    x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    y = [2.51, 3.30, 4.04, 4.70, 5.22, 5.54, 5.78, 5.40, 5.57, 5.70, 5.80]
    dy0 = 0.8
    dyn = 0.2
    parameterA, parameterB = calculateEquationParameters(x, y, 1, dy0, dyn)
    parameterX = MGauss_Caculate(parameterA, parameterB)
    PrintS(parameterX)
    # Plot the spline against the data points.
    new_data_x = np.arange(x[0]-0.5, x[-1]+0.6, 0.1)
    new_data_y = calculate(x, parameterX, new_data_x)
    Draw(x, y, new_data_x, new_data_y, '三次样条插值')
    # Print the spline sampled at interval midpoints.
    new_data_x = np.arange(0.5, 10.5, 1)
    new_data_y = calculate(x, parameterX, new_data_x)
    # f4_5 = calculate(parameterX[8:12], [4.5])
    print(new_data_x)
    for i,data in enumerate(new_data_y):
        print("%.6g & " % data)
if __name__ == "__main__":
    # Resolve this file's location and add the repository root to sys.path.
    current_path = os.path.abspath(__file__)
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(current_path), '../')))
    # print(sys.path)
    # Import Gaussian elimination with column (partial) pivoting from chapter3.
    from chapter3.q3 import MGauss_Caculate
    main()
| 4,922 | 0 | 142 |
b9b4e8f0cfb8a8d811bdc458a893677c8f5c8ba6 | 1,279 | py | Python | data_modeling/create_tables.py | jbj2505/dend_02_data_modeling_apache_cassandra | 72b26e242b4ae95c31c59f4987376c3fc58c8528 | [
"MIT"
] | null | null | null | data_modeling/create_tables.py | jbj2505/dend_02_data_modeling_apache_cassandra | 72b26e242b4ae95c31c59f4987376c3fc58c8528 | [
"MIT"
] | null | null | null | data_modeling/create_tables.py | jbj2505/dend_02_data_modeling_apache_cassandra | 72b26e242b4ae95c31c59f4987376c3fc58c8528 | [
"MIT"
] | null | null | null | """
This module provides methods to drop and re-create all tables.
"""
from db import create_session
import cql_queries
def create_database():
    """
    Creates the database and establishes the connection.

    Drops and re-creates the 'sparkifydb' keyspace, then binds the session
    to it.

    Returns:
        (cluster, session): the cluster handle and the keyspace-bound session.
    """
    # connect with the default session first
    cluster, session = create_session()
    # recreate the sparkify keyspace from scratch, then switch to it
    session.execute(cql_queries.KEYSPACE_DROP)
    session.execute(cql_queries.KEYSPACE_CREATE)
    session.set_keyspace('sparkifydb')
    return cluster, session
def drop_tables(session):
    """Execute every DROP TABLE statement defined in cql_queries."""
    for statement in cql_queries.DROP_TABLE_QUERIES:
        session.execute(statement)
def create_tables(session):
    """Execute every CREATE TABLE statement defined in cql_queries."""
    for statement in cql_queries.CREATE_TABLE_QUERIES:
        session.execute(statement)
def main():
    """
    First, creates the database and establishes the connection.
    Then, drops all tables and re-creates them.
    Finally, shuts the session and cluster down.
    """
    print("Creating connection...")
    cluster, session = create_database()
    print("Dropping old tables...")
    drop_tables(session)
    print("Creating new tables...")
    create_tables(session)
    print("Closing connection...")
    session.shutdown()
    cluster.shutdown()
    print("Done.")
if __name__ == "__main__":
    main()
| 22.051724 | 62 | 0.677873 | """
This module provides methods to drop and re-create all tables.
"""
from db import create_session
import cql_queries
def create_database():
"""
Creates the database and establishes the connection.
"""
# connect to default database
cluster, session = create_session()
# create sparkify database with UTF8 encoding
session.execute(cql_queries.KEYSPACE_DROP)
session.execute(cql_queries.KEYSPACE_CREATE)
session.set_keyspace('sparkifydb')
return cluster, session
def drop_tables(session):
"""
Drops all tables.
"""
for query in cql_queries.DROP_TABLE_QUERIES:
session.execute(query)
def create_tables(session):
"""
Creates all tables.
"""
for query in cql_queries.CREATE_TABLE_QUERIES:
session.execute(query)
def main():
"""
First, creates databse and establishes connection.
Then, drops all tables and re-creates them.
"""
print("Creating connection...")
cluster, session = create_database()
print("Dropping old tables...")
drop_tables(session)
print("Creating new tables...")
create_tables(session)
print("Closing connection...")
session.shutdown()
cluster.shutdown()
print("Done.")
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
bf7dd08185b7a864b752a321a8811af545e7e291 | 1,056 | py | Python | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | adityasharad/ql | 439dcc0731ae665402466a13daf12737ea3a2a44 | [
"MIT"
] | 643 | 2018-08-03T11:16:54.000Z | 2020-04-27T23:10:55.000Z | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | DirtyApexAlpha/codeql | 4c59b0d2992ee0d90cc2f46d6a85ac79e1d57f21 | [
"MIT"
] | 1,880 | 2018-08-03T11:28:32.000Z | 2020-04-28T13:18:51.000Z | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | DirtyApexAlpha/codeql | 4c59b0d2992ee0d90cc2f46d6a85ac79e1d57f21 | [
"MIT"
] | 218 | 2018-08-03T11:16:58.000Z | 2020-04-24T02:24:00.000Z | match = "dc:title"
ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
import xml.etree.ElementTree as ET
tree = ET.parse('country_data.xml') # $ decodeFormat=XML decodeInput='country_data.xml' decodeOutput=ET.parse(..) xmlVuln='XML bomb' getAPathArgument='country_data.xml'
root = tree.getroot()
root.find(match, namespaces=ns) # $ getXPath=match
root.findall(match, namespaces=ns) # $ getXPath=match
root.findtext(match, default=None, namespaces=ns) # $ getXPath=match
tree = ET.ElementTree()
tree.parse("index.xhtml") # $ decodeFormat=XML decodeInput="index.xhtml" decodeOutput=tree.parse(..) xmlVuln='XML bomb' getAPathArgument="index.xhtml"
tree.find(match, namespaces=ns) # $ getXPath=match
tree.findall(match, namespaces=ns) # $ getXPath=match
tree.findtext(match, default=None, namespaces=ns) # $ getXPath=match
parser = ET.XMLParser()
parser.feed("<foo>bar</foo>") # $ decodeFormat=XML decodeInput="<foo>bar</foo>" xmlVuln='XML bomb'
tree = parser.close() # $ decodeOutput=parser.close()
tree.find(match, namespaces=ns) # $ getXPath=match
| 45.913043 | 168 | 0.731061 | match = "dc:title"
ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
import xml.etree.ElementTree as ET
tree = ET.parse('country_data.xml') # $ decodeFormat=XML decodeInput='country_data.xml' decodeOutput=ET.parse(..) xmlVuln='XML bomb' getAPathArgument='country_data.xml'
root = tree.getroot()
root.find(match, namespaces=ns) # $ getXPath=match
root.findall(match, namespaces=ns) # $ getXPath=match
root.findtext(match, default=None, namespaces=ns) # $ getXPath=match
tree = ET.ElementTree()
tree.parse("index.xhtml") # $ decodeFormat=XML decodeInput="index.xhtml" decodeOutput=tree.parse(..) xmlVuln='XML bomb' getAPathArgument="index.xhtml"
tree.find(match, namespaces=ns) # $ getXPath=match
tree.findall(match, namespaces=ns) # $ getXPath=match
tree.findtext(match, default=None, namespaces=ns) # $ getXPath=match
parser = ET.XMLParser()
parser.feed("<foo>bar</foo>") # $ decodeFormat=XML decodeInput="<foo>bar</foo>" xmlVuln='XML bomb'
tree = parser.close() # $ decodeOutput=parser.close()
tree.find(match, namespaces=ns) # $ getXPath=match
| 0 | 0 | 0 |
48ed79effcfb0360b19bfbc7d8ef90ac8a11b573 | 3,547 | py | Python | stubs.min/System/Windows/Documents/__init___parts/DocumentPage.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Documents/__init___parts/DocumentPage.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Documents/__init___parts/DocumentPage.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class DocumentPage(object,IDisposable):
"""
Represents a document page produced by a paginator.
DocumentPage(visual: Visual)
DocumentPage(visual: Visual,pageSize: Size,bleedBox: Rect,contentBox: Rect)
"""
def Dispose(self):
"""
Dispose(self: DocumentPage)
Releases all resources used by the System.Windows.Documents.DocumentPage.
"""
pass
def OnPageDestroyed(self,*args):
"""
OnPageDestroyed(self: DocumentPage,e: EventArgs)
Raises the System.Windows.Documents.DocumentPage.PageDestroyed event.
e: An System.EventArgs that contains the event data.
"""
pass
def SetBleedBox(self,*args):
"""
SetBleedBox(self: DocumentPage,bleedBox: Rect)
Sets the dimensions and location of the
System.Windows.Documents.DocumentPage.BleedBox.
bleedBox: An object that specifies the size and location of a rectangle.
"""
pass
def SetContentBox(self,*args):
"""
SetContentBox(self: DocumentPage,contentBox: Rect)
Sets the dimension and location of the
System.Windows.Documents.DocumentPage.ContentBox.
contentBox: An object that specifies the size and location of a rectangle.
"""
pass
def SetSize(self,*args):
"""
SetSize(self: DocumentPage,size: Size)
Sets the System.Windows.Documents.DocumentPage.Size of the physical page as it
will be after any cropping.
size: The size of the page.
"""
pass
def SetVisual(self,*args):
"""
SetVisual(self: DocumentPage,visual: Visual)
Sets the System.Windows.Documents.DocumentPage.Visual that depicts the page.
visual: The visual representation of the page.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,visual,pageSize=None,bleedBox=None,contentBox=None):
"""
__new__(cls: type,visual: Visual)
__new__(cls: type,visual: Visual,pageSize: Size,bleedBox: Rect,contentBox: Rect)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
BleedBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the area for print production-related bleeds,registration marks,and crop marks that may appear on the physical sheet outside the logical page boundaries.
Get: BleedBox(self: DocumentPage) -> Rect
"""
ContentBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the area of the page within the margins.
Get: ContentBox(self: DocumentPage) -> Rect
"""
Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the actual size of a page as it will be following any cropping.
Get: Size(self: DocumentPage) -> Size
"""
Visual=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the visual representation of the page.
Get: Visual(self: DocumentPage) -> Visual
"""
Missing=None
PageDestroyed=None
| 31.954955 | 215 | 0.696645 | class DocumentPage(object,IDisposable):
"""
Represents a document page produced by a paginator.
DocumentPage(visual: Visual)
DocumentPage(visual: Visual,pageSize: Size,bleedBox: Rect,contentBox: Rect)
"""
def Dispose(self):
"""
Dispose(self: DocumentPage)
Releases all resources used by the System.Windows.Documents.DocumentPage.
"""
pass
def OnPageDestroyed(self,*args):
"""
OnPageDestroyed(self: DocumentPage,e: EventArgs)
Raises the System.Windows.Documents.DocumentPage.PageDestroyed event.
e: An System.EventArgs that contains the event data.
"""
pass
def SetBleedBox(self,*args):
"""
SetBleedBox(self: DocumentPage,bleedBox: Rect)
Sets the dimensions and location of the
System.Windows.Documents.DocumentPage.BleedBox.
bleedBox: An object that specifies the size and location of a rectangle.
"""
pass
def SetContentBox(self,*args):
"""
SetContentBox(self: DocumentPage,contentBox: Rect)
Sets the dimension and location of the
System.Windows.Documents.DocumentPage.ContentBox.
contentBox: An object that specifies the size and location of a rectangle.
"""
pass
def SetSize(self,*args):
"""
SetSize(self: DocumentPage,size: Size)
Sets the System.Windows.Documents.DocumentPage.Size of the physical page as it
will be after any cropping.
size: The size of the page.
"""
pass
def SetVisual(self,*args):
"""
SetVisual(self: DocumentPage,visual: Visual)
Sets the System.Windows.Documents.DocumentPage.Visual that depicts the page.
visual: The visual representation of the page.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,visual,pageSize=None,bleedBox=None,contentBox=None):
"""
__new__(cls: type,visual: Visual)
__new__(cls: type,visual: Visual,pageSize: Size,bleedBox: Rect,contentBox: Rect)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
BleedBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the area for print production-related bleeds,registration marks,and crop marks that may appear on the physical sheet outside the logical page boundaries.
Get: BleedBox(self: DocumentPage) -> Rect
"""
ContentBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the area of the page within the margins.
Get: ContentBox(self: DocumentPage) -> Rect
"""
Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the actual size of a page as it will be following any cropping.
Get: Size(self: DocumentPage) -> Size
"""
Visual=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the visual representation of the page.
Get: Visual(self: DocumentPage) -> Visual
"""
Missing=None
PageDestroyed=None
| 0 | 0 | 0 |
52aba61e49db7aa74a638f0e177e93994fe863a9 | 361 | py | Python | app/db/models/jobs.py | Luivatra/ergopad-api | e3bcf93bf61509b3aa96b62603268acd399bbc28 | [
"MIT"
] | null | null | null | app/db/models/jobs.py | Luivatra/ergopad-api | e3bcf93bf61509b3aa96b62603268acd399bbc28 | [
"MIT"
] | 23 | 2022-03-09T11:31:32.000Z | 2022-03-31T08:53:27.000Z | app/db/models/jobs.py | Luivatra/ergopad-api | e3bcf93bf61509b3aa96b62603268acd399bbc28 | [
"MIT"
] | 2 | 2022-02-16T03:40:05.000Z | 2022-02-16T22:40:15.000Z | from sqlalchemy import Boolean, Column, Integer, String
from db.session import Base
# JOBS MODEL
| 21.235294 | 55 | 0.709141 | from sqlalchemy import Boolean, Column, Integer, String
from db.session import Base
# JOBS MODEL
class Jobs(Base):
__tablename__ = "jobs"
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
shortDescription = Column(String)
description = Column(String)
category = Column(String)
archived = Column(Boolean)
| 0 | 238 | 23 |
c75571ae625cb169a987fe46f69653cf0def791d | 424 | py | Python | tests/views.py | mari8i/drf-file-upload | 83bab708643a9f87bc9a3f41ee95d1b1d74e584d | [
"BSD-3-Clause"
] | 1 | 2021-05-13T04:19:05.000Z | 2021-05-13T04:19:05.000Z | tests/views.py | mari8i/drf-file-upload | 83bab708643a9f87bc9a3f41ee95d1b1d74e584d | [
"BSD-3-Clause"
] | null | null | null | tests/views.py | mari8i/drf-file-upload | 83bab708643a9f87bc9a3f41ee95d1b1d74e584d | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import viewsets
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from tests import models, serializers
| 30.285714 | 63 | 0.830189 | from rest_framework import viewsets
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from tests import models, serializers
class TestUserFileUpload(viewsets.ModelViewSet):
serializer_class = serializers.TestUserFileUploadSerializer
permission_classes = [IsAuthenticated]
queryset = models.TestUserFileUpload.objects.all()
parser_classes = [JSONParser]
| 0 | 224 | 23 |
5e70a958d4ae486eb1c18f3247ba85b0f8ec1b97 | 964 | py | Python | stock_data.py | advaithca/Stock-Data | 7e7ea67c6fb105b472c394b484044deb9bac6a2c | [
"MIT"
] | null | null | null | stock_data.py | advaithca/Stock-Data | 7e7ea67c6fb105b472c394b484044deb9bac6a2c | [
"MIT"
] | null | null | null | stock_data.py | advaithca/Stock-Data | 7e7ea67c6fb105b472c394b484044deb9bac6a2c | [
"MIT"
] | null | null | null | import yfinance as yf
import streamlit as st
import pandas as pd
import csv
import csv
tickers = []
with open(r'nasdaq_screener_1640497257523.csv') as f:
r = csv.reader(f)
header = next(r)
for row in r:
tickers.append([row[1],row[0]])
tname = []
for i in tickers:
tname.append(i[0])
st.write("""
# Simple Stock Price App
### Shows ***closing price*** and ***volume*** of Selected Company
***
""")
tickersymbol = ''
tickername = st.selectbox(
'Select Ticker',
tuple(tname))
for i in tickers:
if i[0] == tickername:
tickersymbol = i[1]
tickerdata = yf.Ticker(tickersymbol)
tickerdf = tickerdata.history(period='1d',start='2010-5-31',end='2020-5-31')
if not tickerdf.empty:
st.write("""
## Closing Price
""")
st.line_chart(tickerdf.Close)
st.write("""
## Volume Price
""")
st.line_chart(tickerdf.Volume)
else :
st.error("No data found for this company")
st.write("""
***
""") | 17.851852 | 76 | 0.626556 | import yfinance as yf
import streamlit as st
import pandas as pd
import csv
import csv
tickers = []
with open(r'nasdaq_screener_1640497257523.csv') as f:
r = csv.reader(f)
header = next(r)
for row in r:
tickers.append([row[1],row[0]])
tname = []
for i in tickers:
tname.append(i[0])
st.write("""
# Simple Stock Price App
### Shows ***closing price*** and ***volume*** of Selected Company
***
""")
tickersymbol = ''
tickername = st.selectbox(
'Select Ticker',
tuple(tname))
for i in tickers:
if i[0] == tickername:
tickersymbol = i[1]
tickerdata = yf.Ticker(tickersymbol)
tickerdf = tickerdata.history(period='1d',start='2010-5-31',end='2020-5-31')
if not tickerdf.empty:
st.write("""
## Closing Price
""")
st.line_chart(tickerdf.Close)
st.write("""
## Volume Price
""")
st.line_chart(tickerdf.Volume)
else :
st.error("No data found for this company")
st.write("""
***
""") | 0 | 0 | 0 |
7c7b107b912fd4082ce4f8dd26ab913e962c2422 | 808 | py | Python | worldengine/basic_map_operations.py | stefan-feltmann/lands | b2f1fc3aab4895763160a135d085a17dceb5f58e | [
"MIT"
] | null | null | null | worldengine/basic_map_operations.py | stefan-feltmann/lands | b2f1fc3aab4895763160a135d085a17dceb5f58e | [
"MIT"
] | null | null | null | worldengine/basic_map_operations.py | stefan-feltmann/lands | b2f1fc3aab4895763160a135d085a17dceb5f58e | [
"MIT"
] | null | null | null | import math
import random
def index_of_nearest(p, hot_points, distance_f=distance):
"""Given a point and a set of hot points it found the hot point
nearest to the given point. An arbitrary distance function can
be specified
:return the index of the nearest hot points, or None if the list of hot
points is empty
"""
min_dist = None
nearest_hp_i = None
for i, hp in enumerate(hot_points):
dist = distance_f(p, hp)
if min_dist is None or dist < min_dist:
min_dist = dist
nearest_hp_i = i
return nearest_hp_i
| 26.933333 | 75 | 0.642327 | import math
import random
def random_point(width, height):
return random.randrange(0, width), random.randrange(0, height)
def distance(pa, pb):
ax, ay = pa
bx, by = pb
return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2)
def index_of_nearest(p, hot_points, distance_f=distance):
"""Given a point and a set of hot points it found the hot point
nearest to the given point. An arbitrary distance function can
be specified
:return the index of the nearest hot points, or None if the list of hot
points is empty
"""
min_dist = None
nearest_hp_i = None
for i, hp in enumerate(hot_points):
dist = distance_f(p, hp)
if min_dist is None or dist < min_dist:
min_dist = dist
nearest_hp_i = i
return nearest_hp_i
| 164 | 0 | 46 |
c71980bddb7770c38122ebdf46cf3149a12d1ee6 | 17 | py | Python | example/__main__.py | konchokdolma/python-package | a3b57db100dee6e3d2a758408b453a920f535b62 | [
"BSD-2-Clause"
] | null | null | null | example/__main__.py | konchokdolma/python-package | a3b57db100dee6e3d2a758408b453a920f535b62 | [
"BSD-2-Clause"
] | null | null | null | example/__main__.py | konchokdolma/python-package | a3b57db100dee6e3d2a758408b453a920f535b62 | [
"BSD-2-Clause"
] | null | null | null | print('test321')
| 8.5 | 16 | 0.705882 | print('test321')
| 0 | 0 | 0 |
a0c58fbf2bde8ed1ecfca05af47f256ca7a93d0d | 1,105 | py | Python | app_ccf/twilio/twilio_client.py | richardmobikasa/cash-assistance-platform | fafc117823c9bb7b5b6115d11c66afb459e1ec5f | [
"MIT"
] | 10 | 2020-10-02T20:03:08.000Z | 2022-01-05T17:27:54.000Z | app_ccf/twilio/twilio_client.py | richardmobikasa/cash-assistance-platform | fafc117823c9bb7b5b6115d11c66afb459e1ec5f | [
"MIT"
] | 3 | 2020-10-06T14:44:28.000Z | 2020-10-07T15:33:23.000Z | app_ccf/twilio/twilio_client.py | richardmobikasa/cash-assistance-platform | fafc117823c9bb7b5b6115d11c66afb459e1ec5f | [
"MIT"
] | 6 | 2020-09-22T22:39:38.000Z | 2021-07-13T06:45:53.000Z |
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client
import os
import logging
LOGGER = logging.getLogger(__name__)
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_SERVICE_SID = os.environ.get('TWILIO_SERVICE_SID')
| 29.864865 | 77 | 0.706787 |
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client
import os
import logging
LOGGER = logging.getLogger(__name__)
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_SERVICE_SID = os.environ.get('TWILIO_SERVICE_SID')
def trigger_text_messages(recipients, body):
to_binding = [
'{{"binding_type":"sms","address":"{recipient}"}}'.format(
recipient=recipient
) for recipient in recipients
]
LOGGER.info('About to send {} to {}.'.format(body, to_binding))
if not (TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN and TWILIO_SERVICE_SID):
LOGGER.info('Twilio is not configured. Aborting sending the text...')
return
client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
try:
client.notify.services(TWILIO_SERVICE_SID).notifications.create(
to_binding=to_binding,
body=body)
except TwilioRestException as e:
LOGGER.error(e)
else:
LOGGER.info('Successfully sent the text messages.')
| 757 | 0 | 23 |
7a7d6dfeb94d35a8c57a970357e7cf8547f8f7b5 | 1,625 | py | Python | year2020/day19/test_solver.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day19/test_solver.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | year2020/day19/test_solver.py | Sebaestschjin/advent-of-code | 5fd708efa355483fc0ccddf7548b62682662bcc8 | [
"MIT"
] | null | null | null | import pytest
from assertpy import assert_that
import year2020.day19.reader as reader
import year2020.day19.solver as solver
@pytest.mark.parametrize('word', ['aab', 'aba'])
@pytest.mark.parametrize('word', ['abba', 'abbb', 'bab'])
@pytest.mark.solution
@pytest.mark.solution
| 27.083333 | 60 | 0.651077 | import pytest
from assertpy import assert_that
import year2020.day19.reader as reader
import year2020.day19.solver as solver
def test_example_a_validator():
rules = {0: [1, 2], 1: 'a', 2: [[1, 3], [3, 1]], 3: 'b'}
result = solver.create_validator(rules)
assert_that(result).is_equal_to(r'^a(ab|ba)$')
@pytest.mark.parametrize('word', ['aab', 'aba'])
def test_example_a_valid_words(word):
validator = r'^a(ab|ba)$'
assert_that(solver.is_valid(validator, word)).is_true()
@pytest.mark.parametrize('word', ['abba', 'abbb', 'bab'])
def test_example_a_invalid_words(word):
validator = r'^a(ab|ba)$'
assert_that(solver.is_valid(validator, word)).is_false()
def test_example_a():
rules = {0: [1, 2], 1: 'a', 2: [[1, 3], [3, 1]], 3: 'b'}
words = ['aab', 'aba', 'aabb']
result = solver.solve_a(rules, words)
assert_that(result).is_equal_to(2)
@pytest.mark.solution
def test_solution_a():
result = solver.solve_a(*reader.read())
assert_that(result).is_equal_to(162)
def test_example_b_1():
rules, words = reader.read('in_test')
result = solver.solve_b(rules, words)
assert_that(result).is_equal_to(3)
def test_example_b_2():
rules, words = reader.read('in_test')
rules[8] = [[42], [42, 8]]
rules[11] = [[42, 31], [42, 11, 31]]
result = solver.solve_b(rules, words)
assert_that(result).is_equal_to(12)
@pytest.mark.solution
def test_solution_b():
rules, words = reader.read()
rules[8] = [[42], [42, 8]]
rules[11] = [[42, 31], [42, 11, 31]]
result = solver.solve_b(rules, words)
assert_that(result).is_equal_to(267)
| 1,156 | 0 | 180 |
0739a5fbb5b0f2fc3abc14b0c2c4de9d190cb1ba | 1,950 | py | Python | jarviscli/plugins/advice_giver.py | WWFelina/Jarvis | 69c4dba3e4b86478221b3d401a1f9423434309eb | [
"MIT"
] | 2,605 | 2017-03-10T22:44:36.000Z | 2022-03-31T15:33:17.000Z | jarviscli/plugins/advice_giver.py | nikiboura/Jarvis | eb22f7c84a345e9ae5925b4b98adbc4f2e4a93f3 | [
"MIT"
] | 729 | 2017-03-11T00:06:46.000Z | 2022-03-31T22:04:44.000Z | jarviscli/plugins/advice_giver.py | nikiboura/Jarvis | eb22f7c84a345e9ae5925b4b98adbc4f2e4a93f3 | [
"MIT"
] | 1,181 | 2017-03-10T23:24:55.000Z | 2022-03-31T03:59:46.000Z | from plugin import plugin
import random
@plugin("give me advice")
| 33.050847 | 71 | 0.456923 | from plugin import plugin
import random
@plugin("give me advice")
def advice(jarvis, s):
answers = [
"No",
"Yes",
"You Can Do It!",
"I Cant Help You",
"Sorry To hear That, But You Must Forget :(",
"Keep It Up!",
"Nice",
"Dont Do It Ever Again",
"I Like It, Good Job",
"I Am Not Certain",
"Too Bad For You, Try To Find Something Else To Do And Enjoy",
"Time Will Pass And You Will Forget",
"Dont Do It",
"Do It",
"Never Ask Me About That Again",
"I Cant Give Advice Now I Am Sleepy",
"Sorry I Cant Hear This Language",
"Sorry But Your Question Does Not Make Sense"]
greetings = "#################################################\n" \
"# HELLO THERE! #\n" \
"# Ask Me Question And I Will Give You Advice #\n" \
"# I Am Limited So Pick First Which Fits Context #\n" \
"#################################################\n"
question = ""
acceptable = 0
while not acceptable:
question = input("Ask Me A Question : ")
questionTmp = question.strip()
if len(questionTmp) > 0:
if questionTmp[len(questionTmp) - 1] == '?':
acceptable = 1
while True:
randPos = random.randint(0, len(answers))
print(answers[randPos])
indicator = 0
while True:
desire = input("Was This In Context? (Y/N) : ")
if desire.strip().lower() == 'n':
print("Its A Pitty :( I'll Try Again!")
break
elif desire.strip().lower() == 'y':
indicator = 1
print("Good To hear! Happy To Advice You!")
break
else:
continue
if indicator == 1:
print("Good Bye!")
break
| 1,860 | 0 | 22 |
58e4130998026cd9f7479ec95eb66f4f446ae3e1 | 7,806 | py | Python | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/tests/__init__.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2019-01-13T09:16:31.000Z | 2019-02-15T03:30:28.000Z | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/tests/__init__.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | null | null | null | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/tests/__init__.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2020-03-08T01:58:25.000Z | 2020-12-20T10:34:54.000Z | #!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
import subprocess
import tempfile
# Other modules import these two classes from here, for convenience:
from testtools.testcase import (
TestCase as TesttoolsTestCase,
TestSkipped,
)
class TestCase(TesttoolsTestCase):
"""A Samba test case."""
class LdbTestCase(TesttoolsTestCase):
"""Trivial test case for running tests against a LDB."""
def set_modules(self, modules=[]):
"""Change the modules for this Ldb."""
m = ldb.Message()
m.dn = ldb.Dn(self.ldb, "@MODULES")
m["@LIST"] = ",".join(modules)
self.ldb.add(m)
self.ldb = samba.Ldb(self.filename)
def env_get_var_value(var_name):
"""Returns value for variable in os.environ
Function throws AssertionError if variable is defined.
Unit-test based python tests require certain input params
to be set in environment, otherwise they can't be run
"""
assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
return os.environ[var_name]
cmdline_credentials = None
class RpcInterfaceTestCase(TestCase):
"""DCE/RPC Test case."""
class BlackboxProcessError(subprocess.CalledProcessError):
"""This exception is raised when a process run by check_output() returns
a non-zero exit status. Exception instance should contain
the exact exit code (S.returncode), command line (S.cmd),
process output (S.stdout) and process error stream (S.stderr)"""
class BlackboxTestCase(TestCase):
"""Base test case for blackbox tests."""
def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Create SamDB instance and connects to samdb_url database.
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
Added value for tests is that we have a shorthand function
to make proper URL for ldb.connect() while using default
parameters for connection based on test environment
"""
samdb_url = samdb_url.lower()
if not "://" in samdb_url:
if not ldap_only and os.path.isfile(samdb_url):
samdb_url = "tdb://%s" % samdb_url
else:
samdb_url = "ldap://%s" % samdb_url
# use 'paged_search' module when connecting remotely
if samdb_url.startswith("ldap://"):
ldb_options = ["modules:paged_searches"]
elif ldap_only:
raise AssertionError("Trying to connect to %s while remote "
"connection is required" % samdb_url)
# set defaults for test environment
if lp is None:
lp = env_loadparm()
if session_info is None:
session_info = samba.auth.system_session(lp)
if credentials is None:
credentials = cmdline_credentials
return SamDB(url=samdb_url,
lp=lp,
session_info=session_info,
credentials=credentials,
flags=flags,
options=ldb_options)
def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Connects to samdb_url database
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:return: (sam_db_connection, rootDse_record) tuple
"""
sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
flags, ldb_options, ldap_only)
# fetch RootDse
res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
attrs=["*"])
return (sam_db, res[0])
| 34.087336 | 110 | 0.664233 | #!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
import subprocess
import tempfile
# Other modules import these two classes from here, for convenience:
from testtools.testcase import (
TestCase as TesttoolsTestCase,
TestSkipped,
)
class TestCase(TesttoolsTestCase):
"""A Samba test case."""
def setUp(self):
super(TestCase, self).setUp()
test_debug_level = os.getenv("TEST_DEBUG_LEVEL")
if test_debug_level is not None:
test_debug_level = int(test_debug_level)
self._old_debug_level = samba.get_debug_level()
samba.set_debug_level(test_debug_level)
self.addCleanup(samba.set_debug_level, test_debug_level)
def get_loadparm(self):
return env_loadparm()
def get_credentials(self):
return cmdline_credentials
class LdbTestCase(TesttoolsTestCase):
"""Trivial test case for running tests against a LDB."""
def setUp(self):
super(LdbTestCase, self).setUp()
self.filename = os.tempnam()
self.ldb = samba.Ldb(self.filename)
def set_modules(self, modules=[]):
"""Change the modules for this Ldb."""
m = ldb.Message()
m.dn = ldb.Dn(self.ldb, "@MODULES")
m["@LIST"] = ",".join(modules)
self.ldb.add(m)
self.ldb = samba.Ldb(self.filename)
class TestCaseInTempDir(TestCase):
def setUp(self):
super(TestCaseInTempDir, self).setUp()
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
super(TestCaseInTempDir, self).tearDown()
self.assertEquals([], os.listdir(self.tempdir))
os.rmdir(self.tempdir)
def env_loadparm():
lp = param.LoadParm()
try:
lp.load(os.environ["SMB_CONF_PATH"])
except KeyError:
raise Exception("SMB_CONF_PATH not set")
return lp
def env_get_var_value(var_name):
"""Returns value for variable in os.environ
Function throws AssertionError if variable is defined.
Unit-test based python tests require certain input params
to be set in environment, otherwise they can't be run
"""
assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
return os.environ[var_name]
cmdline_credentials = None
class RpcInterfaceTestCase(TestCase):
"""DCE/RPC Test case."""
class ValidNetbiosNameTests(TestCase):
def test_valid(self):
self.assertTrue(samba.valid_netbios_name("FOO"))
def test_too_long(self):
self.assertFalse(samba.valid_netbios_name("FOO"*10))
def test_invalid_characters(self):
self.assertFalse(samba.valid_netbios_name("*BLA"))
class BlackboxProcessError(subprocess.CalledProcessError):
"""This exception is raised when a process run by check_output() returns
a non-zero exit status. Exception instance should contain
the exact exit code (S.returncode), command line (S.cmd),
process output (S.stdout) and process error stream (S.stderr)"""
def __init__(self, returncode, cmd, stdout, stderr):
super(BlackboxProcessError, self).__init__(returncode, cmd)
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command '%s'; exit status %d; stdout: '%s'; stderr: '%s'" % (self.cmd, self.returncode,
self.stdout, self.stderr)
class BlackboxTestCase(TestCase):
"""Base test case for blackbox tests."""
def _make_cmdline(self, line):
bindir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../bin"))
parts = line.split(" ")
if os.path.exists(os.path.join(bindir, parts[0])):
parts[0] = os.path.join(bindir, parts[0])
line = " ".join(parts)
return line
def check_run(self, line):
line = self._make_cmdline(line)
subprocess.check_call(line, shell=True)
def check_output(self, line):
line = self._make_cmdline(line)
p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
retcode = p.wait()
if retcode:
raise BlackboxProcessError(retcode, line, p.stdout.read(), p.stderr.read())
return p.stdout.read()
def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Create SamDB instance and connects to samdb_url database.
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
Added value for tests is that we have a shorthand function
to make proper URL for ldb.connect() while using default
parameters for connection based on test environment
"""
samdb_url = samdb_url.lower()
if not "://" in samdb_url:
if not ldap_only and os.path.isfile(samdb_url):
samdb_url = "tdb://%s" % samdb_url
else:
samdb_url = "ldap://%s" % samdb_url
# use 'paged_search' module when connecting remotely
if samdb_url.startswith("ldap://"):
ldb_options = ["modules:paged_searches"]
elif ldap_only:
raise AssertionError("Trying to connect to %s while remote "
"connection is required" % samdb_url)
# set defaults for test environment
if lp is None:
lp = env_loadparm()
if session_info is None:
session_info = samba.auth.system_session(lp)
if credentials is None:
credentials = cmdline_credentials
return SamDB(url=samdb_url,
lp=lp,
session_info=session_info,
credentials=credentials,
flags=flags,
options=ldb_options)
def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
                     flags=0, ldb_options=None, ldap_only=False):
    """Connect to samdb_url and also fetch its rootDSE record.

    All parameters are forwarded unchanged to connect_samdb().

    :return: (sam_db_connection, rootDse_record) tuple
    """
    sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
                           flags, ldb_options, ldap_only)
    # The rootDSE is the single base-scope entry at the empty DN.
    rootdse = sam_db.search(base="", expression="",
                            scope=ldb.SCOPE_BASE, attrs=["*"])[0]
    return (sam_db, rootdse)
def delete_force(samdb, dn):
    """Delete *dn* from *samdb*, silently tolerating an already-missing object.

    Any LDB error other than ERR_NO_SUCH_OBJECT trips the assertion.
    """
    try:
        samdb.delete(dn)
    except ldb.LdbError as e:
        # The old "except ldb.LdbError, (num, _)" form is a SyntaxError on
        # Python 3; unpack the (errno, message) pair from e.args instead.
        (num, _) = e.args
        assert num == ldb.ERR_NO_SUCH_OBJECT
| 2,327 | 30 | 468 |
4c79376da61280fc9b6f20a49be6405638e59f7d | 1,697 | py | Python | run_multiple.py | nifunk/GNNMushroomRL | d0d8eefdc10bca62e7cb536d65ea619607be755b | [
"MIT"
] | 1 | 2022-02-06T22:04:42.000Z | 2022-02-06T22:04:42.000Z | run_multiple.py | nifunk/GNNMushroomRL | d0d8eefdc10bca62e7cb536d65ea619607be755b | [
"MIT"
] | null | null | null | run_multiple.py | nifunk/GNNMushroomRL | d0d8eefdc10bca62e7cb536d65ea619607be755b | [
"MIT"
] | null | null | null | import subprocess
import os
import time
# Launch six training jobs (each of the three model variants - s2v, mha,
# mha_full - is started twice), staggered by 10 s between launches.
processes = []
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_s2v_ensemble_unlimited_neg_rew --model s2v --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_ensemble_unlimited_neg_rew --model mha --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_full_ensemble_unlimited_neg_rew --model mha_full --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_s2v_ensemble_unlimited_neg_rew --model s2v --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_ensemble_unlimited_neg_rew --model mha --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_full_ensemble_unlimited_neg_rew --model mha_full --use-cuda", shell=True))
# Poll until every job has exited.
while (len(processes)>0):
    removal_list = []
    for i in range(len(processes)):
        poll = processes[i].poll()
        if poll is None:
            # Still running - back off before polling the next one.
            time.sleep(60)
        else:
            removal_list.append(i)
    time.sleep(60)
    if (len(removal_list)!=0):
        # Pop finished jobs; correcting_counter compensates for the index
        # shift each preceding pop introduces.
        correcting_counter = 0
        for i in range(len(removal_list)):
            print ("PROCESS " + str(removal_list[i]) + " FINISHED")
            processes.pop(removal_list[i]-correcting_counter)
            correcting_counter += 1
| 49.911765 | 180 | 0.738362 | import subprocess
import os
import time
# Launch six training jobs (each of the three model variants - s2v, mha,
# mha_full - is started twice), staggered by 10 s between launches.
processes = []
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_s2v_ensemble_unlimited_neg_rew --model s2v --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_ensemble_unlimited_neg_rew --model mha --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_full_ensemble_unlimited_neg_rew --model mha_full --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_s2v_ensemble_unlimited_neg_rew --model s2v --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_ensemble_unlimited_neg_rew --model mha --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_full_ensemble_unlimited_neg_rew --model mha_full --use-cuda", shell=True))
# Poll until every job has exited.
while (len(processes)>0):
    removal_list = []
    for i in range(len(processes)):
        poll = processes[i].poll()
        if poll is None:
            # Still running - back off before polling the next one.
            time.sleep(60)
        else:
            removal_list.append(i)
    time.sleep(60)
    if (len(removal_list)!=0):
        # Pop finished jobs; correcting_counter compensates for the index
        # shift each preceding pop introduces.
        correcting_counter = 0
        for i in range(len(removal_list)):
            print ("PROCESS " + str(removal_list[i]) + " FINISHED")
            processes.pop(removal_list[i]-correcting_counter)
            correcting_counter += 1
| 0 | 0 | 0 |
cd175c3e011891593c4dd89e69708641f0c8af05 | 477 | py | Python | messenger/box.py | vinoth3v/In_addon_messenger | e1c5044d6ee4bfc2adbb1a81af16f7769b230c70 | [
"Apache-2.0"
] | 1 | 2015-12-16T03:25:39.000Z | 2015-12-16T03:25:39.000Z | messenger/box.py | vinoth3v/In_addon_messenger | e1c5044d6ee4bfc2adbb1a81af16f7769b230c70 | [
"Apache-2.0"
] | null | null | null | messenger/box.py | vinoth3v/In_addon_messenger | e1c5044d6ee4bfc2adbb1a81af16f7769b230c70 | [
"Apache-2.0"
] | 1 | 2019-09-13T10:12:46.000Z | 2019-09-13T10:12:46.000Z | from In.boxer.box import Box, BoxThemer
class BoxMessagesList(Box):
    '''Box listing messages.'''
    # Box title (s() presumably marks the string for translation - TODO confirm)
    title = s('Messages')
@IN.register('BoxMessagesList', type = 'Themer')
| 17.035714 | 53 | 0.624738 | from In.boxer.box import Box, BoxThemer
class BoxMessagesList(Box):
    '''Box listing messages.'''
    # Box title (s() presumably marks the string for translation - TODO confirm)
    title = s('Messages')
@IN.register('BoxMessagesList', type = 'Themer')
class BoxMessagesListThemer(BoxThemer):
    '''Themer for BoxMessagesList: adds a lazily loaded message list
    before delegating to the default BoxThemer theming.'''

    def theme_items(self, obj, format, view_mode, args):
        '''Attach a MessageListLazy child to *obj*, then run the base theming.'''
        obj.css.append('i-overflow-container')
        # load_args carries an empty data payload.
        data = {
            'lazy_args' : {
                'load_args' : {
                    'data' : {
                    },
                }
            },
        }
        obj.add('MessageListLazy', data)
        super().theme_items(obj, format, view_mode, args)
| 261 | 18 | 46 |
e010f3e22b693a181806e15cecdaf275f414e767 | 12,173 | py | Python | tests/e2e/rnn_rollout/test_deal_or_not.py | haojiepan1/CrossWOZ | 6d7b4c4cfb73a528b76074764687906abecc90b6 | [
"Apache-2.0"
] | 1 | 2020-03-09T02:09:10.000Z | 2020-03-09T02:09:10.000Z | tests/e2e/rnn_rollout/test_deal_or_not.py | haojiepan1/CrossWOZ | 6d7b4c4cfb73a528b76074764687906abecc90b6 | [
"Apache-2.0"
] | null | null | null | tests/e2e/rnn_rollout/test_deal_or_not.py | haojiepan1/CrossWOZ | 6d7b4c4cfb73a528b76074764687906abecc90b6 | [
"Apache-2.0"
] | null | null | null | import argparse
from convlab2.e2e.rnn_rollout.deal_or_not import DealornotAgent
from convlab2.e2e.rnn_rollout.deal_or_not.model import get_context_generator
from convlab2 import DealornotSession
import convlab2.e2e.rnn_rollout.utils as utils
import numpy as np
session_num = 20
# agent
# NOTE(review): rnn_model_args()/sel_model_args() are expected to be defined
# earlier in this file (they build the model argument namespaces) - confirm.
alice_agent = DealornotAgent('Alice', rnn_model_args(), sel_model_args())
bob_agent = DealornotAgent('Bob', rnn_model_args(), sel_model_args())
agents = [alice_agent, bob_agent]
context_generator = get_context_generator(rnn_model_args().context_file)
# session
session = DealornotSession(alice_agent, bob_agent)
session_idx = 0
rewards = [[], []]  # [alice rewards, bob rewards], one entry per session
# Play one negotiation session per context pair.
for ctxs in context_generator.iter():
    print('session_idx', session_idx)
    # Each agent gets its own context; the partner context is the other one.
    for agent, ctx, partner_ctx in zip(agents, ctxs, reversed(ctxs)):
        agent.feed_context(ctx)
        agent.feed_partner_context(partner_ctx)
    last_observation = None
    # Exchange responses until the session reports termination.
    while True:
        response = session.next_response(last_observation)
        print('\t', ' '.join(response))
        session_over = session.is_terminated()
        if session_over:
            break
        last_observation = response
    agree, [alice_r, bob_r] = session.get_rewards(ctxs)
    print('session [{}] alice vs bos: {:.1f}/{:.1f}'.format(session_idx, alice_r, bob_r))
    rewards[0].append(alice_r)
    rewards[1].append(bob_r)
    session.init_session()
    session_idx += 1
# print(np.mean(rewards, axis=1))
| 52.69697 | 108 | 0.657356 | import argparse
from convlab2.e2e.rnn_rollout.deal_or_not import DealornotAgent
from convlab2.e2e.rnn_rollout.deal_or_not.model import get_context_generator
from convlab2 import DealornotSession
import convlab2.e2e.rnn_rollout.utils as utils
import numpy as np
session_num = 20
def rnn_model_args():
    """Build the argparse namespace holding the RNN-rollout dialogue model's
    hyper-parameters, file paths and self-play options.

    NOTE(review): parse_args() reads sys.argv, so unrecognised command-line
    flags abort the program - confirm this is intended for library use.
    """
    parser = argparse.ArgumentParser(description='selfplaying script')
    parser.add_argument('--nembed_word', type=int, default=256,
                        help='size of word embeddings')
    parser.add_argument('--nembed_ctx', type=int, default=64,
                        help='size of context embeddings')
    parser.add_argument('--nhid_lang', type=int, default=128,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_cluster', type=int, default=256,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_ctx', type=int, default=64,
                        help='size of the hidden state for the context module')
    parser.add_argument('--nhid_strat', type=int, default=64,
                        help='size of the hidden state for the strategy module')
    parser.add_argument('--nhid_attn', type=int, default=64,
                        help='size of the hidden state for the attention module')
    parser.add_argument('--nhid_sel', type=int, default=128,
                        help='size of the hidden state for the selection module')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--min_lr', type=float, default=1e-07,
                        help='min threshold for learning rate annealing')
    parser.add_argument('--decay_rate', type=float, default=5.0,
                        help='decrease learning rate by this factor')
    parser.add_argument('--decay_every', type=int, default=1,
                        help='decrease learning rate after decay_every epochs')
    parser.add_argument('--momentum', type=float, default=0.1,
                        help='momentum for sgd')
    parser.add_argument('--clip', type=float, default=2.0,
                        help='gradient clipping')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout rate in embedding layer')
    parser.add_argument('--init_range', type=float, default=0.2,
                        help='initialization range')
    parser.add_argument('--max_epoch', type=int, default=30,
                        help='max number of epochs')
    parser.add_argument('--num_clusters', type=int, default=50,
                        help='number of clusters')
    parser.add_argument('--partner_ctx_weight', type=float, default=0.0,
                        help='selection weight')
    parser.add_argument('--sel_weight', type=float, default=0.6,
                        help='selection weight')
    parser.add_argument('--prediction_model_file', type=str, default='',
                        help='path to save the prediction model')
    parser.add_argument('--cluster_model_file', type=str, default='',
                        help='path to save the cluster model')
    parser.add_argument('--lang_model_file', type=str, default='',
                        help='path to save the language model')
    parser.add_argument('--model_file', type=str,
                        help='model file (use algorithm/dataset/configs as root path)',
                        default="models/rnn_model_state_dict.th")
    parser.add_argument('--alice_forward_model_file', type=str,
                        help='Alice forward model file')
    parser.add_argument('--bob_model_file', type=str,
                        help='Bob model file')
    parser.add_argument('--context_file', type=str, default='data/deal_or_not/selfplay.txt',
                        help='context file')
    parser.add_argument('--temperature', type=float, default=1.0,
                        help='temperature')
    parser.add_argument('--pred_temperature', type=float, default=1.0,
                        help='temperature')
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='print out converations')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed')
    parser.add_argument('--score_threshold', type=int, default=6,
                        help='successful dialog should have more than score_threshold in score')
    parser.add_argument('--max_turns', type=int, default=20,
                        help='maximum number of turns in a dialog')
    parser.add_argument('--log_file', type=str, default='',
                        help='log successful dialogs to file for training')
    parser.add_argument('--smart_alice', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--diverse_alice', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--rollout_bsz', type=int, default=3,
                        help='rollout batch size')
    parser.add_argument('--rollout_count_threshold', type=int, default=3,
                        help='rollout count threshold')
    parser.add_argument('--smart_bob', action='store_true', default=False,
                        help='make Bob smart again')
    parser.add_argument('--selection_model_file', type=str, default='models/selection_model.th',
                        help='path to save the final model')
    parser.add_argument('--rollout_model_file', type=str, default='',
                        help='path to save the final model')
    parser.add_argument('--diverse_bob', action='store_true', default=False,
                        help='make Alice smart again')
    parser.add_argument('--ref_text', type=str,
                        help='file with the reference text')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='use CUDA')
    parser.add_argument('--domain', type=str, default='object_division',
                        help='domain for the dialogue')
    parser.add_argument('--visual', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--eps', type=float, default=0.0,
                        help='eps greedy')
    parser.add_argument('--data', type=str, default='data/deal_or_not',
                        help='location of the data corpus (use project path root path)')
    parser.add_argument('--unk_threshold', type=int, default=20,
                        help='minimum word frequency to be in dictionary')
    parser.add_argument('--bsz', type=int, default=16,
                        help='batch size')
    parser.add_argument('--validate', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--sep_sel', action='store_true', default=True,
                        help='use separate classifiers for selection')
    args = parser.parse_args()
    return args
def sel_model_args():
    """Build the argparse namespace holding the selection model's
    hyper-parameters and file paths.

    NOTE(review): parse_args() reads sys.argv, so unrecognised command-line
    flags abort the program - confirm this is intended for library use.
    """
    parser = argparse.ArgumentParser(description='training script')
    parser.add_argument('--data', type=str, default='data/negotiate',
                        help='location of the data corpus')
    parser.add_argument('--nembed_word', type=int, default=128,
                        help='size of word embeddings')
    parser.add_argument('--nembed_ctx', type=int, default=128,
                        help='size of context embeddings')
    parser.add_argument('--nhid_lang', type=int, default=128,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_cluster', type=int, default=256,
                        help='size of the hidden state for the language module')
    parser.add_argument('--nhid_ctx', type=int, default=64,
                        help='size of the hidden state for the context module')
    parser.add_argument('--nhid_strat', type=int, default=256,
                        help='size of the hidden state for the strategy module')
    parser.add_argument('--nhid_attn', type=int, default=128,
                        help='size of the hidden state for the attention module')
    parser.add_argument('--nhid_sel', type=int, default=128,
                        help='size of the hidden state for the selection module')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='initial learning rate')
    parser.add_argument('--min_lr', type=float, default=1e-5,
                        help='min threshold for learning rate annealing')
    parser.add_argument('--decay_rate', type=float, default=5.0,
                        help='decrease learning rate by this factor')
    parser.add_argument('--decay_every', type=int, default=1,
                        help='decrease learning rate after decay_every epochs')
    parser.add_argument('--momentum', type=float, default=0.1,
                        help='momentum for sgd')
    parser.add_argument('--clip', type=float, default=0.2,
                        help='gradient clipping')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout rate in embedding layer')
    parser.add_argument('--init_range', type=float, default=0.2,
                        help='initialization range')
    parser.add_argument('--max_epoch', type=int, default=7,
                        help='max number of epochs')
    parser.add_argument('--num_clusters', type=int, default=50,
                        help='number of clusters')
    parser.add_argument('--bsz', type=int, default=25,
                        help='batch size')
    parser.add_argument('--unk_threshold', type=int, default=20,
                        help='minimum word frequency to be in dictionary')
    parser.add_argument('--temperature', type=float, default=0.1,
                        help='temperature')
    parser.add_argument('--partner_ctx_weight', type=float, default=0.0,
                        help='selection weight')
    parser.add_argument('--sel_weight', type=float, default=0.6,
                        help='selection weight')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='use CUDA')
    parser.add_argument('--model_file', type=str, default='',
                        help='path to save the final model')
    parser.add_argument('--prediction_model_file', type=str, default='',
                        help='path to save the prediction model')
    parser.add_argument('--selection_model_file', type=str, default='models/selection_model_state_dict.th',
                        help='path to save the selection model')
    parser.add_argument('--cluster_model_file', type=str, default='',
                        help='path to save the cluster model')
    parser.add_argument('--lang_model_file', type=str, default='',
                        help='path to save the language model')
    parser.add_argument('--visual', action='store_true', default=False,
                        help='plot graphs')
    parser.add_argument('--skip_values', action='store_true', default=True,
                        help='skip values in ctx encoder')
    parser.add_argument('--model_type', type=str, default='selection_model',
                        help='model type')
    parser.add_argument('--domain', type=str, default='object_division',
                        help='domain for the dialogue')
    parser.add_argument('--clustering', action='store_true', default=False,
                        help='use clustering')
    parser.add_argument('--sep_sel', action='store_true', default=True,
                        help='use separate classifiers for selection')
    args = parser.parse_args()
    return args
# agent
alice_agent = DealornotAgent('Alice', rnn_model_args(), sel_model_args())
bob_agent = DealornotAgent('Bob', rnn_model_args(), sel_model_args())
agents = [alice_agent, bob_agent]
context_generator = get_context_generator(rnn_model_args().context_file)
# session
session = DealornotSession(alice_agent, bob_agent)
session_idx = 0
rewards = [[], []]  # [alice rewards, bob rewards], one entry per session
# Play one negotiation session per context pair.
for ctxs in context_generator.iter():
    print('session_idx', session_idx)
    # Each agent gets its own context; the partner context is the other one.
    for agent, ctx, partner_ctx in zip(agents, ctxs, reversed(ctxs)):
        agent.feed_context(ctx)
        agent.feed_partner_context(partner_ctx)
    last_observation = None
    # Exchange responses until the session reports termination.
    while True:
        response = session.next_response(last_observation)
        print('\t', ' '.join(response))
        session_over = session.is_terminated()
        if session_over:
            break
        last_observation = response
    agree, [alice_r, bob_r] = session.get_rewards(ctxs)
    print('session [{}] alice vs bos: {:.1f}/{:.1f}'.format(session_idx, alice_r, bob_r))
    rewards[0].append(alice_r)
    rewards[1].append(bob_r)
    session.init_session()
    session_idx += 1
# print(np.mean(rewards, axis=1))
| 10,706 | 0 | 46 |
0a5572169ce1f38ec8fba4567eee421bbdb0a433 | 2,363 | py | Python | tests/threaded_server.py | golly-splorts/golly-pelican | b258551778d3d24cb8e1173ae08ee935a53437b2 | [
"MIT"
] | null | null | null | tests/threaded_server.py | golly-splorts/golly-pelican | b258551778d3d24cb8e1173ae08ee935a53437b2 | [
"MIT"
] | 11 | 2020-12-12T01:12:30.000Z | 2021-07-29T05:00:13.000Z | tests/threaded_server.py | golly-splorts/golly-pelican | b258551778d3d24cb8e1173ae08ee935a53437b2 | [
"MIT"
] | null | null | null | import json
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
# Read the mock-API bind address/port from the environment at import time.
# KeyError -> a variable is missing; ValueError -> PORT is not an integer.
try:
    HOST = os.environ["GOLLY_PELICAN_TEST_MOCKAPI_HOST"]
    PORT = int(os.environ["GOLLY_PELICAN_TEST_MOCKAPI_PORT"])
except KeyError:
    raise Exception(
        "Error: you must define GOLLY_PELICAN_TEST_MOCKAPI_{HOST,PORT}. Try running source environment.test"
    )
except ValueError:
    raise Exception(
        "Error: you must provide an integer for GOLLY_PELICAN_TEST_MOCKAPI_PORT. Try running source environment.test"
    )
| 28.130952 | 117 | 0.637325 | import json
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
# Read the mock-API bind address/port from the environment at import time.
# KeyError -> a variable is missing; ValueError -> PORT is not an integer.
try:
    HOST = os.environ["GOLLY_PELICAN_TEST_MOCKAPI_HOST"]
    PORT = int(os.environ["GOLLY_PELICAN_TEST_MOCKAPI_PORT"])
except KeyError:
    raise Exception(
        "Error: you must define GOLLY_PELICAN_TEST_MOCKAPI_{HOST,PORT}. Try running source environment.test"
    )
except ValueError:
    raise Exception(
        "Error: you must provide an integer for GOLLY_PELICAN_TEST_MOCKAPI_PORT. Try running source environment.test"
    )
class ThreadedServer(BaseHTTPRequestHandler):
    """Minimal JSON mock-API request handler served from a background thread.

    Class-level state holds the single HTTPServer instance and the thread
    driving its serve_forever() loop.
    """

    _server = None  # the HTTPServer while serving, else None
    _thread = None  # the thread running serve_forever(), else None

    @staticmethod
    def get_addr_port():
        """Return the (host, port) pair read from the environment."""
        return HOST, PORT

    @staticmethod
    def get_base_url():
        """Return the server's root URL, e.g. "http://localhost:8000"."""
        addr, port = ThreadedServer.get_addr_port()
        return f"http://{addr}:{port}"

    @classmethod
    def start_serving(cls):
        """Create the HTTP server and start it on a background thread."""
        cls._addr, cls._port = cls.get_addr_port()
        cls._server = HTTPServer((cls._addr, cls._port), cls)
        cls._thread = threading.Thread(target=cls._server.serve_forever)
        cls._thread.start()

    @classmethod
    def stop_serving(cls):
        """Shut the server down and join its thread (no-op if never started)."""
        if cls._server is not None:
            cls._server.shutdown()
        # Guard against stop_serving() before start_serving(): the original
        # crashed with AttributeError on _thread.join when _thread was None.
        if cls._thread is not None:
            cls._thread.join(timeout=10)
            assert not cls._thread.is_alive()

    def _serialize(self, d):
        """Encode *d* as UTF-8 JSON bytes."""
        return bytes(json.dumps(d), "utf-8")

    def prq(self, path):
        """Route *path* to its JSON payload; None for unknown paths."""
        if path == "/ping":
            return {"ping": "pong"}
        return None

    def do_GET(self):
        try:
            response = self.prq(self.path)
            self._set_headers()
            # BUG FIX: the original called the undefined name `serialize`,
            # so every request raised NameError and was answered 400; it
            # also sent the status line and headers a second time here.
            self.wfile.write(self._serialize(response))
        except Exception:
            self.send_response(400)
            self.end_headers()

    def _set_headers(self):
        """Send a 200 status line plus JSON content-type headers."""
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()

    def log_request(self, *args, **kwargs):
        """If this method is empty, it stops logging messages from being sent to the console"""
        pass
| 1,296 | 502 | 23 |
836ea136d7486720db3c15a1e1b1688ce5bb7662 | 1,328 | py | Python | Python/Simulation/Numerical_Methods/test_secant_root_solve.py | MattMarti/Lambda-Trajectory-Sim | 4155f103120bd49221776cc3b825b104f36817f2 | [
"MIT"
] | null | null | null | Python/Simulation/Numerical_Methods/test_secant_root_solve.py | MattMarti/Lambda-Trajectory-Sim | 4155f103120bd49221776cc3b825b104f36817f2 | [
"MIT"
] | null | null | null | Python/Simulation/Numerical_Methods/test_secant_root_solve.py | MattMarti/Lambda-Trajectory-Sim | 4155f103120bd49221776cc3b825b104f36817f2 | [
"MIT"
] | null | null | null | import unittest;
import math;
from secant_root_solve import secant_root_solve;
class Test_secant_root_solve(unittest.TestCase):
    '''
    Test_secantrootsolve.m
    Test case for the Secant Root Solver function. Based on the solution to
    Problem 2 of Homework 1 of AOE 4404 Numerical Methods
    Use Graphical technique, bisection method, false-position, fixed-point
    iteration, Newton method, and secant method to find the first root of
    f(x) = x*exp(x) - cos(x)
    @author: Matt Marti
    @date: 2019-06-16
    '''
    def test_only(self):
        '''Only test needed: solve f(x) = cos(x) - x*exp(x) on [0, 1] and
        check the residual, the error estimate, and the iteration count.'''
        # Define function
        f = lambda x : math.cos(x) - x*math.exp(x);
        # Parameters
        a = 0; # Lower bound
        b = 1; # Upper bound
        errstop = 1e-12; # Stopping criteria
        maxiter = 1000; # Iteration cap handed to the solver
        # Function call
        x, niter, erra = secant_root_solve(f, a, b, maxiter, errstop);
        # Check results
        self.assertLess(abs(f(x)), errstop, \
            'Results error not less than specified error');
        self.assertLess(abs(erra), errstop, \
            'Results error not less than specified error');
        self.assertLess(niter, maxiter, \
            'Took too many iterations, function could be bugged');
#
#
# | 30.181818 | 75 | 0.595633 | import unittest;
import math;
from secant_root_solve import secant_root_solve;
class Test_secant_root_solve(unittest.TestCase):
    """Unit test for the secant-method root solver.

    Finds the first root of f(x) = cos(x) - x*exp(x) on [0, 1] and checks
    the residual, the reported error estimate, and the iteration count.
    (Ported from Test_secantrootsolve.m, AOE 4404 Numerical Methods,
    Matt Marti, 2019-06-16.)
    """

    def test_only(self):
        """Single test: residual and error below tolerance, iterations bounded."""
        func = lambda t: math.cos(t) - t * math.exp(t)
        lower, upper = 0, 1
        tolerance = 1e-12
        iter_limit = 1000
        root, iter_count, err_estimate = secant_root_solve(
            func, lower, upper, iter_limit, tolerance)
        self.assertLess(abs(func(root)), tolerance,
                        'Results error not less than specified error')
        self.assertLess(abs(err_estimate), tolerance,
                        'Results error not less than specified error')
        self.assertLess(iter_count, iter_limit,
                        'Took too many iterations, function could be bugged')
#
#
# | 0 | 0 | 0 |
2822961c69b06aac537a9f55a900e01ef0741ec0 | 1,675 | py | Python | espider/espider/__init__.py | MeteorsHub/espider | 28701083c6881a8f32b87a29c0a647fb81e2e107 | [
"MIT"
] | 1 | 2018-01-17T05:44:32.000Z | 2018-01-17T05:44:32.000Z | espider/espider/__init__.py | MeteorKepler/espider | 28701083c6881a8f32b87a29c0a647fb81e2e107 | [
"MIT"
] | null | null | null | espider/espider/__init__.py | MeteorKepler/espider | 28701083c6881a8f32b87a29c0a647fb81e2e107 | [
"MIT"
] | 1 | 2019-11-12T19:42:16.000Z | 2019-11-12T19:42:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
espider.*
------------------------------------------------------------
Package espider is a simply constructed web crawling and scraping framework that is easy to use.
This package includes modules mentioned below:
|name |description |
|:-------------:|:---------------------------------------------------------------------------|
|spider |Scribe web sources automatically and save original sources |
|parser |Parse the sources that are scribed by spider |
|httphandler |Manipulate module that communicate with web server |
|proxy |A proxy handler provides Internet connection |
|selephan |Use selenium and phantomjs to load website instantly just like a browser do |
|mysql |Provide mysql service while saving data |
|log |Support configurable console and file logging |
|util |Including some useful functions the project need |
|config |Loading configuration from both config_default and config_override |
|config_default |Define default settings. You should always change configs in config_override|
You can refer to README.md for further instruction.
:Copyright (c) 2016 MeteorKepler
:license: MIT, see LICENSE for more details.
"""
__author__ = 'MeterKepler'
__version__ = '0.1.3'
| 47.857143 | 101 | 0.506866 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
espider.*
------------------------------------------------------------
Package espider is a simply constructed web crawling and scraping framework that is easy to use.
This package includes modules mentioned below:
|name |description |
|:-------------:|:---------------------------------------------------------------------------|
|spider |Scribe web sources automatically and save original sources |
|parser |Parse the sources that are scribed by spider |
|httphandler |Manipulate module that communicate with web server |
|proxy |A proxy handler provides Internet connection |
|selephan |Use selenium and phantomjs to load website instantly just like a browser do |
|mysql |Provide mysql service while saving data |
|log |Support configurable console and file logging |
|util |Including some useful functions the project need |
|config |Loading configuration from both config_default and config_override |
|config_default |Define default settings. You should always change configs in config_override|
You can refer to README.md for further instruction.
:Copyright (c) 2016 MeteorKepler
:license: MIT, see LICENSE for more details.
"""
__author__ = 'MeterKepler'
__version__ = '0.1.3'
| 0 | 0 | 0 |
22a495ec55dab73c42eb53fe09b0deb99436f82d | 5,565 | py | Python | example_project/organizations/tests/test_backends.py | st8st8/django-guardian | dd51ac3f8dd211cc3bf8d66536340aa39e360f23 | [
"MIT"
] | 1 | 2017-09-06T08:19:18.000Z | 2017-09-06T08:19:18.000Z | example_project/organizations/tests/test_backends.py | st8st8/django-guardian | dd51ac3f8dd211cc3bf8d66536340aa39e360f23 | [
"MIT"
] | null | null | null | example_project/organizations/tests/test_backends.py | st8st8/django-guardian | dd51ac3f8dd211cc3bf8d66536340aa39e360f23 | [
"MIT"
] | null | null | null | from django.core import mail
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.http import Http404, QueryDict
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from organizations.tests.utils import request_factory_login
from organizations.backends.defaults import (BaseBackend, InvitationBackend,
RegistrationBackend)
from organizations.backends.tokens import RegistrationTokenGenerator
@override_settings(USE_TZ=True)
@override_settings(USE_TZ=True)
@override_settings(USE_TZ=True)
| 39.75 | 84 | 0.695597 | from django.core import mail
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.http import Http404, QueryDict
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from organizations.tests.utils import request_factory_login
from organizations.backends.defaults import (BaseBackend, InvitationBackend,
RegistrationBackend)
from organizations.backends.tokens import RegistrationTokenGenerator
@override_settings(USE_TZ=True)
class BaseTests(TestCase):
    """Tests for the generic BaseBackend helpers."""
    def test_generate_username(self):
        """get_username() should always return a non-empty value."""
        self.assertTrue(BaseBackend().get_username())
@override_settings(USE_TZ=True)
class InvitationTests(TestCase):
    """Tests for InvitationBackend: inviting, reminding and activating users."""
    # Pre-loaded test data (presumably an active user "krist" plus orgs).
    fixtures = ['users.json', 'orgs.json']
    def setUp(self):
        # Empty the outbox so each test counts only its own mail.
        mail.outbox = []
        self.factory = RequestFactory()
        self.tokenizer = RegistrationTokenGenerator()
        self.user = User.objects.get(username="krist")
        # An invited-but-not-yet-activated account.
        self.pending_user = User.objects.create_user(username="theresa",
                email="t@example.com", password="test")
        self.pending_user.is_active = False
        self.pending_user.save()
    def test_backend_definition(self):
        """invitation_backend() should return an InvitationBackend instance."""
        from organizations.backends import invitation_backend
        self.assertTrue(isinstance(invitation_backend(), InvitationBackend))
    def test_create_user(self):
        """Inviting a new address creates an inactive user and sends one mail."""
        invited = InvitationBackend().invite_by_email("sedgewick@example.com")
        self.assertTrue(isinstance(invited, User))
        self.assertFalse(invited.is_active)
        self.assertEqual(1, len(mail.outbox))
        mail.outbox = []
    def test_create_existing_user(self):
        """Inviting an active user's address returns that user; no mail sent."""
        invited = InvitationBackend().invite_by_email(self.user.email)
        self.assertEqual(self.user, invited)
        self.assertEqual(0, len(mail.outbox)) # User is active
    def test_send_reminder(self):
        """Reminders are mailed only to pending (inactive) users."""
        InvitationBackend().send_reminder(self.pending_user)
        self.assertEqual(1, len(mail.outbox))
        InvitationBackend().send_reminder(self.user)
        self.assertEqual(1, len(mail.outbox)) # User is active
        mail.outbox = []
    def test_urls(self):
        """Ensure no error is raised"""
        reverse('invitations_register', kwargs={
            'user_id': self.pending_user.id,
            'token': self.tokenizer.make_token(self.pending_user)})
    def test_activate_user(self):
        """Activation 404s for an active user, returns 200 for a pending one."""
        request = self.factory.request()
        with self.assertRaises(Http404):
            InvitationBackend().activate_view(request, self.user.id,
                self.tokenizer.make_token(self.user))
        self.assertEqual(200, InvitationBackend().activate_view(request,
            self.pending_user.id,
            self.tokenizer.make_token(self.pending_user)).status_code)
@override_settings(USE_TZ=True)
class RegistrationTests(TestCase):
    """Tests for RegistrationBackend: registering, reminding and activating."""
    # Pre-loaded test data (presumably an active user "krist" plus orgs).
    fixtures = ['users.json', 'orgs.json']
    def setUp(self):
        # Empty the outbox so each test counts only its own mail.
        mail.outbox = []
        self.factory = RequestFactory()
        self.tokenizer = RegistrationTokenGenerator()
        self.user = User.objects.get(username="krist")
        # A registered-but-not-yet-activated account.
        self.pending_user = User.objects.create_user(username="theresa",
                email="t@example.com", password="test")
        self.pending_user.is_active = False
        self.pending_user.save()
    def test_backend_definition(self):
        """registration_backend() should return a RegistrationBackend instance."""
        from organizations.backends import registration_backend
        self.assertTrue(isinstance(registration_backend(), RegistrationBackend))
    def test_register_authenticated(self):
        """Ensure an already authenticated user is redirected"""
        backend = RegistrationBackend()
        request = request_factory_login(self.factory, self.user)
        self.assertEqual(302, backend.create_view(request).status_code)
    def test_register_existing(self):
        """Ensure that an existing user is redirected to login"""
        backend = RegistrationBackend()
        request = request_factory_login(self.factory)
        request.POST = QueryDict("name=Mudhoney&slug=mudhoney&email=dave@foo.com")
        self.assertEqual(302, backend.create_view(request).status_code)
    def test_create_user(self):
        """Registering a new address creates an inactive user and sends one mail."""
        registered = RegistrationBackend().register_by_email("greenway@example.com")
        self.assertTrue(isinstance(registered, User))
        self.assertFalse(registered.is_active)
        self.assertEqual(1, len(mail.outbox))
        mail.outbox = []
    def test_create_existing_user(self):
        """Registering an active user's address returns that user; no mail sent."""
        registered = RegistrationBackend().register_by_email(self.user.email)
        self.assertEqual(self.user, registered)
        self.assertEqual(0, len(mail.outbox)) # User is active
    def test_send_reminder(self):
        """Reminders are mailed only to pending (inactive) users."""
        RegistrationBackend().send_reminder(self.pending_user)
        self.assertEqual(1, len(mail.outbox))
        RegistrationBackend().send_reminder(self.user)
        self.assertEqual(1, len(mail.outbox)) # User is active
        mail.outbox = []
    def test_urls(self):
        """Ensure the registration URL reverses without error."""
        reverse('registration_register', kwargs={
            'user_id': self.pending_user.id,
            'token': self.tokenizer.make_token(self.pending_user)})
    def test_activate_user(self):
        """Activation 404s for an active user, returns 200 for a pending one."""
        request = self.factory.request()
        with self.assertRaises(Http404):
            RegistrationBackend().activate_view(request, self.user.id,
                self.tokenizer.make_token(self.user))
        self.assertEqual(200, RegistrationBackend().activate_view(request,
            self.pending_user.id,
            self.tokenizer.make_token(self.pending_user)).status_code)
| 3,510 | 1,336 | 93 |
f365548c3fbdbd12a228a97d3a485f60bf1f2fa7 | 9,495 | py | Python | src/hde_embedding.py | mmyros/hdestimator | 8a6da9ef513a3bd1ba0e8bbc1a46a2beb4fee69b | [
"BSD-3-Clause"
] | 1 | 2022-03-25T21:56:53.000Z | 2022-03-25T21:56:53.000Z | src/hde_embedding.py | Priesemann-Group/historydependence | e1adc5eea8cb05cc686bfda0b979244b34d63bb4 | [
"BSD-3-Clause"
] | null | null | null | src/hde_embedding.py | Priesemann-Group/historydependence | e1adc5eea8cb05cc686bfda0b979244b34d63bb4 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from scipy.optimize import newton
from collections import Counter
from sys import stderr, exit
FAST_EMBEDDING_AVAILABLE = True
try:
import hde_fast_embedding as fast_emb
except:
FAST_EMBEDDING_AVAILABLE = False
print("""
Error importing Cython fast embedding module. Continuing with slow Python implementation.\n
This may take a long time.\n
""", file=stderr, flush=True)
def get_set_of_scalings(past_range_T,
number_of_bins_d,
number_of_scalings,
min_first_bin_size,
min_step_for_scaling):
"""
Get scaling exponents such that the uniform embedding as well as
the embedding for which the first bin has a length of
min_first_bin_size (in seconds), as well as linearly spaced
scaling factors in between, such that in total
number_of_scalings scalings are obtained.
"""
min_scaling = 0
if past_range_T / number_of_bins_d <= min_first_bin_size or number_of_bins_d == 1:
max_scaling = 0
else:
# for the initial guess assume the largest bin dominates, so k is approx. log(T) / d
max_scaling = newton(lambda scaling: get_past_range(number_of_bins_d,
min_first_bin_size,
scaling)
- past_range_T,
np.log10(past_range_T
/ min_first_bin_size) / (number_of_bins_d - 1),
tol = 1e-04, maxiter = 500)
while np.linspace(min_scaling, max_scaling,
number_of_scalings, retstep = True)[1] < min_step_for_scaling:
number_of_scalings -= 1
return np.linspace(min_scaling, max_scaling, number_of_scalings)
def get_embeddings(embedding_past_range_set,
embedding_number_of_bins_set,
embedding_scaling_exponent_set):
"""
Get all combinations of parameters T, d, k, based on the
sets of selected parameters.
"""
embeddings = []
for past_range_T in embedding_past_range_set:
for number_of_bins_d in embedding_number_of_bins_set:
if not isinstance(number_of_bins_d, int) or number_of_bins_d < 1:
print("Error: numer of bins {} is not a positive integer. Skipping.".format(number_of_bins_d),
file=stderr, flush=True)
continue
if type(embedding_scaling_exponent_set) == dict:
scaling_set_given_T_and_d = get_set_of_scalings(past_range_T,
number_of_bins_d,
**embedding_scaling_exponent_set)
else:
scaling_set_given_T_and_d = embedding_scaling_exponent_set
for scaling_k in scaling_set_given_T_and_d:
embeddings += [(past_range_T, number_of_bins_d, scaling_k)]
return embeddings
def get_fist_bin_size_for_embedding(embedding):
"""
Get size of first bin for the embedding, based on the parameters
T, d and k.
"""
past_range_T, number_of_bins_d, scaling_k = embedding
return newton(lambda first_bin_size: get_past_range(number_of_bins_d,
first_bin_size,
scaling_k) - past_range_T,
0.005, tol = 1e-03, maxiter = 100)
def get_past_range(number_of_bins_d, first_bin_size, scaling_k):
"""
Get the past range T of the embedding, based on the parameters d, tau_1 and k.
"""
return np.sum([first_bin_size * 10**((number_of_bins_d - i) * scaling_k)
for i in range(1, number_of_bins_d + 1)])
def get_window_delimiters(number_of_bins_d, scaling_k, first_bin_size, embedding_step_size):
"""
Get delimiters of the window, used to describe the embedding. The
window includes both the past embedding and the response.
The delimiters are times, relative to the first bin, that separate
two consequent bins.
"""
bin_sizes = [first_bin_size * 10**((number_of_bins_d - i) * scaling_k)
for i in range(1, number_of_bins_d + 1)]
window_delimiters = [sum([bin_sizes[j] for j in range(i)])
for i in range(1, number_of_bins_d + 1)]
window_delimiters.append(window_delimiters[number_of_bins_d - 1] + embedding_step_size)
return window_delimiters
def get_median_number_of_spikes_per_bin(raw_symbols):
"""
Given raw symbols (in which the number of spikes per bin are counted,
ie not necessarily binary quantity), get the median number of spikes
for each bin, among all symbols obtained by the embedding.
"""
# number_of_bins here is number_of_bins_d + 1,
# as it here includes not only the bins of the embedding but also the response
number_of_bins = len(raw_symbols[0])
spike_counts_per_bin = [[] for i in range(number_of_bins)]
for raw_symbol in raw_symbols:
for i in range(number_of_bins):
spike_counts_per_bin[i] += [raw_symbol[i]]
return [np.median(spike_counts_per_bin[i]) for i in range(number_of_bins)]
def symbol_binary_to_array(symbol_binary, number_of_bins_d):
"""
Given a binary representation of a symbol (cf symbol_array_to_binary),
convert it back into its array-representation.
"""
# assert 2 ** number_of_bins_d > symbol_binary
spikes_in_window = np.zeros(number_of_bins_d)
for i in range(0, number_of_bins_d):
b = 2 ** (number_of_bins_d - 1 - i)
if b <= symbol_binary:
spikes_in_window[i] = 1
symbol_binary -= b
return spikes_in_window
def symbol_array_to_binary(spikes_in_window, number_of_bins_d):
"""
Given an array of 1s and 0s, representing spikes and the absence
thereof, read the array as a binary number to obtain a
(base 10) integer.
"""
# assert len(spikes_in_window) == number_of_bins_d
# TODO check if it makes sense to use len(spikes_in_window)
# directly, to avoid mismatch as well as confusion
# as number_of_bins_d here can also be number_of_bins
# as in get_median_number_of_spikes_per_bin, ie
# including the response
return sum([2 ** (number_of_bins_d - i - 1) * spikes_in_window[i]
for i in range(0, number_of_bins_d)])
def get_raw_symbols(spike_times,
embedding,
first_bin_size,
embedding_step_size):
"""
Get the raw symbols (in which the number of spikes per bin are counted,
ie not necessarily binary quantity), as obtained by applying the
embedding.
"""
past_range_T, number_of_bins_d, scaling_k = embedding
# the window is the embedding plus the response,
# ie the embedding and one additional bin of size embedding_step_size
window_delimiters = get_window_delimiters(number_of_bins_d,
scaling_k,
first_bin_size,
embedding_step_size)
window_length = window_delimiters[-1]
num_spike_times = len(spike_times)
last_spike_time = spike_times[-1]
raw_symbols = []
spike_index_lo = 0
# for time in np.arange(0, int(last_spike_time - window_length), embedding_step_size):
for time in np.arange(0, last_spike_time - window_length, embedding_step_size):
while(spike_index_lo < num_spike_times and spike_times[spike_index_lo] < time):
spike_index_lo += 1
spike_index_hi = spike_index_lo
while(spike_index_hi < num_spike_times and
spike_times[spike_index_hi] < time + window_length):
spike_index_hi += 1
spikes_in_window = np.zeros(number_of_bins_d + 1)
embedding_bin_index = 0
for spike_index in range(spike_index_lo, spike_index_hi):
while(spike_times[spike_index] > time + window_delimiters[embedding_bin_index]):
embedding_bin_index += 1
spikes_in_window[embedding_bin_index] += 1
raw_symbols += [spikes_in_window]
return raw_symbols
def get_symbol_counts(spike_times, embedding, embedding_step_size):
"""
Apply embedding to the spike times to obtain the symbol counts.
"""
if FAST_EMBEDDING_AVAILABLE:
return Counter(fast_emb.get_symbol_counts(spike_times, embedding, embedding_step_size))
past_range_T, number_of_bins_d, scaling_k = embedding
first_bin_size = get_fist_bin_size_for_embedding(embedding)
raw_symbols = get_raw_symbols(spike_times,
embedding,
first_bin_size,
embedding_step_size)
median_number_of_spikes_per_bin = get_median_number_of_spikes_per_bin(raw_symbols)
symbol_counts = Counter()
for raw_symbol in raw_symbols:
symbol_array = [int(raw_symbol[i] > median_number_of_spikes_per_bin[i])
for i in range(number_of_bins_d + 1)]
symbol = symbol_array_to_binary(symbol_array, number_of_bins_d + 1)
symbol_counts[symbol] += 1
return symbol_counts
| 39.074074 | 110 | 0.632965 | import numpy as np
from scipy.optimize import newton
from collections import Counter
from sys import stderr, exit
FAST_EMBEDDING_AVAILABLE = True
try:
import hde_fast_embedding as fast_emb
except:
FAST_EMBEDDING_AVAILABLE = False
print("""
Error importing Cython fast embedding module. Continuing with slow Python implementation.\n
This may take a long time.\n
""", file=stderr, flush=True)
def get_set_of_scalings(past_range_T,
number_of_bins_d,
number_of_scalings,
min_first_bin_size,
min_step_for_scaling):
"""
Get scaling exponents such that the uniform embedding as well as
the embedding for which the first bin has a length of
min_first_bin_size (in seconds), as well as linearly spaced
scaling factors in between, such that in total
number_of_scalings scalings are obtained.
"""
min_scaling = 0
if past_range_T / number_of_bins_d <= min_first_bin_size or number_of_bins_d == 1:
max_scaling = 0
else:
# for the initial guess assume the largest bin dominates, so k is approx. log(T) / d
max_scaling = newton(lambda scaling: get_past_range(number_of_bins_d,
min_first_bin_size,
scaling)
- past_range_T,
np.log10(past_range_T
/ min_first_bin_size) / (number_of_bins_d - 1),
tol = 1e-04, maxiter = 500)
while np.linspace(min_scaling, max_scaling,
number_of_scalings, retstep = True)[1] < min_step_for_scaling:
number_of_scalings -= 1
return np.linspace(min_scaling, max_scaling, number_of_scalings)
def get_embeddings(embedding_past_range_set,
embedding_number_of_bins_set,
embedding_scaling_exponent_set):
"""
Get all combinations of parameters T, d, k, based on the
sets of selected parameters.
"""
embeddings = []
for past_range_T in embedding_past_range_set:
for number_of_bins_d in embedding_number_of_bins_set:
if not isinstance(number_of_bins_d, int) or number_of_bins_d < 1:
print("Error: numer of bins {} is not a positive integer. Skipping.".format(number_of_bins_d),
file=stderr, flush=True)
continue
if type(embedding_scaling_exponent_set) == dict:
scaling_set_given_T_and_d = get_set_of_scalings(past_range_T,
number_of_bins_d,
**embedding_scaling_exponent_set)
else:
scaling_set_given_T_and_d = embedding_scaling_exponent_set
for scaling_k in scaling_set_given_T_and_d:
embeddings += [(past_range_T, number_of_bins_d, scaling_k)]
return embeddings
def get_fist_bin_size_for_embedding(embedding):
"""
Get size of first bin for the embedding, based on the parameters
T, d and k.
"""
past_range_T, number_of_bins_d, scaling_k = embedding
return newton(lambda first_bin_size: get_past_range(number_of_bins_d,
first_bin_size,
scaling_k) - past_range_T,
0.005, tol = 1e-03, maxiter = 100)
def get_past_range(number_of_bins_d, first_bin_size, scaling_k):
"""
Get the past range T of the embedding, based on the parameters d, tau_1 and k.
"""
return np.sum([first_bin_size * 10**((number_of_bins_d - i) * scaling_k)
for i in range(1, number_of_bins_d + 1)])
def get_window_delimiters(number_of_bins_d, scaling_k, first_bin_size, embedding_step_size):
"""
Get delimiters of the window, used to describe the embedding. The
window includes both the past embedding and the response.
The delimiters are times, relative to the first bin, that separate
two consequent bins.
"""
bin_sizes = [first_bin_size * 10**((number_of_bins_d - i) * scaling_k)
for i in range(1, number_of_bins_d + 1)]
window_delimiters = [sum([bin_sizes[j] for j in range(i)])
for i in range(1, number_of_bins_d + 1)]
window_delimiters.append(window_delimiters[number_of_bins_d - 1] + embedding_step_size)
return window_delimiters
def get_median_number_of_spikes_per_bin(raw_symbols):
"""
Given raw symbols (in which the number of spikes per bin are counted,
ie not necessarily binary quantity), get the median number of spikes
for each bin, among all symbols obtained by the embedding.
"""
# number_of_bins here is number_of_bins_d + 1,
# as it here includes not only the bins of the embedding but also the response
number_of_bins = len(raw_symbols[0])
spike_counts_per_bin = [[] for i in range(number_of_bins)]
for raw_symbol in raw_symbols:
for i in range(number_of_bins):
spike_counts_per_bin[i] += [raw_symbol[i]]
return [np.median(spike_counts_per_bin[i]) for i in range(number_of_bins)]
def symbol_binary_to_array(symbol_binary, number_of_bins_d):
"""
Given a binary representation of a symbol (cf symbol_array_to_binary),
convert it back into its array-representation.
"""
# assert 2 ** number_of_bins_d > symbol_binary
spikes_in_window = np.zeros(number_of_bins_d)
for i in range(0, number_of_bins_d):
b = 2 ** (number_of_bins_d - 1 - i)
if b <= symbol_binary:
spikes_in_window[i] = 1
symbol_binary -= b
return spikes_in_window
def symbol_array_to_binary(spikes_in_window, number_of_bins_d):
"""
Given an array of 1s and 0s, representing spikes and the absence
thereof, read the array as a binary number to obtain a
(base 10) integer.
"""
# assert len(spikes_in_window) == number_of_bins_d
# TODO check if it makes sense to use len(spikes_in_window)
# directly, to avoid mismatch as well as confusion
# as number_of_bins_d here can also be number_of_bins
# as in get_median_number_of_spikes_per_bin, ie
# including the response
return sum([2 ** (number_of_bins_d - i - 1) * spikes_in_window[i]
for i in range(0, number_of_bins_d)])
def get_raw_symbols(spike_times,
embedding,
first_bin_size,
embedding_step_size):
"""
Get the raw symbols (in which the number of spikes per bin are counted,
ie not necessarily binary quantity), as obtained by applying the
embedding.
"""
past_range_T, number_of_bins_d, scaling_k = embedding
# the window is the embedding plus the response,
# ie the embedding and one additional bin of size embedding_step_size
window_delimiters = get_window_delimiters(number_of_bins_d,
scaling_k,
first_bin_size,
embedding_step_size)
window_length = window_delimiters[-1]
num_spike_times = len(spike_times)
last_spike_time = spike_times[-1]
raw_symbols = []
spike_index_lo = 0
# for time in np.arange(0, int(last_spike_time - window_length), embedding_step_size):
for time in np.arange(0, last_spike_time - window_length, embedding_step_size):
while(spike_index_lo < num_spike_times and spike_times[spike_index_lo] < time):
spike_index_lo += 1
spike_index_hi = spike_index_lo
while(spike_index_hi < num_spike_times and
spike_times[spike_index_hi] < time + window_length):
spike_index_hi += 1
spikes_in_window = np.zeros(number_of_bins_d + 1)
embedding_bin_index = 0
for spike_index in range(spike_index_lo, spike_index_hi):
while(spike_times[spike_index] > time + window_delimiters[embedding_bin_index]):
embedding_bin_index += 1
spikes_in_window[embedding_bin_index] += 1
raw_symbols += [spikes_in_window]
return raw_symbols
def get_symbol_counts(spike_times, embedding, embedding_step_size):
"""
Apply embedding to the spike times to obtain the symbol counts.
"""
if FAST_EMBEDDING_AVAILABLE:
return Counter(fast_emb.get_symbol_counts(spike_times, embedding, embedding_step_size))
past_range_T, number_of_bins_d, scaling_k = embedding
first_bin_size = get_fist_bin_size_for_embedding(embedding)
raw_symbols = get_raw_symbols(spike_times,
embedding,
first_bin_size,
embedding_step_size)
median_number_of_spikes_per_bin = get_median_number_of_spikes_per_bin(raw_symbols)
symbol_counts = Counter()
for raw_symbol in raw_symbols:
symbol_array = [int(raw_symbol[i] > median_number_of_spikes_per_bin[i])
for i in range(number_of_bins_d + 1)]
symbol = symbol_array_to_binary(symbol_array, number_of_bins_d + 1)
symbol_counts[symbol] += 1
return symbol_counts
| 0 | 0 | 0 |
52a1ac56f1cbfd032e3cfa9dda9ff6117b366817 | 1,761 | py | Python | unit_tests/test_cloud_list.py | hep-gc/cloud-scheduler-2 | 180d9dc4f8751cf8c8254518e46f83f118187e84 | [
"Apache-2.0"
] | 3 | 2020-03-03T03:25:36.000Z | 2021-12-03T15:31:39.000Z | unit_tests/test_cloud_list.py | hep-gc/cloud-scheduler-2 | 180d9dc4f8751cf8c8254518e46f83f118187e84 | [
"Apache-2.0"
] | 341 | 2017-06-08T17:27:59.000Z | 2022-01-28T19:37:57.000Z | unit_tests/test_cloud_list.py | hep-gc/cloud-scheduler-2 | 180d9dc4f8751cf8c8254518e46f83f118187e84 | [
"Apache-2.0"
] | 3 | 2018-04-25T16:13:20.000Z | 2020-04-15T20:03:46.000Z | from unit_test_common import execute_csv2_request, initialize_csv2_request, ut_id, sanity_requests
from sys import argv
# lno: CV - error code identifier.
if __name__ == "__main__":
main(None)
| 32.611111 | 124 | 0.585463 | from unit_test_common import execute_csv2_request, initialize_csv2_request, ut_id, sanity_requests
from sys import argv
# lno: CV - error code identifier.
def main(gvar):
if not gvar:
gvar = {}
if len(argv) > 1:
initialize_csv2_request(gvar, selections=argv[1])
else:
initialize_csv2_request(gvar)
# 01 - 05
sanity_requests(gvar, '/cloud/list', ut_id(gvar, 'ctg1'), ut_id(gvar, 'ctu1'), ut_id(gvar, 'ctg2'), ut_id(gvar, 'ctu2'))
# 06
execute_csv2_request(
gvar, 0, None, None,
'/cloud/list/', group=ut_id(gvar, 'ctg1'),
expected_list='cloud_list', list_filter={'group_name': ut_id(gvar, 'ctg1'), 'cloud_name': ut_id(gvar, 'ctc2')},
values={
'authurl': gvar['cloud_credentials']['authurl'],
'username': gvar['cloud_credentials']['username'],
'project': gvar['cloud_credentials']['project'],
'region': gvar['cloud_credentials']['region'],
'cloud_type': 'openstack',
'cloud_priority': 0,
'cacertificate': None,
'user_domain_name': 'Default',
'project_domain_name': 'Default',
},
server_user=ut_id(gvar, 'ctu1')
)
# 07
execute_csv2_request(
gvar, 1, 'CV', 'request contained a bad parameter "invalid-unit-test".',
'/cloud/list/', group=(ut_id(gvar, 'ctg1')),
form_data={'invalid-unit-test': 'invalid-unit-test'},
server_user=ut_id(gvar, 'ctu1')
)
# 08
execute_csv2_request(
gvar, 0, None, None,
'/cloud/list/', group=ut_id(gvar, 'ctg1'),
expected_list='cloud_list',
server_user=ut_id(gvar, 'ctu1')
)
if __name__ == "__main__":
main(None)
| 1,539 | 0 | 23 |
df58365c283d8224f524b149e55cde0468310484 | 3,124 | py | Python | Project Euler Qusetions 61 - 70/Project Euler Question 61.py | Clayton-Threm/Coding-Practice | 6671e8a15f9e797338caa617dae45093f4157bc1 | [
"MIT"
] | 1 | 2020-02-11T02:03:02.000Z | 2020-02-11T02:03:02.000Z | Project Euler Qusetions 61 - 70/Project Euler Question 61.py | Clayton-Threm/Coding-Practice | 6671e8a15f9e797338caa617dae45093f4157bc1 | [
"MIT"
] | null | null | null | Project Euler Qusetions 61 - 70/Project Euler Question 61.py | Clayton-Threm/Coding-Practice | 6671e8a15f9e797338caa617dae45093f4157bc1 | [
"MIT"
] | null | null | null | #Project Euler Question 61
#Cyclical figurate numbers
oct_list = []
hept_list = []
hex_list = []
pent_list = []
squ_list = []
tri_list = []
n = 0
while True:
n += 1
oct_x = octagonal(n)
hept_x = heptagonal(n)
hex_x = hexagonal(n)
pent_x = pentagonal(n)
squ_x = sqaure(n)
tri_x = triangle(n)
if 10000 > oct_x >= 1000:
oct_list.append(oct_x)
if 10000 > hept_x >= 1000:
hept_list.append(hept_x)
if 10000 > hex_x >= 1000:
hex_list.append(hex_x)
if 10000 > pent_x >= 1000:
pent_list.append(pent_x)
if 10000 > squ_x >= 1000:
squ_list.append(squ_x)
if 10000 > tri_x >= 1000:
tri_list.append(tri_x)
elif oct_x >= 10000:
break
all_list = [hept_list, hex_list, pent_list, squ_list, tri_list]
print (cycle_numbers()) | 31.24 | 74 | 0.483675 | #Project Euler Question 61
#Cyclical figurate numbers
def octagonal(n):
return (n * ((3 * n) - 2))
def heptagonal(n):
return int(n * ((5 * n) - 3) / 2)
def hexagonal(n):
return (n * ((2 * n) - 1))
def pentagonal(n):
return int(n * ((3 * n) - 1) / 2)
def sqaure(n):
return (n ** 2)
def triangle(n):
return int(n * (n + 1) / 2)
oct_list = []
hept_list = []
hex_list = []
pent_list = []
squ_list = []
tri_list = []
n = 0
while True:
n += 1
oct_x = octagonal(n)
hept_x = heptagonal(n)
hex_x = hexagonal(n)
pent_x = pentagonal(n)
squ_x = sqaure(n)
tri_x = triangle(n)
if 10000 > oct_x >= 1000:
oct_list.append(oct_x)
if 10000 > hept_x >= 1000:
hept_list.append(hept_x)
if 10000 > hex_x >= 1000:
hex_list.append(hex_x)
if 10000 > pent_x >= 1000:
pent_list.append(pent_x)
if 10000 > squ_x >= 1000:
squ_list.append(squ_x)
if 10000 > tri_x >= 1000:
tri_list.append(tri_x)
elif oct_x >= 10000:
break
all_list = [hept_list, hex_list, pent_list, squ_list, tri_list]
def cycle_numbers():
index_dict = {0: True, 1: True, 2: True, 3: True, 4: True}
for oct_number in oct_list:
cycle_list = {oct_number: 5}
cycle_list_keys = list(cycle_list.keys())
ignore_list = []
check = int(str(oct_number)[2:])
og_check = int(str(oct_number)[0:2])
index = -1
counter = 0
while True:
index += 1
if index > 4:
index = 0
if index_dict[index] == False:
continue
for num in all_list[index]:
if num in ignore_list:
continue
if num in list(cycle_list.keys()):
continue
check_2 = int(str(num)[0:2])
if check_2 == check:
counter = 0
new_term = num
cycle_list[new_term] = index
cycle_list_keys = list(cycle_list.keys())
check = int(str(num)[2:])
index_dict[index] = False
if len(cycle_list) == 6:
if og_check == check:
return sum(cycle_list_keys)
else:
ignore_list.append(cycle_list_keys[-1])
index_dict[index] = True
del cycle_list[cycle_list_keys[-1]]
cycle_list_keys = list(cycle_list.keys())
break
else:
counter += 1
if counter == 5:
counter = 0
ignore_list.append(cycle_list_keys[-1])
index_dict[cycle_list.get(cycle_list_keys[-1])] = True
cycle_list.popitem()
if len(cycle_list) == 0:
break
cycle_list_keys = list(cycle_list.keys())
check = int(str(cycle_list_keys[-1])[2:])
print (cycle_numbers()) | 2,147 | 0 | 156 |
1daa9a7a637b9bb6c0fd08dd5be07a7d2d6725d8 | 365 | py | Python | hood/migrations/0007_rename_neighbourhood_business_neighborhood.py | clarametto/Neighbor-Hood | 8f3518ccff899b2eeb082f068ed225038366392d | [
"Unlicense"
] | 1 | 2022-01-08T17:27:49.000Z | 2022-01-08T17:27:49.000Z | hood/migrations/0007_rename_neighbourhood_business_neighborhood.py | clarametto/Neighbor-Hood | 8f3518ccff899b2eeb082f068ed225038366392d | [
"Unlicense"
] | null | null | null | hood/migrations/0007_rename_neighbourhood_business_neighborhood.py | clarametto/Neighbor-Hood | 8f3518ccff899b2eeb082f068ed225038366392d | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2.7 on 2022-01-10 12:12
from django.db import migrations
| 19.210526 | 47 | 0.586301 | # Generated by Django 3.2.7 on 2022-01-10 12:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hood', '0006_business'),
]
operations = [
migrations.RenameField(
model_name='business',
old_name='neighbourhood',
new_name='neighborhood',
),
]
| 0 | 259 | 23 |
240bab2a9de5edc70c1d95c2f4748e52aa0da751 | 847 | py | Python | code/download_bigbird_dataset.py | jbboin/fisher_vector_aggregation_3d | ba07b7cc90b0490f626189afa45fdc437a255ded | [
"MIT"
] | null | null | null | code/download_bigbird_dataset.py | jbboin/fisher_vector_aggregation_3d | ba07b7cc90b0490f626189afa45fdc437a255ded | [
"MIT"
] | null | null | null | code/download_bigbird_dataset.py | jbboin/fisher_vector_aggregation_3d | ba07b7cc90b0490f626189afa45fdc437a255ded | [
"MIT"
] | null | null | null | import config
import requests, bs4, urllib, os
dest_dir = os.path.join(config.DATASET_DIR, 'model_zip')
queries_dir = os.path.join(config.DATASET_DIR, 'queries_zip')
url = 'http://rll.berkeley.edu/bigbird/aliases/a47741b172/'
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(queries_dir):
os.makedirs(queries_dir)
page = requests.get(url)
soup = bs4.BeautifulSoup(page.content, 'html.parser')
download_links = [x.get('href') for x in soup.find_all('a') if x.get_text() == 'High res (.tgz)']
for d in download_links:
urllib.urlretrieve(url + d, os.path.join(dest_dir, d.split('/')[-2] + '.tgz'))
download_links = [x.get('href') for x in soup.find_all('a') if x.get_text() == 'RGB-D (.tgz)']
for d in download_links:
urllib.urlretrieve(url + d, os.path.join(queries_dir, d.split('/')[-2] + '.tgz'))
| 35.291667 | 97 | 0.694215 | import config
import requests, bs4, urllib, os
dest_dir = os.path.join(config.DATASET_DIR, 'model_zip')
queries_dir = os.path.join(config.DATASET_DIR, 'queries_zip')
url = 'http://rll.berkeley.edu/bigbird/aliases/a47741b172/'
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(queries_dir):
os.makedirs(queries_dir)
page = requests.get(url)
soup = bs4.BeautifulSoup(page.content, 'html.parser')
download_links = [x.get('href') for x in soup.find_all('a') if x.get_text() == 'High res (.tgz)']
for d in download_links:
urllib.urlretrieve(url + d, os.path.join(dest_dir, d.split('/')[-2] + '.tgz'))
download_links = [x.get('href') for x in soup.find_all('a') if x.get_text() == 'RGB-D (.tgz)']
for d in download_links:
urllib.urlretrieve(url + d, os.path.join(queries_dir, d.split('/')[-2] + '.tgz'))
| 0 | 0 | 0 |
ad14a6f9854f3e727e888563bd3c73c8f5b01e14 | 879 | py | Python | capsnet/utils.py | gsarti/cancer-detection | a858a7c28e77da9ded9cdf6eb9abc5771183d848 | [
"MIT"
] | 7 | 2019-05-21T15:56:07.000Z | 2021-11-02T09:07:20.000Z | capsnet/utils.py | noorahmad76155/cancer-detection | a858a7c28e77da9ded9cdf6eb9abc5771183d848 | [
"MIT"
] | null | null | null | capsnet/utils.py | noorahmad76155/cancer-detection | a858a7c28e77da9ded9cdf6eb9abc5771183d848 | [
"MIT"
] | 4 | 2020-03-07T00:34:19.000Z | 2022-01-27T20:12:47.000Z | import numpy as np
from matplotlib import pyplot as plt
import csv
import math
import pandas
if __name__=="__main__":
plot_log('result/log.csv')
| 23.756757 | 79 | 0.626849 | import numpy as np
from matplotlib import pyplot as plt
import csv
import math
import pandas
def plot_log(filename, show=True):
data = pandas.read_csv(filename)
fig = plt.figure(figsize=(4,6))
fig.subplots_adjust(top=0.95, bottom=0.05, right=0.95)
fig.add_subplot(211)
for key in data.keys():
if key.find('loss') >= 0 and not key.find('val') >= 0: # training loss
plt.plot(data['epoch'].values, data[key].values, label=key)
plt.legend()
plt.title('Training loss')
fig.add_subplot(212)
for key in data.keys():
if key.find('acc') >= 0: # acc
plt.plot(data['epoch'].values, data[key].values, label=key)
plt.legend()
plt.title('Training and validation accuracy')
# fig.savefig('result/log.png')
if show:
plt.show()
if __name__=="__main__":
plot_log('result/log.csv')
| 702 | 0 | 23 |
5876bcfd2066aa61b5cad32f8f625a822fb7652c | 8,906 | py | Python | homeassistant/components/onewire/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/onewire/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/onewire/config_flow.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Config flow for 1-Wire component."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.device_registry import DeviceRegistry
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEVICE_SUPPORT_OPTIONS,
DOMAIN,
INPUT_ENTRY_CLEAR_OPTIONS,
INPUT_ENTRY_DEVICE_SELECTION,
OPTION_ENTRY_DEVICE_OPTIONS,
OPTION_ENTRY_SENSOR_PRECISION,
PRECISION_MAPPING_FAMILY_28,
)
from .model import OWDeviceDescription
from .onewirehub import CannotConnect, OneWireHub
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
)
async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, str]:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
hub = OneWireHub(hass)
host = data[CONF_HOST]
port = data[CONF_PORT]
# Raises CannotConnect exception on failure
await hub.connect(host, port)
# Return info that you want to store in the config entry.
return {"title": host}
class OneWireFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle 1-Wire config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize 1-Wire config flow."""
self.onewire_config: dict[str, Any] = {}
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle 1-Wire config flow start.
Let user manually input configuration.
"""
errors: dict[str, str] = {}
if user_input:
# Prevent duplicate entries
self._async_abort_entries_match(
{
CONF_HOST: user_input[CONF_HOST],
CONF_PORT: user_input[CONF_PORT],
}
)
self.onewire_config.update(user_input)
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
else:
return self.async_create_entry(
title=info["title"], data=self.onewire_config
)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors=errors,
)
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
"""Get the options flow for this handler."""
return OnewireOptionsFlowHandler(config_entry)
class OnewireOptionsFlowHandler(OptionsFlow):
"""Handle OneWire Config options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize OneWire Network options flow."""
self.entry_id = config_entry.entry_id
self.options = dict(config_entry.options)
self.configurable_devices: dict[str, OWDeviceDescription] = {}
self.devices_to_configure: dict[str, OWDeviceDescription] = {}
self.current_device: str = ""
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
controller: OneWireHub = self.hass.data[DOMAIN][self.entry_id]
all_devices: list[OWDeviceDescription] = controller.devices # type: ignore[assignment]
if not all_devices:
return self.async_abort(reason="No configurable devices found.")
device_registry = dr.async_get(self.hass)
self.configurable_devices = {
self._get_device_long_name(device_registry, device.id): device
for device in all_devices
if device.family in DEVICE_SUPPORT_OPTIONS
}
return await self.async_step_device_selection(user_input=None)
async def async_step_device_selection(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Select what devices to configure."""
errors = {}
if user_input is not None:
if user_input.get(INPUT_ENTRY_CLEAR_OPTIONS):
# Reset all options
self.options = {}
return self._async_update_options()
selected_devices: list[str] = (
user_input.get(INPUT_ENTRY_DEVICE_SELECTION) or []
)
if selected_devices:
self.devices_to_configure = {
device_name: self.configurable_devices[device_name]
for device_name in selected_devices
}
return await self.async_step_configure_device(user_input=None)
errors["base"] = "device_not_selected"
return self.async_show_form(
step_id="device_selection",
data_schema=vol.Schema(
{
vol.Optional(
INPUT_ENTRY_CLEAR_OPTIONS,
default=False,
): bool,
vol.Optional(
INPUT_ENTRY_DEVICE_SELECTION,
default=self._get_current_configured_sensors(),
description="Multiselect with list of devices to choose from",
): cv.multi_select(
{device: False for device in self.configurable_devices}
),
}
),
errors=errors,
)
async def async_step_configure_device(
    self, user_input: dict[str, Any] | None = None
) -> FlowResult:
    """Config precision option for device."""
    if user_input is not None:
        # Persist the answer for the device shown last, then either show
        # the next queued device or commit all collected options.
        self._update_device_options(user_input)
        if self.devices_to_configure:
            return await self.async_step_configure_device(user_input=None)
        return self._async_update_options()
    # Take the next device off the queue and build its form.
    self.current_device, description = self.devices_to_configure.popitem()
    data_schema = vol.Schema(
        {
            vol.Required(
                OPTION_ENTRY_SENSOR_PRECISION,
                # Fall back to "temperature" when nothing is stored yet.
                default=self._get_current_setting(
                    description.id, OPTION_ENTRY_SENSOR_PRECISION, "temperature"
                ),
            ): vol.In(PRECISION_MAPPING_FAMILY_28),
        }
    )
    return self.async_show_form(
        step_id="configure_device",
        data_schema=data_schema,
        description_placeholders={"sensor_id": self.current_device},
    )
@callback
def _async_update_options(self) -> FlowResult:
    """Update config entry options."""
    # In an options flow, creating an "entry" persists self.options.
    return self.async_create_entry(title="", data=self.options)
def _get_current_configured_sensors(self) -> list[str]:
    """Return display names of devices that already have stored options.

    Bug fix: this method was wrongly decorated with ``@staticmethod`` while
    still declaring ``self``. A staticmethod is not bound on instance
    access, so ``self._get_current_configured_sensors()`` called the raw
    function with zero arguments and raised TypeError. It reads instance
    state (``self.options``, ``self.configurable_devices``), so it must be
    a regular method.
    """
    configured_sensors = self.options.get(OPTION_ENTRY_DEVICE_OPTIONS)
    if not configured_sensors:
        return []
    # Keep only devices whose stored id appears in the saved options.
    return [
        device_name
        for device_name, description in self.configurable_devices.items()
        if description.id in configured_sensors
    ]
def _get_current_setting(self, device_id: str, setting: str, default: Any) -> Any:
    """Return the stored value of *setting* for *device_id*, else *default*.

    Note: when the device has stored options but this particular setting
    is missing, the result is None (plain dict.get), not *default*.
    """
    entry_device_options = self.options.get(OPTION_ENTRY_DEVICE_OPTIONS)
    if not entry_device_options:
        return default
    device_options = entry_device_options.get(device_id)
    if not device_options:
        return default
    return device_options.get(setting)
def _update_device_options(self, user_input: dict[str, Any]) -> None:
    """Update the global config with the new options for the current device."""
    # setdefault keeps any options already collected for other devices.
    options: dict[str, dict[str, Any]] = self.options.setdefault(
        OPTION_ENTRY_DEVICE_OPTIONS, {}
    )
    description = self.configurable_devices[self.current_device]
    device_options: dict[str, Any] = options.setdefault(description.id, {})
    # Only family "28" devices expose a precision option.
    if description.family == "28":
        device_options[OPTION_ENTRY_SENSOR_PRECISION] = user_input[
            OPTION_ENTRY_SENSOR_PRECISION
        ]
    self.options.update({OPTION_ENTRY_DEVICE_OPTIONS: options})
| 35.624 | 95 | 0.625196 | """Config flow for 1-Wire component."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.device_registry import DeviceRegistry
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEVICE_SUPPORT_OPTIONS,
DOMAIN,
INPUT_ENTRY_CLEAR_OPTIONS,
INPUT_ENTRY_DEVICE_SELECTION,
OPTION_ENTRY_DEVICE_OPTIONS,
OPTION_ENTRY_SENSOR_PRECISION,
PRECISION_MAPPING_FAMILY_28,
)
from .model import OWDeviceDescription
from .onewirehub import CannotConnect, OneWireHub
# Schema for the manual "user" step: owserver host and port.
DATA_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST, default=DEFAULT_HOST): str,
        vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
    }
)
async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, str]:
    """Check that *data* (host/port per DATA_SCHEMA) reaches an owserver.

    Raises CannotConnect when the server is unreachable; otherwise returns
    the info to store in the config entry (its title).
    """
    host = data[CONF_HOST]
    port = data[CONF_PORT]
    # A failed connection surfaces as CannotConnect from the hub.
    hub = OneWireHub(hass)
    await hub.connect(host, port)
    return {"title": host}
class OneWireFlowHandler(ConfigFlow, domain=DOMAIN):
    """Handle 1-Wire config flow."""

    VERSION = 1

    def __init__(self) -> None:
        """Initialize 1-Wire config flow."""
        # Accumulates validated user input across steps.
        self.onewire_config: dict[str, Any] = {}

    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle 1-Wire config flow start.

        Let user manually input configuration.
        """
        errors: dict[str, str] = {}
        if user_input:
            # Prevent duplicate entries
            self._async_abort_entries_match(
                {
                    CONF_HOST: user_input[CONF_HOST],
                    CONF_PORT: user_input[CONF_PORT],
                }
            )
            self.onewire_config.update(user_input)
            try:
                # Raises CannotConnect when the server is unreachable.
                info = await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            else:
                return self.async_create_entry(
                    title=info["title"], data=self.onewire_config
                )
        # First visit, or a connection error: (re-)show the form.
        return self.async_show_form(
            step_id="user",
            data_schema=DATA_SCHEMA,
            errors=errors,
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
        """Get the options flow for this handler."""
        return OnewireOptionsFlowHandler(config_entry)
class OnewireOptionsFlowHandler(OptionsFlow):
    """Handle OneWire Config options."""

    def __init__(self, config_entry: ConfigEntry) -> None:
        """Initialize OneWire Network options flow."""
        self.entry_id = config_entry.entry_id
        # Mutable copy of the stored options; committed at the end of the flow.
        self.options = dict(config_entry.options)
        # Devices eligible for options, and the queue of devices picked by
        # the user, keyed by display name.
        self.configurable_devices: dict[str, OWDeviceDescription] = {}
        self.devices_to_configure: dict[str, OWDeviceDescription] = {}
        # Device currently being configured.
        self.current_device: str = ""

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Manage the options."""
        # The hub created at entry-setup time holds the live device list.
        controller: OneWireHub = self.hass.data[DOMAIN][self.entry_id]
        all_devices: list[OWDeviceDescription] = controller.devices  # type: ignore[assignment]
        if not all_devices:
            return self.async_abort(reason="No configurable devices found.")
        device_registry = dr.async_get(self.hass)
        # Map a display name to each device whose family supports options.
        self.configurable_devices = {
            self._get_device_long_name(device_registry, device.id): device
            for device in all_devices
            if device.family in DEVICE_SUPPORT_OPTIONS
        }
        return await self.async_step_device_selection(user_input=None)

    async def async_step_device_selection(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Select what devices to configure."""
        errors = {}
        if user_input is not None:
            if user_input.get(INPUT_ENTRY_CLEAR_OPTIONS):
                # Reset all options
                self.options = {}
                return self._async_update_options()
            selected_devices: list[str] = (
                user_input.get(INPUT_ENTRY_DEVICE_SELECTION) or []
            )
            if selected_devices:
                # Queue the chosen devices for per-device configuration.
                self.devices_to_configure = {
                    device_name: self.configurable_devices[device_name]
                    for device_name in selected_devices
                }
                return await self.async_step_configure_device(user_input=None)
            errors["base"] = "device_not_selected"
        return self.async_show_form(
            step_id="device_selection",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        INPUT_ENTRY_CLEAR_OPTIONS,
                        default=False,
                    ): bool,
                    vol.Optional(
                        INPUT_ENTRY_DEVICE_SELECTION,
                        # Pre-select devices that already have stored options.
                        default=self._get_current_configured_sensors(),
                        description="Multiselect with list of devices to choose from",
                    ): cv.multi_select(
                        {device: False for device in self.configurable_devices}
                    ),
                }
            ),
            errors=errors,
        )

    async def async_step_configure_device(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Config precision option for device."""
        if user_input is not None:
            # Persist the answer, then show the next device or commit.
            self._update_device_options(user_input)
            if self.devices_to_configure:
                return await self.async_step_configure_device(user_input=None)
            return self._async_update_options()
        self.current_device, description = self.devices_to_configure.popitem()
        data_schema = vol.Schema(
            {
                vol.Required(
                    OPTION_ENTRY_SENSOR_PRECISION,
                    default=self._get_current_setting(
                        description.id, OPTION_ENTRY_SENSOR_PRECISION, "temperature"
                    ),
                ): vol.In(PRECISION_MAPPING_FAMILY_28),
            }
        )
        return self.async_show_form(
            step_id="configure_device",
            data_schema=data_schema,
            description_placeholders={"sensor_id": self.current_device},
        )

    @callback
    def _async_update_options(self) -> FlowResult:
        """Update config entry options."""
        # In an options flow, creating an "entry" persists self.options.
        return self.async_create_entry(title="", data=self.options)

    @staticmethod
    def _get_device_long_name(
        device_registry: DeviceRegistry, current_device: str
    ) -> str:
        """Return the user-assigned device name (with id) or the raw id."""
        device = device_registry.async_get_device({(DOMAIN, current_device)})
        if device and device.name_by_user:
            return f"{device.name_by_user} ({current_device})"
        return current_device

    def _get_current_configured_sensors(self) -> list[str]:
        """Get current list of sensors that are configured."""
        configured_sensors = self.options.get(OPTION_ENTRY_DEVICE_OPTIONS)
        if not configured_sensors:
            return []
        # Keep only devices whose stored id appears in the saved options.
        return [
            device_name
            for device_name, description in self.configurable_devices.items()
            if description.id in configured_sensors
        ]

    def _get_current_setting(self, device_id: str, setting: str, default: Any) -> Any:
        """Get current value for setting."""
        # NOTE: if the device has stored options but this setting is absent,
        # this returns None (dict.get), not *default*.
        if entry_device_options := self.options.get(OPTION_ENTRY_DEVICE_OPTIONS):
            if device_options := entry_device_options.get(device_id):
                return device_options.get(setting)
        return default

    def _update_device_options(self, user_input: dict[str, Any]) -> None:
        """Update the global config with the new options for the current device."""
        options: dict[str, dict[str, Any]] = self.options.setdefault(
            OPTION_ENTRY_DEVICE_OPTIONS, {}
        )
        description = self.configurable_devices[self.current_device]
        device_options: dict[str, Any] = options.setdefault(description.id, {})
        # Only family "28" devices expose a precision option.
        if description.family == "28":
            device_options[OPTION_ENTRY_SENSOR_PRECISION] = user_input[
                OPTION_ENTRY_SENSOR_PRECISION
            ]
        self.options.update({OPTION_ENTRY_DEVICE_OPTIONS: options})
| 294 | 0 | 26 |
2e5976138e0bb24b7f8d11e79a9cb0aee0a59df2 | 8,038 | py | Python | MRFNETgray/model.py | BiolabHHU/Image-denoising-with-MRFNet | 79420d707058de0ac04522d499adef79b5f6fc6e | [
"Apache-2.0"
] | 2 | 2021-10-30T03:40:46.000Z | 2021-11-22T01:02:19.000Z | MRFNETgray/model.py | BiolabHHU/Image-denoising-with-MRFNet | 79420d707058de0ac04522d499adef79b5f6fc6e | [
"Apache-2.0"
] | null | null | null | MRFNETgray/model.py | BiolabHHU/Image-denoising-with-MRFNet | 79420d707058de0ac04522d499adef79b5f6fc6e | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from abc import ABC
| 57.827338 | 119 | 0.61259 | import torch
import torch.nn as nn
from abc import ABC
class MRFNET(nn.Module, ABC):
    """Residual image-denoising CNN built from 3x3 conv-BN-ReLU stages.

    The trunk is a chain of 24 conv stages (some with dilation=2). Features
    tapped after stage 8 (x1t), stage 15 (x2t) and stage 24 are each
    projected to one channel (conv1_16 / conv1_17 / conv1_18o), passed
    through Tanh, concatenated and fused by a 3x3 conv (conv3); the fused
    map is subtracted from the input, i.e. the network predicts the noise.

    NOTE: attribute names (conv1_1 ... conv3) are load-bearing for saved
    state dicts; do not rename them.
    """

    def __init__(self, channels):
        # channels: number of input image channels (1 for grayscale).
        super(MRFNET, self).__init__()
        kernel_size = 3
        padding = 1
        features = 64
        groups = 1
        self.conv1_1 = nn.Sequential(
            nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding,
                      groups=groups, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        # Stages with padding=2 use dilation=2 to enlarge the receptive field.
        self.conv1_2 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=2, groups=groups,
                      bias=False, dilation=2), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_3 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_4 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_5 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=2, groups=groups,
                      bias=False, dilation=2), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_6 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_7 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding,
                      groups=groups, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_8 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_9 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=2, groups=groups,
                      bias=False, dilation=2), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_10 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_11 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_12 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=2, groups=groups,
                      bias=False, dilation=2), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_13 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding,
                      groups=groups, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_14 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding,
                      groups=groups, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_15 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_18 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=2, groups=groups,
                      bias=False, dilation=2), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_19 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_20 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_21 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=2, groups=groups,
                      bias=False, dilation=2), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_22 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding,
                      groups=groups, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_23 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding,
                      groups=groups, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        self.conv1_24 = nn.Sequential(
            nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=1, groups=groups,
                      bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True))
        # Single-channel projections of the three tapped feature maps.
        self.conv1_16 = nn.Conv2d(in_channels=features, out_channels=1, kernel_size=kernel_size, padding=1,
                                  groups=groups, bias=False)
        self.conv1_17 = nn.Conv2d(in_channels=features, out_channels=1, kernel_size=kernel_size, padding=1,
                                  groups=groups, bias=False)
        self.conv1_18o = nn.Conv2d(in_channels=features, out_channels=1, kernel_size=kernel_size, padding=1,
                                   groups=groups, bias=False)
        # Fuses the three concatenated 1-channel maps into the noise estimate.
        self.conv3 = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=3, stride=1, padding=1, groups=1, bias=True)
        # NOTE(review): self.ReLU and self.sigmoid are defined but never used
        # in forward(); only self.Tanh is applied.
        self.ReLU = nn.ReLU(inplace=True)
        self.Tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        # Custom initialization: conv and BN weights ~ N(0, sqrt(2/(9*64)));
        # BN scale weights are then clipped away from zero (|w| >= 0.025)
        # and running_var is preset to 0.01.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, (2 / (9.0 * 64)) ** 0.5)
            if isinstance(m, nn.BatchNorm2d):
                m.weight.data.normal_(0, (2 / (9.0 * 64)) ** 0.5)
                clip_b = 0.025
                w = m.weight.data.shape[0]
                for j in range(w):
                    if 0 <= m.weight.data[j] < clip_b:
                        m.weight.data[j] = clip_b
                    elif -clip_b < m.weight.data[j] < 0:
                        m.weight.data[j] = -clip_b
                m.running_var.fill_(0.01)

    def forward(self, x):
        # Stage block 1 (conv1_1..conv1_8): tap x1t after stage 8.
        x1 = self.conv1_1(x)
        x1 = self.conv1_2(x1)
        x1 = self.conv1_3(x1)
        x1 = self.conv1_4(x1)
        x1 = self.conv1_5(x1)
        x1 = self.conv1_6(x1)
        x1 = self.conv1_7(x1)
        x1t = self.conv1_8(x1)
        # Stage block 2 (conv1_9..conv1_15): tap x2t after stage 15.
        x1 = self.conv1_9(x1t)
        x1 = self.conv1_10(x1)
        x1 = self.conv1_11(x1)
        x1 = self.conv1_12(x1)
        x1 = self.conv1_13(x1)
        x1 = self.conv1_14(x1)
        x2t = self.conv1_15(x1)
        # Stage block 3 (conv1_18..conv1_24).
        x1 = self.conv1_18(x2t)
        x1 = self.conv1_19(x1)
        x1 = self.conv1_20(x1)
        x1 = self.conv1_21(x1)
        x1 = self.conv1_22(x1)
        x1 = self.conv1_23(x1)
        x1 = self.conv1_24(x1)
        # Project each tap to 1 channel, fuse, and subtract (residual output).
        out = torch.cat([self.conv1_16(x1t), self.conv1_17(x2t), self.conv1_18o(x1)], 1)
        out = self.Tanh(out)
        out = self.conv3(out)
        out2 = x - out
        return out2
03b1a6e63f14bade406989015214f2e188a00108 | 2,074 | py | Python | examples/notebooks-py/combineExample.py | ShaikAsifullah/distributed-tellurium | 007e9b3842b614edd34908c001119c6da1d41897 | [
"Apache-2.0"
] | 1 | 2019-06-19T04:40:33.000Z | 2019-06-19T04:40:33.000Z | examples/notebooks-py/combineExample.py | ShaikAsifullah/distributed-tellurium | 007e9b3842b614edd34908c001119c6da1d41897 | [
"Apache-2.0"
] | null | null | null | examples/notebooks-py/combineExample.py | ShaikAsifullah/distributed-tellurium | 007e9b3842b614edd34908c001119c6da1d41897 | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8

# Back to the main [Index](../index.ipynb)

# ### Combine archives
# The experiment, i.e. model with the simulation description, can be stored as Combine Archive.

# In[1]:

#!!! DO NOT CHANGE !!! THIS FILE WAS CREATED AUTOMATICALLY FROM NOTEBOOKS !!! CHANGES WILL BE OVERWRITTEN !!! CHANGE CORRESPONDING NOTEBOOK FILE !!!
from __future__ import print_function
import tellurium as te

antimonyStr = """
model test()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
phrasedmlStr = """
model0 = model "test"
sim0 = simulate uniform(0, 6, 100)
task0 = run sim0 on model0
plot "Timecourse test model" task0.time vs task0.S1
"""
# phrasedml experiment
exp = te.experiment(antimonyStr, phrasedmlStr)
exp.execute(phrasedmlStr)

# create Combine Archive
import tempfile
f = tempfile.NamedTemporaryFile()
exp.exportAsCombine(f.name)

# print the content of the Combine Archive
import zipfile
# Fix: the original bound the archive to a name "zip" (shadowing the
# builtin) and never closed the handle; use a context manager instead.
with zipfile.ZipFile(f.name) as archive:
    print(archive.namelist())

# ### Create combine archive
# TODO

# In[2]:

import tellurium as te
import phrasedml

antTest1Str = """
model test1()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
antTest2Str = """
model test2()
v0: X1 -> X2; p1*X1;
X1 = 5.0; X2 = 20.0;
k1 = 0.2;
end
"""
phrasedmlStr = """
model1 = model "test1"
model2 = model "test2"
model3 = model model1 with S1=S2+20
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model1
task2 = run sim1 on model2
plot "Timecourse test1" task1.time vs task1.S1, task1.S2
plot "Timecourse test2" task2.time vs task2.X1, task2.X2
"""
# phrasedml.setReferencedSBML("test1")
exp = te.experiment(phrasedmlList=[phrasedmlStr], antimonyList=[antTest1Str])
print(exp)

# set first model
phrasedml.setReferencedSBML("test1", te.antimonyToSBML(antTest1Str))
phrasedml.setReferencedSBML("test2", te.antimonyToSBML(antTest2Str))
sedmlstr = phrasedml.convertString(phrasedmlStr)
if sedmlstr is None:
    raise Exception(phrasedml.getLastError())
print(sedmlstr)

# In[3]:

# In[3]:
| 20.135922 | 148 | 0.685149 |
# coding: utf-8

# Back to the main [Index](../index.ipynb)

# ### Combine archives
# The experiment, i.e. model with the simulation description, can be stored as Combine Archive.

# In[1]:

#!!! DO NOT CHANGE !!! THIS FILE WAS CREATED AUTOMATICALLY FROM NOTEBOOKS !!! CHANGES WILL BE OVERWRITTEN !!! CHANGE CORRESPONDING NOTEBOOK FILE !!!
from __future__ import print_function
import tellurium as te

antimonyStr = """
model test()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
phrasedmlStr = """
model0 = model "test"
sim0 = simulate uniform(0, 6, 100)
task0 = run sim0 on model0
plot "Timecourse test model" task0.time vs task0.S1
"""
# phrasedml experiment
exp = te.experiment(antimonyStr, phrasedmlStr)
exp.execute(phrasedmlStr)

# create Combine Archive
import tempfile
f = tempfile.NamedTemporaryFile()
exp.exportAsCombine(f.name)

# print the content of the Combine Archive
import zipfile
# NOTE(review): "zip" shadows the builtin and the archive handle is never
# closed; prefer "with zipfile.ZipFile(f.name) as archive:" in the notebook.
zip=zipfile.ZipFile(f.name)
print(zip.namelist())

# ### Create combine archive
# TODO

# In[2]:

import tellurium as te
import phrasedml

antTest1Str = """
model test1()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
# NOTE(review): reaction v0 uses rate "p1*X1" but only k1 is declared below;
# p1 looks undefined — confirm against the source notebook.
antTest2Str = """
model test2()
v0: X1 -> X2; p1*X1;
X1 = 5.0; X2 = 20.0;
k1 = 0.2;
end
"""
phrasedmlStr = """
model1 = model "test1"
model2 = model "test2"
model3 = model model1 with S1=S2+20
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model1
task2 = run sim1 on model2
plot "Timecourse test1" task1.time vs task1.S1, task1.S2
plot "Timecourse test2" task2.time vs task2.X1, task2.X2
"""
# phrasedml.setReferencedSBML("test1")
exp = te.experiment(phrasedmlList=[phrasedmlStr], antimonyList=[antTest1Str])
print(exp)

# set first model
phrasedml.setReferencedSBML("test1", te.antimonyToSBML(antTest1Str))
phrasedml.setReferencedSBML("test2", te.antimonyToSBML(antTest2Str))
sedmlstr = phrasedml.convertString(phrasedmlStr)
if sedmlstr is None:
    raise Exception(phrasedml.getLastError())
print(sedmlstr)

# In[3]:

# In[3]:
| 0 | 0 | 0 |
56cbcd1510e4bdd1ca619d48e259ed4b8d81e3f0 | 4,538 | py | Python | sandbox/grist/gpath.py | nataliemisasi/grist-core | 52d3f6320339b23ed0155009f45ff7121d90e3b8 | [
"Apache-2.0"
] | 2,667 | 2020-10-30T16:25:06.000Z | 2022-03-31T15:27:37.000Z | sandbox/grist/gpath.py | nataliemisasi/grist-core | 52d3f6320339b23ed0155009f45ff7121d90e3b8 | [
"Apache-2.0"
] | 137 | 2020-12-04T08:14:09.000Z | 2022-03-31T22:36:13.000Z | sandbox/grist/gpath.py | nataliemisasi/grist-core | 52d3f6320339b23ed0155009f45ff7121d90e3b8 | [
"Apache-2.0"
] | 103 | 2020-10-30T15:17:51.000Z | 2022-03-28T17:02:04.000Z | from six.moves import xrange
def get(obj, path):
    """
    Looks up and returns a path in the object. Returns None if the path isn't there.
    """
    for part in path:
        try:
            obj = obj[part]
        except (KeyError, IndexError, TypeError):
            # TypeError added: descending into a non-container (e.g. an int,
            # or a list indexed by a string) also means "the path isn't
            # there", matching the documented contract instead of raising.
            return None
    return obj
def glob(obj, path, func, extra_arg):
  """
  Resolves wildcards in `path`, calling func for all matching paths. Returns the number of
  times that func was called.

  obj - An object to scan.
  path - Path to an item in an object or an array in obj. May contain the special key '*', which
      -- for arrays only -- means "for all indices".
  func - Will be called as func(subobj, key, fullPath, extraArg).
  extra_arg - An arbitrary value to pass along to func, for convenience.
  Returns count of matching paths, for which func got called.
  """
  # The wildcard expansion itself is done by the recursive _globHelper.
  return _globHelper(obj, path, path, func, extra_arg)

def place(obj, path, value):
  """
  Sets or deletes an object property in DocObj.
  gpath - Path to an Object in obj.
  value - Any value. Setting None will remove the selected object key.
  """
  # Delegates to glob so '*' paths update every matching object.
  return glob(obj, path, _placeHelper, value)
def _checkIsArray(subobj, errPrefix, index, itemPath, isInsert):
  """
  Validate an array operation, raising a descriptive Exception on failure.
  """
  if subobj is None:
    raise Exception(errPrefix + ": non-existent object at " + describe(dirname(itemPath)))
  if not _is_array(subobj):
    raise Exception(errPrefix + ": not an array at " + describe(dirname(itemPath)))
  length = len(subobj)
  # Any existing position is acceptable; inserts may also target
  # one-past-the-end, or None (meaning "append").
  in_range = isinstance(index, int) and 0 <= index < length
  appendable = index is None or index == length
  if not (in_range or (isInsert and appendable)):
    raise Exception(errPrefix + ": invalid array index: " + describe(itemPath))
def insert(obj, path, value):
  """
  Inserts an element into an array in DocObj.
  gpath - Path to an item in an array in obj.
      The new value will be inserted before the item pointed to by gpath.
      The last component of gpath may be null, in which case the value is appended at the end.
  value - Any value.
  """
  return glob(obj, path, _insertHelper, value)

def update(obj, path, value):
  """
  Updates an element in an array in DocObj.
  gpath - Path to an item in an array in obj.
  value - Any value.
  """
  # A trailing '*' component overwrites every element of the array.
  return glob(obj, path, _updateHelper, value)

def remove(obj, path):
  """
  Removes an element from an array in DocObj.
  gpath - Path to an item in an array in obj.
  """
  return glob(obj, path, _removeHelper, None)
def dirname(path):
  """
  Returns path without the last component, like a directory name in a filesystem path.
  """
  # Slicing keeps the input's sequence type and handles the empty path.
  end = len(path) - 1
  return path[:end]
def basename(path):
  """
  Returns the last component of path, like base name of a filesystem path.
  """
  if not path:
    # An empty path has no last component.
    return None
  return path[-1]
def describe(path):
  """
  Returns a human-readable representation of path.
  """
  # Stringify each component and join them like a filesystem path.
  parts = [str(component) for component in path]
  return "/" + "/".join(parts)
| 31.296552 | 98 | 0.671882 | from six.moves import xrange
def _is_array(obj):
  """Return True only for plain lists (tuples and strings don't count)."""
  return isinstance(obj, list)
def get(obj, path):
    """
    Looks up and returns a path in the object. Returns None if the path isn't there.
    """
    for part in path:
        try:
            obj = obj[part]
        except (KeyError, IndexError, TypeError):
            # TypeError added: descending into a non-container (e.g. an int,
            # or a list indexed by a string) also means "the path isn't
            # there", matching the documented contract instead of raising.
            return None
    return obj
def glob(obj, path, func, extra_arg):
  """
  Resolves wildcards in `path`, calling func for all matching paths. Returns the number of
  times that func was called.

  obj - An object to scan.
  path - Path to an item in an object or an array in obj. May contain the special key '*', which
      -- for arrays only -- means "for all indices".
  func - Will be called as func(subobj, key, fullPath, extraArg).
  extra_arg - An arbitrary value to pass along to func, for convenience.
  Returns count of matching paths, for which func got called.
  """
  # The wildcard expansion itself is done by the recursive _globHelper.
  return _globHelper(obj, path, path, func, extra_arg)
def _globHelper(obj, path, full_path, func, extra_arg):
    """Recursive worker for glob(): walks *path*, expanding '*' over arrays.

    Bug fix: the lookup used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; it now catches only the errors a failed
    ``obj[part]`` lookup can raise.
    """
    for i, part in enumerate(path[:-1]):
        if part == "*" and _is_array(obj):
            # Array wildcard: recurse into every element with the remaining path.
            subpath = path[i + 1:]
            count = 0
            for subobj in obj:
                count += _globHelper(subobj, subpath, full_path, func, extra_arg)
            return count
        try:
            obj = obj[part]
        except (KeyError, IndexError, TypeError):
            raise Exception("gpath.glob: non-existent object at " +
                            describe(full_path[:len(full_path) - len(path) + i + 1]))
    # Leaf: hand the parent object and the final key to the callback.
    return func(obj, path[-1], full_path, extra_arg) or 1
def place(obj, path, value):
  """
  Sets or deletes an object property in DocObj.
  gpath - Path to an Object in obj.
  value - Any value. Setting None will remove the selected object key.
  """
  return glob(obj, path, _placeHelper, value)

def _placeHelper(subobj, key, full_path, value):
  # Named properties only make sense on plain dicts.
  if not isinstance(subobj, dict):
    raise Exception("gpath.place: not a plain object at " + describe(dirname(full_path)))
  if value is not None:
    subobj[key] = value
  elif key in subobj:
    # A value of None means "delete the key"; missing keys are a no-op.
    del subobj[key]
def _checkIsArray(subobj, errPrefix, index, itemPath, isInsert):
  """
  This is a helper for checking operations on arrays, and throwing descriptive errors.
  """
  if subobj is None:
    raise Exception(errPrefix + ": non-existent object at " + describe(dirname(itemPath)))
  elif not _is_array(subobj):
    raise Exception(errPrefix + ": not an array at " + describe(dirname(itemPath)))
  else:
    length = len(subobj)
    # Any existing position is valid; inserts may also target
    # one-past-the-end, or None (meaning "append").
    validIndex = (isinstance(index, int) and index >= 0 and index < length)
    validInsertIndex = (index is None or index == length)
    if not (validIndex or (isInsert and validInsertIndex)):
      raise Exception(errPrefix + ": invalid array index: " + describe(itemPath))

def insert(obj, path, value):
  """
  Inserts an element into an array in DocObj.
  gpath - Path to an item in an array in obj.
      The new value will be inserted before the item pointed to by gpath.
      The last component of gpath may be null, in which case the value is appended at the end.
  value - Any value.
  """
  return glob(obj, path, _insertHelper, value)

def _insertHelper(subobj, index, fullPath, value):
  _checkIsArray(subobj, "gpath.insert", index, fullPath, True)
  # A None index means "append at the end".
  if index is None:
    subobj.append(value)
  else:
    subobj.insert(index, value)
def update(obj, path, value):
  """
  Updates an element in an array in DocObj.
  gpath - Path to an item in an array in obj.
  value - Any value.
  """
  return glob(obj, path, _updateHelper, value)

def _updateHelper(subobj, index, fullPath, value):
  # A trailing '*' overwrites every element of the array.
  if index == '*':
    _checkIsArray(subobj, "gpath.update", None, fullPath, True)
    for i in xrange(len(subobj)):
      subobj[i] = value
    # Report how many items were touched so glob() can sum counts.
    return len(subobj)
  else:
    _checkIsArray(subobj, "gpath.update", index, fullPath, False)
    subobj[index] = value

def remove(obj, path):
  """
  Removes an element from an array in DocObj.
  gpath - Path to an item in an array in obj.
  """
  return glob(obj, path, _removeHelper, None)

def _removeHelper(subobj, index, fullPath, _):
  """Delete one array element after validating the index."""
  _checkIsArray(subobj, "gpath.remove", index, fullPath, False)
  del subobj[index]
def dirname(path):
  """
  Returns path without the last component, like a directory name in a filesystem path.
  """
  # Slicing keeps the input's sequence type; an empty path stays empty.
  return path[:-1]

def basename(path):
  """
  Returns the last component of path, like base name of a filesystem path.
  """
  # An empty path has no last component.
  return path[-1] if path else None

def describe(path):
  """
  Returns a human-readable representation of path.
  """
  return "/" + "/".join(str(p) for p in path)
| 1,402 | 0 | 138 |
074785aa55f0d84535e8e8972e207607e74a1574 | 29,448 | py | Python | mpf/tests/test_Game.py | pmansukhani/mpf | 0979965d24bcaba9423b43581c6a18b847b1b900 | [
"MIT"
] | null | null | null | mpf/tests/test_Game.py | pmansukhani/mpf | 0979965d24bcaba9423b43581c6a18b847b1b900 | [
"MIT"
] | null | null | null | mpf/tests/test_Game.py | pmansukhani/mpf | 0979965d24bcaba9423b43581c6a18b847b1b900 | [
"MIT"
] | null | null | null | from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
from mpf.tests.MpfGameTestCase import MpfGameTestCase
from unittest.mock import MagicMock
| 59.853659 | 116 | 0.708367 | from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
from mpf.tests.MpfGameTestCase import MpfGameTestCase
from unittest.mock import MagicMock
class TestGame(MpfGameTestCase):
    """Functional tests of the core game lifecycle (start, ball drain,
    player rotation, extra balls, game end) on the smart_virtual platform,
    asserting the exact order and payload of every lifecycle event."""
    def getConfigFile(self):
        # Machine config used by every test in this class.
        return 'config.yaml'
    def getMachinePath(self):
        # Test machine folder (relative to the mpf package).
        return 'tests/machine_files/game/'
    def get_platform(self):
        # smart_virtual auto-completes ball device actions (ejects etc.).
        return 'smart_virtual'
    def testSinglePlayerGame(self):
        """Verify the full lifecycle event sequence of a 3-ball, single-player game."""
        # setup event callbacks
        self._events = MagicMock()
        # Create handler entries for all game lifecycle events we wish to test
        self.machine.events.add_handler('game_will_start', self._events, event_name='game_will_start')
        self.machine.events.add_handler('game_starting', self._events, event_name='game_starting')
        self.machine.events.add_handler('game_started', self._events, event_name='game_started')
        self.machine.events.add_handler('player_add_request', self._events, event_name='player_add_request')
        self.machine.events.add_handler('player_will_add', self._events, event_name='player_will_add')
        self.machine.events.add_handler('player_adding', self._events, event_name='player_adding')
        self.machine.events.add_handler('player_added', self._events, event_name='player_added')
        self.machine.events.add_handler('player_turn_will_start', self._events, event_name='player_turn_will_start')
        self.machine.events.add_handler('player_turn_starting', self._events, event_name='player_turn_starting')
        self.machine.events.add_handler('player_turn_started', self._events, event_name='player_turn_started')
        self.machine.events.add_handler('ball_will_start', self._events, event_name='ball_will_start')
        self.machine.events.add_handler('ball_starting', self._events, event_name='ball_starting')
        self.machine.events.add_handler('ball_started', self._events, event_name='ball_started')
        self.machine.events.add_handler('ball_will_end', self._events, event_name='ball_will_end')
        self.machine.events.add_handler('ball_ending', self._events, event_name='ball_ending')
        self.machine.events.add_handler('ball_ended', self._events, event_name='ball_ended')
        self.machine.events.add_handler('game_will_end', self._events, event_name='game_will_end')
        self.machine.events.add_handler('game_ending', self._events, event_name='game_ending')
        self.machine.events.add_handler('game_ended', self._events, event_name='game_ended')
        # prepare game
        self.machine.switch_controller.process_switch('s_ball_switch1', 1)
        self.machine.switch_controller.process_switch('s_ball_switch2', 1)
        self.advance_time_and_run(10)
        self.assertEqual(2, self.machine.ball_controller.num_balls_known)
        self.assertEqual(2, self.machine.ball_devices.bd_trough.balls)
        # start game (single player)
        self.start_game()
        self.assertGameIsRunning()
        self.assertPlayerNumber(1)
        self.assertBallNumber(1)
        self.assertEqual(3, self.machine.modes.game.balls_per_game)
        # Assert game startup sequence
        self.assertEqual(13, self._events.call_count)
        self.assertEqual('game_will_start', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('game_starting', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('player_add_request', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_will_add', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_adding', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_added', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[5][1]['num'])
        self.assertEqual('game_started', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[9][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[11][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[11][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[12][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[12][1]['ball'])
        self.assertEqual(1, self._events.call_args_list[12][1]['player'])
        self._events.reset_mock()
        # Drain the first ball
        self.drain_all_balls()
        self.advance_time_and_run()
        self.assertPlayerNumber(1)
        self.assertBallNumber(2)
        # Assert ball drain, next ball start sequence
        self.assertEqual(9, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[5][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[7][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[7][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[8][1]['ball'])
        self._events.reset_mock()
        # Drain the second ball
        self.drain_all_balls()
        self.advance_time_and_run()
        self.assertPlayerNumber(1)
        self.assertBallNumber(3)
        # Assert ball drain, next ball start sequence
        self.assertEqual(9, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[5][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual(0, self._events.call_args_list[7][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[7][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(3, self._events.call_args_list[8][1]['ball'])
        self._events.reset_mock()
        # Drain the third (and last) ball
        self.drain_all_balls()
        self.advance_time_and_run()
        self.assertGameIsNotRunning()
        # Assert ball drain, game ending sequence
        self.assertEqual(6, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('game_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('game_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('game_ended', self._events.call_args_list[5][1]['event_name'])
    def testMultiplePlayerGame(self):
        """Verify lifecycle events for a two-player game, including player
        turn rotation and an extra ball keeping the same player up."""
        # setup event callbacks
        self._events = MagicMock()
        # Create handler entries for all game lifecycle events we wish to test
        self.machine.events.add_handler('game_will_start', self._events, event_name='game_will_start')
        self.machine.events.add_handler('game_starting', self._events, event_name='game_starting')
        self.machine.events.add_handler('game_started', self._events, event_name='game_started')
        self.machine.events.add_handler('player_add_request', self._events, event_name='player_add_request')
        self.machine.events.add_handler('player_will_add', self._events, event_name='player_will_add')
        self.machine.events.add_handler('player_adding', self._events, event_name='player_adding')
        self.machine.events.add_handler('player_added', self._events, event_name='player_added')
        self.machine.events.add_handler('player_turn_will_start', self._events, event_name='player_turn_will_start')
        self.machine.events.add_handler('player_turn_starting', self._events, event_name='player_turn_starting')
        self.machine.events.add_handler('player_turn_started', self._events, event_name='player_turn_started')
        self.machine.events.add_handler('player_turn_will_end', self._events, event_name='player_turn_will_end')
        self.machine.events.add_handler('player_turn_ending', self._events, event_name='player_turn_ending')
        self.machine.events.add_handler('player_turn_ended', self._events, event_name='player_turn_ended')
        self.machine.events.add_handler('ball_will_start', self._events, event_name='ball_will_start')
        self.machine.events.add_handler('ball_starting', self._events, event_name='ball_starting')
        self.machine.events.add_handler('ball_started', self._events, event_name='ball_started')
        self.machine.events.add_handler('ball_will_end', self._events, event_name='ball_will_end')
        self.machine.events.add_handler('ball_ending', self._events, event_name='ball_ending')
        self.machine.events.add_handler('ball_ended', self._events, event_name='ball_ended')
        self.machine.events.add_handler('game_will_end', self._events, event_name='game_will_end')
        self.machine.events.add_handler('game_ending', self._events, event_name='game_ending')
        self.machine.events.add_handler('game_ended', self._events, event_name='game_ended')
        # prepare game
        self.machine.switch_controller.process_switch('s_ball_switch1', 1)
        self.machine.switch_controller.process_switch('s_ball_switch2', 1)
        self.advance_time_and_run(10)
        self.assertEqual(2, self.machine.ball_controller.num_balls_known)
        self.assertEqual(2, self.machine.ball_devices.bd_trough.balls)
        # start game (first player)
        self.start_game()
        self.advance_time_and_run(5)
        self.assertGameIsRunning()
        self.assertPlayerNumber(1)
        self.assertBallNumber(1)
        self.assertEqual(3, self.machine.modes.game.balls_per_game)
        # Assert game startup sequence
        self.assertEqual(13, self._events.call_count)
        self.assertEqual('game_will_start', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('game_starting', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('player_add_request', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_will_add', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_adding', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_added', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[5][1]['num'])
        self.assertEqual('game_started', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[9][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[11][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[11][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[12][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[12][1]['ball'])
        self.assertEqual(1, self._events.call_args_list[12][1]['player'])
        self._events.reset_mock()
        # add another player (player 2)
        self.add_player()
        # Assert game startup sequence
        self.assertEqual(4, self._events.call_count)
        self.assertEqual('player_add_request', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('player_will_add', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('player_adding', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_added', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[3][1]['num'])
        self._events.reset_mock()
        # Drain the first ball (player 1)
        self.drain_all_balls()
        self.advance_time_and_run(5)
        self.assertPlayerNumber(2)
        self.assertBallNumber(1)
        # Assert ball drain, next ball start sequence
        self.assertEqual(12, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_ended', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[8][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[10][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[10][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[11][1]['ball'])
        self._events.reset_mock()
        # Drain the first ball (player 2)
        self.drain_all_balls()
        self.advance_time_and_run(5)
        self.assertPlayerNumber(1)
        self.assertBallNumber(2)
        # Assert ball drain, next ball start sequence
        self.assertEqual(12, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_ended', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[8][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[10][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[10][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[11][1]['ball'])
        self._events.reset_mock()
        # Drain the second ball (player 1)
        self.drain_all_balls()
        self.advance_time_and_run(5)
        self.assertPlayerNumber(2)
        self.assertBallNumber(2)
        # Assert ball drain, next ball start sequence
        self.assertEqual(12, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_ended', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[8][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[10][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[10][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[11][1]['ball'])
        self._events.reset_mock()
        # Player 2 earns extra ball before draining
        self.machine.game.player.extra_balls += 1
        # Drain the ball (player 2 has earned an extra ball so it should still be
        # player 2's turn)
        self.drain_all_balls()
        self.advance_time_and_run(5)
        self.assertPlayerNumber(2)
        self.assertBallNumber(2)
        # Assert ball drain, next ball sequence
        self.assertEqual(6, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('ball_will_start', self._events.call_args_list[3][1]['event_name'])
        self.assertTrue(self._events.call_args_list[3][1]['is_extra_ball'])
        self.assertEqual('ball_starting', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[4][1]['balls_remaining'])
        self.assertTrue(self._events.call_args_list[4][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[5][1]['ball'])
        self._events.reset_mock()
        # Drain the second ball (player 2)
        self.drain_all_balls()
        self.advance_time_and_run(5)
        self.assertPlayerNumber(1)
        self.assertBallNumber(3)
        # Assert ball drain, next ball start sequence
        self.assertEqual(12, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_ended', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(1, self._events.call_args_list[8][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual(0, self._events.call_args_list[10][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[10][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(3, self._events.call_args_list[11][1]['ball'])
        self._events.reset_mock()
        # Drain the third ball (player 1)
        self.drain_all_balls()
        self.advance_time_and_run(5)
        self.assertPlayerNumber(2)
        self.assertBallNumber(3)
        # Assert ball drain, next ball start sequence
        self.assertEqual(12, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_ended', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual('player_turn_will_start', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('player_turn_starting', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('player_turn_started', self._events.call_args_list[8][1]['event_name'])
        self.assertEqual(2, self._events.call_args_list[8][1]['number'])
        self.assertEqual('ball_will_start', self._events.call_args_list[9][1]['event_name'])
        self.assertEqual('ball_starting', self._events.call_args_list[10][1]['event_name'])
        self.assertEqual(0, self._events.call_args_list[10][1]['balls_remaining'])
        self.assertFalse(self._events.call_args_list[10][1]['is_extra_ball'])
        self.assertEqual('ball_started', self._events.call_args_list[11][1]['event_name'])
        self.assertEqual(3, self._events.call_args_list[11][1]['ball'])
        self._events.reset_mock()
        # Drain the third (and last) ball for player 2
        self.drain_all_balls()
        self.advance_time_and_run()
        self.assertGameIsNotRunning()
        # Assert ball drain, game ending sequence
        self.assertEqual(9, self._events.call_count)
        self.assertEqual('ball_will_end', self._events.call_args_list[0][1]['event_name'])
        self.assertEqual('ball_ending', self._events.call_args_list[1][1]['event_name'])
        self.assertEqual('ball_ended', self._events.call_args_list[2][1]['event_name'])
        self.assertEqual('player_turn_will_end', self._events.call_args_list[3][1]['event_name'])
        self.assertEqual('player_turn_ending', self._events.call_args_list[4][1]['event_name'])
        self.assertEqual('player_turn_ended', self._events.call_args_list[5][1]['event_name'])
        self.assertEqual('game_will_end', self._events.call_args_list[6][1]['event_name'])
        self.assertEqual('game_ending', self._events.call_args_list[7][1]['event_name'])
        self.assertEqual('game_ended', self._events.call_args_list[8][1]['event_name'])
    def testGameEvents(self):
        """Start a game and add players via config-driven events; the player
        count caps at four and duplicate start events are ignored."""
        self.machine.switch_controller.process_switch('s_ball_switch1', 1)
        self.machine.switch_controller.process_switch('s_ball_switch2', 1)
        self.advance_time_and_run(10)
        self.assertEqual(2, self.machine.ball_controller.num_balls_known)
        self.assertEqual(2, self.machine.ball_devices.bd_trough.balls)
        self.post_event("start_my_game")
        self.assertGameIsRunning()
        self.advance_time_and_run()
        self.assertPlayerCount(1)
        self.post_event("start_my_game")
        self.assertPlayerCount(1)
        self.post_event("add_my_player")
        self.assertPlayerCount(2)
        self.post_event("add_my_player")
        self.assertPlayerCount(3)
        self.post_event("add_my_player")
        self.assertPlayerCount(4)
        self.post_event("add_my_player")
        self.assertPlayerCount(4)
class TestGameLogic(MpfFakeGameTestCase):
    """Tests of cross-game state: playerN_score machine variables are only
    written when a game ends, and stale slots are cleared."""
    def testLastGameScore(self):
        """Run 4-, 2- and 1-player games and check playerN_score machine
        variables update only at game end, clearing unused player slots."""
        # no previous scores
        self.assertFalse(self.machine.variables.is_machine_var("player1_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player2_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player3_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player4_score"))
        # four players
        self.start_game()
        self.add_player()
        self.add_player()
        self.add_player()
        self.machine.game.player.score = 100
        self.assertPlayerNumber(1)
        self.drain_all_balls()
        self.machine.game.player.score = 200
        self.assertPlayerNumber(2)
        self.drain_all_balls()
        self.machine.game.player.score = 0
        self.assertPlayerNumber(3)
        self.drain_all_balls()
        self.machine.game.player.score = 42
        self.assertPlayerNumber(4)
        # still old scores should not be set
        self.assertFalse(self.machine.variables.is_machine_var("player1_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player2_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player3_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player4_score"))
        self.stop_game()
        self.assertMachineVarEqual(100, "player1_score")
        self.assertMachineVarEqual(200, "player2_score")
        self.assertMachineVarEqual(0, "player3_score")
        self.assertMachineVarEqual(42, "player4_score")
        # two players
        self.start_game()
        self.add_player()
        self.machine.game.player.score = 100
        self.assertPlayerNumber(1)
        self.drain_all_balls()
        self.assertPlayerNumber(2)
        self.machine.game.player.score = 200
        self.drain_all_balls()
        # old scores should still be active
        self.assertMachineVarEqual(100, "player1_score")
        self.assertMachineVarEqual(200, "player2_score")
        self.assertMachineVarEqual(0, "player3_score")
        self.assertMachineVarEqual(42, "player4_score")
        self.stop_game()
        self.assertMachineVarEqual(100, "player1_score")
        self.assertMachineVarEqual(200, "player2_score")
        self.assertFalse(self.machine.variables.is_machine_var("player3_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player4_score"))
        # start one player game
        self.start_game()
        self.machine.game.player.score = 1337
        self.drain_all_balls()
        self.drain_all_balls()
        # still the old scores
        self.assertMachineVarEqual(100, "player1_score")
        self.assertMachineVarEqual(200, "player2_score")
        self.assertFalse(self.machine.variables.is_machine_var("player3_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player4_score"))
        self.drain_all_balls()
        self.assertGameIsNotRunning()
        self.assertMachineVarEqual(1337, "player1_score")
        self.assertFalse(self.machine.variables.is_machine_var("player2_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player3_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player4_score"))
| 29,027 | 31 | 235 |
6f032cb5abec8fc8945f5c021bf35043f9ce7546 | 5,377 | py | Python | apps/users/tests.py | pedro-hs/financial-account | 7e8e4d0f3ac888fa36a091d0e733a8e1926180d2 | [
"MIT"
] | null | null | null | apps/users/tests.py | pedro-hs/financial-account | 7e8e4d0f3ac888fa36a091d0e733a8e1926180d2 | [
"MIT"
] | null | null | null | apps/users/tests.py | pedro-hs/financial-account | 7e8e4d0f3ac888fa36a091d0e733a8e1926180d2 | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from .models import User
from .serializers import DefaultUserSerializer
client = APIClient()
| 43.715447 | 153 | 0.649061 | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from .models import User
from .serializers import DefaultUserSerializer
# Module-level API client shared by every test case below; individual
# test classes authenticate it via force_authenticate() in their setUp.
client = APIClient()
class TestGet(APITestCase):
    """Read-only (GET) tests for the user list and detail endpoints."""

    def setUp(self):
        """Create a staff superuser plus two plain users and authenticate as the superuser."""
        admin = User.objects.create(cpf='44756054644', email='root@mail.com', password='!bF6tVmbXt9dMc#',
                                    full_name='I am root', is_superuser=True, is_staff=True,
                                    role='collaborator')
        User.objects.create(cpf='23756054611', email='test@mail.com', password='!bF6tVmbXt9dMc#',
                            full_name='Pedro Henrique Santos', role='collaborator')
        User.objects.create(cpf='33756054622', email='test2@mail.com', password='!bF6tVmbXt9dMc#',
                            full_name='Pedro Carlos', role='collaborator')
        client.force_authenticate(user=admin)

    def test_list(self):
        """GET user-list returns all users, serialized, with HTTP 200."""
        api_response = client.get(reverse('user-list'))
        expected = DefaultUserSerializer(User.objects.all(), many=True).data
        self.assertEqual(api_response.data, expected)
        self.assertEqual(api_response.status_code, status.HTTP_200_OK)

    def test_retrieve(self):
        """GET user-detail by cpf returns that single serialized user with HTTP 200."""
        api_response = client.get(reverse('user-detail', args=['44756054644']))
        expected = DefaultUserSerializer(User.objects.get(email='root@mail.com')).data
        self.assertEqual(api_response.data, expected)
        self.assertEqual(api_response.status_code, status.HTTP_200_OK)
class TestPost(APITestCase):
    """POST (user creation) tests covering success and validation errors."""

    def test_success(self):
        """A valid payload creates the user and echoes the serialized record with HTTP 201."""
        payload = {'cpf': '44756054644', 'email': 'root@mail.com',
                   'password': '!bF6tVmbXt9dMc#', 'full_name': 'I am root'}
        api_response = client.post(reverse('user-list'), payload)
        created = User.objects.get(email='root@mail.com')
        self.assertEqual(api_response.data, DefaultUserSerializer(created).data)
        self.assertEqual(api_response.status_code, status.HTTP_201_CREATED)

    def test_invalid(self):
        """Bad field values and unknown fields are both rejected with HTTP 400."""
        payload = {'cpf': 'invalid', 'email': 'invalid',
                   'password': 'invalid', 'full_name': '0'}
        api_response = client.post(reverse('user-list'), payload)
        expected_errors = {'cpf': ['Ensure this field has at least 11 characters.'],
                           'email': ['Enter a valid email address.'],
                           'full_name': ['Invalid name'],
                           'password': ['This password is too short. It must contain at least 8 characters.']}
        self.assertEqual(api_response.json(), expected_errors)
        self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
        # An otherwise-valid payload carrying an unexpected extra key is rejected too.
        payload = {'cpf': '44756054644', 'email': 'root@mail.com',
                   'password': '!bF6tVmbXt9dMc#', 'full_name': 'I am root', 'invalid': 'invalid'}
        api_response = client.post(reverse('user-list'), payload)
        expected_errors = {'non_field_errors': ['Unknown field(s): invalid']}
        self.assertEqual(api_response.json(), expected_errors)
        self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
class TestPut(APITestCase):
    """PUT (user update) tests: editable fields succeed, protected fields fail."""

    def setUp(self):
        """Create and authenticate the staff user whose record will be edited."""
        staff = User.objects.create(cpf='44756054644', email='root@mail.com', password='!bF6tVmbXt9dMc#',
                                    full_name='I am root', is_staff=True, role='collaborator')
        client.force_authenticate(user=staff)

    def test_success(self):
        """Updating full_name succeeds and returns the refreshed serialized user."""
        api_response = client.put(reverse('user-detail', args=['44756054644']),
                                  {'full_name': 'I am root edited'})
        refreshed = User.objects.get(email='root@mail.com')
        self.assertEqual(api_response.data, DefaultUserSerializer(refreshed).data)
        self.assertEqual(api_response.status_code, status.HTTP_200_OK)

    def test_invalid(self):
        """An invalid name or an attempted password change is rejected with HTTP 400."""
        api_response = client.put(reverse('user-detail', args=['44756054644']), {'full_name': '0'})
        self.assertEqual(api_response.json(), {'full_name': ['Invalid name']})
        self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
        # Passwords may not be changed through this endpoint.
        api_response = client.put(reverse('user-detail', args=['44756054644']),
                                  {'cpf': '12345678900', 'password': '!bF6tVmbXt9dMc#'})
        self.assertEqual(api_response.json(), {'password': ['Cannot update password']})
        self.assertEqual(api_response.status_code, status.HTTP_400_BAD_REQUEST)
class TestDelete(APITestCase):
    """DELETE tests: deletion soft-disables the user rather than removing the row."""
    def setUp(self):
        # An authenticated staff user is required to call the delete endpoint.
        User.objects.create(cpf='44756054644', email='root@mail.com',
                            password='!bF6tVmbXt9dMc#', full_name='I am root',
                            role='collaborator', is_staff=True)
        user = User.objects.get(email='root@mail.com')
        client.force_authenticate(user=user)
    def test_success(self):
        """DELETE deactivates the user (soft delete) and 404s on an unknown cpf."""
        response = client.delete(reverse('user-detail', args=['44756054644']))
        user = User.objects.get(email='root@mail.com')
        serializer = DefaultUserSerializer(user)
        data = dict(serializer.data)
        # Soft delete: the record still exists but is flagged inactive.
        self.assertEqual(data['is_active'], False)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        response = client.delete(reverse('user-detail', args=['invalid']))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 4,771 | 28 | 358 |
fa5aeb979b0904d65bd92faa167b43fdb060b354 | 14,557 | py | Python | BookClub/tests/views/meeting_views/test_leave_meeting_view.py | amir-rahim/BookClubSocialNetwork | b69a07cd33592f700214252a64c7c1c53845625d | [
"MIT"
] | 4 | 2022-02-04T02:11:48.000Z | 2022-03-12T21:38:01.000Z | BookClub/tests/views/meeting_views/test_leave_meeting_view.py | amir-rahim/BookClubSocialNetwork | b69a07cd33592f700214252a64c7c1c53845625d | [
"MIT"
] | 51 | 2022-02-01T18:56:23.000Z | 2022-03-31T15:35:37.000Z | BookClub/tests/views/meeting_views/test_leave_meeting_view.py | amir-rahim/BookClubSocialNetwork | b69a07cd33592f700214252a64c7c1c53845625d | [
"MIT"
] | null | null | null | from django.contrib.messages import get_messages
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase, tag
from django.urls import reverse
from django.utils import timezone
from BookClub.models import User, Meeting, Club, ClubMembership
from BookClub.tests.helpers import LogInTester
@tag("views", "meeting", "leave_meeting")
class LeaveMeetingViewTestCase(TestCase, LogInTester):
"""Tests of the Join Meeting view."""
fixtures = [
'BookClub/tests/fixtures/default_users.json',
'BookClub/tests/fixtures/default_clubs.json',
'BookClub/tests/fixtures/default_meetings.json',
'BookClub/tests/fixtures/default_books.json',
]
def test_get_leave_meeting_redirects_to_list_of_meetings(self):
"""Test for redirecting user to available_clubs when used get method."""
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
response = self.client.get(reverse('leave_meeting', kwargs={'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id}))
redirect_url = reverse('meeting_list', kwargs={'club_url_name': self.club.club_url_name})
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
| 55.773946 | 116 | 0.700969 | from django.contrib.messages import get_messages
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase, tag
from django.urls import reverse
from django.utils import timezone
from BookClub.models import User, Meeting, Club, ClubMembership
from BookClub.tests.helpers import LogInTester
@tag("views", "meeting", "leave_meeting")
class LeaveMeetingViewTestCase(TestCase, LogInTester):
"""Tests of the Join Meeting view."""
fixtures = [
'BookClub/tests/fixtures/default_users.json',
'BookClub/tests/fixtures/default_clubs.json',
'BookClub/tests/fixtures/default_meetings.json',
'BookClub/tests/fixtures/default_books.json',
]
def setUp(self):
self.user = User.objects.get(username="johndoe")
self.organiser = User.objects.get(username="janedoe")
self.club = Club.objects.get(pk="1")
self.past_meeting = Meeting.objects.get(pk="2")
self.future_meeting = Meeting.objects.get(pk="3")
self.url = reverse('leave_meeting',
kwargs={'club_url_name': self.club.club_url_name, 'meeting_id': self.future_meeting.id})
def test_url(self):
self.assertEqual(self.url, f'/club/{self.club.club_url_name}/meetings/{self.future_meeting.id}/leave/')
def test_redirect_when_not_logged_in(self):
self.assertFalse(self._is_logged_in())
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_get_leave_meeting_redirects_to_list_of_meetings(self):
"""Test for redirecting user to available_clubs when used get method."""
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
response = self.client.get(reverse('leave_meeting', kwargs={'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id}))
redirect_url = reverse('meeting_list', kwargs={'club_url_name': self.club.club_url_name})
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
def test_member_successful_leave_meeting(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MEMBER)
self.future_meeting.join_member(self.user)
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count + 1)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), 'You have left the meeting.')
def test_member_leave_meeting_not_in(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MEMBER)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
def test_member_cannot_leave_meeting_in_past(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MEMBER)
self.assertTrue(self.past_meeting.get_meeting_time() < timezone.now())
before_count = self.past_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.past_meeting.id
}))
after_count = self.past_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertTrue(self.past_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
def test_member_cannot_leave_invalid_meeting(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MEMBER)
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': 0
}))
with self.assertRaises(ObjectDoesNotExist):
Meeting.objects.get(id=0).exists()
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "Error, meeting not found.")
def test_mod_successful_leave_meeting(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MODERATOR)
self.future_meeting.join_member(self.user)
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count + 1)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), 'You have left the meeting.')
def test_mod_leave_meeting_not_in(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MODERATOR)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
def test_mod_cannot_leave_meeting_in_past(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MODERATOR)
self.assertTrue(self.past_meeting.get_meeting_time() < timezone.now())
before_count = self.past_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.past_meeting.id
}))
after_count = self.past_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertTrue(self.past_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
def test_mod_cannot_leave_invalid_meeting(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MODERATOR)
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': 0
}))
with self.assertRaises(ObjectDoesNotExist):
Meeting.objects.get(id=0).exists()
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "Error, meeting not found.")
def test_owner_successful_leave_meeting(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.OWNER)
self.future_meeting.join_member(self.user)
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count + 1)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), 'You have left the meeting.')
def test_owner_leave_meeting_not_in(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.OWNER)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertFalse(self.future_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
def test_owner_cannot_leave_meeting_in_past(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.OWNER)
self.assertTrue(self.past_meeting.get_meeting_time() < timezone.now())
before_count = self.past_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.past_meeting.id
}))
after_count = self.past_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertTrue(self.past_meeting.get_members().filter(username=self.user.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
def test_owner_cannot_leave_invalid_meeting(self):
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.OWNER)
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': 0
}))
with self.assertRaises(ObjectDoesNotExist):
Meeting.objects.get(id=0).exists()
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "Error, meeting not found.")
def test_organiser_cannot_leave_meeting(self):
self.client.login(username=self.organiser.username, password='Password123')
self.assertTrue(self._is_logged_in())
ClubMembership.objects.create(user=self.user, club=self.club, membership=ClubMembership.UserRoles.MODERATOR)
self.future_meeting.join_member(self.user)
before_count = self.future_meeting.get_members().count()
response = self.client.post(reverse('leave_meeting', kwargs={
'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id
}))
after_count = self.future_meeting.get_members().count()
self.assertEqual(before_count, after_count)
self.assertTrue(self.future_meeting.get_members().filter(username=self.organiser.username).exists())
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "You cannot leave this meeting.")
| 12,736 | 0 | 432 |
7e62cf267c879d77f5cf234e45ea53d6bdf46597 | 11,200 | py | Python | ime/exp/exp_baseline.py | ParikhKadam/google-research | 00a282388e389e09ce29109eb050491c96cfab85 | [
"Apache-2.0"
] | 2 | 2022-01-21T18:15:34.000Z | 2022-01-25T15:21:34.000Z | ime/exp/exp_baseline.py | ParikhKadam/google-research | 00a282388e389e09ce29109eb050491c96cfab85 | [
"Apache-2.0"
] | 110 | 2021-10-01T18:22:38.000Z | 2021-12-27T22:08:31.000Z | ime/exp/exp_baseline.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 1 | 2022-02-10T10:43:10.000Z | 2022-02-10T10:43:10.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baseline models for time series data."""
import os
import time
import warnings
from data.data_loader import ECL
from exp.exp_basic import ExpBasic
import matplotlib.pyplot as plt
from models.ar_net import ARNet
from models.linear import Linear
from models.lstm import LSTM
import numpy as np
import torch
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from utils.metrics import Metric as metric
from utils.tools import EarlyStopping
warnings.filterwarnings('ignore')
class ExpBaseline(ExpBasic):
"""Baseline experiments for time series data."""
def _get_dataset(self):
"""Function creates dataset based on data name in the parsers.
Returns:
Data: An instant of the dataset created
"""
if self.args.data == 'ECL':
data = ECL(self.args.root_path, self.args.seq_len, self.args.pred_len,
self.args.features, self.args.scale, self.args.num_ts)
else:
raise NotImplementedError
return data
def _build_model(self):
"""Function that creates a model instance based on the model name.
Here we only support LSTM, Linear and ARNet.
Returns:
model: An instance of the model.
"""
if self.args.model == 'LSTM':
model = LSTM(self.args.input_dim, self.args.pred_len, self.args.d_model,
self.args.layers, self.args.dropout, self.device).float()
elif self.args.model == 'Linear':
model = Linear(
self.args.pred_len * self.args.input_dim,
self.args.seq_len,
).float()
elif self.args.model == ' ARNet':
model = ARNet(
n_forecasts=self.args.pred_len * self.args.input_dim,
n_lags=self.args.seq_len,
device=self.device).float()
else:
raise NotImplementedError
# if multiple GPU are to be used parralize model
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_data(self, flag):
"""Function that creats a dataloader basd on flag.
Args:
flag: Flag indicating if we should return training/validation/testing
dataloader
Returns:
data_loader: Dataloader for the required dataset.
"""
args = self.args
if flag == 'test':
shuffle_flag = False
drop_last = True
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.test_x), torch.Tensor(self.data.test_y))
elif flag == 'pred':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.test_x), torch.Tensor(self.data.test_y))
elif flag == 'val':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.valid_x), torch.Tensor(self.data.valid_y))
else:
shuffle_flag = True
drop_last = True
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.train_x), torch.Tensor(self.data.train_y))
print('Data for', flag, 'dataset size', len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_loader
def _select_optimizer(self):
"""Function that returns the optimizer based on learning rate.
Returns:
model_optim: model optimizer
"""
model_optim = optim.Adam(
self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def vali(self, vali_loader, criterion):
"""Validation Function.
Args:
vali_loader: Validation dataloader
criterion: criterion used in for loss function
Returns:
total_loss: average loss
"""
self.model.eval()
total_loss = []
for (batch_x, batch_y) in vali_loader:
pred, true = self._process_one_batch(batch_x, batch_y, validation=True)
loss = criterion(pred.detach().cpu(), true.detach().cpu())
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
"""Training Function.
Args:
setting: Name used to save the model
Returns:
model: Trained model
"""
# Load different datasets
train_loader = self._get_data(flag='train')
vali_loader = self._get_data(flag='val')
test_loader = self._get_data(flag='test')
path = os.path.join(self.args.checkpoints, setting)
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
# Setting optimizer and loss functions
model_optim = self._select_optimizer()
criterion = nn.MSELoss()
all_training_loss = []
all_validation_loss = []
# Training Loop
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
epoch_time = time.time()
for i, (batch_x, batch_y) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
pred, true = self._process_one_batch(batch_x, batch_y)
loss = criterion(pred, true)
train_loss.append(loss.item())
if (i + 1) % 100 == 0:
print('\titers: {0}/{1}, epoch: {2} | loss: {3:.7f}'.format(
i + 1, train_steps, epoch + 1, loss.item()))
speed = (time.time() - time_now) / iter_count
left_time = speed * (
(self.args.train_epochs - epoch) * train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(
speed, left_time))
iter_count = 0
time_now = time.time()
loss.backward()
model_optim.step()
print('Epoch: {} cost time: {}'.format(epoch + 1,
time.time() - epoch_time))
train_loss = np.average(train_loss)
all_training_loss.append(train_loss)
vali_loss = self.vali(vali_loader, criterion)
all_validation_loss.append(vali_loss)
test_loss = self.vali(test_loader, criterion)
print(
'Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}'
.format(epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
# Plotting train and validation loss
if ((epoch + 1) % 5 == 0 and self.args.plot):
check_folder = os.path.isdir(self.args.plot_dir)
# If folder doesn't exist, then create it.
if not check_folder:
os.makedirs(self.args.plot_dir)
plt.figure()
plt.plot(all_training_loss, label='train loss')
plt.plot(all_validation_loss, label='Val loss')
plt.legend()
plt.savefig(self.args.plot_dir + setting + '.png')
plt.show()
plt.close()
# If ran out of patience stop training
if early_stopping.early_stop:
if self.args.plot:
plt.figure()
plt.plot(all_training_loss, label='train loss')
plt.plot(all_validation_loss, label='Val loss')
plt.legend()
plt.savefig(self.args.plot_dir + setting + '.png')
plt.show()
print('Early stopping')
break
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def predict(self, setting, load=False):
"""Prediction Function.
Args:
setting: Name used to be used for prediction
load: whether to load best model
Returns:
mae: Mean absolute error
mse: Mean squared error
rmse: Root mean squared error
mape: Mean absolute percentage error
mspe: Mean squared percentage error
"""
# Create prediction dataset
pred_loader = self._get_data(flag='pred')
# Load best model saved in the checkpoint folder
if load:
path = os.path.join(self.args.checkpoints, setting)
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
# Get model predictions
self.model.eval()
for i, (batch_x, batch_y) in enumerate(pred_loader):
pred, true = self._process_one_batch(batch_x, batch_y, validation=True)
if i == 0:
preds = pred.detach().cpu().numpy()
trues = true.detach().cpu().numpy()
else:
preds = np.concatenate((preds, pred.detach().cpu().numpy()), axis=0)
trues = np.concatenate((trues, true.detach().cpu().numpy()), axis=0)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
# save predictions made by model
folder_path = './results/' + setting + '/'
check_folder = os.path.isdir(folder_path)
if not check_folder:
os.makedirs(folder_path)
np.save(folder_path + 'real_prediction.npy', preds)
# Evaluate the model performance
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}, rmse:{}'.format(mse, mae, rmse))
return mae, mse, rmse, mape, mspe, 0, 0
def _process_one_batch(self, batch_x, batch_y, validation=False):
"""Function to process batch and send it to model and get output.
Args:
batch_x: batch input
batch_y: batch target
validation: flag to determine if this process is done for training or
testing
Returns:
outputs: model outputs
batch_y: batch target
"""
# Reshape input for Linear and ARNet
if (self.model_type == 'Linear' or self.model_type == ' ARNet'):
batch_size, _, _ = batch_x.shape
batch_x = batch_x.reshape(batch_size, -1)
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float().to(self.device)
if (self.model_type == 'Linear' or self.model_type == ' ARNet'):
batch_y = batch_y[:, -self.args.pred_len:, 0]
else:
batch_y = batch_y[:, -self.args.pred_len:, 0].unsqueeze(-1)
if not validation:
if self.model_type == ' ARNet':
outputs = self.model(batch_x, batch_y)
else:
outputs = self.model(batch_x)
else:
if self.model_type == ' ARNet':
outputs = self.model.predict(batch_x)
else:
outputs = self.model(batch_x)
return outputs, batch_y
| 31.460674 | 94 | 0.643929 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baseline models for time series data."""
import os
import time
import warnings
from data.data_loader import ECL
from exp.exp_basic import ExpBasic
import matplotlib.pyplot as plt
from models.ar_net import ARNet
from models.linear import Linear
from models.lstm import LSTM
import numpy as np
import torch
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from utils.metrics import Metric as metric
from utils.tools import EarlyStopping
warnings.filterwarnings('ignore')
class ExpBaseline(ExpBasic):
"""Baseline experiments for time series data."""
def _get_dataset(self):
"""Function creates dataset based on data name in the parsers.
Returns:
Data: An instant of the dataset created
"""
if self.args.data == 'ECL':
data = ECL(self.args.root_path, self.args.seq_len, self.args.pred_len,
self.args.features, self.args.scale, self.args.num_ts)
else:
raise NotImplementedError
return data
def _build_model(self):
"""Function that creates a model instance based on the model name.
Here we only support LSTM, Linear and ARNet.
Returns:
model: An instance of the model.
"""
if self.args.model == 'LSTM':
model = LSTM(self.args.input_dim, self.args.pred_len, self.args.d_model,
self.args.layers, self.args.dropout, self.device).float()
elif self.args.model == 'Linear':
model = Linear(
self.args.pred_len * self.args.input_dim,
self.args.seq_len,
).float()
elif self.args.model == ' ARNet':
model = ARNet(
n_forecasts=self.args.pred_len * self.args.input_dim,
n_lags=self.args.seq_len,
device=self.device).float()
else:
raise NotImplementedError
# if multiple GPU are to be used parralize model
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_data(self, flag):
"""Function that creats a dataloader basd on flag.
Args:
flag: Flag indicating if we should return training/validation/testing
dataloader
Returns:
data_loader: Dataloader for the required dataset.
"""
args = self.args
if flag == 'test':
shuffle_flag = False
drop_last = True
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.test_x), torch.Tensor(self.data.test_y))
elif flag == 'pred':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.test_x), torch.Tensor(self.data.test_y))
elif flag == 'val':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.valid_x), torch.Tensor(self.data.valid_y))
else:
shuffle_flag = True
drop_last = True
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.train_x), torch.Tensor(self.data.train_y))
print('Data for', flag, 'dataset size', len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_loader
def _select_optimizer(self):
"""Function that returns the optimizer based on learning rate.
Returns:
model_optim: model optimizer
"""
model_optim = optim.Adam(
self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def vali(self, vali_loader, criterion):
"""Validation Function.
Args:
vali_loader: Validation dataloader
criterion: criterion used in for loss function
Returns:
total_loss: average loss
"""
self.model.eval()
total_loss = []
for (batch_x, batch_y) in vali_loader:
pred, true = self._process_one_batch(batch_x, batch_y, validation=True)
loss = criterion(pred.detach().cpu(), true.detach().cpu())
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
"""Training Function.
Args:
setting: Name used to save the model
Returns:
model: Trained model
"""
# Load different datasets
train_loader = self._get_data(flag='train')
vali_loader = self._get_data(flag='val')
test_loader = self._get_data(flag='test')
path = os.path.join(self.args.checkpoints, setting)
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
# Setting optimizer and loss functions
model_optim = self._select_optimizer()
criterion = nn.MSELoss()
all_training_loss = []
all_validation_loss = []
# Training Loop
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
epoch_time = time.time()
for i, (batch_x, batch_y) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
pred, true = self._process_one_batch(batch_x, batch_y)
loss = criterion(pred, true)
train_loss.append(loss.item())
if (i + 1) % 100 == 0:
print('\titers: {0}/{1}, epoch: {2} | loss: {3:.7f}'.format(
i + 1, train_steps, epoch + 1, loss.item()))
speed = (time.time() - time_now) / iter_count
left_time = speed * (
(self.args.train_epochs - epoch) * train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(
speed, left_time))
iter_count = 0
time_now = time.time()
loss.backward()
model_optim.step()
print('Epoch: {} cost time: {}'.format(epoch + 1,
time.time() - epoch_time))
train_loss = np.average(train_loss)
all_training_loss.append(train_loss)
vali_loss = self.vali(vali_loader, criterion)
all_validation_loss.append(vali_loss)
test_loss = self.vali(test_loader, criterion)
print(
'Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}'
.format(epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
# Plotting train and validation loss
if ((epoch + 1) % 5 == 0 and self.args.plot):
check_folder = os.path.isdir(self.args.plot_dir)
# If folder doesn't exist, then create it.
if not check_folder:
os.makedirs(self.args.plot_dir)
plt.figure()
plt.plot(all_training_loss, label='train loss')
plt.plot(all_validation_loss, label='Val loss')
plt.legend()
plt.savefig(self.args.plot_dir + setting + '.png')
plt.show()
plt.close()
# If ran out of patience stop training
if early_stopping.early_stop:
if self.args.plot:
plt.figure()
plt.plot(all_training_loss, label='train loss')
plt.plot(all_validation_loss, label='Val loss')
plt.legend()
plt.savefig(self.args.plot_dir + setting + '.png')
plt.show()
print('Early stopping')
break
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def predict(self, setting, load=False):
"""Prediction Function.
Args:
setting: Name used to be used for prediction
load: whether to load best model
Returns:
mae: Mean absolute error
mse: Mean squared error
rmse: Root mean squared error
mape: Mean absolute percentage error
mspe: Mean squared percentage error
"""
# Create prediction dataset
pred_loader = self._get_data(flag='pred')
# Load best model saved in the checkpoint folder
if load:
path = os.path.join(self.args.checkpoints, setting)
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
# Get model predictions
self.model.eval()
for i, (batch_x, batch_y) in enumerate(pred_loader):
pred, true = self._process_one_batch(batch_x, batch_y, validation=True)
if i == 0:
preds = pred.detach().cpu().numpy()
trues = true.detach().cpu().numpy()
else:
preds = np.concatenate((preds, pred.detach().cpu().numpy()), axis=0)
trues = np.concatenate((trues, true.detach().cpu().numpy()), axis=0)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
# save predictions made by model
folder_path = './results/' + setting + '/'
check_folder = os.path.isdir(folder_path)
if not check_folder:
os.makedirs(folder_path)
np.save(folder_path + 'real_prediction.npy', preds)
# Evaluate the model performance
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}, rmse:{}'.format(mse, mae, rmse))
return mae, mse, rmse, mape, mspe, 0, 0
def _process_one_batch(self, batch_x, batch_y, validation=False):
    """Prepare one batch, feed it through the model and return output/target.

    Args:
        batch_x: batch input
        batch_y: batch target
        validation: flag to determine if this process is done for training
          or testing

    Returns:
        outputs: model outputs
        batch_y: batch target
    """
    # NOTE: ' ARNet' (with the leading space) is the exact model-type string
    # used consistently throughout this file.
    flat_input = self.model_type == 'Linear' or self.model_type == ' ARNet'

    # Linear/ARNet consume a flattened (batch, features) tensor.
    if flat_input:
        bsz, _, _ = batch_x.shape
        batch_x = batch_x.reshape(bsz, -1)

    batch_x = batch_x.float().to(self.device)
    batch_y = batch_y.float().to(self.device)

    # Keep only the prediction horizon of the first target feature; the
    # sequence models expect an explicit trailing feature dimension.
    batch_y = batch_y[:, -self.args.pred_len:, 0]
    if not flat_input:
        batch_y = batch_y.unsqueeze(-1)

    if self.model_type == ' ARNet':
        # ARNet takes the target during training (teacher forcing) and
        # exposes a dedicated predict() entry point for evaluation.
        if validation:
            outputs = self.model.predict(batch_x)
        else:
            outputs = self.model(batch_x, batch_y)
    else:
        outputs = self.model(batch_x)
    return outputs, batch_y
| 0 | 0 | 0 |
f8457b3470b0d264832c6274890cf93daeb28863 | 8,295 | py | Python | tests/test_namedfunctionnode.py | davehadley/graci | 8c5b86ce364df32e48bca40a46091021459547fb | [
"MIT"
] | 1 | 2020-07-18T17:53:02.000Z | 2020-07-18T17:53:02.000Z | tests/test_namedfunctionnode.py | davehadley/graci | 8c5b86ce364df32e48bca40a46091021459547fb | [
"MIT"
] | null | null | null | tests/test_namedfunctionnode.py | davehadley/graci | 8c5b86ce364df32e48bca40a46091021459547fb | [
"MIT"
] | 3 | 2020-07-31T16:57:50.000Z | 2020-07-31T16:58:02.000Z | import operator
import tempfile
import unittest
import fungraph
| 31.184211 | 76 | 0.514286 | import operator
import tempfile
import unittest
import fungraph
def _add_xy(x, y):
return x + y
class TestNamedFunctionNode(unittest.TestCase):
    """Unit tests for fungraph named function nodes.

    Covers construction, retrieval/assignment by name, by keyword argument
    and by path, name-clash resolution rules, bulk getall/setall, cached
    computation and repr behaviour.
    """

    def test_constructor(self):
        """Constructing a named node must succeed."""
        # Do not *return* the node: returning a non-None value from a test
        # method is deprecated (and an error in Python 3.11+ unittest).
        self.assertIsNotNone(fungraph.named("name", lambda: None))

    def test_simple_named_graph(self):
        """A named node computes its function and keeps its name."""
        node = fungraph.named("add", operator.add, 1, 2)
        self.assertEqual(node.cachedcompute(), 3)
        self.assertEqual(node.name, "add")

    def test_retrieve_by_name(self):
        """Child nodes are retrievable with node[name]."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named("a", lambda: 1),
            fungraph.named("b", lambda: 2),
        )
        a = node["a"]
        b = node["b"]
        self.assertEqual(a.cachedcompute(), 1)
        self.assertEqual(b.cachedcompute(), 2)
        self.assertEqual(a.name, "a")
        self.assertEqual(b.name, "b")

    def test_set_by_name(self):
        """Assigning node[name] replaces the child; the old name is gone."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named("a", lambda: 1),
            fungraph.named("b", lambda: 2),
        )
        aprime = fungraph.named("aprime", lambda: 3)
        node["a"] = aprime
        self.assertEqual(node.cachedcompute(), 5)
        with self.assertRaises(KeyError):
            node["a"]

    def test_retrieve_by_wrong_name_raises_keyerror(self):
        """Looking up an unknown name raises KeyError."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named("a", lambda: 1),
            fungraph.named("b", lambda: 2),
        )
        with self.assertRaises(KeyError):
            node["c"]

    def test_set_by_wrong_name_raises_keyerror(self):
        """Assigning to an unknown name raises KeyError."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named("a", lambda: 1),
            fungraph.named("b", lambda: 2),
        )
        with self.assertRaises(KeyError):
            node["c"] = fungraph.named("c", lambda: 3)

    def test_mixed_named_unnamed_graph(self):
        """Named children are addressable even among unnamed siblings."""
        node = fungraph.fun(
            operator.add,
            fungraph.fun(lambda: 1),
            fungraph.named("b", lambda: 2),
        )
        b = node["b"]
        self.assertEqual(node.cachedcompute(), 3)
        self.assertEqual(b.cachedcompute(), 2)
        self.assertEqual(b.name, "b")

    def test_get_nameclash_with_named(self):
        """On duplicate names, lookup returns the first match."""
        node = fungraph.fun(
            operator.add,
            fungraph.named("x", lambda: 1),
            fungraph.named("x", lambda: 2),
        )
        x = node["x"]
        # return first found result
        self.assertEqual(node.cachedcompute(), 3)
        self.assertEqual(x.cachedcompute(), 1)
        self.assertEqual(x.name, "x")

    def test_set_nameclash_with_named(self):
        """On duplicate names, assignment replaces the first match."""
        node = fungraph.fun(
            operator.add,
            fungraph.named("x", lambda: 1),
            fungraph.named("x", lambda: 2),
        )
        node["x"] = fungraph.named("x", lambda: 3)
        # set first found result
        self.assertEqual(node.cachedcompute(), 5)

    def test_get_nameclash_with_kwargument(self):
        """Keyword-argument names take precedence over node names on lookup."""
        node = fungraph.fun(
            _add_xy,
            x=fungraph.named("y", lambda: 1),
            y=fungraph.named("x", lambda: 2),
        )
        x = node["x"]
        # prefer arguments over named
        self.assertEqual(node.cachedcompute(), 3)
        self.assertEqual(x.cachedcompute(), 1)
        self.assertEqual(x.name, "y")

    def test_set_nameclash_with_kwargument(self):
        """Keyword-argument names take precedence over node names on set."""
        node = fungraph.fun(
            _add_xy,
            x=fungraph.named("y", lambda: 1),
            y=fungraph.named("x", lambda: 2),
        )
        node["x"] = fungraph.named("z", lambda: 3)
        # prefer arguments over named
        self.assertEqual(node.cachedcompute(), 5)

    def test_get_nameclash_with_kwargument_explicit(self):
        """Name/KeywordArgument wrappers disambiguate clashing keys on lookup."""
        node = fungraph.fun(
            _add_xy,
            x=fungraph.named("y", lambda: 1),
            y=fungraph.named("x", lambda: 2),
        )
        x = node[fungraph.Name("x")]
        y = node[fungraph.KeywordArgument("x")]
        self.assertEqual(x.cachedcompute(), 2)
        self.assertEqual(x.name, "x")
        self.assertEqual(y.cachedcompute(), 1)
        self.assertEqual(y.name, "y")

    def test_set_nameclash_with_kwargument_explicit(self):
        """Name/KeywordArgument wrappers disambiguate clashing keys on set."""
        node = fungraph.fun(
            _add_xy,
            x=fungraph.named("y", lambda: 1),
            y=fungraph.named("x", lambda: 2),
        )
        node[fungraph.Name("x")] = fungraph.named("z", lambda: 3)
        node[fungraph.KeywordArgument("x")] = fungraph.named("w", lambda: 4)
        self.assertEqual(node["x"].cachedcompute(), 4)
        self.assertEqual(node["x"].name, "w")
        self.assertEqual(node["y"].cachedcompute(), 3)
        self.assertEqual(node["y"].name, "z")

    def test_retrieve_by_path(self):
        """Nested nodes are retrievable with slash-separated paths."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named(
                "mul1",
                operator.mul,
                fungraph.named("one", lambda: 1),
                fungraph.named("two", lambda: 2),
            ),
            fungraph.named(
                "mul2",
                operator.mul,
                fungraph.named("three", lambda: 3),
                fungraph.named("four", lambda: 4),
            ),
        )
        one = node["mul1/one"]
        two = node["mul1/two"]
        three = node["mul2/three"]
        four = node["mul2/four"]
        self.assertEqual(one.cachedcompute(), 1)
        self.assertEqual(two.cachedcompute(), 2)
        self.assertEqual(three.cachedcompute(), 3)
        self.assertEqual(four.cachedcompute(), 4)

    def test_set_by_path(self):
        """Nested nodes are replaceable with slash-separated paths."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named(
                "mul1",
                operator.mul,
                fungraph.named("one", lambda: 1),
                fungraph.named("two", lambda: 2),
            ),
            fungraph.named(
                "mul2",
                operator.mul,
                fungraph.named("three", lambda: 3),
                fungraph.named("four", lambda: 4),
            ),
        )
        node["mul1/one"] = fungraph.named("five", lambda: 5)
        # NOTE(review): "size" looks like a typo for "six", but the node name
        # is never asserted, so it is preserved as-is.
        node["mul1/two"] = fungraph.named("size", lambda: 6)
        node["mul2/three"] = fungraph.named("seven", lambda: 7)
        node["mul2/four"] = fungraph.named("eight", lambda: 8)
        self.assertEqual(node.cachedcompute(), 5 * 6 + 7 * 8)

    def test_get_all(self):
        """getall returns every node matching the name, in graph order."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named(
                "p1",
                operator.mul,
                fungraph.named("a", lambda: 1),
                fungraph.named("b", lambda: 2),
            ),
            fungraph.named(
                "p2",
                operator.mul,
                fungraph.named("a", lambda: 3),
                fungraph.named("b", lambda: 4),
            ),
        )
        bs = node.getall("b")
        self.assertEqual([b.cachedcompute() for b in bs], [2, 4])

    def test_set_all(self):
        """setall replaces every node matching the name."""
        node = fungraph.named(
            "add",
            operator.add,
            fungraph.named(
                "p1",
                operator.mul,
                fungraph.named("a", lambda: 1),
                fungraph.named("b", lambda: 2),
            ),
            fungraph.named(
                "p2",
                operator.mul,
                fungraph.named("a", lambda: 3),
                fungraph.named("b", lambda: 4),
            ),
        )
        node.setall("b", fungraph.named("c", lambda: 5))
        self.assertEqual(node.cachedcompute(), 1 * 5 + 3 * 5)

    def test_identical_function(self):
        """Identical sub-graphs may share a cache directory without clashing."""
        # TemporaryDirectory cleans up after the test; the original
        # tempfile.mkdtemp() call leaked the directory on disk.
        with tempfile.TemporaryDirectory() as cachedir:
            f = fungraph.named(
                "add",
                operator.add,
                fungraph.named("left", operator.mul, 2, 2),
                fungraph.named("right", operator.mul, 2, 2),
            )
            self.assertEqual(f.cachedcompute(cache=cachedir), 8)

    def test_repr(self):
        """The node's name appears in its string representation."""
        name = "name"
        node = fungraph.named(name, operator.add, 1, 2)
        self.assertTrue(name in str(node))
| 7,644 | 26 | 558 |
f1c938bd4970c0f9e8063c695a4913ce01b9efb1 | 154 | py | Python | p14_test.py | alpatine/project-euler-python | d731d2deebff4bfb812811921f56da7b984652c0 | [
"MIT"
] | null | null | null | p14_test.py | alpatine/project-euler-python | d731d2deebff4bfb812811921f56da7b984652c0 | [
"MIT"
] | null | null | null | p14_test.py | alpatine/project-euler-python | d731d2deebff4bfb812811921f56da7b984652c0 | [
"MIT"
] | null | null | null | from unittest import TestCase
from p14 import p14
| 22 | 46 | 0.746753 | from unittest import TestCase
from p14 import p14
class P14_Test(TestCase):
def test_1_1000000(self):
self.assertEqual(p14(1000000), 837799)
| 51 | 4 | 49 |
bb471817a1b506f19b396bc2784390bfd17e7efb | 5,903 | py | Python | lunavl/sdk/base.py | ddc67cd/lunasdk | 93915256c56059847ed0a75f0a81791c0261f5af | [
"MIT"
] | 2 | 2021-06-23T09:53:56.000Z | 2021-10-03T10:54:45.000Z | lunavl/sdk/base.py | VisionLabs/lunasdk | 540ea29cc5aeb46ca185e6412a8b9d59804f8b39 | [
"MIT"
] | null | null | null | lunavl/sdk/base.py | VisionLabs/lunasdk | 540ea29cc5aeb46ca185e6412a8b9d59804f8b39 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Union, Optional, Tuple, Dict, Any
from FaceEngine import DetectionFloat, HumanLandmark, HumanLandmarks17 # pylint: disable=E0611,E0401
from .image_utils.geometry import LANDMARKS, Point, Rect
class BaseEstimation(ABC):
    """
    Base class for estimation structures.

    Attributes:
        _coreEstimation: core estimation
    """

    __slots__ = ("_coreEstimation",)

    def __init__(self, coreEstimation: Any):
        """
        Init.

        Args:
            coreEstimation: core estimation object wrapped by this structure

        Note: this initializer is required — every subclass in this module
        calls ``super().__init__(...)`` with one argument, which would raise
        TypeError against the default ``object.__init__``.
        """
        self._coreEstimation = coreEstimation

    @property
    def coreEstimation(self):
        """
        Get core estimation from init
        Returns:
            _coreEstimation
        """
        return self._coreEstimation

    @abstractmethod
    def asDict(self) -> Union[dict, list]:
        """
        Convert to a dict.
        Returns:
            dict from luna api
        """
        pass

    def __repr__(self) -> str:
        """
        Representation.
        Returns:
            str(self.asDict())
        """
        return str(self.asDict())
class Landmarks(BaseEstimation):
    """
    Base class for landmarks

    Attributes:
        _points (Optional[Tuple[Point[float]]]): lazy loaded attributes: core landmarks as point list
    """

    __slots__ = ["_points", "_coreEstimation"]

    def __init__(self, coreLandmarks: LANDMARKS):
        """
        Init

        Args:
            coreLandmarks (LANDMARKS): core landmarks
        """
        super().__init__(coreLandmarks)
        self._points: Optional[Tuple[Point[float], ...]] = None

    @property
    def points(self) -> Tuple[Point[float], ...]:
        """
        Lazy points loader.

        Returns:
            list of points
        """
        if self._points is None:
            self._points = tuple(
                Point.fromVector2(self._coreEstimation[index]) for index in range(len(self._coreEstimation))
            )
        return self._points

    def asDict(self) -> Tuple[Tuple[int, int], ...]:  # type: ignore
        """
        Convert to a tuple of (x, y) integer pairs.

        Returns:
            list to list points
        """
        pointCount = len(self._coreEstimation)
        points = self._coreEstimation
        # BUGFIX: previously emitted (x, x); each pair must be the landmark's
        # (x, y) coordinates, matching LandmarkWithScore.asDict and the
        # declared Tuple[Tuple[int, int], ...] coordinate-pair return type.
        return tuple(((int(points[index].x), int(points[index].y)) for index in range(pointCount)))
class LandmarkWithScore(BaseEstimation):
    """
    A single landmark point paired with its confidence score.
    """

    def __init__(self, landmark: HumanLandmark):  # pylint: disable=C0103
        """
        Init.

        Args:
            landmark: core landmark
        """
        super().__init__(landmark)

    @property
    def point(self) -> Point[float]:
        """
        Landmark coordinates.

        Returns:
            point
        """
        return Point.fromVector2(self._coreEstimation.point)

    @property
    def score(self) -> float:
        """
        Landmark confidence.

        Returns:
            float[0,1]
        """
        return self._coreEstimation.score

    def asDict(self) -> dict:
        """
        Serialize to a dict; coordinates are truncated from float to int.

        Returns:
            dict with keys: score and point
        """
        coords = self.point
        return {"score": self._coreEstimation.score, "point": (int(coords.x), int(coords.y))}

    def __repr__(self) -> str:
        """
        Representation.

        Returns:
            "x = {x}, y = {y}, score = {score}"
        """
        current = self.point
        return "x = {}, y = {}, score = {}".format(current.x, current.y, self.score)
class LandmarksWithScore(BaseEstimation):
    """
    Base class for landmarks that carry per-point confidence scores.

    Attributes:
        _points (Optional[Tuple[LandmarkWithScore]]): lazily built wrappers
            around the core landmarks
    """

    __slots__ = ["_points", "_coreEstimation"]

    def __init__(self, coreLandmarks: HumanLandmarks17):
        """
        Init.

        Args:
            coreLandmarks (LANDMARKS): core landmarks
        """
        super().__init__(coreLandmarks)
        self._points: Optional[Tuple[LandmarkWithScore, ...]] = None

    @property
    def points(self) -> Tuple[LandmarkWithScore, ...]:
        """
        Wrapped landmark points, built on first access and then cached.

        Returns:
            list of points
        """
        if self._points is None:
            wrapped = [
                LandmarkWithScore(self._coreEstimation[idx])
                for idx in range(len(self._coreEstimation))
            ]
            self._points = tuple(wrapped)
        return self._points

    def asDict(self) -> Tuple[dict, ...]:  # type: ignore
        """
        Serialize every point.

        Returns:
            tuple of per-point dicts
        """
        return tuple(entry.asDict() for entry in self.points)
class BoundingBox(BaseEstimation):
    """
    Detection bounding box, characterized by a rect and a score:

        - rect (Rect[float]): face bounding box
        - score (float): face score (0,1); the detection score measures
          classification confidence and not source image quality.  It may be
          used to pick the most "confident" face of many.
    """

    # pylint: disable=W0235
    def __init__(self, boundingBox: DetectionFloat):
        """
        Init.

        Args:
            boundingBox: core bounding box
        """
        super().__init__(boundingBox)

    @property
    def score(self) -> float:
        """
        Detection confidence.

        Returns:
            number in range [0,1]
        """
        return self._coreEstimation.score

    @property
    def rect(self) -> Rect[float]:
        """
        Bounding rectangle.

        Returns:
            float rect
        """
        return Rect.fromCoreRect(self._coreEstimation.rect)

    def asDict(self) -> Dict[str, Union[Dict[str, float], float]]:
        """
        Serialize to a dict.

        Returns:
            {"rect": self.rect, "score": self.score}
        """
        serialized = {"rect": self.rect.asDict(), "score": self.score}
        return serialized
| 24.698745 | 117 | 0.562765 | from abc import ABC, abstractmethod
from typing import Union, Optional, Tuple, Dict, Any
from FaceEngine import DetectionFloat, HumanLandmark, HumanLandmarks17 # pylint: disable=E0611,E0401
from .image_utils.geometry import LANDMARKS, Point, Rect
class BaseEstimation(ABC):
    """
    Base class for estimation structures.
    Attributes:
        _coreEstimation: core estimation
    """
    __slots__ = ("_coreEstimation",)
    def __init__(self, coreEstimation: Any):
        """
        Init.

        Args:
            coreEstimation: core estimation object to wrap
        """
        self._coreEstimation = coreEstimation
    @property
    def coreEstimation(self):
        """
        Get core estimation from init
        Returns:
            _coreEstimation
        """
        return self._coreEstimation
    @abstractmethod
    def asDict(self) -> Union[dict, list]:
        """
        Convert to a dict.
        Returns:
            dict from luna api
        """
        pass
    def __repr__(self) -> str:
        """
        Representation.
        Returns:
            str(self.asDict())
        """
        return str(self.asDict())
class Landmarks(BaseEstimation):
    """
    Base class for landmarks

    Attributes:
        _points (Optional[Tuple[Point[float]]]): lazy loaded attributes: core landmarks as point list
    """

    __slots__ = ["_points", "_coreEstimation"]

    def __init__(self, coreLandmarks: LANDMARKS):
        """
        Init

        Args:
            coreLandmarks (LANDMARKS): core landmarks
        """
        super().__init__(coreLandmarks)
        self._points: Optional[Tuple[Point[float], ...]] = None

    @property
    def points(self) -> Tuple[Point[float], ...]:
        """
        Lazy points loader.

        Returns:
            list of points
        """
        if self._points is None:
            self._points = tuple(
                Point.fromVector2(self._coreEstimation[index]) for index in range(len(self._coreEstimation))
            )
        return self._points

    def asDict(self) -> Tuple[Tuple[int, int], ...]:  # type: ignore
        """
        Convert to a tuple of (x, y) integer pairs.

        Returns:
            list to list points
        """
        pointCount = len(self._coreEstimation)
        points = self._coreEstimation
        # BUGFIX: previously emitted (x, x); each pair must be the landmark's
        # (x, y) coordinates, matching LandmarkWithScore.asDict and the
        # declared Tuple[Tuple[int, int], ...] coordinate-pair return type.
        return tuple(((int(points[index].x), int(points[index].y)) for index in range(pointCount)))
class LandmarkWithScore(BaseEstimation):
    """
    Point with score.
    """
    def __init__(self, landmark: HumanLandmark):  # pylint: disable=C0103
        """
        Init
        Args:
            landmark: core landmark
        """
        super().__init__(landmark)
    @property
    def point(self) -> Point[float]:
        """
        Coordinate of landmark
        Returns:
            point
        """
        return Point.fromVector2(self._coreEstimation.point)
    @property
    def score(self) -> float:
        """
        Landmark score
        Returns:
            float[0,1]
        """
        return self._coreEstimation.score
    def asDict(self) -> dict:
        """
        Convert point to list (json), coordinates will be cast from float to int
        Returns:
            dict with keys: score and point
        """
        # int() truncates toward zero; fractional pixel positions are dropped
        return {"score": self._coreEstimation.score, "point": (int(self.point.x), int(self.point.y))}
    def __repr__(self) -> str:
        """
        Representation.
        Returns:
            "x = {self.point.x}, y = {self.point.y}, score = {self.score}"
        """
        return "x = {}, y = {}, score = {}".format(self.point.x, self.point.y, self.score)
class LandmarksWithScore(BaseEstimation):
    """
    Base class for landmarks with score
    Attributes:
        _points (Optional[Tuple[Point[float]]]): lazy load attributes, converted to point list core landmarks
    """
    __slots__ = ["_points", "_coreEstimation"]
    def __init__(self, coreLandmarks: HumanLandmarks17):
        """
        Init
        Args:
            coreLandmarks (LANDMARKS): core landmarks
        """
        super().__init__(coreLandmarks)
        self._points: Optional[Tuple[LandmarkWithScore, ...]] = None
    @property
    def points(self) -> Tuple[LandmarkWithScore, ...]:
        """
        Lazy load of points.
        Returns:
            list of points
        """
        # Wrappers are built once on first access, then cached in _points.
        if self._points is None:
            self._points = tuple(
                LandmarkWithScore(self._coreEstimation[index]) for index in range(len(self._coreEstimation))
            )
        return self._points
    def asDict(self) -> Tuple[dict, ...]:  # type: ignore
        """
        Convert to dict
        Returns:
            list to list points
        """
        return tuple(point.asDict() for point in self.points)
class BoundingBox(BaseEstimation):
    """
    Detection bounding box, it is characterized of rect and score:

        - rect (Rect[float]): face bounding box
        - score (float): face score (0,1), detection score is the measure of classification confidence
          and not the source image quality. It may be used topick the most "*confident*" face of many.
    """
    # pylint: disable=W0235
    def __init__(self, boundingBox: DetectionFloat):
        """
        Init.
        Args:
            boundingBox: core bounding box
        """
        super().__init__(boundingBox)
    @property
    def score(self) -> float:
        """
        Get score
        Returns:
            number in range [0,1]
        """
        return self._coreEstimation.score
    @property
    def rect(self) -> Rect[float]:
        """
        Get rect.
        Returns:
            float rect
        """
        # A fresh Rect wrapper is built on every access (not cached).
        return Rect.fromCoreRect(self._coreEstimation.rect)
    def asDict(self) -> Dict[str, Union[Dict[str, float], float]]:
        """
        Convert to dict.
        Returns:
            {"rect": self.rect, "score": self.score}
        """
        return {"rect": self.rect.asDict(), "score": self.score}
| 65 | 0 | 27 |
993be2bd739c3b010465fbebfbce9601483b8336 | 379 | py | Python | django/gsmap/migrations/0002_auto_20200130_1554.py | n0rdlicht/spatial-data-package-platform | 97659a5f5e3df1ee78c31a3d0cee7bcab0c34c22 | [
"MIT"
] | 14 | 2020-11-26T11:20:55.000Z | 2022-03-02T15:48:51.000Z | django/gsmap/migrations/0002_auto_20200130_1554.py | n0rdlicht/spatial-data-package-platform | 97659a5f5e3df1ee78c31a3d0cee7bcab0c34c22 | [
"MIT"
] | 328 | 2020-11-26T16:01:06.000Z | 2022-03-28T03:15:07.000Z | django/gsmap/migrations/0002_auto_20200130_1554.py | n0rdlicht/spatial-data-package-platform | 97659a5f5e3df1ee78c31a3d0cee7bcab0c34c22 | [
"MIT"
] | 2 | 2020-12-01T15:08:23.000Z | 2020-12-22T14:06:30.000Z | # Generated by Django 3.0.2 on 2020-01-30 15:54
from django.db import migrations
| 21.055556 | 84 | 0.604222 | # Generated by Django 3.0.2 on 2020-01-30 15:54
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set Meta options on Municipality."""

    dependencies = [
        ('gsmap', '0001_initial'),
    ]

    operations = [
        # Orders municipalities by name and sets the correct plural form;
        # AlterModelOptions only touches migration state, not the schema.
        migrations.AlterModelOptions(
            name='municipality',
            options={'ordering': ['name'], 'verbose_name_plural': 'municipalities'},
        ),
    ]
| 0 | 273 | 23 |
d4ed18184848db58f396ccbc37e82a0b31ee38ba | 418 | py | Python | test_work/tree_views/core/migrations/0006_alter_workselect_name.py | Netromnik/python | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | [
"Apache-2.0"
] | null | null | null | test_work/tree_views/core/migrations/0006_alter_workselect_name.py | Netromnik/python | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | [
"Apache-2.0"
] | null | null | null | test_work/tree_views/core/migrations/0006_alter_workselect_name.py | Netromnik/python | 630a9df63b1cade9af38de07bb9cd0c3b8694c93 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2 on 2021-04-28 00:01
from django.db import migrations, models
| 22 | 78 | 0.617225 | # Generated by Django 3.2 on 2021-04-28 00:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine WorkSelect.name."""

    dependencies = [
        ('core', '0005_customuser_name_user_full'),
    ]

    operations = [
        # Redefines WorkSelect.name as an indexed, unique CharField(max 12).
        migrations.AlterField(
            model_name='workselect',
            name='name',
            field=models.CharField(db_index=True, max_length=12, unique=True),
        ),
    ]
| 0 | 306 | 23 |
e0c1627c336f2f44acd50c4b12b19e39d59ca696 | 754 | py | Python | monero_glue/messages/NEMImportanceTransfer.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 20 | 2018-04-05T22:06:10.000Z | 2021-09-18T10:43:44.000Z | monero_glue/messages/NEMImportanceTransfer.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | null | null | null | monero_glue/messages/NEMImportanceTransfer.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 5 | 2018-08-06T15:06:04.000Z | 2021-07-16T01:58:43.000Z | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
EnumTypeNEMImportanceTransferMode = Literal[1, 2]
except ImportError:
pass
| 25.133333 | 76 | 0.611406 | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
EnumTypeNEMImportanceTransferMode = Literal[1, 2]
except ImportError:
pass
class NEMImportanceTransfer(p.MessageType):
    """Protobuf message generated by pb2py — do not edit by hand.

    Fields:
        mode: importance-transfer mode, one of the enum values 1 or 2
        public_key: public key (raw bytes)
    """
    # NOTE: pb2py emits `= None` defaults without Optional[...] annotations.
    def __init__(
        self,
        mode: EnumTypeNEMImportanceTransferMode = None,
        public_key: bytes = None,
    ) -> None:
        self.mode = mode
        self.public_key = public_key
    @classmethod
    def get_fields(cls) -> Dict:
        # Wire schema: field number -> (field name, protobuf type, flags).
        return {
            1: ('mode', p.EnumType("NEMImportanceTransferMode", (1, 2)), 0),
            2: ('public_key', p.BytesType, 0),
        }
| 331 | 93 | 23 |
19dda940693b8c17b1451efeb8113c0b16bdb456 | 10,206 | py | Python | game/modelgen.py | tcdude/pyweek28 | 7397f54f0f768f1941f489053c380b580c1eaf38 | [
"MIT"
] | null | null | null | game/modelgen.py | tcdude/pyweek28 | 7397f54f0f768f1941f489053c380b580c1eaf38 | [
"MIT"
] | null | null | null | game/modelgen.py | tcdude/pyweek28 | 7397f54f0f768f1941f489053c380b580c1eaf38 | [
"MIT"
] | 1 | 2020-03-30T03:21:18.000Z | 2020-03-30T03:21:18.000Z | """
Provides trees/bushes/etc.
"""
__copyright__ = """
MIT License
Copyright (c) 2019 tcdude
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
from math import ceil
from math import pi
import numpy as np
from panda3d import core
from .shapegen import shape
from . import common
sg = shape.ShapeGen()
# noinspection PyArgumentList
# noinspection PyArgumentList
# noinspection PyArgumentList
# noinspection PyArgumentList
# noinspection PyArgumentList
| 31.021277 | 90 | 0.579463 | """
Provides trees/bushes/etc.
"""
__copyright__ = """
MIT License
Copyright (c) 2019 tcdude
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
from math import ceil
from math import pi
import numpy as np
from panda3d import core
from .shapegen import shape
from . import common
sg = shape.ShapeGen()
# noinspection PyArgumentList
def fir_tree(
avg_height=50,
avg_segments=6,
avg_radius=1.2,
offset=0.4,
tex=None
):
height = random.uniform(
offset * avg_height,
(1.0 - offset + 1) * avg_height
)
segments = int(ceil(avg_segments / avg_height * height))
trunk_radius = avg_radius / avg_height * height
trunk_color = common.FIR_TRUNK_START
trunk_color += common.FIR_TRUNK_DELTA * random.random()
bbc = common.FIR_BRANCH_START + common.FIR_BRANCH_DELTA * random.random()
branch_colors = [
bbc + common.FIR_BRANCH_DELTA * (random.random() - 0.5) * 0.1
for _ in range(segments)
]
node_path = core.NodePath('fir_tree')
trunk_node_path = node_path.attach_new_node(
sg.cone(
origin=core.Vec3(0),
direction=core.Vec3.up(),
radius=(trunk_radius, 0),
polygon=12,
length=height,
origin_offset=0.05,
color=trunk_color,
nac=False,
name='fir_tree/trunk'
)
)
trunk_node_path.set_hpr(random.uniform(0, 360), random.uniform(0, 5), 0)
if tex is not None:
trunk_node_path.set_texture(tex, 1)
seg_height = height * 0.8 / segments
seg_start = height * 0.2
for i, bc in enumerate(branch_colors):
radius = (
random.uniform(
(segments - i) * trunk_radius * 0.8,
(segments - i) * trunk_radius * 1.0
),
random.uniform(
(segments - i - 1) * trunk_radius * 0.6,
(segments - i - 1) * trunk_radius * 0.8
) if i < segments - 1 else 0,
)
br_node_path = node_path.attach_new_node(
sg.cone(
origin=core.Vec3(0),
direction=core.Vec3.up(),
radius=radius,
polygon=16,
length=seg_height,
color=bc,
nac=False,
name=f'fir_tree/branch{i}'
)
)
br_node_path.set_z(trunk_node_path, seg_start + seg_height * 0.5 + i * seg_height)
br_node_path.set_hpr(random.uniform(0, 360), random.uniform(0, 5), 0)
return node_path, trunk_radius
# noinspection PyArgumentList
def leaf_tree(
        avg_height=25,
        avg_radius=0.8,
        offset=0.6,
        tex=None
):
    """Build a randomized leaf (deciduous) tree as a Panda3D node.

    Args:
        avg_height: mean tree height used as the scaling reference.
        avg_radius: trunk radius at avg_height.
        offset: relative lower bound of the height distribution.
        tex: optional texture applied to the trunk.

    Returns:
        (node_path, trunk_radius): the assembled tree and its trunk radius.
    """
    height = random.uniform(
        offset * avg_height,
        (1.0 - offset + 1) * avg_height
    )
    trunk_radius = avg_radius / avg_height * height
    trunk_color = common.LEAF_TRUNK_START
    trunk_color += common.LEAF_TRUNK_DELTA * random.random()
    # Pick a base foliage palette; NOTE(review): branch_delta is unpacked
    # but never used below.
    branch_color, branch_delta = random.choice(common.LEAF_BRANCH_COLORS)
    branch_color2 = branch_color * 0.999
    branch_color += common.LEAF_TRUNK_DELTA * random.random()
    branch_color2 += common.LEAF_TRUNK_DELTA * random.random()
    node_path = core.NodePath('leaf_tree')
    trunk_node_path = node_path.attach_new_node(
        sg.cone(
            origin=core.Vec3(0),
            direction=core.Vec3.up(),
            radius=(trunk_radius, 0),
            polygon=12,
            length=height,
            origin_offset=0.05,
            color=trunk_color,
            nac=False,
            name='leaf_tree/trunk'
        )
    )
    # Random heading plus a small (<=5 deg) lean.
    trunk_node_path.set_hpr(random.uniform(0, 360), random.uniform(0, 5), 0)
    if tex is not None:
        trunk_node_path.set_texture(tex, 1)
    # 1-3 foliage blobs scattered around the upper trunk.
    for i in range(random.randint(1, 3)):
        bb = core.Vec3(
            random.uniform(trunk_radius * 4, height / 4),
            random.uniform(trunk_radius * 4, height / 4),
            random.uniform(height / 3, height * 0.4),
        )
        # NOTE(review): blob name 'fir_tree/branch' looks like a copy-paste
        # leftover from fir_tree(); kept as-is since node names may be
        # looked up elsewhere.
        br_node_path = node_path.attach_new_node(
            sg.blob(
                origin=core.Vec3(0),
                direction=core.Vec3.up(),
                bounds=bb,
                color=branch_color,
                color2=branch_color2,
                name='fir_tree/branch',
                # seed=np.random.randint(0, 2**31, dtype=np.int32),
                noise_radius=12,
                nac=False
            )
        )
        # Jitter the blob around the treetop, relative to the trunk.
        br_node_path.set_z(trunk_node_path, height - bb.z * random.random())
        br_node_path.set_x(trunk_node_path, bb.x * (random.random() - 0.5))
        br_node_path.set_y(trunk_node_path, bb.y * (random.random() - 0.5))
        br_node_path.set_hpr(random.uniform(0, 360), random.uniform(0, 90), 0)
    return node_path, trunk_radius
# noinspection PyArgumentList
def obelisk(r=(2.5, 1.8)):
    """Build a four-sided obelisk: a tapered shaft plus a pyramid tip.

    Args:
        r: (base_radius, top_radius) of the 15-unit shaft; the tip starts
           at the shaft's top radius and tapers to a point.

    Returns:
        NodePath holding shaft and tip.
    """
    node_path = core.NodePath('obelisk')
    # Shaft: square cross-section (polygon=4), flat-shaded, height 15.
    base = node_path.attach_new_node(
        sg.cone(
            origin=core.Vec3(0),
            direction=core.Vec3.up(),
            radius=r,
            polygon=4,
            length=15.0,
            smooth=False,
            capsule=False,
            origin_offset=0,
            color=core.Vec4(core.Vec3(0.2), 1),
            nac=False
        )
    )
    # Tip: pyramid matching the shaft's top radius, tapering to a point.
    top = node_path.attach_new_node(
        sg.cone(
            origin=core.Vec3(0),
            direction=core.Vec3.up(),
            radius=(r[1], 0),
            polygon=4,
            length=1.5,
            smooth=False,
            capsule=False,
            origin_offset=0,
            color=core.Vec4(core.Vec3(0.2), 1),
            nac=False
        )
    )
    # Place the tip on top of the 15-unit shaft.
    top.set_z(15)
    # mat = core.Material()
    # mat.set_emission(core.Vec4(.35, 1.0, 0.52, 0.1))
    # mat.set_shininess(5.0)
    # node_path.set_material(mat)
    return node_path
# noinspection PyArgumentList
def stone(xy):
    """Build a single randomized stone blob.

    Args:
        xy: 2D bounds (x, y extents) of the stone; the z extent is drawn
            uniformly around the smaller of the two.

    Returns:
        NodePath of the blob itself (its parent 'stone' NodePath is
        discarded; callers reparent the returned node).
    """
    node_path = core.NodePath('stone')
    # Two independent random tints around the base stone color.
    base = common.STONE_START
    color = base + common.STONE_DELTA * random.random()
    color2 = base + common.STONE_DELTA * random.random()
    bb = core.Vec3(
        xy,
        random.uniform(min(xy) * 0.9, min(xy) * 1.1)
    )
    # NOTE(review): blob name 'fir_tree/branch' looks like a copy-paste
    # leftover; kept as-is since node names may be looked up elsewhere.
    br_node_path = node_path.attach_new_node(
        sg.blob(
            origin=core.Vec3(0),
            direction=core.Vec3.up(),
            bounds=bb,
            color=color,
            color2=color2,
            name='fir_tree/branch',
            # seed=random.randint(0, 2 ** 32 - 1),
            noise_radius=200,
            nac=False
        )
    )
    return br_node_path
# noinspection PyArgumentList
def three_rings():
    """Build the three-rings puzzle structure: two obelisks plus three
    concentric rings, each carrying six billboard symbol cards.

    Returns:
        (node_path, rings, symbol_cards): the assembled structure, the list
        of ring NodePaths, and per-ring lists of symbol-card NodePaths.
    """
    node_path = core.NodePath('three_rings')
    o1 = obelisk((1.5, 0.8))
    o2 = obelisk((1.5, 0.8))
    o1.reparent_to(node_path)
    o2.reparent_to(node_path)
    o1.set_pos(common.TR_O1_OFFSET)
    o2.set_pos(common.TR_O2_OFFSET)
    # NOTE(review): shuffles the shared module-level list in `common`
    # in place — ring/color pairing changes globally on every call.
    random.shuffle(common.TR_COLORS)
    rings = []
    symbol_cards = []
    for r, h, c in zip(common.TR_RADII, common.TR_HEIGHTS, common.TR_COLORS):
        rings.append(node_path.attach_new_node(
            sg.cone(
                origin=core.Vec3(0),
                direction=core.Vec3.up(),
                radius=r,
                polygon=common.TR_POLYGON,
                length=h,
                color=c,
                nac=False
            )
        )
        )
        symbol_cards.append([])
        # Six symbol cards spaced 60 degrees apart around the ring rim.
        for i in range(6):
            r_node = rings[-1].attach_new_node('rot')
            # NOTE(review): `c` is rebound here, shadowing the ring color
            # from the outer loop (harmless — color already consumed above).
            c = core.CardMaker(f'symbol {len(rings)}/{i}')
            c.set_frame(core.Vec4(-1, 1, -1, 1))
            symbol_cards[-1].append(
                r_node.attach_new_node(c.generate())
            )
            r_node.set_h(i * 60)
            r_node.set_transparency(core.TransparencyAttrib.M_alpha)
            r_node.set_alpha_scale(common.TR_SYM_ALPHA)
            # Card sits just inside the ring radius, on top of the ring,
            # and always rotates to face the camera about its axis.
            symbol_cards[-1][-1].set_y(r - 0.5)
            symbol_cards[-1][-1].set_z(h)
            symbol_cards[-1][-1].set_billboard_axis()
    return node_path, rings, symbol_cards
def lever(i):
    """Build a lever: a base box with a thin cone handle.

    Args:
        i: index into common.TR_COLORS selecting the lever's color theme
           (box darkened to 0.9x, handle brightened to 1.1x).

    Returns:
        (node_path, lev): the whole lever and the handle NodePath (returned
        separately so the handle can be animated independently).
    """
    node_path = core.NodePath('lever')
    box = node_path.attach_new_node(
        sg.box(
            origin=core.Vec3(0),
            direction=core.Vec3.up(),
            bounds=common.TR_LEVER_BOX_BB,
            color=common.TR_COLORS[i] * 0.9,
            nac=False,
            name='lever_box'
        )
    )
    # Sink the box by its own z-extent so its top sits at the origin.
    box.set_z(box, -common.TR_LEVER_BOX_BB[2])
    lev = node_path.attach_new_node(
        sg.cone(
            origin=core.Vec3(0),
            direction=core.Vec3.up(),
            radius=0.12,
            polygon=12,
            length=1.8,
            origin_offset=0.2,
            color=common.TR_COLORS[i] * 1.1,
            nac=False,
            name='lever'
        )
    )
    lev.set_z(0.15)
    # lev.set_r(90)
    return node_path, lev
def stone_circle(r, num_stones):
    """Arrange randomized stones along a 300-degree arc of radius r.

    Args:
        r: circle radius.
        num_stones: number of stones to place.

    Returns:
        NodePath containing all placed stones.
    """
    node_path = core.NodePath('stone_circle')
    # Helper rig: rotate `rot` and read back the world-space position of
    # the child `d` sitting at distance r — avoids manual trigonometry.
    rot = node_path.attach_new_node('rot')
    d = rot.attach_new_node('d')
    d.set_y(r)
    # Stone footprint derived from the circumference; NOTE(review): the
    # `/ 2 * 3` factor (1.5x circumference) looks like a hand-tuned fudge.
    c = 2 * pi * r / 2 * 3
    length = c / num_stones / 2
    for i in range(num_stones):
        # Sweep from -30 deg across 300 deg, leaving a 60-degree gap.
        rot.set_h(300 / num_stones * i - 30)
        p = d.get_pos(node_path)
        s = stone(core.Vec2(length / 2, length))
        s.reparent_to(node_path)
        s.set_pos(p)
    return node_path
| 8,589 | 0 | 156 |
0fe6b79d49e1676cecbf82bc1f9272ef6a82ff96 | 187 | py | Python | apps/utils/models/managers/managers.py | jorgesaw/oclock | 2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5 | [
"MIT"
] | null | null | null | apps/utils/models/managers/managers.py | jorgesaw/oclock | 2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5 | [
"MIT"
] | null | null | null | apps/utils/models/managers/managers.py | jorgesaw/oclock | 2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5 | [
"MIT"
] | null | null | null | """Managers."""
# Django
from django.db import models
class ActiveManager(models.Manager):
"""Active manager."""
| 15.583333 | 39 | 0.647059 | """Managers."""
# Django
from django.db import models
class ActiveManager(models.Manager):
    """Django model manager exposing a filter for rows flagged as active."""
    def active(self):
        """Return a queryset of records whose ``active`` field is True."""
        return self.filter(active=True)
| 36 | 0 | 31 |
db03d187d50e357b40326f457a695e7364dc92a2 | 13,258 | py | Python | Statistical/Dereverb_100files_filelist_mirevalcheck.py | TeunKrikke/dereverb | 21913046b3a5a28664f4cb0a3af1258f08d8cbb6 | [
"MIT"
] | 1 | 2022-01-06T12:45:12.000Z | 2022-01-06T12:45:12.000Z | Statistical/Dereverb_100files_filelist_mirevalcheck.py | TeunKrikke/dereverb | 21913046b3a5a28664f4cb0a3af1258f08d8cbb6 | [
"MIT"
] | null | null | null | Statistical/Dereverb_100files_filelist_mirevalcheck.py | TeunKrikke/dereverb | 21913046b3a5a28664f4cb0a3af1258f08d8cbb6 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.signal import fftconvolve
from librosa.core import load
from librosa.core import stft
from librosa.core import istft
from librosa import amplitude_to_db, db_to_amplitude
from librosa.display import specshow
from librosa.output import write_wav
from scipy.signal import butter, lfilter, csd
from scipy.linalg import svd, pinv
import scipy
import scipy.fftpack
from scipy.linalg import toeplitz
from scipy.signal import fftconvolve
from utils import apply_reverb, read_wav
import corpus
import mir_eval
from pypesq import pypesq
import pyroomacoustics as pra
import roomsimove_single
import olafilt
if __name__ == '__main__':
main()
| 35.929539 | 256 | 0.620154 | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.signal import fftconvolve
from librosa.core import load
from librosa.core import stft
from librosa.core import istft
from librosa import amplitude_to_db, db_to_amplitude
from librosa.display import specshow
from librosa.output import write_wav
from scipy.signal import butter, lfilter, csd
from scipy.linalg import svd, pinv
import scipy
import scipy.fftpack
from scipy.linalg import toeplitz
from scipy.signal import fftconvolve
from utils import apply_reverb, read_wav
import corpus
import mir_eval
from pypesq import pypesq
import pyroomacoustics as pra
import roomsimove_single
import olafilt
def load_file(files):
    """Load two wav files at 16 kHz and reflect-pad the shorter signal
    so both come back with equal length.

    `files` is a two-element sequence of paths.  Returns (s1, s2).
    """
    s1, _ = load(files[0], sr=16000)
    s2, _ = load(files[1], sr=16000)
    # s1, s2 = map(read_wav, files)
    gap = len(s1) - len(s2)
    if gap > 0:
        s2 = np.pad(s2, (0, gap), 'reflect')
    else:
        s1 = np.pad(s1, (0, -gap), 'reflect')
    return s1, s2
def do_reverb(s1, s2):
    """Simulate reverberation with pyroomacoustics.

    Builds an 8x8 m footprint extruded to 5 m, places one source playing s1
    and a two-microphone array, then runs the image-source simulation.
    s2 is accepted but currently unused (second source is commented out).
    Returns the simulated pra.Room.
    """
    footprint = np.array([[0,0], [0,8], [8,8], [8,0]]).T  # [x, y] corners
    room = pra.Room.from_corners(footprint)
    room.extrude(5.)
    room.add_source([8.,4.,1.6], signal=s1)
    # room.add_source([2.,4.,1.6], signal=s2)
    # One column per microphone: [[X],[Y],[Z]]
    mic_xyz = np.asarray([[4.75,5.5],[2.,2.],[1.,1]])
    room.add_microphone_array(pra.MicrophoneArray(mic_xyz, room.fs))
    room.simulate()
    return room
def do_stft(s1, s2, room):
    """STFT the two simulated mic signals plus both dry sources.

    Mic signals are truncated to len(s1) before analysis.
    Returns (Y1, Y2, X1, X2): reverberant spectrograms first, then clean.
    """
    n_fft = 2048
    win_length = 1024
    hop_length = n_fft // 8
    mics = room.mic_array.signals
    Y1 = stft(mics[0, :len(s1)], n_fft=n_fft, hop_length=hop_length, win_length=win_length)
    Y2 = stft(mics[1, :len(s1)], n_fft=n_fft, hop_length=hop_length, win_length=win_length)
    X1 = stft(s1, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
    X2 = stft(s2, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
    return Y1, Y2, X1, X2
def do_reverb_oldskool(s1, s2, rt60=0.4):
    """Reverberate s1 through simulated room impulse responses (roomsimove).

    Synthesises two-microphone RIRs for an 8x8x5 m room at the given RT60
    and overlap-add filters s1 with each mic's RIR.  s2 is accepted for
    signature parity with do_reverb() but is not used.

    Returns (mic1_signal, mic2_signal).
    """
    room_dim = [8, 8, 5]     # in meters
    mic_pos1 = [4.75, 2, 1]  # in meters
    mic_pos2 = [2, 2, 1]     # in meters
    mic_positions = [mic_pos1, mic_pos2]
    # Source at [8, 4, 1.6] m, matching do_reverb(). (Removed the unused
    # local `sampling_rate`; roomsimove_single gets no rate argument here.)
    rir = roomsimove_single.do_everything(room_dim, mic_positions, [8, 4, 1.6], rt60)
    data_rev_ch1 = olafilt.olafilt(rir[:, 0], s1)
    data_rev_ch2 = olafilt.olafilt(rir[:, 1], s1)
    return data_rev_ch1, data_rev_ch2
def do_stft_oldskool(s1, s2, m1, m2):
    """STFT the RIR-filtered mic signals (m1, m2) plus both dry sources.

    Mic signals are truncated to len(s1) before analysis.
    Returns (Y1, Y2, X1, X2): reverberant spectrograms first, then clean.
    """
    n_fft = 2048
    win_length = 1024
    hop_length = n_fft // 8

    def _spec(signal):
        # Shared analysis settings for all four transforms.
        return stft(signal, n_fft=n_fft, hop_length=hop_length, win_length=win_length)

    Y1 = _spec(m1[:len(s1)])
    Y2 = _spec(m2[:len(s1)])
    X1 = _spec(s1)
    X2 = _spec(s2)
    return Y1, Y2, X1, X2
def correlation(X1, X2, Y1, Y2):
    """Dereverberate via the classic H1/H2 transfer-function estimators.

    From the auto-/cross-power spectra of the clean spectrogram X1 and the
    reverberant spectrogram Y1, forms H1 = Gxy/Gxx and H2 = Gyy/Gyx,
    applies each to Y1, and resynthesises with the inverse STFT.
    X2 and Y2 are accepted for signature parity with correlation_Hs() but
    are not used.

    Returns (reconstruction_H1, reconstruction_H2), each scaled by 1000
    (empirical gain; the original code does not document the intent).
    """
    # ISTFT parameters; must match the analysis settings in do_stft().
    nfft = 2048
    win = 1024
    hop = int(nfft / 8)
    # Auto- and cross-power spectra.  (Removed the dead computations
    # Gxyx and Gyxy from the original -- they were never used.)
    Gxx = X1 * np.conj(X1)
    Gxy = X1 * np.conj(Y1)
    Gyx = Y1 * np.conj(X1)
    Gyy = Y1 * np.conj(Y1)
    recon_y1_H1 = istft(np.multiply(np.divide(Gxy, Gxx), Y1), hop_length=hop, win_length=win) * 1000
    recon_y1_H2 = istft(np.multiply(np.divide(Gyy, Gyx), Y1), hop_length=hop, win_length=win) * 1000
    return recon_y1_H1, recon_y1_H2
def correlation_Hs(X1, X2, Y1, Y2, s_value=1):
    """Estimate dereverberation filters via a truncated SVD of the stacked
    auto-/cross-power spectra of the clean (X1) and reverberant (Y1)
    spectrograms.

    X2 and Y2 are accepted for signature parity with correlation() but are
    not used.  `s_value` is the cumulative normalised singular-value mass
    at which the SVD is truncated (callers pass e.g. 0.95).

    Returns four time-domain reconstructions, one per candidate filter
    Hs1..Hs4, each scaled by 1000.
    """
    # ISTFT parameters; must match the analysis settings in do_stft().
    nfft=2048
    win = 1024
    hop = int(nfft/8)
    F,T = X1.shape
    # Auto- and cross-power spectra of the clean/reverberant pair.
    Gxx = X1 * np.conj(X1)
    Gxy = X1 * np.conj(Y1)
    Gyx = Y1 * np.conj(X1)
    Gyy = Y1 * np.conj(Y1)
    # Stack the four F x T spectra into one 2F x 2T matrix and decompose.
    temp = np.asarray([[Gxx, Gxy],[Gyx, Gyy]]).reshape(2*F,2*T)
    U, s, V = svd(temp)
    # Cumulative fraction of singular-value mass.
    tmpsum = 0
    summed = []
    for i in range(len(s)):
        tmpsum += s[i]/sum(s)
        summed.append(tmpsum)
    summed = np.asarray(summed)
    # First index where the cumulative mass exceeds the threshold.
    # NOTE(review): with the default s_value=1 the condition is likely
    # never true (cumulative sum tops out at ~1.0), so indexing [0][0]
    # would raise IndexError -- callers always pass a value < 1.
    val_percent = np.where(summed>s_value)[0][0]
    smallU = U[:,:val_percent].reshape(-1, 2*F).T
    smallV = V[:val_percent,:].reshape(-1, 2*T)
    # smallU = U[0:s_value,:].reshape(-1, 2*F).T
    # smallV = V[0:s_value,:].reshape(-1, 2*T)
    # Four candidate filters from the block structure of the truncated
    # factors: top/bottom halves of smallU crossed with the right/left
    # halves of smallV.
    Hs1 = np.matmul(smallU[:F,:],pinv(smallV[:,T:]).T)
    Hs2 = np.matmul(smallU[F:,:],pinv(smallV[:,T:]).T)
    Hs3 = np.matmul(smallU[:F,:],pinv(smallV[:,:T]).T)
    Hs4 = np.matmul(smallU[F:,:],pinv(smallV[:,:T]).T)
    # Apply each pseudo-inverted filter to the reverberant spectrogram and
    # resynthesise; the *1000 gain is empirical (intent not documented).
    recon_y1_H1 = istft(np.multiply(pinv(Hs1).T,Y1), hop_length=hop, win_length=win) * 1000
    recon_y1_H2 = istft(np.multiply(pinv(Hs2).T,Y1), hop_length=hop, win_length=win) * 1000
    recon_y1_H3 = istft(np.multiply(pinv(Hs3).T,Y1), hop_length=hop, win_length=win) * 1000
    recon_y1_H4 = istft(np.multiply(pinv(Hs4).T,Y1), hop_length=hop, win_length=win) * 1000
    return recon_y1_H1, recon_y1_H2, recon_y1_H3, recon_y1_H4
def difference(s1, y1):
    """Score estimate y1 against reference s1.

    Computes BSS-eval metrics via mir_eval, a wideband PESQ score, and a
    hand-rolled bss_eval-style decomposition: the reference is projected
    through a 512-tap least-squares distortion filter and the residual is
    split into spatial / interference / artifact error energies.

    Returns (SAR, SDR, SIR, interference_energy, artifact_energy, PESQ),
    where the first three come from mir_eval.
    """
    # Trim whichever signal is longer; mir_eval and pypesq need matching
    # lengths.  Signals are stacked twice because bss_eval_sources expects
    # (nsrc, nsampl) arrays.
    if len(s1) > len(y1):
        bss = mir_eval.separation.bss_eval_sources(np.vstack((s1[:len(y1)],s1[:len(y1)])), np.vstack((y1,y1)))
        pesq = pypesq(16000, s1[:len(y1)], y1, 'wb')
        s1 = s1[:len(y1)]
    else:
        bss = mir_eval.separation.bss_eval_sources(np.vstack((s1,s1)), np.vstack((y1[:len(s1)],y1[:len(s1)])))
        pesq = pypesq(16000, s1, y1[:len(s1)], 'wb')
        y1 = y1[:len(s1)]
    nsrc = 1            # single reference source
    nsampl = len(s1)
    flen = 512          # distortion-filter length (taps)
    # Zero-pad so the flen-tap linear convolution fits.
    reference_source = np.hstack((s1, np.zeros((flen - 1))))
    estimated_source = np.hstack((y1.reshape((-1,)), np.zeros(flen - 1)))
    # FFT size: next power of two covering the full convolution length.
    n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
    sf = scipy.fftpack.fft(reference_source, n=n_fft)
    sef = scipy.fftpack.fft(estimated_source, n=n_fft)
    G = np.zeros((nsrc * flen, nsrc * flen))
    # Autocorrelation of the reference -> Toeplitz Gram matrix G.
    ssf = sf * np.conj(sf)
    ssf = np.real(scipy.fftpack.ifft(ssf))
    ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])), r=ssf[:flen])
    G = ss
    D = np.zeros(nsrc * flen)
    # Cross-correlation of reference and estimate -> right-hand side D.
    ssef = sf * np.conj(sef)
    ssef = np.real(scipy.fftpack.ifft(ssef))
    D = np.hstack((ssef[0], ssef[-1:-flen:-1]))
    # Least-squares distortion filter C; fall back to lstsq when G is
    # singular.  NOTE(review): np.linalg.linalg.LinAlgError is a legacy
    # alias of np.linalg.LinAlgError.
    try:
        C = np.linalg.solve(G, D).reshape(flen, order='F')
    except np.linalg.linalg.LinAlgError:
        C = np.linalg.lstsq(G, D)[0].reshape(flen, order='F')
    # Filtering
    sproj = np.zeros(nsampl + flen - 1)
    sproj += fftconvolve(C, reference_source)[:nsampl + flen - 1]
    e_spat = sproj - reference_source
    # interference
    # NOTE(review): by construction this is identically zero
    # (sproj - reference_source - e_spat == 0); confirm intent.
    e_interf = sproj - reference_source - e_spat
    # artifacts
    e_artif = -reference_source - e_spat - e_interf
    e_artif[:nsampl] += estimated_source[:nsampl]
    s_filt = reference_source + e_spat
    sdr = 10 * np.log10(np.sum(reference_source**2)/ np.sum((e_interf + e_spat + e_artif)**2))
    # sir = np.sum(s_filt**2)/ np.sum(e_interf**2)
    snr = 10 * np.log10(np.sum((reference_source + e_interf)**2) / np.sum((e_spat)**2))
    sar = 10 * np.log10(np.sum((s_filt + e_interf)**2)/ np.sum(e_artif**2))
    print("SAR: "+str(bss[2][0]) + ", SAR: " + str(sar) + ", SNR: " + str(snr) + ", SDR: " + str(sdr) + ", SDR: " + str(bss[0][0]) + ", interf: " + str(np.sum(e_interf**2)) + ", artif: " +str(np.sum((e_artif)**2)) + ", spat: " + str(np.sum((e_spat)**2)) )
    return bss[2][0], bss[0][0], bss[1][0], np.sum(e_interf**2), np.sum((e_artif)**2), pesq
def difference_H(s, H1, H2):
    """Score both reconstructions H1 and H2 against the clean reference s.

    NOTE(review): difference() returns (..., interf, artif, ...) but the
    names here unpack them as artif then interf -- possible label swap;
    positional behaviour is preserved unchanged.

    Returns a 12-tuple: (SAR_h1, SDR_h1, SIR_h1, SAR_h2, SDR_h2, SIR_h2,
    artif_h1, artif_h2, interf_h1, interf_h2, pesq_h1, pesq_h2).
    """
    SAR_h1, SDR_h1, SIR_h1, artif_h1, interf_h1, pesq_h1 = difference(s, H1)
    SAR_h2, SDR_h2, SIR_h2, artif_h2, interf_h2, pesq_h2 = difference(s, H2)
    return (SAR_h1, SDR_h1, SIR_h1, SAR_h2, SDR_h2, SIR_h2,
            artif_h1, artif_h2, interf_h1, interf_h2, pesq_h1, pesq_h2)
def difference_Hs(s, H1, H2, H3, H4):
    """Score only the first reconstruction H1 against the reference s.

    H2-H4 are accepted but currently ignored (their scoring was commented
    out in the original).  Returns the 6-tuple from difference(s, H1)
    unchanged.
    """
    sar, sdr, sir, third, fourth, pesq = difference(s, H1)
    return sar, sdr, sir, third, fourth, pesq
def mic_change(M1, M2, switch_mics=False):
    """Return the microphone pair, swapped when `switch_mics` is truthy."""
    return (M2, M1) if switch_mics else (M1, M2)
def experiment(s1,s2, results, area, mic, switch_mics=False,
               go_oldskool=False,rt60=0.4, hs=False, s_value=1):
    """Run one dereverberation trial and append its metrics into `results`.

    s1, s2      : clean source signals (only s1 is scored).
    results     : nested dict from create_results(); mutated in place.
    area, mic   : keys selecting where in `results` the metrics go
                  (metrics land under `mic + "_h1"` / `mic + "_h2"`).
    switch_mics : if True, swap the two microphone spectrograms first.
    go_oldskool : use the roomsimove RIR pipeline instead of pyroomacoustics.
    rt60        : reverberation time for the roomsimove pipeline.
    hs          : if True, use the SVD-based correlation_Hs estimator,
                  otherwise the H1/H2 transfer-function estimators.
    s_value     : singular-value mass threshold passed to correlation_Hs.
    """
    # Simulate the room and produce mic/source spectrograms.
    if go_oldskool:
        m1, m2 = do_reverb_oldskool(s1,s2, rt60)
        M1, M2, S1, S2 = do_stft_oldskool(s1,s2,m1, m2)
    else:
        room = do_reverb(s1,s2)
        M1, M2, S1, S2 = do_stft(s1,s2,room)
    if hs:
        M1, M2 = mic_change(M1,M2,switch_mics)
        H1, H2, H3, H4 = correlation_Hs(S1, S2, M1, M2, s_value)
        # Only the H1 reconstruction is scored (see difference_Hs).
        SAR_h1, SDR_h1, SIR_h1, artif_h1, interf_h1, pesq_h1 = difference_Hs(s1, H1, H2, H3, H4)
        results[area][mic+"_h1"]["SAR"].append(SAR_h1)
        results[area][mic+"_h1"]["SDR"].append(SDR_h1)
        results[area][mic+"_h1"]["SIR"].append(SIR_h1)
        results[area][mic+"_h1"]["artif"].append(artif_h1)
        results[area][mic+"_h1"]["interf"].append(interf_h1)
        results[area][mic+"_h1"]["PESQ"].append(pesq_h1)
    else:
        M1, M2 = mic_change(M1,M2,switch_mics)
        H1, H2= correlation(S1, S2, M1, M2)
        SAR_h1, SDR_h1, SIR_h1, SAR_h2, SDR_h2, SIR_h2, artif_h1, artif_h2, interf_h1, interf_h2, pesq_h1, pesq_h2 = difference_H(s1, H1, H2)
        results[area][mic+"_h1"]["SAR"].append(SAR_h1)
        results[area][mic+"_h1"]["SDR"].append(SDR_h1)
        results[area][mic+"_h1"]["SIR"].append(SIR_h1)
        results[area][mic+"_h1"]["artif"].append(artif_h1)
        results[area][mic+"_h1"]["interf"].append(interf_h1)
        results[area][mic+"_h1"]["PESQ"].append(pesq_h1)
        results[area][mic+"_h2"]["SAR"].append(SAR_h2)
        results[area][mic+"_h2"]["SDR"].append(SDR_h2)
        results[area][mic+"_h2"]["SIR"].append(SIR_h2)
        results[area][mic+"_h2"]["artif"].append(artif_h2)
        results[area][mic+"_h2"]["interf"].append(interf_h2)
        results[area][mic+"_h2"]["PESQ"].append(pesq_h2)
def create_results():
    """Build the empty results structure for the experiment grid.

    Layout: results[area][mic_key][metric] -> list, for the four areas
    ("room" and RT60 labels "0.4"/"1.0"/"1.5"), eight mic keys
    (mic1_h1..h4, mic2_h1..h4), and six metrics.

    Replaces 32 near-identical create_subresults() calls with
    comprehensions; every leaf list is a distinct object, as before.
    """
    metrics = ("SAR", "SDR", "SIR", "artif", "interf", "PESQ")
    areas = ("room", "0.4", "1.0", "1.5")
    mic_keys = ["mic%d_h%d" % (m, h) for m in (1, 2) for h in (1, 2, 3, 4)]
    return {
        area: {mic: {metric: [] for metric in metrics} for mic in mic_keys}
        for area in areas
    }
def create_subresults(results, area, mic):
    """Ensure results[area][mic] exists with fresh, empty per-metric lists.

    Mutates and returns `results`; an existing results[area][mic] entry is
    overwritten.
    """
    area_bucket = results.setdefault(area, {})
    area_bucket[mic] = {
        name: [] for name in ("SAR", "SDR", "SIR", "artif", "interf", "PESQ")
    }
    return results
def print_results(results, no_files):
    """Print each metric's total divided by `no_files`, grouped per area
    and per mic key, with banner lines between groups."""
    for area, by_mic in results.items():
        print("|--------------" + area + "---------------|")
        for mic, metrics in by_mic.items():
            print("|--------------" + mic + "---------------|")
            # Same fixed metric order as the original hand-written prints.
            for name in ("SAR", "SDR", "SIR", "artif", "interf", "PESQ"):
                print(np.sum(np.array(metrics[name])) / no_files)
def main():
    """Batch driver: score 10 file pairs from files_v2.csv.

    Each pair is reverberated in the pyroomacoustics room and
    dereverberated with the SVD estimator at 95% singular-value mass
    (hence the "95" banner), then averaged metrics are printed.
    """
    results = create_results()
    print("95")
    with open("files_v2.csv") as f:
        lines = f.readlines()
    s_value = 0.95
    no_files = 10
    for file_nr in range(0,no_files):
        # Pair each line with the next; note the pairs overlap (0-1, 1-2, ...).
        files = []
        s1 = lines[file_nr]
        s2 = lines[file_nr+1]
        # Strip the trailing newline from each path.
        s1 = s1[:-1]
        s2 = s2[:-1]
        files.append(s1)
        files.append(s2)
        s1,s2 = load_file(files)
        experiment(s1,s2, results, "room", "mic1", hs=True, s_value=s_value)
    print_results(results,no_files)
# Script entry point: run the batch dereverberation experiment.
if __name__ == '__main__':
    main()
| 12,155 | 0 | 368 |