| blob_id (stringlengths 40) | language (stringclasses 1) | repo_name (stringlengths 5–133) | path (stringlengths 2–333) | src_encoding (stringclasses 30) | length_bytes (int64 18–5.47M) | score (float64 2.52–5.81) | int_score (int64 3–5) | detected_licenses (listlengths 0–67) | license_type (stringclasses 2) | text (stringlengths 12–5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
e9672fd59354efc95991b6783512354ecd4bc87b
|
Python
|
rhsieh91/sife-net
|
/charades_experiments/old_files/create_single_action_dataset.py
|
UTF-8
| 1,626
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Splits Charades RGBs into single-action samples and copies into a target directory
# Contributor: Samuel Kwong
import argparse
import os
import pandas as pd
import math
import shutil
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--csv_path', type=str) # <relative-path>/Charades_v1_train.csv
parser.add_argument('--input_root', type=str) # directory containing 24FPS RGBs
parser.add_argument('--target_root', type=str) # directory to save single-action samples
args = parser.parse_args()
df = pd.read_csv(args.csv_path)
NUM_FPS = 24
for i, row in df.iterrows():
# for a single sample
        if not isinstance(row['actions'], str):  # NaN when the video has no action annotations
            continue
groups = row['actions'].split(';')
frame_names = sorted(os.listdir(os.path.join(args.input_root, row['id'])))
for j, group in enumerate(groups):
# this will turn into its own sample
action, start, end = group.split()
# copy from start to end
for k in range(math.floor(float(start) * NUM_FPS), math.floor(float(end) * NUM_FPS)):
if k >= len(frame_names):
break
src = os.path.join(args.input_root, row['id'], frame_names[k])
                dst = os.path.join(args.target_root, row['id'] + '-' + str(j).zfill(2))
                os.makedirs(dst, exist_ok=True)
                shutil.copy2(src, dst)
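# e.g. frames for the first action of video id 'XYZ01' (a made-up id for illustration)
# end up in <target_root>/XYZ01-00/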
| true
|
ecf91e7b6a2943c6b0004ab9133a467b3fdbc6c0
|
Python
|
yufanana/ROStutorial
|
/ros_navigation/src/bug_zero.py
|
UTF-8
| 9,288
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# roslaunch turtlebot3_gazebo turtlebot3_stage_4.launch
# rosservice call /gazebo/reset_world
'''
This Bug0 algorithm turns left upon encountering an obstacle.
'''
import rospy
import math
import sys
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point
from geometry_msgs.msg import Twist
from tf import transformations
yaw_ = 0
yaw_error_allowed_ = 5 * (math.pi / 180) # 5 degrees
# Precision of facing/reaching goal
yaw_precision_ = math.pi / 90 # +/- 2 degree allowed
dist_precision_ = 0.1
position_ = Point()
regions_ = None
bug_state_ = 0 # 0 as go to goal, 1 as wall following
state_ = 4 # initialise state as go to goal
state_dict_ = {
0: 'find the wall',
1: 'turn left',
2: 'follow the wall',
3: 'fix yaw',
4: 'go to goal',
5: 'reached goal'
}
def odom_callback(msg):
'''
Updates the robot's current position that is stored in the global variable
'''
global position_, yaw_
# position
position_ = msg.pose.pose.position
# yaw
quaternion = (
msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w)
euler = transformations.euler_from_quaternion(quaternion)
yaw_ = euler[2]
return
def laser_callback(msg):
'''
    Reads the scan and determines where the obstacles are
'''
global regions_
# define directions
regions_ = {
'front': min(min(msg.ranges[0:22]),min(msg.ranges[338:360]), 10),
'fleft': min(min(msg.ranges[23:67]), 10),
'left': min(min(msg.ranges[68:102]), 10),
'right': min(min(msg.ranges[247:291]), 10),
'fright': min(min(msg.ranges[292:337]), 10),
}
# print("front: ",msg.ranges[0])
# print("left: ",msg.ranges[90])
# print("back: ",msg.ranges[180])
# print("right: ",msg.ranges[270])
# print("------")
d = 0.5 # threshold to consider obstacle as far
# determine direction of obstacle(s)
if bug_state_ == 1: # wall following
if regions_['front'] > d and regions_['fleft'] > d and regions_['fright'] > d:
state_description = 'case 1 - nowhere'
change_state(0) # find wall
elif regions_['front'] < d and regions_['fleft'] > d and regions_['fright'] > d:
state_description = 'case 2 - front only'
change_state(1) # turn left
elif regions_['front'] > d and regions_['fleft'] > d and regions_['fright'] < d:
state_description = 'case 3 - fright only'
change_state(2) # follow wall
elif regions_['front'] > d and regions_['fleft'] < d and regions_['fright'] > d:
state_description = 'case 4 - fleft only'
change_state(0) # find wall
elif regions_['front'] < d and regions_['fleft'] > d and regions_['fright'] < d:
state_description = 'case 5 - front and fright'
change_state(1) # turn left
elif regions_['front'] < d and regions_['fleft'] < d and regions_['fright'] > d:
state_description = 'case 6 - front and fleft'
change_state(1) # turn left
elif regions_['front'] < d and regions_['fleft'] < d and regions_['fright'] < d:
state_description = 'case 7 - front and fleft and fright'
change_state(1) # turn left
elif regions_['front'] > d and regions_['fleft'] < d and regions_['fright'] < d:
state_description = 'case 8 - fleft and fright'
change_state(0) # find wall
else:
state_description = 'unknown case'
rospy.loginfo(regions_)
def change_state(state):
global state_, state_dict_
    if state != state_:
print('Bug Zero: [%s] - %s' % (state, state_dict_[state]))
state_ = state # change global state to current state
def find_wall():
msg = Twist()
msg.linear.x = 0.05
msg.angular.z = -0.3
return msg
def turn_left():
'''
Turn left to start going along the wall
'''
cmd = Twist()
cmd.angular.z = 0.3
cmd.linear.x = 0
return cmd
def follow_wall():
'''
Go straight along the wall
'''
cmd = Twist()
cmd.angular.z = 0
cmd.linear.x = 0.1
return cmd
def normalize_angle(angle):
if(math.fabs(angle) > math.pi):
angle = angle - (2 * math.pi * angle) / (math.fabs(angle))
return angle
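# e.g. normalize_angle(1.5 * math.pi) -> -0.5 * math.pi (wraps the angle back into [-pi, pi])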
def fix_yaw(goal):
'''
Rotate the robot to face the goal
'''
global yaw_, yaw_precision_, state_
desired_yaw = math.atan2(goal.y - position_.y, goal.x - position_.x)
err_yaw = normalize_angle(desired_yaw - yaw_)
# rospy.loginfo(err_yaw)
cmd = Twist()
if math.fabs(err_yaw) > yaw_precision_:
cmd.angular.z = 0.3 if err_yaw > 0 else -0.1
# state change conditions
if math.fabs(err_yaw) <= yaw_precision_:
print('Yaw error: [%s]' % err_yaw)
cmd.angular.z = 0
cmd.linear.x = 0
change_state(4) # go to goal
return cmd
def go_to_goal(goal):
'''
Go towards the goal location
'''
global yaw_, yaw_precision_, state_
desired_yaw = math.atan2(goal.y - position_.y, goal.x - position_.x)
err_yaw = desired_yaw - yaw_
err_pos = math.sqrt(pow(goal.y - position_.y, 2) + pow(goal.x - position_.x, 2))
if err_pos > dist_precision_:
cmd = Twist()
cmd.linear.x = 0.1
cmd.angular.z = 0.1 if err_yaw > 0 else -0.1
else:
print('Position error: [%s]' % err_pos)
cmd = Twist()
cmd.linear.x = 0
cmd.angular.z = 0
change_state(5) # done
# state change conditions
if math.fabs(err_yaw) > yaw_precision_:
print('Yaw error: [%s]' % err_yaw)
change_state(3) # fix yaw
return cmd
def done():
cmd = Twist()
cmd.linear.x = 0
cmd.angular.z = 0
return cmd
def main():
global position_, regions_, bug_state_
if len(sys.argv) < 3:
print ("Usage: bug_zero.py x_goal y_goal")
else:
goal = Point()
goal.x = float(sys.argv[1])
goal.y = float(sys.argv[2])
# goal.x = 2
# goal.y = -2
desired_yaw = math.atan2(goal.y - position_.y, goal.x - position_.x)
rospy.init_node('bug0') #init node
sub_odom = rospy.Subscriber('/odom', Odometry, odom_callback)
sub_laser = rospy.Subscriber('/scan', LaserScan, laser_callback)
pub_velocity = rospy.Publisher('/cmd_vel',Twist, queue_size=1)
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
            if regions_ is None:
continue
if bug_state_ == 0: # go to goal
if regions_['front'] < 0.4:
print("Front: ", regions_['front'])
change_state(1) # turn left
bug_state_ = 1
elif bug_state_ == 1: # wall following
desired_yaw = math.atan2(goal.y - position_.y, goal.x - position_.x)
err_yaw = normalize_angle(desired_yaw - yaw_)
d = 0.5
                # if the goal is in front, check whether there are obstacles in front
if math.fabs(err_yaw) < (math.pi / 8) and \
regions_['front'] > d and regions_['fright'] > d and regions_['fleft'] > d:
print("Cleared case 1")
                    # goal is within 22.5 degrees (pi/8) of the heading
                    # and there are no more obstacles in front
change_state(4) # go to goal
bug_state_ = 0
# if goal is fleft, check if there are obstacles in fleft
if err_yaw > 0 and \
math.fabs(err_yaw) > (math.pi / 8) and \
math.fabs(err_yaw) < (math.pi / 2 + math.pi / 8) and \
regions_['left'] > d and regions_['fleft'] > d:
print("Cleared case 2")
                    # goal lies 22.5 to 112.5 degrees to the left
                    # and the obstacle that was on the left is cleared
change_state(4) # go to goal
bug_state_ = 0
# if goal is fright, check if there are obstacles in fright
if err_yaw < 0 and \
math.fabs(err_yaw) > (math.pi / 8) and \
math.fabs(err_yaw) < (math.pi / 2 + math.pi / 8) and \
regions_['right'] > d and regions_['fright'] > d:
print("Cleared case 3")
                    # goal lies 22.5 to 112.5 degrees to the right
                    # and the obstacle that was on the right is cleared
change_state(4) # go to goal
bug_state_ = 0
# publish cmd messages based on the current state
if state_ == 0:
cmd = find_wall()
elif state_ == 1:
cmd = turn_left()
elif state_ == 2:
cmd = follow_wall()
elif state_ == 3:
cmd = fix_yaw(goal)
elif state_ == 4:
cmd = go_to_goal(goal)
elif state_ == 5:
cmd = done()
else:
rospy.logerr('Unknown state!')
pub_velocity.publish(cmd)
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
print("\n Quitting...")
pass
| true
|
9bb20c57a8cae5f5d31e542e2cbaee1c6048001d
|
Python
|
jshreyas/python-serverless-app
|
/testrail.py
|
UTF-8
| 19,062
| 2.515625
| 3
|
[] |
no_license
|
import json
import base64
import time
import math
import datetime
import urllib.request
import urllib.error
import pytz
def get_timestamp():
""" Returns timestamp in desired format """
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
class Testrail(object):
def __init__(self, user, password, base_url="https://velocloud.testrail.com"):
""" Testrail initializer
Args:
user (str): Username to authorize with
password (str): Password
base_url (str): Testrail URL
"""
self.user = user
self.password = password
self.__run_id = None
self.result_map = {
"passed": 1,
"failed": 5
}
self.cases_in_run = {}
if not base_url.endswith('/'):
base_url += '/'
self.__url = base_url + 'index.php?/api/v2/'
self.__project = None
self.__project_id = None
self.__suite = None
self.__suite_id = None
self.project = "Velocloud"
self.suite = "Master"
@property
def project(self):
return self.__project
@project.setter
def project(self, project_name):
self.__project = project_name
if project_name is not None:
self.__project_id = self.__get_project_by_name(project_name)["id"]
else:
self.__project_id = None
@property
def project_id(self):
if self.__project_id:
return self.__project_id
raise EnvironmentError(
"Define the project name as an object attribute like so\n"
"client.project = 'Velocloud'")
@property
def suite(self):
return self.__suite
@suite.setter
def suite(self, suite_name):
self.__suite = suite_name
if suite_name is not None:
self.__suite_id = self.__get_suite_by_name(suite_name)["id"]
else:
self.__suite_id = None
@property
def suite_id(self):
if self.__suite_id:
return self.__suite_id
raise EnvironmentError(
"Define the suite name as an object attribute like so\n"
"client.suite = 'Master'")
@property
def run_id(self):
return self.__run_id
@run_id.setter
def run_id(self, run_id):
self.__run_id = run_id
self.cases_in_run = {
run_id: self.get_cases_in_run(run_id=run_id)
}
def get_cases_in_run(self, run_id):
""" Returns the test cases associated to a test run
Args:
run_id (str): id of the run
Returns:
list: response of the API call of the test cases associated to a run
"""
return self.send_get("get_tests/{run_id}".format(run_id=run_id))
def get_all_cases(self):
""" Returns all test cases from testrail
Returns:
list: list of all test cases
"""
return self.send_get(f"get_cases/{self.project_id}")
def get_milestone_id(self, name):
""" Returns milestone id based on given name
Returns:
integer: Milestone ID
"""
milestone_list = self.send_get(f"get_milestones/{self.project_id}")
if milestone_list:
for elem in milestone_list:
if name in elem['name']:
return elem['id']
def get_cases_based_on_milestone(self, milestone_id):
""" Returns testcase ID list based on milestone
Returns:
list: Testcase list
"""
total_cases = self.send_get(f"get_cases/{self.project_id}&milestone_id={milestone_id}")
case_list = []
for elem in total_cases:
case_list.append(elem['id'])
return case_list
def get_sections(self):
""" Gets all sections
Returns:
list: list of all sections
"""
return self.send_get(f"get_sections/{self.project_id}")
def send_get(self, uri):
""" Send Get
Issues a GET request (read) against the API and returns the result
(as Python dict).
Args:
uri (str): The API method to call including parameters
(e.g. get_case/1)
"""
return self.__send_request('GET', uri, None)
def send_post(self, uri, data):
""" Send POST
Issues a POST request (write) against the API and returns the result
(as Python dict).
Args:
uri (str): The API method to call including parameters
(e.g. add_case/1)
data (dict): The data to submit as part of the request (as
Python dict, strings must be UTF-8 encoded)
"""
return self.__send_request('POST', uri, data)
def __send_request(self, method, uri, data):
url = self.__url + uri
request = urllib.request.Request(url)
if method == 'POST':
request.data = bytes(json.dumps(data), 'utf-8')
auth = str(
base64.b64encode(
bytes('%s:%s' % (self.user, self.password), 'utf-8')
),
'ascii'
).strip()
request.add_header('Authorization', 'Basic %s' % auth)
request.add_header('Content-Type', 'application/json')
e = None
try:
response = urllib.request.urlopen(request).read()
except urllib.error.HTTPError as ex:
response = ex.read()
e = ex
if response:
result = json.loads(response.decode())
else:
result = {}
if e is not None:
if result and 'error' in result:
error = '"' + result['error'] + '"'
else:
error = 'No additional error message received'
raise APIError('TestRail API returned HTTP %s (%s)' %
(e.code, error))
return result
def __get_projects(self):
""" Returns a list of projects """
url = f"get_projects"
return self.send_get(url)
def __get_project_by_name(self, project_name):
""" Searches for an exact match of the project name
Args:
project_name (string): name of the project
Returns:
dict: project
None: if there is no match
"""
projects = self.__get_projects()
for project in projects:
if project_name == project["name"]:
return project
return None
def __get_suites(self, project_id=None):
""" Returns a list of suites for a project
Args:
project_id (str): project id
Returns:
list: list of suites in a project
"""
if not project_id:
project_id = self.project_id
url = f"get_suites/{project_id}"
return self.send_get(url)
def __get_suite_by_name(self, suite_name, project_id=None):
""" Searches for an exact match of the suite name for a given project
Args:
suite_name (string): name of the suite
project_id (string): project id
Returns:
dict: suite
None: if there is no match
"""
if not project_id:
project_id = self.project_id
suites = self.__get_suites(project_id)
for suite in suites:
if suite_name == suite["name"]:
return suite
return None
def get_users(self):
""" Returns users information
Returns:
list: list of users
"""
url = "get_users"
return self.send_get(url)
def get_case(self, case_id):
""" Returns test case information
Args:
case_id (string): test case id
Returns:
dict: case
"""
url = f"get_case/{case_id}"
return self.send_get(url)
def update_case(self, case_id, data):
""" Updates test case information
Args:
case_id (string): test case id
            data (dict): dictionary of the test case attributes
"""
return self.send_post(
uri=f'update_case/{case_id}',
data=data)
def _get_plan_by_name(self, plan_name, project_id=None):
""" Searches for an exact match of the test plan name for a given project
Args:
plan_name (string): name of the test plan
project_id (string): project id
Returns:
dict: plan
None: if there is no match
"""
if not project_id:
project_id = self.project_id
plans = self._get_plans(project_id)
for plan in plans:
if plan_name == plan["name"]:
return plan
return None
def _get_plans(self, project_id=None):
""" Returns a list of test plans for a project
Args:
project_id (string): project id
Returns:
list: list of test plans
"""
if not project_id:
project_id = self.project_id
url = f"get_plans/{project_id}"
return self.send_get(url)
def _get_plan(self, plan_id):
""" Returns an existing test plan along with all its runs
Args:
plan_id (int): plan id
Returns:
dict: test plan entry
"""
url = f"get_plan/{plan_id}"
return self.send_get(url)
def _add_plan_entry(self, plan_id, data):
""" Add test run/entry to a test plan
Args:
plan_id (string): test plan id
data (dict): post body data
Returns:
dict: test plan entry
"""
url = f"add_plan_entry/{plan_id}"
return self.send_post(url, data)
def format_testrun(self, test_run):
""" Formats test run with time stamp """
        date_format = '%Y-%m-%d %H:%M'
        date = datetime.datetime.now(tz=pytz.utc)
        date = date.astimezone(pytz.timezone('US/Pacific'))
        timestamp = date.strftime(date_format)
        return f"{timestamp} - {test_run}"
def add_default_test_run_entry_to_plan(self, test_run_name, plan_name, case_ids=None):
""" Creates a test run entry with some standard defaults to a test plan
Defaults:
test_run_entry = {
"suite_id": suite_id,
"name": "{test_run_name} - {timestamp}".format(
test_run_name=test_run_name,
timestamp=get_timestamp()),
"assignedto_id": None,
"include_all": True,
"config_ids": [],
"runs": [
{
"include_all": True,
"case_ids": [],
"config_ids": []
}
]
}
Args:
test_run_name (string): name of the test plan entry to be created
plan_name (string): name of the test plan
            case_ids (list): specific test case ids with which the new test run will be created.
Returns:
dict: test plan entry
"""
if case_ids:
include_all = False
else:
include_all = True
# This condition mandates testcase list not to be empty if include_all is False
if not include_all and not case_ids:
return False
plan = self._get_plan_by_name(plan_name=plan_name)
if plan is None:
raise APIError("testrail plan '%s' not found" % plan_name)
plan_id = plan["id"]
# Format the testrun name with Time stamp.
test_run_name = self.format_testrun(test_run_name)
suite_id = self.suite_id
test_run_entry = {
"suite_id": suite_id,
"name": "{test_run_name}".format(
test_run_name=test_run_name,
timestamp=get_timestamp()),
"assignedto_id": None,
"include_all": include_all,
"config_ids": [],
"case_ids": case_ids,
"runs": [
{
"include_all": False,
"case_ids": case_ids,
"config_ids": []
}
]
}
return self._add_plan_entry(plan_id, test_run_entry)
def update_test_run(self, test_run_name, plan_name, case_ids=None):
""" Updates the existing test run with new set of testcases
Args:
test_run (str): test run name
plan_name (str): test plan name
"""
# Look for tescase entru already present in the test-run if so skip those
test_run_id = self.get_run_id(plan_name, test_run_name)
plan = self._get_plan_by_name(plan_name)
plan_detail = self._get_plan(plan['id'])
for elem in plan_detail['entries']:
if test_run_name in elem['name']:
entry_id = elem['runs'][0]['entry_id']
break
else:
return False
case_ids = self.check_test_id_duplicate(case_ids, test_run_id)
if not case_ids:
return False
existing_tests = self.get_cases_in_run(test_run_id)
for elem in existing_tests:
case_ids.append(elem['case_id'])
test_run_entry = {
"include_all": False,
"case_ids": case_ids
}
url = f"update_plan_entry/{plan['id']}/{entry_id}"
return self.send_post(url, test_run_entry)
def check_test_id_duplicate(self, test_list, run_id):
""" Look for testcase present in the testrun and remove duplicate testcases
Args:
test_list (str): List of testcases to be validated
run_id (object): test run id
Returns:
(list): Valid testcases (unique one)
"""
url = f"get_tests/{run_id}"
test_case_list = self.send_get(url)
new_list = []
for elem in test_case_list:
new_list.append(elem['case_id'])
new_list = set(test_list) - set(new_list)
return list(new_list)
def get_run_id(self, plan_name, run_name):
""" Returns an existing test run id
Args:
plan_name (str): test plan name
run_name (str): test run name
Returns:
int: test run id
"""
plan = self._get_plan_by_name(plan_name)
complete_plan = self._get_plan(plan["id"])
for each in complete_plan["entries"]:
            # Substring match: test run names are created with a prepended timestamp,
            # and this also avoids duplicate test run entries
if run_name in each["runs"][0]["name"]:
return each["runs"][0]["id"]
def add_results(self, run_id, data):
""" Add results for a test run/entry
Args:
run_id (string): test run or entry id
data (dict): post body data
Returns:
dict: result
"""
url = f"add_results/{run_id}"
return self.send_post(url, data)
def __check_case_in_run(self, run_id, case_id):
""" Checks if the case is associated to the run
Args:
            run_id (str): id of the run
case_id (int): Case id (not the id that is created once associated to a run)
Returns:
id: test case if the case is present in the run
bool: False if absent
"""
if run_id not in self.cases_in_run:
tc_list = self.get_cases_in_run(run_id=run_id)
else:
tc_list = self.cases_in_run[run_id]
for test_case in tc_list:
if test_case.get('case_id', None) == case_id:
return test_case.get('id', None)
return False
def update_test_result(self, run_id, case_id, report):
""" Update the test result of a test case associated to a run
Args:
run_id (str): Id of the test run
case_id (id): Case id (not the id that is created once associated to a run)
report (obj): instance of pytest TestReport
Returns:
bool: False if unsuccessful in updating
"""
test_id = self.__check_case_in_run(run_id, case_id)
if not test_id:
return False
data = {"results": [{
"test_id": test_id,
"status_id": self.result_map.get(report.outcome, 5),
"elapsed": time.strftime('%Hh %Mm %Ss', time.gmtime(math.ceil(report.duration))),
"comment": report.longreprtext
}]
}
return self.send_post(
uri='add_results/{run_id}'.format(run_id=run_id),
data=data)
def _get_runs(self, project_id=None):
""" Returns a list of test runs for a project
Args:
project_id (int): project id
Returns:
list: list of test runs
"""
if not project_id:
project_id = self.project_id
url = f"get_runs/{project_id}"
return self.send_get(url)
def _get_run(self, run_id):
""" Returns an existing test run
Args:
            run_id (int): run id
Returns:
dict: test run entry
"""
url = f"get_run/{run_id}"
return self.send_get(url)
def _get_results_for_run(self, run_id):
""" Returns a list of test results for a test run.
Args:
run_id (int): The ID of the test run
Returns:
list: list of test results
"""
url = f"get_results_for_run/{run_id}"
return self.send_get(url)
def _get_testrun_by_name(self, project_name, plan_name, testrun_name):
""" Searches for an exact match of the testrun name
Args:
project_name (string): name of the project
testrun_name (str): testrun name
Returns:
dict: testrun
None: if there is no match
"""
project = self.__get_project_by_name(project_name)
plan = self._get_plan_by_name(plan_name, project['id'])
plan = self._get_plan(plan['id'])
for entry in plan['entries']:
if entry['name'] == testrun_name:
for testrun in entry['runs']:
if testrun['name'] == testrun_name:
return testrun
return None
def get_testrun_results(self, project_name, plan_name, testrun_name):
""" Gets testrun results
Args:
project_name (str): project name
plane_name (str): plane name
testrun_name (str): testrun name
Returns:
list: list of test results
"""
testrun = self._get_testrun_by_name(project_name, plan_name, testrun_name)
if testrun is not None:
return self._get_results_for_run(testrun["id"])
return None
class APIError(Exception):
pass
| true
|
849b341510f5c7bf398912befd6451284eae4903
|
Python
|
AungMyatSan/python
|
/practice/bst/bst.py
|
UTF-8
| 1,966
| 3.6875
| 4
|
[] |
no_license
|
class Node:
def __init__(self, val, left=None, right=None) -> None:
self.val = val
self.left = left
self.right = right
class Tree:
def __init__(self, root_val) -> None:
self.root = Node(root_val)
self.height = 0
def insert(self, cnode, val) -> Node:
if cnode.left is None and cnode.right is None:
self.height += 1
if val < cnode.val:
cnode.left = Node(val)
else:
cnode.right = Node(val)
return cnode
if val < cnode.val:
if cnode.left is None:
cnode.left = Node(val)
return cnode
else:
return self.insert(cnode.left, val)
else:
if cnode.right is None:
cnode.right = Node(val)
return cnode
else:
return self.insert(cnode.right, val)
def traverse(self, node: Node, kind='io') -> list:
if node is None:
return []
        if kind == 'io':
            return self.traverse(node.left, kind) + [node.val] + self.traverse(node.right, kind)
        elif kind == 'pr':
            return [node.val] + self.traverse(node.left, kind) + self.traverse(node.right, kind)
        elif kind == 'po':
            return self.traverse(node.left, kind) + self.traverse(node.right, kind) + [node.val]
        return []  # unknown traversal kind
def from_list(l: list) -> Tree:
bst = Tree(l[0])
for x in l[1:]:
bst.insert(bst.root, x)
return bst
def traversal_test(x: list):
bst = Tree(x[0])
for val in x[1:]:
bst.insert(bst.root, val)
print(bst.traverse(bst.root, kind='io'))
print(bst.traverse(bst.root, kind='pr'))
print(bst.traverse(bst.root, kind='po'))
if __name__ == '__main__':
x = [10, 33, 1, 5, 65, 3, 25, 167, 34, 6 , 15, 28]
traversal_test(x)
| true
|
8e3dfe8b23e3eccbc6636c5f677be2c2dbb0f615
|
Python
|
ZAKERR/-16-python-
|
/软件第四次作业/软件161/夔纭嘉2016021196/计算器的实现.py
|
UTF-8
| 738
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/python
#-*-coding:utf-8-*-
'''
Author: 夔纭嘉
Date: 2018-10-05
Purpose: implements a simple calculator
'''
# define the arithmetic functions (add; jian = subtract, cheng = multiply, chu = divide)
def add(x,y):
return x+y
def jian(x,y):
return x-y
def cheng(x,y):
return x*y
def chu(x,y):
return x/y
print(u"选择运算:")
print(u"+ - * /")
choice =input("请输入你的选择:")
x = int(input(u'请输入第一个数字:'))
y = int(input(u'请输入第二个数字:'))
if choice == '+':
print(x,'+',y,'=',add(x,y))
elif choice == '-':
print(x,'-',y,'=',jian(x,y))
elif choice == '*':
print(x,'*',y,'=',cheng(x,y))
elif choice == '/':
print(x,'/',y,'=',chu(x,y))
else:
    print(u'Invalid input!')
| true
|
9db6c166aa9982c7cc0d4d82fba7752358eb94f8
|
Python
|
dmrowan/LommenResearchGroup
|
/PulseCharacteristics/old_scripts/ampfitcrab.py
|
UTF-8
| 5,077
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from astropy.table import Table
from astropy import log
import datetime
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import pandas as pd
import math
from functions import *
desc="""
Makes a histogram of the integrated intensities of pulse profiles for each integration time
"""
def fit(timewidth):
intint = pd.read_csv('intdata/crabintdata_%s.txt' %timewidth, header = None)
intint = list(intint[0])
intint = [x for x in intint if x > 0.001]
intint = [x for x in intint if x < 3]
intint = np.array(intint)
print('The total number of profiles in the %s pulse histogram is '%timewidth, len(intint))
binwidths = list(np.linspace(0, 0.12, 100))
width = 0.05
sd = np.std(intint) # calculates standard deviation directly
    # Make a line plot from the histogram
    xvals, yvals = hist_to_curve(intint, binwidths)
# Use convolution to find the estimate for the location of the peak
#x = xvals
#template = (np.exp(-((x/width)**2)/2))
#convo = []
#for i in range(len(yvals)):
# convo.append(np.sum(yvals*np.roll(template,i))) # finds convolution
#m = np.max(convo) # finds peak value of convolution
#maxloc = xvals[convo.index(m)] # finds the location of the peak of convolution
#popt, pcov = curve_fit(gauss, xvals, yvals, p0= [max(yvals),maxloc, width], bounds = ((0, 0, 0), (np.inf, np.inf, np.inf))) # uses gaussian function to do a curve fit to the line version fo the histogram; uses maxloc for the guess for location
#popt, pcov = curve_fit(lognormal, xvals, yvals, p0 = [100, 0, 2])
#print(popt)
#plt.plot(xvals, lognormal(xvals, *popt))
plt.hist(intint, bins=binwidths)
#plt.plot(xvals, gauss(xvals,*popt))
#errorbar = np.absolute(pcov[2][2])**0.5
plt.xlabel('Integrated intensity (counts/pulse)')
plt.ylabel('# of Profiles')
plt.title('Integrated intensity distribution for %s pulses/profile'%timewidth)
plt.savefig('crab_%s.png' % timewidth)
plt.clf()
    return
plottype = 'loglog'
width = []
width2 = []
errorbars = []
timewidth=[]
times = [15, 20, 30, 90, 150, 300, 900]
for twidth in times:
if (twidth == 0):
twidth = 10
fit(twidth)
#width.append(w)
#width2.append(w2)
#errorbars.append(e)
#timewidth.append(twidth)
#y =[]
"""
for i in timewidth:
y.append(1/(i**0.5))
if (plottype == 'plot'):
plt.plot(timewidth, width, 'o', color = 'b')
popt, pcov = curve_fit(power, timewidth, width)
calc = plt.plot(timewidth, power(timewidth, *popt), color = 'b', label = 'Standard deviation')
# plt.plot(timewidth, width2, 'o', color = 'g')
popt, pcov = curve_fit(power, timewidth, width2)
fit = plt.plot(timewidth, power(timewidth, *popt), color = 'g', label = 'Gaussian curve fit')
plt.errorbar(timewidth, width2, yerr = errorbars, fmt = 'o', color = 'g')
plt.title("Width of Integrated Intensity Distribution vs Integration Time")
plt.legend()
plt.xlabel("Integration Time (seconds)")
plt.ylabel("Standard Deviation (counts/second)")
if (plottype == 'loglog'):
plt.plot(np.log10(timewidth), np.log10(width), 'o', color = 'b')
#popt, pcov = curve_fit(power, timewidth, width)
fit, cov = np.polyfit(np.log10(timewidth), np.log10(width), 1, cov=True)
cslope = fit[0]
csloperror = np.absolute(cov[0][0])**0.5
#cslopeerror = np.absolute(pcov[1][1])**0.5
plt.plot(np.log10(timewidth), np.log10(timewidth)*fit[0]+fit[1], color = 'b', label = 'Standard deviation, %s$\pm$%s'%(float('%.2g' % cslope), float('%.1g' % csloperror)))
plt.plot(np.log10(timewidth), np.log10(width2), 'o', color = 'g')
#popt, pcov = curve_fit(power, timewidth, width2)
fit, cov = np.polyfit(np.log10(timewidth), np.log10(width2), 1, cov=True)
fslope = fit[0]
#fslopeerror = np.absolute(pcov[1][1])**0.5
fsloperror = np.absolute(cov[0][0])**0.5
plt.plot(np.log10(timewidth), np.log10(timewidth)*fit[0]+fit[1], color = 'g', label = 'Gaussian curve fit, %s$\pm$%s'%(float('%.2g' % fslope), float('%.1g' % fsloperror)))
shift = 1.3
plt.plot(np.log10(timewidth), np.log10(y)-shift, '--', label = 'Gaussian distribution (slope = -1/2)')
lowererror = []
uppererror = []
for x in range(len(errorbars)):
lowererror.append(abs(np.log10(width2[x]-errorbars[x])-np.log10(width2[x])))
uppererror.append(np.log10(width2[x]+errorbars[x])-np.log10(width2[x]))
loglogerrors = [lowererror, uppererror]
loglogerror = np.array(loglogerrors)
plt.errorbar(np.log10(timewidth),np.log10(width2), yerr = loglogerror , fmt = 'o', color = 'g')
plt.title("Width of Integrated Intensity Distribution vs Number of Pulses")
plt.legend()
plt.xlabel("log(Number of Pulses per Profile)")
plt.ylabel("log(Width (counts/second))")
print(cslope, csloperror)
print(fslope, fsloperror)
plt.show()
"""
| true
|
026564c0f515289c801342d2e8887c9eec7490b6
|
Python
|
andaru/netmunge
|
/netmunge/grammars/cisco_show_arp.py
|
UTF-8
| 2,621
| 2.515625
| 3
|
[] |
no_license
|
# "show arp" parser for Cisco IOS
#
def convert_mac(mac):
"""Converts cisco xxxx.xxxx.xxxx.format to xx:xx:xx:xx:xx:xx format."""
return '%s:%s:%s:%s:%s:%s' % (mac[0:2],
mac[2:4],
mac[5:7],
mac[7:9],
mac[10:12],
mac[12:14])
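# e.g. convert_mac('aabb.ccdd.eeff') -> 'aa:bb:cc:dd:ee:ff'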
# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime
class CiscoShowArpScanner(runtime.Scanner):
patterns = [
("'(Protocol.*Interface)?'", re.compile('(Protocol.*Interface)?')),
("'ARPA'", re.compile('ARPA')),
("'Internet'", re.compile('Internet')),
('\\s+', re.compile('\\s+')),
('MAC', re.compile('\\S+')),
('INTF', re.compile('\\S+')),
('END', re.compile('$')),
('ANY', re.compile('.')),
('IPV4', re.compile('([0-9]{1,3}\\.){3}[0-9]{1,3}')),
('AGE', re.compile('([0-9]+|-)')),
]
def __init__(self, str,*args,**kw):
runtime.Scanner.__init__(self,None,{'\\s+':None,},str,*args,**kw)
class CiscoShowArp(runtime.Parser):
Context = runtime.Context
def entry(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'entry', [])
self._scan("'Internet'", context=_context)
IPV4 = self._scan('IPV4', context=_context)
AGE = self._scan('AGE', context=_context)
MAC = self._scan('MAC', context=_context)
self._scan("'ARPA'", context=_context)
INTF = self._scan('INTF', context=_context)
return (('arpv4', IPV4, convert_mac(MAC), INTF))
def parse(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'parse', [])
arps = set()
while self._peek("'(Protocol.*Interface)?'", 'ANY', context=_context) == 'ANY':
ANY = self._scan('ANY', context=_context)
self._scan("'(Protocol.*Interface)?'", context=_context)
while self._peek('END', "'Internet'", context=_context) == "'Internet'":
entry = self.entry(_context)
arps.add(entry)
END = self._scan('END', context=_context)
return arps
def parse(rule, text):
P = CiscoShowArp(CiscoShowArpScanner(text))
return runtime.wrap_error_reporter(P, rule)
if __name__ == '__main__':
from sys import argv, stdin
if len(argv) >= 2:
if len(argv) >= 3:
f = open(argv[2],'r')
else:
f = stdin
print parse(argv[1], f.read())
else: print >>sys.stderr, 'Args: <rule> [<filename>]'
# End -- grammar generated by Yapps
| true
|
ae899a6c7c4d0cd5ca879fd51810fddf685ac428
|
Python
|
zfdupont/stockticker
|
/main.py
|
UTF-8
| 1,189
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
from yahoo_fin import stock_info as si
from time import time
from datetime import datetime
import os
class colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# TEST TICKERS
# AAPL AMZN TSLA gme AMC SPY MSFT SBUX FB
tags = input("").split(" ")
t = time()
clear = lambda: os.system('cls')
prices = []
def print_watching():
clear()
temp = []
global prices
for i in range(len(tags)):
tick = tags[i]
x = si.get_live_price(tick)
print(f"{tick.upper()}\t\t{x}", end="")
if prices:
change = "="
if(x > prices[i]): change = "▲"
if(x < prices[i]): change = "▼"
print(f"({change}{round(abs(x - prices[i]), 2)})")
else: print()
temp.append(round(x, 2))
prices = temp[:]
print(datetime.now().strftime("%H:%M:%S"))
print_watching()
CONST_INTERVAL = 5
while True:
# print(time() - t)
if (time() - t) > CONST_INTERVAL:
print_watching()
t = time()
| true
|
df56e59cf7749b29e91f8524106307b0afdfd1bd
|
Python
|
mali0728/cs107_Max_Li
|
/homework/HW2/HW2_final/P2.py
|
UTF-8
| 942
| 4.28125
| 4
|
[] |
no_license
|
# Problem 2 DNA Complement for Homework 2 of CS107
# Author: Max Li
def dna_complement(original):
    '''This function takes in a DNA sequence represented as a string and returns its complement as a string.'''
bases = ['A', 'T', 'G', 'C']
if len(original) == 0:
return None
for x in original:
if x.upper() not in bases:
return None
complement = ''
for x in original:
if x.upper() == 'A':
complement+='T'
elif x.upper() == 'T':
complement+='A'
elif x.upper() == 'C':
complement+='G'
elif x.upper() == 'G':
complement+='C'
return complement
example_1 = 'AatGGc'
example_2 = 'AATdog'
print('example 1: '+ example_1 + " has complement: ")
print(dna_complement(example_1))
print('example 2: '+ example_2 + " has complement: ")
print(dna_complement(example_2))
| true
|
d9dd28aac91196449ec7cba7582a01ee7396e8e0
|
Python
|
percyliang/sempre
|
/tables/wikipedia-scripts/weblib/clean_html.py
|
UTF-8
| 4,314
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'external'))
from bs4 import BeautifulSoup, element
import tidylib
tidylib.BASE_OPTIONS = {
'output-html': 1,
'indent': 0,
'tidy-mark': 0,
'wrap': 0,
'doctype': 'strict',
'force-output': 1,
}
WHITELIST_NAMES = set(
('html', 'head', 'meta', 'title', #'noscript',
'body', 'section', 'nav', 'article', 'aside',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'footer', 'address', 'main',
'p', 'hr', 'pre', 'blockquote',
'ol', 'ul', 'li', 'dl', 'dt', 'dd',
'figure', 'figcaption', 'div',
'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn',
'abbr', 'data', 'time', 'code', 'var', 'samp', 'kbd',
'sub', 'sup', 'i', 'b', 'u', 'mark', 'ruby', 'rt', 'rp',
'wbr', 'ins', 'del', 'bdi', 'bdo', 'span', 'br',
'img', 'table', 'caption', 'colgroup', 'col',
'tbody', 'thead', 'tfoot', 'tr', 'td', 'th',
'form', 'fieldset', 'legend', 'label', 'input', 'button',
'select', 'datalist', 'optgroup', 'option', 'textarea',
'keygen', 'output', 'progress', 'meter',
'details', 'summary', 'menuitem', 'menu',
'acronym', 'basefont', 'big', 'blink', 'center',
'font', 'marquee', 'nobr', 'noframes', 'strike', 'tt')
)
WHITELIST_ATTRS = set(
('colspan', 'rowspan')
)
WHITELIST_NAME_ATTRS = set(
(('meta', 'charset'), ('img', 'alt'), ('img', 'title'))
)
INPUT_TYPES = set(
('checkbox', 'color', 'date', 'datetime', 'datetime-local',
'email', 'hidden', 'month', 'number',
'password', 'radio', 'range', 'tel', 'text', 'time', 'url', 'week')
)
INPUT_BUTTON_TYPES = set(
('button', 'reset', 'submit', 'file', 'image')
)
def is_whitelisted(name):
    return name.lower() in WHITELIST_NAMES
def create_clean_tag(tag):
'''Return an empty tag with whitelisted attributes, or None'''
name = tag.name.lower()
answer = element.Tag(name=name)
# Special Case : encoding
if name == 'meta':
if (tag.get('http-equiv') == "Content-Type"
and '7' not in tag.get('content')):
answer['http-equiv'] = tag.get('http-equiv')
answer['content'] = tag.get('content')
else:
return None
# Special Case : input
if name == 'input':
if tag.get('type') in INPUT_TYPES:
answer['type'] = tag.get('type')
elif tag.get('type') in INPUT_BUTTON_TYPES:
answer['type'] = 'button'
for key, value in tag.attrs.iteritems():
if (key in WHITELIST_ATTRS or
(name, key) in WHITELIST_NAME_ATTRS):
answer[key] = value
# Special Case : display:none
elif key == 'style':
if (not isinstance(value, list) and
re.search(r'display:\s*none', value)):
answer['style'] = 'display:none'
# Special Case : button
if name == 'button':
answer['type'] = 'button'
return answer
def build(tag):
'''Return the clean equivalent of tag, or None'''
answer = create_clean_tag(tag)
if not answer:
return
for child in tag.contents:
#print tag.name, type(child), child.name, unicode([unicode(child)])[:50]
if isinstance(child, element.Tag):
if child.name.lower() in WHITELIST_NAMES:
built_child = build(child)
if built_child:
answer.append(built_child)
elif child.__class__ == element.NavigableString:
answer.append(unicode(child))
return answer
def clean_html(page):
'''Return the cleaned page as a unicode object.
Argument:
- page -- a page string without any <!--comment--> at the top.
'''
soup = BeautifulSoup(unicode(BeautifulSoup(page, "html5lib")), "html5lib")
new_soup = BeautifulSoup('<!DOCTYPE html><html></html>')
new_soup.html.replace_with(build(soup.html))
document, errors = tidylib.tidy_document(unicode(new_soup))
return document
if __name__ == '__main__':
# Test
from codecs import open
with open(sys.argv[1]) as fin:
url = fin.readline()
data = fin.read()
cleaned = clean_html(data)
with open(sys.argv[2], 'w', 'utf8') as fout:
fout.write(cleaned)
| true
|
825845aeb99e34797ccd7a92264cfe22c251dfcc
|
Python
|
Fulldis/dl-course
|
/week-13/research/experiments/optuna/model.py
|
UTF-8
| 964
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
import torch
from torch import nn
class SimpleNet(nn.Module):
def __init__(
self,
num_filters1: int = 6,
num_filters2: int = 16,
num_hiddens1: int = 120,
num_hiddens2: int = 84,
num_classes: int = 10,
):
super().__init__()
self.num_hiddens0 = num_filters2 * 5 * 5
self.core = nn.Sequential(
nn.Conv2d(3, num_filters1, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(num_filters1, num_filters2, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(self.num_hiddens0, num_hiddens1),
nn.ReLU(),
nn.Linear(num_hiddens1, num_hiddens2),
nn.ReLU(),
)
self.head = nn.Sequential(nn.Linear(num_hiddens2, num_classes),)
def forward(self, x: torch.Tensor) -> torch.Tensor:
output = self.core(x)
return self.head(output)
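# A hedged usage sketch (assumes 3x32x32 inputs such as CIFAR-10, which is what
# makes the flattened size num_filters2 * 5 * 5 work out):
# model = SimpleNet()
# logits = model(torch.randn(8, 3, 32, 32))  # -> shape (8, 10)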
| true
|
c56c956cbd0aff6383e386db0db20aec4cdae69a
|
Python
|
arusdef/OpenCSAM
|
/scrapy-crawlers/scrapy_crawlers/useragents.py
|
UTF-8
| 704
| 2.546875
| 3
|
[] |
no_license
|
""" User-Agent Middleware """
import logging
from fake_useragent import UserAgent
class RandomUserAgentMiddleware(object):
""" Random User-Agent Middleware """
default_user_agent = ('''Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '''
'''AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36''')
user_agent = UserAgent(verify_ssl=False, fallback=default_user_agent)
def process_request(self, request, spider):
""" Process Request """
random_user_agent = self.user_agent.random
logging.debug(random_user_agent)
request.headers.setdefault('User-Agent', random_user_agent)
return None # continue processing this request
| true
|
e90a7353f5fe5adfcb08e65fb18e2b0c29d964c2
|
Python
|
ChoiYoonSung/Python
|
/HelloPython/day10/myflask04_forward.py
|
UTF-8
| 473
| 2.75
| 3
|
[] |
no_license
|
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def home():
title = 'List'
mylist = ['a', 'b', 'c', 'd']
    objList = []
objList.append({'e_id' : '1', 'e_name' : '이름1', 'e_birth' : '1996'})
objList.append({'e_id' : '2', 'e_name' : '이름2', 'e_birth' : '1997'})
return render_template('index.html', mylist = mylist, title=title, objList = objList)
if __name__ == '__main__':
app.run(debug=True)
| true
|
00373b0eb385faa3e84177cd0822cf60a147fc13
|
Python
|
sachahu1/RL_trading
|
/network.py
|
UTF-8
| 4,232
| 3.34375
| 3
|
[] |
no_license
|
import torch
# The Network class inherits the torch.nn.Module class, which represents a neural network.
class Network(torch.nn.Module):
# The class initialisation function. This takes as arguments the dimension of the network's input (i.e. the dimension of the state), and the dimension of the network's output (i.e. the dimension of the action).
def __init__(self, input_dimension, output_dimension):
# Call the initialisation function of the parent class.
super(Network, self).__init__()
# Config the network
# self.config = network_config
# Define the network layers. This example network has two hidden layers, each with 100 units.
self.layer_1 = torch.nn.Linear(in_features=input_dimension, out_features=100)
self.layer_2 = torch.nn.Linear(in_features=100, out_features=100)
self.V_layer = torch.nn.Linear(in_features=100, out_features=1)
self.a_layer = torch.nn.Linear(in_features=100, out_features=output_dimension)
# Function which sends some input data through the network and returns the network's output. In this example, a ReLU activation function is used for both hidden layers, but the output layer has no activation function (it is just a linear layer).
def forward(self, input):
layer_1_output = torch.nn.functional.relu(self.layer_1(input.float()))
layer_2_output = torch.nn.functional.relu(self.layer_2(layer_1_output))
V = self.V_layer(layer_2_output)
a = self.a_layer(layer_2_output)
        # dueling aggregation: centre the advantage per sample across actions
        q = V + a - a.mean(dim=-1, keepdim=True)
return q
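# A hypothetical usage sketch (dimensions assumed for illustration, not taken from the repo):
# net = Network(input_dimension=4, output_dimension=2)
# q = net(torch.zeros(1, 4))  # -> shape (1, 2)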
# import torch
# # The Network class inherits the torch.nn.Module class, which represents a neural network.
# class Network(torch.nn.Module):
# # The class initialisation function. This takes as arguments the dimension of the network's input (i.e. the dimension of the state), and the dimension of the network's output (i.e. the dimension of the action).
# def __init__(self, input_dimension, output_dimension):
# # Call the initialisation function of the parent class.
# super(Network, self).__init__()
# # Config the network
# # self.config = network_config
# # Define the network layers. This example network has two hidden layers, each with 100 units.
# self.rnn_market = torch.nn.GRU(10, 1, 2)
# self.rnn_rsi = torch.nn.GRU(10, 1, 2)
# self.h1 = torch.zeros(2,1,1)
# self.h2 = torch.zeros(2,1,1)
# self.layer_1 = torch.nn.Linear(in_features=4, out_features=100)
# self.layer_2 = torch.nn.Linear(in_features=100, out_features=100)
# self.V_layer = torch.nn.Linear(in_features=100, out_features=1)
# self.a_layer = torch.nn.Linear(in_features=100, out_features=output_dimension)
# # Function which sends some input data through the network and returns the network's output. In this example, a ReLU activation function is used for both hidden layers, but the output layer has no activation function (it is just a linear layer).
# def forward(self, input):
# T = torch.cat((torch.tensor(input[0]),
# torch.tensor(input[1]),
# torch.tensor([input[2]]),
# torch.tensor([input[3]])
# ))
# O, self.h1 = self.rnn_market(T[:10].reshape(1,1,-1), self.h1)
# rnn_market_output = torch.nn.functional.relu(O)
# O, self.h2 = self.rnn_rsi(T[10:20].reshape(1,1,-1), self.h2)
# rnn_rsi_output = torch.nn.functional.relu(O)
# rnn_output = torch.cat((rnn_market_output.reshape(-1), rnn_rsi_output.reshape(-1), T[20:]))
# layer_1_output = torch.nn.functional.relu(self.layer_1(rnn_output))
# layer_2_output = torch.nn.functional.relu(self.layer_2(layer_1_output))
# V = self.V_layer(layer_2_output)
# a = self.a_layer(layer_2_output)
# q = V + a - a.mean()
# return q
| true
|
e1ceb3f5ca54d3624ee3f8d44590da8dd7cb0c1b
|
Python
|
C3RV1/Prive
|
/server/generateKeys.py
|
UTF-8
| 1,855
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
import Crypto.PublicKey.RSA as RSA
import os
import sys
import json
from config import Config
class GenerateKeys:
def __init__(self):
self.database_directory = Config.DATABASE_PATH
self.key_size = Config.CLIENT_KEYSIZE
def generate(self):
#type: () -> None
if not os.path.isdir(self.database_directory):
print("Error in generateKeys.py: Database directory not found")
print("Aborting operation")
sys.exit(1)
print("\nGenerating key pair")
new_key = RSA.generate(self.key_size)
new_private_key_exported = new_key.export_key()
new_public_key_exported = new_key.publickey().export_key()
private_key_file = open(self.database_directory + "/privateKey.skm", "wb")
private_key_file.write(new_private_key_exported)
private_key_file.close()
public_key_file = open(self.database_directory + "/publicKey.pk", "wb")
public_key_file.write(new_public_key_exported)
public_key_file.close()
config_dict = {"host": Config.HOST,
"port": Config.PORT,
"key-size": Config.CLIENT_KEYSIZE,
"rsa-key": new_public_key_exported.decode("ascii"),
"pow-0es": Config.POW_NUM_OF_0,
"pow-iterations": Config.POW_ITERATIONS,
"file-send-chunks": Config.FILE_SEND_CHUNKS}
print("Creating Prive Config File (PCF) with conf: ")
for key in config_dict.keys():
print("{}. {}".format(key, repr(config_dict[key])))
prive_config_file = open(self.database_directory + "/priveConfigFile.pcf", "w")
prive_config_file.write(json.dumps(config_dict, sort_keys=True, indent=4))
prive_config_file.close()
print("Setup Complete\n")
| true
|
068a077d2f580dadbc0234c96ddfe96e1d268da0
|
Python
|
NithinNitz12/ProgrammingLab-Python
|
/CO1/9_exchange.py
|
UTF-8
| 62
| 3.3125
| 3
|
[] |
no_license
|
s = input("Enter a string:")
print(s[-1] + s[1:-1] + s[0])
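# e.g. 'hello' -> 'oellh' (first and last characters exchanged)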
| true
|
7b26a8c7065744adde26e32dc5d3278efb80f192
|
Python
|
serashioda/code-katas
|
/src/calculate_years.py
|
UTF-8
| 379
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
"""Implementation of the Kata Money, Money, Money."""
def calculate_years(principal, interest, tax, desired):
"""Calculate how many years it take to reach desired principle."""
years = 0
while (principal < desired):
accrued = principal * interest
accrued = accrued - (accrued * tax)
principal += accrued
years += 1
return years
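# Worked example: calculate_years(1000, 0.05, 0.18, 1100) returns 3
# (1000 -> 1041.00 -> 1083.68 -> 1128.11, which first reaches 1100 in year 3)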
| true
|
fdf5608b017eb64ec5d627056d180a5750db1dc6
|
Python
|
enriqueee99/learning_python
|
/ejercicio_12.3.py
|
UTF-8
| 200
| 4.125
| 4
|
[] |
no_license
|
lista = []
for x in range(5):
    num = int(input('Enter a number: '))
    lista.append(num)
print(f'The largest in the list is: {max(lista)}')
print(f'The number {num} appears {lista.count(num)} times')
| true
|
a587eb70c3cf901751e707220a9e1b3380de1365
|
Python
|
ShaneRich5/fti-programming-training
|
/solutions/labs/lab5.2/lab.py
|
UTF-8
| 1,273
| 2.734375
| 3
|
[] |
no_license
|
import getpass
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import models
# Connection details (the password is prompted for at runtime)
server = 'tmp-training.c21v9ghziray.us-west-1.rds.amazonaws.com'
database = 'training'
username = 'ftimaster'
password = getpass.getpass()
# Format connection string
connection_string_template = "mssql+pyodbc://{}:{}@{}:1433/{}?driver=SQL+Server+Native+Client+11.0"
connection_string = connection_string_template.format(username, password, server, database)
# create database engine from connection string
engine = create_engine(connection_string)
# configure session
Session = sessionmaker()
Session.configure(bind=engine)
# create session for interacting with database
session = Session()
# define the users to insert
users = [
{"first_name": 'George', "last_name": 'Washington', "username": 'gwash'},
{"first_name": 'Thomas', "last_name": 'Jefferson', "username": 'tjeff'}
]
# iterate new users
for user in users:
# create user object from model
user = models.User(first_name=user["first_name"], last_name=user["last_name"], username=user["username"])
# create database record from object
session.add(user)
# save changes to database
session.commit()
print('done inserting')
| true
|
fc5138ae3007b67918acc3c028ac700d4581785e
|
Python
|
pfigz/python-files-projects
|
/Celsius.py
|
UTF-8
| 206
| 4.03125
| 4
|
[] |
no_license
|
celsius = float(input("Degrees Celsius: "))
def fahrenheit(c):
return round((1.8 * c + 32), 1)
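# e.g. fahrenheit(100) -> 212.0, fahrenheit(37) -> 98.6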
print("The Fahrenheit equivalent of " + str(celsius) + " degrees Celsius is " + str(fahrenheit(celsius)))
| true
|
5a6474ef331ba43d60be0ee52f1978ec2db883c0
|
Python
|
Rkluk/uri
|
/1248 Plano de Dieta.py
|
UTF-8
| 1,222
| 3.15625
| 3
|
[] |
no_license
|
u = int(input())
for c in range(u):
dieta= input()
cafe= input()
almoco = input()
ldieta = []
cheater = False
for v in dieta:
if ord(v) not in ldieta:
ldieta.append(ord(v))
lcafe = []
for v in cafe:
lcafe.append(ord(v))
lalmoco = []
for v in almoco:
lalmoco.append(ord(v))
ldieta.sort()
for v in lcafe:
if v in ldieta:
for k in range(len(ldieta)):
if ldieta[k] == v:
ldieta.pop(k)
break
else:
cheater = True
break
if not cheater:
for v in lalmoco:
if v in ldieta:
for k in range(len(ldieta)):
if ldieta[k] == v:
ldieta.pop(k)
break
else:
cheater = True
break
if cheater:
print('CHEATER')
elif len(ldieta) > 0:
for v in range(len(ldieta)):
if v == (len(ldieta) - 1):
print(chr(ldieta[v]))
else:
print(chr(ldieta[v]), end='')
else:
print('')
| true
|
aa77c7009863cc23d04e9ace18c152a33bbccb8c
|
Python
|
jsheflin/UnitConverter
|
/test.py
|
UTF-8
| 3,641
| 2.71875
| 3
|
[] |
no_license
|
import unittest
import start
class TestConverter(unittest.TestCase):
def test_all_temp(self):
temp = start.MULTIPLIERS_TO_STD['temp'].keys()
source_val = 111.1
answer = 232
for tunit in temp:
for to_temp in temp:
if (to_temp != tunit):
print ('Convert From ' + tunit +' to ' + to_temp )
def test_all_volume(self):
volume = start.MULTIPLIERS_TO_STD['volume'].keys()
source_val = 111.1
answer = 0.064
for tunit in volume:
for to_volume in volume:
if (to_volume != tunit):
print ('Convert From ' + tunit +' to ' + to_volume )
def test_c_to_tb_incorrect(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'tb'
answer = 222.2
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_tb_correct(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'tb'
answer = 177.6
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_g_incorrect(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'g'
answer = 222.2
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, 177.6)
def test_c_to_g_correct(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'g'
answer = .7
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_l_incorrect(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 1
convert_to = 'l'
answer = 0.236
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, 177.6)
def test_c_to_l_correct(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'l'
answer = 2.6
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_cf_incorrect(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'cf'
answer = 12.6
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_cf_correct(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 1
convert_to = 'cf'
answer = 0.0
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_ci_incorrect(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'ci'
answer = 222.2
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
def test_c_to_ci_correct(self):
print(self._testMethodName)
source_unit = 'c'
source_val = 11.1
convert_to = 'ci'
answer = 160.2
res = start.convert(source_unit, source_val, convert_to,answer)
self.assertEqual(res, answer)
if __name__ == '__main__':
unittest.main()
| true
|
de86818ea87db1ae8c64e5855bbce363446a442f
|
Python
|
ImNimeshKumar/InitalizePython
|
/forloop.py
|
UTF-8
| 62
| 3.078125
| 3
|
[] |
no_license
|
j=0
for i in range(0,10):
i = i+1
j = j+i
print(j)
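# j accumulates 1 + 2 + ... + 10, ending at 55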
| true
|
bc69d7d99ad4c2fbaf6b3988a8e92bc7f380f452
|
Python
|
dAIsySHEng1/CCC-Junior-Python-Solutions
|
/2011/J3.py
|
UTF-8
| 213
| 3.265625
| 3
|
[] |
no_license
|
def sumac():
t1 = int(input())
t2 = int(input())
sumac_num = [t1,t2]
while t1>=t2:
a = t1
t1=t2
t2 = a-t1
sumac_num.append(t2)
print(len(sumac_num))
sumac()
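# e.g. inputs 120 and 71 give the sequence 120, 71, 49, 22, 27 -> prints 5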
| true
|
e00cc57588e2e8066902af6b5f4c66f81572ef1f
|
Python
|
Deniska10K/stepik
|
/2.4-integer_arithmetic_part_1/7.arithmetic_operations.py
|
UTF-8
| 729
| 4.125
| 4
|
[] |
no_license
|
"""
Напишите программу, в которой вычисляется сумма, разность и произведение двух целых чисел, введенных с клавиатуры.
Формат входных данных
На вход программе подаётся два целых числа, каждое на отдельной строке.
Формат выходных данных
Программа должна вывести сумму, разность и произведение введённых чисел, каждое на отдельной строке.
"""
a, b = int(input()), int(input())
print(f"{a} + {b} = {a + b}\n{a} - {b} = {a - b}\n{a} * {b} = {a * b}")
| true
|
e0e2c0330e7327fe1c692453c88e4e77c3bfbfb6
|
Python
|
lemontreeran/adx-automation-monitor
|
/app/app/models/user.py
|
UTF-8
| 206
| 2.75
| 3
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
from flask_login import UserMixin
class User(UserMixin):
def __init__(self, user_id: str, user_name: str):
self.id = user_id # pylint: disable=invalid-name
self.user_name = user_name
| true
|
952b341742faf36affb6b3def84206118a8c3d0a
|
Python
|
willie19971103/test_in_cjcu-master
|
/4-3.py
|
UTF-8
| 102
| 2.6875
| 3
|
[] |
no_license
|
# 10 numbers are given in the input. Read them and print their sum. Use as few variables as you can.
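# A minimal sketch for the task above (assumes one integer per line):
print(sum(int(input()) for _ in range(10)))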
| true
|
612c4a707bf44656dbf7c908475560c5bd50e489
|
Python
|
Manideepnizam26/CLOTH-EXTRACTION
|
/CITY SCAPES/save_data_as_npy.py
|
UTF-8
| 1,276
| 2.828125
| 3
|
[] |
no_license
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import time
train_images = []
train_output = []
test_images = []
test_output = []
t1 = time.time()
files = glob.glob('./DATA/train/*')
print(len(files))
for image_file in files:
image = cv2.imread(image_file)
image1, image2 = np.split(image,2,1)
train_images.append(image1)
train_output.append(image2)
files = glob.glob('./DATA/test/*')
print(len(files))
for image_file in files:
image = cv2.imread(image_file)
image1, image2 = np.split(image,2,1)
test_images.append(image1)
test_output.append(image2)
t2 = time.time()
print(f"Time took to read: {t2-t1} seconds.")
np.save('./DATA/train_images.npy', train_images)
np.save('./DATA/train_output.npy', train_output)
np.save('./DATA/test_images.npy', test_images)
np.save('./DATA/test_output.npy', test_output)
t3 = time.time()
print(f"Time took to save: {t3-t2} seconds.")
# image1 = image[:256][256:][:3]
# image2 = image[:][256:][:]
# print(image1.shape)
# print(image2.shape)
# plt.imshow(image1[:,:,[2,1,0]],cmap = 'gray')
# plt.show()
# plt.imshow(image2[:,:,[2,1,0]],cmap = 'gray')
# plt.show()
# cv2.imshow(image1[:,:,[2,1,0]],cmap = 'gray')
# plt.imshow(image2)
| true
|
b83f1d8b0c171a18c82eec8325b7db9351ecb882
|
Python
|
ajinkyapatankar/Soccer-Team-Management-System
|
/Diva/Code/DIVAAAAA/diva_backend/diva_backend/djangorest/api/example.py
|
UTF-8
| 12,012
| 2.546875
| 3
|
[] |
no_license
|
import pandas as pd
import itertools
import numpy
data = pd.read_csv('api/data.csv')
def removeadd(st):
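    # e.g. '85+2' -> 85.0: drop the two-character '+N' modifier and keep the base rating.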
return float(st[0:-2])
def preprocessing_data(data, club):
list = ['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM',
'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB']
midfielders = ['LCM', 'CM', 'RCM', 'LDM',
'CDM', 'RDM']
defenders = ['RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB']
attackers = ['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'RM']
for i in list:
data[i] = data[i].apply(removeadd)
return data
def mapping(formation):
if formation == '4231A':
list_positions = ['ST', 'LAM', 'RAM', 'LCM', 'CAM', 'RCM', 'LB', 'LCB', 'RCB', 'RB']
attackers = ['ST', 'RAM', 'LAM', 'CAM']
defenders = ['LB', 'LCB', 'RCB', 'RB']
mid_fielders = ['LCM', 'RCM']
amd_no = [4, 2, 4]
elif formation == '4231D':
list_positions = ['ST', 'LAM', 'RAM', 'LCM', 'CDM', 'RCM', 'LB', 'LCB', 'RCB', 'RB']
attackers = ['ST', 'RAM', 'LAM']
defenders = ['CDM', 'LB', 'LCB', 'RCB', 'RB']
mid_fielders = ['LCM', 'RCM']
amd_no = [3, 2, 5]
elif formation == '433':
list_positions = ['ST', 'RW', 'LW', 'LCM', 'CM', 'RCM', 'LB', 'LCB', 'RCB', 'RB']
attackers = ['ST', 'RW', 'LW']
mid_fielders = ['LCM', 'CM', 'RCM']
defenders = ['LB', 'LCB', 'RCB', 'RB']
amd_no = [3, 3, 4]
return (attackers, mid_fielders, defenders, amd_no)
def position_mapping(formation):
if formation == '4231A':
lst = [['ST'], ['LAM', 'CAM', 'RAM'], ['LCM', 'RCM'], ['LB', 'LCB', 'RCB', 'RB']]
if formation == '4231D':
lst = [['ST'], ['LAM', 'CAM', 'RAM'], ['LCM', 'RCM'], ['LB', 'LCB', 'RCB', 'RB']]
if formation == '433':
lst = [['LW','ST', 'RW'], ['LCM', 'CM', 'RCM'], ['LB', 'LCB', 'RCB', 'RB']]
return lst
def formations(formation):
    if formation == '4231A':
        list_positions = ['ST', 'LAM', 'RAM', 'LCM', 'CAM', 'RCM', 'LB', 'LCB', 'RCB', 'RB']
    if formation == '4231D':
        list_positions = ['ST', 'LAM', 'CAM', 'RAM', 'LCM', 'RCM', 'LB', 'LCB', 'RCB', 'RB']
    if formation == '433':
        list_positions = ['ST', 'LW', 'RW', 'LCM', 'CM', 'RCM', 'LB', 'LCB', 'RCB', 'RB']
    return list_positions
def combo(data, combo):
pref_score = 0
for i in combo:
if i[0] == data.loc[data['Name'] == i[1], 'Position'].reset_index(drop=True)[0]:
pref_score += 1
return pref_score
def map_value(st):
if st[1] == '0':
return 0
return float(st[1:-1])
def formplayers(position, data_filter, val):
players = set()
for i in position:
players_top_players = data_filter.nlargest(val, columns=[i])['Name'].values
players.update(players_top_players)
player_positions = itertools.permutations(players, val)
attack_score = 0
team_attackers = {}
for i in player_positions:
score = 0
team = []
for j in range(len(i)):
score += data_filter.loc[data_filter['Name'] == i[j], position[j]].values[0]
team.append((position[j], i[j]))
t = tuple(team)
team_attackers[t] = score
for t in team_attackers:
pre_score = combo(data_filter, t)
team_attackers[t] = (team_attackers[t], pre_score)
best_team_attakers = [v for v in sorted(team_attackers.items(), key=lambda x: (-x[1][0], -x[1][1]))][0]
for p in best_team_attakers:
data_filter.drop(data_filter[data_filter['Name'] == p].index)
return best_team_attakers
def map_position_cordinates(st, formation):
    if formation == '4231A' or formation == '4231D':
if st == 'ST':
left = 237
top = 35
elif st == 'LAM':
left = 120
top = 25
elif st == 'CAM':
left = 35
top = 25
elif st == 'RAM':
left = 35
top = 25
elif st == 'LCM':
left = 176
top = 40
elif st == 'RCM':
left = 35
top = 40
elif st == 'LB':
left = 90
top = 70
elif st == 'LCB':
left = 15
top = 70
elif st == 'RCB':
left = 15
top = 70
elif st == 'RB':
left = 15
top = 70
elif st == 'GK':
left = 237
top = 20
elif st == 'CDM':
left = 35
top = 25
if formation == '433':
if st == 'LW':
left = 120
top = 85
elif st == 'ST':
left = 35
top = 85
elif st == 'RW':
left = 35
top = 85
elif st == 'LCM':
left = 120
top = 85
elif st == 'CM':
left = 35
top = 85
elif st == 'RCM':
left = 35
top = 85
elif st == 'LB':
left = 90
top = 85
elif st == 'LCB':
left = 15
top = 85
elif st == 'RB':
left = 15
top = 85
elif st == 'RCB':
left = 15
top = 85
elif st == 'GK':
left = 237
top = 15
return (left, top)
def build_team(data, formation, club=None):
data1 = data
if club != None:
data1 = data.loc[data['Club'] == club]
data1 = data1.filter(items=['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM',
'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Position', 'Overall', 'Name',
'Club', 'Nationality', 'Photo'])
goal_keeper = tuple(
data1.loc[data1['Position'] == 'GK'].nlargest(1, columns='Overall')[['Name', 'Overall']].values[0])
lst = []
df = data1.loc[data1['Name'] == goal_keeper[0]]
df = df.fillna(0)
d1 = {}
d1['Position'] = 'GK'
d1['Name'] = goal_keeper[0]
(left, top) = map_position_cordinates('GK', formation)
d1['left'] = left
d1['top'] = top
for column in df.columns[4:]:
d1[column] = df[column].iloc[0]
data1 = data1.dropna(subset=['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM',
'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB'])
data_filter = preprocessing_data(data1, club)
if len(data_filter) < 10:
return None
team_score = 0
(attackers, mid_fielders, defenders, amd_no) = mapping(formation)
players = set()
team_attackers = formplayers(attackers, data_filter, amd_no[0])
data_player = data_filter.loc[data_filter['Name'] == team_attackers[0][0][1]]
data_filter = data_filter.drop(data_filter[data_filter['Name'] == team_attackers[0][0][1]].index)
for p in team_attackers[0][1:]:
data_player = data_player.append(data_filter.loc[data_filter['Name'] == p[1]])
data_filter = data_filter.drop(data_filter[data_filter['Name'] == p[1]].index)
team_midfielders = formplayers(mid_fielders, data_filter, amd_no[1])
for p in team_midfielders[0]:
df = data_filter.loc[data_filter['Name'] == p[1]]
data_player = data_player.append(df, ignore_index=True)
data_filter = data_filter.drop(data_filter[data_filter['Name'] == p[1]].index)
team_defenders = formplayers(defenders, data_filter, amd_no[2])
for p in team_defenders[0]:
        df = data_filter.loc[data_filter['Name'] == p[1]]
data_player = data_player.append(df, ignore_index=True)
data_filter = data_filter.drop(data_filter[data_filter['Name'] == p[1]].index)
team = dict(team_attackers[0] + team_midfielders[0] + team_defenders[0])
team_score = team_attackers[1][0] + team_defenders[1][0] + team_midfielders[1][0] + goal_keeper[1]
position_placement = position_mapping(formation)
data_player = data_player.drop(['Position'], axis=1)
for i in position_placement:
list_i = []
for j in i:
d = {}
d['Position'] = j
d['Name'] = team[j]
(left, top) = map_position_cordinates(j, formation)
d['left'] = left
d['top'] = top
df = data_player.loc[data_player['Name'] == team[j]]
for column in df.columns[4:]:
d[column] = df[column].iloc[0]
list_i.append(d)
lst.append(list_i)
lst.append([d1])
return {"name":lst, "score":team_score}
# build_team returns a dict, so pull the team list and score out of it explicitly.
result = build_team(data, '433', 'FC Barcelona')
team, team_score = result["name"], result["score"]
# print(team)
def subsitute_player(data, team, player):
club = team[0][0]['Club']
fixed_players = []
for i in team:
for j in i:
fixed_players.append(j['Name'])
data_filter = data.loc[data['Club'] == club]
fixed_players.remove(player)
data_filter = data_filter.drop(data_filter[data_filter['Name'] == player].index)
#
data_fixed_players = data_filter[data_filter['Name'] == fixed_players[0]]
data_filter = data_filter.drop(data_filter[data_filter['Name'] == fixed_players[0]].index)
#
#
for i in fixed_players[1:]:
df2 = data_filter[data_filter['Name'] == i]
data_fixed_players = data_fixed_players.append(df2, ignore_index=True)
data_filter = data_filter.drop(data_filter[data_filter['Name'] == i].index)
#
#
#
score = 0
best_team = []
for index, row in data_filter.iterrows():
new_data = data_fixed_players.append(row, ignore_index=True)
        ret = build_team(new_data, '4231A', club)
        if ret is None:
            continue
        # build_team returns a dict keyed by "name" and "score".
        team, team_score = ret["name"], ret["score"]
if team_score > score:
score = team_score
best_team = team
return score, best_team
# print(subsitute_player(data,team,'Coutinho'))
def budget_build(data, budget, formation):
data1 = data
list = ['ID', 'Name', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM',
'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Position', 'Value']
data_filter = data1.loc[:, data1.columns.isin(list)]
data_filter = data_filter.dropna()
list = ['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM',
'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB']
for i in list:
data_filter[i] = data_filter[i].apply(removeadd)
data_filter = data_filter.dropna()
positions = formations(formation)
print(positions)
data_filter['Value'] = data_filter['Value'].apply(map_value)
team = []
while len(positions) != 0 and budget >= 0:
team_positions = {}
val = budget / len(positions)
for i in positions:
rows = data_filter.loc[(data_filter['Value'] <= val) & (data_filter['Value'] > 0)]
serie = rows.loc[rows[i].idxmin()][['Name', 'Value']]
team_positions[i] = (serie['Name'], serie['Value'])
print('hi')
value = ([(v[0], v[1][0], v[1][1]) for v in sorted(team_positions.items(), key=lambda x: x[1][1])][0])
print(value[0])
team.append([value[0], value[1], value[2]])
budget = budget - value[2]
print(budget)
positions.remove(value[0])
data_filter = data_filter.drop(data_filter[data_filter['Name'] == value[1]].index)
return team
# print(budget_build(data,200,'4231A'))
| true
|
a68f778c94f180a2dcb45805a775ffa44ad174ea
|
Python
|
lauramatchett/hackbright-intro-more-lists-and-loops
|
/01-list-primes/list_primes.py
|
UTF-8
| 121
| 2.796875
| 3
|
[] |
no_license
|
primes = [1, 3]
primes = primes + [5]
primes.append(7)
primes.append(11)
primes.extend([13, 17])
print primes
| true
|
cdb0aaac23045a257c540f947a71dc11782277fd
|
Python
|
rob-luke/mne-nirs
|
/examples/general/plot_13_fir_glm.py
|
UTF-8
| 12,883
| 2.765625
| 3
|
[] |
permissive
|
"""
.. _tut-fnirs-fir:
GLM FIR Analysis
================
In this example we analyse data from a real multi-channel
functional near-infrared spectroscopy (fNIRS)
experiment (see :ref:`tut-fnirs-hrf-sim` for a simplified simulated
analysis). The experiment consists of three conditions:
1) tapping with the left hand,
2) tapping with the right hand, and
3) a control condition where the participant does nothing.
In this tutorial the morphology of an fNIRS response is obtained using a
Finite Impulse Response (FIR) GLM analysis.
An alternative epoching-style analysis on the same data can be
viewed in the
:ref:`waveform analysis example <tut-fnirs-group-wave>`.
See
`Luke et al (2021) <https://www.spiedigitallibrary.org/journals/neurophotonics/volume-8/issue-2/025008/Analysis-methods-for-measuring-passive-auditory-fNIRS-responses-generated-by/10.1117/1.NPh.8.2.025008.short>`_
for a comparison of the epoching and GLM FIR approaches.
This tutorial only examines the tapping with the right hand condition
to simplify explanation and minimise computation time. The reader is invited
to expand the code to also analyse the other conditions.
This GLM analysis is a wrapper over the excellent
`Nilearn GLM <http://nilearn.github.io/modules/reference.html#module-nilearn.glm>`_.
.. note::
This is an advanced tutorial and requires knowledge of pandas and numpy.
I plan to write some functions to make this more convenient in the future.
.. note::
The sample rate used in this example is set to 0.5 Hz. This is to
ensure that the code can run on the continuous integration servers. You may
wish to increase the sample rate by adjusting `resample` below for your
own analysis.
.. note::
This tutorial uses data stored using
`the BIDS format <https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/11-near-infrared-spectroscopy.html>`_
:footcite:p:`luke2023bids`.
MNE-Python allows you to process fNIRS data that is not in BIDS format.
Simply modify the ``read_raw_`` function to match your data type.
See :ref:`data importing tutorial <tut-importing-fnirs-data>` to learn how
to use your data with MNE-Python.
.. contents:: Page contents
:local:
:depth: 2
"""
# sphinx_gallery_thumbnail_number = 1
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
# Import common libraries
import numpy as np
import pandas as pd
# Import MNE processing
from mne.preprocessing.nirs import optical_density, beer_lambert_law
# Import MNE-NIRS processing
from mne_nirs.statistics import run_glm
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import statsmodels_to_results
from mne_nirs.datasets import fnirs_motor_group
from mne_nirs.channels import get_short_channels, get_long_channels
# Import MNE-BIDS processing
from mne_bids import BIDSPath, read_raw_bids
# Import StatsModels
import statsmodels.formula.api as smf
# Import Plotting Library
import matplotlib.pyplot as plt
# %%
# Define FIR analysis
# ---------------------------------------------------------------------
#
# This code runs a FIR GLM analysis.
# It fits a FIR for each sample from the onset of a trigger.
# We specify here that 10 FIR delays should be used.
# This results in values being estimated for 10 `delay` steps.
# Due to the chosen sample rate of 0.5 Hz, these delays
# correspond to 0, 2, 4... seconds from the onset of the stimulus.
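# As a quick illustration (not part of the original tutorial), the
# delay-to-time mapping is simply delay / sfreq:
#   [d / 0.5 for d in range(10)]  # -> [0.0, 2.0, 4.0, ..., 18.0] seconds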
def analysis(fname, ID):
raw_intensity = read_raw_bids(bids_path=fname, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end of the experiment.
raw_intensity.annotations.delete(raw_intensity.annotations.description == '15.0')
# sanitize event names
raw_intensity.annotations.description[:] = [
d.replace('/', '_') for d in raw_intensity.annotations.description]
# Convert signal to haemoglobin and just keep hbo
raw_od = optical_density(raw_intensity)
raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
raw_haemo.resample(0.5, npad="auto")
# Cut out just the short channels for creating a GLM regressor
short_chans = get_short_channels(raw_haemo)
raw_haemo = get_long_channels(raw_haemo)
# Create a design matrix
design_matrix = make_first_level_design_matrix(raw_haemo,
hrf_model='fir',
stim_dur=1.0,
fir_delays=range(10),
drift_model='cosine',
high_pass=0.01,
oversampling=1)
# Add short channels as regressor in GLM
for chan in range(len(short_chans.ch_names)):
design_matrix[f"short_{chan}"] = short_chans.get_data(chan).T
# Run GLM
glm_est = run_glm(raw_haemo, design_matrix)
    # As an example, create a single ROI that includes all channels
rois = dict(AllChannels=range(len(raw_haemo.ch_names)))
# Calculate ROI for all conditions
conditions = design_matrix.columns
# Compute output metrics by ROI
df_ind = glm_est.to_dataframe_region_of_interest(rois, conditions)
df_ind["ID"] = ID
df_ind["theta"] = [t * 1.e6 for t in df_ind["theta"]]
return df_ind, raw_haemo, design_matrix
# %%
# Run analysis
# ---------------------------------------------------------------------
#
# The analysis is run on each individual subject measurement.
# The individual results are then appended to a dataframe for
# group-level analysis below.
df = pd.DataFrame()
for sub in range(1, 6): # Loop from first to fifth subject
ID = '%02d' % sub # Tidy the subject name
# Create path to file based on experiment info
bids_path = BIDSPath(subject=ID, task="tapping",
root=fnirs_motor_group.data_path(),
datatype="nirs", suffix="nirs", extension=".snirf")
df_individual, raw, dm = analysis(bids_path, ID)
df = pd.concat([df, df_individual])
# %%
# Tidy the dataframe
# ---------------------------------------------------------------------
#
# For simplicity we only examine the data from the right hand
# tapping condition. The code below retains only the relevant information
# from the dataframe and design matrix for the following statistical analysis.
# Keep only tapping and FIR delay information in the dataframe
# I.e., for this example we are not interested in the drift coefficients,
# short channel information, or control conditions.
df["isTapping"] = ["Tapping_Right" in n for n in df["Condition"]]
df["isDelay"] = ["delay" in n for n in df["Condition"]]
df = df.query("isDelay in [True]")
df = df.query("isTapping in [True]")
# Make a new column that stores the condition name for tidier model below
df.loc[df["isTapping"] == True, "TidyCond"] = "Tapping"
# Finally, extract the FIR delay into its own column in the data frame
df.loc[:, "delay"] = [n.split('_')[-1] for n in df.Condition]
# To simplify this example we will only look at the right hand tapping
# condition so we now remove the left tapping conditions from the
# design matrix and GLM results
dm_cols_not_left = np.where(["Right" in c for c in dm.columns])[0]
dm = dm[[dm.columns[i] for i in dm_cols_not_left]]
# %%
# Run group-level model
# ---------------------------------------------------------------------
#
# A linear mixed effects (LME) model is used to determine the effect
# of FIR delay for each chromophore on the evoked response with participant
# (ID) as a random variable.
lme = smf.mixedlm('theta ~ -1 + delay:TidyCond:Chroma', df,
groups=df["ID"]).fit()
# The full model summary is not displayed here to keep the output short.
# You can display the model output using: lme.summary()
# %%
# Summarise group-level findings
# ---------------------------------------------------------------------
#
# Next the values from the model above are extracted into a dataframe for
# more convenient analysis below.
# A subset of the results is displayed, illustrating the estimated coefficients
# for oxyhaemoglobin (HbO) for the right hand tapping condition.
# Create a dataframe from LME model for plotting below
df_sum = statsmodels_to_results(lme)
df_sum["delay"] = [int(n) for n in df_sum["delay"]]
df_sum = df_sum.sort_values('delay')
# Print the result for the oxyhaemoglobin data in the tapping condition
df_sum.query("TidyCond in ['Tapping']").query("Chroma in ['hbo']")
# %%
# Note in the output above that there are 10 FIR delays.
# A coefficient estimate has been calculated for each delay.
# These coefficients must be multiplied by the FIR function to obtain the
# morphology of the fNIRS response.
# %%
# Plot the response from a single condition
# ---------------------------------------------------------------------
#
# Finally we create a plot with three facets.
# The first facet illustrates the FIR model that was used in the GLM analysis,
# the model results displayed in the table above indicate the scaling values
# that should be applied to this model so that it best describes the
# measured data.
# The second facet illustrates the estimated amplitude of each FIR component
# for the right hand tapping condition for the oxyhaemoglobin data;
# it is obtained by multiplying the FIR model by the estimated coefficients
# from the GLM output.
# The third facet illustrates the overall estimated response for each
# chromophore and is calculated by summing all the individual FIR components
# per chromophore (HbR not shown in first two facets).
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 10))
# Extract design matrix columns that correspond to the condition of interest
dm_cond_idxs = np.where(["Tapping" in n for n in dm.columns])[0]
dm_cond = dm[[dm.columns[i] for i in dm_cond_idxs]]
# Extract the corresponding estimates from the lme dataframe for hbo
df_hbo = df_sum.query("TidyCond in ['Tapping']").query("Chroma in ['hbo']")
vals_hbo = [float(v) for v in df_hbo["Coef."]]
dm_cond_scaled_hbo = dm_cond * vals_hbo
# Extract the corresponding estimates from the lme dataframe for hbr
df_hbr = df_sum.query("TidyCond in ['Tapping']").query("Chroma in ['hbr']")
vals_hbr = [float(v) for v in df_hbr["Coef."]]
dm_cond_scaled_hbr = dm_cond * vals_hbr
# Extract the time scale for plotting.
# Set time zero to be the onset of the finger tapping.
index_values = dm_cond_scaled_hbo.index - np.ceil(raw.annotations.onset[0])
index_values = np.asarray(index_values)
# Plot the result
axes[0].plot(index_values, np.asarray(dm_cond))
axes[1].plot(index_values, np.asarray(dm_cond_scaled_hbo))
axes[2].plot(index_values, np.sum(dm_cond_scaled_hbo, axis=1), 'r')
axes[2].plot(index_values, np.sum(dm_cond_scaled_hbr, axis=1), 'b')
# Format the plot
for ax in range(3):
axes[ax].set_xlim(-5, 30)
axes[ax].set_xlabel("Time (s)")
axes[0].set_ylim(-0.5, 1.3)
axes[1].set_ylim(-3, 8)
axes[2].set_ylim(-3, 8)
axes[0].set_title("FIR Model (Unscaled by GLM estimates)")
axes[1].set_title("FIR Components (Scaled by Tapping/Right GLM Estimates)")
axes[2].set_title("Evoked Response (Tapping/Right)")
axes[0].set_ylabel("FIR Model")
axes[1].set_ylabel("Oxyhaemoglobin (ΔμMol)")
axes[2].set_ylabel("Haemoglobin (ΔμMol)")
axes[2].legend(["Oxyhaemoglobin", "Deoxyhaemoglobin"])
# %%
# Plot the response with confidence intervals
# ---------------------------------------------------------------------
#
# We can also extract the 95% confidence intervals of the estimates too
l95_hbo = [float(v) for v in df_hbo["[0.025"]] # lower estimate
u95_hbo = [float(v) for v in df_hbo["0.975]"]] # upper estimate
dm_cond_scaled_hbo_l95 = dm_cond * l95_hbo
dm_cond_scaled_hbo_u95 = dm_cond * u95_hbo
l95_hbr = [float(v) for v in df_hbr["[0.025"]] # lower estimate
u95_hbr = [float(v) for v in df_hbr["0.975]"]] # upper estimate
dm_cond_scaled_hbr_l95 = dm_cond * l95_hbr
dm_cond_scaled_hbr_u95 = dm_cond * u95_hbr
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(7, 7))
# Plot the result
axes.plot(index_values, np.sum(dm_cond_scaled_hbo, axis=1), 'r')
axes.plot(index_values, np.sum(dm_cond_scaled_hbr, axis=1), 'b')
axes.fill_between(index_values,
np.asarray(np.sum(dm_cond_scaled_hbo_l95, axis=1)),
np.asarray(np.sum(dm_cond_scaled_hbo_u95, axis=1)),
facecolor='red', alpha=0.25)
axes.fill_between(index_values,
np.asarray(np.sum(dm_cond_scaled_hbr_l95, axis=1)),
np.asarray(np.sum(dm_cond_scaled_hbr_u95, axis=1)),
facecolor='blue', alpha=0.25)
# Format the plot
axes.set_xlim(-5, 30)
axes.set_ylim(-7, 10)
axes.set_title("Evoked Response (Tapping/Right)")
axes.set_ylabel("Haemoglobin (ΔμMol)")
axes.legend(["Oxyhaemoglobin", "Deoxyhaemoglobin"])
axes.set_xlabel("Time (s)")
| true
|
4043513921a71bd2f619ae6a66e9e010ec894662
|
Python
|
slott56/navtools
|
/navtools/olc.py
|
UTF-8
| 8,898
| 3.921875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
"""OLC Representation of latitude/longitude pairs.
(Sometimes called a "Plus Code")
This is one of many Geocoding schemes that permits simplistic proximity checks.
There are two parts to the encoding: most significant 10, least signficant 5.
- Most significant is base 20 for 5 digits.
Lat and Lon codes are interleaved we create a 10 digit sequence.
A "+" is inserted after 8 characters. This is approximately a 13.9 meter square.
- Least significant is base 5/base 4 decomposition, combined into single digits.
There can be at most 5 of these. They are optional.
>>> from math import isclose
>>> lat = 1.286785
>>> lon = 103.854503
>>> OLC().encode(lat, lon)
'6PH57VP3+PR6'
>>> lat_2, lon_2 = OLC().decode('6PH57VP3+PR6')
>>> isclose(lat_2, 1.28675, rel_tol=1E-5) # Truncation, not the original value
True
>>> isclose(lon_2, 103.8545, rel_tol=1E-5) # Truncation: not the original lon value.
True
>>> isclose(lat, lat_2, rel_tol=1E-4)
True
>>> isclose(lon, lon_2, rel_tol=1E-5)
True
The official Test Cases:
https://github.com/google/open-location-code/blob/main/test_data
We don't actually pass *all* of these. There are four decoding
test cases involving careful rounding that this implementation
handles incorrectly.
"""
from typing import Iterable, Iterator
class Geocode: # pragma: no cover
def encode(self, lat: float, lon: float) -> str:
pass
def decode(self, code: str) -> tuple[float, float]:
pass
class OLC(Geocode):
code = "23456789CFGHJMPQRVWX"
def encode(self, lat: float, lon: float, size: int = 11) -> str:
"""
Encode an OLC string from a lat, lon pair.
The latitude number must be clipped to be in the range -90 to 90.
The longitude number must be normalised to be in the range -180 to 180.
>>> OLC().encode(20.3701135, 2.78223535156, size=13)
'7FG49QCJ+2VXGJ'
:param lat: latitude, signed
:param lon: longitude, signed
:param size: limit of detail, usually 10 or 11, but be up to 15.
:return: OLC string
"""
# Clip latitude to -90 - +90.
# Special case for excluding +90: back off based on how many digits are needed.
lat = max(min(lat, 90), -90)
if lat == 90.0:
if size <= 10:
adj = 20 ** (2 - size / 2)
else:
adj = 20 ** -3 / 5 ** (size - 10)
lat -= adj
# Normalize longitude to -180 to +180 (excluding +180)
while lon >= 180:
lon -= 360
while lon < -180:
lon += 360
# Convert to N latitude and E longitude via offsets to remove signs.
nlat = lat + 90
elon = lon + 180
# Create the sequences of digits
lat_digits = list(base20(nlat, lsb=5))
lon_digits = list(base20(elon, lsb=4))
# Interleave 5 pairs of digits from latitude and longitude for the most significant portion
msb = "".join(
f"{self.code[lat_digits[i]]}{self.code[lon_digits[i]]}" for i in range(5)
)
# Append five of the LSB characters from pairs of digits.
lsb = "".join(
self.code[lat_digits[p] * 4 + lon_digits[p]] for p in range(5, 10)
)
# Handle the size parameter with truncation and/or zero-padding.
olc = (msb + lsb)[:size]
if len(olc) < 8:
olc += "0" * (8 - len(olc))
# Inject the "+" after 8.
return f"{olc[:8]}+{olc[8:]}"
def decode(self, olc: str, size: int = 11) -> tuple[float, float]:
"""
Decode a lat, lon pair from an OLC string.
An OLC has several forms, punctuated by an "+" that signals
the end of the leading 8 characters.
1. ``AOAOAOAO``: no plus. Assume "+00" suffix to fill up to a 10-digit MSB-only form.
2. ``AOAOAOAO+AO``: the expected 10-digit MSB-only form.
3. ``AOAOAOAO+AOVWYXZ``: after the 10-digits, an LSB suffix of 1 to 5 additional digits.
4. ``AOAO0000`` zeros used as place-holders to fill out the MSB section.
5. ``AOAO+`` leading positions can be assumed based on other context.
We don't handle this.
Note that the encoded value is allowed to pad with zeroes, which are not otherwise valid.
These are -- in effect -- wild-card matching values. We can replace them with "2" which
is not as obviously a wild-card.
The reference implementation decodes an OLC to define a bounding box; not a single point.
We don't implement this completely. Four test cases do not pass with this implementation.
:param olc: OLC string
:param size: not used, but can truncate long over-specified strings
Can also be used to define the size of the bounding box in the LSB suffix.
:return: lat, lon pair
"""
# Expand to a single, uniform string (without punctuation or special cases.)
# 10 MSB positions of 2-digit, 5 LSB positions of 1-digit.
olc_clean = "".join(olc.split("+"))
if len(olc_clean) <= 15:
olc_15 = olc_clean.replace("0", "2") + "2" * (15 - len(olc_clean))
else:
olc_15 = olc_clean
# Each of the LSB Characters needs to be expanded into base 5/base 4 lat-lon pair
msb = olc_15[:10]
lsb = olc_15[10:15]
pairs = (divmod(self.code.index(c), 5) for c in lsb)
lsb_expanded = "".join(
f"{self.code[lat]}{self.code[lon]}" for lat, lon in pairs
)
lsb_expanded += "2" * (10 - len(lsb_expanded))
full = msb + lsb_expanded
# Convert from base-20 to float.
# TODO: Honor the size parameter by chopping the values.
nlat = from20(list(self.code.index(c) for c in full[0::2]), lsb=5)
elon = from20(list(self.code.index(c) for c in full[1::2]), lsb=4)
# TODO: Tweak a tiny bit with the level of precision given by the size.
nlat += 0.5 / 20 ** 3 / 5 ** 5
elon += 0.5 / 20 ** 3 / 4 ** 5
# Remove North and East offsets.
return round(nlat - 90, 8), round(elon - 180, 8)
def base20(x: float, msb: int = 20, lsb: int = 5) -> Iterable[int]:
"""
Decompose a positive Lat or Lon value to a sequence of 5 base-20 values
followed by 5 base-4 or base-5 values.
See https://github.com/google/open-location-code/blob/main/docs/specification.md#encoding
>>> list(base20(1.286785+90, lsb=5))
[4, 11, 5, 14, 14, 1, 2, 0, 0, 0]
>>> list(base20(103.854503+180, lsb=4))
[14, 3, 17, 1, 16, 0, 0, 1, 2, 0]
From 20.3701135,2.78223535156,13,7FG49QCJ+2VXGJ
The last 3, XGJ, are combinations of base 5, base 4 pairs.
X = (4, 3), G = (2, 2), J = (3, 0)
"7G9C2645"
>>> list(base20(20.3701135+90, lsb=5))
[5, 10, 7, 8, 0, 4, 2, 3, 2, 2]
"F4QJV642"
>>> list(base20(2.78223535156+180, lsb=4))
[9, 2, 15, 12, 17, 3, 2, 0, 1, 3]
"""
def ldigits(value: int, base: int) -> Iterable[int]:
"""
Generates 5 digits from an integer value for a given base.
This is starts with Least Significant, which requires reversal.
"""
for b in range(5):
value, digit = divmod(value, base)
yield digit
def digits(value: int, base: int) -> list[int]:
"""
A list of six digits from a positive integer value for a given base.
This starts with the Most Significant Digit.
"""
return list(reversed(list(ldigits(value, base))))
# Scale up the latitude or longitude float to a large integer for the 5 MSB's.
x_most = int(round(x * msb ** 3, 6))
# Create the sequence of digits in the MSB base (20, usually)
msb_digits = digits(int(x_most), msb)
# Scale up the latitude or longitude float to a larger integer for the 5 LSB's.
x_least = int(round(x * msb ** 3 * lsb ** 5, 5))
# Create a sequence of digits in the LSB base (4 or 5, depending.)
lsb_digits = digits(int(x_least), lsb)
# Emit the sequence of digits
return msb_digits + lsb_digits
def from20(digits: list[int], msb: int = 20, lsb: int = 5) -> float:
"""
Convert a sequence of 10 digits, 5 in the msb base and 5 in the lsb base,
into a float value.
>>> from math import isclose
>>> nlat_i = from20([4, 11, 5, 14, 14, 1, 1, 4, 4, 4])
>>> isclose(nlat_i, 91.286785, rel_tol=1E-6)
True
>>> elon_i = from20([14, 3, 17, 1, 16, 0, 0, 1, 2, 0], lsb=4)
>>> isclose(elon_i, 283.854503, rel_tol=1E-5)
True
"""
m = digits[0]
for d in digits[1:5]:
m = m * msb + d
l = digits[5]
for d in digits[6:10]:
l = l * lsb + d
# print(f"{digits=} {m=} {msb ** 3=} {m / msb ** 3=} {l=} {l / lsb**5=}")
return (m + l / lsb ** 5) / msb ** 3
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
| true
|
59c2e577d0748bf6c3d4110ce6a243797fedbd17
|
Python
|
fpt/sendmsgw
|
/sendmsgw.py
|
UTF-8
| 1,930
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pywin32
# http://sourceforge.net/projects/pywin32/
# ref
# http://mail.python.org/pipermail/python-win32/2005-March/003077.html
import win32api
import win32gui
import win32con
import getopt
import sys
import re
def FindWindowRegExp(pat):
p = re.compile(pat)
# http://www.brunningonline.net/simon/blog/archives/000652.html
def windowEnumerationHandler(hwnd, resultList):
resultList.append((hwnd, win32gui.GetWindowText(hwnd)))
topWindows = []
win32gui.EnumWindows(windowEnumerationHandler, topWindows)
for item in topWindows:
if p.match(item[1]):
return item[0]
# http://stackoverflow.com/questions/5080777/what-sendmessage-to-use-to-send-keys-directly-to-another-window
def send_input_hax(hwnd, msg):
for c in msg:
if c == "\n":
win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
win32api.SendMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)
else:
win32api.SendMessage(hwnd, win32con.WM_CHAR, ord(c), 0)
def usage():
    print 'usage: sendmsgw <window title> <key strokes>'
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.GetoptError:
        # print the help message and exit
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if len(args) < 2:
usage()
sys.exit(2)
pat = args[0]
strokes = args[1].replace(r'\n', '\n')
# find window by regexp
hwnd = FindWindowRegExp(pat)
if not hwnd:
print 'Window not found'
sys.exit(1)
send_input_hax(hwnd, strokes)
print 'sent message ' + strokes + ' to ' + str(hwnd)
if __name__ == "__main__":
main()
| true
|
6884d5389a3362482daebd359dd1ec7714ceeec5
|
Python
|
bhaveshpandey29/phpstore
|
/product.py
|
UTF-8
| 2,294
| 3
| 3
|
[] |
no_license
|
from databaseConnector import getDBConnection as connection
def registerProduct(product_name,product_price,product_quantity):
try:
flag=0
db,cursor= connection()
insert_sql = f"insert into product(product_name,product_price,product_quantity) values ('{product_name}','{product_price}','{product_quantity}')"
search_sql = f"select * from product where product_name like '{product_name}' and product_price like {product_price}"
cursor.execute(search_sql)
rs = cursor.fetchall()
if(len(rs)>0):
flag = 1
else:
cursor.execute(insert_sql)
db.commit()
except Exception as e:
db.rollback()
raise e
else:
if(flag == 0):
print("inserted successfully")
else:
            print("product already exists")
finally:
db.close()
#registerProduct('Java','100','5','21 Aug')
def getProductDetail(product_name):
try:
flag = 0
db,cursor = connection()
sql = f"select * from product where product_name like '{product_name}'"
cursor.execute(sql)
res = cursor.fetchall()
if len(res)>0:
flag =1
except Exception as e:
print("Something went wrong")
raise e
else:
print(list(res))
finally:
db.close()
#getProductDetail('python')
def getAllProduct():
db,cursor = connection()
try:
sql = f"select * from product"
cursor.execute(sql)
resu = cursor.fetchall()
except Exception as e:
print("Something went wrong")
raise e
else:
print(list(resu))
finally:
db.close()
#getAllProduct()
def getProductId(product_name):
try:
flag = 0
db,cursor = connection()
sql = f"select product_id from product where product_name like '{product_name}'"
cursor.execute(sql)
result = cursor.fetchall()
if len(result)>0:
flag =1
except Exception as e:
print("Something went wrong")
raise e
    else:
        # Guard against an empty result before indexing into it.
        if flag:
            return result[0][0]
finally:
db.close()
#getProductId('java')
| true
|
83c76f6091ae0cf0eca04ac870456997217ae729
|
Python
|
iverberk/advent-of-code-2018
|
/day-06/part1.py
|
UTF-8
| 1,209
| 3.359375
| 3
|
[] |
no_license
|
from collections import defaultdict
def X(point):
x, y = point
return x
def Y(point):
x, y = point
return y
def manhattan_distance(p1, p2):
return abs(X(p2) - X(p1)) + abs(Y(p2) - Y(p1))
coordinates = {}
max_x, max_y = 0, 0
with open('input') as f:
for index, coordinate in enumerate(f):
(x, y) = list(map(int, coordinate.strip().split(',')))
max_x, max_y = max(max_x, x), max(max_y, y)
coordinates[(x, y)] = index
counts = defaultdict(int)
infinite = set()
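# For each grid cell, find the closest input coordinate by Manhattan distance;
# regions that touch the bounding-box border extend infinitely and are excluded.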
for y in range(0, max_y+1):
for x in range(0, max_x+1):
min_distance = max_x + max_y
min_index = 0
distances = []
for coordinate, index in coordinates.items():
distance = manhattan_distance((x, y), coordinate)
distances.append(distance)
if distance < min_distance:
min_distance = distance
min_index = index
distances.sort()
if distances[0] != distances[1]:
counts[min_index] += 1
if x == 0 or x == max_x or y == 0 or y == max_y:
infinite.add(min_index)
print(max({k: v for k, v in counts.items() if k not in infinite}.values()))
| true
|
e0e0b1f2f48da420f57fbdd77a28dd381543ba00
|
Python
|
L3NNY0969/datbananabotheroku
|
/cogs/idiotic.py
|
UTF-8
| 5,051
| 2.5625
| 3
|
[] |
no_license
|
import discord
import os
import io
import idioticapi
import random
import json
from discord.ext import commands
class Idiotic:
def __init__(self, bot):
self.bot = bot
self.client = idioticapi.Client(os.environ.get('idioticapi'), dev=True)
def format_avatar(self, avatar_url):
if avatar_url.endswith(".gif"):
return avatar_url + "?size=2048"
return avatar_url.replace("webp", "png")
@commands.command(aliases=['triggered'])
async def triggeredpic(self, ctx, user: discord.Member = None):
"""TRI GER RED!!!"""
if user is None:
user = ctx.author
try:
await ctx.trigger_typing()
av = self.format_avatar(user.avatar_url)
await ctx.send(f"Grrrr...**{user.name}** is triggered.", file=discord.File(await self.client.triggered(av), "triggered.gif"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def batslap(self, ctx, user: discord.Member = None):
"""User 1 will be slapping, user 2 will BE SLAPPED! Tehehe!"""
if user is None:
await ctx.send("Gotta tag someone that you wanna slap!")
else:
await ctx.trigger_typing()
try:
av = self.format_avatar(user.avatar_url)
avatar = self.format_avatar(ctx.author.avatar_url)
await ctx.send(f"Ouch! **{ctx.author.name}** slapped **{user.name}!**", file=discord.File(await self.client.batslap(avatar, av), "batslap.png"))
except Exception as e:
                await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def missing(self, ctx, user: discord.Member = None):
"""Uh-oh...someone went missing!"""
await ctx.trigger_typing()
user = ctx.author if user is None else user
try:
await ctx.send(f"**{user.name}** went missing!", file=discord.File(await self.client.missing(user.avatar_url, user.name), "missing.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def wanted(self, ctx, user: discord.Member = None):
"""Someone is WANTED!"""
await ctx.trigger_typing()
user = ctx.author if user is None else user
try:
await ctx.send(f"**{user.name}** is wanted!", file=discord.File(await self.client.wanted(user.avatar_url), "wanted.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def achievement(self, ctx, *, text=None):
"""Give yourself an achievement. You need one."""
text = text if text else "Not putting text when using this command."
try:
await ctx.send(f"**{ctx.author.name}** got an achievement!", file=discord.File(await self.client.achievement(ctx.author.avatar_url, text), "achievement.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def facepalm(self, ctx, user: discord.Member = None):
user = user if user is not None else ctx.author
try:
await ctx.send(f"**{user.name}** had to facepalm.", file=discord.File(await self.client.facepalm(user.avatar_url), "facepalm.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def beautiful(self, ctx, user: discord.Member = None):
user = user if user is not None else ctx.author
try:
await ctx.send(f"**{user.name}** is beautiful!", file=discord.File(await self.client.beautiful(user.avatar_url), "beautiful.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def stepped(self, ctx, user: discord.Member = None):
user = user if user is not None else ctx.author
try:
await ctx.send(f"**{user.name}** got stepped on.", file=discord.File(await self.client.stepped(user.avatar_url), "stepped.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
@commands.command()
async def fear(self, ctx, user: discord.Member = None):
user = user if user is not None else ctx.author
try:
await ctx.send(f"**{user.name}** is SCARY!", file=discord.File(await self.client.heavyfear(user.avatar_url), "fear.png"))
except Exception as e:
            await ctx.send(f"An error occurred with IdioticAPI. \nMore details: \n{e}")
def setup(bot):
bot.add_cog(Idiotic(bot))
| true
|
e7e609d3f62b6377cc159f1e8fdeccae08a3271c
|
Python
|
lcbasu/Pyhthon-Search-Engine
|
/greatest.py
|
UTF-8
| 147
| 3.484375
| 3
|
[] |
no_license
|
def greatest(list):
    # Start from the first element so lists of all-negative numbers are
    # handled correctly; an empty list still returns 0, as before.
    if len(list) == 0:
        return 0
    max = list[0]
    for e in list:
        if e > max:
            max = e
    return max
print greatest([1,2,3,4,5,1,5])
| true
|
479dfc761da13ddca612baa1f50ca30c9318494c
|
Python
|
kwohlfahrt/nuc_analyze
|
/nuc_analyze/util.py
|
UTF-8
| 562
| 2.921875
| 3
|
[] |
no_license
|
from collections import defaultdict
def flatten_dict(d):
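    # Flatten arbitrarily nested dicts into {(key, ..., subkey): value} pairs,
    # e.g. {'a': {'b': 1}} -> {('a', 'b'): 1}.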
r = {}
for key, value in d.items():
try:
r.update({(key,) + k: v for k, v in flatten_dict(value).items()})
except AttributeError:
r[(key,)] = value
return r
def tree():
def tree_():
return defaultdict(tree_)
return tree_()
def unflatten_dict(it):
r = tree()
for ks, v in it:
d = r
for k in ks[:-1]:
d = d[k]
d[ks[-1]] = v
return r
def ceil_div(x, y):
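    # Integer ceiling division, e.g. ceil_div(7, 2) == 4.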
return x // y + (x % y != 0)
| true
|
d814daf3f093f9e86466d2200a1f37cfdbda2220
|
Python
|
steveshogren/GraphFun
|
/Graph.py
|
UTF-8
| 2,167
| 2.875
| 3
|
[] |
no_license
|
__author__ = 'jack'
from os.path import exists
from subprocess import call
G = {
'A': {'B': 10, 'D': 4, 'F': 10},
'B': {'E': 5, 'J': 10, 'I': 17},
'C': {'A': 4, 'D': 10, 'E': 16},
'D': {'F': 12, 'G': 21},
'E': {'G': 4},
'F': {'H': 3},
'G': {'J': 3},
'H': {'G': 3, 'J': 5},
'I': {},
'J': {'I': 8},
}
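# Adjacency map: G[u] is a dict of neighbours, with G[u][v] the weight of the
# directed edge u -> v.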
def printInGraphViz(G):
dotContents = '';
for v, pointsTo in G.items():
for u, weight in pointsTo.items():
dotContents += v + ' -> ' + u + ' [label=' + str(weight) + '];'
return dotContents
def printPathInGraphViz(G, start, end):
dotContents = '';
path = Dijkstra(G, start, end)
for v, pointsTo in G.items():
for u, weight in pointsTo.items():
color = ''
if path.count(u) and path.count(v):
color = ' color=red '
dotContents += v + ' -> ' + u + ' [label=' + str(weight) + color + '];'
return dotContents
def Dijkstra(G, start, end):
dist = {}
previous = {}
for v in G.keys():
dist[v] = float('inf')
previous[v] = None
dist[start] = 0
    unseen_nodes = list(G.keys())  # copy into a list so .remove() also works on Python 3
# calculate the shortest path from every node to the start
while len(unseen_nodes) > 0:
shortest = None
u = ''
for temp_node in unseen_nodes:
if shortest is None:
shortest = dist[temp_node]
u = temp_node
elif dist[temp_node] < shortest:
shortest = dist[temp_node]
u = temp_node
unseen_nodes.remove(u)
for v, child_dist in G[u].items():
alt = dist[u] + child_dist
if dist[v] > alt:
dist[v] = alt
previous[v] = u
path = []
u = end
while previous[u] is not None:
path.insert(0, u)
u = previous[u]
path.insert(0, start)
return path
dotFile = 'dotTemp.dot'
f = open(dotFile, 'w')
f.write('digraph G {' + printPathInGraphViz(G, 'C', 'I') + '}')
f.close()
output_file = 'graph.png'
if exists(output_file):
call(['rm', output_file])
call(['dot', '-Tpng', dotFile, '-o' + output_file])
| true
|
2d61eb68dede36b38faafbf9e7085ef292d75fcf
|
Python
|
oxhead/CodingYourWay
|
/src/lt_167.py
|
UTF-8
| 2,248
| 4.03125
| 4
|
[] |
no_license
|
"""
https://leetcode.com/problems/two-sum-ii-input-array-is-sorted
Related:
- lt_1_two-sum
- lt_653_two-sum-iv-input-is-a-bst
"""
"""
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
class Solution:
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
left, right = 0, len(numbers) - 1
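        # Two-pointer scan: since the array is sorted, move inward from
        # whichever end brings the sum closer to the target.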
while left < right:
total = numbers[left] + numbers[right]
if total > target:
right -= 1
elif total < target:
left += 1
else:
return [left + 1, right + 1]
def twoSum_slow(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
def binary_search(start, end, n):
if start > end: return -1
mid = (start + end) // 2
records[target - numbers[mid]] = mid
if numbers[mid] == n: return mid
elif numbers[mid] > n: return binary_search(start, mid - 1, n)
else: return binary_search(mid + 1, end, n)
records = {}
for i, n in enumerate(numbers):
if n in records:
return [i + 1, records[n] + 1]
j = binary_search(i + 1, len(numbers) - 1, target - n)
if j != -1:
return [i + 1, j + 1]
return []
if __name__ == '__main__':
test_cases = [
(([2, 7, 11, 15], 9), [1, 2]),
(([5, 25, 75], 100), [2, 3]),
]
for test_case in test_cases:
print('case:', test_case)
output = Solution().twoSum(*test_case[0])
print('output:', output)
assert output == test_case[1]
| true
|
c9e1a0bb1fb9d060b651c1c883bfbc5acdf125bd
|
Python
|
muvox/PythonPractice
|
/functionpractice.py
|
UTF-8
| 1,836
| 4.1875
| 4
|
[] |
no_license
|
import random
wins = 0
ties = 0
rounds = 0
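# Selection codes used below: 1 = Foot, 2 = Nuke, 3 = Cockroach.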
def computerChooses(playerString):
player = int(playerString)
computerSelection = random.randint(1, 3)
if computerSelection == 2:
print("Computer chose: Nuke")
elif computerSelection == 1:
print("Computer chose: Foot")
else:
print("Computer chose: Cockroach")
comp = computerSelection
if comp == 2 and player == 2:
print("Both LOSE")
roundCounter(0)
elif comp == player:
print("It is a tie!")
roundCounter(2)
elif (comp == 1 and player == 2) or \
(comp == 2 and player == 3) or \
(comp == 3 and player == 1):
print("You WIN!")
roundCounter(1)
else:
print("You LOSE!")
roundCounter(0)
def roundCounter(outcome=0):
global rounds
global wins
global ties
if outcome == 1:
wins = wins+1
rounds = rounds + 1
elif outcome == 2:
ties = ties+1
rounds = rounds + 1
else:
rounds = rounds + 1
def main():
running = True
while running:
        playerSelection = input("Foot, Nuke or Cockroach? (Quit ends):")
if playerSelection == "Foot":
print("You chose: Foot")
computerChooses(1)
elif playerSelection == "Nuke":
print("You chose: Nuke")
computerChooses(2)
elif playerSelection == "Cockroach":
print("You chose: Cockroach")
computerChooses(3)
elif playerSelection == "Quit":
            print("You played {} rounds, won {} of them, and tied in"
                  " {} rounds." .format(rounds, wins, ties))
running = False
else:
print("Incorrect selection.")
if __name__ == "__main__":
# execute only if run as a script
main()
| true
|
6a00be042a1851b24b1b99b717f4d5659b5e1103
|
Python
|
Socrats/Axelrod
|
/axelrod/tests/unit/test_classification.py
|
UTF-8
| 7,977
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
"""Tests for the classification."""
import unittest
import axelrod as axl
class TestClassification(unittest.TestCase):
def test_known_classifiers(self):
# A set of dimensions that are known to have been fully applied
known_keys = [
"stochastic",
"memory_depth",
"long_run_time",
"inspects_source",
"manipulates_source",
"manipulates_state",
]
for s in axl.all_strategies:
s = s()
self.assertTrue(None not in [s.classifier[key] for key in known_keys])
def test_multiple_instances(self):
"""Certain instances of classes of strategies will have different
classifiers based on the initialisation variables"""
P1 = axl.MemoryOnePlayer(four_vector=(.5, .5, .5, .5))
P2 = axl.MemoryOnePlayer(four_vector=(1, 0, 0, 1))
self.assertNotEqual(P1.classifier, P2.classifier)
P1 = axl.Joss()
P2 = axl.Joss(p=0)
self.assertNotEqual(P1.classifier, P2.classifier)
P1 = axl.GTFT(p=1)
P2 = axl.GTFT(p=.5)
self.assertNotEqual(P1.classifier, P2.classifier)
P1 = axl.StochasticWSLS()
P2 = axl.StochasticWSLS(ep=0)
self.assertNotEqual(P1.classifier, P2.classifier)
P1 = axl.GoByMajority(memory_depth=5)
P2 = axl.StochasticWSLS(ep=.1)
self.assertNotEqual(P1.classifier, P2.classifier)
def test_manipulation_of_classifier(self):
"""Test that can change the classifier of an instance without changing
the classifier of the class"""
player = axl.Cooperator()
player.classifier["memory_depth"] += 1
self.assertNotEqual(player.classifier, axl.Cooperator.classifier)
player = axl.Defector()
player.classifier["memory_depth"] += 1
self.assertNotEqual(player.classifier, axl.Defector.classifier)
    def test_obey_axelrod(self):
        """A test that verifies that the obey_axelrod function works correctly"""
known_cheaters = [
axl.Darwin,
axl.Geller,
axl.GellerCooperator,
axl.GellerDefector,
axl.MindBender,
axl.MindController,
axl.MindWarper,
axl.MindReader,
]
known_basic = [
axl.Alternator,
axl.AntiTitForTat,
axl.Bully,
axl.Cooperator,
axl.Defector,
axl.GoByMajority,
axl.SuspiciousTitForTat,
axl.TitForTat,
axl.WinStayLoseShift,
]
known_ordinary = [
axl.AverageCopier,
axl.ForgivingTitForTat,
axl.GoByMajority20,
axl.GTFT,
axl.Grudger,
axl.Inverse,
axl.Random,
]
for strategy in known_cheaters:
self.assertFalse(axl.obey_axelrod(strategy()), msg=strategy)
for strategy in known_basic:
self.assertTrue(axl.obey_axelrod(strategy()), msg=strategy)
for strategy in known_ordinary:
self.assertTrue(axl.obey_axelrod(strategy()), msg=strategy)
def test_is_basic(self):
"""A test that verifies if the is_basic function works correctly"""
known_cheaters = [
axl.Darwin,
axl.Geller,
axl.GellerCooperator,
axl.GellerDefector,
axl.MindBender,
axl.MindController,
axl.MindWarper,
axl.MindReader,
]
known_basic = [
axl.Alternator,
axl.AntiTitForTat,
axl.Bully,
axl.Cooperator,
axl.Defector,
axl.SuspiciousTitForTat,
axl.TitForTat,
axl.WinStayLoseShift,
]
known_ordinary = [
axl.AverageCopier,
axl.ForgivingTitForTat,
axl.GoByMajority20,
axl.GTFT,
axl.Inverse,
axl.Random,
]
for strategy in known_cheaters:
self.assertFalse(axl.is_basic(strategy()), msg=strategy)
for strategy in known_basic:
self.assertTrue(axl.is_basic(strategy()), msg=strategy)
for strategy in known_ordinary:
self.assertFalse(axl.is_basic(strategy()), msg=strategy)
def str_reps(xs):
"""Maps a collection of player classes to their string representations."""
return set(map(str, [x() for x in xs]))
class TestStrategies(unittest.TestCase):
def test_strategy_list(self):
for strategy_list in [
"all_strategies",
"demo_strategies",
"basic_strategies",
"long_run_time_strategies",
"strategies",
"ordinary_strategies",
"cheating_strategies",
]:
self.assertTrue(hasattr(axl, strategy_list))
def test_lists_not_empty(self):
for strategy_list in [
axl.all_strategies,
axl.demo_strategies,
axl.basic_strategies,
axl.long_run_time_strategies,
axl.strategies,
axl.ordinary_strategies,
axl.cheating_strategies,
]:
self.assertTrue(len(strategy_list) > 0)
def test_inclusion_of_strategy_lists(self):
all_strategies_set = set(axl.all_strategies)
for strategy_list in [
axl.demo_strategies,
axl.basic_strategies,
axl.long_run_time_strategies,
axl.strategies,
axl.ordinary_strategies,
axl.cheating_strategies,
]:
self.assertTrue(
str_reps(strategy_list).issubset(str_reps(all_strategies_set))
)
strategies_set = set(axl.strategies)
for strategy_list in [
axl.demo_strategies,
axl.basic_strategies,
axl.long_run_time_strategies,
]:
self.assertTrue(str_reps(strategy_list).issubset(str_reps(strategies_set)))
def test_long_run_strategies(self):
long_run_time_strategies = [
axl.DBS,
axl.MetaMajority,
axl.MetaMajorityFiniteMemory,
axl.MetaMajorityLongMemory,
axl.MetaMinority,
axl.MetaMixer,
axl.MetaWinner,
axl.MetaWinnerDeterministic,
axl.MetaWinnerEnsemble,
axl.MetaWinnerFiniteMemory,
axl.MetaWinnerLongMemory,
axl.MetaWinnerStochastic,
axl.NMWEDeterministic,
axl.NMWEFiniteMemory,
axl.NMWELongMemory,
axl.NMWEStochastic,
axl.NiceMetaWinner,
axl.NiceMetaWinnerEnsemble,
]
self.assertEqual(
str_reps(long_run_time_strategies), str_reps(axl.long_run_time_strategies)
)
self.assertTrue(
all(s().classifier["long_run_time"] for s in axl.long_run_time_strategies)
)
def test_short_run_strategies(self):
short_run_time_strategies = [
s for s in axl.strategies if s not in axl.long_run_time_strategies
]
self.assertEqual(
str_reps(short_run_time_strategies), str_reps(axl.short_run_time_strategies)
)
self.assertFalse(
any(s().classifier["long_run_time"] for s in axl.short_run_time_strategies)
)
def test_meta_inclusion(self):
self.assertTrue(str(axl.MetaMajority()) in str_reps(axl.strategies))
self.assertTrue(str(axl.MetaHunter()) in str_reps(axl.strategies))
self.assertFalse(
str(axl.MetaHunter()) in str_reps(axl.long_run_time_strategies)
)
def test_demo_strategies(self):
demo_strategies = [
axl.Cooperator,
axl.Defector,
axl.TitForTat,
axl.Grudger,
axl.Random,
]
self.assertTrue(str_reps(demo_strategies), str_reps(axl.demo_strategies))
| true
|
06ec6000235923a6f526ce9bc5305d080ef9e882
|
Python
|
IgorxutStepikOrg/AlgorithmsTheoryAndPracticeMethods
|
/Module2_2/Step6/python/solution1.py
|
UTF-8
| 209
| 3.46875
| 3
|
[] |
no_license
|
def fib(num):
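    # Iterative Fibonacci with fib(1) == fib(2) == 1.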
prev, cur = 0, 1
for i in range(1, num):
prev, cur = cur, prev + cur
return cur
def main():
n = int(input())
print(fib(n))
if __name__ == "__main__":
main()
| true
|
4e698d55499c70ae0bc5fe3270162686e25b5a80
|
Python
|
arlethitgo/pic2map
|
/tests/test_cli.py
|
UTF-8
| 6,128
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import logging
import os
import tempfile
import unittest
from StringIO import StringIO
from mock import (
MagicMock as Mock,
patch,
)
from pic2map.cli import (
add,
count,
main,
parse_arguments,
remove,
serve,
valid_directory,
)
class MainTests(unittest.TestCase):
"""Main function test cases."""
def setUp(self):
"""Patch parse_arguments function."""
self.parse_arguments_patcher = patch('pic2map.cli.parse_arguments')
self.parse_arguments = self.parse_arguments_patcher.start()
self.logging_patcher = patch('pic2map.cli.logging')
self.logging_patcher.start()
def test_func_called(self):
"""Command function is called."""
argv = Mock()
function = Mock()
args = argparse.Namespace(
log_level=logging.WARNING,
func=function,
)
self.parse_arguments.return_value = args
main(argv)
function.assert_called_once_with(args)
def tearDown(self):
"""Undo the patching."""
self.parse_arguments_patcher.stop()
self.logging_patcher.stop()
class CommandFunctionTests(unittest.TestCase):
"""Command function test cases."""
def setUp(self):
"""Patch dependencies."""
self.tree_explorer_patcher = patch('pic2map.cli.TreeExplorer')
self.tree_explorer_cls = self.tree_explorer_patcher.start()
self.filter_gps_metadata_patcher = (
patch('pic2map.cli.filter_gps_metadata'))
self.filter_gps_metadata = self.filter_gps_metadata_patcher.start()
self.transform_metadata_to_row_patcher = (
patch('pic2map.cli.transform_metadata_to_row'))
self.transform_metadata_to_row = (
self.transform_metadata_to_row_patcher.start())
self.location_db_patcher = patch('pic2map.cli.LocationDB')
self.location_cls = self.location_db_patcher.start()
def tearDown(self):
"""Undo the patching."""
self.tree_explorer_patcher.stop()
self.filter_gps_metadata_patcher.stop()
self.transform_metadata_to_row_patcher.stop()
self.location_db_patcher.stop()
def test_add(self):
"""Add command function."""
tree_explorer = self.tree_explorer_cls()
paths = Mock()
tree_explorer.paths.return_value = paths
metadata_record = Mock()
metadata_records = [metadata_record]
self.filter_gps_metadata.return_value = metadata_records
row = Mock()
self.transform_metadata_to_row.return_value = row
database = self.location_cls().__enter__()
directory = 'some directory'
args = argparse.Namespace(directory=directory)
add(args)
self.tree_explorer_cls.assert_called_with(directory)
self.filter_gps_metadata.assert_called_once_with(paths)
self.transform_metadata_to_row.assert_called_once_with(metadata_record)
database.insert.assert_called_with([row])
def test_remove(self):
"""Remove command function."""
directory = 'some directory'
args = argparse.Namespace(directory=directory)
remove(args)
database = self.location_cls().__enter__()
database.delete.assert_called_once_with(directory)
def test_count(self):
"""Count command function."""
file_count = 10
database = self.location_cls().__enter__()
database.count.return_value = file_count
args = argparse.Namespace()
with patch('sys.stdout', new_callable=StringIO) as stdout:
count(args)
self.assertEqual(stdout.getvalue(), '{}\n'.format(file_count))
def test_serve(self):
"""Serve command function."""
args = argparse.Namespace()
with patch('pic2map.cli.app') as app:
serve(args)
app.run.assert_called_once_with(debug=True)
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
    def test_unreadable_directory(self):
        """Unreadable directory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_add_command(self):
"""Add command."""
directory = 'some directory'
with patch('pic2map.cli.valid_directory') as valid_directory_func:
valid_directory_func.return_value = directory
args = parse_arguments(['add', directory])
self.assertEqual(args.directory, directory)
self.assertEqual(args.func, add)
def test_remove(self):
"""Remove command."""
directory = 'some directory'
with patch('pic2map.cli.valid_directory') as valid_directory_func:
valid_directory_func.return_value = directory
args = parse_arguments(['remove', directory])
self.assertEqual(args.directory, directory)
self.assertEqual(args.func, remove)
def test_count(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_serve_command(self):
"""Serve command."""
args = parse_arguments(['serve'])
self.assertEqual(args.func, serve)
| true
|
5834bce9b1f5a4868bcd8f11fc644eea9d65e7b8
|
Python
|
RodolfoGueiros/PYTHON
|
/Exercicios/Exerc_lista_3.py
|
UTF-8
| 886
| 4.0625
| 4
|
[] |
no_license
|
listaCD = []
def exibirMenu():
print("1 - Inserir novo CD")
print("2 - Excluir CD")
print("3 - Listar CDs")
print("4 - Sair")
opcao = int(input("Escolha uma opcao: "))
return opcao
def inserirCD():
novoCD = input("Digite o nome do Artista: ")
listaCD.append(novoCD)
def listarCD():
for elemento in listaCD:
print (elemento)
def excluirCD():
nomeCD = input("Nome do CD que deseja excluir:")
indice = -1
encontrou = False
for elemento in listaCD:
indice += 1
if elemento == nomeCD:
encontrou = True
break
if (encontrou):
del listaCD[indice]
while True:
opcao = exibirMenu()
if opcao == 4:
break
elif opcao == 3:
listarCD()
elif opcao == 1:
inserirCD()
elif opcao == 2:
excluirCD()
| true
|
4975b1dd0a41288f00512424a317b4e6d0f2af19
|
Python
|
wfjin/LING570
|
/HW5/ngram_count.py
|
UTF-8
| 1,856
| 3.53125
| 4
|
[] |
no_license
|
""" This python program reads a training file and outputs the ngram count for unigrams, bigrams
and trigrams of the training data """
import sys
def addebos(line_ebos):
""" This function adds BOS and EOS to each sentence """
new_line = ['<s>']
new_line.extend(line_ebos)
new_line.append('</s>')
return new_line
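# For example, addebos(['the', 'cat']) returns ['<s>', 'the', 'cat', '</s>'].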
if __name__ == "__main__":
UNIGRAM = {}
BIGRAM = {}
TRIGRAM = {}
with open(sys.argv[1], 'r') as training:
for line in training:
words = line.split()
words = addebos(words)
for i in range(0, len(words)):
if words[i] in UNIGRAM:
UNIGRAM[words[i]] += 1
else:
UNIGRAM[words[i]] = 1
if i < len(words) - 1:
bi_words = words[i] + ' ' + words[i+1]
if bi_words in BIGRAM:
BIGRAM[bi_words] += 1
else:
BIGRAM[bi_words] = 1
if i < len(words) - 2:
tri_words = words[i] + ' ' + words[i+1] + ' ' + words[i+2]
if tri_words in TRIGRAM:
TRIGRAM[tri_words] += 1
else:
TRIGRAM[tri_words] = 1
SORTED_UNIGRAM = sorted(UNIGRAM.items(), key=lambda x: (x[1], x[0]), reverse=True)
SORTED_BIGRAM = sorted(BIGRAM.items(), key=lambda x: (x[1], x[0]), reverse=True)
SORTED_TRIGRAM = sorted(TRIGRAM.items(), key=lambda x: (x[1], x[0]), reverse=True)
NGRAM_COUNT = open(sys.argv[2], 'w')
for uni in SORTED_UNIGRAM:
NGRAM_COUNT.write(str(uni[1])+'\t'+uni[0]+'\n')
for bi in SORTED_BIGRAM:
NGRAM_COUNT.write(str(bi[1])+'\t'+bi[0]+'\n')
for tri in SORTED_TRIGRAM:
NGRAM_COUNT.write(str(tri[1])+'\t'+tri[0]+'\n')
| true
|
551a6f003f0061c7b26f74efa36fad6acc5cf67e
|
Python
|
obround/ppci
|
/ppci/codegen/registerallocator.py
|
UTF-8
| 29,548
| 3.96875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
"""
Instructions generated during instruction selection phase use virtual
registers and some physical registers (e.g. when an instruction expects
arguments in particular register(s)). Register allocation is the process
of assigning physical location (register or memory) to the remaining
virtual registers.
Some key concepts in the domain of register allocation are:
- **virtual register**: A location which must be mapped to a physical register.
- **physical register**: A real CPU register
- **interference graph**: A graph in which each node represents a location.
An edge indicates that the two locations cannot have the same physical
register assigned.
- **pre-colored register**: A location that is already assigned a specific
physical register.
- **coalescing**: The process of merging two nodes in an interference graph
which do not interfere and are connected by a move instruction.
- **spilling**: Spilling is the process when no physical register can be found
for a certain virtual register. Then this value will be placed in memory.
- **register class**: Most CPUs contain registers grouped into classes. For
example, there may be registers for floating point, and registers for
integer operations.
- **register alias**: Some registers may alias to registers in another class.
A good example are the x86 registers rax, eax, ax, al and ah.
**Interference graph**
Each instruction in the instruction list may use or define certain registers.
A register is live between a definition and a use of that register. Registers that
are live at the same point in the program interfere with each other.
An interference graph is a graph in which each node represents a register
and each edge represents interference between those two registers.
**Graph coloring**
In 1981 Chaitin presented the idea to use graph coloring for register
allocation.
In a graph a node can be colored if it has less neighbours than
possible colors (referred to as K from now on). This is true because when
each neighbour has a different color, there is still a valid color left for
the node itself.
The outline of the algorithm is:
Given a graph, if a node can be colored, remove this node from the graph and
put it on a stack. When added back to the graph, it can be given a color.
Now repeat this process recursively with the remaining graph. When the
graph is empty, place all nodes back from the stack one by one and assign
each node a color when placed. Remember that when adding back, a color can
be found, because this was the criteria during removal.
See: https://en.wikipedia.org/wiki/Chaitin%27s_algorithm
[Chaitin1982]_
**Coalescing**
Coalescing is the process of merging two nodes in an interference graph.
This means that two temporaries will be assigned to the same register. This
is especially useful if the temporaries are used in move instructions, since
when the source and the destination of a move instruction are the same
register, the move can be deleted.
Coalescing a move instruction is easy when an interference graph is present.
Two nodes that are used by a move instruction can be coalesced when they do
not interfere.
However, if we coalesce too many moves, the graph can become uncolorable, and
spilling has to be done. To prevent spilling, coalescing must be done
in a controlled manner.
A conservative approach to coalescing is the following: if the merged node
has fewer than K neighbours of significant degree (that is, neighbours with
degree >= K), then the nodes can be coalesced. The reason is that when all
trivially colorable nodes are removed, the merged node is left with fewer
than K uncolored neighbours and can therefore still be colored.
This ensures that the coalescing of the nodes does not have a negative
effect on the colorability of the graph.
[Briggs1994]_
**Spilling**
When no physical register can be assigned to a node, its value is spilled:
a stack slot is allocated and a load or store is inserted around every use
and definition of the value.
**Iterated register coalescing**
Iterated register coalescing (IRC) is a combination of graph coloring,
coalescing and spilling.
The process consists of the following steps:
- build an interference graph from the instruction list
- remove trivial colorable nodes.
- coalesce registers to remove redundant moves
- spill registers
- select registers
See: https://en.wikipedia.org/wiki/Register_allocation
[George1996]_
**Graph coloring with more register classes**
Most instruction sets are not uniform, and hence simple graph coloring cannot
be used. The algorithm can be modified to work with several register
classes that possibly interfere.
[Runeson2003]_
[Smith2004]_
**Implementations**
The following class can be used to perform register allocation.
"""
import logging
from functools import lru_cache
from .flowgraph import FlowGraph
from .interferencegraph import InterferenceGraph
from ..arch.arch import Architecture, Frame
from ..arch.registers import Register
from ..utils.tree import Tree
from ..utils.collections import OrderedSet, OrderedDict
from .instructionselector import ContextInterface
class MiniCtx(ContextInterface):
def __init__(self, frame, arch):
self._frame = frame
self._arch = arch
self.instructions = []
def move(self, dst, src):
""" Generate move """
self.emit(self._arch.move(dst, src))
def emit(self, instruction):
self.instructions.append(instruction)
def new_reg(self, cls):
return self._frame.new_reg(cls)
class MiniGen:
""" Spill code generator """
def __init__(self, arch, selector):
self.arch = arch
self.selector = selector
def gen_load(self, frame, vreg, slot):
""" Generate instructions to load vreg from a stack slot """
at = self.make_at(slot)
fmt = self.make_fmt(vreg)
t = Tree(
"MOV{}".format(fmt),
Tree("LDR{}".format(fmt), at),
value=vreg,
)
return self.gen(frame, t)
def gen_store(self, frame, vreg, slot):
""" Generate instructions to store vreg at a stack slot """
at = self.make_at(slot)
fmt = self.make_fmt(vreg)
t = Tree(
"STR{}".format(fmt),
at,
Tree("REG{}".format(fmt), value=vreg),
)
return self.gen(frame, t)
def gen(self, frame, tree):
""" Generate code for a given tree """
ctx = MiniCtx(frame, self.arch)
self.selector.gen_tree(ctx, tree)
return ctx.instructions
def make_fmt(self, vreg):
"""Determine the type suffix, such as I32 or F64."""
# TODO: hack to retrieve register type (U, I or F):
ty = getattr(vreg, "ty", "I")
fmt = "{}{}".format(ty, vreg.bitsize)
return fmt
def make_at(self, slot):
bitsize = self.arch.get_size("ptr") * 8
offset_tree = Tree("FPRELU{}".format(bitsize), value=slot)
return offset_tree
# TODO: implement linear scan allocator and other allocators!
class GraphColoringRegisterAllocator:
"""Target independent register allocator.
Algorithm is iterated register coalescing by Appel and George.
Also the pq-test algorithm for more register classes is added.
"""
logger = logging.getLogger("regalloc")
verbose = False # Set verbose to True to get more logging info
def __init__(self, arch: Architecture, instruction_selector, reporter):
assert isinstance(arch, Architecture), arch
self.arch = arch
self.spill_gen = MiniGen(arch, instruction_selector)
self.reporter = reporter
# A map with register alias info:
self.alias = arch.info.alias
# Register information:
# TODO: Improve different register classes
self.K = {} # type: Dict[Register, int]
self.cls_regs = {} # Mapping from class to register set
for reg_class in self.arch.info.register_classes:
kls, regs = reg_class.typ, reg_class.registers
if self.verbose:
self.logger.debug('Register class "%s" contains %s', kls, regs)
self.K[kls] = len(regs)
self.cls_regs[kls] = OrderedSet(regs)
def alloc_frame(self, frame: Frame):
"""Do iterated register allocation for a single frame.
This is the entry function for the register allocator and drives
through all stages of the algorithm.
Args:
frame: The frame to perform register allocation on.
"""
spill_rounds = 0
self.logger.debug("Starting iterative coloring")
while True:
self.init_data(frame)
# Process all work lists:
while True:
# self.check_invariants()
# Run one of the possible steps:
if self.simplify_worklist:
self.simplify()
elif self.worklistMoves:
self.coalesc()
elif self.freeze_worklist:
self.freeze()
elif self.spill_worklist:
self.select_spill()
else:
break
self.logger.debug("Now assinging colors")
spilled_nodes = self.assign_colors()
if spilled_nodes:
spill_rounds += 1
self.logger.debug("Spilling round %s", spill_rounds)
max_spill_rounds = 30
if spill_rounds > max_spill_rounds:
raise RuntimeError(
"Give up: more than {} spill rounds done!".format(
max_spill_rounds
)
)
# Rewrite program now.
for node in spilled_nodes:
self.rewrite_program(node)
if self.verbose:
self.reporter.message("Rewrote program with spilling")
self.reporter.dump_frame(self.frame)
else:
# Done!
break
self.remove_redundant_moves()
self.apply_colors()
def link_move(self, move):
""" Associate move with its source and destination """
src = self.node(move.used_registers[0])
dst = self.node(move.defined_registers[0])
src.moves.add(move)
dst.moves.add(move)
def unlink_move(self, move):
src = self.node(move.used_registers[0])
dst = self.node(move.defined_registers[0])
if move in src.moves:
src.moves.remove(move)
if move in dst.moves:
dst.moves.remove(move)
def init_data(self, frame: Frame):
""" Initialize data structures """
self.frame = frame
cfg = FlowGraph(self.frame.instructions)
self.logger.debug(
"Constructed flowgraph with %s nodes", len(cfg.nodes)
)
cfg.calculate_liveness()
self.frame.ig = InterferenceGraph()
self.frame.ig.calculate_interference(cfg)
self.logger.debug(
"Constructed interferencegraph with %s nodes",
len(self.frame.ig.nodes),
)
self.moves = [i for i in self.frame.instructions if i.ismove]
for mv in self.moves:
self.link_move(mv)
self.select_stack = []
# Move related sets:
self.coalescedMoves = OrderedSet()
self.constrainedMoves = OrderedSet()
self.frozenMoves = OrderedSet()
self.activeMoves = OrderedSet()
self.worklistMoves = OrderedSet()
# Fill initial move set, try to remove all moves:
for m in self.moves:
self.worklistMoves.add(m)
# Make worklists for nodes:
self.spill_worklist = OrderedSet()
self.freeze_worklist = OrderedSet()
self.simplify_worklist = OrderedSet()
self.precolored = OrderedSet()
self._num_blocked = {}
# Divide nodes into categories:
for node in self.frame.ig.nodes:
if node.is_colored:
if self.verbose:
self.logger.debug("Pre colored: %s", node)
self.precolored.add(node)
elif not self.is_colorable(node):
self.spill_worklist.add(node)
elif self.is_move_related(node):
self.freeze_worklist.add(node)
else:
self.simplify_worklist.add(node)
self.logger.debug(
"%s node in spill list, %s in freeze list and %s in simplify list",
len(self.spill_worklist),
len(self.freeze_worklist),
len(self.simplify_worklist),
)
def node(self, vreg):
return self.frame.ig.get_node(vreg)
def has_edge(self, t, r):
""" Helper function to check for an interfering edge """
if self.frame.ig.has_edge(t, r):
return True
if t in self.precolored:
# Check aliases:
for reg2 in self.alias[t.reg]:
if self.frame.ig.has_node(reg2):
t2 = self.frame.ig.get_node(reg2, create=False)
if self.frame.ig.has_edge(t2, r):
return True
if r in self.precolored:
# Check aliases:
for reg2 in self.alias[r.reg]:
if self.frame.ig.has_node(reg2):
r2 = self.frame.ig.get_node(reg2, create=False)
if self.frame.ig.has_edge(t, r2):
return True
return False
@lru_cache(maxsize=None)
def q(self, B, C) -> int:
""" The number of class B registers that can be blocked by class C. """
assert issubclass(B, Register)
assert issubclass(C, Register)
B_regs = self.cls_regs[B]
C_regs = self.cls_regs[C]
x = max(len(self.alias[r] & B_regs) for r in C_regs)
if self.verbose:
self.logger.debug(
"Class %s register can block max %s class %s register", C, x, B
)
return x
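    # Worked example (hypothetical x86-like aliasing): if class C contains a
    # 16-bit register 'ax' that aliases both 'al' and 'ah' in 8-bit class B,
    # then q(B, C) == 2: one class-C neighbour can block two class-B
    # registers. For non-overlapping register classes q(B, C) is 0.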
def is_colorable(self, node) -> bool:
"""
Helper function to determine whether a node is trivially
colorable. This means: no matter the colors of the nodes neighbours,
the node can be given a color.
In case of one register class, this is: n.degree < self.K
In case of more than one register class, somehow the worst case
damage by all neighbours must be determined.
We do this now with the pq-test.
"""
if node.is_colored:
return True
if node in self._num_blocked:
num_blocked = self._num_blocked[node]
else:
num_blocked = self.calc_num_blocked(node)
self._num_blocked[node] = num_blocked
# This invariant should hold:
# assert num_blocked == self.calc_num_blocked(node)
return num_blocked < self.K[node.reg_class]
def calc_num_blocked(self, node):
"""Calculate for the given node how many registers are blocked
by it's adjecent nodes.
This is an advanced form of a nodes degree, but then for
register of different classes.
"""
B = node.reg_class
num_blocked = sum(self.q(B, j.reg_class) for j in node.adjecent)
return num_blocked
def release_pressure(self, node, reg_class):
"""Remove some register pressure from the given node."""
if node in self._num_blocked:
self._num_blocked[node] -= self.q(node.reg_class, reg_class)
def NodeMoves(self, n):
return n.moves
def is_move_related(self, n):
""" Check if a node is used by move instructions """
return bool(self.NodeMoves(n))
def simplify(self):
""" Remove nodes from the graph """
n = self.simplify_worklist.pop()
self.select_stack.append(n)
if self.verbose:
self.logger.debug("Simplify node %s", n)
# Pop out of graph, we place it back later:
self.frame.ig.mask_node(n)
for m in n.adjecent:
self.release_pressure(m, n.reg_class)
self.decrement_degree(m)
def decrement_degree(self, m):
"""If a node was lowered in degree, check if there are nodes that
        can be moved from the spill list to the freeze or simplify
list
"""
# This check was m.degree == self.K - 1
if m in self.spill_worklist and self.is_colorable(m):
self.enable_moves({m} | m.adjecent)
self.spill_worklist.remove(m)
if self.is_move_related(m):
self.freeze_worklist.add(m)
else:
self.simplify_worklist.add(m)
def enable_moves(self, nodes):
for node in nodes:
for move in self.NodeMoves(node):
if move in self.activeMoves:
self.activeMoves.remove(move)
self.worklistMoves.add(move)
def coalesc(self):
"""Coalesc moves conservative.
This means, merge the variables of a move into
one variable, and delete the move. But do this
only when no spill will occur.
"""
# Remove the move from the worklist:
m = self.worklistMoves.pop()
x = self.node(m.defined_registers[0])
y = self.node(m.used_registers[0])
u, v = (y, x) if y in self.precolored else (x, y)
if self.verbose:
self.logger.debug("Coalescing %s which couples %s and %s", m, u, v)
if u is v:
# u is v, so we do 'mov x, x', which is redundant
self.coalescedMoves.add(m)
self.unlink_move(m)
self.add_worklist(u)
if self.verbose:
self.logger.debug("Move was an identity move")
elif (v in self.precolored) or self.has_edge(u, v):
# Both u and v are precolored
# or there is an interfering edge
# between the two nodes:
self.constrainedMoves.add(m)
self.unlink_move(m)
self.add_worklist(u)
self.add_worklist(v)
if self.verbose:
self.logger.debug("Move is constrained!")
elif (
u.is_colored
and issubclass(u.reg_class, v.reg_class)
and all(self.ok(t, u) for t in v.adjecent)
) or ((not u.is_colored) and self.conservative(u, v)):
# Check if v can be given the class of u, in other words:
# is u a subclass of v?
self.coalescedMoves.add(m)
self.unlink_move(m)
self.combine(u, v)
self.add_worklist(u)
else:
self.logger.debug("Active move!")
self.activeMoves.add(m)
def add_worklist(self, u):
if (
(u not in self.precolored)
and (not self.is_move_related(u))
and self.is_colorable(u)
):
self.freeze_worklist.remove(u)
self.simplify_worklist.add(u)
def ok(self, t, r):
""" Implement coalescing testing with pre-colored register """
return t.is_colored or self.is_colorable(t) or self.has_edge(t, r)
def conservative(self, u, v):
"""Briggs conservative criteria for coalescing.
If the result of the merge has fewer than K nodes that are
not trivially colorable, then coalescing is safe, because
when coloring, all other nodes that can be colored will be popped
from the graph, leaving the merged node that then can be colored.
In the case of multiple register classes, first determine the
new neighbour nodes. Then assume that all nodes that can be colored
will be colored, and are taken out of the graph. Then calculate how
many registers can be blocked by the remaining nodes. If this is
        less than the number of available registers, the coalescing is safe!
"""
nodes = u.adjecent | v.adjecent
B = self.common_reg_class(u.reg_class, v.reg_class)
num_blocked = sum(
self.q(B, j.reg_class) for j in nodes if not self.is_colorable(j)
)
return num_blocked < self.K[B]
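    # Worked example (single register class, so q == 1 per neighbour): with
    # K == 4 and merged neighbours {a, b, c, d} of which only a and b are not
    # trivially colorable, num_blocked == 2 < 4, so the merged node keeps a
    # free color after simplification and the coalescing is safe. (The node
    # names here are purely illustrative.)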
def combine(self, u, v):
""" Combine u and v into one node, updating work lists """
if self.verbose:
self.logger.debug("Combining %s and %s", u, v)
# Remove v from work list:
if v in self.freeze_worklist:
self.freeze_worklist.remove(v)
else:
self.spill_worklist.remove(v)
# update _num_blocked of neighbours fine grained:
for t in u.adjecent:
self.release_pressure(t, u.reg_class)
for t in v.adjecent:
self.release_pressure(t, v.reg_class)
# Determine new register class:
u.reg_class = self.common_reg_class(u.reg_class, v.reg_class)
self.frame.ig.combine(u, v)
# Update node pseudo-degree:
if u in self._num_blocked:
# Brute force re-calculate.
# We could figure out the shared edges
# and do careful book keeping, but this
# might be as intensive as well.
self._num_blocked[u] = self.calc_num_blocked(u)
for t in u.adjecent:
if t in self._num_blocked:
self._num_blocked[t] += self.q(t.reg_class, u.reg_class)
if self.verbose:
self.logger.debug("Combined node: %s", u)
# See if any adjecent nodes dropped in degree by merging u and v
# This can happen when u and v both interfered with t.
for t in u.adjecent:
self.decrement_degree(t)
# Move node to spill worklist if higher degree is reached:
if (not self.is_colorable(u)) and u in self.freeze_worklist:
self.freeze_worklist.remove(u)
self.spill_worklist.add(u)
@lru_cache(maxsize=None)
def common_reg_class(self, u, v):
""" Determine the smallest common register class of two nodes """
if issubclass(u, v):
cc = u
elif issubclass(v, u):
cc = v
else:
raise RuntimeError(
"Cannot determine common registerclass for {} and {}".format(
u, v
)
)
if self.verbose:
self.logger.debug("The common class of %s and %s is %s", u, v, cc)
return cc
def freeze(self):
"""Give up coalescing on some node, move it to the simplify list
and freeze all moves associated with it.
"""
u = self.freeze_worklist.pop()
if self.verbose:
self.logger.debug("freezing %s", u)
self.simplify_worklist.add(u)
self.freeze_moves(u)
def freeze_moves(self, u):
""" Freeze moves for node u """
for m in list(self.NodeMoves(u)):
if m in self.activeMoves:
self.activeMoves.remove(m)
else:
self.worklistMoves.remove(m)
self.unlink_move(m)
self.frozenMoves.add(m)
# Check other part of the move for still being move related:
src = self.node(m.used_registers[0])
dst = self.node(m.defined_registers[0])
v = src if u is dst else dst
if (
v not in self.precolored
and not self.is_move_related(v)
and self.is_colorable(v)
):
assert v in self.freeze_worklist
self.freeze_worklist.remove(v)
self.simplify_worklist.add(v)
def select_spill(self):
"""Select potential spill node.
Select a node to be spilled. This is optimistic,
since this might be turned into a real spill.
Continue nevertheless, to discover more potential
spills, or we might be lucky and able to color the
graph any ways.
"""
# TODO: select a node which is certainly not a node that was
# introduced during spilling?
# Select to be spilled variable:
# Select node with the lowest priority:
p = []
for n in self.spill_worklist:
assert not n.is_colored
d = sum(len(self.frame.ig.defs(t)) for t in n.temps)
u = sum(len(self.frame.ig.uses(t)) for t in n.temps)
priority = (u + d) / n.degree
self.logger.debug("%s has spill priority=%s", n, priority)
p.append((n, priority))
node = min(p, key=lambda x: x[1])[0]
# Potential spill node, place in simplify worklist:
self.spill_worklist.remove(node)
self.simplify_worklist.add(node)
self.freeze_moves(node)
def rewrite_program(self, node):
""" Rewrite program by creating a load and a store for each use """
# Generate spill code:
self.logger.debug("Placing {} on stack".format(node))
if self.verbose:
self.reporter.message("Placing {} on stack".format(node))
size = node.reg_class.bitsize // 8
alignment = size
slot = self.frame.alloc(size, alignment)
self.logger.debug("Allocating stack slot %s", slot)
# TODO: maybe break-up coalesced node before doing this?
for tmp in node.temps:
instructions = OrderedSet(
self.frame.ig.uses(tmp) + self.frame.ig.defs(tmp)
)
for instruction in instructions:
if self.verbose:
self.reporter.message(
"Updating instruction: {}".format(instruction)
)
vreg2 = self.frame.new_reg(type(tmp))
self.logger.debug("tmp: %s, new: %s", tmp, vreg2)
if self.verbose:
self.reporter.message(
"Replace {} by {}".format(tmp, vreg2)
)
instruction.replace_register(tmp, vreg2)
if instruction.reads_register(vreg2):
code = self.spill_gen.gen_load(self.frame, vreg2, slot)
if self.verbose:
self.reporter.message(
"Load code before instruction: {}".format(
list(map(str, code))
)
)
self.frame.insert_code_before(instruction, code)
if instruction.writes_register(vreg2):
code = self.spill_gen.gen_store(self.frame, vreg2, slot)
if self.verbose:
self.reporter.message(
"Store code after instruction: {}".format(
list(map(str, code))
)
)
self.frame.insert_code_after(instruction, code)
if self.verbose:
self.reporter.dump_frame(self.frame)
def assign_colors(self):
"""Add nodes back to the graph to color it.
Any potential spills might turn into real spills
at this stage.
"""
spilled_nodes = []
# Start with the last node added:
for node in reversed(self.select_stack):
# Place node back into graph:
self.frame.ig.unmask_node(node)
# Check registers occupied by neighbours:
takenregs = set()
for m in node.adjecent:
if m.reg in self.alias:
for r in self.alias[m.reg]:
takenregs.add(r)
else:
takenregs.add(m.reg)
ok_regs = self.cls_regs[node.reg_class] - takenregs
if ok_regs:
reg = ok_regs[0]
if self.verbose:
self.logger.debug("Assign %s to node %s", reg, node)
node.reg = reg
else:
spilled_nodes.append(node)
return spilled_nodes
def remove_redundant_moves(self):
""" Remove coalesced moves """
for move in self.coalescedMoves:
self.frame.instructions.remove(move)
def apply_colors(self):
""" Assign colors to registers """
# Apply all colors:
for node in self.frame.ig:
assert node.reg is not None
for reg in node.temps:
if reg.is_colored:
assert reg.color == node.reg.color
else:
reg.set_color(node.reg.color)
# Mark the register as used in this frame:
self.frame.used_regs.add(node.reg.get_real())
# TODO:
# if self.frame.debug_db:
# self.frame.debug_db.map(
# reg, self.arch.get_register(node.reg))
def check_invariants(self): # pragma: no cover
""" Test invariants """
# When changing the code, these asserts validate the worklists.
assert all(self.is_colorable(u) for u in self.simplify_worklist)
assert all(not self.is_move_related(u) for u in self.simplify_worklist)
assert all(self.is_colorable(u) for u in self.freeze_worklist)
assert all(self.is_move_related(u) for u in self.freeze_worklist)
assert all(not self.is_colorable(u) for u in self.spill_worklist)
# Check that moves live in exactly one set:
assert (
self.activeMoves
& self.worklistMoves
& self.coalescedMoves
& self.constrainedMoves
& self.frozenMoves
== set()
)
| true
|
5e753ca4f30b2840195ed03afc0d1f7203f98d42
|
Python
|
a-pallotto/project-euler
|
/ex014.py
|
UTF-8
| 1,010
| 4.09375
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 23:22:00 2018
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
@author: pallo
"""
def collatz(n, count=1):
while n > 1:
count += 1
if n % 2 == 0:
            n = n // 2  # integer division keeps n an int
else:
n = 3*n + 1
return count
sizes = []
for i in range(1000000):
sizes.append(collatz(i))
print(max(sizes))
print(sizes.index(max(sizes)))
| true
|
ec897f4a35b0f9e4a7f18f3092c53221076bbd5c
|
Python
|
jianengli/leetcode_practice
|
/array/43.py
|
UTF-8
| 1,001
| 3.515625
| 4
|
[] |
no_license
|
class Solution:
def multiply(self, num1: str, num2: str) -> str:
if num1 == '0' or num2 == '0' : return "0"
        l1 = list(map(int,list(num1))) # store each digit of num1 in a list
        l2 = list(map(int,list(num2))) # store each digit of num2 in a list
        carry2 = 1 # positional weight (1, 10, 100, ...) of the current num2 digit
        res = 0
        # accumulate the partial product of each num2 digit with num1
        while l2:
            carry1 = 1 # positional weight of the current num1 digit
            tmp = 0 # partial product of the current num2 digit with num1
            factor2 = l2.pop()
            # multiply every digit of num1 by the current num2 digit and add into tmp
            for i in range(len(l1)-1,-1,-1):
                tmp += l1[i]*factor2*carry1
                carry1 *= 10 # shift to the next num1 digit position
            res += tmp * carry2
            carry2 *= 10 # shift to the next num2 digit position
return str(res)
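# For example, Solution().multiply("12", "34") returns "408": digit 4 of num2
# contributes 48, digit 3 contributes 36 * 10 = 360, and 48 + 360 == 408.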
| true
|
fcbbdbdf255b81b93864a140692171b96553d077
|
Python
|
welchbj/almanac
|
/almanac/core/command_engine.py
|
UTF-8
| 11,513
| 2.625
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
from __future__ import annotations
import inspect
import pyparsing as pp
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union
)
from ..commands import FrozenCommand
from ..errors import (
CommandNameCollisionError,
ConflictingPromoterTypesError,
MissingArgumentsError,
NoSuchArgumentError,
NoSuchCommandError,
TooManyPositionalArgumentsError,
UnknownArgumentBindingError
)
from ..hooks import AsyncHookCallback, PromoterFunction
from ..types import is_matching_type
from ..utils import FuzzyMatcher
if TYPE_CHECKING:
from .application import Application
HookCallbackMapping = MutableMapping[FrozenCommand, List[AsyncHookCallback]]
_T = TypeVar('_T')
class CommandEngine:
"""A command lookup and management engine."""
def __init__(
self,
app: Application,
*commands_to_register: FrozenCommand
) -> None:
self._app = app
self._registered_commands: List[FrozenCommand] = []
self._command_lookup_table: Dict[str, FrozenCommand] = {}
self._after_command_callbacks: HookCallbackMapping = {}
self._before_command_callbacks: HookCallbackMapping = {}
for command in commands_to_register:
self.register(command)
self._type_promoter_mapping: Dict[Type, Callable] = {}
@property
def app(
self
) -> Application:
"""The application that this engine manages."""
return self._app
@property
def type_promoter_mapping(
self
) -> Dict[Type, Callable]:
"""A mapping of types to callables that convert raw arguments to those types."""
return self._type_promoter_mapping
def add_promoter_for_type(
self,
_type: Type[_T],
promoter_callable: PromoterFunction[_T]
) -> None:
"""Register a promotion callable for a specific argument type."""
if _type in self._type_promoter_mapping.keys():
raise ConflictingPromoterTypesError(
f'Type {_type} already has a registered promoter callable '
f'{promoter_callable}'
)
self._type_promoter_mapping[_type] = promoter_callable
def register(
self,
command: FrozenCommand
) -> None:
"""Register a command on this class.
Raises:
CommandNameCollisionError: If the ``name`` or one of the
``aliases`` on the specified :class:`FrozenCommand` conflicts with an
entry already stored in this :class:`CommandEngine`.
"""
already_mapped_names = tuple(
identifier for identifier in command.identifiers
if identifier in self._command_lookup_table.keys()
)
if already_mapped_names:
raise CommandNameCollisionError(*already_mapped_names)
for identifier in command.identifiers:
self._command_lookup_table[identifier] = command
self._after_command_callbacks[command] = []
self._before_command_callbacks[command] = []
self._registered_commands.append(command)
def add_before_command_callback(
self,
name_or_command: Union[str, FrozenCommand],
callback: AsyncHookCallback
) -> None:
"""Register a callback for execution before a command."""
if isinstance(name_or_command, str):
try:
command = self[name_or_command]
except KeyError:
raise NoSuchCommandError(name_or_command)
else:
command = name_or_command
self._before_command_callbacks[command].append(callback)
def add_after_command_callback(
self,
name_or_command: Union[str, FrozenCommand],
callback: AsyncHookCallback
) -> None:
"""Register a callback for execution after a command."""
if isinstance(name_or_command, str):
try:
command = self[name_or_command]
except KeyError:
raise NoSuchCommandError(name_or_command)
else:
command = name_or_command
self._after_command_callbacks[command].append(callback)
def get(
self,
name_or_alias: str
) -> FrozenCommand:
"""Get a :class:`FrozenCommand` by its name or alias.
Returns:
The mapped :class:`FrozenCommand` instance.
Raises:
:class:`NoSuchCommandError`: If the specified ``name_or_alias`` is not
contained within this instance.
"""
if name_or_alias not in self._command_lookup_table.keys():
raise NoSuchCommandError(name_or_alias)
return self._command_lookup_table[name_or_alias]
__getitem__ = get
async def run(
self,
name_or_alias: str,
parsed_args: pp.ParseResults
) -> int:
"""Run a command, validating the specified arguments.
In the event of coroutine-binding failure, this method will do quite a bit of
signature introspection to determine why a binding of user-specified arguments
to the coroutine signature might fail.
"""
try:
command: FrozenCommand = self[name_or_alias]
coro_signature: inspect.Signature = command.signature
except NoSuchCommandError as e:
raise e
pos_arg_values = [x for x in parsed_args.positionals]
raw_kwargs = {k: v for k, v in parsed_args.kv.asDict().items()}
resolved_kwargs, unresolved_kwargs = command.resolved_kwarg_names(raw_kwargs)
# Check if we have any extra/unresolvable kwargs.
if not command.has_var_kw_arg and unresolved_kwargs:
extra_kwargs = list(unresolved_kwargs.keys())
raise NoSuchArgumentError(*extra_kwargs)
        # We can safely merge these kwarg dicts now since we know any unresolvable
# arguments must be due to a **kwargs variant.
merged_kwarg_dicts = {**unresolved_kwargs, **resolved_kwargs}
try:
bound_args = command.signature.bind(*pos_arg_values, **merged_kwarg_dicts)
can_bind = True
except TypeError:
can_bind = False
# If we can call our function, we next promote all eligible arguments and
# execute the coroutine call.
if can_bind:
for arg_name, value in bound_args.arguments.items():
param = coro_signature.parameters[arg_name]
arg_annotation = param.annotation
for _type, promoter_callable in self._type_promoter_mapping.items():
if not is_matching_type(_type, arg_annotation):
continue
new_value: Any
if param.kind == param.VAR_POSITIONAL:
# Promote over all entries in a *args variant.
new_value = tuple(promoter_callable(x) for x in value)
elif param.kind == param.VAR_KEYWORD:
# Promote over all values in a **kwargs variant.
new_value = {
k: promoter_callable(v) for k, v in value.items()
}
else:
# Promote a single value.
new_value = promoter_callable(value)
bound_args.arguments[arg_name] = new_value
await self._app.run_async_callbacks(
self._before_command_callbacks[command],
*bound_args.args, **bound_args.kwargs
)
ret = await command.run(*bound_args.args, **bound_args.kwargs)
await self._app.run_async_callbacks(
self._after_command_callbacks[command],
*bound_args.args, **bound_args.kwargs
)
return ret
# Otherwise, we do some inspection to generate an informative error.
try:
partially_bound_args = command.signature.bind_partial(
*pos_arg_values, **merged_kwarg_dicts
)
partially_bound_args.apply_defaults()
can_partially_bind = True
except TypeError:
can_partially_bind = False
if not can_partially_bind:
# Check if too many positional arguments were provided.
if not command.has_var_pos_arg:
pos_arg_types = (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
)
unbound_pos_args = [
param for name, param in command.signature.parameters.items()
if param.kind in pos_arg_types and name not in merged_kwarg_dicts
]
if len(pos_arg_values) > len(unbound_pos_args):
unbound_values = pos_arg_values[len(unbound_pos_args):]
raise TooManyPositionalArgumentsError(*unbound_values)
# Something else went wrong.
# XXX: should we be collecting more information here?
raise UnknownArgumentBindingError(
command.signature, pos_arg_values, merged_kwarg_dicts
)
# If we got this far, then we could at least partially bind to the coroutine
# signature. This means we are likely just missing some required arguments,
# which we can now enumerate.
        # Materialize the generator: a generator object is always truthy,
        # so the emptiness check below would otherwise never fire.
        missing_arguments = tuple(
            x for x in command.signature.parameters
            if x not in partially_bound_args.arguments.keys()
        )
if not missing_arguments:
raise UnknownArgumentBindingError(
command.signature, pos_arg_values, merged_kwarg_dicts
)
raise MissingArgumentsError(*missing_arguments)
def get_suggestions(
self,
name_or_alias: str,
max_suggestions: int = 3
) -> Tuple[str, ...]:
"""Find the closest matching names/aliases to the specified string.
Returns:
A possibly-empty tuple of :class:`Command`s that most
closely match the specified `name_or_alias` field.
"""
fuzz = FuzzyMatcher(
name_or_alias,
self._command_lookup_table.keys(),
num_max_matches=max_suggestions
)
return fuzz.matches
def keys(
self
) -> Tuple[str, ...]:
"""Get a tuple of all registered command names and aliases."""
return tuple(self._command_lookup_table.keys())
@property
def registered_commands(
self
) -> Tuple[FrozenCommand, ...]:
"""The :py:class:`FrozenCommand` instances registered on this engine."""
return tuple(self._registered_commands)
def __contains__(
self,
name_or_alias: str
) -> bool:
"""Whether a specified command name or alias is mapped."""
return name_or_alias in self._command_lookup_table.keys()
def __len__(
self
) -> int:
"""The number of total names/aliases mapped in this instance."""
return len(self._command_lookup_table.keys())
def __repr__(
self
) -> str:
return f'<{self.__class__.__qualname__} [{str(self)}]>'
def __str__(
self
) -> str:
return (
f'{len(self)} names mapped to {len(self._registered_commands)} commands'
)
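# A minimal, hypothetical usage sketch (names assumed, not from almanac docs):
#
#     engine = CommandEngine(app, my_frozen_command)
#     engine.add_promoter_for_type(int, int)  # coerce int-annotated raw args
#     await engine.run('my_command', parsed_args)  # parsed_args: pp.ParseResults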
| true
|
3e89995dd17a33516f8e2d317305b5445d44ee28
|
Python
|
mattiagiuri/rubikpy
|
/CubeSolverTest.py
|
UTF-8
| 3,575
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
from CubeSolver import CubeSolver
import numpy as np
# disposition = np.array([[['U', 'G', 'Y'],
# ['U', 'W', 'O'],
# ['R', 'Y', 'W']],
# [['G', 'G', 'U'],
# ['Y', 'R', 'G'],
# ['O', 'Y', 'U']],
# [['R', 'R', 'O'],
# ['W', 'G', 'O'],
# ['O', 'Y', 'R']],
# [['G', 'G', 'Y'],
# ['W', 'O', 'O'],
# ['G', 'U', 'W']],
# [['R', 'U', 'Y'],
# ['U', 'U', 'R'],
# ['U', 'R', 'W']],
# [['G', 'R', 'W'],
# ['W', 'Y', 'W'],
# ['O', 'O', 'Y']]
# ])
disposition = np.array([[['G', 'U', 'W'],
['Y', 'W', 'Y'],
['U', 'G', 'Y']],
[['R', 'R', 'O'],
['O', 'R', 'U'],
['O', 'O', 'O']],
[['G', 'G', 'Y'],
['Y', 'G', 'W'],
['U', 'O', 'R']],
[['R', 'O', 'U'],
['U', 'O', 'R'],
['U', 'R', 'R']],
[['Y', 'G', 'G'],
['U', 'U', 'G'],
['W', 'R', 'Y']],
[['O', 'W', 'G'],
['Y', 'Y', 'W'],
['W', 'W', 'W']]
])
sample_input = np.array([[['G', 'U', 'W'],
['Y', 'W', 'Y'],
['U', 'G', 'Y']],
[['R', 'R', 'O'],
['O', 'R', 'U'],
['O', 'O', 'O']],
[['G', 'G', 'Y'],
['Y', 'G', 'W'],
['U', 'O', 'R']],
[['R', 'O', 'U'],
['U', 'O', 'R'],
['U', 'R', 'R']],
[['Y', 'G', 'G'],
['U', 'U', 'G'],
['W', 'R', 'Y']],
[['O', 'W', 'G'],
['Y', 'Y', 'W'],
['W', 'W', 'W']]
])
print('input must be a numpy array and contain faces in this order: white, red, green, orange, blue, yellow')
print(sample_input)
print('white face input considered as if you had red face on top')
print('yellow face input considered as if you had orange face on top')
print('the other faces are considered as if you had yellow face on top')
print('while you solve the cube, you should look at the red face with the yellow face on top')
print('F = move front (red) face clockwise, F1 = move front (red) face anticlockwise')
print('R = move right (green) face clockwise, R1 = move right (green) face anticlockwise')
print('B = move back (orange) face clockwise, B1 = move back (orange) face anticlockwise')
print('L = move left (blue) face clockwise, L1 = move left (blue) face anticlockwise')
print('U = move up (yellow) face clockwise, U1 = move up (yellow) face anticlockwise')
print('D = move down (white) face clockwise, D1 = move down (white) face anticlockwise')
print('\n\n')
solver = CubeSolver(disposition)
print(solver.mover.moves)
print(solver.mover.cube)
print(solver.mover.initial_cube)
| true
|
60b582707e3a0cd64370ec89e38de2f21f559372
|
Python
|
WKPlus/phone-address
|
/crawl_phone_prefix.py
|
UTF-8
| 1,494
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
import requests
import re
from bs4 import BeautifulSoup
import os
import time
URL = "http://www.nowdl.cn/city/guangdong/{}.php"
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36',
}
CITY = {
'广州': 'guangzhou', '湛江': 'zhanjiang',
'肇庆': 'zhaoqing', '中山': 'zhongshan',
'珠海': 'zhuhai', '潮州': 'chaozhou',
'东莞': 'dongguan', '佛山': 'fushan',
'河源': 'heyuan', '惠州': 'huizhou',
'江门': 'jiangmen', '揭阳': 'jieyang',
'茂名': 'maoming', '梅州': 'meizhou',
'清远': 'qingyuan', '汕头': 'shantou',
'汕尾': 'shanwei', '韶关': 'shaoguan',
'深圳': 'shenzhen', '阳江': 'yangjiang',
'云浮': 'yunfu',
}
def get_phone_prefix(cname, city, p=re.compile(r'^\d{7}$')):
url = URL.format(city)
r = requests.get(url)
bs = BeautifulSoup(r.content)
al = bs.find_all('a')
ret = [i.get_text() for i in al if p.match(i.get_text())]
print "%s爬取号码前缀%s个" % (cname, len(ret))
return ret
if __name__ == '__main__':
for cname, city in CITY.iteritems():
ret = get_phone_prefix(cname, city)
if os.path.isfile(city):
continue
with open(city, 'w') as out_fd:
out_fd.write(cname)
out_fd.write(" " + ",".join(ret))
out_fd.write("\n")
time.sleep(30)
| true
|
ee5fb9db73b16be2b59c3aae95f4d9eb548d1446
|
Python
|
Sanjo23Chaval/programming_oscar_sanjose
|
/exersices/tri_matrix.py
|
UTF-8
| 1,019
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
def read_row_column(text):
f = open(text,"r")
info = f.readline()
info = info.rstrip()
info = info.split()
for i in range(len(info)):
if info[i] == "rows":
rows = info[i+2]
rows = list(rows)
if "," in rows:
rows.remove(",")
elif info[i] == "cols":
cols = info[i+2]
cols = list(cols)
if "," in cols:
cols.remove(",")
return rows, cols, f
def pairs_from_matrix(rows, cols, file):
matrix = {}
col = 0
for line in file:
line = line.split()
for z in range(len(line)):
matrix[rows[z]+cols[col]] = line[z].replace(".","")
col += 1
return matrix
def fill_rest_matrix(matrix):
for i in matrix.keys():
if i[::-1] not in matrix.keys():
matrix[i[::-1]] = matrix[i]
return matrix
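# fill_rest_matrix mirrors each score, so e.g. matrix["WT"] == matrix["TW"]
# whenever only one of the two keys was present in the input file.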
A = read_row_column("PAM.txt")
B = pairs_from_matrix(A[0], A[1], A[2])
C = fill_rest_matrix(B)
print C["TW"],C["WW"],C["WT"]
| true
|
6f9b13fd9c8621e629a8a365073fd19d3d7b2ba0
|
Python
|
shimy-abd/speech_recognizer
|
/src/recognizer.py
|
UTF-8
| 738
| 3.203125
| 3
|
[] |
no_license
|
import speech_recognition
class Recognizer:
def main(self):
recognizer = speech_recognition.Recognizer()
while True:
try:
with speech_recognition.Microphone() as mic:
recognizer.adjust_for_ambient_noise(mic, 0.5)
print("Ok, I'm listening...")
audio = recognizer.listen(mic)
text_list = recognizer.recognize_google(audio)
print(f"Right, you said: {text_list}")
except speech_recognition.UnknownValueError:
print("Sorry I didn't get that :(")
except Exception as e:
print(f"exception: {e}")
exit(1)
Recognizer().main()
| true
|
548e363588aca045738ebc575fe70ef9ac530dad
|
Python
|
pragadish92/wordpress-auto-post
|
/blogpost.py
|
UTF-8
| 2,500
| 2.78125
| 3
|
[] |
no_license
|
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.posts import GetPosts, NewPost
from wordpress_xmlrpc.methods.users import GetUserInfo
from wordpress_xmlrpc.compat import xmlrpc_client
from wordpress_xmlrpc.methods import media, posts
from os import remove
from datetime import datetime
import json
with open('auth.json') as f:
authData = json.load(f)
def make_post(content, categorys='0', tags='0', date = None):
    '''
    :param content: a correctly formatted dict()
    :param categorys: list of the post's categories
    :param tags: list of the post's tags
    :param date: date on which the post should be published (datetime.datetime format)
    :return:
    '''
    # Site URL, username, password!
wp = Client( authData['url'] + '/xmlrpc.php',
authData['adminUsername'], authData['adminPassword'])
post = WordPressPost()
post.title = content['title']
post.content = content['body']
if tags[0] != '0':
post.terms_names = {
'post_tag': tags
}
    try:
        categorys[0] == 0
    except IndexError:
        pass
    else:
        if categorys[0] != '0':
            # Merge with any tags set above instead of overwriting them.
            terms = getattr(post, 'terms_names', None) or {}
            terms['category'] = categorys
            post.terms_names = terms
# Lets Now Check How To Upload Media Files
filename = content['image']
data = {
'name': content['title']+ '.jpeg',
'type': 'image/jpeg' # Media Type
}
# Now We Have To Read Image From Our Local Directory !
with open(filename, 'rb') as img:
data['bits'] = xmlrpc_client.Binary(img.read())
response = wp.call(media.UploadFile(data))
attachment_id = response['id']
# Above Code Just Uploads The Image To Our Gallery
# For Adding It In Our Main Post We Need To Save Attachment ID
post.thumbnail = attachment_id
    # delete the local copy of the image
remove(filename)
    # set the post to be published immediately (not saved as a draft)
post.post_status = 'publish'
    # schedule the post for the requested date
if date != None:
post.date = date
post.id = wp.call(posts.NewPost(post))
# Set Default Status For Post .i.e Publish Default Is Draft
# We Are Done With This Part :) Lets Try To Run It
if not date:
print(post.title, " Postado com sucesso !")
if date:
print(post.title, " Vai ser postado em ",date,'!')
| true
|
decea53dc4b713b7333ad773c7c4169075ab73a3
|
Python
|
k1713310/Python-Sample
|
/Python TestScript.py
|
UTF-8
| 1,015
| 3.8125
| 4
|
[
"Unlicense"
] |
permissive
|
print()
print ("Hello World")
fred = 200
print(fred)
john = fred
print(john)
print()
number_of_coins = 240
print(number_of_coins)
found_coins = 20
magic_coins = 10
stolen_coins = 3
found_coins + magic_coins * 365 - stolen_coins * 52
stolen_coins = 2
magic_coins = 13
found_coins + magic_coins * 365 - stolen_coins * 52
fred = "why do gorillas have big nostrils? Big fingers!!"
print (fred)
fred = "what is pink and fluffy? Pink fluff!"
print (fred)
fred = '''how do dinosaurs pay their bills?
With tyrannosaurus checks!'''
print (fred)
silly_string = '''He said, "Aren't can't shouldn't wouldn't."'''
print (silly_string)
joke_text = '%s: a device for finding furniture in the dark'
bodypart1 = 'knee'
bodypart2 = 'shin'
print(joke_text % bodypart1)
print(joke_text % bodypart2)
myscore = 2400
message = 'I scored %s points'
print(message % myscore)
nums = 'What did number %s say to number %s? Nice Belt!!'
print(nums % (0, 8))
print(10 * 'Hello ')
print (1000 * 'WOW! ')
| true
|
7351e72adbbd7316407333b52ad58e869176ba2f
|
Python
|
karagg/tt
|
/logistic/logistic_main.py
|
UTF-8
| 2,037
| 2.546875
| 3
|
[] |
no_license
|
import numpy as np
import Nor_sta as ns
from gradientDesent import gradientDesent
from gradientDesent_Reg import grad
import sys
sys.path.append(r"C:\Users\Lenovo\performance_evaluation")
from F1 import F1
import ROC_AUC as ra
from Accuray import Accuracy
from hold_out import hold_out
import Confusion_Matrix as cm
from plotDecisionBoundary import plotDB
from computecost import sigmoid
data = np.loadtxt('ex2data1.txt',delimiter=",")  # load the data file into a NumPy array
print(data)
X=data[:,0:-1]
h=ns.Nor()
#X=h.S_n_normalize(X,"standard")#数组标准化,数据符合正态分布
X=h.S_n_normalize(X,"normalize")#数组归一化,数据都在零和一之间
X1=X[:,0]
X2=X[:,1]
y=data[:,-1]
y1=y
y=y.reshape(-1,1)
#pl.plotData(X1,X2,y1)
m=X.shape[0]  # number of rows of the original feature matrix
n=X.shape[1]  # number of columns of the original feature matrix
ones=np.ones(m).reshape(-1,1)
X=np.hstack([ones,X])  # prepend an x0 column of ones to the feature matrix
m=len(y)
"""分百分之八十数据作为训练集和验证集进行留出集验证,剩下为测试集进行模型性能准确率,F1,混淆矩阵的评估"""
q=int(m*0.8)
X_train=X[:q,:]
y_train=y[:q,:]
X_test=X[q:,:]
y_test=y[q:,:]
#print(X_train)
#print(X_test)
#theta,J=grad(X_train,y_train,0.3,1500,0.1)  # gradient descent with regularization to obtain theta
#theta,J=gradientDesent(X_train,y_train,0.3,1500)  # plain gradient descent to obtain theta
#print(theta)
#print(J)
theta=hold_out(X_train,y_train,0.8,num_val=10)  # theta averaged over 10 hold-out evaluations
plotDB(theta,X1,X2,y1)  # plot the decision boundary over the data
y_pre=sigmoid(np.dot(X_test,theta))
yP=sigmoid(np.dot(X,theta))
y_pre1=y_pre
y_pre1[y_pre<0.5]=0
y_pre1[y_pre>=0.5]=1  # y_pre1 holds the final predicted labels
acc=Accuracy(y_pre1, y_test)  # accuracy
print('accuracy')
print(acc)
f1=F1(y_test, y_pre1)  # F1 score
print("F1")
print(f1)
cm.Con_Mat(y_test,y_pre1)  # confusion matrix
roc=ra.ROC(y,yP,np.arange(0,1,0.05))  # ROC curve points
print(roc)
auc=ra.AUC(y,yP,np.arange(0,1,0.05))  # AUC
print(auc)
| true
|
07de629782dbd09f1bdee837ee55ddecb6c818df
|
Python
|
FredericCanaud/Kattis
|
/Filip/main.py
|
UTF-8
| 277
| 3.734375
| 4
|
[] |
no_license
|
import sys
nombres = sys.stdin.readline().split()
nombre1 = int(nombres[0])
nombre2 = int(nombres[1])
nombre1 = str(nombre1)[::-1]
nombre2 = str(nombre2)[::-1]
nombre1 = int(nombre1)
nombre2 = int(nombre2)
if nombre1 <= nombre2:
print(nombre2)
else:
print(nombre1)
| true
|
d3eeab7b70ffe76cfa72b1ef6de37eed65dd0ab1
|
Python
|
vietanhdev/iFirewall
|
/iWAF/Config.py
|
UTF-8
| 3,221
| 2.65625
| 3
|
[] |
no_license
|
from threading import Lock
import redis
class Config:
PUBLIC_PARAMS = ["rate_limit_interval",
"rate_limit_warning_thresh",
"rate_limit_block_thresh",
"rate_limit_block_time",
"rate_limit_ddos_thresh",
"rate_limit_ddos_interval",
"rate_limit_whitelist_expiration_time",
"rate_limit_under_attack",
"rate_limit_ddos_blocking_time",
"servers",
"homepage"
]
def __init__(self, default_config):
self.config = default_config
self.data_lock = Lock()
self.redis_db = redis.StrictRedis(host=self.config["redis_host"], port=self.config["redis_port"], db=self.config["redis_db"])
def get(self):
self.data_lock.acquire()
config = self.config.copy()
self.data_lock.release()
return config
def get_public_params(self):
config = {}
self.data_lock.acquire()
for key in self.PUBLIC_PARAMS:
config[key] = self.config[key]
self.data_lock.release()
return config
def get_param(self, key):
self.data_lock.acquire()
value = self.config[key]
self.data_lock.release()
return value
def set_param(self, key, value):
self.data_lock.acquire()
self.config[key] = value
self.data_lock.release()
def update_params(self, params):
for key in params:
# Int params
if key in ["rate_limit_interval",
"rate_limit_warning_thresh",
"rate_limit_block_thresh",
"rate_limit_block_time",
"rate_limit_ddos_thresh",
"rate_limit_ddos_interval",
"rate_limit_ddos_blocking_time",
"rate_limit_whitelist_expiration_time"]:
try:
value = int(params[key])
if value <= 0:
return "Wrong value for %s!" % key
self.set_param(key, value)
except:
return "Wrong value for %s!" % key
elif key == "rate_limit_under_attack":
try:
value = bool(params[key])
self.set_param(key, value)
except:
return "Wrong value for %s!" % key
elif key == "servers":
try:
servers = []
for i in range(len(params["servers"])):
servers.append({
"id": i+1,
"address": params["servers"][i],
"server_status_url": params["servers"][i] + "server_status",
"online": True
})
self.set_param(key, servers)
except:
return "Wrong value for %s!" % key
elif key == "reset_protection_db": # Clear protection database
                # Use a distinct name so the loop variable 'key' above is not clobbered.
                for db_key in self.redis_db.scan_iter("*"):
                    self.redis_db.delete(db_key)
return "Cleared database successfully!"
return "Updated successfully!"
| true
|
ee5ffb295a8b79d322362c5fb23f50e15fb76372
|
Python
|
tsl3su/moods
|
/streaming8emo2.py
|
UTF-8
| 8,530
| 2.734375
| 3
|
[] |
no_license
|
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import csv
import math
from collections import Counter
import string
import nltk
from nltk.corpus import wordnet as wn
from nltk.stem.wordnet import WordNetLemmatizer
import time
import datetime
# Go to http://dev.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key="SikqyZdm7zTQ2BwVujWqvHQNt"
consumer_secret="PFzKTkeD8OcyIxEsXR8vX7UGPXwg8Y0w3y33BZXbls9AmHNaFx"
# After the step above, you will be redirected to your app's page.
# Create an access token under the the "Your access token" section
access_token="2556855996-0hqPxqN0BOe2Qv8VBEJ7lIHo2KZeO7TDtceLonD"
access_token_secret="VyecktLuynfc4mYQLaPuma0RDxV2hRiLotMB3ICtC1pdY"
lmtzr = WordNetLemmatizer()
emotionCounter = Counter()
wordCounter = Counter()
emojiCounter = Counter()
# timer = int(datetime.datetime.fromtimestamp(time.time()).strftime('%M')) + 1
#interval of write interval csv out
prevtime = datetime.datetime.now()
interval = 3
# 12 different emotion types
_emotions = {
0: "Pleasant",
1: "Excited",
2: "Tense",
3: "Stressed",
4: "Unpleasant",
5: "Unhappy",
6: "Calm",
7: "Content"
}
# 9 different Emoji codes
_emojis = {
"\xF0\x9F\x98\x83": "Happy", # \xF0\x9F\x98\x83 - Happy [1500]
"\xF0\x9F\x98\x8C": "Relaxed", ## \xF0\x9F\x98\x8C - relieved face [315]
"\xF0\x9F\x98\x84": "Elated", ## \xF0\x9F\x98\x84 - smiling face with open mouth and smiling eyes[1500]
"\xF0\x9F\x98\x9D": "Excited", ## \xF0\x9F\x98\x9D - face with stuck-out toungue and tightly closing eyes [1211]
"\xF0\x9F\x98\xB3": "Stressed", ## \xF0\x9F\x98\xB3 - Flushed face [969]
"\xF0\x9F\x98\xA1": "Upset", ## \xF0\x9F\x98\xA1 - Red pouting face [670]
"\xF0\x9F\x98\x9E": "Unhappy", ## \xF0\x9F\x98\x9E - disappointed face [635]
"\xF0\x9F\x98\xA2": "Sad", ## \xF0\x9F\x98\xA2 - crying face [508]
"\xF0\x9F\x98\x8A": "Pleasant" # \xF0\x9F\x98\x8A - smiling face with smiling eyes [1460]
}
class StdOutListener(StreamListener):
""" A listener handles tweets are the received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def on_data(self, data):
#print data
f = open("feels.txt", 'a')
if 'text' in json.loads(data):
# print json.loads(data)
tweet = json.loads(data)['text']
tweet = tweet.encode('UTF-8')
# print "\n", "\n",
results = tweetCat([tweet])
for r in results:
print(r)
# #counters for 12 emotion types
# emotionCounter[emotion] += 1
# print 'EMOTION COUNTER TALLY CURRENTLY ATM:', emotionCounter.most_common()
f.write(tweet + "\n" + "\n")
f.close()
return True
else: print json.loads(data)
def on_error(self, status):
print status
def scoreWord(word):
"""count a singular word's:
V.Mean.Sum -- Valence/Pleasure Mean Sum
A.Mean.Sum -- Arousal Mean Sum
and return a coordinate tuple in the following order of (Valence, Arousal)
"""
global anew
#gives stem of word/ lemmatized
stWord = lmtzr.lemmatize(word)
# print 'the word is', word, 'all other versions are', stWord
if word in anew:
# print word
valenceMeanSum = float(anew[word]['V.Mean.Sum'])
arousalMeanSum = float(anew[word]['A.Mean.Sum'])
return (valenceMeanSum, arousalMeanSum,word)
elif stWord in anew:
valenceMeanSum = float(anew[stWord]['V.Mean.Sum'])
arousalMeanSum = float(anew[stWord]['A.Mean.Sum'])
return (valenceMeanSum, arousalMeanSum,stWord)
# elif stWord + ed in anew:
# valenceMeanSum = float(anew[stWord]['V.Mean.Sum'])
# arousalMeanSum = float(anew[stWord]['A.Mean.Sum'])
# return (valenceMeanSum, arousalMeanSum)
else: return False
def scoreAngle(score):
"""Pass in a scoreWord tuple, Returns radians of tuple"""
#Divide by pi to get a transformatiom of (o, 2pi) -> (0, 2)
angle = math.atan2((score[1]/9.0) -.5, (score[0]/9.0) -.5)/math.pi
if angle < 0: angle +=2
return angle
def scoreEmo(score):
print score
print scoreAngle(score)
print round((scoreAngle(score))*4)
return _emotions[round(scoreAngle(score)*4)%8] #column[1][score[0]] for column in anew, ; need to get the word assoc. with it!
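# For example, a scoreWord result of (8.0, 8.0, word) (high valence, high
# arousal) gives an angle of 0.25, and round(0.25 * 4) % 8 == 1 -> "Excited".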
def search(tweet):
for key in _emojis.keys():
# print key
# emojis = _emojis[key]
# yield emojis
if key in tweet:
# print "key: %s, value: %s" % (key, _emojis[key])
yield _emojis[key]
print _emojis[key]
# else: return False
# NOTE: the block below belonged to the commented-out helper remove_left();
# it is commented out as well so it does not run at import time with 'f'
# undefined (and so it does not shadow the csv module).
# def remove_left(f):
#     num = 0
#     rows = []
#     while True:
#         data = f.readline()
#         if data == '':
#             break
#         if (num < 1 or num > 8):
#             rows.append(data)
#         num += 1
#     f.seek(14)
#     for row in rows:
#         f.write(row)
#     # f.truncate()
numOfTweets = 0
numOfEmo = 0
def tweetCat(tweets):
global numOfTweets
# global timer
global numOfEmo
global prevtime
tweets = [tweet.translate(None, string.punctuation).split() for tweet in tweets]
# print tweets
allemotions = []
for tweet in tweets:
ts = time.time()
emojis = search(tweet)
# print type(emojis)
# for emoji in emojis:
# emojiCounter[emoji]+=1
# # print 'EMOJI IS:', emoji
print tweet
# for word in tweet: wordCounter[word]+=1
emotions = map(scoreEmo,filter(None,map(scoreWord,tweet)))
print 'emotions',emotions
numOfTweets += 1
print
print 'Number of Tweets ****** :', numOfTweets
allemotions.append(emotions)
for feeling in emotions: emotionCounter[feeling]+=1
st = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') #%Y-%m-%d
minutes = datetime.datetime.fromtimestamp(ts).strftime('%M')
yield emotions
#returns top 20 ten words in emotion counter
# print 'This is the top twenty for Word Counter', wordCounter.most_common(20)
currenttime = datetime.datetime.now()
if(currenttime-prevtime).seconds >= interval:
#file for percentages per respective emotions
with open('/home/tsl/public_html/percenttest1.csv', 'a') as outfileone:
numOfEmo = sum(emotionCounter.itervalues())
for item in emotionCounter:
emoPercentage = float(emotionCounter[item])/float(numOfEmo)
print emoPercentage
outfileone.write(item + ',' + str(emoPercentage) + ','+ str(st) + ',' + '\n')
print '********** Just logged into Percentages ***************'
#file for raw number per respective emotions
with open('/home/tsl/public_html/rawtest1.csv', 'a') as outfiletwo:
print 'Number of Emotions ****** :', numOfEmo
for item in emotionCounter:
# runningEmoAvg += emoPercentage
outfiletwo.write(item + ',' + str(emotionCounter[item]) + ','+ str(st) + ',' + '\n')
emotionCounter[item] = 0
numOfTweets = 0
numOfEmo = 0
prevtime = currenttime
print 'Emotion Counter is: ', emotionCounter
# print 'This is the Emoji Counter: ', emojiCounter.most_common()
print "*************************************"
# empty dictionary to fill with tweet words in common with ANEW words
anew = {}
#open ANEW csv file and read in a dictionary of like word's key-values pairs
with open('ANEWwordbank.csv', 'rb') as f:
reader = csv.DictReader(f)
for row in reader: anew.update({row['Word']:row})
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=[
'worldcup', 'world', 'cup'
], languages = ["en"])
# optative_mood = ['May', 'if only', 'save']
# imperative_mood = ['go', 'do','run', 'i have to', 'would', 'could']
# , 'would', 'could', 'actually', 'all in all', 'certainly', 'clearly', 'doubtful', 'debatable', 'essentially', 'fortunately',
# 'unfortunately', 'in fact', 'inevitably', 'likely', 'maybe if', 'without a doubt', 'positively', 'really', 'technically',
# 'without a doubt', 'undeniably
| true
|
36849aef297944fb64f7b05417e387dacc0023c1
|
Python
|
killynathan/Algorithms
|
/Assign13-Johnson'sAPSP/BellmanFord.py
|
UTF-8
| 1,244
| 3.359375
| 3
|
[] |
no_license
|
import math
def findShortestPathsFromSource(numOfVertices, edges, source):
    # unreachable vertices keep an infinite distance
    INF = math.inf
# used for stopping early
listIsSameAsBefore = True
# get data structs
#numOfVertices, edges = getGraph2(filename) # !!!!!!! 1 vs 2
A = [INF]*(numOfVertices + 1) # vertices indexed starting from 1
A[source] = 0 # source node
for i in range(numOfVertices + 1):
listIsSameAsBefore = True
for u,v,w in edges:
            if (A[u] != INF and A[u] + w < A[v]):
A[v] = A[u] + w
listIsSameAsBefore = False
if listIsSameAsBefore:
break
elif (i == numOfVertices):
return 'negative cycle'
return A
# return numOfVertices and [[vertex 1, v 2, weight of edge], ...]
def getGraph(filename):
graph = []
lines = open(filename).read().split('\n')
for line in lines:
values = line.split(' ')
graph.append([int(i) for i in values])
return graph[0][0], graph[1:]
def getGraph2(filename):
graph = []
lines = open(filename).read().split('\n')
for line in lines:
edges = line.split('\t')
for edge in edges[1:(len(edges) - 1)]:
edgeContent = edge.split(',')
graph.append([int(edges[0]), int(edgeContent[0]), int(edgeContent[1])])
return 200, graph
# n, test = getGraph('g3.txt')
# print(findShortestPathsFromSource(n, test, 1))
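# Minimal self-contained check (hypothetical edge list, not one of the course files):
# edges = [(1, 2, 4), (2, 3, -2), (1, 3, 5)]
# print(findShortestPathsFromSource(3, edges, 1))  # -> [inf, 0, 4, 2]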
| true
|
4ee52bd0afcbb1dc6253e5a3f1b4f4ade9905c26
|
Python
|
BigR-Lab/concrete-python
|
/concrete/util/file_io.py
|
UTF-8
| 13,297
| 2.8125
| 3
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
"""Code for reading and writing Concrete Communications
"""
import cStringIO
import gzip
import bz2
import mimetypes
import os.path
import tarfile
import zipfile
import os
import pwd
import grp
import time
from thrift import TSerialization
from thrift.transport import TTransport
from concrete import Communication, TokenLattice
from concrete.util.references import add_references_to_communication
from concrete.util.thrift_factory import factory
def read_thrift_from_file(thrift_obj, filename):
"""Instantiate Thrift object from contents of named file
The Thrift file is assumed to be encoded using TCompactProtocol
**WARNING** - Thrift deserialization tends to fail silently. For
example, the Thrift libraries will not complain if you try to
deserialize data from the file `/dev/urandom`.
Args:
- `thrift_obj`: A Thrift object (e.g. a Communication object)
- `filename`: A filename string
Returns:
- The Thrift object that was passed in as an argument
"""
thrift_file = open(filename, "rb")
thrift_bytes = thrift_file.read()
TSerialization.deserialize(
thrift_obj, thrift_bytes,
protocol_factory=factory.protocolFactory)
thrift_file.close()
return thrift_obj
def read_communication_from_file(communication_filename, add_references=True):
"""Read a Communication from the file specified by filename
Args:
- `communication_filename`: String with filename
Returns:
- A Concrete `Communication` object
"""
comm = read_thrift_from_file(Communication(), communication_filename)
if add_references:
add_references_to_communication(comm)
return comm
def read_tokenlattice_from_file(tokenlattice_filename):
"""
Takes the filename of a serialized Concrete TokenLattice file,
reads the TokenLattice from the file and returns an instantiated
TokenLattice instance.
"""
return read_thrift_from_file(TokenLattice(), tokenlattice_filename)
def write_communication_to_file(communication, communication_filename):
return write_thrift_to_file(communication, communication_filename)
def write_thrift_to_file(thrift_obj, filename):
thrift_bytes = TSerialization.serialize(
thrift_obj,
protocol_factory=factory.protocolFactory)
thrift_file = open(filename, "wb")
thrift_file.write(thrift_bytes)
thrift_file.close()
class _FileTypeClass(object):
'''
An instance of this class represents filetypes abstractly; its
members correspond to individual filetypes. It probably doesn't
make sense to have more than one instance of this class.
'''
def __init__(self, *names):
self.CHOICES = tuple(names)
for (i, name) in enumerate(names):
if name == 'CHOICES':
raise ValueError('%s is an invalid filetype name' % name)
setattr(self, self._normalize(name), i)
@classmethod
def _normalize(cls, name):
return name.replace('-', '_').upper()
def lookup(self, ft):
'''
Return filetype (integer value) for ft, where ft may be
a filetype name or (for convenience) the filetype integer
value itself.
'''
if isinstance(ft, int):
return ft
elif isinstance(ft, str) or isinstance(ft, unicode):
return getattr(self, self._normalize(ft))
else:
raise ValueError('unknown filetype %s' % str(ft))
def add_argument(self, argparse_parser, flag='--input-filetype',
help_name='input file'):
'''
Add filetype argument to parser, return function that can be
called on argparse namespace to retrieve argument value, e.g.,
>>> get_filetype = add_argument(parser, ...)
>>> ns = parser.parse_args()
>>> filetype = get_filetype(ns)
'''
argparse_parser.add_argument(flag, type=str, choices=self.CHOICES,
default=self.CHOICES[0],
help='filetype for %s (choices: %s)' %
(help_name, ', '.join(self.CHOICES)))
def _get_value(ns):
return getattr(ns, flag.lstrip('-').replace('-', '_'))
return _get_value
FileType = _FileTypeClass(
'auto',
'zip',
'tar', 'tar-gz', 'tar-bz2',
'stream', 'stream-gz', 'stream-bz2',
)
class CommunicationReader(object):
"""Iterator/generator class for reading one or more Communications from a
file
The iterator returns a `(Communication, filename)` tuple
Supported filetypes are:
- a file with a single Communication
- a file with multiple Communications concatenated together
- a gzipped file with a single Communication
- a gzipped file with multiple Communications concatenated together
- a .tar.gz file with one or more Communications
- a .zip file with one or more Communications
-----
Sample usage:
for (comm, filename) in CommunicationReader('multiple_comms.tar.gz'):
do_something(comm)
"""
def __init__(self, filename, add_references=True, filetype=FileType.AUTO):
filetype = FileType.lookup(filetype)
self._add_references = add_references
self._source_filename = filename
if filetype == FileType.TAR:
self.filetype = 'tar'
self.tar = tarfile.open(filename, 'r|')
elif filetype == FileType.TAR_GZ:
self.filetype = 'tar'
self.tar = tarfile.open(filename, 'r|gz')
elif filetype == FileType.TAR_BZ2:
self.filetype = 'tar'
self.tar = tarfile.open(filename, 'r|bz2')
elif filetype == FileType.ZIP:
self.filetype = 'zip'
self.zip = zipfile.ZipFile(filename, 'r')
self.zip_infolist = self.zip.infolist()
self.zip_infolist_index = 0
elif filetype == FileType.STREAM:
self.filetype = 'stream'
f = open(filename, 'rb')
elif filetype == FileType.STREAM_GZ:
self.filetype = 'stream'
f = gzip.open(filename, 'rb')
elif filetype == FileType.STREAM_BZ2:
self.filetype = 'stream'
f = bz2.BZ2File(filename, 'r')
elif filetype == FileType.AUTO:
if tarfile.is_tarfile(filename):
self.filetype = 'tar'
self.tar = tarfile.open(filename, 'r|*')
elif zipfile.is_zipfile(filename):
self.filetype = 'zip'
self.zip = zipfile.ZipFile(filename, 'r')
self.zip_infolist = self.zip.infolist()
self.zip_infolist_index = 0
elif mimetypes.guess_type(filename)[1] == 'gzip':
# this is not a true stream---is_tarfile will have
# successfully seeked backwards on the file if we have
# reached this point
self.filetype = 'stream'
f = gzip.open(filename, 'rb')
elif mimetypes.guess_type(filename)[1] == 'bzip2':
# this is not a true stream
self.filetype = 'stream'
f = bz2.BZ2File(filename, 'r')
else:
# this is not a true stream
self.filetype = 'stream'
f = open(filename, 'rb')
else:
raise ValueError('unknown filetype %d' % filetype)
        if self.filetype == 'stream':
self.transport = TTransport.TFileObjectTransport(f)
self.protocol = factory.createProtocol(self.transport)
self.transport.open()
def __iter__(self):
return self
def next(self):
"""Returns a `(Communication, filename)` tuple
If the CommunicationReader is reading from an archive, then
`filename` will be set to the name of the Communication file in
the archive (e.g. `foo.concrete`), and not the name of the archive
file (e.g. `bar.zip`). If the CommunicationReader is reading from
a concatenated file (instead of an archive), then all
Communications extracted from the concatenated file will have the
same value for the `filename` field.
"""
        if self.filetype == 'stream':
            return self._next_from_stream()
        elif self.filetype == 'tar':
            return self._next_from_tar()
        elif self.filetype == 'zip':
            return self._next_from_zip()
def _next_from_stream(self):
try:
comm = Communication()
comm.read(self.protocol)
if self._add_references:
add_references_to_communication(comm)
return (comm, self._source_filename)
except EOFError:
self.transport.close()
raise StopIteration
def _next_from_tar(self):
while True:
tarinfo = self.tar.next()
if tarinfo is None:
raise StopIteration
if not tarinfo.isfile():
# Ignore directories
continue
filename = os.path.split(tarinfo.name)[-1]
            if filename.startswith('._'):
# Ignore attribute files created by OS X tar
continue
comm = TSerialization.deserialize(
Communication(),
self.tar.extractfile(tarinfo).read(),
protocol_factory=factory.protocolFactory)
if self._add_references:
add_references_to_communication(comm)
# hack to keep memory usage O(1)
# (...but the real hack is tarfile :)
self.tar.members = []
return (comm, tarinfo.name)
def _next_from_zip(self):
if self.zip_infolist_index >= len(self.zip_infolist):
raise StopIteration
zipinfo = self.zip_infolist[self.zip_infolist_index]
self.zip_infolist_index += 1
comm = TSerialization.deserialize(
Communication(),
self.zip.open(zipinfo).read(),
protocol_factory=factory.protocolFactory)
if self._add_references:
add_references_to_communication(comm)
return (comm, zipinfo.filename)
class CommunicationWriter(object):
"""Class for writing one or more Communications to a file
-----
Sample usage:
writer = CommunicationWriter('foo.concrete')
writer.write(existing_comm_object)
writer.close()
"""
def __init__(self, filename=None):
if filename is not None:
self.open(filename)
def close(self):
self.file.close()
def open(self, filename):
self.file = open(filename, 'wb')
def write(self, comm):
thrift_bytes = TSerialization.serialize(
comm, protocol_factory=factory.protocolFactory)
self.file.write(thrift_bytes)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class CommunicationWriterTar(object):
"""Class for writing one or more Communications to a .TAR archive
-----
Sample usage:
writer = CommunicationWriterTar('multiple_comms.tar')
writer.write(comm_object_one, 'comm_one.concrete')
writer.write(comm_object_two, 'comm_two.concrete')
writer.write(comm_object_three, 'comm_three.concrete')
writer.close()
"""
def __init__(self, tar_filename=None, gzip=False):
self.gzip = gzip
if tar_filename is not None:
self.open(tar_filename)
def close(self):
self.tarfile.close()
def open(self, tar_filename):
self.tarfile = tarfile.open(tar_filename, 'w:gz' if self.gzip else 'w')
def write(self, comm, comm_filename=None):
if comm_filename is None:
comm_filename = comm.uuid.uuidString + '.concrete'
thrift_bytes = TSerialization.serialize(
comm, protocol_factory=factory.protocolFactory)
file_like_obj = cStringIO.StringIO(thrift_bytes)
comm_tarinfo = tarfile.TarInfo()
comm_tarinfo.type = tarfile.REGTYPE
comm_tarinfo.name = comm_filename
comm_tarinfo.size = len(thrift_bytes)
comm_tarinfo.mode = 0644
comm_tarinfo.mtime = time.time()
comm_tarinfo.uid = os.getuid()
comm_tarinfo.uname = pwd.getpwuid(os.getuid()).pw_name
comm_tarinfo.gid = os.getgid()
comm_tarinfo.gname = grp.getgrgid(os.getgid()).gr_name
self.tarfile.addfile(comm_tarinfo, file_like_obj)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class CommunicationWriterTGZ(CommunicationWriterTar):
"""Class for writing one or more Communications to a .TAR.GZ archive
-----
Sample usage:
writer = CommunicationWriterTGZ('multiple_comms.tgz')
writer.write(comm_object_one, 'comm_one.concrete')
writer.write(comm_object_two, 'comm_two.concrete')
writer.write(comm_object_three, 'comm_three.concrete')
writer.close()
"""
def __init__(self, tar_filename=None):
super(CommunicationWriterTGZ, self).__init__(tar_filename, gzip=True)
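# A minimal round-trip sketch (hypothetical filename; `comm` is assumed to be
# an existing Communication object):
#
#     with CommunicationWriter('comm.concrete') as writer:
#         writer.write(comm)
#     for comm, name in CommunicationReader('comm.concrete'):
#         print comm.id, name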
| true
|
a92a31caef6680afc6ebdb761cd61a228345a662
|
Python
|
TPSGrzegorz/PyCharmStart
|
/Collections/Collections_deque.py
|
UTF-8
| 326
| 2.859375
| 3
|
[] |
no_license
|
from collections import deque
n = int(input())
d = deque()
for i in range(n):
    to_unpack = list(input().split())
    if len(to_unpack) == 2:
        command, item = to_unpack
        # dispatch via getattr instead of eval, e.g. d.append(6)
        getattr(d, command)(int(item))
    else:
        getattr(d, to_unpack[0])()
print(*d)
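# Example session (hypothetical input in the HackerRank format):
#   input : 3 / append 1 / appendleft 2 / pop
#   output: 2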
| true
|
fb12c09623e58c888353d97183078c8e2b1c734d
|
Python
|
JuanMazza85/TesisUBA
|
/Scripts/SOM.py
|
UTF-8
| 11,588
| 2.96875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 11:02:48 2017
@author: waldo
Función trainSOM
-------------
Parámetros:
P: es una matriz con los datos de los patrones con los cuales
entrenar la red neuronal.
filas: la cantidad de filas del mapa SOM
columnas: la cantidad de columnas del mapa SOM
alfa_inicial: velocidad de aprendizaje inicial
vecindad: vecindad inicial
fun_vecindad: función para determinar la vecindad (1: lineal, 2: sigmoide)
sigma: ancho de la campana (solo para vecindad sigmoide)
ite_reduce: la cantidad de iteraciones por cada tamaño de vecindad
(la cantidad de iteraciones total sera: total = ite_reduce * (vecindad+1))
dibujar: si vale True (y los datos son en dos dimensiones) dibuja los
ejemplos y el mapa SOM.
Devuelve:
w_O: la matriz de pesos de las neuronas competitivas
Ejemplo de uso:
(w_O) = trainSOM(P, filas, columnas, alfa, vecindad_inicial, fvecindario, sigma, reduce, True);
-------------------------------------------------------------------------------
Función trainOL
-------------
Parámetros:
P: es una matriz con los datos de los patrones con los cuales
entrenar la red neuronal.
T: es una matriz con la salida esperada para cada ejemplo. Esta matriz
debe tener tantas filas como neuronas de salida tenga la red
T_O: clases con su valor original (0 .. n-1) (Solo es utilizado para graficar)
w: es la matriz de pesos devuelta por la función trainSOM
filas: la cantidad de filas del mapa SOM
columnas: la cantidad de columnas del mapa SOM
alfa: velocidad de aprendizaje
max_ite: la cantidad de iteraciones del entrenamiento
dibujar: si vale True (y los datos son en dos dimensiones) dibuja los
ejemplos y el mapa SOM.
Devuelve:
w_S: la matriz de pesos de las neuronas de la capa de salida
Ejemplo de uso:
(w_S) = trainOL(P, T_matriz.T, T, w, filas, columnas, alfa, 100, True);
-------------------------------------------------------------------------------
Función umatrix
-------------
Parámetros:
def umatrix(w, filas, columnas):
w: es la matriz de pesos devuelta por la función trainSOM
filas: la cantidad de filas del mapa SOM
columnas: la cantidad de columnas del mapa SOM
Devuelve:
umatrix: la matriz de distancias del SOM
Ejemplo de uso:
umatrix = umatrix(w, filas, columnas)
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
from mpl_toolkits.mplot3d import Axes3D #Para poder plottear en 3D
marcadores = {0:('.','b'), 1:('.','g'), 2:('x', 'y'), 3:('*', 'm'), 4:('.', 'r'), 5:('+', 'k')}
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 15:28:31 2017
@author: auvimo
"""
import numpy as np
import scipy.misc as sp
#On Python 2.7 a different import provides imread (maybe scipy.image)
def OpenImage(archivo):
    datos = np.array(sp.imread(archivo, mode='P')) # on Mac it is mode='I'
    maximo = len(datos)
    X = np.array([], dtype=np.int64).reshape(0,3)
    colores = np.array([0, 9, 12, 10, 6, 11]) # black red blue green
for color in colores:
filas, columnas = np.where(datos == color)
clase = np.where(colores == color)[0][0]
clases = [clase] * len(filas)
X = np.vstack([X, np.column_stack((columnas+1, maximo-(filas+1), clases))])
return X
def dendograma(matriz, T):
(filas, columnas) = matriz.shape
labels=[]
for f in range(filas):
labels.append(str(int(T[f])))
for c in range(columnas):
if(f==c):
matriz[f,c] = 0
else:
if(matriz[f,c] == 0):
matriz[f,c] = 2
else:
matriz[f,c] = 1 / matriz[f,c]
dists = squareform(matriz)
linkage_matrix = linkage(dists, "single")
dendrogram(linkage_matrix, labels=labels)
plt.title("Dendograma")
plt.show()
def plot(P, T, W, filas, columnas, pasos, title):
plt.figure(0)
plt.clf()
    # examples
x = []
y = []
if(T is None):
for i in range(P.shape[0]):
x.append(P[i, 0])
y.append(P[i, 1])
plt.scatter(x, y, marker='o', color='b', s=100)
else:
colores = len(marcadores)
for class_value in np.unique(T):
x = []
y = []
for i in range(len(T)):
if T[i] == class_value:
x.append(P[i, 0])
y.append(P[i, 1])
plt.scatter(x, y, marker=marcadores[class_value % colores][0], color=marcadores[class_value % colores][1])
    # axes
    minimos = np.min(P, axis=0)
    maximos = np.max(P, axis=0)
    diferencias = maximos - minimos
    minimos = minimos - diferencias * 0.1
    maximos = maximos + diferencias * 0.1
    plt.axis([minimos[0], maximos[0], minimos[1], maximos[1]])
    # centroids
    (neuronas, patr) = W.shape
    for neu in range(neuronas):
        plt.scatter(W[neu,0], W[neu,1], marker='o', color='r')
    # connections
    if(pasos is not None):
for f in range(filas):
for c in range(columnas):
n1 = f*columnas + c
for ff in range(filas):
for cc in range(columnas):
if(pasos[f, c, ff, cc] == 1):
n2 = ff*columnas + cc
plt.plot([W[n1, 0], W[n2, 0]], [W[n1, 1], W[n2, 1]], color='r')
plt.title(title)
plt.draw()
plt.pause(0.00001)
def plot3D(P, T, W, filas, columnas, pasos, title):
fig = plt.figure(0)
plt.clf()
ax= fig.add_subplot(111, projection='3d')
c=[]
for i in range(P.shape[0]):
c.append(col.rgb2hex((P[i,0]/255,P[i,1]/255,P[i,2]/255)))
ax.scatter(P[:,0], P[:,1], P[:,2], c = c, marker = 'o', linewidths=.05, s=100)
ax.set_xlim3d(0,np.max(P[:,0]))
ax.set_ylim3d(0,np.max(P[:,1]))
ax.set_zlim3d(0,np.max(P[:,2]))
ax.set_xlabel('R')
ax.set_ylabel('G')
ax.set_zlabel('B')
    # centroids
    (neuronas, patr) = W.shape
    for neu in range(neuronas):
        ax.scatter(W[neu,0], W[neu,1], W[neu,2],marker='o', c='r')
    # connections
    if(pasos is not None):
for f in range(filas):
for c in range(columnas):
n1 = f*columnas + c
for ff in range(filas):
for cc in range(columnas):
if(pasos[f, c, ff, cc] == 1):
n2 = ff*columnas + cc
ax.plot([W[n1, 0], W[n2, 0]], [W[n1, 1], W[n2, 1]], [W[n1, 2], W[n2, 2]], c='r')
plt.title(title)
plt.draw()
plt.pause(0.00001)
def linkdist(filas, columnas):
pasos = np.zeros((filas, columnas, filas, columnas))
for f in range(filas):
for c in range(columnas):
for ff in range(filas):
for cc in range(columnas):
pasos[f, c, ff, cc] = abs(f-ff) + abs(c-cc)
return pasos
def trainSOM(P, filas, columnas, alfa_inicial, vecindad, fun_vecindad, sigma, ite_reduce, dibujar):
(cant_patrones, cant_atrib) = P.shape
ocultas = filas * columnas
w_O = np.random.rand(ocultas, cant_atrib) - 0.5
#w_O = np.ones((ocultas, cant_atrib)) * 0
pasos = linkdist(filas, columnas)
max_ite = ite_reduce * (vecindad + 1)
ite = 0;
while (ite < max_ite):
alfa = alfa_inicial * (1 - ite / max_ite)
for p in range(cant_patrones):
distancias = -np.sqrt(np.sum(np.power(w_O-P[p,:],2),1))
ganadora = np.argmax(distancias)
fila_g = int(np.floor(ganadora / columnas))
columna_g = int(ganadora % columnas)
for f in range(filas):
for c in range(columnas):
if(pasos[fila_g, columna_g, f, c] <= vecindad):
if fun_vecindad == 1:
gamma = 1
else:
gamma = np.exp(- pasos[fila_g, columna_g, f, c] / (2*sigma))
n = f * columnas + c
w_O[n,:] = w_O[n,:] + alfa * (P[p,:] - w_O[n,:]) * gamma
ite = ite + 1
if (vecindad >= 1) and ((ite % ite_reduce)==0):
vecindad = vecindad - 1;
        if dibujar and (cant_atrib == 2):
            plot(P, None, w_O, filas, columnas, pasos, 'Iteration: ' + str(ite))
        if dibujar and (cant_atrib == 3):
            plot3D(P, None, w_O, filas, columnas, pasos, 'Iteration: ' + str(ite))
        if ite % 10 == 0:
            if dibujar and (cant_atrib == 2):
                plot(P, None, w_O, filas, columnas, pasos, 'Iteration: ' + str(ite))
            if dibujar and (cant_atrib == 3):
                plot3D(P, None, w_O, filas, columnas, pasos, 'Iteration: ' + str(ite))
return (w_O)
def trainOL(P, T, T_O, w, filas, columnas, alfa, max_ite, dibujar):
(cant_patrones, cant_atrib) = P.shape
(cant_patrones, salidas) = T.shape
ocultas = filas * columnas
pasos = linkdist(filas, columnas)
w_S = np.random.rand(salidas, ocultas) - 0.5
ite = 0;
while ( ite <= max_ite ):
for p in range(cant_patrones):
distancias = -np.sqrt(np.sum((w-(P[p,:])*np.ones((ocultas,1)))**2,1))
ganadora = np.argmax(distancias)
w_S[:, ganadora] = w_S[:, ganadora] + alfa * (T[p, :] - w_S[:, ganadora])
ite = ite + 1
    if dibujar and (cant_atrib == 2):
        plot(P, T_O, w, filas, columnas, pasos, 'Done')
    if dibujar and (cant_atrib == 3):
        plot3D(P, T_O, w, filas, columnas, pasos, 'Done')
return (w_S)
def umatrix(w, filas, columnas):
(ncen, atributos) = w.shape
umat = np.zeros((filas*2-1, columnas*2-1))
for f in range(filas):
for c in range(columnas):
ff = f*2
cc= c*2
n1 = f * columnas + c
suma = 0
n=0
n2 = f * columnas + (c+1)
if(cc < (columnas*2-2)):
umat[ff, cc+1] = np.sqrt(np.sum((w[n1,:]-w[n2,:])**2))
suma = suma + umat[ff, cc+1]
n=n+1
n2 = (f+1) * columnas + c
if(ff < (filas*2-2)):
umat[ff+1, cc] = np.sqrt(np.sum((w[n1,:]-w[n2,:])**2))
suma = suma + umat[ff+1, cc]
n=n+1
if(n==2):
umat[ff+1, cc+1] = suma / 2
suma = suma + umat[ff+1, cc+1]
n=n+1
if(n>0):
umat[ff, cc] = suma / n
umat[filas*2-2, columnas*2-2] = (umat[filas*2-3, columnas*2-2] + umat[filas*2-2, columnas*2-3]) / 2
return umat
def PrepararSOM(X, Y, CantColumnas):
for j in range(X.shape[1]):
MinCol = X[:,j].min()
MaxCol = X[:,j].max()
for i in range(X.shape[0]):
X[i,j] = (X[i,j] - MinCol) / (MaxCol - MinCol)
T = Y
P = X[:,:CantColumnas]
return (P, T)
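# A minimal smoke test (hypothetical data; argument order follows the
# docstring at the top of this file):
#
#     P = np.random.rand(200, 2)
#     w = trainSOM(P, 5, 5, 0.5, 3, 2, 1.0, 10, False)
#     print(umatrix(w, 5, 5).shape)   # -> (9, 9)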
| true
|
77599051f4300e3d3a95d0f1454df3864546e1ca
|
Python
|
saetar/pyEuler
|
/not_done/py_not_started/euler_614.py
|
UTF-8
| 896
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~ Jesse Rubin ~ project Euler ~
"""
Special partitions 2
http://projecteuler.net/problem=614
An integer partition of a number n is a way of writing n as a sum of positive integers. Partitions that differ only by the order of their summands are considered the same.
We call an integer partition special if 1) all its summands are distinct, and 2) all its even summands are also divisible by 4. For example, the special partitions of 10 are: 10 = 1+4+5=3+7=1+9
The number 10 admits many more integer partitions (a total of 42), but only those three are special.
Let be P(n) the number of special integer partitions of n. You are given that P(1) = 1, P(2) = 0, P(3) = 1, P(6) = 1, P(10)=3, P(100) = 37076 and P(1000)=3699177285485660336.
Find sum_{i=1}^{10^7} P(i). Give the result modulo 10^9 + 7.
"""
def p614():
pass
if __name__ == '__main__':
p614()
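# A brute-force cross-check of P(n) for small n (not the intended solution,
# which needs a partition-counting recurrence to reach 10^7):
# from itertools import combinations
# def P_small(n):
#     parts = [k for k in range(1, n + 1) if k % 2 == 1 or k % 4 == 0]
#     return sum(1 for r in range(1, len(parts) + 1)
#                for c in combinations(parts, r) if sum(c) == n)
# assert P_small(10) == 3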
| true
|
4b044a8a7f6774d4418b650224c7ff34d9925241
|
Python
|
nonas-hunter/warmup_project
|
/warmup_project/scripts/test.py
|
UTF-8
| 847
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
import numpy
import math
def point_to_global(x, y):
theta = math.radians(0)
point = [x, y, 1]
rot_matrix = [[math.cos(theta), -math.sin(theta), 0], \
[math.sin(theta), math.cos(theta), 0], \
[0, 0, 1]]
trans_matrix = [[1, 0 ,0], \
[0, 1, 0], \
[0, 0, 1]]
temp1 = multiply(rot_matrix, point)
temp2 = multiply(trans_matrix, temp1)
return temp2[0], temp2[1]
def multiply(matrix, vector):
output = [0,0,0]
for i in range(len(matrix)):
for j in range(len(matrix[i])):
output[i] += matrix[i][j] * vector[j]
return output
if __name__ == "__main__":
print(point_to_global(1,0))
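# Note: with theta fixed at 0 and an identity translation matrix, the whole
# transform is the identity, so point_to_global(1, 0) -> (1, 0). A
# hypothetical numpy equivalent of multiply() is numpy.asarray(matrix) @ vector.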
| true
|
184de29f33e5dca6d273c1defac3d83e89716f83
|
Python
|
algoitssm/algorithm_taeyeon
|
/Baekjoon/20924_tree_트리의기둥과가지.py
|
UTF-8
| 1,159
| 3.25
| 3
|
[] |
no_license
|
import sys
sys.setrecursionlimit(10 ** 6)
sys.stdin = open("input.txt")
# edges must be stored in both directions (e.g. for 12,7 also keep (3,7))
def dfs(giga_node, giga_length):
    for node in tree[giga_node]:
        if visited[node[0]][0] == 0:  # not visited yet
            visited[node[0]][0] = 1  # mark as visited
            dfs(node[0], giga_length + node[1])
            visited[node[0]][1] = giga_length + node[1]
N, R = map(int, sys.stdin.readline().split())
tree = [[] for i in range(N + 1)]
for i in range(N - 1):
a, b, d = map(int, sys.stdin.readline().split())
tree[a].append((b, d))
tree[b].append((a, d))
giga_f = []
for i in range(len(tree)):
if len(tree[i]) > 2:
giga_f.append(i)
# print(giga_f)
if len(giga_f) < 1:
giga_node = N
else:
giga_node = giga_f[0]
length = 0  # trunk length from the root R up to the giga node
visited = [[0] * 2 for i in range(N + 1)]
for i in range(R, giga_node):
for j in tree[i]:
if visited[j[0]][0] == 0 and j[0] != R:
length += j[1]
visited[j[0]][0] = 1
visited[giga_node][0] = 1
dfs(giga_node, 0)
print(length, max(visited)[1])
| true
|
83ef188bec1f39920efcf302730ec9df4ef22c1b
|
Python
|
twarogm/pp1
|
/03-FileHandling/Exercises/03-19.py
|
UTF-8
| 193
| 3.203125
| 3
|
[] |
no_license
|
tab = []
with open('universities.txt', 'r') as file:
for line in file:
tab.append(line)
with open('universities.txt', 'w') as file:
for i in sorted(tab):
file.write(i)
| true
|
07a18fa5e624a4e59d445d794f8fd51fbbe64fda
|
Python
|
lamby/django-slack
|
/tests/test_escaping.py
|
UTF-8
| 2,117
| 3.125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import django
import unittest
from django_slack import slack_message
from django_slack.utils import get_backend
class SlackTestCase(unittest.TestCase):
def setUp(self):
self.backend = get_backend()
self.backend.reset()
def assertMessageCount(self, count):
self.assertEqual(len(self.backend.messages), count)
def assertMessage(self, url=None, **kwargs):
"""
Ensure there was only one message sent with a URL and data values.
"""
self.assertMessageCount(1)
message = self.backend.messages[0]
# Optionally ensure the URL.
if url is not None:
self.assertEqual(url, message['url'])
# Ensure each input value in data.
for kwarg, value in kwargs.items():
self.assertEqual(value, message['message_data'][kwarg])
class TestEscaping(SlackTestCase):
def test_simple_message(self):
slack_message('test.slack', {'text': 'test'})
self.assertMessage(text='test')
def test_escaped(self):
"""
Simple test of the Django escaping to illustrate problem.
"""
slack_message('test.slack', {'text': '< > & " \''})
if django.VERSION[0] >= 3:
self.assertMessage(text='< > & " '')
else:
self.assertMessage(text='< > & " '')
def test_escape_tag(self):
"""
Test using the escape tag, but not escaping anything.
"""
slack_message('escape.slack', {'text': 'test'})
self.assertMessage(text='test')
def test_escape_chars(self):
"""
Test the characters Slack wants escaped.
See <https://api.slack.com/docs/formatting#how_to_escape_characters>
"""
slack_message('escape.slack', {'text': '< > &'})
self.assertMessage(text='< > &')
def test_not_escape_chars(self):
"""
Test normal HTML escaped characters that Slack doesn't want escaped.
"""
slack_message('escape.slack', {'text': '" \''})
self.assertMessage(text='" \'')
| true
|
95b9aba31e282dd44f8852b09caeee6b2499901f
|
Python
|
admalledd/sublime_text
|
/keymapper.py
|
UTF-8
| 1,818
| 2.703125
| 3
|
[] |
no_license
|
import sublime, sublime_plugin
class keymapperCommand(sublime_plugin.TextCommand,sublime_plugin.WindowCommand):
"""Key Mapper, sadly you still have to define all the keymaps in your
.sublime-keymap just point them all here that you want to be able to map around.
this subclasses both TextCommand and WindowCommand so that it can be used for anything!
in your project file have a new index called "keymapper", inside define keys as you would
in a .sublime-keymap. Note again that these keys that are available are only the ones
that you pointed to run keymapper in your master keymap file.
samples:::
** added to Default ($OS).sublime-keymap
{ "keys": ["ctrl+alt+r"], "command": "keymapper","args":{"key":"ctrl+alt+r"}}
** added to $PROJECT.sublime-project
"keymapper":[
{ "keys": ["ctrl+alt+r"], "command": "subprocess", "args": {"exe": "/home/admalledd/bin/pypy3"}},
]
Note that the .sublime-project sample is using one of my other plugins (sp.py/subprocess),
just because it is all I really use the keymapper for...
"""
def run(self,*args,key=None):
if ((not sublime.active_window().project_file_name()) or
'keymapper' not in sublime.active_window().project_data()):
print("keymapper: no project file found! aborting!")
return False
self.proj_keys = sublime.active_window().project_data()['keymapper']
for keymap in self.proj_keys:
if key in keymap['keys']:
print('keymapper: found keybinding!')
#here is where more complicated logics would go if more crazy is wanted
return sublime.active_window().active_view().run_command(
keymap['command'],keymap['args']
)
| true
|
a80d7822c73877119cbd81d2af444cc31aaef41d
|
Python
|
Kristof95/Security-Tools
|
/IP Address Scanner/Ip_address_scanner.py
|
UTF-8
| 1,284
| 3.390625
| 3
|
[] |
no_license
|
#python34
import requests
import json
def main():
ip = input("Please add an ip address:")
print("\n")
try:
ip_info = []
response = requests.get('http://ipinfo.io/'+ip)
json_resp = json.loads(response.text)
print("Hostname: "+json_resp["hostname"])
print("Location: "+json_resp["loc"])
print("IP Address: "+json_resp["ip"])
print("City: "+json_resp["city"])
print("Postal: "+json_resp["postal"])
print("Region: "+json_resp["region"])
print("Org: "+json_resp["org"])
print("Country: "+json_resp["country"])
print("\n")
print("Saved Ip address information to txt file!")
ip_info.append("Hostname: "+json_resp["hostname"] + "\n" +"Location: "+json_resp["loc"] + "\n" +"IP Address: "+json_resp["ip"]+ "\n" +"City: "+json_resp["city"]+ "\n" +"Postal: "+json_resp["postal"]+ "\n" +"Region: "+json_resp["region"]+ "\n" +"Org: "+json_resp["org"]+ "\n" +"Country: "+json_resp["country"])
save_to_txt(ip_info)
    except Exception as e:
        print(e)  # a generic Exception has no .reason attribute
def save_to_txt(info):
with open("ip_info.txt", "a") as file:
for i in info:
file.write(str(i) + "\n")
if __name__ == "__main__":
main()
| true
|
d62ef5eb94731634ae604bada048fd573fe73d9e
|
Python
|
Cooton/Test
|
/study17.py
|
UTF-8
| 169
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
a=[1,2,3,4,5,'a','b']
zong=0
for x in a:
if(type(x)==int) or (type(x)==float):
zong=zong+x
else:
continue
print(zong)
| true
|
d347204d471b6a8e5d5b442923358ee85ce71ab6
|
Python
|
leihong/learn_python
|
/html_parser.py
|
UTF-8
| 1,940
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
from html.parser import HTMLParser
from urllib import request
import re
class MyHTMLParser(HTMLParser):
def __init__(self):
super(MyHTMLParser, self).__init__()
self.flag=0
self.logdata=0
self.information = []
self.__dataname = 0
def handle_starttag(self, tag, attrs):
        if tag=='ul' and attrs and attrs[0][1] == 'list-recent-events menu':
            print('find a ul:', attrs)
            self.flag=1
        if self.flag==1:
            if tag=='a' and attrs and attrs[0][0] == 'href':
                print('\nTitle: ', end='')
                self.logdata=1
            elif tag=='time':
                print('Time: ', end='')
                self.logdata=2
            elif tag=='span' and self.logdata==2:
                self.logdata=1
            elif tag=='span' and attrs and attrs[0][1]=='event-location':
print('Location: ', end='')
self.logdata=1
#pass #print("Encountered a start tag:", tag)
def handle_endtag(self, tag):
if tag=='ul' and self.flag==1:
self.flag=0
print('encountered ul end')
#pass #print("Encountered an end tag:", tag)
def handle_data(self, data):
if self.flag == 1:
if self.logdata==1:
self.logdata=0
print(data)
elif self.logdata==2:
print(data, end=' ')
pass #print("Encountered some data:", data)
parser = MyHTMLParser()
#parser.feed('<html><head><title>Test</title></head>')
print('Feed some text to the parser. It is processed insofar as it consists of complete elements; incomplete data is buffered until more data is fed or close() is called. data must be str.')
#parser.feed('<body><h1>Parse me!</h1></body></html>')
URL='https://www.python.org/events/python-events/'
with request.urlopen(URL) as f:
data = f.read().decode('utf-8')
print(data)
parser.feed(data)
| true
|
48513fccf0b89b8e04686a6016521e13e6ee14ce
|
Python
|
Harry-Rogers/PythonNotesForProfessionals
|
/Chapter 2 Data Types/2.2 Set Data Types.py
|
UTF-8
| 418
| 3.78125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 15:05:45 2020
@author: Harry
"""
basket = {"apple","orange","apple","pear","bannana"}
print(basket)
#duplicates from a set are removed on print
a = set("abracadabra")
print(a)
# prints the unique letters in a
a.add("z")
print(a)
#can add to a set but will be at the start of the set
b = frozenset("asdfgasa")
print(b)
# cant add to a frozen set
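# a couple more common set operations for reference
c = set("alacazam")
print(a - c)  # letters in a but not in c
print(a & c)  # letters in both a and c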
| true
|
65f78a399237a7d4afe2751cadc303d4d0478787
|
Python
|
smhemel/Problem-Solving-using-Python
|
/URI Online Judge/Uri 1059.py
|
UTF-8
| 69
| 3.609375
| 4
|
[] |
no_license
|
for i in range(2, 101, 2):
    print("%d" % i)
| true
|
7282eee89b2b4a07daaea132882d5d40e6b9cd64
|
Python
|
DebojyotiRoy15/LeetCode
|
/268. Missing Number.py
|
UTF-8
| 324
| 3.109375
| 3
|
[] |
no_license
|
#https://leetcode.com/problems/missing-number/
#O(n)
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        n = len(nums)
        expected = (n * (n + 1)) // 2  # sum of 0..n
        actual = 0
        for i in range(len(nums)):
            actual = actual + nums[i]
        return expected - actual  # expected >= actual, so no abs() needed
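# Quick check (hypothetical driver; LeetCode normally supplies the harness):
#   Solution().missingNumber([3, 0, 1]) -> 2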
| true
|
083432c0814b53dedb373e1be67bc6b5b55d150d
|
Python
|
feisuxiaozhu/gate-complexity
|
/one_way_function/sparse_matrix_creater/4_no_hw1.py
|
UTF-8
| 4,507
| 2.75
| 3
|
[] |
no_license
|
import numpy as np
import random
import galois
from typing import List, Any
import os
import pickle
import multiprocessing
from sympy import Matrix, latex
from itertools import combinations, product, permutations
def weighted_sample(choices: List[Any], probs: List[float]):
"""
Sample from `choices` with probability according to `probs`
"""
probs = np.concatenate(([0], np.cumsum(probs)))
r = random.random()
for j in range(len(choices) + 1):
if probs[j] < r <= probs[j + 1]:
return choices[j]
def sidant_test(n, m, k):
    j = 0
    while True:
        B_inv, B = matrix_generator(n, m, k)  # k was missing in the original call
# print(B)
A = np.matrix(B)
# j += 1
# print(j)
flag = 0
row_sum = np.array(np.sum(np.matrix(A), axis = 0))[0]
if np.any([i in row_sum for i in range(6)]): # first sidhant test
continue
print('passed column denstiy check!')
for ind in pair_cols: # second sidhant test
if np.sum(np.array(B[:, ind[0]] + B[:, ind[1]])) <= 5:
flag = 1
break
if flag:
continue
return (B_inv, B)
def matrix_generator(n,m,k):
GF2 = galois.GF(2)
counter = 0
c=int(n/2)
while True:
counter += 1
# if counter % 10000 == 0:
# os.system('cls')
# print(counter)
candidates = []
        for i in range(n): # create first n unit vectors of length n + m
temp = np.zeros((n,1), dtype=int)
temp[i][0] = 1
candidates.append(temp)
for i in range(c): # add n/2 gates to include all inputs
one = candidates[2*i]
two = candidates[2*i+1]
sum = (one + two) % 2
candidates.append(sum)
for i in range(c, m): # add m-c gates using Hamming weight distribution of previous candidates
Hamming = []
for candidate in candidates:
Hamming.append((np.sum(candidate))**(k)) # non-linear distribution
normed = [float(i)/np.sum(Hamming) for i in Hamming]
all_indices = [j for j in range(len(candidates))]
index1 = weighted_sample(all_indices, normed)
index2 = weighted_sample(all_indices, normed)
while index2 == index1:
index2 = weighted_sample(all_indices, normed)
random_indices = [index1, index2]
# print(random_indices)
one = candidates[random_indices[0]]
two = candidates[random_indices[1]]
sum = (one + two) % 2
candidates.append(sum)
all_indices = [j for j in range(len(candidates))]
# matrix_indices = random.sample(range(0, m+n), n) # build matrix
# matrix_indices = [i+m for i in range(n)] # use n last gate as output
i = m+n-1
result_matrix = candidates[i].T
counter = 1
while counter < n:
counter += 1
temp_matrix = []
while np.linalg.matrix_rank(temp_matrix) < counter and i>=n:
i -= 1
# if i<n:
# matrix_generator
# return np.zeros((n,n)), np.zeros((n,n))
# continue
temp_matrix = np.concatenate((result_matrix, candidates[i].T), axis=0)
temp_matrix = GF2(temp_matrix)
result_matrix = temp_matrix
if i == n-1:
break
if i == n-1:
continue
result_matrix = GF2(result_matrix)
result_matrix_inv = np.linalg.inv(result_matrix)
row_sum = np.array(np.sum(np.matrix(result_matrix_inv), axis = 0))[0]
# if np.any([i in row_sum for i in range(6)]): # must pass the fist Sidhant test!
# continue
return result_matrix, result_matrix_inv
def worker(n,m,k,i):
print('Thread '+str(i)+' working')
A,B = matrix_generator(n,m,k)
print(A)
print(B)
if __name__ == '__main__':
n=20
m=30
pair_cols = [list(i) for i in list(combinations(range(n), 2))]
jobs = [] # list of jobs
jobs_num = 10 # number of workers
for i in range(jobs_num):
k = 0.1*i + 0.1
p1 = multiprocessing.Process(target=worker, args=(n,m,k,i))
jobs.append(p1)
p1.start() # starting workers
# A,B = matrix_generator(n,m)
# print(A)
# print(B)
# print('A density: ' + str(np.count_nonzero(A)/n))
# print('B density: ' + str(np.count_nonzero(B)/n))
| true
|
f91114ebc15ddad18d35615248e622d509f169e1
|
Python
|
maxfiedler/breze
|
/breze/arch/model/gaussianprocess.py
|
UTF-8
| 3,913
| 2.53125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""Module containing models for Gaussian processes."""
import numpy as np
import theano.tensor as T
from theano.sandbox.linalg.ops import MatrixInverse, Det, psd, Cholesky
minv = MatrixInverse()
det = Det()
cholesky = Cholesky()
from ..util import lookup, get_named_variables
from ..component import misc, kernel as kernel_
# TODO document
def parameters(n_inpt):
return dict(length_scales=n_inpt, noise=1, amplitude=1)
def exprs(inpt, test_inpt, target, length_scales, noise, amplitude, kernel):
exprs = {}
# To stay compatible with the prediction api, target will
# be a matrix to the outside. But in the following, it's easier
# if it is a vector in the inside. We keep a reference to the
# matrix anyway, to return it.
target_ = target[:, 0]
# The Kernel parameters are parametrized in the log domain. Here we recover
# them and make sure that they do not get zero.
minimal_noise = 1e-4
minimal_length_scale = 1e-4
minimal_amplitude = 1e-4
noise = T.exp(noise) + minimal_noise
length_scales = T.exp(length_scales) + minimal_length_scale
amplitude = T.exp(amplitude) + minimal_amplitude
# In the case of stationary kernels (those which work on the distances
# only) we can save some work by caching the distances. Thus we first
# find out if it is a stationary tensor by checking whether the kernel
# can be computed by looking at diffs only---this is the case if a
# ``XXX_by_diff`` function is available in the kernel module.
# If that is the case, we add the diff expr to the exprs dict, so it can
# be exploited by code on the top via a givens directory.
kernel_by_dist_func = lookup('%s_by_dist' % kernel, kernel_, None)
stationary = kernel_by_dist_func is not None
kernel_func = lookup(kernel, kernel_)
if stationary:
inpt_scaled = inpt * length_scales.dimshuffle('x', 0)
diff = exprs['diff'] = misc.pairwise_diff(inpt_scaled, inpt_scaled)
D2 = exprs['sqrd_dist'] = misc.distance_matrix_by_diff(diff, 'l2')
gram_matrix = amplitude * kernel_by_dist_func(D2)
exprs['D2'] = D2
else:
gram_matrix = kernel_func(inpt, inpt, length_scales, amplitude)
# TODO clarify nomenclature; the gram matrix is actually the whole thing
# without noise.
gram_matrix += T.identity_like(gram_matrix) * noise
# This is an informed choice. I played around a little with various
# methods (e.g. using cholesky first) and came to the conclusion that
# this way of doing it was way faster than explicitly doing a Cholesky
# or so.
psd(gram_matrix)
inv_gram_matrix = minv(gram_matrix)
n_samples = gram_matrix.shape[0]
ll = (
- 0.5 * T.dot(T.dot(target_.T, inv_gram_matrix), target_)
- 0.5 * T.log(det(gram_matrix))
- 0.5 * n_samples * T.log(2 * np.pi))
nll = -ll
# We are interested in a loss that is invariant to the number of
# samples.
nll /= n_samples
loss = nll
# Whenever we are working with points not in the training set, the
# corresponding expressions are prefixed with test_. Thus test_inpt,
# test_K (this is the Kernel matrix of the test inputs only), and
# test_kernel (this is the kernel matrix of the training inpt with the
# test inpt.
test_kernel = kernel_func(inpt, test_inpt, length_scales, amplitude)
kTK = T.dot(test_kernel.T, inv_gram_matrix)
output = output_mean = T.dot(kTK, target_).dimshuffle(0, 'x')
kTKk = T.dot(kTK, test_kernel)
chol_inv_gram_matrix = cholesky(inv_gram_matrix)
diag_kTKk = (T.dot(chol_inv_gram_matrix.T, test_kernel) ** 2).sum(axis=0)
test_K = kernel_func(test_inpt, test_inpt, length_scales, amplitude,
diag=True)
output_var = ((test_K - diag_kTKk)).dimshuffle(0, 'x')
return get_named_variables(locals())
| true
|
7a07c5bcf3111fe432429167febdd3b8360c26c5
|
Python
|
mr-kkid/sklearn
|
/test10-Kfold&cross_validation&precision&recall.py
|
UTF-8
| 2,227
| 2.53125
| 3
|
[] |
no_license
|
#encoding=UTF-8
#K-fold and cross_validation and precision and recall
from sklearn import datasets
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
import plot_decision_regions
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import StratifiedKFold,cross_val_score
from sklearn.metrics import f1_score,recall_score,precision_score
iris=datasets.load_iris()
X=iris.data[:,[0,1,2,3]]
Y=iris.target
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.3,random_state=0)
pipline_lr=Pipeline([('scl',StandardScaler()),('pca',PCA(n_components=2)),('cls',LogisticRegression(random_state=1))])
kfold=StratifiedKFold(y=Y_train,n_folds=10,random_state=1)
scores=[]
for k,(train,test) in enumerate(kfold):
pipline_lr.fit(X_train[train],Y_train[train])
score=pipline_lr.score(X_train[test],Y_train[test])
scores.append(score)
print ('Fold %d:%.3f'%(k+1,score))
scores=cross_val_score(estimator=pipline_lr,X=X_train,y=Y_train,n_jobs=2,cv=10)
print scores
print '\n'
print np.mean(scores)
lr=LogisticRegression()
lr.fit(X_train,Y_train)
Y_pred=lr.predict(X_test)
print '-----------------------------\n'
print precision_score(y_true=Y_test,y_pred=Y_pred,sample_weight=None)
print recall_score(y_true=Y_test,y_pred=Y_pred,sample_weight=None)
print f1_score(y_true=Y_test,y_pred=Y_pred,sample_weight=None)
| true
|
0ad87d83c1fd290fd46a803459a90d2365382075
|
Python
|
jordibusoms/APA
|
/Assignment2/main_cheating.py
|
UTF-8
| 7,251
| 2.828125
| 3
|
[] |
no_license
|
# import copy
# weakref
import re
import sys
def default_gt(node_a, node_b):
"""Not to be used outside lnk_list insert_sort but the users may
code another one to substitute it if needed, so it's outside the
class. Defines how two lnk_nodes should be compared, returning 1 if
node_a is greater or equal to b and 0 otherwise"""
if not node_a:
return 0
if not node_b:
return 1
    if node_a.value >= node_b.value: # While not used in the task I
        return 1 # considered it the most logical
    else: # default
        return 0
def gt_by_chom_arm_cheating(node_a, node_b):
"""Not to be used outside lnk_list insert_sort.
Defines how two lnk_nodes should be compared, returning 1 if
node_a is greater or equal to b and 0 otherwise using chromosome
arm as first discriminant if read_sec_locations_cheating used"""
if not node_a:
return 0
if not node_b:
return 1
if node_a.loci>=node_b.loci:
return 1
else:
return 0
def gt_by_chom_alone(node_a, node_b):
"""Not to be used outside lnk_list insert_sort.
Defines how two lnk_nodes should be compared, Suposing loci already
contains only the chromosome arm information."""
if not node_a:
return 0
if not node_b:
return 1
if (int(node_a.loci[:-1]),node_a.loci[-1])>= (int(node_b.loci[:-1]),
node_b.loci[-1]):
        return 1 # considered it the most logical
else: # default
return 0
class lnk_list:
def __init__(self, loci, val, n=False):
self.root = lnk_node(loci,val)
self.current = self.root
self.last = self.root
def start(self):
"""Resets the cursor current back to the root"""
self.current = self.root
def next(self):
"""Advance one position, returns 0 if already at the end and 1
otherwise"""
n = self.current.n
self.current = n
if n!=False:
return 1
else:
return 0
def new_node(self,loci,val):
""" Creates a new node at the end of the list"""
self.last.insert(lnk_node(loci,val))
self.last = self.last.n
def insert(self, new_node):
"""Inserts a single node at the current position"""
self.current.insert(new_node)
def __add__(self, b):
"""Add the content of a second lnk_list after the own """
if type(self)!=type(b):
raise NameError('lnk_list can only be added to ln_list')
else:
self.last.n = b.root
self.last = b.last
def insert_sort(self, gt_function = default_gt):
""" Sorts the lnk_list using insertion sort"""
sorted_list = lnk_list(self.root.loci, self.root.value)
to_rmv = self.root
self.root = self.root.n
        del(to_rmv) # given that del just removes the reference and lets
                    # the garbage collector act, creating to_rmv
                    # was not really needed, but useful to be explicit.
while self.root:
to_add = self.root
self.root = to_add.n
sorted_list.current = sorted_list.root
if gt_function(sorted_list.root, to_add):
to_add.n = sorted_list.root
sorted_list.root = to_add
#print(to_add.loci)
continue
while not gt_function(sorted_list.current.n, to_add) and sorted_list.current.n:
sorted_list.next()
sorted_list.insert(to_add)
self.root = sorted_list.root
def __str__(self):
cur = self.root
ret = ""
while cur:
ret+="[loci: "+str(cur.loci)+", value: "+str(cur.value)+"] -> "
cur = cur.n
return ret
class lnk_node:
def __init__(self, loci, val, n=False):
self.loci = loci
#self.chrom = int(loci.split(".").strip(" \nq")
self.value = val
self.n = n
def next(self):
return self.n
def insert(self, new_node):
new_node.n = self.n
self.n = new_node
#print ("inserting", new_node.loci, new_node.value)
def read_sec_locations_cheating(file_name):
    """Reads a file with sequence id, chromosomal location and geometric
    locations separated by tabs and produces a lnk_list with the
    relevant information; stores the chromosome arm and the sum of the
    geometric coordinates instead of the loci"""
with open(file_name) as fh:
fi=1
for line in fh:
loci = line.split("\t")[1]
location = ( tuple(float(x)
for x in re.split('\\(|,|\\)',line)[1:3]))
ident = (int(re.split('p|q',loci)[0]),
re.search('p|q',loci)[0], sum(location))
if fi:
to_return = lnk_list(ident,location)
fi=0
else:
to_return.new_node(ident,location)
return(to_return)
def count_close_reads_cheating(sorted_lnk_list, threshold):
    """This function assumes it is given a lnk_list sorted using
    gt_by_chom_arm as the discriminant criterion and counts how many reads
    on the same chromosome arm are closer than threshold.
    Theoretical basis for loop breaking:
    gt_by_chom_arm sorts by chromosome arm but also by the sum of the
    coordinates; knowing the sum of the coordinates we know a point falls
    on the line x + y = n, and the minimum distance between two such lines
    is equal to min_dist([0,n1], line2) = |n1-n2|/sqrt(2)"""
    max_diference_coord_sum = threshold * 1.5 # I use 1.5 instead of sqrt(2) to
    # avoid decimal errors; the exact value would be 1.4142135623730951
max_distance_squared = threshold**2
node1 = sorted_lnk_list.root
chrom1 = str(node1.loci[0])+node1.loci[1]
count_result = lnk_list(chrom1, 0)
while node1.n:
node2 = node1.n
chrom2 = str(node2.loci[0])+node2.loci[1]
dif = 0
while node2 and chrom1==chrom2 and dif < max_diference_coord_sum:
if ((node1.value[0]-node2.value[0])**2+(node1.value[1]
-node2.value[1])**2) <= max_distance_squared:
count_result.last.value+=1
dif = sum(node1.value)- sum(node2.value)
dif = max(dif,-dif)
node2 = node2.n
if not node2:
break
chrom2 = str(node2.loci[0])+node2.loci[1]
node1 = node1.n
chrom1 = str(node1.loci[0])+node1.loci[1]
    # There are repetitive pieces of code, but otherwise knowing the
    # chromosome arm without losing loci information would require
    # some kind of additional data structure
if chrom1 != count_result.last.loci:
count_result.new_node(chrom1,0)
return(count_result)
if __name__=="__main__":
our_data = read_sec_locations_cheating(sys.argv[1])
our_data.start()
our_data.insert_sort(gt_by_chom_arm_cheating)
counts = count_close_reads_cheating(our_data, float(sys.argv[2]))
counts.insert_sort(gt_by_chom_alone)
print(counts)
| true
|
3d1129ce24bae925d5f338970367b9d55c5499c5
|
Python
|
P4SSER8Y/ProjectEuler
|
/pr117/pr117.py
|
UTF-8
| 295
| 2.96875
| 3
|
[] |
no_license
|
#coding:utf8
def foo(group):
n = 50
f = [0] * (n + max(group))
f[0] = 1
for i in range(n):
for j in group:
f[i+j] += f[i]
return f[n]
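# f[i] counts the ways to tile a row of length i with the given block
# lengths: each block of length j extends a length-i tiling to length i+j.
# For example foo([1, 2]) gives the 50-step staircase count (a Fibonacci
# number); foo([1, 2, 3, 4]) is the count problem 117 asks for at n = 50.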
def pr117():
return foo([1, 2, 3, 4])
def run():
return pr117()
if __name__ == "__main__":
print(run())
| true
|
fc8db291afad84b025ec3013d898c0de46122f02
|
Python
|
iwiwi/darkopt
|
/example/chainer_mnist.py
|
UTF-8
| 3,221
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import numpy as np
import darkopt
test_batch_size = 1024
param_space = {
'unit': 2 ** np.arange(3, 10),
'batch_size': 2 ** np.arange(3, 10),
'lr': 0.5 ** np.arange(1, 20),
}
class Model(chainer.Chain):
def __init__(self, n_units):
super(Model, self).__init__(
l1=L.Linear(None, n_units),
l2=L.Linear(None, n_units),
l3=L.Linear(None, 10),
)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--n_trials', '-n', type=int, default=10,
help='Number of trials for hyper-parameter search')
args = parser.parse_args()
train, test = chainer.datasets.get_mnist()
def eval_func(param, known_best_score):
print(param)
model = L.Classifier(Model(param['unit']))
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
optimizer = chainer.optimizers.Adam(param['lr'])
optimizer.setup(model)
train_iter = chainer.iterators.SerialIterator(train, param['batch_size'])
test_iter = chainer.iterators.SerialIterator(
test, test_batch_size, repeat=False, shuffle=False)
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
# Trigger for pruning
pruned_stop_trigger = darkopt.ChainerTrigger(
# Key for the score to watch
score_key='validation/main/loss',
# If there's little chance to beat this score, we prune the training
known_best_score=known_best_score,
# Standard training termination trigger
stop_trigger=(args.epoch, 'epoch')
)
trainer = training.Trainer(updater, pruned_stop_trigger, out=args.out)
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
log_report_extension = extensions.LogReport()
trainer.extend(log_report_extension)
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
trainer.run()
return log_report_extension.log[-1]['validation/main/loss']
opt = darkopt.Optimizer(eval_func, param_space)
best_trial_result = opt.optimize(args.n_trials)
print('Best validation loss:', best_trial_result.score)
print('Best parameter:', best_trial_result.param)
if __name__ == '__main__':
main()
| true
|
53252245d72f40ca7b0f9be56822801b14f6852e
|
Python
|
alexcrichton/wasmtime-py
|
/wasmtime/_instance.py
|
UTF-8
| 3,295
| 2.671875
| 3
|
[
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
from ._ffi import *
from ctypes import *
from wasmtime import Module, Extern, Func, Table, Memory, Trap, Global
dll.wasm_instance_new.restype = P_wasm_instance_t
class Instance(object):
# Creates a new instance by instantiating the `module` given with the
# `imports` provided.
#
# The `module` must have type `Module`, and the `imports` must be an
# iterable of external values, either `Extern`, `Func`, `Table`, `Memory`,
# or `Global`.
#
# Raises an error if instantiation fails (e.g. linking or trap) and
# otherwise initializes the new instance.
def __init__(self, module, imports):
if not isinstance(module, Module):
raise TypeError("expected a Module")
import_types = module.imports()
if len(imports) != len(import_types):
raise RuntimeError("wrong number of imports provided")
imports_ffi = (P_wasm_extern_t * len(import_types))()
for i, ty in enumerate(import_types):
val = imports[i]
if isinstance(val, Extern):
imports_ffi[i] = val.__ptr__
elif isinstance(val, Func):
imports_ffi[i] = val.as_extern().__ptr__
elif isinstance(val, Memory):
imports_ffi[i] = val.as_extern().__ptr__
elif isinstance(val, Global):
imports_ffi[i] = val.as_extern().__ptr__
elif isinstance(val, Table):
imports_ffi[i] = val.as_extern().__ptr__
else:
raise TypeError("expected an external item as an import")
trap = P_wasm_trap_t()
ptr = dll.wasm_instance_new(
module.store.__ptr__, module.__ptr__, imports_ffi, byref(trap))
if not ptr:
if trap:
raise Trap.__from_ptr__(trap)
raise RuntimeError("failed to compile instance")
self.__ptr__ = ptr
self._module = module
@classmethod
def __from_ptr__(cls, ptr, module):
ty = cls.__new__(cls)
if not isinstance(ptr, P_wasm_instance_t):
raise TypeError("wrong pointer type")
ty.__ptr__ = ptr
ty._module = module
return ty
# Returns the exports of this module
def exports(self):
externs = ExternTypeList()
dll.wasm_instance_exports(self.__ptr__, byref(externs.vec))
ret = []
for i in range(0, externs.vec.size):
ret.append(Extern.__from_ptr__(externs.vec.data[i], externs))
return ret
# Gets an export from this module by name, returning `None` if the name
# doesn't exist.
def get_export(self, name):
if not hasattr(self, '_export_map'):
self._export_map = {}
exports = self.exports()
for i, export in enumerate(self._module.exports()):
self._export_map[export.name()] = exports[i]
if name in self._export_map:
return self._export_map[name]
else:
return None
def __del__(self):
if hasattr(self, '__ptr__'):
dll.wasm_instance_delete(self.__ptr__)
class ExternTypeList(object):
def __init__(self):
self.vec = wasm_extern_vec_t(0, None)
def __del__(self):
dll.wasm_extern_vec_delete(byref(self.vec))
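# A minimal usage sketch (hypothetical variable names; only calls this module
# itself defines are shown):
#
#     instance = Instance(module, [])          # instantiate with no imports
#     run = instance.get_export('run')         # returns None if not exported
#     print(instance.exports())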
| true
|
a42fd149f6f2557ae6e8b61c025a52111e8b2e91
|
Python
|
Kimuda/Phillip_Python
|
/longer_cool_code/movies.py
|
UTF-8
| 1,517
| 3.671875
| 4
|
[] |
no_license
|
movie1 = {
"name":"The grey",
"rating":79,
"year":2012,
"actors":["Liam Neeson","Frank Grillo","Dermot Mulroney","Dallas Roberts","Joe Anderson","Nonso Anozie"]
}
movie2 = {
"name":"LOTR: TFOTR"
}
movie2["rating"]=91
movie2["year"]=2002
movie2["actors"]=["Elijah Wood","Ian McKellen","Viggo Mortensen","Dermot Mulroney","Liv Tyler","Sean Bean","Cate Blanchett"]
movies = [movie1,movie2]
question=""
while question!="N" and question!="Y":
question = input("Do you want to enter another movie? (Y/N) ")
while question=="Y":
movie = {}
movie["name"]= input("Whats the name? ")
movie["rating"]= int(input("Whats the rating? "))
movie["year"]= int(input("when was released? "))
actors_str = input("list of actors (comma separated) ")
actor=""
actors=[]
for char in actors_str:
if char!=",":
actor = actor + char
else:
actors = actors + [actor]
actor = ""
if actor!="":
actors = actors + [actor]
movie["actors"]=actors
movies += [movie]
question=""
while question!="N" and question!="Y":
question = input("Do you want to enter another movie? (Y/N) ")
print (movies)
actor_q = input ("which actor should we look for?")
answer = []
# got into the list of movies
for movie in movies:
# get the actors
actors = movie["actors"]
# check each actor
if actor_q in actors:
print (movie["name"])
answer = answer + [movie]
# for actor in actors:
# if actor == actor_q:
# print (movie["name"])
print (answer)
| true
|
04dcb407acc970f3e68e132d660ea0f7e49c71e6
|
Python
|
bopopescu/licorn
|
/daemon/threads.py
|
UTF-8
| 33,467
| 2.65625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Licorn Daemon threads classes.
Worth reads (among many others):
* http://code.google.com/p/python-safethread/w/list
* http://www.python.org/dev/peps/pep-0020/
Copyright (C) 2010 Olivier Cortès <oc@meta-it.fr>
Licensed under the terms of the GNU GPL version 2.
"""
import time, __builtin__
from threading import Thread
from Queue import Queue
from licorn.foundations import logging, exceptions
from licorn.foundations import process, pyutils
from licorn.foundations.threads import RLock, Event
from licorn.foundations.styles import *
from licorn.foundations.ltrace import *
from licorn.foundations.ltraces import *
class BaseLicornThread(Thread):
""" A simple class of thread which records its own instances, and whose
    start() method returns the current instance. This allows instantiating
    and starting the thread in one line.
"""
def __init__(self, *a, **kw):
try:
# using self.__class__ will fill subclasses._instances,
# instead of BaseLicornThread._instances.
self.__class__._instances.append(self)
except AttributeError:
self.__class__._instances = [ self ]
Thread.__init__(self, *a, **kw)
def start(self):
""" Just a handy method that allows us to do::
>>> th = BaseLicornThread(...).start()
"""
Thread.start(self)
return self
def stop(self):
try:
self.__class__._instances.remove(self)
except ValueError:
pass
class LicornThread(Thread):
"""
A simple thread with an Event() used to stop it properly, and a Queue() to
    get events from other threads asynchronously.
    Every subclass must implement a process_data() method, which will be called
by the main loop until the thread stops.
"""
def __init__(self, tname=None):
Thread.__init__(self)
self.name = tname or self.__class__.__name__
        # trap the original gettext translator, to avoid the builtin '_'
        # triggering an exception every time we need to translate a string.
try:
self._ = __builtin__.__dict__['_orig__']
except KeyError:
            # In case our threaded gettext is not set up. E.g. in the WMI we
            # don't set it up: Django does it, but much later than when
            # we initialize. Thus, we take the original gettext as fallback.
self._ = __builtin__.__dict__['_']
self._stop_event = Event()
self._input_queue = Queue()
assert ltrace(TRACE_THREAD, '%s initialized' % self.name)
def start(self):
""" Just a handy method that allows us to do::
>>> th = LicornThread(...).start()
"""
Thread.start(self)
return self
def dispatch_message(self, msg):
""" get an incoming message in a generic way. """
self._input_queue.put(msg)
def run(self):
""" Process incoming messages until stop Event() is set. """
# don't call Thread.run(self), just override it.
assert ltrace(TRACE_THREAD, '%s running' % self.name)
while not self._stop_event.isSet():
data = self._input_queue.get()
if data is None:
break
self.process_data(data)
self._input_queue.task_done()
assert ltrace(TRACE_THREAD, '%s ended' % self.name)
def stop(self):
""" Stop current Thread
and put a special None entry in the queue, to be
sure that self.run() method exits properly. """
assert ltrace(TRACE_THREAD, '%s stopping' % self.name)
self._stop_event.set()
self._input_queue.put(None)
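# Illustrative sketch (not part of the original module): a minimal
# LicornThread subclass only needs process_data(); messages are fed in
# via dispatch_message() and the thread is shut down with stop():
#
#     class Echo(LicornThread):
#         def process_data(self, data):
#             print('echo:', data)
#
#     th = Echo().start()
#     th.dispatch_message('hello')
#     th.stop()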
class LicornBasicThread(BaseLicornThread):
""" A simple Thread with an Event(), used to stop it properly. Threaded
gettext is setup in the :meth:`__init__` method.
You must implement a method named `run_action_method` which does
**one** processing at a time. This method will be automatically run
in a `while` loop. It will be Exception-immune: any encountered
exception is printed with a full stack trace, and next run goes on.
..warning:: Don't put any infinite loop in your `run_action_method`,
else the thread will never stop().
Optional:
- if a method named `pre_run_method` exists, it is run before entering
the main while loop.
- if a method named `post_run_method` exists, it is run after exiting
the main while loop.
        - you should probably overload :meth:`dump_status` to provide
          more information about the current thread.
.. versionadded:: a long while ago, I can't even remember when. Before
version 1.2, this is sure.
"""
@property
def pretty_name(self):
return stylize(ST_NAME, self.name)
def __init__(self, *args, **kwargs):
# Pop our args to not bother upper classes.
tname = kwargs.pop('tname', None)
licornd = kwargs.pop('licornd', None)
BaseLicornThread.__init__(self, *args, **kwargs)
self.name = tname or self.__class__.__name__
self.__licornd = licornd
        # trap the original gettext translator, to avoid the builtin '_'
        # triggering an exception every time we need to translate a string.
try:
self._ = __builtin__.__dict__['_orig__']
except KeyError:
            # In case our threaded gettext is not set up. E.g. in the WMI we
            # don't set it up: Django does it, but much later than when
            # we initialize. Thus, we take the original gettext as fallback.
self._ = __builtin__.__dict__['_']
self._stop_event = Event()
assert ltrace(TRACE_THREAD, '%s initialized.' % self.name)
@property
def licornd(self):
return self.__licornd
def dump_status(self, long_output=False, precision=None, as_string=False):
if as_string:
return '%s%s (stop=%s)' % (
stylize(ST_RUNNING
if self.is_alive() else ST_STOPPED, self.name),
'&' if self.daemon else '',
self._stop_event.is_set()
)
else:
# TODO: more details
return process.thread_basic_info(self)
def run(self):
""" default run method, which calls:
* pre_run_method one time without arguments, if it exists.
* run_action_method multiple times during a while loop until stopped.
* post_run_method one time without arguments, if it exists.
.. versionadded:: 1.2.4
In previous versions, this run method didn't exist at all
(this call was purely abstract).
"""
# don't call Thread.run(self), just override it.
assert ltrace(TRACE_THREAD, '%s running' % self.name)
assert hasattr(self, 'run_action_method')
if hasattr(self, 'pre_run_method'):
try:
getattr(self, 'pre_run_method')()
except:
logging.exception(_(u'{0}: Exception in self.pre_run_method(), aborting.'),
(ST_NAME, self.name))
self.finish()
return
while not self._stop_event.is_set():
try:
self.run_action_method()
except:
logging.exception(_(u'{0}: Exception in self.run_action_method(), '
u'continuing.'), (ST_NAME, self.name))
if hasattr(self, 'post_run_method'):
try:
getattr(self, 'post_run_method')()
except:
logging.exception(_(u'{0}: Exception in self.post_run_method()'),
(ST_NAME, self.name))
self.finish()
def finish(self):
assert ltrace(TRACE_THREAD, '%s ended' % self.name)
def stop(self):
""" Stop current Thread. """
assert ltrace(TRACE_THREAD, '%s stopping' % self.name)
self._stop_event.set()
BaseLicornThread.stop(self)
class GQWSchedulerThread(BaseLicornThread):
""" Try to dynamically adapt the number of workers for a given qsize.
.. warning:: this scheduler is very basic, and in loaded conditions,
the priorities will not be respected: if all workers are currently
handling jobs, any incoming one (even with a higher priority) will
        have to wait before being processed.
This can eventually be avoided by the use of greenlets everywhere
in licorn code, but this will need a deeper level of code rewrite.
"""
def __init__(self, klass, *a, **kw):
BaseLicornThread.__init__(self, *a, **kw)
self.scheduled_class = klass
        self._stop_event = Event()
        self.sleep_lock = RLock()
        # initialized here so that dump_status() works before the first loop turn
        self.sleep_time = 0.0
        self.sleep_start_time = time.time()
        self.name = '%s(%s)' % (self.__class__.__name__, klass.__name__)
        # trap the original gettext translator, to avoid the builtin '_'
        # triggering an exception every time we need to translate a string.
        # the try/except block is for the testsuite, which uses this thread
        # as a base class, but doesn't handle on-the-fly language switching.
try:
self._ = __builtin__.__dict__['_orig__']
except KeyError:
self._ = __builtin__.__dict__['_']
def dump_status(self, long_output=False, precision=None, as_string=False):
if as_string:
return _('{0}{1} for {2} [{3} workers; wake up in {4}]\n{5}').format(
stylize(ST_RUNNING if self.is_alive() else ST_STOPPED,
self.__class__.__name__),
'&' if self.daemon else '',
self.scheduled_class.__name__,
# `.instances` is the number of instances
self.scheduled_class.instances,
pyutils.format_time_delta(self.sleep_start_time + self.sleep_time - time.time()),
'\n'.join(('\t%s' % t.dump_status(long_output, precision, as_string))
# `._instances` is the list of threads.
for t in self.scheduled_class._instances)
)
else:
return dict(workers=[ t.dump_status(long_output, precision, as_string)
for t in self.scheduled_class._instances ],
wake_up=self.sleep_start_time + self.sleep_time,
**process.thread_basic_info(self))
def run(self):
assert ltrace_func(TRACE_THREAD)
def throttle_up():
""" See if there are too many or missing peers to handle queued jobs,
and spawn a friend if needed, or terminate myself if no more useful.
"""
# we acquire the class lock to "freeze" starts/ends while we count
# everyone and throttle workers. This can make all workers wait, but
# doesn't happen that frequently.
with cls.lock:
# FIRST, see if peers are in need of help: if there are
                # still a lot of jobs to do, spawn another peer to get the
# job done, until the configured thread limit is reached.
n_instances = cls.instances
if qsize > 0 and n_instances == cls.busy:
if n_instances < cls.peers_max:
# sleep a lot, because we start a lot of workers,
                        # but no more than 10 seconds.
return min(10.0, float(self.spawn_worker(min(qsize,
cls.peers_max - n_instances))))
else:
if time.time() - cls.last_warning > 5.0:
cls.last_warning = time.time()
                            logging.progress(_(u'{0}: maximum workers already '
                                u'running, but qsize={1}.').format(
self.name, qsize))
# all workers are currently busy, we can wait a lot
# before things settle. Keep CPU cycles for our workers.
return 10.0
else:
# things have not changed. Wait a little, but not that
# much in case things go up again fast.
return 2.0
def throttle_down():
""" See if there are too many peers to handle queued jobs, and then
            send a terminate signal (put a ``None`` job in the queue), so that
one of the peers will terminate.
"""
# we need to get the class lock, else peers could stop() while we are
# stopping too, and the daemon could be left without any thread for a
# given class of them.
with cls.lock:
                # See if jobs have been worked out during the time
                # we spent doing our job. If things have settled, signal
                # one of the peers to terminate (this could be me, or not),
# but only if there are still more peers than the configured
# lower limit.
n_instances = cls.instances
if qsize <= n_instances and n_instances > cls.busy:
# sleep a lot if we terminate a lot of workers, but
                    # no more than 10 seconds to avoid being flooded
# by next "queue attack".
return min(10.0, float(self.stop_worker(
n_instances - qsize - cls.peers_min)))
# things have not changed. Wait a little, but not that
# much in case things go up again fast.
return 2.0
cls = self.scheduled_class
q = cls.input_queue
prev_qsize = 0
from licorn.core import LMC
while not self._stop_event.is_set():
with cls.lock:
qsize = q.qsize()
with self.sleep_lock:
if qsize >= prev_qsize:
self.sleep_time = throttle_up()
else:
self.sleep_time = throttle_down()
# from time to time, or if we are in the process of settling down,
# ask the thread-cleaner to search for dead-threads to wipe from memory.
try:
if len(cls._instances) != cls.instances:
try:
LMC.licornd.clean_objects(delay=1.0)
except AttributeError:
# if daemon is currently stopping, the thread-cleaner
# could be already wiped out. Harmless.
pass
except AttributeError:
# the first time, cls._instances is not yet set. harmless.
pass
prev_qsize = qsize
self.sleep_start_time = time.time()
with self.sleep_lock:
# don't hold the lock while we sleep, this would cause
# dump_status() to hang.
sleep_time = self.sleep_time
if sleep_time < 0:
# Circumvent #901: in some very rare conditions, sleep_time
# seems to be negative, which raises an "IOError:
# Invalid Argument" exception on Unixes (cf. http://bytes.com/topic/python/answers/29389-behaviour-time-sleep-negative-arg).
sleep_time = 0
time.sleep(sleep_time)
assert ltrace_func(TRACE_THREAD, True)
def stop(self):
assert ltrace_func(TRACE_THREAD)
# Stop blocked workers
self.stop_worker(self.scheduled_class.instances)
# Stop sleeping workers
for inst in self.scheduled_class._instances:
inst.stop()
self._stop_event.set()
assert ltrace_func(TRACE_THREAD, True)
def spawn_worker(self, number=None):
for i in range(number or 1):
self.scheduled_class().start()
return number or 1.0
def stop_worker(self, number=None):
for i in range(number or 1):
self.scheduled_class.input_queue.put_nowait(
self.scheduled_class.stop_packet)
return number or 1.0
class AbstractTimerThread(LicornBasicThread):
""" Base (abstract) class for any advanced timer thread:
* the thread can loop any number of time (0 to infinite). 0 means it is
a one shot timer, 1 or more means it is like a scheduled job (but no
date handling yet).
* it can wait a specific time before starting to loop.
* the timer can be reset at any moment.
:param time: is a time.time() object before first execution, or for
one-shot jobs ("AT" like)
:param count: eventually the number of time you want the job to repeat (>0)
default: None => infinite loop
:param delay: a float in seconds to wait between each call when looping.
Only used in loop.
If looping (infinite or not), delay must be set.
args and kwargs are for target, which will be executed at least once.
        tname is an optional thread name, to differentiate each, if you
launch more than one JobThread.
.. versionadded:: 1.2.4
.. warning:: This class is an abstract one, it does nothing besides
sleeping. All inheriting classes must implement a
``run_action_method``, else they will fail.
"""
def __init__(self, *args, **kwargs):
# Pop our args to not bother upper classes.
time = kwargs.pop('time', None)
count = kwargs.pop('count', None)
delay = kwargs.pop('delay', None)
daemon = kwargs.pop('daemon', False)
LicornBasicThread.__init__(self, *args, **kwargs)
self.time = time
self.delay = delay
self.count = count
self.daemon = daemon
# lock used when accessing self.time_elapsed and self.sleep_delay
self._time_lock = RLock()
        # these two variables are internal to the sleep() method, but can be
        # used R/O in remaining_time().
self.__time_elapsed = 0.0
self.__sleep_delay = None
#: used when we want to reset the timer, without running our action.
self._reset_event = Event()
#: used when we force running our action without waiting the full
#: delay. This event is triggered from the outside using methods.
self._trigger_event = Event()
self.__trigger_delay = None
if self.count is None:
self.loop = True
else:
if self.count <= 0:
raise exceptions.BadArgumentError('count can only be > 0')
elif self.count >= 1:
self.loop = False
assert ltrace(TRACE_THREAD, '| AbstractTimerThread.__init__(time=%s, '
'count=%s, delay=%s, loop=%s)' % (self.time,
self.count, self.delay, self.loop))
if (self.loop or self.count > 1) and self.delay is None:
raise exceptions.BadArgumentError(
'must provide a delay for looping.')
def sleep(self, delay=None):
""" sleep at most self.delay, but with smaller intervals, to allow
        interruption without waiting until the end of self.delay, which can
be very long.
"""
if delay:
self.__sleep_delay = delay
else:
self.__sleep_delay = self.delay
assert ltrace(TRACE_THREAD, '| %s.sleep(%s)' % (self.name, delay))
with self._time_lock:
self.__time_elapsed = 0.0
while self.__time_elapsed < self.__sleep_delay:
#print "waiting %.1f < %.1f" % (current_delay, self.delay)
if self._stop_event.is_set():
break
if self._reset_event.is_set():
logging.progress(_(u'{0}: timer reset after {1} elapsed.').format(
stylize(ST_NAME, self.name),
pyutils.format_time_delta(self.__time_elapsed,
big_precision=True)))
with self._time_lock:
self.__time_elapsed = 0.0
self._reset_event.clear()
if self._trigger_event.is_set():
with self._time_lock:
if self.__trigger_delay is None:
self.__time_elapsed = self.__sleep_delay
else:
self.__time_elapsed = self.__sleep_delay - self.__trigger_delay
self._trigger_event.clear()
time.sleep(0.01)
with self._time_lock:
self.__time_elapsed += 0.01
assert ltrace(TRACE_THREAD, '| %s.sleep(EXIT)' % self.name)
def trigger_event(self, delay=None):
self.__trigger_delay = delay
self._trigger_event.set()
trigger = trigger_event
def reset_timer(self):
self._reset_event.set()
#: :meth:`reset` is an alias to :meth:`reset_timer`
reset = reset_timer
def remaining_time(self):
""" Returns the remaining time until next target execution, in seconds.
"""
with self._time_lock:
if self.__sleep_delay is None:
raise exceptions.LicornRuntimeException(
'%s: not yet started.' % self.name)
return self.__sleep_delay - self.__time_elapsed
def run(self):
""" TODO. """
self.current_loop = 0
# first occurence: we need to wait until time if it is set.
if self.time:
# only sleep 'til initial time if not already passed. Else just run
# the loop, to have the job done as soon as possible.
first_delay = self.time - time.time()
if first_delay > 0:
self.sleep(first_delay)
elif self.delay:
# we just have to wait a delay before starting (this is a
# simple timer thread).
self.sleep()
while self.loop or self.current_loop < self.count:
if self._stop_event.is_set():
break
self.run_action_method()
self.current_loop += 1
self.sleep()
LicornBasicThread.finish(self)
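# Illustrative sketch (not part of the original module): a subclass of
# AbstractTimerThread only has to provide a run_action_method(); e.g. a
# heartbeat printed five times, thirty seconds apart:
#
#     class Heartbeat(AbstractTimerThread):
#         def run_action_method(self):
#             print('still alive')
#
#     Heartbeat(tname='heartbeat', count=5, delay=30.0).start()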
class GenericQueueWorkerThread(AbstractTimerThread):
""" Inherits from :class:`AbstractTimerThread` to be able to be interrupted
in the middle of a :meth:`sleep` triggered by a ``job_delay``. The
:meth:`run` method of the :class:`AbstractTimerThread` is totally
overriden though, and is never called, this is intended. We just need
the :meth:`sleep` one. The :meth:`stop` method comes from
:class:`LicornBasicThread`.
.. versionadded::
- created for the 1.2.5
- enhanced for the 1.5: add the ability to stop the thread in the
middle of a sleep, not only when the thread is idle.
"""
_setup_done = False
lock = RLock()
counter = 0
instances = 0
busy = 0
last_warning = 0
#: what we need to put in the queue for a worker to stop.
stop_packet = (-1, None, None, None)
scheduler = None
@classmethod
def setup(cls, licornd, input_queue, peers_min, peers_max, daemon=False):
""" Setup The Worker class. Starts the Scheduler thread, and return it.
You have to store the scheduler reference somewhere!
To stop the workers, invoke the scheduler `stop()` method.
"""
if cls._setup_done:
return
cls._instance = []
#cls.high_bypass = high_bypass
cls.peers_min = peers_min
cls.peers_max = peers_max
#: a reference to the licorn daemon
cls.licornd = licornd
#: should the worker instances be daemon threads?
cls.daemon = daemon
# the very first instance of the class has to setup everything
# for its future peers.
cls.input_queue = input_queue
# start the class scheduler, which will manage worker threads
cls.scheduler = GQWSchedulerThread(cls).start()
cls._setup_done = True
# start the first worker, which needs `_setup_done` to be True.
cls().start()
return cls.scheduler
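    # Illustrative sketch (not part of the original module), assuming a
    # `licornd` daemon object and a shared job queue: setup() wires the
    # scheduler and the first worker, and the returned scheduler stops them all:
    #
    #     q = Queue()
    #     sched = ServiceWorkerThread.setup(licornd, q, peers_min=1, peers_max=10)
    #     ...
    #     sched.stop()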
def __init__(self, *a, **kw):
cls = self.__class__
if not cls._setup_done:
raise RuntimeError(_(u'Class method {0}.setup() has not been '
u'called!').format(cls.__name__))
kw.update({
#: the :class:`threading.Thread` attributes.
'tname' : '%s-%03d' % (cls.__name__, cls.counter),
'daemon' : cls.daemon,
'licornd' : cls.licornd,
# FAKE: we pass 1 for the AbstractTimerThread.__init__() not to
# bother us with 'must provide a delay for looping' check error.
# Anyway, as we override completely its run() method, this has
# no importance at all for the runtime, *we* loop like we want.
'count' : 1,
})
AbstractTimerThread.__init__(self, *a, **kw)
# trap the original gettext translator, to avoid the builtin '_'
# trigerring an exception everytime we need to translate a string.
# the try/except block is for the testsuite, which uses this thread
# as base class, but doesn't handle on-the-fly multilingual switch.
try:
self._ = __builtin__.__dict__['_orig__']
except KeyError:
self._ = __builtin__.__dict__['_']
#: used in dump_status (as a display info only), to know which
#: object our target function is running onto.
self.job = None
self.priority = None
self.job_args = None
self.job_kwargs = None
self.job_start_time = None
#: used to sync and not crash between run() and dump_status().
self.lock = RLock()
self.jobbing = Event()
#assert ltrace_locks(cls.lock)
with cls.lock:
cls.counter += 1
cls.instances += 1
def dump_status(self, long_output=False, precision=None, as_string=True):
""" get detailled thread status. """
with self.lock:
if as_string:
return '%s%s [%s]' % (
stylize(ST_RUNNING
if self.is_alive() else ST_STOPPED, self.name),
'&' if self.daemon else '',
'on %s since %s' % (self.__format_job(),
pyutils.format_time_delta(
time.time() - self.job_start_time,
big_precision=True))
if self.jobbing.is_set() else 'idle'
)
else:
return dict(job=self.job,
job_args=self.job_args,
job_kwargs=self.job_kwargs,
start_time=self.job_start_time,
jobbing=self.jobbing.is_set(),
**process.thread_basic_info(self))
def __format_job(self):
try:
job_name = self.job.__name__
except AttributeError:
# Pyro's `core._RemoteMethod` class has no __name__
# retro-compatibility attribute. We must fake to
# find the real name, but at least with Python, we can.
try:
job_name = getattr(self.job, '_RemoteMethod__name')
except:
# In case it wasn't a Pyro method, use a sane fallback.
job_name = str(self.job)
return stylize(ST_ON, '%s(%s%s%s)' % (
job_name,
', '.join([str(j) for j in self.job_args])
if self.job_args else '',
', ' if self.job_args and self.job_kwargs else '',
            ', '.join(['%s=%s' % (key, value) for key, value
                in self.job_kwargs.items()])
if self.job_kwargs else ''))
def run(self):
""" A queue worker can be interrupted in two ways:
- calling its :meth:`stop` method
- putting ``(None, …)`` in its queue.
"""
assert ltrace_func(TRACE_THREAD)
cls = self.__class__
q = cls.input_queue
while not self._stop_event.is_set():
# We need to store the item in a separate variable in case it is
# badly formed (see #898). Without this, we won't be able to
# display it in the message in case of an exception.
temp_data = q.get()
try:
                self.priority, self.job, self.job_args, self.job_kwargs = temp_data
except ValueError:
logging.warning(_(u'{0}: invalid queue item "{1}", '
u'terminating.').format(self.name, temp_data))
# Even with a bad-built item, we successfully poped it from the
# queue. Notify any waiters before quitting.
q.task_done()
break
else:
del temp_data
if self.job is None:
# None is a fake message to unblock the q.get(), when the
# main process terminates the current thread with stop(). We
# emit task_done(), else the main process will block forever
# on q.join().
q.task_done()
# Don't set self._stop_event(), just break: when we encounter
# the 'stop job' item, the QueueWorkerScheduler is in the
# process of calling our stop() method too. Setting the event
# from here is thus not necessary; the loop will already break.
break
with cls.lock:
cls.busy += 1
with self.lock:
self.jobbing.set()
self.job_start_time = time.time()
            # this will at most sleep a little if a 'job_delay' was requested,
            # or at best (with 0.0s) just yield to another thread needing the
            # CPU in the Python interpreter before starting to process the job.
            self.sleep(self.job_kwargs.pop('job_delay', 0.0))
# In case the sleep() was interrupted by a stop(), we need to
# check, to avoid running the job in a stopping daemon.
if self._stop_event.is_set():
with cls.lock:
cls.busy -= 1
with self.lock:
self.jobbing.clear()
q.task_done()
logging.warning(_(u'{0}: stopped during sleep, job {1} will '
u'not be run at all.').format(self.name, self.__format_job()))
break
#assert ltrace(TRACE_THREAD, '%s: running job %s' % (
# self.name, self.job))
try:
                self.job(*self.job_args, **self.job_kwargs)
except Exception:
logging.exception(_(u'{0}: Exception encountered while running '
u'{1}'), self.name, self.__format_job())
q.task_done()
with self.lock:
self.jobbing.clear()
with cls.lock:
cls.busy -= 1
self.job = None
self.priority = None
self.job_args = None
self.job_kwargs = None
self.job_start_time = None
if q.qsize() == 0:
logging.progress(_(u'%s: queue is now empty, going '
u'asleep waiting for jobs.') % self.name)
# when ending, be sure to notice everyone interested.
with cls.lock:
cls.instances -= 1
class ServiceWorkerThread(GenericQueueWorkerThread):
pass
class ACLCkeckerThread(GenericQueueWorkerThread):
pass
class NetworkWorkerThread(GenericQueueWorkerThread):
pass
class TriggerTimerThread(AbstractTimerThread):
""" A Timer Thread whose sole action is to trigger an
:class:`~threading.Event`. It is used in conjunction of the
:class:`TriggerWorkerThread` (which waits on the
:class:`~threading.Event`).
.. versionadded:: 1.2.4
"""
def __init__(self, trigger_event, tname=None, time=None, count=None,
delay=0.0, daemon=False):
AbstractTimerThread.__init__(self, time=time, count=count, delay=delay,
daemon=daemon, tname=tname)
self._trigger_event = trigger_event
def run_action_method(self):
return self._trigger_event.set()
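# Illustrative sketch (not part of the original module): the timer/worker pair
# is wired through a shared Event; assuming a `refresh_cache` callable:
#
#     ev = Event()
#     worker = TriggerWorkerThread(target=refresh_cache, trigger_event=ev).start()
#     timer = TriggerTimerThread(ev, delay=60.0).start()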
class TriggerWorkerThread(LicornBasicThread):
    def __init__(self, *a, **kw):
        assert ltrace(TRACE_THREAD, '| TriggerWorkerThread.__init__()')
        # positional arguments arrive as a tuple: copy them into a list, so the
        # mandatory arguments can be popped off the front in declaration order.
        a = list(a)
        try:
            target = a.pop(0)
        except IndexError:
            try:
                target = kw.pop('target')
            except KeyError:
                raise exceptions.BadArgumentError(_(u'{0}: the "target" first '
                    u'(or named) argument is mandatory!').format(
                        self.__class__.__name__))
        try:
            trigger_event = a.pop(0)
        except IndexError:
            try:
                trigger_event = kw.pop('trigger_event')
            except KeyError:
                raise exceptions.BadArgumentError(_(u'{0}: the "trigger_event" '
                    u'second (or named) argument is mandatory!').format(
                        self.__class__.__name__))
        try:
            args = a.pop(0)
        except IndexError:
            args = kw.pop('args', ())
        try:
            kwargs = a.pop(0)
        except IndexError:
            kwargs = kw.pop('kwargs', {})
        LicornBasicThread.__init__(self, *a, **kw)
self._disable_event = Event()
self._currently_running = Event()
self._trigger_event = trigger_event
self.target = target
self.args = args
self.kwargs = kwargs
# used on manual triggering only.
self.one_time_args = None
self.one_time_kwargs = None
def dump_status(self, long_output=False, precision=None, as_string=False):
if as_string:
return '%s%s (%s)' % (
stylize(ST_RUNNING if self.is_alive() else ST_STOPPED, self.name),
'&' if self.daemon else '',
stylize(ST_OFF, 'disabled') if self._disable_event.is_set()
else '%s, %s' % (
stylize(ST_ON, 'enabled'),
stylize(ST_ON, 'active') if
self._currently_running.is_set() else 'idle'
)
)
else:
return process.thread_basic_info(self)
def active(self):
""" Returns ``True`` if the internal worker is running, else ``False``
(the thread can be considered idle). """
return self._currently_running.is_set()
#: an alias from :meth:`running` to :meth:`active`.
running = active
def idle(self):
""" Exact inverse of :meth:`active` method. """
return not self._currently_running.is_set()
def enabled(self):
""" Returns ``True`` if not currently disabled. """
return not self._disable_event.is_set()
def disabled(self):
"""Exact inverse of :meth:`enabled` method. """
return self._disable_event.is_set()
def run(self):
# don't call Thread.run(self), just override it.
assert ltrace(TRACE_THREAD, '%s running' % self.name)
while True:
self._trigger_event.wait()
# be sure to wait at next loop.
self._trigger_event.clear()
if self._stop_event.is_set():
assert ltrace(TRACE_THREAD, '%s: breaking our loop now.' % self.name)
break
if self._disable_event.is_set():
assert ltrace(TRACE_THREAD, '%s: triggered, but currently '
'disabled: not doing anything.' % self.name)
continue
            # use one_time arguments if we have been manually triggered.
if self.one_time_args is not None:
args = self.one_time_args
self.one_time_args = None
else:
args = self.args
if self.one_time_kwargs is not None:
kwargs = self.one_time_kwargs
self.one_time_kwargs = None
else:
kwargs = self.kwargs
assert ltrace(TRACE_THREAD, '%s: triggered, running target %s(%s, %s)'
                % (self.name, self.target, ', '.join(str(a) for a in args),
                    ', '.join(str(k) for k in kwargs)))
self._currently_running.set()
self.target(*args, **kwargs)
self._currently_running.clear()
self.finish()
def trigger(self, *args, **kwargs):
""" Trigger a worker run if we are not currently stopping. """
assert ltrace(TRACE_THREAD, '| TriggerWorkerThread.trigger()')
self.one_time_args = args
self.one_time_kwargs = kwargs
if not self._stop_event.is_set():
return self._trigger_event.set()
#: An alias from :meth:`manual_trigger` to :meth:`trigger` for purists.
manual_trigger = trigger
def disable(self):
""" Disable next runs (until re-enabled), but only if we are not
currently stopping. """
assert ltrace(TRACE_THREAD, '| TriggerWorkerThread.disable()')
if not self._stop_event.is_set():
return self._disable_event.set()
def enable(self):
assert ltrace(TRACE_THREAD, '| TriggerWorkerThread.enable()')
return self._disable_event.clear()
def stop(self):
""" Stop the thread properly (things must be done in a certain order,
internally). """
# Stop the base things. At this moment, the thread is either waiting
# on the trigger, or running.
LicornBasicThread.stop(self)
# Then be sure to release the wait() on the trigger,
# else we will never quit...
self._trigger_event.set()
class LicornJobThread(AbstractTimerThread):
def __init__(self, target, tname=None, time=None, count=None,
delay=0.0, target_args=(), target_kwargs={}, daemon=False):
""" TODO: this class is meant to be removed in version 1.3+, replaced
            by the pair
:class:`TriggerWorkerThread` / :class:`TriggerTimerThread`.
"""
AbstractTimerThread.__init__(self, time=time, count=count, delay=delay,
daemon=daemon, tname=tname)
self.target = target
self.target_args = target_args
self.target_kwargs = target_kwargs
# a parallel thread that will run the real job, to be able to continue
# to countdown the delay while job is running.
self.job_runner = None
def dump_status(self, long_output=False, precision=None, as_string=False):
""" get detailled thread status. """
if as_string:
return '%s%s [%s]' % (
stylize(ST_RUNNING
if self.job_runner else ST_STOPPED, self.name),
'&' if self.daemon else '',
'%s(%s%s%s)' % (self.target,
self.target_args if self.target_args else '',
', ' if (self.target_args and self.target_kwargs) else '',
self.target_kwargs if self.target_kwargs else '')
if self.job_runner else 'sleeping, wake up in %s'
% pyutils.format_time_delta(self.remaining_time())
)
else:
# TODO: more details
return process.thread_basic_info(self)
def run_action_method(self):
""" A method that will wrap self.target into a JobRunner simple
:class:`~threading.Thread`. """
'''
if (not self.job_runner) or (not self.job_runner.is_alive()):
self.job_runner = Thread(
target=self.target,
name=self.name + '.JobRunner',
args=self.target_args,
kwargs=self.target_kwargs
)
self.job_runner.daemon = True
self.job_runner.start()
'''
self.target(*self.target_args, **self.target_kwargs)
| true |
70b36d0a3334a45334be9eb6e8b714b7ad865c22 | Python | yienlyu/Python-Implementations | /Data_Structures/server.py | UTF-8 | 2,193 | 3.515625 | 4 | [] | no_license |
# data format: <user_name> <user_id> <click_timestamp> <product_id> <sale>
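# e.g. one (hypothetical) record: "alice u01 1566300000 14667 1"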
# store the customers' information with a list of tuples
def store_allInfo(a, all_info):
a = tuple(str(x) for x in a.split(" "))
all_info.append(a)
# store the products' information with a dictionary {product_id : {(user_id, sale), ...}}
def store_productInfo(a, product_info):
a = tuple(str(x) for x in a.split(" "))
if a[3] in product_info:
product_info.get(a[3]).add((a[0], a[4]))
else:
product_info[a[3]] = {(a[0], a[4])}
# store by customers with a dictionary {user_id : {(click_timestamp, product_id)}}
# def store_costumerInfo():
# count and print the information of the product's sales
def count_sales(sales_list, product_id, click):
purchase = 0
users = []
num = len(sales_list)
for i in range(num):
purchase = purchase + int(sales_list[i][1])
users.append(sales_list[i][0])
print(f'product {product_id}: {purchase} purchase / {click} click')
for i in range(num):
print(users[i])
# print sales and user who purchase among all clicks of the product (using filter)
def product_sales(product_id, product_info):
click = len(product_info.get(product_id))
sales_list = list(filter(lambda sale : sale[1] > "0", product_info.get(product_id)))
count_sales(sales_list, product_id, click)
# return the products clicked by both two users (using set intersection)
# def users_intersect(user_id1, user_id2, all_info):
# return 5 products the user most recently clicked
# def users_recentClick(user_id, costumerInfo):
# (after adding classification of products)
# return 5 recommended products to the user
# def users_recommend(user_id, product_info):
# read and process the data
def readData(all_info, product_info):
for i in range(15):
a = input()
store_allInfo(a, all_info)
store_productInfo(a, product_info)
# print(f'all_info: {all_info}')
# print(f'product_info: {product_info}')
if __name__ == '__main__':
all_info = []
product_info = {}
readData(all_info, product_info)
product_sales("14667", product_info)
product_sales("3k495", product_info)
| true |
fe36d062eb999af0436b0cff93561c822aede2c5 | Python | LValencia24/othellopy | /Othello_def1.py | UTF-8 | 10,954 | 3.375 | 3 | [] | no_license |
# Author: Luis Valencia 14-11335
# Initial variables
PlayerOneBlack = 1
PlayerTwoWhite = 2
turno = 0
# Function to create the board
def crearTablero():
tablero = [ [0 for j in range(0, 8)] for i in range(0, 8)]
tablero[3][4] = tablero[4][3] = PlayerOneBlack
tablero[4][4] = tablero[3][3] = PlayerTwoWhite
a = ""
for k in range(8):
for j in range(8):
a+=str(tablero[k][j])+str(" ")
print (a)
a=""
return tablero
# Function to switch the current player
def cambiarJugador(turn):
if turn == 0:
turn = 1
elif turn == 1:
turn = 0
return turn
def quedanFichas(tablero):
    # Returns True while at least one empty square (0) remains on the board
    for i in range(0, 8):
        for j in range(0, 8):
            if tablero[i][j] == 0:
                return True
    return False
# Function to determine whether a move is valid or not
def esValida(tablero, columna, fila, turno):
valido = False
    if fila > 7 or columna > 7:
pass
else:
if tablero[fila][columna] != 0:
pass
else:
if turno == 0:
tu_pieza = PlayerOneBlack
oponente = PlayerTwoWhite
elif turno == 1:
tu_pieza = PlayerTwoWhite
oponente = PlayerOneBlack
for x, y in [ [1, 1], [0, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1] ]:
if valido == True:
break
columna_tablero, fila_tablero = columna, fila
columna_tablero = columna_tablero + x
fila_tablero = fila_tablero + y
                if fila_tablero > 7 or columna_tablero > 7 or fila_tablero < 0 or columna_tablero < 0:
                    continue
if tablero[fila_tablero][columna_tablero] == oponente:
while tablero[fila_tablero][columna_tablero] == oponente:
columna_tablero = columna_tablero + x
fila_tablero = fila_tablero + y
                        if columna_tablero > 7 or fila_tablero > 7 or columna_tablero < 0 or fila_tablero < 0:
                            break
elif tablero[fila_tablero][columna_tablero] == tu_pieza:
valido = True
break
                if fila_tablero > 7 or columna_tablero > 7 or fila_tablero < 0 or columna_tablero < 0:
                    continue
return valido
# Vertical pieces captured after the move
def consumoVertical(tablero, columna, fila, turno):
consumoVertical = []
consumoVerticalUp = []
consumoVerticalUpFinal = []
consumoVerticalDown = []
consumoVerticalDownFinal = []
if turno == 0:
tu_pieza = PlayerOneBlack
oponente = PlayerTwoWhite
elif turno == 1:
tu_pieza = PlayerTwoWhite
oponente = PlayerOneBlack
tablero_fila_up = fila + 1
if tablero_fila_up < 8:
if tablero[tablero_fila_up][columna] == oponente:
while tablero[tablero_fila_up][columna] == oponente:
consumoVerticalUp.append([tablero_fila_up, columna])
tablero_fila_up = tablero_fila_up + 1
                if tablero_fila_up > 7:
                    break
elif tablero[tablero_fila_up][columna] == tu_pieza:
consumoVerticalUpFinal = consumoVerticalUp
break
    tablero_fila_down = fila - 1
    if tablero_fila_down >= 0 and tablero[tablero_fila_down][columna] == oponente:
while tablero[tablero_fila_down][columna] == oponente:
consumoVerticalDown.append([tablero_fila_down, columna])
tablero_fila_down = tablero_fila_down - 1
            if tablero_fila_down < 0:
                break
elif tablero[tablero_fila_down][columna] == tu_pieza:
consumoVerticalDownFinal = consumoVerticalDown
break
consumoVertical = consumoVerticalUpFinal + consumoVerticalDownFinal
return consumoVertical
# Horizontal pieces captured after the move
def consumoHorizontal(tablero, columna, fila, turno):
consumoHorizontal = []
consumoHorizontalRight = []
consumoHorizontalRightFinal = []
consumoHorizontalLeft = []
consumoHorizontalLeftFinal = []
if turno == 0:
tu_pieza = PlayerOneBlack
oponente = PlayerTwoWhite
elif turno == 1:
tu_pieza = PlayerTwoWhite
oponente = PlayerOneBlack
tablero_columna_right = columna + 1
tablero_columna_left = columna - 1
    if tablero_columna_right < 8 and tablero[fila][tablero_columna_right] == oponente:
while tablero[fila][tablero_columna_right] == oponente:
consumoHorizontalRight.append([fila, tablero_columna_right])
tablero_columna_right = tablero_columna_right + 1
            if tablero_columna_right > 7:
break
elif tablero[fila][tablero_columna_right] == tu_pieza:
consumoHorizontalRightFinal = consumoHorizontalRight
break
    if tablero_columna_left >= 0 and tablero[fila][tablero_columna_left] == oponente:
while tablero[fila][tablero_columna_left] == oponente:
consumoHorizontalLeft.append([fila, tablero_columna_left])
tablero_columna_left = tablero_columna_left - 1
            if tablero_columna_left < 0:
break
elif tablero[fila][tablero_columna_left] == tu_pieza:
consumoHorizontalLeftFinal = consumoHorizontalLeft
break
consumoHorizontal = consumoHorizontalRightFinal + consumoHorizontalLeftFinal
return consumoHorizontal
# Diagonal pieces captured after the move
def consumoDiagonal(tablero, columna, fila, turno):
    consumoDiagonal = []
    if turno == 0:
        tu_pieza = PlayerOneBlack
        oponente = PlayerTwoWhite
    elif turno == 1:
        tu_pieza = PlayerTwoWhite
        oponente = PlayerOneBlack
    # Walk each of the four diagonal directions with its own fresh counters
    # (the original shared counters leaked state between directions and mixed
    # up the left/right columns in two of the four branches).
    for x, y in [[1, 1], [1, -1], [-1, 1], [-1, -1]]:
        fila_diag = fila + y
        columna_diag = columna + x
        tramo = []
        while (0 <= fila_diag <= 7 and 0 <= columna_diag <= 7
                and tablero[fila_diag][columna_diag] == oponente):
            tramo.append([fila_diag, columna_diag])
            fila_diag = fila_diag + y
            columna_diag = columna_diag + x
        if (0 <= fila_diag <= 7 and 0 <= columna_diag <= 7
                and tablero[fila_diag][columna_diag] == tu_pieza):
            consumoDiagonal = consumoDiagonal + tramo
    print(consumoDiagonal)
    return consumoDiagonal
# Total pieces captured after making the move
def consumo(tablero, columna, fila, turno):
consumoTotal = []
consumoTotal = consumoVertical(tablero, columna, fila, turno) + consumoHorizontal(tablero, columna, fila, turno) + consumoDiagonal(tablero, columna, fila, turno)
return consumoTotal
# Function to apply the move and show the resulting board
def reflejarJugada(tablero, columna, fila, turno):
if turno == 0:
jugada = 1
elif turno == 1:
jugada = 2
tablero[fila][columna] = jugada
jugadaReflejada = consumo(tablero, columna, fila, turno)
if len(jugadaReflejada) > 0:
for x, y in jugadaReflejada:
tablero[x][y] = jugada
else:
return False
a = ""
for k in range(8):
for j in range(8):
a+=str(tablero[k][j])+str(" ")
print (a)
a=""
return tablero
# Function to ask the players for their move
def obtenerJugada():
    fila = int(input("Enter the row you want to play: "))
    columna = int(input("Enter the column you want to play: "))
jugadaObtenida = [fila, columna]
return jugadaObtenida
turno = 0
tablero = crearTablero()
# Game start and main loop
while True:
jugadaObtenida = obtenerJugada()
if esValida(tablero, jugadaObtenida[1], jugadaObtenida[0], turno) == True:
reflejarJugada(tablero, jugadaObtenida[1], jugadaObtenida[0], turno)
turno = cambiarJugador(turno)
elif esValida(tablero, jugadaObtenida[1], jugadaObtenida[0], turno) == False:
print("La jugada no es valida.")
| true |
b59ba71084bc6c5af6827dec115a8fcd27a88595 | Python | renll/rongba | /examples/experiments/loggers.py | UTF-8 | 3,436 | 2.53125 | 3 | ["Apache-2.0"] | permissive |
import numpy as np
import pandas as pd
import pickle
from ngboost.evaluation import *
class RegressionLogger(object):
def __init__(self, args):
self.args = args
self.log = pd.DataFrame()
def tick(self, forecast, y_test):
pred, obs, slope, intercept = calibration_regression(forecast, y_test)
self.log = self.log.append([{
"r2": r2_score(y_test, forecast.loc),
"mse": mean_squared_error(y_test, forecast.loc),
"nll": -forecast.logpdf(y_test.flatten()).mean(),
"crps": forecast.crps(y_test.flatten()).mean(),
"slope": slope,
"calib": calculate_calib_error(pred, obs)
}])
def to_row(self):
return pd.DataFrame({
"dataset": [self.args.dataset],
"distn": [self.args.distn],
"score": [self.args.score],
"rmse": ["{:.2f} \pm {:.2f}".format(np.mean(np.sqrt(self.log["mse"])), np.std(np.sqrt(self.log["mse"])) / self.args.reps)],
"nll": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["nll"]), np.std(self.log["nll"]) / self.args.reps)],
"crps": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["crps"]), np.std(self.log["crps"]) / self.args.reps)],
"r2": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["r2"]), np.std(self.log["r2"]) / self.args.reps)],
"calib": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["calib"]), np.std(self.log["calib"]) / self.args.reps)],
"slope": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["slope"]), np.std(self.log["slope"]) / self.args.reps)],
})
def save(self):
outfile = open("results/regression/logs_%s_%s_%s_%s.pkl" %
(self.args.dataset, self.args.score, self.args.natural,
self.args.distn), "wb")
pickle.dump(self, outfile)
class SurvivalLogger(object):
def __init__(self, args):
self.args = args
self.log = pd.DataFrame()
def tick(self, forecast, y_test):
C = 1-y_test['Event']
T = y_test['Time']
pred, obs, slope, intercept = calibration_time_to_event(forecast, T, C)
self.log = self.log.append([{
"cstat_naive": calculate_concordance_naive(forecast.loc, T, C),
"cstat_dead": calculate_concordance_dead_only(forecast.loc, T, C),
"cov": np.mean(np.sqrt(forecast.var) / forecast.loc),
"slope": slope,
"calib": calculate_calib_error(pred, obs)
}])
def to_row(self):
return pd.DataFrame({
"dataset": [self.args.dataset],
"score": [self.args.score],
"natural": [self.args.natural],
"cstat_naive": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["cstat_naive"]), np.std(self.log["cstat_naive"]) / self.args.reps)],
"cstat_dead": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["cstat_dead"]), np.std(self.log["cstat_dead"]) / self.args.reps)],
"calib": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["calib"]), np.std(self.log["calib"]) / self.args.reps)],
"slope": ["{:.2f} \pm {:.2f}".format(np.mean(self.log["slope"]), np.std(self.log["slope"]) / self.args.reps)],
})
def save(self):
outfile = open("results/survival/logs_%s_%s_%s_%s.pkl" %
(self.args.dataset, self.args.score, self.args.natural,
self.args.distn), "wb")
pickle.dump(self, outfile)
| true |
9771651a4aa5603f1db8e9f97a86cfce60aed4a2 | Python | NotGeobor/Bad-Code | /Physics equations/vf vi a t/a = vf vi t.py | UTF-8 | 120 | 3.46875 | 3 | [] | no_license |
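# Solve for acceleration given final velocity vf, initial velocity vi and
# time t, from the kinematic relation vf = vi + a*t  =>  a = (vf - vi) / t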
vf = float(input("vf? "))
vi = float(input("vi? "))
t = float(input("t? "))
a = ((vf - vi) / t)
print()
print(a)
print()
| true |
682ba4bda9c878651b53eed26dba5a2dd677177e | Python | wangye707/Test | /校招笔试/souhu.py | UTF-8 | 837 | 3.0625 | 3 | [] | no_license |
#!D:/workplace/python
# -*- coding: utf-8 -*-
# @File : souhu.py
# @Author: WangYe
# @Date : 2019/8/29
# @Software: PyCharm
def go(num, n):
    # count (and print) the numbers in [0, n] whose decimal digits contain num
    # (the original incremented once per matching digit, double-counting e.g. 11)
    if num:
        m = 0
        for i in range(n + 1):
            if num in str(i):
                print(i)
                m = m + 1
        return m
    else:
        return 0
if __name__ == '__main__':
x = input()
if x:
m = x.split(' ')
if len(m)>1:
print(go(m[0],int(m[1])))
else:
print(0)
else:
print(0)
# def go(n):
# if n==1:
# return 1
# if n==2:
# return 3
# if n==3:
# return 2
# if n == 4:
# return 3
# # if n ==5:
# # return
# if __name__ == '__main__':
# n = int(input())
# print(go(n))
| true |
731840ae9a21e97e2befa24d3e3d40edbf179685 | Python | dr1990/CSE-601-Data-Mining-Bioinformatics | /Project2/ClusteringAlgos/index.py | UTF-8 | 1,339 | 3.0625 | 3 | [] | no_license |
import numpy as np
# Creates the incidence matrix indicating the similarity of cluster assignments to each datapoint
def get_incidence_matrix(ground_truth, cluster_group):
n = len(ground_truth)
incidence_matrix = np.zeros((n, n), dtype='int')
for k, v in cluster_group.items():
for i, row in enumerate(v):
for col in range(i, len(v)):
incidence_matrix[row - 1][v[col] - 1] = 1
incidence_matrix[v[col] - 1][row - 1] = 1
return incidence_matrix
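# Illustrative sketch (not part of the original module): for cluster_group
# {1: [1, 2], 2: [3]} over three 1-indexed points, the incidence matrix is
#   [[1, 1, 0],
#    [1, 1, 0],
#    [0, 0, 1]]
# i.e. entry (i, j) is 1 exactly when points i+1 and j+1 share a cluster.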
# Creates a map of cluster numbers (key) to gene_ids (values)
# id - gene ids in the data set
# ground_truth - cluster numbers
def get_cluster_group(id, ground_truth):
cluster_group = dict()
for i in range(len(id)):
if ground_truth[i] in cluster_group.keys():
values = cluster_group[ground_truth[i]]
else:
values = list()
        values.append(id[i])
cluster_group[ground_truth[i]] = values
return cluster_group
# Takes two incidence matrices and computes a 2x2 contingency matrix
# indicating TP, FP, TN, FN pair counts
def get_categories(im_a, im_b):
categories = [[0, 0], [0, 0]]
(m, n) = np.shape(im_a)
for i in range(m):
for j in range(n):
x = im_a[i][j]
y = im_b[i][j]
categories[x][y] += 1
return categories
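# Illustrative sketch (not part of the original module): comparing ground
# truth labels against clustering labels for 1-indexed ids [1, 2, 3]:
#
#     ids = [1, 2, 3]
#     gt = get_incidence_matrix(ids, get_cluster_group(ids, ['a', 'a', 'b']))
#     cl = get_incidence_matrix(ids, get_cluster_group(ids, ['x', 'y', 'y']))
#     cat = get_categories(gt, cl)  # cat[1][1] counts index pairs co-clustered
#                                   # in both matrices (diagonal included)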
| true |
81313238a49245fdb63ba719594ae5fd12f705f3 | Python | georgeerol/LogAnalysis | /reportdb.py | UTF-8 | 3,089 | 3.265625 | 3 | [] | no_license |
#!/usr/bin/env python3
import psycopg2
# Question 1
popular_art_title = "What are the most popular three articles of all time?"
popular_art_query = \
(
"select articles.title, count(*) as views "
"from articles inner join log on log.path "
"like concat('%', articles.slug, '%') "
"where log.status like '%200%' group by "
"articles.title, log.path order by views desc limit 3")
# Question 2
popular_au_title = "Who are the most popular article authors of all time?"
popular_au_query = \
(
"select authors.name, count(*) as views from articles inner "
"join authors on articles.author = authors.id inner join log "
"on log.path like concat('%', articles.slug, '%') where "
"log.status like '%200%' group "
"by authors.name order by views desc")
# Question 3
error_day_title = "On which days did more than 1% of requests lead to errors"
error_day_query = \
(
"select day, pc from ("
"select day, round((sum(requests)/(select count(*) from log where "
"substring(cast(log.time as text), 0, 11) = day) * 100), 2) as "
"pc from (select substring(cast(log.time as text), 0, 11) as day, "
"count(*) as requests from log where status like '%404%' group by day)"
"as log_percentage group by day order by pc desc) as final_query "
"where pc >= 1")
def connect(db_name="news"):
"""
Connect to the Postgres database.
:param db_name: the name of the database
    :return: a (connection, cursor) pair
"""
try:
db = psycopg2.connect(database=db_name)
cursor = db.cursor()
return db, cursor
    except psycopg2.Error:
        print("Unable to connect to: {}".format(db_name))
        raise
def get_query(query):
"""
    Run the given query and return all result rows.
    :param query: SQL query string to execute
    :return: list of result rows fetched from the database
"""
db, cursor = connect()
cursor.execute(query)
data = cursor.fetchall()
db.close()
return data
def print_query_info(query):
"""
    Print a numbered list of results for the given query.
    :param query: a (title, results) tuple as built in the main block
"""
title = query[0]
print(title)
for index, results in enumerate(query[1]):
number = index + 1
first_result = results[0]
second_result = results[1]
print("{}) {}--{} views".format(number, first_result, second_result))
def print_error_day_info(error_day_query):
"""
    Print each day and its percentage of request errors.
    :param error_day_query: a (title, results) tuple of (day, percent) rows
"""
title = error_day_query[0]
print(title)
for results in error_day_query[1]:
first_result = results[0]
second_result = results[1]
print("{}--{}% errors".format(first_result, second_result))
if __name__ == '__main__':
print("\n")
popular_art_info = popular_art_title, get_query(popular_art_query)
print_query_info(popular_art_info)
print("\n")
popular_au_info = popular_au_title, get_query(popular_au_query)
print_query_info(popular_au_info)
print("\n")
error_day_info = error_day_title, get_query(error_day_query)
print_error_day_info(error_day_info)
| true |
1f3b9ef27eb193b8433c5d7a6afe966f2190edd3 | Python | msergeant/adventofcode | /2019/day10/main.py | UTF-8 | 2,372 | 3.28125 | 3 | [] | no_license |
from math import atan2, pi
def main():
with open('./input') as file:
raw = file.read()
grid = [i for i in raw.strip().split("\n")]
width = len(grid[0])
height = len(grid)
max = 0
max_coords = (-1, -1)
for i in range(width):
for j in range(height):
if grid[j][i] == '#':
count = asteroids_in_site(width, height, i, j, grid)
if count > max:
max = count
max_coords = (i, j)
print(max_coords)
    part1 = max
    # best station location found above (printed as max_coords on this input)
    origin = (23, 19)
trajectories = sorted_trajectories(width, height, origin[0], origin[1], grid)
count = 0
traj_index = 0
while count < 200:
traj = trajectories[traj_index]
x, y = [origin[0] + traj[0], origin[1] + traj[1]]
while (x >= 0 and
y >= 0 and
x < width and
y < height):
if grid[y][x] == '#':
count += 1
grid[y] = grid[y][:x] + '.' + grid[y][x+1:]
last = (x,y)
break
x, y = [x + traj[0], y + traj[1]]
traj_index = (traj_index + 1) % len(trajectories)
traj = trajectories[traj_index]
print("200th", last)
part2 = last[0] * 100 + last[1]
return part1, part2
def angle(point):
    # clockwise angle from "straight up" (grid y grows downward), normalized
    # into [0, 2*pi) so trajectories sort in laser-rotation order
    ang = atan2(point[1], point[0])
ang = pi / 2 + ang
if ang < 0:
ang += 2 * pi
return ang
def sorted_trajectories(width, height, x, y, grid):
trajectories = set()
asteroids = 0
for i in range(width):
for j in range(height):
if grid[j][i] == '#' and not (i == x and j == y):
asteroids += 1
rise = j - y
run = i - x
if rise != 0 and run != 0:
denom = gcd(abs(rise), abs(run))
else:
denom = max(abs(rise), abs(run))
trajectories.add((run // denom, rise // denom))
all_trajectories = list(trajectories)
all_trajectories.sort(key=angle)
return all_trajectories
def asteroids_in_site(width, height, x, y, grid):
    # number of distinct lines of sight, i.e. asteroids visible from (x, y)
    return len(sorted_trajectories(width, height, x, y, grid))
def gcd(a, b):
if a == 0:
return b
return gcd(b % a, a)
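# e.g. gcd(8, 12) -> gcd(12 % 8, 8) = gcd(4, 8) -> gcd(0, 4) = 4; used above to
# reduce each (run, rise) step to its smallest integer direction vector.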
print("The answer to part 1 is %d\n"
"The answer to part 2 is %d" % main())
| true |
925593027622e9ee4638300352690c936bb606cb | Python | HeyAnirudh/leetcode | /. Find Numbers with Even Number of Digits.py | UTF-8 | 229 | 3.1875 | 3 | [] | no_license |
#lt=[12,12,333,1234,34545]
#print(len(str(lt[2])))
def findNumbers(lt):
    # count how many numbers in lt have an even number of decimal digits
    count=0
for i in range(0,len(lt)):
if len(str(lt[i]))%2==0:
count+=1
return count
lt=[12,345,2,6,7896]
print(findNumbers(lt))
| true |
ffb568c3876b6cb4d87b7f7a9a07889bcf7a8cae | Python | hyeonahkiki/home | /problem_soliving/sw_expert/5789. 현주의 상자바꾸기.py | UTF-8 | 484 | 2.953125 | 3 | [] | no_license |
import sys
sys.stdin = open('input.txt', 'r')
T = int(input())
for tc in range(1, T+1):
    # number of boxes, number of repaint queries
N, Q = map(int, input().split())
box = [0] * (N + 1)
    # change value: the label applied to each repainted range (1-based index)
    for x in range(1, Q + 1):
        L, R = map(int, input().split())
        for j in range(L, R + 1):
            box[j] = x
ans = list(map(str, box[1:]))
print('#{} {}'.format(tc, ' '.join(ans)))
| true |
995cbd9ff946b4779c7fbe6b88cc7d88f6bbd440 | Python | johny65/acm | /Facebook Hacker Cup/2012/Qualification/c.py | UTF-8 | 565 | 2.953125 | 3 | [] | no_license |
import sys
t = int(input())
for f in range(t):
s = sys.stdin.readline()
h = a = c = k = e = r = u = p = 0
for i in s:
if i == 'H':
h += 1
elif i == 'A':
a += 1
elif i == 'C':
c += 1
elif i == 'E':
e += 1
elif i == 'R':
r += 1
elif i == 'K':
k += 1
elif i == 'U':
u += 1
elif i == 'P':
p += 1
    # "HACKERCUP" uses each of its letters once, except C which appears twice,
    # so the number of copies we can spell is min(letter counts, C // 2)
    l = (h, a, k, e, r, u, p)
    total = min(min(l), c // 2)
print('Case #{}: {}'.format(f+1, total))
| true |
bf20ff74eafa50b143f851ee79654c2fb272bc1e | Python | JiangHuYiXiao/Web-Autotest-Python | /第六章-pytest框架/pytest_参数化09/test_parametrize.py | UTF-8 | 962 | 3.265625 | 3 | [] | no_license |
# -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2020/5/8 8:47
# @Software : Web-Autotest-Python
# @Python_version : 3.7
'''
The @pytest.mark.parametrize decorator parametrizes a test case:
pytest.mark.parametrize('parameter', [values])
'''
# one parameter
list1 = ['寒山孤影','江湖故人','相逢何必曾相识']
@pytest.mark.parametrize('par1',list1)
def test_01(par1):
    print('case_',par1)
# two parameters (renamed to test_02 so it does not overwrite test_01 above)
list2 = [('寒山孤影',1),('江湖故人',2),('相逢何必曾相识',3)]
@pytest.mark.parametrize('par1,par2',list2)
def test_02(par1,par2):
    print(par2,'case_',par1)
# mark a single parametrized test instance as an expected failure (xfail)
@pytest.mark.parametrize("test_input,expected",
[("3+5", 8),("2+4", 6),
pytest.param("6 * 9", 42,
marks=pytest.mark.xfail)
,])
def test_eval(test_input, expected):
assert eval(test_input) == expected
if __name__ =='__main__':
pytest.main()
| true |
69ff482fb58eb0a1b2f4b808dfa9dbcd5ca71f44 | Python | williamejelias/Huffman-EncoderDecoder | /HuffmanDecode.py | UTF-8 | 1,424 | 3.21875 | 3 | [] | no_license |
import pickle
import sys
import time
start = time.time()
infile = sys.argv[1]
filename = infile
try:
# read the file as bytes and unpickle
    file = open(filename, "rb")
    list_dictionary_message = pickle.load(file)
    file.close()
# extract dictionary
huffmanCodes = list_dictionary_message[0]
# extract the number of padded zeroes
lengthPaddedZeroes = list_dictionary_message[1]
# extract the encoded bitarray
bitarray = list_dictionary_message[2]
# remove the padded zeroes from the encoded bitarray
for i in range(lengthPaddedZeroes):
bitarray.pop()
# decode the bitarray into a list of characters
dec = bitarray.decode(huffmanCodes)
# join the characters into one string to complete the decoding
decoded_message = ''.join(dec)
# print("Decoded Message by decode method: ", decoded_message)
# write the decoded message to a new text file
newFilename = '' + filename[:-3] + '.txt'
newFile = open(newFilename, "w")
newFile.write(decoded_message)
# close the newly created file
newFile.close()
print("Number of symbols: ", len(huffmanCodes))
print("Time: ", (time.time() - start))
except FileNotFoundError:
print("No such file or directory...")
print("Exiting program.")
except pickle.UnpicklingError:
print("Error decompressing - file is likely corrupted or not in the correct format.")
print("Exiting program.")
| true |
8b01c1f8e1edb35f9b72a1445ce0190eaf25b5a6 | Python | phamtony/turtle-cross-game | /car_manager.py | UTF-8 | 820 | 3.375 | 3 | [] | no_license |
from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
class CarManager(Turtle):
def __init__(self):
super().__init__()
self.shape("square")
self.color(random.choice(COLORS))
self.shapesize(stretch_wid=1, stretch_len=3)
self.penup()
self.x_cord = 0
self.goto(random.randint(-280, 280), random.randint(-260, 270))
    def move(self, level_floor):
        # speed starts at STARTING_MOVE_DISTANCE on level 1 and grows by
        # MOVE_INCREMENT for each level after that
        if level_floor == 1:
speed = STARTING_MOVE_DISTANCE
else:
speed = (level_floor - 1) * MOVE_INCREMENT
if self.xcor() <= -320:
self.goto(320, random.randint(-260, 270))
self.x_cord = self.xcor() - speed
self.goto(self.x_cord, self.ycor())
| true |
589d65de2452c265c3c1b43f4d7d401a23cfecc1 | Python | 5l1v3r1/network-scanner-1 | /networkscanner/PortScanner/PortScanExample.py | UTF-8 | 455 | 3 | 3 | [] | no_license |
import PortScanner as ps
def main():
# Initialize a Scanner object that will scan top 50 commonly used ports.
scanner = ps.PortScanner(target_ports=50)
host_name = input('hostname:')
message = 'put whatever message you want here'
scanner.set_delay(15)
scanner.show_target_ports()
scanner.show_delay()
scanner.show_top_k_ports(100)
output = scanner.scan(host_name, message)
if __name__ == "__main__":
main()
| true |