blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9ddd00c32e39e75fc2dc86eab1a749ea4157c8f6
|
Python
|
hugoruscitti/pybox
|
/pybox/dialogs/open.py
|
UTF-8
| 2,130
| 2.734375
| 3
|
[] |
no_license
|
import cPickle
import gtk
class OpenDialog:
    """Abstract open dialog.

    Builds a GTK file-chooser filtered by *pattern* and runs it immediately
    on construction; subclasses must implement ``_open(filename)``.
    """
    def __init__(self, parent, canvas, status, pattern, name):
        # canvas/status are collaborators used by the subclass ``_open`` hook.
        self.canvas = canvas
        self.parent = parent
        self.status = status
        self._create_dialog(pattern, name)
        # The dialog runs (and is handled) as a side effect of construction.
        # NOTE(review): ``name`` is only forwarded here and otherwise unused.
        self._run()
    def _create_dialog(self, pattern, name):
        """Create the gtk.FileChooserDialog with a custom filter plus '*'."""
        buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                   gtk.STOCK_OPEN, gtk.RESPONSE_OK)
        action = gtk.FILE_CHOOSER_ACTION_OPEN
        dialog = gtk.FileChooserDialog(action=action,
                                       buttons=buttons,
                                       parent=self.parent)
        dialog.set_do_overwrite_confirmation(True)
        # Custom filter, e.g. ("pybox Files", "*.pybox").
        filter = gtk.FileFilter()
        label, mask = pattern
        filter.set_name(label)
        filter.add_pattern(mask)
        dialog.add_filter(filter)
        # Catch-all filter (*).
        filter = gtk.FileFilter()
        filter.set_name("All Files")
        filter.add_pattern("*")
        dialog.add_filter(filter)
        self.dialog = dialog
    def _run(self):
        """Run the dialog; on OK delegate to the subclass ``_open`` hook."""
        response = self.dialog.run()
        if response == gtk.RESPONSE_OK:
            self._open(self.dialog.get_filename())
        self.dialog.hide()
class Document(OpenDialog):
    """Open dialog that loads a diagram saved in the pybox pickle format."""
    def __init__(self, parent, canvas, status):
        pattern = ("pybox Files", "*.pybox")
        name = "untitled.pybox"
        OpenDialog.__init__(self, parent, canvas, status, pattern, name)
    def _open(self, filename):
        """Load *filename*, recreate its boxes on the canvas, reconnect them."""
        # Fix: the handle was never closed (and shadowed the builtin ``file``).
        handle = open(filename, 'rb')
        try:
            dump = cPickle.load(handle)
        except cPickle.UnpicklingError:
            self.status.error("Can't read: %s" %(filename))
            return
        finally:
            handle.close()
        self.canvas.open(filename)
        for (x, y, model) in dump:
            self.canvas.create_box(model, x, y, hierarchy_lines=False)
        # Connect lines only after every box exists.
        for box in self.canvas.boxes:
            self.canvas.connect_box(box, box.model)
        self.status.info("File loaded as %s" %(filename))
        self.canvas.session.open_document_notify(filename)
| true
|
588fa10a6ed9030eca0fdae37dc2148c30cacd6a
|
Python
|
fengges/leetcode
|
/101-150/132. 分割回文串 II.py
|
UTF-8
| 951
| 3.078125
| 3
|
[] |
no_license
|
class Solution:
    def minCut(self, s):
        """Return the minimum number of cuts partitioning *s* into palindromes."""
        n = len(s)
        # pal[a][b] is True when s[a:b+1] is a palindrome.
        pal = [[False] * n for _ in range(n)]
        for a in range(n):
            pal[a][a] = True
        for a in range(n - 1):
            pal[a][a + 1] = (s[a] == s[a + 1])
        # Grow palindromes outward: length+1 substring is a palindrome when its
        # interior is and its end characters match.
        for length in range(2, n + 1):
            for a in range(n - length):
                b = a + length
                pal[a][b] = pal[a + 1][b - 1] and s[a] == s[b]
        # cuts[end] = minimum cuts for the prefix s[:end+1].
        cuts = [0] * n
        for end in range(1, n):
            if pal[0][end]:
                cuts[end] = 0
            else:
                cuts[end] = cuts[end - 1] + 1
                for start in range(1, end + 1):
                    if pal[start][end]:
                        cuts[end] = min(cuts[end], cuts[start - 1] + 1)
        return cuts[-1]
# Self-check harness: report any case whose answer deviates from the expected one.
solver = Solution()
cases = [
    {"input": "ababababababababababababcbabababababababababababa", "output": 1},
    {"input": "aab", "output": 1},
]
for case in cases:
    answer = solver.minCut(case['input'])
    if answer != case['output']:
        print("error:" + str(case) + " out:" + str(answer))
| true
|
1aa4915635abfbaed83f4e09751e76a67e845c53
|
Python
|
smurugap/tools
|
/flow.py
|
UTF-8
| 2,459
| 2.765625
| 3
|
[] |
no_license
|
import os, subprocess, re, matplotlib, time, numpy
# Headless backend: must be selected before pyplot is imported.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Number of measurement rounds to take.  NOTE(review): shadows builtin iter().
iter = 5
# One bar group per bucket depth 0..4.
ind = numpy.arange(5)
width = 0.70  # the width of the bars
fig, ax = plt.subplots(figsize=(10,7))
ax.set_xticks(ind+width)
ax.set_ylabel('No of hash buckets')
ax.set_xlabel('Depth of the bucket')
ax.set_xticklabels( ('0', '1', '2', '3', '4') )
def autolabel(rects):
    """Write each bar's integer height just above it (uses the global ``ax``)."""
    for bar in rects:
        bar_height = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2.
        ax.text(x_center, 1.05 * bar_height, '%d' % int(bar_height),
                ha='center', va='bottom')
# Get flow table size from the vrouter CLI.
pattern = 'Flow Table limit.*\s(\d+)'
output = subprocess.check_output('vrouter --info', shell=True, stderr=subprocess.STDOUT)
tablesize = re.search(pattern, output, re.M).group(1)
# Bucket count: presumably 4 entries per hash bucket — TODO confirm.
# NOTE(review): Python 2 integer division assumed (print statements below).
keys = int(tablesize)/4
# Fixed number of overflow buckets.
okeys = 2048
def get_flow_counts():
    """Start remote traffic, wait for the flow table to stabilise, then
    histogram bucket occupancy.

    Returns (main-table bucket counts, overflow bucket counts).  Relies on the
    module globals ``keys`` and ``okeys``.
    """
    # Kick off the traffic generator on the remote client (fire and forget).
    os.system('fab -H ubuntu@169.254.0.3 -p ubuntu --no-pty -- sudo nohup /var/tmp/client.sh >> test.log &')
    time.sleep(10)
    # Flow indices for destinations 1.1.1.* in `flow -l` output.
    pattern = '^\s*(\d+).*1.1.1.'
    old_indices = 0
    flow = dict()
    oflow = dict()
    while True:
        # Get the indices
        output = subprocess.check_output('flow -l', shell=True, stderr=subprocess.STDOUT)
        indices = re.findall(pattern, output, re.M)
        # Stop polling once the number of active flows stops growing.
        if len(indices) <= old_indices:
            break
        old_indices = len(indices)
        time.sleep(30)
    # Zero-initialise one counter per bucket.
    for i in range(keys):
        flow[i] = 0
    for i in range(okeys):
        oflow[i] = 0
    for index in indices:
        # Integer division: 4 flow entries map to one bucket (Python 2 `/`).
        key = int(index)/4
        if key >= keys:
            # Entries beyond the main table land in the overflow buckets.
            key = key - keys
            oflow[key] += 1
        else:
            flow[key] += 1
    return (flow.values(), oflow.values())
# Repeat the measurement, overlaying one bar series per round.
for i in range(iter):
    (flows, oflows) = get_flow_counts()
    # Count how many buckets hold exactly 0..4 entries.
    values = (flows.count(0), flows.count(1), flows.count(2), flows.count(3), flows.count(4))
    ovalues = (oflows.count(0), oflows.count(1), oflows.count(2), oflows.count(3), oflows.count(4))
    print 'No of buckets with', '\n\t\t0 entry:', values[0], '\n\t\t1 entry:', values[1], '\n\t\t2 entry:', values[2], '\n\t\t3 entry:', values[3], '\n\t\t4 entry:', values[4]
    print 'No of overflow buckets with', '\n\t\t0 entry:', ovalues[0], '\n\t\t1 entry:', ovalues[1], '\n\t\t2 entry:', ovalues[2], '\n\t\t3 entry:', ovalues[3], '\n\t\t4 entry:', ovalues[4]
    rects = ax.bar(ind+width, values, width, alpha=0.5, align='center')
    autolabel(rects)
    # Let traffic churn before the next sample.
    time.sleep(240)
plt.savefig('/var/www/html/test')
| true
|
2a24d54cb74523befc9f166da6d039676a503794
|
Python
|
niklasadams/explainable_concept_drift_pm
|
/pm4py/algo/filtering/pandas/cases/case_filter.py
|
UTF-8
| 4,149
| 2.609375
| 3
|
[] |
no_license
|
import pandas as pd
from pm4py.util import constants, xes_constants
from enum import Enum
from pm4py.util import exec_utils
class Parameters(Enum):
    # Keys accepted in the ``parameters`` dict of the public filter functions.
    TIMESTAMP_KEY = constants.PARAMETER_CONSTANT_TIMESTAMP_KEY
    CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
def filter_on_ncases(df, case_id_glue=constants.CASE_CONCEPT_NAME, max_no_cases=1000):
    """
    Filter a dataframe keeping only the specified maximum number of traces

    The retained cases are the most frequent ones, since ``value_counts``
    orders case ids by descending event count.

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    max_no_cases
        Maximum number of traces to keep

    Returns
    ------------
    df
        Filtered dataframe
    """
    # value_counts() is already ordered by descending frequency; take its head
    # directly instead of copying the dict into a list one key at a time.
    cases_to_keep = list(df[case_id_glue].value_counts().index[:max_no_cases])
    return df[df[case_id_glue].isin(cases_to_keep)]
def filter_on_case_size(df, case_id_glue="case:concept:name", min_case_size=2, max_case_size=None):
    """
    Filter a dataframe keeping only traces with at least the specified number of events

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    min_case_size
        Minimum size of a case
    max_case_size
        Maximum case size

    Returns
    -----------
    df
        Filtered dataframe
    """
    # Per-row event count of the row's case, aligned with df's index.
    sizes = df[case_id_glue].groupby(df[case_id_glue]).transform('size')
    df = df[sizes >= min_case_size]
    if max_case_size:
        # Recompute on the reduced frame before applying the upper bound.
        sizes = df[case_id_glue].groupby(df[case_id_glue]).transform('size')
        df = df[sizes <= max_case_size]
    return df
def filter_on_case_performance(df, case_id_glue=constants.CASE_CONCEPT_NAME,
                               timestamp_key=xes_constants.DEFAULT_TIMESTAMP_KEY,
                               min_case_performance=0, max_case_performance=10000000000):
    """
    Filter a dataframe on case performance (total case duration, in seconds,
    strictly between min_case_performance and max_case_performance)

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    timestamp_key
        Timestamp column to use for the CSV
    min_case_performance
        Minimum case performance
    max_case_performance
        Maximum case performance

    Returns
    -----------
    df
        Filtered dataframe
    """
    grouped_df = df[[case_id_glue, timestamp_key]].groupby(df[case_id_glue])
    # First/last event per case give the case's start and end timestamps.
    start_events = grouped_df.first()
    end_events = grouped_df.last()
    end_events.columns = [str(col) + '_2' for col in end_events.columns]
    stacked_df = pd.concat([start_events, end_events], axis=1)
    stacked_df['caseDuration'] = stacked_df[timestamp_key + "_2"] - stacked_df[timestamp_key]
    # Express the duration in float seconds.  Fix: ``.astype('timedelta64[s]')``
    # changed meaning in pandas 2.x (it no longer yields numeric seconds),
    # breaking the numeric comparisons below; total_seconds() is stable.
    # (Sub-second fractions are kept, where astype() used to truncate.)
    stacked_df['caseDuration'] = stacked_df['caseDuration'].dt.total_seconds()
    stacked_df = stacked_df[stacked_df['caseDuration'] < max_case_performance]
    stacked_df = stacked_df[stacked_df['caseDuration'] > min_case_performance]
    # Keep every event row whose case survived the duration filter.
    i1 = df.set_index(case_id_glue).index
    i2 = stacked_df.set_index(case_id_glue).index
    return df[i1.isin(i2)]
def filter_case_performance(df, min_case_performance=0, max_case_performance=10000000000, parameters=None):
    """Parameter-dict wrapper around :func:`filter_on_case_performance`."""
    parameters = parameters if parameters is not None else {}
    timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters,
                                               xes_constants.DEFAULT_TIMESTAMP_KEY)
    case_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
    return filter_on_case_performance(df,
                                      min_case_performance=min_case_performance,
                                      max_case_performance=max_case_performance,
                                      timestamp_key=timestamp_key,
                                      case_id_glue=case_glue)
def apply(df, parameters=None):
    """Not supported for the case filter; always raises NotImplementedError."""
    del df, parameters
    raise NotImplementedError("apply method not available for case filter")
def apply_auto_filter(df, parameters=None):
    """Not supported for the case filter; always raises.

    Fix: raise NotImplementedError for consistency with :func:`apply`
    (a bare ``Exception`` was raised before; NotImplementedError is a
    subclass, so callers catching Exception keep working).
    """
    del df, parameters
    raise NotImplementedError("apply_auto_filter method not available for case filter")
| true
|
d42089b5912b974f3c08638b9b276e81986a3f2a
|
Python
|
Sariel-D/SDomain
|
/dnsdb/rfile.py
|
UTF-8
| 938
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# encoding: utf-8
# Data From File
import platform
import os
def coupling_file_addr(file_addr):
current_os = platform.system().lower()
print '[!] 当前系统版本({0}), 尝试转换不规范路径.'.format(current_os)
if current_os == 'win':
file_addr = file_addr.replace('/', '\\')
elif current_os == 'linux' or os == 'unix':
file_addr = file_addr.replace('\\', '/')
else:
pass
return file_addr
def check_file_exist(file_addr):
    """Return True when *file_addr* exists on disk."""
    # Direct return instead of the redundant if/else around a boolean.
    return os.path.exists(file_addr)
def data_integration(file_addr):
    """Read one-record-per-line file and build ``{host: {type: [value]}}``.

    Each line must be a Python dict literal with 'host', 'type' and 'value'
    keys; the empty dict is returned when the file does not exist.
    """
    import ast  # local import keeps this fix self-contained

    domains = {}
    if not check_file_exist(file_addr):
        return domains
    with open(coupling_file_addr(file_addr)) as rfile:
        for line in rfile:
            # SECURITY fix: eval() executed arbitrary code from the file;
            # literal_eval only accepts Python literal structures.
            record = ast.literal_eval(line)
            if len(record):
                domains[record['host']] = {record['type']: [record['value']]}
    return domains
| true
|
78e2af45e6857829d99fc0c3c1962c5edaf5275f
|
Python
|
luka3117/toy
|
/py/山内 テキスト text sample code/PythonStatistical 山内/ch6/list6-7.py
|
UTF-8
| 415
| 2.828125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# List 6-7: test for the ratio of two population variances
# (homogeneity-of-variance F test).
import math
import numpy as np
from scipy.stats import f

m = 10        # sample size of group X
n = 10        # sample size of group Y
xmean = 76.3  # group means (not used by the F test itself)
ymean = 70.5
xvar = 160.1  # unbiased sample variances
yvar = 59.6

# Test statistic and the two-sided 5% critical values of F(m-1, n-1).
F = xvar / yvar
f_lower = f.ppf(0.025, m - 1, n - 1)
f_upper = f.ppf(0.975, m - 1, n - 1)
print('F=', round(F, 4), 'reject=', (F<f_lower)or(f_upper<F))
# Expected output:
# F= 2.6862 reject= False
| true
|
8e6efb1c7bec5df87d4ad89e434015163dd781fb
|
Python
|
PancrasL/chinamap-coloring
|
/coloring_algorithm.py
|
UTF-8
| 4,452
| 3.09375
| 3
|
[] |
no_license
|
from pprint import pprint
import copy
# Mapping from node id (as a string) to province name.
id_to_name = {'1': '北京',
              '10': '黑龙江',
              '11': '江苏',
              '12': '浙江',
              '13': '安徽',
              '14': '福建',
              '15': '江西',
              '16': '山东',
              '17': '河南',
              '18': '湖北',
              '19': '湖南',
              '2': '天津',
              '20': '广东',
              '21': '甘肃',
              '22': '四川',
              '23': '贵州',
              '24': '海南',
              '25': '云南',
              '26': '青海',
              '27': '陕西',
              '28': '广西',
              '29': '西藏',
              '3': '上海',
              '30': '宁夏',
              '31': '新疆',
              '32': '内蒙古',
              '33': '澳门',
              '34': '香港',
              '4': '重庆',
              '5': '河北',
              '6': '山西',
              '7': '台湾',
              '8': '辽宁',
              '9': '吉林'}
# Number of graph nodes (provinces).
N = 34
# Look up a province name from a node id.
def get_province_name_by_id(id):
    """Return the province name for 1 <= id <= N, else an empty string."""
    if 1 <= id <= N:
        return id_to_name[str(id)]
    return ""
# Load the map data as an adjacency list: graph[node_id] -> neighbour ids.
def load_graph(filename):
    """Parse *filename* (lines of 'id,n1,n2,...') into an adjacency list."""
    # Index 0 is unused; slot 34 is pre-filled (and overwritten if present).
    graph = [0] * 34 + [dict()]
    with open(filename, 'r') as handle:
        for row in handle.read().splitlines():
            fields = [int(token) for token in row.split(',')]
            graph[fields[0]] = fields[1:]
    return graph
# Convert records of province-id colourings into (name, colour) records.
def trans_record(id_record):
    """Translate each colouring snapshot into (province-name, colour) tuples."""
    global N
    translated = []
    for snapshot in id_record:
        translated.append([(get_province_name_by_id(idx), snapshot[idx])
                           for idx in range(1, N + 1)])
    return translated
# Colours (1-4) not already used by any neighbour of ``cur``.
def get_unused_color(cur, graph, node_color):
    """Return the list of colours still available for node *cur*."""
    available = {1, 2, 3, 4}
    for neighbour in graph[cur]:
        available.discard(node_color[neighbour])
    return list(available)
# Find the next node that has not been coloured yet.
def find_next_node(node_color):
    """Return the first uncoloured node id, or -1 when every node is coloured."""
    for candidate in range(1, N + 1):
        if not node_color[candidate]:
            return candidate
    return -1
def coloring(cur, graph, node_color, all_coloring_record, true_coloring_record):
    """Backtracking four-colouring starting at node ``cur``.

    Mutates ``node_color`` in place; appends every attempted colouring to
    ``all_coloring_record`` and (on success, in reverse order) the snapshots
    of the successful path to ``true_coloring_record``.  Returns True when a
    complete colouring was found.
    """
    # Record the colouring step.
    all_coloring_record.append(copy.deepcopy(node_color))
    # Colours still usable for this node.
    unused_color = get_unused_color(cur, graph, node_color)
    # If at least one colour can still be used here...
    if(unused_color):
        # ...try each candidate colour for node ``cur``.
        for color in unused_color:
            node_color[cur] = color
            next_node = find_next_node(node_color)
            temp = copy.deepcopy(node_color)
            if(next_node > 0):
                result = coloring(next_node, graph, node_color, all_coloring_record, true_coloring_record)
                # The current colouring leads to a full solution.
                if(result):
                    true_coloring_record.append(temp)
                    return True
                else:
                    continue
            # No uncoloured node left: every node is coloured.
            else:
                true_coloring_record.append(temp)
                return True
    # Current scheme is infeasible: clear this node's colour.
    node_color[cur] = 0
    # No usable colour here, or no solution downstream.
    return False
# node_id: node where colouring starts
# is_true: True -> return only the successful colouring path
def start(node_id, is_true):
    """Run the map colouring and return the chosen record list, or -1."""
    # Colour per province: 0 = uncoloured, 1-4 are the four colours.
    all_coloring_record = []
    true_coloring_record = []
    china_map = load_graph('data/chinamap_adj_data.txt')
    node_color = [0]*(N+1)
    # NOTE(review): colouring always starts at node 1; ``node_id`` is unused.
    if(coloring(1, china_map, node_color, all_coloring_record, true_coloring_record)):
        all_coloring_record.append(copy.deepcopy(node_color))
    else:
        print("无解")
        return -1
    result = []
    if(is_true):
        # Successful colouring sequence (reversed into forward order).
        result = trans_record(true_coloring_record[::-1])
    else:
        # Every attempted colouring step.
        result = trans_record(all_coloring_record)
    return result
if __name__ == "__main__":
    # Demo run: compute the successful colouring path starting from node 1.
    result = start(1, True)
| true
|
6b56e817be2421806a691237f5205c68288651f0
|
Python
|
elcerdo/projecteuler
|
/problem15.py
|
UTF-8
| 432
| 2.765625
| 3
|
[] |
no_license
|
import scipy as s
# 21x21 lattice of path counts: Project Euler problem 15 (20x20 grid routes).
# NOTE(review): Python 2 era code (xrange/print below); ``scipy.int0`` is
# removed in modern NumPy/SciPy — verify before reuse.
total=s.zeros((21,21),dtype=s.int0)
total[0,0]=1
def updatetotal(i,j):
    """Set total[i, j] to the sum of the counts above and to the left."""
    sources = []
    if i > 0:
        sources.append(total[i - 1, j])
    if j > 0:
        sources.append(total[i, j - 1])
    total[i, j] = sum(sources)
# Fill the grid along anti-diagonals: first those starting on the top edge...
for k in xrange(1,total.shape[0]):
    for l in xrange(0,k+1):
        updatetotal(k-l,l)
# ...then those ending on the bottom-right half.
for k in xrange(-total.shape[0]+1,0):
    for l in xrange(k,0):
        updatetotal(total.shape[0]+k-l-1,total.shape[1]+l)
# Number of monotone lattice paths across the full grid.
print total[-1,-1]
| true
|
fd59cd177546cee8feba33ba99d76615f7731f55
|
Python
|
kjchavez/pong-network
|
/Server/PongClient.py
|
UTF-8
| 12,059
| 2.84375
| 3
|
[] |
no_license
|
# Pong Client
import socket
from PongNetworkConstants import *
from GameEngine2D import *

# Set False to test locally with the mouse driving both paddles.
USE_NETWORK = True
class PongWorld(World):
    """Game world holding both scores and the side this client plays on."""
    def __init__(self,surface):
        World.__init__(self,"Pong",surface)
        self.playerScore = 0
        self.opponentScore = 0
        self.playerSide = None   # LEFT or RIGHT, assigned via set_side()
        self.messageTimer = 0
    def set_side(self,side):
        self.playerSide = side
    def award_point(self,side):
        """Credit a point to *side*; end the game at WINNING_SCORE, else reset."""
        print "called award_point"
        if side == self.playerSide:
            print "point for player"
            self.playerScore += 1
            if self.playerScore >= WINNING_SCORE:
                self.winner = True
                self.end_game()
            else:
                self.reset()
        else:
            print "point for opponent"
            self.opponentScore += 1
            if self.opponentScore >= WINNING_SCORE:
                self.winner = False
                self.end_game()
            else:
                self.reset()
    def reset(self):
        # Re-centre the ball after a point.
        ball = self.get_group("Ball")[0]
        ball.move_to(self.size/2)
    def display_message(self,message,time=1000,pause=True):
        # NOTE(review): placeholder — only prints; ``time``/``pause`` unused.
        print message
    def end_game(self):
        if self.winner:
            self.display_message("You Win!")
        else:
            self.display_message("You Lose!")
        # Queue a QUIT event so the main loop terminates.
        pygame.event.post(pygame.event.Event(QUIT,{}))
class MessageWriter(Entity):
    """On-screen message entity (rendering not implemented yet)."""
    def __init__(self,world,position=(200,200)):
        self.world = world
        self.type = "Message"
        self.image = None
        # Fix: np.array(0,0) passed 0 as the *dtype* argument and raised a
        # TypeError; the intended empty size is the pair (0, 0).
        self.size = np.array((0,0)) if not self.image else np.array(self.image.get_size())
        self.position = np.array(position)
        self.rect = pygame.Rect(self.position-self.size/2,self.size)
        self.timer = 0.
        self.message = ""
    def process(self,dt):
        # Count down the message display timer.
        if self.timer > 0:
            self.timer -= dt
    def erase(self):
        pass
    def render(self,dt):
        pass
class Ball(GraphicEntity):
    """The pong ball: bounces off walls and paddles, scores on side exits."""
    def __init__(self,world,position=(0,0),velocity=(200,200),size=30,color='red'):
        self.world = world
        self.position = np.array(position,float)
        self.velocity = np.array(velocity ,float)
        self.size = size
        self.type = "Ball"
        self.rect = pygame.Rect(self.position-(size/2,size/2),(size+1,size+1))
        # Color can also be passed in as any string that pygame recognizes as a color
        if isinstance(color,str):
            color = pygame.Color(color)
        # Create image: a filled circle on white, with the corner colour
        # keyed out so the square surface renders as a circle.
        self.image = pygame.surface.Surface((size+1,size+1))
        self.image.fill(pygame.Color('white'))
        pygame.draw.circle(self.image,color,(size/2,size/2),size/2)
        self.image.set_colorkey(self.image.get_at((0,0)))
        # Other Internal variables
        self.horizontalCollisionTimer = 0
        self.verticalCollisionTimer = 0
    def render(self,surface):
        surface.blit(self.image,self.position-np.array(self.image.get_size())/2)
    def erase(self):
        # Restore the background over the ball's previous rect.
        self.world.surface.blit(self.world.background,self.rect,self.rect)
    def process(self,dt):
        """Advance one frame: wall/paddle collisions, scoring, then movement."""
        if self.world.isPaused:
            return
        # Check collisions
        self.has_hit_vertical()
        sideHit = self.has_hit_horizontal()
        # Leaving the right side scores for LEFT and vice versa.
        if sideHit == RIGHT:
            self.world.award_point(LEFT)
        elif sideHit == LEFT:
            self.world.award_point(RIGHT)
        self.check_collisions()
        self.position += self.velocity * dt
        # Unless there's a real advantage to numpy, may just use Rects
        self.rect.center = self.position
    def check_collisions(self):
        # Simple version: test the four edge midpoints against each paddle.
        collisionPoints = [self.rect.midtop,self.rect.midright,self.rect.midbottom,self.rect.midleft]
        for entity in self.world.get_group("Paddle"):
            for point in collisionPoints:
                if entity.collides_with(point):
                    # Reflect away from whichever edge touched the paddle.
                    if point == self.rect.midleft:
                        self.velocity[0] = abs(self.velocity[0])
                    elif point == self.rect.midright:
                        self.velocity[0] = -abs(self.velocity[0])
                    elif point == self.rect.midtop:
                        self.velocity[1] = -abs(self.velocity[1])
                    elif point == self.rect.midbottom:
                        self.velocity[1] = abs(self.velocity[1])
    def collides_with(self,point):
        """Return True when *point* lies inside the ball's circle."""
        if LA.norm(point-self.position) < self.size/2:
            return True
        return False
    def has_hit_horizontal(self):
        """Bounce off the left/right walls; return which side was hit, if any."""
        if (self.position[0] > self.world.get_width() - self.size/2):
            self.velocity[0] = -abs(self.velocity[0])
            return RIGHT
        if (self.position[0] < self.size/2):
            self.velocity[0] = abs(self.velocity[0])
            return LEFT
        return None
    def has_hit_vertical(self):
        """Bounce off the top/bottom walls; return which side was hit, if any."""
        if (self.position[1] > self.world.get_height() - self.size/2):
            self.velocity[1] = -abs(self.velocity[1])
            return BOTTOM
        if (self.position[1] < self.size/2):
            self.velocity[1] = abs(self.velocity[1])
            return TOP
        return None
class Paddle(GraphicEntity):
    """A player or opponent paddle; PLAYER paddles track the mouse's Y."""
    def __init__(self,world,position=(0,0),velocity=(0,0),controller=PLAYER,size=(30,100),color='black'):
        self.world = world
        self.position = np.array(position)
        self.velocity = np.array(velocity)
        self.size = np.array(size)
        self.controller = controller   # PLAYER or OPPONENT
        self.type = "Paddle"
        self.rect = pygame.Rect(self.position-self.size/2,self.size)
        # Color can also be passed in as any string that pygame recognizes as a color
        if isinstance(color,str):
            color = pygame.Color(color)
        self.image = pygame.surface.Surface(self.size)
        pygame.draw.rect(self.image,color,pygame.Rect((0,0),self.size))
    def render(self,surface):
        surface.blit(self.image,self.position-self.size/2)
    def erase(self):
        self.world.surface.blit(self.world.background,self.rect,self.rect)
    def process(self,dt):
        if self.world.isPaused:
            return
        if self.controller == PLAYER:
            # Only the Y coordinate can move, according to the mouse position
            self.move_to_y(pygame.mouse.get_pos()[1])
    def move_to(self,pos):
        self.position = np.array(pos)
        self.rect.center = self.position
    def move_to_y(self,y):
        # Horizontal position stays fixed; only Y changes.
        self.position[1] = y
        self.rect.center = self.position
    def get_pos(self):
        return self.position
    def check_collisions(self):
        pass
    def collides_with(self,point):
        return self.rect.collidepoint(point)
class Score(GraphicEntity):
    """Renders one side's score, re-rendering its text only when it changes."""
    def __init__(self,world,position,owner,color='black'):
        self.world = world
        self.position = np.array(position)
        self.owner = owner          # PLAYER or OPPONENT
        self.currentScore = -1      # -1 forces the first update_image()
        self.color = color
        self.type = "Score"
        if isinstance(self.color,str):
            self.color = pygame.Color(self.color)
        self.font = pygame.font.Font(None,36)
        self.image = self.font.render(str(self.currentScore),True,self.color)
        self.size = np.array(self.image.get_size())
        self.rect = pygame.Rect(self.position - self.size/2, self.size)
    def render(self,surface):
        surface.blit(self.image,self.rect)
    def erase(self):
        self.world.surface.blit(self.world.background,self.rect,self.rect)
    def process(self,dt):
        # Poll the world's score; refresh the text surface when it changed.
        if self.owner == PLAYER:
            newScore = self.world.playerScore
        else:
            newScore = self.world.opponentScore
        if newScore != self.currentScore:
            print "Change in score"
            self.currentScore = newScore
            self.update_image()
    def update_image(self):
        self.image = self.font.render(str(self.currentScore),True,self.color)
        self.size = np.array(self.image.get_size())
        self.rect = pygame.Rect(self.position - self.size/2,self.size)
class SocketEntity(Entity):
    """TCP link to the pong server: sends our paddle Y, receives the opponent's."""
    def __init__(self,world,host,port):
        self.world = world
        self.host = host
        self.port = port
        self.type = "Socket"
        self.clientSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.clientSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Connects synchronously; raises if the server is unreachable.
        self.clientSocket.connect((host,port))
        self.hasNotRecv = True
        self.opponentConnected = False
    def get_data(self):
        """Blocking receive of one message (up to BUFFER_SIZE bytes)."""
        self.hasNotRecv = False
        string = self.clientSocket.recv(BUFFER_SIZE)
        #print "Server says:",string
        return string
    def put_data(self,data):
        self.clientSocket.send(str(data))
    def process(self,dt):
        """Each frame: exchange paddle positions, or poll for the opponent."""
        if self.opponentConnected:
            paddles = self.world.get_group("Paddle")
            for paddle in paddles:
                if paddle.controller == PLAYER:
                    # Send our paddle's vertical position.
                    self.put_data(paddle.get_pos()[1])
                elif paddle.controller == OPPONENT:
                    message = self.get_data()
                    # Check if other player disconnected
                    if message == DISCONNECTED:
                        self.world.display_message("Opponent disconnected!")
                        self.opponentConnected = False
                        self.world.pause()
                        return
                    # Otherwise the message is a position
                    newY = int(message)
                    paddle.move_to_y(newY)
        else:
            # Ask the server if the other player is connected yet
            print "Asking if opponent is connected..."
            self.clientSocket.send(POLL)
            response = self.clientSocket.recv(BUFFER_SIZE)
            print "Server responded:",response
            if response == YES:
                self.opponentConnected = True
                self.world.unpause()
def main():
    """Set up pygame, the world and all entities, then run the main loop."""
    pygame.init()
    screen = pygame.display.set_mode((400,600))
    screenWidth,screenHeight = screen.get_size()
    screenDimensions = np.array(screen.get_size())
    pygame.display.set_caption("Socket Pong")
    screen.fill((255,255,255))
    pygame.display.flip()
    # Game Objects
    world = PongWorld(screen)
    if USE_NETWORK:
        # Create SocketEntity first and receive client's player number
        sock = SocketEntity(world,HOST,PORT)
        world.add(sock)
        playerNumber = sock.get_data()
        print "This player designated as %s by the server." % playerNumber
        # PLAYER1 controls the left paddle; the other client mirrors this.
        controllers = (PLAYER,OPPONENT) if (playerNumber == PLAYER1) else (OPPONENT,PLAYER)
        scores = controllers
        world.set_side(LEFT if playerNumber == PLAYER1 else RIGHT)
    else:
        # For testing purposes, mouse will control both paddles
        controllers = (PLAYER,PLAYER)
        scores = (PLAYER,OPPONENT)
    paddleLeft = Paddle(world,position=np.array([20,screenHeight/2]),controller=controllers[0])
    paddleRight = Paddle(world,position=np.array([screenWidth-20,screenHeight/2]),controller=controllers[1])
    ball = Ball(world,position=screenDimensions/2)
    scoreLeft = Score(world,(screenWidth/4,50),scores[0])
    scoreRight = Score(world,(3*screenWidth/4,50),scores[1])
    world.add(paddleLeft)
    world.add(scoreLeft)
    world.add(paddleRight)
    world.add(scoreRight)
    world.add(ball)
    if not USE_NETWORK:
        # Networked games stay paused until the server reports an opponent.
        world.unpause()
    world.run_main_loop()
    pygame.quit()

if __name__ == "__main__":
    main()
| true
|
5e7ac2222ebb6cf2fcb267863f36d4dc16845475
|
Python
|
dagrewal/nfl-big-data-bowl-2021
|
/wrangling/eda.py
|
UTF-8
| 760
| 2.75
| 3
|
[] |
no_license
|
"""
NFL EDA of individual files using pyspark
"""
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.context import SparkContext
from pyspark.sql.functions import *
from pyspark.sql.types import *
import utils
import sys
# initialise spark session (local mode, 15 worker threads)
sc = SparkSession.builder.appName('nfl-eda')\
    .master("local[15]").getOrCreate()

# read in games
games_df = sc.read.csv("../data/games.csv",
                       header=True,
                       inferSchema=True)
utils.get_raw_data_info(games_df)

# sort by date and time
games_df = games_df.sort("gameDate", "gameTimeEastern")

# rename some of the columns that end in "Abbr" by stripping the suffix
games_df = games_df.toDF(*(c.replace("Abbr", "") for c in games_df.columns))
print(games_df.columns)
| true
|
5db6a84b1882e96da0f66769cb24dd1023ee8bc2
|
Python
|
javiermontenegro/Python_Design.Patterns
|
/Behavioral/TemplatePattern.py
|
UTF-8
| 1,373
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#********************************************************************
# Filename: TemplatePattern.py
# Author: Javier Montenegro (https://javiermontenegro.github.io/)
# Copyright:
# Details: This code is the implementation of the template pattern.
#*********************************************************************
from abc import ABC, abstractmethod
class Template(ABC):
    """Template-method base class: ``execute`` runs the two abstract hooks."""

    def __init__(self):
        pass

    @abstractmethod
    def function1(self):
        pass

    @abstractmethod
    def function2(self):
        pass

    def execute(self):
        """Template method: announce, then invoke both hooks in order."""
        print("Run function1 and function2.")
        self.function1()
        self.function2()
class TemplateImplementation1(Template):
    """Concrete template; each hook just reports that it ran."""
    def function1(self):
        print("TemplateImplementation1.function1() called.")
    def function2(self):
        print("TemplateImplementation1.function2() called.")
class TemplateImplementation2(Template):
    """Second concrete template; each hook just reports that it ran."""
    def function1(self):
        print("TemplateImplementation2.function1() called.")
    def function2(self):
        print("TemplateImplementation2.function2() called.")
if __name__ == "__main__":
    # Demonstrate both concrete templates, separated by a ruler line.
    first_demo = TemplateImplementation1()
    first_demo.execute()
    print("-----------------------------------------------")
    second_demo = TemplateImplementation2()
    second_demo.execute()
| true
|
f6d027205ecc66e4c3f4d9e823338fa75a7bc349
|
Python
|
GiladSchneider/laughing-pancake
|
/astar.py
|
UTF-8
| 787
| 3
| 3
|
[] |
no_license
|
from astar_algo import loop
# initializes the boards
def init(n, num):
    """Read board ``num`` of size *n* and locate the start ('S') and
    target ('T') cells.

    Returns (board, xstart, ystart, xtar, ytar, n, num).  Raises
    UnboundLocalError if the file contains no 'S' or 'T'.
    """
    board = []
    # Fix: the file handle was never closed (and shadowed the builtin ``file``).
    with open("arrays%a/arrays%s.txt" %(n,num), "r") as grid_file:
        for i in range(n):
            line = grid_file.readline()
            if 'S' in line:
                xstart = i
                ystart = line.index('S')
            if 'T' in line:
                xtar = i
                ytar = line.index('T')
            # Drop the trailing newline and split the row into cells.
            board.append(list(line[:-1]))
    return board, xstart, ystart, xtar, ytar, n, num
# Imports the boards from the txt files
def main():
    # Solve all 50 pre-generated 101x101 boards with A*.
    for i in range(50):
        #i=28
        [board,xstart,ystart,xtar,ytar,n,num] = init(101,i)
        loop(board,xstart,ystart,xtar,ytar,n,num)

# NOTE: runs immediately on import/execution.
main()
| true
|
8e292665e52f488fdafd2e658ef60e5a3784beb5
|
Python
|
pedrobf777/MarchMadness
|
/src/utils.py
|
UTF-8
| 1,957
| 2.59375
| 3
|
[] |
no_license
|
import pandas as pd
from numpy import nan
from sklearn.preprocessing import LabelEncoder
def load_target_sample():
    """Load the stage-2 sample submission and split its ID into key columns.

    IDs look like 'YYYY_AAAA_BBBB'; Season/team_a/team_b are sliced out of
    the index as strings.
    """
    frame = pd.read_csv('data/SampleSubmissionStage2.csv')
    frame = frame.set_index('ID').drop('Pred', axis=1)
    idx = frame.index
    frame['Season'] = idx.map(lambda key: key[:4])
    frame['team_a'] = idx.map(lambda key: key[5:9])
    frame['team_b'] = idx.map(lambda key: key[10:14])
    frame['in_target'] = True
    frame['game_set'] = 'ncaa'
    return frame
def load_data_template(season=False):
    """Build the modelling template: tournament (and optionally regular-season)
    games merged with the stage-2 target sample.

    Result columns: Season, team_a (lower id), team_b (higher id), a_win,
    DayNum (366 for target-only rows), game_set (label-encoded), in_target.
    """
    tourney_games = pd.read_csv('data/NCAATourneyCompactResults.csv')
    tourney_games['game_set'] = 'ncaa'
    data = [tourney_games]
    if season:
        season_games = pd.read_csv('data/RegularSeasonCompactResults_Prelim2018.csv')
        season_games['game_set'] = 'season'
        data.append(season_games)
    data = pd.concat(data).astype({
        'Season': str, 'WTeamID': str,
        'LTeamID': str, 'DayNum': int
    })
    # team_a is always the numerically smaller team id, team_b the larger.
    data['team_a'] = data[['WTeamID', 'LTeamID']]\
        .apply(lambda t: t[0] if int(t[0]) < int(t[1]) else t[1], axis=1)
    data['team_b'] = data[['WTeamID', 'LTeamID']]\
        .apply(lambda t: t[0] if int(t[0]) > int(t[1]) else t[1], axis=1)
    data['a_win'] = data['WTeamID'] == data['team_a']
    data = data[['Season', 'team_a', 'team_b', 'a_win', 'DayNum', 'game_set']]
    target = load_target_sample()
    target_index = target.index.tolist()
    # Mark games that also appear in the submission target index.
    data['in_target'] = data[['Season', 'team_a', 'team_b']]\
        .apply(lambda r: '_'.join(r.values) in target_index, axis=1)
    data = pd.merge(target, data,
                    on=['Season', 'team_a', 'team_b', 'game_set', 'in_target'],
                    how='outer')
    # Target-only rows have no DayNum; 366 sorts them after any real game day.
    data['DayNum'].fillna(366, inplace=True)
    data = data.astype({
        'Season': int, 'DayNum': int,
        'team_a': str, 'team_b': str,
        'in_target': bool
    })
    data['game_set'] = LabelEncoder().fit_transform(data['game_set'])
    return data
| true
|
3c0ae787aea711aa725d092f17288a6140f24812
|
Python
|
ZombieSave/Python
|
/Урок1.5/Задача2.py
|
UTF-8
| 587
| 4.03125
| 4
|
[] |
no_license
|
# 2. Create a text file (by hand), save a few lines in it, then count the
# number of lines and the number of words on each line.
data = []  # fix: was unbound (NameError below) when the file could not be read
try:
    with open("Задача1.dat", "r", encoding="utf-8") as f:
        data = [i for i in f]
except IOError:
    print("Ошибка при чтении файла")
print(f"Строк в файле: {len(data)}\n")
for i, item in enumerate(data):
    print(f"Строка {i+1} слов {len(item.split())}")
| true
|
ea972db451a8b60430d4860a88ab5061a36a2022
|
Python
|
uk-gov-mirror/ministryofjustice.opg-repository-reporting
|
/output/ouput.py
|
UTF-8
| 720
| 3.25
| 3
|
[] |
no_license
|
# output class
class outputer:
    """Collects keyed result sets, then converts and saves them in one step."""

    def __init__(self, conversion_function, save_function):
        # Fix: ``data_store`` was a *class* attribute, so every instance
        # shared (and leaked into) the same dict; make it per-instance.
        self.data_store = {}
        self.conversion_function = conversion_function
        self.save_function = save_function

    def append(self, key, values):
        """Store *values* under *key*, replacing any previous entry."""
        self.data_store[key] = values

    def output(self):
        """Convert and save everything collected; no-op when empty."""
        if len(self.data_store) > 0:
            print('>>>>> Saving dataframe')
            processed = self.conversion_function(self.data_store)
            self.save_function(processed)
        else:
            print('>>>>> Nothing to save')
| true
|
bed310213d50d83d0bdf9941eb41e14c9b527cc0
|
Python
|
UCRoboticsLab/gourmetBot
|
/light_show.py
|
UTF-8
| 850
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/python2
import argparse
import sys
import rospy
import baxter_interface
def off_lights(navs):
    """Switch off the inner and outer LEDs of every navigator in *navs*."""
    for navigator in navs:
        navigator.inner_led = False
        navigator.outer_led = False
def main():
    """Blink all four navigator LED pairs ~10 times at 10 Hz, then switch off."""
    rospy.init_node('light_show')
    navs = (
        baxter_interface.Navigator('left'),
        baxter_interface.Navigator('right'),
        baxter_interface.Navigator('torso_left'),
        baxter_interface.Navigator('torso_right'),)
    rate = rospy.Rate(10)
    i = 0
    # Start in an alternating state: inner on, outer off.
    for nav in navs:
        nav.inner_led = True
        nav.outer_led = False
    while not rospy.is_shutdown() and i < 10:
        # Toggle both LEDs each tick.
        for nav in navs:
            nav.inner_led = not nav.inner_led
            nav.outer_led = not nav.outer_led
        rate.sleep()
        i += 1
    off_lights(navs)
print("Running light show...")
# Runs immediately on import/execution (no __main__ guard).
main()
print("Program executed.")
| true
|
e2c109b90721a4a3581a5c0c38f57f99f934fec5
|
Python
|
sftmsj/python-stepik
|
/loops.py
|
UTF-8
| 938
| 3.3125
| 3
|
[] |
no_license
|
# a = 5
# while a <= 55:
# print(a, end=' ')
# a += 2
# c = 1
# while c < 7:
# print('*' * c)
# c += 1
# i = 0
# while i < 5:
# print('*')
# if i % 2 == 0:
# print('**')
# if i > 2:
# print('***')
# i = i + 1
# a = int(input())
# s = 0
# while a != 0:
# s += a
# a = int(input())
# print(s)
# a = int(input())
# b = int(input())
# n = 1
# while n <= a * b:
# if (n % a == 0) and (n % b == 0):
# break
# else:
# n += 1
# print(n)
# a = int(input())
# b = int(input())
# d=a
# while d%b!=0:
# d+=a
# print(d)
# i = 0
# s = 0
# while i < 10:
# i = i + 1
# s = s + i
# if s > 15:
# continue
# i = i + 1
# print(i)
a = int(input())
# Echo numbers in (10, 100); read the next number each pass; stop at >= 100.
while a < 100:
    if a > 10:
        print('output', a)
    else:
        # Too small: read a replacement and re-test without printing.
        a = int(input())
        continue
    a = int(input())
| true
|
bd40efa5bcb84bf7581e2f4122a4c7ab29292a95
|
Python
|
danihooven/Principles-of-Programming
|
/Homework07/homework07_02.py
|
UTF-8
| 1,332
| 3.984375
| 4
|
[] |
no_license
|
""" -------------------------------------------------------------------------------------------------------------------
ITEC 136: Homework 07_02
Modify your first program to print count of the words instead of percentage of the letters.
In this exercise you will get your input from a file.
Case should be ignored.
Write functions to analyze the words in file.
@author: Dani Hooven
@version: 10/20/2020
-------------------------------------------------------------------------------------------------------------------- """
# Count word frequencies in data.txt, ignoring case, punctuation and numbers,
# and print each distinct word with its count in alphabetical order.
# Characters stripped from each word before counting (same set the original
# removed with chained .replace() calls).
_strip_punctuation = str.maketrans('', '', '_",.-?!\'()[]:;')

count = {}
# BUG FIX: the original opened the file without ever closing it; `with`
# guarantees the handle is released even if an exception is raised.
with open('data.txt', 'r') as f:
    for line in f:
        for word in line.split():
            # remove punctuation in a single translate pass
            word = word.translate(_strip_punctuation)
            # ignore case
            word = word.lower()
            # ignore tokens that are not purely alphabetic (e.g. numbers)
            if word.isalpha():
                count[word] = count.get(word, 0) + 1

for word in sorted(count.keys()):
    print(word, " ", str(count[word]))
| true
|
1ddc78d43bdcfab2d5215ce793580551117c1671
|
Python
|
anhthuanprobg/ngotuantu-Fundamentals-c4e13
|
/Session01/homework/hello_world.py/area_of_circle.py
|
UTF-8
| 53
| 3.53125
| 4
|
[] |
no_license
|
# Read a radius and print the area of the circle.
r = int(input("Radius"))
# BUG FIX: the original computed `3.14*r^2`; `^` is bitwise XOR in Python,
# not exponentiation, so every area was wrong. Use `**` for r squared.
s = 3.14 * r ** 2
print("Area =", s)
| true
|
01f393a49e410da7cd29f4168e5f44abbe4450a9
|
Python
|
spanceac/raspboil
|
/mail.py
|
UTF-8
| 3,558
| 2.65625
| 3
|
[] |
no_license
|
import imaplib
import email
from datetime import datetime
import time
# Module-level state shared between check_mail / check_date / print_date_action.
date = ""
action = ""
date_unsplit = ""
have_mail = 0 #variable used to avoid checking empty time structures in check_date at first run if no new mail available
def check_mail(none):
    """Poll the IMAP inbox for an unseen command mail and parse it.

    Expects the plain-text body to be either the literal 'stop', or
    '<action> <dd/mm/hh/mm>' (e.g. 'start 12/03/18/30'). Fills the module
    globals date/action/date_unsplit and sets have_mail. Returns 1 when a
    stop mail was received, otherwise None. The `none` parameter is unused.
    """
    global date, action, have_mail, date_unsplit
    # NOTE(review): "your_emai_address" looks like an unfilled placeholder
    # (and a typo for 'email') -- confirm it is configured before deploying.
    result, maildata = mail.search(None, '(Unseen FROM "your_emai_address")')
    ids = maildata[0] # data is a list.
    id_list = ids.split() # ids is a space separated string
    ################ TO DO Check if id_list is not empty ;)
    if not id_list: #if is empty
        return
    else:
        have_mail = 1
    latest_email_id = id_list[-1] # get the latest
    result, maildata = mail.fetch(latest_email_id, "(RFC822)") # fetch the email body (RFC822) for the given ID
    msg = email.message_from_string(maildata[0][1])
    for part in msg.walk():
        # each part is a either non-multipart, or another multipart message
        # that contains further parts... Message is organized like a tree
        if part.get_content_type() == 'text/plain':
            continut = part.get_payload() # prints the raw text
            stop_content = continut.strip()
            if (stop_content == 'stop'): # 'stop' command: notify and bail out
                print "Stop mail received"
                f = open('/dev/shm/state', 'w')
                f.write("stop") #notify stop via the shared state file
                f.close()
                return 1
            # Otherwise parse '<action> <date>' from the body.
            continut_new = continut.split(" ")
            action = continut_new[0]
            date = continut_new[1].split("\n")
            date = date[0]
            date_unsplit = date
            date = date.split("/")  # -> [day, month, hours, minutes]
            return
def check_date(nada):
    """Validate the globally-parsed date and act on it.

    If the parsed [day, month, hours, minutes] is valid and equals the
    current time, writes 'start now' to /dev/shm/state (for action ==
    "start"); if it is in the future, records it via print_date_action().
    The `nada` parameter is unused.
    NOTE(review): the comparison has no year component, so a future month in
    the *next* year is classified as 'past' -- confirm this is acceptable.
    """
    global date, action
    day = int(date[0])
    month = int(date[1])
    hours = int(date[2])
    minutes = int(date[3])
    # Basic range validation (day 1-31, month 1-12, 24h clock).
    if (day > 0 and day <= 31) and (month > 0 and month <=12) and (hours >=0 and hours <= 23) and (minutes >=0 and minutes <= 59):
        print "date is valid"
        #let's see if the date is in the present or future
        now = datetime.now()
        if (month == now.month) and (day == now.day) and (hours == now.hour) and (minutes == now.minute):
            print "The date is now"
            f = open('/dev/shm/state', 'w')
            if (action == "start"):
                f.write("start now") #notify start
            else:
                print "Action is invalid"
            f.close()
            return
        else: #if the date is not now, let's check if it's in the future or the past
            # Lexicographic comparison month -> day -> hour -> minute.
            if (month > now.month):
                print "The date is in the future"
                print_date_action(0)
                return
            elif (month == now.month):
                if (day > now.day):
                    print "The date is in the future"
                    print_date_action(0)
                    return
                elif (day == now.day):
                    if (hours > now.hour):
                        print "The date is in the future"
                        print_date_action(0)
                        return
                    elif (hours == now.hour):
                        if(minutes > now.minute):
                            print "The date is in the future"
                            print_date_action(0)
                            return
                        else:
                            print "The date is in the past"
                            return
                    else:
                        print "The date is in the past"
                        return
                else:
                    print "The date is in the past"
                    return
            else:
                print "The date is in the past"
                return
    else:
        print "date is invalid"
        return
def print_date_action(nada):
    """Record a scheduled 'start <date>' command in the shared state file.

    Writes 'start <dd/mm/hh/mm>' to /dev/shm/state so another process can
    pick up the scheduled start. The `nada` parameter is unused.
    """
    global date_unsplit
    print date_unsplit
    f = open('/dev/shm/state', 'w')
    a = 'start' + ' ' + date_unsplit + '\n'
    f.write(a) #notify start
    f.close()
# Connect to Gmail over IMAP/SSL and process at most one pending command mail.
# NOTE(review): the credentials below look like unfilled placeholders.
mail = imaplib.IMAP4_SSL('imap.gmail.com')
mail.login('boilers_gmail_username', 'boilers_gmail_password')
mail.list()
# Out: list of "folders" aka labels in gmail.
mail.select("inbox") # connect to inbox.
stop = check_mail(0)
# Only evaluate the date when a (non-stop) command mail was actually parsed.
if have_mail == 1 and stop != 1:
    check_date(0)
| true
|
3cb0abc40a21d61a3fcc81472345fc812962bc9b
|
Python
|
daniela2001-png/MIN_TIC_UNAL
|
/FUNCIONES/area.py
|
UTF-8
| 227
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/python3
from math import pi
def rectangulo(b, h):
    """Area of a rectangle with base *b* and height *h*."""
    area = b * h
    return area
def circulo(r):
    """Area of a circle of radius *r* (pi * r^2)."""
    return r ** 2 * pi
def total_area():
    """Total figure area: one 5x10 rectangle plus two circles of radius 3."""
    return rectangulo(5, 10) + circulo(3) * 2

# Print the combined area of the composite figure.
print(total_area())
| true
|
183f19223366f5c96d26c7c16b61708e13e7e719
|
Python
|
Crossnonsense/Final-project-RTSoft
|
/main.py
|
UTF-8
| 4,698
| 2.84375
| 3
|
[] |
no_license
|
import cv2
import numpy as np
def canny_filter(frame):
    """Binarize the bright pixels of *frame* and return its Canny edge map.

    frame: 3-channel image array -- assumed BGR as produced by cv2 (TODO
    confirm color order). Returns the single-channel edge image.
    """
    r_channel = frame[:, :, 2]
    binary = np.zeros_like(r_channel)
    # BUG FIX: this mask was originally written with 255 but tested with
    # `== 1` below, so the r_channel > 220 threshold never contributed to
    # allBinary. Mark the mask with 1 so the union below actually sees it.
    binary[r_channel > 220] = 1
    # NOTE(review): s_channel reads the same channel index (2) as r_channel;
    # it was possibly meant to be an HLS saturation channel -- confirm intent.
    s_channel = frame[:, :, 2]
    binary2 = np.zeros_like(s_channel)
    binary2[s_channel > 211] = 1
    # Union of both threshold masks, promoted to full intensity (255).
    allBinary = np.zeros_like(binary)
    allBinary[((binary == 1) | (binary2 == 1))] = 255
    blurred_frame = cv2.GaussianBlur(allBinary, (5, 5), 0)  # 5x5 smoothing
    return cv2.Canny(blurred_frame, 50, 150)
def check_lines(lines):
    """Check whether the detected lane lines stay inside the frame.

    lines[0] / lines[1] are the left / right line coordinate arrays; only
    their first element (bottom x) is inspected. Returns (ok, message).
    """
    left_bottom_x = lines[0][0]
    right_bottom_x = lines[1][0]
    if left_bottom_x <= -50:
        return False, "Keep left"
    if right_bottom_x >= 690:
        return False, "Keep right"
    return True, "Ok"
def region_of_interest(image):
    """Mask *image* so only the trapezoidal road region ahead remains."""
    # Trapezoid corners (x, y): bottom-left, bottom-right, top-right, top-left.
    corners = np.array([(0, 320),
                        (550, 320),
                        (400, 155),
                        (200, 155)])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, np.array([corners], dtype=np.int64), 1024)
    return cv2.bitwise_and(image, mask)
def make_coordinates(image, line_parameters):
    """Convert a fitted (slope, intercept) pair into pixel endpoints.

    The segment spans from the bottom edge of *image* up to half its
    height. Returns np.array([x1, y1, x2, y2]).
    """
    slope, intercept = line_parameters
    y_bottom = image.shape[0]
    y_top = int(y_bottom * (5/10))
    # Invert y = slope * x + intercept to obtain x at each endpoint.
    x_bottom = int((y_bottom - intercept) / slope)
    x_top = int((y_top - intercept) / slope)
    return np.array([x_bottom, y_bottom, x_top, y_top])
# Previous-frame line coordinates, used to fill in a side when no segments
# were detected for it on the current frame.
right_prev = []
left_prev = []
def calculate_lines(image, lines):
    """Average Hough segments into one left and one right lane line.

    Segments with negative slope are binned as 'left', non-negative as
    'right'; each bin's (slope, intercept) average is converted to pixel
    endpoints via make_coordinates. When a bin is empty, its line is
    extrapolated by mirroring the other side's frame-to-frame motion,
    using the left_prev/right_prev globals.
    NOTE(review): the `while` acts as an `if` (the unconditional return
    exits on the first pass); when lines is None the function implicitly
    returns None, and left_line/right_line can be unbound on the very
    first frames if a bin is empty -- confirm callers tolerate this.
    """
    left_fit = []
    right_fit = []
    global left_prev
    global right_prev
    while lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            # Degree-1 polyfit -> (slope, intercept) of this segment.
            parameters = np.polyfit((x1, x2), (y1, y2), 1)
            slope = parameters[0]
            intercept = parameters[1]
            if slope < 0:
                left_fit.append((slope, intercept))
            else:
                right_fit.append((slope, intercept))
        if left_fit:
            left_fit_average = np.average(left_fit, axis=0)
            left_line = make_coordinates(image, left_fit_average)
            left_prev = left_line.copy()
        if right_fit:
            right_fit_average = np.average(right_fit, axis=0)
            right_line = make_coordinates(image, right_fit_average)
            right_prev = right_line.copy()
        # No left segments: shift the previous left line by the right
        # line's displacement since the last frame.
        if (not left_fit) and (left_prev) and (right_prev):
            for i in range(len(right_line)):
                left_line[i] = left_prev[i] - (right_line[i] - right_prev[i])
            left_prev = left_line.copy()
        # Symmetric fallback for a missing right line.
        if (not right_fit) and (left_prev) and (right_prev):
            for i in range(len(right_line)):
                right_line[i] = right_prev[i] - (left_line[i] - left_prev[i])
            right_prev = right_line.copy()
        return np.array([left_line, right_line])
def display_lines(image, lines, ok, msg):
    """Draw the two lane lines, the filled lane polygon and *msg* onto a
    blank canvas shaped like *image*, and return that overlay.

    ok selects the polygon/text color: greenish when True, reddish when
    False. Expects *lines* to hold exactly two (x1, y1, x2, y2) rows.
    NOTE(review): if *lines* has fewer than two rows, p1/pts are unbound
    when first used -- confirm callers always pass both lines.
    """
    lined_image = np.zeros_like(image)
    if lines is not None:
        i = 1
        for x1, y1, x2, y2 in lines:
            if i == 1:
                # First row: draw it and remember its endpoints for the polygon.
                cv2.line(lined_image, (x1, y1), (x2, y2), (0, 255, 0), 8)
                i+=1
                p1 = [x1,y1,x2,y2]
            else:
                # Second row: draw it and build the 4-corner lane polygon.
                cv2.line(lined_image, (x1, y1), (x2, y2), (0, 255, 0), 8)
                pts = np.array([[[ p1[0], p1[1]], [p1[2], p1[3]], [x2,y2],[x1,y1]]], dtype=np.int32)
        if ok:
            cv2.fillPoly(lined_image, pts, (202, 255, 192), lineType=8, shift=0, offset=None)
            cv2.putText(lined_image, msg, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        else:
            cv2.fillPoly(lined_image, pts, (100, 100, 255), lineType=8, shift=0, offset=None)
            cv2.putText(lined_image, msg, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    return lined_image
def process_frame(frame):
    """Run the full lane-detection pipeline on one frame and return the
    original frame blended with the lane overlay."""
    original = frame.copy()
    edges = canny_filter(frame)
    cropped = region_of_interest(edges)
    segments = cv2.HoughLinesP(cropped, 1, np.pi / 180, 20, np.array([()]), minLineLength=10, maxLineGap=5)
    lane_lines = calculate_lines(frame, segments)
    ok, msg = check_lines(lane_lines)
    overlay = display_lines(original, lane_lines, ok, msg)
    # Weighted blend: mostly the source frame, with the overlay on top.
    return cv2.addWeighted(original, 0.9, overlay, 0.5, 1)
# Play the input video through the lane-detection pipeline, frame by frame,
# until the stream ends or the user presses 'q'.
video = cv2.VideoCapture('msu1.mp4')
if not video.isOpened():
    print("Error")
while video.isOpened():
    video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    video.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
    success, frame = video.read()
    if not success:
        break
    frame = cv2.resize(frame, (640, 360))
    try:
        result = process_frame(frame)
    except Exception:
        # BUG FIX: the original used a bare `except: pass`, so if the very
        # first frame failed `result` was unbound and cv2.imshow raised a
        # NameError. Skip frames that cannot be processed instead.
        continue
    cv2.imshow('frame', result)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        # BUG FIX: break out of the loop instead of releasing the capture
        # and re-entering the loop condition on a released object.
        break
video.release()
cv2.destroyAllWindows()
| true
|
859bb6e822e3da7f83c604c03bfd36ce71235756
|
Python
|
ot-vinta/PentaMail
|
/scripts/server/python/Normalizer.py
|
UTF-8
| 3,453
| 3.03125
| 3
|
[] |
no_license
|
from nltk.stem import LancasterStemmer
import Translator
def separate_char(s, c):
    """Return *s* with a space inserted before and after each occurrence of
    character *c* that is not already space-separated.

    Works in place on a growing string: `d` tracks the current length and
    `j` the scan position, so the index arithmetic is order-sensitive.
    Occurrences at position 0 or the last position are not padded (the scan
    runs over interior indices only).
    """
    d = len(s)
    j = 1
    while j < d - 1:
        # Insert a space before c when the previous char is not a space.
        if (s[j] == c) and (s[j - 1] != ' '):
            s = s[0: j] + ' ' + s[j: len(s)]
            d += 1
        # Insert a space after c when the next char is not a space.
        if (s[j] == c) and (s[j + 1] != ' '):
            s = s[0: j + 1] + ' ' + s[j + 1: len(s)]
            d += 1
        j += 1
    return s
def delete_special_chars(s):
    """Strip punctuation/special characters from *s*.

    Each special character is first padded with spaces (via separate_char),
    then removed; the smileys ':)' / ':(' are re-encoded as '}' / '{' after
    literal braces have been cleared, and parentheses become spaces.
    The two character lists preserve the original call order exactly.
    """
    # Characters to space-separate before removal (order preserved).
    to_separate = ['{', '}', '[', ']', '.', ',', '?', '\'', '\"', ':', ';',
                   '|', '_', '-', '+', '=', '!', '@', '#', '$', '№', '%',
                   '^', '&', '<', '>', '*', '\\', '/', '£', '', 'ú', '‘',
                   '(', ')']
    for ch in to_separate:
        s = separate_char(s, ch)
    # Sequential replacements; order matters (braces are cleared before the
    # smiley encodings below so they cannot collide with literal braces).
    replacements = [('{', ''), ('}', ''), (':)', '}'), (':(', '{'),
                    ('[', ''), (']', ''), ('.', ''), (',', ''), ('?', ''),
                    ('\'', ''), ('\"', ''), (':', ''), ('|', ''), ('_', ''),
                    (';', ''), ('-', ''), ('+', ''), ('=', ''), ('!', ''),
                    ('@', ''), ('#', ''), ('№', ''), ('$', ''), ('%', ''),
                    ('^', ''), ('<', ''), ('>', ''), ('*', ''), ('/', ''),
                    ('\\', ''), ('£', ''), ('', ''), ('ú', ''), ('‘', ''),
                    ('&', ''), (')', ' '), ('(', ' ')]
    for old, new in replacements:
        s = s.replace(old, new)
    # Finally space-separate the encoded smiley braces.
    s = separate_char(s, '{')
    s = separate_char(s, '}')
    return s
def replace_digits(s):
    """Replace every run of digits in *s* with a space-padded '!'.

    E.g. 'abc123def' -> 'abc ! def'. The scan iterates over the ORIGINAL
    string while *s* shrinks; `j` is adjusted after each replacement so it
    keeps tracking the position within the modified string.
    """
    start_num = -1
    digits = '0123456789'
    j = 0
    for c in s:
        if (digits.find(c) != -1) and (start_num == -1):
            start_num = j  # a digit run starts at position j (in current s)
        elif (digits.find(c) == -1) and (start_num != -1):
            temp = s[start_num: j]
            j -= len(temp) - 1  # account for the run collapsing into '!'
            s = s.replace(temp, '!', 1)
            start_num = -1
        j += 1
    if start_num != -1:
        # BUG FIX: the original read `s[start_num: len(s[i]) - 1]`, where `i`
        # is undefined -> NameError whenever the string ends with a digit
        # run. Take the trailing run from start_num to the end instead.
        temp = s[start_num:]
        s = s.replace(temp, '!', 1)
    s = separate_char(s, '!')
    return s
def lemmatize(s):
    """Stem every whitespace-separated word of *s* with the Lancaster
    stemmer and rejoin them with single spaces."""
    stemmer = LancasterStemmer()
    return ' '.join(stemmer.stem(word) for word in s.split())
def normalize_data(s):
    """Full normalization pipeline: translate, lowercase, strip special
    characters, collapse digit runs, then stem."""
    # BUG FIX: the original line was `s = .translate_string(s)` -- a syntax
    # error that made the whole module unimportable. The Translator module
    # imported at the top of this file provides translate_string.
    s = Translator.translate_string(s)
    s = s.lower()
    s = delete_special_chars(s)
    s = replace_digits(s)
    s = lemmatize(s)
    return s
| true
|
0afee82bc951a4c423c01b60a164725a277ebd34
|
Python
|
JuliaZxr/LagouPractice_Yuki
|
/appium_test/test_touchActionDemo.py
|
UTF-8
| 1,334
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/29 12:32
# @Author : Yuki
"""
https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/touch-actions.md
TouchAction
TouchAction对象包含一系列事件。
在所有appium客户端库中,都会创建触摸对象并为其分配一系列事件。
规范中可用的事件是:
press
release
moveTo
tap
wait
longPress
cancel
perform
"""
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
class TestTouchAction:
    """Appium TouchAction demo: press-move-release swipe on the Xueqiu app."""

    def setup(self):
        # Desired capabilities for the local Android emulator session;
        # noReset keeps the app state between runs.
        desire_cap = {
            "platformName": "android",
            "deviceName": "127.0.0.1:7555",
            "appPackage": "com.xueqiu.android",
            "appActivity": ".view.WelcomeActivityAlias",
            "noReset": "true"
        }
        self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desire_cap)
        self.driver.implicitly_wait(5)
    def teardown(self):
        self.driver.quit()
    def test_touchAction(self):
        # Create a TouchAction bound to the driver.
        action = TouchAction(self.driver)
        # Print the window size if needed for coordinate tuning:
        # print(self.driver.get_window_rect())
        # press = touch down, move_to = drag, release = touch up,
        # perform = execute the queued gesture (an upward swipe here).
        action.press(x=360, y=1150).move_to(x=360, y=400).release().perform()
| true
|
89aef2771bc3507278f56c56fdfaca8672fea77a
|
Python
|
Zander-M/ICS-Exercise
|
/Midterm/stack_union_student.py
|
UTF-8
| 1,290
| 3.671875
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 20 15:58:27 2017
@author: Mebius
Joyce edited it on Mar 12 2018
"""
import stack_student
def stack_union(lst1, lst2):
    """find the union of two *sorted* lists, with no duplicate in them
    Arguments: two sorted lists, each in ascending order
    Return: one list, also in *ascending* order
    Example: stack_union([0, 1], [0, 2, 4]) returns [0, 1, 2, 4]
    """
    s1 = stack_student.Stack()
    s1.push_list(lst1)
    s2 = stack_student.Stack()
    s2.push_list(lst2)
    union = []
    #-------------your code below-----------------#
    # Repeatedly pop the larger top (or both tops when equal) so `union`
    # collects the merged tail of the two stacks without duplicates.
    while s1.size() != 0 and s2.size() != 0:
        if s1.peek() == s2.peek():
            a = s1.pop()
            s2.pop()
        else:
            a = s1.pop() if s1.peek() > s2.peek() else s2.pop()
        if len(union) == 0 or a != union[-1]:
            union.append(a)
    union.sort()
    #------------end of your code-----------------#
    # BUG FIX: the original tested `s1.size != 0`, which compares the bound
    # method object itself (always truthy), so s2's leftover items were
    # never returned. Call the method to get the actual element count.
    return s1.items + union if s1.size() != 0 else s2.items + union
if __name__ == "__main__":
    # Smoke test: union of two random sorted 5-element samples.
    import random
    random.seed(0)
    sample_a = [random.randint(1, 20) for _ in range(5)]
    sample_b = [random.randint(1, 20) for _ in range(5)]
    sample_a.sort()
    sample_b.sort()
    print("l1: ", sample_a)
    print("l2: ", sample_b)
    print(stack_union(sample_a, sample_b))
| true
|
dd63bb71b24d7fd3fd139603f9e10f003b56cbea
|
Python
|
CalvinMakelky/datasciencecoursera
|
/clientHW6.py
|
UTF-8
| 299
| 2.9375
| 3
|
[] |
no_license
|
import socket
# Simple echo-style client (Python 2): read a line from the user, send it to
# the local server on port 21000, and print the reply.
host = socket.gethostname()
port = 21000
print 'Connecting to ', host, port
while True:
    # NOTE(review): a fresh connection is opened for every message -- the
    # server presumably closes after one exchange; confirm this matches it.
    s = socket.socket()
    s.connect((host, port))
    msg = raw_input('CLIENT >> ')
    s.send(msg)
    msg = s.recv(1024)
    print 'SERVER >> ', msg
    s.close()
| true
|
fd6324baad96b1780839732d2c46830db490873b
|
Python
|
KatrinaHoffert/stroke-radius-segmentation
|
/run_dilation.py
|
UTF-8
| 2,561
| 3.125
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
'''
Performs dilation (utilizing the dilate module) on all stroke/point images.
'''
import os, re, sys, enum
from dilate import no_error_dilate
from common import Study, dilation_radii
def dilate(study, strokes_loc, ground_truth_loc, output_loc, radius_range):
    '''
    Dilates all files.
    study: The Study used, which determines naming scheme.
    strokes_loc: Folder location for where the stroke files are.
    ground_truth_loc: Folder location for where the ground truth files are.
    output_loc: Folder location to save the dilated files in.
    radius_range: A sized iterable yielding the radiuses to dilate each image for.
    '''
    i = 0
    files = os.listdir(strokes_loc)
    for file in files:
        # Extract data from file name.
        # BUG FIX: raw strings so '\d' is a regex digit class rather than a
        # deprecated string escape (a SyntaxError in future Python versions).
        if study == Study.Rau:
            file_re = re.search(r'(\d+)-(\d+)', file)
        else:
            file_re = re.search(r'(\d+)-(\d+)-(\d+)', file)
        # `is None` is the idiomatic (and safe) None test.
        if file_re is None: continue
        participant_id = file_re.group(1)
        file_id = file_re.group(2)
        if study == Study.Yuanxia: time_pressure = file_re.group(3)
        # Run the dilation for all files with all dilation radius values
        for radius in radius_range:
            i += 1
            print('\rProcessing file', i, 'of', len(files) * len(radius_range), end='')
            sys.stdout.flush()
            background_label = 149
            foreground_label = 29
            input_image = strokes_loc + '/' + file
            ground_truth_image = ground_truth_loc + '/' + file_id + '-GT.png'
            if study == Study.Rau:
                output_image = output_loc + '/' + participant_id + '-' + file_id + '-' + str(radius) + '-dilate.png'
            else:
                output_image = output_loc + '/' + participant_id + '-' + file_id + '-' + time_pressure + '-' + str(radius) + '-dilate.png'
            # Skip files that have already been dilated
            if os.path.exists(output_image): continue
            no_error_dilate(input_image, ground_truth_image, output_image, radius, foreground_label, background_label)
# Dilate all three datasets: Rau's strokes, Rau's points, Yuanxia's points.
print('Processing Rau\'s strokes')
dilate(Study.Rau, './rau/strokes', './rau/ground_truth', './rau/dilated_strokes', dilation_radii)
print('\nProcessing Rau\'s points')
dilate(Study.Rau, './rau/points', './rau/ground_truth', './rau/dilated_points', dilation_radii)
print('\nProcessing Yuanxia\'s points')
dilate(Study.Yuanxia, './yuanxia/points', './yuanxia/ground_truth', './yuanxia/dilated', dilation_radii)
print()
| true
|
8f0b06b67d96e03b869356fb235b4190f344de07
|
Python
|
XinpeiWangMRI/MRI-AUTOMAP
|
/Other files/Automap_chongduan
|
UTF-8
| 1,250
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 09:10:16 2018
@author: chongduan
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
import math
import time
from generate_input import load_images_from_folder
# Load training data:
tic1 = time.time()
# Folder with images
dir_train = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon"
n_cases = (0,1) # load image data from 0 to 1
X_train, Y_train = load_images_from_folder(  # Load images for training
    dir_train,
    n_cases,
    normalize=False,
    imrotate=False)
toc1 = time.time()
print('Time to load data = ', (toc1 - tic1))
print('X_train.shape at input = ', X_train.shape)
print('Y_train.shape at input = ', Y_train.shape)

# Minimal MLP: flatten -> 512 ReLU -> dropout -> 10-way softmax.
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(512, activation=tf.nn.relu),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# BUG FIX: the original called model.fit(x_train, y_train) and
# model.evaluate(x_test, y_test), but only X_train / Y_train are defined
# above -> NameError at runtime. No held-out split is loaded here, so
# evaluation also runs on the training data.
# TODO: load a proper test split and evaluate on it instead.
model.fit(X_train, Y_train, epochs=5)
model.evaluate(X_train, Y_train)
| true
|
f9732e48f0533ade4ac82d514cf4b18fd6c42035
|
Python
|
earthinversion/PhD-Thesis-codes
|
/Earthquake-location-problem-Monte-Carlo/monte_carlo_eq_loc.py
|
UTF-8
| 3,512
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# Synthetic experiment setup: scatter `numstations` seismic stations at
# random surface positions (z = 0) and define the true earthquake source.
np.random.seed(0)
plt.style.use('seaborn')
minx, maxx = -2, 2
miny, maxy = -3, 3
numstations = 30
stn_locs=[]
xvals = minx+(maxx-minx)*np.random.rand(numstations)
yvals = miny+(maxy-miny)*np.random.rand(numstations)
for num in range(numstations):
    stn_locs.append([xvals[num],yvals[num],0])
# True model parameters to be recovered by the Monte Carlo search below.
eq_loc = [2,2,-2]
vel = 6 #kmps
origintime = 0
def calc_arrival_time(eq_loc, stnx, stny, stnz, vel, origintime):
    """Forward model: arrival time = origin time + distance / velocity.

    eq_loc is the (x, y, z) source position; (stnx, stny, stnz) the station.
    """
    dx = eq_loc[0] - stnx
    dy = eq_loc[1] - stny
    dz = eq_loc[2] - stnz
    separation = np.sqrt(dx**2 + dy**2 + dz**2)
    return separation / vel + origintime
# Build the "observed" arrival times: exact forward-model times at each
# station, perturbed by +/-0.1% multiplicative noise with random sign.
d_obs = []
noise_level_data = 0.001
for stnx, stny, stnz in stn_locs:
    arr = calc_arrival_time(eq_loc, stnx, stny, stnz, vel, origintime)
    sign = np.random.choice([-1,1])
    d_obs.append(arr+sign*noise_level_data*arr)
d_obs = np.array(d_obs)
def get_rand_number(min_value, max_value):
    """Draw one uniform random number from [min_value, max_value)."""
    u = np.random.uniform(0, 1)
    return min_value + (max_value - min_value) * u
## Monte Carlo search: draw random models, keep the one with the smallest
## squared misfit against d_obs; stop early when the improvement is tiny.
num_iterations = 100000
inv_model = []
squared_error0 = 100000  # large initial misfit so the first draw is accepted
mineqx, maxeqx = -3, 3
mineqy, maxeqy = -3, 3
mineqz, maxeqz = 0, -3
gen_num = []  # iteration numbers at which an improvement occurred
lse = []      # corresponding least-squares errors
for i in range(num_iterations):
    # Draw a candidate model: location, velocity and origin time.
    eqx0 = get_rand_number(mineqx, maxeqx)
    eqy0 = get_rand_number(mineqy, maxeqy)
    eqz0 = get_rand_number(mineqz, maxeqz)
    vel0 = get_rand_number(5, 7)
    origintime0 = get_rand_number(-1, 1)
    # Predicted arrival times for this candidate at every station.
    d_pre = []
    for stnx, stny, stnz in stn_locs:
        d_pre.append(calc_arrival_time([eqx0, eqy0, eqz0], stnx, stny, stnz, vel0, origintime0))
    d_pre = np.array(d_pre)
    squared_error = np.sum((d_obs-d_pre)**2)
    if squared_error < squared_error0:
        print(i,squared_error)
        gen_num.append(i)
        lse.append(squared_error)
        m0 = np.array([eqx0, eqy0, eqz0, vel0, origintime0])
        # Terminate when the improvement falls below the tolerance.
        if np.abs(squared_error-squared_error0)<0.001:
            print("Terminated based on tol. value",np.abs(squared_error-squared_error0))
            break
        squared_error0 = squared_error
        inv_model = m0
print("{:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(inv_model[0],inv_model[1],inv_model[2],inv_model[3],inv_model[4]))
fig, ax = plt.subplots(1,1,figsize=(5,5))
ax.loglog(gen_num,lse, 'ko--')
ax.set_xlabel('Generations')
ax.set_ylabel('Least-squares error')
plt.savefig('iterations.png',bbox_inches='tight',dpi=300)
plt.close('all')
## to create the surface
X = np.linspace(-3, 3, 200)
Y = np.linspace(-3, 3, 200)
X, Y = np.meshgrid(X, Y)
Z = (X**2 + Y**2)*0
fig = plt.figure()
ax = plt.axes(projection='3d')
# plot stations
ax.scatter([x[0] for x in stn_locs],[x[1] for x in stn_locs],[x[2] for x in stn_locs],c='b',marker='^',s=50)
# plot surface
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,linewidth=0, antialiased=False,alpha=0.1)
# plot actual EQ
ax.scatter(eq_loc[0],eq_loc[1],eq_loc[2],c='r',marker='*',s=100,label='Actual EQ location')
ax.scatter(inv_model[0],inv_model[1],inv_model[2],c='k',marker='*',s=100,label='Inverted EQ location')
plt.title("Inverted model EQ loc: ({:.2f},{:.2f},{:.2f}),\nvel: {:.2f} and origin time: {:.2f}\nsq_error: {:.2f}".format(inv_model[0],inv_model[1],inv_model[2],inv_model[3],inv_model[4],squared_error),fontsize=8)
ax.set_xlim([-3,3])
ax.set_ylim([-3,3])
ax.set_zlim(-3,0.1)
plt.legend()
plt.savefig('Earthquake_loc_monte_carlo.png',bbox_inches='tight',dpi=300)
plt.close('all')
| true
|
35b46348f92fbe1759948e0ef15e0d6179dff413
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03556/s834395263.py
|
UTF-8
| 73
| 3.171875
| 3
|
[] |
no_license
|
# Competitive-programming snippet: reads N and prints a perfect square.
# Presumably the largest perfect square not exceeding N, with the N+1 form
# guarding the N-is-a-perfect-square case against float sqrt rounding --
# verify against the original problem statement (AtCoder p03556).
N=int(input())
if int((N+1)**.5)**2==N:print(N)
else:print(int(N**.5)**2)
| true
|
b0dc60718efa226ca2e42a421fd7cde62083066e
|
Python
|
makosenkov/Study
|
/lab2/src/main.py
|
UTF-8
| 30,959
| 2.921875
| 3
|
[] |
no_license
|
import math
import matplotlib.pyplot as plot
import statistics as stat
import scipy.stats as stats
import numpy as np
from lab2.src import help
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html
# =================================== Подготовка данных ===============================
# =================================== Data preparation ===============================
# Read the shuffled sample (size in the header, values on the second line)
# and split it into 10 consecutive sub-samples.
# BUG FIX: the original opened the file without ever closing it; `with`
# guarantees the handle is released.
with open("../input/shuffled.txt", 'r') as f:
    line = f.readline().split(" ")
    data = []
    data2 = [[], [], [], [], [], [], [], [], [], []]
    numOfPoints = int(line[2])  # sample size is the third token of the header
    numOfPointsInOneUnderArray = numOfPoints / 10
    for t in f.readline().split(' '):
        data.append(float(t))
# create the 10 sub-samples
res = 0
for i in range(numOfPoints):
    j = int(i // numOfPointsInOneUnderArray)
    res += data[i]
    data2[j].append(data[i])
# sort the values
list.sort(data)
for l in data2:
    list.sort(l)
# =========================== Distribution function and histograms =====================
m = 40  # number of intervals
min_value = min(data)  # smallest value in the sample
max_value = max(data)  # largest value in the sample
distribution_fun = np.zeros(m)
h = (max_value - min_value) / m  # step between interval boundaries
# steps: the m right interval boundaries, from min_value+h up to max_value
steps = []  # array of m points with step h
for t in range(1, m + 1):
    steps.append(min_value + t * h)
# Accumulate the (cumulative) counts per interval; `p` skips over empty
# intervals when a value jumps several bins ahead.
index = 0
for value in data:
    if value > steps[index]:
        p = int(abs(steps[index] - value) // h) + 1
        for i in range(1, p):
            distribution_fun[index + i] = distribution_fun[index]
        index += p
        distribution_fun[index] = distribution_fun[index - 1]
    distribution_fun[index] += 1
plot.title("Функция распределения")
plot.xlim([-0.5, 5])
plot.bar(steps, distribution_fun / numOfPoints)
# plot.savefig("../out/destibutionFunction.png", dpi=200)
# plot.show()
plot.close()
plot.title("Гистограмма")
plot.hist(data, steps)
# plot.savefig("../out/histogram.png", dpi=200)
# plot.show()
plot.close()
# Relative (density-normalized) histogram: per-bin counts divided by h*N.
index = 0
for_relative = np.zeros(m)
for value in data:
    if value > steps[index]:
        p = int(abs(steps[index] - value) // h) + 1
        for_relative[index] = for_relative[index] / (h * numOfPoints)
        index += p
    for_relative[index] += 1
for_relative[m - 1] = for_relative[m - 1] / (h * numOfPoints)
# Sanity check: the area under the relative histogram should be ~1.
ssss_____ = 0
for v in for_relative:
    ssss_____ += v * h
print('Area under an histogram : ', str(ssss_____))
# End of the area check
plot.bar(steps, for_relative, width=h)
plot.title("Относительная гистограмма")
# plot.savefig("../out/relativeHistogram.png", dpi=200)
# plot.show()
plot.close()
# Relative histogram done.
# ================== POINT ESTIMATES =========================
# Index 0 of each list holds the full-sample estimate; indices 1..10 the
# estimates for the ten sub-samples.
print("================== ТОЧЕЧНЫЕ ОЦЕНКИ =========================")
empty = np.zeros(11)
median = [stat.median(data)]  # medians
mean = [stat.mean(data)]  # arithmetic mean (expected value)
mid_range = [(min_value + max_value) / 2]  # mid-range
dispersion = [help.dispersion(data, mean[0])]  # variance s^2
root_of_dispersion = [math.sqrt(dispersion[0])]  # standard deviation s
third_central_moment = [help.central_moment(data, 3, mean[0])]  # 3rd central moment
fourth_central_moment = [help.central_moment(data, 4, mean[0])]  # 4th central moment
asymmetry = [help.asymmetry(third_central_moment[0], root_of_dispersion[0])]  # skewness
kurtosis = [help.kurtosis(fourth_central_moment[0], dispersion[0])]  # excess kurtosis
interquantile_interval = help.interquantile_interval(numOfPoints, 0.5)  # interquantile interval
index = 1
for n in data2:
    median.append(stat.median(n))
    mean.append(stat.mean(n))
    mid_range.append((min(n) + max(n)) / 2)
    # NOTE(review): the moment estimates below pass the FULL sample `data`
    # rather than the sub-sample `n` -- this looks like a copy-paste bug;
    # confirm against the lab's intent before changing.
    dispersion.append(help.dispersion(data, mean[index]))
    root_of_dispersion.append((math.sqrt(dispersion[index])))
    third_central_moment.append(help.central_moment(data, 3, mean[index]))
    fourth_central_moment.append(help.central_moment(data, 4, mean[index]))
    asymmetry.append(third_central_moment[index] / pow(root_of_dispersion[index], 3))
    kurtosis.append(help.kurtosis(fourth_central_moment[index], dispersion[index]))
    index += 1
print('\tMin: ', min_value, ' Max: ', max_value)
print('\tx_med :', median)
print('\tM[x] :', mean)
print('\tx_ср :', mid_range)
print('\ts^2 :', dispersion)
print('\ts :', root_of_dispersion)
print('\t∘µ_3 :', third_central_moment)
print('\t∘µ_4 :', fourth_central_moment)
print('\tAs :', asymmetry)
print('\tEx :', kurtosis)
print('\tJ (номера значений) :', interquantile_interval)
print('\tJ (значения) :',
      "(" + str(data[interquantile_interval[0]]) + ", " + str(data[interquantile_interval[1] - 1]) + ")")
# ==================== ГРАФИКИ ТОЧЕЧНЫХ ПОКАЗАТЕЛЕЙ =========================
plot.figure()
ax1 = plot.subplot(9, 1, 1)
ax1.set_ylim(-0.1, 0.1)
ax1.set_yticks([])
ax1.set_yticklabels([])
plot.title('Медианы')
plot.plot(median, empty, 'r+')
plot.plot(median[0], 0, 'rp')
ax2 = plot.subplot(9, 1, 3)
ax2.set_yticklabels([])
ax2.set_yticks([])
plot.title('Среднее арифметическое (мат ожидание)')
plot.plot(mean, empty, 'b+')
plot.plot(mean[0], 0, 'bp')
ax3 = plot.subplot(9, 1, 5)
ax3.set_yticks([])
ax3.set_yticklabels([])
plot.title('Средина размаха')
plot.plot(mid_range, empty, 'g+')
plot.plot(mid_range[0], 0, 'gp')
ax4 = plot.subplot(9, 1, 7)
ax4.set_yticks([])
ax4.set_yticklabels([])
plot.title('Дисперсия')
plot.plot(dispersion, empty, 'g+')
plot.plot(dispersion[0], 0, 'gp')
ax5 = plot.subplot(9, 1, 9)
ax5.set_yticks([])
ax5.set_yticklabels([])
plot.title('Среднеквадратичное отклонение')
plot.plot(root_of_dispersion, empty, 'g+')
plot.plot(root_of_dispersion[0], 0, 'gp')
# plot.savefig("../out/moments1.png", dpi=200)
# plot.show()
plot.close()
plot.figure()
ax1 = plot.subplot(7, 1, 1)
ax1.set_ylim(-0.1, 0.1)
ax1.set_yticks([])
ax1.set_yticklabels([])
plot.title('Третий центральный момент')
plot.plot(third_central_moment, empty, 'r+')
plot.plot(third_central_moment[0], 0, 'rp')
ax2 = plot.subplot(7, 1, 3)
ax2.set_yticklabels([])
ax2.set_yticks([])
plot.title('Четвертый центральный момент')
plot.plot(fourth_central_moment, empty, 'b+')
plot.plot(fourth_central_moment[0], 0, 'bp')
ax3 = plot.subplot(7, 1, 5)
ax3.set_yticks([])
ax3.set_yticklabels([])
plot.title('Асимметрия')
plot.plot(asymmetry, empty, 'g+')
plot.plot(asymmetry[0], 0, 'gp')
ax4 = plot.subplot(7, 1, 7)
ax4.set_yticks([])
ax4.set_yticklabels([])
plot.title('Эксцесса')
plot.plot(kurtosis, empty, 'g+')
plot.plot(kurtosis[0], 0, 'gp')
plot.savefig("../out/moments2.png", dpi=200)
# plot.show()
plot.close()
# ==================== ГРАФИКИ ТОЧЕЧНЫХ ПОКАЗАТЕЛЕЙ НАЧЕРЧЕНЫ =================
# ======================!!! Part 1.4: Interval estimates !!!==================
print("======================!!! Часть 1.4 . Интервальные оценки !!!===============")
Q = 0.8  # confidence probability
left_chi2inv = 5.7350e+03  # computed in MATLAB as chi2inv((1 + Q) / 2, n-1)
right_chi2inv = 5.4638e+03  # computed in MATLAB as chi2inv((1 - Q) / 2, n-1)
tinv = 1.2817  # computed in MATLAB as tinv(0.9, n-1), 0.9 = (1+q)/2 with q=0.8
# Index 0: full-sample intervals; indices 1..10: the sub-samples.
mean_interval = [help.mean_interval(numOfPoints, mean[0], root_of_dispersion[0], tinv)]
dispersion_interval = [help.dispersion_interval(numOfPoints, dispersion[0], left_chi2inv, right_chi2inv)]
for i in range(1, 11):
    mean_interval.append(help.mean_interval(numOfPoints, mean[i], root_of_dispersion[i], tinv))
    dispersion_interval.append(help.dispersion_interval(numOfPoints, dispersion[i], left_chi2inv, right_chi2inv))
print("\t Интервальные оценки для мат. ожидания" + str(mean_interval))
print("\t Интервальные оценки для дисперсии" + str(dispersion_interval))
# =================== Plotting the INTERVAL ESTIMATES for mean and variance ====================================
# For the mean: one row per (sub-)sample, point estimate between '<'/'>' bounds.
plot.figure()
axes = [plot.subplot(11, 1, 1)]
axes[0].set_yticks([])
axes[0].set_ylabel('Full')
plot.title('Интервальные оценки мат. ожидания')
plot.setp(axes[0].get_xticklabels(), visible=False)
plot.plot(mean[0], 0, 'rp')
plot.plot(mean_interval[0][0], 0, 'b<')
plot.plot(mean_interval[0][1], 0, 'b>')
for i in range(1, 11):
    axes.append(plot.subplot(11, 1, i + 1, sharex=axes[0]))
    axes[i].set_yticks([])
    axes[i].set_ylabel(str(i))
    if i < 10: plot.setp(axes[i].get_xticklabels(), visible=False)
    plot.plot(mean[i], 0, 'r+')
    plot.plot(mean_interval[i][0], 0, 'b<')
    plot.plot(mean_interval[i][1], 0, 'b>')
axes[0].set_xlim([1.97, 2.05])
# plot.savefig("../out/intervalsMoments.png", dpi=200)
# plot.show()
plot.close()
# For the variance: same layout as above.
plot.figure()
axes = [plot.subplot(11, 1, 1)]
axes[0].set_yticks([])
axes[0].set_ylabel('Full')
plot.title('Интервальные оценки дисперсии')
plot.setp(axes[0].get_xticklabels(), visible=False)
plot.plot(dispersion[0], 0, 'rp')
plot.plot(dispersion_interval[0][0], 0, 'b<')
plot.plot(dispersion_interval[0][1], 0, 'b>')
for i in range(1, 11):
    axes.append(plot.subplot(11, 1, i + 1, sharex=axes[0]))
    axes[i].set_yticks([])
    axes[i].set_ylabel(str(i))
    if i < 10: plot.setp(axes[i].get_xticklabels(), visible=False)
    plot.plot(dispersion[i], 0, 'r+')
    plot.plot(dispersion_interval[i][0], 0, 'b<')
    plot.plot(dispersion_interval[i][1], 0, 'b>')
axes[0].set_xlim([0.218, 0.232])
# plot.savefig("../out/intervalDispersion.png", dpi=200)
# plot.show()
plot.close()
# =================== Interval-estimate plots done! ==========================
# ============================= TOLERANT LIMITS ===================================
print("============================= ТОЛЕРАНТНЫЕ ПРЕДЕЛЫ ===================================")
p = 0.95  # probability for the interquantile interval
q = 0.8  # confidence probability
tolerant_interval_average = [0, 0]  # array for the tolerant limits
k = help.find_k(numOfPoints, p, q)  # number of points to discard
print("\tПредел k : " + str(k) + " , Значение биномиального распределения : " + str(
    stats.binom.cdf(numOfPoints - k, numOfPoints, p)))
# For the full sample, relative to the arithmetic mean: drop k points,
# split evenly (or as evenly as possible for odd k) between the two tails.
if k % 2 == 0:
    left_lim = int(k / 2)
    right_lim = int(numOfPoints - k / 2)
    tolerant_interval_average[0], tolerant_interval_average[1] = data[left_lim], data[right_lim]
else:
    left_lim = int((k - 1) / 2)
    right_lim = int(numOfPoints - (k - 1) / 2)
    tolerant_interval_average[0], tolerant_interval_average[1] = data[left_lim], data[right_lim]
# For the full sample, relative to zero:
# take absolute values of the negatives and re-sort the sample.
data_abs = np.sort(abs(np.array(data)))
tolerant_interval_zero = [-data_abs[numOfPoints - k + 1], data_abs[numOfPoints - k + 1]]
print("\tТолерантные пределы для всей выборки относительно среднего: " + str(tolerant_interval_average))
print("\tТолерантные пределы для всей выборки относительно нуля" + str(tolerant_interval_zero))
# Plotting
plot.title("Толерантные пределы для интерквантильного \nпромежутка относительно среднего значения")
plot.yticks([])
plot.plot(tolerant_interval_average[0], 0, 'b<')
plot.plot(tolerant_interval_average[1], 0, 'b>')
plot.plot(data[interquantile_interval[0]], 0, 'ro')
plot.plot(data[interquantile_interval[1]], 0, 'ro')
plot.legend(("Левый толерантный предел", "Правый толерантный предел", "Интерквантильный промежуток"), loc='upper right')
# plot.savefig("../out/tolerantLimsAverage.png", dpi=200)
# plot.show()
plot.close()
plot.title("Толерантные пределы относительно нуля")
plot.yticks([])
plot.plot(tolerant_interval_zero[0], 0, 'b<')
plot.plot(tolerant_interval_zero[1], 0, 'b>')
plot.legend(("Левый толерантный предел", "Правый толерантный предел"), loc='upper right')
# plot.savefig("../out/tolerantLimsZero.png", dpi=200)
# plot.show()
plot.close()
# Compute the parametric tolerant limits of the sub-samples
k_tolerant_multiplier = 1.96
parametric_tolerant_interval = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
for i in range(10):
parametric_tolerant_interval[i][0] = mean[i + 1] - k_tolerant_multiplier * root_of_dispersion[i + 1]
parametric_tolerant_interval[i][1] = mean[i + 1] + k_tolerant_multiplier * root_of_dispersion[i + 1]
print("\tПараметрические толерантные интервалы для подвыборок:")
print("\t\t" + str(parametric_tolerant_interval))
axes = []
plot.title("Параметрические толерантные пределы для подвыборок")
for i in range(10):
if i == 0:
axes.append(plot.subplot(10, 1, i + 1))
else:
axes.append(plot.subplot(10, 1, i + 1, sharex=axes[0]))
axes[i].set_yticks([])
axes[i].set_ylabel(str(i + 1))
if i < 9: plot.setp(axes[i].get_xticklabels(), visible=False)
plot.plot(parametric_tolerant_interval[i][0], 0, 'b<')
plot.plot(parametric_tolerant_interval[i][1], 0, 'b>')
plot.plot(mean[i + 1], 0, 'ro')
# plot.savefig("../out/parametricTolerantLims.png", dpi=200)
# plot.show()
plot.close()
# ============================= PART 2 ========================================
# ========================== METHOD OF MOMENTS ====================================
print("===========================МЕТОД МОМЕНТОВ==========================")
# Fit Normal, Gamma and Laplace distributions by matching moments.
# Normal: location = sample mean, scale = sample standard deviation.
print("\tДля нормального распредления")
print("\t\tc = " + str(mean[0]) + " s = " + str(root_of_dispersion[0]))
# Laplace: location = sample median, lambda = sqrt(2 / variance).
a_for_laplace_moment_method = median[0]
laplace_lambda_moment_method = math.sqrt(2 / dispersion[0])
print("\tДля распределения Лапласа")
print("\t\ta = " + str(a_for_laplace_moment_method) + " lambda = " + str(laplace_lambda_moment_method))
# Gamma: shape k = mean^2 / variance, scale theta = variance / mean.
k_for_gamma_moment_method = (mean[0] ** 2) / dispersion[0]
theta_for_gamma_moment_method = dispersion[0] / mean[0]
print("\tДля Гамма-распредления")
print("\t\tk = " + str(k_for_gamma_moment_method) + " lambda = " + str(theta_for_gamma_moment_method))
# ======================================= MLE ====================================================
print("===========================ММП==========================")
# Normal MLE: sample mean and (biased, 1/N) sample variance.
c_for_normal_mmp = 1 / numOfPoints * sum(data)
dispersion_for_normal_mmp = 1 / numOfPoints * sum((np.array(data) - c_for_normal_mmp) ** 2)
s_for_normal_mmp = math.sqrt(dispersion_for_normal_mmp)
print("\tДля нормального распределения")
print("\t\tc = " + str(c_for_normal_mmp) + " s = " + str(s_for_normal_mmp))
# Laplace MLE: lambda = N / sum |x - a|, with location taken as the mean.
# NOTE(review): the Laplace ML location is normally the sample median;
# mean[0] is used here — confirm that this is intentional.
a_for_laplace_mmp = mean[0]
laplace_lambda_mmp = numOfPoints * (1 / sum(abs(np.array(data) - a_for_laplace_mmp)))
print("\tДля распределения Лапласа")
print("\t\ta = " + str(a_for_laplace_mmp) + " lambda = " + str(laplace_lambda_mmp))
# Gamma MLE: accumulate the sums needed for the log-likelihood equation.
# Only strictly positive values are used (log of a non-positive value is undefined).
for_optimize1 = 0
for_optimize2 = 0
for v in data:
    if v > 0:
        for_optimize1 += v
        for_optimize2 += np.log(v)
for_optimize3 = for_optimize1
for_optimize1 = np.log(for_optimize1 / numOfPoints)
for_optimize2 = for_optimize2 / numOfPoints
c_mmp = for_optimize1 - for_optimize2
# Take the gradient of the Gamma log-likelihood and find its root by bisection.
# The brackets 19 and 12 were found empirically:
# 19 gives a negative function value, 12 a positive one.
gamma_gradient = help.gammaGradient(c_mmp).gamma_gradient
print(gamma_gradient(19))
print(gamma_gradient(12))
k_for_gamma_mmp = help.fmin_bisection(gamma_gradient, 19, 12, 1e-14)
theta_for_gamma_mmp = for_optimize3 / (k_for_gamma_mmp * numOfPoints)
print("\tДля Гамма-распределения")
print("\t\tk = " + str(k_for_gamma_mmp) + " theta = " + str(theta_for_gamma_mmp))
# ======================= Plot the fitted PDFs/CDFs against the histogram
# Normal distribution
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html#scipy.stats.norm
plot.title("Сравнение с плотностью нормального распределения")
plot.bar(steps, for_relative, width=h)
plot.plot(data, stats.norm.pdf(np.array(data), loc=mean[0], scale=root_of_dispersion[0]), 'b')
plot.plot(data, stats.norm.pdf(np.array(data), loc=c_for_normal_mmp, scale=s_for_normal_mmp), 'r')
plot.legend(("Метод моментов", "ММП", "Гистограмма"), loc='upper right')
# plot.savefig("../out/withNorm.png", dpi=200)
# plot.show()
plot.close()
plot.title("Сравнение с нормальным распределением")
plot.bar(steps, distribution_fun / numOfPoints, width=h)
plot.plot(data, stats.norm.cdf(np.array(data), loc=mean[0], scale=root_of_dispersion[0]), 'b')
plot.plot(data, stats.norm.cdf(np.array(data), loc=c_for_normal_mmp, scale=s_for_normal_mmp), 'r')
plot.legend(("Метод моментов", "ММП", "Эмпирическая"), loc='upper right')
plot.savefig("../out/withNormCumulative.png", dpi=200)
# plot.show()
plot.close()
# Laplace distribution
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.laplace.html
plot.title("Сравнение с плотностью распределения Лапласа")
plot.bar(steps, for_relative, width=h)
plot.plot(data,
          stats.laplace.pdf(np.array(data), loc=a_for_laplace_moment_method, scale=1 / laplace_lambda_moment_method),
          'b')
plot.plot(data, stats.laplace.pdf(np.array(data), loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp), 'r')
plot.legend(("Метод моментов", "ММП", "Гистограмма"), loc='upper right')
# plot.savefig("../out/withLaplace.png", dpi=200)
# plot.show()
plot.close()
plot.title("Сравнение с распределением Лапласа")
plot.bar(steps, distribution_fun / numOfPoints, width=h)
# BUG FIX: the moment-method CDF curve previously used loc=mean[0]; the
# moment-method Laplace location is a_for_laplace_moment_method (the sample
# median), as used in the PDF plot above and in the chi-square test below.
plot.plot(data, stats.laplace.cdf(np.array(data), loc=a_for_laplace_moment_method,
                                  scale=1 / laplace_lambda_moment_method), 'b')
plot.plot(data, stats.laplace.cdf(np.array(data), loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp), 'r')
plot.legend(("Метод моментов", "ММП", "Эмпирическая"), loc='upper right')
plot.savefig("../out/withLaplaceCumulative.png", dpi=200)
# plot.show()
plot.close()
# Gamma distribution
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gamma.html
plot.title("Сравнение с плотностью Гамма-распределения")
plot.bar(steps, for_relative, width=h)
plot.plot(data, stats.gamma.pdf(np.array(data), k_for_gamma_moment_method, scale=theta_for_gamma_moment_method), 'b')
plot.plot(data, stats.gamma.pdf(np.array(data), k_for_gamma_mmp, scale=theta_for_gamma_mmp), 'r')
plot.legend(("Метод моментов", "ММП", "Гистограмма"), loc='upper right')
# plot.savefig("../out/withGamma.png", dpi=200)
# plot.show()
plot.close()
plot.title("Сравнение с Гамма-распределением")
plot.bar(steps, distribution_fun / numOfPoints, width=h)
plot.plot(data, stats.gamma.cdf(np.array(data), k_for_gamma_moment_method, scale=theta_for_gamma_moment_method), 'b')
plot.plot(data, stats.gamma.cdf(np.array(data), k_for_gamma_mmp, scale=theta_for_gamma_mmp), 'r')
plot.legend(("Метод моментов", "ММП", "Эмпирическая"), loc='upper right')
plot.savefig("../out/withGammaCumulative.png", dpi=200)
# plot.show()
plot.close()
# ========================== HYPOTHESIS TESTING ============================================
print("========================== ПРОВЕРКА ГИПОТЕЗ ==============================")
# _nk[k] -- number of points that fell into the k-th histogram interval.
# distribution_fun holds cumulative counts, so the per-interval counts are
# simply its first differences (with an implicit 0 before the first entry).
# Replaces the original manual index loop; assumes len(distribution_fun) == m,
# exactly as the original loop did. astype(float) keeps the dtype the
# original np.empty(m) buffer had.
_nk = np.diff(distribution_fun, prepend=0).astype(float)
# =============== Chi-square ==============================================================
print("=============== Хи-квадрат статистика=====================")
print("\tКритическое значение = 45.0763")  # value obtained in MATLAB
print("\tДля нормального распределения")
# For each of the 40 histogram bins, ___Pk is the model probability of the bin
# and _nk[index] the observed count; the statistic accumulates
# (N*Pk - nk)^2 / (N*Pk) over the bins.
index = 0
chi2_stat = 0
for i in range(40):
    if i == 0:
        # The first bin starts at the sample minimum.
        ___Pk = stats.norm.cdf(steps[index], loc=mean[0], scale=root_of_dispersion[0]) - \
                stats.norm.cdf(min_value, loc=mean[0], scale=root_of_dispersion[0])
    else:
        ___Pk = stats.norm.cdf(steps[index], loc=mean[0], scale=root_of_dispersion[0]) - \
                stats.norm.cdf(steps[index - 1], loc=mean[0], scale=root_of_dispersion[0])
    chi2_stat += (numOfPoints * ___Pk - _nk[index]) ** 2 / (numOfPoints * ___Pk)
    index += 1
print("\t\tДля метода моментов = " + str(chi2_stat))
index = 0
chi2_stat = 0
for i in range(40):
    if i == 0:
        ___Pk = stats.norm.cdf(steps[index], loc=c_for_normal_mmp, scale=s_for_normal_mmp) - \
                stats.norm.cdf(min_value, loc=c_for_normal_mmp, scale=s_for_normal_mmp)
    else:
        ___Pk = stats.norm.cdf(steps[index], loc=c_for_normal_mmp, scale=s_for_normal_mmp) - \
                stats.norm.cdf(steps[index - 1], loc=c_for_normal_mmp, scale=s_for_normal_mmp)
    chi2_stat += (numOfPoints * ___Pk - _nk[index]) ** 2 / (numOfPoints * ___Pk)
    index += 1
print("\t\tДля ММП = " + str(chi2_stat))
print("\tДля распределения Лапласа")
index = 0
chi2_stat = 0
for i in range(40):
    if i == 0:
        ___Pk = stats.laplace.cdf(steps[index], loc=a_for_laplace_moment_method,
                                  scale=1 / laplace_lambda_moment_method) - \
                stats.laplace.cdf(min_value, loc=a_for_laplace_moment_method, scale=1 / laplace_lambda_moment_method)
    else:
        ___Pk = stats.laplace.cdf(steps[index], loc=a_for_laplace_moment_method,
                                  scale=1 / laplace_lambda_moment_method) - \
                stats.laplace.cdf(steps[index - 1], loc=a_for_laplace_moment_method,
                                  scale=1 / laplace_lambda_moment_method)
    chi2_stat += (numOfPoints * ___Pk - _nk[index]) ** 2 / (numOfPoints * ___Pk)
    index += 1
print("\t\tДля метода моментов = " + str(chi2_stat))
index = 0
chi2_stat = 0
for i in range(40):
    if i == 0:
        ___Pk = stats.laplace.cdf(steps[index], loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp) - \
                stats.laplace.cdf(min_value, loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp)
    else:
        ___Pk = stats.laplace.cdf(steps[index], loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp) - \
                stats.laplace.cdf(steps[index - 1], loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp)
    chi2_stat += (numOfPoints * ___Pk - _nk[index]) ** 2 / (numOfPoints * ___Pk)
    index += 1
print("\t\tДля ММП = " + str(chi2_stat))
print("\tДля Гамма-распределения")
# Start the loop from 4: earlier bins cover negative values, which have zero
# probability under the Gamma distribution, so ___Pk would be 0 and we would
# divide by zero.
index = 4
chi2_stat = 0
for i in range(4, 40):
    ___Pk = stats.gamma.cdf(steps[index], k_for_gamma_moment_method, scale=theta_for_gamma_moment_method) - \
            stats.gamma.cdf(steps[index - 1], k_for_gamma_moment_method, scale=theta_for_gamma_moment_method)
    chi2_stat += (numOfPoints * ___Pk - _nk[index]) ** 2 / (numOfPoints * ___Pk)
    index += 1
print("\t\tДля метода моментов = " + str(chi2_stat))
index = 4
chi2_stat = 0
for i in range(4, 40):
    ___Pk = stats.gamma.cdf(steps[index], k_for_gamma_mmp, scale=theta_for_gamma_mmp) - \
            stats.gamma.cdf(steps[index - 1], k_for_gamma_mmp, scale=theta_for_gamma_mmp)
    chi2_stat += (numOfPoints * ___Pk - _nk[index]) ** 2 / (numOfPoints * ___Pk)
    index += 1
print("\t\tДля ММП = " + str(chi2_stat))
# =============== KOLMOGOROV-SMIRNOV ==============================================================
print("=============== статистика КОЛМАГОРОВА - СМИРНОВА =====================")
# Compute the critical D for N=5600, alpha=0.2.
___Dcrit = np.sqrt(- (np.log(0.5 * 0.2) / (2 * numOfPoints))) - 1 / (6 * numOfPoints)
print("\tКритическое значение = " + str(___Dcrit))
print("\tДля нормального распределения")
# ___D is the largest gap between the model CDF and the empirical CDF
# index/numOfPoints (data is presumably sorted, as required for this —
# it is indexed by order statistics in the tolerance-limit section above).
___D = 0
index = 1
for val in data:
    _____ddd = abs(stats.norm.cdf(val, loc=mean[0], scale=root_of_dispersion[0]) - index / numOfPoints)
    if _____ddd > ___D: ___D = _____ddd
    index += 1
print("\t\tДля метода моментов = " + str(___D))
___D = 0
index = 1
for val in data:
    _____ddd = abs(stats.norm.cdf(val, loc=c_for_normal_mmp, scale=s_for_normal_mmp) - index / numOfPoints)
    if _____ddd > ___D: ___D = _____ddd
    index += 1
print("\t\tДля ММП = " + str(___D))
print("\tДля распределения Лапласа")
___D = 0
index = 1
for val in data:
    _____ddd = abs(stats.laplace.cdf(val, loc=a_for_laplace_moment_method,
                                     scale=1 / laplace_lambda_moment_method) - index / numOfPoints)
    if _____ddd > ___D: ___D = _____ddd
    index += 1
print("\t\tДля метода моментов = " + str(___D))
___D = 0
index = 1
for val in data:
    _____ddd = abs(stats.laplace.cdf(val, loc=a_for_laplace_mmp, scale=1 / laplace_lambda_mmp) - index / numOfPoints)
    if _____ddd > ___D: ___D = _____ddd
    index += 1
print("\t\tДля ММП = " + str(___D))
print("\tДля Гамма-распределения")
___D = 0
index = 1
for val in data:
    _____ddd = abs(stats.gamma.cdf(val, k_for_gamma_moment_method,
                                   scale=theta_for_gamma_moment_method) - index / numOfPoints)
    if _____ddd > ___D: ___D = _____ddd
    index += 1
print("\t\tДля метода моментов = " + str(___D))
___D = 0
index = 1
for val in data:
    _____ddd = abs(stats.gamma.cdf(val, k_for_gamma_mmp, scale=theta_for_gamma_mmp) - index / numOfPoints)
    if _____ddd > ___D: ___D = _____ddd
    index += 1
print("\t\tДля ММП = " + str(___D))
# ======================= Cramer-von Mises criterion ================================
print("=============== статистика Мизеса =====================")
print("\tКритическое значение = 0.2415")  # value taken from a table
print("\tДля нормального распределения")
# ___w = 1/(12N) + sum_i (F(x_i) - (2i-1)/(2N))^2 over the sample.
___w = 0
index = 1
for val in data:
    ___w += (stats.norm.cdf(val, loc=mean[0], scale=root_of_dispersion[0]) - (2 * index - 1) / (2 * numOfPoints)) ** 2
    index += 1
___w = 1 / (12 * numOfPoints) + ___w
print("\t\tДля метода моментов = " + str(___w))
___w = 0
index = 1
for val in data:
    ___w += (stats.norm.cdf(val, loc=c_for_normal_mmp, scale=s_for_normal_mmp) - (2 * index - 1) / (
            2 * numOfPoints)) ** 2
    index += 1
___w = 1 / (12 * numOfPoints) + ___w
print("\t\tДля ММП = " + str(___w))
print("\tДля распределения Лапласа")
___w = 0
index = 1
for val in data:
    ___w += (stats.laplace.cdf(val, loc=a_for_laplace_moment_method,
                               scale=1 / laplace_lambda_moment_method) - (2 * index - 1) / (2 * numOfPoints)) ** 2
    index += 1
___w = 1 / (12 * numOfPoints) + ___w
print("\t\tДля метода моментов = " + str(___w))
___w = 0
index = 1
for val in data:
    ___w += (stats.laplace.cdf(val, loc=a_for_laplace_mmp,
                               scale=1 / laplace_lambda_mmp) - (2 * index - 1) / (2 * numOfPoints)) ** 2
    index += 1
___w = 1 / (12 * numOfPoints) + ___w
print("\t\tДля ММП = " + str(___w))
print("\tДля Гамма-распределения")
___w = 0
index = 1
for val in data:
    ___w += (stats.gamma.cdf(val, k_for_gamma_moment_method,
                             scale=theta_for_gamma_moment_method) - (2 * index - 1) / (2 * numOfPoints)) ** 2
    index += 1
___w = 1 / (12 * numOfPoints) + ___w
print("\t\tДля метода моментов = " + str(___w))
___w = 0
index = 1
for val in data:
    ___w += (stats.gamma.cdf(val, k_for_gamma_mmp, scale=theta_for_gamma_mmp) - (2 * index - 1) / (
            2 * numOfPoints)) ** 2
    index += 1
___w = 1 / (12 * numOfPoints) + ___w
print("\t\tДля ММП = " + str(___w))
| true
|
bb1e7792704afe5b36323419282af6fb8d906e2e
|
Python
|
vonkez/r6s-rank-bot
|
/stat_providers/rate_limiter.py
|
UTF-8
| 1,049
| 2.671875
| 3
|
[] |
no_license
|
import time
from loguru import logger
from stat_providers.stat_provider import PlayerNotFound
class RateLimiter:
"""
A basic async rate limiter that raises exception if it exceeds the limit.
"""
def __init__(self, limit_per_minute):
self.tokens = limit_per_minute
self.token_rate = limit_per_minute
self.updated_at = time.time()
async def __aenter__(self):
if time.time() - self.updated_at > 60:
self.tokens = self.token_rate
self.updated_at = time.time()
if self.tokens > 0:
self.tokens -= 1
return True
else:
logger.error("Rate limit exceeded")
raise RateLimitExceeded()
async def __aexit__(self, exc_type, exc_val, exc_tb):
if exc_type is PlayerNotFound or exc_type is ConnectionError:
return
if exc_type is not None:
logger.error(exc_type)
logger.error(exc_val)
logger.error(exc_tb)
class RateLimitExceeded(Exception):
    """Raised by RateLimiter.__aenter__ when the per-minute budget is spent."""
    pass
| true
|
b6bafe8ec7b2fd147d6e5809e4ffdab16b1f84e9
|
Python
|
sherirosalia/Python_Challenge_Refactor
|
/main.py
|
UTF-8
| 4,335
| 3.34375
| 3
|
[] |
no_license
|
#refactored script
# #dependencies
import os
import csv
#import operator for running code on line 63
import operator
# CSV file with the raw election rows (one row per ballot).
election_data=os.path.join("election_results.csv")
# Accumulators filled while reading the CSV.
total_votes = 0      # total number of ballots seen
county_votes = {}    # county name -> vote count
candidates = {}      # candidate name -> vote count
#open csv
# Read the CSV once, building both tallies in a single pass.
# NOTE(review): `as election_data` rebinds the filename variable to the file
# handle; harmless here since the path is not reused, but worth renaming.
with open(election_data) as election_data:
    readCSV = csv.reader(election_data, delimiter=',')
    # Read the header
    header = next(readCSV)
    print(f'The headers are: {header}')
    # Loop through the rows to populate the tally dictionaries.
    for row in readCSV:
        # Add to the total vote count
        total_votes = total_votes + 1
        county= row[1]
        candidate = row[2]
        if county not in county_votes:
            # setting default value
            county_votes[county] = 0
        # appending regardless of whether county existed before or not
        # because we are outside of the if statment (forcing function).
        county_votes[county] += 1
        # duplicate above comments substituting candidate
        if candidate not in candidates:
            candidates[candidate] = 0
        candidates[candidate] += 1
# print(county_votes)
# print(f'Total Votes are: {total_votes}')
# print(county_votes.items())
# County with the highest turnout (key whose tally is largest).
County_Most_Votes = (max(county_votes, key=county_votes.get))
# Candidate with the most votes.
candidate_top_votes = (max(candidates, key=candidates.get))
# print(f'Candidate with most votes is: {candidate_top_votes}')
# Print each candidate's tally, ordered by ascending vote count.
for candidate in sorted(candidates, key=candidates.get):
    # print("%d '%s'" % (candidates[candidate], candidate))
    # print("%s'%d'" % (candidate, candidates[candidate], ))
    candidate_by_name = candidate
    votes_per_candidate = candidates[candidate]
    candidate_vote_percent = votes_per_candidate/total_votes * 100
    print(f'{candidate_by_name} votes: ({votes_per_candidate:,}){candidate_vote_percent:.1f}% ')
    candidate_tally = ("%s'%d'" % (candidate, candidates[candidate], ))
    print(candidate_tally)
# Helper so the write-to-file f-string below can embed per-item tallies
# without breaking out of the literal to run a loop.
def access_dictionary(dictionary, total):
    """Format each key's vote count and share of `total`, one line per key.

    Args:
        dictionary: mapping of name -> vote count.
        total: overall vote count used to compute each percentage.

    Returns:
        A string with one "<name> votes: (<n>) percentage: <p>% " line per
        key (newline-terminated), in the dictionary's iteration order.
    """
    layout = ''
    for item in dictionary:
        tally = dictionary[item]
        percentage = tally / total * 100
        layout += f'{item} votes: ({tally:,}) percentage: {percentage:.1f}% \n'
    return layout
# Build the per-candidate and per-county summaries once each.
# (The original called access_dictionary twice more and discarded the
# results; the function is pure, so those calls were dead code.)
print(f'-----------------')
candidate_data = access_dictionary(candidates, total_votes)
county_data = access_dictionary(county_votes, total_votes)
# print to terminal
print(candidate_data)
# Write the analysis summary to a text file (and echo it to the terminal).
results=os.path.join("analysis", "election_analysis.txt")
with open (results, "w") as txt_file:
    # NOTE(review): variable name has a typo ("talies"); kept as-is since it
    # is only used locally below.
    election_talies = (
        f'---Election Tallies ----\n'
        f'------------------------\n'
        f'Total Votes: {total_votes}\n'
        f'------------------------\n'
        f'------------------------\n'
        f'County Votes: \n'
        f'------------------------\n'
        f'{county_data}'
        f'------------------------\n'
        f'------------------------\n'
        f'Largest County Turnout: {County_Most_Votes}\n'
        f'------------------------\n'
        f'------------------------\n'
        f'Candidates: \n'
        f'------------------------\n'
        f'{candidate_data}'
        f'------------------------\n'
        f'------------------------\n'
        f'Winner: {candidate_top_votes} \n'
        f'------------------------\n\n'
    )
    # Echo the exact file contents to the terminal, then persist them.
    print(election_talies, end='')
    txt_file.write(election_talies)
| true
|
ffe1fe1a46c0f209bf276f347ea3dc36f1762140
|
Python
|
PecPeter/Stock_Tracker
|
/main.py
|
UTF-8
| 2,504
| 3.234375
| 3
|
[] |
no_license
|
import os
import sys
import sqlite3
import database
import menuCommands
# Main program code
# TODO: change the options to work like terminal commands,
# make it so that you can choose which database to use, and pass the
# cursor for that database to the different functions. add functions to
# init a new database...
exitProgram = False
if len(sys.argv) != 2 :
    print("Please enter database that you would like to open as a " \
            "console argument")
    exitProgram = True
else :
    # BUG FIX: the old manual split("/") produced "" as the directory for a
    # bare filename like "db.sqlite", and os.path.isdir("") is False, so
    # databases in the current directory were wrongly rejected.
    # os.path.dirname handles both "dir/db.sqlite" and bare "db.sqlite";
    # an empty dirname means the current directory, which always exists.
    dirStr = os.path.dirname(sys.argv[1])
    if dirStr != "" and os.path.isdir(dirStr) == False :
        print("Path to database doesn't exist")
        exitProgram = True
    else :
        # Create the database file on first use, then open a connection.
        if os.path.isfile(sys.argv[1]) == False :
            database.create_database(sys.argv[1])
        conn = sqlite3.connect(sys.argv[1])
        cur = conn.cursor()
# Main menu loop: prompt, dispatch on the user's selection, and repeat
# until option 6 (exit) is chosen or startup above set exitProgram.
while exitProgram == False :
    print(menuCommands.introString)
    userInput = input("Selection: ")
    if userInput == "1" :
        # Execute code for adding a new ticker
        print("\n")
        print("Adding new stock for tracking")
        foundStock = False
        # Keep prompting until a stock is successfully tracked or the
        # user types 'quit'.
        while foundStock == False :
            print("Enter 'quit' to exit to main menu")
            userInput = input("New stock: ")
            if userInput == "quit" :
                break
            else :
                foundStock = database.track_stock(userInput,conn)
    elif userInput == "2" :
        # Execute code for removing a tracked ticker
        print("\n")
        print("Removing stock from tracking")
        foundStock = False
        while foundStock == False :
            print("Enter 'quit' to exit to main menu")
            userInput = input("Remove stock: ")
            if userInput == "quit" :
                break
            else :
                foundStock = database.untrack_stock(userInput,conn)
    elif userInput == "3" :
        # Execute code for listing all currently tracked tickers
        database.list_stock(conn)
    elif userInput == "4" :
        # Execute code for listing all the tickers that can be tracked
        # (placeholder -- not implemented yet)
        print("4")
    elif userInput == "5" :
        # Execute code for updating all the information for the tracked tickers
        # (placeholder -- not implemented yet)
        print("5")
    elif userInput == "6" :
        # Exit
        exitProgram = True
    else :
        print("Incorrect selection. Please try again.")
print("Quiting Stock Ticker Tracker")
| true
|
d7b154a988905c9da76950798e4e8ffbefa3dee8
|
Python
|
dmr8230/UNCW-Projects
|
/CSC231-Python/Chp 8 - Binary Tree/Lab8.py
|
UTF-8
| 5,232
| 4.0625
| 4
|
[] |
no_license
|
# Author: Dani Rowe
# Date: October 20th, 2020
# Description: tests the three different types of traversals by looping
# through the tree
class BinaryTree:
    """A binary tree node: a payload plus optional left/right BinaryTree
    children.

    The traversal methods return space-separated strings. Inorder and
    postorder prefix each payload with a space, so their results carry a
    leading separator (and a doubled one before a right subtree) — this
    matches the class's historical output format exactly.
    """

    def __init__(self, payload=None, leftChild=None, rightChild=None):
        """Build a node.

        :param payload (any value): default None
        :param leftChild (a BinaryTree): default None
        :param rightChild (a BinaryTree): default None
        """
        self.__payload = payload
        self.__leftChild = leftChild
        self.__rightChild = rightChild

    def getPayload(self):
        """Return this node's payload."""
        return self.__payload

    def setPayload(self, payload):
        """Store a new payload on this node."""
        self.__payload = payload

    def getLeftChild(self):
        """Return the left subtree (a BinaryTree or None)."""
        return self.__leftChild

    def setLeftChild(self, leftChild):
        """Attach `leftChild` as the left subtree."""
        self.__leftChild = leftChild

    def getRightChild(self):
        """Return the right subtree (a BinaryTree or None)."""
        return self.__rightChild

    def setRightChild(self, rightChild):
        """Attach `rightChild` as the right subtree."""
        self.__rightChild = rightChild

    def __str__(self):
        """String form of the tree: its inorder traversal."""
        return self.inorderTraversal()

    def inorderTraversal(self):
        """Left subtree, then this node, then the right subtree.

        :return: the traversal as a single string ("" for an empty tree)
        """
        if self.isEmpty():
            return ""
        pieces = []
        left, right = self.getLeftChild(), self.getRightChild()
        if left is not None:
            pieces.append(left.inorderTraversal())
        pieces.append(" " + str(self.getPayload()))
        if right is not None:
            pieces.append(" " + right.inorderTraversal())
        return "".join(pieces)

    def preorderTraversal(self):
        """This node first, then the left and right subtrees.

        :return: the traversal as a single string ("" for an empty tree)
        """
        if self.isEmpty():
            return ""
        pieces = [str(self.getPayload())]
        left, right = self.getLeftChild(), self.getRightChild()
        if left is not None:
            pieces.append(" " + left.preorderTraversal())
        if right is not None:
            pieces.append(" " + right.preorderTraversal())
        return "".join(pieces)

    def postorderTraversal(self):
        """Left subtree, then right subtree, then this node.

        :return: the traversal as a single string ("" for an empty tree)
        """
        if self.isEmpty():
            return ""
        pieces = []
        left, right = self.getLeftChild(), self.getRightChild()
        if left is not None:
            pieces.append(left.postorderTraversal())
        if right is not None:
            pieces.append(" " + right.postorderTraversal())
        pieces.append(" " + str(self.getPayload()))
        return "".join(pieces)

    def isEmpty(self):
        """Return True if the tree is empty, False otherwise.

        :return (boolean): True when this node holds no payload
        """
        # The `self is None` guard only matters for an unbound call like
        # BinaryTree.isEmpty(None); the payload check is the meaningful part.
        return self is None or self.getPayload() is None
def main():
    """Exercise BinaryTree: build a small tree node by node, printing the
    (inorder) string form after each insertion, then show all three
    traversals."""
    BT = BinaryTree()
    print("isEmpty() = " + str(BT.isEmpty()))
    print(BT)
    BT.setPayload(101)
    print("isEmpty() = " + str(BT.isEmpty()))
    print(BT)
    BT.setLeftChild(BinaryTree(50))
    print(BT)
    BT.setRightChild(BinaryTree(250))
    print(BT)
    BT.getLeftChild().setLeftChild(BinaryTree(42))
    print(BT)
    BT.getLeftChild().getLeftChild().setLeftChild(BinaryTree(31))
    print(BT)
    BT.getRightChild().setRightChild(BinaryTree(315))
    print(BT)
    BT.getRightChild().setLeftChild(BinaryTree(200))
    print(BT)
    print("Inorder traversal: " + BT.inorderTraversal())
    print("Preorder traversal: " + BT.preorderTraversal())
    print("Postorder traversal: " + BT.postorderTraversal())
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| true
|
e0af975128b412b0abad597e47344d69370a9b1b
|
Python
|
CommanderErika/DataVisualization-with-Covid19-data
|
/Codes/evoluçao_incidencia_final.py
|
UTF-8
| 5,912
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 17:33:02 2020
@author: erika
"""
# Iportando as bibliotecas #
import pandas as pd
import numpy as np
import matplotlib
import seaborn as sns
import math
import matplotlib.pyplot as plt
# Region the charts are built for #
regiao = 'Trairí'
nome = 'Trairí'
# Legend labels: whole-territory series first, then the municipalities
# expected to survive the top-5 selection below.
labels = ['Incidência acumulada no '+nome, 'Lajes Pintadas', 'Jaçanã', 'São José do Campestre', 'São Bento do Trairí', "Santa Cruz"]
# Load the spreadsheet; normalise the territory column to str for comparisons #
dataset_covid = pd.read_excel('02_07.xlsx')
dataset_covid['TERRITÓRIO DE CIDADANIA'] = dataset_covid['TERRITÓRIO DE CIDADANIA'].astype(str)
# AUXILIARY FUNCTIONS #
def getMunicipios(casos):
    """Return the unique municipality names (column 4) and, aligned with
    them, the population value (column 5) taken from each municipality's
    first occurrence.

    NOTE(review): np.append inside the loop is O(n^2); fine for a handful
    of municipalities, switch to a list if this grows.
    """
    popu = []
    municipios, index = np.unique(casos[:, 4], return_index = True)
    for i in index:
        popu = np.append(popu, casos[i][5])
    return municipios, popu
def ajeitar(casos):
    """Keep only rows that are positive test results, dated 2020 or later,
    and belong to the global `regiao`; np.append flattens the rows, so the
    result is reshaped back to (n_rows, n_columns)."""
    casos_positivos = []
    for i in range(0, casos.shape[0]):
        if(casos[i][0] == 'Positivo' and casos[i][3].year >= 2020 and casos[i][1] == regiao):
            casos_positivos = np.append(casos_positivos, casos[i])
    # Restore the 2-D row/column layout lost by np.append.
    casos_positivos = casos_positivos.reshape((int(casos_positivos.shape[0]/casos.shape[1]),
                                               casos.shape[1]))
    return casos_positivos
# Per-municipality cumulative incidence #
def getValue(casos, dias, municipio, popu):
    """Cumulative case count of `municipio` for each day in `dias`,
    scaled to incidence per 100k inhabitants (`popu`).

    Days on which no case matched any date keep 0 here; getFix()
    forward-fills those gaps later in the pipeline.
    """
    counters = [0] * dias.shape[0]
    flag = False
    counter = 0
    for y, i in enumerate(dias):
        for j in range(0, casos.shape[0]):
            if(casos[j][3] == i):
                if(casos[j][4] == municipio):
                    counter += 1
                    flag = True
        if(flag == True):
            counters[y] = counter
            flag = False
        else:
            continue
    counters = [(l/popu) * 100000 for l in counters]
    return counters
def getValueT(casos, dias, popu):
    """Cumulative case count over the whole territory for each day in
    `dias`, scaled to incidence per 100k of the total population `popu`.
    Same forward-fill caveat as getValue(): zero means "no case dated
    that day", not "zero cases so far"."""
    counters = [0] * dias.shape[0]
    flag = False
    counter = 0
    for y, i in enumerate(dias):
        for j in range(0, casos.shape[0]):
            if(casos[j][3] == i):
                counter += 1
                flag = True
        if(flag == True):
            counters[y] = counter
            flag = False
        else:
            continue
    counters = [(l / popu) * 100000 for l in counters]
    return counters
def getPopu(casos):
    """Sum the distinct population values found in column 5.

    NOTE: np.unique collapses duplicates, so two municipalities that happen
    to share the same population are counted once — this mirrors the
    original behaviour exactly.
    """
    distinct_pops = np.unique(casos[:, 5])
    return sum(int(p) for p in distinct_pops)
# Build the incidence DataFrame #
def getDataframe(casos, dias, listaMunicipios, listapopu):
    """One column with the whole-territory incidence plus one column per
    municipality, each a per-100k cumulative series over `dias`.
    Uses the global `nome` for the first column's label."""
    poputot = getPopu(casos)
    frame1 = pd.DataFrame(getValueT(casos, dias, poputot), columns = ['Incidência em ' + str(nome)])
    for (i, j) in zip(listaMunicipios, listapopu):
        frame2 = pd.DataFrame(getValue(casos, dias, i, j), columns = [i])
        frame1 = pd.concat([frame1, frame2], axis = 1)
    dataframe = frame1
    return dataframe
def getFix(data, listaMunicipios1):
    """Forward-fill each municipality column: a day whose value is 0
    inherits the previous day's cumulative value.

    NOTE(review): the first column is relabelled 'Obitos acumulados em
    <nome>' (accumulated deaths) although it carries the incidence series
    built in getDataframe — the label looks copy-pasted; confirm before
    relying on it (the plots below override legend labels, so output is
    unaffected).
    """
    frame1 = pd.DataFrame(data.iloc[:, 0].values, columns = ['Obitos acumulados em '+nome])
    for i in listaMunicipios1:
        casos = data.loc[:, i].values
        # Replace zeros with the last non-zero (cumulative) value.
        for j in range(1, casos.shape[0]):
            if(casos[j] == 0):
                casos[j] = casos[j-1]
            else:
                continue
        frame2 = pd.DataFrame(casos, columns = [i])
        frame1 = pd.concat([frame1, frame2], axis = 1)
    dataframe = frame1
    return dataframe
def fourValues(data):
    """Keep the first column plus the five columns whose value in the
    last row (position nrows-1, assuming a default RangeIndex) is largest,
    ordered by that value descending.

    (The name is historical: it actually keeps five columns, not four.)
    """
    first = data.iloc[:, 0]
    rest = data.iloc[:, 1:]
    last_row_label = rest.shape[0] - 1
    ranked = rest.sort_values(axis=1, by=[last_row_label], ascending=False)
    top_five = ranked.iloc[:, 0:5]
    return pd.concat([first, top_five], axis=1)
def index(k):
    """Return every other element of `k` (positions 0, 2, 4, ...).

    Used to thin the date tick labels. Replaces the original O(n^2)
    np.append loop with a single stride slice; the returned values are
    unchanged. Also accepts any array-like, not only objects with .shape.

    NOTE: shadows the builtin-free but commonly used name `index`; kept
    for backward compatibility with the pipeline below.
    """
    return np.asarray(k)[::2]
# Pegando os dados que queremos, no caso queremos casos e obitos e os territorios de cidadania #
casos = dataset_covid.loc[:, ['ResultadodoTeste', 'TERRITÓRIO DE CIDADANIA', 'ÓBITO', 'DatadaNotificação','MunicípiodeResidência', 'POP TOTAL 2020']].values
casos = ajeitar(casos) # Deixas apenas as datas validas e os casos Positivos e o Territorio #
listaMunicipios, listapopu = getMunicipios(casos) # Cria uma lista dos Territorios #
# Alterando o formato das datas para facilitar #
casos[:, 3] = pd.to_datetime(casos[:, 3]).strftime('%y/%m/%d')
dias = pd.date_range(start='2020-03-09', end='2020-07-02') # Array com os dias #
dias = pd.to_datetime(dias).strftime('%y/%m/%d')
data = getDataframe(casos, dias, listaMunicipios, listapopu)
# Ajeitando a questão das datas #
data.index = dias
data = data[(data.T != 0).any()]
dataFinal = getFix(data, listaMunicipios) # Ajeitando os dias para ficar apenas os que tiveram casos #
# Ajeitando a questão de pegar os 5 maiores valores #
dataFinal = fourValues(dataFinal) # Pegar os 5 maiores valores #
listaMunicipios = dataFinal.columns.values
# Fazendo as labels #
k2 = data.index.to_numpy(copy = True)
k = pd.to_datetime(k2).strftime('%y/%B')
k = index(k)
k1 = list(range(0, dataFinal.shape[0], 2))
# PLOTANDO OS GRAFICOS #
# Usando Seaborn para fazer os graficos #
sns.set(style = 'whitegrid')
plt.ylabel('Nº de casos acumulados/ 100 mil hab')
sns.despine(left=True, bottom=True)
sns.lineplot(data=dataFinal, linewidth=2.5,palette = 'colorblind', markers = False, dashes = True, legend = False)
plt.legend(labels = labels)
plt.xticks(rotation = 45, ticks = k1, labels = k, fontsize = 8.0, ha = 'right', weight='bold')
| true
|
b5f46271daa92d9dbfea4e21c51a1adeac8743a1
|
Python
|
Miguel-Neves/AA-Assignment1
|
/SVM_algorithm.py
|
UTF-8
| 2,397
| 3.203125
| 3
|
[] |
no_license
|
import numpy as np
from sklearn import metrics
from sklearn.svm import SVC
def validation_curve(Xtrain, ytrain, Xval, yval, C_vals, sigma_vals, kernel):
    """Grid-search an SVC over every (C, sigma, kernel) combination.

    For each combination, fit on the training set and record the squared-error
    cost on both training and validation sets.

    Returns:
        values      -- list of (C, gamma, kernel) tuples, one per combination
        error_train -- training-set error for each combination
        error_val   -- validation-set error for each combination
    """
    values, error_train, error_val = [], [], []
    n_train = Xtrain.shape[0]
    n_val = Xval.shape[0]
    for C in C_vals:
        for sigma in sigma_vals:
            gamma = 1 / sigma  # RBF width parameter: gamma = 1 / sigma
            for kern in kernel:
                model = SVC(C=C, gamma=gamma, kernel=kern, coef0=10.0)
                model.fit(Xtrain, ytrain)
                # Predict on both sets with the freshly fitted model.
                train_pred = model.predict(Xtrain)
                val_pred = model.predict(Xval)
                # Half mean squared error, matching the course cost function.
                error_train.append(1/(2*n_train) * np.sum((train_pred - ytrain)**2))
                error_val.append(1/(2 * n_val) * np.sum((val_pred - yval) ** 2))
                values.append((C, gamma, kern))  # (C, gamma, kernel)
    return values, error_train, error_val
def svm_algorithm(X_train, y_train, X_val, y_val, C_vals, sigma_vals, kernel_vals):
    """Run the SVM grid search, refit the best setting and print its metrics.

    Returns (best validation error, (best_C, best_gamma, best_kernel)).
    """
    print(" - SVM algorithm - ")
    print("C values:", C_vals)
    print("sigma values:", sigma_vals)
    print("kernel values:", kernel_vals)
    values, error_train, error_val = validation_curve(
        X_train, y_train.ravel(), X_val, y_val.ravel(), C_vals, sigma_vals, kernel_vals)
    # Pick the combination with the smallest validation error.
    best_idx = np.argmin(error_val)
    best_C, best_gamma, best_kernel = values[best_idx]
    print("\tResults")
    print("Best values: C=", best_C, " sigma=", 1/best_gamma, " kernel=", best_kernel)
    print("Error: ", error_val[best_idx])
    # Refit the winning configuration and report classification metrics.
    best_model = SVC(C=best_C, gamma=best_gamma, kernel=best_kernel, coef0=10.0)
    best_model.fit(X_train, y_train)
    predictions = best_model.predict(X_val)
    print("Accuracy:", round(metrics.accuracy_score(y_val, predictions)*100, 1), "%")
    print("Precision:", round(metrics.precision_score(y_val, predictions)*100, 1), "%")
    print("Sensitivity (recall):", round(metrics.recall_score(y_val, predictions)*100, 1), "%")
    print("F1 score:", round(metrics.f1_score(y_val, predictions)*100, 1), "%")
    print("Confusion matrix:\n", metrics.confusion_matrix(y_val, predictions))
    # TN FP
    # FN TP
    return error_val[best_idx], values[best_idx]
| true
|
d14a70f5b8b58b164447b9a1d076c95d86b05ae4
|
Python
|
ChrisTensorflow/2021QOSF
|
/QuantumSimulator.py
|
UTF-8
| 4,495
| 3.453125
| 3
|
[] |
no_license
|
"""
=====================
Quantum Circuit Simulator
=====================
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, ion, show
from matplotlib.colors import ListedColormap
import random
from collections import Counter
import itertools
from numpy.linalg import multi_dot
"Ground state"
def ggs(num_qubits): #which is short for get_ground_state(num_qubits):
"return vector of size 2**num_qubits with all zeroes except first element which is 1"
ground_state = np.array([np.zeros(2**num_qubits)])
ground_state[0][0] = 1
return ground_state.T
"Quantum Logic Gates"
"One Qubit Gates"
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
H = np.dot(2**(-0.5), np.array([[1, 1],[1, -1]]))
I = np.array([[1, 0], [0, 1]])
def U(theta, nx, ny, nz):
"theta is the rotation angle and (nx, ny, nz) should be a unit vector in Cartesian coordinates in the Bloch sphere describing the rotation axis"
return 1j*np.cos(theta/2)*I + np.sin(theta/2)*(nx*X + ny*Y + nz*Z)
"Product Gates"
def gpg(arg): #which is short for get_product_gate(*arg):
#input the gates that you want to apply using square brackets and quotation marks (e.g. ["X","H","Z"] if you want X to the first qubit, H to the second and Z to the final qubit)
"return matrix of size 2**num_qubits by 2**num_qubits"
if len(arg) == 2:
return np.kron(eval(arg[-2]), eval(arg[-1]))
else:
argWithoutFirst = arg[1:]
return np.kron(eval(arg[0]), gpg(argWithoutFirst))
"Controlled Gates"
"Define useful matrices, such as the projection operations |0><0| and |1><1|"
P0X0 = np.array([[1, 0], [0, 0]])
P1X1 = np.array([[0, 0], [0, 1]])
def gcg(*arg): #which is short for get_controlled_gate(*arg):
#input the gates that you want to apply using quotation marks (e.g. "c","X","I","I" if you want the first to be the control and X to be (conditionally) applied to the second qubit while the third and fourth qubit is uninvolved
"return matrix of size 2**num_qubits by 2**num_qubits"
n = 0
for x in arg:
n = n + x.count('c')
if n == 0:
print("If there are no controls, please use gpg to implement the circuit correctly.")
if n > 1:
print("I cannot apply gates with multiple controls.")
else:
controlled_gate = np.zeros((2**len(arg),2**len(arg)))
arg0 = ["P0X0" if x == 'c' else 'I' for x in arg]
arg1 = ["P1X1" if x == 'c' else x for x in arg]
controlled_gate = gpg(arg0) + gpg(arg1)
return controlled_gate
"Compile and run the programme"
def comp(circuit): #which is short for compile
circuit_eval = [eval(circuit[x]) for x in range(len(circuit))]
if len(circuit_eval) == 1:
return circuit_eval[0]
else:
return np.linalg.multi_dot(circuit_eval)
def run_programme(circuit, ground_state):
    """Apply the compiled circuit matrix to *ground_state* and return the
    resulting state vector (same column-vector shape as the input)."""
    return np.dot(comp(circuit), ground_state)
"Get the counts"
"This creates a list of all the possible outputs"
def bin_list(n):
return ["".join(seq) for seq in itertools.product("01", repeat = int(n))]
def measure_all(state_vector, num_shots):
weights = [np.abs(state_vector.tolist()[x])**2 for x in range(len(state_vector.tolist()))]
n = np.log2(len(state_vector.tolist()))
outputs = random.choices(bin_list(n), weights, k = num_shots)
cwzm = Counter(outputs) #which is short for counts_without_zeros_mentioned
list_of_all_counts = ['%s : %d' % (x, cwzm[x]) for x in bin_list(n)]
return list_of_all_counts
"This is where the user defines what they want to simulate"
"Define the ground state for the desired number qubits"
my_num_qubits = 3
my_ground_state = ggs(my_num_qubits)
"Define a programme consisting of well-defined gates"
#Note that gpg requires square brackets but gcg does not
my_circuit = ["gpg([\"X\",\"I\",\"I\"])",
"gcg(\"c\",\"I\",\"X\")",
"gcg(\"c\",\"U(np.pi,1,0,0)\",\"I\")",
"gpg([\"I\",\"I\",\"H\"])"
]
"Define the desired number of shots (i.e. the number of times the circuit is run)"
my_shots = 1000
"This tells Python to print the user's input, run their programme and print the results"
print(my_circuit, end = '\n \n')
print(measure_all(run_programme(my_circuit[::-1], my_ground_state), my_shots))
| true
|
1e1b86ca78e93d470577d779a98cecdd77807a3a
|
Python
|
kvpratama/medical-image-lib
|
/nii/Nii3D.py
|
UTF-8
| 2,230
| 2.59375
| 3
|
[] |
no_license
|
from .Nii import Nii
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
class Nii3D(Nii):
    """A 3-D NIfTI volume with plane reorientation helpers.

    On load the voxel data is transposed from the on-disk (x, y, z) layout to
    (z, y, x) and assumed to be in the plane given by *plane* ('axial' by
    default — TODO confirm this matches the source files).
    """

    # (current plane, target plane) -> axes argument for np.transpose.
    # Exactly the permutations the original per-method branches used.
    _REORIENT_AXES = {
        ('axial', 'coronal'): [1, 0, 2],
        ('axial', 'sagittal'): [2, 0, 1],
        ('coronal', 'axial'): [1, 0, 2],
        ('coronal', 'sagittal'): [2, 1, 0],
        ('sagittal', 'axial'): [1, 2, 0],
        ('sagittal', 'coronal'): [2, 1, 0],
    }

    def __init__(self, path, plane='axial'):
        Nii.__init__(self, path)
        # Reorder from the on-disk axis order; np.array() forces a full load.
        self.nii_np = np.transpose(np.array(self.nii.dataobj), axes=[2, 1, 0])
        self.shape = self.nii_np.shape
        self.plane = plane

    def plot(self, index=0):
        """Show slice *index* along the first axis of the current orientation."""
        plt.imshow(self.nii_np[index])
        plt.show()

    def _reorient(self, target):
        """Transpose the volume from the current plane to *target*.

        Updates self.plane and self.shape. Prints a notice and leaves the data
        untouched when already in *target*; prints a warning and changes
        nothing when the current plane is unrecognised.
        """
        if self.plane == target:
            print("current plane")
        elif (self.plane, target) in self._REORIENT_AXES:
            self.nii_np = np.transpose(self.nii_np, axes=self._REORIENT_AXES[(self.plane, target)])
        else:
            print('Unknown plane!')
            return
        self.plane = target
        self.shape = self.nii_np.shape

    def to_axial(self):
        """Reorient the volume to the axial plane."""
        self._reorient('axial')

    def to_coronal(self):
        """Reorient the volume to the coronal plane."""
        self._reorient('coronal')

    def to_sagittal(self):
        """Reorient the volume to the sagittal plane."""
        self._reorient('sagittal')

    def save(self, save_path):
        """Write the volume back out as NIfTI, restoring the on-disk axis order."""
        nii = nib.Nifti1Image(np.transpose(self.nii_np, axes=[2, 1, 0]), affine=None)
        nib.save(nii, save_path)

    def export_raw(self, save_path):
        """Dump the voxels as int16 raw binary; appends '.raw' if missing.

        Uses a context manager so the file is closed even if tofile() raises.
        """
        if save_path[-4:] != '.raw':
            save_path += '.raw'
        with open(save_path, mode='wb') as fileobj:
            np.array(self.nii_np, dtype=np.int16).tofile(fileobj)
| true
|
33840b619d91aa3fd2adc3311cfcf68b80468fea
|
Python
|
Fabfm4/flask-docker
|
/src/firstapp/core/db/models/models.py
|
UTF-8
| 834
| 2.546875
| 3
|
[] |
no_license
|
from datetime import datetime
from firstapp import db, bcrypt
class TimeStampedMixin(object):
    """Mixin adding an integer primary key plus created/updated audit columns."""
    id = db.Column(db.Integer, primary_key=True)
    # Set once at INSERT; datetime.utcnow is passed as a callable so it is
    # evaluated per row (naive UTC timestamps).
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # Refreshed automatically on every UPDATE.
    updated = db.Column(db.DateTime, onupdate=datetime.utcnow)
class CatalogueMixin(TimeStampedMixin):
    """Named catalogue entry with an active flag, on top of the timestamp mixin."""
    name = db.Column(db.String(600), nullable=False)
    # Soft-delete / enable flag; nullable since no default is declared.
    is_active = db.Column(db.Boolean())
class UserMixin(CatalogueMixin):
    """User account with a unique email and a bcrypt-hashed password."""
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Stores the bcrypt hash, never the plaintext; nullable — presumably for
    # accounts that authenticate externally (confirm against callers).
    password = db.Column(db.String(255), nullable=True)
    # Class-level flag; looks like it is meant to be overridden for
    # password-less login flows — verify against callers.
    login_with_password = True

    def set_password(self, password):
        """Hash *password* with bcrypt (cost factor 12) and store the hash.

        NOTE(review): flask-bcrypt returns bytes; this is stored into a String
        column — confirm decoding is handled elsewhere.
        """
        self.password = bcrypt.generate_password_hash(password, 12)

    def verify_password(self, password):
        """Return True when *password* matches the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password, password)
| true
|
c0836ca1e0d756772b0260bb47c246ce8585dc2a
|
Python
|
Merubokkusu/Discord-S.C.U.M
|
/examples/TheGiverOfServers.py
|
UTF-8
| 1,502
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
'''
if someone replies to your message in a dm, this code will use a recent bug to give that person the SERVER badge (credits go to https://github.com/divinityseraph/server-badge-exploit)
here's how it looks: https://www.reddit.com/r/discordapp/comments/jzlnlb/discords_new_reply_feature_is_fun_and_bugged_lol/
this bug works both on servers and DMs :). The below code is only for DMs but can be easily modified to work only on guilds or on both.
**idk if this bug still works
'''
import discum
import time
# Discord selfbot client (discum); the token below is a placeholder.
bot = discum.Client(token='ur token')
bot.discord = 'https://discord.com/api/' #modify base url
@bot.gateway.command
def helloworld(resp):
    """Gateway event handler: announces login, then uses the reply bug to
    grant the SERVER badge to anyone who replies to one of our DM messages."""
    if resp.event.ready_supplemental: #ready_supplemental is sent after ready
        user = bot.gateway.session.user
        print("Logged in as {}#{}".format(user['username'], user['discriminator']))
    if resp.event.message:
        m = resp.parsed.auto()
        if m['content'] == 'turn me into a server':
            bot.sendMessage(m['channel_id'], 'reply to one of my messages and I will make you a server :)')
        # Ignore our own messages so the bot does not react to itself.
        if m['author']['id'] == bot.gateway.session.user['id']:
            return
        if m['type'] == 'reply':
            # Only act when the reply targets one of this account's messages.
            if 'referenced_message' in m and m['referenced_message']['author']['id'] == bot.gateway.session.user['id']:
                time.sleep(1)
                bot.reply(m['channel_id'], m['id'], "The server Gods have allowed me to grant you the server badge. You are now a server :).")
# Start the gateway event loop (blocks).
bot.gateway.run()
| true
|
adb9b55c2d75afc94f217e4805e34ef5269b68de
|
Python
|
QianWanghhu/oconnell-runner
|
/source_runner/parameter_funcs.py
|
UTF-8
| 3,557
| 2.875
| 3
|
[] |
no_license
|
"""Helper functions to load parameters"""
import pandas as pd
__all__ = ['load_parameter_file', 'group_parameters', 'get_initial_param_vals']
def load_parameter_file(fn):
    """Load parameters from file

    Parameters
    ==========
    * fn : str, filename and location to load

    Returns
    ==========
    * pandas.DataFrame of parameter settings
    """
    return pd.read_csv(fn)
# End load_parameter_file()
def group_parameters(parameters):
    """Group parameters for model analysis

    Parameters
    ==========
    * parameters : DataFrame, parameter values loaded from CSV

    Returns
    ==========
    * tuple of (parameter names, {list name: names per location},
      list names, parameter types)
    """
    # One row per Source location: (group key, veneer list name, location).
    groups = [
        ('param_cmtgen', 'param_cmtgen_list', 'v.model.catchment.generation'),
        ('param_linkcons', 'param_linkcons_list', 'v.model.link.constituents'),
        ('param_node', 'param_node_list', 'v.model.node'),
        ('param_linkrout', 'param_linkrout_list', 'v.model.link.routing'),
        ('param_nodecons', 'param_nodecons_list', 'v.model.node.constituents'),
    ]
    param_vename = [list_name for _, list_name, _ in groups]
    # Rows of the parameter table grouped by their Source location.
    by_location = {group: parameters.loc[parameters['Veneer_location'] == location, :]
                   for group, _, location in groups}
    # Parameter names grouped by location, keyed by the veneer list name.
    param_vename_dic = {list_name: by_location[group]['Veneer_name'].values
                        for group, list_name, _ in groups}
    param_names = parameters['Veneer_name'].values
    # Parameter types column, kept as a Series.
    param_types = parameters.loc[:, 'type']
    return param_names, param_vename_dic, param_vename, param_types
# End group_parameters()
def get_initial_param_vals(v, param_names, param_vename, param_vename_dic):
    """Get initial parameter values from Source

    Parameters
    ==========
    * v : Veneer-py object
    * param_names : list[str], of parameter names
    * param_vename : list[str], of veneer-py parameter list names
    * param_vename_dic : dict, mapping of veneer-py list name to its parameters

    Returns
    ==========
    * dict, of initial parameter values
    """
    # One lazy fetcher per Source location, in the same order as the original
    # if-chain; the attribute chains are only touched when a name matches.
    fetchers = [
        (param_vename[0], lambda name: v.model.catchment.generation.get_param_values(name)),
        (param_vename[1], lambda name: v.model.link.constituents.get_param_values(name)),
        (param_vename[2], lambda name: v.model.node.get_param_values(name)),
        (param_vename[3], lambda name: v.model.link.routing.get_param_values(name)),
        (param_vename[4], lambda name: v.model.node.constituents.get_param_values(
            name, node_types=['StorageNodeModel'], aspect='model')),
    ]
    initial_params = {}
    for param in param_names:
        # Later groups win when a name appears in several, matching the
        # original sequence of independent if-statements.
        for group_key, fetch in fetchers:
            if param in param_vename_dic[group_key]:
                initial_params[param] = fetch(param)
    return initial_params
# End get_initial_param_vals()
| true
|
c6c7a7e341c63e60023502971f17e3f278f19162
|
Python
|
goyal705/Hotel-Management-System
|
/hotel.py
|
UTF-8
| 5,933
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
from tkinter import *
from PIL import Image, ImageTk
from customer import Customer_win
from room import Room_win
from details import Details_win
from tkinter import messagebox
class Hotel:
    """Main window of the hotel management system.

    Builds the banner/logo images, the left-hand menu buttons and wires each
    button to its sub-window (Customer, Room, Details) or to logout.
    """
    def __init__(self, root):
        self.root = root
        self.root.title("Hotel management system By Tushar Goyal")
        self.root.geometry('1366x768+0+0')  # window sized to a 1366x768 screen
        # Top banner image spanning the full window width.
        # NOTE(review): image paths are hard-coded absolute paths — this only
        # runs on the original author's machine.
        img1 = Image.open(r"C:\Users\hp\Desktop\Python project\hotel management "
                          r"system\hotel_images\hotel_images\Hotels1.png")
        img1 = img1.resize((1366, 135), Image.ANTIALIAS)  # ANTIALIAS: high-quality downscale
        # Keep a reference on self so Tk does not garbage-collect the image.
        self.photoimg1 = ImageTk.PhotoImage(img1)
        labelimg = Label(self.root, image=self.photoimg1, bd=4, relief=RIDGE)
        labelimg.place(x=0, y=0, width=1366, height=135)
        # Logo image overlaid on the banner's left edge.
        img2 = Image.open(r"C:\Users\hp\Desktop\Python project\hotel management "
                          r"system\hotel_images\hotel_images\Logos.png")
        img2 = img2.resize((230, 135), Image.ANTIALIAS)
        self.photoimg2 = ImageTk.PhotoImage(img2)
        labelimg = Label(self.root, image=self.photoimg2, bd=4, relief=RIDGE)
        labelimg.place(x=0, y=0, width=230, height=135)
        # Title strip under the banner.
        lbl_title = Label(self.root, text="HOTEL MANAGEMENT SYSTEM", font=('times new roman', 40, "bold"),
                          bg='black', fg='gold', bd=4, relief=RIDGE)
        lbl_title.place(x=0, y=135, width=1366, height=50)
        # Main content frame filling the rest of the window.
        main_frame = Frame(self.root, bd=4, relief=RIDGE)
        main_frame.place(x=0, y=185, width=1366, height=600)
        # "Menu" header above the button column.
        lbl_menu = Label(main_frame, text="Menu", font=('times new roman', 20, "bold"),
                         bg='black', fg='gold', bd=4, relief=RIDGE)
        lbl_menu.place(x=0, y=0, width=230)
        # Frame holding the navigation buttons.
        button_frame = Frame(main_frame, bd=4, relief=RIDGE)
        button_frame.place(x=0, y=35, width=230, height=190)
        # Navigation buttons; each opens a Toplevel sub-window.
        cust_button = Button(button_frame, command=self.cust_details, text="Customer", width=22, font=('times new roman', 14, "bold"),
                             bg='black', fg="gold", bd=0, cursor="circle")
        cust_button.grid(row=0, column=0, pady=1)
        room_button = Button(button_frame, command=self.room_details, text="Room", width=22, font=('times new roman', 14, "bold"),
                             bg='black', fg="gold", bd=0, cursor="circle")
        room_button.grid(row=1, column=0, pady=1)
        details_button = Button(button_frame,command=self.details, text="Details", width=22, font=('times new roman', 14, "bold"),
                                bg='black', fg="gold", bd=0, cursor="circle")
        details_button.grid(row=2, column=0, pady=1)
        # No command wired: the Report button is currently a placeholder.
        report_button = Button(button_frame, text="Report", width=22, font=('times new roman', 14, "bold"),
                               bg='black', fg="gold", bd=0, cursor="circle")
        report_button.grid(row=3, column=0, pady=1)
        loguot_button = Button(button_frame, text="Logout", width=22, font=('times new roman', 14, "bold"),
                               bg='black', fg="gold", bd=0, cursor="circle", command=self.exit_win)
        loguot_button.grid(row=4, column=0, pady=1)
        # Large slide image on the right side of the main frame.
        img3 = Image.open(r"C:\Users\hp\Desktop\Python project\hotel management "
                          r"system\hotel_images\hotel_images\slides3.png")
        img3 = img3.resize((1366, 590), Image.ANTIALIAS)
        self.photoimg3 = ImageTk.PhotoImage(img3)
        labelimg = Label(main_frame, image=self.photoimg3, bd=4, relief=RIDGE)
        labelimg.place(x=225, y=0, width=1150, height=550)
        # Two decorative images stacked under the menu on the left side.
        img4 = Image.open(r"C:\Users\hp\Desktop\Python project\hotel management "
                          r"system\hotel_images\hotel_images\600x400-5-1280x720.jpg")
        img4 = img4.resize((230, 210), Image.ANTIALIAS)
        self.photoimg4 = ImageTk.PhotoImage(img4)
        labelimg = Label(main_frame, image=self.photoimg4, bd=4, relief=RIDGE)
        labelimg.place(x=0, y=225, width=230, height=165)
        img5 = Image.open(r"C:\Users\hp\Desktop\Python project\hotel management "
                          r"system\hotel_images\hotel_images\431710_EXT_ZEEE82.png")
        img5 = img5.resize((230, 190), Image.ANTIALIAS)
        self.photoimg5 = ImageTk.PhotoImage(img5)
        labelimg = Label(main_frame, image=self.photoimg5, bd=4, relief=RIDGE)
        labelimg.place(x=0, y=390, width=230, height=165)
    def cust_details(self):
        """Open the Customer sub-window."""
        self.new_window = Toplevel(self.root)
        self.app = Customer_win(self.new_window)
    def room_details(self):
        """Open the Room sub-window."""
        self.new_window = Toplevel(self.root)
        self.app = Room_win(self.new_window)
    def details(self):
        """Open the Details sub-window."""
        self.new_window = Toplevel(self.root)
        self.app = Details_win(self.new_window)
    def exit_win(self):
        """Ask for confirmation, then terminate the whole process on Yes."""
        mdelete = messagebox.askyesno("Logout", "Do You Wish To Logout", parent=self.root)
        if mdelete > 0:
            exit()
        else:
            return
if __name__ == '__main__':
    # Create the Tk root window, hand it to the Hotel controller, and block
    # in the event loop until the window closes.
    root = Tk()
    obj = Hotel(root)
    root.mainloop()
| true
|
836a936540a80bc2b4c4e787f17fcec169cca317
|
Python
|
Dongdongshe/md5Cracker
|
/hashgenerator.py
|
UTF-8
| 1,205
| 2.84375
| 3
|
[] |
no_license
|
import md5
import base64
import string
# Alphabet used by the crypt(3)-style base64 variant ('.' and '/' come first).
b64_str='./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
final_str=""
def b64_from_24bit(a, b ,c ,d):
    """Pack characters a, b, c into 24 bits and append *d* base64 digits to
    the global final_str, least-significant 6 bits first."""
    global final_str
    packed = (ord(a) << 16) | (ord(b) << 8) | ord(c)
    for _ in range(d):
        final_str += b64_str[packed & 0x3f]
        packed >>= 6
# Python 2 script. The structure below appears to follow the md5-crypt
# ($1$, crypt(3)) hashing scheme with password 'c' and salt 'hfT7jp2q'
# — TODO confirm against the reference algorithm.
# Inner digest: password + salt + password.
m=md5.new('chfT7jp2qc')
m_tem=m.digest()
# Outer digest: password + magic '$1$' + salt, then one byte of the inner
# digest per remaining password byte.
m=md5.new('c$1$hfT7jp2q'+m_tem[0])
key='c'
length=len(key)
# For each bit of the password length, mix in either a NUL byte or the
# first password character (the debug prints trace which branch ran).
while(length>0):
    if(length &1 !=0):
        m.update('\0')
        print 1
    else:
        m.update(key[0])
        print 2
    length>>=1
m_alt=m.digest()
print base64.encodestring(m_alt)
# 1000 stretching rounds: alternate password/digest, salt and password are
# mixed in on rounds not divisible by 3 and 7 respectively.
for i in range(0, 1000):
    if( i&1 != 0):
        m=md5.new('c')
    else:
        m=md5.new(m_alt)
    if(i % 3 != 0):
        m.update('hfT7jp2q')
    if(i % 7 != 0):
        m.update('c')
    if(i & 1 !=0):
        m.update(m_alt)
    else:
        m.update('c')
    m_alt=m.digest()
print base64.encodestring(m.digest())
# Re-encode the 16 digest bytes with the crypt base64 alphabet in the
# peculiar byte order the scheme prescribes.
b64_from_24bit(m_alt[0],m_alt[6],m_alt[12],4)
b64_from_24bit(m_alt[1],m_alt[7],m_alt[13],4)
b64_from_24bit(m_alt[2],m_alt[8],m_alt[14],4)
b64_from_24bit(m_alt[3],m_alt[9],m_alt[15],4)
b64_from_24bit(m_alt[4],m_alt[10],m_alt[5],4)
b64_from_24bit('0','0',m_alt[11],2)
| true
|
f318b076d5b2b7218bb0d29b2fbf4edab4dbd195
|
Python
|
behrom/wprowadzenie_do_jezyka_pythona
|
/zadania_laboratorium/zadanie_3_przyblizanie_funkcji/zad_3_poprawka.py
|
UTF-8
| 9,040
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from itertools import izip
def linear_intp(nodes):
    """Build a piecewise-linear interpolator through the given points.

    Fix: the original used Python-2-only constructs (`xrange`, subscripting
    the `zip` result); rewritten so it runs identically on Python 2 and 3.
    Docstrings translated to English.

    Parameters:
        nodes -- list of (x, y) tuples known to lie on the target function,
                 sorted by x, e.g. [(1, 2), (3, 8)]

    Returns:
        a function mapping an array of arguments (numpy.ndarray) to the
        interpolated values (numpy.ndarray)
    """
    # x coordinates of the known points, extracted once instead of per call.
    xs_known = [point[0] for point in nodes]

    def linear(x):
        """Evaluate the piecewise-linear interpolation at each element of *x*."""
        intp_y = []
        for xi in x:
            # Segment containing xi: number of known x's <= xi, minus one.
            idx = sum(1 for t in xs_known if t <= xi) - 1
            # Slope of the bracketing segment, then point-slope evaluation.
            a = (nodes[idx + 1][1] - nodes[idx][1]) / (nodes[idx + 1][0] - nodes[idx][0])
            intp_y.append(a * (xi - nodes[idx][0]) + nodes[idx][1])
        return np.array(intp_y)

    return linear
def nearest_intp(nodes):
    """Build a nearest-neighbour interpolator through the given points.

    Fix: the original used Python-2-only constructs (`xrange`, subscripting
    the `zip` result); rewritten so it runs identically on Python 2 and 3.
    Docstrings translated to English.

    Parameters:
        nodes -- list of (x, y) tuples known to lie on the target function,
                 sorted by x, e.g. [(1, 2), (3, 8)]

    Returns:
        a function mapping an array of arguments (numpy.ndarray) to the
        interpolated values (numpy.ndarray)
    """
    xs_known = [point[0] for point in nodes]

    def nearest(x):
        """Evaluate the nearest-neighbour interpolation at each element of *x*."""
        intp_y = []
        for xi in x:
            idx = sum(1 for t in xs_known if t <= xi) - 1
            # Midpoint between the two bracketing nodes decides which y wins.
            midpoint = (nodes[idx][0] + nodes[idx + 1][0]) / 2.0
            if xi <= midpoint:
                intp_y.append(nodes[idx][1])
            else:
                intp_y.append(nodes[idx + 1][1])
        return np.array(intp_y)

    return nearest
def zero_intp(nodes):
    """Build a zero-order-hold (step) interpolator through the given points.

    Fix: the original used Python-2-only constructs (`xrange`, subscripting
    the `zip` result); rewritten so it runs identically on Python 2 and 3.
    Docstrings translated to English.

    Parameters:
        nodes -- list of (x, y) tuples known to lie on the target function,
                 sorted by x, e.g. [(1, 2), (3, 8)]

    Returns:
        a function mapping an array of arguments (numpy.ndarray) to the
        interpolated values (numpy.ndarray)
    """
    xs_known = [point[0] for point in nodes]

    def zero(x):
        """Evaluate the step interpolation: each xi takes the y of the last
        node at or before it."""
        intp_y = []
        for xi in x:
            idx = sum(1 for t in xs_known if t <= xi) - 1
            intp_y.append(nodes[idx][1])
        return np.array(intp_y)

    return zero
def lagrange_intp(nodes):
    """Build a Lagrange polynomial interpolator through the given points.

    BUG FIX: the original inner loops iterated over the *global* NODES list
    instead of the `nodes` argument, which only worked by accident when both
    happened to be the same list; they now use `nodes`. Also replaced the
    Python-2-only constructs (`xrange`, subscripting `zip`) with forms valid
    on Python 2 and 3, and translated the docstrings to English.

    Parameters:
        nodes -- list of (x, y) tuples known to lie on the target function,
                 e.g. [(1, 2), (3, 8)]

    Returns:
        a function mapping an array of arguments (numpy.ndarray) to the
        interpolated values (numpy.ndarray)
    """
    num_nodes = len(nodes)

    def lagrange(x):
        """Evaluate the Lagrange interpolation polynomial at each element of *x*."""
        intp_y = []
        for xi in x:
            total = 0
            # Standard Lagrange form:
            #   sum_j y_j * prod_{k != j} (x - x_k) / (x_j - x_k)
            for j in range(num_nodes):
                basis = 1.0
                for k in range(num_nodes):
                    if j == k:
                        continue
                    basis *= (xi - nodes[k][0]) / (nodes[j][0] - nodes[k][0])
                total += nodes[j][1] * basis
            intp_y.append(total)
        return np.array(intp_y)

    return lagrange
# Build the sample points known to lie on the target function — here sine
# evaluated at the integers 0..9.
NODES = izip(np.arange(0, 10), np.sin(np.arange(0, 10)))
# Materialise the generator into a list of (x, y) tuples.
NODES = list(NODES)
# Dense grid of arguments at which each interpolation is evaluated.
x = np.arange(0, 9, 0.01)
# Axis labels (kept in the original Polish, as displayed to the user).
plt.xlabel('Wartosci $x$', fontsize=23)
plt.ylabel('Interpolacja wartosci $y$', fontsize=23)
# Light blue grid behind the curves.
plt.grid(color=(0.7, 0.8, 1.0), linestyle='-')
# Plot the known points themselves as blue markers.
plt.plot(zip(*NODES)[0], zip(*NODES)[1], label='punkty', marker='o', linestyle='', markersize=8, color='blue')
# Piecewise-linear interpolation over the dense grid.
plt.plot(x, linear_intp(NODES)(x), label='linear', linestyle='-', linewidth=1.5, color='red')
# Lagrange polynomial interpolation.
plt.plot(x, lagrange_intp(NODES)(x), label='Lagrange', linestyle='-', linewidth=1.5, color='blue')
# Nearest-neighbour interpolation.
plt.plot(x, nearest_intp(NODES)(x), label='nearest', linestyle='-', linewidth=1.5, color='#bfbf00')
# Zero-order-hold (step) interpolation.
plt.plot(x, zero_intp(NODES)(x), label='zero', linestyle='-', linewidth=1.5, color='#00bfbf')
# Legend in the lower-left corner, then show the figure.
plt.legend(loc=3)
plt.show()
| true
|
eda13393ba78b82f0f869e5d5a060d931004b9a7
|
Python
|
tejasarackal/PyConcept
|
/generators/gen.py
|
UTF-8
| 1,076
| 3.5
| 4
|
[] |
no_license
|
from time import sleep
from decorators import dec
@dec.timer
def add1(x, y):
    """Return x + y; execution time is reported by the dec.timer decorator."""
    return x + y
class Adder:
    """Callable adder: instances behave like add1, with the same timing wrapper."""
    @dec.timer
    def __call__(self, x, y):
        return x + y
# Module-level callable instance, interchangeable with add1.
add2 = Adder()
@dec.timer
def heavy_compute():
    """Eagerly build and return [0, 1, ..., 100], sleeping 0.1 s per element
    (~10 s total) — the non-lazy counterpart of compute()."""
    rv = []
    for i in range(101):
        sleep(.1)
        rv.append(i)
    return rv
class Compute:
    """Iterator-protocol version of the slow sequence: yields 0, 1, ...
    with a 0.1 s pause before each value.

    NOTE(review): the bounds check runs after the increment, so this yields
    0..99 — one value fewer than the compute() generator's 0..100.
    """

    def __iter__(self):
        # Restart counting every time iteration begins.
        self.last = 0
        return self

    def __next__(self):
        current = self.last
        self.last += 1
        if self.last > 100:
            raise StopIteration()
        sleep(.1)
        return current
def compute():  # easier to read generator
    """Lazily yield 0, 1, ..., 100, sleeping 0.1 s before each value."""
    value = 0
    while value <= 100:
        sleep(.1)
        yield value
        value += 1
class Api:
    """Facade combining the two timed adders and the slow generator into a
    single streaming call."""

    def first(self, x, y):
        """Sum via the decorated function add1."""
        return add1(x, y)

    def second(self, x, y):
        """Sum via the callable instance add2."""
        return add2(x, y)

    def last(self):
        """The lazy 0..100 generator."""
        return compute()

    def __call__(self, x, y):
        # Stream the two sums first, then every value of the slow sequence.
        yield self.first(x, y)
        yield self.second(x, y)
        yield from self.last()
if __name__ == '__main__':
    # Stream the two sums followed by the 0..100 sequence, printing each value
    # as it arrives.
    a = Api()
    for i in a(10, 20):
        print(i)
| true
|
cb216534ea3c84b63dc789dba76dc70932007388
|
Python
|
pablodarius/mod02_pyhton_course
|
/Python Exercises/3_question10.py
|
UTF-8
| 669
| 3.96875
| 4
|
[] |
no_license
|
import unittest
# Given an input string, count occurrences of all characters within a string
def cout_ocurrences(input_str):
    """Return a dict mapping each character of *input_str* to its number of
    occurrences, keyed in first-appearance order.

    Fix: the original called str.count once per character (O(n^2), and
    redundantly for repeated characters); Counter does it in one O(n) pass.
    """
    from collections import Counter  # local import keeps module-level deps unchanged
    return dict(Counter(input_str))
class testing(unittest.TestCase):
    """Unit tests for cout_ocurrences."""
    def setUp(self):
        # Fixture: sample input and its expected character counts.
        print("Preparing context...")
        self.str = "Apple"
        self.result = {'A': 1, 'p': 2, 'l': 1, 'e': 1}
    def test01(self):
        """The counted dict must match the expected mapping exactly."""
        self.assertEqual(cout_ocurrences(self.str), self.result)
    def tearDown(self):
        # Drop the fixture attributes after each test.
        print("Deleting context...")
        del self.str
        del self.result
if __name__ == "__main__":
    unittest.main()
| true
|
168d0f234c60b692755eed70e19664fd0ef6d783
|
Python
|
Erik-Han/cs170_sp21_project
|
/choose_best_output.py
|
UTF-8
| 997
| 2.5625
| 3
|
[] |
no_license
|
import os
from shutil import copyfile
from parse import read_input_file, read_output_file, write_output_file
if __name__ == "__main__":
test_to_files = {}
inputs_to_graphs = {}
sizes = ('small', 'medium', 'large')
input_dir = "./all_inputs/"
output_dir = "./best_outputs/"
for size in sizes:
for i in range(1,301):
test = size+"-"+str(i)
test_to_files[test] = []
inputs_to_graphs[test] = read_input_file(input_dir+test+".in")
for root, dirs, files in os.walk("./"):
for file in files:
filepath = root+os.sep+file
if filepath.endswith(".out"):
test = file.split(".")[0]
print(filepath,test)
test_to_files[test].append((filepath, read_output_file(inputs_to_graphs[test], filepath)))
for test in test_to_files:
best_output = max(test_to_files[test], key = lambda item: item[1])
copyfile(best_output[0], output_dir+test+".out")
| true
|
2b7f5ff7e8d4804c163b17a93e66ee02980b448d
|
Python
|
diego-codes/next-train
|
/next-train.py
|
UTF-8
| 3,045
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import re
import csv
import datetime
import math
from contextlib import contextmanager
from os import getenv
COLORS = {
'WARNING': '\033[31m',
'OK': '\033[32m',
'END': '\033[0m'
}
# Constants
BASE_DIR = getenv('HOME') + '/bin/metros/ATX/'
NOW = datetime.datetime.now()
WALKING_MINUTES = 18
ROUTE_ID = '550'
DIRECTION = 'S'
SERVICE_IDS = ['110-13301', '110-14701', '110-13401', '110-14801', '110-14601', '110-14304', '110-20804', '110-15504', '110-20805']
KRAMER_STATION_STOP_ID = '5539'
active_service_ids = set()
train_trip_ids = []
next_train = None
@contextmanager
def openCSVDict(filename):
    """Context manager yielding a csv.DictReader over *filename*.

    Fix: the 'rU' open mode (universal newlines) was deprecated since
    Python 3.4 and removed in 3.11; plain 'r' reads these GTFS CSV files
    the same way.
    """
    with open(filename, 'r') as opened_file:
        yield csv.DictReader(opened_file)
def formatTime(time):
return time.strftime('%I:%M:%S %p')
# Get the right service ID for today
with openCSVDict(BASE_DIR + 'calendar_dates.txt') as dates:
for row in dates:
service_id = row['service_id']
date = row['date']
year = int(date[:4])
month = int(date[4:6])
day = int(date[6:8])
if ((service_id in SERVICE_IDS) and ((year, month, day) == (NOW.year, NOW.month, NOW.day))):
active_service_ids.add(service_id)
# Get trip IDs for train routes today
with openCSVDict(BASE_DIR + 'trips.txt') as trips:
for row in trips:
# Add row trip ID to trip ids if it is the rail route southbound for today's service
if (row['route_id'] == ROUTE_ID) and (row['dir_abbr'] == DIRECTION) and (row['service_id'] in active_service_ids):
train_trip_ids.append(row['trip_id'])
with openCSVDict(BASE_DIR + 'stop_times.txt') as stop_times:
for row in stop_times:
if row['trip_id'] in train_trip_ids and row['stop_id'] == KRAMER_STATION_STOP_ID:
departure_time = row['departure_time'].split(':')
hour = int(departure_time[0]) if departure_time[0] != '24' else 0
minute = int(departure_time[1])
departure_time = datetime.datetime(NOW.year, NOW.month, NOW.day, hour, minute)
if departure_time > NOW:
if not next_train:
next_train = departure_time
else:
next_train = departure_time if next_train > departure_time else next_train
if next_train:
time_delta = next_train - NOW
minutes_until_next_train = int(math.floor(time_delta.seconds / 60))
minute_string = 'minutes' if minutes_until_next_train > 1 else 'minute'
should_walk_to_train = minutes_until_next_train > WALKING_MINUTES
advice_color = COLORS['OK'] if should_walk_to_train else COLORS['WARNING']
advice_message = 'OK to take' if should_walk_to_train else 'Wait for next train'
print 'Next Departure: %s' % formatTime(next_train)
print '%s%s: You have %d %s to get to station %s' % (advice_color, advice_message, minutes_until_next_train, minute_string, COLORS['END'])
else:
print '%sSorry, the train does not run today anymore%s' % (COLORS['WARNING'], COLORS['END'])
| true
|
b539490bc60f04d0dc42a15cec8c7633e824f6c3
|
Python
|
yenshipotato/TLGMBT
|
/user_inf.py
|
UTF-8
| 1,501
| 2.734375
| 3
|
[] |
no_license
|
import json
import os
# In-memory cache of user records, keyed by stringified Telegram user id.
usr_dic={}

def adduser(id):
    # Create a fresh record with default fields for a new user.
    user={"latest":"","favorite":"","status":0,"lasttime":"14"}
    usr_dic[str(id)]=user

def wInFile(id):
    # Persist one user's record to user/<id>.json (UTF-8, non-ASCII kept).
    with open("user/"+str(id)+".json","w",encoding="utf-8") as f:
        json.dump(usr_dic[str(id)],f,ensure_ascii=False)

def saveAll():
    # Flush every cached record to disk.
    for key,value in usr_dic.items():
        wInFile(int(key))

# Field setters; each assumes the user already exists in usr_dic.
def setLatest(id,latest):
    usr_dic[str(id)]["latest"]=latest
def setFavorite(id,F):
    usr_dic[str(id)]["favorite"]=F
def setStatus(id,S):
    usr_dic[str(id)]["status"]=S
def setLasttime(id,L):
    usr_dic[str(id)]["lasttime"]=L
def setRecord(id,R):
    usr_dic[str(id)]["record"]=R

# Field getters; raise KeyError if the user (or field) is missing.
def getLatest(id):
    return usr_dic[str(id)]["latest"]
def getFavorite(id):
    return usr_dic[str(id)]["favorite"]
def getStatus(id):
    return usr_dic[str(id)]["status"]
def getLasttime(id):
    return usr_dic[str(id)]["lasttime"]
def getRecord(id):
    return usr_dic[str(id)]["record"]

def readData(id):
    # Load the user's record from disk, or create a default one if absent.
    if os.path.isfile("user/"+str(id)+".json"):
        #file exists: read it into the cache
        with open("user/"+str(id)+".json",encoding="utf-8") as f:
            a=json.load(f)
            usr_dic[str(id)]=a
    else:
        #file does not exist: start with defaults
        adduser(id)

if __name__ == '__main__':
    # Manual smoke test: create one user and write it out.
    adduser(1013334846)
    setLatest(1013334846,"善化 南科")
    setFavorite(1013334846,"善化 台南")
    setStatus(1013334846,1)
    wInFile(1013334846)
| true
|
87da43b90a31aa74ea14e7752aa69fbf58390ccf
|
Python
|
mccloskeybr/pystringevo
|
/stringevolution.py
|
UTF-8
| 1,786
| 3.296875
| 3
|
[] |
no_license
|
import string
import random
# Target the genetic algorithm tries to evolve towards (Python 2: raw_input).
target = str(raw_input('enter desired string: '))
strings = []             # current population of candidate strings
reward = []              # fitness score per candidate (parallel to strings)
currentGeneration = 0
NUM_PER_GENERATION = 10  # population size
MUTATION_RATE = 0.05     # per-character mutation probability

def is_done():
    # Population is kept sorted best-first, so only the head needs checking.
    return strings[0] == target
def sort():
    """Selection-sort the population in place, highest reward first.

    Both module-level parallel lists (`strings` and `reward`) are reordered
    together so strings[k] keeps matching reward[k].

    Bug fix: the inner loop compared reward[j] against reward[i] instead of
    reward[biggest], so `biggest` ended up being the *last* index whose
    reward beat the pivot, not the maximum of the remainder — producing an
    incorrectly ordered population.
    """
    for i in range(len(strings)):
        biggest = i
        for j in range(i, len(strings)):
            if reward[j] > reward[biggest]:  # compare against current best
                biggest = j
        if biggest != i:
            # Swap both parallel lists in lockstep.
            reward[i], reward[biggest] = reward[biggest], reward[i]
            strings[i], strings[biggest] = strings[biggest], strings[i]
def determine_reward():
    # Score each candidate: +2 per target character present anywhere in the
    # candidate, +6 per character in the correct position.
    for i in range(len(strings)):
        r = 0
        for j in range(len(target)):
            if target[j] in strings[i]:
                r += 2
            if target[j] == strings[i][j]:
                r += 6
        reward[i] = r

def breed(str1, str2):
    # Build a child character by character: small chance of a random
    # mutation, smaller chance of a character drawn from anywhere in either
    # parent, otherwise a coin flip between the parents' characters at i.
    c = ""
    for i in range(len(str1)):
        if random.random() < MUTATION_RATE:
            c += random.choice(string.ascii_letters + ' ')
        elif random.random() < 2 * MUTATION_RATE:
            c += random.choice(str1 + str2)
        else:
            c += random.choice(str1[i] + str2[i])
    return c

# Seed the initial population with random strings of the target's length.
for i in range(NUM_PER_GENERATION):
    strings.append("".join(random.choice(string.ascii_letters + ' ') for _ in range(len(target))))
    reward.append(0)
    print strings[i]

# Evolve: score, sort best-first, then replace the worse half with children
# bred from two randomly chosen members of the better half.
while not is_done():
    currentGeneration += 1
    determine_reward()
    sort()
    for i in range(NUM_PER_GENERATION / 2, NUM_PER_GENERATION):
        strings[i] = breed(strings[random.randint(0, NUM_PER_GENERATION / 2)], strings[random.randint(0, NUM_PER_GENERATION / 2)])

for s in strings:
    print s
print "it took %d generations to evolve!" % currentGeneration
| true
|
cbf8eff6c0e028e340f0f962860a72463666566e
|
Python
|
hg-pyun/algorithm
|
/leetcode/leftmost-column-with-at-least-a-one.py
|
UTF-8
| 1,055
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
# """
# This is BinaryMatrix's API interface.
# You should not implement it, or speculate about its implementation
# """
#class BinaryMatrix(object):
#    def get(self, x: int, y: int) -> int:
#    def dimensions(self) -> list[]:

# Fix: `math` was used (math.floor / math.inf) without being imported, so
# the code only ran inside the judge's pre-imported environment.
import math


class Solution:
    def binarySearch(self, binaryMatrix, current_col, size):
        """Return the index of the first 1 in row `current_col` of length
        `size`, or math.inf if the row contains no 1.

        Rows are assumed sorted: all 0s precede all 1s.
        """
        lo = 0
        hi = size - 1
        while lo < hi:
            m = (lo + hi) // 2  # integer midpoint (was math.floor of a float)
            if binaryMatrix.get(current_col, m) == 1:
                hi = m          # a 1 here: first 1 is at m or to its left
            else:
                lo = m + 1      # still 0s: first 1 must be to the right
        if lo == hi and binaryMatrix.get(current_col, lo) == 0:
            return math.inf     # whole row is zeros
        else:
            return lo

    def leftMostColumnWithOne(self, binaryMatrix: 'BinaryMatrix') -> int:
        """Return the smallest column index holding a 1 in any row, -1 if none.

        Runs one binary search per row: O(rows * log cols) calls to get().
        """
        [col_len, row_len] = binaryMatrix.dimensions()
        result = math.inf
        for i in range(col_len):
            left_most = self.binarySearch(binaryMatrix, i, row_len)
            result = min(result, left_most)
        return result if result is not math.inf else -1
| true
|
5e63d576cba3897224ccf9e3fb9b639b9616488e
|
Python
|
NU-ACCESS/Spectral-Microscope-Tools
|
/rotation.py
|
UTF-8
| 518
| 2.625
| 3
|
[] |
no_license
|
from ij import IJ, ImagePlus, ImageStack
from ij.process import ImageProcessor, FloatProcessor
from fiji.util.gui import GenericDialogPlus
gd = GenericDialogPlus("Input Parameters")
gd.addNumericField("Number of Angles", 10, 0) # show 3 decimals
gd.showDialog()
ang = 360/int(gd.getNextNumber())
imp = IJ.getImage()
n_slices = imp.getStack().getSize()
for i in range(1, n_slices+1):
imp.setSlice(i)
s = ((i*ang)-ang)*-1
IJ.run("Rotate... ", "angle="+str(s) + " grid=1 interpolation=Bilinear slice")
| true
|
119197fa0317fb44052f07e1736bca688caa8c3b
|
Python
|
daydaychallenge/leetcode-python
|
/00893/test_groups_of_special_equivalent_strings.py
|
UTF-8
| 440
| 2.765625
| 3
|
[] |
no_license
|
import unittest
from groups_of_special_equivalent_strings import Solution
class TestSolution(unittest.TestCase):
    """Tests for Solution.numSpecialEquivGroups (LeetCode 893)."""

    def test_longestPalindrome_Solution(self):
        # NOTE(review): method name says "longestPalindrome" but the body
        # exercises numSpecialEquivGroups — likely copy-paste from a template.
        sol = Solution()
        self.assertEqual(3, sol.numSpecialEquivGroups(["abcd", "cdab", "cbad", "xyzz", "zzxy", "zzyx"]))
        self.assertEqual(3, sol.numSpecialEquivGroups(["abc", "acb", "bac", "bca", "cab", "cba"]))

if __name__ == '__main__':
    unittest.main()
| true
|
d0808da5888571ff365d104db7f27406467565db
|
Python
|
dhockaday/deep-embedded-music
|
/src/models_embedding/gru_net.py
|
UTF-8
| 1,775
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
import tensorflow as tf
from src.models_embedding.base_model import BaseModel
from src.models_embedding.model_factory import ModelFactory
@ModelFactory.register("GRUNet")
class GRUNet(BaseModel):
    """ A simple 1-dimensional GRU model. """

    def __init__(self, params, model_name="GRUNet"):
        """
        Initialises the model.
        Calls the initialise method of the super class.

        :param params: the global hyperparameters for initialising the model.
        :param model_name: the name of the model.
        """
        # expand_dims=False: GRUs consume the (batch, time, features) input
        # as-is, without adding a channel axis.
        super(GRUNet, self).__init__(params=params, model_name=model_name, expand_dims=False)

        # return_sequences=True so the second GRU receives the full sequence.
        self.gru_1 = tf.keras.layers.GRU(64, return_sequences=True)
        self.gru_2 = tf.keras.layers.GRU(128)

    @tf.function
    def forward_pass(self, inputs, training=None):
        """
        The forward pass through the network.

        :param inputs: the input that will be passed through the model.
        :param training: if the model is training, for disabling dropout, batch norm. etc.
        :return: the output of the forward pass.
        """
        # 1. GRU layer
        x = self.gru_1(inputs)
        # 2. GRU layer (returns only the final state)
        x = self.gru_2(x)

        # Embedding layer (flatten + dense are defined on BaseModel)
        x = self.flatten(x)
        x = self.dense(x)

        # L2 normalisation so embeddings lie on the unit hypersphere
        x = self.l2_normalisation(x)

        return x

    def log_model_specific_layers(self):
        """
        Logs the specific layers of the model.
        Is used to log the architecture of the model.

        :return: None.
        """
        # 1. GRU layer
        self.log_gru_layer(self.gru_1, layer_number=1)
        self.logger.info("---")

        # 2. GRU layer
        self.log_gru_layer(self.gru_2, layer_number=2)
        self.logger.info("---")
| true
|
9f48e1dc70686140af803252f5a887b0950cab1b
|
Python
|
daianasousa/POO
|
/LISTA DE EXERCÍCIOS/S_02_Ati_01_Q01_e_02.py
|
UTF-8
| 2,571
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
class Carro:
    """Toy car model with on/off state and a speed capped by veloc_max."""
    # Class-level defaults; the constructor overwrites cor/nome/veloc_max.
    nome = None
    ano = None
    cor = None
    veloc_max = None
    veloc_atual = 0
    estado = 'Desligado'

    def __init__(self, cor, nome, veloc_max):
        self.cor = cor
        self.nome = nome
        self.veloc_max = veloc_max

    def ligar(self):
        """Turn the car on."""
        self.estado = 'Ligado'
        if self.nome == 'Fusca':
            print('O Fusca está ligado.')
        else:
            print('A Ferrari está ligada.')

    def parar(self):
        """Stop the car (speed back to zero)."""
        self.veloc_atual = 0
        if self.nome == 'Fusca':
            print('Seu Fusca está Parado.')
        else:
            print('Sua Ferrari está Parada.')

    def desligar(self):
        """Turn the car off."""
        self.estado = 'Desligado'
        if self.nome == 'Fusca':
            print('Seu Fusca está desligado.')
        else:
            print('Sua Ferrari está desligada.')

    def acelerar(self, valor):
        """Set the current speed to `valor`; only works while 'Ligado'.

        Bug fix: the speed messages interpolated the *global* `fusca`
        object (`{fusca.veloc_atual}`), so a Ferrari instance reported the
        Fusca's speed and the method crashed when no `fusca` global
        existed. They now use `self.veloc_atual`.
        """
        if self.nome == 'Fusca':
            if self.estado == 'Ligado':
                self.veloc_atual = valor
                if self.veloc_atual <= self.veloc_max:
                    print(f'A velocidade atual do Fusca é de {self.veloc_atual} km/h.')
                else:
                    print(f'Voçê passou da velocidade máxima do Fusca que era de {self.veloc_max} Km/h.')
            elif self.estado == 'Desligado':
                print('O Fusca está desligado.')
        else:
            if self.estado == 'Ligado':
                self.veloc_atual = valor
                if self.veloc_atual <= self.veloc_max:
                    # (sic) "Ferrrari" kept as in the original output text
                    print(f'A velocidade atual da Ferrrari é de {self.veloc_atual} km/h.')
                else:
                    print(f'Voçê passou da velocidade máxima da Ferrari que era de {self.veloc_max} Km/h.')
            elif self.estado == 'Desligado':
                print('A Ferrari está desligada.')
# Objects / demo script
fusca = Carro('preto', 'Fusca', 80)
fusca.nome = 'Fusca'
fusca.ano = 1965
fusca.cor = 'preto'
fusca.veloc_max = 80
fusca.veloc_atual = 20
# NOTE(review): lowercase 'ligado' matches neither 'Ligado' nor 'Desligado',
# so the first fusca.acelerar(40) below silently does nothing — confirm intent.
fusca.estado = 'ligado'

ferrari_sr2000 = Carro('vermelho', 'ferrari_sr2000', 300)
ferrari_sr2000.nome = 'ferrari_sr2000'
ferrari_sr2000.ano = 2014
ferrari_sr2000.cor = 'vermelho'
ferrari_sr2000.veloc_max = 300
ferrari_sr2000.veloc_atual = 0
ferrari_sr2000.estado = 'desligado'

fusca.acelerar(40)
ferrari_sr2000.acelerar(200)
fusca.desligar()
ferrari_sr2000.ligar()
ferrari_sr2000.acelerar(320)   # exceeds veloc_max: prints the warning message
ferrari_sr2000.parar()
ferrari_sr2000.desligar()
fusca.ligar()
fusca.acelerar(100)            # exceeds the Fusca's 80 km/h maximum
fusca.desligar()
| true
|
ddf19011834c1554eb1fa5800606541a68cba671
|
Python
|
jellehu/Pythonlessen
|
/lessen/Jaar01/Periode01/Les06/pe6_5.py
|
UTF-8
| 215
| 3.0625
| 3
|
[] |
no_license
|
def kwadraten_som(grondgetallen):
    """Return the sum of squares of the non-negative numbers in *grondgetallen*."""
    return sum(getal ** 2 for getal in grondgetallen if getal >= 0)

print(kwadraten_som([4, 5, 3, -81]))
| true
|
aa9c6671d78cafac45a4b620c54959ae75ba6151
|
Python
|
castlesmadeofcode/pythonFunctions
|
/chickenMonkey.py
|
UTF-8
| 563
| 4.78125
| 5
|
[] |
no_license
|
# Write a program that prints the numbers from 1 to 100.
# For multiples of five (5, 10, 15, etc.) print "Chicken" instead of the number.
# For the multiples of seven (7, 14, 21, etc.) print "Monkey".
# For numbers which are multiples of both five and seven print "ChickenMonkey".

# Bug fixes vs. the original:
#  - range(101) started at 0; 0 is a multiple of every number, so a bogus
#    extra line was printed. The spec above says 1 to 100.
#  - the printed words ("chicken monkey", "chicken", "monkey") did not
#    match the spec's "ChickenMonkey" / "Chicken" / "Monkey".
for n in range(1, 101):
    if n % 5 == 0 and n % 7 == 0:  # multiples of 35
        print("ChickenMonkey")
    elif n % 5 == 0:
        print("Chicken")
    elif n % 7 == 0:
        print("Monkey")
    else:
        print(n)
| true
|
9b56fd98e97a12c48ca49c2475ee38c43e57820f
|
Python
|
sagynangare/Python-Programming
|
/practice program/parent.py
|
UTF-8
| 205
| 2.75
| 3
|
[] |
no_license
|
class Person():
    """Plain data holder for a person's basic contact details."""

    def __init__(self, name, age, address, mobile_no):
        # Expose the constructor arguments directly as public attributes.
        self.name, self.age = name, age
        self.address, self.mobile_no = address, mobile_no
| true
|
a7a51a550b4a07f90e8463d0d1d51e8ed0e4bb2f
|
Python
|
novalb12/ai
|
/Code-OpenCV.py
|
UTF-8
| 1,638
| 2.625
| 3
|
[] |
no_license
|
import cv2
import numpy as np
# Training sheet: 50x50 grid of handwritten digit tiles (OpenCV sample style).
digits = cv2.imread("digits.png", cv2.IMREAD_GRAYSCALE)
test_digits = cv2.imread("test_digits.png", cv2.IMREAD_GRAYSCALE)

# Split the sheet into individual digit tiles and flatten each to a 1-D
# feature vector for k-NN.
rows = np.vsplit(digits, 50)
cells = []
for row in rows:
    row_cells = np.hsplit(row, 50)
    for cell in row_cells:
        cell = cell.flatten()
        cells.append(cell)
cells = np.array(cells, dtype=np.float32)
# Labels: digits 0..9, 250 tiles each (50*50 = 2500 samples total).
k = np.arange(10)
cells_labels = np.repeat(k, 250)

# Test sheet: one flattened sample per row strip.
test_digits = np.vsplit(test_digits, 50)
test_cells = []
for d in test_digits:
    d = d.flatten()
    test_cells.append(d)
test_cells = np.array(test_cells, dtype=np.float32)

# KNN
def cari(img) :
    # Train a fresh k-NN on the digit tiles and classify `img` (k=3).
    # NOTE(review): findNearest expects float32 row samples with the same
    # feature length as the training data; the webcam path below passes a
    # 28x28 uint8 image — confirm shape/dtype before relying on this.
    global cells
    global cells_labels
    knn = cv2.ml.KNearest_create()
    knn.train(cells, cv2.ml.ROW_SAMPLE, cells_labels)
    ret, result, neighbours, dist = knn.findNearest(img, k=3)
    return result

def show_webcam(mirror=True):
    # Capture frames, downscale to 28x28 grayscale and print the k-NN guess
    # until ESC is pressed.
    cam = cv2.VideoCapture(0)
    while True:
        ret_val, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if mirror:
            img = cv2.flip(img, 1)
        gray=cv2.resize(gray, (28,28))
        print(gray.shape)
        print(cari(gray))
        #text = get_string(img)
        #font = cv2.FONT_HERSHEY_SIMPLEX
        #if(len(text)> 0):
            #print(text)
        #cv2.putText(img,text,(0,10),font,0.5,(255,255,255),2,cv2.LINE_AA)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
        cv2.imshow('my webcam', gray)
    cv2.destroyAllWindows()
    cam.release()

def main():
    show_webcam(mirror=True)

if __name__ == '__main__':
    main()
| true
|
2e0c5bcfbf9c6b4ca4a94cb9981467508f1f66bc
|
Python
|
singhv1shal/Style-Transfer-using-Genetic-Programming
|
/GeneticStyleTransfer.py
|
UTF-8
| 4,607
| 2.890625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 16:23:30 2019
@author: Vishal Singh
@email: singhvishal0304@gmail.com
"""
import random
import numpy as np
import itertools
import functools
import operator
def gramMatrix(p):
    """Return the Gram matrix of the per-pixel channel vectors of *p*.

    *p* has shape (C, H, W); the result has shape (H*W, H*W) and holds the
    dot product of every pair of pixel vectors.
    NOTE(review): classic style transfer uses the (C, C) Gram matrix
    (F @ F.T with F shaped (C, H*W)); this computes the transposed variant
    — confirm that is intended.
    """
    # Rearrange to (H*W, C): one channel vector per pixel.
    pixels = p.transpose(1, 2, 0).reshape(-1, p.shape[0])
    return pixels.dot(pixels.T)
'''
fitness score is given by measuring error through euclidean distance between the gram matrices of population evolving
and target style image
'''
def fitness_score(gene_img ,style_im,img_shape):
    # This function transforms the flat 1-D gene back to an image matrix.
    gene_im= np.reshape(a=gene_img, newshape=img_shape)
    #print(gene_im.shape)
    gmContent=gramMatrix(gene_im)
    gmStyle=gramMatrix(style_im)
    # Distance between the Gram matrices of the gene and the style image
    # (lower is better, so this is an error, not a reward).
    q1 = np.subtract(gmStyle,gmContent)
    # Element-wise multiplication squares the error.
    q2=np.multiply(q1,q1)
    fitness =np.mean(q2)
    return fitness
def survival_of_the_fittest(n_survived,current_gene,img_shape,style_im):
    """Select the n_survived genes with the lowest fitness error."""
    # Empty matrix for survivors: one row per surviving gene.
    survived= np.empty((n_survived, current_gene.shape[1]), dtype=np.uint8)
    # Sentinel used to mark already-picked genes.
    # NOTE(review): a huge literal instead of np.inf — works, but fragile.
    maxerror=999999999999999999999999999999999999999999999999999
    # Fitness score per gene (lower = closer to the style image).
    fitness= np.zeros(current_gene.shape[0])
    #print(img_shape)
    for i in range(current_gene.shape[0]):
        fitness[i]=fitness_score(current_gene[i,:],style_im,img_shape)
    for i in range(n_survived):
        # Pick the current minimum-error gene...
        min_index = np.where(fitness == np.min(fitness))
        min_index= min_index[0][0]
        survived[i, :] = current_gene[min_index, :]
        # ...then disqualify it so no gene is selected twice.
        fitness[min_index]=maxerror
    return survived
'''
This function generates offspring equal to initial population- survived
'''
def breed(survived,n_gene,img_shape):
    # Offspring matrix sized for the whole next generation; survivors are
    # copied into the first rows unchanged.
    new_genes = np.empty(shape=(n_gene,
                                functools.reduce(operator.mul, img_shape)),
                         dtype=np.uint8)
    new_genes[0:survived.shape[0], :] = survived

    # Getting how many offspring to be generated.
    n_offspring = n_gene-survived.shape[0]

    # All possible ordered triples of the selected parents.
    survived_permutations = list(itertools.permutations(iterable=np.arange(0, survived.shape[0]), r=3))

    # Randomly selecting the parent permutations used to generate offspring.
    selected_permutations = random.sample(range(len(survived_permutations)),
                                          n_offspring)

    comb_idx = survived.shape[0]
    for comb in range(len(selected_permutations)):
        # Pivots at 1/3 and 2/3 of the gene: each third comes from a
        # different parent of the chosen triple (three-way crossover).
        first = np.int32(new_genes.shape[1]/3)
        second= 2*first
        selected_comb_idx = selected_permutations[comb]
        selected_comb = survived_permutations[selected_comb_idx]

        new_genes[comb_idx+comb, 0:first] = survived[selected_comb[0],
                                                     0:first]
        new_genes[comb_idx+comb, first:second] = survived[selected_comb[1],
                                                          first:second]
        new_genes[comb_idx+comb, second:] = survived[selected_comb[2],
                                                     second:]
    return new_genes
''''
Mutation enables offsprings to deviate from the characteristics of parent genes
'''
def mutation(population,num_parents_mating, mut_percent):
    # Only mutate the offspring rows; parents (first num_parents_mating
    # rows) are left untouched. Mutates `population` in place and returns it.
    for i in range(num_parents_mating, population.shape[0]):
        # implementation inspired by project GARI by Ahmed F. Gad.
        # A predefined percent of genes are selected randomly.
        rand_idx = np.uint32(np.random.random(size=np.uint32(mut_percent/100*population.shape[1]))
                             *population.shape[1])
        # Changing the values of the selected genes randomly (uint8 range).
        new_values = np.uint8(np.random.random(size=rand_idx.shape[0])*256)
        # Updating population after mutation.
        population[i, rand_idx] = new_values
    return population
| true
|
789a510fd3e2bf001542436e4326f46acb117db1
|
Python
|
ChristianEdge/Python
|
/CIS110/Week 9/Edge_Ch9Ex07.py
|
UTF-8
| 1,562
| 3.828125
| 4
|
[] |
no_license
|
'''
Program Name: Ch9Ex07
Program Description:
Author: Christian Edge
Date created: 29 July 2019
Last modified:
Notes of interest: Uses time module
'''
import time
from random import randrange
def main():
    """Entry point: ask for a game count, simulate, and report win odds."""
    intro()
    n = int(input("How many games to simulate? : "))

    #Processing
    wins = simNGames(n)
    printSummary(wins, n)

    outro()
#End main()

#****************
def intro():
    """Print the program banner and a short description."""
    print("\n\t***Welcome to Ch9Ex07***\n")
    print("This program establishes the probability of winning at craps\n")
def simNGames(n):
    """Simulate n games of craps and return how many were won."""
    wins= 0
    for i in range(n):
        if winCraps():
            wins += 1
    return wins

def winCraps():
    """Play one game of craps; return True on a win.

    Come-out roll: 7/11 wins, 2/3/12 (craps) loses; any other total
    becomes the 'point' that must be rolled again before a 7.
    """
    roll = rollDice()
    if roll == 7 or roll == 11:
        return True
    elif roll == 2 or roll == 3 or roll == 12:
        return False
    else:
        return rollForPoint(roll)
def rollDice():
    """Roll two six-sided dice and return their total (2-12)."""
    # Two independent randrange(1, 7) draws summed — the same RNG call
    # sequence as the original a + b form.
    return sum(randrange(1, 7) for _ in range(2))
def rollForPoint(toMake):
    """Keep rolling until the point (toMake) or a 7 appears.

    Returns True if the point is rolled first (win), False on a 7.
    """
    roll = rollDice()
    while roll != 7 and roll != toMake:
        roll = rollDice()
    return roll == toMake

def printSummary(wins, n):
    """Report the win count and win percentage for n simulated games."""
    # NOTE(review): n == 0 would divide by zero here — confirm input is >= 1.
    print("\nThe player wins", wins, "games.")
    print("The probability of a win is : {0:0.2f}%".format(wins / n * 100))

def outro():
    #Output to console the author, program, & date/time program was run
    print("\nAuthor: Christian Edge")
    print("CIS110 Ch9Ex07")
    print(time.asctime(time.localtime(time.time())))
    input("\nPress <Enter> to Quit")

#Run the program
if __name__ == '__main__':
    main()
| true
|
22c04d2cc7b6877210f8f2462096a942104f7c22
|
Python
|
harvard-dce/le-dash
|
/tests/test_banner.py
|
UTF-8
| 1,211
| 2.625
| 3
|
[] |
no_license
|
from le_dash.banner import get_student_list
def test_get_student_info(mocker, student_list_maker):
    """Withdrawn students are filtered out of the returned list."""
    # `mocker` and `student_list_maker` are pytest fixtures (see conftest).
    fake_students = student_list_maker(
        ('12345', 'foo', 'b', 'baz'),
        ('12345', 'jane', 'q', 'public'),
        ('12345', 'fozzy', '', 'bear', 'Withdraw')
    )
    resp_data = {
        "students": {
            "student": [x for x in fake_students]
        }
    }
    mocker.patch('le_dash.banner.banner_req', return_value=resp_data)
    students = get_student_list("9999999")
    assert len(students) == 2
    assert students[1]['first_name'] == 'jane'

def test_get_student_info_one_result(mocker, student_maker):
    """A single student dict (not a list) is normalised to a one-item list."""
    resp_data = {
        "students": {
            "student": student_maker('23456', 'arther', 'j', 'fonzarelli')
        }
    }
    mocker.patch('le_dash.banner.banner_req', return_value=resp_data)
    students = get_student_list("10101010")
    assert isinstance(students, list)
    assert len(students) == 1
    assert students[0]['huid'] == '23456'

def test_get_student_info_no_results(mocker):
    """An empty 'students' payload yields an empty list."""
    mocker.patch('le_dash.banner.banner_req',
                 return_value={'students': {}})
    students = get_student_list("33434334")
    assert len(students) == 0
| true
|
2fdf5a75c8f1d1d70356ba95554ac6a9686da156
|
Python
|
Rybasher/parsepexels
|
/parse.py
|
UTF-8
| 1,663
| 2.96875
| 3
|
[] |
no_license
|
import os, urllib, webbrowser
import requests
from bs4 import BeautifulSoup
import pathlib
import csv
from datetime import datetime
from multiprocessing import Pool
#
# urls = [
# 'https://images.pexels.com/photos/772662/pexels-photo-772662.jpeg',
# 'https://images.pexels.com/photos/1994904/pexels-photo-1994904.jpeg',
# 'https://images.pexels.com/photos/1983772/pexels-photo-1983772.jpeg'
# ]
# Module-level accumulator filled by get_img_data().
urls = []

def get_html(url):
    # Streamed GET so image bodies can be read in chunks later.
    r = requests.get(url, stream=True)
    return r

def get_img_data(html):
    # Scrape every photo <img> src from the pexels photo grid into `urls`.
    soup = BeautifulSoup(html.text, 'lxml')
    imgs = soup.find('div', class_='photos').find_all('img', class_='photo-item__img')
    for img in imgs:
        some = img.get('src')
        urls.append(some)

def get_name(url):
    # Derive a local path from the URL: filename without query string,
    # placed in a folder named after the first dash-separated token.
    name = url.split('/')[-1].split('?')[0]
    folder = name.split('-')[0]
    if not os.path.exists(folder):
        os.makedirs(folder)
    path = os.path.abspath(folder)
    return path + '/' + name

def save_image(name, file_object):
    # Write the streamed response body to disk in 8 KiB chunks.
    with open(name, 'wb') as f:
        for chunk in file_object.iter_content(8192):
            f.write(chunk)

def main():
    url = 'https://www.pexels.com/'
    get_img_data(get_html(url))
    print(len(urls))
    print(urls)
    # NOTE(review): Pool(30) is entered as a context manager but never used
    # (nothing is dispatched to the pool) — confirm whether parallel
    # downloading was intended here.
    with Pool(30):
        for i in urls:
            print(get_name(i).split('-')[0])
            # path = pathlib.Path(i)
            # save_image(get_name(i), get_html(i))
            # # print(path)

if __name__ == "__main__":
    main()
| true
|
237ec23e9b211ffaa5ca2d5a94efd5830cecb12a
|
Python
|
so02e/TIL
|
/Python/Day3/forLab1.py
|
UTF-8
| 134
| 3.3125
| 3
|
[] |
no_license
|
# Print the numbers 1 through 10 on a single line, separated by spaces.
for number in range(1, 11):
    print(number, end=" ")
| true
|
fa3715b14ab4f4f6ccb2357d65d008d6f6de476e
|
Python
|
kaasbroodju/PROG-V1K-PepeHands
|
/functions.py
|
UTF-8
| 7,143
| 3.40625
| 3
|
[] |
no_license
|
import random
import api
import marvel
import arcade
import json
def write_to_json(name, score):
    '''Append one player's name and score to leaderboard.json.

    The file must already exist and contain {"data": {"players": [...]}}.
    '''
    with open('leaderboard.json') as file:
        data = json.load(file)

    data['data']['players'].append({'name': name, 'score': score})

    # Fix: the original opened the output file without a context manager and
    # never closed it, so the handle (and possibly buffered data) leaked.
    with open('leaderboard.json', 'w') as file:
        json.dump(data, file, indent=4)
def sort_leaderbord(leaderboard):
    '''Sort the leaderboard in place by ascending score and return it.

    Replaces the hand-rolled O(n^2) bubble sort with the built-in Timsort:
    O(n log n), stable (equal scores keep their relative order, exactly as
    the strict-> bubble sort did), and still sorts the same list object.
    '''
    leaderboard.sort(key=lambda player: player['score'])
    return leaderboard
def get_leaderboard():
    '''Gets list of players and their scores'''
    # Reads leaderboard.json, which holds {"data": {"players": [...]}}.
    with open('leaderboard.json') as file:
        data = json.load(file)
    return data['data']['players']
def newMultipleChoice(self):
    '''Get new set of options of answer(buttons) for easy/multiple choice mode'''
    self.notation_button_list = arcade.SpriteList()
    self.correctCharacter = api.get_character(True)  # True: include description, comics and series
    print(self.correctCharacter)
    self.characterList = []  # candidate answer names; index 0 seeded with the correct one
    self.possible_answer_buttons = arcade.SpriteList()  # reset the answer buttons
    self.characterList.append(self.correctCharacter['name'])
    self.description = self.correctCharacter['desc']['desc']
    self.charNumber = 0
    # Word-wrap the description: insert '\n' at the first space after
    # every ~30 characters so the whole text fits on screen.
    for char in self.description:
        if self.charNumber >= 30 and char == ' ':
            self.tempString += '\n'
            self.charNumber = 0
        self.tempString += char
        self.charNumber += 1
    self.description = self.tempString
    self.tempString = ''
    # Fetch 9 decoy characters (10 options total).
    for i in range(0, 9):
        self.character = api.get_character()
        """
        TODO
        filter/anonymise the character name in descriptions
        """
        safety = 0
        # Re-draw on duplicates, but cap API requests (daily rate limit).
        # NOTE(review): after 10 retries the duplicate is appended anyway,
        # so duplicate names remain possible — confirm acceptable.
        while self.character['name'] in self.characterList:
            self.character = api.get_character()
            safety += 1
            if safety == 10:
                break
        self.characterList.append(self.character['name'])
    # Shuffle so the correct answer is not always in the same position.
    random.shuffle(self.characterList)
    # First 5 buttons on the left column, remaining 5 on the right.
    # NOTE(review): list.index() returns the *first* occurrence, so
    # duplicate names would stack buttons — see the duplicate note above.
    for index in self.characterList:
        if self.characterList.index(index) < 5:
            self.possible_answer_buttons.append(marvel.CharacterButton(marvel.WINDOW_WIDTH/8, marvel.WINDOW_HEIGHT/6 * (self.characterList.index(index) + 1), 'Button.png', index))
        else:
            self.possible_answer_buttons.append(marvel.CharacterButton(marvel.WINDOW_WIDTH/8*7, marvel.WINDOW_HEIGHT/6 * (self.characterList.index(index) - 4), 'Button.png', index))
def checkAnswerOpen(answerString, correctAnswer):
    '''Checks answer for open/hard mode. Takes; user answer (string), correct answer (string). Returns boolean, True if correct, False if wrong'''
    # Equality already yields exactly the required boolean.
    return answerString == correctAnswer
def checkAnswerMultipleChoice(answer, correctAnswer):
    '''Checks answer for multiple choice/easy mode. Takes; user answer (string), correct answer (string)'''
    # Same contract as checkAnswerOpen: True iff the answer matches exactly.
    return answer == correctAnswer
"""
LEGACY Jasper
Implemented elsewere in different form
def points(answer, correctAnswer, easy, hint):
'''Starts on 25 and removes 1 for every wrong answer, 3 for every hint. Maybe boolean argument for "hint" or smth if false it must be a wrong answer so you remove 1, if true remove 3'''
if easy == True: ##checks difficulty##
rightAnswer = checkAnswerMultipleChoice(answer, correctAnswer) ##checks answer##
if hint == True: ##if hint is asked, subtracts 3 points##
points = points - 3
else:
if rightAnswer == False: ##checks answer and assigns points depending on the answer##
points -= 1
else: ##hard##
checkAnswerOpen(answer, correctAnswer) ##checks answer##
if hint == True: ##if hint is asked, subtracts 3 points##
points = points - 3
else:
if rightAnswer == False: ##checks answer and assigns points depending on the answer##
points = points - 1
return points
"""
"""
LEGACY susan
niet geimplenteerd
def give_nickname():
\"""Asks the user for a username and returns this as string.\"""
nickname = input('Nickname: ')
file = 'scoreboard.json'
try:
with open(file, 'r') as json_file:
data = json.load(json_file)
data = data['scores']
for item in data.keys():
if nickname == item:
print('Nickname taken. Try something else')
give_nickname()
return nickname
except:
return nickname
def write_to_scoreboard(points):
\"\"\"Takes the amount of points earned by player as input. Asks the user to choose a nickname and writes the chosen
nickname and earned score to a jsonfile.\"\"\"
file = 'scoreboard.json'
nickname = give_nickname()
dict_scores = {}
output = {'scores': dict_scores}
dict_scores[nickname] = points
try: #kopieert de json file (naar de lijst waar de eerder geregristreerde inloggegevens ook in staan.
with open(file, 'r') as json_file:
data = json.load(json_file)
data = data['scores']
for item in data:
dict_scores[item] = data[item]
except: #doet niks als er geen inhoud in de json file is.
pass
with open(file, 'w') as json_file:
json_content = json.dump(output, json_file, indent=4)
def leaderboard():
\"""Reads file 'Scoreboard.json', returns a sorted list of the scores with nicknames. Begins with the highest
element.\"""
file = 'scoreboard.json'
leaderboard_list = []
with open(file, 'r') as json_file:
data = json.load(json_file)
data = data['scores']
leaderboard_list = sorted(data.items(), key=lambda x: x[1], reverse=True)
return leaderboard_list
"""
| true
|
63ffa3f0cd0f79659494190b1917880f34c7f8ea
|
Python
|
BrijeshDutta/stock-prices-dash
|
/StockPrices.py
|
UTF-8
| 290
| 3.28125
| 3
|
[] |
no_license
|
import yfinance as yf
# Ticker symbol to query (Tesla).
tickerSymbol = 'TSLA'

# yfinance handle for this ticker.
tickerData = yf.Ticker(tickerSymbol)

# Daily historical prices for the given date range (DataFrame indexed by date).
tickerDf = tickerData.history(period='1d', start='2010-1-1', end='2020-1-25')

# Print the fetched data.
print(tickerDf)
| true
|
60e3f3ba17613cc6d024235bffdd1a1a340ee38e
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2684/60694/261951.py
|
UTF-8
| 660
| 3.6875
| 4
|
[] |
no_license
|
# www.geeksforgeeks.org/minimum-time-to-finish-tasks-without-skipping-two-consecutive
def minTime(arr, n):
    """Minimum total time to finish tasks so that no two consecutive tasks
    are skipped.

    arr[i] is the time of task i; n is the number of tasks considered.
    Classic DP over two states: best cost with the current task done vs.
    skipped (skipping is only allowed if the previous task was done).
    """
    if n <= 0:
        return 0
    # Costs after the first task: done vs. skipped.
    taken, skipped = arr[0], 0
    for i in range(1, n):
        # Doing task i is legal after either state; skipping it is only
        # legal if task i-1 was done.
        taken, skipped = arr[i] + min(taken, skipped), taken
    # Either state is allowed at the end.
    return min(taken, skipped)
# Driver: read T test cases; each gives N and the task times, print the answer.
T = int(input())
for _ in range(T):
    N = int(input())
    arr = list(map(int, input().split()))
    print(minTime(arr, N))
| true
|
d9dc6df04a0fdd33a5a08952ba2bdd969be4e602
|
Python
|
KumarLamic/image-transform
|
/translate.py
|
UTF-8
| 1,313
| 2.890625
| 3
|
[] |
no_license
|
from PIL import Image
import os
import random
# Source image to translate.
ori_image='three.png'
scale_img_path=''
img = Image.open(ori_image)

# Apply translation by changing c and f values of the affine matrix
# (a, b, c, d, e, f) maps output (x, y) to input (a*x + b*y + c, d*x + e*y + f).
a = 1
b = 0
c = 0  # left/right shift in pixels (i.e. 5/-5)
d = 0
e = 1
f = 0  # up/down shift in pixels (i.e. 5/-5)

img = img.transform(img.size, Image.AFFINE, (a, b, c, d, e, f))
# img.save()
img.show()
# dir = "./test_images_ori"
#
# for name in os.listdir(dir):
# print(name)
# path = os.path.join(dir, name)
#
# fol = path.split('/')[2]
# # print(fol)
# for filename in os.listdir(path):
#
# ori_img_path = path +"/"+ filename
# print("ori_img_path : " + ori_img_path)
# ori_img= Image.open(ori_img_path)
#
# #create dir fol inside test_imgs_scale
# scale_im_p= "test_imgs_scale/" + fol
# scale_img_path = "test_imgs_scale/" + fol + "/" + filename
# if not os.path.exists(scale_im_p):
# os.makedirs(scale_im_p)
#
# # scale_factor = random.randint(2, 3)
# a = 0
# b = 0
# c = 1 # left/right (i.e. 5/-5)
# d = 0
# e = 0
# f = 1 # up/down (i.e. 5/-5)
# scale_img = ori_img.transform(ori_img.size, Image.AFFINE, (a, b, c, d, e, f))
# scale_img.save(scale_img_path)
#
# print("scale_img_path : "+scale_img_path)
| true
|
15847e832cfcad37081105a96a7b43e0b6c99644
|
Python
|
janosg/dag_gettsim
|
/dag_gettsim/main.py
|
UTF-8
| 7,030
| 2.9375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import inspect
from functools import partial
from inspect import getfullargspec, getmembers
import networkx as nx
import pandas as pd
from dag_gettsim import aggregation, benefits, taxes
def tax_transfer(
    baseline_date, data, functions=None, params=None, targets="all", return_dag=False
):
    """Simulate a tax and transfer system.

    Args:
        baseline_date (str): A date, e.g. '2019-01-01', used to select the
            baseline parameters (and, in the future, baseline functions).
        data (dict): User provided dataset as dictionary of Series.
        functions (dict): User provided functions, mapping names to callables
            or import-path strings; same-named gettsim functions are overridden.
        params (dict or pd.Series): User provided parameters that override or
            extend the baseline parameters at the given date.
        targets (list or str): Names of functions whose output is needed;
            the default "all" keeps every result.
        return_dag (bool): If True, also return the computation DAG.

    Returns:
        dict: Dictionary of Series with the target quantities, or the tuple
        ``(results, dag)`` when ``return_dag`` is True.
    """
    all_params = get_params(baseline_date, {} if params is None else params)
    func_dict = create_function_dict(
        user_functions={} if functions is None else functions, params=all_params
    )
    dag = create_dag(func_dict)
    if targets != "all":
        dag = prune_dag(dag, targets)
    results = execute_dag(func_dict, dag, data, targets)
    return (results, dag) if return_dag else results
def get_params(baseline_date, user_params):
    """Combine baseline parameters with user overrides.

    Currently generates date-independent toy-model parameters; in the long run
    this will query a parameter database for ``baseline_date`` and update or
    extend it with the user parameters.

    Args:
        baseline_date (str): A date, e.g. '2019-01-01' (currently unused).
        user_params (dict or pd.Series): Parameters that override or extend
            the baseline parameters.

    Returns:
        pd.Series: The combined parameters.
    """
    baseline = {
        "income_tax": 0.2,
        "wealth_tax": 0.9,  # 90 % wealth tax is just to make Max happy ;-)
        "benefit_per_child": 2000,
        "benefit_cutoff": 30000,
    }
    overrides = (
        user_params.to_dict() if isinstance(user_params, pd.Series) else user_params
    )
    return pd.Series({**baseline, **overrides})
def create_function_dict(user_functions, params):
    """Collect every function that will appear in the DAG.

    Gettsim's own functions are gathered from the taxes, benefits and
    aggregation modules; user functions override same-named ones. Every
    function is partialled with ``params`` so it only takes data arguments.

    Args:
        user_functions (dict): Maps function names to user-provided callables
            (or strings with absolute/relative import paths to a function).
        params: Parameters bound to every function via ``functools.partial``.

    Returns:
        dict: Maps function names to partialled callables.
    """
    collected = {}
    for module in (taxes, benefits, aggregation):
        collected.update(dict(getmembers(module, inspect.isfunction)))
    collected.update(user_functions)
    return {name: partial(func, params=params) for name, func in collected.items()}
def create_dag(func_dict):
    """Build the dependency DAG from function signatures.

    Each function's argument names are treated as its data dependencies;
    after reversing, edges point from a dependency to the function that
    consumes it.

    Args:
        func_dict (dict): Maps function names to functions.

    Returns:
        nx.DiGraph: Directed acyclic graph of the computation.
    """
    dependencies = {}
    for name, func in func_dict.items():
        dependencies[name] = getfullargspec(func).args
    return nx.DiGraph(dependencies).reverse()
def prune_dag(dag, targets):
    """Remove every node that no target depends on.

    Args:
        dag (nx.DiGraph): The unpruned DAG. Mutated in place.
        targets (list): Variables of interest.
    Returns:
        dag (nx.DiGraph): Pruned DAG (the same, mutated object).
    """
    # Go through the DAG from the targets to the bottom and collect all visited nodes.
    # Iterate to a fixed point: each pass may reveal further ancestors to keep.
    visited_nodes = set(targets)
    visited_nodes_changed = True
    while visited_nodes_changed:
        n_visited_nodes = len(visited_nodes)
        # Rebinding (not mutating) visited_nodes inside the loop is safe:
        # the for-loop keeps iterating over the old set object.
        for node in visited_nodes:
            visited_nodes = visited_nodes.union(nx.ancestors(dag, node))
        visited_nodes_changed = n_visited_nodes != len(visited_nodes)
    # Redundant nodes are nodes not visited going from the targets through the graph.
    all_nodes = set(dag.nodes)
    redundant_nodes = all_nodes - visited_nodes
    dag.remove_nodes_from(redundant_nodes)
    return dag
def execute_dag(func_dict, dag, data, targets):
    """Naive serial scheduler for our tasks.

    Tasks are executed in topological order; a task's keyword arguments are
    the already-computed results of its DAG predecessors. Entries already in
    ``data`` are taken as given and never recomputed.

    We will probably use some existing scheduler instead. Interesting sources are:
    - https://ipython.org/ipython-doc/3/parallel/dag_dependencies.html
    - https://docs.dask.org/en/latest/graphs.html
    The main reason for writing an own implementation is to explore how difficult it
    would to avoid dask as a dependency.

    Args:
        func_dict (dict): Maps function names to functions.
        dag (nx.DiGraph): Computation graph; edges point dependency -> consumer.
        data (dict): Pre-supplied inputs, name -> Series.
        targets (list or str): Requested outputs, or "all".
    Returns:
        dict: Dictionary of pd.Series with the results.
    Raises:
        KeyError: If a node is neither in ``data`` nor in ``func_dict``.
    """
    # Needed for garbage collection.
    visited_nodes = set()
    results = data.copy()
    for task in nx.topological_sort(dag):
        if task not in results:
            if task in func_dict:
                kwargs = _dict_subset(results, dag.predecessors(task))
                results[task] = func_dict[task](**kwargs)
            else:
                raise KeyError(f"Missing variable or function: {task}")
        visited_nodes.add(task)
        # Only garbage-collect when a subset of targets was requested;
        # with "all" every intermediate result must be kept.
        if targets != "all":
            results = collect_garbage(results, task, visited_nodes, targets, dag)
    return results
def _dict_subset(dictionary, keys):
return {k: dictionary[k] for k in keys}
def collect_garbage(results, task, visited_nodes, targets, dag):
    """Remove stored results which are no longer necessary.

    Once all descendants of a node have been evaluated, its stored result is
    redundant and can be dropped to save memory -- unless the node itself is
    a requested target.

    Args:
        results (dict): Results computed so far; pruned in place.
        task (str): The task that has just been executed.
        visited_nodes (set): Names of all tasks executed so far.
        targets (list): Names whose results must be kept.
        dag (nx.DiGraph): The full computation graph.

    Returns:
        dict: The pruned results.
    """
    ancestors_of_task = nx.ancestors(dag, task)
    for node in ancestors_of_task:
        # An ancestor may already have been collected on behalf of an earlier
        # task; deleting it again would raise KeyError.
        if node not in results:
            continue
        is_obsolete = all(
            descendant in visited_nodes for descendant in nx.descendants(dag, node)
        )
        # Fix: guard the node being deleted, not the current task. The old
        # check (`task not in targets`) could delete a target's stored result
        # and needlessly kept ancestors alive whenever the current task
        # happened to be a target.
        if is_obsolete and node not in targets:
            del results[node]
    return results
| true
|
333b6e66fe4044d85ce78084d2728576ee5e1311
|
Python
|
Akatsuki06/Terminal-Pix
|
/colorinfo.py
|
UTF-8
| 344
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
# https://jonasjacek.github.io/colors/data.json
import json
def fetchdata():
    """Load and return the color table from ``data.json``."""
    with open('data.json') as data_file:
        return json.load(data_file)
def getColorList():
    """Return the colors from ``data.json`` as a list of [r, g, b] triples."""
    return [
        [entry['rgb']['r'], entry['rgb']['g'], entry['rgb']['b']]
        for entry in fetchdata()
    ]
| true
|
44a2d9679d5b364250efe5183a2852cb3844eefe
|
Python
|
Caps3c/batch2shellcode
|
/bat2shell.py
|
UTF-8
| 1,294
| 3.171875
| 3
|
[] |
no_license
|
import os ,sys
### a script that reads a batch file then outputs a shellcode of the batch file
## a function to output the shell code to txt file with the name shellcode.txt
# Writes the module-global `shellcode` to shellcode.txt, but only when the
# second CLI argument is the literal string "txt"; otherwise it is a no-op.
def write_shellcode():
    try:
        if sys.argv[2]=="txt":
            output = open("shellcode.txt","w")
            output.write(shellcode)
            output.close()
        else:
            pass
    except IndexError:
        pass
try:
    ## open the batch file to read it's contents
    handle = open(sys.argv[1], "rb")
    dump = handle.read()
    handle.close()
    shellcode = ""
    ### checks every byte and turns the number to a hex string
    # Turns each byte value into "\xNN" form: hex(byte) gives "0x41";
    # dropping the leading "0" and prefixing "\" yields "\x41".
    for byte in dump:
        hex_byte = (hex(byte))
        hex_byte = "\\"+(hex_byte[1:])
        # Pad single-digit values ("\x4") to two hex digits ("\x04").
        if len(hex_byte)==3:
            hex_byte = hex_byte[:2]+"0"+hex_byte[2:3]
        else:
            pass
        shellcode += hex_byte
        #print (hex_byte)
    print ("The shell code of the following batch file is : "+sys.argv[1])
    print (shellcode)
    write_shellcode()
    #print (dump)
except IndexError:
    # No file argument given on the command line.
    print ("no input files was detected \n")
    print ("Usage: python batreader.py batchfile")
except FileNotFoundError:
    print ("No such batch file was found " +sys.argv[1])
### author: capsec
### Friday 03/01/2020
### twitter : capsec1
| true
|
71114eb1bc1461f3b7aa517811bdbae3b9bf1351
|
Python
|
vietnvri/trigger-word
|
/preprocess/td_utils.py
|
UTF-8
| 3,401
| 2.90625
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
from scipy.io import wavfile
import os
from pydub import AudioSegment
import matplotlib.mlab as m
import librosa
import numpy as np
class MaxSizeList(object):
    """A list that keeps at most ``mx`` elements, discarding the oldest first.

    Fixes two defects of the original implementation:
    * ``ls`` was a class attribute, so every instance shared the same list.
    * ``get_list`` popped until ``len(ls) == mx``, which raised IndexError
      whenever fewer than ``mx`` items had been pushed.
    """

    def __init__(self, mx):
        self.val = mx  # maximum number of elements to keep
        self.ls = []   # per-instance storage (was shared class-level state)

    def push(self, st):
        """Append one element."""
        self.ls.append(st)

    def get_list(self):
        """Return the most recent ``mx`` elements (fewer if not enough pushed)."""
        while len(self.ls) > self.val:
            self.ls.pop(0)
        return self.ls
def plot(y, title=""):
    """Render a mel-scaled log spectrogram of waveform ``y`` using librosa.

    Assumes a 16 kHz sample rate. The figure is created but neither shown
    nor saved; the caller is responsible for plt.show()/savefig.
    """
    S = librosa.feature.melspectrogram(y, sr=16000, n_mels=40, n_fft=400, hop_length=160, power=1)
    # Convert to log scale (dB). We'll use the peak power (max) as reference.
    log_S = librosa.power_to_db(S, ref=np.max)
    # Make a new figure
    plt.figure(figsize=(12, 4))
    # Display the spectrogram on a mel scale
    # sample rate and hop length parameters are used to render the time axis
    # NOTE(review): librosa.display is used but only `librosa` is imported at
    # the top of this file -- confirm librosa.display is importable here.
    librosa.display.specshow(log_S, sr=16000, x_axis='time', y_axis='mel')
    # Put a descriptive title on the plot
    plt.title(title)
    # draw a color bar
    plt.colorbar(format='%+02.0f dB')
    # Make the figure layout compact
    plt.tight_layout()
def get_spectrogram(data):
    """
    Function to compute a spectrogram.
    Argument:
    data -- one channel / dual channel audio data as numpy array
    Returns:
    pxx -- spectrogram, 2-D array, columns are the periodograms of successive segments.
    """
    nfft = 200 # Length of each window segment
    fs = 8000 # Sampling frequencies
    noverlap = 120 # Overlap between windows
    nchannels = data.ndim
    if nchannels == 1:
        pxx, _, _ = m.specgram(data, nfft, fs, noverlap=noverlap)
    elif nchannels == 2:
        # Stereo input: only the first channel is analyzed.
        pxx, _, _ = m.specgram(data[:, 0], nfft, fs, noverlap=noverlap)
    # NOTE(review): input with ndim > 2 falls through both branches and
    # raises UnboundLocalError on pxx.
    return pxx
# %%
# Calculate and plot spectrogram for a wav audio file
def graph_spectrogram(wav_file):
    """Compute (and implicitly render, then discard) a spectrogram of a wav file.

    Uses plt.specgram for the computation and closes all figures afterwards,
    so only the spectrogram array is returned.
    """
    rate, data = get_wav_info(wav_file)
    nfft = 200 # Length of each window segment
    fs = 8000 # Sampling frequencies
    noverlap = 120 # Overlap between windows
    nchannels = data.ndim
    if nchannels == 1:
        pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap=noverlap)
    elif nchannels == 2:
        # Stereo input: only the first channel is analyzed.
        pxx, freqs, bins, im = plt.specgram(data[:, 0], nfft, fs, noverlap=noverlap)
    plt.close('all')
    return pxx
# Load a wav file
def get_wav_info(wav_file):
    """Read ``wav_file`` and return its ``(sample_rate, data)`` pair."""
    return wavfile.read(wav_file)
# Used to standardize volume of audio clip
def match_target_amplitude(sound, target_dBFS):
    """Apply gain so that ``sound`` ends up at ``target_dBFS`` (volume standardization)."""
    return sound.apply_gain(target_dBFS - sound.dBFS)
# Load raw audio files for speech synthesis
def load_raw_audio():
    """Load activate/negative/background wav clips for speech synthesis.

    Reads every file whose name ends with "wav" under the relative paths
    ./raw_data/{activates,backgrounds,negatives} as pydub AudioSegments.

    Returns:
        (activates, negatives, backgrounds): three lists of AudioSegments.
        Note the return order differs from the loading order.
    """
    activates = []
    backgrounds = []
    negatives = []
    for filename in os.listdir("./raw_data/activates"):
        # NOTE(review): endswith("wav") also matches names like "foo.notwav";
        # endswith(".wav") would be stricter.
        if filename.endswith("wav"):
            activate = AudioSegment.from_wav("./raw_data/activates/" + filename)
            activates.append(activate)
    for filename in os.listdir("./raw_data/backgrounds"):
        if filename.endswith("wav"):
            background = AudioSegment.from_wav("./raw_data/backgrounds/" + filename)
            backgrounds.append(background)
    for filename in os.listdir("./raw_data/negatives"):
        if filename.endswith("wav"):
            negative = AudioSegment.from_wav("./raw_data/negatives/" + filename)
            negatives.append(negative)
    return activates, negatives, backgrounds
| true
|
f249b2743f593ed2d68b1d492b8af569a9b21f3a
|
Python
|
korbelz/SC_Org_members
|
/scroll.py
|
UTF-8
| 1,138
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#scroll to bottom of the page
from selenium import webdriver
import time
#Below are some example sites to test on.
#https://robertsspaceindustries.com/orgs/SECPRO/members
#https://robertsspaceindustries.com/orgs/AGBCORP/members
def scroll(target_url):
    """Open ``target_url`` in Chrome, scroll to the bottom of an
    infinite-scroll page, and return the fully loaded page source.

    Scrolling stops when the document height stops growing between passes.
    NOTE(review): the webdriver is never quit/closed, so each call leaks a
    browser instance.
    """
    driver = webdriver.Chrome()
    driver.get(f'{target_url}')
    driver.page_source  # no-op: value discarded (left as in original)
    SCROLL_PAUSE_TIME = 3
    time.sleep(SCROLL_PAUSE_TIME)
    # Get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)
        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break
        last_height = new_height
    #pita = driver.page_source
    #return pita
    return driver.page_source
#pita = scroll('https://robertsspaceindustries.com/orgs/AGBCORP/members')
#print (pita)
| true
|
c7b9102c8637c5632b958827965ae209ba92bb0f
|
Python
|
danijar/dotfiles
|
/symlink.py
|
UTF-8
| 1,542
| 3.171875
| 3
|
[] |
no_license
|
import pathlib
def is_child(child, parent):
    """Return True if ``child`` equals ``parent`` or lies underneath it."""
    try:
        child.relative_to(parent)
    except ValueError:
        return False
    return True
def remove_broken_links(repo, user):
    """Delete symlinks in ``user`` that point into ``repo`` but are broken.

    Args:
        repo (pathlib.Path): Resolved dotfiles repository root.
        user (pathlib.Path): Resolved user home directory.
    """
    # Only search in one level into directories that are mirrored in the
    # repository and only report broken links that point to the repository. It's
    # good to be careful because links in mounted network drives may seem broken
    # for the client.
    for directory in repo.glob('**/*'):
        if not directory.is_dir():
            continue
        for path in (user / directory.relative_to(repo)).glob('*'):
            target = path.resolve()
            # Broken means: resolves into the repo but the target is gone.
            if is_child(target, repo) and not target.exists():
                print('Broken:', path)
                path.unlink()
def create_links(repo, user):
    """Mirror ``repo`` into ``user`` via symlinks, creating directories as needed.

    Existing symlinks that already point into the repo are verified; a link
    resolving to the wrong repo file raises RuntimeError. Non-symlink files
    that exist are reported and left alone.

    Args:
        repo (pathlib.Path): Resolved dotfiles repository root.
        user (pathlib.Path): Resolved user home directory.

    Raises:
        RuntimeError: If an existing repo-owned link resolves elsewhere.
    """
    for path in repo.glob('**/*'):
        dest = user / path.relative_to(repo)
        if path.is_dir():
            dest.mkdir(exist_ok=True)
            continue
        if not dest.exists():
            print('Create:', dest, '->', path)
            dest.symlink_to(path)
            continue
        if dest.is_symlink() and is_child(dest.resolve(), repo):
            if dest.resolve() != path:
                message = f'Expected existing {dest} to resolve to {path} '
                message += f'but resolved to {dest.resolve()} instead.'
                raise RuntimeError(message)
            continue
        print('Exists:', dest)
def main():
    """Sync the repo's ``home/`` directory into the user's home directory."""
    repo = (pathlib.Path(__file__).parent / 'home').resolve()
    user = pathlib.Path('~').expanduser().resolve()
    remove_broken_links(repo, user)
    create_links(repo, user)
    print('Done.')
if __name__ == '__main__':
    main()
| true
|
a021dc7e315a9e96dc8e85e84462d6aa10056044
|
Python
|
laolee010126/algorithm-with-python
|
/problems_solving/baekjoon/no_5430_AC.py
|
UTF-8
| 1,194
| 3.40625
| 3
|
[] |
no_license
|
"""Do AC language calculations over integer array
url: https://www.acmicpc.net/problem/5430
"""
from collections import deque
ERROR_MESSAGE = 'error'
REVERSE, DISCARD = 'RD'
def do_ac_calculations(arr, cmds):
    """Run an AC-language program over ``arr``.

    'R' reverses the array; 'D' discards the first element. Returns the
    resulting list, or 'error' when a discard hits an empty array.
    """
    deck = deque(arr)
    # Collapse runs of R's: only their parity before each D (and at the end)
    # matters, so the compressed program alternates at most R, D, R, D, ...
    pending_reverses = 0
    compressed = ''
    for cmd in cmds:
        if cmd == DISCARD:
            if pending_reverses % 2:
                compressed += REVERSE
            compressed += DISCARD
            pending_reverses = 0
        else:
            pending_reverses += 1
    if pending_reverses % 2:
        compressed += REVERSE
    # Execute lazily: track orientation instead of physically reversing,
    # popping from whichever end is currently the "front".
    flipped = False
    for cmd in compressed:
        if cmd == REVERSE:
            flipped = not flipped
        else:
            if not deck:
                return ERROR_MESSAGE
            if flipped:
                deck.pop()
            else:
                deck.popleft()
    # Materialize the final orientation exactly once.
    if flipped:
        deck.reverse()
    return list(deck)
if __name__ == '__main__':
    # Driver: T cases; each case provides the command string, a length line
    # (read and discarded), and an array literal such as "[1,2,3]".
    T = int(input())
    ans = []
    for _ in range(T):
        cmds = input()
        input()
        # NOTE(review): eval on raw stdin is unsafe for untrusted input;
        # json.loads would parse the same "[1,2,3]" syntax safely.
        arr = eval(input())
        ans.append(do_ac_calculations(arr, cmds))
    # Print results with spaces stripped to match the judge's format.
    for ret in ans:
        ret = str(ret).replace(' ', '')
        print(ret)
| true
|
92b06f49babcec7474117d3a30173eb37994c3ca
|
Python
|
RaphP/ToutMonPython
|
/PythonLaptop/omeletes.py
|
UTF-8
| 548
| 2.921875
| 3
|
[] |
no_license
|
import numpy
import random
import matplotlib.pyplot as plt
import math
# Build a random walk and plot its continuous-wavelet-transform scalogram.
dt = 1
Tmax = 1000
# t runs from 0 to Tmax in steps of dt
# Random walk: each step moves +1 or -1 with equal probability.
s = [0]
for i in range(Tmax):
    s += [s[-1] + (1 - 2 * random.randint(0, 1))]


def phi(t):
    """Mother wavelet: 2*t*exp(-t^2) (derivative-of-Gaussian shape)."""
    return 2 * t * math.exp(-t ** 2)


def T(b, a):
    """Wavelet coefficient of the walk at shift ``b`` and scale ``a``."""
    Dt = 3 * a  # effective support of the wavelet: ~3 scales on each side
    total = 0
    for t in range(b - Dt, b + Dt):
        # NOTE: true division. The original ran under Python 2 where
        # (t-b)/a floor-divided; float division matches the intended math.
        total += s[t] * phi((t - b) / a)
    return total


# plt.plot(f)
# plt.show()
# Scalogram rows are scales a in [10, 200); columns are shifts b.
# Renamed from `map`, which shadowed the builtin, and fixed the Python 2
# `print a` statement (SyntaxError under Python 3).
scalogram = numpy.empty([200, int(Tmax)])
for a in range(10, 200):
    print(a)  # progress indicator
    Dt = 3 * a
    for b in range(0 + Dt, Tmax - Dt):
        scalogram[a - 10][b] = T(b, a)
plt.imshow(scalogram)
plt.show()
| true
|
2ce3ef0a7af55b2b0161eb18de450a46023b4124
|
Python
|
XYHC-MMDA/Multi-modal-Multi-task-DA
|
/mmdet3d/models/discriminators/disc.py
|
UTF-8
| 8,184
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from ..registry import DISCRIMINATORS
@DISCRIMINATORS.register_module()
class FCDiscriminatorCE(nn.Module):
    """MLP domain discriminator with 2-way (source/target) cross-entropy loss."""
    def __init__(self, in_dim=128):
        super(FCDiscriminatorCE, self).__init__()
        # Three-layer MLP producing 2-class logits per feature vector.
        self.fc = nn.Sequential(
            nn.Linear(in_dim, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 2),
        )
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        """Accepts (N, in_dim) vectors or (N, in_dim, H, W) maps; feature maps
        are permuted channel-last for the linear layers, then permuted back."""
        if x.dim() == 4:
            x = x.permute([0, 2, 3, 1])
        # x.shape=(N, in_dim=128)  (or (N, H, W, in_dim) for map input)
        x = self.fc(x)
        if x.dim() == 4:
            x = x.permute([0, 3, 1, 2])
        return x
    def loss(self, logits, src=True):
        """CE loss against all-ones labels for source data, all-zeros for target.

        Label shape drops the class dimension (dim 1) of ``logits``.
        """
        label_shape = logits.shape[:1] + logits.shape[2:]
        if src:
            labels = torch.ones(label_shape, dtype=torch.long).cuda()
        else:
            labels = torch.zeros(label_shape, dtype=torch.long).cuda()
        return self.criterion(logits, labels)
@DISCRIMINATORS.register_module()
class ConvDiscriminator1x1(nn.Module):
    """Per-location (1x1 conv) domain discriminator.

    Classifies every spatial position of a feature map as source (label 1)
    or target (label 0) with a cross-entropy loss.
    """
    def __init__(self, in_dim=128, activation='ReLU'):
        # in_dim: input_channels
        assert activation in ['ReLU', 'Leaky']
        super(ConvDiscriminator1x1, self).__init__()
        dim1, dim2 = 256, 256
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, dim1, kernel_size=1),
            nn.BatchNorm2d(dim1),
            self._make_activation(activation),
            nn.Conv2d(dim1, dim2, kernel_size=1),
            nn.BatchNorm2d(dim2),
            self._make_activation(activation),
            # Fix: the final conv consumes the dim2-channel output of the
            # previous layer. The original passed dim1, which only worked
            # because dim1 == dim2 here.
            nn.Conv2d(dim2, 2, kernel_size=1),
        )
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        """(N, in_dim, H, W) feature map -> per-pixel 2-class logits (N, 2, H, W)."""
        # x.shape=(N, 128, 225, 400)
        x = self.conv(x)  # (N, 2, 225, 400)
        return x
    def loss(self, logits, src=True):
        """CE loss against all-ones (source) or all-zeros (target) label maps."""
        N, _, H, W = logits.shape
        if src:
            labels = torch.ones([N, H, W], dtype=torch.long).cuda()
        else:
            labels = torch.zeros([N, H, W], dtype=torch.long).cuda()
        return self.criterion(logits, labels)
    def _make_activation(self, act):
        # Factory for the configured nonlinearity.
        if act == 'ReLU':
            return nn.ReLU(inplace=True)
        elif act == 'Leaky':
            return nn.LeakyReLU(0.2, inplace=True)
@DISCRIMINATORS.register_module()
class DetDiscriminator(nn.Module):
    """Strided-conv domain discriminator for detection feature maps.

    Downsamples a (N, in_dim, 200, 400) map through seven conv stages
    (the per-layer comments track the spatial size) to 2-class logits.
    """
    def __init__(self, in_dim=128):
        # in_dim: input_channels
        super(DetDiscriminator, self).__init__()
        dim1, dim2, dim3 = 128, 256, 512
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, dim1, kernel_size=8, stride=4), # (49, 99)
            nn.BatchNorm2d(dim1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim1, dim1, kernel_size=3, padding=[1, 0], stride=[1, 2]), # (49, 49)
            nn.BatchNorm2d(dim1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim1, dim2, kernel_size=3, padding=1, stride=2), # (25, 25)
            nn.BatchNorm2d(dim2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim2, dim2, kernel_size=3, padding=1, stride=2), # (13, 13)
            nn.BatchNorm2d(dim2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim2, dim3, kernel_size=3, padding=1, stride=2), # (7, 7)
            nn.BatchNorm2d(dim3),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim3, dim3, kernel_size=3, padding=1, stride=2), # (4, 4)
            nn.BatchNorm2d(dim3),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim3, 2, kernel_size=4),
        ) # ImageGAN
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        # x.shape=(N, 128, 200, 400)
        x = self.conv(x)  # 2-class logit map
        return x
    def loss(self, logits, src=True):
        """CE loss against all-ones (source) or all-zeros (target) label maps."""
        N, _, H, W = logits.shape
        if src:
            labels = torch.ones([N, H, W], dtype=torch.long).cuda()
        else:
            labels = torch.zeros([N, H, W], dtype=torch.long).cuda()
        return self.criterion(logits, labels)
@DISCRIMINATORS.register_module()
class Conv2dDiscriminator(nn.Module):
    """Small strided-conv (PatchGAN-style) domain discriminator."""
    def __init__(self, in_dim=128):
        # in_dim: input_channels
        super(Conv2dDiscriminator, self).__init__()
        dim1, dim2 = 64, 64
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, dim1, kernel_size=3, stride=2, bias=True),
            # nn.Dropout2d(p=0.5),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim1, dim2, kernel_size=3, stride=2, bias=True),
            # nn.Dropout2d(p=0.5),
            nn.ReLU(inplace=True),
            # NOTE(review): in_channels is written as dim1 but consumes the
            # dim2-channel output of the previous layer; works only because
            # dim1 == dim2 (64).
            nn.Conv2d(dim1, 2, kernel_size=3, padding=1, bias=True),
        )
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        # x.shape=(N, 128, 200, 400)
        x = self.conv(x)  # downsampled 2-class logit map
        return x
    def loss(self, logits, src=True):
        """CE loss against all-ones (source) or all-zeros (target) label maps."""
        N, _, H, W = logits.shape
        if src:
            labels = torch.ones([N, H, W], dtype=torch.long).cuda()
        else:
            labels = torch.zeros([N, H, W], dtype=torch.long).cuda()
        return self.criterion(logits, labels)
@DISCRIMINATORS.register_module()
class Conv2dDiscriminator01(nn.Module):
    """PatchGAN-style domain discriminator (BatchNorm + LeakyReLU variant)."""
    def __init__(self, in_dim=128):
        # in_dim: input_channels
        super(Conv2dDiscriminator01, self).__init__()
        dim1, dim2 = 64, 64
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, dim1, kernel_size=3, padding=1, stride=2),
            nn.BatchNorm2d(dim1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(dim1, dim2, kernel_size=3, padding=1),
            nn.BatchNorm2d(dim2),
            nn.LeakyReLU(0.2, inplace=True),
            # NOTE(review): in_channels is written as dim1 but consumes the
            # dim2-channel output; works only because dim1 == dim2 (64).
            nn.Conv2d(dim1, 2, kernel_size=3, padding=1),
        ) # receptive field=11; 11x11 PatchGAN
        self.criterion = nn.CrossEntropyLoss()
    def forward(self, x):
        # x.shape=(N, 128, 200, 400)
        x = self.conv(x)  # half-resolution 2-class logit map
        return x
    def loss(self, logits, src=True):
        """CE loss against all-ones (source) or all-zeros (target) label maps."""
        N, _, H, W = logits.shape
        if src:
            labels = torch.ones([N, H, W], dtype=torch.long).cuda()
        else:
            labels = torch.zeros([N, H, W], dtype=torch.long).cuda()
        return self.criterion(logits, labels)
@DISCRIMINATORS.register_module()
class FCDiscriminator(nn.Module):
    """Small MLP domain discriminator trained with NLLLoss over log-softmax."""
    def __init__(self, in_dim=128):
        super(FCDiscriminator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(in_dim, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 2),
        )
        self.nllloss = nn.NLLLoss()
    def forward(self, x):
        """Return raw 2-class logits; log_softmax is applied inside loss()."""
        # x.shape=(N, in_dim=128)
        x = self.fc(x)
        return x
    def loss(self, logits, src=True):
        """NLL loss against all-ones (source) or all-zeros (target) labels."""
        if src:
            labels = torch.ones(logits.size(0), dtype=torch.long).cuda()
        else:
            labels = torch.zeros(logits.size(0), dtype=torch.long).cuda()
        return self.nllloss(F.log_softmax(logits, dim=1), labels)
@DISCRIMINATORS.register_module()
class FCDiscriminatorNew(nn.Module):
    """Like FCDiscriminator, but forward() returns log-probabilities
    (optionally with logits) and loss() consumes log-probabilities directly."""
    def __init__(self, in_dim=128):
        super(FCDiscriminatorNew, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(in_dim, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 2),
        )
        self.nllloss = nn.NLLLoss()
    def forward(self, x, return_logits=False):
        """Return log-probabilities; with return_logits=True, (logits, logprob)."""
        # x.shape=(N, in_dim=128)
        logits = self.fc(x)
        logprob = F.log_softmax(logits, dim=1)
        if return_logits:
            return logits, logprob
        else:
            return logprob
    def loss(self, logprob, src=True):
        """NLL loss against all-ones (source) or all-zeros (target) labels."""
        if src:
            labels = torch.ones(logprob.size(0), dtype=torch.long).cuda()
        else:
            labels = torch.zeros(logprob.size(0), dtype=torch.long).cuda()
        return self.nllloss(logprob, labels)
if __name__ == '__main__':
    # Smoke test: run a random batch through DetDiscriminator (CPU only;
    # loss() is not exercised because it requires CUDA labels).
    disc = DetDiscriminator(128)
    x = torch.rand(2, 128, 200, 400)
    x = disc(x)
    print(x.shape) # (2, 2, 49, 99)
| true
|
1292c75a3e0f02792748e47f89c3b9e04652e0b1
|
Python
|
SpadavecchiaAdrian/surveillance
|
/opticalFlow_v1.py
|
UTF-8
| 3,538
| 2.5625
| 3
|
[] |
no_license
|
# import the necessary packages
from pyimagesearch.tempimage import TempImage
from picamera.array import PiYUVArray
from picamera import PiCamera
import argparse
import warnings
import datetime
import dropbox
import imutils
import json
import time
import numpy as np
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
    help="path to the JSON configuration file")
args = vars(ap.parse_args())
# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
# NOTE(review): the config file handle is never closed; a `with` block
# would be safer.
conf = json.load(open(args["conf"]))
client = None
# check to see if the Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    client = dropbox.Dropbox(conf["dropbox_access_token"])
    print("[SUCCESS] dropbox account linked")
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = tuple(conf["resolution"])
camera.framerate = conf["fps"]
rawCapture = PiYUVArray(camera, size=tuple(conf["resolution"]))
# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
# params for ShiTomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.8,
                      minDistance=7,
                      blockSize=7)
# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Take first frame and find corners in it
#ret, old_frame = cap.read()
#old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
# Grab one YUV frame and keep only the Y (luma) plane as grayscale.
camera.capture(rawCapture, format="yuv", use_video_port=True)
old_gray = rawCapture.array[:, :, 0]
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_gray)
rawCapture.truncate(0)
# capture frames from the camera and track the initial corners with
# Lucas-Kanade optical flow, drawing the accumulated tracks
for f in camera.capture_continuous(rawCapture, format="yuv", use_video_port=True):
    # grab the raw NumPy array representing the image and initialize
    # the timestamp and occupied/unoccupied text
    frame_gray = f.array[:, :, 0]  # all rows/columns of the Y (luma) channel
    frame = frame_gray.copy()
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points (status flag 1 == successfully tracked)
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (a, b), (c, d), (0xff), 2)
        frame = cv2.circle(frame, (a, b), 5, (0xff), -1)
    img = cv2.add(frame, mask)
    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        # display the security feed
        cv2.imshow("img", img)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
# Fix: this module imports cv2, not cv -- the original `cv.destroyAllWindows()`
# raised NameError at shutdown.
cv2.destroyAllWindows()
| true
|
f6f88ef08b3c060609b64c0ddee0f191c8978832
|
Python
|
Avery123123/LeetCode
|
/dataStructure/array_and_string/寻找数组的中心索引.py
|
UTF-8
| 372
| 3.109375
| 3
|
[] |
no_license
|
class Solution(object):
    def pivotIndex(self, nums):
        """Return the leftmost index whose left-sum equals its right-sum,
        or -1 when no such pivot exists.

        :type nums: List[int]
        :rtype: int
        """
        total = sum(nums)
        prefix = 0
        for i, value in enumerate(nums):
            # Right-hand sum is everything after i: total - prefix - value.
            if prefix == total - prefix - value:
                return i
            prefix += value
        return -1
| true
|
0504099a50fda05cf91693efbcfb1abd814ba999
|
Python
|
51running/cmdb
|
/asset/validators.py
|
UTF-8
| 2,087
| 2.6875
| 3
|
[] |
no_license
|
#encoding:utf-8
from .models import Host
from datetime import datetime
class Validator(object):
    """Base class providing generic field validators."""

    @classmethod
    def is_interger(cls, value):
        """Return True if ``value`` can be converted to int, else False.

        (Method name kept as-is -- "interger" -- for backward compatibility.)
        """
        # Fix: catch only the errors int() can raise. The original caught
        # BaseException, which also swallowed KeyboardInterrupt/SystemExit.
        try:
            int(value)
            return True
        except (TypeError, ValueError):
            return False
class Host_Valid(Validator): # These validators are called from view.py
    """Validation for host-asset update requests against the Host model."""
    @classmethod
    def valid_update_asset(cls,params):
        """Validate and apply an update payload to a Host instance.

        Args:
            params: dict-like request parameters (id, name, ip, mac, ...).
        Returns:
            (is_valid, host, errors): host is the (unsaved) updated Host or
            None when the id lookup failed; errors maps field -> message.
            (Error messages are intentionally kept in Chinese for the UI.)
        """
        is_valid = True
        errors = {}
        host = None
        try:
            host = Host.objects.get(pk=params.get('id','').strip())
        except BaseException as e:
            errors['id'] = '主机信息不存在'
            is_valid = False
            return is_valid, host, errors
        print(host.cpu)
        # return is_valid, host, errors
        name = params.get('name', '').strip()
        if not cls.valid_name_unique(name, host.id):
            errors['name'] = '主机名已存在'
            is_valid = False
        else:
            host.name = name
        ip = params.get('ip', '0').strip()
        host.ip = ip
        # if not ip.isdigit():
        #     errors['ip'] = 'IP格式错误'
        #     is_valid = False
        # else:
        #     host.ip = int(ip)
        host.mac = int(params.get('mac', '0')) # mac must parse as an integer (original note said a non-integer value defaults the field)
        host.os = params.get('os', '0')
        host.arch = params.get('arch', '0')
        host.mem = params.get('mem', '0')
        host.cpu = params.get('cpu', '0')
        host.disk = params.get('disk', '0')
        host.create_time = params.get('create_time', '0')
        host.last_time = params.get('last_time', '0')
        return is_valid, host, errors
    @classmethod
    def valid_name_unique(cls,name, uid):
        """Return True if ``name`` is unused, or used only by host ``uid``."""
        host = None
        try:
            host = Host.objects.get(name=name)
        except BaseException as e:
            pass
        if host is None:
            return True
        else:
            return str(host.id) == str(uid) # incoming uid is a string; the database id is numeric, so compare as strings
| true
|
557249e5b478fc5b09941d0a4467e628c1f3160c
|
Python
|
vnherdeiro/project-euler
|
/p267.py
|
UTF-8
| 541
| 2.90625
| 3
|
[] |
no_license
|
#! /usr/bin/python3
from mpmath import mp
pow = mp.power
binom = mp.binomial
from scipy.optimize import minimize
# Target fortune: the player "wins" a round sequence if the compounded
# capital exceeds 10**9.
thresh = 10**9
def g(f):
    """Negated winning probability for betting fraction ``f`` (Kelly-style).

    Sums, over k winning tosses out of 1000, the binomial counts whose
    compounded gain (1+2f)^k * (1-f)^(1000-k) exceeds the threshold, then
    divides by 2^1000. Returned negated so scipy's minimize maximizes it.
    """
    mp.dps = 400
    if f <= 0 or f >= 1: #forcing minimization in (0,1)
        return 0
    # NOTE(review): scipy's Powell passes f as a 1-element array; f[-1]
    # extracts the scalar, but the guard above then compares an array --
    # confirm this behaves as intended across scipy versions.
    f = mp.mpf( f[-1])
    #print( f)
    s = mp.mpf()
    for k in range(1001):
        gain = pow(1+2*f, k) * pow(1-f, 1000 - k)
        if gain > thresh:
            s += binom(1000, k)
            #print(binom(1000, k))
    #print( s)
    s /= pow(2,1000)
    return -s
output = minimize( g, .05, method="Powell", tol=1e-14,)
print( output)
#print( "%.15f" % g(0.05))
| true
|
4d64df957898d2758d81c7c6b84355ca7ac564cc
|
Python
|
yajvldrm/notepad_to_spreadsheet
|
/getting_sheet_from_workbook.py
|
UTF-8
| 205
| 3.03125
| 3
|
[] |
no_license
|
import openpyxl
# Demonstrate opening a workbook and accessing worksheets with openpyxl.
wb = openpyxl.load_workbook('grading.xlsx')
# Fix: Workbook.get_sheet_names() was deprecated and removed in openpyxl 3.x;
# the `sheetnames` property is the supported replacement (same list output).
print(wb.sheetnames)
sheet = wb['Sheet1']
print(sheet)
print(type(sheet))
print(sheet.title)
# `active` returns the currently selected worksheet.
anothersheet = wb.active
print(anothersheet)
| true
|
17b42b80c67fbd5df0aed930f1dc5924657a1463
|
Python
|
akirapop/pytako
|
/SSOtable_py3.py
|
UTF-8
| 5,604
| 3.109375
| 3
|
[] |
no_license
|
####+
#
# 03 June 2013
#
# This class is intended to serve as an "internal" class
# to be used by the solarSystem.py module. In other words, it
# is expected that users will *not* make direct use of this
# class!
#
#
# Object instances of this class serve one fundamental
# purpose: To encapsulate the series of data needed to
# calculate approximate positions for given bodies in the
# solar system. It is therefore expected that the 'wrapper'
# module (solarSystem.py) will: (1) instantiate instances
# as requested and (2) access the data stored in the instances.
#
# The data stored are taken from the paper "Low-Precision
# Formulae for Planetary Positions", Van Flandern and Pulkkinen,
# Ap J. Supp., 41, 391-411 (1979). Note that the so-called
# "Fundamental Arguments" are _not_ encapsulated in this class!
# The solarSystem.py contains/reads this information.
#
####-
from generic_py3 import openfile, filedata
import math
# Map the trig-function tokens found in the data files to math callables.
sinCos= { "sin": math.sin,
    "cos": math.cos }
#dataDir="/home/baluta/Scheduling/bin/Data/SolarSystemVanFlandern/"
# Relative directory holding the Van Flandern & Pulkkinen series files.
dataDir="SolarSystemVanFlandern/"
# "SSO" --> "Solar System Object"
class SSOtable (object):
def __init__(self, name):
self.name=name.title()
self.__readUVW(dataDir)
self.__readSunDistance(dataDir)
# self.__readUVW("/home/baluta/Scheduling/bin/Data/SolarSystemVanFlandern/")
# self.__readSunDistance("/home/baluta/Scheduling/bin/Data/SolarSystemVanFlandern/")
def __str__(self):
return self.name
## The routines that appear below here are initialization-related
def __closePolyTerm(self, coeff, xp):
''' Form a closure of the form: coeff * T^(xp) or, if xp is 0, just coeff. '''
if xp==0:
def f(T):
return coeff
else:
def f(T):
return coeff * (T**xp)
return f
def __processSeriesString(self, dataString, norm):
''' Process the u/v/w portion of input. These should be strings
that look something like this:
"38966 0 sin 0 0 0 1 0 0 0 " '''
parts=dataString.split()
#####
## For the U, V, W series, the coefficients in the
## data files been multiplied ## by 100,000, so we
## must divide this term out. The series for the
## radius do not have this normalization applied.
#####
coeff= float(parts[0]) / norm
exponent=float(parts[1])
trigFunc= sinCos[parts[2]]
series=[]
####
# Now process the trigonometric series: If the term is a 0, skip it.
# Otherwise, prepare a tuple of the appropriate "fundamental element"
# and the current term. The "wrapper" library/module (solarSystem.py)
# will then wrap this information into the appropriate closures.
####
for k, term in enumerate(parts[3:]):
n=int(term)
if n==0:
continue
series.append ( (self.trigTerms[k], n) )
poly= self.__closePolyTerm(coeff, exponent)
trigFunc= sinCos[parts[2]]
if len(series) == 0:
series= [ (self.trigTerms[0], 0) ]
return (poly, trigFunc, tuple(series)) ## Make series a tuple -- don't allow anyone to mess it up!
def __readUVW (self, dir="./"):
# fileName= "".join([dir, self.name.lower(), "UVW"])
file= openfile("".join([dir, self.name.lower(), "UVW"]))
self.uSeries=[]
self.vSeries=[]
self.wSeries=[]
norm=100000.0
for line in filedata(file):
if line.startswith("TrigSeriesTerms"):
self.trigTerms=line.split()[1:]
elif line.startswith("LongitudeTerm"):
self.longitudeTerm=line.split()[1]
elif line.startswith("DeltaBar"):
self.deltaBar=float(line.split()[1])
else:
v, u, w= line.split("|")
if not u.isspace():
self.uSeries.append ( self.__processSeriesString(u, norm) )
if not v.isspace():
self.vSeries.append ( self.__processSeriesString(v, norm) )
if not w.isspace():
self.wSeries.append ( self.__processSeriesString(w, norm) )
## The data encapsulated in this class must not be changed. tuple-ize!
self.uSeries= tuple(self.uSeries)
self.vSeries= tuple(self.vSeries)
self.wSeries= tuple(self.wSeries)
def __readSunDistance(self, dir="./"):
    """Load the radius (distance-to-sun) trig series from <name>Radius."""
    fd = openfile("".join([dir, self.name.lower(), "Radius"]))
    rSeries = []
    norm = 1.0  # radius coefficients are stored unscaled
    for line in filedata(fd):
        if line.startswith("TrigSeriesTerms"):
            continue
        if not line.isspace():
            rSeries.append(self.__processSeriesString(line, norm))
    ## The data encapsulated in this class must not be changed. tuple-ize!
    self.rSeries = tuple(rSeries)
if __name__ == "__main__":
    # Smoke test: load Jupiter's tables and dump a few of them.
    # (Cleanup: dropped the dead `series = sso.__getattribute__('uSeries')`
    # assignment and commented-out code; attributes are accessed directly.)
    obj = "jupiter"
    sso = SSOtable(obj)
    for k, s in enumerate(sso.uSeries):
        print("%r: %r" % (k, s[2]))
    test = "".join(["blah! ", str(sso)])
    print("test: %s" % test)
    print("\nTrig terms: %r" % sso.trigTerms)
    print("Longitude term: %r" % sso.longitudeTerm)
    print("DeltaBar= %r" % sso.deltaBar)
    print("\nR-Series:\n%r" % sso.rSeries)
| true
|
e6ea9a51cd0686ae417f560aa5d10fff751631ca
|
Python
|
ketakic/Home-Security-Smart-Door-System
|
/doorlock.py
|
UTF-8
| 805
| 2.671875
| 3
|
[] |
no_license
|
import RPi.GPIO as GPIO
import BlynkLib
import time
from time import sleep
BLYNK_AUTH = 'd2132438bc5244949d13241150f57958'
blynk = BlynkLib.Blynk(BLYNK_AUTH)
# Board-numbered pin 3 drives the door-lock servo at 50 Hz PWM.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(3, GPIO.OUT)  # fix: `03` is an octal literal -- a SyntaxError on Python 3
pwm = GPIO.PWM(3, 50)
@blynk.VIRTUAL_WRITE(1)
def my_write_handler(value):
    """Blynk virtual pin 1 write: move the servo to locked/unlocked.

    '0' drives the servo to 54 degrees, anything else to 144 degrees.
    NOTE(review): BlynkLib may deliver `value` as a list of strings rather
    than the bare string compared against here -- confirm, otherwise the
    '0' branch never fires.
    """
    print('Current V! value: {}'.format(value))
    pwm.start(0)  # start PWM idle before commanding an angle
    print('Value - ')
    print(value)
    if(value == '0'):
        #pwm.start(7)
        setAngle(54)
        #pwm.stop()
        print('A')
    else:
        #pwm.start(90)
        setAngle(144)
        print('B')
@blynk.VIRTUAL_READ(2)
def my_read_handler():
    """Blynk virtual pin 2 read: report uptime seconds, then tear down.

    NOTE(review): `time.ticks_ms` exists in MicroPython, not in CPython's
    stdlib `time` module -- on a Raspberry Pi this raises AttributeError;
    confirm the intended runtime.  Also `pwm.stop()` / `GPIO.cleanup()`
    inside a read handler disables the servo for all later writes --
    looks suspicious; verify.
    """
    blynk.virtual_write(2, time.ticks_ms() // 1000)
    pwm.stop()
    GPIO.cleanup()
def setAngle(angle):
    """Rotate the servo on pin 3 to `angle` degrees, then stop driving.

    duty = angle/18 + 2 maps 0..180 degrees onto roughly 2..12% duty at
    50 Hz, the usual hobby-servo range.  Holds the pulse for 1 s so the
    servo reaches position, then zeroes the duty cycle to avoid jitter.
    """
    duty = angle / 18 + 2
    GPIO.output(3, True)  # fix: `03` is an octal literal -- a SyntaxError on Python 3
    pwm.ChangeDutyCycle(duty)
    sleep(1)  # give the servo time to travel
    GPIO.output(3, False)
    pwm.ChangeDutyCycle(0)
blynk.run()
| true
|
00e364eaf769d388bca75f32047d4cd3b5d741c0
|
Python
|
amanchourasiya/leetcode
|
/dp/best-time-to-buy-and-sell-stock.py
|
UTF-8
| 419
| 3.484375
| 3
|
[] |
no_license
|
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
class Solution:
    def maxProfit(self, prices):
        """Return the maximum profit from a single buy-then-sell.

        prices: list of daily prices.  One Kadane-style pass tracks the
        cheapest price seen so far and the best spread against it.
        Returns 0 for empty input or monotonically falling prices.

        Fix: the original seeded the minimum with the hard-coded LeetCode
        bound 10**5, which silently breaks for any price above 100000;
        float('inf') works for all inputs.
        """
        best = 0
        lowest = float('inf')
        for price in prices:
            lowest = min(lowest, price)
            best = max(best, price - lowest)
        return best
# Demo: buy at 1, sell at 6 -- should print 5.
demo = Solution()
print(demo.maxProfit([7, 1, 5, 3, 6, 4]))
| true
|
c9eeeb4a58396e4389a003f4ea3c8536b63bd63c
|
Python
|
blainerothrock/seisml
|
/seisml/core/transforms/sample/resample.py
|
UTF-8
| 1,355
| 2.75
| 3
|
[] |
no_license
|
from seisml.core.transforms import TransformException, BaseTraceTransform
class Resample(BaseTraceTransform):
    """Resample a trace using the Fourier method.

    Thin wrapper over obspy.core.trace.Trace.resample.

    Args:
        sampling_rate (float): new sample rate in Hz.
        source (str): key of the input trace in the data dict, default: 'raw'.
        output (str): key under which the result is stored, default: 'resampled'.
        inplace (bool): overwrite the source entry instead of adding a new one.

    Raises:
        TransformException: if sampling_rate is not a float.

    Returns:
        dict: the data dict with the resampled trace added.
    """

    def __init__(self, sampling_rate, source='raw', output='resampled', inplace=False):
        super().__init__(source, output, inplace)
        # Strict float-only check: an integer rate is rejected on purpose,
        # matching the other transforms' validation style.
        if not isinstance(sampling_rate, float):
            raise TransformException(f'sampling_rate must be a float, got {type(sampling_rate)}')
        self.sampling_rate = sampling_rate

    def __call__(self, data):
        super().__call__(data)
        # Work on a copy so the source trace stays untouched.
        trace = data[self.source].copy()
        resampled = trace.resample(self.sampling_rate)
        return super().update(data, resampled)

    def __repr__(self):
        return f'{self.__class__.__name__}(sampling_rate: {self.sampling_rate})'
| true
|
2aa527f184f61badd36a8c54266d8767c7856e4d
|
Python
|
levylll/leetcode
|
/test30.py
|
UTF-8
| 2,794
| 3.25
| 3
|
[] |
no_license
|
class Solution:
    def findSubstring(self, s, words):
        """
        :type s: str
        :type words: List[str]
        :rtype: List[int]

        Return every index in `s` where a concatenation of all `words`
        (each used exactly its multiplicity, in any order) begins.  All
        words are assumed to share one length.

        Sliding-window rewrite: the original carried an unreachable
        `if tmp not in word_dict` branch inside its else-arm and
        duplicated the window-reset logic three times.  This version walks
        each of the `word_len` grid alignments once, giving
        O(len(s) * word_len) total work.
        """
        if not s or not words:
            return []
        from collections import Counter  # local import keeps module deps unchanged
        word_len = len(words[0])
        need = Counter(words)
        total = len(words)
        res = []
        # One pass per possible alignment of the word grid over s.
        for offset in range(word_len):
            left = offset          # left edge of the current window
            window = Counter()     # word counts inside [left, right]
            matched = 0            # number of words currently in the window
            for right in range(offset, len(s) - word_len + 1, word_len):
                w = s[right:right + word_len]
                if w not in need:
                    # Unusable word: restart the window just past it.
                    window.clear()
                    matched = 0
                    left = right + word_len
                    continue
                window[w] += 1
                matched += 1
                # Shrink from the left until `w` is no longer over-counted.
                while window[w] > need[w]:
                    lw = s[left:left + word_len]
                    window[lw] -= 1
                    matched -= 1
                    left += word_len
                if matched == total:
                    res.append(left)
                    # Slide one word forward to hunt for the next match.
                    lw = s[left:left + word_len]
                    window[lw] -= 1
                    matched -= 1
                    left += word_len
        return res
# Earlier fixtures, kept for reference:
#   "barfoothefoobarman"                 / ["foo", "bar"]
#   "wordgoodgoodgoodbestword"           / ["word", "good", "best", "word"]
#   "barfoofoobarthefoobarman"           / ["bar", "foo", "the"]
#   "lingmindraboofooowingdingbarrwingmonkeypoundcake"
#                                        / ["fooo", "barr", "wing", "ding", "wing"]
s = 'aaa'
words = ['a', 'a']
solution = Solution()
print(solution.findSubstring(s, words))
| true
|
c7be6109884b11a8f0246d1bb6c2ee48b0aaf642
|
Python
|
matthewmolinar/syllabus-scanner
|
/backend copy/util.py
|
UTF-8
| 1,805
| 2.78125
| 3
|
[] |
no_license
|
from PyPDF4 import PdfFileReader
from icalendar import Calendar, Event
from datetime import datetime
from io import StringIO
import os.path
import docx
import pytz
# to_text functions
def pdf_to_txt(file):
    """Concatenate the extracted text of every page of a PDF file object."""
    reader = PdfFileReader(file)
    pages = (reader.getPage(i).extractText() for i in range(reader.numPages))
    return ''.join(pages)
def docx_to_txt(file):
    """Extract all paragraph text from a .docx file object, lower-cased.

    Accumulates paragraphs in a StringIO to avoid quadratic string
    concatenation.  (Fix: removed leftover debug `print(type(file))`.)
    """
    doc = docx.Document(file)
    out_txt = StringIO()
    for para in doc.paragraphs:
        out_txt.write(para.text)
    text = out_txt.getvalue()
    out_txt.close()
    return text.lower()
# prof_cal functions, will add more as we get more profs
def get_barr_events():
    """Return one hard-coded event as [summary, start, end, description].

    Times are UTC-aware via stdlib datetime.timezone.utc -- a drop-in for
    the former pytz.utc (the resulting datetimes compare equal), removing
    the third-party pytz dependency from this helper.
    """
    from datetime import timezone  # local import keeps module-level imports untouched
    return ['testing',
            datetime(2021, 1, 10, 15, 0, 0, tzinfo=timezone.utc),
            datetime(2021, 1, 10, 17, 0, 0, tzinfo=timezone.utc),
            'a description']
def get_newton_events():
    """Return one hard-coded event as [summary, start, end, description].

    Times are UTC-aware via stdlib datetime.timezone.utc -- a drop-in for
    the former pytz.utc (the resulting datetimes compare equal).
    """
    from datetime import timezone  # local import keeps module-level imports untouched
    return ['testing',
            datetime(2021, 1, 10, 18, 0, 0, tzinfo=timezone.utc),
            datetime(2021, 1, 10, 19, 0, 0, tzinfo=timezone.utc),
            'a description']
def get_xu_events():
    """Return one hard-coded event as [summary, start, end, description].

    Times are UTC-aware via stdlib datetime.timezone.utc -- a drop-in for
    the former pytz.utc (the resulting datetimes compare equal).
    """
    from datetime import timezone  # local import keeps module-level imports untouched
    return ['testing',
            datetime(2021, 1, 10, 18, 0, 0, tzinfo=timezone.utc),
            datetime(2021, 1, 10, 19, 0, 0, tzinfo=timezone.utc),
            'a description']
# returns a cal file
def merge_into_cal(event_mat):
    """Serialize events into an iCalendar string.

    NOTE(review): this iterates `event_mat` and indexes e[0]..e[3], i.e. it
    expects a list of 4-element event rows [summary, start, end, description],
    but the get_*_events helpers above each return a single flat 4-element
    list.  Callers presumably wrap: merge_into_cal([get_barr_events()]) --
    confirm against the call sites.

    Returns the calendar serialized as a UTF-8 decoded string (not a file).
    """
    # Create calendar
    cal = Calendar()
    for e in event_mat:
        event = Event()
        event.add('summary', e[0])
        event.add('dtstart', e[1])
        event.add('dtend', e[2])
        event.add('description', e[3])
        cal.add_component(event)
    cal = cal.to_ical().decode('utf-8')
    return cal # this is a string
| true
|
a8928d439fe1f728e6eb2c3330398748c3bff10f
|
Python
|
rekeshali/kNN-DecisonTree
|
/implement.py
|
UTF-8
| 10,145
| 3.015625
| 3
|
[] |
no_license
|
import numpy as np
#######################################################################################
############################## K Nearest Neighbors ####################################
#######################################################################################
def kNN(k, Lnorm, Xtrain, Xtest, Ctrain):
# compares all test data to all training data
# nearest neighbors give probability of class
[Ntrain, F] = Xtrain.shape # samples in train
Ntest = Xtest.shape[0] # samples in test
dist = np.zeros((Ntrain)) # dist of nts compared to ntr
CkNN = np.zeros((Ntest )) # class of nts according to kNN
Clist = [] # list of all class values
for c in Ctrain: # populate class list
if c not in Clist:
Clist.append(c)
for nts in range(Ntest):
for ntr in range(Ntrain):
dist[ntr] = (np.sum(np.abs(Xtest[nts,:] - Xtrain[ntr,:])**Lnorm))**(1/Lnorm)
kNNidx = np.argsort(dist)[:k] # get first k sorted distance indices
Cprob = np.zeros((len(Clist))) # reinitialize to zero probability
for nn in kNNidx: # for all k neighbors
Cidx = Clist.index(Ctrain[nn]) # find index of class in list
Cprob[Cidx] += 1 # add one to sum at index of class
Cprob /= k # divide by k to get prob
maxprob = np.max(Cprob) # find max
CkNN[nts] = Clist[list(Cprob).index(maxprob)] # class = first appearance of maxprob
return CkNN
#######################################################################################
################################ Decision Tree ########################################
#######################################################################################
def GetPrior(C):
# Prior a sample of belonging to class C
Cm = []
pm = []
for ci in C: # for all samples
if ci not in Cm:
# Gather unseen class
Cm.append(ci)
pm.append(0)
# Add to sum for that class
Cmi = Cm.index(ci)
pm[Cmi] += 1
return np.asarray(pm)/len(C), Cm
def NodeImpurity(pm, imptype):
# Calculate impurity of node
# gini and misclass are dichotomous
if imptype == 'entropy':
e = 0.0
for pi in pm:
e += -pi*np.log2(pi)
elif imptype == 'gini':
e = 2*pm[0]*(1 - pm[0])
elif imptype == 'misclassification error':
e = 1.0 - np.max(pm)
return e
def SplitImpurity(C, n, imptype):
# Get the combined entropy of all resulting
# branches after splitting a node
e = 0.0
for j in n:
Cj = [C[i] for i in j] # gather classes belonging to j
pmj = GetPrior(Cj)[0] # get priors
ej = NodeImpurity(pmj, imptype) # impurity of branch j
e += len(j)*ej/len(C) # impurity of split
return e
def SplitIndex(X):
# Splitting a discrete valued array based on like instances
n = []
j = []
for t, xt in enumerate(X):
if xt not in j: # gather new value for instance
j.append(xt)
n.append([])
nidx = j.index(xt) # add to like valued group
n[nidx].append(t)
return n # returns array of index arrays for each group
def SplitAttribute(X, imptype, dtype):
# Best split determined at minimum impurity
# for any split at anny attribute
MinEnt = float('inf')
[N,D] = X.shape
for i in range(D)[:-1]: # for all attributes
if dtype == 'discrete':
n = SplitIndex(X[:,i])
e = SplitImpurity(X[:,-1], n, imptype)
if e < MinEnt: # minimize impurity
MinEnt = e
besti = i
bestn = n
elif dtype == 'numeric':
sort = list(X[:,i].argsort()) # sort indices by increasing X
for t in range(N)[1:]:
n = [ sort[0:t] , sort[t:N] ] # split indices
e = SplitImpurity(X[:,-1], n, imptype)
if e < MinEnt:
MinEnt = e
besti = i
bestn = n
return besti, bestn # returns best feature to split in, and best way to split
def AddTerminal(T, C):
# Create leaf on tree
T['type'] = 'terminal'
T['class'] = C
return T
def AddNode(T, index, dtype):
# Create a junction on tree
T['type'] = 'node'
T['index'] = index # attribute to compare
if dtype == 'discrete':
T['values'] = [] # possible values of branches
elif dtype == 'numeric':
T['midpoint'] = [] # 2 branches around midpoint
T[ 'above'] = {} # creating branches
T[ 'below'] = {}
return T
def AddBranch(T, val, dtype):
# Create new branch on tree based on indicator
if dtype == 'discrete':
T['values'].append(val)
T[ val ] = {}
elif dtype == 'numeric':
if val not in T['midpoint']:
T['midpoint'].append(val)
return T
def GetValue(X, i, j, n, dtype):
# Find value for branch indicator
if dtype == 'discrete':
val = X[j[0],i] # value of all rows in branch
tooth = val
elif dtype == 'numeric':
val = (np.max(X[n[0],i]) + np.min(X[n[1],i]))/2 # midpoint between branches
key = ['below', 'above']
tooth = key[ n.index(j) ]
return val, tooth
def GenerateTree(X, T, imptype, entmax, level, depth, depthmax, dtype):
# Train a dataset and output a nested dictionary holding structure of tree
# also output the depth of the tree
# Supports numeric and discrete data
# Stop conditions include impurity max and depth max
level = level + 1 # add a level to the tree
if level > depth: # keep highest level as depth
depth = level
[pm, Cm] = GetPrior(X[:,-1]) # get priors for instances in class
# If we meet an impurity threshold or reach user defined max depth, add leaf
if NodeImpurity(pm, imptype) < entmax or depth == depthmax:
T = AddTerminal(T, Cm[ list(pm).index(np.max(pm)) ])
return [depth, T]
# Otherwise define position as node and keep adding branches
else:
[i, n] = SplitAttribute(X, imptype, dtype) # minimum entropy split
T = AddNode(T, i, dtype) # save index of split in node definition
for j in n: # for all groups in split
[val, tooth] = GetValue(X, i, j, n, dtype) # get comparison value and branch indicator
T = AddBranch(T, val, dtype) # add branch to tree
Xj = X[j,:] # new X for that branch
# Continue algorithm down new branch
depth = GenerateTree(Xj, T[tooth], imptype, entmax, level, depth, depthmax, dtype)[0]
return [depth, T]
def TreeClassify(X, T, dtype):
if T['type'] == 'terminal': # if at a leaf
C = T['class']
return C
elif dtype == 'discrete':
i = T['index'] # index to compare value
dmin = float('inf')
for val in T['values']:
dist = abs(X[i] - val) # find nearest to value L1 norm
if dist < dmin:
dmin = dist
branch = val
C = TreeClassify(X, T[branch], dtype) # go down closest branch
return C
elif dtype == 'numeric':
i = T['index']
mp = T['midpoint']
if X[i] <= mp: # see if above or below midpoint
branch = 'below'
else:
branch = 'above'
C = TreeClassify(X, T[branch], dtype) # go down branch
return C
#######################################################################################
################################## Performance ########################################
#######################################################################################
def perform(Ck, Ct): # CkNN, Ctest
TF = np.zeros((4)) # TP, TN, FP, FN
N = Ck.shape[0]
for n in range(N):
if Ck[n] == Ct[n]: # if true
if Ck[n] == 4: # if pos
TF[0] += 1
elif Ck[n] == 2: # if neg
TF[1] += 1
elif Ck[n] != Ct[n]: # if false
if Ck[n] == 4: # if pos
TF[2] += 1
elif Ck[n] == 2: # if neg
TF[3] += 1
return TF
def metrics(TF):
# TF is [TP, TN, FP, FN]
[TP, TN, FP, FN] = TF
ACC = (TP + TN)/(sum(TF)) # accuracy
TPR = TP/(TP + FN) # true positive rate, recall, or sensitivity
PPV = TP/(TP + FP) # positive predictive value or precision
TNR = TN/(TN + FP) # true negative rate or specificity
F1S = 2*PPV*TPR/(PPV + TPR)
return ACC, TPR, PPV, TNR, F1S
#######################################################################################
############### PCA ###################################################################
#######################################################################################
def PCA(X, k, pvar):
from scipy import linalg as la
[U,S,V] = la.svd(X) # decompose
kmin = get_k(S, pvar) # min k to get min percent variance
if k == 'kmin':
k = kmin # use min k
Uk = U[:,0:k] # recompose reduced set
Sk = S[ 0:k ]
Vk = V[0:k,:]
Sig = diag(Sk)
Xk = Uk*Sig*Vk
return np.asarray(Xk)
def get_k(S, pvar): # k that satisfies percent var
PV = percent_var(S)
for k, var in enumerate(PV):
if var >= pvar:
break
return k + 1
def percent_var(S): # compute pvar array
S = S**2
SumS = np.sum(S)
PV = [S[0]]
for val in S[1:]:
PV.append(PV[-1] + val)
PV = np.asarray(PV)/SumS
return PV
def diag(S): # digaonalize array, Sig = S*I
rank = len(S)
Sig = [[0 for x in range(rank)] for y in range(rank)]
for i in range(rank):
Sig[i][i] = S[i]
return np.asmatrix(Sig)
#######################################################################################
############### END ###################################################################
#######################################################################################
| true
|
b59b566172eaa118b5239d5a59ec9d6b328beb48
|
Python
|
DOREMUS-ANR/recommender
|
/recsystem/index.py
|
UTF-8
| 2,598
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import argparse
import json
import logging
from types import SimpleNamespace
from embedder import create_edgelists, embed, post_embed, combine_embeddings, visualizer, most_similar
logger = logging.getLogger(__name__)
def parse_args():
    """Parse the command line: one subcommand plus shared options."""
    parser = argparse.ArgumentParser()
    parser.add_argument('command',
                        choices=['create_edgelists', 'embed', 'post_embed', 'combine', 'visualise', 'most_similar'])
    parser.add_argument('-f', '--feature',
                        help='The feature on which compute the script. It subscribes the one in config.json')
    # Boolean switches.
    parser.add_argument('--reset', action='store_true',
                        help='Replace the previous embeddings instead of incrementing (only in combine mode)')
    parser.add_argument('--show', action='store_true',
                        help='Show the picture to video instead of saving to file (only in visualise mode)')
    # Entity selection for seed/target based commands.
    parser.add_argument('-s', '--seed', nargs='?', default=None, help='The URI of the entity to be used as seed')
    parser.add_argument('-t', '--target', nargs='?', default=None, help='The URI of the entity to be used as target')
    parser.add_argument('-n', '--num_results', type=int, default=3, help='How many results are requested')
    parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='config/config.json',
                        help='Path of configuration file.')
    return parser.parse_args()
def load_config(file):
    """Load JSON config from an open file; a top-level dict becomes a SimpleNamespace."""
    cfg = json.load(file)
    return SimpleNamespace(**cfg) if type(cfg) == dict else cfg
if __name__ == '__main__':
    # Parse CLI args and overlay them onto the JSON config namespace so
    # downstream modules see a single config object.
    args = parse_args()
    config = load_config(args.config)
    config.feature = args.feature
    config.seed = args.seed
    config.target = args.target
    config.num_results = args.num_results
    config.reset = args.reset
    config.show = args.show
    try:
        # Dispatch on the subcommand.  Note: 'embed' intentionally runs
        # post_embed afterwards as well.
        if args.command == 'create_edgelists':
            create_edgelists.main(config)
        if args.command == 'embed':
            embed.main(config)
            post_embed.main(config)
        if args.command == 'post_embed':
            post_embed.main(config)
        if args.command == 'combine':
            combine = combine_embeddings.CombineEmbeddings(config)
            combine.run()
        if args.command == 'visualise':
            visualizer.main(config)
        if args.command == 'most_similar':
            ms = most_similar.MostSimilar(config)
            ms.find(args.seed, args.num_results)
    except RuntimeError as error:
        # Embedder steps signal user-facing failures as RuntimeError.
        logger.error('[ERROR] %s' % error)
| true
|
d044dafc29c428dabc7f6f501d1eb1d5966e494a
|
Python
|
mirhossain8248/Python-Practice-
|
/regularExpression_charClass_findAll.py
|
UTF-8
| 1,036
| 3.796875
| 4
|
[] |
no_license
|
#Charecter Classes, Findall
import re
# --- findall with groups ---------------------------------------------------
message = "My phone numbers are: 775-737-8248, 775-824-9921"
phoneRegex = re.compile(r'((\d\d\d)-(\d\d\d-\d\d\d\d))')
phoneObjectFindAll = phoneRegex.findall(message)  # list of (whole, area, local) tuples
print(phoneObjectFindAll)

# --- built-in character classes (\d digit, \s whitespace, \w word char) ----
lyrics = '''4 calling birds
5 gold rings
6 geese a-laying
7 swans a-swimming
8 maids a-milking
9 ladies dancing 10 lords a-leaping 11 pipers piping 12 drummers drumming'''
xmasRegex = re.compile(r'\d+\s\w+')  # one-or-more digits, a space, one-or-more word chars
xmasObjectFindAll = xmasRegex.findall(lyrics)
print(xmasObjectFindAll)

# --- custom character class: any vowel, either case ------------------------
# (ranges work too, e.g. [a-z] for all lowercase letters)
vowelRegex = re.compile(r'[aeiouAEIOU]')
vowelObjectFindAll = vowelRegex.findall(lyrics)
print(vowelObjectFindAll)

# --- negative character class: everything EXCEPT vowels would be ----------
# negativeVowelRegex = re.compile(r'[^aeiouAEIOU]')
# negObjectFindAll = negativeVowelRegex.findall(lyrics)
# print(negObjectFindAll)
#
| true
|
df0b0e448d69feed074e95b7cae930a99cdb49d1
|
Python
|
MrHarcombe/PiTwiddles
|
/mcpi/George/AirShip.py
|
UTF-8
| 1,025
| 2.578125
| 3
|
[] |
no_license
|
import minecraft.minecraft as minecraft
import minecraft.block as block
import time
import datetime
import math
mc = minecraft.Minecraft.create()

# Platform centre (x, y, z) and per-tick velocity (xmv, zmv).
x = 64
y = 32
z = 64
xmv = 1
zmv = 0

try:
    while True:
        pos = mc.player.getPos()
        # Is the player standing on the 11x11 platform (one block above it)?
        # BUGFIX: the original tested pos.z four times and never bounded
        # pos.x from above.
        riding = (x - 6 < pos.x < x + 6 and
                  z - 6 < pos.z < z + 6 and
                  pos.y - 1 == y)
        if riding:
            # Erase the platform at its old position.
            # BUGFIX: the inner sweep originally ran while `zb < x + 6`.
            for xb in range(x - 5, x + 6):
                for zb in range(z - 5, z + 6):
                    mc.setBlock(xb, y, zb, 0)
        x = x + xmv
        z = z + zmv
        # Draw the platform at its new position (same `zb < x + 6` fix).
        for xb in range(x - 5, x + 6):
            for zb in range(z - 5, z + 6):
                mc.setBlock(xb, y, zb, 1)
        if riding:
            # Carry the player along with the platform.
            # BUGFIX: the original moved the player by pos.x + zmv.
            mc.player.setPos(pos.x + xmv, pos.y, pos.z + zmv)
        time.sleep(1)
except KeyboardInterrupt:
    # On Ctrl-C, clear the platform at its last two positions.
    for xb in range(x - 5, x + 6):
        for zb in range(z - 5, z + 6):
            mc.setBlock(xb, y, zb, 0)
    x = x + xmv
    z = z + zmv
    for xb in range(x - 5, x + 6):
        for zb in range(z - 5, z + 6):
            mc.setBlock(xb, y, zb, 0)
| true
|
adb9b7dcadda89c40a69e332d4e571957b41274a
|
Python
|
sharmar0790/python-samples
|
/numpy_package/npVector.py
|
UTF-8
| 179
| 3.140625
| 3
|
[] |
no_license
|
import numpy as np
import random as rn
# Draw 30 random ints in [1, 1000] and report their mean.
# (Idiom fix: build the list in one comprehension instead of
# pre-allocating zeros and filling by index.)
arr = [rn.randint(1, 1000) for _ in range(30)]
print(arr)
data = np.mean(arr)
print("Mean === ", data)
| true
|
7326873f8e500fa958de5f701414cfa53f271b8e
|
Python
|
moylaugh/trading-toolbox
|
/myquant/my_util.py
|
UTF-8
| 10,468
| 2.828125
| 3
|
[] |
no_license
|
import datetime
import gmsdk
import logging
def average_true_range(dailybars, n):
    """Average True Range over the n most recent daily bars.

    `dailybars` must hold exactly n + 1 bars, newest first; the extra
    (oldest) bar only supplies the previous close for the last TR term.
    Raises Exception on a length mismatch.
    """
    if len(dailybars) != n + 1:
        raise Exception('len(dailybars) = {0}'.format(len(dailybars)))
    total = 0.0
    # Pair each bar with the one before it (chronologically) and sum the
    # true ranges.
    for bar, prev in zip(dailybars, dailybars[1:]):
        tr = max(bar.high - bar.low,
                 abs(bar.high - prev.close),
                 abs(bar.low - prev.close))
        total += tr
    return total / n
def get_symbol_list(config):
    """Return the comma-separated common/symbol_list config entry as a list."""
    raw = config['common']['symbol_list']
    return raw.split(',')
# 获取最近n天日线数据,结果的日线有可能不足n天
# 返回:symbol_to_dailybars
# 返回类型:dict
def get_dailybars(strategy, symbol_list, n):
    """Fetch the last n daily bars for each symbol (result may hold fewer).

    Returns a dict mapping symbol -> list of bars.
    """
    return {symbol: strategy.get_last_n_dailybars(symbol, n)
            for symbol in symbol_list}
# 获取交易品种的相关信息
# 返回:symbol_to_instrument
# 返回类型:dict
def get_instruments(strategy, symbol_list):
    """Fetch instrument metadata for the given futures symbols.

    Queries the SHFE, DCE and CZCE exchanges and keeps only instruments
    whose symbol appears in symbol_list.  Raises if any requested symbol
    was not found.  Returns a dict mapping symbol -> instrument.
    """
    wanted = set(symbol_list)
    symbol_to_instrument = {}
    for exchange in ('SHFE', 'DCE', 'CZCE'):
        for inst in strategy.get_instruments(exchange, 4, 1):
            if inst.symbol in wanted:
                symbol_to_instrument[inst.symbol] = inst
    if len(symbol_to_instrument) != len(symbol_list):
        raise Exception(
            'len(symbol_to_instrument)={0}, len(symbol_list)={1}'.format(
                len(symbol_to_instrument), len(symbol_list)))
    return symbol_to_instrument
# 获取策略子账户实际持仓数据
# 返回:symbol_to_position_account
# 返回类型:dict
def get_positions(strategy):
    """Map '<exchange>.<sec_id>' -> position for the strategy sub-account."""
    return {'{0}.{1}'.format(pos.exchange, pos.sec_id): pos
            for pos in strategy.get_positions()}
def open_long(strategy, tick, instrument, volume):
    """Buy-open at last price + 10 ticks, capped at the limit-up price."""
    aggressive = tick.last_price + 10 * instrument.price_tick
    strategy.open_long(tick.exchange, tick.sec_id,
                       min(aggressive, tick.upper_limit), volume)
def close_long(strategy, position, tick, instrument, volume):
    """Sell-close `volume` lots of a long position, today's volume first.

    Price is last - 10 ticks, floored at limit-down.  Raises when the
    position is not long or holds less than `volume`.
    """
    if volume <= 0:
        raise Exception('volume = {0}'.format(volume))
    if (position.side != gmsdk.OrderSide_Bid or
            position.volume < volume):
        raise Exception('{0}: side={1}, volume={2}'.format(
            position.sec_id, position.side, position.volume))
    price = max(tick.last_price - 10 * instrument.price_tick,
                tick.lower_limit)
    today = position.volume_today
    if today >= volume:
        # All of it can be closed against today's position.
        strategy.close_long(tick.exchange, tick.sec_id, price, volume)
    elif today > 0:
        # Split: close today's volume first, the remainder from yesterday.
        strategy.close_long(tick.exchange, tick.sec_id, price, today)
        strategy.close_long_yesterday(tick.exchange, tick.sec_id,
                                      price, volume - today)
    else:
        strategy.close_long_yesterday(tick.exchange, tick.sec_id,
                                      price, volume)
def open_short(strategy, tick, instrument, volume):
    """Sell-open at last price - 10 ticks, floored at the limit-down price."""
    aggressive = tick.last_price - 10 * instrument.price_tick
    strategy.open_short(tick.exchange, tick.sec_id,
                        max(aggressive, tick.lower_limit), volume)
def close_short(strategy, position, tick, instrument, volume):
    """Buy-close `volume` lots of a short position, today's volume first.

    Price is last + 10 ticks, capped at limit-up.  Raises when the
    position is not short or holds less than `volume`.
    """
    if volume <= 0:
        raise Exception('volume = {0}'.format(volume))
    if (position.side != gmsdk.OrderSide_Ask or
            position.volume < volume):
        raise Exception('{0}: side={1}, volume={2}'.format(
            position.sec_id, position.side, position.volume))
    price = min(tick.last_price + 10 * instrument.price_tick,
                tick.upper_limit)
    today = position.volume_today
    if today >= volume:
        # All of it can be closed against today's position.
        strategy.close_short(tick.exchange, tick.sec_id, price, volume)
    elif today > 0:
        # Split: close today's volume first, the remainder from yesterday.
        strategy.close_short(tick.exchange, tick.sec_id, price, today)
        strategy.close_short_yesterday(tick.exchange, tick.sec_id,
                                       price, volume - today)
    else:
        strategy.close_short_yesterday(tick.exchange, tick.sec_id,
                                       price, volume)
def sync_position(strategy, tick, instrument,
                  strategy_pos, account_pos):
    """Bring the real account position in line with the strategy position.

    Compares long and short volumes between `strategy_pos` (target) and
    `account_pos` (actual) and places opening/closing orders for the
    difference.  Returns (long_diff, short_diff), the signed adjustments
    that were attempted.

    When BOTH target and actual hold the same side, the diff is
    deliberately zeroed and left for manual handling (see comments below).
    """
    strategy_volume_long = 0
    account_volume_long = 0
    strategy_volume_short = 0
    account_volume_short = 0
    if strategy_pos and strategy_pos.side == gmsdk.OrderSide_Bid:
        strategy_volume_long = int(strategy_pos.volume)
    if strategy_pos and strategy_pos.side == gmsdk.OrderSide_Ask:
        strategy_volume_short = int(strategy_pos.volume)
    if account_pos and account_pos.side == gmsdk.OrderSide_Bid:
        account_volume_long = int(account_pos.volume)
    if account_pos and account_pos.side == gmsdk.OrderSide_Ask:
        account_volume_short = int(account_pos.volume)
    long_diff = strategy_volume_long - account_volume_long
    if strategy_volume_long > 0 and account_volume_long > 0:
        # Ignore this case for now; handle it manually.
        long_diff = 0
    if long_diff > 0:  # open long
        open_long(strategy, tick, instrument, long_diff)
        logging.info('[{0}.{1} open_long] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_long,
            strategy_volume_long))
    elif long_diff < 0:  # close long
        close_long(strategy, account_pos, tick, instrument, abs(long_diff))
        logging.info('[{0}.{1} close_long] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_long,
            strategy_volume_long))
    short_diff = strategy_volume_short - account_volume_short
    if strategy_volume_short > 0 and account_volume_short > 0:
        # Ignore this case for now; handle it manually.
        short_diff = 0
    if short_diff > 0:  # open short
        open_short(strategy, tick, instrument, short_diff)
        logging.info('[{0}.{1} open_short] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_short,
            strategy_volume_short))
    elif short_diff < 0:  # close short
        close_short(strategy, account_pos, tick, instrument, abs(short_diff))
        logging.info('[{0}.{1} close_short] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_short,
            strategy_volume_short))
    return (long_diff, short_diff)
# Position-sync function, backup copy.
# NOTE(review): near-duplicate of sync_position above, minus the
# "both sides already hold a position -> manual handling" guard.
# Appears to be dead code kept for reference; consider removing.
def sync_position_backup(strategy, tick, instrument,
                         strategy_pos, account_pos):
    """Backup variant of sync_position without the both-sides guard.

    Places orders for the long/short volume differences between the
    strategy target and the real account.  Returns (long_diff, short_diff).
    """
    strategy_volume_long = 0
    account_volume_long = 0
    strategy_volume_short = 0
    account_volume_short = 0
    if strategy_pos and strategy_pos.side == gmsdk.OrderSide_Bid:
        strategy_volume_long = int(strategy_pos.volume)
    if strategy_pos and strategy_pos.side == gmsdk.OrderSide_Ask:
        strategy_volume_short = int(strategy_pos.volume)
    if account_pos and account_pos.side == gmsdk.OrderSide_Bid:
        account_volume_long = int(account_pos.volume)
    if account_pos and account_pos.side == gmsdk.OrderSide_Ask:
        account_volume_short = int(account_pos.volume)
    long_diff = strategy_volume_long - account_volume_long
    if long_diff > 0:  # open long
        open_long(strategy, tick, instrument, long_diff)
        logging.info('[{0}.{1} open_long] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_long,
            strategy_volume_long))
    elif long_diff < 0:  # close long
        close_long(strategy, account_pos, tick, instrument, abs(long_diff))
        logging.info('[{0}.{1} close_long] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_long,
            strategy_volume_long))
    short_diff = strategy_volume_short - account_volume_short
    if short_diff > 0:  # open short
        open_short(strategy, tick, instrument, short_diff)
        logging.info('[{0}.{1} open_short] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_short,
            strategy_volume_short))
    elif short_diff < 0:  # close short
        close_short(strategy, account_pos, tick, instrument, abs(short_diff))
        logging.info('[{0}.{1} close_short] {2} => {3}'.format(
            tick.exchange, tick.sec_id,
            account_volume_short,
            strategy_volume_short))
    return (long_diff, short_diff)
def pos_size_risk(core_equity, market_money, risk_points, multiplier,
core_equity_percent=0.009, market_money_percent=0.03, max_total_percent=0.015):
max_risk = (core_equity + market_money) * max_total_percent
if market_money > 0: # 盈利时
total_risk = (core_equity * core_equity_percent + market_money * market_money_percent)
else: # 亏损时
total_risk = (core_equity + market_money + market_money) * core_equity_percent
if total_risk > max_risk:
total_risk = max_risk
pos_size = int(total_risk / (risk_points * multiplier))
if pos_size < 1:
pos_size = 1
return pos_size
# 返回on_timer触发的时刻
# 返回类型:datetime.time
def timer_time():
    """The fixed wall-clock time (20:35:00) at which on_timer fires daily."""
    return datetime.time(hour=20, minute=35, second=0)
def milliseconds_to_next_timer():
    """Milliseconds from now until the next daily timer_time() (always > 0)."""
    now = datetime.datetime.now()
    fire_at = timer_time()
    target_day = now.date()
    if now.time() >= fire_at:
        # Already past today's slot: schedule for tomorrow.
        target_day += datetime.timedelta(days=1)
    target = datetime.datetime.combine(target_day, fire_at)
    ms = int((target - now).total_seconds() * 1000)
    # Guard against a non-positive delay (clock races around the boundary).
    return ms if ms > 0 else 3000
def position_to_str(position):
    """Compact 'EXCH.sec|side|volume' rendering; '' for a missing position."""
    if not position:
        return ''
    if position.side == gmsdk.OrderSide_Bid:
        side = 'L'
    elif position.side == gmsdk.OrderSide_Ask:
        side = 'S'
    else:
        side = ''
    return '{0}.{1}|{2}|{3}'.format(
        position.exchange, position.sec_id, side, int(position.volume))
def position_to_str_detail(position):
    """Like position_to_str but also shows today's volume: 'EXCH.sec|side|total/today'."""
    if not position:
        return ''
    if position.side == gmsdk.OrderSide_Bid:
        side = 'L'
    elif position.side == gmsdk.OrderSide_Ask:
        side = 'S'
    else:
        side = ''
    return '{0}.{1}|{2}|{3}/{4}'.format(
        position.exchange, position.sec_id, side,
        int(position.volume), int(position.volume_today))
| true
|
f8cd5fd444f7698eb5d105c40a89fbe027f74335
|
Python
|
nedima68/PseudoSupervisedAnomalyDetection
|
/utils/parse_json_files_to_latex_table.py
|
UTF-8
| 3,398
| 3.140625
| 3
|
[] |
no_license
|
import glob
import os
import json
class JSONtoLaTeXtableParser:
def __init__(self, output_file, header_titles):
"""
Constructor. Set path where the JSON files are stored,
e. g. "/some/path/to/results/*/*.json"
Also filename for output file is needed
"""
self.header = header_titles
self.lines = []
self.out = output_file
def __build_latex_tab_string(self, data, args):
""" extract values from data using keywords from args """
s = ""
for i, key in enumerate(args):
if i < len(args) - 1:
s += str(data[key]) + " & "
else:
s += str(data[key]) + "\\" + "\\ \\hdashline"
return s
def convert_num_format(self, str_num):
f1 = float(str_num[str_num.find('%')+1:str_num.find('\u00b1')-1])
f2 = float(str_num[str_num.find('\u00b1')+2:])
res = "{:.3f} \u00b1 {:.3f}".format(f1 / 100, f2 / 100)
return res
def add_line(self, data, data_keys):
s=""
for key in data_keys:
if type(key) == str:
t = str(data[key])
if t.find('%') != -1:
t = self.convert_num_format(t)
t = t.replace('\u00b1','\\pm')
t = t.replace('_','\_')
s += '$' + t + "$ & "
elif type(key) == tuple:
new_keys=key
while type(new_keys) == tuple:
data = data[new_keys[0]]
new_keys = new_keys[1]
for key_d in new_keys:
t = str(data[key_d])
if t.find('%') != -1:
t = self.convert_num_format(t)
t = t.replace('\u00b1','\\pm')
s += '$' + t + "$ & "
i = s.rfind('&')
s = s[:i]
s += "\\" + "\\"
self.lines.append(s)
def parse_second_level_to_latex_tab(self, first_level_key, *args):
""" extract data from a dictionary within a dictionary """
self.header= args
for file_n in self.files:
with open(file_n, 'r') as fd:
data = json.load(fd)[first_level_key]
s = self.__build_latex_tab_string(data, args)
self.lines.append(s)
def create_latex_table_2(self):
"""
create simple LaTeX table based on extracted data.
"""
self.table = "\\begin{tabular}{@{}l "
self.table += (len(self.header)-1) * "r " + "@{}}\\toprule \n"
for i, h in enumerate(self.header):
if i < len(self.header) - 1:
self.table += "\\textbf{" + h.replace("_", " ") + "} & "
else:
self.table += "\\textbf{" + h.replace("_", " ") + "} \\" + "\\ \\bottomrule \n"
for line in self.lines:
self.table += line + "\n"
self.table += "\\bottomrule \n"
self.table += "\\end{tabular}"
def write_rows_to_file(self):
    """Write only the collected data rows (one per line) to ``self.out``."""
    with open(self.out, 'w') as handle:
        handle.writelines(row + "\n" for row in self.lines)
def write_table_to_file(self):
    """Write the fully assembled LaTeX table string to ``self.out``."""
    with open(self.out, 'w') as handle:
        handle.write(self.table)
| true
|
c6b820fe14dbbe123026ed5956bede31a1a2d373
|
Python
|
baker-project/baker
|
/baker_wet_cleaning_application/scripts/test_detection_and_removing_dirt.py
|
UTF-8
| 2,522
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from threading import Thread
import services_params as srv
from std_srvs.srv import Trigger
from cob_object_detection_msgs.msg import DetectionArray
import rospy
from utils import projectToCamera
from dirt_removing_behavior import DirtRemovingBehavior
"""
This file contains routine to start the dirt detection on the current
robot location and then execute the dirt removing behavior if needed.
WARNING: the functions used in this code are partly copy pasted from DryCleaningBehavior
and could be outdated (7.08.2019)
Usage:
1/ start the baker_wet_cleaning_application
2/ Start this file
"""
class TestDetector:
    """Trigger dirt detection at the robot's current location and run the
    dirt-removing behaviour on every reported detection."""

    def __init__(self):
        # Listen for detections published by the preprocessing node, then
        # ask the detector service to start scanning.
        self.trash_topic_subscriber_ = rospy.Subscriber('/dirt_detection_server_preprocessing/dirt_detector_topic', DetectionArray, self.dirtDetectionCallback)
        self.callTriggerService(srv.START_DIRT_DETECTOR_SERVICE_STR)

    def callTriggerService(self, service_name):
        """Call a std_srvs/Trigger service, waiting for it to come up first.

        Failures are logged and swallowed (best-effort behaviour kept).
        """
        rospy.wait_for_service(service_name)
        try:
            req = rospy.ServiceProxy(service_name, Trigger)
            req()
        # FIX: 'except X, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
        except rospy.ServiceException as e:
            print("Service call to {} failed: {}".format(service_name, e))

    def dirtDetectionCallback(self, detections):
        """Handle a DetectionArray: stop the detector, project the detections
        to camera coordinates, and clean each resulting dirt position."""
        detections = detections.detections
        if len(detections) == 0:
            return
        print('dirts detected')
        self.callTriggerService(srv.STOP_DIRT_DETECTOR_SERVICE_STR)
        # todo rmb-ma temporary solution. Keep camera, robot or room coordinates?
        print('Project positions to camera')
        detections = [projectToCamera(detection) for detection in detections]
        if None in detections or len(detections) == 0:
            # Projection failed: restart detection and wait for new data.
            print('No detections can be projected to the camera. Starting detection again')
            self.callTriggerService(srv.START_DIRT_DETECTOR_SERVICE_STR)
            return
        print("DIRT(S) DETECTED!!")
        # 1. Stop the dirt and the trash detections
        # 2. Clean them
        for detection in detections:
            dirt_remover = DirtRemovingBehavior("DirtRemovingBehavior", [0],
                                                move_base_service_str=srv.MOVE_BASE_SERVICE_STR,
                                                map_accessibility_service_str=srv.MAP_ACCESSIBILITY_SERVICE_STR,
                                                clean_pattern_str=srv.CLEAN_PATTERN_STR)
            position = detection.pose.pose.position
            dirt_remover.setParameters(dirt_position=position)
            dirt_remover.executeBehavior()
if __name__ == '__main__':
    # Start the ROS node, kick off detection, and block until shutdown.
    rospy.init_node('test_dirt_detector', anonymous=True)
    detector = TestDetector()  # subscribes and triggers the dirt detector
    rospy.spin()
| true
|
116a822dd47fae8f93b31a4e5cb7c0f4378a6374
|
Python
|
fidoriel/sudoku42
|
/legacy/sudoku.py
|
UTF-8
| 1,852
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# Puzzle to solve, rows listed top to bottom; 0 marks an empty cell.
# solve() fills this grid in place.
sudoku = [[0,0,8,0,0,4,9,3,5],
          [0,0,0,0,0,5,2,6,0],
          [0,0,0,0,2,3,0,0,8],
          [0,0,0,0,0,0,1,0,7],
          [6,1,0,0,7,0,0,9,0],
          [0,0,0,4,5,0,0,0,0],
          [5,2,0,0,0,0,0,0,3],
          [0,0,3,0,1,0,0,0,0],
          [0,0,0,0,0,0,6,0,1]]
def numberPossible(yP, xP, nr, board=None):
    """Return True if digit ``nr`` may be placed at row ``yP``, column ``xP``
    without clashing with the same row, column or 3x3 box.

    ``board`` defaults to the module-level ``sudoku`` grid, preserving the
    original no-argument behaviour; pass an explicit 9x9 grid to reuse the
    check on other puzzles.
    """
    if board is None:
        board = sudoku  # original behaviour: check against the global grid
    # Row clash.
    if nr in board[yP]:
        return False
    # Column clash.
    if any(board[i][xP] == nr for i in range(9)):
        return False
    # 3x3 box clash: (y0, x0) is the top-left corner of the enclosing box.
    x0 = (xP // 3) * 3
    y0 = (yP // 3) * 3
    return all(board[y0 + i][x0 + j] != nr
               for i in range(3)
               for j in range(3))
def printSudoku():
global sudoku
print()
for i in range(0, 3):
print(" ", sudoku[i][0], sudoku[i][1], sudoku[i][2], " | ", sudoku[i][3], sudoku[i][4], sudoku[i][5], " | ", sudoku[i][6], sudoku[i][7], sudoku[i][8], " ")
print("---------+---------+--------")
for i in range(3, 6):
print(" ", sudoku[i][0], sudoku[i][1], sudoku[i][2], " | ", sudoku[i][3], sudoku[i][4], sudoku[i][5], " | ", sudoku[i][6], sudoku[i][7], sudoku[i][8], " ")
print("---------+---------+--------")
for i in range(6, 9):
print(" ", sudoku[i][0], sudoku[i][1], sudoku[i][2], " | ", sudoku[i][3], sudoku[i][4], sudoku[i][5], " | ", sudoku[i][6], sudoku[i][7], sudoku[i][8], " ")
print()
def solve():
    """Fill the global ``sudoku`` grid in place by backtracking.

    Returns True once the grid is completely (and legally) filled, or
    False if the current partial grid admits no solution.
    """
    global sudoku
    for y in range(9):
        for x in range(9):
            if sudoku[y][x] != 0:
                continue
            # First empty cell found: try every digit, recursing on each.
            for candidate in range(1, 10):
                if not numberPossible(y, x, candidate):
                    continue
                sudoku[y][x] = candidate
                if solve():
                    return True
                sudoku[y][x] = 0  # dead end: undo and try the next digit
            return False  # no digit fits here — backtrack
    return True  # no empty cell remains — solved
# Solve the puzzle in place, then display the completed grid.
solve()
printSudoku()
| true
|
84500c48cbbe32d115eaff2f96a30da9b6e4e7e9
|
Python
|
zzz136454872/leetcode
|
/hitBricks.py
|
UTF-8
| 2,174
| 2.890625
| 3
|
[] |
no_license
|
from typing import *
import copy
class Solution:
    """LeetCode 803 'Bricks Falling When Hit', solved with union-find by
    replaying the hits in reverse (adding bricks instead of erasing)."""

    def hitBricks(self, grid: List[List[int]], hits: List[List[int]]) -> List[int]:
        """For each hit, return how many OTHER bricks drop because they
        lose their connection to the top row. Mutates ``grid`` in place."""
        rows, cols = len(grid), len(grid[0])
        ceiling = rows * cols  # virtual node standing in for the top row

        # Erase every hit brick up front, remembering hits on empty cells.
        already_empty = [False] * len(hits)
        for idx, (r, c) in enumerate(hits):
            if not grid[r][c]:
                already_empty[idx] = True
            grid[r][c] = 0

        parent = list(range(ceiling + 1))
        comp_size = [1] * (ceiling + 1)
        comp_size[ceiling] = 0  # the virtual node itself is not a brick

        def cell_id(r, c):
            # Flatten a (row, col) coordinate to a single union-find node.
            return r * cols + c

        def find(node):
            # Root lookup with path compression.
            if parent[node] != node:
                parent[node] = find(parent[node])
            return parent[node]

        def union(a, b):
            # Attach the smaller-indexed root under the larger one, so the
            # ceiling node (largest index) always stays a root and
            # comp_size[-1] tracks the bricks connected to the top.
            root_a, root_b = find(a), find(b)
            if root_a > root_b:
                parent[root_b] = root_a
                comp_size[root_a] += comp_size[root_b]
            elif root_a < root_b:
                parent[root_a] = root_b
                comp_size[root_b] += comp_size[root_a]

        # Connect the surviving bricks: top row to the ceiling, then each
        # brick to its upper and left neighbours.
        for c in range(cols):
            if grid[0][c]:
                union(c, ceiling)
        for r in range(rows):
            for c in range(cols):
                if grid[r][c]:
                    if r > 0 and grid[r - 1][c]:
                        union(cell_id(r, c), cell_id(r - 1, c))
                    if c > 0 and grid[r][c - 1]:
                        union(cell_id(r, c), cell_id(r, c - 1))

        # Replay the hits backwards: restoring a brick may reattach a whole
        # component; the newly attached bricks, minus the restored brick
        # itself, are exactly the ones that fell at that hit.
        fallen = [0] * len(hits)
        for k in range(len(hits) - 1, -1, -1):
            if already_empty[k]:
                continue
            r, c = hits[k]
            attached_before = comp_size[-1]
            if r == 0:
                union(cell_id(r, c), ceiling)
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc]:
                    union(cell_id(r, c), cell_id(nr, nc))
            fallen[k] = max(0, comp_size[-1] - attached_before - 1)
            grid[r][c] = 1
        return fallen
# Quick manual check of hitBricks on a single-column grid.
sl=Solution()
grid=[[1],[1],[1],[1],[1]]
hits=[[3,0],[4,0],[1,0],[2,0],[0,0]]
print(sl.hitBricks(grid,hits))
| true
|
0ea808eb5207dfc9c8f140024919dbec0626a66a
|
Python
|
nssalim/we2hwk13112020_codeclan_caraoke
|
/tests/bar_test.py
|
UTF-8
| 1,313
| 3.359375
| 3
|
[] |
no_license
|
import unittest
from classes.room import Room
from classes.guest import Guest
from classes.song import Song
from classes.drink import Drink
from classes.food import Food
class TestBar(unittest.TestCase):
    """Unit tests for Guest behaviour at the bar (age, wallet, funds check)."""

    def setUp(self):
        """Build the songs, consumables and guests shared by the tests."""
        self.song1 = Song("The Rolling Stones", "She's a rainbow")
        self.song2 = Song("The Kinks", "All day and all of the night")
        self.song3 = Song("The Beatles", "Norwegian Wood")
        self.song4 = Song("The Troggs", "Wild Thing")
        self.songs = [self.song1, self.song2, self.song3, self.song4]
        self.drink = Drink("rum", 5.00, 30)
        self.food = Food("peanuts", 2.00, 25)
        self.guest1 = Guest("Holly Golighty", 20, 50, self.song1, 4)
        self.guest2 = Guest("Paul Varjak", 27, 100, self.song2, 8)
        self.guest3 = Guest("Cat", 100, 0, self.song4, 2)
        self.guests = [self.guest1, self.guest2, self.guest3]

    def test_guest_age(self):
        self.assertEqual(20, self.guest1.age)

    def test_guest_wallet(self):
        self.assertEqual(50.00, self.guest1.wallet)

    def test_sufficient_funds__true_if_enough(self):
        # guest1 carries 50 in the wallet; the drink costs 5.00.
        self.assertEqual(True, self.guest1.sufficient_funds(self.drink))

    def test_sufficient_funds__false_if_not_enough(self):
        # BUG FIX: the original asserted False on the exact call the twin
        # test above asserts True for (guest1 can afford the drink), so one
        # of the two had to fail. guest3's wallet is 0, so the funds check
        # must fail for them.
        self.assertEqual(False, self.guest3.sufficient_funds(self.drink))
|