blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d78586e2b5684c7eae8df87da8c1dbd60e7645eb | Python | Unix-Code/pygeocodio | /tests/test_data.py | UTF-8 | 6,040 | 2.734375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_geocodio
----------------------------------
Tests for `geocodio.data` module.
"""
import json
import os
import unittest
from geocodio.data import Address
from geocodio.data import Location
from geocodio.data import LocationCollection
class TestDataTypes(unittest.TestCase):
    """Tests for geocodio.data Address, Location and LocationCollection."""

    def setUp(self):
        """
        Read the test data from JSON files which are modified from actual
        service response only for formatting. This makes this file much easier
        to read, the data easier to inspect, and ensures that the data matches
        what the service actually replies with.
        """
        # Fixture files live in the "response/" directory next to this module.
        fixtures = os.path.join(os.path.dirname(os.path.abspath(__file__)), "response/")
        with open(os.path.join(fixtures, "single.json"), "r") as single_json:
            self.single_response = json.loads(single_json.read())
        with open(os.path.join(fixtures, "batch.json"), "r") as batch_json:
            self.batch_response = json.loads(batch_json.read())
        with open(os.path.join(fixtures, "batch_components.json"), "r") as batch_components_json:
            self.batch_components_response = json.loads(batch_components_json.read())
        with open(os.path.join(fixtures, "address.json"), "r") as address_json:
            self.address_response = json.loads(address_json.read())
        with open(os.path.join(fixtures, "missing_results.json"), "r") as missing_json:
            self.missing_results = json.loads(missing_json.read())
        with open(
            os.path.join(fixtures, "batch_reverse.json"), "r"
        ) as batch_reverse_json:
            self.batch_reverse_response = json.loads(batch_reverse_json.read())

    def test_address_coords(self):
        """Ensure Address.coords property returns None when no location"""
        x = Address(self.address_response)
        self.assertEqual(None, x.coords)

    def test_address_accuracy(self):
        """Ensure Address.accuracy property returns None when no location"""
        x = Address(self.address_response)
        self.assertEqual(None, x.accuracy)

    def test_location_coords(self):
        """Ensure Location.coords property returns a suitable tuple"""
        x = Location(self.single_response)
        self.assertEqual(x.coords, (37.554895702703, -77.457561054054))
        # Do the same with the order changed
        x = Location(self.single_response, order="lng")
        self.assertEqual(x.coords, (-77.457561054054, 37.554895702703))

    def test_location_results_missing(self):
        """Ensure empty results are processed as a missing address"""
        bad_results = Location(self.missing_results)
        self.assertEqual(bad_results.coords, None)

    def test_collection(self):
        """Ensure that the LocationCollection stores as a list of Locations"""
        self.assertTrue(isinstance(self.batch_response, dict))
        locations = LocationCollection(self.batch_response["results"])
        self.assertTrue(isinstance(locations[0], Location))
        locations = LocationCollection(self.batch_reverse_response["results"])
        self.assertTrue(isinstance(locations[0], Location))

    def test_collection_coords(self):
        """Ensure the coords property returns a list of suitable tuples"""
        # The third batch entry has no match, hence the trailing None.
        locations = LocationCollection(self.batch_response["results"])
        self.assertEqual(
            locations.coords,
            [
                (37.560890255102, -77.477400571429),
                (37.554895702703, -77.457561054054),
                None,
            ],
        )
        # Do the same with the order changed
        locations = LocationCollection(self.batch_response["results"], order="lng")
        self.assertEqual(
            locations.coords,
            [
                (-77.477400571429, 37.560890255102),
                (-77.457561054054, 37.554895702703),
                None,
            ],
        )

    def test_collection_addresses(self):
        """Ensure that formatted addresses are returned"""
        locations = LocationCollection(self.batch_response["results"])
        self.assertEqual(
            locations.formatted_addresses,
            [
                "3101 Patterson Ave, Richmond VA, 23221",
                "1657 W Broad St, Richmond VA, 23220",
                "",
            ],
        )

    def test_collection_get(self):
        """Ensure 'get' performs a key based lookup"""
        locations = LocationCollection(self.batch_response["results"])
        self.assertEqual(
            locations.get("3101 patterson ave, richmond, va").coords,
            (37.560890255102, -77.477400571429),
        )
        # Case sensitive on the specific query
        self.assertRaises(KeyError, locations.get, "3101 Patterson Ave, richmond, va")
        # Component-dict based lookup.
        locations = LocationCollection(self.batch_components_response["results"])
        self.assertEqual(locations.get({
            "street": "1109 N Highland St",
            "city": "Arlington",
            "state": "VA"
        }).coords, (38.886672, -77.094735))
        # Requires all fields used for lookup
        self.assertRaises(KeyError, locations.get,
                          {"street": "1109 N Highland St",
                           "city": "Arlington"})
        locations = LocationCollection(self.batch_reverse_response["results"])
        # The rendered query string value is acceptable
        self.assertEqual(
            locations.get("37.538758,-77.433594").coords, (37.538758, -77.433594)
        )
        # A tuple of floats is acceptable
        self.assertEqual(
            locations.get((37.538758, -77.433594)).coords, (37.538758, -77.433594)
        )
        # If it can be coerced to a float it is acceptable
        self.assertEqual(
            locations.get(("37.538758", "-77.433594")).coords, (37.538758, -77.433594)
        )
        # This is unacceptable
        self.assertRaises(ValueError, locations.get, ("37.538758 N", "-77.433594 W"))
# Allow running this test module directly (python test_data.py).
if __name__ == "__main__":
    unittest.main()
| true |
3fea4d6ab04e3e03eb622b4a9920c9bff44fb2d9 | Python | lsl980628/DMNN | /utils.py | UTF-8 | 7,851 | 2.78125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 16:01:32 2019
@author: xxw
"""
import numpy as np
import pandas as pd
#%%
def minmaxscaler(data):
    '''
    Min-max scale *data* to the range [0, 10].

    data: array-like of numbers.
    Returns an ndarray of the same shape where the minimum maps to 0 and
    the maximum maps to 10.  A constant input returns all zeros instead of
    raising ZeroDivisionError.  (Renamed locals to avoid shadowing the
    builtins min/max.)
    '''
    data = np.asarray(data)
    lo = np.amin(data)
    hi = np.amax(data)
    span = hi - lo
    if span == 0:
        # All values equal: scaling is undefined, return zeros.
        return np.zeros_like(data, dtype=float)
    return 10 * (data - lo) / span
#%%
def seg_new_data(datapath,storepath,week_num):
    '''
    Split the hourly CSV at *datapath* into .npy segment files in *storepath*.

    datapath: path of the original CSV data (read with pandas)
    storepath: directory where the segmented .npy files are written
    week_num: weeks per subdataset; each segment spans week_num*7*24 rows
    '''
    data = pd.read_csv(datapath)
    columns = data.columns
    data = np.array(data)
    lenth = week_num*7*24            # rows per segment (one row per hour)
    total_len = data.shape[0]
    rest_len = total_len//lenth      # number of whole segments that fit
    if (total_len % lenth) ==0:
        if rest_len ==1:
            # Exactly one segment: store it as "0.npy".
            seg_data =[]
            for n in range(0,lenth):
                seg_data.append(data[n])
            seg_data = pd.DataFrame(seg_data,columns = columns)
            np.save(storepath+'/'+"0.npy",seg_data)
        else:
            for m in range(0,total_len-lenth,lenth):
                seg_data =[]
                for n in range(m,m+lenth):
                    seg_data.append(data[n])
                seg_data = pd.DataFrame(seg_data,columns = columns)
                # NOTE(review): m/(lenth-1) is float division on Python 3 and
                # does not produce consecutive integer file names -- confirm
                # that m/lenth (or m//lenth) was intended.
                np.save(storepath+'/'+"%i.npy"%(m/(lenth-1)),seg_data)
    else:
        # Length is not a multiple of the segment size: save the whole
        # segments, then one final segment taken from the tail of the data.
        seg_last =[]
        for i in range(0,total_len-rest_len-lenth,lenth):
            seg_data =[]
            for j in range(i,i+lenth):
                seg_data.append(data[j])
            np.save(storepath+'/'+"%i.npy"%(i/lenth),seg_data)
        for l in range(total_len-lenth,total_len):
            seg_last.append(data[l])
        # Uses the last value of l from the loop above to name the tail file.
        np.save(storepath+'/'+"%i.npy"%((l/lenth)+1),seg_last)
#%%
def data_split(filename,random_num, T ,n ,l_back,m ,l_forward,test_T):
    '''
    Build one training/testing sample from a segmented .npy file.

    Input parameters:
        filename: path of the segmented .npy data file
        random_num: start point; the model predicts m*l_back values from
            here.  Must satisfy n*T <= random_num <= len(data) - m*l_back.
        T: periodic length, default = 24
        n: number of periods sampled, default = 14
        l_back: length of the predicted sequence, default = 24
        m: size of periodic data, default = 1
        l_forward: input size of the prediction module, default = 72
        test_T: flag passed by callers  # NOTE(review): never read here
    Return:
        data_nT0, data_nT1, data_nT2: (l_back, n) arrays of periodic samples
        data_lf: (1, l_forward) recent-history input
        data_p: (m*l_back, 1) prediction target
    '''
    data_nT0,data_nT1,data_nT2 =[],[],[]
    data_p,data_lf = [],[]
    data = np.load(filename,allow_pickle=True)
    if data.shape[1] ==4:
        # Raw 4-column export: drop Time and City, keep Num and Value.
        data = pd.DataFrame(np.array(data),columns = ["Num","Time","City","Value"])
        data = data.drop(["Time","City"],axis = 1)
        data = np.array(data)
    #else:
    #    data = minmaxscaler(data)
    start = random_num - (n*T)
    end = random_num + m*l_back
    # Recent-history window: the l_forward points just before random_num.
    for j in range(random_num-l_forward,random_num):
        data_lf.append(np.array(data[j][1]))
    # For each of the n periods, collect l_back values at the same phase plus
    # their two neighbours (previous/next row).
    for i in range(start,random_num,T):
        data_T0,data_T1,data_T2 = [],[],[]
        for l in range(0,int(l_back)):
            a = data[i-1+l]
            b = data[i+l]
            c = data[i+1+l]
            data_T0.append(np.array(a[1]))
            data_T1.append(np.array(b[1]))
            data_T2.append(np.array(c[1]))
        # print(len(data_T0))
        data_nT0.append(data_T0)
        data_nT1.append(data_T1)
        data_nT2.append(data_T2)
    # Ground-truth sequence to be predicted.
    for k in range(random_num,end):
        data_p.append(np.array(data[k][1]))
    data_lf = np.reshape(data_lf,(l_forward,1)).T
    data_p = np.reshape(data_p,(m*l_back,1))
    data_nT0 = np.reshape(data_nT0,(n,l_back)).T
    data_nT1 = np.reshape(data_nT1,(n,l_back)).T
    data_nT2 = np.reshape(data_nT2,(n,l_back)).T
    return data_nT0,data_nT1,data_nT2,data_lf,data_p
#%%
def create_multi_data(filename,store_path,select_num,step =4,split_rate = 1/2,train_count =384,T = 24 ,n =14,l_back = 24,m =1,l_forward =72):
    '''
    Generate train/test splits from one segmented .npy file and save them.

    Input parameter:
        filename: segmented .npy file produced by seg_new_data
        store_path: directory where the train/test .npy splits are written
        select_num: number of short-term samples in each subdataset
        step: stride between consecutive candidate start points
        split_rate: fraction of the candidates used per split window
        train_count: number of training samples per split
        T: periodic length, default = 24
        n: number of periods sampled, default = 14
        l_back: length of the predicted sequence, default = 24
        m: size of periodic data, default = 1
        l_forward: input size of the prediction module, default = 72
    '''
    ls_num = []
    data = np.load(filename,allow_pickle=True)
    lendata = np.array(data).shape[0]
    # if n*T > l_forward:
    #     ls_num = list(range(n*T,lendata-m*l_back,step))
    # else:
    #     ls_num = list(range(l_forward,lendata-m*l_back,step))
    # Candidate start points: leave n*T points of history before and
    # m*l_back points of target after each start.
    ls_num = list(range(n*T,lendata-m*l_back,step))
    end_num = select_num*split_rate
    split_num = int(1/split_rate)
    for i in range(0,split_num):
        # First train_count indices of the window are train, the rest test.
        train_num,test_num = ls_num[int(i*end_num) : int((i*end_num)+train_count)], ls_num[(int((i*end_num)+train_count)):(int((i+1)*end_num))]
        s_T0,s_T1,s_T2,Forw,Pred = [],[],[],[],[]
        s_T0_test,s_T1_test,s_T2_test,Forw_test,Pred_test = [],[],[],[],[]
        # Sanity check; aborts the whole function on the first violation.
        if select_num > ((lendata-m*l_back-n*T)/3):
            return print("Warning: select_num %i is out of range"%select_num )
        for train_number in train_num:
            T0,T1,T2,forward,pred=data_split(filename,train_number,T,n,l_back,m,l_forward,test_T = False)
            # Each sample is stored as a one-entry dict keyed by its role.
            s_T0.append({"T0":(T0)})
            s_T1.append({"T1":(T1)})
            s_T2.append({"T2":(T2)})
            Forw.append({"Forw":(forward)})
            Pred.append({"Pred":(pred)})
        for test_number in (test_num):
            T0,T1,T2,forward,pred=data_split(filename,test_number,T,n,l_back,m,l_forward,test_T = True)
            s_T0_test.append({"T0":(T0)})
            s_T1_test.append({"T1":(T1)})
            s_T2_test.append({"T2":(T2)})
            Forw_test.append({"Forw":(forward)})
            Pred_test.append({"Pred":(pred)})
        # Persist this split; %i is the split index.
        np.save(store_path+'/'+'T0_train%i.npy'%i,s_T0)
        np.save(store_path+'/'+'T1_train%i.npy'%i,s_T1)
        np.save(store_path+'/'+'T2_train%i.npy'%i,s_T2)
        np.save(store_path+'/'+'Forw_train%i.npy'%i,Forw)
        np.save(store_path+'/'+'Pred_train%i.npy'%i,Pred)
        np.save(store_path+'/'+'T0_test%i.npy'%i,s_T0_test)
        np.save(store_path+'/'+'T1_test%i.npy'%i,s_T1_test)
        np.save(store_path+'/'+'T2_test%i.npy'%i,s_T2_test)
        np.save(store_path+'/'+'Forw_test%i.npy'%i,Forw_test)
        np.save(store_path+'/'+'Pred_test%i.npy'%i,Pred_test)
#%%
def load_data(select_num ,step ,split_rate,train_count ,T ,n ,l_back ,m ,l_forward,data_order,city_path ="data/hour/A.csv",store_seg ="data/seg", week_num = 24,filename = "data/seg/0.npy",store_path = "data/train", ):
    # End-to-end pipeline: segment the raw CSV, build the splits, then load
    # the train/test arrays for split number *data_order*.
    # NOTE(review): week_num is hard-coded to 24 here, ignoring the parameter.
    seg_new_data(city_path,store_seg,week_num =24)
    '''
    All input parameters come from the other functions defined in utils.py.
    Return:
        training data and forecasting (test) data arrays
    '''
    # NOTE(review): the string above is a plain expression, not a docstring,
    # because it follows the first statement.
    create_multi_data(filename,store_path,select_num =select_num,step = step,split_rate = split_rate ,train_count = train_count,T = T,n = n,l_back = l_back ,m = m,l_forward = l_forward)
    s_T0 = np.load(store_path + '/'+'T0_train%i.npy'%data_order,allow_pickle=True)
    s_T1 = np.load(store_path + '/'+'T1_train%i.npy'%data_order,allow_pickle=True)
    s_T2 = np.load(store_path + '/'+'T2_train%i.npy'%data_order,allow_pickle=True)
    Forw = np.load(store_path + '/'+'Forw_train%i.npy'%data_order,allow_pickle=True)
    Pred = np.load(store_path + '/'+'Pred_train%i.npy'%data_order,allow_pickle=True)
    s_T0_test = np.load(store_path + '/'+'T0_test%i.npy'%data_order,allow_pickle=True)
    s_T1_test = np.load(store_path + '/'+'T1_test%i.npy'%data_order,allow_pickle=True)
    s_T2_test = np.load(store_path + '/'+'T2_test%i.npy'%data_order,allow_pickle=True)
    Forw_test = np.load(store_path + '/'+'Forw_test%i.npy'%data_order,allow_pickle=True)
    Pred_test = np.load(store_path + '/'+'Pred_test%i.npy'%data_order,allow_pickle=True)
    return s_T0,s_T1,s_T2,Forw,Pred,s_T0_test,s_T1_test,s_T2_test,Forw_test,Pred_test
| true |
7732639a7c95f4d045c3f52a0d097ecfeca88580 | Python | g33kroid/Spector | /loading.py | UTF-8 | 388 | 2.828125 | 3 | [] | no_license | from __future__ import print_function
import sys
import time
from pyspin.spin import Spin5, Spinner
def loading():
    """Show a terminal spinner for about five seconds while modules load."""
    spinner = Spinner(Spin5)  # choose a spin style
    print("Loading All Modules, Please wait ...")
    # 50 frames at 0.1 s each; \r returns the cursor so the spinner
    # animates in place on one line.
    for _ in range(50):
        print(u"\r{0}".format(spinner.next()), end="")
        sys.stdout.flush()
        time.sleep(0.1)
    print("Done :D ")
b2d62958773f6b6dc5844ba5565782dc90fbbed4 | Python | sariya/macpro_server | /rdp_core_data/filtering_seqs/find_Ns.py | UTF-8 | 1,843 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
__author__ = "Sanjeev Sariya"
__date__= "23 August 2016"
__maintainer__= "Sanjeev Sariya"
__status__= "development"
long_description= """
Find sequence identifiers which have Ns in them
How many Ns
Max Ns
Minimum Ns
"""
import sys, os, re, argparse,glob
import subprocess as sp
from Bio import SeqIO
import numpy as np
#
# Bail out early on interpreters older than 2.7 (this script targets Python 2).
if sys.version_info < (2,7):
    print "Python 2.7+ are needed for script"
    sys.exit()
##
def print_seq_stats(length_array):
    # Print summary statistics (min/max/mean/median/std) of sequence lengths.
    # length_array: list of sequence lengths.  Python 2 print statements.
    print "Minimum length is ", min(length_array)
    print "Max length ",max(length_array)
    print "mean of length is ", np.mean(length_array)
    print "median of length is ", np.median(length_array)
    print "Standard Dev of length is ", np.std(length_array)
    #}}}function ends
    #}}}function ends
#
def open_fasta(fasta_file):
    # Scan a FASTA file and report which sequence identifiers contain N/n
    # bases, plus counts of total and usable sequences.
    seq_with_n=[] #array to store seq ids with Ns
    count=0 #total seqs --
    for record in SeqIO.parse(fasta_file,"fasta"):
        #add_seq_length.append(len(record))
        count+=1
        # str.find returns -1 when absent, so != -1 means an N/n was found.
        if (record.seq).find('N')!=-1 or (record.seq).find('n')!=-1:
            seq_with_n.append(record.id)
            #print (record.seq).count('N'), (record.seq).count('n')
        #if ends
    #for loop ends
    print "Sequences with Ns ",len(seq_with_n)
    for i in seq_with_n:
        print i
    #--for loop ends
    print "Total seqs in fasta file ",count
    #print_seq_stats(add_seq_length) #send seq-length array to calculate mean...
    print "Seqeucnes that can be used ",count-len(seq_with_n)
#
if __name__=="__main__":
    #{{{ main starts
    # Parse the required -c/--combined path and scan that FASTA file.
    parser=argparse.ArgumentParser("description")
    parser.add_argument ('-c','--combined',help='location/path of combined seqs file',required=True) # store seqs file
    args_dict = vars(parser.parse_args()) # make them dict..
    open_fasta(args_dict["combined"])
    #}}} main ends
| true |
fc1564d0ac84ad1dd24f357613eb0e899ee82442 | Python | massi92/DIP-Project_MGFF | /Project_KPCat/preprocess.py | UTF-8 | 1,278 | 2.828125 | 3 | [] | no_license | import numpy as np
import cv2
import os
class Preprocess(object):
    """Collection of OpenCV-based image preprocessing operations."""

    def adaHistEq(self,img):
        # Contrast-limited adaptive histogram equalization (CLAHE).
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        cl1 = clahe.apply(img)
        return cl1

    def removeNoise(self,img):
        # Smooth with a 5x5 normalized box (averaging) filter.
        kernel = np.ones((5,5),np.float32)/25 #kernel
        dst = cv2.filter2D(img,-1,kernel)
        return dst

    def changeSize(self,img,width,height):
        # Resize the image to (width, height).
        res = cv2.resize(img,(width,height))
        return res

    def toBin(self,img):
        # Adaptive Gaussian thresholding to a binary image.
        th = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
        return th

    # pySaliencyMap must be imported for the method below to work.
    def saliency(self,img):
        # NOTE(review): pySaliencyMap is not imported in this module, so
        # calling this raises NameError -- add the import before use.
        imgsize = img.shape
        img_width = imgsize[1]
        img_height = imgsize[0]
        sm = pySaliencyMap.pySaliencyMap(img_width, img_height)
        map = sm.SMGetSM(img)
        return map

    def applyTransform(self,img,width,height):
        # Pipeline used by the project: resize, then equalize the histogram.
        out1 = self.changeSize(img,width,height)
        #out1 = self.toBin(out1)
        out1 = self.adaHistEq(out1)
        #out1 = self.removeNoise(img)
        return out1
if __name__ == '__main__':
    # Quick manual test: binarize a sample image and display it.
    path = './imm/cat/cat000.jpg'
    img = cv2.imread(path,0)   # 0 = load as grayscale
    pp = Preprocess()
    #out1 = pp.applyTransform(img,500,500)
    out1 = pp.toBin(img)
    cv2.imshow('Immagine trasformata',out1)
    cv2.waitKey(0)
b2285ea6c224c4c20fada46fb3b51cf0ad411d04 | Python | lldhliu/ZolarPushService | /app/forms/user.py | UTF-8 | 611 | 2.828125 | 3 | [] | no_license | from wtforms import PasswordField, Form, StringField
from wtforms.validators import DataRequired, Regexp, Length
class LoginForm(Form):
    """Login form: Chinese mobile number plus a 6-16 character password."""
    # Mainland-China mobile numbers: 11 digits starting with 13/14/15/17/18.
    phone = StringField(validators=[Regexp(r'1[85347]\d{9}', message='手机号码格式不正确')])
    password = PasswordField('密码', validators=[
        # Fixed quantifier: the original r'{5, 15}' contained a space, which
        # Python's re module treats as literal text, so the pattern could
        # never match.  {5,15} after the leading character gives a total
        # length of 6-16, agreeing with the Length validator below.
        Regexp(r'^[a-zA-Z0-9][a-zA-Z0-9_]{5,15}$', message='密码里面只能包含大小写字母、数字、_,且不能以“_”开头'),
        DataRequired(message='密码不可以为空, 请输入你的密码'),
        Length(6, 16, message='密码长度最少为6个字符,最多为16个字符')
    ])
| true |
3daf0d2595f9b45377669b30968d658383a7cae4 | Python | aoloe/py-zahlen-und-codes | /main.py | UTF-8 | 3,196 | 2.640625 | 3 | [
"MIT"
] | permissive | import zahlen_code
import os
from tkinter import Tk, Button, Label, filedialog
print()
def codes_clicked(ignore = None):
    # Step 1: ask for the codes file, remember it, and enable step 2.
    # print(os.environ)
    # Pick a sensible starting directory per platform.
    # NOTE(review): initialdir is unbound if none of these variables exist.
    if 'PWD' in os.environ:
        initialdir = os.environ['PWD']
    elif 'HOME' in os.environ:
        initialdir = os.environ['HOME']
    # TODO: there does not seem to be a good way to get "My Documents"
    elif 'HOMEPATH' in os.environ:
        initialdir = os.environ['HOMEPATH']
    filename = filedialog.askopenfilename(
        initialdir = os.path.expanduser(initialdir),
        title = 'Codes-Datei auswählen',
        filetypes = [('text-Dateien', '*.txt')],
        parent = window)
    if filename:
        codes_label.configure(text = filename)
        names_btn.configure(state = 'normal')
        # Space now advances to the names dialog.
        window.bind('<space>', names_clicked)
def names_clicked(ignore = None):
    # Step 2: ask for the names file, remember it, and enable step 3.
    filename = filedialog.askopenfilename(
        title = 'Namen-Datei auswählen',
        filetypes = [('text-Dateien', '*.txt')],
        parent = window)
    if filename:
        names_label.configure(text = filename)
        output_btn.configure(state = 'normal')
        # Space now advances to the output dialog.
        window.bind('<space>', output_clicked)
def output_clicked(ignore = None):
    # Step 3: ask where to save the output, remember it, and enable step 4.
    filename = filedialog.asksaveasfilename(
        title = 'Output-Datei auswählen',
        filetypes = [('text-Dateien', '*.txt')],
        parent = window)
    if filename:
        output_label.configure(text = filename)
        process_btn.configure(state = 'normal')
        # Space now triggers processing.
        window.bind('<space>', process_clicked)
def process_clicked(ignore = None):
    # Step 4: run the processing with the three chosen paths, show a check
    # mark, and lock the whole workflow against re-running.
    zahlen_code.process_names_and_codes(
        codes_label.cget('text'),
        names_label.cget('text'),
        output_label.cget('text')
    )
    process_label.configure(text = '✓')
    codes_btn.configure(state = 'disabled')
    names_btn.configure(state = 'disabled')
    output_btn.configure(state = 'disabled')
    process_btn.configure(state = 'disabled')
def quit(ignore = None):
    # Close the main window (bound to Ctrl-Q and Escape below).
    # NOTE: shadows the builtin quit().
    window.destroy()
# --- Build the main window and wire up the four-step workflow. ---
window = Tk()
window.title('Zahlen und Codes')
window.geometry('640x480')
if os.name != 'nt':
    # Mark the window as a dialog for tiling window managers on non-Windows.
    window.attributes('-type', 'dialog')
# todo: does not work
window.bind('<Control-q>', quit)
window.bind('<Escape>', quit)
# Space triggers the currently active step; the workflow starts at step 1.
window.bind('<space>', codes_clicked)
# One button + path label per step, laid out on a grid.
codes_btn = Button(window, text = 'Codes-Datei',
        command = codes_clicked)
codes_btn.grid(column = 0, row = 0)
codes_label = Label(window, text = '...')
codes_label.grid(column = 1, row = 0, sticky = 'W')
names_btn = Button(window, text = 'Namen-Datei',
        state = 'disabled', command = names_clicked)
names_btn.grid(column = 0, row = 1)
names_label = Label(window, text = '...')
names_label.grid(column = 1, row = 1, sticky = 'W')
output_btn = Button(window, text = 'Output-Datei',
        state = 'disabled', command = output_clicked)
output_btn.grid(column = 0, row = 2)
output_label = Label(window, text = '...')
output_label.grid(column = 1, row = 2, sticky = 'W')
process_btn = Button(window, text = 'Los!',
        state = 'disabled', command = process_clicked)
process_btn.grid(column = 0, row = 3, sticky = 'W')
process_label = Label(window, text = '...')
process_label.grid(column = 1, row = 3, sticky = 'W')
window.mainloop()
| true |
58a9407aeca076cca34d864b0c15258a16c2c2a5 | Python | gschen/where2go-python-test | /1906101103王自强/蓝桥杯测试/5.py | UTF-8 | 939 | 3.609375 | 4 | [] | no_license | '''标题:书号验证
2004年起,国际ISBN中心出版了《13位国际标准书号指南》。
原有10位书号前加978作为商品分类标识;校验规则也改变。
校验位的加权算法与10位ISBN的算法不同,具体算法是:
用1分别乘ISBN的前12位中的奇数位(从左边开始数起),用3乘以偶数位,乘积之和以10为模,10与模值的差值再对10取模(即取个位的数字)即可得到校验位的值,其值范围应该为0~9。
输入:978-7-301-04815-3
输出:true
解释:该ISBN的第13位校验和是3,结果计算正确,返回true。
输入:978-7-115-38821-5
输出:false
解释:该ISBN的第13位校验和是6,结果计算错误,返回false。'''
def isbn13_check(isbn):
    """Return True if the 13-digit ISBN's check digit is correct.

    isbn: string such as '978-7-301-04815-3' (hyphens optional).
    Per the 13-digit ISBN rule: over the first 12 digits, digits in odd
    positions (1-based, from the left) are weighted 1 and digits in even
    positions are weighted 3; the sum is taken mod 10, and the check digit
    is (10 - sum % 10) % 10.  (The original code weighted digits by their
    *value* parity instead of their position and never printed a verdict.)
    """
    digits = [int(ch) for ch in isbn if ch.isdigit()]
    body, check = digits[:12], digits[12]
    total = sum(d * (1 if i % 2 == 0 else 3) for i, d in enumerate(body))
    return (10 - total % 10) % 10 == check


if __name__ == '__main__':
    # Read one ISBN from stdin and print 'true'/'false' as the problem asks.
    print('true' if isbn13_check(input()) else 'false')
5f255941decc6078669b0aa684e0e26142107b80 | Python | EvanWheeler99/kochSnowflake | /kochSnowflake.py | UTF-8 | 938 | 3.203125 | 3 | [] | no_license | import turtle
# Create the drawing window and a hidden green turtle positioned at (0, 200).
window = turtle.Screen()
t = turtle.Turtle()
# NOTE(review): drawing speed is normally set by calling t.speed(0);
# assigning the attribute replaces the method -- confirm intended effect.
t.speed = 0
t.hideturtle()
t.pu()          # pen up: move without drawing
t.goto(0,200)
t.pd()          # pen down: resume drawing
t.color('green')
def koch (t, size):
    """Recursively draw one Koch-curve segment of length *size* with *t*."""
    if size > 10:
        # Replace the segment with four third-size segments separated by
        # 60 / -120 / 60 degree turns (the final 0-degree turn is a no-op).
        for turn in (60, -120, 60, 0):
            koch(t, size / 3)
            t.left(turn)
    else:
        # Base case: segments this small are drawn as straight lines.
        t.forward(size)
def snowflake(t,size):
    """Draw a Koch snowflake: three Koch edges joined by 120° right turns."""
    for _ in range(3):
        koch(t, size)
        t.right(120)
def triforce(t,size):
    """Recursively draw a Sierpinski-style 'triforce' of triangles.

    t: turtle to draw with; size: edge length of the enclosing triangle.
    Triangles with size <= 25 are drawn directly; larger ones are split
    into three half-size copies.
    """
    t.setheading(-60)
    # t.right(60)
    if size <= 25:
        # Base case: draw the triangle edge by edge.
        for i in range(3):
            t.forward(size)
            t.right(120)
        # Extra forward along the first edge leaves the turtle positioned
        # for the caller's next sub-triangle.
        t.forward(size)
    else:
        newsize = size / 2
        # First half-size sub-triangle; remember the corner reached.
        triforce(t, newsize)
        topRight = t.position()
        # Second sub-triangle, then walk to the opposite corners.
        triforce(t, newsize)
        t.setheading(180)
        t.forward(newsize)
        bottom = t.position()      # NOTE(review): recorded but never used
        t.setheading(120)
        t.forward(newsize)
        topLeft = t.position()     # NOTE(review): recorded but never used
        # Third sub-triangle.
        triforce(t, newsize)
        # Jump (pen up) back past the remembered corner without drawing.
        t.pu()
        t.goto(topRight)
        t.setheading(-60)
        t.forward(newsize)
        t.pd()
# Draw the recursive triangle figure and wait for a click to close.
triforce(t,300)
# snowflake(t, 300)
window.exitonclick()
| true |
045fb6632d1929c7c105174d8d0bc59ebf92e00f | Python | nvaccess/wxPython | /wxPython/samples/wxPIA_book/Chapter-05/generictable.py | UTF-8 | 837 | 2.921875 | 3 | [] | no_license | import wx
import wx.grid
class GenericTable(wx.grid.PyGridTableBase):
    """A grid table that serves values from a 2-D sequence of equal-length
    rows, with optional row and column labels."""

    def __init__(self, data, rowLabels=None, colLabels=None):
        wx.grid.PyGridTableBase.__init__(self)
        self.data = data
        self.rowLabels = rowLabels
        self.colLabels = colLabels

    def GetNumberRows(self):
        return len(self.data)

    def GetNumberCols(self):
        # All rows have the same width; use the first row's length.
        return len(self.data[0])

    def GetColLabelValue(self, col):
        # Fall back to None (the wx default) when no labels were supplied.
        return self.colLabels[col] if self.colLabels else None

    def GetRowLabelValue(self, row):
        return self.rowLabels[row] if self.rowLabels else None

    def IsEmptyCell(self, row, col):
        # Every cell is considered populated.
        return False

    def GetValue(self, row, col):
        return self.data[row][col]

    def SetValue(self, row, col, value):
        # Read-only table: writes are ignored.
        pass
| true |
e0d540e04ca8ffbcf4e33dba399e90b9597e252f | Python | victoriaogomes/Unicorn.io | /lexical_analyzer/token_list.py | UTF-8 | 2,290 | 3.265625 | 3 | [] | no_license | import operator
from lexical_analyzer import tokens
# Class used to manage the token list produced by lexical analysis and the
# errors generated by syntactic analysis.
class TokenList:
    def __init__(self):
        # Initialise the attributes used throughout this class.
        self.tokens_list = dict()   # token_number -> Token (becomes a sorted list later)
        self.token_number = 0       # next insertion index
        self.printable = ''         # accumulated printable token table
        self.current_index = -1     # parser cursor into tokens_list
        self.endFileToken = tokens.Token('EOF', 'endOfFile($)', 0)
        self.expression = ''        # text accumulated while in math mode
        self.math_mode = False

    def add_token(self, lexeme_type, lexeme_text, line):
        # Add a new token to the symbol table.
        self.tokens_list[self.token_number] = tokens.Token(lexeme_type, lexeme_text, line)
        self.token_number = self.token_number + 1
        self.endFileToken.file_line = line

    def get_tokens(self):
        # Return every registered token ordered by source line as one string,
        # used to write the output file.  Note: as a side effect this
        # replaces tokens_list (a dict) with the sorted list.
        self.tokens_list = sorted(self.tokens_list.values(), key=operator.attrgetter('file_line'))
        for key in self.tokens_list:
            self.printable = self.printable + str(key)
        return self.printable

    def lookahead(self):
        # Peek at the next token without normally consuming it; returns the
        # EOF token past the end of the stream.
        if self.current_index + 1 < len(self.tokens_list):
            # NOTE(review): this compares a Token object against a set of
            # strings -- presumably .lexeme_type was intended; confirm.
            if self.tokens_list[self.current_index + 1] in {'SIB', 'NMF', 'CMF', 'OpMF'}:
                self.consume_token()
            return self.tokens_list[self.current_index + 1]
        else:
            return self.endFileToken

    def consume_token(self):
        # Advance the cursor and return the consumed token, transparently
        # skipping SIB/NMF/CMF/OpMF tokens; EOF token past the end.
        if self.current_index + 1 < len(self.tokens_list):
            self.current_index += 1
            if self.lookahead().lexeme_type in {'SIB', 'NMF', 'CMF', 'OpMF'}:
                self.consume_token()
            else:
                if self.math_mode:
                    # In math mode, accumulate the expression's lexemes.
                    self.expression = self.expression + self.tokens_list[self.current_index].lexeme
            return self.tokens_list[self.current_index]
        else:
            return self.endFileToken

    def math_mode_switch(self):
        # Toggle math mode and reset the accumulated expression text.
        self.math_mode = not self.math_mode
        self.expression = ''
| true |
76ef6980e896b01603461f83c9f660bd2d271825 | Python | vitoriabf/FC-Python | /Lista 1/Ex17.py | UTF-8 | 959 | 3.953125 | 4 | [] | no_license | '''Uma certa firma fez uma pesquisa para saber se as pessoas gostaram ou não de um
novo produto lançado no mercado. Para isso, forneceu o sexo do entrevistado e sua
resposta (sim ou não). Sabendo-se que foram entrevistadas 2.000 pessoas, crie um
algoritmo que calcule e escreva:
a) o número de pessoas que responderam sim;
b) o número de pessoas que responderam não;a porcentagem de pessoas do
sexo masculino que responderam não.'''
def resumo_pesquisa(entrevistas):
    """Tally the product-survey answers.

    entrevistas: iterable of (sexo, resposta) pairs, where resposta is
    'S'/'s' for "liked it" and anything else counts as "no".
    Returns (sim, nao, pct_masc_nao): the yes count, the no count, and the
    percentage of all interviewees who are male AND answered no.

    Fixes the original's ``resposta in 'Ss'`` test, for which an empty
    input string was (wrongly) counted as a "yes" answer, since the empty
    string is a substring of every string.
    """
    sim = nao = masc_nao = 0
    for sexo, resposta in entrevistas:
        if resposta.strip().upper() == 'S':
            sim += 1
        else:
            nao += 1
            if sexo.strip().upper() == 'M':
                masc_nao += 1
    total = sim + nao
    pct = (100 * masc_nao) / total if total else 0.0
    return sim, nao, pct


if __name__ == '__main__':
    # Collect the 2000 interviews from stdin, then print the summary.
    entrevistas = []
    for _ in range(2000):
        sexo = input('digite seu sexo: ')
        resposta = input('você gostou do produto? [S/N]')
        entrevistas.append((sexo, resposta))
    sim, nao, pct = resumo_pesquisa(entrevistas)
    print(f'Número de pessoas que responderam sim[{sim}]. \n Número de pessoas que responderam não[{nao}]. \n'
          f'A porcentagem de pessoas do sexo masculino que responderam não[{pct}].')
| true |
fe8ca87c58774693640484c1e1d13f91e2ae9436 | Python | hamburgerguy/insight | /quantum_miner_v4.py | UTF-8 | 5,018 | 3.078125 | 3 | [] | no_license | #import libraries
import datetime
import operator
import hashlib
import math
from math import sqrt, pi
from collections import OrderedDict
from statistics import mean
from pylab import plot, ylim, xlim, show, xlabel, ylabel, grid, legend
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Per-block hash counts collected for the plot at the end of the script:
# classical nonce counts vs. estimated quantum (Grover) oracle calls.
c_hash = []
q_hash = []
# Oracle used by the Grover simulation below: the identity function.
def GetOracle(xvalue):
    """Return *xvalue* unchanged (the oracle 'marks' the item itself)."""
    return xvalue
# Classically simulate Grover's algorithm and print how many oracle calls
# ("hashes") a quantum computer would need.
def ExecuteGrover(target, objects, nvalue, rounds,oracleCount):
    """Simulate Grover amplitude amplification over *objects*.

    target: the marked item; objects: iterable of all candidates;
    nvalue: number of candidates; rounds: iteration budget (stepped by 2);
    oracleCount: starting count of oracle invocations.
    Returns an OrderedDict mapping each object to its final amplitude.
    """
    y_pos = np.arange(nvalue)  # NOTE(review): computed but never used
    # Uniform superposition: every amplitude starts at 1/sqrt(N).
    amplitude = OrderedDict.fromkeys(objects, 1/sqrt(nvalue))
    for i in range(0, rounds, 2):
        # Oracle step: flip the sign of the marked item's amplitude.
        for k, v in amplitude.items():
            if GetOracle(k) == target:
                amplitude[k] = v * -1
                oracleCount += 1
        average = mean(amplitude.values())
        # Diffusion step: reflect every amplitude about the mean (the
        # marked item instead gets 2*mean + |v|).
        for k, v in amplitude.items():
            if GetOracle(k) == target:
                amplitude[k] = (2 * average) + abs(v)
                oracleCount += 1
                continue
            amplitude[k] = v-(2*(v-average))
    print("Hashes using Grover's algorithm: " + str(oracleCount))
    return amplitude
#create block class
class Block:
    # A single block in the toy blockchain.
    # NOTE: these are class attributes; instances share them as defaults.
    blockNo = 0
    data = None
    next = None                # link to the next block in the chain
    hash = None
    nonce = 0                  # incremented while mining
    diff = 10                  # difficulty in leading zero bits
    target = 2**(256-diff)     # a hash must be <= target to win
    previous_hash = 0x0
    timestamp = datetime.datetime.now()

    def __init__(self, data):
        self.data = data

    def hash(self):
        # SHA-256 over the concatenated string forms of the block's fields;
        # returns the hex digest string.
        h = hashlib.sha256()
        h.update(
            str(self.nonce).encode('utf-8') +
            str(self.data).encode('utf-8') +
            str(self.previous_hash).encode('utf-8') +
            str(self.timestamp).encode('utf-8') +
            str(self.blockNo).encode('utf-8')+
            str(self.target).encode('utf-8')
        )
        return h.hexdigest()
class Blockchain:
    # Toy blockchain: mines blocks classically, then estimates the number of
    # Grover oracle calls a quantum computer would need for the same block.
    diff = 10
    maxNonce = 2**32            # upper bound on the nonce search space
    target = 2**(256-diff)      # winning hashes must be <= this value
    lst = []                    # NOTE(review): never used
    block = Block("Genesis")    # chain tip
    dummy = head = block

    #define add method to add blocks to blockchain
    def add(self, block):
        # Link *block* after the current tip and advance the tip.
        block.previous_hash = self.block.hash()
        block.blockNo = self.block.blockNo + 1
        self.block.next = block
        self.block = self.block.next

    #setup up mining method that employs both grovers algorithm and the
    #normal (classical) mining strategy
    def mine(self, block):
        #instantiate list variable to be used in grovers algorithm
        # Hashes tried so far, in nonce order.  NOTE: shadows builtin list().
        list = []
        #setup special use case for blocks that require no revision of nonce space
        if int(str(block.hash()),16) <= self.target:
            nohash = "\nBlockHash: " + block.hash() + "\nBlockNo: " + str(block.blockNo) + \
                "\nBlock Data: "+ str(Block.data) + "\nHashes: " + str(block.nonce) + \
                "\nDifficulty: " + str(block.diff) + "\n--------------"
            print(nohash)
            print('0 hashes required')
            return
        #Iterate through nonce values in order to find winning nonce
        for n in range(self.maxNonce):
            if int(str(block.hash()), 16) <= self.target:
                self.add(block)
                hashstr = "\nBlockHash: " + list[-1] + "\nBlockNo: " + str(block.blockNo) + \
                    "\nBlock Data: "+ str(Block.data) + "\nHashes: " + str(block.nonce) + \
                    '\nDifficulty: ' + str(block.diff) + "\n--------------"
                print(hashstr)
                c_hash.append(block.nonce)
                ### Grovers Algorithm ###
                length_lst = len(list)
                # Grover needs about (pi/4)*sqrt(N) oracle calls for N items.
                calls = int((pi/4)*math.sqrt(length_lst))
                q_hash.append(calls)
                #execute grovers algorithm on list of hash values, find same value classically
                grov_data = max(ExecuteGrover(min(list),list,length_lst,calls,0).items(), key=operator.itemgetter(1))
                print('BlockHash: ' + str(grov_data[0]))
                print('Corresponding amplitude: ' + str(grov_data[1]))
                print('Nonce value for this amplitude: ' + str(length_lst))
                print('_____________________________________________________________________________')
                break
            else:
                #increase nonce value by 1 and record the missed hash
                block.nonce += 1
                list.append(block.hash())
#create blockchain variable
blockchain = Blockchain()
#Mine 10 blocks on blockchain variable, recording classical vs. quantum costs
blocks = 10
for n in range(blocks):
    blockchain.mine(Block("Block " + str(n+1)))
# Scatter plot: classical hash counts vs. estimated Grover oracle calls.
x = np.arange(blocks)
plt.scatter(x,c_hash,label = 'Hashes classically', color = 'r')
plt.scatter(x,q_hash,label = 'Hashes on a QC',color = 'b')
ylabel('Hashes required')
xlabel('Block number')
plt.legend()
#plt.show()
#counted = count_elements(q_hash)
#print(counted)
| true |
26143ad1ce3407ea0c91b2d871f390bc58c313d9 | Python | GuanHsu/Python_Next_Level | /Python-Beyond the Basic/labs/py3/generators/sqare_hard_way.py | UTF-8 | 444 | 3.78125 | 4 | [] | no_license | '''
This is the hard way to create an efficient of iterator
'''
class Squares:
    """Iterator yielding the squares 0**2 .. (max_root-1)**2, one at a time."""

    def __init__(self, max_root):
        self.max_root = max_root
        self.root = 0

    def __iter__(self):
        # An iterator is its own iterable.
        return self

    def __next__(self):
        if self.root == self.max_root:
            raise StopIteration
        result = self.root * self.root
        self.root += 1
        return result
# Demonstrate the iterator protocol: prints 0, 1, 4, 9, 16.
for sq in Squares(5):
    print (sq)
289aeb72204722dba7cb1f31b45726db321b14c2 | Python | jason12360/AID1803 | /pbase/day04/practise/unicode.py | UTF-8 | 319 | 3.984375 | 4 | [] | no_license | # 输入一个Unicode的开始值 用变量begin绑定
# 输入一个。。。。。结束值,用变量stop绑定
# 打印开始值至结束值之间的所有对应文字
def unicode_chars(begin, stop):
    """Return the characters for code points *begin*..*stop* inclusive,
    separated by single spaces."""
    return ' '.join(chr(code_point) for code_point in range(begin, stop + 1))


if __name__ == '__main__':
    # Read the start and end code points, then print the characters between
    # them (end=' ' reproduces the original trailing space).
    begin = int(input('请输入开始值: '))
    stop = int(input('请输入结束值: '))
    print(unicode_chars(begin, stop), end=' ')
0e29c265b90fa6052fe3b5c45dd9469cfad4226e | Python | kampaitees/Ethical-Hacking-Basic-using-Socket-Programming | /Basic programs of Socket Programming/Chat application/client.py | UTF-8 | 674 | 3.296875 | 3 | [
"MIT"
] | permissive | import sys
import socket
#creating a socket having IP version as IPv4 and TCP/IP as working protocol in Transport layer
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Server address (LAN IP) and port to connect to.
host = '10.53.125.187'
port = 9999
count = 0
#connecting to the socket by combining IP address and port
s.connect((host, port))
# Simple request/response chat loop, at most 20 messages.
while count < 20:
    cmd = input("Enter message to be delivered:\n")
    if cmd == 'Bye':
        # Typing 'Bye' ends the chat without notifying the server.
        break
    #sending some message to the server
    s.send(str.encode(cmd))
    #storing the information sent by the server (up to 1024 bytes)
    msg = s.recv(1024).decode()
    print(msg)
    count += 1
| true |
3b94bfd9bab6479ca3a0009b7c5786d9369fd491 | Python | quyencao/TensorFlow-MNIST | /MNISTPrediction.py | UTF-8 | 7,317 | 2.640625 | 3 | [] | no_license | import numpy as np
import os
from PIL import Image, ImageFilter
from random import randint
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Utility helpers for building TensorFlow v1 CNN graphs.
class TFUtils:
    def __init__(self):
        return

    # Xavier initialization
    @staticmethod
    def xavier_init(shape, name='', uniform=True):
        """Create a TF variable with Xavier/Glorot-scaled initialisation."""
        # Fan-in is approximated by summing all but the last dimension.
        num_input = sum(shape[:-1])
        num_output = shape[-1]
        if uniform:
            init_range = tf.sqrt(6.0 / (num_input + num_output))
            init_value = tf.random_uniform_initializer(-init_range, init_range)
        else:
            stddev = tf.sqrt(3.0 / (num_input + num_output))
            init_value = tf.truncated_normal_initializer(stddev=stddev)
        return tf.get_variable(name, shape=shape, initializer=init_value)

    @staticmethod
    def conv2d(X, W, strides=None, padding='SAME'):
        # 2-D convolution with stride 1 and SAME padding by default.
        if strides is None:
            strides = [1, 1, 1, 1]
        return tf.nn.conv2d(X, W, strides=strides, padding=padding)

    @staticmethod
    def max_pool(X, ksize=None, strides=None, padding='SAME'):
        # 2x2 max pooling with stride 2 by default (halves H and W).
        if ksize is None:
            ksize = [1, 2, 2, 1]
        if strides is None:
            strides = [1, 2, 2, 1]
        return tf.nn.max_pool(X, ksize=ksize, strides=strides, padding=padding)

    @staticmethod
    def build_cnn_layer(X, W, p_dropout=1., pool=True, reshape=None):
        """Conv + ReLU, then optional max-pool, reshape and dropout."""
        L = tf.nn.relu(TFUtils.conv2d(X, W))
        if pool is True:
            L = TFUtils.max_pool(L)
        if reshape is not None:
            L = tf.reshape(L, reshape)
        if p_dropout == 1:
            # Dropout disabled: return the layer as-is.
            return L
        else:
            return tf.nn.dropout(L, p_dropout)
# MNIST base class
# main purpose is building cnn model
# can add other models
class MNIST:
    """Base class holding the TF1 session, shared graph placeholders and the
    CNN model definition used by training / prediction subclasses."""

    model_path = None   # checkpoint path for save/restore
    data_path = None    # optional dataset directory
    sess = None         # tf.Session, created by init_session()
    model = None        # logits tensor, created by build_cnn_model()
    mnist = None        # dataset handle; assigned elsewhere (see subclasses)
    # Graph inputs are CLASS-level placeholders shared by all instances:
    # 28x28x1 images, 10-class one-hot labels, and two dropout keep-probs.
    X = tf.placeholder(tf.float32, [None, 28, 28, 1])
    Y = tf.placeholder(tf.float32, [None, 10])
    p_keep_conv = tf.placeholder(tf.float32)
    p_keep_hidden = tf.placeholder(tf.float32)

    def __init__(self, model_path=None, data_path=None):
        # Remember where to save/restore the checkpoint and find the data.
        self.model_path = model_path
        self.data_path = data_path

    def init_session(self):
        """Create a session and run global variable initialisation."""
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def print_status(self, text):
        """Print *text* under a '---' separator (simple progress logging)."""
        print('---')
        print(text)

    def build_feed_dict(self, X, Y, p_keep_conv=1., p_keep_hidden=1.):
        """Map the class placeholders to concrete batch values."""
        return {
            self.X: X,
            self.Y: Y,
            self.p_keep_conv: p_keep_conv,
            self.p_keep_hidden: p_keep_hidden
        }

    # define model
    def build_cnn_model(self, p_keep_conv=1., p_keep_hidden=1.):
        """Build the 3-conv + 1-FC CNN and store its logits in self.model.

        NOTE(review): p_keep_conv / p_keep_hidden arrive here as plain
        Python floats rather than the placeholders declared above, so the
        dropout rate is fixed at graph-build time -- confirm intended.
        """
        W1 = TFUtils.xavier_init([3, 3, 1, 32], 'W1')
        W2 = TFUtils.xavier_init([3, 3, 32, 64], 'W2')
        W3 = TFUtils.xavier_init([3, 3, 64, 128], 'W3')
        W4 = TFUtils.xavier_init([128 * 4 * 4, 625], 'W4')
        W5 = TFUtils.xavier_init([625, 10], 'W5')
        with tf.name_scope('layer1') as scope:
            # L1 Conv shape=(?, 28, 28, 32) -> Pool (?, 14, 14, 32)
            L1 = TFUtils.build_cnn_layer(self.X, W1, p_keep_conv)
        with tf.name_scope('layer2') as scope:
            # L2 Conv shape=(?, 14, 14, 64) -> Pool (?, 7, 7, 64)
            L2 = TFUtils.build_cnn_layer(L1, W2, p_keep_conv)
        with tf.name_scope('layer3') as scope:
            # L3 Conv shape=(?, 7, 7, 128) -> Pool (?, 4, 4, 128)
            # then flattened to (?, 128*4*4) to feed the dense layer
            reshape = [-1, W4.get_shape().as_list()[0]]
            L3 = TFUtils.build_cnn_layer(L2, W3, p_keep_conv, reshape=reshape)
        with tf.name_scope('layer4') as scope:
            # L4 FC 4x4x128 inputs -> 625 outputs
            L4 = tf.nn.relu(tf.matmul(L3, W4))
            L4 = tf.nn.dropout(L4, p_keep_hidden)
        # Output(labels) FC 625 inputs -> 10 outputs (raw logits, no softmax)
        self.model = tf.matmul(L4, W5, name='model')
        return self.model

    def save_model(self):
        """Save all graph variables to model_path (no-op when path unset)."""
        if self.model_path is not None:
            self.print_status('Saving my model..')
            saver = tf.train.Saver(tf.global_variables())
            saver.save(self.sess, self.model_path)

    def load_model(self):
        """Rebuild the graph and restore variables from model_path."""
        self.build_cnn_model()
        saver = tf.train.Saver()
        saver.restore(self.sess, self.model_path)

    def check_accuracy(self, test_feed_dict=None):
        """Return mean prediction accuracy for the given feed dict."""
        check_prediction = tf.equal(tf.argmax(self.model, 1), tf.argmax(self.Y, 1))
        accuracy = tf.reduce_mean(tf.cast(check_prediction, tf.float32))
        accuracy_rates = self.sess.run(accuracy, feed_dict=test_feed_dict)
        return accuracy_rates
# MNIST Prediction class
# check accuracy of test set
# predict random number from test set
# predict number from image
class MNISTPrediction(MNIST):
    """Restores a saved checkpoint and offers test-set accuracy checks and
    single-image digit predictions."""

    def __init__(self, model_path=None, data_path=None):
        MNIST.__init__(self, model_path, data_path)
        self.init()

    def init(self):
        """Start a session, restore the model, optionally load test data."""
        self.print_status('Loading a model..')
        self.init_session()
        self.load_model()
        if self.data_path is not None:
            # NOTE(review): load_training_data is not defined anywhere in
            # this file -- passing a data_path would raise AttributeError.
            self.load_training_data(self.data_path)

    def classify(self, feed_dict):
        """Return (predicted_digit, softmax_confidence) for one feed dict."""
        number = self.sess.run(tf.argmax(self.model, 1), feed_dict)[0]
        accuracy = self.sess.run(tf.nn.softmax(self.model), feed_dict)[0]
        return number, accuracy[number]

    def accuracy_of_testset(self):
        """Evaluate accuracy on the whole MNIST test set (needs self.mnist)."""
        self.print_status('Calculating accuracy of test set..')
        X = self.mnist.test.images.reshape(-1, 28, 28, 1)
        Y = self.mnist.test.labels
        test_feed_dict = self.build_feed_dict(X, Y)
        accuracy = self.check_accuracy(test_feed_dict)
        self.print_status('CNN accuracy of test set: %f' % accuracy)

    def predict_random(self, show_image=False):
        """Classify one randomly-chosen test-set item and print the result.

        NOTE(review): random.randint is INCLUSIVE of its upper bound, so
        num == images.shape[0] is possible and would index out of range;
        the bound should be shape[0] - 1.
        """
        num = randint(0, self.mnist.test.images.shape[0])
        image = self.mnist.test.images[num]
        label = self.mnist.test.labels[num]
        feed_dict = self.build_feed_dict(image.reshape(-1, 28, 28, 1), [label])
        (number, accuracy) = self.classify(feed_dict)
        label = self.sess.run(tf.argmax(label, 0))
        self.print_status('Predict random item: %d is %d, accuracy: %f' %
                          (label, number, accuracy))

    def predict(self, filename):
        """Classify the digit image stored at *filename* and print it."""
        data = self.load_image(filename)
        number, accuracy = self.classify({self.X: data})
        self.print_status('%d is %s, accuracy: %f' % (number, os.path.basename(filename), accuracy))

    def load_image(self, filename):
        """Load an image file as a normalised (-1, 28, 28, 1) nested list.

        Converted to grayscale, resized to 28x28, sharpened, and inverted so
        ink is ~1.0 and background ~0.0 (255 RGB -> 0..1 range).
        """
        img = Image.open(filename).convert('L')
        # resize to 28x28
        img = img.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        # normalization : 255 RGB -> 0, 1 (inverted)
        data = [(255 - x) * 1.0 / 255.0 for x in list(img.getdata())]
        # reshape -> [-1, 28, 28, 1]
        return np.reshape(data, (-1, 28, 28, 1)).tolist()
# Demo: restore the trained checkpoint that lives next to this script and
# classify a few bundled sample digit images.
script_dir = os.path.dirname(os.path.abspath(__file__))
model_path = script_dir + '/models/mnist-cnn'
mnist = MNISTPrediction(model_path)
mnist.predict(script_dir + '/imgs/digit-4.png')
mnist.predict(script_dir + '/imgs/digit-2.png')
mnist.predict(script_dir + '/imgs/digit-5.png')
mnist.predict(script_dir + '/imgs/digit-1.png')
mnist.predict(script_dir + '/imgs/digit-3.png')
ee5cc9c811e9ff4d4e6641520b953abe9e7d625e | Python | ichise-lab/uwkgm | /api/dorest/dorest/libs/django/decorators/permissions.py | UTF-8 | 3,550 | 3.109375 | 3 | [
"BSD-3-Clause"
] | permissive | """Extensions to Django's permission validation mechanism
The Dorest project
:copyright: (c) 2020 Ichise Laboratory at NII & AIST
:author: Rungsiman Nararatwong
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework.views import APIView
from rest_framework.request import Request
from rest_framework.response import Response
def require(validators: tuple):
    """Django's standard permission_required decorator does not recognize 'user' in the 'request'
    received by Django rest_framework's APIView, resulting in the user being treated as anonymous.
    This custom implementation solves the issue, as well as removes a redirect URL since
    this channel of communication does not provide a user interface.

    :param validators: A tuple of permission validators.
                       By default, a user must have all required permissions to perform an action.
                       However, if only one permission was needed, set 'or' as the first member of the tuple.

                       For example:
                           (p1, p2, ('or', p3, p4))

                       In this case, a user must have permissions p1 and p2, and she must also have p3 or p4
                       in order to perform an action.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            """Receives HTTP request handler (function) of Django rest_framework's APIView class.
            ---
            args: [0] an object of a class inherited from Django's view
                  [1] an object of rest_framework.request.Request
            """
            # Delegate the boolean evaluation of the validator expression;
            # on failure reply 403 directly (no login redirect, as this is
            # an API channel with no UI).
            if _require_operator(validators, args[1], args[0], **kwargs):
                return func(*args, **kwargs)
            else:
                return Response({'detail': _('Permission required')}, status=403)
        return wrapper
    return decorator
def _require_operator(validators: tuple, request: Request, view: APIView, **kwargs) -> bool:
    """Validates and applies AND operator on the results produced by the validators

    :param validators: A tuple of validators
    :param request: A request sent from Django REST framework
    :param view: Django REST framework API view
    :param kwargs: Validator's argument
    :return: Validation result
    """
    def validate(validator):
        # A nested tuple/list is a sub-expression: recurse.  Note kwargs
        # are intentionally not forwarded into nested expressions.
        if type(validator) is tuple or type(validator) is list:
            return _require_operator(validator, request, view)
        # A string is treated as a Django permission codename.
        elif type(validator) is str:
            return request.user.has_perm(validator)
        else:
            # Otherwise assume a DRF-style permission class: instantiate it
            # and delegate to has_permission().
            v = validator()
            try:
                return v.has_permission(request, view, **kwargs)
            except TypeError:
                # Fallback for validators whose has_permission() signature
                # does not accept extra keyword arguments.
                # NOTE(review): a TypeError raised *inside* has_permission()
                # is also swallowed by this retry -- confirm acceptable.
                return v.has_permission(request, view)
    # 'operator_or()' returns a tuple with 'or' as its first member
    if type(validators) is not tuple and type(validators) is not list:
        return validate(validators)
    elif validators[0] == 'or':
        return any([validate(v) for v in validators[1:]])
    else:
        return all([validate(v) for v in validators])
def operator_or(*args):
    """Combine permission validators with OR semantics.

    Alternative spelling of the ``('or', ...)`` tuple understood by
    ``_require_operator``: import this function as ``_or`` and write
    ``permissions.require(p1, p2, _or(p3, p4))`` instead of
    ``permissions.require(p1, p2, ('or', p3, p4))``.
    """
    return ('or', *args)
| true |
5da6ac66b7af26fc817ce3f3d21a9ff51f460bdd | Python | Aasthaengg/IBMdataset | /Python_codes/p03304/s620661258.py | UTF-8 | 264 | 2.625 | 3 | [] | no_license | import sys
import math
import fractions
from collections import deque
from collections import defaultdict
sys.setrecursionlimit(10**7)
n, m, d = map(int, input().split())
if d == 0:
ans = (m - 1) / n
else:
ans = 2 * (n - d) * (m - 1) / (n * n)
print(ans)
| true |
e7e0c5aef7a6acc85dbd1cde028deaa6ae0b69f1 | Python | carolinetm82/AchatVoiture | /app.py | UTF-8 | 1,257 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
# Load the car-sales dataset once at startup.
df = pd.read_csv('carData.csv')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

# Page layout: a title, the scatter plot, and the year slider that drives it.
app.layout = html.Div([
    html.H1('Comment acheter sa voiture', style={'textAlign': 'center'}),
    dcc.Graph(id='graph-with-slider'),
    dcc.Slider(
        id='year-slider',
        min=df['Year'].min(),
        max=df['Year'].max(),
        value=df['Year'].min(),
        # One labelled tick per distinct year; step=None restricts the
        # slider to exactly those marks.
        marks={str(year): str(year) for year in df['Year'].unique()},
        step=None
    )
])

@app.callback(
    Output('graph-with-slider', 'figure'),
    [Input('year-slider', 'value')])
def update_figure(selected_year):
    """Rebuild the price-vs-kilometres scatter for the selected year,
    coloured by transmission type, with a 1s animated transition."""
    filtered_df = df[df.Year == selected_year]
    fig = px.scatter(filtered_df, x="Kms_Driven", y="Selling_Price",
                     color="Transmission")
    fig.update_layout(transition_duration=1000)
    return fig

if __name__ == '__main__':
    app.run_server(debug=True)
84e9864e6da87c13ffba48ced675040afda80d6f | Python | target/setupcfg2nix | /setupcfg2nix/cli.py | UTF-8 | 1,355 | 2.578125 | 3 | [
"MIT"
] | permissive | from setuptools.config import read_configuration
from pkg_resources import Requirement
import argparse
import setuptools
# The three setuptools requirement lists that get translated to nix attributes.
requires_sets = [ "install_requires", "setup_requires", "tests_require" ]
def main():
    """Command-line entry point: parse a setup.cfg and print a nix attribute
    set for it to stdout.

    The path to the configuration file is taken from the single optional
    positional CLI argument CFG (defaults to 'setup.cfg' in the current
    directory); this function itself takes no parameters.

    Returns:
        None: Prints to stdout
    """
    parser = argparse.ArgumentParser(description='Parse a setuptools setup.cfg into nix expressions')
    parser.add_argument('cfg', metavar='CFG', nargs='?', default='setup.cfg', help='The path to the configuration file (defaults to setup.cfg)')
    args = parser.parse_args()
    cfg = read_configuration(args.cfg)
    print('{')
    # ''...'' is nix's indented-string literal syntax.
    print(f"  pname = ''{cfg['metadata']['name']}'';")
    print(f"  version = ''{cfg['metadata']['version']}'';")
    for r in requires_sets:
        print_dependencies(cfg, r)
    print('}')
def print_dependencies(cfg, name):
    """Print the *name* requirement list from *cfg* as a nix list attribute.

    Prints nothing when the list is missing or empty.  Each requirement
    string is reduced to its bare project name (version specifiers and
    extras are dropped by Requirement.parse).
    """
    deps = cfg['options'].get(name, [])
    if deps:
        print(f"  {name} = [")
        for req in deps:
            # TODO should we care about 'extras'?
            print(f"    ''{Requirement.parse(req).project_name}''")
        print('  ];')
# Allow running directly as a script as well as via the console entry point.
if __name__ == '__main__':
    main()
| true |
07e777b6f3b0381a40dda671657f4a1d968a0eb8 | Python | inteljack/EL6183-Digital-Signal-Processing-Lab-2015-Fall | /project/Examples/Examples/PP2E/Internet/Ftp/getpython.py | UTF-8 | 1,455 | 2.734375 | 3 | [] | no_license | #!/usr/local/bin/python
###############################################################
# A Python script to download and build Python's source code.
# Uses ftplib, the ftp protocol handler which uses sockets.
# Ftp runs on 2 sockets (one for data, one for control--on
# ports 20 and 21) and imposes message text formats, but the
# Python ftplib module hides most of this protocol's details.
###############################################################
import os
from ftplib import FTP                      # socket-based ftp tools

# NOTE(review): Python 2 syntax (print statements) -- this script will not
# run under Python 3 without conversion.
Version = '1.5'                             # version to download
tarname = 'python%s.tar.gz' % Version       # remote/local file name

print 'Connecting...'
localfile = open(tarname, 'wb')             # where to store the download
connection = FTP('ftp.python.org')          # connect to the ftp site
connection.login()                          # default is anonymous login
connection.cwd('pub/python/src')            # change to the source directory
print 'Downloading...'
# Transfer in 1KB blocks; each block is appended to localfile.
connection.retrbinary('RETR ' + tarname, localfile.write, 1024)
connection.quit()
localfile.close()

print 'Unpacking...'
os.system('gzip -d ' + tarname)             # decompress
os.system('tar -xvf ' + tarname[:-3])       # strip .gz
print 'Building...'
os.chdir('Python-' + Version)               # build Python itself
os.system('./configure')                    # assumes unix-style make
os.system('make')
os.system('make test')
print 'Done: see Python-%s/python.' % Version
| true |
# Each film maps to [minimum_age, seats_remaining].
films = {
    'Finding Dory': [3, 5],
    'Bourne': [18, 5],
    'Tarzan': [15, 5],
    'Ghost Busters': [12, 5]
    }

# Box-office loop: ask for a film, check the customer's age against the
# film's rating, then sell a seat if any remain.
while True:
    choice = input('What film would you like to watch?:').strip().title()
    if choice in films:
        age = int(input('How old are you?:').strip())
        # Check the user's age against the film's minimum age (index 0).
        # BUGFIX: previously compared against index 1, which is the seat
        # count -- e.g. an 8-year-old could buy a Bourne (18) ticket.
        if age >= films[choice][0]:
            # Check enough seats remain (index 1 holds the seat count).
            numSeats = films[choice][1]
            if numSeats > 0:
                print('Enjoy your film')
                films[choice][1] = films[choice][1] - 1
            else:
                print('sorry, we are sold out')
        else:
            print('You are underaged, sorry')  # BUGFIX: 'aree' typo
    else:
        print('we do not have that film')
| true |
dc5dcb064ce3332a6dfe87ba9e87af55373cda5a | Python | kvanderwijst/New-damage-curves-and-multi-model-analysis-suggest-lower-optimal-temperature | /utils/colorutils.py | UTF-8 | 1,538 | 2.734375 | 3 | [] | no_license | """
Functions to transform between RGB, HEX and HLS and to lighten/darken a color
"""
import colorsys
import numpy as np

# Bugfix for Plotly default export size
import plotly.io as pio
pio.kaleido.scope.default_width = None
pio.kaleido.scope.default_height = None

# House-style colour palette (hex).  NOTE(review): "PBL" presumably refers
# to the PBL agency palette -- confirm with the project's style guide.
colors_PBL = [
    "#00AEEF",
    "#808D1D",
    "#B6036C",
    "#FAAD1E",
    "#3F1464",
    "#7CCFF2",
    "#F198C1",
    "#42B649",
    "#EE2A23",
    "#004019",
    "#F47321",
    "#511607",
    "#BA8912",
    "#78CBBF",
    "#FFF229",
    "#0071BB",
]

# Fixed colour assignment per IAM model used across the figures.
model_to_color = {
    "MIMOSA": colors_PBL[0],
    "WITCH": colors_PBL[1],
    "REMIND": colors_PBL[3],
}
model_to_color["MIMOSA_combined"] = colors_PBL[2]

# Shared Plotly annotation styling for explanatory call-outs.
explanation_annotation_style = dict(
    arrowhead=6,
    arrowcolor="#CCC",
    bgcolor="#FFF",
    arrowwidth=2,
    bordercolor="#CCC",
    font={"size": 12, "color": "#444"},
)
def hex_to_rgb(hex_str, normalise=False):
    """Convert a '#RRGGBB' hex colour to an [R, G, B] list.

    With ``normalise=False`` the channels are ints in 0..255; with
    ``normalise=True`` they are floats in 0.0..1.0.  A leading '#' is
    optional.
    """
    digits = hex_str.lstrip("#")
    channels = []
    for start in (0, 2, 4):
        channels.append(int(digits[start:start + 2], 16))
    if normalise:
        return [channel / 255.0 for channel in channels]
    return channels
def rgb_to_hex(rgb):
    """Convert an (R, G, B) triple of 0-255 ints to a '#rrggbb' string."""
    r, g, b = rgb
    return "#{:02x}{:02x}{:02x}".format(r, g, b)
def hex_to_hls(hex_str):
    """Convert a '#RRGGBB' hex colour to an (H, L, S) tuple, each in 0..1."""
    red, green, blue = hex_to_rgb(hex_str, True)
    return colorsys.rgb_to_hls(red, green, blue)
def hls_to_hex(hls):
    """Convert an (H, L, S) tuple (components in 0..1) to a '#rrggbb' string."""
    rgb_floats = colorsys.hls_to_rgb(*hls)
    rgb_ints = [int(np.round(component * 255)) for component in rgb_floats]
    return rgb_to_hex(rgb_ints)
def lighten_hex(hex_str, extra_lightness=0.1, extra_saturation=0.0):
    """Return *hex_str* lightened (and optionally saturated) in HLS space.

    NOTE(review): the adjusted lightness/saturation are not clamped to the
    0..1 range -- confirm callers never push them outside it.
    """
    hue, lightness, saturation = hex_to_hls(hex_str)
    adjusted = [hue, lightness + extra_lightness, saturation + extra_saturation]
    return hls_to_hex(adjusted)
| true |
ce4f7aad1d46c77492313b7afc36a3cf71919db3 | Python | GitMajonerGT/MoreZut | /Python/PTD/ptd_lab6.py | UTF-8 | 1,880 | 3.40625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 15:09:55 2019
@author: Paweł
"""
import numpy as np
from operator import xor
def haminga(bity):
    """Hamming(7,4) encoder / single-error corrector.

    * 4-bit input: returns the 7-bit codeword (data bits at positions
      0, 1, 2, 4; parity bits at 3, 5, 6).
    * 7-bit input: recomputes the parity bits, locates the corrupted bit
      via the syndrome S, flips it IN PLACE (the caller's list is mutated)
      and returns the same list.
    * any other length: prints that the signal is correct and returns a
      list of seven zeros.

    Progress is reported through Polish-language prints, matching the
    original program's output.
    """
    S = 0
    wyjscie = [0] * 7
    if len(bity) == 4:
        print('Sygnał wejsciowy: ', bity)
        # Scatter the data bits into positions 0, 1, 2 and 4.
        wyjscie[0], wyjscie[1], wyjscie[2], wyjscie[4] = bity
        # Parity bits over the classic Hamming(7,4) coverage groups.
        p1 = xor(xor(wyjscie[4], wyjscie[2]), wyjscie[0])
        p2 = xor(xor(wyjscie[4], wyjscie[1]), wyjscie[0])
        p4 = xor(xor(wyjscie[2], wyjscie[1]), wyjscie[0])
        wyjscie[3], wyjscie[5], wyjscie[6] = p4, p2, p1
        print('Zakodowany sygnał: ', wyjscie)
    elif len(bity) == 7:
        wyjscie = bity  # alias: corrections below mutate the caller's list
        print('Sygnał do detekcji: ', wyjscie)
        stored = (wyjscie[6], wyjscie[5], wyjscie[3])        # p1, p2, p4 as received
        recomputed = (
            xor(xor(wyjscie[4], wyjscie[2]), wyjscie[0]),    # p1
            xor(xor(wyjscie[4], wyjscie[1]), wyjscie[0]),    # p2
            xor(xor(wyjscie[2], wyjscie[1]), wyjscie[0]),    # p4
        )
        w1, w2, w4 = (xor(a, b) for a, b in zip(stored, recomputed))
        # Syndrome: 1-based position of the corrupted bit, counted from the right.
        S = 1 * w1 + np.power(2, 1) * w2 + np.power(2, 2) * w4
        if S != 0:
            print()
            print('Przekłamany bit na pozycji: ', S)
            print()
            # Flip bit S from the right: reverse, toggle, reverse back.
            wyjscie.reverse()
            wyjscie[S - 1] = 0 if wyjscie[S - 1] == 1 else 1
            wyjscie.reverse()
            print('Korekcja sygnału: ', wyjscie)
    elif len(bity) != 4 and S == 0:
        print('Sygnał jest poprawny')
    return wyjscie
# Demo: encode the 4-bit message 1100 into a 7-bit codeword ...
bity=[1,1,0,0]
(nowe)=haminga(bity)
# ... then feed in that codeword with one bit flipped; haminga() detects
# and corrects the single-bit error.
tab=[1, 1, 0, 0, 0, 0, 0]
(pozamianie)=haminga(tab)
| true |
numbers = [1, 5, -2, 0, 6]
# max / min / sum / pow demonstrations (output strings are in Korean).
print(numbers, "중 가장 큰 값은", max(numbers))
print(numbers, "중 가장 작은 값은", min(numbers))
print(numbers, "합계는", sum(numbers))
print("2의 10승은", pow(2,10))

pi=3.14152
# round() with and without an explicit digit count.
print(pi, "의 소수점 1자리 반올림은", round(pi))
print(pi, "의 소수점 1자리 반올림은", round(pi,0))
print(pi, "의 소수점 2자리 반올림은", round(pi,1))
print(pi, "의 소수점 3자리 반올림은", round(pi,2))
print(pi, "의 소수점 4자리 반올림은", round(pi,3))
print(round(2.55,0))  # 3.0
# 2.5, not 2.6: 2.55 has no exact binary floating-point representation, so
# it is stored slightly below 2.55 and rounds down.
print(round(2.55,1))
print(round(2.55,2))  # 2.55

user_name=input("이름은?")
user_age=input("나이는?")
print(user_name+"님!, 나이는 "+str(user_age)+"세군요!")
say="{0}님!, 나이는 {1}세군요! {1}세라니 놀라워요!"
print(say.format(user_name, user_age))

pi="3.14159"
print("문자열 출력: ", pi)
print("실수 변환 출력: ", float(pi))  # like Float.parseFloat(pi) in Java
print(float(pi)+100)
year= "2019"
print("올해 연도: ", year)
print("100년 뒤는", int(year)+100,"년입니다.")  # like Integer.parseInt(year) in Java
print("숫자를 문자열로 변환하려면 str()을 이용합니다.")
print("올해는 "+str(year)+"년 입니다.")  # like String.valueOf(year) in Java

# WARNING: the names below deliberately shadow the built-ins list/str/input
# to demonstrate why built-in names should not be reused as variable names.
list =['d','c','a','b']
list.reverse()
print("리스트 항목 순서 뒤집기",list)
list.sort()
print("리스트 항목 정렬하기",list)
list.sort(reverse=True)
print("리스트 항목 역정렬하기",list)
for index, value in enumerate(list):
    print("인덱스",index,"위치의 값은",value)

str="나는 문자열"
print(str)
n=3
# print(str(n)) would now raise an error: str was rebound to a string above.
# Names that must not be used as variable names:
#   - keywords
#   - built-in function names
#   - user-defined function names you still need
def input(s):
    print(s)
input("현재의 input()함수는 사용자 정의 함수입니다.")
6ed278d6363b386057bcdab01d3e44a3d0bdbdec | Python | pandas-dev/pandas | /pandas/tests/copy_view/test_core_functionalities.py | UTF-8 | 3,185 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array
def test_assigning_to_same_variable_removes_references(using_copy_on_write):
    """Rebinding ``df`` to its own ``reset_index()`` result must drop the
    reference to the old frame, so a later write can happen in place."""
    df = DataFrame({"a": [1, 2, 3]})
    df = df.reset_index()  # the old df object becomes unreferenced
    if using_copy_on_write:
        # Column 1 ("a") must not be tracked as shared with another frame.
        assert df._mgr._has_no_reference(1)
    arr = get_array(df, "a")
    df.iloc[0, 1] = 100  # Write into a
    # No copy was triggered: the same underlying buffer is still in use.
    assert np.shares_memory(arr, get_array(df, "a"))
def test_setitem_dont_track_unnecessary_references(using_copy_on_write):
    """Splitting a block via ``df["b"] = 100`` must not leave the resulting
    sub-blocks referencing each other (which would force a copy on write)."""
    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
    df["b"] = 100
    arr = get_array(df, "a")
    # We split the block in setitem, if we are not careful the new blocks will
    # reference each other triggering a copy
    df.iloc[0, 0] = 100
    assert np.shares_memory(arr, get_array(df, "a"))
def test_setitem_with_view_copies(using_copy_on_write):
    """Writing into a frame that still has a live view must copy under CoW
    and leave the view's data untouched."""
    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
    view = df[:]
    expected = df.copy()
    df["b"] = 100
    arr = get_array(df, "a")
    df.iloc[0, 0] = 100  # Check that we correctly track reference
    if using_copy_on_write:
        # The write went into a fresh buffer...
        assert not np.shares_memory(arr, get_array(df, "a"))
        # ...so the pre-existing view still shows the original values.
        tm.assert_frame_equal(view, expected)
def test_setitem_with_view_invalidated_does_not_copy(using_copy_on_write, request):
    """Once the only view of a frame goes out of scope, a write should
    happen in place instead of copying (currently expected to fail; see
    the inline comment for the reference-tracking gap)."""
    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
    view = df[:]
    df["b"] = 100
    arr = get_array(df, "a")
    view = None  # noqa: F841
    df.iloc[0, 0] = 100
    if using_copy_on_write:
        # Setitem split the block. Since the old block shared data with view
        # all the new blocks are referencing view and each other. When view
        # goes out of scope, they don't share data with any other block,
        # so we should not trigger a copy
        mark = pytest.mark.xfail(
            reason="blk.delete does not track references correctly"
        )
        request.node.add_marker(mark)
    assert np.shares_memory(arr, get_array(df, "a"))
def test_out_of_scope(using_copy_on_write):
    """A frame returned from a function must not keep references to the
    (now out-of-scope) frame it was column-selected from."""
    def func():
        df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1})
        # create some subset
        result = df[["a", "b"]]
        return result
    result = func()
    if using_copy_on_write:
        # Both blocks of the subset must be reference-free after df died.
        assert not result._mgr.blocks[0].refs.has_reference()
        assert not result._mgr.blocks[1].refs.has_reference()
def test_delete(using_copy_on_write):
    """Deleting a column / selecting a subset of an unshared frame must
    leave the remaining blocks without any references."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"]
    )
    del df["b"]
    if using_copy_on_write:
        assert not df._mgr.blocks[0].refs.has_reference()
        assert not df._mgr.blocks[1].refs.has_reference()
    df = df[["a"]]
    if using_copy_on_write:
        assert not df._mgr.blocks[0].refs.has_reference()
def test_delete_reference(using_copy_on_write):
    """Deleting a column while a full view exists keeps the remaining
    blocks (and the view's block) referenced."""
    df = DataFrame(
        np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"]
    )
    x = df[:]
    del df["b"]
    if using_copy_on_write:
        # df still shares data with the view x, and vice versa.
        assert df._mgr.blocks[0].refs.has_reference()
        assert df._mgr.blocks[1].refs.has_reference()
        assert x._mgr.blocks[0].refs.has_reference()
39686357e96e5d7a581e1ca2f6438c5f5db2d947 | Python | markosolopenko/python | /oop/numbers.py | UTF-8 | 712 | 3.828125 | 4 | [] | no_license |
class Numbers:
    """A pair of numbers demonstrating instance, class and static methods
    plus a managed ``value`` property with getter, setter and deleter."""

    # Class-level factor used by multiply().
    MULTIPLIER = 5

    def __init__(self, x, y):
        # Store both components on the instance.
        self.x, self.y = x, y

    def add(self):
        """Return the sum of the two stored numbers."""
        total = self.x + self.y
        return total

    @classmethod
    def multiply(cls, a):
        """Return *a* scaled by the class-level ``MULTIPLIER``."""
        return cls.MULTIPLIER * a

    @staticmethod
    def subtract(b, c):
        """Return ``b - c``; uses no instance or class state."""
        return b - c

    @property
    def value(self):
        """Both components as an ``(x, y)`` tuple."""
        return (self.x, self.y)

    @value.setter
    def value(self, pair):
        # Expects a 2-item iterable; unpacks into x and y.
        self.x, self.y = pair

    @value.deleter
    def value(self):
        # Remove both attributes from the instance entirely.
        del self.x
        del self.y

    def __str__(self):
        return ' | '.join((str(self.x), str(self.y)))
if __name__ == "__main__":
    # Quick demo of the Numbers API.
    obj = Numbers(1, 2)
    print(obj.x)
    obj.x = 4
    print(obj.value)
    # BUGFIX: the value setter unpacks a 2-item iterable into (x, y);
    # assigning a bare int (previously ``obj.value = 12``) raised
    # "TypeError: cannot unpack non-iterable int object".
    obj.value = (12, 34)
    print(obj.value)
| true |
def maxsearch(lst):
    """Return the maximum of a bitonic list (ascending run, then descending).

    Binary-searches on the slope at the midpoint instead of scanning, so it
    runs in O(log n).  BUGFIX: the original had no length-1 case, so
    ``lst[mid - 1]`` wrapped around via negative indexing and purely
    descending inputs fell through to an implicit ``return None``; a
    single-element list is now the recursion's base case.
    """
    if len(lst) == 1:
        return lst[0]
    if len(lst) == 2:
        return max(lst)
    mid = len(lst) // 2  # for len >= 3 this satisfies 1 <= mid <= len - 2
    if lst[mid] > lst[mid - 1] and lst[mid] > lst[mid + 1]:
        # The midpoint is a local peak, hence the global max of a bitonic list.
        return lst[mid]
    elif lst[mid] > lst[mid - 1]:
        # Still on the ascending slope: the peak is at or right of mid.
        return maxsearch(lst[mid:])
    else:
        # On the descending slope: the peak lies strictly left of mid.
        return maxsearch(lst[:mid])
#given list with ascending section and descending section, find min and max
def minmax(lst):
    """Return (min, max) of a bitonic list (ascending run then descending run).

    In such a list the minimum must sit at one of the two ends, so it is
    ``min(first, last)``; the maximum (the peak) is located by maxsearch().
    BUGFIX: the original returned ``lst[0]`` unconditionally, which is wrong
    whenever the descending tail drops below the first element
    (e.g. [5, 6, 7, 3] has minimum 3, not 5).
    """
    mmin = min(lst[0], lst[-1])
    mmax = maxsearch(lst)
    return (mmin, mmax)
# DP longest subsequence problem
# abs(A[i]-A[j]) cannot be greater than abs(i-j)
def num_elem_rule(lst):
    """Count the distinct values of *lst* that take part in at least one
    pair (i, j), i != j, satisfying abs(lst[i] - lst[j]) <= abs(i - j).

    Brute-force O(n^2) check over all ordered index pairs; both members of
    every qualifying pair are collected into a set, whose size is returned.
    """
    qualifying = set()
    for i, a in enumerate(lst):
        for j, b in enumerate(lst):
            if i == j:
                continue
            if abs(a - b) <= abs(i - j):
                qualifying.add(a)
                qualifying.add(b)
    return len(qualifying)
if __name__=="__main__":
    # Bitonic list: rises to 10, then falls.  Expect (min, max).
    lst = [2,3,4,5,6,7,10,9,8,7]
    print(minmax(lst))
    # Count elements participating in a pair with |value diff| <= |index diff|.
    newlst = [13,5,4]
    print(num_elem_rule(newlst))
9d5bf72dc375e1bfa673d77e7dfbf6ee73cbcc00 | Python | DARRENSKY/COMP9021 | /assignment/assignment_1/highest_scoring_word.py | UTF-8 | 2,491 | 3.359375 | 3 | [] | no_license | from itertools import combinations, permutations
# Letter -> point value table (Scrabble-like scoring: common letters are
# cheap, rare letters expensive).
dic = {'a':2, 'b':5, 'c':4, 'd':4, 'e':1, 'f':6, 'g':5, 'h':5, 'i':1, 'j':7,\
       'k':6, 'l':3, 'm':5, 'n':2, 'o':3, 'p':5, 'q':7, 'r':2, 's':1, 't':2,\
       'u':4, 'v':6, 'w':6, 'x':7, 'y':5, 'z':7}
#list(permutations([1, 2, 3], 2))
def get_score(word):
    """Return the total point value of *word*, summing the per-letter
    scores from the module-level ``dic`` table."""
    return sum(dic[letter] for letter in word)
def sort_str(word):
    """Return the characters of *word* sorted ascending, rejoined as a
    string (a canonical form for anagram comparison)."""
    return ''.join(sorted(word))
if __name__ == "__main__":
    # Collect individual letters from the user's (possibly space-separated)
    # input, rejecting anything not in the score table.
    s = input("Enter between 3 and 10 lowercase letters: ")
    l = []
    for i in s.split(' '):
        if i == '':
            continue
        for j in i:
            if j not in dic:
                print('Incorrect input, giving up...')
                exit(-1)
            else:
                l.append(j)
    if len(l) < 3 or len(l) > 10:
        print('Incorrect input, giving up...')
        exit(-1)
    # Load the dictionary: `words` keeps the raw words, `sorted_words` their
    # letter-sorted (anagram-canonical) forms.  Both dicts are used as sets.
    # NOTE(review): the file handle is never closed; a `with` block would be
    # the usual fix.
    f = open("wordsEn.txt", "r")
    lines = f.readlines()
    words = {}
    sorted_words = {}
    for i in lines:
        i = i.strip('\n')
        words[i] = 1
        word = sort_str(i)
        sorted_words[word] = 1
    #print(sorted_words)
    # Try every combination of the input letters, longest first, keeping the
    # canonical forms that achieve the best score among real words.
    score_words = []
    score = 0
    for i in range(len(l), 0, -1):
        for j in list(combinations(l, i)):
            word = ''.join(j)
            # NOTE(review): the three prints below look like leftover
            # debugging output.
            print(word)
            word = sort_str(word)
            cur_score = get_score(word)
            print(cur_score)
            print(score)
            if score > cur_score:
                continue
            if word in sorted_words:
                if score == cur_score and word not in score_words:
                    score_words.append(word)
                    #print(score_words)
                elif score < cur_score:
                    score_words = [word]
                    #print(score_words)
                    score = cur_score
    # Map the winning canonical forms back to actual dictionary words.
    r = []
    for i in list(words.keys()):
        word = sort_str(i)
        #print(word)
        if word in score_words:
            r.append(i)
    score_words = r[:]
    # Report the result(s).
    if len(score_words) == 0:
        print('No word is built from some of those letters.')
    elif len(score_words) == 1:
        print('The highest score is %d.' % score)
        print('The highest scoring word is %s' % score_words[0])
    else:
        print('The highest score is %d.' % score)
        print('The highest scoring words are, in alphabetical order:')
        score_words.sort()
        for i in score_words:
            print('    %s' % i)
| true |
2077f78de21fcc16d19289bcf1852aef7c9a6328 | Python | Rolemodel01291/HackerRank-Python-Solutions | /hackerrank_minion_game.py | UTF-8 | 3,015 | 4.59375 | 5 | [] | no_license | """
HACKERRANK THE MINION GAME
URL: https://www.hackerrank.com/challenges/the-minion-game/problem
TASK: Kevin and Stuart want to play the 'The Minion Game'.
RULES:
1) Both players are given the same string, S
2) Both players have to make substrings using the letters of the string S.
3) Stuart has to make words starting with consonants.
4) Kevin has to make words starting with vowels.
5) The game ends when both players have made all possible substrings.
SCORING:
A player gets +1 point for each occurrence of the substring in the string S
FOR EXAMPLE:
String S = BANANA
Kevin's vowel beginning word = ANA
Here, ANA occurs twice in BANANA. Hence, Kevin will get 2 Points.
For better understanding, see below:
STUART KEVIN
WORDS SCORE WORDS SCORE
B 1 A 3
N 2 AN 2
BA 1 ANA 2
NA 2 ANAN 1
BAN 1 ANANA 1
NAN 1
BANA 1
NANA 1
BANAN 1
BANANA 1
TOTAL 12 TOTAL 9
STUART WON
"""
# def find_unique_characters_in_a_string(string):
# unique_character_list = []
# for i in range(len(string)):
# flag = False
# for j in range(0, i):
# if(string[i] == string[j]):
# flag = True
# break
# if(flag == True):
# continue
# unique_character_list.append(string[i])
# return unique_character_list
def minion_game(string):
    """Score the Minion Game for *string* and print the winner.

    Kevin owns every substring that starts with a vowel, Stuart every one
    starting with a consonant.  A substring starting at index i occurs in
    ``len(string) - i`` substrings, so each position simply contributes
    that amount to its player's score -- no substring enumeration needed.

    Improvement: a single O(n) pass over the string replaces the original
    O(26*n) rescan per distinct letter; the printed output is unchanged
    ('Stuart <score>' / 'Kevin <score>' / 'Draw').
    """
    total_length = len(string)
    stuart_count, kevin_count = 0, 0
    for index, letter in enumerate(string):
        if letter.upper() in ('A', 'E', 'I', 'O', 'U'):
            kevin_count += total_length - index
        else:
            stuart_count += total_length - index
    if stuart_count > kevin_count:
        print('Stuart ', stuart_count)
    elif kevin_count > stuart_count:
        print('Kevin ', kevin_count)
    else:
        print('Draw')
if __name__ == '__main__':
    # HackerRank-style driver: read the string from stdin and score it.
    s = input()
    minion_game(s)
| true |
class room:
    """Toy class demonstrating class-level vs. instance-level attributes."""

    student_count = 10  # class attribute, shared by all instances

    def marks(self):
        """Store three mark values on the instance and return the instance.

        BUGFIX: the original kept a/b/c as locals and returned None, so the
        caller's ``read.b`` raised AttributeError.  The values are now
        instance attributes and ``self`` is returned so the result is
        usable by the caller.
        """
        self.a = 5
        self.b = 6
        self.c = 7
        print(room.student_count)
        return self
Teacher = room()
read = Teacher.marks()
# NOTE(review): this line relies on marks() returning an object that exposes
# ``b``; as originally written marks() returned None (a/b/c were locals), so
# ``read.b`` raised AttributeError -- the fix belongs in room.marks().
print(read.b)
def convert_json(json_data, output_path='new_file.json'):
    """Tweak one hard-coded value in *json_data*, pretty-print the document
    before and after, and write the result to *output_path* as formatted JSON.

    The dict is modified in place and also returned.

    :param json_data: parsed JSON document with the expected
        blocks/messages/0x420/signals/FUEL_FLOW nesting
    :param output_path: where to write the updated JSON (generalised from
        the previously hard-coded 'new_file.json'; the default keeps the
        old behaviour)
    :return: the (mutated) *json_data*
    """
    import json
    from pprint import pprint
    print('json data:\n')
    pprint(json_data)
    # Changing a value in the json (mutates json_data in place).
    json_data["blocks"][0]["messages"][0]["0x420"]["signals"]["FUEL_FLOW"]["value"] = 0.9999999999
    print('\n\nChanged File: \n')
    pprint(json_data)
    # Write the updated document to disk.
    with open(output_path, 'w') as outfile:
        # indent/separators/sort_keys only affect pretty printing; without
        # them the file would be a single very long line.
        json.dump(json_data, outfile, indent=4, separators=(',', ': '), sort_keys=True)
    return json_data
315c2d4c592918876c5410064a883d174624b32e | Python | kokoaespol/kokoa-calendario | /kokoaCalendar/webapp/models.py | UTF-8 | 2,613 | 2.5625 | 3 | [] | no_license | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
'''
Estudiante (username[first_name], -PK- matricula[last_name]) using User from django.contrib.auth.models
Materias Disponibles (ForeignKey[Estudiante], codigo, nombre, paralelos[String])
Curso (codigo_materia, paralelo, horario_clases, horario_examenes, aula)
'''
class Usuario(models.Model):
    """Links a Django auth User to their student ID (matricula)."""
    # NOTE(review): ForeignKey is called without on_delete, which only works
    # on Django < 2.0 -- confirm the target Django version (applies to every
    # ForeignKey in this module).
    user = models.ForeignKey(User)
    matricula = models.CharField(max_length=10)
    def __unicode__(self):
        return unicode(self.matricula)
class Materia(models.Model):
    """A course subject: credit count plus unique name and code."""
    creditos = models.PositiveSmallIntegerField()
    nombre = models.CharField(max_length=20, blank=True, null=True, unique=True)
    codigo = models.CharField(max_length=10, blank=True, null=True, unique=True)
    def __unicode__(self):
        return unicode(self.nombre)
class Paralelo(models.Model):
    """A numbered section ("parallel") of a Materia."""
    numero = models.PositiveSmallIntegerField()
    materia = models.ForeignKey(Materia, related_name='paralelo')
    def __unicode__(self):
        return unicode(self.numero)
class Horario(models.Model):
    """A timetable slot (day, start/end time, room) for one Paralelo."""
    dia = models.CharField(max_length=10)
    inicio = models.CharField(max_length=10)
    fin = models.CharField(max_length=10)
    curso = models.CharField(max_length=10)
    paralelo = models.ForeignKey(Paralelo, related_name='horario')
    def __unicode__(self):
        return unicode(self.dia + self.curso)
# Data from the wsMateriasDisponibles web service.
class MateriaDisponible(models.Model):
    """Subjects available to a given user."""
    username = models.ForeignKey(User)
    materia = models.ManyToManyField(Materia)
    def __unicode__(self):
        return unicode(self.materia)
class Curso(models.Model):
    """A concrete course offering: subject + section number plus class/exam
    schedules and rooms (stored as free-form strings)."""
    materia = models.ForeignKey(Materia)
    paralelo = models.PositiveSmallIntegerField()
    horario_clases = models.CharField(max_length=50)
    horario_examenes = models.CharField(max_length=50)
    aulaClase = models.CharField(max_length=200)
    aulaExamen = models.CharField(max_length=200)
    def __unicode__(self):
        return unicode(self.materia)
class Plan(models.Model):
    """A user's enrolment plan: the set of Curso offerings they picked."""
    username = models.ForeignKey(User)
    curso = models.ManyToManyField(Curso)
    def __unicode__(self):
        return unicode(self.username)
# Data from the wsInfoEstudianteGeneral web service.
class Estudiante(models.Model):
    """General student record: ID, degree programme, grade average, current
    plan and available subjects."""
    username = models.ForeignKey(User)
    matricula = models.CharField(max_length=11, blank=True, null=True)
    carrera = models.CharField(max_length=50, blank=True, null=True)
    promedio = models.CharField(max_length=5, blank=True, null=True)
    plan = models.ForeignKey(Plan, blank=True, null=True, related_name="Plan")
    materiaDisponibles = models.ManyToManyField(MateriaDisponible)
    def __unicode__(self):
        return unicode(self.username)
| true |
7198e5ed874fb806bfae7418bebebbb5f794947d | Python | williamqin123/python-sudoku | /Clash-Royale-Scripts/paper-extender.py | UTF-8 | 726 | 3.484375 | 3 | [] | no_license | import random
# Seed sentences plus the filler / lead-in / tail phrase pools that
# extend() uses to pad the text out.
sentences = ["hemophilia is a disease", "hemophilia could potentially be deadly", "hemophilia is not a common disease", "hemophilia is genetic"]
fillers = ["the previous fact clearly indicates this statement's credibility", "many people commonly believe the following sentence to be true"]
befores = ["Additionally, ", "Actually, ", "It is commonly said that "]
afters = [", which is true.", ", which is officially accepted."]
def extend():
    """Pad the essay: after each seed sentence insert a random filler, then
    wrap every resulting sentence in a random lead-in and tail phrase, and
    print the final list.

    BUGFIX: the original located elements with ``list.index()``, which
    returns the FIRST matching occurrence -- wrong whenever two entries
    compare equal (e.g. the same filler chosen twice, or duplicate seed
    sentences) -- and its second loop mutated ``output`` while iterating
    it.  Positions are now built explicitly.  The order of random.choice()
    calls is unchanged, so seeded runs produce identical text.
    """
    output = []
    for sentence in sentences:
        output.append(sentence)
        output.append(random.choice(fillers))
    decorated = []
    for sentence in output:
        # Left-to-right evaluation keeps the befores/afters draw order
        # identical to the original expression.
        decorated.append(random.choice(befores) + sentence + random.choice(afters))
    output = decorated
    print(output)

extend()
7ab20d8f270e82860b95a34175263e6ded9723f4 | Python | Runki2018/CvPytorch | /src/optimizers/__init__.py | UTF-8 | 3,534 | 2.515625 | 3 | [
"MIT"
] | permissive | # !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2021/2/5 14:16
# @Author : liumin
# @File : __init__.py
import torch
import torch.nn as nn
from copy import deepcopy
from torch.optim import SGD, Adam, AdamW, RMSprop, Adadelta
from .radam import RAdam
from .ranger import Ranger
from .adabelief import AdaBelief
__all__ = ['SGD','Adam', 'AdamW','Adadelta','RMSprop', 'RAdam', 'Ranger', 'AdaBelief']
def get_current_lr(optimizer):
    """Return the smallest learning rate across the optimizer's param groups."""
    lrs = [group["lr"] for group in optimizer.param_groups]
    return min(lrs)
def build_optimizer(cfg, model):
    '''
    g0, g1, g2 = [], [], [] # optimizer parameter groups
    for v in model_ft.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias
            g2.append(v.bias)
        if isinstance(v, nn.BatchNorm2d): # weight with decay
            g0.append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight without decay
            g1.append(v.weight)
    optimizer_ft = torch.optim.SGD(g0, lr=0.01, momentum=0.937, nesterov=True)
    optimizer_ft.add_param_group({'params': g1, 'weight_decay': 0.0005}) # add g1 with weight_decay
    optimizer_ft.add_param_group({'params': g2}) # add g2 (biases)
    del g0, g1, g2
    '''
    # Build one optimizer parameter group per trainable parameter so that
    # bias vs. weight hyper-parameters (and a dedicated backbone LR) can
    # differ per parameter.
    # params = [p for p in model.parameters() if p.requires_grad]
    _params = []
    # filter(lambda p: p.requires_grad, model.parameters())
    for n, p in dict(model.named_parameters()).items():
        if p.requires_grad:
            # Copy the bias- or weight-specific template so mutating it
            # (pop("data")) does not corrupt the shared config.
            _args = deepcopy(cfg.OPTIMIZER.BIAS_PARAMS if "bias" in n else cfg.OPTIMIZER.WEIGHT_PARAMS)
            _args.pop("data")
            # Backbone parameters may get their own learning rate.
            _params += [{"params": [p], "lr": cfg.BACKBONE_LR if 'backbone' in n and cfg.BACKBONE_LR is not None else cfg.INIT_LR, **_args}]
            if "bias" in n:
                # NOTE(review): `or 1.0` also remaps a multiplier of 0 to
                # 1.0 — confirm that is intended and not just None-handling.
                _params[-1]["lr"] *= cfg.OPTIMIZER.BIAS_LR_MULTIPLIER or 1.0

    # Dispatch on the configured optimizer name (case-insensitive).
    opt_type = cfg.OPTIMIZER.TYPE.lower()
    if opt_type == "sgd":
        '''torch.optim.SGD(params, lr=0.001, momentum=0, dampening=0, weight_decay=0, nesterov=False)'''
        optimizer = SGD(_params)
    elif opt_type == "adam":
        '''torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)'''
        optimizer = Adam(_params)
    elif opt_type == "adamw":
        '''torch.optim.AdamW(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)'''
        optimizer = AdamW(_params)
    elif opt_type == "adadelta":
        '''torch.optim.Adadelta(params, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0)'''
        optimizer = Adadelta(_params)
    elif opt_type == 'rmsprop':
        '''torch.optim.RMSprop(params, lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)'''
        optimizer = RMSprop(_params)
    elif opt_type == 'radam':
        '''optimizer = RAdam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01, betas=(0.90, 0.999), eps=1e-08, weight_decay=1e-4)'''
        optimizer = RAdam(_params)
    elif opt_type == 'ranger':
        '''optimizer = Ranger(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01, betas=(0.95, 0.999), eps=1e-08, weight_decay=1e-4)'''
        optimizer = Ranger(_params)
    elif opt_type == 'adabelief':
        optimizer = AdaBelief(_params)
    else:
        raise ValueError("Unsupported optimizer type: {}, Expected optimizer method in {} ".format(cfg.OPTIMIZER.TYPE, __all__))
    return optimizer
2b6d06d84888e1dcd59803556407ec0df15e0e64 | Python | syurskyi/Algorithms_and_Data_Structure | /Cracking Coding Interviews - Mastering Algorithms/template/Section 2 Arrays and Strings/unique-characters.py | UTF-8 | 470 | 3.515625 | 4 | [] | no_license | # # Implement an algorithm to determine if a string has all unique characters.
#
# # "abc" -> True
# # "" -> True
# # "aabc" -> False
#
# # Brute Force - O(n^2)
# # Sorting - O(nlogn + n) -> O(nlogn)
# # Hashset - O(n)
#
# ___ unique_characters word
# visited_chars _ s..
#
# ___ i __ r.. l.. ?
# letter _ ? ?
#
# __ ? __ ?
# r_ F..
#
# ?.a.. ?
#
# r_ T..
#
# print(unique_characters("abc"))
# print(unique_characters(""))
# print(unique_characters("aabc"))
| true |
7c9975696e0fc8eaa3401cfb9f94324d554ad39a | Python | qrfaction/leetcode | /leetcode/LongestSubstringWithoutRepeatingCharacters_3.py | UTF-8 | 577 | 2.828125 | 3 | [] | no_license | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if len(s)==0:
return 0
prevIndex = {
s[0]:0
}
result = [1]
maxlen = 1
for i,c in enumerate(s[1:]):
last_res = result[-1]
if prevIndex.get(c,-1) == -1:
result.append(last_res+1)
else:
result.append(min(i+1-prevIndex[c],last_res+1))
prevIndex[c] = i+1
if result[-1] >= maxlen:
maxlen = result[-1]
return maxlen
| true |
3b975712747b4b74097d08124e5b7cf741b8eb67 | Python | laszlothebrave/Unshrederator2 | /Unshrederator2/ArrayManipulation.py | UTF-8 | 314 | 2.65625 | 3 | [] | no_license | from PIL import Image
from PIL.ImageShow import show
from cv2.cv2 import imwrite, imread
def show_from_array(array):
    """Display an RGB array on screen via PIL's image viewer."""
    show(Image.fromarray(array, 'RGB'))
def make_image_from_array(matrix):
    """Round-trip *matrix* through a PNG on disk and return it as a cv2 image.

    The write/read cycle normalises the raw array into the layout cv2's
    `imread` produces for the rest of the pipeline.
    """
    # The previous version also built a PIL Image here that was never used;
    # only the cv2 round-trip is required.
    imwrite('temp.png', matrix)
    return imread('temp.png')
| true |
ddcd6d3fc5ed9477013c9d8ee759f2619cddfa93 | Python | nekonbu72/easyfox_server | /2to3/modified/marionette_driver/transport.py | UTF-8 | 9,499 | 2.8125 | 3 | [] | no_license | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import socket
import time
class SocketTimeout(object):
    """Scoped override of a socket's timeout.

    While the ``with`` body runs, the socket uses ``timeout``; the previous
    timeout is restored on exit whether or not an exception occurred.
    """

    def __init__(self, socket, timeout):
        self.sock = socket
        self.timeout = timeout
        self.old_timeout = None

    def __enter__(self):
        self.old_timeout = self.sock.gettimeout()
        self.sock.settimeout(self.timeout)

    def __exit__(self, exc_type, exc_value, traceback):
        self.sock.settimeout(self.old_timeout)
class Message(object):
    """Base class for protocol messages; equality is keyed on the message id."""

    def __init__(self, msgid):
        self.id = msgid

    def __eq__(self, other):
        return self.id == other.id

    def __ne__(self, other):
        return not (self == other)
class Command(Message):
    """A remote command; wire form is ``[type, id, name, params]``."""

    TYPE = 0

    def __init__(self, msgid, name, params):
        Message.__init__(self, msgid)
        self.name = name
        self.params = params

    def __str__(self):
        return "<Command id={0}, name={1}, params={2}>".format(self.id, self.name, self.params)

    def to_msg(self):
        """Serialise this command to its JSON wire form."""
        return json.dumps([Command.TYPE, self.id, self.name, self.params])

    @staticmethod
    def from_msg(payload):
        """Parse a JSON payload into a ``Command``; the type tag must match."""
        data = json.loads(payload)
        assert data[0] == Command.TYPE
        return Command(data[1], data[2], data[3])
class Response(Message):
    """A remote reply; wire form is ``[type, id, error, result]``."""

    TYPE = 1

    def __init__(self, msgid, error, result):
        Message.__init__(self, msgid)
        self.error = error
        self.result = result

    def __str__(self):
        return "<Response id={0}, error={1}, result={2}>".format(self.id, self.error, self.result)

    def to_msg(self):
        """Serialise this response to its JSON wire form."""
        return json.dumps([Response.TYPE, self.id, self.error, self.result])

    @staticmethod
    def from_msg(payload):
        """Parse a JSON payload into a ``Response``; the type tag must match."""
        data = json.loads(payload)
        assert data[0] == Response.TYPE
        return Response(data[1], data[2], data[3])
class Proto2Command(Command):
    """Compatibility shim: wraps a protocol level <= 2 remote message as a
    ``Command``.  Old-protocol messages carry no message id."""

    def __init__(self, name, params):
        Command.__init__(self, None, name, params)
class Proto2Response(Response):
    """Compatibility shim: wraps a protocol level <= 2 remote message as a
    ``Response``.  Old-protocol messages carry no message id."""

    def __init__(self, error, result):
        Response.__init__(self, None, error, result)

    @staticmethod
    def from_data(data):
        """Wrap *data* as an error response when it carries an ``error``
        key, otherwise as a successful result."""
        if "error" in data:
            return Proto2Response(data, None)
        return Proto2Response(None, data)
class TcpTransport(object):
    """Socket client that communicates with Marionette via TCP.

    It speaks the protocol of the remote debugger in Gecko, in which
    messages are always preceded by the message length and a colon, e.g.:

        7:MESSAGE

    On top of this protocol it uses a Marionette message format that varies
    with the protocol level offered by the remote server.

    Supported protocol levels are 1 and above.
    """
    max_packet_length = 4096

    def __init__(self, addr, port, socket_timeout=60.0):
        """If `socket_timeout` is `0` or `0.0`, non-blocking socket mode
        will be used; `None` disables timeouts on socket operations
        altogether.
        """
        self.addr = addr
        self.port = port
        self._socket_timeout = socket_timeout
        self.protocol = 1
        self.application_type = None
        self.last_id = 0                 # id of the most recent message
        self.expected_response = None    # Command we are awaiting a reply for
        self.sock = None                 # lazily connected in send()/connect()

    @property
    def socket_timeout(self):
        return self._socket_timeout

    @socket_timeout.setter
    def socket_timeout(self, value):
        # Propagate the new timeout to the live socket, if any.
        if self.sock:
            self.sock.settimeout(value)
        self._socket_timeout = value

    def _unmarshal(self, packet):
        """Turn a raw packet body into a Message subclass instance."""
        msg = None
        # protocol 3 and above: first array element is the type tag
        if self.protocol >= 3:
            typ = int(packet[1])
            if typ == Command.TYPE:
                msg = Command.from_msg(packet)
            elif typ == Response.TYPE:
                msg = Response.from_msg(packet)
        # protocol 2 and below: a bare JSON object
        else:
            data = json.loads(packet)
            msg = Proto2Response.from_data(data)
        return msg

    def receive(self, unmarshal=True):
        """Wait for the next complete response from the remote.

        :param unmarshal: Default is to deserialise the packet and
            return a ``Message`` type.  Setting this to false will return
            the raw packet.
        """
        now = time.time()
        data = ""
        bytes_to_recv = 10  # enough to cover the "<length>:" prefix
        while self.socket_timeout is None or (time.time() - now < self.socket_timeout):
            try:
                chunk = self.sock.recv(bytes_to_recv)
                # NOTE(review): decoding each chunk independently can raise
                # if a multi-byte UTF-8 sequence straddles a chunk boundary
                # — confirm payloads are effectively ASCII.
                chunk = chunk.decode('utf-8') # nekonbu72 +
                data += chunk
            except socket.timeout:
                pass
            else:
                if not chunk:
                    raise socket.error("No data received over socket")
            sep = data.find(":")
            if sep > -1:
                length = data[0:sep]
                remaining = data[sep + 1:]
                if len(remaining) == int(length):
                    if unmarshal:
                        msg = self._unmarshal(remaining)
                        self.last_id = msg.id
                        if self.protocol >= 3:
                            self.last_id = msg.id
                            # keep reading incoming responses until
                            # we receive the user's expected response
                            if isinstance(msg, Response) and msg != self.expected_response:
                                return self.receive(unmarshal)
                        return msg
                    else:
                        return remaining
                # Ask for exactly the rest of the announced payload next.
                bytes_to_recv = int(length) - len(remaining)
        raise socket.timeout(
            "Connection timed out after {}s".format(self.socket_timeout))

    def connect(self):
        """Connect to the server and process the hello message we expect
        to receive in response.

        Returns a tuple of the protocol level and the application type.
        """
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.settimeout(self.socket_timeout)
            self.sock.connect((self.addr, self.port))
        except:
            # Unset self.sock so that the next attempt to send will cause
            # another connection attempt.
            self.sock = None
            raise
        with SocketTimeout(self.sock, 2.0):
            # first packet is always a JSON Object
            # which we can use to tell which protocol level we are at
            raw = self.receive(unmarshal=False)
        hello = json.loads(raw)
        self.protocol = hello.get("marionetteProtocol", 1)
        self.application_type = hello.get("applicationType")
        return (self.protocol, self.application_type)

    def send(self, obj):
        """Send message to the remote server. Allowed input is a
        ``Message`` instance or a JSON serialisable object.
        """
        if not self.sock:
            self.connect()
        if isinstance(obj, Message):
            data = obj.to_msg()
            if isinstance(obj, Command):
                # Remember which command we expect a Response for.
                self.expected_response = obj
        else:
            data = json.dumps(obj)
        payload = "{0}:{1}".format(len(data), data)
        totalsent = 0
        while totalsent < len(payload):
            # NOTE(review): `sent` counts bytes but `payload` is sliced by
            # characters — these diverge for non-ASCII payloads; confirm
            # payloads are ASCII-only.
            sent = self.sock.send(
                payload[totalsent:].encode('utf-8')) # nekonbu72 +
            if sent == 0:
                raise IOError("Socket error after sending {0} of {1} bytes"
                              .format(totalsent, len(payload)))
            else:
                totalsent += sent

    def respond(self, obj):
        """Send a response to a command. This can be an arbitrary JSON
        serialisable object or an ``Exception``.
        """
        res, err = None, None
        if isinstance(obj, Exception):
            err = obj
        else:
            res = obj
        msg = Response(self.last_id, err, res)
        self.send(msg)
        return self.receive()

    def request(self, name, params):
        """Sends a message to the remote server and waits for a response
        to come back.
        """
        self.last_id = self.last_id + 1
        cmd = Command(self.last_id, name, params)
        self.send(cmd)
        return self.receive()

    def close(self):
        """Close the socket.

        First forces the socket to not send data anymore, and then explicitly
        close it to free up its resources.

        See: https://docs.python.org/2/howto/sockets.html#disconnecting
        """
        if self.sock:
            try:
                self.sock.shutdown(socket.SHUT_RDWR)
            except IOError as exc:
                # If the socket is already closed, don't care about:
                # Errno 57: Socket not connected
                # Errno 107: Transport endpoint is not connected
                if exc.errno not in (57, 107):
                    raise
            self.sock.close()
            self.sock = None

    def __del__(self):
        # Best-effort cleanup when the transport is garbage collected.
        self.close()
| true |
d3a86e2c7e76feb986295aa138faf3b39c2cb8aa | Python | ozerelkerem/Project-Eular-Solutions | /python/problem25.py | UTF-8 | 668 | 4.1875 | 4 | [] | no_license | print("""
1000-digit Fibonacci number
Problem 25
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain 1000 digits?
""")
def fib():
    """Yield the Fibonacci sequence 1, 1, 2, 3, 5, ... indefinitely."""
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b


# Scan the sequence for the first term with 1000 decimal digits and print
# its 1-based index.
for index, term in enumerate(fib(), start=1):
    if len(str(term)) >= 1000:
        print(index)
        break
| true |
16e3632fd024ba68cc8d3e3fb2d38ce83594c398 | Python | kavyakammaripalle/Advance-Python | /AdvancePython_Day9.py | UTF-8 | 739 | 3.875 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[4]:
#syntax for dataframe
# pandas.DataFrame(data,index,columns,dtype,copy)
#creating and empty datafarame
import pandas as pd
a=pd.DataFrame()
print(a)
# In[5]:
#creating dataframes with a list
data=[1,2,3,4,5,6,7]
d=pd.DataFrame(data)
print(d)
# In[17]:
x=[['kavya',80],['rahul',75],['karthik',100],['keerthi',78]]
y=pd.DataFrame(x,columns=['Student Name','Scores'],dtype=float)
print(y)
# In[16]:
#creating a dataframe from a dictionary of nd arrays/list
data={'Name':['kavya','karthi','keerthi','dhanush'],'Age':[23,24,23,19]}
df=pd.DataFrame(data,index=['1','2','3','4'])
print(df)
# In[19]:
data=[{'a':1,'b':3},{'b':0,'c':4}]
x=pd.DataFrame(data)
print(x)
# In[ ]:
| true |
f5dec8cb61316d29f753687906f323f56ec194f2 | Python | THUsatlab/AD2021 | /ncmmsc2021_baseline_svm/train_ad.py | UTF-8 | 3,386 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/7/5
# @Author : guyu
import pandas as pd
import os
import numpy as np
from sklearn import svm
from sklearn.metrics import accuracy_score,f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold,KFold
from numpy import *
from pandas import DataFrame
# Load the training data from disk.
def load_train_data(train_dataset_path):
    """Read the training CSV and split it into features and labels.

    The file must contain a ``label`` column plus feature columns named
    ``"1"`` .. ``"1582"``.

    Returns:
        tuple: (feature matrix, label vector) as numpy arrays.
    """
    frame = pd.read_csv(train_dataset_path)
    feature_columns = [str(col) for col in range(1, 1583)]
    X_train = frame[feature_columns].values
    Y_train = frame['label'].values
    return X_train, Y_train
# Accuracy, precision, recall and F1 for a set of predictions.
def evaluate(targets, predictions):
    """Compute accuracy plus macro-averaged precision/recall/F1.

    Returns:
        dict: keys 'acc', 'f1', 'precision', 'recall'.
    """
    performance = {}
    performance['acc'] = accuracy_score(targets, predictions)
    performance['f1'] = f1_score(targets, predictions, average='macro')
    performance['precision'] = precision_score(targets, predictions, average='macro')
    performance['recall'] = recall_score(targets, predictions, average='macro')
    return performance
# Run 5-fold cross-validation.
def cross(X, Y):
    """Stratified 5-fold cross-validation using the module-level
    ``classifier`` (set in the __main__ block); prints per-fold and mean
    accuracy, precision, recall and F1.

    Note: ``mean`` comes from ``from numpy import *``.
    """
    kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
    kf.get_n_splits(X, Y)
    rval0 = []   # per-fold accuracy
    rval1 = []   # per-fold precision (macro)
    rval2 = []   # per-fold recall (macro)
    rval3 = []   # per-fold F1 (macro)
    for train_index, test_index in kf.split(X, Y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = Y[train_index], Y[test_index]
        classifier.fit(X_train, y_train)
        # Calculate the score (Accuracy)
        # NOTE(review): train_score is computed but never used.
        train_score = classifier.score(X_train, y_train)
        test_score = classifier.score(X_test, y_test)
        predict = classifier.predict(X_test)
        p = precision_score(y_test, predict, average='macro')
        r = recall_score(y_test, predict, average='macro')
        f1 = f1_score(y_test, predict, average='macro')
        rval0.append(test_score)
        rval1.append(p)
        rval2.append(r)
        rval3.append(f1)
    print('Accuracy rate of five cross-validation:', rval0)
    print('Precision of five cross-validation:', rval1)
    print('Recall of five cross-validation:', rval2)
    print('F1 of five cross-validation:', rval3)
    print('ACC mean:', mean(rval0))
    print('Precision mean:', mean(rval1))
    print('Recall mean:', mean(rval2))
    print('F1 mean:', mean(rval3))
if __name__ == "__main__":
# 初始化各个路径
train_dataset_path='./feature/train_1582.csv'
X_train,Y_train = load_train_data(train_dataset_path)
#把训练数据归一化
X_train = StandardScaler().fit_transform(X_train)
print(X_train.shape)
#将训练数据划分为训练集和验证集
X_train_1, X_validation_1, Y_train_1, Y_validation_1 = train_test_split (X_train,Y_train,test_size=0.3,random_state=1)
#定义分类器
classifier = svm.SVC(kernel='rbf', probability = True, gamma = 'auto')
classifier.fit(X_train_1,Y_train_1)
#估算Accuracy,precision,recall,f1值
train_score = classifier.score(X_train_1,Y_train_1)
print('train Accuracy:',train_score)
predict_validation = classifier.predict(X_validation_1)
performance = evaluate(Y_validation_1, predict_validation)
print('validation :',performance)
#五次交叉验证
cross(X_train,Y_train)
| true |
713722f4059b611d3e1dae0160c20cb7809debed | Python | nocibambi/ds-practice | /CS fundamentals/Random Walk with Python/2D_random_walk.py | UTF-8 | 734 | 3.71875 | 4 | [
"MIT"
] | permissive | import numpy as np
import pylab
import random
# number of steps
n = 100000
# two arrays containing the x and y coordinates
x = np.zeros(n)
y = np.zeros(n)
# filling coordinates with random variables
for i in range(1, n):
val = random.randint(1, 4)
if val == 1:
x[i] = x[i - 1] + 1
y[i] = y[i - 1]
elif val == 2:
x[i] = x[i - 1] - 1
y[i] = y[i - 1]
elif val == 3:
x[i] = x[i - 1]
y[i] = y[i - 1] + 1
else:
x[i] = x[i - 1]
y[i] = y[i - 1] - 1
# Plotting
pylab.title("Random Walk ($n = " + str(n) + "$ steps)")
pylab.plot(x, y)
pylab.savefig("rand_walk" + str(n) + ".png", bbox_inches="tight", dpi=600)
#pylab.show()
| true |
2b5b08a884765f29f946a6f68cb33d203f840b8e | Python | lo-tp/leetcode | /dp/689MaximumSumOf3Non-OverlappingSubarrays.py | UTF-8 | 1,161 | 2.796875 | 3 | [] | no_license | class Solution(object):
def maxSumOfThreeSubarrays(self, nums, k):
size = len(nums)
data = []
sum = 0
for i in xrange(0, k-1):
sum += nums[i]
for index, i in enumerate(nums[k-1: size]):
sum += i
data.append((index, sum))
sum -= nums[index]
m_start, m_end = k, len(data)-k-1
dp = [[0, data[i][0], 0, data[i][1]] for i in xrange(m_start, m_end+1)]
index = max_index = len(data)-1
for i in xrange(len(dp)-1, -1, -1):
dp[i][3] += data[max_index][1]
dp[i][2] = data[max_index][0]
index -= 1
if data[index][1] >= data[max_index][1]:
max_index = index
index = max_index = 0
for i in xrange(0, len(dp)):
dp[i][3] += data[max_index][1]
dp[i][0] = data[max_index][0]
index += 1
if data[index][1] > data[max_index][1]:
max_index = index
max_index = 0
for i in xrange(1, len(dp)):
if dp[i][3] > dp[max_index][3]:
max_index = i
return dp[max_index][0:3]
| true |
366280feb26bc5a77ca54ed3e3007fbd99644708 | Python | qwerboo/sec | /main_0810.py | UTF-8 | 4,684 | 2.96875 | 3 | [] | no_license | """."""
import re
import w3lib.html
import requests
import nltk
import pandas as pd
def clean(rawdata):
    r"""Clean raw HTML.

    - drops <table> blocks whose digit/word-character ratio exceeds 25%
    - strips all remaining tags
    - unescapes HTML entities
    - replaces non-breaking spaces (\xa0) with plain spaces
    """
    # Process tables one at a time, left to right.
    # NOTE(review): the pattern has no re.S flag, so a <table> whose markup
    # spans multiple lines will not match — confirm inputs are single-line
    # or that this is intended.
    while True:
        # table = re.search('<table(?!.*<table).*?</table>', rawdata)
        table = re.search('<table.*?</table>', rawdata)
        if not table:
            break
        # import pdb; pdb.set_trace()
        table = w3lib.html.replace_tags(table.group(), ' ')
        table = w3lib.html.replace_entities(table)
        table = re.sub('\xa0', ' ', table)
        nLen = len(re.findall('\d', table))   # digit characters
        cLen = len(re.findall('\w', table))   # word characters
        # Drop number-heavy tables entirely.
        if cLen != 0 and nLen/cLen > 0.25:
            table = ''
        # Replace the first (leftmost) table with its cleaned text.
        # NOTE(review): `table` is used as a re.sub replacement string, so
        # literal backslashes in table text would be interpreted — verify.
        # rawdata = re.sub('<table(?!.*<table).*?</table>', table, rawdata, 1)
        rawdata = re.sub('<table.*?</table>', table, rawdata, 1)
    # Finally clean the rest of the document.
    rawdata = w3lib.html.replace_tags(rawdata, ' ')
    rawdata = w3lib.html.replace_entities(rawdata)
    rawdata = re.sub('\xa0', ' ', rawdata)
    return rawdata
def freq(content):
    """Tokenise *content* into sentences and return, for each non-empty
    sentence, an ``nltk.FreqDist`` of its upper-cased alphabetic words.
    Single-character and non-alphabetic tokens are ignored.
    """
    results = []
    for sentence in nltk.sent_tokenize(content):
        tokens = [word.upper()
                  for word in nltk.word_tokenize(sentence)
                  if len(word) > 1 and word.isalpha()]
        if tokens:
            results.append(nltk.FreqDist(tokens))
    return results
def count(freqs):
    """Aggregate per-sentence word frequencies into document-level ratios.

    Reported ratios (word lists come from module globals set in __main__):
        uncertainty/McDonald, ambiguity/McDonald, risk/McDonald,
        ambiguity-sentences/sentences, risk-sentences/sentences,
        negative/McDonald, positive/McDonald

    NOTE(review): divides by ``McDonald`` and ``Sentences`` — raises
    ZeroDivisionError if no dictionary words (or no sentences) are found.
    """
    Sentences = len(freqs)
    Uncertainty = 0
    McDonald = 0
    Ambiguity = 0
    Risk = 0
    Ambiguity_sent = 0
    Risk_sent = 0
    Negative = 0
    Positive = 0
    for record in freqs:
        # Per-sentence flags so a sentence is counted at most once per list.
        flag_risk = 0
        flag_ambiguity = 0
        for word in risk_list:
            i = record.get(word)
            if i:
                Risk += i
                Uncertainty += i
                if flag_risk == 0:
                    Risk_sent += 1
                    flag_risk = 1
        for word in ambiguity_list:
            i = record.get(word)
            if i:
                Ambiguity += i
                Uncertainty += i
                if flag_ambiguity == 0:
                    Ambiguity_sent += 1
                    flag_ambiguity = 1
        for word in uncertainty_list:
            Uncertainty += record.get(word, 0)
        for word in mcDonald_list:
            McDonald += record.get(word, 0)
        for word in negative_list:
            Negative += record.get(word, 0)
        for word in positive_list:
            Positive += record.get(word, 0)
    result = dict()
    result['Uncertainty_McDonald'] = round(Uncertainty/McDonald, 4)
    result['Ambiguity_McDonald'] = round(Ambiguity/McDonald, 4)
    result['Risk_McDonald'] = round(Risk/McDonald, 4)
    result['Ambiguity_sent_Sentences'] = round(Ambiguity_sent/Sentences, 4)
    result['Risk_sent_Sentences'] = round(Risk_sent/Sentences, 4)
    result['Negative_McDonald'] = round(Negative/McDonald, 4)
    result['Positive_McDonald'] = round(Positive/McDonald, 4)
    return result
# NOTE(review): `global` statements at module level are no-ops; the names
# below become module globals simply by being assigned in __main__.
global risk_list
global ambiguity_list
global uncertainty_list
global mcDonald_list
global negative_list
global positive_list

if __name__ == '__main__':
    """."""
    # Load the word lists used by count(); one word per line.
    # NOTE(review): file handles are reused/never closed — consider `with`.
    f = open('demand20180810/risk.txt')
    risk_list = list(l.strip() for l in f.readlines())
    f = open('demand20180810/ambiguity.txt')
    ambiguity_list = list(l.strip() for l in f.readlines())
    f = open('demand20180810/uncertainty.txt')
    uncertainty_list = list(l.strip() for l in f.readlines())
    f = open('demand20180810/negative.txt')
    negative_list = list(l.strip() for l in f.readlines())
    f = open('demand20180810/positive.txt')
    positive_list = list(l.strip() for l in f.readlines())
    # First column of the Loughran-McDonald master dictionary.
    df = pd.read_excel('demand20180810/LoughranMcDonald_MasterDictionary_2016.xlsx', usecols=0)
    mcDonald_list = list(df['Word'])
    # Fetch one SEC 10-K filing, clean it, and report the word ratios.
    url = "https://www.sec.gov/Archives/edgar/data/1590714/000159071418000033/pah10-k20171231document.htm"
    r = requests.get(url)
    content = clean(r.text)
    print('清洗完成!')
    freqs = freq(content)
    print('计算每个句子的单词频率完成!')
    result = count(freqs)
    print(result)
| true |
ab09dfa6714877f46c0549c58bbeaff05316bd7b | Python | alirezazahiri/WorkShop-Lecture | /essentials/strings.py | UTF-8 | 1,004 | 4.65625 | 5 | [] | no_license | #TODO: make title
# """
# given a string, you should make it title;
# 1 < len(string) < 1000
# example :
# helLoWoRLd -> Helloworld
# """
print('non-pythonic way : ')
string = 'helLoWoRLd' # input()
string = list(string)
string[0] = string[0].upper()
for i in range(1, len(string)):
string[i] = string[i].lower()
# ['a', 'l', 'i']
# 'a'+'l'+'i' -> 'ali'
print(''.join(string))
# or do it more like a python programmer
print('pythonic way:')
string = 'helLoWoRLd' # input()
print(string[0].upper() + string[1:].lower())
#TODO: count items
# """
# given a string, you should count alphabets, numbers and non-alpha-num characters
# example:
# 2(*8ffsidfu03 -> 7 4 2
# """
print()
string = '2(*8ffsidfu03' # input()
alphas, nums, other = 0, 0, 0
for i in range(len(string)):
if string[i].isalpha():
alphas += 1
elif string[i].isnumeric():
nums += 1
else:
other += 1
print(alphas, nums, other)
| true |
98d5da4d3fd96d3fbc065e0fde2fdf792baf5a07 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2812/60753/234751.py | UTF-8 | 257 | 2.921875 | 3 | [] | no_license | import sys
import re
# Read all integers embedded in stdin; the first number is a count and is
# discarded, the rest are the values of interest.
tokens = re.findall(r"\d+", sys.stdin.read())
values = [int(tok) for tok in tokens[1:]]
# Answer: how many distinct non-zero values appear.
# (Rewrite of the original, which shadowed the builtin `filter` and scanned
# the set with a manual loop to subtract the zero; output is identical, and
# empty input no longer raises IndexError.)
distinct = set(values)
distinct.discard(0)
print(len(distinct))
e38bc85b7d4ad6a888917203c03ff17a96cd6765 | Python | isjeffcom/coronavirusDataGlobal | /init.py | UTF-8 | 5,878 | 2.75 | 3 | [
"MIT"
] | permissive | '''
Create By JeffWu
https://isjeff.com
IMPORT ALL DATA INTO DATABASE BY MAIN()
CACHE ALL DATA TO JSON BY CACHE()
PLEASE CLEAN DATA FOLDER AND CLEAN DATABASE BEFORE RUN THIS SCRIPT
FOR INIT ONLY
MAKE SURE YOU HAVE GIT CLONED AT ROOT https://github.com/CSSEGISandData/COVID-19
'''
import json
import pymysql.cursors
import csv
import os
import os.path
from os import path
# Declare the path of the JHU CSSE daily-report CSVs.
dataPath = "./COVID-19/csse_covid_19_data/csse_covid_19_daily_reports"

# Clone the dataset on first run, otherwise pull the latest commits.
if not path.exists(dataPath):
    print("Get dataset from JHU CSSE: https://github.com/CSSEGISandData/COVID-19")
    os.system("git clone https://github.com/CSSEGISandData/COVID-19")
else:
    print("Update Dataset from JHU CSSE: https://github.com/CSSEGISandData/COVID-19")
    os.system('cd COVID-19 && git pull origin')
    print("Dataset Updated")

# Create the JSON cache folder if it does not exist.
if not os.path.exists('./data'):
    os.makedirs('./data')

# Daily-report file names to process.
files = os.listdir(dataPath)

# Load the database configuration (host/user/password/db).
with open('./conf.json') as f:
    db_conf = json.load(f)

# Connect to MySQL; DictCursor returns rows as dicts.
connection = pymysql.connect(host=db_conf['host'],
                             user=db_conf['user'],
                             password=db_conf['password'],
                             db=db_conf['db'],
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
# clear and create db table
def createTable():
    """Drop and recreate the `history` table (destructive: all rows lost).

    Uses the module-level `connection`.  No explicit commit — DDL statements
    are presumably auto-committed by MySQL (confirm for this setup).
    """
    print("Clean and create history table")
    with connection.cursor() as cursor:
        drop = "DROP TABLE IF EXISTS `history`"
        cursor.execute(drop)
        create = "CREATE TABLE IF NOT EXISTS `history` (`id` int(32) NOT NULL AUTO_INCREMENT,`area_code` varchar(64) DEFAULT NULL,`admin_area` varchar(300) DEFAULT NULL,`province_state` varchar(300) DEFAULT NULL,`country_region` varchar(300) DEFAULT NULL,`last_update` varchar(300) DEFAULT NULL,`la` varchar(300) DEFAULT NULL,`lo` varchar(300) DEFAULT NULL,`confirmed` int(11) DEFAULT NULL,`death` int(11) DEFAULT NULL,`recovered` int(11) DEFAULT NULL,`active` int(11) DEFAULT NULL,`combined_key` text DEFAULT NULL,PRIMARY KEY (`id`))"
        cursor.execute(create)
# insert 1 row to database
def write(code, admin, province, country, update, la, lo, confirmed, death, recovered, active, key):
    """Insert one history row into MySQL via the module-level `connection`.

    Empty-string counts are normalised to 0 before insertion.

    NOTE(review): there is no `except` clause — database errors propagate,
    but the `finally` still prints "insert done", which can be misleading.
    """
    if confirmed == '':
        confirmed = 0
    if death == "":
        death = 0
    if recovered == "":
        recovered = 0
    if active == "":
        active = 0
    try:
        with connection.cursor() as cursor:
            # INSERT
            sql = "INSERT INTO `history` (`area_code`, `admin_area`, `province_state`, `country_region`, `last_update`, `la`, `lo`, `confirmed`, `death`, `recovered`, `active`, `combined_key`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            cursor.execute(sql, (code, admin, province, country, update, la, lo, confirmed, death, recovered, active, key))
        connection.commit()
    finally:
        print(update+" - insert done")
        #connection.close()
# construct a data object for write json
def constData(code, admin, province, country, update, la, lo, confirmed, death, recovered, active, key):
    """Bundle one report row into the dict shape used for the JSON cache."""
    return {
        "area_code": code,
        "admin_area": admin,
        "province_state": province,
        "country_region": country,
        "last_update": update,
        "la": la,
        "lo": lo,
        "confirmed": confirmed,
        "death": death,
        "recovered": recovered,
        "active": active,
        "combined_key": key,
    }
# create cache json file
def createCache(data, update):
    """Serialise *data* to ./data/<update>.json and log the write."""
    target = './data/' + update + '.json'
    with open(target, 'w') as outfile:
        json.dump(data, outfile)
    print(update + " - json cached")
# main loop
def main():
    """Import every daily-report CSV into the database and write one JSON
    cache file per report date.

    Row widths changed over time in the JHU dataset: 6, 8 and 12 columns
    are handled; the header row (line 1) is skipped.

    NOTE(review): rows of any other width still append the empty dict `d`
    to the JSON output — confirm that is intended.
    """
    print("Main Process Started")
    for file in files:
        if not os.path.isdir(file):
            if file.find("csv") != -1:
                with open(dataPath+"/"+file) as csv_file:
                    csv_reader = csv.reader(csv_file, delimiter=',')
                    line_count = 0
                    # Report date comes from the file name (before ".csv").
                    fnIdx = file.find(".")
                    thisUpdate = file[0:fnIdx]
                    output = []
                    for row in csv_reader:
                        line_count = line_count + 1
                        d = {}
                        if line_count != 1:
                            # Early format: province, country, update, c, d, r.
                            if len(row) == 6:
                                write('', '', row[0], row[1], thisUpdate, '', '', row[3], row[4], row[5], 0, '')
                                d = constData('', '', row[0], row[1], row[2], '', '', row[3], row[4], row[5], 0, '')
                            # Mid format adds latitude/longitude.
                            if len(row) == 8:
                                write('', '', row[0], row[1], thisUpdate, row[6], row[7], row[3], row[4], row[5], 0, '')
                                d = constData('', '', row[0], row[1], row[2], row[6], row[7], row[3], row[4], row[5], 0, '')
                            # Late format: FIPS/admin area/active/combined key.
                            if len(row) == 12:
                                write(row[0], row[1], row[2], row[3], thisUpdate, row[5], row[6], row[7], row[8], row[9], row[10], row[11])
                                d = constData(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11])
                            output.append(d)
                    createCache(output, thisUpdate)
# on create: rebuild the table, run the full import, then release the DB.
createTable()
#main()
main()
# close database
connection.close()
print("SUCCESS: Init Completed")
print("Notice: You can set up timer for regularly update by excute update.py")
print("Run APIs: Start restful APIs by: `npm i && npm run dev` (NodeJS Required)")
print("Twin Project: https://github.com/isjeffcom/coronvirusFigureUK")
print("Jeff Wu: https://isjeff.com")
3bbb054d86280e44c98cc1ea8e9dfa8904fb50c0 | Python | jellis505/IMDB_Utilities | /IMDBUtilities.py | UTF-8 | 9,298 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# Created by Joe Ellis
# Columbia University DVMM Lab
# Standard Libs
import os
import json
# Extended Libs
import argparse
from bs4 import BeautifulSoup
import requests
class IMDBUtils():
def __init__(self, imdb_id=False):
""" This function contains the initlialization for the
"""
self.imdb_query_url = "http://www.imdb.com/search/title"
self.imdb_base = "http://www.imdb.com/"
self.genres = ["action", "animation", "comedy", "documentary",
"family", "film-noir", 'horror', "musical",
"romance", "sport", "war", "adventure",
"biography", "crime", "drama", "fantasy",
"history", "music", "mystery", "sci-fi",
"thriller", "western"]
self.query_base = "http://www.imsdb.com/scripts/"
if imdb_id == False:
self.movie_base = False
else:
self.movie_base = self.imdb_base + "title/" + imdb_id + "/"
def grab_movie_script(self, title):
""" grab the movie script for a given title of a movie
The title must be exact, if the html doesn't exist this doesn't work"""
query_url = query_base + title.replace(" ", "-") + ".html"
resp = requests.get(query_url)
if resp.ok:
parsed_script = self.parse_html()
else:
print "ERROR URL DOES NOT EXIST:", query_url
print "PROGRAM BREAKING"
quit()
return parsed_script
    def parse_html(self, text):
        """Parse a raw IMSDb script page into a list of (kind, line) tuples.

        The script lives in a <pre> tag; lines are classified by their
        leading-space depth into 'desc' (scene description), 'speech'
        (dialogue) and 'name' (character name).  Upper-case lines at the
        description depth are dropped (scene headings).
        """
        # Create the soup object
        soup = BeautifulSoup(text)
        # Some pages nest a second <pre> inside the first.
        pre_obj = soup.find("pre")
        script_text = pre_obj.find("pre")
        if script_text is not None:
            script_text = script_text.text
        else:
            script_text = pre_obj.text
        # Now we need to parse the pre_obj line by line
        # We will define each line with either description, words, name, etc.
        # Split the lines using the "\n" character
        lines = script_text.split("\n")
        # Histogram of leading-space counts; count_spaces_beg() is defined
        # elsewhere in this file (presumably: number of leading spaces —
        # confirm).
        line_spaces = {}
        total = 0
        for line in lines:
            length = count_spaces_beg(line)
            if not (length in line_spaces.keys()):
                line_spaces[length] = 1
                total += 1
            else:
                line_spaces[length] += 1
                total += 1
        # Keep only the indentation depths that cover >10% of all lines.
        good_vals = []
        for key, val in line_spaces.iteritems():
            # NOTE(review): `key is not 0` compares identity, which only
            # works because CPython caches small ints — should be `key != 0`.
            if (key is not 0) and (val > 0.1 * total):
                good_vals.append(key)
        # Sort the values
        good_vals.sort()
        # Shallow -> deep: description, speech, character name.
        # NOTE(review): assumes at least three common depths exist;
        # otherwise this raises IndexError.
        desc_start = good_vals[0]
        speech_start = good_vals[1]
        name_start = good_vals[2]
        # Now that we have the start of each val, let's create a
        # setup of the script
        script_parse = []
        for line in lines:
            length = count_spaces_beg(line)
            if length in good_vals:
                # Now check through each section and then add them
                # NOTE(review): `is` comparisons below rely on small-int
                # caching as well.
                if length is desc_start:
                    if not line[length:].isupper():
                        script_parse.append(('desc', line[length:]))
                elif length is speech_start:
                    script_parse.append(('speech', line[length:]))
                elif length is name_start:
                    script_parse.append(('name', line[length:]))
        # Debug print out our script
        return script_parse
    def grab_genre_movies(self, genre1, limit=1000, genre2=None):
        """Return (href, title) pairs for feature films matching *genre1*
        (optionally intersected with *genre2*), paging the IMDb advanced
        title search 100 results at a time up to *limit* titles.
        """
        # Create the query: feature films with at least 100 votes.
        #imdb_query_url = "http://www.imdb.com/search/title"
        if not genre2:
            q_parameters = {"count": 100,
                            "genres": genre1,
                            "num_votes": "100,",
                            "title_type": "feature"}
        else:
            q_parameters = {"count": 100,
                            "genres": ",".join((genre1, genre2)),
                            "num_votes": "100,",
                            "title_type": "feature"}
        # Get the queries, one result page per 100 titles.
        title_and_links = []
        for i in range(1, limit, 100):
            # Go through these pages then parse the html
            q_parameters['start'] = i
            r = requests.get(self.imdb_query_url, params=q_parameters)
            if not r.ok:
                print "Something wrong with the request"
                print r.url
            else:
                soup = BeautifulSoup(r.text)
                rows = soup.find_all("tr")
                # NOTE(review): 3 rows is taken to mean an empty result
                # page — confirm against the page layout.
                if len(rows) == 3:
                    break # This breaks out of the request cycle
                # Skip the header and footer rows; title link is in the
                # third cell.
                for row in rows[1:-1]:
                    tds = row.find_all("td")
                    if len(tds) > 1:
                        title_td = tds[2]
                        link = title_td.find("a")
                        title_and_links.append((link.get("href"), link.string))
        return title_and_links
    def grab_IMDB_keywords(self, movie_id=False):
        """Return (keyword, relevance_sentence) pairs from a movie's
        IMDB keywords page.

        Uses `movie_id` when given, otherwise the movie URL the class
        was initialized with; aborts the program when neither exists.
        Returns 0 when the HTTP request fails.
        """
        # Resolve which keywords URL to fetch.
        if movie_id != False:
            r_url = self.imdb_base + "/title/" + movie_id + "/keywords"
        elif self.movie_base != False:
            r_url = self.movie_base + "keywords"
        else:
            print "No Supplied URL"
            print "Program Breaking"
            quit()
        r = requests.get(r_url)
        # Bail out early when the request failed.
        if not r.ok:
            print "Couldn't grab keywords, breaking"
            return 0
        else:
            # Keywords sit in .sodatext divs; their relevance blurbs in
            # .interesting-count-text divs, in matching order.
            soup = BeautifulSoup(r.text)
            sodatext_divs = soup.find_all("div", {"class" : "sodatext"})
            interesting_divs = soup.find_all("div", {"class" : "interesting-count-text"})
            text_words = []
            for sd,ind in zip(sodatext_divs,interesting_divs):
                # Keyword text, coerced to plain ASCII.
                a_string = sd.find("a").string
                keyword = a_string.encode('ascii', 'ignore')
                # Relevance sentence, e.g. "12 of 14 found this relevant".
                a_string = ind.find("a").string.strip()
                relevance_sentence = a_string.encode('ascii', 'ignore')
                text_words.append((keyword, relevance_sentence))
            return text_words
    def grab_actors(self, movie_id=False):
        """Return (actor_name, actor_href) pairs from a movie's full
        credits page; see grab_IMDB_keywords for the URL-resolution rules.
        Returns 0 when the HTTP request fails.
        """
        # Resolve which fullcredits URL to fetch.
        if movie_id != False:
            r_url = self.imdb_base + "/title/" + movie_id + "/fullcredits"
        elif self.movie_base != False:
            r_url = self.movie_base + "fullcredits"
        else:
            print "No Supplied URL"
            print "Program Breaking"
            quit()
        r = requests.get(r_url)
        if not r.ok:
            print "Couldn't grab keywords, breaking"
            return 0
        else:
            # Cast entries are <td itemprop="actor"> cells inside the
            # .cast_list table of the full-credits section.
            soup = BeautifulSoup(r.text)
            div_fullcredit = soup.find("div", {"id": "fullcredits_content"})
            table = div_fullcredit.find("table", {"class": "cast_list"})
            td_text_divs = table.find_all("td", {"itemprop": "actor"})
            actors_and_links = []
            for td in td_text_divs:
                person_link = td.find("a")['href']
                person_name = td.find("span").string
                actors_and_links.append((person_name, person_link))
            return actors_and_links
    def grab_actor_info(self, actor_id):
        """Placeholder for fetching details about a single actor.
        Not implemented yet."""
        pass
if __name__ == "__main__":
    """ Test the functionality of the IMDBUtilities class """
    # Smoke tests against the live IMDB site: each scraper is exercised
    # once with the class bound to a title and once with the id passed
    # per call, then genre search is tried.
    imdb_id = "tt2322441"
    imdb = IMDBUtils(imdb_id)
    actors_and_links = imdb.grab_actors()
    print "TESTING: INITALIZED CLASS WITH TITLE"
    if len(actors_and_links) > 0:
        print "Actors Found: SUCCESS"
    else:
        print "Actors Found: FAIL"
    keywords = imdb.grab_IMDB_keywords()
    if len(keywords) > 0:
        print "Keywords Found: SUCCESS"
    else:
        print "Keywords Found: FAIL"
    print "++++++++++++++++++++++"
    # Same checks with an uninitialized instance and explicit ids.
    imdb = IMDBUtils()
    actors_and_links = imdb.grab_actors(imdb_id)
    print "TESTING: INITALIZED CLASS WITHOUT TITLE"
    if len(actors_and_links) > 0:
        print "Actors Found: SUCCESS"
    else:
        print "Actors Found: FAIL"
    keywords = imdb.grab_IMDB_keywords(imdb_id)
    if len(keywords) > 0:
        print "Keywords Found: SUCCESS"
    else:
        print "Keywords Found: FAIL"
    print "+++++++++++++++++++++++"
    print "TESTING: FINDING MOVIE WITH GENRE"
    movies = imdb.grab_genre_movies(imdb.genres[0], 100)
    if len(movies) > 0:
        print "Movies Found: SUCCESS"
    else:
        print "Movies Found: FAIL"
| true |
bb6d21d7bf617db1f01184604cc84ce49609ed23 | Python | afilipch/nrlbio | /graphics/nontemplate_differential.py | UTF-8 | 2,706 | 2.625 | 3 | [] | no_license | # /usr/bin/python
'''Draws a plot of nontemplate addiction to miRNAs for wt versus ko'''
import sys
import argparse
from collections import defaultdict
import math
from pybedtools import BedTool
import matplotlib.pyplot as plt
import numpy as np
from nrlbio.pyplot_extension import remove_top_left_boundaries
# Command-line interface and shared plotting constants.
parser = argparse.ArgumentParser(description='Draws a plot of nontemplate addiction to miRNAs for wt versus ko');
parser.add_argument('path', metavar = 'N', nargs = 2, type = str, help = "Path to the output of 'nontemplate_advanced.py' script, WT and KO");
parser.add_argument('--norm', nargs = '?', default=False, const = True, type = bool, help = "If set, binding modes will be normalized");
parser.add_argument('--output', nargs = '?', type = str, help = "Path to the output");
parser.add_argument('--extlen', nargs = '?', required=True, type = int, help = "The length of downstream extensions oof mature miRNAs");
args = parser.parse_args();
# One bar color per nucleotide (A, C, T, G) and one title per sample.
colors = ('greenyellow', 'skyblue', 'peachpuff', 'navy')
titles = ('WT', 'KO')
def path2weights(path):
    """Read a nontemplate_advanced.py output table.

    Columns 1-4 of each row are per-nucleotide counts, column 6 holds
    comma-separated position labels.  Returns (counts, xlabels, length)
    where counts is a list of four numpy arrays normalized by the
    largest per-position total, with a spacer column inserted at the
    boundary between the mature miRNA and its downstream extension
    (args.extlen positions from the end).
    """
    counts = [[], [], [], []]
    xlabels = [];
    totals = [];
    with open(path) as f:
        # Skip the header line (Python 2 file iterator).
        f.next();
        for l in f:
            a = l.strip().split('\t')
            for i in range(4):
                counts[i].append(float(a[i+1]))
            xlabels.append("\n".join(a[5].split(',')));
            totals.append(sum([float(a[x+1]) for x in range(4)]))
    maximum = max(totals);
    length = len(counts[1])
    # Insert an empty spacer column separating miRNA body and extension.
    xlabels.insert(length-args.extlen, ' ')
    for l in counts:
        l.insert(length-args.extlen, 0)
    length+=1;
    counts = [np.array(x)/maximum for x in counts]
    return counts, xlabels, length
def draw_subplot(ax, counts, xlabels, length, title):
    """Render one sample's stacked per-nucleotide histogram onto `ax`.

    `counts` are the four normalized weight arrays from path2weights,
    `xlabels` the per-position tick labels, `title` the sample name.
    """
    remove_top_left_boundaries(ax)
    ylim = 1.01
    ax.set_ylim((0,ylim))
    ax.set_title(title)
    #plt.axis((0, length+1, 0, ylim))
    #set bins and boundaries
    boundaries = range(0, length);
    bins = range(0, length+1);
    # Stacked histogram: one layer per nucleotide, in (A, C, T, G) order.
    ax.hist([boundaries]*4, weights=counts, bins=bins, label=('A', 'C', 'T', 'G'), align='right', rwidth=0.7, color=colors, stacked=True)
    ax.set_xticks(range(1, length+1));
    ax.set_xticklabels(xlabels, rotation=0)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # Hide the x tick marks themselves; only the labels remain.
    for tic in ax.xaxis.get_major_ticks():
        tic.tick1On = tic.tick2On = False
    ax.set_ylabel('fraction of mappings')
    ax.legend(loc=(0.85, 0.75),prop={'size':10}, frameon=False)
# Draw WT and KO side by side and either save or show the figure.
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=False, figsize=(16, 5))
for ax, path, title in zip(axes, args.path, titles):
    counts, xlabels, length = path2weights(path);
    draw_subplot(ax, counts, xlabels, length, title);
#output plot
if(args.output):
    plt.savefig(args.output, bbox_inches='tight')
else:
    plt.show();
6155f2c5709bb1d62bcd788067bc664b7586c470 | Python | Aasthaengg/IBMdataset | /Python_codes/p02577/s555919261.py | UTF-8 | 80 | 2.859375 | 3 | [] | no_license | N = list(map(int,input()))
if(sum(N) % 9 == 0):
print("Yes")
else:
print("No") | true |
51b5de9096bdb80ec0674c108b54c380d9396a7b | Python | pfeffer90/lykkex-cli | /lykke/commands/services/lykkex_service.py | UTF-8 | 4,134 | 2.796875 | 3 | [] | no_license | import datetime
import logging as log
import lykkex
from lykke.commands.services.time_service import get_current_time
class LykkexService(object):
    """Thin wrapper around the ``lykkex`` REST client.

    Every query method returns a (time_stamp, result) pair where the
    timestamp is taken locally via get_current_time() just before the
    remote call.
    """
    @staticmethod
    def get_balance(api_key):
        """Return (time_stamp, balance_list); each entry has
        'AssetId' and 'Balance' keys."""
        log.info("Retrieve current balance.")
        time_stamp = get_current_time()
        balance = lykkex.get_balance(api_key)
        log.info("Number of assets: {}".format(len(balance)))
        for x in range(0, len(balance)):
            log.info(format('Current wealth ' + balance[x]['AssetId'].encode() + ': ' + str(balance[x]['Balance'])))
        return time_stamp, balance
    @staticmethod
    def get_pending_orders(api_key):
        """Return (time_stamp, pending_orders); the list may be empty."""
        log.info("Get pending orders.")
        time_stamp = get_current_time()
        pending_orders = lykkex.get_pending_orders(api_key)
        if not pending_orders:
            log.info("No pending orders")
        return time_stamp, pending_orders
    @staticmethod
    def send_market_order(api_key, asset_pair, asset, order_action, volume):
        """Place a market order; return (time_stamp, fill_price).

        Raises RuntimeError when the exchange reports an error.
        """
        log.info("Send market order - {}".format(asset))
        time_stamp = get_current_time()
        response = lykkex.send_market_order(api_key, asset_pair, asset, order_action, volume)
        if response['Error']:
            log.info("Error: Market order not successful")
            raise RuntimeError("Error in sending market order. Check response {}".format(response))
        final_price = response['Result']
        log.info("Trade successful at price {}".format(final_price))
        return time_stamp, final_price
    @staticmethod
    def send_limit_order(api_key, asset_pair, asset, price, order_action='BUY', volume='0.1'):
        """Place a limit order; return (time_stamp, order_id)."""
        log.info("Send market order - {}".format(asset))
        time_stamp = get_current_time()
        response = lykkex.send_limit_order(api_key, asset_pair, asset, price, order_action, volume)
        log.info("Limit order placed")
        order_id = str(response)
        return time_stamp, order_id
    @staticmethod
    def control_limit_order(api_key, order_id):
        """Return (time_stamp, status) for a previously placed limit order."""
        # NOTE(review): logging uses %-style formatting, so the "{}" here
        # is logged literally; likely meant .format(order_id).
        log.info("Check status of limit order {}", order_id)
        time_stamp = get_current_time()
        content = lykkex.get_order_status(api_key, order_id)
        status = content['Status']
        return time_stamp, status
    @staticmethod
    def get_price(asset_pair_id, side='BUY'):
        """Return (time_stamp, best_price, available_volume) for one side
        ('BUY' or 'SELL') of the pair's order book."""
        log.info("Retrieve price: {}".format(side))
        time_stamp = get_current_time()
        order_book = lykkex.get_order_book(asset_pair_id)
        price = LykkexService.get_asset_price(order_book, side)
        volume = LykkexService.get_asset_trading_volume(order_book, side)
        log.info("Timestamp: {}".format(time_stamp))
        log.info("Price: {}".format(price))
        return time_stamp, price, volume
    def get_latency(self, asset_pair_id):
        """Log the delay between the local clock and the order-book
        timestamp; returns nothing."""
        time_stamp = get_current_time()
        order_book = lykkex.get_order_book(asset_pair_id)
        time_ob = self.get_time_stamp_from_order_books(order_book)
        time_delta = (time_stamp - time_ob).total_seconds()
        log.info("System latency: {} secs".format(time_delta))
    @staticmethod
    def get_asset_trading_volume(order_books, side):
        """Extract the top-of-book volume for the requested side.

        NOTE(review): order_books[0] appears to be the sell book and
        order_books[1] the buy book -- confirm against the lykkex API.
        """
        if side == 'BUY':
            return order_books[1]['Prices'][0]['Volume']
        elif side == 'SELL':
            return order_books[0]['Prices'][0]['Volume']
        else:
            # Returns the logger's return value (None) on bad input.
            return log.error('No valid input')
    @staticmethod
    def get_time_stamp_from_order_books(order_books):
        """Parse the (naive, microsecond-precision) order-book timestamp."""
        time_stamp_ob = order_books[1]['Timestamp']
        val = datetime.datetime.strptime(time_stamp_ob, '%Y-%m-%dT%H:%M:%S.%f')
        return val
    @staticmethod
    def get_asset_price(order_books, side):
        """Extract the best price for the requested side.

        Raises RuntimeError when the relevant price ladder is empty.
        """
        try:
            if side == 'BUY':
                # Last entry of the buy book.
                price = order_books[1]['Prices'][-1]['Price']
            elif side == 'SELL':
                # First entry of the sell book.
                price = order_books[0]['Prices'][0]['Price']
        except IndexError as e:
            log.error("Could not extract price from order books.")
            log.error("{}".format(order_books))
            raise RuntimeError(e.message)
        return price
    def __init__(self):
        # No state is kept; construction only logs.
        log.info("Initialize Lykkex connector.")
| true |
c63f8d5556fd41d46d6241eb1c800399c26aaff0 | Python | zahmdf/Materi_Python | /operasilogika.py | UTF-8 | 1,120 | 4.1875 | 4 | [] | no_license | # OPERASI LOGIKA ATAU BOOLEAN
# ada NOT, OR, AND, XOR
# NOT
print('====NOT====')
a = False
c = not a
print('Data a =',a)
print("NOT")
print('Data c =',c)
# OR (|) (jika salah satu nilai terpenuhi atau true, maka hasilnya = true)
print('====OR====')
a = False
b = False
c = a | b
print(a,'OR',b,'=',c)
a = False
b = True
c = a | b
print(a,'OR',b,'=',c)
a = True
b = False
c = a | b
print(a,'OR',b,'=',c)
a = True
b = True
c = a | b
print(a,'OR',b,'=',c)
# AND (&) (jika hanya satu yang terpenuhi atau true, maka hasilnya = false)
print('====AND====')
a = False
b = False
c = a & b
print(a,'AND',b,'=',c)
a = False
b = True
c = a & b
print(a,'AND',b,'=',c)
a = True
b = False
c = a & b
print(a,'AND',b,'=',c)
a = True
b = True
c = a & b
print(a,'AND',b,'=',c)
# XOR (^) (akan true jika salah satu true, sisanya false)
print('====XOR====')
a = False
b = False
c = a ^ b
print(a,'XOR',b,'=',c)
a = False
b = True
c = a ^ b
print(a,'XOR',b,'=',c)
a = True
b = False
c = a ^ b
print(a,'XOR',b,'=',c)
a = True
b = True
c = a ^ b
print(a,'XOR',b,'=',c)
| true |
76a536d9e532dc5842895a2f259f13067489c449 | Python | VisionAlex/odds-comparison | /odds/scrapers/tonybet.py | UTF-8 | 3,163 | 2.6875 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import time
# Shared headless-Chrome configuration; image loading is disabled
# (value 2) to speed up page fetches.
options = Options()
options.headless=True
options.add_argument('start-maximized')
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
chrome_prefs = {}
options.experimental_options["prefs"] = chrome_prefs
chrome_prefs["profile.default_content_settings"] = {"images": 2}
def get_team_names(tonybet_code):
    """Scrape tonybet.com/sport/<tonybet_code> and return the unique
    team names listed on the page, in order of first appearance."""
    driver = webdriver.Chrome(options=options)
    try:
        URL = f"https://tonybet.com/sport/{tonybet_code}"
        driver.get(URL)
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "wpt-odd-changer"))
            )
        except Exception as e:
            # Timed out waiting for odds to render; fall through and
            # parse whatever has loaded so far.
            print(e)
        source = driver.page_source
        soup = BeautifulSoup(source, 'lxml')
        matches = soup.select('.wpt-table__body .wpt-table__row')
        team_names = []
        for match in matches:
            try:
                teams = match.select(".wpt-teams__team span")
                home_team = teams[0].text.strip()
                away_team = teams[1].text.strip()
                if home_team not in team_names:
                    team_names.append(home_team)
                if away_team not in team_names:
                    team_names.append(away_team)
            except Exception as e:
                # Malformed row (e.g. header/ad row); skip it.
                print(e)
    finally:
        # BUGFIX: always release the browser, even when parsing raises;
        # previously an exception leaked the Chrome process.
        driver.close()
    return team_names
def get_odds(tonybet_code):
    """Scrape tonybet.com/sport/<tonybet_code> and return a list of
    match dicts with 'home_team', 'away_team' and an 'odds' mapping
    (1/X/2, both-teams-to-score, over/under goal lines)."""
    driver = webdriver.Chrome(options=options)
    try:
        URL = f"https://tonybet.com/sport/{tonybet_code}"
        driver.get(URL)
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "wpt-odd-changer"))
            )
        except Exception as e:
            # Timed out waiting for odds to render; parse what loaded.
            print(e)
        source = driver.page_source
        soup = BeautifulSoup(source, 'lxml')
        matches = soup.select('.wpt-table__body .wpt-table__row')
        scraped_matches = []
        for match in matches:
            obj = {}
            try:
                odds = match.select(".wpt-odd-changer")
                teams = match.select(".wpt-teams__team span")
                obj['home_team'] = teams[0].text.strip()
                obj['away_team'] = teams[1].text.strip()
                # Odds cells are positional: 1, X, 2, BTTS yes/no, then
                # over/under whose goal line sits in a nested <span>.
                obj['odds'] = {
                    "1": odds[0].text,
                    "X": odds[1].text,
                    "2": odds[2].text,
                    "BTTS_YES": odds[3].text,
                    "BTTS_NO": odds[4].text,
                    f"Over {odds[5].span.text.strip()} Goals": odds[5].find(text=True, recursive=False),
                    f"Under {odds[6].span.text.strip()} Goals": odds[6].find(text=True, recursive=False),
                }
                scraped_matches.append(obj)
            except Exception as e:
                # Row without a full odds set; skip it.
                print(e)
                continue
    finally:
        # BUGFIX: always release the browser, even when parsing raises;
        # previously an exception leaked the Chrome process.
        driver.close()
    return scraped_matches
if __name__ == '__main__':
    # Smoke test: scrape the Premier League page and dump parsed odds.
    odds = get_odds('football/england/premier-league')
    print(odds)
c715019dc7e961c4c729c149d26b26a9628f37c1 | Python | ykpgkh/pythonReview | /data structures/linkedListsInfo.py | UTF-8 | 3,000 | 4.875 | 5 | [] | no_license | # Sources:
# https://realpython.com/linked-lists-python/
# https://www.tutorialspoint.com/python_data_structure/python_linked_lists.htm
# https://stackoverflow.com/questions/6256983/how-are-deques-in-python-implemented-and-when-are-they-worse-than-lists
# This is an overview on what linked lists are and how to implement them on Python
# Basics:
# Each element of a linked list consists of "two blocks" called a node.
# One block in the node has the data; it contains the value that was stored in it
# The other block contains a reference to the next node that should be followed
# In Python, lists are dynamic arrays so the memory usage of linked lists and lists is similary.
# Use .insert() and .remove() to target specific elements in a list
# Use .append() and .pop() to insert/remove elements at the end of the list
# Linked lists have a time complexity of O(1) in queues (FIFO), which is faster than normal lists, but they perform similarly when implementing a stack (LIFO)
# In element lookups, lists are faster when you know which element you want to access (they are O(1)), but linked lists take O(n) because they have to traverse the whole list
# When searching for specific elements they perform the same, O(n) because they need to traverse every element one by one to find what they want
# In Python we can use collections.deque which stands for double-ended queue. It uses an implementation of a linked list that you can access, insert, or remove elements from the beginning or the end of the list with O(1) performance.
# deques are useful for append and pop things at the end/front but lists are better for random-access (O(1) speed) and fixed-length operations including slicing
# Linked lists can be used for queues, stacks, and graphs.
# For queues use FIFO (First In, First Out); That means that the first element inserted in the list is the first one to be retrieved
# For stacks use LIFO (Last In, First Out); that means that the last element that was inserted is the first one you would get
# For graphs you could use an adjacency list; each vertex of the graph is stored alongside a collection of connected vertices
graph = {
1: [2, 3, None],
2: [4, None],
3: [None],
4: [5, 6, None],
5: [6, None],
6: [None]
}
# To create a linked list using deque write the below. It will create an empty list. You may also populate it at creation (refer to source)
from collections import deque
deque()
# Now we can manipulate the deque object by adding or removing elements
llist = deque("abcde")
print (llist)
llist.append("hola") # notice how since it's an append, it will be last in the list
print (llist)
llist.pop() # notice how you don't need to specify anything in pop because it automatically pops out hte last element in the list
print (llist)
llist.appendleft("adios") # use appendleft to specify you want it on the left side
print (llist)
llist.popleft() # use popleft to specify you want the leftmost element to be popped out
print (llist) | true |
f40612dd242e31452a851dc9e0a58a60d2f14ef3 | Python | iwotastic/srt4_website_bot | /bot_session.py | UTF-8 | 5,125 | 2.796875 | 3 | [] | no_license | from math import sin, pi
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
from utils import rand_email, rand_text, move_mouse_based_on_func
from random import random, randrange, shuffle, uniform
from rich.progress import track
import time
class SequentialBotSession:
    """Baseline form-filling bot: visits a page, fills every input /
    select / textarea in document order with random data, and submits.

    Subclasses customize field ordering via order() and the per-field
    handlers (handle_input_type_<type> / handle_<tag>).
    """
    def __init__(self, browser):
        # Selenium WebDriver instance used for all interaction.
        self.browser = browser
        # Shared password so password/confirm fields get the same value.
        self.password = ""

    def order(self, elements):
        """Hook: reorder `elements` in place; no-op here (document order)."""
        pass

    def handle_input_type_text(self, el):
        el.click()
        el.send_keys(rand_text(10, 20))

    def handle_input_type_email(self, el):
        el.click()
        el.send_keys(rand_email())

    def handle_input_type_password(self, el):
        el.click()
        el.send_keys(self.password)

    def handle_textarea(self, el):
        # Longer filler text for free-form fields.
        el.click()
        el.send_keys(rand_text(70, 110))

    def execute(self, url):
        """Load `url`, fill all form fields, and click the submit button."""
        self.browser.get(url)
        self.password = rand_text(10, 20)
        input_elements = self.browser.find_elements(By.TAG_NAME, "input")
        select_elements = self.browser.find_elements(By.TAG_NAME, "select")
        textarea_elements = self.browser.find_elements(By.TAG_NAME, "textarea")
        elements = input_elements + select_elements + textarea_elements
        self.order(elements)
        for el in elements:
            tag_name = el.tag_name.lower()
            # Dispatch to handle_input_type_<type> or handle_<tag>;
            # unknown fields are silently skipped.
            if tag_name == "input":
                getattr(self, f"handle_input_type_{el.get_attribute('type')}", lambda _: None)(el)
            else:
                getattr(self, f"handle_{tag_name}", lambda _: None)(el)
        self.browser.find_element_by_css_selector("button[type=submit]").click()
        time.sleep(0.2)
class RandomBotSession(SequentialBotSession):
    """Variant that fills the form fields in a random order."""
    def order(self, elements):
        shuffle(elements)
class StraightLineBotSession(SequentialBotSession):
    """Bot that moves the mouse cursor in a straight line to each field
    before clicking it (to mimic pointer movement)."""
    def __init__(self, browser):
        SequentialBotSession.__init__(self, browser)
        # Last known cursor position, used as the movement start point.
        # NOTE(review): never updated after a click -- every movement
        # starts from (0, 0); confirm whether that is intentional.
        self.last_x = 0
        self.last_y = 0

    def execute(self, url):
        """Same flow as the base class but with a longer post-submit wait."""
        self.browser.get(url)
        self.password = rand_text(10, 20)
        input_elements = self.browser.find_elements(By.TAG_NAME, "input")
        select_elements = self.browser.find_elements(By.TAG_NAME, "select")
        textarea_elements = self.browser.find_elements(By.TAG_NAME, "textarea")
        elements = input_elements + select_elements + textarea_elements
        self.order(elements)
        for el in elements:
            tag_name = el.tag_name.lower()
            if tag_name == "input":
                getattr(self, f"handle_input_type_{el.get_attribute('type')}", lambda _: None)(el)
            else:
                getattr(self, f"handle_{tag_name}", lambda _: None)(el)
        self.browser.find_element_by_css_selector("button[type=submit]").click()
        time.sleep(0.5)

    def click_element(self, el):
        """Move the cursor linearly to a random point inside `el`, then click."""
        # Random target point with a 5px margin inside the element.
        x_to_click = el.rect["x"] + 5 + randrange(el.rect["width"] - 10)
        y_to_click = el.rect["y"] + 5 + randrange(el.rect["height"] - 10)
        # Identity easing function => straight, constant-speed path;
        # the small random delays vary the movement timing.
        move_mouse_based_on_func(
            self.browser,
            (self.last_x, self.last_y),
            (x_to_click, y_to_click),
            lambda x: x,
            [uniform(0, 0.005) for _ in range(randrange(2, 5))]
        )
        ActionChains(self.browser).click().perform()

    def handle_input_type_text(self, el):
        self.click_element(el)
        el.send_keys(rand_text(10, 20))

    def handle_input_type_email(self, el):
        self.click_element(el)
        el.send_keys(rand_email())

    def handle_input_type_password(self, el):
        self.click_element(el)
        el.send_keys(self.password)

    def handle_textarea(self, el):
        self.click_element(el)
        el.send_keys(rand_text(70, 110))
class ExponentialBotSession(StraightLineBotSession):
    """Moves the cursor along a power-curve path (x**exp easing) with a
    randomly chosen exponent per click."""
    def click_element(self, el):
        x_to_click = el.rect["x"] + 5 + randrange(el.rect["width"] - 10)
        y_to_click = el.rect["y"] + 5 + randrange(el.rect["height"] - 10)
        # exp < 1 front-loads the motion, exp > 1 back-loads it.
        exp = uniform(0.01, 3)
        move_mouse_based_on_func(
            self.browser,
            (self.last_x, self.last_y),
            (x_to_click, y_to_click),
            lambda x: x ** exp,
            [uniform(0, 0.005) for _ in range(randrange(2, 5))]
        )
        ActionChains(self.browser).click().perform()
class SinBotSession(StraightLineBotSession):
    """Moves the cursor along a randomized sinusoidal easing curve."""
    def click_element(self, el):
        x_to_click = el.rect["x"] + 5 + randrange(el.rect["width"] - 10)
        y_to_click = el.rect["y"] + 5 + randrange(el.rect["height"] - 10)
        # mult sets the number of extra half-oscillations; the two
        # exponents warp the curve's input and output.
        mult = randrange(0, 5)
        exp = uniform(0.01, 3)
        exp2 = uniform(0.01, 3)
        move_mouse_based_on_func(
            self.browser,
            (self.last_x, self.last_y),
            (x_to_click, y_to_click),
            lambda x: abs(sin((x ** exp) * (pi / 2 + mult * 2 * pi)) ** exp2),
            [uniform(0, 0.005) for _ in range(randrange(2, 5))]
        )
        ActionChains(self.browser).click().perform()
class WiggleLineBotSession(StraightLineBotSession):
    """Moves the cursor along a line with small high-frequency wiggles."""
    def click_element(self, el):
        x_to_click = el.rect["x"] + 5 + randrange(el.rect["width"] - 10)
        y_to_click = el.rect["y"] + 5 + randrange(el.rect["height"] - 10)
        # BUGFIX: random.randrange() accepts only integers, so the float
        # bounds used here previously raised ValueError on every call;
        # uniform() is the float-range equivalent.
        mult = uniform(0.000001, 0.5)
        mult2 = randrange(4, 30)
        exp = uniform(0.01, 3)
        # Low-amplitude (mult), high-frequency (mult2) sine wiggle,
        # clamped to [0, 1] like the other easing functions.
        move_mouse_based_on_func(
            self.browser,
            (self.last_x, self.last_y),
            (x_to_click, y_to_click),
            lambda x: min(abs(mult * sin((mult2 * x) ** exp)), 1.0),
            [uniform(0, 0.005) for _ in range(randrange(2, 5))]
        )
        ActionChains(self.browser).click().perform()
2785b77d63cdeb0c2e037f79c93dab12524aacaf | Python | urands/cp2020 | /server/datasets.py | UTF-8 | 2,340 | 2.828125 | 3 | [] | no_license | import pandas as pd
def load():
    """Load the generation/consumption table joined with daily temperature.

    Returns a DataFrame indexed by M_DATE with rows of zero actual usage
    dropped, a DAY key (year*1000 + day-of-year) for the join, and
    helper 'year'/'month' columns.
    """
    # LOAD POWER
    power = pd.read_csv('./datasets/gen_and_use7.csv', parse_dates=['M_DATE', 'M_DATE_DAY'])
    power = power.drop(power[power.E_USE_FACT == 0].index).set_index('M_DATE')
    # DAY encodes (year, day-of-year) as a single joinable integer.
    power['DAY'] = power.index.year * 1000 + power.index.dayofyear
    power['M_DATE'] = power.index
    # Drop the power file's own TEMP column; temperature comes from the
    # dedicated file below.
    power = power.drop(columns=['TEMP'])
    # LOAD TEMPERATURE
    temp = pd.read_csv('./datasets/temperature.csv', parse_dates=[0])
    # temp = temp.drop_duplicates()
    temp['DAY'] = temp.M_DATE.dt.year * 1000 + temp.M_DATE.dt.dayofyear
    # Only keep temperature records within the power data's time range.
    temp = temp[temp.M_DATE <= power.M_DATE.max()]
    # temp = temp.set_index('M_DATE')
    power = pd.merge(power, temp[['DAY', 'TEMP']], how='left', on='DAY').set_index('M_DATE')
    power['year'] = power.index.year
    power['month'] = power.index.month
    return power
def load_invest():
    """Return yearly investment amounts as a DataFrame indexed by date.

    Columns: 'money' plus helper 'year'/'month' derived from the index.
    """
    records = [
        ('2009-06-01', 2332.1),
        ('2010-06-01', 3057.9),
        ('2011-06-01', 12035.7),
        ('2012-06-01', 5924.9),
        ('2013-06-01', 44427.5),
        ('2014-06-01', 51795.5),
        ('2015-06-01', 4574.9),
        ('2016-06-01', 8843.8),
    ]
    invest = pd.DataFrame(records, columns=['date', 'money'])
    invest['date'] = pd.to_datetime(invest['date'])
    invest = invest.set_index('date')
    invest['year'] = invest.index.year
    invest['month'] = invest.index.month
    return invest
def load_buildings():
    """Load monthly built floor area for houses and apartment blocks.

    Reads comma-separated monthly square-meter figures from two text
    files, aligns them on a monthly date index, and returns the
    apartment frame with a 'sum' column (houses + apartments) plus
    helper 'year'/'month' columns.
    """
    f = open('./datasets/house.txt', 'r')
    data = f.readline()
    f.close()
    # Month starts from 2015-01 up to 2020-10 (last two months dropped).
    dates = []
    for y in range(2015, 2021):
        for m in range(1, 13):
            dates.append(f'{y}-{m}-01')
    dates = dates[:-2]
    houses = pd.DataFrame({
        'square': list(map(float, data.split(','))),
        'date': dates
    })
    houses['date'] = pd.to_datetime(houses['date'])
    houses = houses.set_index('date')
    f = open('./datasets/apart.txt', 'r')
    data = f.readline()
    f.close()
    # NOTE(review): the apartment series additionally drops the first two
    # months (starts 2015-03) -- presumably the data file starts later;
    # confirm against the source files.
    dates = []
    for y in range(2015, 2021):
        for m in range(1, 13):
            dates.append(f'{y}-{m}-01')
    dates = dates[2:-2]
    apart = pd.DataFrame({
        'square': list(map(float, data.split(','))),
        'date': dates
    })
    apart['date'] = pd.to_datetime(apart['date'])
    apart = apart.set_index('date')
    # Combined new floor area; house rows missing in 'apart' align NaN-free
    # only where both indices overlap.
    apart['sum'] = apart['square'] + houses['square']
    apart['year'] = apart.index.year
    apart['month'] = apart.index.month
    return apart
| true |
b99a12ba7021422b2e46fb5549edc280d7c959e9 | Python | metsi/metsi.github.io | /examples/kod5.py | UTF-8 | 4,041 | 2.59375 | 3 | [
"MIT"
] | permissive | # ------------------------------------------------------------------------
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
# Classifier pool under comparison; dict keys double as display names.
clfs = {
    'GNB': GaussianNB(),
    'SVM': SVC(),
    'kNN': KNeighborsClassifier(),
    'CART': DecisionTreeClassifier(random_state=1234),
}
# ------------------------------------------------------------------------
# Benchmark datasets: each "datasets/<name>.csv" holds features with the
# class label in the last column.
datasets = ['australian', 'balance', 'breastcan', 'cryotherapy', 'diabetes',
            'digit', 'ecoli4', 'german', 'glass2', 'heart', 'ionosphere',
            'liver', 'monkthree', 'shuttle-c0-vs-c4', 'sonar', 'soybean',
            'vowel0', 'waveform', 'wisconsin', 'yeast3']
# ------------------------------------------------------------------------
import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold

# 5-fold stratified CV repeated twice; scores indexed as
# [classifier, dataset, fold].
n_datasets = len(datasets)
n_splits = 5
n_repeats = 2
rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=1234)
scores = np.zeros((len(clfs), n_datasets, n_splits * n_repeats))
# ------------------------------------------------------------------------
from sklearn.base import clone
from sklearn.metrics import accuracy_score

# Evaluate every classifier on every fold of every dataset; clone() gives
# each fold a fresh, unfitted copy of the estimator.
for data_id, dataset in enumerate(datasets):
    dataset = np.genfromtxt("datasets/%s.csv" % (dataset), delimiter=",")
    X = dataset[:, :-1]
    y = dataset[:, -1].astype(int)
    for fold_id, (train, test) in enumerate(rskf.split(X, y)):
        for clf_id, clf_name in enumerate(clfs):
            clf = clone(clfs[clf_name])
            clf.fit(X[train], y[train])
            y_pred = clf.predict(X[test])
            scores[clf_id, data_id, fold_id] = accuracy_score(y[test], y_pred)
# Persist raw fold accuracies for the analysis stage below.
np.save('results', scores)
# ------------------------------------------------------------------------
# Statistical analysis of the saved fold accuracies.
scores = np.load('results.npy')
print("\nScores:\n", scores.shape)
# ------------------------------------------------------------------------
# Mean accuracy per (dataset, classifier); transpose puts datasets in rows.
mean_scores = np.mean(scores, axis=2).T
print("\nMean scores:\n", mean_scores)
# ------------------------------------------------------------------------
from scipy.stats import rankdata

# Rank classifiers within each dataset (higher accuracy -> higher rank).
ranks = []
for ms in mean_scores:
    ranks.append(rankdata(ms).tolist())
ranks = np.array(ranks)
print("\nRanks:\n", ranks)
# ------------------------------------------------------------------------
mean_ranks = np.mean(ranks, axis=0)
print("\nModels: ", list(clfs.keys()))
print("Mean ranks: ", mean_ranks)
# ------------------------------------------------------------------------
from scipy.stats import ranksums

# Pairwise Wilcoxon rank-sum tests over the per-dataset rank vectors.
alfa = .05
w_statistic = np.zeros((len(clfs), len(clfs)))
p_value = np.zeros((len(clfs), len(clfs)))
ranks = ranks.T
for i in range(len(clfs)):
    for j in range(len(clfs)):
        w_statistic[i, j], p_value[i, j] = ranksums(ranks[i], ranks[j])
# ------------------------------------------------------------------------
from tabulate import tabulate

# Pretty-print the statistic and p-value matrices with model names.
headers = list(clfs.keys())
names_column = np.expand_dims(np.array(list(clfs.keys())), axis=1)
w_statistic_table = np.concatenate((names_column, w_statistic), axis=1)
w_statistic_table = tabulate(w_statistic_table, headers, floatfmt=".2f")
p_value_table = np.concatenate((names_column, p_value), axis=1)
p_value_table = tabulate(p_value_table, headers, floatfmt=".2f")
print("\nw-statistic:\n", w_statistic_table, "\n\np-value:\n", p_value_table)
# ------------------------------------------------------------------------
# advantage[i, j] == 1 when classifier i ranks better than j on average.
advantage = np.zeros((len(clfs), len(clfs)))
advantage[w_statistic > 0] = 1
advantage_table = tabulate(np.concatenate(
    (names_column, advantage), axis=1), headers)
print("\nAdvantage:\n", advantage_table)
# ------------------------------------------------------------------------
# significance[i, j] == 1 when the pairwise difference is significant.
significance = np.zeros((len(clfs), len(clfs)))
significance[p_value <= alfa] = 1
significance_table = tabulate(np.concatenate(
    (names_column, significance), axis=1), headers)
print("\nStatistical significance (alpha = 0.05):\n", significance_table)
| true |
46d8207a84050087d0d0dec33c29cdbfa408e00b | Python | akash988/comprehension | /comprehensionmethod.py | UTF-8 | 666 | 4.15625 | 4 | [] | no_license | list=[]
a=int(input("HOW MANY ELEMENT YOU WANT TO PRINT IN THIS LIST\n"))
for i in range(a):
b=int(input(f"Enter your element you want to add in this list{i}\n"))
list.append(b)
print(list)
print("Enter your choice 1.list comprehension 2.set comprehension 3.dictionary comprehension")
ch=int(input(" "))
if ch==1:
l1=[x for x in list]
print(l1)
elif ch==2:
l2={y for y in list}
print(l2)
elif ch==3:
l3={z:f"item {z+1}"for z in list}
print(l3)
print("Enter choice q quit and c continue\n")
ch1=" "
while(ch1!="q" and ch1!="c"):
ch1==input( )
if ch1=="q":
quit()
elif ch1=="c":
continue
| true |
a3b41dd7d8cb2069437739d782d4e4f5e0ca784d | Python | serapred/academy | /ffi/goldbach.py | UTF-8 | 2,026 | 4.3125 | 4 | [] | no_license |
"""
German mathematician Christian Goldbach (1690-1764) conjectured that every even number greater than 2 can be represented by the sum of two prime numbers. For example, 10 can be represented as 3+7 or 5+5.
Your job is to make the function return a list containing all unique possible representations of n in an increasing order if n is an even integer; if n is odd, return an empty list. Hence, the first addend must always be less than or equal to the second to avoid duplicates.
Constraints: 2 < n < 32000 and n is even
Examples
26 --> ['3+23', '7+19', '13+13']
100 --> ['3+97', '11+89', '17+83', '29+71', '41+59', '47+53']
7--> []
"""
def primality(n):
    # Naive O(n) trial division.  NOTE: this definition (and the two
    # below) is shadowed by the final `primality` further down; they are
    # kept as a step-by-step optimisation exercise.
    # base case: 2 and 3 are prime, anything <= 1 is not.
    if n <= 3:
        return n > 1
    for i in range(2, n):
        if n % i == 0:
            return False
    return True
def primality(n):
    # sqrt optimization: a composite n must have a divisor <= sqrt(n),
    # so trial division can stop there (O(sqrt n)).  Shadowed below.
    if n <= 3:
        return n > 1
    for i in range(2, int(n**.5) + 1):
        if n % i == 0:
            return False
    return True
def primality(n):
    # while-loop variant of the sqrt optimization: compares i**2 <= n
    # instead of computing the square root.  Shadowed below.
    # base case
    if n <= 3:
        return n > 1
    i = 2
    while i**2 <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def primality(n):
    """Primality test with the 6k +/- 1 optimization.

    After ruling out multiples of 2 and 3, every remaining prime
    candidate has the form 6k - 1 or 6k + 1, so trial division only
    probes those two residues up to sqrt(n).
    """
    if n <= 3:
        # 2 and 3 are prime; 0, 1 and negatives are not.
        return n > 1
    if n % 2 == 0 or n % 3 == 0:
        return False
    divisor = 5
    while divisor * divisor <= n:
        # divisor is 6k - 1, divisor + 2 is 6k + 1.
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def primes(n):
    """Yield every prime strictly below n, in increasing order."""
    yield from (candidate for candidate in range(2, n) if primality(candidate))
def goldbach(n):
    """Yield the Goldbach decompositions (p, q) of even n with p <= q.

    BUGFIX: the original 'seen' bookkeeping could still yield both
    (p, q) and (q, p) depending on set iteration order; requiring
    p <= q yields each decomposition exactly once, matching the
    module docstring's "first addend <= second" rule.
    """
    prime_set = set(primes(n))
    for p in prime_set:
        q = n - p
        if p <= q and q in prime_set:
            yield p, q
if __name__ == '__main__':
    # `or 0` turns an empty... (int('') would raise first; effectively
    # requires a numeric entry).
    num = int(input('insert even number: ')) or 0
    # Odd input: no Goldbach decomposition by definition -- exit early.
    if num % 2:
        print('Goodbye.'), exit()
    print([f'{x}+{y}' for x, y in goldbach(num)])
| true |
d4a712dee7d351e9b641bf97e212b964f4b2b116 | Python | PatrickWilke/ANN_RL | /FourInARow.py | UTF-8 | 3,384 | 3.125 | 3 | [] | no_license | import numpy as np
import ANN
class FourInARowBoard:
def __init__(self):
self.board = np.zeros((2, 6, 7), dtype=bool)
self.hights = np.zeros((7), dtype=int)
self.next_move = 0
self.number_of_moves = 0
def SiteIsOccupied(self, row, column):
return self.board[0][row][column] or self.board[1][row][column]
def ColumnIsFull(self, column):
return self.hights[column] > 5
def WinningMove(self, column, row):
if self.hights[column] > 3:
if np.all(self.board[self.next_move,self.hights[column]-4:self.hights[column],column]):
return True
for possible_directions in [self.board[self.next_move,row],np.diagonal(self.board[self.next_move],offset=column-row),
np.diagonal(self.board[self.next_move,:, ::-1],offset=6-column-row)]:
count = 0
for element in possible_directions:
if element:
count += 1
if count == 4:
return True
else:
count = 0
return False
def MakeMove(self,column):
self.number_of_moves += 1
self.board[self.next_move, self.hights[column], column] = True
self.hights[column] += 1
if self.WinningMove(column, self.hights[column]-1):
return True
else:
self.next_move = 1 - self.next_move
return False
def __str__(self):
state = "-----------------------------\n"
for i in reversed(range(0,6)):
state += "|"
for j in range(0, 7):
if self.board[0, i, j]:
state += " X |"
elif self.board[1, i, j]:
state += " O |"
else:
state += " |"
state += "\n-----------------------------\n"
return state
def ResetGame(self):
self.board = np.zeros((2, 6, 7), dtype=bool)
self.hights = np.zeros((7), dtype=int)
self.next_move = 0
self.number_of_moves = 0
def GameEnded(self):
return self.number_of_moves == 42
def GetNextMove(self):
return self.next_move
def GetState(self):
return self.board.astype(float).flatten()
def GetColumnHight(self, column):
return self.hights[column]
class LearningFourInARow(FourInARowBoard):
winning_action_reward = 1.0
loosing_action_reward = -0.5
neutral_action_reward = 0.0
prohibited_action_reward = -1.0
def MakeMoveWithReward(self, action):
if self.ColumnIsFull(action):
return self.prohibited_action_reward, True
elif self.MakeMove(action):
return self.winning_action_reward, True
else:
if self.GameEnded():
return self.neutral_action_reward, True
return self.neutral_action_reward, False
store_path = "FourInARowNewTraining"
FourInARowANN = ANN.TrainingNetwork(84, 7, 0.05, [50, 50, 50])
if __name__ == '__main__':
board = LearningFourInARow()
FourInARowANN.Training_Episodic_Single_Matches_Reverse(board,store_path,10000, FourInARowANN.Q_Learning_Episodic_Single_Game)
#FourInARowANN.Training_Episodic_Decorrelated_Batches(board,store_path,50, 50, 5, FourInARowANN.Q_Learning_Episodic)
| true |
7feaca4a15326d6d61827278548da3b33fb8d9f3 | Python | TwilioDevEd/automated-survey-flask | /tests/parsers_tests.py | UTF-8 | 1,458 | 3.109375 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | import unittest
from automated_survey_flask import parsers
from automated_survey_flask.models import Question
class ParserTests(unittest.TestCase):
def setUp(self):
self.survey_as_json = '{"title": "TDD Survey", "questions": [\
{"body":"What is your name?", "type":"text"},\
{"body":"What is your age?", "type":"numeric"},\
{"body":"Do you know Python?", "type":"boolean"}]}'
def test_parse_survey(self):
survey = parsers.survey_from_json(self.survey_as_json)
self.assertEquals("TDD Survey", survey.title)
def test_survey_includes_questions(self):
survey = parsers.survey_from_json(self.survey_as_json)
self.assertEquals(3, survey.questions.count())
def test_parse_question_title(self):
questions = parsers.questions_from_json(self.survey_as_json)
self.assertEquals("What is your name?", questions[0].content)
def test_parse_text_question(self):
questions = parsers.questions_from_json(self.survey_as_json)
self.assertEquals(Question.TEXT, questions[0].kind)
def test_parse_numeric_question(self):
questions = parsers.questions_from_json(self.survey_as_json)
self.assertEquals(Question.NUMERIC, questions[1].kind)
def test_parse_boolean_questions(self):
questions = parsers.questions_from_json(self.survey_as_json)
self.assertEquals(Question.BOOLEAN, questions[2].kind)
| true |
eefdec737e292792fc7480b8545e4e9ccb50a605 | Python | pythonistic/FixMp3 | /FixMp3.py | UTF-8 | 3,249 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
import eyeD3
import os
import os.path
import shutil
def sanitize(s):
if s == None or len(s) == 0:
s = "Unknown"
s = s.strip()
s = s.replace('/', '_')
s = s.replace('"', '_')
s = s.replace("'", '_')
s = s.replace('*', '_')
s = s.replace('&', '_')
s = s.replace(' ', '_')
s = s.replace('__', '_')
s = s.replace('__', '_')
s = s.replace('__', '_')
s = s.replace('.', '')
if s.startswith('The_') and not s == 'The_The':
s = s[4:]
return s
def get_tag(filename):
tag.link(filename, eyeD3.ID3_V2)
artist = None
album = None
title = None
artist1 = None
album1 = None
title1 = None
try:
artist = tag.getArtist()
album = tag.getAlbum()
title = tag.getTitle()
(disc, setSize) = tag.getDiscNum()
(track, discSize) = tag.getTrackNum()
except:
pass
try:
tag.link(filename, eyeD3.ID3_V1)
artist1 = tag.getArtist()
album1 = tag.getAlbum()
title1 = tag.getTitle()
(disc1, setSize) = tag.getDiscNum()
(track1, setSize) = tag.getTrackNum()
except:
pass
if artist1 != None and (artist == None or artist.strip() == ''):
artist = artist1
if album1 != None and (album == None or album.strip() == ''):
album = album1
if title1 != None and (title == None or title.strip() == ''):
title = title1
artist = sanitize(artist)
album = sanitize(album)
title = sanitize(title)
return (artist, album, title, track, disc)
tag = eyeD3.Tag()
inPath = "iPodMusic"
outPath = "MusicFixed"
maximumFilenameLength = 31
i = 0
if not os.path.exists(outPath):
os.mkdir(outPath)
# build a list of files
for root, dirs, files in os.walk(inPath):
for name in files:
filename = os.path.join(root, name)
if filename.endswith(".mp3"):
(artist, album, title, track, disc) = get_tag(filename)
# make the destination path
if len(artist) > maximumFilenameLength:
artist = artist[:maximumFilenameLength]
if len(album) > maximumFilenameLength:
album = album[:maximumFilenameLength]
outTitle = title
if track != None:
outTitle = str(track) + "-" + outTitle
if disc != None:
outTitle = str(disc) + "-" + outTitle
if outTitle > maximumFilenameLength - 4:
outTitle = outTitle[:maximumFilenameLength - 4]
destPath = os.path.join(outPath, artist)
if not os.path.exists(destPath):
os.mkdir(destPath)
destPath = os.path.join(destPath, album)
if not os.path.exists(destPath):
os.mkdir(destPath)
destPath = os.path.join(destPath, outTitle)
origDestPath = destPath
incr = 0
while os.path.exists(destPath + ".mp3"):
destPath = origDestPath + str(incr)
incr += 1
destPath += ".mp3"
shutil.copyfile(filename, destPath)
i += 1
else:
print filename
print "Processed ", i, " MP3 files"
| true |
94a6586b3f726d8070343c57ab198f2250f18dc5 | Python | GLAMOS/dataflow | /dataflow/DataReaders/VawFileReaders/VolumeChangeReader.py | UTF-8 | 7,423 | 2.8125 | 3 | [
"MIT"
] | permissive | '''
Created on 11.07.2018
@author: yvo
'''
import re
from dataflow.DataReaders.VawFileReaders.VawFileReader import VawFileReader
from dataflow.DataObjects.Exceptions.GlacierNotFoundError import GlacierNotFoundError
from dataflow.DataObjects.VolumeChange import VolumeChange
from dataflow.DataReaders.Exceptions.InvalidDataFileError import InvalidDataFileError
from dataflow.DataObjects.Enumerations.HeightCaptureMethodEnumeration import HeightCaptureMethodEnum
from dataflow.DataObjects.Enumerations.VolumeChangeEnumerations import AnalysisMethodEnum
from dataflow.DataObjects.Enumerations.DateEnumerations import DateQualityTypeEnum
class VolumeChangeReader(VawFileReader):
'''
Reader-class for parsing the VAW-ASCII-based volume change data files.
The header of the files follows the syntax:
---
# Glacier state and evolution data; <glacier name>; <VAW glacier id>
# name; date; volume; area; length; h_max; h_min; dV; dh_mean; dl
#
---
'''
# Additional header definition.
# Number of header lines.
__NUMBER_HEADER_LINES = 3
# Definition of the columns in the mass balance ASCII files (0-based index).
__FILE_COLUMN_DATE = 1
__FILE_COLUMN_AREA = 3
__FILE_COLUMN_ELEVATION_MAXIMUM = 5
__FILE_COLUMN_ELEVATION_MINIMUM = 6
__FILE_COLUMN_VOLUME_CHANGE = 7
__FILE_COLUMN_HEIGHT_CHANGE_MEAN = 8
# Secondary / Meta information retrieved from the primary data.
__FILE_COLUMN_DATE_QUALITY = 21
def __init__(self, config, fullFileName, glaciers):
'''
Constructor
@type config: configparser.ConfigParser
@param config: Configuration of the dataflow.
@type fullFileName: string
@param fullFileName: Absolute file path.
@type glaciers: Dictionary
@param glaciers: Dictionary with glaciers.
@raise GlacierNotFoundError: Exception in case of not a corresponding glacier was found.
@raise InvalidDataFileError: Exception in case of an invalid data file.
'''
# Setting the parameters of the data file.
self._numberHeaderLines = self.__NUMBER_HEADER_LINES
# Check if the given file is a correct volume change file.
searchResult = re.search(config.get("VolumeChange", "volumeChangePatternFilename"), fullFileName)
if searchResult == None:
message = "The file {0} is not a volume change data file.".format(fullFileName)
raise InvalidDataFileError(message)
# TODO: Additional test for file check to be included. If possible, implementation in a generic way in super-class VawFileReader.
try:
super().__init__(fullFileName, glaciers)
except GlacierNotFoundError as glacierNotFoundError:
raise glacierNotFoundError
def __str__(self):
pass
def parse(self):
with open(self._fullFileName, "r") as vc:
lineCounter = 0
self._numberDataLines = 0
dataLines = []
for line in vc:
lineCounter += 1
try:
if lineCounter > self.__NUMBER_HEADER_LINES:
data = self._getData(line)
if len(data) > 0:
self._numberDataLines += 1
# Intermediate storage of the parsed data for the later instances of volume changes.
dataLines.append(data)
except Exception as e:
errorMessage = "{0} @ {1}: {2}".format(vc, lineCounter, e)
print(errorMessage)
# Getting the individual volume change readings ready.
# An individual volume change reading consists of two data lines: i = reference, i + 1 = observation.
referenceReadingIndex = 0
changeReadingIndex = 1
for i in range(referenceReadingIndex, len(dataLines) - 1):
referenceReadingData = dataLines[referenceReadingIndex]
volumeChangeReadingData = dataLines[changeReadingIndex]
# Creating a new volume change object based on the reference and the observation data.
volumeChange = VolumeChange(
None,
referenceReadingData[self.__FILE_COLUMN_DATE], referenceReadingData[self.__FILE_COLUMN_DATE_QUALITY],
volumeChangeReadingData[self.__FILE_COLUMN_DATE], volumeChangeReadingData[self.__FILE_COLUMN_DATE_QUALITY],
referenceReadingData[self.__FILE_COLUMN_AREA], volumeChangeReadingData[self.__FILE_COLUMN_AREA],
HeightCaptureMethodEnum.NotDefinedUnknown, HeightCaptureMethodEnum.NotDefinedUnknown,
AnalysisMethodEnum.NotDefinedUnknown,
referenceReadingData[self.__FILE_COLUMN_ELEVATION_MAXIMUM], referenceReadingData[self.__FILE_COLUMN_ELEVATION_MINIMUM],
volumeChangeReadingData[self.__FILE_COLUMN_ELEVATION_MAXIMUM], volumeChangeReadingData[self.__FILE_COLUMN_ELEVATION_MINIMUM],
volumeChangeReadingData[self.__FILE_COLUMN_VOLUME_CHANGE],
volumeChangeReadingData[self.__FILE_COLUMN_HEIGHT_CHANGE_MEAN])
self._glacier.addVolumeChange(volumeChange)
referenceReadingIndex += 1
changeReadingIndex += 1
def _getData(self, dataLine):
# TODO: Description
# Dictionary with the unique values per volume change data line.
data = dict()
p = re.compile(' +')
dataLineParts = p.split(dataLine)
# Getting the data columns into the dictionary.
# Getting the date out of the data. Because every fucking file of VAW has it own format for date, an additional hack is needed (replace the - sign)
referenceDateInformation = self._reformateDateYyyyMmDd(dataLineParts[self.__FILE_COLUMN_DATE].strip().replace("-", ""))
# Getting the date object
referenceDate = referenceDateInformation[0]
# Getting the quality index of the reference date
referenceDateQuality = DateQualityTypeEnum(referenceDateInformation[1])
# Filling up of the parsed data to piped into an object.
data[self.__FILE_COLUMN_DATE] = referenceDate
data[self.__FILE_COLUMN_DATE_QUALITY] = referenceDateQuality
data[self.__FILE_COLUMN_AREA] = float(dataLineParts[self.__FILE_COLUMN_AREA].strip())
data[self.__FILE_COLUMN_ELEVATION_MAXIMUM] = float(dataLineParts[self.__FILE_COLUMN_ELEVATION_MAXIMUM].strip())
data[self.__FILE_COLUMN_ELEVATION_MINIMUM] = float(dataLineParts[self.__FILE_COLUMN_ELEVATION_MINIMUM].strip())
data[self.__FILE_COLUMN_VOLUME_CHANGE] = float(dataLineParts[self.__FILE_COLUMN_VOLUME_CHANGE].strip())
data[self.__FILE_COLUMN_HEIGHT_CHANGE_MEAN] = float(dataLineParts[self.__FILE_COLUMN_HEIGHT_CHANGE_MEAN].strip())
return data | true |
c06677c98c60b73a01bb9b4f71c978e45dddf31f | Python | Mortal/inlining | /example.py | UTF-8 | 544 | 2.859375 | 3 | [] | no_license | def foo(a, b, c, *d, **k):
dict(**k)
print(*d)
if c:
print(a, b)
if not c:
print(b, a)
if True:
print("if True")
if False:
print("if False")
elif a:
print("elif a")
if False:
print("if False")
elif True:
print("elif True")
else:
print("else after elif True")
if False:
print("if False")
else:
print("else")
return a + b
def bar(x):
r = 0
if x:
r = foo(x, 2, False, 10, 20, foo=42)
return r
| true |
616d59d1499ee0db79ea672bdcee367ecc975c47 | Python | miglesias91/dicenlosmedios | /test/test_eldestape.py | UTF-8 | 591 | 2.53125 | 3 | [
"MIT"
] | permissive | import unittest
import newspaper as np
from medios.diarios.eldestape import ElDestape
class TestElDestape(unittest.TestCase):
def test_entradas_feed(self):
ed = ElDestape()
url_fecha_titulo_categoria = ed.entradas_feed()
return len(url_fecha_titulo_categoria) == 50
def test_parsear_noticia(self):
ed = ElDestape()
texto = ed.parsear_noticia(url="https://www.eldestapeweb.com/nota/encuestas-2019-encuesta-muestra-que-la-formula-fernandez-fernandez-se-impone-a-macri-pichetto-en-la-provincia-de-buenos-aires-201972317240")
return 1 | true |
04a58c0b63de090fd6337c89cd9a72260517a7b0 | Python | dlfosterii/python103-small | /print_a_square.py | UTF-8 | 219 | 4.15625 | 4 | [] | no_license | #Print a 5x5 square of * characters
#setup
side = 5
y = 0
#Code to make it work
while(y < side):
x = 0
while(x < side):
x = x + 1
print('*', end = ' ')
y = y + 1
print('')
#end | true |
0242c721ba9be4956f132e65f02269181f7035b8 | Python | qbitkit/qbitkit | /qbitkit/provider/leap/provider.py | UTF-8 | 1,926 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | from dwave.system import DWaveSampler as __DWSampler__
from dwave.system import DWaveCliqueSampler as __DWCSampler__
from dwave.system import LeapHybridSampler as __LHSampler__
from dwave.system import LeapHybridDQMSampler as __LHDQMSampler__
class Annealing:
def get_sampler(self=str('DWaveSampler'),
retry_interval=int(-1)):
"""Create a new D-Wave Sampler based on a specified sampler type.
Args:
self(str): D-Wave Sampler as a string. Can be 'DWaveSampler', 'DWaveCliqueSampler', 'LeapHybridSampler', or 'LeapHybridDQMSampler.' (default str('DWaveSampler'))
retry_interval(int): Interval in seconds to retry, or -1 to propogate SolverNotFound error to user (default int(-1))
Returns:
dimod.meta.SamplerABCMeta: A D-Wave Ocean SDK Plugin Sampler"""
# Initialize variable 'new_sampler'
new_sampler = None
# Check if requested sampler is DWaveSampler
if self == 'DWaveSampler':
# Create a DWaveSampler
new_sampler = __DWSampler__(retry_interval=retry_interval)
# Check if requested samlper is DWaveCliqueSampler
elif self == 'DWaveCliqueSampler':
# Create a DWaveCliqueSampler
new_sampler = __DWCSampler__(retry_interval=retry_interval)
# Check if requested sampler is LeapHybridSampler
elif self == 'LeapHybridSampler':
# Create a LeapHybridSampler
new_sampler = __LHSampler__(retry_interval=retry_interval)
elif self == 'LeapHybridDQMSampler':
# Create a LeapHybridDQMSampler
new_sampler = __LHDQMSampler__(retry_interval=retry_interval)
# Check if sampler does not meet any of the above conditions
else:
# Set sampler to return as None
new_sampler = None
# Return the sampler we just created
return new_sampler
| true |
32e1fe7df484dd37a4ddd7dc80d427b3f789ad41 | Python | maugryn/RollDice | /RollDice.py | UTF-8 | 252 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | #!-*- conding: utf8 -*-
import random
from pip._vendor.distlib.compat import raw_input
while True:
for x in range(1):
print(random.randint(1, 6))
resp = raw_input("Voce quer jogar novamente? s/n ")
if resp == 'n':
break
| true |
bf8944b72ab3ff0f93584bc907457977fa553c1e | Python | tremaru/pyiArduinoI2Cexpander | /pyiArduinoI2Cexpander/examples/reset.py | UTF-8 | 2,199 | 3.203125 | 3 | [
"MIT"
] | permissive | # Данный пример демонстрирует программную перезагрузку модуля.
# $ Строки со знаком $ являются необязательными.
from pyiArduinoI2Cexpander import * # Подключаем модуль для работы с расширителем выводов.
from time import sleep # Импортируем функцию ожидания из модуля времени
ext = pyiArduinoI2Cexpander(0x08) # Объявляем объект ext для работы с функциями модуля pyiArduinoI2Cexpander, указывая адрес модуля на шине I2C.
#
ext.pinMode(3, OUTPUT, ANALOG) # $ Конфигурируем вывод 3 на работу в качестве аналогового выхода.
ext.analogWrite(3, 2047) # Устанавливаем на 3 выводе уровень равный половине от максимума.
sleep(2) # Ждём две секунды.
ext.reset() # Перезагружаем модуль.
#
# ПРИМЕЧАНИЕ:
# Для проверки работы скетча подключите светодиод к 3 выводу.
# Сконфигурировав вывод №3 в режим аналогового выхода и подав на
# него аналоговый уровень, светодиод включится. Но через две сек.
# после его включения произойдёт перезагрузка модуля и все выводы
# переконфигурируются в значение по умолчанию (цифровые входы),
# что приведёт к исчезновению аналогового уровня с вывода №3 и,
# как следствие, отключению светодиода.
| true |
5b66ed95304dd717e850d3d3df784422430dfca5 | Python | leaxpm/piaPC | /autoNmap.py | UTF-8 | 1,025 | 2.71875 | 3 | [] | no_license | import logging
try:
import nmap
except:
logging.error("Falta la libreria nmap \n pip install python-nmap")
import requests
import socket
def publicIP():
"""
**PublicIP**
This Module find the public IP from the User
"""
req = requests.get("http://ifconfig.me")
return req.text
def scanner(ip):
"""
**Scanner**
This Module Scan a given IP for ports 1-1000
"""
scanner = nmap.PortScanner()
out = scanner.scan(ip,"1/1000")
return out
def privIp(): #Github Phanthaihuan
"""
**PrivIP**
This Module find the private IP from the User
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("google.com",80))
out = (s.getsockname()[0])
s.close()
return out
def exec(scan):
if scan == "private":
segmento = ".".join(privIp().split('.')[0:3])+".0/24"
return scanner(segmento)
elif scan == "public":
return scanner(publicIP())
| true |
af6b2f72b14a7038804b97ceb5d0bea58dea01d9 | Python | benkeanna/pyladies | /08/ukol6moje.py | UTF-8 | 287 | 2.78125 | 3 | [] | no_license | zvirata = ['pes', 'kočka', 'králík','had', 'andulka']
nova_zvirata = []
for zvire in zvirata:
prvek = zvire[1:], zvire
nova_zvirata.append(prvek)
print(nova_zvirata)
nova_zvirata.sort()
zvirata = []
for klic, zvire in nova_zvirata:
zvirata.append(zvire)
print(zvirata)
| true |
cff4a94d7f50472ab68699e5e8654733546d28b6 | Python | tamirez3dco/ThingsMaker | /quest/explorer/explore/test.py | UTF-8 | 375 | 2.65625 | 3 | [] | no_license | def all_params_perms(params):
if len(params) <=1:
return map(lambda x: [x], params[0])
else:
all_perms = []
for v in params[0]:
for perm in all_params_perms(params[1:]):
np = [v] + perm
all_perms.append(np)
return all_perms
pp = all_params_perms([[0,0.5,1],[0,0.2,0.4,0.6,0.8,1]])
print pp | true |
e149c25bb220b072370b4dcbf260acb1c3eafbd3 | Python | d-nalbandian0329/Python | /gomi11.py | UTF-8 | 556 | 3 | 3 | [] | no_license | #! /usr/bin/python
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
xmin, xmax = -np.pi, np.pi
x = np.arange(xmin, xmax, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# sin plot
plt.subplot(2, 1, 1)
plt.plot(x, y_sin)
plt.title("$\sin x$")
plt.xlim(xmin, xmax)
plt.ylim(-1.3, 1.3)
# cos plot
plt.subplot(2, 1, 2)
plt.plot(x, y_cos)
plt.title("$\cos x$")
plt.xlim(xmin, xmax)
plt.ylim(-1.3, 1.3)
# Avoid to duplicate graphtitle
plt.tight_layout()
plt.show()
plt.savefig("/Users/iwaitoshiya/Desktop/graph.png")
| true |
f3f51cab352a7207a68b6c7b17ececf7ad8a7c04 | Python | Awawdi/IDvalidator | /IDvalidator.py | UTF-8 | 2,166 | 3.78125 | 4 | [] | no_license | MAX_NUMBER = 999999999
class IDIterator:
def __init__(self, id):
self._list_of_ids = []
self._starting_point = id
self._pointer = -1
self._id = int(id)
# self._id = '{:09d}'.format(id)
#self._id = int(str(id).zfill(9))
# for i in range(999999999):
# self._list_of_ids.append('{:09d}'.format(i))
def __iter__(self):
return self
def __next__(self):
print("next correct ID number:")
while self._id < MAX_NUMBER:
self._id += 1
if validity_check(self._id):
return self.get_arg()
def get_arg(self):
return self._id
class Not9DigitNumber(Exception):
def __init__(self, arg):
self._arg = arg
def __str__(self):
return "ID needs to be 9 digits exactly: %s" % self._arg
def get_arg(self):
return self._arg
class NotADigitInput(Exception):
def __init__(self, arg):
self._arg = arg
def __str__(self):
return "ID needs to be digits only: %s" % self._arg
def get_arg(self):
return self._arg
class WrongIDNumber(Exception):
def __init__(self, arg):
self._arg = arg
def __str__(self):
return "wrong ID number: %s" % self._arg
def get_arg(self):
return self._arg
def validity_check(id_number):
list_of_IDs = list(map(int, str(id_number)))
list_of_IDs[1::2] = map(lambda x: x * 2, list_of_IDs[1::2])
list_of_IDs = map(lambda x: (x % 10 + x // 10), list_of_IDs)
num1 = sum(list_of_IDs)
if num1 % 10 != 0:
return False
else:
return True
def main():
id_number = "066722422" #ID number to start check
if not id_number.isdigit():
raise NotADigitInput(id_number)
if not len(id_number) == 9:
raise Not9DigitNumber(id_number)
if not validity_check(id_number):
raise WrongIDNumber(id_number)
else:
IDiterable = IDIterator(id_number)
for itr in range(10):
print(str(next(IDiterable)).zfill(9))
main()
| true |
2e2bfdb98ff5d551a0320eb9c22dbeb34d9afb99 | Python | losnikitos/coursera | /course3/week4/bellman.py | UTF-8 | 2,173 | 3.453125 | 3 | [] | no_license | # python 3
inf = float("inf")
def read_graph():
n_vertices, n_edges = map(int, input().split())
edges = [map(int, input().split()) for _ in range(n_edges)]
return Graph(n_vertices, edges)
class Graph:
def __init__(self, n_vertices=0, edges=[]):
self.vertices = set(range(n_vertices))
self.edges = [set() for v in self.vertices]
for (a, b, w) in edges:
self.edges[a - 1].add((b - 1, w))
def visit(self, u, visited=set(), path=set()):
if u in visited:
return False
visited.add(u)
path.add(u)
for v in self.edges[u]:
if v in path or self.visit(v):
return True
path.remove(u)
return False
def has_cycles(self, fr, dist):
dist[fr] = 0
for i in range(len(self.vertices) - 1):
changed = False
for u in self.vertices:
for (v, w) in self.edges[u]:
if dist[v] > dist[u] + w:
changed = True
dist[v] = dist[u] + w
if not changed:
return False
changed = False
for u in self.vertices:
for (v, w) in self.edges[u]:
if dist[v] > dist[u] + w:
changed = True
dist[v] = dist[u] + w
return changed
def has_negative_weight_cycles(self):
dist = [inf for v in self.vertices]
for v in self.vertices:
if dist[v] == inf:
if self.has_cycles(v, dist):
return True
return False
def __str__(self):
return '\n'.join(['%s: %s' % (v, self.edges[v]) for v in self.vertices])
# TEST
def test(g, answer):
res = g.has_negative_weight_cycles()
print('ok' if res == answer else 'wrong: expected %s instead of %s' % (answer, res))
# test(Graph(4, [(1, 2, -5), (4, 1, 2), (2, 3, 2), (3, 1, 1)]), True)
# test(Graph(4, [(1, 2, 5), (4, 1, 2), (2, 3, 2), (3, 1, 1)]), False)
# test(Graph(4, [(2, 3, -1), (3, 4, -1), (4, 2, -1)]), True)
# RUN
g = read_graph()
print(1 if g.has_negative_weight_cycles() else 0)
| true |
15d8b5cad319a16d72598937bcb3cc02632abd3c | Python | lsjsss/PythonClass | /PythonBookAdditional/第02章 Python序列/code/Stack.py | UTF-8 | 1,670 | 3.75 | 4 | [
"MIT"
] | permissive | '''
Author: Dong Fuguo
QQ: 306467355
Wmail: dongfuguo2005@126.com
Date: 2014-11-10, Updated on 2015-12-13
'''
class Stack:
def __init__(self, size = 10):
self._content = [] #使用列表存放栈的元素
self._size = size #初始栈大小
self._current = 0 #栈中元素个数初始化为0
def empty(self):
self._content = []
self._current = 0
def isEmpty(self):
if not self._content:
return True
else:
return False
def setSize(self, size):
#如果缩小栈空间,则删除指定大小之后的已有元素
if size < self._current:
for i in range(size, self._current)[::-1]:
del self._content[i]
self._current = size
self._size = size
def isFull(self):
if self._current == self._size:
return True
else:
return False
def push(self, v):
if len(self._content) < self._size:
self._content.append(v)
self._current = self._current+1 #栈中元素个数加1
else:
print('Stack Full!')
def pop(self):
if self._content:
self._current = self._current-1 #栈中元素个数减1
return self._content.pop()
else:
print('Stack is empty!')
def show(self):
print(self._content)
def showRemainderSpace(self):
print('Stack can still PUSH ', self._size-self._current, ' elements.')
if __name__ == '__main__':
print('Please use me as a module.')
| true |
e65a4776cbbfe0a7d4567bbf4a9f3d8bdef37b3d | Python | LARC-CMU-SMU/fullmoon | /scripts/simulate_occupants.py | UTF-8 | 2,366 | 2.765625 | 3 | [] | no_license | import random
import time
from pprint import pprint
import json
from scripts.m_util import execute_sql_for_dict
OCCUPANCY_SQL= """INSERT INTO occupancy(timestamp, occupancy, cam_label, cubical_label, occupant_coordinates) VALUES (%s, %s, %s, %s, %s)"""
OCCUPANCY_CACHE_SQL = "INSERT INTO occupancy_cache(timestamp, occupancy, cam_label, cubical_label, occupant_coordinates) VALUES (%s, %s, %s, %s, %s)" \
"ON CONFLICT (cam_label,cubical_label) DO UPDATE " \
"SET timestamp = excluded.timestamp, occupancy = excluded.occupancy, occupant_coordinates = excluded.occupant_coordinates;"
cubicles = {
'a': {"x_min": 1139, "y_min": 240, "x_max": 1616, "y_max": 592},
'b': {"x_min": 330, "y_min": 640, "x_max": 1315, "y_max": 1080},
'c': {"x_min": 573, "y_min": 99, "x_max": 954, "y_max": 435},
'd': {"x_min": 123, "y_min": 297, "x_max": 572, "y_max": 637},
}
CAM_LABEL="b"
def get_time():
return(int(time.time()))
def get_random_occupancy():
rand_int = random.randint(0, 5)
if rand_int % 2 == 0:
return True
return False
def get_random_occupants(boundry, max_occupancy=2):
ret = []
for i in range(random.randint(1,max_occupancy)):
x_min, x_max = sorted([random.randint(boundry["x_min"], boundry["x_max"]), random.randint(boundry["x_min"], boundry["x_max"])])
y_min, y_max = sorted([random.randint(boundry["y_min"], boundry["y_max"]), random.randint(boundry["y_min"], boundry["y_max"])])
ret.append({"x_min":x_min, "x_max":x_max, "y_min":y_min, "y_max":y_max})
return json.dumps(ret)
def to_db():
ret = {}
for key, boundry in cubicles.items():
occupancy = get_random_occupancy()
occupancy_coord = None
if occupancy:
occupancy_coord=get_random_occupants(boundry)
ret[key]={"timestamp":get_time(), "label":key, "occupancy":occupancy, "occupant_coordinates":occupancy_coord}
return ret
def insert_to_db(data):
for values in data.values():
print(values)
sql_values=[values["timestamp"], values["occupancy"], CAM_LABEL, values["label"], values["occupant_coordinates"]]
execute_sql_for_dict(OCCUPANCY_SQL, sql_values)
execute_sql_for_dict(OCCUPANCY_CACHE_SQL, sql_values)
occupancy_data = to_db()
pprint(occupancy_data)
insert_to_db(occupancy_data)
| true |
2963282f8427c8cbd078d0b5f1e3a216b33b2d7e | Python | storrealba09/citiy_mean | /puzzle.py | UTF-8 | 536 | 3.203125 | 3 | [] | no_license | #Import Libraries
import pandas as pd
import plotly.express as px
#Format floats on panda
pd.options.display.float_format = '${:,.2f}'.format
#Read and uppercase dataframe
df = pd.read_csv('Data+for+TreefortBnB+Puzzle.csv')
df['City'] = df['City'].str.upper()
#Group Dataframe by City and aggregate the price mean
grouped = df.groupby('City').agg({'$ Price':['mean']}).reset_index()
#Visualization formatting and execution
grouped.columns = ['City', 'Mean']
fig = px.bar(grouped, x = ('City'), y= ('Mean'))
fig.show()
| true |
490fd095d9b0cd83f8412a852e23124e809685c1 | Python | ilkhem/market-report | /volume-index.py | UTF-8 | 14,843 | 2.859375 | 3 | [] | no_license | """
Script for calculating a 'volume-index' equal to the volume normalized by the spread i.e. the volume for a
constant spread of 1USD.
Data to extract from dumps:
from trade dumps: id,exchange,symbol,date,price,amount,sell (14698813,bf,btcusd,1451606422000,429.17,1.6817,false)
- price per minute: price of last trade of this minute
- number of trades per minute
- volume of trades per minute
from ob1 dumps: date,type,price,amount (1451606456000,b,428.98,5.7347)
- spread per minute
Clipping functions: (alpha equals to 2, but can be changed to change the width of the interval)
f1: clip to [0, mean + alpha*std]
f2: clip to [0, mean + alpha*rolling_std]
f3: clip to [0, rolling_mean + alpha*rolling_std]
f4: clip to [0, clean_mean + alpha*rolling_std] (clean_* is for * calculated by excluding the outliers)
f5: clip to [0, clean_mean + alpha*clean_std] (clean_* is for * calculated by excluding the outliers)
-> f5 showed the best performance overall (especially in crisis periods, and when exchanges go dark), it is
the one we will use.
RATES define CURRENCY to USD conversion rate.
@author: ilkhem
"""
import argparse
import glob
import json
import os
import urllib.request
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
from utils import parse_date, parse_month, get_dates, eod_to_ts
__author__ = 'ilkhem'
MISSING_DUMPS = ''
def rates_by_minute(from_ts, to_ts):
"""
returns CNY to USD, USD to USD and EUR to USD rates from the European Central Bank using the api of fixer.io
:param from_ts: starting timestamp
:param to_ts: ending timestamp
:return: df; index: dates with freq=1Min, columns: ['btcusd','btccny','btceur'] (pairs used instead of currencies)
"""
url = 'http://api.fixer.io/%s?%s'
# date range for rates to be used with the api
days = pd.date_range(pd.to_datetime(from_ts, unit='s').floor('D'), pd.to_datetime(to_ts, unit='s').floor('D'),
freq='D')
params = 'base=USD&symbols=CNY,EUR'
# a minute frequency date range to construct a rate by minute df based on the returns of the api,
# and by forward filling
rng = pd.date_range(pd.to_datetime(from_ts, unit='s'), pd.to_datetime(to_ts, unit='s'), freq='1Min')
rates = pd.DataFrame(index=rng)
for day in days:
js = json.loads(urllib.request.urlopen(url % (day.date(), params)).read().decode("utf-8"))
rates.loc[day, 'btcusd'] = 1
rates.loc[day, 'btceur'] = 1 / js["rates"]["EUR"]
rates.loc[day, 'btccny'] = 1 / js["rates"]["CNY"]
# since days may include daytime values not in rng (the day is considered at midnight, if start is not at midinght,
# this will be the case. So drop the value that is prior to rng[0]
return rates.sort_index().ffill().drop(rates[rates.index < rng[0]].index)
def ts_to_min(ts):
"""
transforms a timestamp in MILLISECONDS into the timestamp of the minute it belongs to in SECONDS
:param ts: timestamp in MILLISECONDS, can be either a string or an int
:return: timestamp of the minute in SECONDS
"""
t = int(ts)
return t - t % 60000
def read_trades(file):
"""
read trade dumps to extract the price, trade count, and volume per minute
:param file: path to a trade dump file (.csv like file)
:return: a list of lists, each element is a list of [timestamp, volume, count, price]
"""
with open(file) as f:
f.readline() # skip header line
ts = 0 # current timestamp, initialize at 0
vcp = [] # list of [ts,v,c,p]
v = 0 # volumes
c = 0 # count
p = 0 # price
for line in f:
line_split = line.split(',')
# for a given timestamp, this condition is only true when we first enter a minute
# we re-initialize all the variables but the outputs at each new timestamp
if ts != ts_to_min(line_split[3]):
if c != 0:
vcp += [[ts, v, c, p]]
ts = ts_to_min(line_split[3])
# reinitialize
c = 0
v = 0
p = 0
c += 1
v += float(line_split[5])
p = float(line_split[4])
vcp += [[ts, v, c, p]]
return vcp
def read_ob(file):
    """
    read ob_1/ob_10 dumps to extract the spread per minute. works faster with ob_1
    :param file: path to a ob_1(0) dump file (.csv like file)
    :return: a list of lists, each element is a list of [timestamp, spread]
    """
    with open(file) as f:
        f.readline()  # skip header line
        ts = 0  # current timestamp, initialize at 0
        sl = []  # output: list of [ts, spread] rows (the original comment said [ts,v,c,p] -- a copy-paste slip)
        b = 0  # limit bid at current timestamp
        a = 0  # limit ask at current timestamp
        ask = False  # flips to True once we reach the ask side of the book for this timestamp
        # columns used below: 0 = timestamp (ms), 1 = side flag ('a' = ask), 2 = price level
        for line in f:
            line_split = line.split(',')
            # for a given timestamp, this condition is only true when we first encounter that timestamp in BID side
            # we re-initialize all the variables but the outputs at each new timestamp
            if ts != ts_to_min(line_split[0]):
                ts = ts_to_min(line_split[0])  # update the timestamp
                b = float(line_split[2])  # the first line for each new timestamp is the limit bid
                ask = False  # we work on the bid side
            # for a given timestamp, this condition is only true when we first encounter that timestamp in ASK side
            # we re-initialize all the variables but the outputs each time we move from bids to asks
            if not ask and line_split[1] == 'a':
                ask = True
                a = float(line_split[2])
                sl += [[ts, a - b]]  # spread = best ask - best bid
    return sl
# clipping functions: each caps a series from below at 0 and from above at a
# (mean + alpha * std)-style threshold, with different smoothing strategies.
def f1(ts, alpha):
    """Clip to [0, global mean + alpha * global std]."""
    upper = ts.mean() + alpha * ts.std()
    return ts.clip(0, upper)


def f2(ts, alpha, w):
    """Clip using a rolling std of window w (global std where undefined)."""
    rolling_std = ts.rolling(w).std().fillna(ts.std())
    return ts.clip(0, ts.mean() + alpha * rolling_std)


def f3(ts, alpha, w1, w2):
    """Clip using a rolling mean (window w1) and a rolling std (window w2)."""
    rolling_mean = ts.rolling(w1).mean().fillna(ts.mean())
    rolling_std = ts.rolling(w2).std().fillna(ts.std())
    return ts.clip(0, rolling_mean + alpha * rolling_std)


def f4(ts, alpha, w):
    """Like f2, but the upper bound is centred on the outlier-free mean."""
    rolling_std = ts.rolling(w).std().fillna(ts.std())
    threshold = ts.mean() + alpha * rolling_std
    clean_ts = ts.drop(ts[ts > threshold].index)
    return ts.clip(0, clean_ts.mean() + alpha * rolling_std)


def f5(ts, alpha):
    """Like f1, but mean and std are recomputed after dropping outliers."""
    threshold = ts.mean() + alpha * ts.std()
    clean_ts = ts.drop(ts[ts > threshold].index)
    return ts.clip(0, clean_ts.mean() + alpha * clean_ts.std())
def process_exchange(directory, xch, pair, from_ts, to_ts, orig=False):
    """
    reads the dumps for a given exchange, extracts the price, volume, trade count and spread per minute,
    filling empty values with 0 for volume and count, and a forward fill for price and spread
    :param directory: general directory of the dumps, as downloaded using aws.py
    :param xch: FULL exchange name (not the slug)
    :param pair: pair
    :param from_ts: timestamp of the starting month, can be a timestamp or a date (e.g.: '2016', '2016-03-31').
    all the month is loaded even if the timestamp is not of the month's start
    :param to_ts: timestamp of the ending month, can be a timestamp or a date (e.g.: '2016', '2016-03-31').
    all the month is loaded even if the timestamp is not of the month's end
    :param orig: return original values before applying a clipping function
    :return: df, or (df,df_orig) if orig. index : date, columns: 's', 'v', 'v', 'p'
    """
    global MISSING_DUMPS # for modifying global variable (missing paths are recorded, not raised)
    if directory != '' and directory[-1] != '/':
        directory += '/'
    dates = get_dates(parse_date(from_ts), parse_date(to_ts))
    # Path template per month: the '%s' is substituted later with the dump type ('trades' / 'ob_1').
    filepaths = [
        directory + '%s/' + pair + '/' + str(d.year) + '/' + parse_month(d.month) + '/' + xch + '/' for
        d in dates]
    df = pd.DataFrame()
    df_orig = pd.DataFrame()
    for fp in filepaths:
        tr = []
        ob = []
        fpt = fp % 'trades'
        if not os.path.exists(fpt):
            MISSING_DUMPS += fpt + ' not found\n'
        for f in glob.glob(fpt + '*.csv'):
            print(f)
            tr += read_trades(f)
        fpo = fp % 'ob_1'
        if not os.path.exists(fpo):
            MISSING_DUMPS += fpo + ' not found\n'
        for f in glob.glob(fpo + '*.csv'):
            print(f)
            ob += read_ob(f)
        if tr != [] and ob != []:
            # Dump timestamps are in milliseconds, hence the /1000 before unit='s'.
            tr_df = pd.DataFrame(tr, columns=['date', 'v', 'c', 'p'])
            tr_df = tr_df.set_index(pd.to_datetime(tr_df.date.values / 1000, unit='s')).drop('date', axis=1).resample(
                '1Min').mean().sort_index().fillna({'c': 0, 'v': 0}).ffill()
            ob_df = pd.DataFrame(ob, columns=['date', 's'])
            ob_df = ob_df.set_index(pd.to_datetime(ob_df.date.values / 1000, unit='s')).drop('date', axis=1).resample(
                '1Min').mean().sort_index().ffill()
            ob_df_orig = ob_df.copy()  # keep a copy of the spreads before smoothing
            # smooth spreads
            # first, clip outliers using one of the clipping functions (f1, f2, ..., f5)
            # best performance overall is achieved by using the f5 clip function
            ob_df.s = f5(ob_df.s, 2)
            # second, merge the spreads with volumes and prices
            # NOTE(review): DataFrame.append was deprecated and later removed in
            # pandas 2.x -- confirm the pinned pandas version before running.
            df = df.append(pd.merge(tr_df, ob_df,
                                    how='inner', left_index=True, right_index=True))
            df_orig = df_orig.append(pd.merge(tr_df, ob_df_orig,
                                              how='inner', left_index=True,
                                              right_index=True))  # a copy of df before smoothing
    # third, remove lines where the spread is higher than 1% of the price
    # since we used the f5 clipping function, this step is just an additional layer of protection against outliers
    df = df.drop(df[df.s >= 0.01 * df.p].index, axis=0)
    # Since reading files is done by months (due to how get_dates is coded), from_ts and to_ts are applied here
    if not df.empty:
        df = df.loc[pd.to_datetime(from_ts, unit='s'):pd.to_datetime(to_ts, unit='s')]
        df_orig = df_orig.loc[pd.to_datetime(from_ts, unit='s'):pd.to_datetime(to_ts, unit='s')]
    if orig:
        return df, df_orig
    return df
def process(directory, xch_list, from_ts, to_ts):
    """
    processes a list of (exchange,pair), and returns the new normalized volume per exchange_pair
    :param directory: general directory of the dumps, as downloaded using aws.py
    :param xch_list: a list of (exchange,pair) pairs to be processed
    :param from_ts: timestamp of the starting month, can be a timestamp or a date (e.g.: '2016', '2016-03-31').
    all the month is loaded even if the timestamp is not of the month's start
    :param to_ts: timestamp of the ending month, can be a timestamp or a date (e.g.: '2016', '2016-03-31').
    all the month is loaded even if the timestamp is not of the month's end
    :return: df. index: date, columns: 'exchange_pair' for exchange, pair in xch_list
    """
    dfs = {}
    df = pd.DataFrame()
    for xch, pair in xch_list:
        dfs[xch + '_' + pair] = process_exchange(directory, xch, pair, from_ts, to_ts)
    if not all([dfs[x].empty for x in dfs]):
        rates = rates_by_minute(from_ts, to_ts)
        # Use the index of the exchange with the most rows as the base index:
        # the dict comprehension maps name -> row count, and sorting by that
        # count (descending) puts the longest one first.
        df = pd.DataFrame(index=dfs[
            sorted({x: len(dfs[x]) for x in dfs}, key={x: len(dfs[x]) for x in dfs}.get, reverse=True)[0]].index)
        for xch, pair in xch_list:
            try:
                # Normalized volume = raw volume weighted by the squared converted spread.
                df[xch + '_' + pair] = dfs[xch + '_' + pair]['v'] * ((dfs[xch + '_' + pair]['s'] * rates[pair]) ** 2)
            except KeyError:
                # Skip exchanges with no data, or pairs missing a conversion rate column.
                pass
    return df
if __name__ == '__main__':
    # CLI entry point: parse arguments, compute normalized volumes, save CSVs
    # and upload a private plotly chart.
    parser = argparse.ArgumentParser(description='Creates the volume-index for selected (exchange,pair)s')
    parser.add_argument('directory',
                        help='directory to dumps as downloaded with aws.py')
    parser.add_argument('from_ts',
                        help='start timestamp in seconds')
    parser.add_argument('to_ts',
                        help='end timestamp in seconds')
    parser.add_argument('-e', '--exchange', nargs='+', required=True,
                        help='<Required> List of exchange_pair to be processed. '
                             'Entry should be in this particular format: exchange_pair.'
                             'Exemple use: -e bitfinex_btcusd huobi_btccny')
    parser.add_argument('-vd', '--volume-directory', default='', action='store',
                        help="local directory to store normalized-volume files, default is current folder ")
    args = parser.parse_args()
    args_dict = vars(args)
    directory = args_dict['directory']
    if directory[-1] != '/':
        directory += '/'
    # parse_date / eod_to_ts are helpers defined earlier in this module (outside this view).
    from_ts = parse_date(args_dict['from_ts'])
    to_ts = eod_to_ts(parse_date(args_dict['to_ts']))
    xch_list = [(xp.split('_')[0].lower(), xp.split('_')[1].lower()) for xp in args_dict['exchange']]
    volume_directory = args_dict['volume_directory']
    if volume_directory != '':
        if volume_directory[-1] != '/':
            volume_directory += '/'
        if not os.path.exists(volume_directory):
            os.makedirs(volume_directory)
    print('\nCalculating normalized volume ... \n')
    df = process(directory, xch_list, from_ts, to_ts)
    print('\nprocessing done!\n')
    filename = 'normalized-volume-' + str(from_ts) + '-' + str(to_ts) + '-' + '-'.join(
        xp for xp in args_dict['exchange'])
    if not df.empty:
        # Save the per-minute index and a per-day aggregate.
        df.to_csv('%s%s.csv' % (volume_directory, filename))
        print(' saved to %s%s.csv' % (volume_directory, filename))
        grouped = df.resample('1D').sum()
        grouped.to_csv('%s%s-day.csv' % (volume_directory, filename))
        print(' saved to %s%s-day.csv\n' % (volume_directory, filename))
        # Plotting grouped
        trace = [go.Scatter(x=grouped.index, y=grouped[column], mode='lines+markers', name=column) for column in
                 grouped]
        layout = go.Layout(
            title='Standardized volume (' + ', '.join(x + '_' + p for x, p in xch_list) + ')',
            xaxis=dict(
                title=''
            ),
            yaxis=dict(
                title='Standardized volume',
            ),
        )
        fig = go.Figure(data=trace, layout=layout)
        # py / go are the plotly modules imported earlier in this file.
        plot_url = py.plot(fig, sharing='private',
                           filename='standardized-volume/' + '-'.join(x + '_' + p for x, p in xch_list) + '-' + str(
                               from_ts) + '-' + str(to_ts), auto_open=False)
        print(' Plot url:', plot_url)
        print(MISSING_DUMPS)
        print('\n> done!\n')
    else:
        print(MISSING_DUMPS)
        print('\nno data was found, please check your dumps\n')
| true |
091373057df3ad63001d3a0bbd8a522c224490fa | Python | ljanzen17/cmpt120janzen | /guessing-game.py | UTF-8 | 850 | 4.09375 | 4 | [] | no_license | #guessing-game.py
# guessing-game.py
#
# Fixes over the original version:
#  * the loop condition `animal != 'guess'` was always true and never actually
#    controlled the game, so the loop is now an explicit `while True`
#  * quitting compared only the first letter, so any guess starting with 'q'
#    (e.g. "quail") ended the game; the full word 'quit' is now required
#  * `guess.lower()[0]` crashed with an IndexError on empty input
#  * a stray `break` at the end of the loop body ended the game after a single
#    wrong guess, despite the "please try again" message
animal = 'dog'
end = 'quit'
while True:
    print('The program is thinking of an animal. Please try to guess the name of the animal')
    guess = input()
    if guess.lower() == end:
        print('Thanks for playing!')
        break
    if guess.lower() != animal:
        print("Sorry that is the wrong animal, please try again. If you would like to give up type quit when it asks for the animal name.")
    else:
        print('Yes, that is the animal I was thinking of. Congratulations!')
        print('If you like this animal type y if you do not type n')
        like = input()
        if like == 'y':
            print("I agree, that is a great animal!")
        else:
            print("I don't like that animal either.")
        break
| true |
539059c3ac72ce24e927d6fd00c4be155cd5e292 | Python | aroberge/talks | /pycon-2021/turtle.py | UTF-8 | 90 | 3.609375 | 4 | [
"MIT"
] | permissive | # Draw a square
import turtle as t

# Draw a square: four sides of length 100, turning 90 degrees left after each.
for _ in range(4):
    t.forward(100)
    t.left(90)
| true |
6e88c87c1621abdc56219685f0a47c95210f847b | Python | GinnyGaga/2019.11-LJ-CODE | /PythonTest/class02.py | UTF-8 | 450 | 4.15625 | 4 | [] | no_license | # ==================== 进阶:能掌握就掌握,不能掌握就算了 ==============================
# 类的构造方法
class Person():
    # Class-level defaults (overridden per instance by __init__):
    # a default name and sex.
    name = "李博宇"
    sex = "男"

    # Constructor (the original comment, in Chinese, noted this is the fixed
    # form for initializing the class): stores sex `xb` and name `mz`, then
    # calls self.test() to demonstrate method dispatch.
    def __init__(self, xb, mz):
        self.sex = xb
        self.name = mz
        self.test()
    def test(self):
        # Prints a demo message ("this is the test method").
        print("这是test方法")
# Demo: construct a Person (which also prints from test()) and show that the
# constructor arguments override the class-level defaults.
d = Person("女", "王广发")
print(d.sex)
print(d.name)
| true |
9ad8aa9dcc2c91be866c5ec84d49b7501858ba27 | Python | RevelcoS/RegionOlimp | /hw_1/p_3/input.py | UTF-8 | 116 | 2.71875 | 3 | [] | no_license | from union import union
a = list(map(int, input().split()))
b = list(map(int, input().split()))
print(*union(a, b)) | true |
572195aa606705dcfb2811caf7a59aea63ce1875 | Python | zhaoqian3355/WebScraping | /wikiPedia/testCode.py | UTF-8 | 1,703 | 2.984375 | 3 | [] | no_license | from urllib.request import urlopen
from bs4 import BeautifulSoup
import unittest
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver import ActionChains
class TestAddition(unittest.TestCase):
    """Minimal demonstration of the unittest fixture lifecycle."""

    def setUp(self):
        # Runs before every test method.
        print("Setting up the test")

    def tearDown(self):
        # Runs after every test method.
        print("Tearing down the test")

    def test_twoPlusTwo(self):
        total = 2 + 2
        self.assertEqual(4, total)
class TestWikipedia(unittest.TestCase):
    """Scraping tests against the live Wikipedia 'Monty Python' article."""
    bsObj = None

    @classmethod
    def setUpClass(cls):
        """Fetch and parse the page once for every test in the class."""
        global bsObj
        url = "http://en.wikipedia.org/wiki/Monty_Python"
        # Name the parser explicitly: bs4 emits a warning (and may pick a
        # different parser on different machines) when it is left implicit.
        bsObj = BeautifulSoup(urlopen(url), "html.parser")

    def test_titleText(self):
        """The page <h1> should hold the article title."""
        global bsObj
        pageTitle = bsObj.find("h1").get_text()
        self.assertEqual("Monty Python", pageTitle)

    def test_contentExists(self):
        """The main content container should be present on the page.

        The original asserted ``assertIsNone``, which contradicts the test
        name and always fails on the live page where the content div exists.
        """
        global bsObj
        content = bsObj.find("div", {"id": "mw-content-text"})
        self.assertIsNotNone(content)
class TestDrag(unittest.TestCase):
    # Selenium test: drags the "draggable" element onto "div2" and verifies
    # the page reports success.
    # NOTE(review): PhantomJS support was removed from recent Selenium
    # releases -- confirm the pinned selenium version before running.
    driver=None
    def setUp(self):
        global driver
        # NOTE(review): a driver is created per test but never quit in
        # tearDown, so each run leaks a browser process.
        driver=webdriver.PhantomJS(executable_path="phantomjs")
        url='http://pythonscraping.com/pages/javascript/draggableDemo.html'
        driver.get(url)
    def tearDown(self):
        print("Tearing down the test")
    def test_drag(self):
        global driver
        element=driver.find_element_by_id("draggable")
        target=driver.find_element_by_id("div2")
        actions=ActionChains(driver)
        actions.drag_and_drop(element,target).perform()
        self.assertEqual("You are definitely not a bot!",driver.find_element_by_id("message").text)

if __name__=='__main__':
    unittest.main()
e3d75c003c99371b7547289e92b60c1adb7ada2d | Python | vwvolodya/ads | /GRAPHs/graphs/maze/python/maze.py | UTF-8 | 15,283 | 3.671875 | 4 | [] | no_license | import random
import time
from Tkinter import *
def main():
    """Build the maze UI and wire the Go button to generate, relax and solve a maze."""
    height = 25
    width = 40
    maze_cell_size = 25
    animation_delay = 0.005

    def begin_simulation():
        # The three callbacks below let the algorithms drive the animation
        # without knowing anything about the Tk widgets.
        def on_cell_created(new_cell, previous_cell):
            ui.draw_cell(new_cell)
            if previous_cell is not None:
                ui.draw_cell(previous_cell)
            ui.do_animation_delay()

        def on_cell_solved(cell, step_count):
            ui.draw_cell(cell, color=UI.color_solved_cell_background, text=str(step_count))
            ui.do_animation_delay()

        def on_exit_path_updated(cell, previous_cell):
            ui.draw_trace(cell, previous_cell)
            ui.do_animation_delay()

        # Generate a perfect maze (the one that has a unique path between every two cells).
        maze = generate_maze(height, width, on_cell_created)

        # Make the maze more interesting by removing random walls, thus producing alternative paths.
        remove_random_walls(maze, wall_count=height * width / 15)
        ui.draw_maze(maze)

        # Calculate shortest paths to every cell and backtrace the path between start and end.
        path_lengths, exit_path = solve_maze(maze, on_cell_solved, on_exit_path_updated)

    ui = UI(height, width, maze_cell_size, animation_delay)
    ui.set_action_for_go_button(begin_simulation)
    ui.run()
def generate_maze(height, width, cell_created_callback=None):
    """Carve a perfect maze (unique path between any two cells) via randomized DFS.

    :param height: grid height, in cells
    :param width: grid width, in cells
    :param cell_created_callback: optional fn(new_cell, previous_cell) invoked
        after each carve so a UI can animate the construction
    :return: the fully carved Maze
    """
    maze = Maze(height, width)

    # Just doing the typical DFS: putting the start cell in the stack.
    # But in order to generate a maze, we'll also remember the cell we came from.
    # Whenever we process a new cell, we'll break the wall between the current cell
    # and the cell we came from. Therefore, we'll need to store multiple values in one stack item.
    start_cell = maze.get_cell_by_coordinate(0, 0)
    previous_cell = None
    stack = [(start_cell, previous_cell)]

    visited = [
        [False for x in range(0, width)]
        for y in range(0, height)
    ]

    while len(stack) > 0:
        # Retrieve another pair of cells from the stack.
        current_cell, previous_cell = stack.pop()

        # Ignoring the cell if we've already been there.
        if visited[current_cell.y][current_cell.x]:
            continue

        # Mark this cell so we won't visit it anymore.
        visited[current_cell.y][current_cell.x] = True

        # Break the wall between the current cell and the previous cell.
        if previous_cell is not None:
            maze.break_wall_between_cells(previous_cell, current_cell)

        # Notify the UI that we've created a new cell.
        if cell_created_callback is not None:
            cell_created_callback(current_cell, previous_cell)

        # Get all neighbors that are within the bounds of the maze and have all 4 walls intact.
        neighbors = maze.get_adjacent_cells(current_cell)
        unvisited_neighbors = list([
            neighbor
            for neighbor in neighbors
            if not visited[neighbor.y][neighbor.x] and neighbor.has_all_walls()
        ])

        # Shuffle the neighbors so that we visit them in random order.
        # That will make our maze much more interesting.
        random.shuffle(unvisited_neighbors)
        stack.extend([
            (neighbor, current_cell) for neighbor in unvisited_neighbors
        ])

    return maze
def solve_maze(maze, cell_solved_callback=None, exit_path_updated_callback=None):
    """BFS the maze from (0, 0), then backtrace the path to the far corner.

    :param cell_solved_callback: optional fn(cell, step_count) fired when a
        cell's shortest distance is finalized
    :param exit_path_updated_callback: optional fn(cell, previous_cell) fired
        for each segment of the reconstructed exit path
    :return: (path_lengths matrix, exit_path list from exit back to start)
    """
    # Just doing the typical BFS, but, in addition to the cell itself,
    # we'll also store the length of the (start -> cell) path in the queue.
    # Because it's BFS and the graph is not weighted, the path length will
    # also be the MINIMUM path length. Obviously, the length is 0 for the start cell.
    start_cell = maze.get_cell_by_coordinate(0, 0)
    steps_taken = 0
    queue = [(start_cell, steps_taken)]

    visited = [
        [False for x in range(0, maze.width)]
        for y in range(0, maze.height)
    ]

    # Remembering the shortest path lengths for every cell in the maze.
    # Initially, we don't know any shortest paths, so it's None for every cell.
    path_lengths = [
        [None for x in range(0, maze.width)]
        for y in range(0, maze.height)
    ]

    while len(queue) > 0:
        # Fetch the next (cell, steps) pair from the queue.
        current_cell, steps_taken = queue.pop(0)

        # If we've already analyzed this cell, ignore it completely.
        if visited[current_cell.y][current_cell.x]:
            continue

        # Mark the cell as visited so that we won't visit it anymore.
        visited[current_cell.y][current_cell.x] = True
        path_lengths[current_cell.y][current_cell.x] = steps_taken

        # Notify the UI that we've solved a new cell.
        if cell_solved_callback is not None:
            cell_solved_callback(current_cell, steps_taken)

        # Discovering the unvisited neighbors that are reachable from the current_cell
        # (the ones that don't have walls between them and our cell).
        unvisited_neighbors = [
            neighbor
            for neighbor in maze.get_adjacent_reachable_cells(current_cell)
            if not visited[neighbor.y][neighbor.x]
        ]

        # Every neighbor will have a (moves + 1) minimum path length.
        queue.extend([
            (neighbor, steps_taken + 1)
            for neighbor in unvisited_neighbors
        ])

    # Now that we've computed the matrix of all shortest path lengths,
    # we can reconstruct the path from the end cell to the start cell.
    exit_path = trace_exit_path(
        maze,
        path_lengths,
        maze.get_cell_by_coordinate(0, 0),
        maze.get_cell_by_coordinate(maze.height - 1, maze.width - 1)
    )

    # Notify the UI that we've traced one more cell from the exit path.
    if exit_path_updated_callback is not None:
        for i in range(1, len(exit_path)):
            exit_path_updated_callback(exit_path[i], exit_path[i - 1])

    return path_lengths, exit_path
def remove_random_walls(maze, wall_count):
    """Knock down up to ``wall_count`` safe horizontal walls to create loops.

    Only walls whose removal cannot open up a large empty area are candidates
    (see the diagram below), so the maze gains alternative paths while still
    looking like a maze.
    """
    suitable_cells_for_removal = []
    for y in range(1, maze.height - 2):
        x = 1
        while x < maze.width - 2:
            # If there's a horizontal wall that spans 3 consecutive cells,
            # we're safe to remove the middle wall without introducing any open areas:
            #
            # ..........    ..........
            # . .  . . .    . .  . . .
            # ========== -> ===....===
            # . .  . . .    . .  . . .
            # ..........    ..........
            #
            cell = maze.get_cell_by_coordinate(y, x)
            if cell.has_wall(0) and maze.cells[y][x - 1].has_wall(0) and maze.cells[y][x + 1].has_wall(0):
                suitable_cells_for_removal.append(cell)
                x += 2
            else:
                x += 1

    random.shuffle(suitable_cells_for_removal)
    remove_list = suitable_cells_for_removal[:wall_count]
    for cell in remove_list:
        # Wall index 0 is the northern wall, so break it symmetrically with
        # the cell just above.
        north_neighbor = maze.get_cell_by_coordinate(cell.y - 1, cell.x)
        maze.break_wall_between_cells(cell, north_neighbor)
def trace_exit_path(maze, path_lengths, start_cell, exit_cell):
    """Backtrack from ``exit_cell`` to ``start_cell`` along decreasing BFS distances.

    :param path_lengths: matrix of shortest-path lengths produced by solve_maze
    :return: list of cells, ordered from exit_cell back to start_cell
    """
    path = [exit_cell]
    current_cell = exit_cell

    # At each step, move to any neighbor cell that has a path length of (steps - 1)
    # until we reach the start cell.
    while not (current_cell.y == start_cell.y and current_cell.x == start_cell.x):
        next_step_cell = [
            neighbor
            for neighbor in maze.get_adjacent_reachable_cells(current_cell)
            if path_lengths[neighbor.y][neighbor.x] == path_lengths[current_cell.y][current_cell.x] - 1
        ][0]

        # Once we've found a suitable neighbor, add it to the result.
        path.append(next_step_cell)
        current_cell = next_step_cell

    return path
class Cell:
    """One maze cell: its (y, x) grid position plus four wall flags.

    Wall indices follow Maze.steps_to_reach_neighbors order:
    0 = North, 1 = East, 2 = South, 3 = West.
    """

    def __init__(self, y, x, walls):
        self.y = y
        self.x = x
        self.walls = walls

    def has_wall(self, index):
        """Return True if the wall on the given side is still standing."""
        return self.walls[index]

    def has_all_walls(self):
        """Return True while no wall of this cell has been broken yet."""
        return self.walls.count(True) == len(self.walls)

    def break_wall(self, index):
        """Knock down the wall on the given side."""
        self.walls[index] = False

    def __str__(self):
        return "x = %d, y = %d" % (self.x, self.y)
class Maze:
    """A rectangular grid of Cell objects with wall bookkeeping helpers."""

    # Offsets (dy, dx) from a cell to each neighbor. The position of an
    # offset in this list doubles as the wall index on a cell.
    steps_to_reach_neighbors = [
        (-1, 0),  # North
        (0, 1),   # East
        (1, 0),   # South
        (0, -1),  # West
    ]

    def __init__(self, height, width):
        """Create a height x width grid where every cell starts fully walled."""
        self.height = height
        self.width = width
        self.cells = []
        for y in range(height):
            row = [Cell(y, x, [True, True, True, True]) for x in range(width)]
            self.cells.append(row)

    def get_cell_by_coordinate(self, y, x):
        """Return the cell at row y, column x."""
        return self.cells[y][x]

    def break_wall_between_cells(self, cell1, cell2):
        """Remove the shared wall, updating both adjacent cells symmetrically."""
        dy = cell1.y - cell2.y
        dx = cell1.x - cell2.x
        # cell1 sees cell2 in the (-dy, -dx) direction, and vice versa.
        cell1.break_wall(self.steps_to_reach_neighbors.index((-dy, -dx)))
        cell2.break_wall(self.steps_to_reach_neighbors.index((dy, dx)))

    def exists_wall_between_cells(self, cell1, cell2):
        """Return True if a wall still separates the two adjacent cells."""
        dy = cell1.y - cell2.y
        dx = cell1.x - cell2.x
        return cell2.has_wall(self.steps_to_reach_neighbors.index((dy, dx)))

    def get_adjacent_cells(self, cell):
        """Return the in-bounds N/E/S/W neighbors of the given cell."""
        neighbors = []
        for dy, dx in self.steps_to_reach_neighbors:
            ny, nx = cell.y + dy, cell.x + dx
            if 0 <= ny < self.height and 0 <= nx < self.width:
                neighbors.append(self.get_cell_by_coordinate(ny, nx))
        return neighbors

    def get_adjacent_reachable_cells(self, cell):
        """Return adjacent cells not separated from this one by a wall."""
        return [
            neighbor
            for neighbor in self.get_adjacent_cells(cell)
            if not self.exists_wall_between_cells(cell, neighbor)
        ]
class UI:
    """Tk window with a Go button and a canvas the maze is painted on."""

    color_window_background = "#D0D0D0"
    color_cell_background = "#FFFFFF"
    color_solved_cell_background = "#CCE5FF"
    color_cell_text = "#000000"
    color_wall = "#000000"
    color_trace = "#800000"

    def __init__(self, maze_height, maze_width, maze_cell_size, animation_delay):
        """Store the geometry/timing settings and build the window and widgets."""
        self.maze_height = maze_height
        self.maze_width = maze_width
        self.maze_cell_size = maze_cell_size
        self.animation_delay = animation_delay
        self.window = None
        self.button_frame = None
        self.canvas_frame = None
        self.canvas = None
        self.go_button = None
        self.create_window()
        self.create_widgets()

    def create_window(self):
        """Create the non-resizable top-level window."""
        self.window = Tk()
        self.window.title("Maze Algorithms")
        self.window.configure(background=self.color_window_background)
        self.window.resizable(0, 0)

    def create_widgets(self):
        """Create the button row and the drawing canvas."""
        self.create_buttons()
        self.create_canvas()

    def create_buttons(self):
        """Create the Go button (its command is wired later)."""
        self.button_frame = Frame(self.window, padx=10, pady=10, background=self.color_window_background)
        self.button_frame.pack()
        self.go_button = Button(self.button_frame, text="Go!")
        self.go_button.configure(highlightbackground=self.color_window_background)
        self.go_button.pack()

    def create_canvas(self):
        """Create a canvas sized to fit the whole maze grid (plus a 2px border)."""
        self.canvas_frame = Frame(self.window, padx=30, pady=30, borderwidth=0)
        self.canvas_frame.configure(background=self.color_window_background)
        self.canvas_frame.pack()
        self.canvas = Canvas(
            self.canvas_frame,
            width=self.maze_width * self.maze_cell_size + 2,
            height=self.maze_height * self.maze_cell_size + 2,
            background=self.color_window_background,
            borderwidth=0,
            highlightthickness=0
        )
        self.canvas.pack()

    def set_action_for_go_button(self, action):
        """Wire `action` to the Go button, clearing the canvas and disabling
        the button while the action runs."""
        def wrapped_action():
            # Clear the canvas.
            self.canvas.delete("all")
            # Block the button until its action is completed.
            self.go_button.configure(state=DISABLED)
            action()
            self.go_button.configure(state=NORMAL)
        self.go_button.configure(command=wrapped_action)

    def draw_maze(self, maze):
        """Redraw every cell of the maze."""
        for y in range(0, maze.height):
            for x in range(0, maze.width):
                cell = maze.get_cell_by_coordinate(y, x)
                self.draw_cell(cell)

    def draw_cell(self, cell, color=color_cell_background, text=None):
        """Paint one cell: background, optional centered text, and its walls."""
        cell_upper_left_x = cell.x * self.maze_cell_size + 1
        cell_upper_left_y = cell.y * self.maze_cell_size + 1
        # Paint cell background.
        self.canvas.create_rectangle(
            cell_upper_left_x,
            cell_upper_left_y,
            cell_upper_left_x + self.maze_cell_size,
            cell_upper_left_y + self.maze_cell_size,
            width=0,
            fill=color
        )
        # Paint text, if specified.
        if text is not None:
            self.canvas.create_text(
                cell_upper_left_x + self.maze_cell_size / 2,
                cell_upper_left_y + self.maze_cell_size / 2,
                font=("", 8),
                fill=self.color_cell_text,
                text=str(text)
            )
        # Paint walls. Each template is the (x1, y1, x2, y2) of a wall line in
        # cell-relative unit coordinates, in the same N/E/S/W order as the
        # wall flags on the cell.
        wall_line_templates = [
            # x1, y1, x2, y2
            (0, 0, 1, 0),  # Northern
            (1, 0, 1, 1),  # Eastern
            (0, 1, 1, 1),  # Southern
            (0, 0, 0, 1)   # Western
        ]
        for wall_index in range(0, len(cell.walls)):
            if cell.has_wall(wall_index):
                wall_template = wall_line_templates[wall_index]
                self.canvas.create_line(
                    cell_upper_left_x + wall_template[0] * self.maze_cell_size,
                    cell_upper_left_y + wall_template[1] * self.maze_cell_size,
                    cell_upper_left_x + wall_template[2] * self.maze_cell_size,
                    cell_upper_left_y + wall_template[3] * self.maze_cell_size,
                    fill=self.color_wall
                )

    def draw_trace(self, cell, previous_cell):
        """Draw one segment of the exit path, center to center."""
        current_cell_center_x = cell.x * self.maze_cell_size + self.maze_cell_size / 2
        current_cell_center_y = cell.y * self.maze_cell_size + self.maze_cell_size / 2
        previous_cell_center_x = previous_cell.x * self.maze_cell_size + self.maze_cell_size / 2
        previous_cell_center_y = previous_cell.y * self.maze_cell_size + self.maze_cell_size / 2
        self.canvas.create_line(
            previous_cell_center_x,
            previous_cell_center_y,
            current_cell_center_x,
            current_cell_center_y,
            width=3,
            fill=self.color_trace
        )

    def do_animation_delay(self):
        """Sleep for the configured delay and force a canvas repaint."""
        time.sleep(self.animation_delay)
        self.canvas.update()

    def run(self):
        """Enter the Tk event loop (blocks until the window closes)."""
        self.window.mainloop()
if __name__ == "__main__":
main()
| true |
51c007fc8956397457b7d2a00dafbffde028a164 | Python | wtfwsk05/pyqt5-20200207 | /test_file/FirstMainWin.py | UTF-8 | 1,006 | 3.03125 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import QApplication,QMainWindow # 应用程序,主窗口
from PyQt5.QtGui import QIcon # 图标
class FirstMainWin(QMainWindow):
    """First main-window example: sets title, size and a status bar message."""
    def __init__(self,parent=None):
        super(FirstMainWin, self).__init__(parent)
        # Set the window title ("first main window application").
        self.setWindowTitle('第一个主窗口应用')
        # Set the window size.
        self.resize(400,300)
        # Create the status bar.
        self.status = self.statusBar()
        # Show a status bar message ("shown for 5 seconds only") for 5000 ms.
        self.status.showMessage('只显示5秒',5000)
if __name__ == '__main__':
    # Create the application; sys.argv passes command-line arguments through.
    app = QApplication(sys.argv)
    # Set the application icon (note: hard-coded absolute Windows path).
    app.setWindowIcon(QIcon(r'E:\PythonProject\mypyqt5\images\doc.ico'))
    main = FirstMainWin() # instantiate the window class
    main.show() # show the window
    sys.exit(app.exec_()) # run the main event loop and exit cleanly when it ends
b4cb685977ec9024d2a728e1ba67b3684fbd621e | Python | harpribot/ML-basics | /basics 3 - MCMC Sampling/simulation.py | UTF-8 | 7,270 | 2.84375 | 3 | [] | no_license | import numpy as np
from random import gauss
from scipy.stats import norm,multivariate_normal
import matplotlib.pyplot as plt
class Sampling:
    """MCMC samplers (Metropolis and Gibbs) for a two-component Gaussian mixture.

    Target density: p(x) = 0.3 * N(-25, 10) + 0.7 * N(20, 10), and the
    analogous 2-D mixture with covariance 10*I.

    NOTE(review): the methods below use Python 2 ``print`` statements, so this
    class only runs under Python 2.
    """
    def __init__(self,num_samples):
        self.total_samples = num_samples
    def metroplis_1D(self,proposal_var,total_samples=1000):
        """Random-walk Metropolis on the 1-D mixture; samples are stored on self."""
        self.sampling_method = "metropolis"
        self.total_samples = total_samples
        self.generated_samples = np.zeros(self.total_samples)
        self.proposal_var = proposal_var
        # first sample x
        x = 0
        self.generated_samples[0] = x
        for i in range(1,self.total_samples):
            # Proposal Distribution is N(0,sigma)
            innov = gauss(0,self.proposal_var)
            can = x + innov
            ## the actual distribution is p(x) = 0.3 N(-25,10) + 0.7 N(20,10)
            self.dist_1 = norm(-25,10)
            self.dist_2 = norm(20,10)
            p_can = 0.3 * self.dist_1.pdf(can) + 0.7 * self.dist_2.pdf(can)
            p_x = 0.3 * self.dist_1.pdf(x) + 0.7 * self.dist_2.pdf(x)
            # Accept the candidate with probability min(1, p(can)/p(x)).
            aprob = min(1, p_can/p_x)
            u = np.random.rand()
            if(u < aprob):
                x = can
            self.generated_samples[i] = x
        # Code to check the mean of the distribution
        print 'For Proposal Variance', proposal_var
        print 'Mean of Sampled Distribution', (np.mean(self.generated_samples));
    def metroplis_2D(self,proposal_var,total_samples=1000):
        """Random-walk Metropolis on the 2-D mixture with isotropic proposal N(0, var*I)."""
        self.sampling_method = "metropolis"
        self.total_samples = total_samples
        self.generated_samples_x = np.zeros(self.total_samples)
        self.generated_samples_y = np.zeros(self.total_samples)
        self.proposal_var = proposal_var
        # first sample x
        x = 0
        y = 0
        self.generated_samples_x[0] = x
        self.generated_samples_y[0] = y
        for i in range(1,self.total_samples):
            # Proposal Distribution is N(0,kI)
            innov = np.random.multivariate_normal(np.array([0,0]),\
                                                  self.proposal_var * np.eye(2),1)
            can_x = x + innov[0,0]
            can_y = y + innov[0,1]
            ## the actual distribution is p(x) = 0.3 N(-25,10) + 0.7 N(20,10)
            self.dist_1 = multivariate_normal(np.array([-25,-25]),10*np.eye(2))
            self.dist_2 = multivariate_normal(np.array([20,20]),10 * np.eye(2))
            p_can = 0.3 * self.dist_1.pdf(np.array([can_x,can_y])) + \
                    0.7 * self.dist_2.pdf(np.array([can_x,can_y]))
            p_x = 0.3 * self.dist_1.pdf(np.array([x,y])) + \
                  0.7 * self.dist_2.pdf(np.array([x,y]))
            aprob = min(1, p_can/p_x)
            u = np.random.rand()
            if(u < aprob):
                x = can_x
                y = can_y
            self.generated_samples_x[i] = x
            self.generated_samples_y[i] = y
    def gibbs_sampling_2D(self,total_samples, T):
        """Gibbs sampler on the 2-D mixture: T sweeps of alternating x|y and y|x draws per sample."""
        self.sampling_method = "gibbs sampling"
        self.total_samples = total_samples
        self.T = T
        # Initialize the sample storage
        self.generated_samples_x = np.zeros(self.total_samples)
        self.generated_samples_y = np.zeros(self.total_samples)
        self.dist_1 = norm(-25,10)
        self.dist_2 = norm(20,10)
        for i in range(0,self.total_samples):
            # Initialization
            x = 0.0
            y = 0.0
            for j in range(0,self.T):
                # Ratio for x: choose the mixture component in proportion to
                # its responsibility given y, then draw x from that component.
                a = self.dist_1.pdf(y)
                b = self.dist_2.pdf(y)
                first = 0.3 * (a/(a + b))
                second = 0.7 * (b/(a + b))
                u = np.random.rand()
                if(u < first/(first + second)):
                    x = gauss(-25,10)
                else:
                    x = gauss(20,10)
                # Ratio for y: same scheme conditioned on the new x.
                a = self.dist_1.pdf(x)
                b = self.dist_2.pdf(x)
                first = 0.3 * (a/(a + b))
                second = 0.7 * (b/(a + b))
                u = np.random.rand()
                if(u < first/(first + second)):
                    y = gauss(-25,10)
                else:
                    y = gauss(20,10)
            self.generated_samples_x[i] = x
            self.generated_samples_y[i] = y
    def plot_histogram_1D(self):
        """Plot a normalized histogram of the 1-D samples with the true density overlaid."""
        n, bins, patches = plt.hist(self.generated_samples, \
                                    50,normed=1, facecolor='g', alpha=0.75)
        x = np.sort(self.generated_samples)
        actual_plot = 0.3 * self.dist_1.pdf(x) + \
                      0.7 * self.dist_2.pdf(x)
        plt.plot(x, actual_plot)
        plt.xlabel('Sample')
        plt.ylabel('Probability')
        if self.sampling_method == "gibbs sampling":
            plt.title('Histogram of samples for p(x) = 0.3 N(-25,10) + \
                0.7 N(20,10) for T=%d' %(self.T))
        elif self.sampling_method == "metropolis":
            plt.title('Histogram of samples for p(x) = 0.3 N(-25,10) + \
                0.7 N(20,10) for Var=%d'%(self.proposal_var))
        plt.grid(True)
    def plot_histogram_2D(self):
        """Plot a 2-D histogram of the (x, y) samples over [-50, 50]^2."""
        plt.hist2d(self.generated_samples_x,self.generated_samples_y,\
                   bins=50,range=np.array([[-50,50],[-50,50]]),normed=1)
        plt.xlabel('Sample')
        plt.ylabel('Probability')
        if self.sampling_method == "gibbs sampling":
            plt.title('Histogram of samples for p(x) = 0.3 N(-25,10) + \
                0.7 N(20,10) for T=%d'%(self.T))
        elif self.sampling_method == "metropolis":
            plt.title('Histogram of samples for p(x) = 0.3 N(-25,10) + \
                0.7 N(20,10) for Var=%d'%(self.proposal_var))
        plt.grid(True)
def main():
    """Run the 1-D/2-D Metropolis and 2-D Gibbs demos, then show all plots."""
    ########### Part 1 & Part 2 - 1D gaussian - Metropolis ###############
    print('Starting Matropolis on 1D gaussian mixture')
    sampler = Sampling(1000)
    # One figure per proposal variance, to compare mixing behaviour.
    plt.figure()
    sampler.metroplis_1D(1)
    sampler.plot_histogram_1D()
    plt.figure()
    sampler.metroplis_1D(10)
    sampler.plot_histogram_1D()
    plt.figure()
    sampler.metroplis_1D(20)
    sampler.plot_histogram_1D()
    plt.figure()
    sampler.metroplis_1D(100)
    sampler.plot_histogram_1D()
    plt.figure()
    sampler.metroplis_1D(400)
    sampler.plot_histogram_1D()
    plt.figure()
    sampler.metroplis_1D(1000)
    sampler.plot_histogram_1D()
    print('1D metropolis completed\n\n')
    ########### Part 3 - 2D gaussian mixture - Metropolis ################
    print('Starting Matropolis on 2D gaussian mixture')
    plt.figure()
    sampler.metroplis_2D(100,30000)
    sampler.plot_histogram_2D()
    plt.figure()
    sampler.metroplis_2D(300,30000)
    sampler.plot_histogram_2D()
    plt.figure()
    sampler.metroplis_2D(500,30000)
    sampler.plot_histogram_2D()
    plt.figure()
    sampler.metroplis_2D(700,30000)
    sampler.plot_histogram_2D()
    print('2D metropolis completed\n\n')
    ############ Part 4 - 2D gaussian mixture - Gibbs Sampling ############
    print('Starting Gibbs Sampling on 2D Gaussian mixture')
    plt.figure()
    sampler.gibbs_sampling_2D(7000,300)
    sampler.plot_histogram_2D()
    print('Gibbs sampling completed\n\n')
    print('Plotting all the plots')
    plt.show()
    print('All done....exiting')
if __name__=="__main__":
    main()
| true |
86debed96aa6bba69a3c4838b5f2a2eeeb447074 | Python | blackJack1982/h | /code_exploit.py | UTF-8 | 560 | 2.5625 | 3 | [] | no_license | # Zero days
# Metasploit
# a basic example of overwhelming the app/programme design with binary code exploit
import binascii
f = open('exploit.bin','wb')
f.write(b'\x0a\x0b\x0c' * 10)
f.close
# if you wanna read it, use library of binascii
f = open('exploit.bin','rb')
bytes = f.read()
print(binascii.b2a_uu(bytes))
# Remediation: be updated on software, vulnerabilities etc.(patch availability => regular patching);
# avoid suspicious sources (no copypaste of codes u do not understand in the app); multi-factor authentification (hm, check for updates)
| true |
de3407358677560ae2ad3b337d9d2c7b6e812c9e | Python | facelessuser/pyspelling | /pyspelling/plugin.py | UTF-8 | 2,655 | 3.15625 | 3 | [
"MIT"
] | permissive | """Base plugin class."""
from collections import OrderedDict
import warnings
class Plugin:
    """Base plugin.

    Subclasses override `get_default_config()` to declare their options and
    `setup()` for one-time initialization.  User-supplied options are merged
    over the defaults, with unknown keys rejected and values type-checked
    against the default values.
    """

    def __init__(self, options):
        """Initialize, merging `options` over the declared default config."""
        self.config = self.get_default_config()
        if self.config is None:
            # Legacy plugins without declared defaults get the raw options
            # dict with no validation; warn so plugin authors migrate.
            warnings.warn(
                "'{}' did not provide a default config. ".format(self.__class__.__name__) +
                "All plugins in the future should provide a default config.",
                category=FutureWarning,
                stacklevel=1
            )
            self.config = options
        else:
            self.override_config(options)
        self.setup()

    def get_default_config(self):
        """Return the default configuration dict (None = no defaults declared)."""
        return None

    def setup(self):
        """Setup hook; override for one-time initialization."""

    def override_config(self, options):
        """Override the default configuration with user `options`.

        Raises `KeyError` for unknown option names and `ValueError` (via
        `validate_options`) for type mismatches.
        """
        for k, v in options.items():
            # Reject names not in the default configuration
            if k not in self.config:
                raise KeyError("'{}' is not a valid option for '{}'".format(k, self.__class__.__name__))
            self.validate_options(k, v)
            self.config[k] = v

    def validate_options(self, k, v):
        """Validate that option `k`'s new value `v` matches the default's type.

        Raises `ValueError` on a mismatch.  `bool` is tested before `int`
        because `bool` is a subclass of `int` in Python.
        """
        args = [self.__class__.__name__, k]

        # Booleans
        if isinstance(self.config[k], bool) and not isinstance(v, bool):
            raise ValueError("{}: option '{}' must be a bool type.".format(*args))
        # Strings
        elif isinstance(self.config[k], str) and not isinstance(v, str):
            raise ValueError("{}: option '{}' must be a str type.".format(*args))
        # Integers (whole floats allowed)
        elif (
            isinstance(self.config[k], int) and
            (not isinstance(v, int) and not (isinstance(v, float) and v.is_integer()))
        ):
            raise ValueError("{}: option '{}' must be an int type.".format(*args))
        # Floats (integers allowed)
        elif isinstance(self.config[k], float) and not isinstance(v, (int, float)):
            raise ValueError("{}: option '{}' must be a float type.".format(*args))
        # Basic iterables (list, tuple, sets)
        elif isinstance(self.config[k], (list, tuple, set)) and not isinstance(v, list):
            # Bug fix: the original message wrongly said "float type" here.
            raise ValueError("{}: option '{}' must be a list type.".format(*args))
        # Dictionaries
        elif isinstance(self.config[k], (dict, OrderedDict)) and not isinstance(v, (dict, OrderedDict)):
            raise ValueError("{}: option '{}' must be a dict type.".format(*args))

    def reset(self):
        """Reset anything needed on each iteration."""
| true |
871f7a9bf9a0582c29f036a95670fa0b7b4ff915 | Python | venachescu/psyclab | /psyclab/utilities/osc.py | UTF-8 | 4,077 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
psyclab/utilities/osc.py
Vince Enachescu 2019
"""
from functools import wraps
from itertools import chain
from threading import Thread
from pythonosc.dispatcher import Dispatcher
from pythonosc.osc_server import ThreadingOSCUDPServer
from pythonosc.udp_client import SimpleUDPClient
from psyclab.utilities.logs import Logged
def route(*paths):
    """Decorator factory tagging a method as an OSC handler for `paths`.

    The wrapper records the paths on the function (for discovery by
    `find_routes`) and additionally allows calling the handler with no
    positional arguments, in which case route and source are passed as None.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if args:
                return func(*args, **kwargs)
            # No positional arguments: supply placeholder route/source.
            return func(None, None, **kwargs)
        wrapper.paths = paths
        return wrapper
    return decorator
def is_route(obj):
    """Return a predicate that maps an attribute name on `obj` to its list of
    (path, handler) pairs, or None when the attribute is not a routed callable."""
    def check_route(name):
        candidate = getattr(obj, name, None)
        if not callable(candidate) or not hasattr(candidate, 'paths'):
            return None
        return [(path, candidate) for path in candidate.paths]
    return check_route
def find_routes(obj):
    """Collect every (path, handler) pair exposed by `obj`'s routed methods."""
    check = is_route(obj)
    pairs = []
    for name in dir(obj):
        routed = check(name)
        if routed:
            pairs.extend(routed)
    return tuple(pairs)
class OSCResponder(Logged):
    """Threaded OSC (Open Sound Control) endpoint built on python-osc.

    At construction every method decorated with ``@route(...)`` is discovered
    via ``find_routes()`` and mapped on the dispatcher; messages matching no
    explicit route fall through to ``receive``.  ``start()``/``stop()`` manage
    a daemon server thread; ``send()`` forwards messages through one or more
    UDP clients.
    """

    def __init__(self, host='0.0.0.0', port=7401, **kwargs):
        """Record the bind address, wire up the dispatcher, register routes."""
        self._host = host
        self._port = port
        # _client is either None (no peer), a single SimpleUDPClient, or a
        # dict mapping names to clients (see send()).
        self._client = None
        self._server = None
        self._server_thread = None

        Logged.__init__(self, **kwargs)

        self._dispatcher = Dispatcher()
        # Messages that match no registered route are handed to receive().
        self._dispatcher.set_default_handler(self.receive)

        self._routes = {}
        for osc_path, callback in find_routes(self):
            self._dispatcher.map(osc_path, callback)
            self._routes[osc_path] = callback

    def start(self, *args):
        """Start the UDP server in a daemon thread (no-op if already started)."""
        if self._server is not None:
            return

        self._server = ThreadingOSCUDPServer((self._host, self._port), self._dispatcher, )
        self._server_thread = Thread(target=self._server.serve_forever, name=str(self), daemon=True)
        self._server_thread.start()

        # Record the address actually bound (useful when port 0 was requested).
        self._host, self._port = self._server.socket.getsockname()
        self.debug('responder thread started.')

    def stop(self, *args):
        """Shut the server down and join its thread.

        NOTE(review): _server/_server_thread are not reset to None here, and
        start() returns early while _server is set, so a stopped responder
        cannot currently be restarted.
        """
        if self._server is not None:
            self._server.shutdown()

        if self._server_thread is not None:
            self._server_thread.join()

        self.debug('responder thread stopped.')

    def send(self, route, message, to=None):
        """Send `message` on `route` to the connected client(s).

        With a single client, `to` is ignored.  With a dict of clients, `to`
        may be one name or a list of names (unknown names are skipped); when
        `to` is None the message is broadcast to every client.  Silently does
        nothing when no client is connected.
        """
        if self._client is None:
            return

        if not isinstance(self._client, dict):
            self._client.send_message(route, message)
            return

        if to is not None:
            if not isinstance(to, (tuple, list)):
                to = (to,)

            for name in filter(lambda key: key in self._client, to):
                self._client[name].send_message(route, message)
            return

        for client in self._client.values():
            client.send_message(route, message)

    def receive(self, route, source, *messages):
        """Default handler: log the route, the sender, and each message.

        NOTE(review): assumes the dispatcher calls the default handler with a
        (host, port) source tuple ahead of the OSC arguments -- confirm
        against the python-osc dispatcher configuration in use.
        """
        self.info(f'[osc] {source[0]}:{source[1]} {route}')
        for message in messages:
            self.info(message)

    # @route('/connect')
    # def on_connect(self, route, source, data):
    #     """ Handle a connection call over OSC """
    #
    #     host, port = source
    #     client = self.connect(host, data)
    #     if not isinstance(self._client, dict):
    #         self._client = client
    #     else:
    #         self._client[port] = client
    #
    #     self.connected(host, port, data)

    @staticmethod
    def connect(host='localhost', port=7402):
        """ Open an OSC client for network communication """
        return SimpleUDPClient(host, port)

    def connected(self, host, port, data):
        """ Callback after an OSC connection created """
        self.info(f'client connection, from {host}:{port}, port {data}')

    @property
    def client(self):
        # Read-only access to the underlying client (or dict of clients).
        return self._client

    def __str__(self):
        return f'{self.__class__.__name__}@{self._host}:{self._port}'

    def __repr__(self):
        return f'<{self}>'
# Manual smoke test: start a responder listening on the default host/port.
if __name__ == "__main__":
    responder = OSCResponder()
    responder.start()
| true |
d1362a83394c38bc26b1f1328b4be06ea6fcec86 | Python | bioJain/python_Bioinformatics | /Rosalind/Bioinformatic-textbook-track/BA1D_PatternMatch.py | UTF-8 | 269 | 3.3125 | 3 | [] | no_license | # BA1D
# Find All Occurrences of a Pattern in a String
def PatternMatch(pattern, genome):
    """Return the 0-based start indices of every occurrence of `pattern`
    in `genome`, including overlapping occurrences, in ascending order."""
    span = len(pattern)
    return [start
            for start in range(len(genome) - span + 1)
            if genome[start:start + span] == pattern]
| true |
dadde424a8ee1ca7a33d6aeb6162f76dac2d1c58 | Python | ajaysingh13/Python-Projects- | /forloops.py | UTF-8 | 527 | 3.96875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 5 17:23:03 2021
@author: vijay
"""
import matplotlib.pyplot as plt
fruitlist = ['banana', 'orange', 'apple', 'watermelon']
"""
# for loop
for x in fruitlist:
print (x)
if (x == 'apple'):
print('this is an apple')
for x in range (1,11):
print(x)
"""
# Build the domain 1..100 and two curves over it: squares and reciprocals.
x = list(range(1, 101))
y = [value * value for value in x]
z = [1 / value for value in x]
# NOTE(review): this first plt.show() runs before anything has been plotted,
# so it displays an empty figure (or nothing); likely leftover experimentation.
plt.show()
plt.plot(x,y)
plt.show()
# NOTE(review): no plt.show() follows this last plot within this chunk, so the
# z curve may never be displayed unless more code follows.
plt.plot(x,z)
75faaf6504e05ae11d617e0383af6e1c476669fa | Python | milger/DEVFUNDA | /movie_rental_store/tests/membership/membership_test.py | UTF-8 | 1,432 | 3.234375 | 3 | [] | no_license | import sys
sys.path.append('../../src/modules/membership')
import unittest
from membership import Membership
class MembershipTest(unittest.TestCase):
    """Unit tests for the Membership model's accessors and mutators."""

    def setUp(self):
        """Setup method to instance an object of Membership class"""
        code = 9
        name = "Gold"
        discount = 10
        self.membership = Membership(code, name, discount)
        self.test_membership = Membership(5, "Platinum", 5)

    def test_create_membership(self):
        """Test if an instance is created with the required data"""
        self.assertIsInstance(self.test_membership, Membership)

    def test_set_code_of_membership(self):
        """Test to update the membership code"""
        other_code = 10
        self.test_membership.set_code(other_code)
        self.assertEqual(other_code, self.test_membership.get_code())

    def test_set_name_of_membership(self):
        """Test to update the membership name"""
        other_name = "Platinum"
        self.test_membership.set_name(other_name)
        self.assertEqual(other_name, self.test_membership.get_name())

    def test_set_discount_of_membership(self):
        """Test to update the membership discount.

        Renamed from the original misspelled 'test_set_discont_of_membership';
        unittest discovers tests by the 'test_' prefix, so discovery is
        unaffected by the rename.
        """
        other_discount = 11
        self.test_membership.set_discount(other_discount)
        self.assertEqual(other_discount, self.test_membership.get_discount())
# Allow running this test module directly: `python membership_test.py`.
if __name__ == "__main__":
    unittest.main()
| true |
07e70addb02320eda7c144b2a098546dc7487eee | Python | BaeMinCheon/street-fighter-agent | /src/agent/Agent.py | UTF-8 | 3,447 | 2.578125 | 3 | [] | no_license |
import agent.Model as Model
import numpy as np
import tensorflow as tf
import random
import collections
def GetStacks(_main, _target, _batch):
    """Build training matrices (states, target Q-rows) from a replay batch.

    For each transition the replayed action's Q-value is replaced by
    reward + discount * Q_target(s', argmax_a Q_main(s', a)): the action is
    selected by the main network and evaluated by the target network.
    Returns (stack_x, stack_y) with shapes (len(batch), size_input) and
    (len(batch), size_output).
    """
    rows_x = []
    rows_y = []
    for state, action, reward, successor in _batch:
        q_row = _main.Decide(state)
        best_next = np.argmax(_main.Decide(successor))
        target_value = _target.Decide(successor)[0][best_next]
        q_row[0][action] = float(reward) + _main.discount * target_value
        rows_x.append(state)
        rows_y.append(q_row)
    # Seed with empty float arrays so the stacked result keeps the same
    # (0, k) shape and float dtype as the original incremental vstack.
    base_x = np.empty(0).reshape(0, _main.size_input)
    base_y = np.empty(0).reshape(0, _main.size_output)
    return np.vstack([base_x] + rows_x), np.vstack([base_y] + rows_y)
class Agent:
    """DQN-style agent: a main/target network pair (built by Model), an
    experience-replay deque, and an epsilon-greedy action selector."""

    def __init__(self, _agentConfig):
        """Unpack hyper-parameters from the nested config dict, then build the model."""
        self.input_list = _agentConfig['input_list']    # feature keys read from each observation dict
        self.output_list = _agentConfig['output_list']  # concrete actions returned by Output()
        self.max_replay_number = _agentConfig['dqn']['max_replay_number']
        self.train_period = _agentConfig['dqn']['train_period']
        self.train_number = _agentConfig['dqn']['train_number']
        self.batch_size = _agentConfig['dqn']['batch_size']
        self.hidden_layer_size = _agentConfig['model']['hidden_layer_size']
        self.learning_rate = _agentConfig['model']['learning_rate']
        self.discount_rate = _agentConfig['model']['discount_rate']
        self.InitModel()

    def InitModel(self):
        """Create a fresh TF graph/session, build both networks, and sync them."""
        tf.reset_default_graph()
        self.session = tf.Session()
        self.model_main = Model.Model('main', self)
        self.model_target = Model.Model('target', self)
        self.session.run(tf.global_variables_initializer())
        # sync_op copies main-network variables into the target network.
        self.sync_op = Model.GetSyncOps('main', 'target')
        self.session.run(self.sync_op)
        self.state = [0] * len(self.input_list)  # most recently observed feature vector
        self.decision = 0        # index of the last chosen action
        self.reward = 0
        self.number_decide = 0   # how many times Output() has been called
        self.replay_queue = collections.deque()  # (state, action, reward, next_state) tuples
        self.number_epoch = 0

    def LoadModel(self, _filepath):
        """Restore session variables from a checkpoint file."""
        saver = tf.train.Saver()
        saver.restore(self.session, _filepath)

    def SaveModel(self, _filepath):
        """Save session variables to a checkpoint file."""
        saver = tf.train.Saver()
        saver.save(self.session, _filepath)

    def Input(self, _feature):
        """Record an observation, append it to replay, and train periodically.

        NOTE(review): `next_state` is appended to the replay queue while still
        all-None, and `self.state` is then rebound to that same list object;
        the next Input() call mutates it in place, retroactively filling this
        entry's next_state (and the same object becomes the next entry's
        curr_state).  The newest entry's next_state is therefore still
        [None, ...] when a training batch is sampled -- confirm this aliasing
        is intentional.
        """
        self.reward = _feature['Reward']
        next_state = [None] * len(self.input_list)
        for i in range(len(self.input_list)):
            self.state[i] = _feature[self.input_list[i]]
        self.replay_queue.append((self.state, self.decision, self.reward, next_state))
        # Bound the replay memory by evicting the oldest transition.
        if len(self.replay_queue) > self.max_replay_number:
            self.replay_queue.popleft()
        self.state = next_state
        # Every train_period decisions, run train_number minibatch updates
        # and then re-sync the target network with the main network.
        if (self.number_decide % self.train_period) == (self.train_period - 1):
            for i in range(self.train_number):
                batch = random.sample(self.replay_queue, self.batch_size)
                stack_x, stack_y = GetStacks(self.model_main, self.model_target, batch)
                self.model_main.Train(stack_x, stack_y)
            self.number_epoch += self.train_number
            self.session.run(self.sync_op)

    def Output(self):
        """Choose the next action (epsilon-greedy) and return its concrete value.

        The exploration probability decays as
        1 / (1 + number_decide / train_period).
        """
        self.number_decide += 1
        random_boundary = 1.0 / float(1 + self.number_decide / self.train_period)
        if np.random.rand(1) < random_boundary:
            self.decision = random.randrange(0, len(self.output_list))
        else:
            self.decision = np.argmax(self.model_main.Decide(self.state))
        return self.output_list[self.decision]
d96e4868bb86696bffdcb58a0311bce1d2e3926d | Python | daniel-reich/ubiquitous-fiesta | /9Q5nsEy2E2apYHwX8_2.py | UTF-8 | 625 | 3.21875 | 3 | [] | no_license |
class programer:
    """A job offer described by salary and weekly work hours.

    Original attribute/parameter spellings ('sallery'/'sallary') are kept
    for compatibility with existing callers.
    """

    def __init__(self, sallery, work_hours):
        self.sallary = sallery
        self.work_hours = work_hours

    def __del__(self):
        # Mirrors the original: builds (and returns) a summary string at
        # finalization; Python ignores __del__'s return value.
        return "oof, {}, {}".format(self.sallary, self.work_hours)

    def compare(self, other_programmer):
        """Return the winning offer: lower salary wins; a salary tie is
        broken by fewer work hours (an exact tie favors the other offer)."""
        if self.sallary == other_programmer.sallary:
            mine_wins = self.work_hours < other_programmer.work_hours
        else:
            mine_wins = self.sallary < other_programmer.sallary
        return self if mine_wins else other_programmer
| true |
2335441e5e84cd6da4adab5eb0fa4d9ffdaec0d6 | Python | ifcheung2012/sampletor | /test/deco.py | UTF-8 | 802 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
__author__ = 'ifcheung'
def check(name):
    """Decorator factory: prints a "check" banner named `name` around the call.

    Improvements over the original: `print` is invoked as a function, which
    produces identical output under both Python 2 and Python 3, and the
    wrapper accepts arbitrary arguments (*args/**kwargs) instead of only
    `self`, so it can decorate any callable.  The wrapped function's return
    value is passed through unchanged.
    """
    def checkwrapper(func):
        def checking(*args, **kwargs):
            print("check is %s." % name)
            print("start checking")
            result = func(*args, **kwargs)
            print("end checking")
            return result
        return checking
    return checkwrapper
def log(name):
    """Decorator factory: prints a "log" banner named `name` around the call.

    Same modernization as `check`: print() calls (identical output on
    Python 2 and 3) and *args/**kwargs support.  The exact log strings from
    the original (including their spacing and the 'loging' typo) are
    preserved so the observable output is unchanged.
    """
    def logwrapper(func):
        def logging(*args, **kwargs):
            print("log is %s ." % name)
            print("start loging.")
            result = func(*args, **kwargs)
            print(" end logging .")
            return result
        return logging
    return logwrapper
class Myclass():
    """Demo class whose `process` method is wrapped by both decorators
    (applied bottom-up: `log` wraps the method first, then `check`)."""

    @check("checkname")
    @log("logname")
    def process(self):
        # print() form is valid under both Python 2 and 3; the original
        # string (including its 'proccessing' typo) is preserved.
        print('proccessing in class')
# Demo: construct an instance and run the doubly-decorated method on import.
m = Myclass()
m.process()
if __name__ == '__main__':
    pass
8d5e0fdf710f562a44acc4c6e3f1f6a69173b72b | Python | estuelke/cure-center-tools-backend | /backend/members/routes.py | UTF-8 | 1,533 | 2.53125 | 3 | [] | no_license | from flask import Blueprint, jsonify, request, make_response
from .. import db
from .models import Member
from .schemas import MemberSchema
# Blueprint grouping all member-related endpoints; registered by the app.
member_api = Blueprint('members', __name__)

# Serialization schemas: one for a single member, one for lists of members.
member_schema = MemberSchema()
members_schema = MemberSchema(many=True)
@member_api.route('/members', methods=['GET', 'POST'])
def member_view():
    """List members.

    GET returns every member; POST filters by the institution value posted
    as the JSON body.  (Only GET/POST are routed, so the else branch covers
    exactly the GET case.)
    """
    if request.method == 'POST':
        institution = request.get_json()
        members = Member.query.filter_by(institution=institution)
    else:
        members = Member.query.all()
    return jsonify({'members': members_schema.dump(members)})
@member_api.route('/add_member', methods=['POST'])
def add_member_view():
    """Create a member from the posted JSON payload and persist it."""
    payload = request.get_json()
    member = Member(
        first_name=payload['firstName'],
        last_name=payload['lastName'],
        institution=payload['institution'],
        email_address=payload['emailAddress']
    )
    db.session.add(member)
    db.session.commit()
    # Empty 200 response signals success to the client.
    return make_response()
@member_api.route('/member/<int:member_id>')
def get_member(member_id):
    """Return a single member by primary key; responds 404 when absent."""
    found = Member.query.get_or_404(member_id)
    return jsonify({'member': member_schema.dump(found)})
@member_api.route('/find_members', methods=['POST'])
def find_members():
    """Search endpoint (stub).

    NOTE(review): the posted search string is currently ignored and the member
    with id 2 is always returned -- this looks like placeholder code.  A real
    query against the search string still needs to be implemented.  (Removed
    the debug print and the unused `response = make_response()` from the
    original.)
    """
    search_string = request.get_json()  # currently unused; see note above
    found = Member.query.get(2)  # TODO: replace hard-coded lookup with a search
    return jsonify(member_schema.dump(found))
| true |
ade1f1af1b4f081a41eb93385960488edf9c5cbe | Python | syyxtl/Reinforcement_learning | /MountainCar/PLAY_QLearning_PLOY.py | UTF-8 | 2,113 | 2.96875 | 3 | [] | no_license | import time
import pickle
import gym
import numpy as np
# weights = np.zeros( (num_of_action, pow(num_of_param + 1, 2)) )
# Load the trained model (a pickled weight matrix).
# NOTE(review): pickle.load can execute arbitrary code from the file; only
# load model files from a trusted source.
with open('MountainCar_QLeaning-POLY.pickle', 'rb') as f:
    Q = pickle.load(f)
print('model loaded')
env = gym.make('MountainCar-v0')
# Observation-space bounds; the high values are used by POLY_ValueFunction
# to scale the (position, velocity) state before computing features.
state_1_low, state_2_low = env.observation_space.low
state_1_high, state_2_high = env.observation_space.high
class POLY_ValueFunction:
    """Polynomial-basis linear approximation of the action-value function.

    One weight vector per discrete action (initialized from the pickled
    module-level `Q`); the feature vector for a state (s1, s2) is every
    product s1**i * s2**j for i, j in 0..num_of_param.
    """
    def __init__(self, num_of_param, num_of_action):
        # W, parameter: start from the weights loaded from the pickle file.
        self.weights = np.copy( Q ) # each function also has one more constant parameter (called bias in machine learning)
        # Basis functions per action: (num_of_param + 1)**2 of them.
        # NOTE(review): the identical basis set is rebuilt for each of the
        # three actions, and `num_of_action` itself is not used here.
        self.bases = [[], [], []]
        for action in [0, 1, 2]:
            for i in range(0, num_of_param+1):
                for j in range(0, num_of_param+1):
                    # i=i, j=j defaults freeze the loop variables per lambda.
                    self.bases[action].append(lambda s_a, s_b, i=i, j=j: pow(s_a, i) * pow(s_b, j))

    def value(self, state_1, state_2, action):
        """Return weights[action] . phi(scaled state)."""
        # map the state space into [0, 1]
        # NOTE(review): dividing by the upper bound alone leaves negative
        # positions/velocities outside [0, 1]; confirm this scaling matches
        # the one used at training time.
        state_1 /= float(state_1_high)
        state_2 /= float(state_2_high)
        feature = np.asarray([func(state_1, state_2) for func in self.bases[action]])
        return np.dot(self.weights[action], feature)

    # update parameters
    def update(self, delta, state_1, state_2, action):
        """Gradient step: weights[action] += delta * phi(scaled state)."""
        # map the state space into [0, 1]
        state_1 /= float(state_1_high)
        state_2 /= float(state_2_high)
        # get derivative value (features are the gradient of a linear model)
        derivative_value = np.asarray([func(state_1, state_2) for func in self.bases[action]] )
        self.weights[action] += delta * derivative_value
# Greedy playback of the learned policy until the episode ends.
value_function = POLY_ValueFunction(6, 3)
s = env.reset()
score = 0
while True:
    env.render()
    time.sleep(0.01)  # slow the loop down so the rendering is watchable
    # Pick the greedy action: argmax of the approximate Q over the 3 actions.
    # Bug fix: the original never updated v_next inside the loop, so any
    # action whose value merely exceeded the initial -100000 overwrote `a`,
    # making the agent take the *last* such action instead of the best one.
    a = 0
    v_next = -100000
    for next_act in [0, 1, 2]:
        q = value_function.value(s[0], s[1], next_act)
        if v_next < q:
            v_next = q
            a = next_act
    s, reward, done, _ = env.step(a)
    score += reward
    if done:
        print('score:', score)
        break
env.close()