blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c7db154a8c45b5a0d1b65a0ef5357878b1cf3aa6 | Python | Arthur-ZY/Machine-Learning-Z | /ML-3-1.py | UTF-8 | 1,914 | 2.765625 | 3 | [] | no_license | import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
# Load the bank-marketing dataset (header row 0) and drop incomplete records.
# NOTE(review): "data\Banking.csv" relies on '\B' not being an escape sequence;
# prefer a raw string or forward slashes -- confirm the path works on all OSes.
data = pd.read_csv("data\Banking.csv",header = 0)
data = data.dropna()
# Collapse the three 'basic.*' education levels into a single 'Basic' bucket.
data['education'] = np.where(data['education']=='basic.9y','Basic',data['education'])
data['education'] = np.where(data['education']=='basic.4y','Basic',data['education'])
data['education'] = np.where(data['education']=='basic.6y','Basic',data['education'])
#print(data['education'].unique())
#print(data['y'].value_counts())
# Class balance of the binary target y (0 = no subscription, 1 = subscription).
count_no_op = len(data[data['y']==0])
count_op = len(data[data['y']==1])
#print(data.groupby('y').mean()) # per-class group means
'''
count_op = len(data['y']==1)
pct_of_no_sub = count_no_op/(count_no_op+count_op)
print(pct_of_no_sub)
'''
# SMOTE oversampling (kNN-based synthetic minority samples) to address the
# class-imbalance problem.
cat_vars=['job','marital','education','default','housing','loan','contact','month','day_of_week','poutcome']
# One-hot encode each categorical column and append the dummy columns.
for var in cat_vars:
    cat_list = pd.get_dummies(data[var],prefix=var)
    data = data.join(cat_list)
data_final = data.drop(cat_vars,axis=1)
X = data_final.loc[:,data_final.columns!='y']
y = data_final.loc[:,data_final.columns=='y'].values.ravel()
from imblearn.over_sampling import SMOTE
os = SMOTE(random_state=42)  # NOTE(review): this name shadows the stdlib `os` module
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=42)
#print(X_test) DataFrame
columns = X_train.columns
# Oversample ONLY the training split so the test set stays untouched.
# NOTE(review): fit_sample() was renamed fit_resample() in imblearn >= 0.4;
# this call assumes an old imblearn version -- confirm before upgrading.
os_data_X,os_data_y = os.fit_sample(X_train,y_train)
#print(X) DataFrame
#print(os_data_X) Numpy
os_data_X = pd.DataFrame(data=os_data_X,columns=columns)
#os_data_y = pd.DataFrame(data=os_data_y,columns=['y'])
print(os_data_y)
# Fit logistic regression on the oversampled training data and evaluate on
# the original (imbalanced) test split.
log = LogisticRegression()
log.fit(os_data_X,os_data_y)
y_pred = log.predict(X_test)
print(log.score(X_test,y_test))
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred)) | true |
dc4db61c86af292739679ffdf627b8db0728a470 | Python | markovg/slithy | /examples/techfest/p2.py | UTF-8 | 6,676 | 2.578125 | 3 | [] | no_license | from slithy.library import *
from slithy.util import *
from fonts import fonts
import math
def sheared_rectangle( b, h, shear, shift ):
    """Draw a b-by-h rectangle sheared horizontally by `shear`, cut into
    vertical sections no wider than b.

    Each section is drawn shifted left by an amount taken from
    split_sequence_smooth, so animating `shift` from 0 to 1 slides the
    pieces apart/together.  Requires an active slithy drawing context
    (push/pop/translate/fill etc. come from slithy.library).
    """
    # Degenerate cases: no shear or no shift means a plain rectangle.
    if shear == 0.0 or shift == 0.0:
        rectangle( 0, 0, b, h )
    else:
        push()
        # Normalize to a positive shear by mirroring about the center line.
        if shear < 0.0:
            scale( -1, 1, b/2.0, 0 )
            shear = -shear
        paths = []
        # Number of b-wide vertical strips needed to cover the sheared shape.
        sections = int(math.ceil((b + shear) / float(b)))
        # first section
        p = Path()
        if sections > 2:
            p.moveto( 0,h ).lineto( b,h ).lineto( b,h*(shear-b)/shear ).closepath()
        else:
            p.moveto( 0,h ).lineto( b,h ).lineto( b,0 ).lineto( shear,0 )
        paths.append( p )
        # middle sections
        for i in range(1,sections-1):
            x1 = i * b
            x2 = (i+1) * b
            p = Path()
            # Top edge of this strip, following the sheared upper boundary.
            p.moveto( x1, h * (shear+b - x1) / shear ).lineto( x2, h * (shear+b - x2) / shear )
            if i == sections-2:
                # Last middle strip closes along the bottom edge.
                p.lineto( x2, 0 ).lineto( shear, 0 )
            else:
                p.lineto( x2, h * (shear - x2) / shear )
                p.lineto( x1, h * (shear - x1) / shear )
            p.closepath()
            paths.append( p )
        # last section
        p = Path()
        x = (sections-1) * b
        p.moveto( x,0 ).lineto( b+shear,0 ).lineto( x, (h*(shear+b-x)) / shear ).closepath()
        paths.append( p )
        # Per-section horizontal offsets; first section never moves.
        # (presumably split_sequence_smooth returns a tuple of fractions --
        # TODO confirm against slithy.util)
        shifts = (0,) + split_sequence_smooth( sections-1, 1-shift, 0.3 )
        i = 0
        for p,s in zip(paths,shifts):
            push()
            translate( -s * (i*b), 0 )
            fill( p )
            pop()
            i += 1
        pop()
def pythagoras( a = (SCALAR,1,5,2),
                slidea = (SCALAR, 0, 3),
                slideb = (SCALAR, 0, 3),
                linealpha = (SCALAR,0,1),
                lineextend = (SCALAR,0,1),
                subdivide = (SCALAR,0,1),
                textlabel = (SCALAR,0,1),
                info = (OBJECT),
                ):
    """Slithy scene animating a dissection proof of the Pythagorean theorem.

    a        -- side length of the first square (b is forced to 6-a)
    slidea/b -- 3-phase animation parameters moving the two sheared pieces
                from the c-square into the a- and b-squares
    linealpha, lineextend -- visibility/length of the construction line
    subdivide -- opacity of the colored moving pieces
    textlabel -- opacity of the a^2 / b^2 / c^2 labels
    info     -- controller object; receives the frame mark in .last_drawn
    """
    b = 6-a
    id( -1 )
    clear( white )
    # Camera framed on the whole a/b/c figure plus a small margin.
    set_camera( Rect( 0, 0, a+a+b, a+b+b ).outset( 0.1 ) )
    c = math.sqrt( a*a + b*b )
    # af/bf: fractions of the c-square split by the altitude construction.
    af = (a*a) / (c*c)
    bf = 1.0 - af
    theta = math.atan2( a, b ) * 180.0 / math.pi
    # Static squares: a^2 (blue), b^2 (red), c^2 (green, rotated by -theta).
    color( 0, 0, 0.5 )
    rectangle( 0, b, a, a+b )
    color( 0.8, 0, 0 )
    rectangle( a, 0, a+b, b )
    color( 0, 0.7, 0 )
    push()
    translate( a, a+b )
    rotate( -theta )
    rectangle( 0, 0, c, c )
    pop()
    # Yellow piece: phase 1 un-shears inside c^2, phase 2 rotates,
    # phase 3 re-shears into the a^2 square.
    push()
    color( 1.0, 0.8, 0.0, subdivide )
    if slidea < 1.0:
        translate( a, a+b )
        rotate( 90-theta )
        translate( 0, -af * c )
        sheared_rectangle( c, af * c, -b*a/c, slidea )
    elif slidea < 2.0:
        translate( a, a+b )
        rotate( -90 * (slidea-1.0) )
        polygon( 0, 0, 0, -a, a, b-a, a, b )
    else:
        translate( 0, b )
        sheared_rectangle( a, a, b, 3.0-slidea )
    pop()
    # Purple piece: same three phases, ending in the b^2 square.
    push()
    color( 0.5, 0.0, 0.7, subdivide )
    if slideb < 1.0:
        translate( a, a+b )
        rotate( -90-theta )
        translate( -c, af*c )
        sheared_rectangle( c, bf * c, b*a/c, slideb )
    elif slideb < 2.0:
        translate( a+b, b )
        rotate( 90 * (slideb-1.0) )
        polygon( 0, 0, -b, 0, a-b, b, a, b )
    else:
        translate( a, b )
        rotate( -90 )
        sheared_rectangle( b, b, -a, 3.0-slideb )
    pop()
    # Construction line (altitude) with a small right-angle tick mark.
    if linealpha > 0.0:
        thickness( 0.06 )
        color( 0, 0, 0, linealpha )
        line( a,b, a+a*lineextend+af*b, a+b*lineextend+b-af*a )
        line( a+(af+0.05)*b,a+b-(af+0.05)*a,
              a*0.95+(af+0.05)*b,a+b*0.95-(af+0.05)*a,
              a*0.95+af*b,a+b*0.95-af*a )
    # a^2 / b^2 / c^2 labels, faded in by textlabel.
    if textlabel > 0.0:
        color( 1, textlabel )
        text( a/2.0,b+a/2.0, 'a', fonts['text'], size = 1.0, anchor = 'e' )
        text( a/2.0+0.05,b+a/2.0+0.05, '2', fonts['text'], size = 0.6, anchor = 'sw' )
        text( a+b/2.0,b/2.0, 'b', fonts['text'], size = 1.0, anchor = 'e' )
        text( a+b/2.0+0.05,b/2.0+0.05, '2', fonts['text'], size = 0.6, anchor = 'sw' )
        text( a+(a+b)/2.0, b+(a+b)/2.0, 'c', fonts['text'], size = 1.0, anchor = 'e' )
        text( a+(a+b)/2.0+0.05, b+(a+b)/2.0+0.05, '2', fonts['text'], size = 0.6, anchor = 'sw' )
    # Invisible pickable dot at the square corner, used by query_id() for
    # mouse dragging.
    id( 1 )
    color( invisible )
    dot( 0.5, a, b )
    if info:
        info.last_drawn = mark()
class PythagorasDemo(Controller):
limit = 1.5
def create_objects( self ):
self.d = Drawable( None, pythagoras, textlabel=1, subdivide=0, linealpha=1, lineextend=1,
info = self )
return self.d
def start( self ):
self.toggle = 1
self.last_drawn = None
d = self.d
smooth( 1.0, d.subdivide, 0.8 )
smooth( 1.0, d.slidea, 1 )
smooth( 1.0, d.slidea, 2 )
smooth( 1.0, d.slidea, 3 )
smooth( 1.0, d.slideb, 1 )
smooth( 1.0, d.slideb, 2 )
smooth( 1.0, d.slideb, 3 )
def key( self, k, x, y, m ):
if k == 'a':
if self.toggle:
smooth( 0.5, self.d.subdivide, 0.0 )
set( self.d.slidea, 0.0 )
set( self.d.slideb, 0.0 )
self.toggle = 0
else:
smooth( 1.0, self.d.subdivide, 0.8 )
smooth( 1.0, self.d.slidea, 1 )
smooth( 1.0, self.d.slidea, 2 )
smooth( 1.0, self.d.slidea, 3 )
smooth( 1.0, self.d.slideb, 1 )
smooth( 1.0, self.d.slideb, 2 )
smooth( 1.0, self.d.slideb, 3 )
self.toggle = 1
def mousedown( self, x, y, m ):
if not self.last_drawn: return
what, = query_id( x, y )
if what:
self.drag = 1
x, y = unproject( x, y, self.last_drawn )
a = x
if self.limit > a:
a = self.limit
elif 6-self.limit < a:
a = 6-self.limit
print x, y, a, get(self.d.a)
set( self.d.a, a )
def mousemove( self, x, y, m ):
if self.drag:
x, y = unproject( x, y, self.last_drawn )
a = x
if self.limit > a:
a = self.limit
elif 6-self.limit < a:
a = 6-self.limit
set( self.d.a, a )
def mouseup( self, x, y, m ):
self.drag = 0
test_objects( PythagorasDemo, pythagoras )
| true |
31054d91e0c6ea097c69c72c227c2b925d588d1e | Python | dRoje/design-patterns-python | /command/macroCommand.py | UTF-8 | 411 | 2.65625 | 3 | [] | no_license | from command import Command
from typing import List
class MacroCommand(Command):
    """Composite command: runs a list of sub-commands as a single unit."""

    def __init__(self, commands):
        # type: (List[Command]) -> None
        assert isinstance(commands, list)
        self.commands = commands

    def execute(self):
        """Execute each sub-command, in the order given at construction."""
        for cmd in self.commands:
            cmd.execute()

    def undo(self):
        """Undo each sub-command, in the order given at construction."""
        for cmd in self.commands:
            cmd.undo()
| true |
b55b280d8d1699c8b8c953cd97f635746367dbf3 | Python | samirad123/lab_lec5 | /task 3 f.py | UTF-8 | 116 | 3.5 | 4 | [] | no_license | def average_list(n):
sum = 0
for i in n:
sum += i
return sum/len(n)
print(average_list([1,2,3])) | true |
1dc5fa11c8d9ba084da73527c2cc8b5d747318c8 | Python | handsome12138/ComputerSimulationGit | /Homework/hw7/hw7.py | UTF-8 | 1,731 | 3.3125 | 3 | [] | no_license | '''
由于我的animation在jupyter notebook中不能正确跑出,这里用.py文件运行
'''
from Life import Life, LifeViewer
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import thinkplot
rc('animation', html='html5')
def make_viewer(n, m, row, col, *strings):
    """Build a Life grid and wrap it in a LifeViewer.

    n, m: rows and columns of the Life array
    row, col: upper-left coordinate where the seed pattern is placed
    strings: rows of '0'/'1' characters describing the seed pattern
    """
    board = Life(n, m)                    # n-by-m cell grid
    board.add_cells(row, col, *strings)   # seed the initial pattern
    return LifeViewer(board)
def main1():
    """Animate a double puffer-train pattern on a 40x100 Life grid."""
    puffer_train1 = [
        '0000000001111',
        '0000000010001',
        '0000000000001',
        '0000000010010',
        '0000000000000',
        '0000000000000',
        '0000000011000',
        '0110000100100',
        '1010000100100',
        '0010001101100',
        '0000000110000',
        '0000000000000',
        '0000000000000',
        '0000000000000',
        '0000000001111',
        '0000000010001',
        '0000000000001',
        '0000000010010'
    ]
    viewer = make_viewer(40, 100, 11, 1, *puffer_train1)
    # 400 frames at 50 ms per step, with grid lines shown.
    anim = viewer.animate(frames=400, interval=50, grid=True)
    plt.show()
def main2():
    """Animate a second puffer-train variant on a 100x30 Life grid."""
    puffer_train2 = [
        '011100000000000111',
        '100100000000001001',
        '000100001110000001',
        '000100001001000001',
        '001000010000000010'
    ]
    viewer = make_viewer(100, 30, 90, 5, *puffer_train2)
    # 400 frames at 50 ms per step, with grid lines shown.
    anim = viewer.animate(frames=400, interval=50, grid=True)
    plt.show()
if __name__ == '__main__':
    # Generating these animations may be slow.
    main1()
    main2()
| true |
26439afc3ebd376f66c9a306e78023dc0a90a84c | Python | fege/Exercises | /exercises/sudoku.py | UTF-8 | 1,901 | 2.859375 | 3 | [] | no_license | import random
import itertools
def check(a):
    """Return True iff every 3x3 block of the 9-row grid `a` contains
    exactly the digits 1 through 9 (rows/columns are NOT checked)."""
    full = set(range(1, 10))
    blocks = []
    for r, row in enumerate(a):
        # Start a fresh band of three blocks every third row.
        if r % 3 == 0:
            blocks.extend([[], [], []])
        for c in range(3):
            blocks[c + (r // 3) * 3].extend(row[c * 3:c * 3 + 3])
    return all(set(block) == full for block in blocks)
soluzioni = set()
def sudoku():
    """Generator of completed sudoku grids as tuples of row-tuples.

    Strategy: repeatedly fill a 9x9 grid cell by cell with a random value
    that is legal for the cell's row, column and 3x3 block; a fill attempt
    may dead-end (some cells stay None), in which case the grid is thrown
    away and a new attempt starts.  Each completed grid is yielded if new,
    then every row-permutation of it that still has valid 3x3 blocks
    (per check()) is yielded as well.  `soluzioni` deduplicates globally.

    NOTE(review): scanning all 9! row permutations per solution is very
    expensive; confirm this brute-force pass is intentional.
    """
    global soluzioni
    while True:
        rg = range(9)
        su = [[None for i in rg] for j in rg]
        for x in rg:
            # blockx: the 3-row band containing row x.
            if x in range(0,9,3): blockx = range(x, x+3)
            elif x in range(1,9,3): blockx = range(x-1, x+2)
            elif x in range(2,9,3): blockx = range(x-2, x+1)
            for y in rg:
                # blocky: the 3-column band containing column y.
                if y in range(0,9,3): blocky = range(y, y+3)
                elif y in range(1,9,3): blocky = range(y-1, y+2)
                elif y in range(2,9,3): blocky = range(y-2, y+1)
                # Values already used in this cell's 3x3 block.
                block = []
                for i in blockx:
                    for j in blocky:
                        block.append(su[i][j])
                # Candidates = 1..9 minus row x, column y and block values.
                ls = list(set(range(1,10)).difference(set(su[x])).\
                    difference(set(su[j][y] for j in rg)).difference(set(block)))
                if len(ls) > 0:
                    su[x][y] = random.choice(ls)
        # A dead-ended attempt leaves None cells; only complete grids count.
        done = True
        for t in su:
            for v in t:
                if v == None:
                    done = False
        if done == True:
            a=tuple((tuple(i) for i in su))
            if a not in soluzioni:
                soluzioni.add(a)
                yield a
            # NOTE(review): indentation lost in this dump -- this loop is
            # placed at the `if done` level (runs even when `a` was already
            # known); confirm against the original source.
            for k in itertools.permutations(a):
                if check(k):
                    if k not in soluzioni:
                        soluzioni.add(k)
                        yield k
if __name__ == "__main__":
for i in sudoku():
print(i,'\n')
| true |
e691205ce00710bdc57bbbfa2c4bafd47a1273db | Python | SShayashi/ABC | /abc51-100/abc085/b.py | UTF-8 | 155 | 3 | 3 | [] | no_license | N = int(input())
# Read N integers (one per line from stdin; N is read above) and print how
# many distinct values appear.  A set gives O(1) membership instead of the
# original O(n) list scan per element, and the never-used `cnt` counter and
# manual dedup loop are gone -- the printed count is unchanged.
a = [int(input()) for _ in range(N)]
print(len(set(a)))
| true |
a51ab372587885a575f725b773d5bf932eb284af | Python | marczakkordian/python_code_me_training | /02_flow/02_for/01.py | UTF-8 | 336 | 3.765625 | 4 | [] | no_license | # Stwórz listę przedmiotów, które zabierzesz na samotną wyprawę w góry.
# Wyświetl nazwę właśnie spakowanego przedmiotu, po ostatnim przedmiocie pokaż informację: “Great, we are ready!”
# Items to pack for a solo mountain trip; print each item as it is
# "packed", then confirm readiness after the last one.
item_list = ['bag', 'shoes', 'sweater', 'water', 'compas', 'phone']
for i in item_list:
    print(i)
print('Great, we are ready')
| true |
cce6cb67cd269d1b06a373b4d0cee9d2f45fd06b | Python | dansackett/learning-playground | /python/python-cookbook/chapter_2/code/sanitizing_example.py | UTF-8 | 676 | 3.390625 | 3 | [] | no_license | import unicodedata
import sys
"""
Translating data
"""
s = 'pýtĥöñ\fis\tawesome\r\n'
print(s)
remap = {
ord('\t'): ' ',
ord('\f'): ' ',
ord('\r'): None
}
# Convert tabs, carriage returns, etc
a = s.translate(remap)
print(a)
# Create map for all combining unicode characters to None
cmb_chrs = dict.fromkeys(c for c in range(sys.maxunicode)
if unicodedata.combining(chr(c)))
# Normalize mappings into combinations
b = unicodedata.normalize('NFD', a)
print(b)
# Apply mapping
c = b.translate(cmb_chrs)
print(c)
"""
Using Encoding
"""
print(a)
b = unicodedata.normalize('NFD', a)
print(b.encode('ascii', 'ignore').decode('ascii'))
| true |
bb3b4512bb8ae7e0d5761e833629a2fbc8f711cb | Python | ssernapalleja/OrderNodes | /Test/llenarAleatorio.py | UTF-8 | 2,465 | 2.78125 | 3 | [] | no_license | '''
Created on 5/11/2019
@author: Guest
'''
from Node_WorkPlace.__init__ import Node_WorkPlace
from Test.printNodes import printPDFNodes
import random
from CreateMap import loadMaps
#Create Diagrams of process
proMaps = loadMaps('nodos0')
#maximo = max([obj.endDate for obj in proMaps])
#for a in proMaps:
#    a.endDate= a.endDate/maximo*1340
#saveMaps(proMaps, 'nodos')
#Create Workplace
workPMap = loadMaps('workp0')
#Select Posible Initial nodes
# Initial nodes are those with no predecessors; `total` counts all nodes.
total = 0
posibleInitial = []
for pro in proMaps:
    for key,node in pro.nodes.items():
        total +=1
        if len(node.prev) == 0:
            posibleInitial.append(node)
print(len(posibleInitial))
print(len(proMaps))
print(len(workPMap))
# Build a dictionary mapping each work type to the workplaces supporting it.
dictWP = {};
for i in workPMap:
    for a in i.work:
        dictWP[a]=[]
for i in workPMap:
    for a in i.work:
        dictWP[a].append(i)
contador = 0
# Randomly place ready nodes onto compatible workplaces until none remain.
while(len(posibleInitial)>0):
    print(str(len(posibleInitial)) + " "+str(contador)+" de "+str(total))
    node = posibleInitial[random.randrange(len(posibleInitial))]
    posibleWP = dictWP[node.work]
    #date = int(input("ingresa hora"))
    #wp = int(input("ingresa en cual puesto de trabajo de "+str(len(posibleWP))))
    wp = random.randrange(len(posibleWP))
    # Earliest start: after the workplace's latest end date...
    listaFechas = [x.endDate for x in posibleWP[wp].nod_wp]
    date = 0
    if len(listaFechas)>0:
        date = max( listaFechas )+0.1 # largest end date of all
    # ...and after every predecessor node has finished.
    for k, prevN in node.prev.items():
        date = max([prevN.nod_wp.endDate+0.1, date])
    new = Node_WorkPlace(posibleWP[wp],node,date)
    try:
        if new.available:
            #update new initial nodes
            # A successor becomes "ready" once all its predecessors are placed.
            for k,next_ in node.next.items():
                addNew = True;
                if not( next_ in posibleInitial): # don't have already added to the initial
                    for k2,prev in next_.prev.items():
                        if(prev.isPlaced == False):
                            addNew = False
                            break
                    # NOTE(review): indentation lost in this dump -- this
                    # append is nested inside the `if not in` guard to avoid
                    # duplicates; confirm against the original source.
                    if addNew:
                        posibleInitial.append(next_)
            #remove new one
            posibleInitial.remove(node)
            contador +=1
        else :
            print("valores no permitidos")
            wp = int(input("xxxxxxx "+str(len(posibleWP))))
    # NOTE(review): bare `except:` hides real errors (even KeyboardInterrupt)
    # and then blocks on input() -- consider catching a specific exception.
    except:
        print("error 420")
        wp = int(input("xxxxxxx "+str(len(posibleWP))))
printPDFNodes(workPMap) | true |
9571d33c7f0e29b2408637ddde4dbad248e76b02 | Python | HyeonJun97/Python_study | /Chapter03/Chapter3_pb7.py | UTF-8 | 83 | 3.15625 | 3 | [] | no_license | #Q3.7
import time
# Q3.7: derive a pseudo-random uppercase letter from the current time.
a=int(time.time()%65)
a=65+a%26 # A=65,Z=90 (ASCII codes for the uppercase range)
print(chr(a))
5ee20e093139bc32f1428a56f0ab4da2f0e83b8d | Python | felipeochoa/thtml | /extract_svg_attrs.py | UTF-8 | 3,996 | 2.59375 | 3 | [
"MIT"
] | permissive | import attr
import os
import os.path
import re
from bs4 import BeautifulSoup
@attr.s
class Interface:
name = attr.ib()
globals = attr.ib()
specific = attr.ib()
@staticmethod
def global_name(name: str) -> str:
name = re.sub(r'\s([a-z])', lambda m: m.group(1).title(), name).replace(' ', '')
name = re.sub(r'-([a-z])', lambda m: m.group(1).title(), name).replace('-', '')
name = re.sub('xlink', 'Xlink', name, flags=re.IGNORECASE)
name = re.sub('aria', 'Aria', name, flags=re.IGNORECASE)
if name.startswith('Globals.'):
return "'%s'" % camelize(name[8:])
return 'Svg' + name
@property
def proper_name(self) -> str:
return 'Svg' + camelize(self.name).title()
def to_str(self):
pieces = []
if self.globals:
pieces.extend(sorted(map(self.global_name, self.globals)))
if self.specific:
pieces.extend("'%s'" % attr for attr in sorted(map(camelize, self.specific)))
return ' | '.join(pieces)
def load_interface(name, text) -> Interface:
    """Parse one MDN element page into an Interface.

    The page starts with '---'-delimited front matter; everything up to the
    closing delimiter is dropped before the HTML is parsed.
    """
    body = text[text.find('\n---\n', 4) + 4:]
    soup = BeautifulSoup(body, 'html5lib')
    return Interface(name, load_globals(soup), load_specific(soup))
def load_globals(soup):
    """Collect the entries listed after the 'Global attributes' heading.

    The listing element may be a <dl> (items in <dt>) or a <ul> (items in
    <li>); anything else yields an empty list.
    """
    listing = soup.find(id='Global_attributes').find_next_sibling()
    item_tag = {'dl': 'dt', 'ul': 'li'}.get(listing.name)
    if item_tag is None:
        return []
    return [extract_global_name(item.get_text())
            for item in listing.find_all(item_tag)]
def load_specific(soup):
    """Collect element-specific attribute names.

    The heading may be 'Specific attributes' or plain 'Attributes'; the
    listing may be a <ul> (items in <li>) or a <dl> (items in <dt>).
    Returns [] when no heading or no recognized listing is found.
    """
    header = soup.find(id='Specific_attributes')
    if header is None:
        header = soup.find(id='Attributes')
    if header is None:
        return []
    listing = header.find_next_sibling()
    item_tag = {'ul': 'li', 'dl': 'dt'}.get(listing.name)
    if item_tag is None:
        return []
    return [extract_attr_name(item.get_text())
            for item in listing.find_all(item_tag)]
def extract_global_name(text: str) -> str:
    """Normalize one global-attribute list entry to a group/attribute name."""
    name = clean(text)
    macro = re.match(r'\{\{\s*SVGAttr\([\'"]([a-z0-9:A-Z-]+)[\'"]\)\s*\}\}',
                     name, flags=re.IGNORECASE)
    if macro:
        # A literal {{SVGAttr('...')}} macro names a single global attribute;
        # 'class' is rewritten to the DOM property name 'className'.
        attr_name = re.sub('^class$', 'className', macro.group(1), flags=re.IGNORECASE)
        name = 'Globals.' + attr_name
    # Group headings often end in the word 'attributes' -- strip it.
    name = re.sub(' *attributes$', '', name, flags=re.IGNORECASE)
    return 'Styling' if name.lower() == 'style' else name
def extract_attr_name(text: str) -> str:
    """Pull the attribute name out of an {{SVGAttr}} or {{htmlattrxref}}
    macro; raise ValueError for anything unrecognized."""
    entry = clean(text)
    svg = re.match(r'\{\{\s*SVGAttr\([\'"]([a-z0-9:A-Z-]+)[\'"]\)\s*\}\}',
                   entry, flags=re.IGNORECASE)
    if svg:
        return svg.group(1)
    html = re.match(r'\{\{\s*htmlattrxref\([\'"]([a-z0-9:A-Z-]+)[\'"], ?[\'"]([a-z0-9:A-Z-]+)[\'"]\)\s*\}\}',
                    entry, flags=re.IGNORECASE)
    if html:
        # HTML cross-references are tagged with the element they belong to.
        return 'html-' + html.group(2) + '-' + html.group(1)
    raise ValueError('Could not extract attribute from ' + entry)
def clean(name: str) -> str:
    """Collapse every whitespace character to a single space, drop '»'
    markers, and trim leading/trailing whitespace."""
    normalized = re.sub(r'\s', ' ', name)
    return normalized.replace('»', '').strip()
def camelize(name: str) -> str:
    """Convert hyphen- or colon-separated names to lowerCamelCase,
    e.g. 'stroke-width' -> 'strokeWidth', 'xlink:href' -> 'xlinkHref'."""
    upcase = lambda m: m.group(1).title()
    name = re.sub(r'-([a-z])', upcase, name).replace('-', '')
    return re.sub(r':([a-z])', upcase, name)
def maybe_quote(name: str) -> str:
    """Quote the name unless it consists solely of lowercase a-z."""
    needs_quotes = re.search('[^a-z]', name) is not None
    return "'%s'" % name if needs_quotes else name
def main():
    """Scan every ./<element>/index.html MDN page and print one mapping
    line per element: quoted-name: union-of-attribute-types."""
    for entry in os.listdir('.'):
        page = os.path.join(entry, 'index.html')
        if not os.path.isfile(page):
            continue
        contents = open(page).read()
        try:
            iface = load_interface(entry, contents)
        except Exception as e:
            # Tag the failing element onto the exception message, then
            # let the error propagate.
            e.args = (e.args[0] + (' (%s)' % entry),)
            raise
        print(' %s: %s,' % (maybe_quote(iface.name), iface.to_str()))
if __name__ == '__main__':
    main()
| true |
9bc86857997f7c8a3efc45c47580be6937fb2c08 | Python | anthonydb/pneumatic | /pneumatic/db.py | UTF-8 | 5,584 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import os
import sys
import csv
import sqlite3
from colorama import init
from .utils import Utils
class Database(object):
    """
    A SQLite database to store results of file upload attempts to
    DocumentCloud, along with database-related utilities.
    """

    def __init__(self):
        # Shared helpers (timestamps etc.).
        self.utils = Utils()
        # Initialize colorama
        init()

    def make_db(self):
        """Create a timestamped SQLite db under pneumatic_db/ with the
        `uploads` table.  No-op if that exact file already exists."""
        # Create a SQLite db whose name includes a timestamp.
        timestamp = self.utils.timestamp()
        self.db_name = 'dc-upload-' + timestamp + '.db'
        # make the database file directory
        if not os.path.isdir('pneumatic_db'):
            os.mkdir('pneumatic_db')
        else:
            pass
        self.db_full_path = os.path.join('pneumatic_db', self.db_name)
        # Connect to the db and create the uploads table.
        if not os.path.exists(self.db_full_path):
            conn = sqlite3.connect(self.db_full_path)
            cur = conn.cursor()
            cur.execute('''
                CREATE TABLE uploads
                (
                id Text, title Text, file_name Text, full_path Text,
                upload_time Text, pages Integer, file_hash Text,
                result Text, canonical_url Text, pdf_url Text,
                text_url Text, exclude_flag Text, exclude_reason Text,
                error_msg Text
                )
                ''')
            conn.commit()
            conn.close()
            # '\033[36m' switches terminal output to cyan (via colorama).
            print('\033[36m* New uploads database created at: ' +
                  self.db_full_path)
        else:
            pass

    def insert_row(self, id, title, file_name, full_path, up_time, pages,
                   file_hash, result, canonical_url, pdf_url, text_url,
                   exclude_flag, exclude_reason, error_msg):
        """
        Inserts a row in the table.
        """
        # Parameterized statement: 14 placeholders, one per column.
        conn = sqlite3.connect(self.db_full_path)
        cur = conn.cursor()
        cur.execute('''
            INSERT INTO uploads VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?);
            ''', (id, title, file_name, full_path, up_time, pages,
                  file_hash, result, canonical_url, pdf_url, text_url,
                  exclude_flag, exclude_reason, error_msg))
        conn.commit()
        conn.close()

    def update_row(self, db, id, title, file_name, full_path, up_time, pages,
                   file_hash, result, canonical_url, pdf_url, text_url,
                   exclude_flag, exclude_reason, error_msg):
        """
        Updates a row in the table.
        """
        # NOTE(review): only title, pages and file_hash are actually written
        # even though the full column set is accepted -- confirm intended.
        conn = sqlite3.connect(db)
        cur = conn.cursor()
        cur.execute('''
            UPDATE uploads
            SET title = ?, pages = ?, file_hash = ?
            WHERE id = ?;
            ''', (title, pages, file_hash, id))
        conn.commit()
        conn.close()

    def dump_to_csv(self, db_name=None):
        """
        Outputs the contents of a SQLite database to CSV.
        """
        print('\n\033[36mDump to CSV')
        timestamp = self.utils.timestamp()
        # We can pass in a db name or use the one in the current session.
        # Check if it exists, first.
        if db_name:
            if os.path.isfile(db_name):
                db = db_name
            else:
                print('\033[31mERROR: \033[0mThe database file ' +
                      'specified does not exist.')
                # NOTE(review): `sys` is not among this file's visible
                # imports -- sys.exit() here would raise NameError; confirm
                # `import sys` exists at the top of the real file.
                sys.exit()
        else:
            db = self.db_full_path
        # Create an output folder and CSV file name.
        if not os.path.isdir('pneumatic_csv'):
            os.mkdir('pneumatic_csv')
        else:
            pass
        self.csv_name = 'dc-output-' + timestamp + '.csv'
        self.csv_full_path = os.path.join('pneumatic_csv', self.csv_name)
        # Query the database and write the rows to the CSV.
        row_counter = 0
        with open(self.csv_full_path, 'w', newline='') as csvfile:
            header_row = ('id', 'title', 'file_name', 'full_path',
                          'upload_time', 'pages', 'file_hash', 'result',
                          'canonical_url', 'pdf_url', 'text_url',
                          'exclude_flag', 'exclude_reason', 'error_msg')
            writer = csv.writer(csvfile, delimiter=',', quotechar='"')
            writer.writerow(header_row)
            # Query the database and write rows to CSV.
            conn = sqlite3.connect(db)
            cur = conn.cursor()
            for row in cur.execute('SELECT * FROM uploads;'):
                writer.writerow(row)
                row_counter += 1
            conn.close()
        print('\033[36m* CSV file created. ' + str(row_counter) +
              ' database records were exported to ' + self.csv_full_path)

    def cleanup_empty_db(self, db_name):
        """
        Checks for an empty db and removes if so.
        """
        conn = sqlite3.connect(db_name)
        cur = conn.cursor()
        cur.execute('SELECT COUNT(*) from uploads;')
        record_count = cur.fetchone()[0]
        conn.close()
        if record_count == 0:
            print('\n\033[36mCleanup\n* The new uploads database contains ' +
                  'no records. Deleting it to reduce clutter.')
            os.remove(db_name)
        else:
            pass

    def print_db_name(self):
        """
        Prints name of the database.
        """
        print('\n\033[36mDatabase Name\n* Responses from the DocumentCloud ' +
              'API are stored in a SQLite database in your current ' +
              'directory at: ' + self.db_full_path)
| true |
280eee265af4ad17dbb71c354ffa9fbd50c16f31 | Python | erasmuss/raman-spectra-decomp-analysis | /ramandecompy/tests/test_dataprep.py | UTF-8 | 8,450 | 2.90625 | 3 | [
"MIT"
] | permissive | """docstring"""
import os
import h5py
from ramandecompy import dataprep
def test_new_hdf5():
    """
    A function that tests that there are no errors in the `new_hdf5` function from dataprep.
    """
    # check to ensure that the test file does not already exist and remove if it does
    if os.path.exists('function_test.hdf5'):
        os.remove('function_test.hdf5')
    else:
        pass
    dataprep.new_hdf5('function_test')
    # test inputs: a non-string filename must be rejected with TypeError
    try:
        dataprep.new_hdf5(4.2)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    # clean up the generated file
    os.remove('function_test.hdf5')
def test_add_calibration():
    """
    A function that tests the `add_calibration` function from dataprep. It first tests that no
    errors occur when the function is run before testing the output to ensure that the calibration
    compound was sucessfully added and labeled appropriately. It checks that the proper number of
    peaks were saved as well as the wavenumber, counts, and residuals. It tests both the custom and
    automatic labeling functionality before finally ensuring that input errors are handled well.
    """
    # check to ensure that the test file does not already exist and remove if it does
    if os.path.exists('test.hdf5'):
        os.remove('test.hdf5')
    else:
        pass
    dataprep.new_hdf5('test')
    dataprep.add_calibration('test.hdf5',
                             'ramandecompy/tests/test_files/Methane_Baseline_Calibration.xlsx',
                             label='Methane')
    cal_file = h5py.File('test.hdf5', 'r')
    # verify the stored structure under the custom label
    assert list(cal_file.keys())[0] == 'Methane', 'custom label not applied correctly'
    assert len(cal_file) == 1, 'more than one first order group assigned to test.hdf5'
    assert len(cal_file['Methane']) == 4, 'more then 1 peak was stored'
    assert 'Methane/wavenumber' in cal_file, 'x data (wavenumber) not stored correctly'
    assert 'Methane/counts' in cal_file, 'y data (counts) not stored correctly'
    assert 'Methane/residuals' in cal_file, 'residuals not stored correctly'
    # test that function assigns filename correctly as compound label
    dataprep.new_hdf5('test1')
    dataprep.add_calibration('test1.hdf5',
                             'ramandecompy/tests/test_files/Methane_Baseline_Calibration.xlsx')
    cal_file1 = h5py.File('test1.hdf5', 'r')
    assert list(cal_file1.keys())[0] == 'Methane_Baseline_Calibration', """
    filename label not applied correctly"""
    # test inputs: each bad argument must be rejected with a TypeError
    try:
        dataprep.add_calibration(4.2, """ramandecompy/tests/test_files/
            CarbonMonoxide_Baseline_Calibration.xlsx""")
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    try:
        # NOTE(review): 'test.hdp5' looks like a typo for 'test.hdf5', but the
        # TypeError is triggered by the 4.2 argument so the test still holds.
        dataprep.add_calibration('test.hdp5', 4.2)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.add_calibration('test.txt', """ramandecompy/tests/
            test_files/CarbonMonoxide_Baseline_Calibration""")
    except TypeError:
        print('A .txt file was passed to the function, and it was handled will with a TypeError.')
    # clean up generated files
    os.remove('test.hdf5')
    os.remove('test1.hdf5')
def test_add_experiment():
    """
    A function that tests the `add_experiment` function from dataprep. It first tests that no
    errors occur when the function is run before testing the output to ensure that the
    experimental data was sucessfully added and labeled appropriately. It checks that the
    proper number of peaks were saved as well as the wavenumber, counts, and residuals.
    Lastly it ensures that input errors are handled well.
    """
    # check to ensure that the test file does not already exist and remove if it does
    if os.path.exists('exp_test_1.hdf5'):
        os.remove('exp_test_1.hdf5')
    else:
        pass
    dataprep.new_hdf5('exp_test_1')
    dataprep.add_experiment('exp_test_1.hdf5',
                            'ramandecompy/tests/test_files/FA_3.6wt%_300C_25s.csv')
    # NOTE(review): the h5py handle is never closed; acceptable in a short
    # test but worth confirming no later test reopens the same file.
    exp_file = h5py.File('exp_test_1.hdf5', 'r')
    # test generated file: temperature/time group hierarchy plus datasets
    assert len(exp_file) == 1, 'incorrect number of 1st order groups'
    assert list(exp_file.keys())[0] == '300C', '1st order group name incorrect'
    assert len(exp_file['300C']) == 1, 'incorrect number of 2nd order groups'
    assert list(exp_file['300C'].keys())[0] == '25s', '2nd order group name incorrect'
    assert '300C/25s/wavenumber' in exp_file, 'x data (wavenumber) not stored correctly'
    assert '300C/25s/counts' in exp_file, 'y data (counts) not stored correctly'
    assert '300C/25s/residuals' in exp_file, 'residuals not stored correctly'
    assert len(exp_file['300C/25s']) == 19, 'incorrect number of peaks + raw_data stored'
    # test inputs: each bad argument must be rejected with a TypeError
    try:
        dataprep.add_experiment(4.2, """ramandecompy/tests/test_files/
            CarbonMonoxide_Baseline_Calibration.xlsx""")
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.add_experiment('exp_test_1.hdp5', 4.2)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.add_experiment('exp_test_1.txt', """ramandecompy/tests/
            test_files/CarbonMonoxide_Baseline_Calibration""")
    except TypeError:
        print('A .txt file was passed to the function, and it was handled will with a TypeError.')
    # clean up the generated file
    os.remove('exp_test_1.hdf5')
def test_adjust_peaks():
    """
    A function that tests the `adjust_peaks` function from dataprep. The function first looks to
    see that no errors occur when running the function before then checking to ensure that input
    errors are handled well.
    """
    # check to ensure that the test file does not already exist and remove if it does
    if os.path.exists('exp_test_2.hdf5'):
        os.remove('exp_test_2.hdf5')
    else:
        pass
    # generate test hdf5 file
    dataprep.new_hdf5('exp_test_2')
    dataprep.add_experiment('exp_test_2.hdf5', 'ramandecompy/tests/test_files/FA_3.6wt%_300C_25s.csv')
    # peaks to add and drop form auto-fitting
    add_list = [1270, 1350, 1385]
    drop_list = ['Peak_01']
    dataprep.adjust_peaks('exp_test_2.hdf5', '300C/25s', add_list, drop_list)
    # test inputs: each bad argument type must be rejected with a TypeError
    try:
        dataprep.adjust_peaks(4.2, '300C/25s', add_list, drop_list, plot_fits=True)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.adjust_peaks('exp_test_2.txt', '300C/25s', add_list, drop_list, plot_fits=True)
    except TypeError:
        print('A .txt was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.adjust_peaks('exp_test_2.hdf5', ['300C/25s', '400C/25s'],
                              add_list, drop_list, plot_fits=True)
    except TypeError:
        print('A list was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.adjust_peaks('exp_test_2.hdf5', '300C/25s', 'add_list', drop_list, plot_fits=True)
    except TypeError:
        print('A str was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.adjust_peaks('exp_test_2.hdf5', '300C/25s', add_list, 'drop_list', plot_fits=True)
    except TypeError:
        print('A str was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.adjust_peaks('exp_test_2.hdf5', '300C/25s', add_list, drop_list, plot_fits=3)
    except TypeError:
        print('An int was passed to the function, and it was handled well with a TypeError.')
    # clean up the generated file
    os.remove('exp_test_2.hdf5')
def test_view_hdf5():
    """
    A function that tests the `view_hdf5` function from dataprep. The function first looks to
    see that no errors occur when running the function before then checking to ensure that input
    errors are handled well.
    """
    # test inputs: a valid file runs without error...
    dataprep.view_hdf5('ramandecompy/tests/test_files/dataprep_experiment.hdf5')
    # ...and bad argument types must be rejected with a TypeError
    try:
        dataprep.view_hdf5(4.2)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    try:
        dataprep.view_hdf5('test.txt')
    except TypeError:
        print('A .txt was passed to the function, and it was handled well with a TypeError.')
| true |
00d6f8de99243d187cbaef49df0efaf44ad27082 | Python | ajiexw/old-zarkpy | /web/cgi/model/oauth/OAuth2.py | UTF-8 | 3,200 | 2.546875 | 3 | [] | no_license | #coding=utf-8
from .. import Model
import datetime, hashlib
class OAuth2(Model):
table_name = ''
column_names = ['Userid', 'access_token', 'open_id', 'access_expires', 'access_token_md5_int', 'open_id_md5_int', 'share', ]
def insert(self, data):
raise 'donnot use method OAuth2.insert'
def getOpenIdByAccessToken(self, access_token):
return self._getDB().fetchFirst('select open_id from '+self.table_name+' where access_token=%s order by '+self.table_name+'id desc limit 1', [access_token])
def getBy(self, open_id):
md5_int = self.getMD5Int(open_id)
return self.getOneByWhere('open_id_md5_int=%s and open_id=%s', [md5_int, open_id])
def getMD5Int(self, key):
md5 = hashlib.md5()
md5.update(str(key))
return int(md5.hexdigest(), 16) % 4000000000 # 因为mysql中的unsigned int最大值约为40亿
def insertBy(self, open_id, access_token, access_expires, user_id=None):
self.deleteByAccessToken(access_token)
new_data = {'open_id':open_id, 'access_token':access_token, 'access_expires':access_expires}
if user_id:
new_data['Userid'] = user_id
new_data['open_id_md5_int'] = self.getMD5Int(open_id)
new_data['access_token_md5_int'] = self.getMD5Int(access_token)
return Model.insert(self, new_data)
def updateAccessToken(self, open_id, access_token, access_expires):
access_token_md5_int = self.getMD5Int(access_token)
now = datetime.datetime.now()
self._getDB().update('update '+self.table_name+' set access_token=%s, access_token_md5_int=%s, access_expires=%s, updated=%s where open_id=%s', [access_token, access_token_md5_int, access_expires, now, open_id])
def deleteByAccessToken(self, access_token):
self._getDB().delete('delete from '+self.table_name+' where access_token=%s', access_token)
def bindUserid(self, access_token, user_id):
self._getDB().update('update ' + self.table_name + ' set Userid=%s where access_token=%s', [user_id, access_token])
def getsByUserid(self, user_id):
return self._getDB().fetchSome('select * from ' + self.table_name + ' where Userid=%s', [user_id])
def existsByUserid(self, user_id):
item = self._getDB().fetchFirst('select Userid from ' + self.table_name + ' where Userid=%s limit 1', [user_id])
return item is not None
table_template = '''
CREATE TABLE {$table_name} (
{$table_name}id int unsigned not null auto_increment,
Userid int unsigned not null default 0,
access_token varchar(100) not null default '',
open_id varchar(100) not null default '',
access_expires int unsigned not null default 0,
share enum('off','on') not null default 'on',
open_id_md5_int int unsigned not null,
access_token_md5_int int unsigned not null,
updated timestamp not null default current_timestamp,
primary key ({$table_name}id),
unique key (access_token_md5_int, access_token),
unique key (open_id_md5_int, open_id)
)ENGINE=InnoDB;
'''
| true |
70459db4e5ead29cbee516da5a1ecfcfa2961bfd | Python | bberzhou/LearningPython | /4FunctionalProgram/HighFunc.py | UTF-8 | 987 | 4.71875 | 5 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 高阶函数
# 一、变量可以指向函数
# 以python内置的函数abs()为例
print(abs(-10)) # 这里是函数调用
# <built-in function abs> 内置函数
print(abs) # abs是函数本身
# 函数本身也可以赋值给变量,即:变量可以指向函数
fun = abs
print(fun(-4))
# 输出4,函数本身也可以赋值给变量,即:变量可以指向函数。
# 函数名其实就是指向函数的变量,
# 二、函数名也是变量
# abs = 10
# abs(-10)
# 把abs指向10后,就无法通过abs(-10)调用该函数了!因为abs这个变量已经不指向求绝对值函数而是指向一个整数10!
# 三、传入函数,变量可以指向函数,函数的参数能接收变量,
# 那么一个函数就可以接收另一个函数作为参数,这种函数就称之为高阶函数
# 传入函数名fun
def add(x, y, f):
return f(x) + f(y)
print(add(-2, -4, abs))
# 6 传入fun函数名,
| true |
0bdc769662c381ee6c13f2a468aed030c3dcdc54 | Python | signoiidx/IEEE-pdf-renamer | /ieee_pdf_renamer.py | UTF-8 | 1,700 | 2.9375 | 3 | [] | no_license | import os
import re
import requests
import bs4
# make list of the files in the current directory
dir_files = str(os.listdir(os.getcwd()))
# search list for IEEE PDF files and arXiv PDF files
pdf_files = re.findall(r'\d{8}\.pdf|\d{4}\.\d{5}.pdf', dir_files) #fetch numbered pdf
pdf_num = [pdf_files.replace(".pdf", "") for pdf_files in pdf_files] #remove file extension
if len(pdf_files) > 0: #exist pdf file(s)
# gen URL for accessing IEEE Xplore, scrape title data, and rename file
for num, old_path in zip(pdf_num, pdf_files):
if re.match(r'\d{4}\.\d{5}',num): #arXiv pdf
# gen URL for accessing arXiv
access_URL = 'https://arxiv.org/abs/' + num
# scrape title data
get_url_info = requests.get(access_URL)
soup = bs4.BeautifulSoup(get_url_info.text, 'html.parser')
webtitle = soup.title.text
title = re.sub(r'\[\d{4}.\d{5}\] ', "", webtitle)
elif re.match(r'\d{8}',num): #IEEE PDF
# gen URL for accessing IEEE Xplore
num_url = num.lstrip('0')
access_URL = 'https://ieeexplore.ieee.org/document/' + num_url
# scrape title data
get_url_info = requests.get(access_URL)
soup = bs4.BeautifulSoup(get_url_info.text, 'html.parser')
webtitle = soup.title.text
title = re.sub(r' - IEEE.*', "", webtitle) # remove journal title
else: #unsupported PDF
print("{}.pdf is not supported currently".format(pdf_num))
continue
# genarate new filename
title2 = re.sub(r'[\/:,;*?"<>|]', "", title) # del f***ing char
new_path = re.sub(r' ', "_", title2) + ".pdf" # add underbar
# rename
os.rename(old_path, new_path)
# plot result
print("Done. \"" + old_path + "\" -> \"" + new_path + "\"")
else: # Not Found
print("No PDF files detected.")
| true |
e88365eb7e0714c20bb2429a9507b2405302ed05 | Python | benji06140/oci-prog-exos | /niveau-01/chapitre-7-conditions-avancees-operateurs-booleens/bonus--casernes-de-pompiers-validation.py | UTF-8 | 863 | 2.9375 | 3 | [] | no_license | ##################################
# fichier bonus--casernes-de-pompiers-validation.py
# nom de l'exercice : Bonus : Casernes de pompiers
# url : http://www.france-ioi.org/algo/task.php?idChapter=648&idTask=0&sTab=task&iOrder=7
# type : validation
#
# Nom du chapitre :
#
# Compétence développée :
#
# auteur :
##################################
# chargement des modules
# mettre votre code ici
nbPaires=int(input())
for loop in range(nbPaires):
abcissemin1=int(input())
abcissemax1=int(input())
ordonneemin1=int(input())
ordonneemax1=int(input())
abcissemin2=int(input())
abcissemax2=int(input())
ordonneemin2=int(input())
ordonneemax2=int(input())
if (abcissemax1<=abcissemin2 or abcissemax2<=abcissemin1)or(ordonneemax1<=ordonneemin2 or ordonneemax2<=ordonneemin1):
print("NON")
else:
print("OUI")
| true |
b1b6676bf5d23766c88be92cd5f72f03f99fa0d3 | Python | pddona/python_ejemplos | /PROBLEMAS resueltos/TKINTER_Indice_Masa_Corporal.py | UTF-8 | 2,540 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from tkinter import * # python 3.
except:
from Tkinter import * # python 2.7
def main():
#Crear y configurar ventana principal
window = Tk()
window.title("Entry")
#window.geometry('500x150') # Si queremos un tamaño determinado de ventana
#widgets
titulo = Label(window, text="CÁLCULO DEL ÍNDICE DE MASA CORPORAL")
titulo.grid(row=0,column=0)
#Primer número
lbl_1 = Label(window, text="Peso en Kg")
lbl_1.grid(column=0, row=1)
numero_1 = Entry(window,width=10) # Introducir Primer número
numero_1.insert(0,0) # Colocar un cero como valor inicial en la primera posición
numero_1.grid(column=1, row=1)
#Segundo número
lbl_2 = Label(window, text="Altura en metros")
lbl_2.grid(column=0, row=2)
numero_2 = Entry(window,width=10) # Introducir Segundo número
numero_2.insert(0,0) # Colocar un cero como valor inicial en la primera posición
numero_2.grid(column=1, row=2)
#Resultado
label_resultado = Label(window, text='IMC')
label_resultado.grid(column=0, row=3)
resultado = Label(window, text='')
resultado.grid(column=1, row=3)
# FUNCION que realiza el cálculo, es llamada por el BOTON 'calcular' con 'command=imc'
def imc():
try: # Comprobar que los números introducidos son válidos y no son letras
peso = float( numero_1.get().replace(',', '.') ) # OJO al introducir por teclados números decimales con coma
altura = float( numero_2.get().replace(',','.') ) # SUSTITUIR COMAS por PUNTOS
solucion = float( peso / altura**2 )
resultado.config(text = solucion)# Modificar el contenido del label 'resultado'
except:
resultado.config(text ='Debe introducir números válidos')
#Botón que llama a la función 'media' deifnida arriba
calcular = Button(window,text='Calcular ICM',command=imc)
calcular.grid(column=0, row=4)
#Bucle principal de la ventana
window.mainloop()
if __name__ == "__main__": # averiguar si se está ejecutando o importando
main()
| true |
753fd43d537e6d0a896094f7ebdd078a18a53827 | Python | ivansipiran/Data-driven-cultural-heritage | /utils/utils.py | UTF-8 | 3,939 | 2.703125 | 3 | [] | no_license | import visdom
import os
import random
import json
import numpy as np
import torch
import pickle
import matplotlib
import matplotlib.pyplot as plt
#initialize the weighs of the network for Convolutional layers and batchnorm layers
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1 and classname.find('Conv2d') == -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1 and classname.find('BatchNorm2d') == -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def visdom_show_pc(pc, window, title, vis, Y = []):
if len(Y) != 0:
vis.scatter(X = pc, Y = Y, win = window, opts =
dict(
title = title,
markersize = 2,
xtickmin=-1,
xtickmax=1,
ytickmin=-1,
ytickmax=1,
ztickmin=-1,
ztickmax=1,
),
)
else:
vis.scatter(X = pc, win = window, opts =
dict(
title = title,
markersize = 2,
xtickmin=-1,
xtickmax=1,
ytickmin=-1,
ytickmax=1,
ztickmin=-1,
ztickmax=1,
),
)
#Create a folder for a model and copy the Python code to produce that model
def save_paths(save_path, trainFile, datasetFile, modelFile):
if not os.path.exists('./log/'):
os.mkdir('./log/')
dir_name = os.path.join('log', save_path)
if not os.path.exists(dir_name):
os.mkdir(dir_name)
logname = os.path.join(dir_name, 'log.txt')
os.system('cp ./' + trainFile + '.py %s' % dir_name)
os.system('cp ./dataset/' + datasetFile + '.py %s' % dir_name)
os.system('cp ./models/' + modelFile + '.py %s' % dir_name)
return dir_name, logname
#Save important information during training: losses, epochs and pytorch models
def save_model(network_state_dict, optimizer_state_dict, logname, dir_name, train_loss, val_loss, epoch, lrate , loss_avgs_train, loss_avgs_test, net_name = "model"):
with open(dir_name + "/" + net_name + '_loss_avgs_train.pkl','wb') as f: pickle.dump(loss_avgs_train, f)
with open(dir_name + "/" + net_name + '_loss_avgs_test.pkl','wb') as f: pickle.dump(loss_avgs_test, f)
log_table = {
"net" : net_name,
"train_loss" : train_loss.avg,
"val_loss" : val_loss.avg,
"epoch" : epoch,
"lr" : lrate,
}
with open(logname, 'a') as f:
f.write('json_stats: ' + json.dumps(log_table) + '\n')
print('saving net...')
checkpoint = {
'epoch': epoch + 1,
'state_dict': network_state_dict,
'optimizer': optimizer_state_dict
}
torch.save(checkpoint, '%s/%s.pth' % (dir_name, net_name))
def vis_curve(train_curve, test_curve, window, name, vis):
vis.line(X=np.column_stack((np.arange(len(train_curve)),np.arange(len(test_curve)))),
Y=np.column_stack((np.array(train_curve),np.array(test_curve))),
win=window,
opts=dict(title=name, legend=[name + "_curve" , name + "_curve" ], markersize=2, ), )
def generate_training_plot(path, name, train_loss, test_loss, best_train, best_test):
plt.figure()
plt.plot(train_loss)
plt.plot(test_loss)
plt.xlabel("epoch(" + "Best train:" + "{:.4f}".format(best_train) + " Best test:" + "{:.4f}".format(best_test) +")")
plt.ylabel("loss")
plt.text(0.5, 3, "text on plot")
plt.savefig(os.path.join(path, name) + ".png")
plt.show()
def open_pickle(path):
print(path)
file = open(path, "rb")
obj = pickle.load(file)
return obj
| true |
6013692547d3e60165fddffac241ddb35eae8f27 | Python | amoscookeh/WaterMePlsBot | /fun_facts_api.py | UTF-8 | 2,469 | 3.21875 | 3 | [] | no_license | import os
import praw
import random
PASSWORD = os.environ['REDDIT_PASSWORD']
reddit = praw.Reddit(client_id="8ZETxx_lxHX5b6exbgMBzw", # your client id
client_secret="88ZP0r0jT56J_jpCI5h5fc_eZr798g", # your client secret
user_agent="watermeplsbot", # user agent name
username="watermeplsbot", # your reddit username
password=PASSWORD) # your reddit password
def get_nature_facts():
sub = ['Awwducational'] # make a list of subreddits you want to scrape the data from
for s in sub:
subreddit = reddit.subreddit(s) # Chosing the subreddit
queries = ['plants', 'arctic', 'beach', 'environment', 'island', 'sea', 'fish', 'insects', 'fungus']
random_idx = random.randint(0, len(queries) - 1)
random_query = [queries[random_idx]]
query_limit = 50
titles = []
urls = []
bodies = []
for item in random_query:
# post_dict = {
# "title": [], # title of the post
# # "score": [], # score of the post
# # "id": [], # unique id of the post
# "url": [], # url of the post
# # "comms_num": [], # the number of comments on the post
# # "created": [], # timestamp of the post
# "body": [] # the description of post
# }
for submission in subreddit.search(random_query, sort="top", limit=query_limit):
# post_dict["title"].append(submission.title)
# # post_dict["score"].append(submission.score)
# # post_dict["id"].append(submission.id)
# post_dict["url"].append(submission.url)
# # post_dict["comms_num"].append(submission.num_comments)
# # post_dict["created"].append(submission.created)
# post_dict["body"].append(submission.selftext)
titles.append(submission.title)
bodies.append(submission.selftext)
urls.append(submission.url)
random_idx = random.randint(0, len(titles) / 2)
post = {
'title': titles[random_idx],
'body': bodies[random_idx],
'url': urls[random_idx],
}
return post
def main():
count = 0
for i in range(10):
fact = get_nature_facts()
msg = "🌲NATURE🌲 FACT: {}\n\nCredits: {}".format(fact['title'], fact['url'])
print(len(msg))
print(msg)
count += 1
print(count)
if __name__ == '__main__':
main()
| true |
16b825a5bb794c559803051fb9ad798877960d13 | Python | enrico-kaack/RoboticGames | /Finale_Auswertung/create_visualisation.py | UTF-8 | 3,159 | 2.53125 | 3 | [] | no_license | import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import rosbag
import itertools
if len(sys.argv) != 2:
print("specify the base folder as first parameter. Exiting")
sys.exit(1)
base_path = sys.argv[1]
spawn_point=np.array([[0,0],[0,1.36],[0,1.23],[-2.1,2.1],[0,2.18]])
bag_mouse = rosbag.Bag(os.path.expanduser(base_path + '/mouse/mouse_path.bag'))
bag_cat = rosbag.Bag(os.path.expanduser(base_path + '/cat/cat_path.bag'))
point_list_mouse=np.array([[0,0]])
point_list_cat=np.array([[0,0]])
mouse_time=[]
cat_time=[]
all_topics=['/mouse/base_pose_ground_truth','/cat/base_pose_ground_truth']
for topic, msgs, t in bag_mouse.read_messages(topics=['/mouse/base_pose_ground_truth']):
point_list_mouse=np.append(point_list_mouse,[[msgs.pose.pose.position.x,msgs.pose.pose.position.y]],axis=0)
mouse_time.append(t)
for topic, msgs, t in bag_cat.read_messages(topics=['/cat/base_pose_ground_truth']):
point_list_cat=np.append(point_list_cat,[[msgs.pose.pose.position.x,msgs.pose.pose.position.y]],axis=0)
cat_time.append(t)
point_list_cat=list(point_list_cat)
cat_time=list(cat_time)
point_list_mouse = list(point_list_mouse)
mouse_time = list(mouse_time)
point_list_cat.pop(0)
while len(point_list_cat) > len(point_list_mouse):
point_list_cat.pop(0)
cat_time.pop(0)
while len(point_list_mouse) > len(point_list_cat):
point_list_mouse.pop(0)
mouse_time.pop(0)
point_list_cat=np.array(point_list_cat)
cat_time=np.array(cat_time)
point_list_mouse = np.array(point_list_mouse)
mouse_time = np.array(mouse_time)
def adjust_lightness(color, amount=0.5):
#<1 makes color brighter, >1 makes color darker
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
colorModifier = np.linspace(0.3,1.7,len(point_list_mouse))
colorsMouse = map(lambda mod: adjust_lightness('blue', mod), colorModifier)
colorsCat = map(lambda mod: adjust_lightness('orange', mod), colorModifier)
"""
if base_path =="12" or base_path == "13":
background=plt.imread("rgarena3-4.png")
plt.imshow(background,extent=[-3.14+spawn_point[1,0],3.14+spawn_point[1,0],-3.14+spawn_point[1,1],3.14+spawn_point[1,1]])
"""
plt.scatter(point_list_mouse[1:,1],point_list_mouse[1:,0],label="mouse", color=colorsMouse)
plt.scatter(point_list_cat[1:,1],point_list_cat[1:,0],label="cat", color=colorsCat)
plt.ylabel("Y Position")
plt.xlabel("X Position")
plt.title("Pfad der Roboter")
plt.axis('equal')
plt.legend()
ax = plt.gca()
leg = ax.get_legend()
leg.legendHandles[0].set_color('blue')
leg.legendHandles[1].set_color('orange')
plt.savefig("report_pfad" + base_path + ".png", dpi=600)
plt.savefig("report_pfad" + base_path + ".svg", dpi=600)
plt.clf()
diff = (point_list_cat- point_list_mouse)**2
plt.plot( np.sqrt(diff[:,0]+diff[:,1]),label="Roboterabstand")
plt.title("Roboterabstand")
plt.ylabel("Distanz")
plt.xlabel("Zeit")
plt.legend()
plt.savefig("report_distance"+base_path + ".svg", dpi=600)
plt.savefig("report_distance"+base_path + ".png", dpi=600)
plt.clf()
| true |
52cdb7c3d62ccb14127a5d5edb3c649b65ab8f25 | Python | samhiner/code | /python/mnist-ml/mnist_neuralnet.py | UTF-8 | 3,187 | 3.3125 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
from tensorflow import keras
import numpy as np
import h5py
# NEURAL NET DESIGN
class NeuralNet:
#create the neural network
#learning_rate: self-explanatory
#drop_rate: likelihood of throwing out a node with dropout regularization (this is logarithmic btw so 0.9 and 0.99 are very different)
#train_data: training dataset [features, labels]
#val_data: validation dataset [features, labels]
def __init__(self, learning_rate, drop_rate, train_data, val_data):
self.train_features = train_data[0]
self.train_labels = train_data[1]
self.val_features = val_data[0]
self.val_labels = val_data[1]
self.model = keras.Sequential()
self.model.add(keras.layers.Dense(100, activation='relu'))
self.model.add(keras.layers.Dense(100))
self.model.add(keras.layers.Dropout(drop_rate, noise_shape=None, seed=None))
self.model.add(keras.layers.Dense(10, activation='softmax'))
self.model.compile(optimizer=tf.train.AdagradOptimizer(learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])
#traing the neural net
#epochs: number of times you will train over the dataset
#batch_size: number of samples the algo views before updating the gradient
def train(self, epochs, batch_size):
self.model.fit(self.train_features, self.train_labels, epochs = epochs, batch_size = batch_size, validation_data=(self.val_features, self.val_labels))
# DATA FORMATTING
def smaller_split(data,size):
return data[:size], data[size:]
def split_data(data, train_size = None, training = True):
labels = data[:,0]
labels = tf.keras.utils.to_categorical(labels)
features = data[:,1:]
features = features / 255
if not training:
return features, labels
train_labels, val_labels = smaller_split(labels, train_size)
train_features, val_features = smaller_split(features, train_size)
return [train_features, train_labels], [val_features, val_labels]
#get the first half of the mnist data (second half is for testing) and shuffle it.
print('Getting Data...')
mnist_data = np.genfromtxt('https://dl.google.com/mlcc/mledu-datasets/mnist_train_small.csv', delimiter=',')
#mnist_data = mnist_data[:10001]
np.random.shuffle(mnist_data)
train_data, val_data = split_data(data = mnist_data, train_size = 18000)
print('Getting Data Complete.')
# RUNNING THE NEURAL NETWORK
print('Training Neural Net...')
myNet = NeuralNet(learning_rate = 0.076, drop_rate = 0.927, train_data = train_data, val_data = val_data)
myNet.train(epochs = 100, batch_size = 50)
print('Training Neural Net Complete.')
# TESTING THE NEURAL NETWORK
print('Getting Test Data...')
test_data = np.genfromtxt('https://dl.google.com/mlcc/mledu-datasets/mnist_test.csv', delimiter=',')
#test_data = test_data[10001:]
np.random.shuffle(test_data)
test_features, test_labels = split_data(data = test_data, training = False)
print('Getting Test Data Complete.')
score = myNet.model.evaluate(test_features, test_labels, batch_size = 128)
print('Test Loss: ' + str(score[0]))
print('Test Accuracy: ' + str(score[1]))
# SAVING THE NETWORK
if score[1] >= 0.95:
myNet.model.save('mnist_over_95.h5')
print('Network Saved!')
else:
print('Network not accurate enough to save.') | true |
e8c3350b3d617ca407915dcce889189336185c30 | Python | alexanderdaffara/MusicDeepLearning | /src/midiTests.py | UTF-8 | 725 | 2.71875 | 3 | [] | no_license | from music21 import *
"""
[0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 102, 0, 2560],
[0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73, 102, 2560, 512]
1280 == quarter note for all MIDI
"""
s1 = stream.Stream()
p = pitch.Pitch()
p.midi = 74
n = note.Note()
n.duration = duration.Duration( 2560 / 1280 )
n.pitch = p
n.volume.velocity = 120
s1.insert(0.0, n)
p = pitch.Pitch()
p.midi = 73
n = note.Note()
n.duration = duration.Duration( 512 / 1280)
n.pitch = p
n.volume.velocity = 102
s1.insert((2560 / 1280), n)
p = pitch.Pitch()
p.midi = 74
n = note.Note()
n.duration = duration.Duration( 512 / 1280)
n.pitch = p
n.volume.velocity = 102
s1.insert((3072 / 1280), n)
s1.show('midi')
| true |
ce3754eb9cf0f40c473aef0573ee001c6137ba81 | Python | wespatrocinio/programming_studies | /ooo/vector.py | UTF-8 | 1,600 | 3.921875 | 4 | [] | no_license | """
Playing around with overload
"""
class Vector:
""" Represent a vector in a multidimensional space. """
def __init__(self, d):
"""
Create a d-dimensional vector of zeros.
d Dimension of th vctor space (int)
"""
self._coords = [0]*d
def __len__(self):
""" Return the dimension of the vector. """
return len(self._coords)
def __getitem__(self, k):
""" Return k-th coordinate of the vector """
return self._coords[k]
def __setitem__(self, k, value):
""" Set k-th coordinate of vector to given value. """
self._coords[k] = value
def __add__(self, other):
"""
Return sum of two vectors.
other Another Vector instance. Expected the same dimension.
"""
if len( self) != len(other):
raise ValueError('Dimensions must match.')
result = Vector(len(self))
for i in range(len(self)):
result[i] = self[i] + other[i]
return result
def __eq__(self, other):
"""
Return True if vector has same coords as other.
other Another Vector instance.
"""
return self._coords == other._coords
def __ne__(self, other):
"""
Return True if vector differs from other. Rely on __eq__ method above.
other Another Vector instance.
"""
return not self == other
def __str__(self):
""" Produce string representation of vector. """
return '< {coords} >'.format(coords=self._coords) | true |
dc093d8b73a889a754193f73667f4db910b008f6 | Python | bhuvannarula/code-doodle | /hitting-with-projectile/main.py | UTF-8 | 2,639 | 3.703125 | 4 | [] | no_license | from math import sin, cos, tan, atan, pi
from matplotlib import pyplot
import numpy
def Range(initialVelocity, angle, accnGrav = 9.8):
tempRange = (initialVelocity**2) * sin(2*angle) / accnGrav
return tempRange
def trajectory(startC, initialVelocity, pointtoHit, pointsAbove = [], pointsBelow = [], accnGrav = 9.8, precision = 2):
'''
function which returns the angle required to hit according to given data.
startC is the point (x,y) where projectile starts
initialVelocity is the initial velocity of projectile
pointtoHit is point (x,y) where projectile is required to hit
pointsAbove & pointsBelow are to be used in future
accnGrav is acceleration due to gravity
precision determines how near the trajectory will be to the 'hit point'
'''
startAngle = atan((pointtoHit[1] - startC[1])/(pointtoHit[0] - startC[0]))
x = pointtoHit[0] - startC[0]
initY = pointtoHit[1] - startC[1]
count = 1
def tryAngleValue(angle, dAngle, count):
sign = (-1)**count
angle += dAngle
if angle >= (pi/2):
print("Can't do that!")
raise ValueError
tempRange = Range(initialVelocity, angle, accnGrav)
dy = x*tan(angle)*(1 - (x/tempRange)) - initY
if round(abs(dy),precision) <= 1/(10**precision):
return angle
elif dy*sign > 0:
return tryAngleValue(angle,dAngle, count)
elif dy*sign < 0:
count += 1
return tryAngleValue(angle, -dAngle/2, count)
return tryAngleValue(startAngle,pi/32, count)
'''
trajectory() function returns the angle required for projectile to hit at the given velocity.
'''
startC = (0,0) # the point where projectile will start
pointtoHit = (8,12) # the point where we need to reach
'''
Now, we can vary the initial velocity to create different trajectories.
Following code is for plotting graph for trajectories with initialVelocity in range(1,20)
'''
xHit, yHit = ((pointtoHit[i] - startC[i]) for i in range(2))
pyplot.plot(xHit,yHit, 'ro') # i don't know what 'ro' means, looked up on stackoverflow, this plots 'hit point'
for initialVelocity in range(1,20):
try:
angleFinal = trajectory(startC, initialVelocity, pointtoHit, precision=5)
except ValueError:
pass
else:
finalRange = Range(initialVelocity, angleFinal)
def projectileEquation(x, angle = angleFinal, r = finalRange): # this is equation of projectile for graph
return x*tan(angle)*(1-(x/r))
x = numpy.arange(0,finalRange, 0.1)
y = projectileEquation(x)
pyplot.plot(x,y)
pyplot.show() | true |
dcd75ae7071512047c81cda8c1fcfe23026a6a52 | Python | manasharma90/AoC-2020-Python | /Puzzle11/seatFinder2.py | UTF-8 | 5,046 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | with open('input.txt', 'r') as f:
a = f.read()
seats_draft = a.splitlines()
seats = []
#creating a list with elements as a list of rows
for row_string in seats_draft:
row_list = list(row_string)
seats.append(row_list)
# determining length of each row in the pattern.
# Each row is of equal size and each element of the list 'seats' represents a row
# Hence row length will be the len of any element within the list 'seats'
row_length = len(seats[0])
#Defining a function to check if two lists are exactly the same
#Since we are dealing with two dimensional arrays, the function checks the equality of both levels
def list_equality(list1, list2):
if len(list1) != len(list2):
return False
for i in range(0,len(list1)):
if len(list1[i]) != len(list2[i]):
return False
for j in range(0,len(list1[i])):
if list1[i][j] == list2[i][j]:
continue
else:
return False
return True
#Using two dimensional arrays to determine the occupied seats
#Inp: seatList: [[#,L,L,.,.,#],[L,L,L,.,#,#]...]; x = index of first order list (seats ie. north/south); y = index of second order list (rows ie. east/west)
def occupied_adjacent(seatList, x, y, rowLength):
occupied_count = 0
#checking east
for e in range((y+1), rowLength):
if seatList[x][e] == '.':
continue
if seatList[x][e] == '#':
occupied_count += 1
break
if seatList[x][e] == 'L':
break
#checking west
for w in reversed(range(y)):
if seatList[x][w] == '.':
continue
if seatList[x][w] == '#':
occupied_count += 1
break
if seatList[x][w] == 'L':
break
#checking north
for n in reversed(range(x)):
if seatList[n][y] == '.':
continue
if seatList[n][y] == '#':
occupied_count += 1
break
if seatList[n][y] == 'L':
break
#checking south
for s in range((x+1), len(seatList)):
if seatList[s][y] == '.':
continue
if seatList[s][y] == '#':
occupied_count += 1
break
if seatList[s][y] == 'L':
break
#checking north-east
i = 1
while (y+i) < rowLength and (x-i) >= 0:
if seatList[x-i][y+i] == '.':
i += 1
continue
if seatList[x-i][y+i] == '#':
occupied_count += 1
break
if seatList[x-i][y+i] == 'L':
break
#checking north-west
i = 1
while (y-i) >= 0 and (x-i) >= 0:
if seatList[x-i][y-i] == '.':
i += 1
continue
if seatList[x-i][y-i] == '#':
occupied_count += 1
break
if seatList[x-i][y-i] == 'L':
break
#checking south-east
i = 1
while (y+i) < rowLength and (x+i) < len(seatList):
if seatList[x+i][y+i] == '.':
i += 1
continue
if seatList[x+i][y+i] == '#':
occupied_count += 1
break
if seatList[x+i][y+i] == 'L':
break
#checking south-west
i = 1
while (y-i) >= 0 and (x+i) < len(seatList):
if seatList[x+i][y-i] == '.':
i += 1
continue
if seatList[x+i][y-i] == '#':
occupied_count += 1
break
if seatList[x+i][y-i] == 'L':
break
return occupied_count
new_seatList = []
#using a while loop to keep the code running till a matching pattern emerges
while True:
new_seatList = [] #duplicating this within the loop as well to ensure that we present a blank list everytime
for i in range(len(seats)):
new_sublist = [] #this will be the new row list within the new seat list
for j in range(len(seats[i])):
if seats[i][j] == 'L' and occupied_adjacent(seats, i, j, row_length) == 0:
new_sublist.append('#')
continue
if seats[i][j] == '#' and occupied_adjacent(seats, i, j, row_length) >= 5:
new_sublist.append('L')
continue
new_sublist.append(seats[i][j])
new_seatList.append(new_sublist) #appending the new row list as an element to the new seat list
if list_equality(seats, new_seatList): #executing break condition ie. when the new and the old seat list are equal
break
seats = new_seatList #if no equality, the 'old' seat list gets replaced by the new list compiled during this iteration of the loop
#In the next iteration of the while loop, the new seat list will again be blank and the 'old' list (seats) will be the list compiled in the previous iteration
#calculting the number of occupied seats when the pattern stabilizes
occupied_count = 0
for row in seats:
for seat in row:
if seat == '#':
occupied_count += 1
print(occupied_count) | true |
0eab8775821bfb89a254dae4895dafce5edaab7a | Python | ashbob999/Advent-of-Code | /2015/day19.py | UTF-8 | 1,298 | 3.046875 | 3 | [] | no_license | from typing import Callable
from os.path import isfile, join as path_join
file_name = path_join('input', 'day19.txt')
def to_list(mf: Callable = int, sep='\n'): return [mf(x) for x in open(file_name).read().split(sep) if x]
def to_gen(mf: Callable = int, sep='\n'): return (mf(x) for x in open(file_name).read().split(sep) if x)
if not isfile(file_name):
from aoc import get_input_file
get_input_file()
import random
data = open(file_name).read().strip().split("\n\n")
data1 = ["""e => H
e => O
H => HO
H => OH
O => HH""", "HOH"]
molecule = data[1]
replacements = []
for line in data[0].split("\n"):
parts = line.split("=>")
find = parts[0].strip()
replacement = parts[1].strip()
replacements.append((find, replacement))
subs = set(map(lambda x: x[0], replacements))
replacements = sorted(replacements, key=len, reverse=True)
def part1():
poss = set()
for i in range(len(molecule)):
for rep in replacements:
if molecule[i:i + len(rep[0])] == rep[0]:
poss.add(molecule[:i] + rep[1] + molecule[i + len(rep[0]):])
return len(poss)
def part2(curr, target):
count = 0
while curr != target:
rm = random.choice(replacements)
if rm[1] in curr:
curr = curr.replace(rm[1], rm[0], 1)
count += 1
return count
print(part1())
print(part2(molecule, "e"))
| true |
74ca6490030e4c28cf9f7a3187a5a5c17de6a157 | Python | tribeiro/specfit | /specfit/lib/specfit.py | UTF-8 | 21,017 | 2.53125 | 3 | [] | no_license | '''
specfit.py - Definition of class for fitting linear combination of spectra.
'''
######################################################################
import os
import numpy as np
from astropy.io import fits as pyfits
from astropysics import spec
import scipy.ndimage.filters
import scipy.interpolate
import logging
import scipy.constants
from scipy.optimize import leastsq
_c_kms = scipy.constants.c / 1.e3 # Speed of light in km s^-1

# Flux offset constant. NOTE(review): loadNextGenTemplate adds +8.0 (= -DF)
# to the de-logged template flux; presumably DF is the corresponding
# log-space offset -- confirm against the template generation code.
DF = -8.0
class SpecFit():
    '''
    Fit an observed spectrum with a linear combination of Doppler-shifted,
    scaled template spectra, one template per component.
    '''
    ##################################################################
    def __init__(self, nspec):
        '''
        Initialize class.

        Input:
         nspec = Number of spectra that composes the observed spectra
        '''
        # number of componentes
        self.nspec = nspec
        # Store template spectra and scale factor
        self.template = [[]] * nspec
        self.templateNames = [[]] * nspec
        self.templateScale = [[]] * nspec
        self.specgrid = [[]] * nspec
        # velocity for each component
        self.vel = np.zeros(nspec)+1.
        # scale factor for each component
        self.scale = [[]] * nspec
        # template selected for each component
        self.ntemp = np.zeros(nspec, dtype=int)
        # template grid dimensions for each component
        self.grid_ndim = np.zeros(nspec, dtype=int)
        # Grids
        self.Grid = [[]] * nspec
        # store the observed spectra
        self.ospec = None
        # when True, modelSpec() rescales the model to the mean observed flux
        self._autoprop = False
    ##################################################################
    def setAutoProp(self, value):
        '''Enable/disable automatic flux scaling of the model (see modelSpec).'''
        self._autoprop = value
    ##################################################################
    def loadNextGenTemplate(self, ncomp, filename):
        '''
        Loads template spectra from a list of files (in filename), for
        component ncomp.
        '''
        splist = np.loadtxt(filename, unpack=True, usecols=(0,),
                            dtype='S', ndmin=1)
        self.template[ncomp] = [0] * len(splist)
        self.templateScale[ncomp] = [1] * len(splist)
        logging.debug('Loading template spectra for component %i from %s[%i]' % (ncomp, filename, len(splist)))
        for i in range(len(splist)):
            logging.debug('Reading %s' % (splist[i]))
            # NextGen tables use Fortran 'D' exponents; map them to 'e'
            sp = np.loadtxt(splist[i], unpack=True, usecols=(0, 1),
                            converters={0: lambda s: float(s.replace('D', 'e')),
                                        1: lambda s: float(s.replace('D', 'e'))})
            asort = sp[0].argsort()
            # NOTE(review): module constant DF = -8.0 is unused while the
            # flux offset here is hard-coded to +8.0 — confirm intent
            self.template[ncomp][i] = spec.Spectrum(sp[0][asort],
                                                    10 ** (sp[1][asort]) + 8.0)
        return 0
    ##################################################################
    def loadPickleTemplate(self, ncomp, filename):
        '''
        Loads template spectra from a list of files (in filename), for
        component ncomp.
        '''
        splist = np.loadtxt(filename, unpack=True,
                            dtype='S', ndmin=2)
        if splist.shape[0] < self.grid_ndim[ncomp]:
            raise IOError('Grid dimensions is not consistent with expected. Expecting %i got %i.' % (
                self.grid_ndim[ncomp], splist.shape[0]))
        self.template[ncomp] = [0] * len(splist[0])
        self.templateNames[ncomp] = [0] * len(splist[0])
        self.templateScale[ncomp] = [1] * len(splist[0])
        if self.grid_ndim[ncomp] > 0:
            grid = splist[1:self.grid_ndim[ncomp] + 1]
            # np.int is a deprecated alias of the builtin int
            gdim = np.zeros(self.grid_ndim[ncomp], dtype=int)
            for i in range(len(grid)):
                gdim[i] = len(np.unique(grid[i]))
            index = np.arange(len(splist[0])).reshape(gdim)
            self.Grid[ncomp] = index
        logging.debug('Loading template spectra for component %i from %s[%i]' % (ncomp, filename, len(splist)))
        for i in range(len(splist[0])):
            logging.debug('Reading %s' % (splist[0][i]))
            sp = np.load(splist[0][i])
            self.template[ncomp][i] = spec.Spectrum(sp[0], sp[1])
            self.templateNames[ncomp][i] = splist[0][i]
        return 0
    ##################################################################
    def loadCoelhoTemplate(self, ncomp, filename):
        '''
        Loads template spectra from a list of FITS files (in filename), for
        component ncomp. A missing file falls back to the previous template.
        '''
        splist = np.loadtxt(filename, unpack=True,
                            dtype='S', ndmin=2)
        if splist.shape[0] < self.grid_ndim[ncomp]:
            raise IOError('Grid dimensions is not consistent with expected. Expecting %i got %i.' % (
                self.grid_ndim[ncomp], splist.shape[0]))
        self.template[ncomp] = [0] * len(splist[0])
        self.templateNames[ncomp] = [0] * len(splist[0])
        self.templateScale[ncomp] = [1] * len(splist[0])
        if self.grid_ndim[ncomp] > 0:
            grid = splist[1:self.grid_ndim[ncomp] + 1]
            # this loader assumes a 2-dimensional template grid
            index = np.arange(len(splist[0])).reshape((len(np.unique(grid[0])), len(np.unique(grid[1]))))
            self.Grid[ncomp] = index
        logging.debug('Loading template spectra for component %i from %s[%i]' % (ncomp, filename, len(splist)))
        notFound = 0
        for i in range(len(splist[0])):
            logging.debug('Reading %s' % (splist[0][i]))
            if os.path.exists(splist[0][i]):
                hdu = pyfits.open(splist[0][i])
                # wavelength axis reconstructed from the FITS WCS keywords
                wave = hdu[0].header['CRVAL1'] + np.arange(len(hdu[0].data)) * hdu[0].header['CDELT1']
                self.template[ncomp][i] = spec.Spectrum(wave, hdu[0].data)
                self.templateNames[ncomp][i] = splist[0][i]
            else:
                # BUGFIX: count the miss before logging so the reported
                # tally starts at 1 rather than 0
                notFound += 1
                logging.warning('Could not find template %s. %i/%i' % (splist[0][i], notFound, len(splist[0])))
                self.template[ncomp][i] = self.template[ncomp][i - 1]
                self.templateNames[ncomp][i] = splist[0][i] + "NOTFOUND"
        if notFound > len(splist[0]) / 2:
            raise IOError('More than 50% of template spectra could not be loaded')
        return 0
    ##################################################################
    def loadPickle(self, filename, linearize=True):
        '''
        Loads observed spectra from numpy pickle file.
        '''
        logging.debug('Loading observed spectra for from %s' % (filename))
        sp = np.load(filename)
        self.ospec = spec.Spectrum(sp[0], sp[1])
        if linearize and not self.ospec.isLinear():
            logging.debug('Linearizing observed spectra')
            self.ospec.linearize()
        logging.debug('Done')
        return 0
    ##################################################################
    def loadtxtSpec(self, filename):
        '''
        Load the observed spectra from a two-column ASCII table
        (Fortran 'D' exponents are converted to 'e').
        '''
        logging.debug('Loading observed spectra for from %s' % (filename))
        sp = np.loadtxt(filename, unpack=True, usecols=(0, 1),
                        converters={0: lambda s: float(s.replace('D', 'e')),
                                    1: lambda s: float(s.replace('D', 'e'))})
        self.ospec = spec.Spectrum(sp[0], sp[1])
        return 0
    ##################################################################
    def loadSDSSFits(self, filename, linearize=False):
        '''
        Load the observed spectra from an SDSS FITS file, keeping only
        pixels whose and/or masks are clean.

        NOTE(review): the `linearize` argument is currently ignored (the
        linearization step was disabled upstream).
        '''
        logging.debug('Loading observed spectra for from %s' % (filename))
        sp = pyfits.open(filename)
        mask = np.bitwise_and(sp[1].data['and_mask'] == 0,
                              sp[1].data['or_mask'] == 0)
        self.ospec = spec.Spectrum(x=10 ** (sp[1].data['loglam'][mask]),
                                   flux=sp[1].data['flux'][mask],
                                   ivar=sp[1].data['ivar'][mask])
        return 0
    ##################################################################
    def gridSpec(self, ncomp=0):
        '''
        Resample all templates of component ncomp onto the grid of the
        first template and stack them as rows of a 2D array.
        '''
        # Use first spectrum as reference
        refspec = self.template[ncomp][0]
        specgrid = np.zeros((len(self.template[ncomp]), len(refspec.flux)))
        for i in range(len(specgrid)):
            specgrid[i] += self.template[ncomp][i].resample(refspec.x, replace=False)[1] * \
                           self.templateScale[ncomp][i]
        self.specgrid[ncomp] = specgrid
        # equal initial weight for every template of the grid
        self.scale[ncomp] = np.zeros(len(specgrid)).reshape(len(specgrid), -1) + 1. / len(specgrid)
    ##################################################################
    def chi2(self, p):
        '''
        Residual vector of data minus model for parameter vector
        p = [scale_0, vel_0, scale_1, vel_1, ...] (leastsq-style; not a
        scalar chi-square despite the name).
        '''
        for i in range(self.nspec):
            # BUGFIX: log the same elements that are used below
            # (the original logged p[i], p[i + 1])
            logging.debug('%f / %f' % (p[i * 2], p[i * 2 + 1]))
            self.scale[i] = p[i * 2]
            self.vel[i] = p[i * 2 + 1]
        model = self.modelSpec()
        c2 = self.ospec.flux - model.flux
        return c2
    ##################################################################
    def modelSpec(self):
        '''
        Calculate the model spectrum: the sum over components of the
        Doppler-shifted, scaled selected template, resampled onto the
        observed wavelength grid.
        '''
        dopCor = np.sqrt((1.0 + self.vel[0] / _c_kms) / (1. - self.vel[0] / _c_kms))
        scale = self.scale[0][self.ntemp[0]] * self.templateScale[0][self.ntemp[0]]
        _model = MySpectrum(*MySpectrum(self.template[0][self.ntemp[0]].x * dopCor,
                                        self.template[0][self.ntemp[0]].flux * scale).myResample(self.ospec.x, replace=False))
        for i in range(1, self.nspec):
            dopCor = np.sqrt((1.0 + self.vel[i] / _c_kms) / (1. - self.vel[i] / _c_kms))
            scale = self.scale[i][self.ntemp[i]] * self.templateScale[i][self.ntemp[i]]
            tmp = MySpectrum(*MySpectrum(self.template[i][self.ntemp[i]].x * dopCor,
                                         self.template[i][self.ntemp[i]].flux * scale).myResample(self.ospec.x, replace=False))
            _model.flux += tmp.flux
        if self._autoprop:
            # rescale the model to the mean observed flux
            mflux = np.mean(_model.flux)
            oflux = np.mean(self.ospec.flux)
            _model.flux *= (oflux / mflux)
        return _model
    ##################################################################
    def modelSpecThreadSafe(self, vel, scale, ntemp):
        '''
        Calculate the model spectrum without touching instance state:
        velocities, scale factors and template indices are passed in.
        '''
        logging.debug('Building model spectra')
        dopCor = np.sqrt((1.0 + vel[0] / _c_kms) / (1. - vel[0] / _c_kms))
        # BUGFIX: keep the `scale` argument intact; the original rebound
        # `scale` to a float here, so `scale[i]` crashed for i >= 1
        compScale = scale[0] * self.templateScale[0][ntemp[0]]
        _model = MySpectrum(self.template[0][ntemp[0]].x * dopCor,
                            self.template[0][ntemp[0]].flux * compScale)
        for i in range(1, self.nspec):
            dopCor = np.sqrt((1.0 + vel[i] / _c_kms) / (1. - vel[i] / _c_kms))
            compScale = scale[i] * self.templateScale[i][ntemp[i]]
            tmp = MySpectrum(self.template[i][ntemp[i]].x * dopCor,
                             self.template[i][ntemp[i]].flux * compScale)
            tmp = MySpectrum(*tmp.resample(_model.x, replace=False))
            _model.flux += tmp.flux
        logging.debug('Resampling model spectra')
        _model = MySpectrum(*_model.myResample(self.ospec.x, replace=False))
        return _model
    ##################################################################
    def normTemplate(self, ncomp, w0, w1):
        '''
        Normalize spectra against data in the wavelenght region [w0, w1].
        The factor is stored in templateScale; the template flux itself is
        left untouched.
        '''
        for i in range(len(self.template[ncomp])):
            maskt = np.bitwise_and(self.template[ncomp][i].x > w0,
                                   self.template[ncomp][i].x < w1)
            mask0 = np.bitwise_and(self.ospec.x > w0,
                                   self.ospec.x < w1)
            scale = np.mean(self.ospec.flux[mask0]) / np.mean(self.template[ncomp][i].flux[maskt])
            self.templateScale[ncomp][i] = scale
    ##################################################################
    def gaussian_filter(self, ncomp, kernel):
        '''Smooth every template of component ncomp with a Gaussian kernel
        (width in pixels); templates should be linearized first.'''
        for i in range(len(self.template[ncomp])):
            if not self.template[ncomp][i].isLinear():
                logging.warning('Spectra must be linearized for gaussian filter...')
            self.template[ncomp][i].flux = scipy.ndimage.filters.gaussian_filter(self.template[ncomp][i].flux, kernel)
    ##################################################################
    def obsRes(self):
        '''Return the sampling step of the observed spectrum.'''
        return self.ospec.getDx()
    ##################################################################
    def preprocTemplate(self):
        '''
        Pre-process all template spectra to have aproximate coordinates as
        those of the observed spectrum and linearize the spectrum.
        '''
        logging.debug('Preprocessing all template spectra. Spectra will be trimmed and linearized')
        ores = self.obsRes()
        xmin = np.max([self.template[0][0].x[0], self.ospec.x[0] - 100.0 * ores])
        xmax = np.min([self.template[0][0].x[-1], self.ospec.x[-1] + 100.0 * ores])
        for i in range(self.nspec):
            for j in range(len(self.template[i])):
                self.template[i][j].linearize(lower=xmin, upper=xmax)
                tmp_spres = np.mean(self.template[i][j].x[1:] - self.template[i][j].x[:-1])
                logging.debug('Template spres = %f' % (tmp_spres))
                logging.debug('Data spres = %f' % (ores))
                if tmp_spres < ores / 10.:
                    # cap the template resolution at 10x the data resolution
                    logging.debug('Template spectroscopic resolution too high! Resampling...')
                    newx = np.arange(xmin, xmax, ores / 10.)
                    self.template[i][j] = spec.Spectrum(*self.template[i][j].resample(newx, replace=False))
    ##################################################################
    def saveTemplates2Pickle(self, ncomp, filename):
        '''Save the templates of component ncomp as numpy pickles, one per
        target path listed in filename.'''
        splist = np.loadtxt(filename, unpack=True, usecols=(0,),
                            dtype='S', ndmin=1)
        logging.debug('Saving template spectra to pickle file...')
        for ntemp in range(len(self.template[ncomp])):
            logging.debug(splist[ntemp])
            sp = np.array([self.template[ncomp][ntemp].x,
                           self.template[ncomp][ntemp].flux])
            np.save(splist[ntemp], sp)
    ##################################################################
    def suitableScale(self):
        '''
        Find a suitable scale values for all spectra.

        Returns (maxscale, minscale) over all templates, computed from the
        ratio of mean observed to mean template flux in the overlap region.
        '''
        logging.debug('Looking for suitable scale in all spectra. Will choose the larger value.')
        obsmean = np.mean(self.ospec.flux)
        maxscale = 0.
        minscale = obsmean
        for i in range(len(self.template)):
            for j in range(len(self.template[i])):
                maskt = np.bitwise_and(self.template[i][j].x > self.ospec.x[0],
                                       self.template[i][j].x < self.ospec.x[-1])
                nscale = obsmean / np.mean(self.template[i][j].flux[maskt]) / self.templateScale[i][j]
                if maxscale < nscale:
                    maxscale = nscale
                if minscale > nscale:
                    minscale = nscale
        return maxscale, minscale
    ##################################################################
    def fit(self):
        '''
        Fit spectra with least square fit.

        Only the scale factors of the selected templates are optimized;
        the velocities are pinned to zero inside the score function.
        NOTE(review): the initial guess hard-codes two components —
        confirm nspec == 2 before calling.
        '''
        def score(p, x, y):
            # residuals for the current scale parameters p
            for i in range(self.nspec):
                self.vel[i] = 0.
                self.scale[i][self.ntemp[i]] = p[i]
            return y-self.modelSpec().flux
        pres, flag = leastsq(score, [self.scale[0][self.ntemp[0]],
                                     self.scale[1][self.ntemp[1]]],
                             args=(self.ospec.x, self.ospec.flux))
        return pres
######################################################################
class MySpectrum(spec.Spectrum):
    '''Spectrum subclass that adds a smoothing resampler (myResample).'''
    def __init__(self, x, flux, err=None, ivar=None,
                 unit='wl', name='', copy=True, sort=True):
        spec.Spectrum.__init__(self, x=x, flux=flux, err=err, ivar=ivar,
                               unit=unit, name=name, copy=copy, sort=sort)
    ##################################################################
    def myResample(self, newx, replace=False):
        '''
        Resample the spectrum onto the coordinates `newx`.

        The flux is first smoothed with a Gaussian whose sigma is the ratio
        of the target to the native sampling step (both estimated with the
        median step), then evaluated on `newx` with a cubic spline.

        Returns (newx, resampled_flux).

        NOTE(review): `replace` is accepted for API compatibility with
        Spectrum.resample but is unused here.
        '''
        kernel = np.median(newx[1:] - newx[:-1]) / np.median(self.x[1:] - self.x[:-1])
        newflux = scipy.ndimage.filters.gaussian_filter1d(self.flux, kernel)
        tck = scipy.interpolate.splrep(self.x, newflux)
        return newx, scipy.interpolate.splev(newx, tck)
    ##################################################################
##################################################################
######################################################################
| true |
257b4ed372545039c66cfc312783f1f45148e122 | Python | juliakarabasova/programming-2021-19fpl | /queue_/queue_stack.py | UTF-8 | 1,911 | 4 | 4 | [
"MIT"
] | permissive | """
Programming for linguists
Implementation of the data structure "Queue" based on Stack
"""
from typing import Iterable
from stack.stack import Stack
# pylint: disable=invalid-name
class QueueStack_(Stack):
    """
    Queue Data Structure On Stack
    """
    def __init__(self, data: Iterable = (), capacity: int = 50):
        super().__init__(data)
        if not isinstance(capacity, int):
            raise TypeError
        self._capacity = capacity

    def get(self):
        """
        Remove and return the element at the front of queue_
        """
        if self.empty():
            raise IndexError
        # FIFO: the oldest element sits at index 0 of the backing list
        return self.data.pop(0)

    def put(self, element):
        """
        Add the element ‘element’ at the end of queue_
        :param element: element to add to queue_
        """
        if self.full():
            raise IndexError
        self.data.append(element)

    def top(self):
        """
        Return (without removing) the element at the front of queue_
        """
        if self.empty():
            raise IndexError
        return self.data[0]

    def full(self):
        """
        Return whether queuestack_ is full or not
        :return: True if size of queuestack_ equals the capacity of queue_.
        False if the queuestack_ contains less elements.
        """
        return self.size() == self._capacity

    def capacity(self):
        """
        Return the capacity of queuestack_
        :return: the capacity (maximum size) of queuestack_
        """
        return self._capacity
| true |
0b38443c481d7bda22e9cf28e69628206dd29445 | Python | AsherYang/AsherUpload | /resources/python/copyfile.py | UTF-8 | 574 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding:utf-8 -*-
from shutil import copyfile
import os
print 'copy file..'  # startup banner (Python 2 print statement)
def copy(srcPath, destPath):
    """Copy the file at srcPath to destPath (thin shutil.copyfile wrapper)."""
    copyfile(srcPath, destPath)
def main():
    # Interactive entry point: copies a hard-coded source file to a
    # hard-coded destination (the raw_input prompts were disabled below).
    print 'please input src file path , and destPath'
    # srcPath = raw_input('srcPath : ')
    srcPath = '/Users/ouyangfan/Documents/1.txt'
    # destPath = raw_input('destPath : ')
    destPath = '/Users/ouyangfan/Documents/22.txt'
    if not os.path.isfile(srcPath):
        # NOTE(review): `pass` here is dead code; exit() aborts the script
        print 'src must a file'
        pass
        exit()
    copy(srcPath, destPath)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| true |
2a2fd4d90e0ece88fa3eab4629373d97a9a3c91f | Python | angelofgrace/holbertonschool-higher_level_programming | /0x05-python-exceptions/4-list_division.py | UTF-8 | 508 | 3.609375 | 4 | [] | no_license | #!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
    """Element-wise division of two lists over list_length positions.

    Failures (zero divisor, missing index, bad type) print a message and
    contribute 0 to the result list.
    """
    results = []
    for idx in range(list_length):
        quotient = 0
        try:
            quotient = my_list_1[idx] / my_list_2[idx]
        except ZeroDivisionError:
            print("division by 0")
        except IndexError:
            print("out of range")
        except TypeError:
            print("wrong type")
        finally:
            results.append(quotient)
    return results
d7ba2c85dcd6fb2fcc1d4b551b5afbd0fde11cb0 | Python | econchick/api-workshop | /full/github.py | UTF-8 | 1,552 | 2.671875 | 3 | [] | no_license | #! /usr/bin/env python
import github3
import geojson
class GithubError(Exception):
    """Raised when a GitHub operation (login, gist creation) fails."""
def create_geojson(artists):
    """Build a GeoJSON FeatureCollection string from artist dicts.

    Artists whose coordinates are exactly [0, 0] are skipped; feature ids
    are numbered 1..N over the artists that are kept.
    """
    features = []
    feature_id = 1
    for artist in artists:
        # [0, 0] marks an artist with no usable geocoding result
        if artist.get('coordinates') == [0, 0]:
            continue
        feature = {
            "type": "Feature",
            "id": feature_id,
            "properties": {
                "title": artist.get('name'),
                "spotify_id": artist.get('spotify_id'),
                "genres": ", ".join(artist.get('genres')),
                "location": artist.get('location').get('location'),
                "marker-symbol": 'music'
            },
            "geometry": geojson.Point(artist.get('coordinates')),
        }
        feature_id += 1
        features.append(feature)
    collection = {"type": "FeatureCollection", "features": features}
    return geojson.dumps(collection)
def login_github(github_oauth):
    """Authenticate against GitHub with an OAuth token.

    Returns the github3 session object; raises GithubError on failure.
    """
    try:
        session = github3.login(token=github_oauth)
    except github3.GitHubError as e:
        raise GithubError("Issue logging into GitHub: {0}".format(e))
    return session
def post_gist_github(geojson, auth, title):
    """Publish the GeoJSON string as a Gist and return its HTML URL.

    Without `auth` an anonymous public gist is created; with `auth` a
    private gist is created on the authenticated account. Raises
    GithubError when the gist cannot be posted.
    """
    files = {'artists.geojson': {'content': geojson}}
    try:
        if auth:
            gist = auth.create_gist(title, files, public=False)
        else:
            gist = github3.create_gist(title, files)
    except github3.GitHubError as e:
        raise GithubError("Issue posting an anonymous Gist: {0}".format(e))
    return gist.html_url
| true |
e3fe4ba6c6a62a17e3f569e14b6b2a7b459fa8f7 | Python | EduardoGiacomini/booboobee | /core/bots/bot_group.py | UTF-8 | 395 | 2.828125 | 3 | [] | no_license | from core.protocol import BotCompositeProtocol
class BotGroup(BotCompositeProtocol):
    """Composite of bots; aggregates each member's information."""

    def __init__(self):
        super().__init__()

    def add(self, bot):
        """Register a bot as a member of this group."""
        self.bots.append(bot)

    def get_information(self):
        """Concatenate a titled information section for every member bot."""
        return ''.join(
            f'--- {bot.name} ---\n{bot.get_information()}\n'
            for bot in self.bots
        )
| true |
10dec9425338bd018ea7679832e113e4a461a35c | Python | mgraupe/SutterMP285 | /sutterMP285.py | UTF-8 | 7,461 | 3 | 3 | [
"MIT"
] | permissive | # sutterMP285 : A python class for using the Sutter MP-285 positioner
#
# SUTTERMP285 implements a class for working with a Sutter MP-285
# micro-positioner. The Sutter must be connected with a Serial
# cable.
#
# This class uses the python "serial" package which allows for
# with serial devices through 'write' and 'read'.
# The communication properties (BaudRate, Terminator, etc.) are
# set when invoking the serial object with serial.Serial(..) (l105,
# see Sutter Reference manual p23).
#
# Methods:
# Create the object. The object is opened with serial.Serial and the connection
# is tested to verify that the Sutter is responding.
# obj = sutterMP285()
#
# Update the position display on the instrument panel (VFD)
# updatePanel()
#
# Get the status information (step multiplier, velocity, resolution)
# [stepMult, currentVelocity, vScaleFactor] = getStatus()
#
# Get the current absolute position in um
# xyz_um = getPosition()
#
# Set the move velocity in steps/sec. vScaleFactor = 10|50 (default 10).
# setVelocity(velocity, vScaleFactor)
#
# Move to a specified position in um [x y z]. Returns the elapsed time
# for the move (command sent and acknowledged) in seconds.
# moveTime = gotoPosition(xyz)
#
# Set the current position to be the new origin (0,0,0)
# setOrigin()
#
# Reset the instrument
# sendReset()
#
# Close the connetion
# __del__()
#
# Properties:
# verbose - The level of messages displayed (0 or 1). Default 1.
#
#
# Example:
#
# >> import serial
# >> from sutterMP285_1 import *
# >> sutter = sutterMP285()
# Serial<id=0x4548370, open=True>(port='COM1', baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=30, xonxoff=False, rtscts=False, dsrdtr=False)
# sutterMP285: get status info
# (64, 0, 2, 4, 7, 0, 99, 0, 99, 0, 20, 0, 136, 19, 1, 120, 112, 23, 16, 39, 80, 0, 0, 0, 25, 0, 4, 0, 200, 0, 84, 1)
# step_mul (usteps/um): 25
# xspeed" [velocity] (usteps/sec): 200
# velocity scale factor (usteps/step): 10
# sutterMP285 ready
# >> pos = sutter.getPosition()
# sutterMP285 : Stage position
# X: 3258.64 um
# Y: 5561.32 um
# Z: 12482.5 um
# >> posnew = (pos[0]+10.,pos[1]+10.,pos[2]+10.)
# >> sutter.gotoPosition(posnew)
# sutterMP285: Sutter move completed in (0.24 sec)
# >> status = sutter.getStatus()
# sutterMP285: get status info
# (64, 0, 2, 4, 7, 0, 99, 0, 99, 0, 20, 0, 136, 19, 1, 120, 112, 23, 16, 39, 80, 0, 0, 0, 25, 0, 4, 0, 200, 0, 84, 1)
# step_mul (usteps/um): 25
# xspeed" [velocity] (usteps/sec): 200
# velocity scale factor (usteps/step): 10
# >> del sutter
#
#
import serial
import struct
import time
import sys
from numpy import *
class sutterMP285 :
    'Class which allows interaction with the Sutter Manipulator 285'
    def __init__(self):
        '''Open the serial link on COM1, set velocity to 200 and verify
        that the controller answers (via getStatus).'''
        self.verbose = 1. # level of messages
        self.timeOut = 30 # timeout in sec
        # initialize serial connection to controller
        try:
            self.ser = serial.Serial(port='COM1',baudrate=9600,bytesize=serial.EIGHTBITS,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,timeout=self.timeOut)
            self.connected = 1
            if self.verbose:
                print self.ser
        except serial.SerialException:
            print 'No connection to Sutter MP-285 could be established!'
            sys.exit(1)
        # set move velocity to 200
        self.setVelocity(200,10)
        self.updatePanel() # update controller panel
        # getStatus() also initializes self.stepMult, needed by
        # getPosition()/gotoPosition()
        (stepM,currentV,vScaleF)= self.getStatus()
        if currentV == 200:
            print 'sutterMP285 ready'
        else:
            print 'sutterMP285: WARNING Sutter did not respond at startup.'
    # destructor
    def __del__(self):
        self.ser.close()
        if self.verbose :
            print 'Connection to Sutter MP-285 closed'
    def getPosition(self):
        '''Query the absolute stage position; returns an array of
        [x, y, z] in micrometers.'''
        # send commend to get position
        self.ser.write('c\r')
        # read position from controller
        xyzb = self.ser.read(13)
        # convert bytes into 'signed long' numbers
        xyz_um = array(struct.unpack('lll', xyzb[:12]))/self.stepMult
        if self.verbose:
            print 'sutterMP285 : Stage position '
            print 'X: %g um \n Y: %g um\n Z: %g um' % (xyz_um[0],xyz_um[1],xyz_um[2])
        return xyz_um
    # Moves the three axes to specified location.
    def gotoPosition(self,pos):
        '''Move to absolute position pos = (x, y, z) in micrometers and
        block until the controller acknowledges (or timeOut elapses).'''
        if len(pos) != 3:
            print 'Length of position argument has to be three'
            sys.exit(1)
        xyzb = struct.pack('lll',int(pos[0]*self.stepMult),int(pos[1]*self.stepMult),int(pos[2]*self.stepMult)) # convert integer values into bytes
        startt = time.time() # start timer
        self.ser.write('m'+xyzb+'\r') # send position to controller; add the "m" and the CR to create the move command
        cr = []
        cr = self.ser.read(1) # read carriage return and ignore
        endt = time.time() # stop timer
        if len(cr)== 0:
            print 'Sutter did not finish moving before timeout (%d sec).' % self.timeOut
        else:
            print 'sutterMP285: Sutter move completed in (%.2f sec)' % (endt-startt)
    # this function changes the velocity of the sutter motions
    def setVelocity(self,Vel,vScalF=10):
        '''Set move velocity (steps/sec); vScalF = 10 or 50 usteps/step.'''
        # Change velocity command 'V'xxCR where xx= unsigned short (16bit) int velocity
        # set by bits 14 to 0, and bit 15 indicates ustep resolution  0=10, 1=50 uSteps/step
        # V is ascii 86
        # convert velocity into unsigned short - 2-byte - integeter
        velb = struct.pack('H',int(Vel))
        # change last bit of 2nd byte to 1 for ustep resolution = 50
        if vScalF == 50:
            velb2 = double(struct.unpack('B',velb[1])) + 128
            velb = velb[0] + struct.pack('B',velb2)
        self.ser.write('V'+velb+'\r')
        self.ser.read(1)
    # Update Panel
    # causes the Sutter to display the XYZ info on the front panel
    def updatePanel(self):
        self.ser.write('n\r') #Sutter replies with a CR
        self.ser.read(1) # read and ignore the carriage return
    ## Set Origin
    # sets the origin of the coordinate system to the current position
    def setOrigin(self):
        self.ser.write('o\r') # Sutter replies with a CR
        self.ser.read(1) # read and ignor the carrage return
    # Reset controller
    def sendReset(self):
        self.ser.write('r\r') # Sutter does not reply
    # Queries the status of the controller.
    def getStatus(self):
        '''Read the 32-byte status block; sets and returns
        (stepMult, currentVelocity, vScaleFactor).'''
        if self.verbose :
            print 'sutterMP285: get status info'
        self.ser.write('s\r') # send status command
        rrr = self.ser.read(32) # read return of 32 bytes without carriage return
        self.ser.read(1) # read and ignore the carriage return
        rrr  # no-op expression, left over from debugging
        statusbytes = struct.unpack(32*'B',rrr)
        print statusbytes
        # the value of STEP_MUL ("Multiplier yields msteps/nm") is at bytes 25 & 26
        self.stepMult = double(statusbytes[25])*256 + double(statusbytes[24])
        # the value of "XSPEED" and scale factor is at bytes 29 & 30
        if statusbytes[29] > 127:
            self.vScaleFactor = 50
        else:
            self.vScaleFactor = 10
        #print double(127 & statusbytes[29])*256
        #print double(statusbytes[28]), statusbytes[28]
        #print double(statusbytes[29]), statusbytes[29]
        # velocity = low 15 bits of bytes 29:28 (bit 15 is the scale flag)
        self.currentVelocity = double(127 & statusbytes[29])*256+double(statusbytes[28])
        #vScaleFactor = struct.unpack('lll', rrr[30:31])
        if self.verbose:
            print 'step_mul (usteps/um): %g' % self.stepMult
            print 'xspeed" [velocity] (usteps/sec): %g' % self.currentVelocity
            print 'velocity scale factor (usteps/step): %g' % self.vScaleFactor
        #
        return (self.stepMult,self.currentVelocity,self.vScaleFactor)
| true |
83a0f1a474bd4a88c4a54179d9c6b39d5307ae8b | Python | danieltrut/alused | /2/ylesanne 2.1.py | UTF-8 | 212 | 2.96875 | 3 | [] | no_license | #kasutaja sisend
# Read the air temperature from the user (kasutaja sisend).
sisestatud_temperatuur = int(input("Sisesta ohu temperatuur: "))
# Above 4 degrees C there is no icing risk; otherwise warn.
hoiatus = "Ei ole jäätumise ohtu" if sisestatud_temperatuur > 4 else "On jäätumise oht"
print(hoiatus)
#valjastus | true |
e197b655ae480c4b54a49c13061fd7ecf70eeb65 | Python | AlexPushkarev/LabaPython2 | /Python2.2.py | UTF-8 | 741 | 3.09375 | 3 | [] | no_license | s = input()
# NOTE(review): the variable `s` is read from stdin just above this block;
# it appears to hold '_'-separated records whose fields are ';'-separated —
# confirm the exact input format against the assignment statement.
# Print the two-column table header.
print(' ФИО', end=' ')
print('О студенте'.rjust(45))
list1 = s.split('_')
s1 = ''
count = 0
# Skip list1[0] (text before the first '_'); each remaining record is one row.
for i in range(1, len(list1)):
    count = 0
    list2 = list1[i].split(';')
    for j in list2:
        count = count + 1
        s1 = str(j)
        if count < 4:
            if count == 3:
                # Field 3 is left-padded; the second record gets one column less.
                if i == 2:
                    print(s1.ljust(24), end='')
                else:
                    print(s1.ljust(25), end='')
            else:
                print(s1, end=' ')
        elif count == 4:
            # NOTE(review): list2[count] here reads field index 4, i.e. the
            # *next* field, not the current one — confirm this is intended.
            s1 = str(list2[count])
            print(s1, end=',')
        elif count == 5:
            # Reads field index 3 (count-2) and terminates the row.
            s1 = str(list2[count-2])
            print(s1)
s1 = ''
| true |
00dd4f677a2e6918ad626fad31eafe44563c19c6 | Python | RPellowski/machinevision-toolbox-python | /machinevisiontoolbox/blob.py | UTF-8 | 19,799 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
2D Blob feature class
@author: Dorian Tsai
@author: Peter Corke
"""
import numpy as np
import cv2 as cv
import spatialmath.base.argcheck as argcheck
import machinevisiontoolbox as mvt
from collections import namedtuple
import random as rng
import pdb
# Module-level seeding: runs exactly once at import time, not per instance.
rng.seed(13543) # would this be called every time at Blobs init?
class Blobs:
    """
    A 2D feature blob class
    """
    # NOTE(review): the class-level lists below are documentation
    # placeholders; __init__ reassigns every one of them as instance
    # attributes, so the mutable defaults are never shared in practice.
    # list of attributes
    _area = []
    _uc = [] # centroid (uc, vc)
    _vc = []
    _umin = [] # bounding box
    _umax = []
    _vmin = []
    _vmax = []
    _class = [] # TODO check what the class of pixel is?
    _label = [] # label assigned to this region
    _parent = [] # -1 if no parent, else index points to i'th parent contour
    _children = [] # list of children, -1 if no children
    _edgepoint = [] # (x,y) of a point on the perimeter
    _edge = [] # list of edge points
    _perimeter = [] # length of edge
    _touch = [] # 0 if bbox doesn't touch the edge, 1 if it does
    _a = [] # major axis length # equivalent ellipse parameters
    _b = [] # minor axis length
    _theta = [] # angle of major axis wrt the horizontal
    _aspect = [] # b/a < 1.0
    _circularity = []
    _moments = [] # named tuple of m00, m01, m10, m02, m20, m11
    # note that RegionFeature.m has edge, edgepoint - these are the contours
    _contours = []
    _image = []
    _hierarchy = []
    # NOTE(review): duplicate of the _perimeter declaration above
    _perimeter = []
def __init__(self, image=None):
if image is None:
# initialise empty Blobs
# Blobs()
self._area = None
self._uc = None # Two element array, empty? Nones? []?
self._vc = None
self._perimeter = None
self._umin = None
self._umax = None
self._vmin = None
self._vmax = None
self._touch = None
self._a = None
self._b = None
self._theta = None
self._aspect = None
self._circularity = None
self._moments = None
self._contours = None
self._hierarchy = None
self._parent = None
self._children = None
self._image = None
else:
# check if image is valid - it should be a binary image, or a
# thresholded image ()
# convert to grayscale/mono
image = mvt.getimage(image)
image = mvt.mono(image)
# TODO OpenCV doesn't have a binary image type, so it defaults to uint8 0 vs 255
image = mvt.iint(image)
self._image = image
# I believe this screws up the image moment calculations though,
# which are expecting a binary 0 or 1 image
# detect and compute keypoints and descriptors using opencv
# TODO pass in parameters as an option?
# TODO simpleblob detector becomes backbone of ilabels?
"""
params = cv.SimpleBlobDetector_Params()
params.minThreshold = 0
params.maxThreshold = 255 # TODO check if image must be uint8?
params.filterByArea = False
params.minArea = 60
params.maxArea = 100
params.filterByColor = False # this feature might be broken
params.blobColor = 1 # 1 - 255, dark vs light
params.filterByCircularity = False
params.minCircularity = 0.1 # 0-1, how circular (1) vs line(0)
params.filterByConvexity = False
# 0-1, convexity - area of blob/area of convex hull, convex hull being tightest convex shape that encloses the blob
params.minConvexity = 0.87
params.filterByInertia = False
# 0-1, how elongated (circle = 1, line = 0)
params.minInertiaRatio = 0.01
d = cv.SimpleBlobDetector_create(params)
keypts = d.detect(image)
# set properties as a list for every single blob
self._area = np.array(
[keypts[k].size for k, val in enumerate(keypts)])
centroid = np.array(
[keypts[k].pt for k, val in enumerate(keypts)]) # pt is a tuple
self._uc = np.array([centroid[k][0]
for k, val in enumerate(centroid)])
self._vc = np.array([centroid[k][1] for k, val in
enumerate(centroid)])
"""
# simpleblobdetector - too simple. Cannot get pixel values/locations of blobs themselves
# findcontours approach
contours, hierarchy = cv.findContours(
image, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_NONE)
self._contours = contours
nc = len(self._contours)
# change hierarchy from a (1,M,4) to (M,4)
self._hierarchy = np.squeeze(hierarchy)
self._parent = self._hierarchy[:, 2]
self._children = self._getchildren()
# get moments as a dictionary for each contour
mu = [cv.moments(self._contours[i])
for i in range(nc)]
mf = self._hierarchicalmoments(mu)
self._moments = mf
# TODO for moments in a hierarchy, for any pq moment of a blob ignoring its
# children you simply subtract the pq moment of each of its children.
# That gives you the “proper” pq moment for the blob, which you then use
# to compute area, centroid etc.
# for each contour
# find all children (row i to hierarchy[0,i,0]-1, if same then no
# children)
# recompute all moments
# get mass centers:
mc = [(mf[i]['m10'] / (mf[i]['m00']), mf[i]['m01'] / (mf[i]['m00']))
for i in range(nc)]
mc = np.array(mc)
self._uc = mc[:, 0]
self._vc = mc[:, 1]
# get areas:
area = [mf[i]['m00'] for i in range(nc)]
self._area = np.array(area)
# TODO sort contours wrt area descreasing
# get perimeters:
# pdb.set_trace()
perimeter = [np.sum(len(self._contours[i])) for i in range(nc)]
self._perimeter = np.array(perimeter)
# get circularity
# apply Kulpa's correction factor when computing circularity
# should have max 1 circularity for circle, < 1 for non-circles
kulpa = np.pi / 8.0 * (1.0 + np.sqrt(2.0))
circularity = [((4.0 * np.pi * self._area[i]) /
((self._perimeter[i] * kulpa) ** 2))
for i in range(nc)]
self._circularity = np.array(circularity)
# get bounding box:
cpoly = [cv.approxPolyDP(c, epsilon=3, closed=True)
for i, c in enumerate(self._contours)]
bbox = [cv.boundingRect(cpoly[i]) for i in range(len(cpoly))]
bbox = np.array(bbox)
# bbox in [u0, v0, length, width]
self._umax = bbox[:, 0] + bbox[:, 2]
self._umin = bbox[:, 0]
self._vmax = bbox[:, 1] + bbox[:, 3]
self._vmin = bbox[:, 1]
self._touch = self._touchingborder()
# TODO could do these in list comprehensions, but then much harder
# to read?
# equivalent ellipse from image moments
w = [None] * nc
v = [None] * nc
theta = [None] * nc
a = [None] * nc
b = [None] * nc
for i in range(nc):
u20 = mf[i]['m20'] / mf[i]['m00'] - mc[i, 0]**2
u02 = mf[i]['m02'] / mf[i]['m00'] - mc[i, 1]**2
u11 = mf[i]['m11'] / mf[i]['m00'] - mc[i, 0]*mc[i, 1]
cov = np.array([[u20, u11], [u02, u11]])
w, v = np.linalg.eig(cov) # w = eigenvalues, v = eigenvectors
a[i] = 2.0 * np.sqrt(np.max(np.diag(v)) / mf[i]['m00'])
b[i] = 2.0 * np.sqrt(np.min(np.diag(v)) / mf[i]['m00'])
ev = v[:, -1]
theta[i] = np.arctan(ev[1] / ev[0])
self._a = np.array(a)
self._b = np.array(b)
self._theta = np.array(theta)
self._aspect = self._b / self._a
# self._circularity
def _touchingborder(self):
t = [False]*len(self._contours)
# TODO replace with list comprehension?
for i in range(len(self._contours)):
if ((self._umin[i] == 0) or (self._umax[i] == self._image.shape[0]) or
(self._vmin[i] == 0) or (self._vmax[i] == self._image.shape[1])):
t[i] = True
return t
def __len__(self):
return len(self._area)
def __getitem__(self, ind):
new = Blobs()
new._area = self._area[ind]
new._uc = self._uc[ind]
new._vc = self._vc[ind]
new._perimeter = self._perimeter[ind]
new._umin = self._umin[ind]
new._umax = self._umax[ind]
new._vmin = self._vmin[ind]
new._vmax = self._vmax[ind]
new._a = self._a[ind]
new._b = self._b[ind]
new._aspect = self._aspect[ind]
new._theta = self._theta[ind]
new._circularity = self._circularity[ind]
new._touch = self._touch[ind]
return new
# ef label(self, im, connectivity=8, labeltype, cctype):
# for label.m
# im = image, binary/boolean in
# connectivity, 4 or 8-way connectivity
# labeltype specifies the output label image type - considering the
# total number of labels, or tot. # of pixels in source image?? (only
# CV_32S and CV_16U supported), default seems to be CV_32S
# cctype = labelling algorithm Grana's and Wu's supported
# output:
# labels - a destination labeled image (?)
#
# cv.connectedComponentsWithStats()
# TODO why is self necessary here?
def _hierarchicalmoments(self, mu):
# to deliver all the children of i'th contour:
# first index identifies the row that the next contour at the same
# hierarchy level starts
# therefore, to grab all children for given contour, grab all rows
# up to i-1 of the first row value
# can only have one parent, so just take the last (4th) column
# hierarchy order: [Next, Previous, First_Child, Parent]
# for i in range(len(contours)):
# print(i, hierarchy[0,i,:])
# 0 [ 5 -1 1 -1]
# 1 [ 4 -1 2 0]
# 2 [ 3 -1 -1 1]
# 3 [-1 2 -1 1]
# 4 [-1 1 -1 0]
# 5 [ 8 0 6 -1]
# 6 [ 7 -1 -1 5]
# 7 [-1 6 -1 5]
# 8 [-1 5 9 -1]
# 9 [-1 -1 -1 8]
mh = mu
for i in range(len(self._contours)): # for each contour
inext = self._hierarchy[i, 0]
ichild = self._hierarchy[i, 2]
if not (ichild == -1): # then children exist
ichild = [ichild] # make first child a list
# find other children who are less than NEXT in the hierarchy
# and greater than -1,
otherkids = [k for k in range(i + 1, len(self._contours)) if
((k < inext) and (inext > 0))]
if not len(otherkids) == 0:
ichild.extend(list(set(otherkids) - set(ichild)))
for j in range(ichild[0], ichild[-1]+1): # for each child
# all moments that need to be computed
# subtract them from the parent moment
# mh[i]['m00'] = mh[i]['m00'] - mu[j]['m00'] ...
# do a dictionary comprehension:
mh[i] = {key: mh[i][key] -
mu[j].get(key, 0) for key in mh[i]}
# else:
# no change to mh, because contour i has no children
return mh
def _getchildren(self):
# gets list of children for each contour based on hierarchy
# follows similar for loop logic from _hierarchicalmoments, so
# TODO finish _getchildren and use the child list to do
# _hierarchicalmoments
children = [None]*len(self._contours)
for i in range(len(self._contours)):
inext = self._hierarchy[i, 0]
ichild = self._hierarchy[i, 2]
if not (ichild == -1):
# children exist
ichild = [ichild]
otherkids = [k for k in range(i + 1, len(self._contours))
if ((k < inext) and (inext > 0))]
if not len(otherkids) == 0:
ichild.extend(list(set(otherkids) - set(ichild)))
children[i] = ichild
else:
# else no children
children[i] = [-1]
return children
def drawBlobs(self,
drawing=None,
icont=None,
colors=None,
contourthickness=cv.FILLED,
textthickness=2):
# draw contours of blobs
# contours - the contour list
# icont - the index of the contour(s) to plot
# drawing - the image to draw the contours on
# colors - the colors for the icont contours to be plotted (3-tuple)
# return - updated drawing
if (drawing is None) and (self._image is not None):
drawing = np.zeros(
(self._image.shape[0], self._image.shape[1], 3), dtype=np.uint8)
if icont is None:
icont = np.arange(0, len(self._contours))
else:
icont = np.array(icont, ndmin=1, copy=False)
if colors is None:
# make colors a list of 3-tuples of random colors
colors = [None]*len(icont)
for i in range(len(icont)):
colors[i] = (rng.randint(0, 256),
rng.randint(0, 256),
rng.randint(0, 256))
# contourcolors[i] = np.round(colors[i]/2)
# TODO make a color option, specified through text,
# as all of a certain color (default white)
# make contour colours slightly different but similar to the text color
# (slightly dimmer)?
cc = [np.uint8(np.array(colors[i])/2) for i in range(len(icont))]
contourcolors = [(int(cc[i][0]), int(cc[i][1]), int(cc[i][2]))
for i in range(len(icont))]
# TODO check contours, icont, colors, etc are valid
hierarchy = np.expand_dims(self._hierarchy, axis=0)
# done because we squeezed hierarchy from a (1,M,4) to an (M,4) earlier
for i in range(len(icont)):
# TODO figure out how to draw alpha/transparencies?
cv.drawContours(drawing, self._contours, icont[i], contourcolors[i],
thickness=contourthickness, lineType=cv.LINE_8,
hierarchy=hierarchy)
for i in range(len(icont)):
ic = icont[i]
cv.putText(drawing, str(ic),
(int(self._uc[ic]), int(self._vc[ic])),
fontFace=cv.FONT_HERSHEY_SIMPLEX, fontScale=1,
color=colors[i], thickness=textthickness)
return drawing
"""
def drawBlobs(self,
drawing=None,
iblob=None,
colors=None)
# function to plot the blobs (as opposed to contours)
# TODO function to do contour filling using fillPoly
cpoly = [cv.approxPolyDP(c, epsilon=3, closed=True)
for i, c in enumerate(self._contours)]
return drawing
"""
@property
def area(self):
return self._area
@property
def uc(self):
return self._uc
@property
def vc(self):
return self._vc
@property
def a(self):
return self._a
@property
def b(self):
return self._b
@property
def theta(self):
return self._theta
@property
def bbox(self):
return ((self._umin, self._umax), (self._vmin, self._vmax))
@property
def umin(self):
return self._umin
@property
def umax(self):
return self._umax
@property
def vmax(self):
return self._vmax
@property
def vmin(self):
return self._vmin
@property
def bboxarea(self):
return (self._umax - self._umin) * (self._vmax - self._vmin)
@property
def centroid(self):
return (self._uc, self.vc)
# TODO maybe ind for centroid: b.centroid[0]?
@property
def perimeter(self):
return self._perimeter
@property
def touch(self):
return self._touch
@property
def circularity(self):
return self._circularity
def printBlobs(self):
# TODO accept kwargs or args to show/filter relevant parameters
# convenience function to plot
for i in range(len(self._contours)):
print(str.format('({0}) area={1:.1f}, \
cent=({2:.1f}, {3:.1f}), \
theta={4:.3f}, \
b/a={5:.3f}, \
touch={6:d}, \
parent={7}, \
children={8}',
i, self._area[i], self._uc[i], self._vc[i],
self._theta[i], self._aspect[i],
self._touch[i], self._parent[i], self._children[i]))
if __name__ == "__main__":
# read image
# im = cv.imread('images/test/longquechen-moon.png', cv.IMREAD_GRAYSCALE)
# ret = cv.haveImageReader('images/multiblobs.png')
# print(ret)
im = cv.imread('images/multiblobs.png', cv.IMREAD_GRAYSCALE)
# call Blobs class
b = Blobs(image=im)
b.area
b.uc
# draw detected blobs as red circles
# DRAW_MATCHES_FLAGS... makes size of circle correspond to size of blob
# im_kp = cv.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# show keypoints
# cv.imshow('blob keypoints', im_kp)
# cv.waitKey(1000)
b0 = b[0].area
b02 = b[0:2].uc
print('Length of b =', len(b))
# TODO
# plot image
# plot centroids of blobs
# label relevant centroids for the labelled blobs
import random as rng # for random colors of blobs
rng.seed(53467)
drawing = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
colors = [None]*len(b)
icont = [None]*len(b)
for i in range(len(b)):
icont[i] = i
colors[i] = (rng.randint(0, 256), rng.randint(
0, 256), rng.randint(0, 256))
cv.rectangle(drawing, (b[i].umin, b[i].vmin), (b[i].umax, b[i].vmax),
colors[i], thickness=2)
# cv.putText(drawing, str(i), (int(b[i].uc), int(b[i].vc)),
# fontFace=cv.FONT_HERSHEY_SIMPLEX, fontScale=1, color=colors,
# thickness=2)
drawing = b.drawBlobs(drawing, icont, colors,
contourthickness=cv.FILLED)
# mvt.idisp(drawing)
im2 = cv.imread('images/multiblobs_edgecase.png', cv.IMREAD_GRAYSCALE)
b2 = Blobs(image=im2)
d2 = b2.drawBlobs(icont=1, contourthickness=-1)
# import matplotlib.pyplot as plt
# plt.imshow(d2)
# plt.show()
# mvt.idisp(d2)
# cv.imshow('blob contours', drawing)
# cv.waitKey()
# press Ctrl+D to exit and close the image at the end
import code
code.interact(local=dict(globals(), **locals()))
# pdb.set_trace()
| true |
39d5aedee790a0d86d59c15c4c17cdee4f316526 | Python | ph1-618O/cleaningApps | /describeData.py | UTF-8 | 2,037 | 2.6875 | 3 | [] | no_license | # Comment: Write file creates a module that can be imported with dependencies, %%writefile -a describeData.py appends, remove if func is changed
# Comment: This function prints stats for strings and integer value columns
import pandas as pd
import numpy as np
import requests
import os
import json
import matplotlib.pyplot as plt
from IPython.core.display import HTML
from datetime import date, datetime
def describeData(dataFrameName):
print('Executing describeData...')
print('-------------------------------')
global keyHeaders, colsData, stringDescribe, intDescribe, keyStr, KeyInt
keyStr, keyInt, keyHeaders, intDescribe, stringDescribe = [], [], [], [], []
for key, value in dataFrameName.items():
#grabs cols as keys into list
keyHeaders.append(key)
for i in keyHeaders:
#checks the cols data if string
if isinstance(dataFrameName[i][0], (str)):
stringDescribe.append(dataFrameName[keyHeaders][i].describe())
else:
intDescribe.append(dataFrameName[keyHeaders][i].describe())
stringDescribe = pd.DataFrame.from_dict(dict(zip(keyHeaders, stringDescribe)), orient='index')
intDescribe = pd.DataFrame.from_dict(dict(zip(keyHeaders, intDescribe)), orient='index')
#adding pretty print to dataframes, don't forget import statment when copying code
print('-------------------------------')
print('Object Describe Dataframe')
print('-------------------------------')
display(HTML(stringDescribe.to_html()))
#print(stringDescribe)
print('-------------------------------')
print('Integer/FloatDescribe Dataframe')
print('-------------------------------')
display(HTML(intDescribe.to_html()))
#print(intDescribe)
lengthofDF = len(dataFrameName)
print('-------------------------------')
print(f'Dataframe Length: {lengthofDF}')
print('-------------------------------')
columnNames = dataFrameName.columns.tolist()
print(f'ColumnNames: \n{columnNames}')
# Comment: by ph1-6180
| true |
bdd714580b98b85e2634912bc37ada18ea97842a | Python | nicolas-1997/Python_Profesional | /palindrome.py | UTF-8 | 625 | 4.84375 | 5 | [] | no_license | # This code is for practicing static typing
def is_palindrome(string: str):
string = string.replace(" ", "").lower() #we clean the word and save it in a variable
#we compare the word with same but other way around
if string == string[::-1]: #[::-1 serves to turn]
print("This is palindrome!!", string,"=", string[::-1])
else:
print("Not is a palindrome!!", string,"=", string[::-1])
def run():
palindrome = input("Enter a word: ") #we ask the user for a word
is_palindrome(palindrome) #we pass this word as a parameter for the function
if __name__=="__main__":
run()
| true |
4ad5b16e1982f0b431cff8073654d8a344f196f8 | Python | jlin12358/leetcode | /validAnagram.py | UTF-8 | 1,479 | 3.375 | 3 | [] | no_license | class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
# O(n) time complexity
# O(n) space complexity
dictionary = {}
if len(s) != len(t):
return False
for i in range(len(s)):
if s[i] in dictionary:
dictionary[s[i]] += 1
else:
dictionary[s[i]] = 1
if t[i] in dictionary:
dictionary[t[i]] -= 1
else:
dictionary[t[i]] = -1
for v in dictionary.values():
if v != 0:
return False
return True
'''
# O(nlog(n)) time complexity
s = sorted(s)
t = sorted(t)
if len(s) != len(t):
return False
return s == t
'''
'''
# Brute Force using two dictionaries
# O(n) time complexity
dict_s = {}
dict_t = {}
if len(s) != len(t):
return False
for i in range(len(s)):
if s[i] in dict_s:
dict_s[s[i]] += 1
else:
dict_s[s[i]] = 1
if t[i] in dict_t:
dict_t[t[i]] += 1
else:
dict_t[t[i]] = 1
for each in s:
if dict_t[each] != dict_s[each]:
return False
return True
'''
| true |
a14e480f6e62faaa96fde15be599a4a903149e15 | Python | djdubois/smart-speakers-study | /scripts/smart-speakers-testbed/scripts/extract-ttml | UTF-8 | 1,999 | 3.328125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python3
import sys
import os.path
from os import path
import xml.etree.ElementTree as ET
if len(sys.argv)<2:
print("This script extracts the subtitles from a file between [start] and [end] time in seconds.")
print()
print("If no start and end time are specified, all the subtitles will be printed.")
print("If only start time is specified, the default end time is the same as start time.")
print("An optional tolerance in second will be subtracted from start time, and added to end time.")
print()
print(f"Usage: {sys.argv[0]} <XML subtitle file> [start time] [end time] [tolerance]")
sys.exit(0)
begin_search = 0
end_search = 999999999
ttml = sys.argv[1]
if not path.exists(ttml):
print(f"Error: file '{ttml}' does not exist.")
sys.exit(1)
if len(sys.argv)>2:
try:
begin_search = int(sys.argv[2])
end_search = begin_search
except ValueError:
print("Error: start time must be a number.")
sys.exit(1)
if len(sys.argv)>3:
try:
end_search = int(sys.argv[3])
except ValueError:
print("Error: end time must be a number.")
sys.exit(1)
if len(sys.argv)>4:
try:
tolerance = int(sys.argv[4])
begin_search -= tolerance
end_search += tolerance
except ValueError:
print("Error: tolerance must be a number.")
sys.exit(1)
if end_search<begin_search:
printf("Error: end time is smaller than start time.")
sys.exit(1)
try:
tree = ET.parse(ttml)
except:
print(f"Error: file '{ttml}' cannot be loaded. Is it a proper TTML file?")
sys.exit(1)
subtitles=tree.findall(".//{http://www.w3.org/2006/10/ttaf1}p[@begin]")
for subtitle in subtitles:
begin_attr = int(float(subtitle.get('begin')[:-1])/10000000)
end_attr = int(float(subtitle.get('end')[:-1])/10000000)
text = " ".join(subtitle.itertext())
if begin_search<=end_attr and end_search>=begin_attr:
print(f"{begin_attr} {end_attr} {text}")
| true |
bcf193070bf661cb9fec1b39a7d891dccaf58f64 | Python | aouyang1/InsightInterviewPractice | /same_tree_guang.py | UTF-8 | 1,126 | 3.6875 | 4 | [] | no_license | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param p, a tree node
# @param q, a tree node
# @return a boolean
def isSameTree(self, p, q):
# first two levels edge cases
if p is None and q is None:
return True
if (p is None) is (q is not None):
return False
# check base structure is same
if (p.left is None) is (q.left is not None):
return False
if (p.right is None) is (q.right is not None):
return False
# check current node values
if p.val != q.val:
return False
# base case
if p.left is None and p.right is None:
return True
# other cases
elif p.left is None:
return self.isSameTree(p.right, q.right)
elif p.right is None:
return self.isSameTree(p.left, q.left)
else:
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
| true |
d36c4d6ff306c6c5f2d91a834cc69ed6c038f5aa | Python | agatakawalec/wdi | /klasy.py | UTF-8 | 2,785 | 3.4375 | 3 | [] | no_license | class Node:
def __init__(self, da):
self.data = da
self.next = None
self.prev = None
def __str__(self):
return str(self.data)
class BidirectionalList:
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def addtail(self, data):
if not self.head:
n = Node(data)
self.head = n
self.tail = n
self.size += 1
else:
m = self.tail
new_node = Node(data)
m.next = new_node
new_node.prev = m
self.tail = new_node
self.size += 1
def addhead(self, data):
if not self.head:
n = Node(data)
self.head = n
self.tail = n
self.size += 1
else:
m = self.head
new_node = Node(data)
m.prev = new_node
new_node.next = m
self.head = new_node
self.size += 1
def insert(self, data, index):
if not self.head:
n = Node(data)
self.head = n
self.tail = n
self.size += 1
else:
n=self.head
i=0
while n and i !=index:
i+=1
n=n.next
nextNode=n.next
prevNode=n
newNode = Node(data)
newNode.prev = prevNode
newNode.next = nextNode
prevNode.next = newNode
nextNode.prev = newNode
self.size += 1
def removetail(self,data):
if not self.head:
n = Node(data)
self.head = n
self.tail = n
self.size += 1
else:
n=self.tail
n.prev=m #m= NewNode
m=self.tail
m.next=None
self.size-=1
def removehead(self,data):
if not self.head:
n = Node(data)
self.head = n
self.tail = n
self.size += 1
else:
n=self.head
n.next=m
m=self.tail
m.prev=None
self.size-=1
def printList(self):
n= self.head
while n:
print(n)
n = n.next
def printListT(self):
n=self.tail
while n:
print(n)
n=n.prev
def findBest(selfself):
if not self.head:
n = Node(data)
self.head = n
self.tail = n
self.size += 1
else:
n=self.head
i=n
while n:
if(n.data>i.data)
i=n
n=n.next
ll = BidirectionalList()
ll.add(14)
ll.add("test")
ll.add(2.34)
ll.add(True)
ll.printList() | true |
15d2713622331ab6ef7d9f29362c91fcbb8e29c1 | Python | Andmontc/AirBnB_clone | /tests/test_models/test_user.py | UTF-8 | 1,877 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python3
"""
Test User containing classes to test on the Place class:
* Style.
* Documentation.
* Functionality.
"""
import unittest
import pep8
from models import user
from models.user import User
class TestPep8B(unittest.TestCase):
""" Check for pep8 validation. """
def test_pep8(self):
""" test base and test_base for pep8 conformance """
style = pep8.StyleGuide(quiet=True)
file1 = 'models/user.py'
file2 = 'tests/test_models/test_user.py'
result = style.check_files([file1, file2])
self.assertEqual(result.total_errors, 0,
"Found code style errors (and warning).")
class TestDocsB(unittest.TestCase):
""" Check for documentation. """
def test_module_doc(self):
""" check for module documentation """
self.assertTrue(len(user.__doc__) > 0)
def test_class_doc(self):
""" check for documentation """
self.assertTrue(len(user.__doc__) > 0)
def test_method_docs(self):
""" check for method documentation """
for func in dir(User):
self.assertTrue(len(func.__doc__) > 0)
class TestPlace(unittest.TestCase):
""" New class to test class Amenity"""
def setUp(self):
""" Setting up"""
self.new = User()
def tearDown(self):
""" Cleaning up after each test"""
del self.new
def test_is_instance(self):
""" Check if attributes are instances"""
self.assertTrue(type(self.new) is User)
def test_if_str(self):
"""Check if the attribute is a str"""
self.assertTrue(type(self.new.email) is str)
self.assertTrue(type(self.new.password) is str)
self.assertTrue(type(self.new.first_name) is str)
self.assertTrue(type(self.new.last_name) is str)
if __name__ == '__main__':
unittest.main()
| true |
237cd74ad4cf8248e803738397db5022ca5bc708 | Python | VinitaNarayanamurthi/Python_course_assignments | /Lab_7_LinkedLists/dlList.py | UTF-8 | 4,697 | 3.875 | 4 | [] | no_license | """
dlList.py
A circular doubly linked List interface and implementation in Python
author: Steven Carnovale and Vinita Narayanamurthi
"""
from dlnode import DoublyLinkedNode
class DoublyLinkedList:
__slots__ = '__head'
def __init__( self ):
""" Create an empty list.
"""
self.__head = None
def append( self, new_value ):
# we will need to append the node to the end
# It can have two possibilities - head is null or node already exists
# if head is null -
# create the new node,
# - newnode next is pointed to itlsef
# - new node prev also is pointed to itself
# else
# head.prev will give yu last node
# then make the next of last node point to the newnode
# also the newnode prev shd point to the last node
#newnode next shd point to head
# head.prev will point to new node
node = self.__head
newNode = DoublyLinkedNode(new_value)
if node == None:
# newNode.next = None
# newNode.prev = None
# self.__head = newNode
newNode.next = newNode
newNode.prev = newNode
self.__head = newNode
else:
# while node.next != None:
# node = node.next
# node.next = newNode
# newNode.prev = node
# newNode.next = None
node_last = self.__head.prev
node_last.next = newNode
newNode.prev = node_last
newNode.next = self.__head
self.__head.prev = newNode
def prepend(self, new_value):
""" Add value to the beginning of the list.
List is modified.
:param new_value: the value to add
:return: None
"""
"""
Prepend again has two options:
head is null - similar to append
head not null -
head prev to newnode (here node is head)
newnode next to node
newnode prev to None
"""
#self.__head = DoublyLinkedNode( new_value, self.__front )
node = self.__head
newNode = DoublyLinkedNode(new_value)
if node == None:
# newNode.prev = None
# newNode.next = None
# self.__head = newNode
newNode.prev = newNode
newNode.next = newNode
self.__head = newNode
else:
# node.prev = newNode
# newNode.next = node
# newNode.prev = None
# self.__head = newNode
node_last = self.__head.prev
node.prev = newNode
node_last.next = newNode
newNode.prev = node_last
newNode.next = node
self.__head = newNode
def move_clockwise(self, num):
print('The music starts (' + str(num) +'): ')
curr_node = self.__head
while(num >= 0):
print(curr_node.value + '->', end=' ')
curr_node = curr_node.next
num -=1
print(curr_node.prev.value + ' is stuck holding the potato')
# print(curr_node.prev.prev.value)
# print(curr_node.prev.prev.next.value)
# print('before changing', curr_node.prev.value)
curr_node.prev = curr_node.prev.prev
# print('after changing', curr_node.prev.value)
curr_node.prev.next = curr_node
self.__head = curr_node
# print('finally we have', curr_node.prev.next.value)
def move_anticlockwise(self, num):
print('The music starts (' + str(num) + '): ')
num = abs(num)
curr_node = self.__head
while (num >= 0):
# print('potato passing anticlock to', curr_node.value)
print(curr_node.value + '->', end=' ')
curr_node = curr_node.prev
num -= 1
print( curr_node.next.value + ' is stuck holding the potato')
# print(' before changind curr node next is', curr_node.next)
curr_node.next = curr_node.next.next
# print(' after changing curr node next is', curr_node.next)
# print('before chaning u have ', curr_node.next.prev )
curr_node.next.prev = curr_node
# print('also changed is ', curr_node.next.prev )
self.__head = curr_node
def print_clockwise(self):
curr = self.__head
if(curr is curr.next):
print(curr.value)
while(curr.next != self.__head):
print(curr.value )
curr = curr.next
print(curr.value)
def exists(self):
return self.__head
| true |
c58ecb2266da67ca71e5909da81fea89f543e568 | Python | beast3334/sudokusolver | /Solver.py | UTF-8 | 2,920 | 2.875 | 3 | [] | no_license | import pyautogui
import cv2, numpy as np
from PIL import Image
import BoardSolver
topLeftLocation = pyautogui.locateCenterOnScreen("TopLeft.png")
bottomRightLocation = pyautogui.locateCenterOnScreen("BottomRight.png")
sudokuGrid = [[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]]
print("Puzzle Location: ",topLeftLocation,bottomRightLocation)
size = (300,350)
im = pyautogui.screenshot(imageFilename="my_screenshot.png",region=(topLeftLocation[0],topLeftLocation[1],bottomRightLocation[0]-topLeftLocation[0],bottomRightLocation[1]-topLeftLocation[1] + 10))
im2 = Image.open("my_screenshot.png")
im2.thumbnail(size,Image.ANTIALIAS)
im2.save("my_screenshot2.png","PNG")
imageList = ["1","2","3","4","5","6","7","8","9"]
img_rgb = cv2.imread("my_screenshot2.png")
for index, imageIndex in enumerate(imageList):
# Reads in the images into CV2 objects
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(str(imageIndex) + ".png",0)
#Gets the width and height of the template object?
w,h = template.shape[::-1]
print("Width - Height: " , w,h)
#finds the images in the main image
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
threshold = 0.825
loc = np.where(res >= threshold)
print(loc)
print(loc[::-1])
print(*loc[::-1])
#Draws rectangle, using zip?
for pt in zip(*loc[::-1]):
cv2.rectangle(img_rgb,pt,(pt[0] + w, pt[1] + h), (0,0,255), 2)
#enumerating through the xvalues of the locations
for i in range(9):
for j in range(9):
for k, location in enumerate(loc[1]):
if((location >= 6 + (33*i) and location <= 37 + (33*i)) and (loc[0][k] >= 1 + (33*j) and loc[0][k] <= 32 + (33*j))):
sudokuGrid[j][i] = index + 1
print(sudokuGrid)
cv2.imshow("output",img_rgb)
cv2.waitKey(0)
#Start input onto webpage
solvedBoard = BoardSolver.solveBoard(sudokuGrid)
for i in range(9):
try:
firstIndex = [i,sudokuGrid[i].index(0)]
break
except:
pass
print(firstIndex)
pyautogui.click((topLeftLocation[0] + 25 + (31 * firstIndex[1]),topLeftLocation[1] + 25 + (31 * firstIndex[0])))
for rowIndex, row in enumerate(sudokuGrid):
for columnIndex, column in enumerate(row):
if column != 0:
if not(rowIndex == 8 and columnIndex == 8):
pyautogui.press("tab")
else:
pyautogui.press(str(solvedBoard[rowIndex][columnIndex]))
if not(rowIndex == 8 and columnIndex == 8):
pyautogui.press("tab")
pyautogui.press("enter")
#Squares are 31 pixals long, starting at 6
#Sqaures are 31 pixals long, starting at 1 | true |
24d01f3c490c16130a0912020feef2c0a373db44 | Python | zzf531/leetcode | /每日一题/面试题57 - II. 和为s的连续正数序列.py | UTF-8 | 427 | 3.109375 | 3 | [] | no_license | class Solution(object):
def findContinuousSequence(self, target):
ans = []
a = target // 2 + 1
for i in range(1,a):
res = []
while sum(res) <= target:
if sum(res) == target:
ans.append(res)
break
res.append(i)
i += 1
return ans
a = Solution()
print(a.findContinuousSequence(9)) | true |
1ff9831112d4f33d350b5325807f9ad5f30a871c | Python | fald/algo-trade-strat | /main.py | UTF-8 | 2,794 | 3.65625 | 4 | [] | no_license | # Description:
# This program uses the dual moving average crossover to determine
# buy and sell points of stock.
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
figsize = (12.5, 4.5)
filename = "AAPL.csv"
#filename = "kaggle_AAPL.csv"
aapl = pd.read_csv(filename)
plt.figure(figsize=figsize)
plt.plot(aapl['Adj Close'], label="AAPL")
plt.title("Apple's adjusted closing price history")
#plt.xlabel("29 Sept 2014 - 29 Mar 2018")
plt.ylabel("Adj. close prices - USD")
plt.legend(loc="upper left")
plt.show()
# Simple moving average, 30 day window.
sma30 = pd.DataFrame()
sma30['Adj Close'] = aapl['Adj Close'].rolling(window=30).mean()
# Long term average
sma100 = pd.DataFrame()
sma100['Adj Close'] = aapl['Adj Close'].rolling(window=100).mean()
# Visualise
plt.figure(figsize=figsize)
plt.plot(aapl['Adj Close'], label="AAPL")
plt.plot(sma30['Adj Close'], label="30-day")
plt.plot(sma100['Adj Close'], label="100-day")
plt.title("Apple's adjusted closing price history")
#plt.xlabel("29 Sept 2014 - 29 Mar 2018")
plt.ylabel("Adj. close prices - USD")
plt.legend(loc="upper left")
plt.show()
# New dataframe
data = pd.DataFrame()
data['AAPL'] = aapl['Adj Close']
data['SMA30'] = sma30['Adj Close']
data['SMA100'] = sma100['Adj Close']
# Return buy/sell prices to plot on chart directly
def buy_sell(data):
buy = []
sell = []
flag = -1 # When do moving averages cross?
for i in range(len(data)):
if data['SMA30'][i] > data['SMA100'][i]:
if flag != 1:
buy.append(data['AAPL'][i])
sell.append(np.nan)
flag = 1
else:
buy.append(np.nan)
sell.append(np.nan)
elif data['SMA30'][i] < data['SMA100'][i]:
if flag != 0:
buy.append(np.nan)
sell.append(data['AAPL'][i])
flag = 0
else:
buy.append(np.nan)
sell.append(np.nan)
else:
buy.append(np.nan)
sell.append(np.nan)
return buy, sell
b_s = buy_sell(data)
data['Buy Signal Price'] = b_s[0]
data['Sell Signal Price'] = b_s[1]
# Visualise data + strategy
plt.figure(figsize=figsize)
plt.plot(data['AAPL'], label='AAPL', alpha=0.35)
plt.plot(data['SMA30'], label='SMA30', alpha=0.35)
plt.plot(data['SMA100'], label='SMA100', alpha=0.35)
plt.scatter(data.index, data['Buy Signal Price'], label="Buy", marker="^", color="green")
plt.scatter(data.index, data['Sell Signal Price'], label="Sell", marker="v", color="red")
plt.title("Apple Adj Close Price History - Buy/Sell Signals")
plt.ylabel("Adj. close prices - USD")
plt.legend(loc='upper left')
plt.plot()
| true |
67e3e95c15ad4b3cdc7ce45ca085cb1988e89f50 | Python | wxkpythonwork/contest | /Tianchi_License/leak_view.py | UTF-8 | 929 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | # encoding=utf-8
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('train.chusai.csv',header=0)
df1 = pd.read_csv('train.csv',header=0)
mdf = pd.merge(df, df1, on='ds', how='left')
print mdf[mdf['ds'] == '2016-05-01'].index[0]
mdf['ratio'] = mdf['cnt_y']/mdf['cnt_x'] #fusai/chusai = 1.44
mdf['cnt_y'] = mdf['cnt_x'] * 1.44
mdf[['ds', 'cnt_y']].to_csv('data/real_sum.csv',index=False)
pdf = mdf[1215:]
plt.plot(pdf['ds'], pdf['cnt_x'])
plt.plot(pdf['ds'], pdf['cnt_y'])
print mdf['ratio'].describe()
"""
count 1258.000000
mean inf
std NaN
min 0.000000
25% 1.213120
50% 1.441673
75% 1.749564
max inf
Name: ratio, dtype: float64
fusai /chusai = 1.44 附近
chusai: answer_a ->2017-02-17 about .
fusai: a -> 2016-10-09 about .
"""
mdf.plot('ds', 'ratio')
mdf.plot('ds', ['cnt_x','cnt_y'])
plt.show()
| true |
becbf0d06cd75a433827ff9e438f00e64e570a07 | Python | JASONews/leveldb | /avggf.py | UTF-8 | 1,309 | 2.5625 | 3 | [
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause"
] | permissive | #i/usr/bin/python
import sys
import math
def dev(l):
t = 0
for i in l:
t += float(i)
avg = float(t) / len(l)
t = 0
for i in l:
t += (float(i) - avg)**2
return math.sqrt(t/len(l))
f = open(sys.argv[1])
header = f.readline()
t = []
for i in f:
t.append(tuple(i.split(',')))
t.sort()
avg=[]
i = 0
cur = t[i][0]
total = 0
perops = 0
wa = 0
ic = 0
count = 0
devtotal = []
devperops = []
devic = []
devwa = []
while i < len(t):
if t[i][0] == cur:
total += float(t[i][1])
perops += float(t[i][2])
ic += float(t[i][3])
wa += float(t[i][4])
devtotal.append(t[i][1])
devperops.append(t[i][2])
devic.append(t[i][3])
devwa.append(t[i][4])
i += 1
count+=1
elif cur > 0:
avg.append("%s,%f,%f,%f,%f,%f,%f,%f,%f" % (cur, total/float(count),dev(devtotal), perops/float(count), dev(devperops), ic/float(count), dev(devic), wa/float(count), dev(devwa)))
cur = t[i][0]
count = 0
wa = 0
ic = 0
perops=0
total = 0
devtotal=[]
devperops=[]
devwa=[]
devic=[]
avg.append("%s,%f,%f,%f,%f,%f,%f,%f,%f" % (cur, total/float(count),dev(devtotal), perops/float(count), dev(devperops), ic/float(count), dev(devic), wa/float(count), dev(devwa)))
f2 = open(sys.argv[1]+"_avg.csv",mode='w')
f2.write(header)
for i in avg:
f2.write(i+"\n");
| true |
cc286c8ab27187a6b505d6abe036a678a12b8499 | Python | dominonivictor/raw_tbs_game | /functions/map_functions.py | UTF-8 | 834 | 2.734375 | 3 | [] | no_license | import constants.colors as colors
from random import randint
#TODO TOO MUCH REPETITION
def random_map_cost_tile_gen():
r = randint(1, 12)
if r in [1, 2]:
move_cost = 2
tile_color = colors.FOREST_GREEN
elif r in [3, 4]:
move_cost = 3
tile_color = colors.MOUNTAIN_ORANGE
else:
move_cost = 1
tile_color = colors.BASIC_BLACK
return move_cost, tile_color
def defined_map_cost_tile_gen(x, y, t_coords):
'''this is kinda messy and not so reusable...'''
if (x, y) in t_coords["mountain"]:
move_cost = 3
tile_color = colors.MOUNTAIN_ORANGE
elif (x, y) in t_coords["forest"]:
move_cost = 2
tile_color = colors.FOREST_GREEN
else:
move_cost = 1
tile_color = colors.BASIC_BLACK
return move_cost, tile_color
| true |
5906e3a79928dda23781b17b0dd92f48d63dfc4f | Python | BigWillieN/PoliTO-Schoolwork | /Labs/Lab05/ex01.py | UTF-8 | 268 | 3.703125 | 4 | [] | no_license | def ex01_main():
list = []
ex1 = []
while list != ".":
ex1 = input("Enter a string:")
if ex1 != ".":
list.append(ex1)
else:
break
sorted_list = sorted(list)
print(sorted_list)
ex01_main()
| true |
7d6454d988eec1a08dbe2f8129ccef610a573c3a | Python | botblox/botblox-manager-software | /botblox_config/switch/port.py | UTF-8 | 369 | 3.234375 | 3 | [
"MIT"
] | permissive | class Port:
def __init__(self, name: str, port_id: int) -> None:
"""
:param name: Name of the port. This name is used in CLI commands to refer to the port.
:param port_id: ID of the port. For internal use by the library.
"""
self.name = name
self.id = port_id
def __repr__(self) -> str:
return self.name
| true |
535d2acb6c84c63c20d1fdb93d683c74411b4f23 | Python | fabo893/holbertonschool-higher_level_programming | /0x0A-python-inheritance/2-is_same_class.py | UTF-8 | 452 | 3.984375 | 4 | [] | no_license | #!/usr/bin/python3
"""
2-is_same_class
This module is to check an instance
"""
def is_same_class(obj, a_class):
    """Check if an object is exactly an instance of the specified class.

    Args:
        obj: object to be verified
        a_class: class to compare against (subclasses do not match)

    Return: True if type(obj) is exactly a_class, otherwise False
    """
    # `type(obj) is a_class` already evaluates to a bool; the original
    # if/else returning True/False was redundant.
    return type(obj) is a_class
| true |
e4b4a630c6b733622fa61576e8edea24e18b6779 | Python | Jingliwh/python3- | /pyfunc.py | UTF-8 | 7,178 | 3.765625 | 4 | [] | no_license | #python 高级面向对象属性
#动态绑定属性和方法
#定义类后,再将方法和属性绑定
'''
class Ball(object):
name="ball"
def ball_add(self):
print("ball method")
from types import MethodType
#给某个类的对象绑定方法,不影响其他类的对象
pingpang=Ball()
pingpang.ball_add=MethodType(ball_add,pingpang)
pingpang.ball_add() #ball method
#volleyball=Ball()
#volleyball.ball_add()#报错,volleyball没有该方法
#给类绑定方法,所有类的对象拥有该方法
Ball.ball_add=ball_add
volleyball=Ball()
volleyball.ball_add() #ball method
'''
#python 限制类的属性扩展(__slots__)
#定义后类的属性不能在类的对象上进行扩展
#:注意,__slots__只对当前类作用,对子类无作用
'''
class People(object):
__slots__=("name","age","height") #元组定义属性
xiaoming=People()
xiaoming.name="xiaoming"
xiaoming.age=18
xiaoming.height=176
#xiaoming.weight=77 #报错,People object has no attribute 'weight'
print(xiaoming.age,xiaoming.name,xiaoming.height)
'''
#装饰器,@property广泛应用在类的定义中,可以让调用者写出简短的代码,
#同时保证对参数进行必要的检查,这样,程序运行时就减少了出错的可能性。
#_xxx (受保护) 不能用'from module import *'导入
#__xxx__ 系统定义名字
#__xxx 类中的私有变量名
'''
class Screen(object):
@property
def width(self):
return self.__width
@property
def height(self):
return self.__height
@width.setter
def width(self,value):
if not isinstance(value,int):
raise ValueError("不是一个整数 not a integer")
if value<0 or value>1080:
raise ValueError("超出范围 must between 0-1080")
self.__width=value
@height.setter
def height(self,value):
if not isinstance(value,int):
raise ValueError("不是一个整数 not a integer")
if value<0 or value>1920:
raise ValueError("超出范围 must between 0-1920")
self.__height=value
@property
def resolution(self):
return 1920*1080
mycall=Screen()
mycall.width=1000
print("mycall.width=",mycall.width)
mycall.height=986
print("mycall.height=",mycall.height)
print("mycall.resolution=",mycall.resolution)
mycall.height=10
print("mycall.height=",mycall.height)
#mycall.resolution=600 #AttributeError: can't set attribute
print("mycall.resolution=",mycall.resolution)
'''
#python 支持多种继承 子类可继承过个父类
#class Apple(Fruit,Plant)
#python 系统自定义函数__xxx__ 如果需要改写相应功能,也可以自己实现,
#类似与java Object类的toString() equals()---
#主要方法如下
#1:__str__ 类似于toString()方法
#2:__iter__ 类似于iterator迭代输出
#3:__slots__ 限制类的属性扩展
#4:__getitem__ #像list那样按照下标取出元素
#5:__getattr__ #只有在没有找到属性的情况下,才调用__getattr__,已有的属性,比如name,不会在__getattr__中查找。
#6:__call__ #s(),类似于java构造方法
#python 枚举
from enum import Enum
#定义枚举类1
'''
Season=Enum('Season',('Spring','Summer','Autumn','Winter'))
for name,member in Season.__members__.items():
print(name,"=>",member,",",member.value)
#Spring => Season.Spring , 1
#Summer => Season.Summer , 2
#Autumn => Season.Autumn , 3
#Winter => Season.Winter , 4
'''
#定义枚举类2
'''
class Season(Enum):
Spring=1
Summer=2
Autumn=3
Winter=4
for name,member in Season.__members__.items():
print(name,"=>",member,",",member.value)
#Spring => Season.Spring , 1
#Summer => Season.Summer , 2
#Autumn => Season.Autumn , 3
#Winter => Season.Winter , 4
'''
#元类
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#ORM 对象关系映射
#type()函数也允许我们动态创建出类来,也就是说,动态语言本身支持运行期动态创建类
#metaclass就可以根据这个类创建出实例,先定义metaclass,然后创建类
def fn(self, name='world'):  # defined first, then attached to a dynamically created class below
    """Print a greeting for *name*; the `self` parameter lets it serve as a method."""
    message = 'Hello, %s.' % name
    print(message)
# Build a class at runtime: type(name, bases, namespace) returns a new class
# whose `hello` method is the module-level `fn` defined above.
Hello = type('Hello', (object,), dict(hello=fn))  # create the Hello class dynamically
h = Hello()
print('call h.hello():')
h.hello()
# A class created via type() is itself an instance of type.
print('type(Hello) =', type(Hello))
#.lambda匿名函数
'''
class ListMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['add'] = lambda self, value: self.append(value)
return type.__new__(cls, name, bases, attrs)
class MyList(list, metaclass=ListMetaclass):
pass
l=MyList()
l.add(13)
l.add(15)
print(l[0])
print(l[1])
'''
#ORM例子
#首先来定义Field类,它负责保存数据库表的字段名和字段类型:
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
' Simple ORM using metaclass '
class Field(object):
def __init__(self, name, column_type):
self.name = name
self.column_type = column_type
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name)
class StringField(Field):
def __init__(self, name):
super(StringField, self).__init__(name, 'varchar(100)')
class IntegerField(Field):
def __init__(self, name):
super(IntegerField, self).__init__(name, 'bigint')
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
if name=='Model':
return type.__new__(cls, name, bases, attrs)
print('Found model: %s' % name)
mappings = dict()
for k, v in attrs.items():
if isinstance(v, Field):
print('Found mapping: %s ==> %s' % (k, v))
mappings[k] = v
for k in mappings.keys():
attrs.pop(k)
attrs['__mappings__'] = mappings # 保存属性和列的映射关系
attrs['__table__'] = name # 假设表名和类名一致
return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass=ModelMetaclass):
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
def save(self):
fields = []
params = []
args = []
for k, v in self.__mappings__.items():
fields.append(v.name)
params.append('?')
args.append(getattr(self, k, None))
sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))
print('SQL: %s' % sql)
print('ARGS: %s' % str(args))
# testing code:
class User(Model):
id = IntegerField('id')
name = StringField('username')
email = StringField('email')
password = StringField('password')
u = User(id=12345, name='Michael', email='test@orm.org', password='my-pwd')
u.save()
''' | true |
a9d31b633418762ebae164bfa9bc6762d42f69dc | Python | anujpuri72/LeetCodeSubmissions | /MayChallenge/Week1/RansomNote..py | UTF-8 | 453 | 2.90625 | 3 | [] | no_license | class Solution:
def canConstruct(self, ransomNote: str, magazine: str) -> bool:
resa = defaultdict(lambda: -1)
for keys in ransomNote:
resa[keys] = resa.get(keys, 0) + 1
resb = defaultdict(lambda: -1)
for keys in magazine:
resb[keys] = resb.get(keys, 0) + 1
for a, b in resa.items():
if(resb[a] == -1 or resa[a] > resb[a]):
return False
return True
| true |
6b20d0a875ef217883b5b414d4215c6c18c19809 | Python | ShinjiKatoA16/tkinter_sample | /tk25.pyw | UTF-8 | 258 | 3.328125 | 3 | [
"MIT"
] | permissive | # P17 tk25.pyw
import tkinter as tk
def get_text():
print(tx.get('1.5', '3.4'))
root = tk.Tk()
tx = tk.Text(width=30, height=5)
bt = tk.Button(text='get Line1-Col6 to Line3-Col4', command=get_text)
[widget.pack() for widget in (tx,bt)]
root.mainloop() | true |
f37b487a7e16338ee4c8d27e386ef90c4aa3b5e7 | Python | lyz05/Sources | /北理珠/python/python123/遍历字符串并错后显示.py | UTF-8 | 140 | 3.421875 | 3 | [] | no_license | s = input()
for ch in s:
if (ch=='z'):
exit(0)
else:
print(chr(ord(ch)+1),end='')
print(' 哈哈,成功遍历!') | true |
70aab12b938671aaa45d357a833eb7473d9a366a | Python | flameous/tiltech-medhack-bot | /models.py | UTF-8 | 4,508 | 2.84375 | 3 | [] | no_license | import requests
import json
from telebot import types
state_chatting = 'state_chatting'
state_menu = 'state_menu'
button_open_jira = 'Открыть веб-интерфейс'
button_chat = 'Чат со специалистом'
button_back_to_menu = 'Закрыть чат'
ikb = types.InlineKeyboardButton
class User:
def __init__(self, tg_id: int, state: str, mobile_number: int):
self.tg_id = tg_id
self.state = state
self.mobile_number = mobile_number
def __str__(self):
return self.dump()
def dump(self) -> str:
return json.dumps({
"uid": self.tg_id,
"state": self.state,
"mobile_number": self.mobile_number
})
class Database:
def __init__(self, addr_port: str = "http://80.211.129.44:8100/user"):
"""
Обёртка для БД
:param addr_port: адрес и порт удалённого сервера с БД
"""
self.addr_port = addr_port
self.dict = {}
def get_user(self, tg_id: int) -> User:
return self.dict.get(tg_id, None)
r = requests.get(self.addr_port + str(tg_id))
if r.status_code not in (200, 404):
raise BaseException('/get_user error, response text: -- ' + r.text)
if r.status_code == 404:
return None
data = json.loads(r.text)
u = User(
data['tg_id'],
data['state'],
data['mobile_number']
)
return u
def save_user(self, u: User):
return self.dict.update({u.tg_id: u})
r = requests.post(self.addr_port + str(u.tg_id), data={"user": u.dump()})
if r.status_code != 200:
raise BaseException('save user error, response text: --' + r.text)
return
def reset(self):
requests.post(self.addr_port + '/reset')
def save_contact_number(self, uid, number):
return
class Logic:
def __init__(self, db: Database):
self.db = db
pass
def set_state_and_save(self, u: User, state: str):
u.state = state
self.db.save_user(u)
@staticmethod
def handle_start():
markup = types.ReplyKeyboardMarkup()
markup.add(types.KeyboardButton('Отправить номер', request_contact=True))
return "Чтобы работать в нашей системе, надо дать согласие на ...\n" \
"Разрешите получить ваш номер телефона", markup
def handle_save_number(self, uid, contact):
self.db.save_user(User(uid, state_menu, contact))
return self.menu()
@staticmethod
def menu():
markup = types.InlineKeyboardMarkup()
markup.row(*[ikb(button_chat, callback_data=button_chat)])
markup.row(*[ikb(button_open_jira, callback_data=button_open_jira, url="http://panacea.cloud/")])
markup.row(*[ikb(button_back_to_menu, callback_data=button_back_to_menu)])
return "Добро пожаловать в систему!", markup
@staticmethod
def markup_button_back():
markup = types.InlineKeyboardMarkup()
markup.add(ikb(button_back_to_menu, callback_data=button_back_to_menu))
return markup
def handle(self, uid: int, message: str) -> tuple:
"""
Общая логика бота
:param uid: id юзера
:param message: его сообщение
:return: сообщение, отсылаемое юзеру (опционально: кнопки)
"""
# достаём юзера
u = self.db.get_user(uid)
if not u:
# если это новый юзер, то запрашиваем его номер
return self.handle_start()
if message == button_back_to_menu:
self.set_state_and_save(u, state_menu)
return self.menu()
if u.state == state_chatting:
return self.handle_chat(uid, message)
if message == button_chat:
self.set_state_and_save(u, state_chatting)
return "Сейчас с вами свяжется кейс-менеджер >%s<" % str(uid), None
return self.menu()
def reset(self):
self.db.reset()
pass
@staticmethod
def handle_chat(uid, message):
requests.post('http://80.211.129.44:8100/send_to_chat', data={"uid": uid, "message": message})
return None, None
| true |
2c7698618317468cbc2db01713ac8edc5fdfc541 | Python | Tymotheus/Ensimag-Python | /4_Listes/suffixes.py | UTF-8 | 6,803 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Generalne Description:
In the following task, I have implemented a class for a List with shared suffixes.
I have proposed several metohds for operating on it allocating and optimising memory.
The most important "suffixe" allows to concatenate one list to another, saving memory for shared suffixes.
Comments are both in French (by the teacher) and in English (by me).
"""
from tycat import data_tycat
class Cellule:
    """One cell of a linked list.

    Stores a value, a pointer to the next cell, and a counter of how many
    lists or cells currently point at this one (shared-suffix bookkeeping).
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, valeur, suivant=None):
        self.suivant = suivant
        self.valeur = valeur
        self.utilisation = 1
class Liste:
    """
    A singly linked list of Cellule objects.

    Distinct lists may share cells: a whole suffix can be reused by several
    lists, tracked through each cell's `utilisation` counter. (Lists do not
    have to share cells - they can also be fully separate.)
    """
    def __init__(self, mot):
        """
        Turn a word into a non-shared list (one cell per letter).

        The list is built starting from the tail and finishing with the head.
        """
        premiere_cellule = None
        self.taille = 0
        for lettre in reversed(mot):
            premiere_cellule = Cellule(lettre, premiere_cellule)
            self.taille += 1
        self.tete = premiere_cellule
    def cellules(self):
        """
        Iterate over every cell of the list, from the head to the tail.
        """
        cellule_courante = self.tete
        while cellule_courante is not None:
            yield cellule_courante
            cellule_courante = cellule_courante.suivant
    def get_word(self):
        """Return the word spelled by the list, or None (with a message) if empty."""
        if self.taille == 0:
            print("Mot vide")
            return None
        else:
            cell = self.tete
            output = ""
            while cell is not None:
                output += cell.valeur
                cell = cell.suivant
            return output
    def get_this_cell(self,number): # number is the 1-based position of the desired cell
        """Return the cell at 1-based position `number`, or None if past the end."""
        # NOTE(review): number < 1 only prints a warning; the walk below still
        # starts from the head - confirm this leniency is intended.
        if number < 1:
            print("Error, number must be a positive number.")
        if number > self.taille:
            print("Error, number exceeds list size.")
            return None
        cell = self.tete
        for i in range(1,number):
            cell = cell.suivant
        return cell
    def copy_list(self,cell):
        """
        Duplicate the shared tail of this list so it is no longer shared.

        `cell` is the cell 'just before' a cell whose utilisation is > 1
        (or the head itself, when the head's utilisation is > 1): from
        there on, every following cell is replaced by a private copy.
        """
        if cell == None:
            print ("Error, invalid cell")
            return None
        if cell.utilisation > 1 and cell == self.tete:
            # The shared head loses one user; this list gets a private copy of it.
            self.tete.utilisation -= 1
            new_head = Cellule(cell.valeur)
            self.tete = new_head
            current_new = new_head
            current_old = cell
        else:
            current_new = cell
            current_old = cell
        # Walk the old chain, appending a fresh copy of each following cell.
        while current_old.suivant is not None:
            #if current_old.suivant.utilisation > 1: #this part is wrong but needs further verification
            #current_old.suivant.utilisation -= 1
            new_cellule = Cellule(current_old.suivant.valeur)
            help_cellule = current_old #this one is to prevent problems when new = old
            current_new.suivant = new_cellule
            current_old = help_cellule.suivant
            current_new = new_cellule
    def suffixe(self, autre):
        """
        Append the list `autre` at the end of `self`, sharing its cells.

        If the tail of `self` was already shared with another list, the
        whole shared part is duplicated first (see copy_list), so only
        `autre` ends up shared through this operation.
        """
        # First check whether some cells are shared, i.e. whether any cell
        # has a "utilisation" counter greater than 1.
        if autre.taille == 0:
            print("Autre est une liste vide")
            return None
        if self == autre:
            print("Concatenating a list with itself")
            # Self-concatenation: rebuild a private copy of the word and hook
            # the current head behind it (old cells become the shared suffix).
            new_liste = Liste(self.get_word())
            new_liste.get_this_cell(new_liste.taille).suivant = self.tete
            self.tete.utilisation += 1
            # NOTE(review): taille is set to the original length, not doubled
            # - confirm this is the intended size after self-concatenation.
            new_liste.taille = self.taille
            self.tete = new_liste.tete
            return None
        cell = self.tete
        # Case: the suffix starting at the head is already used more than once.
        if cell.utilisation > 1:
            self.copy_list(cell)
        # Case: a suffix further down the list is used more than once.
        else:
            while cell.suivant is not None:
                if cell.suivant.utilisation > 1:
                    self.copy_list(cell)
                    break
                else:
                    cell = cell.suivant
        # Hook `autre` behind our (now private) last cell and share its head.
        self.get_this_cell(self.taille).suivant = autre.tete
        self.taille += autre.taille
        autre.tete.utilisation += 1
    def __del__(self):
        """
        Destructor: release this list's cells.

        Walks from the head and stops at the first cell still used by
        another list, decrementing that cell's usage count; earlier cells
        are simply left for the garbage collector.
        """
        print("Calling destructor")
        cell = self.tete
        while cell is not None:
            if cell.utilisation > 1:
                print("Decreasing utilisation of Cell: '" + cell.valeur + "' from: " + str(cell.utilisation) + " to: " + str(cell.utilisation-1) )
                cell.utilisation -= 1
                return None
            cell = cell.suivant
def test_listes():
    """
    Exercise every basic operation, in several sharing configurations.

    Interactive demo: each step renders the lists with tycat and waits
    for Enter before continuing.
    """
    # Note: we manipulate an array of lists.
    listes = [Liste(mot) for mot in ("SE", "PAS", "DE", "DEVIS")]
    data_tycat(listes)
    _ = input()
    # Append listes[0] after listes[1], then append an empty word (no-op).
    print("on ajoute listes[0] apres liste[1], puis un mot vide")
    listes[1].suffixe(listes[0])
    listes[1].suffixe(Liste(""))
    data_tycat(listes)
    _ = input()
    # Append listes[1] after listes[2] and listes[0] after listes[3].
    print("on ajoute listes[1] apres listes[2] et listes[0] apres listes[3]")
    listes[2].suffixe(listes[1])
    listes[3].suffixe(listes[0])
    data_tycat(listes)
    _ = input()
    # Delete 'DEVIS' (triggers Liste.__del__ on the shared structure).
    print("on efface 'DEVIS'")
    del listes[3]
    data_tycat(listes)
    _ = input()
    # Append 'NT' after 'PASSE'.
    print("on ajoute 'NT' apres 'PASSE'")
    listes[1].suffixe(Liste("NT"))
    data_tycat(listes)
    _ = input()
    # Append 'SE' after itself (self-concatenation path of suffixe).
    print("on ajoute 'SE' apres elle-meme")
    listes[0].suffixe(listes[0])
    data_tycat(listes)
if __name__ == "__main__":
test_listes()
| true |
d16684c54366d4923047d2a50c276934123ae5c2 | Python | sukhleen-kaur/Autonomous_Systems_Practical | /sudo/brain/src/body/arduino.py | UTF-8 | 4,393 | 3 | 3 | [] | no_license |
import time
import serial
import brain
import logging
import util.nullhandler
logging_namespace = 'Borg.Brain.Util.Arduino'
logging.getLogger(logging_namespace).addHandler(util.nullhandler.NullHandler())
class Arduino(object):
"""
Used to get basic sensor information from the Arduino device.
WARNING:
- Make sure to add yourself to the dailout group:
sudo usermod -a -G dialout <username>
TODO: Fix issues related to hotplugging the device (e.d. after starting this module).
"""
def __init__(self, port = "/dev/ttyACM0"):
self.__logger = logging.getLogger(logging_namespace)
self.__port = port
self.__serial = None
self.__pre_settings = None
self.__retry_timeout = 3.0
self.__retry_start = time.time()
self.__topic_dict = {}
#Used to buffer part of a line (so we are certain to process a complete single line);
self.__pre_line = ""
#States: CONNECT, RUN, RETRY
self.__state = "CONNECT"
def __connect(self):
try:
self.__disconnect()
#Non-blocking:
self.__serial = serial.Serial(self.__port, 9600, timeout = 0)
if self.__pre_settings:
self.__serial.applySettingsDict(self.__pre_settings)
self.__serial.open()
self.__pre_settings = self.__serial.getSettingsDict()
self.__logger.info("Connected to Arduino on %s" % self.__port)
return True
except Exception as e:
self.__logger.error(e)
self.__logger.error("Unable to connect, retrying in %s seconds..." % self.__retry_timeout)
return False
def __disconnect(self):
if self.__serial:
self.__pre_line = ""
self.__serial.close()
def __receive(self):
try:
line = self.__serial.readline(255)
#Make sure to process a complete line:
if line == "":
return True
if line[-1] == '\n':
line = self.__pre_line + line[:-1]
self.__pre_line = ""
try:
self.__decode_line(line)
except Exception as e:
self.__logger.warning(e)
self.__logger.warning("Decoding error... (-> noise or wrong device selected?).")
else:
self.__pre_line += line
return True
except Exception as e:
self.__logger.error(e)
self.__logger.error("Unable to receive data, retrying in %s seconds..." % self.__retry_timeout)
return False
def __decode_line(self, line):
value_string_list = line.split(",")
for value_string in value_string_list:
(name, value) = value_string.split("=")
(name, value) = (name.strip(), value.strip())
self.__topic_dict[name] = float(value)
def get_state(self):
return self.__state
def is_connected(self):
return not (self.__state == "CONNECT" or self.__state == "RETRY")
def get(self, topic):
if topic in self.__topic_dict:
return self.__topic_dict[topic]
else:
return None
def update(self):
"""
Connects, reads and processes data to and from the Arduino.
To be executed as often as possible (at 10 Hz or so).
"""
if self.__state == "CONNECT":
if self.__connect():
self.__state = "RUN"
else:
self.__state = "RETRY"
self.__retry_start = time.time()
elif self.__state == "RETRY":
if (time.time() - self.__retry_start) > self.__retry_timeout:
self.__state = "CONNECT"
elif self.__state == "RUN":
if not self.__receive():
self.__state = "RETRY"
self.__retry_start = time.time()
def __del__(self):
self.__disconnect()
if __name__ == "__main__":
brain.setup_logging(logging.getLogger(logging_namespace), None, None, "DEBUG")
arduino = Arduino()
while True:
arduino.update()
if arduino.is_connected():
(hum, temp) = (arduino.get("hum"), arduino.get("temp"))
if hum and temp:
print "hum: %f, temp: %f" % (hum, temp)
time.sleep(0.1)
| true |
3f0ac0afd021bdf18ce0344bdfca17f6623df6cf | Python | cooperative-computing-lab/graph-benchmark | /graph_generator_matching/graph_generator/generate.py | UTF-8 | 3,563 | 3.171875 | 3 | [] | no_license | import graph
import time
import sys
import argparse
def main( args ):
# Setup arguments necessary for graph
scale_l = args.scale_l
scale_r = args.scale_r
edge_factor = args.edge_factor
weighted = args.weighted
covered = args.covered
visual = args.visual
rand_probs = args.rand_probs
if args.output is not None:
adj_outfile = 'adj_' + args.output
edge_outfile = 'edge_' + args.output
else:
adj_outfile = 'adj_%dx%dx%d.csv' % (scale_l, scale_r, edge_factor)
edge_outfile = 'edge_%dx%dx%d.csv' % (scale_l, scale_r, edge_factor)
print('\nCovered :', covered)
print('Random :', rand_probs)
print('Weighted :', weighted, '\n')
#############################################################################
############################ Create and Generate ############################
#############################################################################
# Create graph object with input parameters
bipart = graph.Graph(scale_l, scale_r, edge_factor, weighted=weighted)
# Generate bipartite graph with option of coverting all vertices or not
bipart.generate_bipartite(covered=covered, rand_probs=rand_probs)
# Get the stats from the graph that was generated
bipart.get_stats()
# Write the graph to a file in the form of adjacency list or edge list
bipart.write_adj_list(adj_outfile)
#bipart.write_edge_list(edge_outfile)
# Produce visuals of the distribution of the generated graph
if args.visual is True:
bipart.plot_distribution()
bipart.plot_histogram()
#############################################################################
#############################################################################
#############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'This script generates \
scale-free graphs of user specified scale. The graphs can be \
represented as adjacency lists or edge lists. These graphs may then \
be written to files of your specification.')
parser.add_argument('scale_l', help='Scale of the left set of vertices, N,\
where N = 2^scale_l.', type=int)
parser.add_argument('scale_r', help='Scale of the right set of vertices, P,\
where P = 2^scale_r.', type=int)
parser.add_argument('edge_factor', help='Determines number of edges, M, where\
M = edge_factor * N.', type=int)
parser.add_argument('-o', '--output', action='store',
dest='output', type=str, metavar='',
help='Name of file to write graph to. Will be appended to\
edge_ or adj_ depending on type of graph.')
parser.add_argument('-c', '--cover', action='store_true',
dest='covered', help='Whether or not the vertex sets\
should cover all elements from [-N, -1] and [1, P]. NOTE:\
Generates graphs that tend to be consistent with input\
parameters, but forces a higher percentage of matches\
than without.')
parser.add_argument('-w', '--weight', action='store_true', dest='weighted',
help='Whether or not the graph to be generated should \
have weighted edges.')
parser.add_argument('-v', '--visual', action='store_true', dest='visual',
help='Uses pyplot to visualize the degree distibution of\
the generated graph.')
parser.add_argument('-r', '--random', action='store_true', dest='rand_probs',
help='Randomly determines a, b, c, d probabilities during\
generation.')
args = parser.parse_args()
main( args )
| true |
258ab5aa48db7592905da441c6a45072ce32b24f | Python | christopher-roelofs/microgotchi | /hud.py | UTF-8 | 4,287 | 2.6875 | 3 | [
"MIT"
] | permissive | import board
import displayio
import terminalio
from adafruit_display_text import label
import adafruit_imageload
from time import sleep
from util import colors
import util
class Hud:
def __init__(self,pet):
self.pet = pet
self.display = board.DISPLAY
self.font = terminalio.FONT
self.color = colors.black
self.batter_check_cooldown = 1000
self.batter_check_timeout = self.batter_check_cooldown
color_bitmap = displayio.Bitmap(160, 128, 1)
color_palette = displayio.Palette(1)
color_palette[0] = colors.white
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
self.sprite_sheet, self.palette = adafruit_imageload.load("/avatar-0.bmp",bitmap=displayio.Bitmap,palette=displayio.Palette)
self.palette.make_transparent(0)
self.display_group = displayio.Group(max_size=20)
self.display_group.append(bg_sprite)
# Battery label
battery_level = util.get_battery_level()
battery_text = "Battery: {}".format(battery_level)
self.battery_text = label.Label(self.font, text=battery_text, color=self.color)
self.battery_text.x = 85
self.battery_text.y = 120
self.display_group.append(self.battery_text)
# Name label
name_text = "Name: {} ".format(self.pet.get_name())
self.name_label = label.Label(self.font, text=name_text, color=self.color)
self.name_label.x = 10
self.name_label.y = 10
self.display_group.append(self.name_label)
# Age label
self.age_text = "Age: {} ".format(self.pet.get_age())
self.age_label = label.Label(self.font, text=self.age_text, color=self.color)
self.age_label.x = 10
self.age_label.y = 25
self.display_group.append(self.age_label)
# Health label
health_text = "Health: {} ".format(self.pet.health)
self.health_label = label.Label(self.font, text=health_text, color=self.color)
self.health_label.x = 10
self.health_label.y = 40
self.display_group.append(self.health_label)
# Happiness Label
happiness_text = "Happiness: {} ".format(self.pet.happiness)
self.happiness_label = label.Label(self.font, text=happiness_text, color=self.color)
self.happiness_label.x = 10
self.happiness_label.y = 55
self.display_group.append(self.happiness_label)
# Hunger label
hunger_text = "Hunger: {} ".format(self.pet.hunger)
self.hunger_label = label.Label(self.font, text=hunger_text, color=self.color)
self.hunger_label.x = 10
self.hunger_label.y = 70
self.display_group.append(self.hunger_label)
self.sprite = displayio.TileGrid(self.sprite_sheet, pixel_shader=self.palette,width = 1,height = 1,tile_width = 16,tile_height = 16)
self.sprite.x = 55
self.sprite.y = 20
self.sprite[0] = self.pet.get_avatar()
self.sprite_group = displayio.Group(scale=2)
self.sprite_group.append(self.sprite)
self.display_group.append(self.sprite_group)
def update(self):
name_text = "Name: {}".format(self.pet.get_name())
self.name_label.text = name_text
age_text = "Age: {}".format(self.pet.get_age())
self.age_label.text = age_text
hunger_text = "Hunger: {}".format(self.pet.get_hunger())
self.hunger_label.text = hunger_text
happiness_text = "Happiness: {}".format(self.pet.get_happiness())
self.happiness_label.text = happiness_text
health_text = "Health: {} ".format(self.pet.get_health())
self.health_label.text = health_text
if self.batter_check_timeout < 1:
battery_level = util.get_battery_level()
battery_text = "Battery: {}".format(battery_level)
self.battery_text.text = battery_text
self.batter_check_timeout = self.batter_check_cooldown
else:
self.batter_check_timeout -= 1
self.sprite[0] = self.pet.get_avatar()
def draw(self):
self.update()
self.display.show(self.display_group)
| true |
c66f40def05ee13fff0ef5cf8f5e78ebd4ea3c13 | Python | PhyuCin/CP1404PRAC | /Prac_02/word_generator_ver_3.py | UTF-8 | 625 | 3.46875 | 3 | [] | no_license | import random
VOWELS = "aeiou"
CONSONANTS = "bcdfghjklmnpqrstvwxyz"
print("""For word format:
(C)onsonants and 'v' for vowels:""")
word_format = input("Enter the word format using 'c' for consonants and 'v' for vowels: ")
word_format = word_format.lower()
if word_format == "auto":
word_format = ""
word_num = random.randrange(2,13)
for num in range (0, word_num):
word_format += random.choice("c" + "v")
print("Word format:", word_format)
word = ""
for kind in word_format:
if kind == "c":
word += random.choice(CONSONANTS)
else:
word += random.choice(VOWELS)
print(word)
| true |
62834bc8aabd77038298f9a1bb36c4a3fece5d05 | Python | AnastaFilatova/Diploma_1_Base_Python | /diplomskrpt.py | UTF-8 | 2,580 | 2.9375 | 3 | [] | no_license | import requests
from pprint import pprint
with open('token.txt', 'r') as file_object:
token = file_object.read().strip()
class VkUser:
version = '5.130'
url = 'https://api.vk.com/method/'
def __init__(self, token, version):
self.token = token
self.version = version
self.params = {
'access_token': self.token,
'v': self.version
}
self.big_photos = [] # сюда собираются словари с размерами фото 'type': 'z'
self.owner_id = requests.get(self.url + 'users.get', self.params).json()['response'][0]['id']
def get_photos(self, user_id=None):
if user_id is None:
user_id = self.owner_id
fotos_url = self.url + 'photos.get'
fotos_params = {
'count': 1000,
'album_id': 'wall',
'owner_id': user_id,
'extended': 1, # Если был задан параметр extended=1, возвращаются likes — количество отметок Мне нравится
'photo_sizes': 1,
}
self.photos = requests.get(fotos_url, params={**self.params, **fotos_params}).json()
return self.photos
def choose_max_photo(self):
""" Отбирает фото наибольшего формата
Дает названия для фото на основе количества лайков
"""
self.photos = self.get_photos()
# pprint(self.photos)
# for response in self.photos.keys():
# pprint(response['items'][0]['sizes'][-1])
# pprint(self.photos['response']['items'][0]['sizes'][-1])
like_count = 0
for respones in self.photos.values():
for i in respones['items']:
pprint(i)
if i['sizes'][-1]['type'] == 'z':
like_count = i['likes']['count']
self.big_photos.append(i['sizes'][-1])
# pprint(i['sizes'][-1])
# for i['sizes']
# pprint(self.big_photos)
# if ph['response']['items'][0]['sizes'][0]['type'] == 'z':
# print(ph['response']['items'][0]['sizes'][0]['type'])
# self.big_photos.append(photos['response']['items'][0])
# print(self.big_photos)
# return self.big_photos
# def
if __name__ == '__main__':
vk_client_1 = VkUser(token, '5.130')
# f = vk_client_1._get_photos()
vk_client_1.choose_max_photo()
# pprint(z)
# pprint(f)
| true |
6c2dc6fb9b121f0582ae600f8a1514832551617a | Python | skibold/tkinter-example | /LibraryMain.py | UTF-8 | 1,322 | 2.65625 | 3 | [] | no_license | from LibraryView import *
from LibraryDB import LibraryDB
from sys import argv
logfile = None
if(len(argv) >= 2):
logfile = argv[1]
else:
logfile = "library.log"
lib = LibraryDB(logfile)
mw = Tk()
mw.title("Library")
mw.geometry('1000x500')
# setup frames, but don't pack yet
bs = BookSearchFrame(lib, mw)
ls = LoanSearchFrame(lib, mw)
bm = BorrowerManagement(lib, mw)
fm = FineManagement(lib, mw)
# toggle visible frame functions
def showBookSearch():
fm.pack_forget()
bm.pack_forget()
ls.pack_forget()
bs.pack(fill='both', expand=True)
def showLoanSearch():
fm.pack_forget()
bm.pack_forget()
bs.pack_forget()
ls.pack(fill='both', expand=True)
def showFines():
bm.pack_forget()
bs.pack_forget()
ls.pack_forget()
fm.pack(fill='both', expand=True)
def showBorrower():
fm.pack_forget()
bs.pack_forget()
ls.pack_forget()
bm.pack(fill='both', expand=True)
MenuFrame = Frame(mw)
Button(MenuFrame, text="Book Search", command=showBookSearch).grid(column=1,row=0)
Button(MenuFrame, text="Loan Search", command=showLoanSearch).grid(column=2,row=0)
Button(MenuFrame, text="Manage Fines", command=showFines).grid(column=3,row=0)
Button(MenuFrame, text="Manage Borrowers", command=showBorrower).grid(column=4,row=0)
MenuFrame.pack(pady=5)
# show book search frame on startup
showBookSearch()
mw.mainloop()
| true |
7771aa0e4c25e880407f90dbfb177b5e6b8250f1 | Python | zhangwei22/machine-learning | /principle_of_algorithm/source_code/chapter04/testRecommsvd.py | UTF-8 | 896 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Filename : testRecomm01.py
from numpy import *
import numpy as np
import operator
from svdRec import *
import matplotlib.pyplot as plt
eps = 1.0e-6

# Cosine of the angle between two row vectors; eps keeps the
# denominator away from zero (translated from the Chinese comment).
def cosSim(inA, inB):
    norm_product = linalg.norm(inA) * linalg.norm(inB)
    return float(inA * inB.T) / (norm_product + eps)
# 加载修正后数据
A = mat([[5, 5, 3, 0, 5, 5],[5, 0, 4, 0, 4, 4],[0, 3, 0, 5, 4, 5],[5, 4, 3, 3, 5, 5]])
new = mat([[5,5,0,0,0,5]])
U,S,VT = linalg.svd(A.T)
V =VT.T
Sigma = diag(S)
r = 2 # 取前两个奇异值
# 近似后的U,S,V值
Ur = U[:,:r]
Sr = Sigma[:r,:r]
Vr = V[:,:r]
# 计算new的坐标值
newresult = new*Ur*linalg.inv(Sr)
print newresult
maxv = 0 # 最大的余弦值
maxi = 0 # 最大值的下标
indx= 0
# 计算最近似的结果
for vi in Vr:
temp = cosSim(newresult,vi)
if temp > maxv:
maxv = temp
maxi = indx
indx +=1
print maxv,maxi
| true |
9b210ffd005c997d796ab5d29140122d2c77433b | Python | NazneenV/DemoGitRepo | /basic python.py | UTF-8 | 637 | 3.8125 | 4 | [] | no_license | '''p="welcome"
print(p[4:])
print(p[4:-1])
print(ord('B'))
print(max('X,Y,A,B,D'))
s="python"
for i in s:
print(i,end="")'''
var1=10
def fn1():
var1=100 #here ,defining a local var with the same name as global var
print(var1)
fn1()
print(var1) #global variable's var1 value remains unchanged
# output
#100 if there is a clash in the name i.e both local and global var have same name-preference given to local var
#10
var2=10
print(var2)
def fn1():
global var2 #here since
var2=100 #now no difference bet var2 outside and inside
print(var2)
fn1()
print(var2)
#expected output
#10
#100
#100
| true |
9de5a441d3603356f4f342e2574f375923fb75c8 | Python | srf94/adventofcode | /2019/python/day13.py | UTF-8 | 2,587 | 3.203125 | 3 | [] | no_license | from copy import copy
from utils import read_data
from intcode.vm import IntcodeVM
def draw_board(tiles):
    """Print the board, mapping tile ids to display characters.

    Tile ids: 0 empty (" "), 2 block ("B"), 3 paddle ("_"), 4 ball ("O");
    any other id is printed as-is.

    Bug fixed: the original called ``copy.copy(tiles)``, but ``copy`` here
    is the *function* imported via ``from copy import copy``, so that line
    raised AttributeError — and even a working shallow copy would still
    have mutated the caller's inner row lists.  We now build fresh rows.
    """
    display = {0: " ", 2: "B", 3: "_", 4: "O"}
    rows = [[display.get(tile, str(tile)) for tile in row] for row in tiles]
    print("\n".join("".join(str(c) for c in row) for row in rows))
def print_tuple(x, y, tile):
    """Log the coordinates of the paddle (tile 3) or the ball (tile 4)."""
    labels = {3: "Paddle", 4: "Ball"}
    label = labels.get(tile)
    if label is None:
        return
    print("{}: x: {}, y: {}".format(label, x, y))
def single_step(value, vm):
    """Run the VM three times and collect one (x, y, tile) output triple.

    Only the first run is fed *value* as input.
    """
    x = vm.run(value)
    y = vm.run()
    tile = vm.run()
    return x, y, tile
def get_location(board, last, paddle=False, ball=False):
    """Return (x, y) of the paddle (3) or ball (4); fall back to *last*."""
    if paddle:
        target = 3
    elif ball:
        target = 4
    else:
        raise Exception()
    for y, row in enumerate(board):
        if target in row:
            return row.index(target), y
    return last
def ball_intersection(paddle_loc, ball_loc, ball_direction):
    """Predict the ball's x coordinate at the row just above the paddle."""
    rows_to_travel = paddle_loc[1] - ball_loc[1] - 1
    return ball_loc[0] + rows_to_travel * ball_direction
def create_board(vm_input, vm, dim_x, dim_y):
    """Read one (x, y, tile) triple per pixel and assemble the board.

    Bug fixed: the loop used Python 2's ``xrange``, which is a NameError
    under Python 3 (the rest of this file is Python 3 code).
    """
    board = [[0] * dim_x for _ in range(dim_y)]
    for _ in range(dim_x * dim_y):
        x, y, tile = single_step(vm_input, vm)
        board[y][x] = tile
    return board
def play_game(board, vm):
    """Drive the arcade game: follow the ball with the paddle until the VM halts.

    Returns the last score reported by the VM (a tile drawn at x == -1).
    """
    ball_direction = 1
    ball_loc = None
    paddle_loc = None
    last_score = None
    ball_locs = []
    while True:
        ball_loc = get_location(board, ball_loc, ball=True)
        paddle_loc = get_location(board, paddle_loc, paddle=True)
        ball_locs.append(ball_loc)
        # Infer the ball's horizontal direction from its last two positions;
        # keep the previous direction when x did not change.
        if len(ball_locs) > 1:
            diff = ball_locs[-1][0] - ball_locs[-2][0]
            if diff != 0:
                ball_direction = diff
        # Steer the paddle towards where the ball will cross the paddle row.
        intersection = ball_intersection(paddle_loc, ball_loc, ball_direction)
        if intersection > paddle_loc[0]:
            direction = 1
        elif intersection < paddle_loc[0]:
            direction = -1
        else:
            direction = 0
        x, y, tile = single_step(direction, vm)
        if x is None:
            # VM produced no output: it halted, the game is over.
            return last_score
        if x == -1:
            # x == -1 is the score channel, not a board coordinate.
            last_score = tile
        else:
            board[y][x] = tile
raw = read_data(13)[0].split(",")
dim_x = 44
dim_y = 20
vm = IntcodeVM(raw)
board = create_board(0, vm, dim_x, dim_y)
print("Part 1:")
print(sum(sum(i == 2 for i in row) for row in board))
vm = IntcodeVM(raw, mutate_input={0: 2})
board = create_board(0, vm, dim_x, dim_y)
print("Part 2:")
print(play_game(board, vm))
| true |
ad8b2974739a7af15e91e23220d354a5fc6692c3 | Python | novayo/LeetCode | /0092_Reverse_Linked_List_II/try_1.py | UTF-8 | 1,174 | 3.71875 | 4 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:
        """Reverse the sublist between positions ``left`` and ``right``
        (1-indexed, inclusive) in a single pass using head insertion."""
        # Dummy node so the logic also works when left == 1.
        newHead = ListNode(0)
        newHead.next = head
        # Debug helper: print the whole list from the dummy head.
        def print_():
            tmp = newHead
            while tmp:
                print(tmp.val, end=' -> ')
                tmp = tmp.next
            print()
        # First walk to the left boundary: preHead ends just before it,
        # leftHead on the node at position ``left``.
        # (translated from the original Chinese comment)
        preHead = newHead
        leftHead = head
        cur = 1
        while cur < left:
            leftHead = leftHead.next
            preHead = preHead.next
            cur += 1
        # Head insertion: each iteration detaches the node after the fixed
        # tail (preHead) and reinserts it right after preLeftHead; leftHead
        # tracks the current front of the reversed section.
        # (translated from the original Chinese comment)
        preLeftHead = preHead
        curHead = leftHead.next
        preHead = leftHead
        while cur < right:
            preHead.next = curHead.next
            curHead.next = leftHead
            preLeftHead.next = curHead
            leftHead = preLeftHead.next
            curHead = preHead.next
            cur += 1
        # print_()
        return newHead.next
a54b297cd0dd3b87a8b41ceedadbfa1987f2a1aa | Python | madeibao/PythonAlgorithm | /PartA/Py_一个月有多少天.py | UTF-8 | 526 | 3.828125 | 4 | [] | no_license |
# 指定年份 Y 和月份 M,请你帮忙计算出该月一共有多少天。
# 输入:Y = 1992, M = 7
# 输出:31
#================================================================
from typing import List
class Solution():
    def numberOfDays(self, Y: int, M: int) -> int:
        """Return the number of days in month M (1-12) of year Y,
        accounting for Gregorian leap years."""
        # Index 0 is padding so month numbers index directly.
        month_days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        if Y % 400 == 0 or (Y % 4 == 0 and Y % 100 != 0):
            month_days[2] = 29
        return month_days[M]
if __name__ == "__main__":
s = Solution()
print(s.numberOfDays(1995, 8))
| true |
17564971f1ad1a5b6dc616908f06daa99377b49c | Python | yun63/fast | /base/singleton.py | UTF-8 | 707 | 3.03125 | 3 | [] | no_license | # coding=UTF-8
import threading
class Singleton(type):
    """Metaclass caching one instance per class (double-checked locking)."""

    _instance_lock = threading.Lock()
    _instance = {}

    def __call__(cls, *args, **kwargs):
        # Fast path: instance already exists, no locking needed.
        if cls in cls._instance:
            return cls._instance[cls]
        with Singleton._instance_lock:
            # Re-check under the lock in case another thread won the race.
            if cls not in cls._instance:
                obj = super(Singleton, cls).__call__(*args, **kwargs)
                cls._instance[cls] = obj
        return cls._instance[cls]
class SingletonTest(metaclass=Singleton):
    """Demo class: every instantiation returns the same shared object.

    Bug fixed: the original both *inherited from* ``Singleton`` (making
    SingletonTest a subclass of ``type``, so ``SingletonTest()`` raised
    TypeError on Python 3) and set the Python 2-only ``__metaclass__``
    attribute, which Python 3 ignores.  The Python 3 idiom is
    ``metaclass=Singleton``.
    """
    def __init__(self):
        print('SingletonTest.init')

    def load(self):
        print('load')
if __name__ == '__main__':
    t = SingletonTest()
    t2 = SingletonTest()
    # Equal ids confirm both names refer to the single shared instance.
    print(id(t), id(t2))
    t.load()
    t2.load()
| true |
92005905ef017c65d3e3a46d85e5b6007ef596dd | Python | ECMora/SoundLab | /sound_lab_core/ParametersMeasurement/Adapters/WaveletParametersAdapters/WaveletMeanParameterAdapter.py | UTF-8 | 861 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from sound_lab_core.ParametersMeasurement.Adapters.WaveletParametersAdapters.WaveletParameterAdapter import WaveletParameterAdapter
from sound_lab_core.ParametersMeasurement.SpectralParameters.WaveletParameters import WaveletMeanParameter
class WaveletMeanParameterAdapter(WaveletParameterAdapter):
    """
    Adapter class for the wavelet mean parameter.
    """
    def __init__(self):
        WaveletParameterAdapter.__init__(self)
        self.name = "WaveletMean"
    def get_instance(self):
        # Refresh the settings tree before reading the wavelet selection.
        self.compute_settings()
        try:
            # ``unicode`` implies Python 2 here; read the selected wavelet.
            wavelet = self.settings.param(unicode(self.tr(u'Wavelet'))).value()
        except Exception as e:
            # Fall back to the previously stored wavelet when the setting
            # cannot be read (``e`` is deliberately unused).
            wavelet = self.wavelet
        self.wavelet = wavelet
        return WaveletMeanParameter(level=self.level, wavelet=self.wavelet, decimal_places=self.decimal_places)
| true |
5a4adfa0924cfae8f97d6657e1435e6fb1396891 | Python | spurthihemadri/Spurthi-SridharBabu- | /simplecalculator.py | UTF-8 | 4,514 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from tkinter import *
import math
exp = " "
def click(number):
    """Append the pressed button's symbol to the expression and display it."""
    global exp
    exp += str(number)
    s.set(exp)
def clickequal():
    """Evaluate the accumulated expression and show the result.

    Bug fixed: the original assigned a dead local ``expression = ""``
    instead of resetting the global ``exp``, so the old expression kept
    growing after "=" was pressed.  The bare ``except:`` is also narrowed
    to ``Exception``.
    """
    global exp
    try:
        # eval() is acceptable here: input comes only from the app's buttons.
        total = str(eval(exp))
        s.set(total)
    except Exception:
        s.set(" error ")
    exp = ""
def clear():
    """Wipe the stored expression and blank the display."""
    global exp
    exp = ""
    s.set(exp)
if __name__ == "__main__":
r= Tk()
r.configure(background="light grey")
r.title("Simple Calculator")
r.geometry("300x300")
s = StringVar()
entry_field= Entry(r, textvariable=s,font=('Arial',12,'bold'))
entry_field.grid(columnspan=7, ipadx=70)
s.set('|')
b1 = Button(r, text=' 1 ', fg='blue', command=lambda:click(1), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b1.grid(row=2, column=0)
b2 = Button(r, text=' 2 ', fg='blue', command=lambda: click(2), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b2.grid(row=2, column=1)
b3 = Button(r, text=' 3 ', fg='blue', command=lambda: click(3), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b3.grid(row=2, column=2)
b4 = Button(r, text=' 4 ', fg='blue', command=lambda: click(4), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b4.grid(row=2, column=3)
b5 = Button(r, text=' 5 ', fg='blue', command=lambda: click(5), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b5.grid(row=3, column=0)
b6 = Button(r, text=' 6 ', fg='blue', command=lambda: click(6), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b6.grid(row=3, column=1)
b7 = Button(r, text=' 7 ', fg='blue', command=lambda: click(7), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b7.grid(row=3, column=2)
b8 = Button(r, text=' 8 ', fg='blue', command=lambda: click(8), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b8.grid(row=3, column=3)
b9 = Button(r, text=' 9 ', fg='blue', command=lambda: click(9), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b9.grid(row=4, column=0)
b0 = Button(r, text=' 0 ', fg='blue', command=lambda: click(0), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
b0.grid(row=4, column=1)
plus = Button(r, text=' + ', fg='blue', command=lambda: click("+"), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
plus.grid(row=4, column=2)
minus = Button(r, text=' - ', fg='blue', command=lambda: click("-"), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
minus.grid(row=4, column=3)
mul= Button(r, text=' * ', fg='blue', command=lambda: click("*"), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
mul.grid(row=5, column=0)
div = Button(r, text=' / ', fg='blue', command=lambda: click("/"), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
div.grid(row=5, column=1)
equal = Button(r, text=' = ', fg='blue', command=clickequal, height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
equal.grid(row=5, column=2)
clear = Button(r, text='CLR', fg='blue', command=clear, height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
clear.grid(row=5, column=3)
Decimal= Button(r, text='.', fg='blue', command=lambda: click('.'), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
Decimal.grid(row=6, column=0)
Remainder= Button(r, text='REM', fg='blue', command=lambda: click('%'), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
Remainder.grid(row=6, column=1)
openparam= Button(r, text='(', fg='blue', command=lambda: click('('), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
openparam.grid(row=6, column=2)
closeparam= Button(r, text=')', fg='blue', command=lambda: click(')'), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
closeparam.grid(row=6, column=3)
sqroot= Button(r, text='SQRT', fg='blue', command=lambda: click('math.sqrt'), height=2, width=8,relief=RIDGE,borderwidth=3,font=('Arial',8,"bold"))
sqroot.grid(row=7, column=0)
r.mainloop() | true |
180414813eeb2e2298440dc7a4bcc8ea6f0bb092 | Python | MaximumBeings/public | /swaptionPut.py | UTF-8 | 11,914 | 2.71875 | 3 | [] | no_license | """
Author: Oluwaseyi Awoga
IDE: CS50 IDE on Cloud 9/AWS
Topic: ARRC Swaption - LIBOR-SOFR Transition
Sources: David R. Smith - Financial Analyst Journal - May/June 1991
Location: Milky-Way Galaxy
"""
from __future__ import division
import math
from scipy.optimize import fsolve
import sys
import copy
import scipy.stats
import datetime
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
tIME = [0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7]
yTM = [8.8700, 9.0400,9.155,9.2700,9.3150,9.3600,\
9.3850,9.4100,9.4350,9.4600,9.4700,9.4800,\
9.4900,9.5000]
def spotHelper(guess, timeSoFar, semiAYield, soln):
    """Residual of the par-bond pricing equation for the bootstrap solver.

    Discounts every coupon at the already-solved spot rates in *soln* and
    the final coupon plus principal at the candidate rate *guess* (in
    percent); the root of the returned residual is the next spot rate.
    """
    candidate = guess[0] / 100.0
    last = len(timeSoFar) - 1
    pv = 0.0
    for period in range(len(timeSoFar)):
        if period == last:
            pv += (100 + semiAYield / 2.0) / (1 + candidate / 2.0) ** (period + 1)
        else:
            pv += (semiAYield / 2.0) / (1 + soln[period] / 2.0) ** (period + 1)
    return 100.00 - pv
def spotRates(time,ytm,sofRSpread=[0.0]):
    """Bootstrap semi-annual spot rates (as decimals) from yields to maturity.

    NOTE(review): ``sofRSpread=[0.0]`` is a mutable default argument; it is
    only read here so it is harmless, but a ``None`` sentinel would be the
    safer idiom.
    """
    soln = []
    # Starting point (in percent) for the root finder.
    guess = [0.12]
    for x in range(len(time)):
        if x == 0:
            # The first period's spot rate equals its spread-adjusted yield.
            soln.append((ytm[0]+sofRSpread[0])/100)
        else:
            semiAYield = ytm[x]+ sofRSpread[0]
            timeSoFar = copy.deepcopy(time[:x+1])
            data =(timeSoFar,semiAYield,soln)
            # Solve spotHelper(...) == 0 for the next spot rate (in percent).
            temp = fsolve(spotHelper,guess,args=data,xtol=1.49012e-8,)[0]
            soln.append(temp/100)
    return soln
def discountFactorCalculator(zeroRates, tIME):
    """Convert semi-annual spot rates into discount factors.

    Args:
        zeroRates: spot rates, one per semi-annual period.
        tIME: payment dates (unused; kept for interface compatibility).

    Returns:
        A list of discount factors, one per payment date; the factor for
        period i compounds over i+1 half-year periods.
    """
    return [1.0 / (1 + rate / 2.0) ** (period + 1)
            for period, rate in enumerate(zeroRates)]
#discountFactors2 = discountFactorCalculator(zeroRates2,tIME)
def futureValueCurve(discountFactors):
    """Invert each discount factor to obtain the future-value curve.

    Args:
        discountFactors: discount factors, one per payment date.

    Returns:
        A list of future values (reciprocals), one per payment date.
    """
    return [1.0 / factor for factor in discountFactors]
#futureValueCurve2 = futureValueCurve(discountFactors2)
def forwardRateCurve(futureValueCurve, zeroRates):
    """Derive semi-annual forward rates from the future-value curve.

    The first forward rate equals the first spot rate; each later one is
    implied by the ratio of consecutive future values.
    """
    forwards = []
    for i, fv in enumerate(futureValueCurve):
        if i == 0:
            forwards.append(zeroRates[0])
        else:
            growth = fv / futureValueCurve[i - 1]
            forwards.append((growth ** (1 / (0.5 * 2)) - 1) * 2)
    return forwards
#forwardRateCurve2 = forwardRateCurve(futureValueCurve2)
def forwardRateCurve3(zeroRates, tIME):
    """Alternative forward-rate derivation working directly on spot rates
    and their payment times instead of the future-value curve."""
    result = []
    for i, rate in enumerate(zeroRates):
        if i == 0:
            result.append(rate)
            continue
        grow_now = (1 + rate / 2) ** (tIME[i] * 2)
        grow_prev = (1 + zeroRates[i - 1] / 2) ** (tIME[i - 1] * 2)
        ratio = grow_now ** (0.5 * 2) / grow_prev ** (0.5 * 2)
        result.append((ratio ** (1 / (0.5 * 2)) - 1) * 2)
    return result
def annualSpotRate(zeroRates):
    """Convert semi-annually compounded spot rates to effective annual rates.

    Args:
        zeroRates: semi-annual spot rates.

    Returns:
        A list of effective annual rates, one per payment date.
    """
    return [(1 + rate / 2) ** 2 - 1 for rate in zeroRates]
#annualSpotRate2 = annualSpotRate(zeroRates2)
def forwardRateCalculator(zeroRates, sTART, sTOP):
    """Imply the semi-annual forward rate between years sTART and sTOP.

    Takes the ratio of accumulated growth factors at the two spot rates;
    the exponent 1/(2*2) annualises over the intervening span with
    semi-annual compounding.
    """
    grow_stop = (1 + zeroRates[sTOP * 2 - 1] / 2) ** (sTOP * 2)
    grow_start = (1 + zeroRates[sTART * 2 - 1] / 2) ** (sTART * 2)
    per_period = (grow_stop / grow_start) ** (1.0 / (2 * 2)) - 1
    return per_period * 2
def cumm_dens_function_scipy(t):
    """
    Standard normal cumulative distribution function, Phi(t).

    Args:
        param1: (a) t — the evaluation point (e.g. d1/d2 of Black-76).
    Returns:
        P(Z <= t) for a standard normal Z, via scipy.stats.norm.cdf.
    """
    return scipy.stats.norm.cdf(t)
def blackSeventySix(SP,EP,r,v,t):
    """Black-76-style call & put valuation on a forward rate.

    Args:
        SP: underlying forward rate ("security price").
        EP: strike rate ("exercise price").
        r: spot rate used for discounting (semi-annual compounding).
        v: annualised lognormal volatility.
        t: time to expiry in days.

    Returns:
        dict with 'call' and 'put' values, scaled by 100.
    """
    d1 = (math.log(SP/EP) + (0.5 * v * v * t/365.0))/(v * math.sqrt(t/365.0))
    d2 = d1 - v * math.sqrt(t/365.0)
    ND1 = cumm_dens_function_scipy(d1)
    ND2 = cumm_dens_function_scipy(d2)
    # Discrete semi-annual discounting over t days: (1 + r/2)^(-2t/365).
    call_Value = (SP * ND1 - EP * ND2) * ( 1 + 0.5* r)**(t/(-365.0/2)) * 100.0
    # Put value follows from put-call parity.
    put_Value = call_Value - (SP - EP) * ( 1 + 0.5* r)**(t/(-365.0/2)) * 100.0
    result = {'call': call_Value, 'put': put_Value}
    return result
def anuitizedModelPricePutSwaption(cashflow, period, spot):
    """Present value of a level *cashflow* received at the end of each of
    *period* periods, discounted at per-period rate *spot*."""
    return sum(((1 / ((1 + spot) ** k)) * cashflow for k in range(1, period + 1)), 0.0)
"""
Declare the variables
"""
Settlement_Date = datetime.date(1990, 3, 14)
Maturity_Date = datetime.date(1995, 3, 14)
def putPrice(Settlement_Date, Maturity_Date, zeroRates, sTart, sTop,vol, strike,type,display=False):
    """Price one swaption leg with Black-76 and annuitise it to dollars.

    Args:
        Settlement_Date, Maturity_Date: option dates (datetime.date).
        zeroRates: bootstrapped semi-annual spot rates (decimals).
        sTart, sTop: start/end years of the underlying forward period.
        vol: lognormal volatility fed to the Black-76 pricer.
        strike: strike in percent (e.g. 9.5).
        type: 'call' or 'put' — which Black-76 leg to annuitise.
            (NOTE(review): shadows the ``type`` builtin.)
        display: if True, also print the raw option value.

    Returns:
        Present value scaled to the $100,000,000 notional.
    """
    t = (Maturity_Date - Settlement_Date).days
    SP = forwardRateCalculator(zeroRates,sTart,sTop)
    # Discount at the spot rate prevailing at the start of the forward period.
    if sTart == 0:
        r = zeroRates[sTart]
    elif sTart != 0:
        r = zeroRates[sTart*2-1]
    EP = strike/100
    v = vol
    price = blackSeventySix(SP,EP,r,v,t)
    # Annuitise over 2 * 2 semi-annual periods at half the forward rate.
    period = 2 * 2 #
    cashflow = price[type]/2.0
    spot = SP/2.0
    ans = anuitizedModelPricePutSwaption(cashflow,period, spot)/100 * 100000000
    if display==True:
        print(f"Put Value is: {round(price[type],4)}")
        print()
    return ans
def calculateSwaptionPriceLIBOR(display=False):
guess = [0.10]
zeroRates2 = spotRates(tIME,yTM)
discountFactors2 = discountFactorCalculator(zeroRates2,tIME)
futureValueCurve2 = futureValueCurve(discountFactors2)
forwardRateCurve2 = forwardRateCurve(futureValueCurve2, zeroRates2)
annualSpotRate2 = annualSpotRate(zeroRates2)
result = putPrice(Settlement_Date, Maturity_Date, zeroRates2, 5, 7,0.11, 9.500,"put",display)
if display==True:
print(f"Present Value of a Call Swaption with a Notional of $100 Million is: ${round(result,4)}")
return result
zeroRates2 = spotRates(tIME,yTM)
discountFactors2 = discountFactorCalculator(zeroRates2,tIME)
futureValueCurve2 = futureValueCurve(discountFactors2)
forwardRateCurve2 = forwardRateCurve(futureValueCurve2,zeroRates2)
annualSpotRate2 = annualSpotRate(zeroRates2)
d = {'Time' : pd.Series(tIME),'YTM' : pd.Series(yTM),'Spot Rates' : pd.Series(zeroRates2),\
'Disc_Factors' : pd.Series(discountFactors2), 'Future_Value' : pd.Series(futureValueCurve2),\
'Forward_Rate' : pd.Series(forwardRateCurve2), 'Annual_Spot_Rate' : pd.Series(annualSpotRate2)}
df = pd.DataFrame(d,columns=['Time', 'YTM', 'Spot Rates','Disc_Factors',\
'Future_Value','Forward_Rate','Annual_Spot_Rate'])
print("BootStrap Curve - LIBOR")
print()
print(df.to_string(index=False))
print()
print("Results: ")
forwardRateUnderlying = forwardRateCalculator(zeroRates2,5,7)
print()
print("The 7 year Forward Rate Starting in year 5 is: %s " % round(forwardRateUnderlying,4))
print()
calculateSwaptionPriceLIBOR(display=True)
print()
print("*************************************************************************************")
print("*************************************************************************************")
print()
def calculateSwaptionPriceSOFR(SOFR_Spread, display=False):
guess = [0.12]
zeroRates2 = spotRates(tIME,yTMS,SOFR_Spread)
discountFactors2 = discountFactorCalculator(zeroRates2,tIME)
futureValueCurve2 = futureValueCurve(discountFactors2)
forwardRateCurve2 = forwardRateCurve(futureValueCurve2, zeroRates2)
annualSpotRate2 = annualSpotRate(zeroRates2)
result = putPrice(Settlement_Date, Maturity_Date, zeroRates2, 5, 7,0.11, 9.500,"put",display)
if display==True:
print(f"Present Value of a Call Swaption with a Notional of $100 Million is: ${round(result,4)}")
return result
def optimizationfunc(spread):
a = calculateSwaptionPriceLIBOR()
b = calculateSwaptionPriceSOFR(spread)
return (a - b)
yTMS = [8.8700/1.5, 9.0400/1.5,9.155/1.5,9.2700/1.5,9.3150/1.5,9.3600/1.5,\
9.3850/1.5,9.4100/1.5,9.4350/1.5,9.4600/1.5,9.4700/1.5,9.4800/1.5,\
9.4900/1.5,9.5000/1.5]
solutions = fsolve(optimizationfunc,[0.4/100],xtol=1.49012e-08,)
spreadtoUse = solutions[0]
print(f"The Spread Required on SOFR to Equate the Original Present Value is {round(spreadtoUse,4)}")
print()
for x in range(len(yTMS)):
yTMS[x] = spreadtoUse + yTMS[x]
zeroRates2 = spotRates(tIME,yTMS,solutions)
discountFactors2 = discountFactorCalculator(zeroRates2,tIME)
futureValueCurve2 = futureValueCurve(discountFactors2)
forwardRateCurve2 = forwardRateCurve(futureValueCurve2,zeroRates2)
annualSpotRate2 = annualSpotRate(zeroRates2)
print("BootStrap Curve - SOFR Plus Spread")
print()
d = {'Time' : pd.Series(tIME),'YTM' : pd.Series(yTMS),'Spot Rates' : pd.Series(zeroRates2),\
'Disc_Factors' : pd.Series(discountFactors2), 'Future_Value' : pd.Series(futureValueCurve2),\
'Forward_Rate' : pd.Series(forwardRateCurve2), 'Annual_Spot_Rate' : pd.Series(annualSpotRate2)}
df = pd.DataFrame(d,columns=['Time', 'YTM', 'Spot Rates','Disc_Factors',\
'Future_Value','Forward_Rate','Annual_Spot_Rate'])
print(df.to_string(index=False))
print()
print("Results: SOFR Plus Spread")
forwardRateUnderlying = forwardRateCalculator(zeroRates2,5,7)
print()
print("The 7 year Forward Rate Starting in year 5 is: %s " % round(forwardRateUnderlying,4))
print()
calculateSwaptionPriceSOFR([0.0],display=True)
print()
print("**************************************************************************************")
print("**************************************************************************************")
print()
yTMS = [8.8700/1.5, 9.0400/1.5,9.155/1.5,9.2700/1.5,9.3150/1.5,9.3600/1.5,\
9.3850/1.5,9.4100/1.5,9.4350/1.5,9.4600/1.5,9.4700/1.5,9.4800/1.5,\
9.4900/1.5,9.5000/1.5]
print()
zeroRates2 = spotRates(tIME,yTMS, [0.0])
discountFactors2 = discountFactorCalculator(zeroRates2,tIME)
futureValueCurve2 = futureValueCurve(discountFactors2)
forwardRateCurve2 = forwardRateCurve(futureValueCurve2,zeroRates2)
annualSpotRate2 = annualSpotRate(zeroRates2)
d = {'Time' : pd.Series(tIME),'YTM' : pd.Series(yTMS),'Spot Rates' : pd.Series(zeroRates2),\
'Disc_Factors' : pd.Series(discountFactors2), 'Future_Value' : pd.Series(futureValueCurve2),\
'Forward_Rate' : pd.Series(forwardRateCurve2), 'Annual_Spot_Rate' : pd.Series(annualSpotRate2)}
df = pd.DataFrame(d,columns=['Time', 'YTM', 'Spot Rates','Disc_Factors',\
'Future_Value','Forward_Rate','Annual_Spot_Rate'])
print("BootStrap Curve - SOFR Without Spread")
print()
print(df.to_string(index=False))
print()
print()
print("Results: SOFR Without Spread")
forwardRateUnderlying = forwardRateCalculator(zeroRates2,5,7)
print()
print("The 7 year Forward Rate Starting in year 5 is: %s " % round(forwardRateUnderlying,4))
print()
calculateSwaptionPriceSOFR([0.0],display=True)
print()
print("**************************************************************************************")
print("**************************************************************************************")
print()
| true |
c0a08426aaa66b8b9d9191d85173272b5fba7997 | Python | alexander-kononenko/pythonGitBash | /TestCases/test01/test_01.py | UTF-8 | 1,357 | 2.859375 | 3 | [] | no_license | import requests as re
import pytest
print 'Count users which contains 5 in zipcode'
try:
response = re.get('http://jsonplaceholder.typicode.com/users', timeout=(1000, 1))
userTable = response.json()
yes = 0
no = 0
for itemUsr in userTable:
if '5' in str([itemUsr['address']['zipcode']]):
yes += 1
else:
no += 1
print "Number 5 is found for", yes, "users"
print "For", no, "number 5 is not found"
print "//////////////////////////"
print "list with POST from body for used with id=3"
responsePost = re.get('http://jsonplaceholder.typicode.com/posts', timeout=(1000, 1))
postTable = responsePost.json()
listPost = []
for itemPst in postTable:
if itemPst['userId'] == 3:
listPost.append([itemPst['body']])
print listPost
# assert used 1 has todos
responseTodos = re.get('http://jsonplaceholder.typicode.com/todos', timeout=(1000, 1))
todosTable = responseTodos.json()
q = 0
for itemTodos in todosTable:
if itemTodos['userId'] == 1:
q += 1
print "shtyk", q
# assert (q > 0), 'Not passed'
def test_1():
assert q > 0
except re.exceptions.ReadTimeout:
print('Oops. Read timeout occured')
except re.exceptions.ConnectTimeout:
print('Oops. Connection timeout occured!')
| true |
55ca9bed2548e693e1d53a5e8ec7271fef741081 | Python | Gerry84/Python-for-everybody | /6.1.py | UTF-8 | 201 | 2.90625 | 3 | [] | no_license | #6.1
str = 'X-DSPAM-Confidence:0.8475'
stpoint = str.find(':')
stpoint = int(stpoint)
print(stpoint)
length = len(str)
print(length)
number = str[stpoint+1:length]
number = float(number)
print(number)
| true |
ce77d1533df655874df2f6e907ab124d54b8e08c | Python | diwakarjaiswal880/DDCN2019-MNNIT-Allahabad | /code/pattern1.py | UTF-8 | 130 | 3.90625 | 4 | [] | no_license | n=int(input("Enter no of rows: "))
for i in range(n,0,-1):
for j in range(n,i-1,-1):
print(j,end=' ')
print()
| true |
18e11f38b4da4c498de64301c43ff1b3633ea317 | Python | nittyan/word-counter | /word_counter.py | UTF-8 | 1,312 | 2.875 | 3 | [] | no_license | import codecs
import sys
from collections import Counter
from typing import List
from tqdm import tqdm
from janome.analyzer import Analyzer
from janome.tokenfilter import ExtractAttributeFilter, POSKeepFilter
token_filters = [
POSKeepFilter(['名詞', '動詞']),
ExtractAttributeFilter('base_form')
]
analyzer = Analyzer(token_filters=token_filters)
def main():
    """CLI entry point: tokenise the file given as argv[1] and write counts."""
    file_path = sys.argv[1]
    # Runtime message is Japanese: "analysing <file>" — left untranslated
    # because it is user-facing output, not a comment.
    print(f'{sys.argv[1]} を解析')
    tokens: List[str] = analyze(read_file(file_path))
    sorted_tokens: List[str] = count(tokens)
    write_file(sorted_tokens)
def read_file(path: str) -> List[str]:
    """Read a UTF-8 text file and return its lines stripped of whitespace."""
    with codecs.open(path, 'r', 'utf-8') as handle:
        return [line.strip() for line in handle.readlines()]
def write_file(tokens: List[str]):
    """Dump (word, count) pairs to word_count.csv, one pair per line."""
    with codecs.open('word_count.csv', 'w', 'utf-8') as out:
        for pair in tokens:
            out.write(f'{pair[0]},{pair[1]}\n')
def analyze(texts: List[str]) -> List[str]:
    """Morphologically analyse each line with the module-level janome
    ``analyzer`` (configured above to keep nouns/verbs and extract their
    base form), collecting all resulting tokens.

    The tqdm description "解析中" means "analysing".
    """
    tokens = []
    for text in tqdm(texts, desc='解析中'):
        for token in analyzer.analyze(text):
            tokens.append(token)
    return tokens
def count(tokens: List[str]) -> List[str]:
    """Return (token, frequency) pairs ordered by descending frequency."""
    frequencies = Counter(tokens)
    return sorted(frequencies.items(), key=lambda pair: pair[1], reverse=True)
if __name__ == '__main__':
main()
| true |
14a90b1d372d740fb73f091a78839b466d78026f | Python | davidcGIThub/quadcopter_simulation | /exampleAnimation2.py | UTF-8 | 651 | 2.71875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation
list_var_points = (1, 5, 4, 9, 8, 2, 6, 5, 2, 1, 9, 7, 10)
fig, ax = plt.subplots()
xfixdata, yfixdata = 14, 8
xdata, ydata = 5, None
ln, = plt.plot([], [], 'ro-', animated=True)
plt.plot([xfixdata], [yfixdata], 'bo', ms=10)
def init():
ax.set_xlim(0, 15)
ax.set_ylim(0, 15)
return ln,
def update(frame):
ydata = list_var_points[frame]
ln.set_data([xfixdata,xdata], [yfixdata,ydata])
return ln,
ani = FuncAnimation(fig, update, frames=range(len(list_var_points)),
init_func=init, blit=True)
plt.show() | true |
da4932ba434c16b9b5bd4875a29e8ea66583c7c9 | Python | EtienneAmany/Ligue1-2019-2020-season-prediction | /dataframe_prepation.py | UTF-8 | 7,594 | 3.296875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_row', 111)
pd.set_option('display.max_column', 111)
df = pd.read_csv('data/ligue1_0919.csv').drop('Unnamed: 0', axis = 1)
df.drop('Div', axis = 1, inplace = True)
#On drop les lignes avec des NaN
df.drop([2985,818,931], inplace = True)
dates1 = df.Date.iloc[0:379].str.split('-')
dates2 = df.Date.iloc[380:].str.split('/')
#Colonnes Year, Month et Day
#Year
years1 = dates1.apply(lambda x: x[0])
dictyears = {}
for year in list(range(10,19)):
key = str(year)
value = '20'+str(year)
dictyears.update({key :value})
years2 = dates2.apply(lambda x : x[-1]).replace(dictyears)
years = pd.concat([years1,years2])
#Month
month1 = dates1.apply(lambda x : x[1])
month2 = dates2.apply(lambda x : x[1])
months = pd.concat([month1, month2])
#Day
days1 = dates1.apply(lambda x : x[-1])
days2 = dates2.apply(lambda x : x[0])
days = pd.concat([days1,days2])
df['Year'] = years
df['Month'] = months
df['Day'] = days
df.drop('Date', axis = 1, inplace = True)
#How many matches by team
teams = df['HomeTeam'].unique()
matches_played = pd.DataFrame(index = teams, columns = ['Matches Played'])
for team in teams:
x1 = df[df['HomeTeam'] == team]
x2 = df[df['AwayTeam'] == team]
xsum = x1.shape[0] + x2.shape[0]
matches_played.loc[team, "Matches Played"] = xsum
matches_played = matches_played.apply(pd.to_numeric)
#Home wins, away wins, home losses, away losses, home draws, away losses
columns = ['home_wins', 'home_losses', 'home_draws', 'away_wins', 'away_losses', "away_draws",
'half_home_wins','half_home_losses', 'half_home_draws',
'half_away_wins', 'half_away_losses', 'half_away_draws']
df1 = pd.DataFrame(index = teams, columns = columns)
for team in teams:
x1 = df[df['HomeTeam'] == team]
x2 = df[df['AwayTeam'] == team]
df1.loc[team, 'home_wins'] = x1[x1['Full Time Result'] == "H"].shape[0]
df1.loc[team, 'home_losses'] = x1[x1['Full Time Result'] == "A"].shape[0]
df1.loc[team, 'home_draws'] = x1[x1['Full Time Result'] == "D"].shape[0]
df1.loc[team, 'away_wins'] = x2[x2['Full Time Result'] == 'A'].shape[0]
df1.loc[team, 'away_losses'] = x2[x2['Full Time Result'] == 'H'].shape[0]
df1.loc[team, 'away_draws'] = x2[x2['Full Time Result'] == "D"].shape[0]
df1 = df1.apply(pd.to_numeric)
df1 = df1.merge(matches_played, left_index = True, right_index = True, how = 'left')
#adding total wins, total losses and total draws
df1['total_wins'] = df1['home_wins'] + df1['away_wins']
df1['total_losses'] = df1['home_losses'] + df1['away_losses']
df1['total_draws'] = df1['home_draws'] + df1['away_draws']
#% wins, % losses, % draws
df1['% wins'] = 100*df1['total_wins']/df1['Matches Played']
df1['% losses'] = 100*df1['total_losses']/df1['Matches Played']
df1['% draws'] = 100*df1['total_draws']/df1['Matches Played']
#Same thing but with half time results
for team in teams:
x1 = df[df['HomeTeam'] == team]
x2 = df[df['AwayTeam'] == team]
df1.loc[team, 'half_home_wins'] = x1[x1['Half Time Result'] == "H"].shape[0]
df1.loc[team, 'half_home_losses'] = x1[x1['Half Time Result'] == "A"].shape[0]
df1.loc[team, 'half_home_draws'] = x1[x1['Half Time Result'] == "D"].shape[0]
df1.loc[team, 'half_away_wins'] = x2[x2['Half Time Result'] == 'A'].shape[0]
df1.loc[team, 'half_away_losses'] = x2[x2['Half Time Result'] == 'H'].shape[0]
df1.loc[team, 'half_away_draws'] = x2[x2['Half Time Result'] == "D"].shape[0]
df1['half_total_wins'] = df1['half_home_wins'] + df1['half_away_wins']
df1['half_total_losses'] = df1['half_home_losses'] + df1['half_away_losses']
df1['half_total_draws'] = df1['half_home_draws'] + df1['half_away_draws']
#% wins, % losses, % draws
df1['% half wins'] = 100*df1['half_total_wins']/df1['Matches Played']
df1['% half losses'] = 100*df1['half_total_losses']/df1['Matches Played']
df1['% half draws'] = 100*df1['half_total_draws']/df1['Matches Played']
#Regarding the rest of the columns, we want : the total at home, away, the mean by match for every team
df.columns
cols_home = ['Full Time Home Team Goals', 'Half Time Home Team Goals',
'Home Team Shots', 'Home Team Shots on Target',
'Home Team Fouls Committed', 'Home Team Corners',
'Home Team Yellow Cards','Home Team Red Cards']
cols_away = ['Full Time Away Team Goals','Half Time Away Team Goals',
'Away Team Shots', 'Away Team Shots on Target',
'Away Team Fouls Committed','Away Team Corners',
'Away Team Yellow Cards', 'Away Team Red Cards']
#Home
for col in cols_home:
ft1 = 'Total ' + col
ft2 = col + ' mean'
df1[ft1] = float('NaN')
df1[ft2] = float("NaN")
for team in teams:
x1 = df[df['HomeTeam'] == team]
df1.loc[team, ft1] = x1[col].sum()
df1.loc[team, ft2] = x1[col].mean()
#Away
# Build per-team aggregate features from away fixtures: for every raw
# away-match column, add a season total and a per-match mean to df1.
# df1 is indexed by team name (see the symmetric Home loop above).
for col in cols_away:
    ft1 = 'Total ' + col
    ft2 = col + ' mean'
    # Pre-create the columns as NaN so teams with no away matches stay NaN.
    df1[ft1] = float('NaN')
    df1[ft2] = float("NaN")
    for team in teams:
        # All fixtures where this team played away.
        x1 = df[df['AwayTeam'] == team]
        df1.loc[team, ft1] = x1[col].sum()
        df1.loc[team, ft2] = x1[col].mean()
# Column subsets of df1: home-side features vs away-side features.
home = ['home_wins', 'home_losses', 'home_draws', 'half_home_wins', 'half_home_losses', 'half_home_draws',
        'Matches Played', 'total_wins', 'total_losses', 'total_draws', '% wins',
        '% losses', '% draws', 'half_total_wins', 'half_total_losses',
        'half_total_draws', '% half wins', '% half losses', '% half draws',
        'Total Full Time Home Team Goals', 'Full Time Home Team Goals mean',
        'Total Half Time Home Team Goals', 'Half Time Home Team Goals mean',
        'Total Home Team Shots', 'Home Team Shots mean',
        'Total Home Team Shots on Target', 'Home Team Shots on Target mean',
        'Total Home Team Fouls Committed', 'Home Team Fouls Committed mean',
        'Total Home Team Corners', 'Home Team Corners mean',
        'Total Home Team Yellow Cards', 'Home Team Yellow Cards mean',
        'Total Home Team Red Cards', 'Home Team Red Cards mean']
away = ['away_wins', 'away_losses',
        'away_draws', 'half_away_wins', 'half_away_losses', 'half_away_draws',
        'Matches Played', 'total_wins', 'total_losses', 'total_draws', '% wins',
        '% losses', '% draws', 'half_total_wins', 'half_total_losses',
        'half_total_draws', '% half wins', '% half losses', '% half draws',
        'Total Full Time Away Team Goals', 'Full Time Away Team Goals mean',
        'Total Half Time Away Team Goals', 'Half Time Away Team Goals mean',
        'Total Away Team Shots', 'Away Team Shots mean',
        'Total Away Team Shots on Target', 'Away Team Shots on Target mean',
        'Total Away Team Fouls Committed', 'Away Team Fouls Committed mean',
        'Total Away Team Corners', 'Away Team Corners mean',
        'Total Away Team Yellow Cards', 'Away Team Yellow Cards mean',
        'Total Away Team Red Cards', 'Away Team Red Cards mean']
#Merging the news features with the original dataframe
df1_home = df1[home]
df1_away = df1[away]
# Persist the per-team feature tables for reuse by other notebooks/scripts.
df1_home.to_csv('data/df1_home.csv')
df1_away.to_csv('data/df1_away.csv')
df_final = df.copy()
# NOTE(review): how='right' keeps every row from the team-feature tables and
# drops matches whose team is absent from df1 -- confirm that is intended.
df_final = df_final.merge(df1_home, right_index = True, left_on='HomeTeam', how='right')
df_final = df_final.merge(df1_away, right_index = True, left_on='AwayTeam', how='right')
#df_final.reset_index(drop = True, inplace = True)
df_final.to_csv('df_final.csv', index = False)
| true |
d04a37542cebf3d68ef2c572d844943a63cb6a8a | Python | BurakYyurt/pystras | /scripts/strain.py | UTF-8 | 340 | 2.90625 | 3 | [] | no_license | import numpy as np
def engineering_strain(gradient):
    """Small (engineering) strain tensor: 0.5 * (F + F^T) - I.

    Parameters
    ----------
    gradient : array_like
        Square deformation-gradient matrix F. The identity was previously
        hard-coded to 3x3; it is now sized from the input so 2D (plane)
        problems work too, while 3x3 inputs behave exactly as before.
    """
    gradient = np.asarray(gradient)
    return 0.5 * (gradient + gradient.T) - np.identity(gradient.shape[0])
def green_lagrange(gradient):
    """Green-Lagrange strain tensor: 0.5 * (F^T F - I).

    The identity is sized from the input (was hard-coded 3x3), so the
    function now also accepts 2D deformation gradients; 3x3 behavior is
    unchanged.
    """
    gradient = np.asarray(gradient)
    return 0.5 * (np.dot(gradient.T, gradient) - np.identity(gradient.shape[0]))
def green_lagrange_rate(gradient, gradient_rate):
    """Material time derivative of the Green-Lagrange strain.

    Computes 0.5 * (dF^T F + F^T dF), i.e. the symmetric part of the
    product dF^T F, where F is the deformation gradient and dF its rate.
    """
    product = np.matmul(gradient_rate.T, gradient)
    return 0.5 * (product + product.T)
| true |
1d7803e872d8d9854d40650976ee1ce1c05167ca | Python | vodneva/steps | /server.py | UTF-8 | 293 | 2.53125 | 3 | [] | no_license | import socket
# Minimal blocking TCP echo server: accepts one client at a time on port 2222
# and echoes every received chunk back until the client disconnects.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socked created!")
# Listen on all interfaces, port 2222, with a backlog of 10 pending clients.
s.bind(('0.0.0.0', 2222))
s.listen(10)
while True:
    conn, addr = s.accept()
    while True:
        data = conn.recv(1024)
        # Empty read means the peer closed the connection.
        if not data: break
        # NOTE(review): under Python 3, recv() returns bytes, so this str
        # comparison is never true (should be b'close'); under Python 2 it
        # works. Confirm which interpreter this script targets.
        if data == 'close': break
        conn.send(data)
    conn.close()
| true |
765f73a25064f5c13262977a3e14d509f6b63758 | Python | sidv/Assignments | /Bhargava_Krishna/AUG_9_10/greater_4.py | UTF-8 | 375 | 4.0625 | 4 | [] | no_license | a = int(input("Enter 1st number"))
b = int(input("Enter 2nd number"))
c = int(input("Enter 3rd number"))
d = int(input("Enter 4th number"))
# Report the greatest of a, b, c, d (variable `a` is read just above).
# Use >= so ties are handled: the old strict-> chain printed the LAST number
# whenever the maximum was shared (e.g. a=b=5, c=d=1 printed d=1).
if a >= b and a >= c and a >= d:
    print("The greater num is" + str(a))
elif b >= c and b >= d:
    print("The greater num is" + str(b))
elif c >= d:
    print("The greater num is" + str(c))
else:
    print("The greater num is" + str(d))
| true |
2194ff211843889bb646cd8690503232eb4c03ae | Python | andrthu/mek4250 | /adjoint/my_bfgs/lbfgs.py | UTF-8 | 16,581 | 2.578125 | 3 | [] | no_license | import numpy as np
from linesearch.strong_wolfe import *
from scaler import PenaltyScaler
from diagonalMatrix import DiagonalMatrix
#from matplotlib.pyplot import *
from my_vector import SimpleVector, MuVector,MuVectors
from LmemoryHessian import LimMemoryHessian, MuLMIH
class LbfgsParent():
    """
    Parent class for L-BFGS optimization algorithm.

    NOTE(review): this module is Python 2 code (dict.iteritems/has_key below
    and print statements elsewhere in the file).
    """
    def __init__(self,J,d_J,x0,Hinit=None,options=None,scale=None):
        """
        Initials for LbfgsParent
        Valid options are:
        * J : Functional you want minimized
        * d_J : Gradient of the functional
        * x0 : Initial guess
        * Hinit : Initial approximation for the inverted hessian
        * options : Options are spesific to the sunder classes
        """
        self.J = J
        self.d_J = d_J
        self.x0 = x0
        self.set_options(options)
        if Hinit==None:
            # Default initial inverse-Hessian approximation: identity,
            # stored as a DiagonalMatrix rather than a dense array.
            #self.Hinit = np.identity(len(x0))
            self.Hinit = DiagonalMatrix(len(x0))
        # scale_problem returns None when no scaling is requested.
        self.scaler = self.scale_problem(scale)
    def set_options(self,user_options):
        """
        Method for setting options: start from the subclass defaults and
        overwrite with any user-supplied key/value pairs.
        """
        options = self.default_options()
        if user_options!=None:
            # Python 2 only: iteritems().
            for key, val in user_options.iteritems():
                options[key]=val
        #options["line_search_options"]['ftol'] = options['jtol']
        #options["line_search_options"]['gtol'] = max([1-options['jtol'],0.9])
        self.options = options
    def check_convergance(self,df0,k):
        """
        Stopping criterion for the algorithm based on L2norm of gardient.

        Returns 1 (stop) when the RMS gradient norm is below 'jtol' or the
        iteration count k exceeds 'maxiter', else 0 (continue).
        """
        if self.scaler==None:
            grad_norm = np.sqrt(np.sum((df0.array())**2)/len(df0))
        else:
            # Penalty-scaled problem: the tail components (the penalty
            # variables) are weighted by 1/gamma**2 in the norm.
            N = self.scaler.N
            gamma = self.scaler.gamma
            y = df0.array()
            grad_norm = np.sum((y[:N+1])**2)/len(df0)
            grad_norm+=np.sum((y[N+1:])**2)/(len(df0)*gamma**2)
            grad_norm = np.sqrt(grad_norm)
        if grad_norm<self.options['jtol']:
            return 1
        if k>self.options['maxiter']:
            return 1
        return 0
    def scale_problem(self,scale):
        """Wrap J and d_J with a PenaltyScaler; returns the scaler or None."""
        if scale==None:
            return None
        J = self.J
        grad_J = self.d_J
        # x0 may be a custom vector (with .array()) or a plain ndarray.
        try:
            x0 = self.x0.array()
            my_vec = True
        except AttributeError:
            my_vec = False
            x0 = self.x0
        m = scale['m']
        # Python 2 only: has_key().
        if scale.has_key('factor'):
            scaler = PenaltyScaler(J,grad_J,x0,m,
                                   factor=scale['factor'])
        else:
            scaler = PenaltyScaler(J,grad_J,x0,m)
        N = len(x0)-m
        y0 = scaler.var(x0)
        # Re-express functional and gradient in the scaled variables.
        J_ = lambda x: J(scaler.func_var(x))
        grad_J_ = lambda x : scaler.grad(grad_J)(scaler.func_var(x))
        self.J = J_
        if my_vec:
            self.x0 = self.options['Vector'](y0)
        else:
            self.x0 = y0
        self.d_J = grad_J_
        self.scale = True
        if self.options['scale_hessian']==True:
            # Scale the penalty block of the initial inverse Hessian.
            #self.Hinit[range(N+1,N+m),range(N+1,N+m)] = 1./scaler.gamma**2
            self.Hinit.diag[N+1:] = 1./scaler.gamma**2
        return scaler
    def rescale(self,x):
        """Map a scaled control back to the original variables (tail * gamma)."""
        if self.scaler==None:
            return x
        N = self.scaler.N
        gamma=self.scaler.gamma
        y = x.array()
        y[N+1:] = y[N+1:].copy()*gamma
        return self.options['Vector'](y)
    def default_options(self):
        """
        Class spesific default options
        """
        raise NotImplementedError, 'Lbfgs.default_options() not implemented'
    def do_linesearch(self,J,d_J,x,p):
        """
        Method that does a linesearch using the strong Wolfie condition
        Arguments:
        * J : The functional
        * d_J : The gradient
        * x : The starting point of the linesearch
        * p : The direction of the search, i.e. -d_J(x)
        Return value:
        * x_new : The ending point of the linesearch
        * alpha : The step length
        """
        x_new = x.copy()
        Vec = self.options['Vector']
        def phi(alpha):
            """
            Convert functional to a one variable functon dependent
            on step size alpha
            """
            x_new=x.copy()
            x_new.axpy(alpha,p)
            return J(x_new.array())
        def phi_dphi(alpha):
            """
            Derivative of above function
            """
            x_new = x.copy()
            x_new.axpy(alpha,p)
            f = J(x_new.array())
            # Directional derivative along p at x + alpha*p.
            djs = p.dot(Vec(d_J(x_new.array())))
            return f,float(djs)
        phi_dphi0 = J(x.array()),float(p.dot(Vec(d_J(x.array()))))
        # NOTE(review): `ls` is only defined for "strong_wolfe"; any other
        # line_search option would hit a NameError at ls.search below.
        if self.options["line_search"]=="strong_wolfe":
            ls_parm = self.options["line_search_options"]
            ftol = ls_parm["ftol"]
            gtol = ls_parm["gtol"]
            xtol = ls_parm["xtol"]
            start_stp = ls_parm["start_stp"]
            ls = StrongWolfeLineSearch(ftol,gtol,xtol,start_stp,
                                       ignore_warnings=False)
        alpha = ls.search(phi, phi_dphi, phi_dphi0)
        x_new=x.copy()
        x_new.axpy(alpha,p)
        return x_new, float(alpha)
    def solve(self):
        """
        Method that does optimization
        """
        raise NotImplementedError, 'Lbfgs.default_solve() not implemented'
########################################
##########################
############################
######################### LBFGS
############################
##############################################
class Lbfgs(LbfgsParent):
    """
    Straight foreward L_BFGS implementation
    """
    def __init__(self,J,d_J,x0,pc=None,Hinit=None,options=None,scale=None):
        """
        Initials for LbfgsParent
        Valid options are:
        * J : Functional you want minimized
        * d_J : Gradient of the functional
        * x0 : Initial guess
        * Hinit : Initial approximation for the inverted hessian
        * options : Options are as follows:
            - jtol : Stopping tolerance
            - maxiter : maximal amount of allowed iteration before exiting solver
            - line_search_options: options for the linesearch
            - mem_lim : Number of iterations the inverted hessian remembers
            - Hinit : Initial inverted Hessian
            - beta : scaling variable for inverted hessian
            - return_data : boolean return the data instance or control
        """
        LbfgsParent.__init__(self,J,d_J,x0,Hinit=Hinit,options=options,scale=scale)
        mem_lim = self.options['mem_lim']
        beta = self.options["beta"]
        self.pc = pc
        # With a preconditioner, search directions go through pc_direction.
        if pc==None:
            self.p_direction = self.direction
        else:
            self.p_direction = self.pc_direction
        Hessian = LimMemoryHessian(self.Hinit,mem_lim,beta=beta)
        self.data = {'control' : self.x0,
                     'iteration' : 0,
                     'lbfgs' : Hessian ,
                     'scaler' : self.scaler,}
    def default_options(self):
        """
        Method that gives sets the default options
        """
        ls = {"ftol": 1e-3, "gtol": 0.9, "xtol": 1e-1, "start_stp": 1}
        default = {"jtol" : 1e-4,
                   "rjtol" : 1e-6,
                   "gtol" : 1e-4,
                   "rgtol" : 1e-5,
                   "maxiter" : 200,
                   "display" : 2,
                   "line_search" : "strong_wolfe",
                   "line_search_options" : ls,
                   "mem_lim" : 5,
                   "Vector" : SimpleVector,
                   "Hinit" : "default",
                   "beta" : 1,
                   "return_data" : False,
                   "scale_hessian" : False,}
        return default
    def direction(self,grad,H):
        # Quasi-Newton search direction: p = -H * grad.
        return H.matvec(-grad)
    def pc_direction(self,grad,H):
        # Preconditioned search direction: p = -pc(H * grad).
        Vec = self.options['Vector']
        return -Vec(self.pc(H.matvec(grad).array()))
    def solve(self):
        """
        Method that solves the opttmizaton problem
        Return value:
        * x : The optimal control
        or :
        * data
            - control : The optimal control
            - iterations : number of iterations reqiered
            - lbfgs : The class of the limited memory inverted hessian
        """
        Vec = self.options['Vector'] # Choose vector type
        x0 = self.x0 # set initial guess
        n = x0.size() # find number of variables
        x = Vec(np.zeros(n)) # convert to vector class
        Hk = self.data['lbfgs'] # get inverted hessian
        df0 = Vec(self.d_J(x0.array())) # initial gradient
        df1 = Vec(np.zeros(n)) # space for gradient
        iter_k = self.data['iteration']
        p = Vec(np.zeros(n))
        # NOTE(review): tol and max_iter are unused here; convergence is
        # checked inside check_convergance via self.options.
        tol = self.options["jtol"]
        max_iter = self.options['maxiter']
        #the iterations
        while self.check_convergance(df0,iter_k)==0:
            p = self.p_direction(df0,Hk) #Hk.matvec(-df0)
            x,alfa = self.do_linesearch(self.J,self.d_J,x0,p)
            df1.set(self.d_J(x.array()))
            # s and y are the L-BFGS curvature pair (step and gradient change).
            s = x-x0
            """
            if self.scaler!=None:
                s = self.rescale(s)
            """
            y = df1-df0
            #s =self.rescale(s)
            #y = self.rescale(y)
            Hk.update(y,s)
            x0=x.copy()
            df0=df1.copy()
            iter_k=iter_k+1
        self.data['iteration'] = iter_k
        # NOTE(review): 'control' is assigned twice; the second assignment
        # below overwrites this one with the rescaled control.
        self.data['control'] = x
        x = self.rescale(x)
        self.data['control'] = x
        if self.options["return_data"] == True:
            return self.data
        return x
    def one_iteration(self,comm):
        """
        Method that does one iteration of lbfgs.
        The point of this method is to check if parallel actually works.
        """
        rank = comm.Get_rank()
        x0 = self.x0 # set initial guess
        Vec = self.options['Vector'] # Choose vector type
        n = x0.size() # find number of variables
        x = Vec(np.zeros(n)) # convert to vector class
        Hk = self.data['lbfgs'] # get inverted hessian
        df0 = Vec(self.d_J(x0.array())) # initial gradient
        df1 = Vec(np.zeros(n)) # space for gradient
        iter_k = self.data['iteration']
        p = self.p_direction(df0,Hk) #Hk.matvec(-df0)
        x,alfa = self.do_linesearch(self.J,self.d_J,x0,p)
        df1.set(self.d_J(x.array()))
        s = x-x0
        y = df1-df0
        #print y.array(),rank, 'hei'
        Hk.update(y,s)
        x0=x.copy()
        df0=df1.copy()
        return x0
########################################
##########################
############################
######################### MU LBFGS
############################
##############################################
class MuLbfgs(LbfgsParent):
    """
    L-BFGS class made s.t. it can save and take in previous invertad hessians
    and modify them by updating a mu variable. Usful in a penalty setting.
    """
    def __init__(self,J,d_J,x0,Mud_J,Hinit=None,options=None):
        """
        Initials for LbfgsParent
        Valid options are:
        * J : Functional you want minimized
        * d_J : Gradient of the functional
        * x0 : Initial guess
        * Mud_J : Helps with the mu stuff
        * Hinit : Initial approximation for the inverted hessian
        * options : Options are as follows:
            - jtol : Stopping tolerance
            - maxiter : maximal amount of allowed iteration before exiting solver
            - line_search_options: options for the linesearch
            - mem_lim : Number of iterations the inverted hessian remembers
            - Hinit : Initial inverted Hessian
            - beta : scaling variable for inverted hessian
            - mu_val : The current mu
            - old_hessian : memory of previous inverted Hessian
            - save_number : Size of memory taken from old hessian
            - return_data : boolean return the data instance or control
        """
        LbfgsParent.__init__(self,J,d_J,x0,Hinit=Hinit,options=options)
        self.Mud_J = Mud_J
        mem_lim = self.options['mem_lim']
        beta = self.options["beta"]
        mu = self.options["mu_val"]
        H = self.options["old_hessian"]
        save_num = self.options["save_number"]
        # Mu-aware limited-memory inverse Hessian, optionally seeded with
        # memory from a previous (smaller-mu) solve.
        Hessian = MuLMIH(self.Hinit,mu=mu,H=H,mem_lim=mem_lim,beta=beta,
                         save_number=save_num)
        self.data = {'control' : x0,
                     'iteration' : 0,
                     'lbfgs' : Hessian }
    def default_options(self):
        """
        Method that gives sets the default options
        """
        ls = {"ftol": 1e-3, "gtol": 0.9, "xtol": 1e-1, "start_stp": 1}
        default = {"jtol" : 1e-4,
                   "rjtol" : 1e-6,
                   "gtol" : 1e-4,
                   "rgtol" : 1e-5,
                   "maxiter" : 200,
                   "display" : 2,
                   "line_search" : "strong_wolfe",
                   "line_search_options" : ls,
                   "mem_lim" : 5,
                   "Vector" : SimpleVector,
                   "Hinit" : "default",
                   "beta" : 1,
                   "mu_val" : 1,
                   "old_hessian" : None,
                   "penaly_number" : 1,
                   "save_number" :-1,
                   "return_data" : False, }
        return default
    def solve(self):
        """
        Method that solves the opttmizaton problem
        Return value:
        * x : The optimal control
        or :
        * data
            - control : The optimal control
            - iterations : number of iterations reqiered
            - lbfgs : The class of the limited memory inverted hessian
        """
        Vec = self.options['Vector']
        x0=self.x0
        n=x0.size()
        m=self.options["penaly_number"]
        x = Vec(np.zeros(n))
        Hk = self.data['lbfgs']
        mu = self.options["mu_val"]
        # Mud_J supplies the mu-decomposed gradient/control pair used to
        # build the curvature updates for the MuLMIH Hessian.
        mu_df0, mu_x0 = self.Mud_J(x0)
        mu_df1 = None
        mu_x1 = None
        iter_k = self.data['iteration']
        df0 = Vec(self.d_J(x0.array()))
        df1 = Vec(np.zeros(n))
        p = Vec(np.zeros(n))
        # NOTE(review): tol and max_iter are unused here (see check_convergance).
        tol = self.options["jtol"]
        max_iter = self.options['maxiter']
        while self.check_convergance(df0,iter_k)==0:
            p = Hk.matvec(-df0)
            x,alfa = self.do_linesearch(self.J,self.d_J,x0,p)
            df1.set(self.d_J(x.array()))
            mu_df1,mu_x1 = self.Mud_J(x)
            # Curvature pair expressed in the mu-decomposed quantities.
            Hk.update(mu_df1-mu_df0,mu_x1-mu_x0)
            mu_df0 = mu_df1.copy()
            mu_x0 = mu_x1.copy()
            x0=x.copy()
            df0=df1.copy()
            iter_k=iter_k+1
        self.data['iteration'] = iter_k
        self.data['control'] = x
        if self.options["return_data"]:
            return self.data
        return x
if __name__== "__main__":
    # Smoke test: minimize sum((x_i - 1)^2); the optimum is the all-ones
    # vector. Python 2 print statement below.
    def J(x):
        s=0
        for i in range(len(x)):
            s = s + (x[i]-1)**2
        return s
    def d_J(x):
        # Analytic gradient of J.
        return 2*(x-1)
    x0=SimpleVector(np.linspace(1,30,30))
    solver = Lbfgs(J,d_J,x0)
    print solver.solve().array()
| true |
0aa062d9a2ea67d5657a434f513d62bf232cb9da | Python | chenxu0602/LeetCode | /2309.greatest-english-letter-in-upper-and-lower-case.py | UTF-8 | 413 | 3.234375 | 3 | [] | no_license | #
# @lc app=leetcode id=2309 lang=python3
#
# [2309] Greatest English Letter in Upper and Lower Case
#
# @lc code=start
class Solution:
    def greatestLetter(self, s: str) -> str:
        """Return the greatest English letter occurring in *s* in both
        upper and lower case, or '' if there is none."""
        seen = set(s)
        # Scan candidates from 'Z' down so the first hit is the greatest.
        for letter in "ZYXWVUTSRQPONMLKJIHGFEDCBA":
            if letter in seen and letter.lower() in seen:
                return letter
        return ''
# @lc code=end
| true |
0ad8716f210685ab8a92f60d216b2a7be6a2a7da | Python | cruizeship/competitive-programming | /USACO-Bronze:Training-python/USACO-whereami/main.py | UTF-8 | 2,047 | 3.40625 | 3 | [] | no_license | '''
ID: cruzan1
LANG: PYTHON3
TASK: whereami
'''
#Misinterpreted the problem - At first, I thought the problem wanted you to find the unique strings and find the minimum length of these strings, but then I read over the problem again, and it said to instead find the smallest value of K for a string of length K that can be found with any consecutive string of length K in the total string.
#Switched code so now it processes the largest pair of same strings and returns the length of one of the strings plus 1 - There is still a bunch of useless code in the program that are finding the unique strings.
def inPut():
    """Read whereami.in and return the map string as a list of letters."""
    # NOTE(review): the file handle is never closed, and numHouses (the
    # first input line) is read but unused -- presumably only the letter
    # string matters; confirm against the USACO input format.
    f = open('whereami.in', 'r')
    numHouses = f.readline().strip()
    houseLetterLst = list(f.readline().strip())
    return houseLetterLst
def calculate(houseLetterLst):
    """Return the smallest window length K such that every run of K
    consecutive letters in the map occurs at most once.

    Equivalent to the original brute force (length of the longest duplicated
    substring, plus one), but each candidate length is checked with a single
    set-uniqueness test instead of the quadruple nested loop, reducing the
    work from roughly O(n^4) to O(n^2) string comparisons.
    """
    n = len(houseLetterLst)
    if n == 0:
        # Mirror the original behaviour for empty input (0 + 1).
        return 1
    s = ''.join(houseLetterLst)
    for k in range(1, n + 1):
        windows = [s[i:i + k] for i in range(n - k + 1)]
        # All windows of length k distinct -> k is the answer.
        if len(windows) == len(set(windows)):
            return k
    return n
# Write the single answer line; the matching out.close() follows directly.
out = open('whereami.out', 'w')
out.write(str(calculate(inPut())) + '\n')
out.close() | true |
fa5b1870e499d76bee73f5929cada2a40e2f07ee | Python | leetonfreestyle/repo | /main.py | UTF-8 | 9,259 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# -- coding:utf-8 --
from support import *
import math
import Queue
import threading
import time
class Segmenter(object):
kMIRA = 5
beamSize = 10
model = Model()
wq = Queue.Queue()
isAllTerminated = False
# validSequence map, used in _validSequence()
_vsMap = {
'#':['B', 'S'],
'B':['I', 'E'],
'I':['I', 'E'],
'E':['B', 'S'],
'S':['B', 'S']
}
def _validSequence(self, preTag, tag, isTrainMode):
'''
'''
if isTrainMode:
return True
if preTag in self._vsMap:
if tag in self._vsMap[preTag]:
return True
else:
return False
else:
print("Error! In validSequence(), invalid preTag %s"%preTag)
exit(-1)
def getAllValidActions(self, state, isTrainMode):
'''
'''
preTag = '#'
if state.getAction():
preTag = state.getAction()
valideActions = []
allPossibleActions = self.model.allPossibleActions()
for action in allPossibleActions:
if self._validSequence(preTag, action, isTrainMode):
valideActions.add(action)
return valideActions
def decodeBeamSearch(self,sent,trainType):
'''解码函数
Args:
sent:类型为Sentence
trainType:提供多种模式,有test, standard, early, max,MIRA
Return:
一个SegState对象数组,通常为两个元素,MIRA模式下返回多个元素,第一个元素为最佳解,其余为次优解
Raise:
None
'''
isTrainMode = False
goldActions = [] # <str>
goldState = None
goldActionPosition = 0
results = [None] * 2
agenda = []# <SegState>
heap = []# <SegState>
scoreBoard = [float("-inf")] * self.beamSize
# for gold-standard state
if trainType != "test":
isTrainMode = True
goldActions = sent.getAllActions()
goldState = sent.buildInitState()
# for max-violation
if trainType == "max":
goldPartialStates = []# <SegState>
predPartialStates = []# <SegState>
maxViolationPosition = -1
maxMargin = float("-inf")
if trainType == "MIRA":
results = [None] * (self.kMIRA + 1)
agenda.append(sent.buildInitState())
circle = 0
while True:
circle += 1
if circle > 1000:
print "*"
for state in agenda:
print "(%d)"%state.getStep()
# ==========get gold action for the current step====================
goldAction = ""
lengthOfGoldActions = goldActions.__len__()
if lengthOfGoldActions != 0:
if goldActionPosition < lengthOfGoldActions:
goldAction = goldActions[goldActionPosition]
goldActionPosition += 1
if goldAction != "":
goldState = goldState.transit(goldAction,True,model)
# ==========one step transit for each state==============
scoreBoard = [float("-inf")] * self.beamSize
heap = []
# build new state
for state in agenda:
if state.isTerminated():
heap.append(state)
continue
unlabeledFeatures = state.getUnlabeledFeatures()
actions = self.getAllValidActions(state,isTrainMode)
for action in actions:
labeledFeatures = [] # <str>
for feature in unlabeledFeatures:
labeledFeatures.append("%s:%s"%(feature,action))
score = model.score(model.getFeatureVecotr(labeledFeatures)) + state.getScore()
#error handling on variable score
if state < min(scoreBoard):
continue
if goldAction == "":
newState = state.transit(action,True,model)
else:
newState = state.transit(action,goldAction == action,model)
if newState.getScore() < min(scoreBoard):
continue
heap.append(newState)
scoreBoard[-1] = newState.getScore()
scoreBoard.sort(reverse=True)
# keep k-best state
agenda = []
if heap.__len__() == 0:
print "Parsing Fault."
# exit()
else:
heap.sort(key=lambda x:x.getScore())
while (heap.__len__() != 0) and (agenda.__len__() < self.beamSize):
agenda.append(heap[-1])
del heap[-1]
# ==========================
if trainType == "early":
containedGoldState = None
for state in agenda:
if state.isGold():
containedGoldState = state
break
if containedGoldState == None:
results[0] = goldState
results[1] = agenda[0]
return results
else:
if trainType == "max":
curMargin = agenda[0].getScore() - goldState.getScore()
if curMargin > maxMargin:
maxMargin = curMargin
maxViolationPosition += 1
goldPartialStates.append(goldState)
predPartialStates.append(agenda[0])
# ===========check terminated===================
if self.isAllTerminated:# terminated when all state in the beam reach terminal state
allterm = True
for state in agenda:
if not state.isTerminated():
allterm = False
break
if allterm:
break
else:# terminated when the best state reach the terminal state
if agenda.__len__() != 0 and agenda[0].isTerminated():
break
if trainType == "max":
results[0] = goldPartialStates[maxViolationPosition]
results[1] = predPartialStates[maxViolationPosition]
elif trainType == "MIRA":
results[0] = goldState
results.extend(agenda)
else:
results[0] = goldState
if agenda.__len__() != 0:
results[1] = agenda[0]
return results
def ParserTask(self,sentences):
'''解码线程函数
Args:
sentences:待解码的Sentence列表
Return:
None
Raise:
None
'''
results = []
for one in sentences:
results.append(self.decodeBeamSearch(one,"test")[0].getFinalResult())
self.wq.put(results)
def decodeParalle(self,testSet,outpath,numThreads,numPerTheads):
'''多线程解码函数
Args:
testSet:测试数据集
outpath:解码结果的保存路径
numThreads:总线程数
numPerTheads:每个线程中的任务数
Return:
None
Raise:
None
'''
startTime = time.time()
batch = 0
testSet.reset()
while testSet.hasNext():
print str(batch) + " "
batch += 1
# read #numThreads * miniSize instances
sentences = []
for i in range(numThreads * numPerTheads):
if testSet.hasNext():
sentences.append(testSet.next())
LengthOfSentences = sentences.__len__()
if LengthOfSentences > numThreads:
actualThreads = numThreads
else:
actualThreads = LengthOfSentences
actualMiniSize = int(math.ceil(LengthOfSentences /float(actualThreads)))
# wq = Queue.Queue()
threads = []
for i in range(actualThreads):
startPos = actualMiniSize * i
endPos = startPos + actualMiniSize
if endPos > LengthOfSentences:
endPos = LengthOfSentences
threads.append(threading.Thread(target=self.ParserTask(sentences[startPos:endPos])))
# start threads and join main threads
for t in threads:
t.start()
for t in threads:
t.join()
# fetch the results
results = []
while not self.wq.empty():
results.extend(self.wq.get())
# write file
with open(outpath,'w') as outFile:
for one in results:
outFile.write(one + "\n")
print "Time: %f"%(time.time() - startTime)
def main():
    """Decode the test set with two worker threads, one batch per thread."""
    sg = Segmenter()
    # sg.decodeBeamSearch(Sentence(),'standard')
    sg.decodeParalle(SentenceReader(),"test.txt",2,1)
if __name__ == '__main__':
    main()
| true |
6c6f83480f845ed856eae006cc3294af312ce4f6 | Python | kileung-at-cb/pythonlib | /cardinal_pythonlib/rnc_ui.py | UTF-8 | 3,604 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- encoding: utf8 -*-
"""Support functions for user interaction.
Author: Rudolf Cardinal (rudolf@pobox.com)
Created: 2009
Last update: 24 Sep 2015
Copyright/licensing:
Copyright (C) 2009-2015 Rudolf Cardinal (rudolf@pobox.com).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import errno
import getpass
import os
# noinspection PyUnresolvedReferences
# from six.moves import input
import sys
from typing import Optional
if sys.version_info > (3,):
# Python 3
import tkinter
import tkinter.filedialog
filedialog = tkinter.filedialog
else:
# Python 2
# noinspection PyUnresolvedReferences
import Tkinter
tkinter = Tkinter
# noinspection PyUnresolvedReferences
import tkFileDialog
filedialog = tkFileDialog
def ask_user(prompt: str,
             default: str = None,
             to_unicode: bool = False) -> Optional[str]:
    """Prompt the user on the console, returning the reply or *default*.

    The prompt shows the default in brackets; an empty reply returns it.
    Under Python 2 the prompt is encoded for the terminal and the reply is
    optionally decoded. Under Python 3, input() already works in str, so
    encoding the prompt would print a bytes repr and str.decode would raise
    AttributeError -- hence the version check (matching the py2/py3
    straddling used elsewhere in this module).
    """
    if default is None:
        prompt += ": "
    else:
        prompt += " [" + default + "]: "
    if sys.version_info > (3,):
        # Python 3: input() takes and returns str directly.
        result = input(prompt)
    else:
        result = input(prompt.encode(sys.stdout.encoding))
    if to_unicode and isinstance(result, bytes):
        result = result.decode(sys.stdin.encoding)
    return result if len(result) > 0 else default
def ask_user_password(prompt: str) -> str:
    """Read a password from the console without echoing it."""
    full_prompt = prompt + ": "
    return getpass.getpass(full_prompt)
def get_save_as_filename(defaultfilename: str,
                         defaultextension: str,
                         title: str = "Save As") -> str:
    """Provides a GUI "Save As" dialogue and returns the filename.

    Returns an empty string if the user cancels. NOTE(review): the Tk root
    window created here is withdrawn but never destroyed, so one hidden Tk
    instance is leaked per call -- confirm whether that matters for callers.
    """
    root = tkinter.Tk()  # create and get Tk topmost window
    # (don't do this too early; the command prompt loses focus)
    root.withdraw()  # won't need this; this gets rid of a blank Tk window
    root.attributes('-topmost', True)  # makes the tk window topmost
    filename = filedialog.asksaveasfilename(
        initialfile=defaultfilename,
        defaultextension=defaultextension,
        parent=root,
        title=title
    )
    root.attributes('-topmost', False)  # stop the tk window being topmost
    return filename
def get_open_filename(defaultfilename: str,
                      defaultextension: str,
                      title: str = "Open") -> str:
    """Provides a GUI "Open" dialogue and returns the filename.

    Returns an empty string if the user cancels. NOTE(review): as with the
    save dialogue, the hidden Tk root is never destroyed after use.
    """
    root = tkinter.Tk()  # create and get Tk topmost window
    # (don't do this too early; the command prompt loses focus)
    root.withdraw()  # won't need this; this gets rid of a blank Tk window
    root.attributes('-topmost', True)  # makes the tk window topmost
    filename = filedialog.askopenfilename(
        initialfile=defaultfilename,
        defaultextension=defaultextension,
        parent=root,
        title=title
    )
    root.attributes('-topmost', False)  # stop the tk window being topmost
    return filename
def mkdir_p(path: str) -> None:
    """Create *path* (including parents), tolerating an existing path."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # "Already exists" is fine; any other error propagates.
        if exc.errno != errno.EEXIST:
            raise
| true |
01dc7a912a50aff3bc09ab2ed48965f02922a96b | Python | Noronha1612/wiki_python-brasil | /Estruturas de repetição/ex05.py | UTF-8 | 1,452 | 3.734375 | 4 | [] | no_license | from functions.validação import lerFloat, lerInt
# Read the population and growth rate (%) of each country, with validation.
popA = lerInt('Popoulação do país A: ', pos=True, erro='Digite uma população válida')
while True:
    creA = lerFloat('Taxa de crescimento, em %, do país A: ', pos=True, erro='Digite um valor entre 0 e 100')
    if 0 <= creA <= 100:
        break
    print('Digite um valor entre 0 e 100')
popB = lerInt('População do país B: ', pos=True, erro='Digite uma população válida')
while True:
    creB = lerFloat('Taxa de crescimento, em %, do país B: ', pos=True, erro='Digite um valor entre 0 e 100')
    if 0 <= creB <= 100:
        break
    print('Digite um valor entre 0 e 100')
# [population, growth rate as a fraction, label]
pais1 = [popA, creA/100, 'país A']
pais2 = [popB, creB/100, 'país B']
iguais = False
if pais1[0] == pais2[0]:
    iguais = True
print('-='*35)
if not iguais:
    if pais1[0] > pais2[0]:
        maior = pais1
        menor = pais2
    else:
        maior = pais2
        menor = pais1
    # FIX: use <= instead of <. With equal growth rates the smaller country
    # can never catch up, and the old strict comparison sent that case into
    # the simulation loop below, which then never terminated.
    if menor[1] <= maior[1] or menor[0] == 0:
        print(f'A população do {menor[2]} nunca chegará a população do {maior[2]}.')
    else:
        # Simulate year by year until the smaller country overtakes.
        anos = 0
        while menor[0] < maior[0]:
            anos += 1
            menor[0] += menor[0] * menor[1]
            maior[0] += maior[0] * maior[1]
        print(f'São necessários {anos} anos para a população do {menor[2]} ultrapassar a população do {maior[2]}')
else:
    print('A população dos 2 países são inicialmente iguais.')
print('-='*35)
| true |
9733aa897340a2d16e92811d2e57ccbda69e145f | Python | sergtimosh/GrokkingAlgorithms | /src/sandbox/recursiveSumArray.py | UTF-8 | 138 | 3.15625 | 3 | [] | no_license | def recurSumArr(arr):
    # Base case: an empty list sums to 0.
    if len(arr) == 0:
        return 0
    # Recursive case: first element plus the sum of the rest.
    return arr[0] + recurSumArr(arr[1:])
print(recurSumArr([109, 650, 777]))
e327c1494c586bfe0437f445e47720e8554ab9a6 | Python | parthddosani/edu-search | /qna.py | UTF-8 | 3,367 | 2.875 | 3 | [] | no_license | # Using flask to make an api
# import necessary libraries and functions
from flask import Flask, jsonify, request
from youtube_transcript_api import YouTubeTranscriptApi
import json
from deeppavlov import build_model, configs
from flask_cors import CORS
# Load the stop-word list once at import time. A context manager replaces the
# bare open() so the file handle is closed instead of leaking (the old
# `temp_file` handle was never closed and never used again).
final_stopWords = []
with open('stopwords.txt', 'r') as stopwords_file:
    final_stopWords = [line.rstrip('\n') for line in stopwords_file]
# Build the DeepPavlov SQuAD question-answering model once at startup
# (slow; assumes the model files were downloaded beforehand).
model = build_model(configs.squad.squad, download=False)
# creating a Flask app
app = Flask(__name__)
CORS(app)
# on the terminal type: curl http://127.0.0.1:5000/
# returns hello world when we use GET.
# returns the data that we send when we use POST.
@app.route('/', methods = ['GET', 'POST'])
# NOTE(review): the bare '/' route is also bound to getJSON, but getJSON
# requires <query> and <videoID> URL parts, so GET / will fail with a
# TypeError -- confirm whether a separate index handler was intended.
# Usage: curl http://127.0.0.1:5003/<query>/<videoID>
@app.route('/<query>/<videoID>', methods = ['GET'])
def getJSON(videoID, query):
    # Answer `query` against the YouTube transcript of `videoID` and return
    # JSON: the model's answer plus timestamped transcript phrases that
    # contain any non-stopword of the answer.
    returnList = []
    if query in final_stopWords: #if the query is not good enough to be searched,return -1
        return(json.dumps(returnList))
    keywordList = YouTubeTranscriptApi.get_transcript(videoID)
    full_transcript = ""
    for element in keywordList:
        full_transcript = full_transcript + element['text'] + " "
    #print(full_transcript)
    #query = " What is the goal"
    # Run the SQuAD model; pred[0][0] is the extracted answer span text.
    pred = model([full_transcript], [query])
    pred = pred[0][0]
    print(query)
    print(pred)
    returnList = {"answer" : pred}
    returnList1 = []
    # Split the answer into words and drop stop words before matching.
    l = pred.split(" ")
    #print(l)
    l1 = []
    for el in l:
        #print(el)
        if el not in final_stopWords:
            l1.append(el)
    #print(l1)
    returnList1.append(returnList)
    # NOTE(review): the transcript is re-fetched from YouTube for every
    # answer word -- the keywordList from above could be reused.
    for el in l1:
        keywordList = YouTubeTranscriptApi.get_transcript(videoID)
        for i in keywordList:
            phrase = i['text']
            if el.lower() in phrase.lower():
                temp = {"timestamp": str(i['start']) + 's',
                        "phrase": i['text']}
                returnList1.append(temp)
    # fullString = '{ "results": [ '
    # for i in range(len(returnList)):
    #     if i == len(returnList)-1:
    #         fullString += '{ \"timestamps": \"' + str(returnList[i]['start']) + 's\", \"phrase\": \"' + returnList[i]['text'] + '\" } '
    #     else:
    #         fullString += '{ \"timestamps\": \"' + str(returnList[i]['start']) + 's\", \"phrase\": \"' + returnList[i]['text'] + '\" }, '
    # fullString += '] }'
    return (json.dumps(returnList1))
# for i in keywordList:
# phrase = i['text']
# if query.lower() in phrase.lower():
# temp = {"timestamp": str(i['start']) + 's',
# "phrase": i['text']}
# returnList.append(temp)
# fullString = '{ "results": [ '
# for i in range(len(returnList)):
# if i == len(returnList)-1:
# fullString += '{ \"timestamps": \"' + str(returnList[i]['start']) + 's\", \"phrase\": \"' + returnList[i]['text'] + '\" } '
# else:
# fullString += '{ \"timestamps\": \"' + str(returnList[i]['start']) + 's\", \"phrase\": \"' + returnList[i]['text'] + '\" }, '
# fullString += '] }'
#return (json.dumps(returnList))
# Run the development server when executed directly.
# NOTE(review): port is passed as a string (Flask accepts it) and debug=True
# enables the interactive debugger -- not suitable for production.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port='5003', threaded=True, debug = True)
| true |
d2e69220bb6ea03ba85513636b69df35f77c0a2a | Python | howardh/rl | /test/learner/test_linear_learner.py | UTF-8 | 3,422 | 2.671875 | 3 | [] | no_license | import unittest
import numpy as np
import scipy.sparse
import torch
from tqdm import tqdm
from learner.linear_learner import LinearLearner
#class TestTabularLearner(unittest.TestCase):
#
# LEARNING_RATE = 0.1
# DISCOUNT_FACTOR = 0.9
#
# def setUp(self):
# self.learner = LinearLearner(
# num_features = 3,
# action_space=np.array([0,1]),
# discount_factor=self.DISCOUNT_FACTOR,
# learning_rate=self.LEARNING_RATE,
# trace_factor=0
# )
#
# def test_get_state_action_value(self):
# val1 = self.learner.get_state_action_value(np.array([1,1,-1]),0)
#
# self.learner.weights = torch.from_numpy(np.array([[1,0,0],[0,2,0]])).float().cuda()
#
# expected = 1
# output = self.learner.get_state_action_value(np.array([1,1,-1]),0)
# self.assertAlmostEqual(expected, output, msg="Wrong output")
#
# expected = 2
# output = self.learner.get_state_action_value(np.array([1,1,-1]),1)
# self.assertAlmostEqual(expected, output, msg="Wrong output")
#
# def test_observe_step(self):
# self.learner.weights *= 0
#
# self.learner.observe_step(
# np.array([1,0,0]),
# 0,
# 1,
# np.array([1,0,0]),
# False
# )
# """
# Target = 1+gamma*0 = 1
# prediction = wx = 0
# loss = 0.5(1-wx)^2
# dloss/dw = -(1-wx)x = [-1 0 0]
# times learning rate of 0.1, and negative: [0.1 0 0]
# """
# expected = np.array([[0.1,0,0],[0,0,0]])
# output = self.learner.weights.cpu().numpy()
# diff = np.sum(expected-output)
# self.assertAlmostEqual(diff, 0, msg="Gradient is wrong")
#
# self.learner.observe_step(
# np.array([0,1,0]),
# 1,
# 1,
# np.array([0,1,0]),
# False
# )
# expected = np.array([[0.1,0,0],[0,0.1,0]])
# output = self.learner.weights.cpu().numpy()
# diff = np.sum(expected-output)
# self.assertAlmostEqual(diff, 0, msg="Gradient is wrong")
#class TestTabularLearnerTraces(unittest.TestCase):
#
# LEARNING_RATE = 0.1
# DISCOUNT_FACTOR = 0.9
#
# def setUp(self):
# self.learner = LinearLearner(
# num_features = 3,
# action_space=np.array([0,1]),
# discount_factor=self.DISCOUNT_FACTOR,
# learning_rate=self.LEARNING_RATE,
# trace_factor=1
# )
#
# def test_get_state_action_value(self):
# val1 = self.learner.get_state_action_value(np.array([1,1,-1]),0)
#
# def test_observe_step(self):
# self.learner.weights *= 0
#
# self.learner.observe_step(
# np.array([1,0,0]),
# 0,
# 1,
# np.array([1,0,0]),
# False
# )
# """
# Target = 1+gamma*0 = 1
# prediction = wx = 0
# loss = 0.5(1-wx)^2
# dloss/dw = -(1-wx)x = [-1 0 0]
# times learning rate of 0.1, and negative: [0.1 0 0]
# """
# expected = np.array([[0.1,0,0],[0,0,0]])
# output = self.learner.weights.cpu().numpy()
# diff = np.sum(expected-output)
# self.assertAlmostEqual(diff, 0, msg="Gradient is wrong")
if __name__ == "__main__":
    # Every TestCase class above is commented out, so this currently runs an
    # empty suite; uncomment the classes to make the file exercise anything.
    unittest.main()
| true |
2b64804b202290ab1b634d33366fb3c07ea69255 | Python | vvoZokk/dnn | /scripts/lib/evolve_state.py | UTF-8 | 437 | 2.90625 | 3 | [
"MIT"
] | permissive |
import pickle
from os.path import join as pj
class State(object):
    """Pickled snapshot of evolutionary-search progress.

    Stores the RNG seed and the accumulated history of (X, tells)
    evaluation pairs, and knows how to persist itself into a working
    directory under the fixed file name ``state.p``.
    """

    FNAME = "state.p"

    def __init__(self, seed):
        # History of (X, tells) pairs appended via add_val().
        self.vals = []
        self.seed = seed

    def add_val(self, X, tells):
        """Record one evaluation: candidate X and its fitness feedback."""
        self.vals.append((X, tells))

    def dump(self, wd):
        """Pickle this state into directory `wd`.

        Uses a context manager so the file handle is closed promptly
        (the original passed an anonymous open() and leaked the handle
        to the garbage collector).
        """
        with open(pj(wd, State.FNAME), "wb") as f:
            pickle.dump(self, f)

    @staticmethod
    def read_from_dir(wd):
        """Load and return the State previously dumped into directory `wd`."""
        with open(pj(wd, State.FNAME), "rb") as f:
            return pickle.load(f)
| true |
b7c6c15c8d516915aef35a63be4a30bd21c2254a | Python | AnTznimalz/python_prepro | /Prepro2019/road_to_legend.py | UTF-8 | 481 | 3.84375 | 4 | [] | no_license | """0068: Road to Legend"""
def main():
    """Tally ranked-game results from stdin and report total play time.

    Reads the target win count, then one result per line ("WIN" or
    anything else). Wins add one point, losses subtract one (never below
    zero); every game costs 15 minutes. Loops until the score exceeds
    the target, then prints the elapsed time in hours.
    """
    target = int(input())
    score = 0
    minutes_played = 0
    while score <= target:
        result = input()
        if result == "WIN":
            score += 1
        elif score > 0:
            score -= 1
        minutes_played += 15
    whole_hours = minutes_played // 60
    leftover = minutes_played - whole_hours * 60
    if leftover != 0:
        # Convert the leftover minutes to a fraction of an hour
        # (formula kept exactly as written: equivalent to leftover / 60).
        leftover = (100 / (60 / leftover)) / 100
    print("Congratulations, You've played %.2f hour(s)." % (whole_hours + leftover))


main()
| true |
6f1548a99e2468d30300065ddad2c99c75b73f2d | Python | dohyun93/python_playground | /section14_(유형)_정렬문제들/14-3.실패율(카카오2019).py | UTF-8 | 2,819 | 3.40625 | 3 | [] | no_license | # 슈퍼 게임 개발자 오렐리는 큰 고민에 빠졌다. 그녀가 만든 프랜즈 오천성이 대성공을 거뒀지만, 요즘 신규 사용자의 수가 급감한 것이다. 원인은 신규 사용자와 기존 사용자 사이에 스테이지 차이가 너무 큰 것이 문제였다.
#
# 이 문제를 어떻게 할까 고민 한 그녀는 동적으로 게임 시간을 늘려서 난이도를 조절하기로 했다. 역시 슈퍼 개발자라 대부분의 로직은 쉽게 구현했지만, 실패율을 구하는 부분에서 위기에 빠지고 말았다. 오렐리를 위해 실패율을 구하는 코드를 완성하라.
#
# 실패율은 다음과 같이 정의한다.
# 스테이지에 도달했으나 아직 클리어하지 못한 플레이어의 수 / 스테이지에 도달한 플레이어 수
# 전체 스테이지의 개수 N, 게임을 이용하는 사용자가 현재 멈춰있는 스테이지의 번호가 담긴 배열 stages가 매개변수로 주어질 때, 실패율이 높은 스테이지부터 내림차순으로 스테이지의 번호가 담겨있는 배열을 return 하도록 solution 함수를 완성하라.
#
# 제한사항
# 스테이지의 개수 N은 1 이상 500 이하의 자연수이다.
# stages의 길이는 1 이상 200,000 이하이다.
# stages에는 1 이상 N + 1 이하의 자연수가 담겨있다.
# 각 자연수는 사용자가 현재 도전 중인 스테이지의 번호를 나타낸다.
# 단, N + 1 은 마지막 스테이지(N 번째 스테이지) 까지 클리어 한 사용자를 나타낸다.
# 만약 실패율이 같은 스테이지가 있다면 작은 번호의 스테이지가 먼저 오도록 하면 된다.
# 스테이지에 도달한 유저가 없는 경우 해당 스테이지의 실패율은 0 으로 정의한다.
# https://programmers.co.kr/learn/courses/30/lessons/42889
def solution(N, stages):
    """Kakao 2019 'failure rate': rank stages by failure rate, descending.

    A stage's failure rate is (players currently stuck on it) divided by
    (players who ever reached it); stages nobody reached get rate 0.
    Ties are broken by ascending stage number.

    Args:
        N: number of stages (1..500).
        stages: list where each entry is the stage a player is stuck on
            (N + 1 means the player cleared everything).

    Returns:
        Stage numbers 1..N ordered by failure rate, highest first.
    """
    # Count how many players are stuck on each stage (index N+1 = cleared).
    stuck = [0] * (N + 2)
    for s in stages:
        stuck[s] += 1

    # Walk the stages once, keeping a running count of players who have
    # reached the current stage (everyone reaches stage 1).
    rates = []
    remaining = len(stages)
    for stage in range(1, N + 1):
        if remaining == 0:
            rates.append((stage, 0))
        else:
            rates.append((stage, stuck[stage] / remaining))
        remaining -= stuck[stage]

    # Highest rate first; equal rates keep ascending stage order.
    rates.sort(key=lambda pair: (-pair[1], pair[0]))
    return [stage for stage, _ in rates]
26663496a22a89825c627bd85d7a2903b9ac15e0 | Python | AdminSDA/Lab212 | /main.py | UTF-8 | 878 | 3.203125 | 3 | [] | no_license | import os
import glob
from problem import Problem
if __name__ == '__main__':
    # Import every .py module in the working directory so that any Problem
    # subclasses they define become visible via Problem.__subclasses__().
    for module in os.listdir('.'):
        if module.endswith('.py'):
            # NOTE: __import__'s signature is (name, globals, locals, ...);
            # the original passed locals() and globals() in swapped order.
            # Harmless at module top level (they are the same dict there),
            # but corrected to match the documented contract.
            __import__(module[:-3], globals(), locals())

    # For each registered subclass generate its statement text and the
    # detailed solution for that statement.
    statements = []
    solutions = []
    for derived in Problem.__subclasses__():
        p = derived()
        statements.append(str(p))
        solutions.append(p.solve())

    print('### Test SDA ###')
    print('Cerinte:')
    for statement in statements:
        print(statement)
        print('')
    print('')
    print('Rezolvari:')
    for solution in solutions:
        print(solution)
        print('')
| true |
eff1913376e25a92dd19cbb4400026908b4e6a21 | Python | Ruban-chris/Interview-Prep-in-Python | /elements_of_programming_interviews/19/19-4.py | UTF-8 | 1,795 | 4.09375 | 4 | [] | no_license | # degrees of connectedness
# Write a program that takes as input an undirected graph, which you can assume to be connected,
# and checks if the graph is minimally connected.
# Ideas
# Use DFS with visited set and parent.
# Time complexity is the same as DFS O(|V| + |E|)
# Space complexity is O(n) where n is the number of vertices in the graph.
class Vertex:
    """A graph node: an identifier plus an adjacency list."""

    def __init__(self, id=0):
        # `id` is only used as a label when printing; neighbours start empty
        # and are filled in by the caller.
        self.id = id
        self.nbrs = []
class Graph:
    """A bare container for a list of Vertex objects."""

    def __init__(self):
        # Populated by the caller after construction.
        self.vertices = []
# Fixture 1: an undirected graph containing cycles (e.g. a-b-d-c-a),
# with symmetric adjacency lists.
a = Vertex('a')
b = Vertex('b')
c = Vertex('c')
d = Vertex('d')
e = Vertex('e')
a.nbrs = [b,c]
b.nbrs = [e,a,d]
d.nbrs = [c,b]
c.nbrs = [a,d]
e.nbrs = [b]
graphWithCycles = Graph()
graphWithCycles.vertices = [b,e, c,d,a]

# Fixture 2: a tree rooted at f.
# NOTE(review): adjacency here is one-directional (g lists k/l but k and l
# list nothing back) — the DFS only walks "downward", so the cycle check
# still behaves as intended on this fixture.
f = Vertex('f')
g = Vertex('g')
h = Vertex('h')
i = Vertex('i')
j = Vertex('j')
k = Vertex('k')
l = Vertex('l')
m = Vertex('m')
f.nbrs = [g,h]
g.nbrs = [k,l]
h.nbrs = [i, j, m]
minimallyConnectedGraph = Graph()
minimallyConnectedGraph.vertices = [f, g, h, i, j, k, l, m]
def isMinimallyConnected(graph):
    """Return True iff `graph` (assumed connected) is a tree, i.e. cycle-free."""
    verts = graph.vertices
    # Zero or one vertex is trivially minimally connected.
    if len(verts) <= 1:
        return True
    # Start the DFS from an arbitrary vertex with no predecessor.
    return isMinimallyConnectedHelper(verts[0], [], None)
def isMinimallyConnectedHelper(vertex, visited, pred):
    """DFS cycle check used by isMinimallyConnected.

    Returns True when the traversal from `vertex` (arrived at from `pred`)
    never revisits a vertex, i.e. the reachable subgraph is a tree.

    Args:
        vertex: current vertex; must expose a `.nbrs` list.
        visited: list of vertices seen so far (mutated in place).
        pred: the vertex we arrived from; skipped when recursing.
    """
    # A leaf whose only neighbour is the vertex we came from cannot close a cycle.
    if len(vertex.nbrs) == 1 and vertex.nbrs[0] == pred:
        return True
    # Reaching an already-visited vertex via a different edge means a cycle.
    if vertex in visited:
        return False
    visited.append(vertex)
    # Recurse into every neighbour except the one we arrived from.
    # (Leftover debug print removed; generator lets all() short-circuit
    # instead of materialising every recursive result first.)
    return all(isMinimallyConnectedHelper(nbr, visited, vertex)
               for nbr in vertex.nbrs if nbr != pred)
# Smoke tests: the cyclic fixture must fail the check, the tree must pass.
assert(isMinimallyConnected(graphWithCycles) == False)
assert(isMinimallyConnected(minimallyConnectedGraph) == True)
| true |
62a822bb54154b7048c5f10ceaba6a38c2e24f42 | Python | sajandl/FlightGrid | /UI_Code.py | UTF-8 | 14,142 | 2.59375 | 3 | [] | no_license | import os
import tkinter as tk
from tkinter import filedialog
import Drone_Grid_UI
class GridInputUI:
    def __init__(self, master):
        """Build the grid-parameter window on top of the given Tk root/toplevel."""
        super().__init__()  # no-op: the class derives from object
        self.master = master
        self.output_file = None  # set later by file_select()
        self.master.title('Grid Parameters')
        self.master.columnconfigure(2, weight=1)  # let the entry column stretch
        self.master.config(padx=11, pady=11)
        self.init_ui()
def file_select(self):
self.output_file = filedialog.asksaveasfilename(
defaultextension='.csv',
filetypes=[('csv', '*.csv'), ('CSV', '*.CSV')],
initialdir=os.getcwd(),
parent=self.master,
title='Select output file location'
)
self.collect_parameters()
Drone_Grid_UI.write_file(self.calced_points)
def collect_parameters(self):
self.lat = float(self.lat_entry.get())
self.lat_h = float(self.lat_h_entry.get())
self.lon = float(self.lon_entry.get())
self.lon_h = float(self.lon_h_entry.get())
self.alt = int(self.alt_entry.get())
self.head = int(self.head_entry.get())
self.len_p = int(self.len_p_entry.get())
self.len_h = int(self.len_h_entry.get())
self.overlap = int(self.overlap_entry.get())
self.sample = int(self.overlap_entry.get())
if self.direction_str.get() == 'To Right':
self.direction = 1
else:
self.direction = -1
if self.mode_str.get() == 'Photo':
self.mode = 1
else:
self.mode = 0
if self.contour_str.get() == 'Follow Contour':
self.contour = 1
else:
self.contour = 0
if self.north_str.get() == 'True North':
self.north = 1
else:
self.north = 0
self.calced_points = Drone_Grid_UI.calculate_points(
lat=self.lat,
lat_h=self.lat_h,
lon=self.lon,
lon_h=self.lon_h,
altitude=self.alt,
heading_input=self.head,
length_p=self.len_p,
length_h=self.len_h,
overlap=self.overlap,
sample=self.overlap,
direction=self.direction,
mode=self.mode,
output_file=self.output_file,
contour=self.contour,
north=self.north
)
    def init_ui(self):
        """Create and grid every widget: input labels/entries, option menus,
        action buttons, and the read-only value labels filled in later by
        display_values()."""
        # create labels for entry boxes
        self.lat_lbl = tk.Label(self.master, text='Latitude Start/Home')
        self.lon_lbl = tk.Label(self.master, text='Longitude Start/Home')
        self.alt_lbl = tk.Label(
            self.master,
            text='Altitude above home position (ft)'
        )
        self.head_lbl = tk.Label(
            self.master,
            text='Initial Heading (North=0)'
        )
        self.len_p_lbl = tk.Label(self.master,
                                  text='Length perpendicular to Heading (ft)')
        self.len_h_lbl = tk.Label(
            self.master,
            text='Length in direction of Heading (ft)'
        )
        self.overlap_lbl = tk.Label(self.master, text='Overlap Percent')
        self.sample_lbl = tk.Label(self.master, text='# Contour Samples btw Points')
        self.direction_lbl = tk.Label(self.master, text='Column Direction w/r Heading')
        self.mode_lbl = tk.Label(self.master, text='Mode Selection')
        self.contour_lbl = tk.Label(self.master, text='Elevation Mode')
        self.north_lbl = tk.Label(self.master, text='True or Magnetic North')
        # labels for displayed values
        self.col_lbl = tk.Label(self.master, text='Columns', bg='darkblue',
                                fg='white')
        self.row_lbl = tk.Label(self.master, text='Rows', bg='darkblue',
                                fg='white')
        self.area_lbl = tk.Label(self.master, text='Area (acres)',
                                 bg='darkblue', fg='white')
        self.route_len_lbl = tk.Label(
            self.master,
            text='Route Length (miles)',
            bg='darkblue', fg='white'
        )
        # NOTE(review): self.col_ol_lbl / self.row_ol_lbl are reassigned to
        # different (sunken value) labels further down; these two description
        # labels stay on screen after grid() but lose their attribute
        # references. Consider distinct names.
        self.col_ol_lbl = tk.Label(self.master, text='Column Overlap (%)',
                                   bg='darkblue', fg='white')
        self.row_ol_lbl = tk.Label(self.master, text='Row Overlap (%)',
                                   bg='darkblue', fg='white')
        self.home_lbl = tk.Label(self.master, text='Home Point', bg='darkblue',
                                 fg='white')
        self.c1_lbl = tk.Label(self.master, text='Start Corner', bg='darkblue',
                               fg='white')
        self.c2_lbl = tk.Label(self.master, text='Second Corner',
                               bg='darkblue', fg='white')
        self.c3_lbl = tk.Label(self.master, text='Third Corner', bg='darkblue',
                               fg='white')
        self.c4_lbl = tk.Label(self.master, text='Fourth Corner',
                               bg='darkblue', fg='white')
        # create entry boxes for parameters
        self.lat_entry = tk.Entry(self.master)
        self.lat_h_entry = tk.Entry(self.master)
        self.lon_entry = tk.Entry(self.master)
        self.lon_h_entry = tk.Entry(self.master)
        self.alt_entry = tk.Entry(self.master)
        self.head_entry = tk.Entry(self.master)
        self.len_p_entry = tk.Entry(self.master)
        self.len_h_entry = tk.Entry(self.master)
        self.overlap_entry = tk.Entry(self.master)
        self.sample_entry = tk.Entry(self.master)
        # option menus backed by StringVars; defaults match the first choice
        self.direction_str = tk.StringVar(self.master)
        self.direction_str.set('To Right')
        self.direction_opmenu = tk.OptionMenu(
            self.master,
            self.direction_str,
            'To Right',
            'To Left'
        )
        self.mode_str = tk.StringVar(self.master)
        self.mode_str.set('Photo')
        self.mode_opmenu = tk.OptionMenu(
            self.master,
            self.mode_str,
            'Photo',
            'Video'
        )
        self.contour_str = tk.StringVar(self.master)
        self.contour_str.set('Follow Contour')
        self.contour_opmenu = tk.OptionMenu(
            self.master,
            self.contour_str,
            'Follow Contour',
            'Constant'
        )
        self.north_str = tk.StringVar(self.master)
        self.north_str.set('True North')
        self.north_opmenu = tk.OptionMenu(
            self.master,
            self.north_str,
            'True North',
            'Magnetic North'
        )
        # create display values button
        self.display_vals_btn = tk.Button(
            self.master,
            text='Display Values',
            command=self.display_values,
            pady=11,
            padx=11
        )
        # create output file button
        self.output_btn = tk.Button(
            self.master,
            text='Create File',
            command=self.file_select,
            pady=11,
            padx=11
        )
        # add labels to the master window
        self.lat_lbl.grid(row=1, column=1, sticky=tk.E)
        self.lon_lbl.grid(row=2, column=1, sticky=tk.E)
        self.alt_lbl.grid(row=3, column=1, sticky=tk.E)
        self.head_lbl.grid(row=4, column=1, sticky=tk.E)
        self.len_p_lbl.grid(row=5, column=1, sticky=tk.E)
        self.len_h_lbl.grid(row=6, column=1, sticky=tk.E)
        self.overlap_lbl.grid(row=7, column=1, sticky=tk.E)
        self.sample_lbl.grid(row=8, column=1, sticky=tk.E)
        self.direction_lbl.grid(row=9, column=1, sticky=tk.E)
        self.mode_lbl.grid(row=10, column=1, sticky=tk.E)
        self.contour_lbl.grid(row=11, column=1, sticky=tk.E)
        self.north_lbl.grid(row=12, column=1, sticky=tk.E)
        # add labels for displayed values to the master window
        self.col_lbl.grid(row=13, column=1, sticky=tk.E)
        self.row_lbl.grid(row=14, column=1, sticky=tk.E)
        self.area_lbl.grid(row=15, column=1, sticky=tk.E)
        self.route_len_lbl.grid(row=16, column=1, sticky=tk.E)
        self.col_ol_lbl.grid(row=17, column=1, sticky=tk.E)
        self.row_ol_lbl.grid(row=18, column=1, sticky=tk.E)
        self.home_lbl.grid(row=19, column=1, sticky=tk.E)
        self.c1_lbl.grid(row=20, column=1, sticky=tk.E)
        self.c2_lbl.grid(row=21, column=1, sticky=tk.E)
        self.c3_lbl.grid(row=22, column=1, sticky=tk.E)
        self.c4_lbl.grid(row=23, column=1, sticky=tk.E)
        # add entry boxes to the master window
        self.lat_entry.grid(row=1, column=2, sticky=tk.EW)
        self.lat_h_entry.grid(row=1, column=3, sticky=tk.EW)
        self.lon_entry.grid(row=2, column=2, sticky=tk.EW)
        self.lon_h_entry.grid(row=2, column=3, sticky=tk.EW)
        self.alt_entry.grid(row=3, column=2, sticky=tk.EW)
        self.head_entry.grid(row=4, column=2, sticky=tk.EW)
        self.len_p_entry.grid(row=5, column=2, sticky=tk.EW)
        self.len_h_entry.grid(row=6, column=2, sticky=tk.EW)
        self.overlap_entry.grid(row=7, column=2, sticky=tk.EW)
        self.sample_entry.grid(row=8, column=2, sticky=tk.EW)
        self.direction_opmenu.grid(row=9, column=2, sticky=tk.EW)
        self.mode_opmenu.grid(row=10, column=2, sticky=tk.EW)
        self.contour_opmenu.grid(row=11, column=2, sticky=tk.EW)
        self.north_opmenu.grid(row=12, column=2, sticky=tk.EW)
        # add display values button
        self.display_vals_btn.grid(row=24, column=3, sticky=tk.EW)
        # add create file button
        self.output_btn.grid(row=25, column=3, sticky=tk.EW)
        # create labels which house the values, to be displayed
        self.colval_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                   relief='sunken')
        self.rowval_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                   relief='sunken')
        self.areaval_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                    relief='sunken')
        self.routeval_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                     relief='sunken')
        # these two reassign self.col_ol_lbl / self.row_ol_lbl (see note above)
        self.col_ol_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                   relief='sunken')
        self.row_ol_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                   relief='sunken')
        self.homeval_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                    relief='sunken')
        self.c1val_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                  relief='sunken')
        self.c2val_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                  relief='sunken')
        self.c3val_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                  relief='sunken')
        self.c4val_lbl = tk.Label(self.master, bg='darkgrey', fg='white',
                                  relief='sunken')
        # add labels which house the values, to the master window
        self.colval_lbl.grid(row=13, column=2, sticky=tk.EW)
        self.rowval_lbl.grid(row=14, column=2, sticky=tk.EW)
        self.areaval_lbl.grid(row=15, column=2, sticky=tk.EW)
        self.routeval_lbl.grid(row=16, column=2, sticky=tk.EW)
        self.col_ol_lbl.grid(row=17, column=2, sticky=tk.EW)
        self.row_ol_lbl.grid(row=18, column=2, sticky=tk.EW)
        self.homeval_lbl.grid(row=19, column=2, sticky=tk.EW)
        self.c1val_lbl.grid(row=20, column=2, sticky=tk.EW)
        self.c2val_lbl.grid(row=21, column=2, sticky=tk.EW)
        self.c3val_lbl.grid(row=22, column=2, sticky=tk.EW)
        self.c4val_lbl.grid(row=23, column=2, sticky=tk.EW)
def display_values(self):
self.collect_parameters()
output_values = self.calc_output_values()
# add text to labels to show the output values
self.colval_lbl.config(text=output_values['columns'])
self.rowval_lbl.config(text=output_values['rows'])
self.areaval_lbl.config(text=output_values['area'])
self.routeval_lbl.config(text=output_values['route_length'])
self.homeval_lbl.config(text='{} {}'.format(*output_values['home']))
self.c1val_lbl.config(text='{} {}'.format(*output_values['c1']))
self.col_ol_lbl.config(text=output_values['col_ol'])
self.row_ol_lbl.config(text=output_values['row_ol'])
self.c2val_lbl.config(text='{} {}'.format(*output_values['c2']))
self.c3val_lbl.config(text='{} {}'.format(*output_values['c3']))
self.c4val_lbl.config(text='{} {}'.format(*output_values['c4']))
def calc_output_values(self):
lat_h = self.calced_points[0]
lon_h = self.calced_points[1]
columns = self.calced_points[4]
rows = self.calced_points[8]
length_h = self.calced_points[12]
length_p = self.calced_points[13]
a_overlap_h = self.calced_points[14]
a_overlap_p = self.calced_points[15]
lat = self.calced_points[17]
lon = self.calced_points[18]
lat_b2 = self.calced_points[22]
lon_b2 = self.calced_points[23]
lat_b3 = self.calced_points[24]
lon_b3 = self.calced_points[25]
lat_b4 = self.calced_points[26]
lon_b4 = self.calced_points[27]
route_length_f = self.calced_points[28]
area = round(length_p * length_h / 43560, 1)
route_length = round(route_length_f / 5280, 1)
col_overlap = round(a_overlap_p, 1)
row_overlap = round(a_overlap_h, 1)
home = (round(lat_h, 6), round(lon_h, 6))
c1 = (round(lat, 6), round(lon, 6))
c2 = (round(lat_b2, 6), round(lon_b2, 6))
c3 = (round(lat_b3, 6), round(lon_b3, 6))
c4 = (round(lat_b4, 6), round(lon_b4, 6))
vals_dictionary = {
'columns': columns,
'rows': rows,
'area': area,
'route_length': route_length,
'col_ol': col_overlap,
'row_ol': row_overlap,
'home': home,
'c1': c1,
'c2': c2,
'c3': c3,
'c4': c4
}
return vals_dictionary | true |