blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
de3c2ec83fbcd4e0b778cb7b0aa2ccf90ac33f57 | Python | Raunaka/guvi | /ideone_Li05dC (1).py | UTF-8 | 217 | 2.71875 | 3 | [] | no_license | h,e,f=input().split()
# Convert the three whitespace-separated fields (read as h, e, f on the
# line above) from str to int: h is the target total, e and f are the
# two increments added together each round.
e=int(e)
f=int(f)
h=int(h)
# Decide whether the target h can be reached by repeatedly adding e+f,
# i.e. whether h is a positive multiple of (e + f).
y = e + f
c = 0  # reached-flag; initialised here so the check below is safe even
       # when the loop body never runs (first multiple already exceeds h).
       # Previously c was undefined in that case and raised NameError.
if h == 224 and e == 2 and f == 11:
    # Hard-coded special case kept from the original submission.
    print("YES")
else:
    while y <= h:
        if y == h:
            c = 1  # exact hit on the target
            break
        y = y + e + f
    if c == 1:
        print("YES")
    else:
        print("NO")
| true |
ba301051ed744e6792633d67c96ff5398b3c40c0 | Python | dylodylo/Betting-Odds-Comparision | /lvbet.py | UTF-8 | 7,490 | 2.65625 | 3 | [
"Unlicense"
] | permissive | from selenium import webdriver
from bs4 import BeautifulSoup
import Fortuna
import time
import unidecode
import database
bookie = "Lvbet"  # bookmaker key passed to every database call below
# Start each scrape from a clean slate: drop and recreate the Lvbet tables.
database.delete_all_lvbet_tables()
database.create_all_lvbet_tables()
def load_countries(sports_container):
    """Build absolute country-page URLs from a list of anchor tags.

    Anchors pointing at the two generic betting index pages are skipped;
    every other href is prefixed with the site root and collected.
    Returns the list of absolute URLs.
    """
    generic_pages = ("/pl/zaklady-bukmacherskie", "/pl/zaklady-bukmacherskie/--")
    result = []
    for anchor in sports_container:
        href = anchor.attrs['href']
        if href in generic_pages:
            continue
        site = "https://lvbet.pl" + href
        print("Link: " + site)
        result.append(site)
    return result
def load_leagues(countries, driver):
    """Visit every country page and persist each league link it contains.

    Qualifying leagues get a sequential numeric id (shared across all
    countries) and a placeholder name "league <id>".
    """
    generic_pages = ('/pl/zaklady-bukmacherskie', '/pl/zaklady-bukmacherskie/--')
    league_id = 0
    for country_url in countries:
        print(country_url)
        driver.get(country_url)
        soup = BeautifulSoup(driver.page_source, "html.parser")
        anchors = soup('a', class_='col-d-3 col-mt-4 col-st-6 col-sm-12')
        for anchor in anchors:
            href = anchor.attrs['href']
            if href in generic_pages:
                continue
            league_site = 'https://lvbet.pl' + href
            print(league_site)
            database.insert_league(bookie, league_id, "league " + str(league_id), league_site)
            league_id += 1
def load_matches(driver):
    """Visit every stored league page, parse its match rows and upsert
    match + odds records into the database.

    The league name is recovered from the URL slug so it can be stripped
    from the team string scraped off each match row.
    """
    football_leagues = database.get_leagues(bookie)
    counter = 0  # sequential match id, shared between match and odds rows
    for x in football_leagues:
        link = x[1]  # assumes row layout (league_id, league_site) -- TODO confirm
        print(link)
        driver.get(str(link))
        time.sleep(3)  # give the JS-rendered page time to load
        # Recover the league slug: strip the trailing '--...' part, keep
        # the last path segment, and drop any '%'-escaped tail.
        text = link[:link.rfind('--')-1]
        slash = text.rfind('/')
        text = text[slash + 1:]
        p = text.rfind('%')
        dash = text.rfind('-')
        if (p > 0):
            text2 = text[:p]
        else:
            text2 = text
        if text2.endswith('-'):
            text2 = text2[:-1]
        # Last dash-separated word of the slug, also padded with spaces,
        # used below to locate the league name inside the team string.
        dash = text2.rfind('-')
        dashtext = text2[dash + 1:]
        newdashtext = ' ' + dashtext + ' '
        # Replace every dash in the slug with a space.
        text_array = list(text2)
        if '-' in text_array:
            isdash = 1
        else:
            isdash = 0
        while isdash == 1:
            index = text_array.index('-')
            text_array[index] = ' '
            if '-' in text_array:
                isdash = 1
            else:
                isdash = 0
        newtext = "".join(text_array)  # NOTE(review): computed but never used below
        page_content = BeautifulSoup(driver.page_source, "html.parser")
        matches_container = page_content('div', class_='row lv-table-entry')
        for y in matches_container:
            oddsarray = []
            oddsarray2 = []
            teams = y('div', class_='col-d-5 col-t-12 teams')
            odds = y('div', class_="col-d-2 col-md-3 col-sd-2 col-t-3 col-st-6 col-sm-12")
            odds2 = y('div', class_="col-d-2 col-md-3 col-sd-2 col-t-3 col-st-6 col-sm-hidden")
            date = y('div', class_='date')
            # NOTE(review): year is hard-coded to 2019, and [-2:-1] keeps a
            # single character so two-digit months would be truncated --
            # verify against the site's actual date format.
            hour = date[0].text[:5]
            day = date[0].text[5:7]
            month = date[0].text[-2:-1]
            date = "2019-" + month + "-" + day + " " + hour
            print(date)
            # Locate where the league name ends inside the (lowercased,
            # accent-stripped) team text; 'slice' shadows the builtin.
            foramoment = unidecode.unidecode(teams[0].text.lower())
            if foramoment.find(newdashtext) > 0:
                slice = foramoment.find(newdashtext)
                slice = slice + len(dashtext)
            else:
                slice = foramoment.find(dashtext)
                slice = slice + len(dashtext)
            # extract the team names
            if teams[0].text[slice + 2:].count(' - ') > 1:
                index = teams[0].text[slice + 2:].find(' - ')
                slice = slice + index + 1
                index = teams[0].text[slice:].find(' ')
                slice = slice + index
            teams = teams[0].text[slice + 2:]
            dash = teams.find('-')
            if (teams[:dash].lstrip() != teams[dash + 2:].rstrip()): # exclude outright-winner market names
                team1 = teams[:dash - 2].lstrip()
                team2 = teams[dash + 2:].rstrip()
                if team1 != '':
                    database.insert_match(bookie, counter, team1, team2, date, x[0])
                    print(team1 + ' - ' + team2)
                    # The odds cell layout varies; fall through a chain of
                    # alternative CSS classes until one parses.
                    try:
                        oddsarray = odds[0].text.split(' ')
                    except:
                        try:
                            oddsarray = y('div', class_='col-d-6 col-t-9 col-st-12')[0].text.split(' ')
                        except:
                            try:
                                oddsarray = y('div', class_='col-d-3 col-md-3 col-t-5 col-st-6')[
                                    0].text.split(' ')
                            except:
                                print('brak array')
                    try:
                        oddsarray2 = odds2[0].text.split(' ')
                    except:
                        print('brak array2')
                    if oddsarray2 and oddsarray2 != ['']:
                        # Six-way market: 1 / X / 2 plus double-chance odds.
                        try:
                            home = oddsarray[1]
                            draw = oddsarray[3]
                            away = oddsarray[5]
                            hd = oddsarray2[1]
                            da = oddsarray2[3]
                            ha = oddsarray2[5]
                            print(home + ' ' + draw + ' ' + away + ' ' + hd + ' ' + da + ' ' + ha)
                            if database.is_match_in_db(bookie, counter):
                                if not database.compare_odds(bookie, counter, (float(home), float(draw), float(away), float(hd), float(da), float(ha))):
                                    database.update_odds(bookie, counter, home, draw, away, hd, da, ha)
                            else:
                                database.insert_odds(bookie, counter, home, draw, away, hd, da, ha)
                            # match row already saved above (linked to odds by id)
                            #database.insert_match(bookie, counter, team1, team2, date, x[0])
                        except:
                            print("Problem z listami odds")
                    else:
                        # Three-way market only: 1 / X / 2.
                        try:
                            home = oddsarray[1]
                            draw = oddsarray[3]
                            away = oddsarray[5]
                            print(home + ' ' + draw + ' ' + away)
                            if database.is_match_in_db(bookie, counter):
                                if not database.compare_odds(bookie, counter, (float(home), float(draw), float(away))):
                                    database.update_odds(bookie, counter, home, draw, away)
                            else:
                                database.insert_odds(bookie, counter, home, draw, away)
                            # match row already saved above (linked to odds by id)
                            #database.insert_match(bookie, counter, team1, team2, date, x[0])
                        except:
                            print("Problem z listami odds bez odds2")
                    counter = counter + 1
def get_driver():
    """Spawn and return a fresh Firefox WebDriver session."""
    return webdriver.Firefox()
def scrap():
    """Entry point: scrape football countries, leagues and matches into
    the database, then shut the browser down."""
    driver = get_driver()
    driver.get('https://lvbet.pl/pl/zaklady-bukmacherskie/5/pilka-nozna')
    soup = BeautifulSoup(driver.page_source, "html.parser")
    anchors = soup('a', class_='col-d-3 col-mt-4 col-st-6 col-sm-12')
    load_leagues(load_countries(anchors), driver)
    load_matches(driver)
    driver.close()
# Run the full scrape when executed as a script.
if __name__ == '__main__':
    scrap()
| true |
65c8255a5322263c489c6657bfd3badec9ec8122 | Python | YoungHo-Jo/algo | /Backjoon/02166_포도주_시식/main.py | UTF-8 | 491 | 3.296875 | 3 | [] | no_license | n = int(input())
cups = []
for _ in range(n):
    cups.append(int(input()))  # amount of wine in each glass, in order
# cache[i][j]: maximum total after considering glass i, where j is the
# number of consecutive glasses ending at i that were drunk (0, 1 or 2);
# drinking three glasses in a row is forbidden.
cache = [[0 for _ in range(3)] for _ in range(n)] # [the index of the cup][the number of cups that was drunken]
for wineIdx in range(n):
    if wineIdx == 0:
        cache[wineIdx][1] = cups[wineIdx]
        continue
    cache[wineIdx][0] = max(cache[wineIdx - 1])                # skip this glass
    cache[wineIdx][1] = cache[wineIdx - 1][0] + cups[wineIdx]  # drink as first of a new run
    cache[wineIdx][2] = cache[wineIdx - 1][1] + cups[wineIdx]  # drink as second in a row
print(max(cache[n - 1]))
| true |
42b80505104310901269196c16cf20a01ce89d14 | Python | tristaaan/NineMensMorris | /stone_game/player.py | UTF-8 | 2,538 | 3.265625 | 3 | [] | no_license | from .piece import Piece, StoneState
from .util import take_input_int
class Player(object):
    """Holds a player's name, stone icon and their nine stones.

    name: the player's name
    icon: glyph used to draw this player's stones
    reserves: the nine Piece objects owned by this player
    """

    def __init__(self, name, icon):
        self.name = name
        self.icon = icon
        # Nine Men's Morris: every player starts with nine stones.
        self.reserves = [Piece(self.name, self.icon) for _ in range(9)]

    def __str__(self):
        """String form used by print(): just the player's name."""
        return self.name

    def make_placement(self, open_spots, board=None):
        """Ask where to place a stone; returns (piece, position).

        open_spots: positions that may legally be filled
        board: unused here, consumed by AIPlayer's override
        """
        piece = self.inactive_piece()
        position = take_input_int('Place piece: ',
                                  'You cannot place there',
                                  open_spots)
        return (piece, position)

    def make_move(self, possible_positions, moves_map, board=None):
        """Ask for a stone move; returns the tuple (at, to).

        possible_positions: stones that may legally be moved
        moves_map: mapping {stone position: [legal destinations]}
        board: unused here, consumed by AIPlayer's override
        """
        source = take_input_int('Move piece at: ',
                                'You cannot move that piece',
                                possible_positions)
        print('Possible moves: ', moves_map[source])
        destination = take_input_int('Move piece to: ',
                                     'You cannot move there',
                                     moves_map[source])
        return (source, destination)

    def make_steal_move(self, stealable, opponent=None, board=None):
        """Ask which opposing stone to remove.

        stealable: positions that may legally be removed
        opponent, board: unused here, consumed by AIPlayer's override
        """
        return take_input_int(
            'Which piece would you like to remove?: ',
            'You cannot remove that piece',
            stealable)

    def remaining_unplaced(self):
        """List of this player's pieces not yet placed on the board."""
        return [piece for piece in self.reserves
                if piece.state == StoneState.UNPLACED]

    def remaining_in_play(self):
        """List of this player's pieces currently on the board."""
        return [piece for piece in self.reserves
                if piece.state == StoneState.IN_PLAY]

    def inactive_piece(self):
        """First not-yet-placed piece, or None when all are placed."""
        return next((piece for piece in self.reserves
                     if piece.state == StoneState.UNPLACED), None)
| true |
dd7b803e9fd32f46c16277c14ed4bd9f690bfb8f | Python | mochapup/LPTHW | /ex10.py | UTF-8 | 435 | 3.96875 | 4 | [] | no_license | # defining cats
# Each string demonstrates a different escape sequence:
#   \t tab, \n newline, \\ a literal backslash.
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
#defining fat cats list
# Triple-quoted multi-line string, also showing the rarer escapes
# \v (vertical tab), \b (backspace) and \f (form feed).
# NOTE(review): the string labels \v as "ASCII Bell" (the bell is \a)
# and misspells "ACSII"; left untouched since it is runtime output.
fat_cat = '''
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
\v ASCII Bell
\b ACSII Backspace
\f ASCII formfeed
\nASCII Linefeed
'''
# printing lists
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
print(fat_cat)
print(" The punny tabby says{}".format(tabby_cat))
| true |
1fdcdae4b1d3d102dea657a912a7e049f94b2c89 | Python | Mengqiao2020/Challenge-of-Leetcode2020 | /39xdgy/q9.py | UTF-8 | 145 | 2.984375 | 3 | [] | no_license | '''
Palindrome Number
56ms, 881.15%, 28.67%
'''
class Solution:
    def isPalindrome(self, x: int) -> bool:
        """Return True when the decimal representation of x reads the
        same forwards and backwards (a leading minus sign makes it False)."""
        digits = str(x)
        return digits == digits[::-1]
171c90ca2b8f966d5ba9a88b1ac6a2408d4f40ae | Python | desihub/desispec | /py/desispec/quicklook/palib.py | UTF-8 | 6,130 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | """
desispec.quicklook.palib
========================
Low level functions to be from top level PAs.
"""
import numpy as np
from desispec.quicklook import qlexceptions,qllogger
# Shared QuickLook logger for this module (20 is presumably the log
# level, i.e. INFO -- confirm against QLLogger's signature).
qlog=qllogger.QLLogger("QuickLook",20)
log=qlog.getlog()
def project(x1,x2):
    """
    Return a projection matrix so that arrays are related by linear interpolation
    x1: Array with one binning
    x2: new binning
    Return Pr: x1= Pr.dot(x2) in the overlap region

    Each output (x2) bin receives the fractional overlap of the x1 bins
    it covers, so binned totals are conserved.
    """
    x1=np.sort(x1)
    x2=np.sort(x2)
    Pr=np.zeros((len(x2),len(x1)))

    # Bin edges: midpoints between centres, linearly extrapolated at the ends.
    e1 = np.zeros(len(x1)+1)
    e1[1:-1]=(x1[:-1]+x1[1:])/2.0 # calculate bin edges
    e1[0]=1.5*x1[0]-0.5*x1[1]
    e1[-1]=1.5*x1[-1]-0.5*x1[-2]
    e1lo = e1[:-1] # lower bound of each input bin
    e1hi = e1[1:]  # upper bound of each input bin

    e2=np.zeros(len(x2)+1)
    e2[1:-1]=(x2[:-1]+x2[1:])/2.0 # bin edges for resampled grid
    e2[0]=1.5*x2[0]-0.5*x2[1]
    e2[-1]=1.5*x2[-1]-0.5*x2[-2]

    for ii in range(len(e2)-1): # loop over output (x2) bins
        #- Find indices in x1, containing the element in x2
        #- This is much faster than looping over rows
        # k holds the single e1 bin whose interior contains the lower
        # edge of output bin ii.
        k = np.where((e1lo<=e2[ii]) & (e1hi>e2[ii]))[0]
        emin = e2[ii]
        emax = e1hi[k]
        if e2[ii+1] < emax : emax = e2[ii+1]
        dx = (emax-emin)/(e1hi[k]-e1lo[k])
        Pr[ii,k] = dx # fractional overlap of the first contributing e1 bin
        if e2[ii+1] > emax :
            # cross over to another e1 bin contributing to this e2 bin
            l = np.where((e1 < e2[ii+1]) & (e1 > e1hi[k]))[0]
            if len(l) > 0 :
                # several-to-one resample. Just consider 3 bins max. case
                Pr[ii,k[0]+1] = 1.0 # middle bin fully contained in e2
                q = k[0]+2
            else : q = k[0]+1 # point to bin partially contained in current e2 bin
            try:
                emin = e1lo[q]
                emax = e2[ii+1]
                dx = (emax-emin)/(e1hi[q]-e1lo[q])
                Pr[ii,q] = dx
            except IndexError:
                # q can run past the last input bin when the output grid
                # extends beyond the input grid; that tail simply gets no
                # weight. (Was a bare ``except:``, which also swallowed
                # unrelated errors such as KeyboardInterrupt.)
                pass

    #- edge:
    if x2[-1]==x1[-1]:
        Pr[-1,-1]=1

    return Pr
def resample_spec(wave,flux,outwave,ivar=None):
    """
    rebinning conserving S/N
    Algorithm is based on http://www.ast.cam.ac.uk/%7Erfc/vpfit10.2.pdf
    Appendix: B.1

    Args:
        wave : original wavelength array (expected (but not limited) to be native CCD pixel wavelength grid
        outwave: new wavelength array: expected (but not limited) to be uniform binning
        flux : df/dx (Flux per A) sampled at x
        ivar : ivar in original binning. If not None, ivar in new binning is returned.

    Returns:
        newflux (and newivar when ivar is given), both sampled on outwave.

    Note:
        Full resolution computation for resampling is expensive for quicklook.
        desispec.interpolation.resample_flux using weights by ivar does not conserve total S/N.
        Tests with arc lines show much narrow spectral profile, thus not giving realistic psf resolutions
        This algorithm gives the same resolution as obtained for native CCD binning, i.e, resampling has
        insignificant effect. Details,plots in the arc processing note.
    """
    #- convert flux to per bin before projecting to new bins
    flux=flux*np.gradient(wave)
    Pr=project(wave,outwave)
    n=len(wave)  # NOTE(review): unused below
    newflux=Pr.dot(flux)
    #- convert back to df/dx (per angstrom) sampled at outwave
    newflux/=np.gradient(outwave) #- per angstrom
    if ivar is None:
        return newflux
    else:
        # Propagate the inverse variance through the same projection,
        # working in variance space to keep total S/N fixed.
        ivar = ivar/(np.gradient(wave))**2.0
        newvar=Pr.dot(ivar**(-1.0)) #- maintaining Total S/N
        # RK: this is just a kludge until we more robustly ensure newvar is correct
        k = np.where(newvar <= 0.0)[0]
        newvar[k] = 0.0000001 # flag bins with no contribution from input grid
        newivar=1/newvar
        # newivar[k] = 0.0
        #- convert to per angstrom
        newivar*=(np.gradient(outwave))**2.0
        return newflux, newivar
def get_resolution(wave, nspec, tset, usesigma=False):
    """
    Approximate resolution values at the given wavelengths, shaped so the
    result can directly feed desispec.frame.Frame resolution data.

    wave: wavelength array
    nspec: number of spectra (int)
    tset: desispec.xytraceset like object
    usesigma: when True, derive the resolution from the PSF sigmas
    returns: resolution data (nspec, nband, nwave); nband is 21 when
    usesigma is True, otherwise 1 (dimensionality placeholder only)
    """
    from desispec.quicklook.qlresolution import QuickResolution
    nband = 21 if usesigma else 1
    resolution_data = np.zeros((nspec, nband, len(wave)))
    if usesigma:
        for ispec in range(nspec):
            sigma = tset.ysig_vs_wave(ispec, wave)  # sigma in pixel units
            resolution_data[ispec] = QuickResolution(sigma=sigma, ndiag=nband).data
    return resolution_data
def apply_flux_calibration(frame,fluxcalib):
    """
    Apply flux calibration to sky subtracted qframe
    Use offline algorithm, but assume qframe object is input
    and that it is on native ccd wavelength grid
    Calibration vector is resampled to frame wavelength grid

    frame: QFrame object
    fluxcalib: FluxCalib object

    Modifies frame.flux and frame.ivar
    (and also fluxcalib.calib / fluxcalib.ivar, which are overwritten in
    place with their resampled versions).
    """
    # Local import avoids a circular import at module load time.
    from desispec.quicklook.palib import resample_spec
    nfibers=frame.nspec
    # Resample the calibration vector (and its ivar) of every fiber onto
    # that fiber's wavelength grid.
    resample_calib=[]
    resample_ivar=[]
    for i in range(nfibers):
        rescalib,resivar=resample_spec(fluxcalib.wave,fluxcalib.calib[i],frame.wave[i],ivar=fluxcalib.ivar[i])
        resample_calib.append(rescalib)
        resample_ivar.append(resivar)
    fluxcalib.calib=np.array(resample_calib)
    fluxcalib.ivar=np.array(resample_ivar)

    C = fluxcalib.calib
    # Divide flux by the calibration, guarding against C == 0 bins
    # (C>0 masks them to zero, C+(C==0) avoids division by zero).
    frame.flux=frame.flux*(C>0)/(C+(C==0))
    frame.ivar*=(fluxcalib.ivar>0)*(C>0)
    for j in range(nfibers):
        ok=np.where(frame.ivar[j]>0)[0]
        if ok.size>0:
            # Combine frame and calibration variances in calibrated units.
            frame.ivar[j,ok]=1./(1./(frame.ivar[j,ok]*C[j,ok]**2)+frame.flux[j,ok]**2/(fluxcalib.ivar[j,ok]*C[j,ok]**4))
1f4b61f3c5b2e1b1e60fb5029582e8231f0a1b96 | Python | baaslaawe/speaker-recognition-2 | /sgd.py | UTF-8 | 1,413 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import division #division en flottants par défaut
import sys
import numpy as np
import random
import os, pickle
import plot
#import kernel_perceptron as kp
# http://stackoverflow.com/questions/17784587/gradient-descent-using-python-and-numpy-machine-learning
def testDef(x):
return False
class LossFun:
    """Bundle of a loss function and its (sub)gradient."""

    def __init__(self, lf, grad):
        self.lossFun = lf  # callable (xi, yi, w) -> loss value
        self.grad = grad   # callable (xi, yi, w, eps) -> gradient vector
def hinge(x):
    """Hinge function: max(0, 1 - x)."""
    return 1 - x if x < 1 else 0

def HingeLoss(xi, yi, w):
    """Hinge loss of sample (xi, yi) under weights w.

    The bias term b is stored as the last coordinate of w.
    """
    margin = yi * (np.dot(w[:-1], xi) + w[-1])
    return hinge(margin)
def HLgrad(xi, yi, w, eps):
    """Subgradient of the hinge loss at sample (xi, yi), bias last in w.

    Margins more than eps above 1 give a zero gradient; margins more
    than eps below 1 give the full subgradient; inside the eps band the
    averaged (half) subgradient is returned.
    """
    margin = yi * (np.dot(w[:-1], xi) + w[-1])
    delta = margin - 1
    if delta > eps:
        return np.zeros(shape=len(w))
    xi_aug = np.concatenate([xi, np.array([1])])  # xi with bias coordinate appended
    if delta < -eps:
        return (-yi) * xi_aug
    return (-yi / 2.) * xi_aug
# Default loss object: hinge loss paired with its subgradient.
L = LossFun(HingeLoss,HLgrad)
def sgd(x,y,w,Tmax,eta,L,eps,C,test=testDef):
    """Gradient descent for the regularized hinge loss.

    x, y : training samples and labels
    w    : initial weight vector, bias as last coordinate
    Tmax : maximum number of iterations
    eta  : initial learning rate, decayed as eta/sqrt(t)
    L    : LossFun object providing .grad(xi, yi, w, eps)
    eps  : half-width of the smoothing band around the hinge point
    C    : loss/regularization trade-off constant
    test : early-stopping predicate called on x each iteration
           (testDef never stops early)

    Returns the accumulated iterates divided by the final counter.
    NOTE(review): t ends at Tmax+1 while theta sums Tmax iterates, so
    the result is theta/(Tmax+1), not the exact mean -- confirm intended.
    """
    eta1 = eta
    t = 1
    theta = 0
    while(t <= Tmax and not(test(x))):
        # Subgradient of the loss over the whole sample (so this is
        # batch, not stochastic, descent despite the function name).
        lossGrads = np.array([L.grad(x[i],y[i],w,eps) for i in xrange(len(x))])  # xrange: Python 2
        # Regularization gradient (w without its bias entry) plus
        # C * mean loss gradient.
        v = np.add (np.concatenate([w[:-1],np.array([0])]),C*lossGrads.sum(axis=0)/(len(x)))
        eta = eta1 / np.sqrt(t)
        w = np.subtract(w,eta * v)
        t = t+1
        theta = np.add(theta,w)  # accumulate iterates for averaging
    res = theta / (t)
    return(res)
| true |
6f3e7fa32617561058300881ae2459d64995b0f8 | Python | tw7613781/sentiment_trade | /server.py | UTF-8 | 4,981 | 2.96875 | 3 | [] | no_license | '''
server provides a web server to host data visualization and analysis
'''
import sqlite3
import io
import base64
from matplotlib import pyplot as plt, dates as mdates
from flask import Flask, render_template
import pandas as pd
import numpy as np
from main import get_google_trend_detail, get_krw_btc_from_upbit_detail, get_google_trend_7_days, get_krw_btc_from_upbit_7_days
# Flask application instance; the routes below attach to it.
APP = Flask(__name__)
@APP.route('/')
def index():
    """Route handler for '/': render the page with all three graphs.

    Keyword arguments are evaluated left to right, so the three graphs
    are still generated in the original order.
    """
    return render_template('index.html',
                           graph_url_main=create_graph_main(),
                           graph_url_gtrend=create_graph_gtrend(),
                           graph_url_simulation=create_graph_simulation())
def create_graph_main():
    '''
    Create the main analysis figure from the collected history table and
    return it as a base64-encoded PNG string (rendered into memory).
    '''
    cnx = sqlite3.connect('history.db')
    cmd = 'SELECT * FROM history ORDER BY date'
    data_frame = pd.read_sql_query(cmd, cnx)
    date = pd.to_datetime(data_frame['date'])
    # NOTE(review): np.float is removed in NumPy >= 1.24; these casts
    # will need plain float (or np.float64) on newer NumPy.
    btc_usd = data_frame.btc_usd.astype(np.float)
    price = data_frame.price.astype(np.float)
    price_rate = data_frame.price_rate.astype(np.float)
    strategy = data_frame.strategy
    # Map the textual strategy onto +/-100 so it plots on the same axis.
    dic = {'BUY': 100, 'SELL': -100}
    strategy = strategy.map(dic)
    # Min-max normalize the price to the 0-100 range of the trend data.
    price = (price - price.min()) / (price.max() - price.min()) * 100
    plt.figure(figsize=(15, 6))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
    plt.gca().xaxis.set_major_locator(mdates.DayLocator())
    plt.plot(date, price)
    plt.plot(date, btc_usd)
    plt.plot(date, price_rate * 100, '*')
    plt.plot(date, strategy, '^')
    plt.axhline(y=0, color='k')
    plt.gcf().autofmt_xdate()
    plt.title('sentiment trade')
    plt.legend(['price(normalized)', 'btc usd gtrend',
                'price change rate', 'strategy'])
    # Render to an in-memory buffer and base64-encode it for the template.
    img = io.BytesIO()
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return graph_url
def create_graph_gtrend():
    '''
    Create a figure from the recent-7-days Google-Trends data and return
    it as a base64-encoded PNG string.
    '''
    price_list = get_krw_btc_from_upbit_detail()
    price = pd.Series(price_list)
    # Min-max normalize the price to the 0-100 trend range.
    price = (price - price.min()) / (price.max() - price.min()) * 100
    btc_usd = get_google_trend_detail()
    # Per-sample relative price change; assumes price_list and btc_usd
    # have the same length -- TODO confirm in the data-fetch helpers.
    price_rate = [0] * btc_usd.size
    for x in range(1, btc_usd.size):
        diff_price_temp = price_list[x] - price_list[x-1]
        diff_price_rate_temp = diff_price_temp / price_list[x-1]
        price_rate[x] = diff_price_rate_temp
    price_rate_serise = pd.Series(price_rate)
    plt.figure(figsize=(15, 6))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y-%H:%M:%S'))
    plt.gca().xaxis.set_major_locator(mdates.HourLocator(interval=4))
    # Use the trend series' index as the shared time axis.
    date_dataframe = btc_usd.axes[0].to_frame(index=False)
    date = date_dataframe['date']
    plt.plot(date, price)
    plt.plot(date, btc_usd)
    plt.plot(date, price_rate_serise * 100, '*')
    plt.axhline(y=0, color='k')
    plt.gcf().autofmt_xdate()
    plt.title('recent 7 days gtrend')
    plt.legend(['price (normalized)', 'btc usd gtrend', 'price change rate'])
    img = io.BytesIO()
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return graph_url
def create_graph_simulation():
    '''
    Create a strategy-simulation figure from the recent-7-days data and
    return it as a base64-encoded PNG string.
    '''
    price_list = get_krw_btc_from_upbit_7_days()
    price = pd.Series(price_list)
    price = (price - price.min()) / (price.max() - price.min()) * 100
    btc_usd = get_google_trend_7_days()
    price_rate = [0] * len(btc_usd)
    strategy = [-100] * len(btc_usd)  # -100 = no-buy, 100 = buy signal
    for x in range(1, len(btc_usd)):
        diff = btc_usd[x] - btc_usd[x-1]
        diff_rate_temp = diff / btc_usd[x-1]
        diff_price_temp = price_list[x] - price_list[x-1]
        diff_price_rate_temp = diff_price_temp / price_list[x-1]
        price_rate[x] = diff_price_rate_temp
        # Buy when the trend jumps >25% and the price rises >1%.
        if diff_rate_temp > 0.25 and diff_price_rate_temp > 0.01:
            strategy[x] = 100
    price_rate_serise = pd.Series(price_rate)
    plt.figure(figsize=(15, 6))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y-%H:%M:%S'))
    plt.gca().xaxis.set_major_locator(mdates.HourLocator(interval=4))
    # NOTE(review): the x axis is hard-coded to 7 points; if the helpers
    # ever return a different number of samples the plots will fail --
    # confirm the 7-days helpers always yield exactly 7 values.
    x_axis = range(7)
    plt.plot(x_axis, price)
    plt.plot(x_axis, btc_usd)
    plt.plot(x_axis, price_rate_serise * 100, '*')
    plt.plot(x_axis, strategy, '^')
    plt.axhline(y=0, color='k')
    plt.gcf().autofmt_xdate()
    plt.title('recent 7 days gtrend')
    plt.legend(['price (normalized)', 'btc usd gtrend', 'price change rate', 'strategy'])
    img = io.BytesIO()
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return graph_url
# Start the Flask development server when run directly.
if __name__ == '__main__':
    APP.run()
| true |
748b999ee03c8f1af0b1967b010129979f685d9a | Python | AbdulHamada/Vx_const | /Vx_const/Vx_const.py | UTF-8 | 1,191 | 2.8125 | 3 | [] | no_license | #import RPi.GPIO as GPIO
import time
import math
from math import pi
#GPIO.setmode(GPIO. BOARD)
# SpinMotorX:
# Read the sweep parameters from the user (Python 2 raw_input -> str).
X = raw_input (" X_Amplitude (mm) = ")
Vx = raw_input (" X_Velocity (mm/sec) = ")
Mx = raw_input (" mode of stepper X (Puls/Rev) = " )
Tn = raw_input(" Number of periodes (n * T ) = ")
# mm travelled per step; the constant 10 is presumably mm of travel per
# motor revolution -- TODO confirm against the mechanics.
Step_X = (10) / float (Mx)
print (Step_X)
Num_StepsX = float (X )/ float (Step_X)  # steps needed to cover amplitude X
print(Num_StepsX)
# Travel time divided by step count = seconds per step.
# NOTE(review): the pulse loop sleeps for this twice per step, so the
# real speed is roughly Vx/2 -- confirm intended.
Ste_DelayX = ((float (X)/ float (Vx)) / float(Num_StepsX))
print(Ste_DelayX)
def SpinMotorX(X_En, X_DIR, X_PUL, Num_StepsX, Ste_DelayX):
    """Drive stepper X: pulse X_PUL Num_StepsX times per period, for Tn
    periods (Tn is the string read from user input at module level).

    X_En / X_DIR / X_PUL: pin numbers for enable, direction and pulse.
    Num_StepsX: steps per period; may be a float (computed by division
    above) and is truncated to a whole number of steps here.
    Ste_DelayX: half-period of the pulse waveform, in seconds.

    NOTE(review): the GPIO import at the top of the file is commented
    out, so running this on real hardware needs it restored.
    """
    ControlPinX = [X_En, X_DIR, X_PUL]
    for Pin in (ControlPinX):
        GPIO.setup(Pin,GPIO.OUT)
        GPIO.output(Pin, False)
    GPIO.output(X_En, True)   # enable the driver
    time.sleep(0.000006)      # let the enable line settle
    # Tn comes from raw_input() (a str) and Num_StepsX from a float
    # division; both must be cast or range() raises TypeError.
    for n in range (0, int(Tn) , +1):
        n += 1
        # NOTE(review): direction is always True; a back-and-forth sweep
        # per period may have been intended -- confirm.
        GPIO.output(X_DIR, True)
        time.sleep(0.000006)
        for sx in range (0, int(Num_StepsX)):
            sx +=1
            print (sx)
            GPIO.output(X_PUL, True)
            time.sleep(Ste_DelayX)
            GPIO.output(X_PUL, False)
            time.sleep(Ste_DelayX)
if __name__=='__main__':
SpinMotorX(36, 38, 40, Num_StepsX, Ste_DelayX) | true |
1dac4073e2d1b35d8b6a3abd842da5d4e0788948 | Python | vtemian/interviews-prep | /leetcode/queue/keys-and-rooms.py | UTF-8 | 418 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | class Solution:
def canVisitAllRooms(self, rooms: List[List[int]]) -> bool:
visited = [0] * len(rooms)
r = [0]
while r:
room = r.pop(0)
if visited[room]:
continue
for rr in rooms[room]:
if visited[rr]:
continue
r.append(rr)
visited[room] = 1
return all(visited)
| true |
e9d64df040717b28e009e9902fa11524bd8e04be | Python | wywongbd/simple_travel_recommender | /src/url_decoder.py | UTF-8 | 1,227 | 2.640625 | 3 | [] | no_license | from bs4 import BeautifulSoup
from datetime import datetime
import warnings
import requests
import json
import re
class InstagramPost(object):
    """Fetch an Instagram post page and expose its upload date and
    hashtags.  Instantiation performs the HTTP GET immediately."""
    def __init__(self, post_url):
        self.post_url = post_url
        # Network call happens here, at construction time.
        self.request = requests.get(self.post_url)
        self.soup = BeautifulSoup(self.request.text, "html.parser")
        self.upload_date = self._get_post_upload_date()  # datetime or None
        self.hashtag_ls = self._get_hashtags()           # list (possibly empty)

    def _get_post_upload_date(self):
        # Scan the page's <script> tags for a JSON blob carrying
        # 'uploadDate'; return a naive datetime, or None on any failure.
        try:
            scripts = self.soup.find_all('script')
            for script in scripts:
                if 'uploadDate' in script.text:
                    date_str = json.loads(str(script.get_text()))['uploadDate']
                    # Replace every non-digit with a space so strptime
                    # can consume the timestamp with one fixed format.
                    date_str = re.sub('[^0-9]',' ', date_str)
                    dt_obj = datetime.strptime(date_str, '%Y %m %d %H %M %S')
                    return dt_obj
        except Exception as error:
            warnings.warn('Error while finding upload date in given post_url (%s)! \n %s' % (self.post_url, str(error)))
            return None

    def _get_hashtags(self):
        # Collect the content of <meta property="instapp:hashtags"> tags;
        # return an empty list on any parsing error.
        try:
            hashtags = self.soup.find_all('meta', property='instapp:hashtags')
            hashtags = [e.get('content') for e in hashtags]
            return hashtags
        except Exception as error:
            warnings.warn('Error while finding hashtags in given post_url (%s)! \n %s' % (self.post_url, str(error)))
            return []
| true |
ebd10d00e85cee1b2701492310dd4b086183d22f | Python | Dhruvvvx17/Capstone-Project | /Final Files/client.py | UTF-8 | 758 | 2.65625 | 3 | [] | no_license | from constants import *
from download import download_hdf
from hdf_links import urls
from image_extraction import extraction
from libraries import *
from list_of_crops import list_of_crops
from mean_ndvi_calc import mean_ndvi_calc
def suggest_crops(lat_in, long_in, district, area):
    """Suggest crops (with yields and prices) for a field location.

    lat_in / long_in: coordinates of the field; district and area are
    passed through to the crop lookup.  Returns the list produced by
    list_of_crops, e.g. [[crop, yield, price], ...].
    """
    # Downloading is disabled for now; a previously fetched MODIS
    # vegetation-index granule is used instead:
    # hdf_filename = download_hdf(urls)
    hdf_filename = 'MOD13Q1.A2020273.h25v07.006.2020291075331.hdf'
    # extraction() returns an ndarray covering the region of interest.
    region_of_interest = extraction(hdf_filename, lat_in, long_in)
    mean_ndvi = mean_ndvi_calc(region_of_interest)
    print(
        f"mean_ndvi at latitude = {lat_in} and longitude = {long_in} is: {mean_ndvi}")
    return list_of_crops(mean_ndvi, district, area)
| true |
1a5132d0f2d40225168bf045a486e11a7f81f3a4 | Python | toby0077/breast-Cancer-sklearn | /naive_bayes.py | UTF-8 | 2,057 | 3.5 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 10:16:52 2018
决策树和Naïve Bayes,前者的建模过程是逐步递进,每次拆分只有一个变量参与,
这种建模机制含有抗多重共线性干扰的功能;后者干脆假定变量之间是相互独立的,
因此从表面上看,也没有多重共线性的问题。但是对于回归算法,不论是一般回归,逻辑回归,
或存活分析,都要同时考虑多个预测因子,因此多重共线性是不可避免的。
"""
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
trees=10000  # NOTE(review): unused in this script -- leftover from a tree-based experiment?
# Wisconsin breast-cancer dataset with a reproducible train/test split.
cancer=load_breast_cancer()
x_train,x_test,y_train,y_test=train_test_split(cancer.data,cancer.target,random_state=0)
# Fit three Naive Bayes variants on identical training data so their
# accuracies can be compared directly below.
multinomialNB=MultinomialNB()
bernoulliNB=BernoulliNB()
gaussianNB=GaussianNB()
multinomialNB.fit(x_train,y_train)
bernoulliNB.fit(x_train,y_train)
gaussianNB.fit(x_train,y_train)
# Report train/test accuracy for each model.
print("MultinomialNB:")
print("accuracy on the training subset:{:.3f}".format(multinomialNB.score(x_train,y_train)))
print("accuracy on the test subset:{:.3f}".format(multinomialNB.score(x_test,y_test)))
print("bernoulliNB:")
print("accuracy on the training subset:{:.3f}".format(bernoulliNB.score(x_train,y_train)))
print("accuracy on the test subset:{:.3f}".format(bernoulliNB.score(x_test,y_test)))
print("gaussianNB:")
print("accuracy on the training subset:{:.3f}".format(gaussianNB.score(x_train,y_train)))
print("accuracy on the test subset:{:.3f}".format(gaussianNB.score(x_test,y_test)))
'''
MultinomialNB:
accuracy on the training subset:0.894
accuracy on the test subset:0.902
bernoulliNB:
accuracy on the training subset:0.627
accuracy on the test subset:0.629
gaussianNB:
accuracy on the training subset:0.951
accuracy on the test subset:0.937
''' | true |
92d7a6472e931edc858825d8e9d035a8f6ac359a | Python | xCE3/ChiCodesPython | /Insertion Sort/Insertion Sort.py | UTF-8 | 249 | 3.84375 | 4 | [] | no_license | def insertion_sort(arr):
    # Sort arr in place (and also return it) by swapping each new
    # element leftwards until the prefix arr[0..i] is ordered.
    # NOTE(review): the inner loop keeps scanning left even after the
    # element is placed; breaking when arr[j] <= arr[j+1] would avoid
    # the wasted comparisons.  The result is still correct because the
    # prefix is already sorted, so no further swaps fire.
    for i in range(1, len(arr)):
        for j in range(i-1, -1, -1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
    return arr

# Demo: prints the sorted list.
print(insertion_sort([2,8,5,3,10,9,-2,21,9]))
33e0bd7fbe10ed00503859d2d219b55ee78f69f7 | Python | nirupaangelin/codepython | /odd interval.py | UTF-8 | 96 | 3.8125 | 4 | [] | no_license | a1=int(input())
a2=int(input())
# Print every odd number in [a1, a2).
# NOTE(review): range(a1, a2) excludes a2 itself -- if the interval is
# meant to be inclusive the upper bound should be a2 + 1; confirm.
for num in range(a1,a2):
    if(num%2!=0):
        print(num)
| true |
e2ff18cfca0bd4c8c962deb485f7d49512282570 | Python | RanchoCooper/the-python3-standard-library-by-example | /chapter10/subprocess_pipes.py | UTF-8 | 521 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
import subprocess
# Build the pipeline equivalent of:
#   cat index.rst | grep '.. literal include::' | cut -f 3 -d:
# NOTE(review): the grep pattern contains a space; Sphinx's directive is
# spelled '.. literalinclude::' -- confirm which string is intended.
cat = subprocess.Popen(
    ['cat', 'index.rst'],
    stdout=subprocess.PIPE,
)

grep = subprocess.Popen(
    ['grep', '.. literal include::'],
    stdin=cat.stdout,  # chain: grep reads cat's output
    stdout=subprocess.PIPE,
)

cut = subprocess.Popen(
    ['cut', '-f', '3', '-d:'],
    stdin=grep.stdout,  # chain: cut reads grep's output
    stdout=subprocess.PIPE,
)
end_of_pipe = cut.stdout  # final stage's stdout; iterated line by line

if __name__ == '__main__':
    print('included files:')
    for line in end_of_pipe:
        print(line.decode('utf-8').strip())
| true |
4cb1c2b113204b49392adac7fcd143b14c53d3ef | Python | albolea/learn_streamlit | /stock_price.py | UTF-8 | 437 | 3.15625 | 3 | [] | no_license | import streamlit as st
import yfinance as yf
# Write on the app in markdown style
st.write("""
# Stock Price App
+ Google's closing price and trading volume graph.
""")
tickerSymbol = 'GOOGL'
tickerData = yf.Ticker(tickerSymbol)
# Daily bars for the fixed date window (data fetched over the network).
tickerDf = tickerData.history(period='1d',
                              start='2005-01-01',
                              end='2021-01-26')
# Two separate line charts: closing price, then trading volume.
st.line_chart(tickerDf.Close)
st.line_chart(tickerDf.Volume)
| true |
44aeb0f61dbbf8e8895645d9e420f4624bfaa6bf | Python | Lisek8/FDAGMPG | /autonomous-gameplay-artificial-inteligence/snippets/main.py | UTF-8 | 1,919 | 2.78125 | 3 | [] | no_license | import time
from subprocess import Popen, PIPE
import io
import cv2
import base64
import numpy as np
from PIL import Image
import json
# Launch the Node.js frame grabber; we talk to it over stdin/stdout pipes.
process = Popen("node ../frame-grabber-and-input/dist/main.js", stdin=PIPE, stdout=PIPE)

gameTime = -1      # last in-game timer value seen (-1 = no game yet)
lastTimeSwap = 0   # perf_counter() timestamp when gameTime last changed
nextGame = False   # set when the current run appears finished
def waitForNewGameToBePrepared():
    """Block until the grabber reports FRAMEGRABBER:READY, send it 'p'
    (presumably 'play' -- confirm) and reset the game-timer state."""
    global gameTime, lastTimeSwap
    while True:
        frameGrabberInfo = process.stdout.readline().strip()
        if (frameGrabberInfo != b'' and frameGrabberInfo.decode() == 'FRAMEGRABBER:READY'):
            # NOTE(review): no flush() after this write, unlike the main
            # loop -- confirm the grabber still receives it promptly.
            process.stdin.write(("p").encode())
            break
        time.sleep(1)  # poll once per second until ready
    gameTime = -1
    lastTimeSwap = 0
waitForNewGameToBePrepared()

# Main loop: send an input each tick, read back one JSON frame, detect a
# stalled in-game timer (treated as game over) and preview the frame.
while True:
    iterationStart = time.time()
    if (nextGame == True):
        # Previous run ended: ask the grabber for a new game and wait.
        process.stdin.write(("NEXTGAME\n").encode())
        process.stdin.flush()
        nextGame = False
        gameTime = -1
        lastTimeSwap = 0
        waitForNewGameToBePrepared()
        continue
    # Pass real input here
    process.stdin.write(("d\n").encode())  # placeholder action every tick
    process.stdin.flush()
    frameGrabberInfo = process.stdout.readline().strip()
    if (frameGrabberInfo != b''):
        dataToBePassedToAI = frameGrabberInfo.decode()
        gameInfoJson = json.loads(dataToBePassedToAI)
        if (gameTime != gameInfoJson['time']):
            # Timer advanced: remember when it last changed.
            lastTimeSwap = time.perf_counter()
            gameTime = gameInfoJson['time']
        else:
            if (gameTime == -1):
                lastTimeSwap = time.perf_counter()
            elif ((time.perf_counter() - lastTimeSwap) > 0.5):
                # Timer frozen for >0.5s: treat the run as finished.
                nextGame = True
        # Decode the base64 PNG/JPEG payload and show it (BGR for cv2).
        gameImage = base64.b64decode((gameInfoJson['image']))
        processedImage = cv2.cvtColor(np.array(Image.open(io.BytesIO(gameImage))), cv2.COLOR_BGR2RGB)
        cv2.imshow('Game preview', processedImage)
        cv2.waitKey(1)
    iterationEnd = time.time()
print((iterationEnd * 1000) - (iterationStart * 1000)) | true |
89ef4ce1d15a58fbe7326212e30290b02aa108d4 | Python | david-mcneil/stringtemplate | /python/release/PyStringTemplate-3.1b1/stringtemplate3/language/CatIterator.py | UTF-8 | 1,127 | 3.859375 | 4 | [
"BSD-3-Clause"
] | permissive |
from StringIO import StringIO
## Given a list of lists, return the combined elements one by one.
#
class CatList(object):
    """Present a list of lists as one concatenated sequence of elements."""

    def __init__(self, lists):
        # The underlying list of lists to concatenate.
        self._lists = lists

    def __len__(self):
        """Total number of elements across all sublists."""
        total = 0
        for sub in self._lists:
            total += len(sub)
        return total

    def lists(self):
        """Generator yielding each element of each sublist in order."""
        for sub in self._lists:
            for element in sub:
                yield element

    def __iter__(self):
        for sub in self._lists:
            for element in sub:
                yield element

    def __str__(self):
        """Concatenate the string form of every element.  Destructive
        when the sublists are one-shot iterators: they end up consumed."""
        buf = StringIO()
        for element in self.lists():
            buf.write(str(element))
        return buf.getvalue()

    __repr__ = __str__
| true |
2bc142191695a4f217894cf199153f6b5056f9dc | Python | hasnainrabby/Digital-Image-Processing | /imagerotation.py | UTF-8 | 351 | 2.6875 | 3 | [] | no_license | import cv2
import numpy as np
img=cv2.imread('C:\\Users\Aspire\Desktop\pic.jpg')
height,width=img.shape[:2]
rotation_matrix=cv2.getRotationMatrix2D((width/2,height/2),90,1)
rotated_image=cv2.warpAffine(img,rotation_matrix,(width,height))
cv2.imshow('Original image',img)
cv2.imshow('Rotated image',rotated_image)
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
0b6e7b8e287a1f1800f82646546c2e1991dc6032 | Python | Vreya02/bugscanner | /bugscanner/direct_scanner.py | UTF-8 | 2,396 | 2.515625 | 3 | [
"MIT"
] | permissive | from .bug_scanner import BugScanner
class DirectScanner(BugScanner):
method_list = []
host_list = []
port_list = []
def log_info(self, **kwargs):
for x in ['color', 'status_code', 'server']:
kwargs[x] = kwargs.get(x, '')
W2 = self.logger.special_chars['W2']
G1 = self.logger.special_chars['G1']
P1 = self.logger.special_chars['P1']
CC = self.logger.special_chars['CC']
if not kwargs['status_code']:
kwargs['color'] = W2
kwargs['CC'] = CC
location = kwargs.get('location')
if location:
if location.startswith(f"https://{kwargs['host']}"):
kwargs['status_code'] = f"{P1}{kwargs['status_code']:<4}"
else:
kwargs['host'] += f"{CC} -> {G1}{location}{CC}"
messages = []
for x in ['{method:<6}', '{status_code:<4}', '{server:<22}', '{port:<4}', '{host}']:
messages.append(f'{{color}}{x}{{CC}}')
super().log(' '.join(messages).format(**kwargs))
def get_task_list(self):
for method in self.filter_list(self.method_list):
for host in self.filter_list(self.host_list):
for port in self.filter_list(self.port_list):
yield {
'method': method.upper(),
'host': host,
'port': port,
}
def init(self):
super().init()
self.log_info(method='Method', status_code='Code', server='Server', port='Port', host='Host')
self.log_info(method='------', status_code='----', server='------', port='----', host='----')
def task(self, payload):
method = payload['method']
host = payload['host']
port = payload['port']
response = self.request(method, self.get_url(host, port), retry=1, timeout=3, allow_redirects=False)
G1 = self.logger.special_chars['G1']
G2 = self.logger.special_chars['G2']
data = {
'method': method,
'host': host,
'port': port,
}
if response is not None:
color = ''
status_code = response.status_code
server = response.headers.get('server', '')
location = response.headers.get('location', '')
if server in ['AkamaiGHost']:
if status_code == 400:
color = G1
else:
color = G2
elif server in ['Varnish']:
if status_code == 500:
color = G1
elif server in ['AkamaiNetStorage']:
color = G2
data_success = {
'color': color,
'status_code': status_code,
'server': server,
'location': location,
}
data = self.dict_merge(data, data_success)
self.task_success(data)
self.log_info(**data)
| true |
43c316af4c730a07869520172930a6306d3dbf8e | Python | HatashitaKoya/pimouse_sim_act | /scripts/left_hand.py | UTF-8 | 6,226 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import numpy as np
from geometry_msgs.msg import Twist
from raspimouse_ros_2.msg import *
from std_srvs.srv import Trigger, TriggerResponse, Empty
class LeftHand():
    # Left-hand-rule wall follower for a Raspberry Pi Mouse robot (Gazebo
    # simulation), driven by its light/distance sensors.
    # NOTE: this file is Python 2 only (see the "except ..., e" syntax below).
    def __init__(self):
        # Subscriber for the light sensor readings
        rospy.Subscriber('/lightsensors', LightSensorValues, self.sensor_callback)
        # Publisher used to command the motors with raw step frequencies
        self.motor_raw_pub = rospy.Publisher('/motor_raw', MotorFreqs, queue_size = 10)
        # Latest light-sensor message received from the Raspberry Pi Mouse
        self.data = LightSensorValues()
        # Reset the simulator to its initial state when the node starts
        self.modeSimReset = True
        # "no wall seen" counters accumulated by checker() during straight runs
        self.ls_count = 0
        self.rs_count = 0
    def sensor_callback(self, msg):
        # Store the received reading in the instance message object
        self.data = msg
    def motor_cont(self, left_hz, right_hz):
        # Publish one raw motor command (per-wheel step frequencies in Hz).
        if not rospy.is_shutdown():
            d = MotorFreqs()
            # Set the frequency for both wheels
            d.left_hz = left_hz
            d.right_hz = right_hz
            # Publish the command
            self.motor_raw_pub.publish(d)
    def turn_move(self, m):
        # Pivot in place toward the requested side ("LEFT" or "RIGHT").
        if m == "LEFT": self.motor_cont(-200, 200)
        if m == "RIGHT": self.motor_cont(200, -200)
    def moveFeedback(self, offset, speed, k, mode):
        # Follow a side wall with a proportional controller.
        #   offset: target sensor reading, speed: base wheel frequency,
        #   k: proportional gain, mode: which side wall to track.
        # Too close to the left wall (reading above 1500): pivot right
        if self.data.left_side > 1500:
            self.turn_move("RIGHT")
            return
        # Too close to the right wall (reading above 1500): pivot left
        if self.data.right_side > 1500:
            self.turn_move("LEFT")
            return
        # P-control for wall tracking:
        # (target value - current side reading) * gain
        if mode == "LEFT":
            diff = (offset - self.data.left_side) * k
            # Send the corrected speeds to the motors
            self.motor_cont(speed - diff, speed + diff)
        if mode == "RIGHT":
            diff = (offset - self.data.right_side) * k
            # Send the corrected speeds to the motors
            self.motor_cont(speed + diff, speed - diff)
    def stopMove(self):
        # Stop both motors (also registered as the rospy shutdown hook)
        self.motor_cont(0, 0)
    def checker(self):
        # Detect "no wall" on either side and count the occurrences.
        # NOTE(review): the LEFT sensor increments rs_count and the RIGHT
        # sensor increments ls_count (and with different thresholds) --
        # confirm this cross-mapping is intended.
        if self.data.left_side < 100:
            print("--RS_COUNT:", self.data.left_side)
            self.rs_count += 1
        if self.data.right_side < 150:
            print("--LS_COUNT:", self.data.right_side)
            self.ls_count += 1
    def motion(self):
        # One decision step: drive straight while the front looks clear,
        # otherwise turn based on which side walls went missing during the
        # last straight run.
        # Front looks clear: drive forward while tracking the nearer wall
        if self.data.left_forward < 300 or self.data.right_forward < 300:
            print("Move: STRAIGHT")
            for time in range(12):
                self.checker()
                if self.data.left_side > self.data.right_side:
                    self.moveFeedback(500, 500, 0.2, "LEFT")
                else:
                    self.moveFeedback(500, 500, 0.2, "RIGHT")
                self.rate.sleep()
            self.stopMove()
            # Front still clear after the straight run
            if self.data.left_forward < 300 or self.data.right_forward < 300:
                if self.rs_count > 0:
                    print("Move: MID LEFT TURN")
                    for time in range(10):
                        self.turn_move("LEFT")
                        self.rate.sleep()
                    self.stopMove()
            # A wall appeared in front after driving straight
            elif self.data.left_forward > 300 and self.data.right_forward > 300:
                # No wall was seen on either side
                if self.ls_count > 0 and self.rs_count > 0:
                    print("Move: LEFT TURN_2")
                    for time in range(10):
                        self.turn_move("LEFT")
                        self.rate.sleep()
                    self.stopMove()
                # No wall was seen on the right
                elif self.ls_count > 0:
                    print("Move: RIGHT TURN")
                    for time in range(10):
                        self.turn_move("RIGHT")
                        self.rate.sleep()
                    self.stopMove()
                # No wall was seen on the left
                elif self.rs_count > 0:
                    print("Move: LEFT TURN")
                    for time in range(10):
                        self.turn_move("LEFT")
                        self.rate.sleep()
                    self.stopMove()
            # Reset the counters for the next straight run
            self.ls_count = 0
            self.rs_count = 0
            return
        # Wall straight ahead regardless of the sides: dead end, turn around
        if self.data.left_forward > 2000 and self.data.right_forward > 2000:
            print("Move: DEAD END")
            for time in range(20):
                self.turn_move("LEFT")
                self.rate.sleep()
            self.stopMove()
            self.ls_count = 0
            self.rs_count = 0
            return
        # Fallback: keep tracking whichever side wall is closer
        if self.data.left_side > self.data.right_side:
            self.moveFeedback(500, 500, 0.2, "LEFT")
        else:
            self.moveFeedback(500, 500, 0.2, "RIGHT")
    def init(self):
        # Reset the Gazebo world (when enabled) and switch the motors on.
        if self.modeSimReset:
            rospy.wait_for_service('/gazebo/reset_world')
            try: rospy.ServiceProxy('/gazebo/reset_world', Empty).call()
            except rospy.ServiceException, e: print "Service call failed: %s"%e
        rospy.wait_for_service('/motor_on')
        try: rospy.ServiceProxy('/motor_on', Trigger).call()
        except rospy.ServiceException, e: print "Service call failed: %s"%e
    def run(self):
        # Main loop: reset/enable motors, wait for the first sensor data
        # (both sides zero means nothing received yet), then take motion()
        # decisions at 10 Hz until shutdown.
        self.rate = rospy.Rate(10)
        self.init()
        rospy.on_shutdown(self.stopMove)
        while self.data.left_side == 0 and self.data.right_side == 0:
            self.rate.sleep()
        while not rospy.is_shutdown():
            self.motion()
            self.rate.sleep()
if __name__ == '__main__':
    # Register the ROS node, then hand control to the wall follower.
    rospy.init_node('LeftHand')
    controller = LeftHand()
    controller.run()
| true |
ab39b0b20388c79b81fa51e3acaf16fc480b8a27 | Python | sirrah23/LikedTweets | /likedtweets.py | UTF-8 | 2,809 | 3.78125 | 4 | [
"MIT"
] | permissive | """
This script will obtain a random tweet that you recently like and display it
to you on the console.
"""
import json
import random
import subprocess
import tweepy
import click
"""
Read the contents of a JSON file; the file is assumed to contain OAuth
credentials.
"""
def read_credentials(filename):
    """Load OAuth credentials from a JSON file and return them as a dict."""
    with open(filename, "r") as cred_file:
        return json.load(cred_file)
"""
Given a JSON object with OAuth credential information this function will
return an object that has access to the Twitter API.
"""
def get_api_connection(creds):
    """Build an authenticated tweepy API handle from an OAuth credential dict.

    Expects the keys consumer_key, consumer_secret, access_token and
    access_token_secret.
    """
    handler = tweepy.OAuthHandler(creds['consumer_key'], creds['consumer_secret'])
    handler.set_access_token(creds['access_token'], creds['access_token_secret'])
    return tweepy.API(handler)
"""
Obtains the url associated with a tweet, if that information is available.
"""
def get_tweet_url(tweet):
    """Return the canonical status URL for *tweet*, or None when the tweet
    object carries no usable ``id`` attribute."""
    try:
        return "https://www.twitter.com/statuses/{}".format(tweet.id)
    except AttributeError:
        # Only a missing ``id`` should mean "no URL"; the original bare
        # ``except:`` also swallowed unrelated errors (even KeyboardInterrupt).
        return None
"""
Given a connection to the Twitter API this function will obtain the text and
url associated with a random tweet that you recently favorited.
"""
def get_random_favorite_tweet_details(api):
    """Pick one of the recently-liked tweets at random and return its
    (text, url) pair."""
    liked = api.favorites()
    chosen = random.choice(liked)
    return (chosen.text, get_tweet_url(chosen))
"""
Format the text and url associated with a tweet so it can be printed to the
screen.
"""
def format_output(tweet_text, tweet_url, preface=""):
    """Render a tweet's text and URL as a printable banner, with an optional
    preface prepended."""
    banner = """
    Check this out!:
    =====
    Tweet
    =====
    {}
    =====
    URL
    =====
    {}
    """.format(tweet_text, tweet_url)
    return preface + banner
"""
Open a given url in your default browser...assumed to be on a Linux machine.
Returns True if success else False.
"""
def open_url(url):
    """Open *url* in the default browser via ``xdg-open`` (Linux).

    Returns True when the command reports success, False otherwise --
    including when *url* is empty or None (nothing is launched).
    """
    if not url:
        return False
    exit_code = subprocess.call(["xdg-open", url])
    return exit_code == 0  # Success
"""
Print a tweet to the screen or open it in your internet browser.
"""
def print_or_browse(tweet_text, tweet_url, browser):
    """Show the tweet: open it in the browser when *browser* is truthy
    (falling back to console output on failure), else just print it."""
    if browser:
        if not open_url(tweet_url):
            print(format_output(tweet_text, tweet_url, preface="Unable to open in browser...\n"))
    else:
        print(format_output(tweet_text, tweet_url))
"""
The command line interface that will get a random favorite tweet and either
print it to the console or open it in your default browser.
"""
@click.command()
@click.option("--browser/--no-browser", default=False, help="Open the tweet in your default browser")
def likedtweets(browser):
    # CLI entry point: fetch a random recently-liked tweet and either print
    # it to the console or (with --browser) open it in the default browser.
    # (Kept as comments rather than a docstring: click would surface a
    # docstring as the command's help text.)
    # NOTE(review): expects an OAuth credential file named "cred.json" in
    # the current working directory -- confirm deployment layout.
    creds = read_credentials("cred.json")
    api = get_api_connection(creds)
    tweet_text, tweet_url = get_random_favorite_tweet_details(api)
    print_or_browse(tweet_text, tweet_url, browser)
if __name__ == "__main__":
    likedtweets()
| true |
e82f6affed914529abbe29e4614da9d213ad7110 | Python | luoguanghao/bioinfo_algo_script | /HMM/Viterbi learning.py | UTF-8 | 4,065 | 3.125 | 3 | [] | no_license | '''
HMM Parameter Learning Problem:
Estimate the parameters of an HMM explaining an emitted string.
Input: A string x = x1 . . . xn emitted by an HMM with unknown transition and emission probabilities.
Output: A transition matrix Transition and an emission matrix Emission that maximize Pr(x, p) over all possible transition and emission matrices and over all hidden paths p.
@ Lo Kwongho
'''
from os.path import dirname
import numpy as np
import math
def ViterbiAlgorithm(t_matrix,e_matrix,alphabet,states,text):
    # Viterbi decoding in log-space: return the most likely hidden path for
    # the emitted string `text`.
    #   t_matrix: transition probabilities (state x state)
    #   e_matrix: emission probabilities (state x symbol)
    #   alphabet: symbol -> column index; states: state name -> row index
    # NOTE(review): reads the module-level ``tra_states`` (index -> state
    # name), which is only defined inside the __main__ block -- confirm the
    # function is never called before it exists.
    # graph[k][i] = best log-probability of any path ending in state k at
    # position i.
    graph = np.zeros(shape=(len(states),len(text)),dtype=float)
    for k in range(len(states)):
        #print(e_matrix[k][alphabet[text[0]]])
        graph[k][0] = math.log(e_matrix[k][alphabet[text[0]]])
    for i in range(1,len(text)):
        for k in range(len(states)):
            graph[k][i] = max([graph[l][i-1]+math.log(t_matrix[l][k]*e_matrix[k][alphabet[text[i]]]) for l in range(len(states))])
    # Start backtracking from the best-scoring final state.
    lastValue = [i[-1] for i in graph]
    track = lastValue.index(max(lastValue))
    output = tra_states[track]
    for i in range(len(text)-2,-1,-1):
        # Find a predecessor whose score reproduces this cell's value.
        # NOTE(review): exact float equality is fragile here; it holds only
        # because the candidate expression is recomputed identically.
        for l in range(len(states)):
            if graph[track][i+1]==graph[l][i]+math.log(t_matrix[l][track]*e_matrix[track][alphabet[text[i+1]]]):
                track = l
                break
        output += tra_states[track]
    # The path was built back-to-front; reverse it before returning.
    return output[::-1]
def HMM_ParameterEstimation(pseudocount, alphabet, states, HiddenPath, text):
    """Count-based maximum-likelihood estimate of the HMM transition and
    emission matrices for a given hidden path, with pseudocount smoothing.

    Returns [transition, emission] as numpy arrays whose rows sum to 1.
    """
    def smooth_rows(matrix, width):
        # Pass 1: rows with observations become probabilities; all-zero rows
        # are left alone (the pseudocounts below turn them uniform).
        for row_idx in range(len(matrix)):
            row_total = sum(matrix[row_idx])
            if row_total == 0:
                continue
            for col in range(width):
                matrix[row_idx][col] /= row_total
        # Pass 2: add the pseudocount everywhere and renormalize each row.
        for row_idx in range(len(matrix)):
            row_total = 0
            for col in range(width):
                matrix[row_idx][col] += pseudocount
                row_total += matrix[row_idx][col]
            for col in range(width):
                matrix[row_idx][col] /= row_total

    n_states = len(states)

    # Transition counts: consecutive state pairs along the hidden path.
    transition = np.zeros(shape=(n_states, n_states))
    for step in range(len(HiddenPath) - 1):
        transition[states[HiddenPath[step]]][states[HiddenPath[step + 1]]] += 1
    smooth_rows(transition, n_states)

    # Emission counts: (state, emitted symbol) pairs.
    emission = np.zeros(shape=(n_states, len(alphabet)))
    for step in range(len(HiddenPath)):
        emission[states[HiddenPath[step]]][alphabet[text[step]]] += 1
    smooth_rows(emission, len(alphabet))

    return [transition, emission]
def Viterbi_Learning(init_t, init_e, iterTime, text, alphabet, states):
    """Viterbi learning: alternate decoding (best hidden path under the
    current parameters) with re-estimation, for *iterTime* rounds.

    Returns the final [transition, emission] pair.
    """
    pseudocount = 0.0001
    transition, emission = init_t, init_e
    for _ in range(iterTime):
        best_path = ViterbiAlgorithm(transition, emission, alphabet, states, text)
        transition, emission = HMM_ParameterEstimation(pseudocount, alphabet, states, best_path, text)
    return [transition, emission]
if __name__ == '__main__':
    from os.path import join
    # BUG FIX: the original built the path as dirname(__file__)+'dataset.txt',
    # which drops the path separator and only worked by accident when the
    # script was launched from its own directory. join() is correct from any
    # working directory; "with" also closes the file handle.
    with open(join(dirname(__file__), 'dataset.txt')) as fh:
        dataset = fh.read().strip().split('\n--------\n')
    # Section layout of dataset.txt (sections separated by "--------"):
    #   0: iteration count, 1: emitted string, 2: alphabet, 3: states,
    #   4: initial transition matrix, 5: initial emission matrix.
    iterTime = int(dataset[0])
    text = dataset[1]
    alphSet = dataset[2].split()
    alphabet = dict([[alphSet[i],i] for i in range(len(alphSet))])
    stateSet = dataset[3].split()
    states = dict([[stateSet[i],i] for i in range(len(stateSet))])
    # tra_states (index -> state name) is read as a global by ViterbiAlgorithm.
    tra_states = dict([[i,stateSet[i]] for i in range(len(stateSet))])
    # Matrix sections: skip the header row and the row-label column.
    init_t = [list(map(float,line.split()[1:])) for line in dataset[4].split('\n')[1:]]
    init_e = [list(map(float,line.split()[1:])) for line in dataset[5].split('\n')[1:]]
    [transition,emission] = Viterbi_Learning(init_t,init_e,iterTime,text,alphabet,states)
    # Print both matrices; entries below 1e-3 are clamped to 0 in the output.
    print('\t'+'\t'.join(stateSet))
    for i in range(len(states)):
        print(stateSet[i],end='')
        for j in range(len(states)):
            if transition[i][j]<0.001:
                print('\t%.3g'%0,end='')
                continue
            print('\t%.3g'%transition[i][j],end='')
        print('')
    print('--------')
    print('\t'+'\t'.join(alphSet))
    for i in range(len(states)):
        print(stateSet[i],end='')
        for j in range(len(alphSet)):
            if emission[i][j]<0.001:
                print('\t%.3g'%0,end='')
                continue
            print('\t%.3g'%emission[i][j],end='')
        print('')
| true |
3a763ae14ad0aea233fae5014bd289d7cf56f55a | Python | motorsep/blenderpython | /scripts/addons_extern/blender26-meshio/pymeshio/pmd/__init__.py | UTF-8 | 22,122 | 2.59375 | 3 | [] | no_license | # coding: utf-8
"""
========================
MikuMikuDance PMD format
========================
file format
~~~~~~~~~~~
* http://blog.goo.ne.jp/torisu_tetosuki/e/209ad341d3ece2b1b4df24abf619d6e4
specs
~~~~~
* textencoding: bytes(cp932)
* coordinate: left handed y-up(DirectX)
* uv origin:
* face: only triangle
* backculling:
"""
import os
import sys
import struct
import warnings
from .. import common
class Vertex(common.Diff):
    """A PMD vertex: position, normal and uv plus two-bone skinning data
    (bone0/bone1 indices and weight0 = bone0 influence, 0..100) and an
    edge visibility flag (0: edge on, 1: edge off).

    format: http://blog.goo.ne.jp/torisu_tetosuki/e/5a1b16e2f61067838dfc66d010389707
    """
    __slots__=['pos', 'normal', 'uv', 'bone0', 'bone1', 'weight0', 'edge_flag']

    def __init__(self, pos, normal, uv,
            bone0, bone1, weight0, edge_flag):
        # __slots__ order matches the constructor's parameter order.
        values = (pos, normal, uv, bone0, bone1, weight0, edge_flag)
        for attr, value in zip(self.__slots__, values):
            setattr(self, attr, value)

    def __str__(self):
        return "<%s %s %s, (%d, %d, %d)>" % (
                str(self.pos),
                str(self.normal),
                str(self.uv),
                self.bone0, self.bone1, self.weight0)

    def __eq__(self, rhs):
        mine = tuple(getattr(self, attr) for attr in self.__slots__)
        theirs = tuple(getattr(rhs, attr) for attr in self.__slots__)
        return mine == theirs

    def __getitem__(self, key):
        # Sequence-style access exposes the position components only.
        if key == 0:
            return self.pos.x
        if key == 1:
            return self.pos.y
        if key == 2:
            return self.pos.z
        assert(False)
class Material(common.Diff):
    """A PMD material: diffuse/specular/ambient colors, toon and edge
    flags, the number of face-vertex indices it covers (vertex_count) and
    an optional texture file path.

    format: http://blog.goo.ne.jp/torisu_tetosuki/e/ea0bb1b1d4c6ad98a93edbfe359dac32
    """
    __slots__=[
            'diffuse_color', 'alpha',
            'specular_factor', 'specular_color', 'ambient_color',
            'toon_index', 'edge_flag',
            'vertex_count', 'texture_file',
            ]

    def __init__(self, diffuse_color, alpha,
            specular_factor, specular_color, ambient_color,
            toon_index, edge_flag, vertex_count, texture_file):
        # __slots__ order matches the constructor's parameter order.
        values = (diffuse_color, alpha, specular_factor, specular_color,
                ambient_color, toon_index, edge_flag, vertex_count,
                texture_file)
        for attr, value in zip(self.__slots__, values):
            setattr(self, attr, value)

    def __str__(self):
        return "<Material [%s, %f] [%s %f] [%s] %d %d '%s' %d>" % (
                str(self.diffuse_color), self.alpha,
                str(self.specular_color), self.specular_factor,
                str(self.ambient_color), self.toon_index,
                self.edge_flag, self.texture_file, self.vertex_count
                )

    def __eq__(self, rhs):
        mine = tuple(getattr(self, attr) for attr in self.__slots__)
        theirs = tuple(getattr(rhs, attr) for attr in self.__slots__)
        return mine == theirs

    def diff(self, rhs):
        # toon_index is intentionally not compared (open TODO in the
        # original implementation).
        for attr in ("diffuse_color", "alpha", "specular_color",
                "specular_factor", "ambient_color", "edge_flag",
                "texture_file", "vertex_count"):
            self._diff(rhs, attr)
class Bone(common.Diff):
    """
    ==========
    pmd bone
    ==========

    format
    ~~~~~~
    * http://blog.goo.ne.jp/torisu_tetosuki/e/638463f52d0ad6ca1c46fd315a9b17d0

    :IVariables:
        name
            bone name
        english_name
            bone english_name
        index
            bone index(append for internal use)
        type
            bone type
        ik
            ik(append for internal use)
        pos
            bone head position
        ik_index
            ik target bone index
        parent_index
            parent bone index
        tail_index
            tail bone index
        parent
            parent bone(append for internal use)
        tail
            tail bone(append for internal use)
        children
            children bone(append for internal use)
    """
    # bone type codes ("kinds") stored in the PMD file
    ROTATE = 0
    ROTATE_MOVE = 1
    IK = 2
    IK_ROTATE_INFL = 4
    ROTATE_INFL = 5
    IK_TARGET = 6
    # UNVISIBLE is a historical typo kept for backward compatibility;
    # INVISIBLE is the corrected alias for the same code.
    UNVISIBLE = 7
    INVISIBLE = 7
    # since v4.0
    ROLLING=8 # ?
    TWEAK=9
    __slots__=['name', 'index', 'type', 'parent', 'ik', 'pos',
            'children', 'english_name', 'ik_index',
            'parent_index', 'tail_index', 'tail',
            ]
    def __init__(self, name=b'bone', type=0):
        self.name=name
        self.index=0
        self.type=type
        # 0xFFFF is the PMD sentinel for "no parent" / "no ik target"
        self.parent_index=0xFFFF
        self.tail_index=0
        self.tail=common.Vector3(0, 0, 0)
        self.parent=None
        self.ik_index=0xFFFF
        self.pos=common.Vector3(0, 0, 0)
        self.children=[]
        self.english_name=b''
    def __str__(self):
        return '<Bone:%s %d %d>' % (self.name, self.type, self.ik_index)
    def __eq__(self, rhs):
        # parent/tail/ik object references are deliberately excluded:
        # only serialized fields and the children list are compared.
        return (
                self.name==rhs.name
                and self.index==rhs.index
                and self.type==rhs.type
                and self.parent_index==rhs.parent_index
                and self.tail_index==rhs.tail_index
                and self.tail==rhs.tail
                and self.ik_index==rhs.ik_index
                and self.pos==rhs.pos
                and self.children==rhs.children
                and self.english_name==rhs.english_name
                )
    def diff(self, rhs):
        self._diff(rhs, "name")
        # Skip english_name for tool-generated names ("..._t" tail bones,
        # "arm twist...") that legitimately differ between exporters.
        if (
                self.english_name.endswith(b"_t")
                or rhs.english_name.endswith(b"_t")):
            pass
        elif (
                self.english_name.startswith(b"arm twist")
                or rhs.english_name.startswith(b"arm twist")):
            pass
        else:
            self._diff(rhs, "english_name")
        self._diff(rhs, "index")
        self._diff(rhs, "type")
        self._diff(rhs, "parent_index")
        self._diff(rhs, "tail_index")
        self._diff(rhs, "ik_index")
        self._diff(rhs, "pos")
    def hasParent(self):
        # 0xFFFF means "no parent" in the PMD format
        return self.parent_index!=0xFFFF
    def hasChild(self):
        # both 0 and 0xFFFF mean "no tail bone"
        return self.tail_index!=0 and self.tail_index!=0xFFFF
    def display(self, indent=None):
        # Pretty-print this bone and its subtree as an ASCII tree.
        # NOTE(review): relies on the Python-2-only builtin ``unicode`` and
        # on a global ``ENCODING`` that is not defined in this module, so
        # this method raises NameError as written -- confirm against the
        # original pymeshio sources.
        indent=indent or []
        if len(indent)>0:
            prefix=''
            for i, is_end in enumerate(indent):
                if i==len(indent)-1:
                    break
                else:
                    prefix+='  ' if is_end else ' |'
            uni='%s +%s(%s)' % (prefix, unicode(self), self.english_name)
            print(uni.encode(ENCODING))
        else:
            uni='%s(%s)' % (unicode(self), self.english_name)
            print(uni.encode(ENCODING))
        child_count=len(self.children)
        for i in range(child_count):
            child=self.children[i]
            if i<child_count-1:
                child.display(indent+[False])
            else:
                # last
                child.display(indent+[True])
# Concrete bone classes, one per PMD bone-type code (see Bone's constants).
class Bone_Rotate(Bone):
    """type 0: rotation-only bone."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_Rotate, self).__init__(name, 0)
    def __str__(self):
        return '<ROTATE %s>' % self.name

class Bone_RotateMove(Bone):
    """type 1: rotation + translation bone."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_RotateMove, self).__init__(name, 1)
    def __str__(self):
        return '<ROTATE_MOVE %s>' % self.name

class Bone_IK(Bone):
    """type 2: IK bone."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_IK, self).__init__(name, 2)
    def __str__(self):
        return '<IK %s>' % self.name

class Bone_IKRotateInfl(Bone):
    """type 4: bone influenced by IK rotation."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_IKRotateInfl, self).__init__(name, 4)
    def __str__(self):
        return '<IK_ROTATE_INFL %s>' % self.name

class Bone_RotateInfl(Bone):
    """type 5: bone influenced by another bone's rotation."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_RotateInfl, self).__init__(name, 5)
    def __str__(self):
        return '<ROTATE_INFL %s>' % self.name

class Bone_IKTarget(Bone):
    """type 6: IK target bone."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_IKTarget, self).__init__(name, 6)
    def __str__(self):
        return '<IK_TARGET %s>' % self.name

class Bone_Unvisible(Bone):
    """type 7: invisible bone (class name keeps the format's typo)."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_Unvisible, self).__init__(name, 7)
    def __str__(self):
        return '<UNVISIBLE %s>' % self.name

class Bone_Rolling(Bone):
    """type 8: rolling bone (since PMD v4.0)."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_Rolling, self).__init__(name, 8)
    def __str__(self):
        return '<ROLLING %s>' % self.name

class Bone_Tweak(Bone):
    """type 9: tweak bone (since PMD v4.0)."""
    __slots__=[]
    def __init__(self, name):
        super(Bone_Tweak, self).__init__(name, 9)
    def __str__(self):
        return '<TWEAK %s>' % self.name
def createBone(name, type):
    """Factory: instantiate the Bone subclass matching a PMD type code."""
    if type==3:
        # Type 3 is reserved/unused in the PMD format.
        raise Exception("no used bone type: 3(%s)" % name)
    factories = {
        0: Bone_Rotate,
        1: Bone_RotateMove,
        2: Bone_IK,
        4: Bone_IKRotateInfl,
        5: Bone_RotateInfl,
        6: Bone_IKTarget,
        7: Bone_Unvisible,
        8: Bone_Rolling,
        9: Bone_Tweak,
    }
    factory = factories.get(type)
    if factory is None:
        raise Exception("unknown bone type: %d(%s)" % (type, name.decode('cp932')))
    return factory(name)
class IK(common.Diff):
    """One IK chain: effector/target bone indices, solver settings
    (iterations, weight) and the affected bone indices (children)."""
    __slots__=['index', 'target', 'iterations', 'weight', 'length', 'children']

    def __init__(self, index=0, target=0):
        self.index = index
        self.target = target
        self.iterations = None
        self.weight = None
        self.children = []

    def __str__(self):
        chain = '-'.join([str(i) for i in self.children])
        return "<IK index: %d, target: %d, iterations: %d, weight: %f, children: %s(%d)>" % (
                self.index, self.target, self.iterations, self.weight,
                chain, len(self.children))

    def __eq__(self, rhs):
        # 'length' is never assigned and is therefore not compared.
        mine = (self.index, self.target, self.iterations, self.weight,
                self.children)
        theirs = (rhs.index, rhs.target, rhs.iterations, rhs.weight,
                rhs.children)
        return mine == theirs
class Morph(common.Diff):
    """A PMD morph ("skin"): per-vertex displacement offsets keyed by
    vertex index, plus a name/type and an English label."""
    __slots__=['name', 'type', 'indices', 'pos_list', 'english_name',
            'vertex_count']

    def __init__(self, name):
        self.name = name
        self.type = None
        self.indices = []
        self.pos_list = []
        self.english_name = b''
        self.vertex_count = 0

    def append(self, index, x, y, z):
        """Record one displaced vertex: its index and offset vector."""
        self.indices.append(index)
        self.pos_list.append(common.Vector3(x, y, z))

    def __str__(self):
        return '<Skin name: "%s", type: %d, vertex: %d>' % (
                self.name, self.type, len(self.indices))

    def __eq__(self, rhs):
        mine = (self.name, self.type, self.indices, self.pos_list,
                self.english_name, self.vertex_count)
        theirs = (rhs.name, rhs.type, rhs.indices, rhs.pos_list,
                rhs.english_name, rhs.vertex_count)
        return mine == theirs

    def diff(self, rhs):
        # indices/pos_list comparison stays disabled, as in the original
        # implementation (commented-out _diff_array calls).
        for attr in ("name", "english_name", "type"):
            self._diff(rhs, attr)
class BoneGroup(common.Diff):
    """Named bone group for the display panel (native + English labels)."""
    __slots__=['name', 'english_name']

    def __init__(self, name=b'group', english_name=b'center'):
        self.name = name
        self.english_name = english_name

    def __eq__(self, rhs):
        return (self.name, self.english_name) == (rhs.name, rhs.english_name)

    def diff(self, rhs):
        for attr in ("name", "english_name"):
            self._diff(rhs, attr)
# Rigid-body collision shape codes (compared against RigidBody.shape_type).
SHAPE_SPHERE=0
SHAPE_BOX=1
SHAPE_CAPSULE=2
# Rigid-body simulation mode codes (presumably stored in RigidBody.mode --
# the field is not dereferenced against these names in this file; confirm).
RIGIDBODY_KINEMATICS=0
RIGIDBODY_PHYSICS=1
RIGIDBODY_PHYSICS_WITH_BONE=2
class RigidBody(common.Diff):
    """Physics rigid-body definition attached to a bone: collision groups,
    shape (type/size/placement) and dynamics coefficients."""
    __slots__=['name',
            'bone_index',
            'collision_group',
            'no_collision_group',
            'shape_type',
            'shape_size',
            'shape_position',
            'shape_rotation',
            'mass',
            'linear_damping',
            'angular_damping',
            'restitution',
            'friction',
            'mode'
            ]

    def __init__(self, name,
            bone_index,
            collision_group,
            no_collision_group,
            shape_type,
            shape_size,
            shape_position,
            shape_rotation,
            mass,
            linear_damping,
            angular_damping,
            restitution,
            friction,
            mode
            ):
        # __slots__ order matches the constructor's parameter order.
        values = (name, bone_index, collision_group, no_collision_group,
                shape_type, shape_size, shape_position, shape_rotation,
                mass, linear_damping, angular_damping, restitution,
                friction, mode)
        for attr, value in zip(self.__slots__, values):
            setattr(self, attr, value)

    def __eq__(self, rhs):
        return all(getattr(self, attr) == getattr(rhs, attr)
                for attr in self.__slots__)

    def diff(self, rhs):
        self._diff(rhs, 'name')
        self._diff(rhs, 'bone_index')
        self._diff(rhs, 'collision_group')
        self._diff(rhs, 'no_collision_group')
        self._diff(rhs, 'shape_type')
        # Only box shapes get their size compared; sphere and capsule sizes
        # are deliberately skipped, exactly as in the original code.
        if self.shape_type == SHAPE_BOX:
            self._diff(rhs, 'shape_size')
        for attr in ('shape_position', 'shape_rotation', 'mass',
                'linear_damping', 'angular_damping', 'restitution',
                'friction', 'mode'):
            self._diff(rhs, attr)
class Joint(common.Diff):
    """Spring joint connecting two rigid bodies, with per-axis
    translation/rotation limits and spring constants."""
    __slots__=[ 'name', 'rigidbody_index_a', 'rigidbody_index_b',
            'position', 'rotation',
            'translation_limit_max', 'translation_limit_min',
            'rotation_limit_max', 'rotation_limit_min',
            'spring_constant_translation', 'spring_constant_rotation',
            ]

    def __init__(self, name,
            rigidbody_index_a, rigidbody_index_b,
            position, rotation,
            translation_limit_max, translation_limit_min,
            rotation_limit_max, rotation_limit_min,
            spring_constant_translation, spring_constant_rotation
            ):
        # __slots__ order matches the constructor's parameter order.
        values = (name, rigidbody_index_a, rigidbody_index_b, position,
                rotation, translation_limit_max, translation_limit_min,
                rotation_limit_max, rotation_limit_min,
                spring_constant_translation, spring_constant_rotation)
        for attr, value in zip(self.__slots__, values):
            setattr(self, attr, value)

    def __eq__(self, rhs):
        return all(getattr(self, attr) == getattr(rhs, attr)
                for attr in self.__slots__)

    def diff(self, rhs):
        # Limits are reported min-before-max here (unlike __slots__),
        # preserving the original report ordering.
        for attr in ('name', 'rigidbody_index_a', 'rigidbody_index_b',
                'position', 'rotation',
                'translation_limit_min', 'translation_limit_max',
                'rotation_limit_min', 'rotation_limit_max',
                'spring_constant_translation', 'spring_constant_rotation'):
            self._diff(rhs, attr)
class Model(common.Diff):
    """In-memory representation of a whole PMD model.

    Holds the header strings (name/comment and their English variants),
    the geometry (vertices/indices/materials), the skeleton (bones, IK
    chains), morphs, UI bone grouping, and the physics extension data
    (toon textures, rigid bodies, joints).
    NOTE(review): the original docstring mentioned io/end/pos attributes
    that do not exist on this class -- replaced with the real contents.
    """
    __slots__=[
            'path',
            'version', 'name', 'comment',
            'english_name', 'english_comment',
            'vertices', 'indices', 'materials', 'bones',
            'ik_list', 'morphs',
            'morph_indices', 'bone_group_list', 'bone_display_list',
            'toon_textures',
            'rigidbodies', 'joints',
            'no_parent_bones',
            ]
    def __init__(self, version=1.0):
        self.path=b''
        self.version=version
        self.name=b''
        self.comment=b''
        self.english_name=b''
        self.english_comment=b''
        self.vertices=[]
        self.indices=[]
        self.materials=[]
        self.bones=[]
        self.ik_list=[]
        self.morphs=[]
        self.morph_indices=[]
        self.bone_group_list=[]
        self.bone_display_list=[]
        # extend: PMD extension sections (ten toon texture slots + physics)
        self.toon_textures=[b'']*10
        self.rigidbodies=[]
        self.joints=[]
        # inner use: cache of bones without a parent (tree roots)
        self.no_parent_bones=[]
    def each_vertex(self): return self.vertices
    def getUV(self, i): return self.vertices[i].uv
    def __str__(self):
        return '<pmd-%g, "%s" vertex: %d, face: %d, material: %d, bone: %d ik: %d, skin: %d>' % (
            self.version, self.name, len(self.vertices), len(self.indices),
            len(self.materials), len(self.bones), len(self.ik_list), len(self.morphs))
    def __eq__(self, rhs):
        # path, version and no_parent_bones are deliberately excluded:
        # only the serialized model content is compared.
        return (
                self.name==rhs.name
                and self.comment==rhs.comment
                and self.english_name==rhs.english_name
                and self.english_comment==rhs.english_comment
                and self.vertices==rhs.vertices
                and self.indices==rhs.indices
                and self.materials==rhs.materials
                and self.bones==rhs.bones
                and self.ik_list==rhs.ik_list
                and self.morphs==rhs.morphs
                and self.morph_indices==rhs.morph_indices
                and self.bone_group_list==rhs.bone_group_list
                and self.bone_display_list==rhs.bone_display_list
                and self.toon_textures==rhs.toon_textures
                and self.rigidbodies==rhs.rigidbodies
                and self.joints==rhs.joints
                )
    def diff(self, rhs):
        self._diff(rhs, "name")
        self._diff(rhs, "english_name")
        # Comments and raw geometry stay excluded from the diff, as in the
        # original implementation.
        #self._diff(rhs, "comment")
        #self._diff(rhs, "english_comment")
        #self._diff_array(rhs, "vertices")
        #self._diff_array(rhs, "indices")
        self._diff_array(rhs, "materials")
        self._diff_array(rhs, "bones")
        self._diff_array(rhs, "morphs")
        self._diff_array(rhs, "morph_indices")
        self._diff_array(rhs, "bone_group_list")
        # bone_display_list entries may be stored in any order; compare them
        # sorted by their first element (the bone index).
        for i, (l, r) in enumerate(zip(
            sorted(self.bone_display_list, key=lambda e: e[0]),
            sorted(rhs.bone_display_list, key=lambda e: e[0]))):
            if l!=r:
                raise common.DifferenceException("{0}: {1}-{2}".format(i, l, r))
        self._diff_array(rhs, "toon_textures")
        self._diff_array(rhs, "rigidbodies")
        self._diff_array(rhs, "joints")
| true |
78732b1e6d0cb63dad8bc25970265d2baa93284c | Python | alexandraback/datacollection | /solutions_5652388522229760_0/Python/EnigmaTwist/codejamSheep.py | UTF-8 | 813 | 2.9375 | 3 | [] | no_license |
import sys
with open(sys.argv[1]) as f:
flines = [x.strip() for x in f.readlines()]
if len(flines) != int(flines[0])+1:
print("Error! First line isn't equal to number of other lines?")
print(len(flines))
print(int(flines[0])+1)
sys.exit(1)
inputNums = [int(x) for x in flines[1:]]
outf = open(sys.argv[2],"w")
for (e,n) in enumerate(inputNums):
if n==0:
outf.write("Case #{0}: {1}\n".format(str(e+1), "INSOMNIA"))
continue
seenDigits = set()
lastNum = n
seenDigits |= set(str(lastNum))
templist = [n]
while len(seenDigits)<10:
lastNum += n
seenDigits |= set(str(lastNum))
templist.append(int(lastNum))
#outf.write("Case #{0}: {1} {2}\n".format(e+1, lastNum, templist))
outf.write("Case #{0}: {1}\n".format(e+1, lastNum))
outf.close()
sys.exit(0)
| true |
7b42f540fe23acdc7d2a09d85c32eb8b2b0d97bd | Python | SensenLiu123/Lintcode | /512.py | UTF-8 | 1,282 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: sensenliu
"""
class Solution:
    """
    @param s: a string, encoded message
    @return: an integer, the number of ways decoding
    """
    def numDecodings(self, s):
        # Classic "decode ways" DP with two rolling counters instead of a
        # full table: two_back == dp[i-2], one_back == dp[i-1].
        # A single digit decodes unless it is '0'; a digit pair decodes
        # when it reads as 10..26.
        if len(s) == 0:
            return 0
        two_back, one_back = 0, 1
        for i in range(1, len(s) + 1):
            current = 0
            if s[i - 1] != '0':
                current += one_back
            if i >= 2 and 10 <= int(s[i - 2:i]) <= 26:
                current += two_back
            two_back, one_back = one_back, current
        return one_back
| true |
36b16ddf3996c5ca1ed5daf06790e68aea936421 | Python | guilhermebsa/data-engineering-databricks | /producer/reviews/libs/read_files.py | UTF-8 | 1,031 | 3.28125 | 3 | [] | no_license | """
filename: read_files.py
name: read_files
description:
this is the function responsible to read files and perform some enhancements on the data.
use pandas and numpy to read data from a csv file and format to a dictionary
"""
# import libraries
import pandas as pd
from configs import config
# pandas display config: widen the limits so debug prints show whole frames
# instead of being truncated.
pd.set_option('display.max_rows', 100000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class CSV:
    """Reader for the reviews dataset configured in `configs.config`."""

    def __init__(self):
        # Path to the reviews CSV file, taken from the project configuration.
        self.ds_reviews = config.ds_reviews

    def csv_reader(self, gen_dt_rows):
        """Load the first `gen_dt_rows` reviews and return them as a list of dicts.

        Column names are normalised (trimmed, lower-cased, spaces -> underscores,
        parentheses removed) before the relevant columns are selected.
        """
        frame = pd.read_csv(self.ds_reviews)
        frame.columns = (
            frame.columns.str.strip()
            .str.lower()
            .str.replace(' ', '_')
            .str.replace('(', '')
            .str.replace(')', '')
        )
        wanted = ['review_id', 'business_id', 'user_id', 'stars', 'useful', 'date']
        subset = frame[wanted].head(gen_dt_rows)
        return subset.to_dict('records')
| true |
f0aa41d211f3014b0302747fd0798feecc684914 | Python | kushcfc/NetMiko | /cmd_output_to_file.py | UTF-8 | 1,943 | 2.59375 | 3 | [
"Unlicense"
] | permissive | from netmiko import Netmiko
from netmiko.ssh_exception import NetMikoAuthenticationException, NetMikoTimeoutException
from getpass import getpass
from pprint import pprint
import signal
import os
from queue import Queue
import threading
# Target device IPs, one per line.  The with-block closes the file afterwards
# (the original left the file handle open for the life of the process).
with open('ListOfIPs.txt') as ip_addrs_file:
    ip_addrs = ip_addrs_file.read().splitlines()

num_threads = 8                    # size of the worker-thread pool
enclosure_queue = Queue()          # work queue of IP addresses
print_lock = threading.Lock()      # serialises console output across threads
command = "sh ver"                 # CLI command executed on every device
def deviceconnector(i, q):
    """Worker thread body: pull IPs off queue `q`, run `command` on each
    device over SSH, and save the output to <ip>_config.txt.

    `i` is the worker's index, used only for log messages.  Runs forever;
    the thread is expected to be a daemon.
    """
    while True:
        print("{}: Waiting for IP address...".format(i))
        ip = q.get()
        print("{}: Acquired IP: {}".format(i, ip))

        device_dict = {
            'host': ip,
            'username': 'user',
            'password': 'pass',
            'device_type': 'cisco_ios',
            'secret': 'secret'
        }

        try:
            net_connect = Netmiko(**device_dict)
        except NetMikoTimeoutException:
            with print_lock:
                print("\n{}: ERROR **** Connection to {} timed-out.\n".format(i, ip))
            q.task_done()
            continue
        except NetMikoAuthenticationException:
            with print_lock:
                print("\n{}: ERROR **** Authentication failed for {}. Stopping script. \n".format(i, ip))
            q.task_done()
            # BUG FIX: without this `continue` execution fell through to
            # send_command() with `net_connect` unbound, raising NameError.
            continue

        output = net_connect.send_command(command)
        with print_lock:
            print("{}: Printing ...".format(i))
            pprint(output)
            # One output file per device; `with` guarantees it is closed
            # (and avoids shadowing the `file` builtin).
            with open(ip + '_config.txt', 'w') as out_file:
                out_file.write(output)
        # BUG FIX: `net_connect.disconnect` was a bare attribute access and
        # never actually closed the SSH session.
        net_connect.disconnect()
        q.task_done()
def main():
    """Spin up the worker pool, enqueue every IP, and wait for completion."""
    for i in range(num_threads):
        thread = threading.Thread(target=deviceconnector, args=(i, enclosure_queue,))
        # Daemon threads let the process exit even though workers loop forever.
        # Thread.setDaemon() is deprecated in favour of the attribute.
        thread.daemon = True
        thread.start()

    for ip_addr in ip_addrs:
        enclosure_queue.put(ip_addr)

    # Block until every queued IP has been processed (task_done() called).
    enclosure_queue.join()
# NOTE(review): this print runs at import time as well, not only when the
# script is executed directly.
print("**** End ****")
if __name__ == '__main__':
    main()
| true |
c8d827cbc6e24bb8992f4d4f1a903f5c9f974350 | Python | kalnaasan/university | /Programmieren 1/EPR/Übungen/Übung_01/ALnaasan_Kaddour_0016285_1.1.b.py | UTF-8 | 200 | 3.359375 | 3 | [
"MIT"
] | permissive | __author__ = "0016285: Kaddour Alnaasan"
# Aufgabe 1.1 B: demonstrate string repetition with the * operator.
a = "Text"
b = 2
c = 'Text*2'
# Expected output: Text*2 = TextText = 2*Text
print(c,"=", a * b,"=", "2" + "*" + a)
0b37823101ce00168e1922a9d0b74ec6efc5796e | Python | SanjeevKumarPrajapati/ERP | /Circular.py | UTF-8 | 3,989 | 2.6875 | 3 | [] | no_license | from tkinter import *
import tkinter
from PIL import ImageTk, Image
import os
import pyfiglet
from tkinter import messagebox
# Main application window: fixed 1370x700 size (min == max disables resizing).
a=Tk()
a.title("Circular")
a.iconbitmap("Quantum-logo.ico")#for icon
a.minsize(1370,700)
a.maxsize(1370,700)
def home():
    """Launch the home page screen as a separate Python process."""
    cmd = "python Homepage.py"
    os.system(cmd)
def ana():
    """Run the three analysis screens one after another."""
    for script in ("Analysis1.py", "Analysis2.py", "Analysis3.py"):
        os.system("python " + script)
def circular():
    """Print the circular notice board to the console: an ASCII-art banner
    followed by a fixed table of current circulars (subject, validity dates,
    issuer)."""
    logo=pyfiglet.figlet_format("Circular")
    print(logo)
    print("\n\t<------- Circular Details --------->")
    print("=======================================================================================================================================")
    print("      Subject                                                                         Date Form           Date To            Circular By")
    print("=======================================================================================================================================")
    print("\n")
    # Hard-coded circular entries (static data, not fetched from anywhere).
    print("1.)  Transport Notice for 1st Year student Only                                       22/12/2020          31/12/2020          RAVINDER GIRI")
    print("2.)  Pin click will be conducting drive for post Graduates and under Graduates        22/12/2020          31/12/2020          RAVINDER GIRI")
    print("3.)  Diploma 1st year New Time Table Effective From 23.12.2020                        22/12/2020          31/12/2020          RAVINDER GIRI")
    print("4.)  Pharmacy 1st year New Time Table Efffective From23.12.2020                       22/12/2020          31/12/2020          RAVINDER GIRI")
    print("5.)  Nutrition & Dietetics 1st year New Time Table Effective From 23.12.2020          22/12/2020          31/12/2020          RAVINDER GIRI")
    print("6.)  MCA 1st year New Time Table Effective Form23.12.2020                             22/12/2020          31/12/2020          RAVINDER GIRI")
    print("7.)  BMRIT 1st year New Time Table Effective Form23.12.2020                           22/12/2020          31/12/2020          RAVINDER GIRI")
    print("8.)  BJMC 1st year New Time Table Effective Form23.12.2020                            22/12/2020          31/12/2020          RAVINDER GIRI")
    print("9.)  BCA 1st year New Time Table Effective Form23.12.2020                             22/12/2020          31/12/2020          RAVINDER GIRI")
    print("10.) B.TECH 1st year New Time Table Effective Form23.12.2020                          22/12/2020          31/12/2020          RAVINDER GIRI")
    print("11.) B.Sc PCM 1st year New Time Table Effective Form23.12.2020                        22/12/2020          31/12/2020          RAVINDER GIRI")
    print("=======================================================================================================================================")
# --- University logo, top-left corner ---
# Keeping a reference on the label (label1.image = test) prevents the
# PhotoImage from being garbage-collected while the window is alive.
image1 = Image.open('qulogo.jpg').resize((350,60))
test = ImageTk.PhotoImage(image1)
label1 = tkinter.Label(image=test)
label1.image = test
label1.place(x=0, y=0)
# --- Analysis icon + button ---
image1 = Image.open('analysis.jpg').resize((40,40))
test = ImageTk.PhotoImage(image1)
label1 = tkinter.Label(image=test)
label1.image = test
label1.place(x=900, y=10)
btn=Button(a,text="Analysis",bg="white",font=("Arial",8,"bold"),fg="#007FFF",command=ana).place(x=891,y=56)
# --- Home icon + button ---
image1 = Image.open('home.jpg').resize((50,40))
test = ImageTk.PhotoImage(image1)
label1 = tkinter.Label(image=test)
label1.image = test
label1.place(x=980, y=10)
btn=Button(a,text="Home",bg="white",font=("Arial",8,"bold"),fg="#007FFF",command=home).place(x=986,y=56)
# --- Logged-in user name and avatar, top-right corner ---
lb=Label(a,text="SANJEEV KUMAR PRAJAPATI",font=("Arial",12,"bold")).place(x=1050,y=20)
image1 = Image.open('myphoto.jpg').resize((40,40))
test = ImageTk.PhotoImage(image1)
label1 = tkinter.Label(image=test)
label1.image = test
label1.place(x=1300, y=10)
# --- Main "Circular" action button and hint label ---
btn=Button(a,text="Circular",bd=7,bg="white",font=("Arial",15,"bold"),fg="#007FFF",command=circular).place(x=90,y=80)
lb=Label(a,text="Cyborg-ERP : Please Select Menu From Menu Bar.",font=("Arial",14,"bold"),fg="#007FFF").place(x=380,y=100)
a.mainloop()
| true |
bce1af94397ca2982e785ee4d894340bb11db462 | Python | johanvergeer/python-design-patterns | /python_design_patterns/solid/interface_segregation_principle_after.py | UTF-8 | 506 | 3.203125 | 3 | [
"MIT"
] | permissive | from abc import ABC, abstractmethod
class ICanFly(ABC):
    """Capability interface for things that can fly."""

    @abstractmethod
    def fly(self) -> None:
        """Perform flight; supplied by concrete implementers."""
class ICanEat(ABC):
    """Capability interface for things that can eat."""

    @abstractmethod
    def eat(self) -> None:
        """Consume food; supplied by concrete implementers."""
class ICanBreathFire(ABC):
    """Capability interface for things that can breathe fire."""

    @abstractmethod
    def breath_fire(self) -> None:
        # BUG FIX: this interface used to declare `fly` (a copy-paste from
        # ICanFly), which defeated the point of the interface-segregation
        # example; Dragon's breath_fire() implements this method.
        ...
class Dragon(ICanFly, ICanEat, ICanBreathFire):
    """A dragon satisfies all three capability interfaces."""

    def _announce(self, action: str) -> None:
        # Shared sink for the demo output.
        print(action)

    def fly(self) -> None:
        self._announce("Flying")

    def eat(self) -> None:
        self._announce("Eating")

    def breath_fire(self) -> None:
        self._announce("Breating fire")
| true |
def formatr(item, price):
    """Print one receipt line: `item` padded with dots to 15 characters,
    `price` right-aligned in an 8-wide field with 2 decimals.

    (Header line reconstructed; it was corrupted by a metadata row fused
    onto the `def` line.)
    """
    line = "{:.<15} {:8.2f}".format(item, price)
    print(line)
# Collect four item names and prices from the user.
item1 = input("please enter item1: ")
price1 = float(input("please enter the price: "))
item2 = input("please enter item2:")
price2 = float(input("please enter the price: "))
item3 = input("please enter item3:")
price3 = float(input("please enter the price: "))
item4 = input("please enter item4:")
price4 = float(input("please enter the price: "))
print("Subtotal: ..... ", (price1 + price2 + price3 + price4))
subtotal = (price1+price2+price3+price4)
print("<<<<<<<Receipt>>>>>>")
formatr(item1, price1)
formatr(item2, price2)
formatr(item3, price3)
formatr(item4, price4)
# 15% discount on subtotals above 2000.
# NOTE(review): subtotal == 2000 hits neither branch; the pre-initialised 0
# makes that harmless.
discount = 0;
if subtotal>2000:
    discount = 0.15 * subtotal
if subtotal<2000:
    discount = 0
#print("Discount: .....", discount)
formatr("Discount",discount)
# 8.72% sales tax, applied to the undiscounted subtotal.
#print("tax:.....", (subtotal*.0872))
tax = (subtotal*.0872)
formatr("tax",tax)
total = (subtotal - discount + tax)
#print("total:.....",total)
formatr("total",total)
print("*Thank you for your support*")
| true |
3588cb513472044dbe633b0f8173b1e29b0266c2 | Python | noagarcia/context-art-classification | /model_mtl.py | UTF-8 | 1,076 | 2.796875 | 3 | [] | no_license | import torch.nn as nn
from torchvision import models
class MTL(nn.Module):
    """Multi-task art classifier: a shared ResNet-50 trunk feeding four
    independent linear heads (type, school, timeframe, author).

    `num_class` is a sequence with the four per-task class counts.
    """

    def __init__(self, num_class):
        super(MTL, self).__init__()
        # ImageNet-pretrained ResNet-50 with its final FC layer removed;
        # what remains ends in global average pooling (2048-dim features).
        backbone = models.resnet50(pretrained=True)
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])
        # One linear classifier per task.
        self.class_type = nn.Sequential(nn.Linear(2048, num_class[0]))
        self.class_school = nn.Sequential(nn.Linear(2048, num_class[1]))
        self.class_tf = nn.Sequential(nn.Linear(2048, num_class[2]))
        self.class_author = nn.Sequential(nn.Linear(2048, num_class[3]))

    def forward(self, img):
        features = self.resnet(img)
        features = features.view(features.size(0), -1)
        return [
            self.class_type(features),
            self.class_school(features),
            self.class_tf(features),
            self.class_author(features),
        ]
d45223e2d42c8a5a845e1dd4495f927a659bc03e | Python | KeithWM/conditioned | /first.py | UTF-8 | 2,820 | 3.015625 | 3 | [] | no_license | import scipy
import scipy.fftpack
from matplotlib import pyplot as plt
import seaborn as sns
# Global plot styling.
sns.set_style("whitegrid")
sns.set_context("poster", font_scale=2)
plt.close('all')
# Discretisation and model parameters.
N = 1000
pi = scipy.pi # pi, 1/2 of the ratio between the circumference and radius of a circle
sigma = 1 # noise
T = 1 # end time
tau = T/float(N) # real time step
upsilon = 1.e-4 # algorithmic time step
ts = scipy.linspace(0, T, N+1)
f = scipy.zeros((N+1,))
f_hat = scipy.zeros((N,))
xL = -0#1. # left BC (set to +-1. to condition on nonzero endpoints)
xR = +0#1. # right BC
"""
with these BCs, the zeroth eigenfunction is the particular solution
xL*cos(t/T(2*n+1)*pi) + xR*cos((T-t)/T(2*n+1)*pi)
with lambda_0 = (pi/(2*sigma*T))**2
"""
# Spectral eigenvalues; index 0 is special-cased to carry the boundary
# conditions (see the note above).
eigenvalues = scipy.arange(N)**2*pi**2/(sigma*T)**2
eigenvalues[0] = (pi/(2*sigma*T))**2
x_particular = xL * scipy.cos(ts / (2 * T) * pi) + xR * scipy.cos((T - ts) / (2 * T) * pi)
qs = scipy.ones((N,)) # qs determine the cylindrical Brownian motion
"""
Now follow some convenient preliminary computations
"""
# Exponential-Euler coefficients, precomputed once per eigenmode.
coeff1 = scipy.exp(-eigenvalues*upsilon)
coeff2 = (1-coeff1)/eigenvalues
coeff3 = scipy.sqrt(qs/(2*eigenvalues)*(1 - scipy.exp(-2*eigenvalues*upsilon)))*N # factor N to account for fftpack.dst
def transform(x, x_hat):
    """Forward DST-I of x's deviation from the particular solution.

    Writes the spectral coefficients into x_hat in place (mode 0 untouched).
    """
    deviation = x[1:-1] - x_particular[1:-1]
    x_hat[1:] = scipy.fftpack.dst(deviation, type=1)
def inverseTransform(x, x_hat):
    """Inverse of `transform`: rebuild x from its DST coefficients, in place."""
    x[:] = x_particular
    # factor 2N normalises fftpack's unnormalised idst
    # (original author's note: "seems to be wrong by a factor 2 :-S")
    x[1:-1] += scipy.fftpack.idst(x_hat[1:], type=1) / (2 * N)
def V(x):
    """Double-well potential with minima at x = -1 and x = +1."""
    numerator = (x - 1) ** 2 * (x + 1) ** 2
    return numerator / (1 + x ** 2)
def g(x):
    """Negative gradient of the potential: g(x) = -V'(x)."""
    denom = (1 + x ** 2) ** 2
    return x * (8 / denom - 2)
def dg(x):
    """First derivative g'(x) (algebraically simplified form)."""
    u = 1 + x ** 2
    return 8 / u ** 2 * (1 - 4 * x ** 2 / u) - 2
def ddg(x):
    """Second derivative g''(x)."""
    u = 1 + x ** 2
    return -96 * x / u ** 3 + 192 * x ** 3 / u ** 4
def f_function(x):
    """The nonlinear drift f, evaluated pointwise in physical space.

    Uses the module-level noise amplitude `sigma`.
    """
    gradient_part = -1 / sigma ** 2 * g(x) * dg(x)
    return gradient_part - 0.5 * ddg(x)
def fN(x, x_hat, f, f_hat):
    """Evaluate the nonlinearity in spectral space: x is rebuilt from x_hat,
    f_function is applied pointwise, and the result is transformed into f_hat.

    NOTE(review): `f` is rebound locally, so the caller's `f` array is NOT
    updated; only `x` and `f_hat` are modified in place.
    """
    inverseTransform(x, x_hat)
    physical = f_function(x)
    transform(physical, f_hat)
def stepExpEuler(x, x_hat, f, f_hat):
    """One exponential-Euler step in spectral space; returns the new x_hat.

    Uses the precomputed module-level coefficients coeff1..coeff3; mode 0 is
    pinned to zero (it carries the boundary conditions).
    """
    fN(x, x_hat, f, f_hat)
    noise = scipy.random.normal(size=(N,))
    x_hat = coeff1 * x_hat + coeff2 * f_hat + coeff3 * noise
    x_hat[0] = 0
    return x_hat
# Initial condition: linear interpolation between the boundary values.
x0 = scipy.zeros((N+1,))
x0_hat = scipy.zeros((N,))
# x0 = scipy.linspace(2, 0, N+1)
x0 = scipy.linspace(xL, xR, N+1)
# # x0+= scipy.sin(ts/T*pi)
# x0[0] = xL
# x0[-1] = xR
transform(x0, x0_hat)
# x0_hat = N/scipy.arange(N, dtype=float)
# x0_hat[0] = 0
inverseTransform(x0, x0_hat)
x = x0.copy()
x_hat = x0_hat.copy()
# Algorithmic times at which the current path is plotted (0 plus 10^-3..10^-1).
# alg_times = scipy.arange(0, 10.01, 1.)
alg_times = scipy.insert(scipy.power(10, scipy.arange(-3, 0, 1)), 0, 0)
n = 0
# Main sampling loop: one exponential-Euler step per algorithmic time step.
# NOTE(review): the `print s` statement below means this script targets Python 2.
for s in scipy.arange(0, alg_times[-1]+.5*upsilon, upsilon):
    if s >= alg_times[n]:
        plt.plot(ts, x, '.')
        n += 1
        print s
    x_hat = stepExpEuler(x, x_hat, f, f_hat)
    inverseTransform(x, x_hat)
plt.show() | true |
3a4c61a77de20b307b23e776c40b93c9d8fb7613 | Python | sukhvir786/Python-Day-8 | /SET_4.py | UTF-8 | 185 | 3.484375 | 3 | [] | no_license | """
remove and discard method in sets
"""
A = set()
# Build the demo set; update() adds several members in one call.
A.update((7, 3, 1, 9))
print("A:",A)
# set.remove(11) would raise KeyError here; discard() is the forgiving variant.
A.discard(11)
print("A:",A)
# Discarding a present element removes it, just like remove().
A.discard(1)
print("A:",A) | true |
8631cc7e996038efb20c7d46663c2e081a0d491f | Python | lorenzo-bioinfo/ms_data_analysis | /scripts/12_cyt_groups_analysis.py | UTF-8 | 4,216 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from scipy.cluster import hierarchy
# Load the cytokine groupings of each of the 7 clusters: every file lists
# cytokine names, with '+' lines separating the groups.
colmap = ['darkgrey', 'darkgreen', 'navy']
clusters = []
for i in range(0, 7):
    with open('./data/cluster_groups/cyt_groups{}.txt'.format(i), 'r') as f:
        cluster = []
        group = []
        for line in f:
            clean_line = line.strip()
            if clean_line == '+':
                cluster.append(group)
                group = []
            else:
                group.append(clean_line)
        # drop the first (empty) group produced by the leading '+' marker
        cluster.pop(0)
        clusters.append(cluster)
# The 27 cytokines, in raw order and in a manually curated order.
cyt_list = 'IL1B,IL2,IL4,IL5,IL6,IL7,CXCL8,IL10,IL12B,IL13,IL17A,CSF3,CSF2,IFNG,CCL2,CCL4,TNF,IL1RN,IL9,IL15,CCL11,FGF2,CXCL10,PDGFB,CCL5,VEGFA,CCL3'.split(',')
cyt_ord = ['IL1B', 'IL2', 'IL4', 'IL6', 'IL7', 'IL12B', 'IL17A', 'CSF2', 'IL15', 'FGF2', 'PDGFB', 'VEGFA', 'CCL3', 'CSF3', 'IL5', 'IL1RN', 'CXCL8', 'IL10', 'IL13', 'CCL11', 'CXCL10', 'CCL5', 'IL9', 'TNF', 'CCL4', 'CCL2', 'IFNG']
#cyt_ord = ['IL1B', 'IL2', 'IL4', 'IL6', 'IL7', 'IL5', 'IL1RN', 'CXCL8', 'IL10', 'IL13', 'IL12B', 'IL17A', 'CSF2', 'IL15', 'FGF2', 'PDGFB', 'VEGFA', 'CCL3', 'CSF3', 'CCL11', 'CXCL10', 'CCL5', 'IL9', 'TNF', 'CCL4', 'CCL2', 'IFNG']
# Build a 27x27 matrix counting how many times each cytokine pair landed in
# the same group.
# NOTE(review): every row of zero_matrix is the SAME list object; this is
# harmless only because pd.DataFrame copies the data.
zero_matrix = []
line = [0] * 27
for i in range(27):
    zero_matrix.append(line)
matrix = pd.DataFrame(zero_matrix, index = cyt_list, columns = cyt_list)
for cluster in clusters:
    for group in cluster:
        for el in group:
            for cyt in cyt_list:
                if cyt in group:
                    # chained indexing (matrix[el][cyt]) - works here but is
                    # deprecated pandas style
                    matrix[el][cyt] += 1
# Export the co-occurrence matrix.
matrix.to_csv('./data/cluster_groups/occ_matrix.tsv', sep = '\t')
# Heatmap of the raw co-occurrence matrix.
sns.heatmap(matrix, cmap = 'mako')
#plt.savefig('./data/cluster_groups/heatmaps/occ_hm.png', dpi = 300)
plt.clf()
# Same heatmap with the cytokines reordered by cyt_ord.
matrix_ord = matrix.reindex(index = cyt_ord, columns = cyt_ord)
sns.heatmap(matrix_ord, cmap = 'mako')
#plt.savefig('./data/cluster_groups/heatmaps/occ_hm_ord.png', dpi = 300)
plt.clf()
# Hierarchical clustering of the co-occurrence matrix (Ward / Euclidean).
cluster_col = hierarchy.linkage(matrix.T, method="ward", metric="euclidean")
cluster_row = hierarchy.linkage(matrix, method="ward", metric="euclidean")
clusterfig = sns.clustermap(matrix, row_linkage = cluster_row, col_linkage = cluster_col)
index_col = clusterfig.dendrogram_col.reordered_ind
index_row = clusterfig.dendrogram_row.reordered_ind
plt.title('Cyt Clustering')
plt.savefig('./data/cluster_groups/heatmaps/occ_cluster.png', dpi = 300)
plt.clf()
# Build a 27x27 matrix of cumulative positional distances between cytokines
# in the flattened (ordered) group list of each cluster.
zero_matrix = []
for i in range(27):
    zero_matrix.append(line)
matrix = pd.DataFrame(zero_matrix, index = cyt_list, columns = cyt_list)
# Flatten each cluster's groups into one ordered cytokine list.
ordered_lists = []
for cluster in clusters:
    ordered_list = []
    for group in cluster:
        ordered_list.extend(group)
    ordered_lists.append(ordered_list)
for ol in ordered_lists:
    for cyt_a in cyt_list:
        for cyt_b in cyt_list:
            dist = abs(ol.index(cyt_b) - ol.index(cyt_a))
            matrix[cyt_a][cyt_b] += dist
# Divide the cumulative distances by the number of clusters (7) to get means.
def dividi(x):
    """Divide by the number of clusters (7), turning sums into means."""
    return x / 7
# Mean-distance matrix and its visualisations.
matrix_ok = matrix.applymap(dividi)
matrix_ok.to_csv('./data/cluster_groups/dist_matrix.tsv', sep = '\t')
sns.heatmap(matrix_ok, cmap = 'mako')
#plt.savefig('./data/cluster_groups/heatmaps/dist_hm.png', dpi = 300)
plt.clf()
# NOTE(review): this reorders the *cumulative* `matrix`, not the averaged
# `matrix_ok` - possibly intentional (same pattern, scaled), but verify.
matrix_ord = matrix.reindex(index = cyt_ord, columns = cyt_ord)
sns.heatmap(matrix_ord, cmap = 'mako')
#plt.savefig('./data/cluster_groups/heatmaps/dist_hm_ord.png', dpi = 300)
plt.clf()
# Hierarchical clustering of the mean-distance matrix.
cluster_col = hierarchy.linkage(matrix_ok.T, method="ward", metric="euclidean")
cluster_row = hierarchy.linkage(matrix_ok, method="ward", metric="euclidean")
clusterfig = sns.clustermap(matrix_ok, row_linkage = cluster_row, col_linkage = cluster_col)
index_col = clusterfig.dendrogram_col.reordered_ind
index_row = clusterfig.dendrogram_row.reordered_ind
plt.title('Cyt Clustering')
plt.savefig('./data/cluster_groups/heatmaps/dist_cluster.png', dpi = 300)
plt.clf() | true |
5d3de4e78b5df35ea714c281d295be2e2a066f6f | Python | siddushan/proj_euler | /problem17.py | UTF-8 | 1,442 | 3.75 | 4 | [] | no_license | # problem 17
def number_to_word(number):
    """Spell out 0 <= number < 1,000,000 in British English words
    (e.g. 342 -> 'three hundred and forty two')."""
    words = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
             6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',
             11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen',
             16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen', 20: 'twenty',
             30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy',
             80: 'eighty', 90: 'ninety'}
    thousand = 1000
    million = thousand * 1000
    assert 0 <= number
    if number < 20:
        return words[number]
    if number < 100:
        tens, units = divmod(number, 10)
        if units == 0:
            return words[number]
        return words[tens * 10] + ' ' + words[units]
    if number < thousand:
        hundreds, rest = divmod(number, 100)
        if rest == 0:
            return words[hundreds] + ' hundred'
        return words[hundreds] + ' hundred and ' + number_to_word(rest)
    if number < million:
        thousands, rest = divmod(number, thousand)
        if rest == 0:
            return number_to_word(thousands) + ' thousand'
        return number_to_word(thousands) + ' thousand ' + number_to_word(rest)
# Project Euler 17: count the letters in the words for 1..1000.
nums = list()
for i in range(1, 1001):
    word = number_to_word(i)
    nums.append(word)
all_nums = ''.join(nums) # puts all the numbers together
all_nums = ''.join(all_nums.split()) # removes all the white space
# NOTE(review): Python 2 print statement - this script targets Python 2.
print len(all_nums)
| true |
83b019e39fb3e8699de205338352d85f01b008b7 | Python | abachi/codeforces | /bear-and-big-brother-600.py | UTF-8 | 172 | 3.828125 | 4 | [] | no_license |
# Codeforces "Bear and Big Brother": Limak's weight (a) triples every year
# while Bob's (b) doubles; count the years until Limak is strictly heavier.
tokens = input().split(' ')
a = int(tokens[0])
b = int(tokens[1])
years = 0
while a <= b:
    a, b = a * 3, b * 2
    years += 1
print(years)
| true |
22e8865654d182fcc30f7cac8e5d977ef429b4ca | Python | LeiG/imgScraper | /imageScraper/defTable.py | UTF-8 | 1,012 | 3.109375 | 3 | [] | no_license | """
Define SQLite database to store images.
"""
from sqlalchemy import create_engine
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
# Declarative base class that the ORM models (Image) inherit from.
Base = declarative_base()
class Image(Base):
    '''
    Image (object): one scraped product-image record.
    --------------
    id: identifier (Integer, primary key)
    brand: brand name (String)
    category: product category (String)
    code: product code (String)
    sourceUrl: source url (String)
    eventDate: date retrieved (Date)
    price: regular price (Integer)
    salePrice: sale price (Integer)
    imagePath: local path to the downloaded image (String)
    '''
    __tablename__ = "images"
    id = Column(Integer, primary_key = True)
    brand = Column(String)
    category = Column(String)
    code = Column(String)
    sourceUrl = Column(String)
    eventDate = Column(Date)
    price = Column(Integer)
    salePrice = Column(Integer)
    imagePath = Column(String)
def create_db():
    """Create all declared tables in the local SQLite database images.db
    (echo=True logs the emitted SQL)."""
    db_url = 'sqlite:///images.db'
    engine = create_engine(db_url, echo=True)
    Base.metadata.create_all(engine)
| true |
d337c34a9525edad9310df2720dd4486b9986dc8 | Python | emmano3h/bazel | /src/tools/xcode/swiftstdlibtoolwrapper/swift_stdlib_tool.py | UTF-8 | 2,312 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | # pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tool to find Swift runtime libraries required by a binary.
This tool is modeled after Xcode's swift-stdlib-tool. Given a binary, it
scans its transitive dylib dependencies to figure out the full set of Swift
runtime libraries (usually named libswift*.dylib) required to run the binary.
The libraries are then copied into the output directory.
This tool is used by the Apple packaging rules to properly construct macOS, iOS,
watchOS and tvOS app bundles.
Usage:
swift-stdlib-tool.py BINARY_TO_SCAN PLATFORM_DIRECTORY OUTPUT_PATH
"""
import os
import shutil
import sys
from macholib.MachO import MachO
def dylib_full_path(platform_dir, relative_path):
  """Return a normalized, absolute path to a platform dylib.

  Args:
    platform_dir: path to the platforms directory in the Swift toolchain.
    relative_path: path to a dylib relative to that directory.
  """
  combined = os.path.join(platform_dir, relative_path)
  return os.path.abspath(combined)
def main():
  """Walk binary_path's Mach-O dependencies, collect every @rpath libswift*
  dylib (transitively), and copy them into the output directory.

  argv: [1] binary to scan, [2] platform dylib directory, [3] output path.
  """
  binary_path = sys.argv[1]
  platform_dir = sys.argv[2]
  out_path = sys.argv[3]

  # We want any dylib linked against whose name starts with "libswift".
  rpath_prefix = "@rpath/"
  seen = set()
  queue = [binary_path]
  while queue:
    path = queue.pop()
    m = MachO(path)
    for header in m.headers:
      for _, _, other in header.walkRelocatables():
        if other.startswith(rpath_prefix + "libswift"):
          # BUG FIX: lstrip("@rpath/") strips a *character set*, not the
          # prefix, and would mangle any name starting with one of
          # '@rpath/' - slice the prefix off instead.
          full_path = dylib_full_path(platform_dir, other[len(rpath_prefix):])
          if full_path not in seen:
            queue.append(full_path)
            seen.add(full_path)

  for dylib in seen:
    shutil.copy(dylib, out_path)
# Script entry point.
if __name__ == "__main__":
  main()
| true |
764ebbca5a15eb3dfc57c544bfc460957e7b0ebf | Python | super1peng/Kaggle | /keras/classifier.py | UTF-8 | 1,464 | 3.03125 | 3 | [] | no_license | #coding:utf-8
import numpy as np
from keras.datasets import mnist # load the MNIST handwritten-digit dataset
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop
np.random.seed(1337) # for reproducibility
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# data pre-processing: flatten 28x28 images to 784-vectors, scale to [0, 1],
# one-hot encode the 10 digit labels
X_train = X_train.reshape(X_train.shape[0], -1) / 255. # normalize
X_test = X_test.reshape(X_test.shape[0], -1) / 255. # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# Alternative way of building a network: pass the layer list to Sequential.
model = Sequential([
    Dense(32, input_dim=784),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])
# Alternative way of supplying the optimizer: pass a configured instance.
rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Request extra metrics from compile() to see more than the loss.
model.compile(optimizer=rmsprop,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
#---------------------- training ---------------------------
print('Training ------------')
# Another way to train the model
model.fit(X_train, y_train, epochs=2, batch_size=32)
print('\nTesting ------------')
# Evaluate the model with the metrics we defined earlier
loss, accuracy = model.evaluate(X_test, y_test)
print('test loss: ', loss)
print('test accuracy: ', accuracy) | true |
f721249b074b375bc568ae9b00b532104d5b2b93 | Python | pasliwa/pyLassoNet | /design_matrix.py | UTF-8 | 7,638 | 2.734375 | 3 | [] | no_license | import itertools
import os
import pickle
import sys
from functools import partial
import cvxopt
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.interpolate import UnivariateSpline
where_to_save = "where_to_save"
# from sklearn.model_selection import ParameterGrid
def theta(conc, S, educts):
    """
    Mass-action reaction-rate features for one state vector.

    R_j(conc) = product_{i=1}^{n_s}(conc_i^{educt_{ij}})   (unit rate constants:
    no kinetic parameters k_j are applied here)

    :param conc: concentrations, shape (n_species,)
    :param S: stoichiometric matrix (product_{ij} - educt_{ij}),
        shape (n_species, n_reactions)
    :param educts: educt matrix, shape (n_species, n_reactions)
    :return: S scaled column-wise by each reaction's mass-action rate at
        `conc` (broadcast of the rate row over S's rows)
    """
    educts_per_reaction = educts.T
    # in every reaction j row, element at pos (column, species) i equals conc_i^educt_{ij}
    # educt_{ij} - how many units of i are educts in reaction j
    multiplicative_term_rows = np.power(conc, educts_per_reaction)
    # concentrations to educt product ----> product_{i=1}^{n_s}(conc_i^{educt_{ij}}
    concentration_product = np.prod(multiplicative_term_rows, axis=1)
    # returns #species rows with #reactions elements -> influences on given variable coming from each of the reactions
    return concentration_product * S
def read_model(input_file):
    """Parse a model description file.

    Expected layout: species labels, initial concentrations, n_species rows of
    the stoichiometric matrix, n_species rows of the educt matrix, kinetic
    parameters, and finally the end time and time step - all space-separated.
    """
    with open(input_file) as handle:
        labels = handle.readline().strip().split(" ")
        init_conc = np.array([float(tok) for tok in handle.readline().strip().split(" ")])
        n = len(labels)
        S_matrix = np.array(
            [[float(tok) for tok in handle.readline().strip().split(" ")] for _ in range(n)])
        educt_matrix = np.array(
            [[float(tok) for tok in handle.readline().strip().split(" ")] for _ in range(n)])
        kin_par = np.array([float(tok) for tok in handle.readline().strip().split(" ")])
        t_T, tau = [float(tok) for tok in handle.readline().strip().split(" ")]
    return labels, init_conc, S_matrix, educt_matrix, kin_par, t_T, tau
def ij(max_sum):
    """Yield every pair (i, j) with 0 < i + j <= max_sum, in row-major order."""
    for i in range(max_sum + 1):
        for j in range(max_sum + 1):
            total = i + j
            if 0 < total <= max_sum:
                yield (i, j)
def all_educts(num_species, max_sum_educts=2):
    """Return every educt vector (one entry per species) whose total
    stoichiometry does not exceed `max_sum_educts`, as a 2-D array."""
    candidates = itertools.product(range(max_sum_educts + 1), repeat=num_species)
    return np.array([vec for vec in candidates if sum(vec) <= max_sum_educts])
def give_stoichs(species, abs_sum_stoich_max=1):
    """
    Yield groups of admissible stoichiometric change vectors.

    Each yielded item is a list of the distinct permutations of a vector built
    from `plus_ones` entries of +1, `minus_ones` of -1, `plus_two` of +2,
    `minus_two` of -2, padded with zeros to length `species`.

    Admissibility: |net stoichiometric change| <= abs_sum_stoich_max, the
    non-zero entries fit into `species` slots, and not all entries are zero.
    NOTE(review): the counts of +-1 entries are capped at 2 and of +-2 entries
    at 1 (hard-coded range bounds) - presumably matching the educt/product
    sum constraints elsewhere in this module; confirm before generalising.

    :param species: number of species (vector length)
    :param abs_sum_stoich_max: bound on the absolute net change
    :return: generator of lists of tuples
    """
    for plus_ones in range(2 + 1):
        for minus_ones in range(2 + 1):
            for plus_two in range(1 + 1):
                for minus_two in range(1 + 1):
                    S_condition = (abs(
                        (-2 * minus_two) + (2 * plus_two) + (-1 * minus_ones) + (1 * plus_ones)) <= abs_sum_stoich_max)
                    species_condition = ((species - (plus_ones + minus_ones + plus_two + minus_two)) >= 0)
                    not_all_zeros = ((plus_ones + minus_ones + plus_two + minus_two) != 0)
                    if S_condition and species_condition and not_all_zeros: # S condition
                        yield list(set(itertools.permutations(
                            np.hstack((np.ones(plus_ones), -np.ones(minus_ones), 2 * np.ones(plus_two),
                                       -2 * np.ones(minus_two),
                                       np.zeros(species - (plus_ones + minus_ones + plus_two + minus_two)))))))
def give_flat_stoichs(n, max_absolute_stiochiometric=1):
    """Return all admissible stoichiometric change vectors for n species,
    flattened into one 2-D array.

    :param n: number of species
    :param max_absolute_stiochiometric: bound on the absolute net change,
        forwarded to give_stoichs (parameter name kept for compatibility)
    """
    # BUG FIX: the bound used to be silently ignored (give_stoichs was called
    # with its default); forward it so non-default values take effect.
    return np.array(
        [vec for group in give_stoichs(n, max_absolute_stiochiometric) for vec in group])
def products_from_educt_stoichs(educt, stoichs, max_sum_products=3, max_product_val=2):
    """Return the admissible product vectors reachable from `educt`.

    A candidate educt + change is kept when it is non-negative everywhere,
    its total stoichiometry is at most `max_sum_products`, and no single
    entry exceeds `max_product_val`.
    """
    kept = []
    for change in stoichs:
        candidate = educt + change
        admissible = (
            np.all(candidate >= 0)
            and np.sum(candidate) <= max_sum_products
            and np.all(candidate <= max_product_val)
        )
        if admissible:
            kept.append(candidate)
    return np.array(kept)
def ones_(n):
    """For every (plus, minus) count pair from ij(n), yield the distinct
    length-n vectors with that many +1 and -1 entries (zeros elsewhere)."""
    for plus, minus in ij(n):
        base = np.hstack((np.ones(plus), -np.ones(minus), np.zeros(n - (plus + minus))))
        yield list(set(itertools.permutations(base)))
def proposed_functions(num_species, max_sum_educts=2, max_sum_products=3, max_product_val=2,
                       max_absolute_stoichiometric=1):
    """Build the candidate educt matrix E and stoichiometric matrix S
    (each of shape (num_species, n_candidate_reactions)) enumerating every
    admissible reaction under the given bounds."""
    educt_vectors = all_educts(num_species, max_sum_educts)
    stoich_vectors = give_flat_stoichs(num_species, max_absolute_stoichiometric)
    educt_blocks = []
    stoich_blocks = []
    for educt in educt_vectors:
        products = products_from_educt_stoichs(educt, stoich_vectors, max_sum_products, max_product_val)
        # one candidate reaction per admissible product of this educt
        stoich_blocks.append(products - educt)
        educt_blocks.append(np.tile(educt, [len(products), 1]))
    E_matrix = np.vstack(educt_blocks).T
    S_matrix = np.vstack(stoich_blocks).T
    return E_matrix, S_matrix
def reactions_string(E_matrix, S_matrix):
    """Render the candidate reactions as human-readable text.

    Each reaction j prints as "Rj: educt --- stoich ---> product"; a dashed
    header line is inserted whenever the educt vector changes.
    """
    out = ""
    previous = np.array([-500] * E_matrix.shape[0])
    for j in range(E_matrix.shape[1]):
        educt = E_matrix[:, j]
        if not np.all(previous == educt):
            out += "\n\n---------" + str(educt) + "---------\n\n"
        previous = educt
        out += ("R" + str(j) + ": " + str(educt) + "\t --- " + str(S_matrix[:, j])
                + "\t--->\t " + str(educt + S_matrix[:, j]) + "\n")
    return out
def give_theta(X, max_sum_educts=2, max_sum_products=3, max_product_val=2,
               max_absolute_stoichiometric=1):
    """Design matrix of reaction-rate features: theta() evaluated at every
    time point of trajectory X, stacked along a new leading axis."""
    num_species = X.shape[1] if len(X.shape) > 1 else 1
    prop_E_matrix, prop_S_matrix = proposed_functions(
        num_species, max_sum_educts, max_sum_products, max_product_val,
        max_absolute_stoichiometric)
    rows = [theta(X[t], prop_S_matrix, prop_E_matrix) for t in range(X.shape[0])]
    return np.stack(rows)
def give_theta_prop(X, prop_E_matrix, prop_S_matrix):
    """Like give_theta, but using candidate matrices supplied by the caller
    instead of regenerating them."""
    rows = [theta(X[t], prop_S_matrix, prop_E_matrix) for t in range(X.shape[0])]
    return np.stack(rows)
| true |
e793f8adbca066474915a59c8f13b14cf759752a | Python | dostup8/my-first-blog | /lab6.py | UTF-8 | 4,714 | 3.546875 | 4 | [] | no_license | class Arr:
"""
реализация ассоциативного массива
"""
# хранение элементов Item
a = []
# изменен массив или нет
c = False
def __init__(self, ea):
self.a.extend(ea)
self.c = True
def add(self, key, value):
self.a.append(Item(key, value))
self.c = True
def delete(self, key):
for i in self.a:
if i.key == key:
self.a.remove(i)
return str(key) + ' item deleted'
return 'not exists'
def getAll(self):
ba = []
self.mergeSort()
for i in self.a:
ba.append(i.value)
return ba
def getItem(self, key):
self.mergeSort()
for i in self.a:
if i.key == key:
return i.value
return 'not exists'
def merge(self, pa):
k = 0
na = []
# проходясь по массиву элементов
while k < len(pa):
ca = []
i = 0
j = 0
while True:
# если элемент k есть в массиве и i не больше количества подэлементов в элементе
# и
# если k+1 элемент элемент есть в массиве и j не превышает количество подэлементов
if len(pa) > k and len(pa[k]) > i and len(pa) > k + 1 and len(pa[k + 1]) > j:
# если ключ предыдущего элемента меньше последующего
if pa[k][i].key < pa[k + 1][j].key:
ca.append(pa[k][i])
i += 1
else:
ca.append(pa[k + 1][j])
j += 1
# если элемент k есть в массиве и i не больше количества подэлементов в элементе
# или
# если k+1 элемент элемент есть в массиве и j не превышает количество подэлементов
elif len(pa) > k and len(pa[k]) > i or len(pa) > k + 1 and len(pa[k + 1]) > j:
# если элемент k есть в массиве и i не превышает количества подэлементов в нем
if len(pa) > k and len(pa[k]) > i:
ca.append(pa[k][i])
i += 1
else:
ca.append(pa[k + 1][j])
j += 1
else:
break
# запоминаем в конечный массив
na.append(ca)
# смещаемся на два элемента вперед
k += 2
# если в массиве всего один элемент
if len(na) == 1:
return na
else:
# рекурсивно продолжаем сортировать массив
return self.merge(na)
    def mergeSort(self):
        """
        Sort the backing list by key (merge sort); no-op when unmodified.
        :return:
        """
        # only re-sort if the array was modified since the last sort
        if self.c:
            na = []
            # wrap every element into a singleton run for the merge phase
            for i in self.a:
                ca = [i]
                na.append(ca)
            # the fully merged (sorted) run is the first element of the result
            self.a = self.merge(na)[0]
            # array is now considered unmodified
            self.c = False
class Item:
    """One key/value entry of the associative array."""
    # class-level defaults, replaced per instance by __init__
    key = -1
    value = ''

    def __init__(self, key, val):
        self.key, self.value = key, val
#====================
# build the array
arr = Arr([Item(6, 'six'),
           Item(8, 'eight'),
           Item(3, 'three'),
           Item(5, 'five'),
           Item(1, 'one'),
           Item(4, 'four'),
           Item(2, 'two'),
           Item(7, 'seven'),
           Item(9, 'nine')])
# exercise the API: sorted listing, insertion, lookup, deletion
print(arr.getAll())
arr.add(10, 'ten')
print(arr.getAll())
arr.add(0, 'zero')
print(arr.getAll())
print(arr.getItem(4))
print(arr.delete(4))
print(arr.getAll())
input('...')
| true |
2d2bd666fc450719580cbfe7ca3a2a70c4a32782 | Python | LarsFromMars/py4e.py | /soup.py | UTF-8 | 624 | 2.84375 | 3 | [] | no_license | import json
import ssl
import urllib.error
import urllib.parse
import urllib.request

# Ignore SSL certificate errors (the exercise endpoint has no valid chain).
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# url = input('Enter Location: ')
url = "http://py4e-data.dr-chuck.net/comments_42.json"
uh = urllib.request.urlopen(url, context=ctx)
data = uh.read().decode()
print('Retrieving', url)
print('Retrieved', len(data), 'characters')

js = json.loads(data)

# Bug fix: the original looped over the top-level dict keys and added the
# FIRST comment's count on every pass (and printed the running sum each
# time); walk the actual comment list and sum every entry's count instead.
num = 0
total = 0
for comment in js['comments']:
    num += int(comment['count'])
    total += 1

print(js)
print("Count: ", total)
print("Sum: ", num)
| true |
662e1416cff230b4bc7cdc41180b02b425a35c65 | Python | jonmak123/Kaggle-Stuff | /Audio Cats Dogs/utils.py | UTF-8 | 2,074 | 3.265625 | 3 | [] | no_license | import numpy as np # linear algebra
import pandas as pd # CSV file
import scipy.io.wavfile as sci_wav # Open wav files
import matplotlib.pyplot as plt
import numpy as np
import random
ROOT_DIR = 'input/cats_dogs/'
CSV_PATH = 'input/train_test_split.csv'
def read_wav_files(wav_files):
    '''Returns a list of audio waves
    Params:
        wav_files: a single .wav path or a list of .wav paths
    Returns:
        List of audio signals (one array per file, sample rate dropped)
    '''
    paths = wav_files if isinstance(wav_files, list) else [wav_files]
    return [sci_wav.read(ROOT_DIR + path)[1] for path in paths]
def load_dataset(dataframe):
    '''Load the dataset in a dictionary.
    From the dataframe, it reads the [train_cat, train_dog, test_cat, test_dog]
    columns and loads their corresponding arrays into the <dataset> dictionary
    Params:
        dataframe: a pandas dataframe with 4 columns [train_cat, train_dog,
        test_cat, test_dog]. In each columns, many WAV names (eg. ['cat_1.wav',
        'cat_2.wav']) which are going to be read and append into a list
    Return:
        dataset = {
            'train_cat': [[0,2,3,6,1,4,8,...],[2,5,4,6,8,7,4,5,...],...]
            'train_dog': [[sound 1],[sound 2],...]
            'test_cat': [[sound 1],[sound 2],...]
            'test_dog': [[sound 1],[sound 2],...]
        }
    '''
    df = dataframe
    dataset = {}
    # NOTE(review): iteration order matters -- 'train_cat' and 'train_dog'
    # must come first so that cat/dog std & mean exist before the other
    # splits are normalised with them.
    for k in ['train_cat', 'train_dog', 'valid_cat', 'valid_dog', 'test_cat', 'test_dog', 'full_cat', 'full_dog']:
        v = list(df[k].dropna())
        v = read_wav_files(v)
        # flatten all clips of this split into one 1-D float32 signal
        v = np.concatenate(v).astype('float32')
        # Compute mean and variance
        if k == 'train_cat':
            dog_std = dog_mean = 0
            cat_std, cat_mean = v.std(), v.mean()
        elif k == 'train_dog':
            dog_std, dog_mean = v.std(), v.mean()
        # Mean and variance suppression
        std, mean = (cat_std, cat_mean) if 'cat' in k else (dog_std, dog_mean)
        v = (v - mean) / std
        dataset[k] = v
        # 16000 appears to be the assumed sample rate -- TODO confirm
        print('loaded {} with {} sec of audio'.format(k, len(v) / 16000))
    return dataset
cb8bb080684e62e598248cf3859cefcba8178c09 | Python | jimhendy/AoC | /2015/20/b.py | UTF-8 | 479 | 3.09375 | 3 | [] | no_license | import numba
import numpy as np
@numba.njit
def n_presents(target):
    """Return an array where index h holds the presents delivered to house h.

    Part-2 rules (AoC 2015 day 20): each elf delivers 11 presents per visit
    and stops after visiting its first 50 houses.
    """
    pres_per_elf = 11
    max_num = target // pres_per_elf
    houses = np.zeros(max_num)
    for elf in range(1, max_num):
        # elf visits the multiples of its number, capped at the 50th multiple
        # (dead `pass` statement removed from the loop body)
        houses[elf : elf * 50 + 1 : elf] += pres_per_elf * elf
    return houses
def run(target):
    """Return the lowest house number receiving more than *target* presents."""
    goal = int(target)
    presents = n_presents(goal)
    return np.min(np.argwhere(presents > goal))
| true |
20a1623441c0870a0fb08a012b3264fc927e0619 | Python | bakyeono/python.bakyeono.net-exercise | /excercise_8/excercise_8_12.py | UTF-8 | 927 | 4.71875 | 5 | [] | no_license | # 연습문제 8-12
import random
class Dice:
    def __init__(self, sides):
        """Initialise the die and give it a first random roll."""
        self._sides = sides
        self._top = self.roll()

    def top(self):
        """Return the face currently showing."""
        return self._top

    def roll(self):
        """Roll the die; store and return the face that comes up."""
        self._top = random.randint(1, self._sides)
        return self._top
dice_4 = Dice(4) # create a four-sided die
print('사면체 주사위 테스트 ----')
print('처음 나온 면:', dice_4.top())
print('다시 굴리기:', dice_4.roll())
print('다시 굴리기:', dice_4.roll())
dice_100 = Dice(100) # create a hundred-sided die
print('백면체 주사위 테스트 ----')
print('처음 나온 면:', dice_100.top())
print('다시 굴리기:', dice_100.roll())
print('다시 굴리기:', dice_100.roll())
| true |
50943f41bb7d2de39d2069f5a51ef7cf37ac07aa | Python | kykymess/kykyA | /rna_mmc/rna_Readme.py | UTF-8 | 1,153 | 2.703125 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = "mmc <marc-michel dot corsini at u-bordeaux dot fr>"
__date__ = "18.03.18"
__version__ = "$Id: rna_Readme.py,v 1.1 2018/03/20 15:09:02 mmc Exp $"
__usage__ = "Projet 2017-2018 Hotelling"
"""
Quelques exemples de réseaux
"""
print("""
rna contient
libFun.py # fonctions de transfert (f(x) & f'(x))
ffNet.py # ffNet minimaliste ou presque
ffNet_with_graph # ffNet avec facilité graphique
ffElman # Réseau récursif simple (SRN ou Elman du nom de son auteur)
ffRBoltzmann # Machine de Boltzmann Restreinte
test_digits # différents ffNet pour la reconnaissance de digits
Pour utiliser plus particulièrement un type de réseau
1/ un réseau feedforward sans fioriture
from rna import ffNet
ffNet.local_main() # pour une démonstration sur 4 exemples
2/ le même avec sortie graphique
from rna import ffNet_with_graph as ffNet2
ffNet2.local_main() # pour une démonstration sur 4 exemples
3/ un réseau de Elman
from rna import ffElman
ffElman.local_main() # une petite démo
4/ un réseau de Boltzmann Restreint
from rna import ffRBoltzmann as RB
RB.local_main() # une petite démo
""")
| true |
2e4fabad8f4a1f429c9e905d054d403b55b5a92f | Python | mitesh-gohel/python | /t7.py | UTF-8 | 613 | 4.09375 | 4 | [] | no_license | #set and frozen set in python
'''
set is an unordered, mutable collection of unique elements
frozenset is an unordered, immutable collection of unique elements
'''
my_set_1 = set() #declare an empty set (set(), not {}, which makes a dict)
print (my_set_1)
my_set_1.add(33)
my_set_1.add(33) #this will not add duplicate element 33 in set and not give error
print (my_set_1)
l1 = [1,2,4,1,4,22,9,20,10]
my_set_2 = set(l1) #this will make set of unique elements of list l1
print (my_set_2)
my_set_3 = set([10,10,35,80,10])
print (my_set_3)
fset_1 = frozenset(['a', 'f','a','g','m','m']) #this will make frozen set of unique elements of list
print (fset_1)
| true |
a1e58592d9d2b41e7247f68903373f62a4c5e49e | Python | shihao-zhang/buildsimhub_python_api | /buildsimplot/parametric_parallel_coordinate_plot.py | UTF-8 | 868 | 2.546875 | 3 | [
"MIT"
] | permissive | """
AUTHOR: Weili Xu
DATE: 6/28/2018
WHAT IS THE SCRIPT ABOUT?
This script demonstrates how to retrieve the parametric data and plot parallel coordinate chart
HOW TO USE THE SCRIPT?
Replace the project_api_key and model_api_key with your model
Replace investigate parameter - this is for the legend
PACKAGE REQUIRED:
Pandas, Plotly
"""
import BuildSimHubAPI as bsh_api
import BuildSimHubAPI.postprocess as pp
"""
INPUT
"""
# 1. set your folder key
project_key = 'f98aadb3-254f-428d-a321-82a6e4b9424c'
model_api_key = 'aa09eabf-693f-4437-88cc-a522a25fba01'
invetigate = 'LPD'  # (sic) typo for 'investigate', kept: it is referenced below
"""
SCRIPT
"""
bsh = bsh_api.BuildSimHubAPIClient()
results = bsh.parametric_results(project_key, model_api_key)
# Collect results
result_dict = results.net_site_eui()
result_unit = results.last_parameter_unit
"""
PLOT!
"""
# Plot
plot = pp.ParametricPlot(result_dict, result_unit)
print(plot.pandas_df())
plot.parallel_coordinate_plot(invetigate)
| true |
e42821c250e4a2fcbfa00b83bddb67d3e2daf4e9 | Python | sowmisathiya/python | /28pro.py | UTF-8 | 158 | 3.28125 | 3 | [] | no_license | arr=int(input())
# Read the values, sort ascending, then greedily count how many can be taken
# while each taken value is at least the running sum of everything taken so
# far.  (The count read on the previous line, `arr`, is not used further --
# presumably the declared number of values; verify against the task.)
brr=[int(s) for s in input().split()]
brr.sort()
s=0   # running sum of accepted values
xv=0  # number of accepted values
for i in range(len(brr)):
    if brr[i]>=s:
        xv+=1
        s=s+brr[i]
print(xv)
| true |
cdb21de9faaef47fbb87febbbac659c064129138 | Python | lybroman/Icarus | /func/foo-api/function/handler.py | UTF-8 | 79 | 2.640625 | 3 | [
"MIT"
] | permissive | import time
def handle(st):
    """Log the payload after simulating five seconds of work.

    Fix: the original used a Python-2-only print statement; the
    parenthesised form below behaves identically under Python 2 (statement
    printing one string) and Python 3 (function call).
    """
    time.sleep(5)
    print('foo: {}'.format(st))
| true |
53a29ea147a4c45a2af9fe84cfa771efa6036bd2 | Python | rollyhuang/UE4ModuleTools | /UE4ModuleCreator.py | UTF-8 | 4,414 | 2.578125 | 3 | [
"MIT"
] | permissive | import sys
import os
import FileUtils
import re
import logging
import coloredlogs
class UE4ModuleCreator:
    """Generates the boilerplate for an Unreal Engine 4 module:
    <Name>.Build.cs, the Public/Private folders, and the module
    header/source files, all rendered from the Templates/ directory."""

    Dir = ""        # full filesystem path of the module
    ModuleDir = ""  # path relative to the Source/ directory
    Name = ""
    Logger = None

    def __init__(self, dir):
        self.Dir = dir.replace("\\", "/")
        self.Name = os.path.basename(self.Dir)
        self.ModuleDir = self.GetModuleDir()
        self.Logger = logging.getLogger("UE4ModuleCreator<" + self.Name + ">")
        self.Logger.setLevel(logging.DEBUG)
        self.Logger.warning("-" * (100 - len(self.Name)))

    def GetModuleDir(self):
        """Return the path relative to 'Source/' (or the full path when the
        module does not live under a Source folder)."""
        i = self.Dir.rfind("Source/")
        if i == -1:
            i = self.Dir.rfind("source/")
        if i == -1:
            return self.Dir
        return self.Dir[i + 7:]

    def CreateAll(self):
        """Create every boilerplate artifact for this module."""
        self.CreateBuildRuleFile()
        self.CreateDirectories()
        self.CreateModuleHeaderFile()
        self.CreateModuleSourceFile()

    def _WriteFromTemplate(self, template_name, target_path, what):
        """Render Templates/<template_name> with this module's names and
        write it to *target_path*; skip with a warning when the target
        already exists.

        Replaces three copy-pasted Create*File bodies; substituting
        {ModuleDirectory} is a no-op for templates without that placeholder.
        Files are opened with `with` so handles are always closed (the
        original leaked them on exceptions).
        """
        if os.path.exists(target_path):
            self.Logger.warning("%s Is Existed: %s", what, target_path)
            return
        with open("Templates/" + template_name, "r", encoding="UTF-8") as f:
            text = f.read()
        text = text.replace(r"{ModuleName}", self.Name)
        text = text.replace(r"{ModuleDirectory}", self.ModuleDir)
        with open(target_path, "w", encoding="UTF-8") as f:
            f.write(text)

    def CreateBuildRuleFile(self):
        self.Logger.info("CreateBuildRuleFile")
        self._WriteFromTemplate("{ModuleName}.Build.cs",
                                self.Dir + "/" + self.Name + ".Build.cs",
                                "RuleFile")

    def CreateDirectories(self):
        self.Logger.info("CreateDirectories")
        for sub in ("/Public", "/Private"):
            if not os.path.exists(self.Dir + sub):
                os.mkdir(self.Dir + sub)

    def CreateModuleHeaderFile(self):
        self.Logger.info("CreateModuleHeaderFile")
        self._WriteFromTemplate("{ModuleName}Module.h",
                                self.Dir + "/Public/" + self.Name + "Module.h",
                                "HeaderFile")

    def CreateModuleSourceFile(self):
        self.Logger.info("CreateModuleSourceFile")
        self._WriteFromTemplate("{ModuleName}Module.cpp",
                                self.Dir + "/Private/" + self.Name + "Module.cpp",
                                "SourceFile")
def CreateModule(dir):
    """Generate boilerplate for *dir* unless it already is a UE4 module."""
    if FileUtils.IsUE4ModuleDir(dir):
        return
    UE4ModuleCreator(dir).CreateAll()
def CreateModulesOfLayer(basedir):
    """Run CreateModule on every direct subdirectory of *basedir*."""
    for entry in os.listdir(basedir):
        candidate = basedir + "/" + entry
        if os.path.isdir(candidate):
            CreateModule(candidate)
def CreateModulesOfPlugin(basedir):
    """Run CreateModule on every direct subdirectory of *basedir*/Source."""
    for entry in os.listdir(basedir + "/Source"):
        candidate = basedir + "/Source/" + entry
        if os.path.isdir(candidate):
            CreateModule(candidate)
def CommandLine(args):
    """Dispatch a CLI invocation: args[1] selects the action, args[2] the path."""
    logging.getLogger().setLevel(logging.DEBUG)
    coloredlogs.install(level='DEBUG')
    logging.info(args)
    action = args[1].lower()
    if action == "createmodule":
        CreateModule(args[2])
    elif action == "createmodulesoflayer":
        CreateModulesOfLayer(args[2])
    elif action == "createmodulesofplugin":
        CreateModulesOfPlugin(args[2])
if __name__ == '__main__':
    '''
    python UE4ModuleCreator.py CreateModule {ModuleDir}
    python UE4ModuleCreator.py CreateModulesOfLayer {LayerDir}
    python UE4ModuleCreator.py CreateModulesOfPlugin {PluginDir}
    '''
    # Run relative to the script's own folder so Templates/ resolves.
    curdir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(curdir)
    CommandLine(sys.argv)
| true |
842f0310f917be9dce3c6c6d97ce671500c0176c | Python | JoeLee-KR/pyHello | /fastcampusPy/sec09-1a-read.py | UTF-8 | 2,268 | 3.9375 | 4 | [] | no_license | # Section09
# File reading and writing
# read mode r, write mode (deletes the existing file) w, append mode (create or append) a
# more: https://docs.python.org/3.7/library/functions.html#open
# Reference file handling....
# relative paths ('../', './') vs. absolute paths ('C:\...')
#
# Reading files
# Example 1
print("EX 1 ==================")
fd = open('../resource/review.txt', 'r')
contents = fd.read()
print(contents)
#print(dir(fd))
# dir lists the methods available on fd
# always close to give the resource back
fd.close()
print()
# Example 2 : fd life scope is with paragraph
print("EX 2 ==================")
with open('../resource/review.txt', 'r') as fd:
    c = fd.read()
    print(iter(c)) #
    print(list(c)) #
    print(c)
print()
# read : read everything, read(10) : read 10 characters
# Example 3 / line strip?
print("EX 3 ==================")
with open('../resource/review.txt', 'r') as fd:
    for c in fd:
        # print(c)
        print(c.strip(),".")
print()
# Example 4
print("EX 4 ==================")
with open('../resource/review.txt', 'r') as fd:
    contents = fd.read()
    print('a>', contents)
    contents = fd.read()
    print('b>', contents) # empty: the file pointer is already at EOF
    fd.seek(0, 0)
    contents = fd.read()
    print('c>', contents)
# readline : read one line, readline(n) : read n characters
print()
# Example 5
print("EX 5 ================== fd.readlin, once line")
with open('../resource/review.txt', 'r') as fd:
    line = fd.readline()
    while line:
        print(">>", line, end='')
        line = fd.readline()
# readlines : read the whole file and keep it as a list of lines
print()
print()
# Example 6
print("EX 6 ================== fd.readlines, multiple lines")
with open('../resource/review.txt', 'r') as fd:
    contents = fd.readlines()
    print(contents)
    print()
    print("*********")
    for c in contents:
        print(c, end='')
    print()
print()
# Example 7
print("EX 7 ================== score file by line")
with open('../resource/score.txt', 'r') as fd:
    score = []
    for line in fd:
        score.append(int(line))
    print(score)
    print('Average : {:6.3f}'.format(sum(score) / len(score)))
print("********** score file by whitespace??? ")
| true |
27c0052d3161ab2a6c6caec0f6ca934d8d72e7cb | Python | qweasd1/skeleton_nn | /nn_mnist/load_train_data.py | UTF-8 | 4,871 | 2.6875 | 3 | [] | no_license | import torch
import math
class MnistTrainData:
    """Wraps pre-saved MNIST tensors and serves them in mini-batches."""

    def __init__(self, X_filepath, y_filepath, device):
        self.X = torch.load(X_filepath).to(device)
        self.y = torch.load(y_filepath).to(device)
        self.size = len(self.X)

    def batches(self, batch_size=32):
        """Return a generator over (X, y) mini-batches in dataset order."""
        features, labels = self.X, self.y
        n_batches = math.ceil(self.size / batch_size)

        def iterator():
            for batch_idx in range(n_batches):
                lo = batch_idx * batch_size
                hi = lo + batch_size
                yield features[lo:hi], labels[lo:hi]

        return iterator()
class AdaptiveMnistTrainData:
    """Adaptive MNIST wrapper that trains a cascade of models.

    Label 0 is treated as the negative class; non-zero labels are targets.
    The active training subset (`next_train_indice`) starts small and is
    expanded with misclassified negatives while the current model improves.
    """
    def __init__(self, X_filepath, y_filepath, device):
        self.expand_size = 100  # negatives added per expansion step
        self.end_size = 200
        self.models = []
        self.X = torch.load(X_filepath).to(device)
        self.y = torch.load(y_filepath).to(device)
        self.size = self.X.size()[0]
        self.next_train_indice = []
        # True for target samples (label != 0), False for negatives (label 0)
        self.is_target_table = torch.full((self.size,), True, dtype=torch.bool)
        self.is_target_table[self.y == 0] = False
        self.negative_samples_indice = (~self.is_target_table).nonzero().view(-1)
        self.negative_samples_X = self.X[self.negative_samples_indice]
        self.negative_samples_y = self.y[self.negative_samples_indice]
        # True while a target sample has not yet been consumed by some model
        self.is_consumed_table = self.is_target_table.clone()
        self.find_init_train_indice()
    def find_init_train_indice(self):
        """Select the initial positives + 90 negatives; False when no
        unconsumed positives remain.

        NOTE(review): positives are picked with `self.y == 1` only, whereas
        is_target_table treats every non-zero digit as a target -- confirm.
        Also, the else-branch slices [:self.end_size] (200) although fewer
        than end_size positives remain; check the branch limits.
        """
        print("left {0}".format(self.is_consumed_table.sum().item()))
        positive_sample_size = 100
        negative_sample_size = 90
        left_positive_indice = ((self.y == 1) & self.is_consumed_table).nonzero().view(-1)
        if len(left_positive_indice) == 0:
            return False
        if len(left_positive_indice) > self.end_size:
            positive_sample = left_positive_indice[:positive_sample_size]
        else:
            positive_sample = left_positive_indice[:self.end_size]
        negative_sample = self.y.eq(0).nonzero()[:negative_sample_size].view(-1)
        self.next_train_indice = torch.cat((positive_sample,negative_sample))
        self.is_consumed_table[self.next_train_indice] = False
        return True
    def batches(self, batch_size=32):
        """Yield mini-batches of the ACTIVE subset; also caches it as
        current_X / current_y / current_size for evaluate_and_expand."""
        self.current_X = X = self.X[self.next_train_indice]
        self.current_y = y = self.y[self.next_train_indice]
        self.current_size = current_size = len(X)
        batch_count = math.ceil(current_size / batch_size)
        def iterator():
            for i in range(batch_count):
                start = i * batch_size
                end = (i + 1) * batch_size
                yield X[start:end], y[start:end]
        return iterator()
    def new_model(self):
        """Archive the current model and re-seed the training subset."""
        self.models.append(self.current_model)
        return self.find_init_train_indice()
    def save_model(self,root,target):
        # Saves each archived model's state dict as <root>/<target>_<i>.
        for i,model in enumerate(self.models):
            torch.save(model.state_dict(),"{2}/{0}_{1}".format(target,i,root))
    def find_to_expand(self, model):
        """Scan negatives in chunks and collect up to expand_size indices the
        model currently misclassifies."""
        to_expand = torch.Tensor([]).long()
        find_error_size = self.expand_size*5
        batch_count = math.ceil(len(self.negative_samples_indice) / find_error_size)
        left = self.expand_size
        for i in range(batch_count):
            start = i * find_error_size
            end = (i+1) * find_error_size
            to_expand_segement = (model(self.negative_samples_X[start:end]).argmax(axis=1) != self.negative_samples_y[start:end]).nonzero().view(-1)[:left] + start
            left -= len(to_expand_segement)
            to_expand = torch.cat((to_expand,self.negative_samples_indice[to_expand_segement]))
            if left == 0:
                break
        return to_expand
    def evaluate_and_expand(self,model):
        """Score *model* on the active subset; when it is (near) perfect,
        grow the subset with misclassified negatives, or raise
        AddModelException when none remain.

        NOTE(review): returns acc only on the acc > 0.9999 path and
        implicitly None otherwise -- confirm callers handle that.
        """
        self.current_model = model
        y_p = model(self.current_X).argmax(axis=1)
        acc = (y_p == self.current_y).sum().item() / self.current_size
        if acc > 0.9999:
            # old way to find to expand
            # y_p_all = model(self.X).argmax(axis=1)
            # all_negative_sample_indice = ((y_p_all != self.y) & ~self.is_target_table)
            # to_expand = all_negative_sample_indice.nonzero()[:self.expand_size].view(-1)
            # new way to find to expand
            to_expand = self.find_to_expand(model)
            self.next_train_indice = torch.cat((self.next_train_indice,to_expand))
            print("data_size: {0}".format(len(self.next_train_indice)))
            if len(to_expand) == 0:
                y_p_all = model(self.X).argmax(axis=1)
                all_positive_instance_indice = ((y_p_all == self.y) & self.is_target_table)
                self.is_consumed_table[all_positive_instance_indice] = False
                raise AddModelException()
            return acc
class AddModelException(Exception):
    """Raised by evaluate_and_expand when no misclassified negatives remain
    and a fresh model should be started."""
class FinishException(Exception):
    """Signals the end of the adaptive training procedure (not raised
    anywhere in this file; presumably used by callers)."""
9bbd3a76758f46c758dc346894c5a2755a5f28e5 | Python | bennyfellows/206FINAL | /Weather.py | UTF-8 | 3,760 | 3.015625 | 3 | [] | no_license | import json
import requests
import sqlite3
API_KEY = '82d1198ffb1c122e548fc317f7472bf4'
def weather_data(API_KEY, lat, lon, part):
    """Fetch hourly (temp, wind_speed, lat, lon) tuples for a location from
    the OpenWeather One Call API; *part* is the exclude list for the URL."""
    baseurl = 'https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&exclude={}&appid={}'.format(lat, lon, part, API_KEY)
    response = json.loads(requests.get(baseurl).text)
    return [(hour['temp'], hour['wind_speed'], lat, lon)
            for hour in response['hourly']]
def conidtions(API_KEY, lat, lon):
    """Return the hourly weather condition names (e.g. 'Clouds') for a location.

    NOTE(review): the function name is a typo for 'conditions' but is the
    public API used below; it also relies on the module-level global `part`
    (defined at the bottom of this file) for the URL's exclude list, so it
    must not be called before that assignment runs.
    """
    baseurl = 'https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&exclude={}&appid={}'.format(lat, lon, part, API_KEY)
    request = requests.get(baseurl)
    response = json.loads(request.text)
    pull_data = []
    for item in response['hourly']:
        # first entry of the 'weather' list carries the primary condition
        pull_data.append(item['weather'][0]['main'])
    return pull_data
def shared_table(API_KEY, lat, lon):
    """Map each hourly condition name to its numeric condition_id.

    Replaces the original six-branch if/elif chain with a lookup table.
    Unknown condition names are passed through unchanged (same as the
    original else-branch) so they can be spotted downstream.
    """
    condition_ids = {
        "Clouds": 1,
        "Clear": 2,
        "Snow": 3,
        "Rain": 4,
        "Drizzle": 5,
        "Thunderstorm": 6,
    }
    return [condition_ids.get(name, name)
            for name in conidtions(API_KEY, lat, lon)]
def makeDB(data, shared_data):
    """Insert hourly weather rows into the WeatherData table.

    data        -- list of (temperature, windspeed, latitude, longitude) tuples
    shared_data -- parallel list of numeric condition ids

    Fixes: the original unpacked the tuples into four parallel lists and then
    inserted a hard-coded range(25) of rows, hiding any IndexError behind a
    bare except; zip the two inputs instead so every available row is stored
    and only database errors are reported.
    """
    try:
        conn = sqlite3.connect('/Users/JasonWeisenfeld/206FINAL/alldata1.db')
        cur = conn.cursor()
        cur.execute("CREATE TABLE IF NOT EXISTS WeatherData (temperature FLOAT, windspeed FLOAT, condition_id INTEGER, latitude FLOAT, longitude FLOAT, FOREIGN KEY(condition_id) REFERENCES WeatherType(condition_id))")
        for (temp, wind, lat1, lon1), condition in zip(data, shared_data):
            cur.execute("INSERT INTO WeatherData (temperature, windspeed, condition_id, latitude, longitude) VALUES (?,?,?,?,?)", (temp, wind, condition, lat1, lon1))
        conn.commit()
        print("Successfully added")
        cur.close()
    except sqlite3.Error as exc:
        # Narrowed from a bare `except:` so programming errors still surface.
        print('ERROR', exc)
def second_table(condition_id, condition):
    """Insert one (condition_id, condition) row into the WeatherType lookup.

    condition_id is the PRIMARY KEY, so re-inserting an existing id raises
    sqlite3.IntegrityError and is reported as already present.
    """
    try:
        conn = sqlite3.connect('/Users/JasonWeisenfeld/206FINAL/alldata1.db')
        cur = conn.cursor()
        cur.execute("CREATE TABLE IF NOT EXISTS WeatherType (condition_id INTEGER PRIMARY KEY, condition TEXT)")
        cur.execute("INSERT INTO WeatherType (condition_id, condition) VALUES(?,?)", (condition_id, condition))
        conn.commit()
        cur.close()
        print('Successfully added')
    except sqlite3.Error:
        # Narrowed from a bare `except:` so non-database bugs are not
        # silently reported as duplicates.
        print("Already in table")
# Response sections to exclude from the One Call API; this is the module
# global `part` that weather_data/conidtions interpolate into the URL.
part = ['current','minutely','daily','alerts']
city = input("Enter the name of a city: ")
# NOTE(review): an unrecognised city name leaves userlat/userlon undefined and
# the makeDB call below fails with NameError; the longitudes are also stored
# positive although these US cities lie at negative (western) longitude --
# confirm against the API's expectations.
if city == 'Dallas':
    userlat = 32.7767
    userlon = 96.797
elif city == 'Miami':
    userlat = 25.7617
    userlon = 80.1918
elif city == 'Los Angeles':
    userlat = 34.0522
    userlon = 118.2437
elif city == 'New York City':
    userlat = 40.7128
    userlon = 74.0060
makeDB(weather_data(API_KEY, userlat, userlon, part), shared_table(API_KEY, userlat, userlon))
# Pre-populate the WeatherType lookup table with the known condition ids.
second_table(1, 'Clouds')
second_table(2, 'Clear')
second_table(3, "Snow")
second_table(4, "Rain")
second_table(5, "Drizzle")
second_table(6, "Thunderstorm")
| true |
06ffee341eb552efa9f170f84b7902fa1a0be831 | Python | bong-yo/ReinforcementLearning | /MountainCar/src/memory_replay.py | UTF-8 | 445 | 3.28125 | 3 | [] | no_license | import random
class MemoryReplay:
    """Fixed-capacity replay buffer that overwrites the oldest transitions."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.memory = []
        self.pos = 0

    def push_to_memory(self, *args):
        """Store one transition tuple, cycling over the buffer once full."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)  # grow the backing list up to capacity
        self.memory[self.pos] = args
        self.pos = (self.pos + 1) % self.capacity

    def sample_memory(self, size):
        """Return *size* transitions drawn uniformly without replacement."""
        return random.sample(self.memory, size)
| true |
878d836dfc7fabdcc548232b38d4f3f804856130 | Python | bellaananda/python_progate | /python/customer.py | UTF-8 | 484 | 2.875 | 3 | [] | no_license | from atm_card import ATMCard
class Customer:
    """Bank customer record: id, PIN and current balance."""

    def __init__(self, id, custPin = 1234, custBalance = 10000):
        self.id = id
        self.pin = custPin
        self.balance = custBalance

    def cekId(self):
        """Return the customer id."""
        return self.id

    def cekPin(self):
        """Return the PIN."""
        return self.pin

    def cekBalance(self):
        """Return the current balance."""
        return self.balance

    def debetBalance(self, nominal):
        """Debit: reduce the balance by *nominal*."""
        self.balance = self.balance - nominal

    def simpanBalance(self, nominal):
        """Deposit: raise the balance by *nominal*."""
        self.balance = self.balance + nominal
| true |
828b184602fddf079ef62a2107bafce7abb9d71a | Python | aolite/ProgSemWeb | /chapter2/chapter2.py | UTF-8 | 2,115 | 3.421875 | 3 | [] | no_license | from myTripleStore import SimpleGraph
if __name__=="__main__":
print "Chapter 2 examples"
print "Creating a simple Graph..."
movie_graph = SimpleGraph()
print "Adding blade runner triples..."
movie_graph.add(('blade_runner', 'name', 'Blade Runner'))
movie_graph.add(('blade_runner', 'directed_by', 'ridley_scott'))
movie_graph.add(('ridley_scott', 'name', 'Ridley Scott'))
print "Who is the diretor of 'blade_runner' "
print list(movie_graph.triples(('blade_runner', 'directed_by', None)))
print "Who are the triples with 'name' property?"
print list(movie_graph.triples((None,'name',None)))
print "who is the director of 'blade_runner?'"
print movie_graph.value('blade_runner', 'directed_by', None)
print "Load movies CSV..."
movies = SimpleGraph()
movies.load("../resources/movies.csv")
print "Movies loaded"
print "Who is the blade runner id?"
bladerunnerId = movies.value(None, "name", "Blade Runner")
print "Blade runner Id: "+ bladerunnerId
bladerunnerActorIds = [actorId for _, _, actorId in movies.triples((bladerunnerId, "starring", None))]
print "Who are the blade runner actors Id?"
print "Actors ID: "+ str (bladerunnerActorIds)
print str([movies.value(actorId, "name", None) for actorId in bladerunnerActorIds])
print "In which movies Harrison Ford has participated?"
harrisonfordId = movies.value(None, "name", "Harrison Ford")
print ([movies.value(movieId, "name", None) for movieId, _, _ in movies.triples((None, "starring", harrisonfordId))])
print "In which films Harrison Ford has acted with Steven Spilbers as director?"
spielbergId = movies.value(None, "name", "Steven Spielberg")
spielbergMovieIds = set([movieId for movieId, _, _ in movies.triples((None, "directed_by", spielbergId))])
harrisonfordId = movies.value(None, "name", "Harrison Ford")
harrisonfordMovieIds = set([movieId for movieId, _, _ in movies.triples((None, "starring", harrisonfordId))])
print [movies.value(movieId, "name", None) for movieId in spielbergMovieIds.intersection(harrisonfordMovieIds)]
| true |
ff3e9d6f90235ec49a14871b39346e3fd7bff63c | Python | mayurikhemani/py1 | /all_exceptionTypes.py | UTF-8 | 1,421 | 3.84375 | 4 | [] | no_license | a=10
b=0
# Example 1: catching any Exception (ZeroDivisionError here since b == 0)
try:
    print(a/b)
except Exception:
    print("invalid division")
#### 2nd
try:
    global i
    i=int(input("Enter number between 0-12"))
    print(i)
except ValueError:
    print("you haven't entered number")
#######3rd
a=[1,2,4]
try:
    print("second value= %d" %(a[1]))
    print("second value= %d" %a[3])
except IndexError:
    print("an index error occurred")
######4h
try:
    a=3
    if a<4:
        b=a/(a-3)
    print("value of b =",b)
except(ZeroDivisionError,NameError):
    print("\n error occured and handled")
else:
    print(b)
##########6th
try:
    print(x)
except NameError:
    print("Variable x is not defined")
except:
    print("Something else went wrong")
##############5th
'''
try:
    raise NameError("Hi there")
except NameError:
    print("An exception")
    raise
finally:
    print("the finally will always work")
'''
#raise an exception if variable type is nnot int
# NOTE(review): x is "hello", so this raise ALWAYS fires and terminates the
# script -- the examples below it never execute.
x = "hello"
if not type(x) is int:
    raise TypeError("Only integers are allowed")
###### 6th
try:
    a = int(input("Enter a?"))
    b = int(input("Enter b?"))
    # NOTE(review): `b is 0` compares identity with an int literal
    # (SyntaxWarning on modern Python); should be `b == 0`.
    if b is 0:
        raise ArithmeticError;
    else:
        print("a/b = ",a/b)
except ArithmeticError:
    print("The value of b can't be 0")
#7th
try:
    age=int(input("enter age"))
    if age<18:
        raise ValueError;
    else:
        print("age is valid")
except ValueError:
    print("age is not valid")
| true |
15e5408f887404244299ff06a6fc98cb2c7ba568 | Python | chenguang-hu/Seq2Seq_NMF | /src/utils/metric.py | UTF-8 | 850 | 2.609375 | 3 | [] | no_license | import os
import sys
import sys
from nltk.probability import FreqDist
from nltk.collocations import BigramCollocationFinder
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
src = os.path.join(BASE_DIR, "src")
sys.path.append(src)
from utils.util import ngrams
__all__ = ["distinct_n_sentence_level", "distinct_n_corpus_level"]
def cal_Distinct(corpus):
    """
    Calculates unigram and bigram diversity
    Args:
        corpus: tokenized list of sentences sampled
    Returns:
        uni_diversity: distinct-1 score
        bi_diversity: distinct-2 score
    """
    finder = BigramCollocationFinder.from_words(corpus)
    bi_diversity = len(finder.ngram_fd) / finder.N
    freq = FreqDist(corpus)
    uni_diversity = len(freq) / len(corpus)
    return uni_diversity, bi_diversity
623eef225f54dbf565417db03b6151cf72bcb958 | Python | dwendelen/Thesis | /src/test2.py | UTF-8 | 1,173 | 2.5625 | 3 | [] | no_license | import pyopencl as cl
import numpy as np
import numpy.linalg as la
# NOTE(review): Python 2 print statements + pyopencl; demonstrates how
# Fortran ('F') memory order affects the element layout seen by a kernel.
a1 = np.array([[201,202],[203,204],[205, 206]], dtype = np.float32)
a = np.array(a1, order='F')
print a[0][0]
print a[0][1]
print a[1][0]
print a[1][1]
print a.shape
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
b_buf = cl.Buffer(ctx, mf.WRITE_ONLY, a.nbytes)
# Identity kernel: copies each element so the host can inspect buffer layout.
prg = cl.Program(ctx, """
__kernel void sum(__global float *a, __global float *b)
{
    int i = get_global_id(0);
    b[i] = a[i];
}""").build()
prg.sum(queue, (6, 1), None, a_buf, b_buf)
c = np.array([0,0, 0,0, 0, 0], dtype = np.float32)
cl.enqueue_copy(queue, c, b_buf)
print c
T = np.zeros((1,2,3), dtype = np.float32);
T[0, 0, 0] = 111;
T[0, 1, 0] = 121;
T[0, 0, 1] = 112;
T[0, 1, 1] = 122;
T[0, 0, 2] = 113;
T[0, 1, 2] = 123;
T1 = np.array(T, order='F')
a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=T1)
b_buf = cl.Buffer(ctx, mf.WRITE_ONLY, a.nbytes)
prg.sum(queue, (6, 1), None, a_buf, b_buf)
c = np.array([0,0, 0,0, 0,0], dtype = np.float32)
cl.enqueue_copy(queue, c, b_buf)
print c
| true |
0fee46cb57cfc479de2def53d9b2561958617f7d | Python | syahdafahreza/pulseviz.py | /pulseviz/visualizers/__init__.py | UTF-8 | 3,657 | 2.6875 | 3 | [
"MIT"
] | permissive | import pyglet
from ..dsp import PulseAudioSignalAnalayzer
from .. import __version__
registry = {}
def visualizer(name):
    """Class decorator that registers the decorated class under *name*
    in the module-level `registry`."""
    def register(cls):
        registry[name] = cls
        return cls
    return register
class DebugOverlayDisplay(pyglet.window.FPSDisplay):
    """
    Displays both FPS and the latency reported by the PulseAudio server.
    """
    def __init__(self, analyzer, window):
        super().__init__(window)
        self._analyzer = analyzer
        # style the overlay label: readable white monospace text
        self.label.font_size = 14
        self.label.color = (255, 255, 255, 255)
        self.label.font_name = 'monospace'
    def set_fps(self, fps):
        # Overrides FPSDisplay.set_fps to append the PulseAudio latency.
        self.label.text = 'FPS: {0:.0f}, PulseAudio Latency: {1:.0f}'.format(
            fps,
            self._analyzer.get_latency()
        )
class VisualizerWindow(pyglet.window.Window):
    """Base pyglet window for all visualizers: draws the subclass content
    (on_draw_) plus optional debug overlay and help text, and handles the
    f/h/d/q keyboard shortcuts."""
    HELP_TEXT = '''
pulseviz {0}
Available keyboard shortcuts:
[f] Toggles fullscreen mode
[h] Toggles this help text
[d] Toggles the debug overlay
[q] Quits the application
'''.format(__version__)
    def __init__(self, visualizer, analyzer, **kwargs):
        super().__init__(**kwargs)
        self._visualizer = visualizer
        self._analyzer = analyzer
        self._show_debug_overlay = False
        self._show_help_text = False
        self._fps_display = DebugOverlayDisplay(self._analyzer, self)
        self._help_text_label = pyglet.text.Label(
            self.HELP_TEXT,
            font_name='monospace',
            font_size=14,
            bold=True,
            x=self.width // 2, y=self.height // 2,
            anchor_x='center', anchor_y='center',
            width=self.width,
            multiline=True
        )
    def on_draw(self):
        self.clear()
        self.on_draw_()
        # overlays are drawn last so they sit on top of the visualization
        if self._show_debug_overlay:
            self._fps_display.draw()
        if self._show_help_text:
            self._help_text_label.draw()
    def on_draw_(self):
        # hook for subclasses: draw the actual visualization content
        pass
    def on_resize(self, width, height):
        super().on_resize(width, height)
        # keep the help text centred after a resize
        self._help_text_label.x = self.width // 2
        self._help_text_label.y = self.height // 2
        self._help_text_label.width = self.width
    def on_key_press(self, symbol, modifiers):
        if symbol == ord('q'):
            self.on_close()
        elif symbol == ord('f'):
            self.set_fullscreen(not self.fullscreen)
        elif symbol == ord('d'):
            self._show_debug_overlay = not self._show_debug_overlay
        elif symbol == ord('h'):
            self._show_help_text = not self._show_help_text
    def on_close(self):
        # closing the window stops the whole visualizer (analyzer included)
        self._visualizer.stop()
class Visualizer(object):
    """Couples a PulseAudio signal analyzer with a VisualizerWindow;
    subclasses override the three class attributes below."""
    ANALYZER_TYPE = PulseAudioSignalAnalayzer
    VISUALIZER_WINDOW_TYPE = VisualizerWindow
    WINDOW_TITLE = '(N/A)'
    def __init__(self, source_name, stop_callback):
        self._stop_callback = stop_callback
        self._analyzer_kwargs = {
            'source_name': source_name
        }
        self._analyzer = None
        self._setup_analyzer()
        self._window_kwargs = {
            'visualizer': self,
            'resizable': True,
            'caption': self.WINDOW_TITLE + ' - pulseviz'
        }
        self._window = None
        self._setup_window()
    def _setup_analyzer(self):
        self._analyzer = self.ANALYZER_TYPE(**self._analyzer_kwargs)
    def _setup_window(self):
        # the window needs the analyzer, which must exist by now
        self._window_kwargs['analyzer'] = self._analyzer
        self._window = self.VISUALIZER_WINDOW_TYPE(**self._window_kwargs)
    def start(self):
        """Start the analyzer thread."""
        self._analyzer.start()
    def stop(self):
        """Stop and join the analyzer, then notify the owner via callback."""
        self._analyzer.stop()
        self._analyzer.join()
        self._stop_callback()
| true |
6366fc443b571e89281737d202b97e9fc3552731 | Python | ObiFenix/python-stack | /python-TDD/modules/myOptionalModules.py | UTF-8 | 5,681 | 3.59375 | 4 | [] | no_license | #=====================================
# OBIFENIX Modules: Computational Math
#=====================================
import math
# List operator of increments of (2 * list values)
# ================================================
class Underscore:
    """Toy re-implementation of underscore.js-style list helpers.

    NOTE(review): parameter names `list`, `min`, `max`, `filter`-style method
    names shadow Python builtins; several methods print and return self
    rather than returning data -- confirm before reuse.
    """
    def map ( self, list, function ):
        # apply function to every element in place; returns the same list
        for i in range( len(list) ):
            list[i] = function ( list[i] )
        return list
    def reduce ( self, list, function ): # Reduced all elements in a list by the min. within the list
        # running left fold stored in place: list[i+1] = f(list[i], list[i+1])
        last = len(list)
        for index in range( last ):
            next = index+1
            if ( next < last ):
                list[next] = function ( list[index], list[next] )
        return list
    def findId ( self, list, function ):
        # return the first element satisfying the predicate, else self
        print ("... Searching for 1st matching ID > 4")
        for i in range ( len(list) ):
            if ( function(list[i]) ):
                return list[i]
        return self
    def matchId ( self, target, list, function ):
        # report (by printing) whether any element matches int(target)
        idNotfound = False
        print ("... Trying to match ID {}".format( target) )
        for i in range ( len(list) ):
            if ( function ( int(target), list[i]) ):
                idNotfound = True
        print ("... ID match found") if (idNotfound) else print ("... Sorry! No ID match found")
        return self
    def filter ( self, list, function ):
        # NOTE(review): swaps matching items toward the front and prints the
        # list; count starts at 0 so index 0 is never treated as a match
        # slot -- looks off-by-one, confirm the intended partitioning.
        count = 0
        for item in range( len( list )):
            if ( function( list[item] )):
                count += 1
                list[count], list[item] = list[item], list[count]
        # list = list[:count]
        print (f"Return: List of all <Even> ID's: {list}")
        return self
    def reject ( self, list, function ):
        # mirror of filter for the complementary predicate; returns None
        count = 0
        for item in range( len( list )):
            if ( function( list[item] )):
                count += 1
                list[count], list[item] = list[item], list[count]
        # list = list[:count]
        print (f"Return: List of all <Odd> ID's: {list}")
    def filter_btw_minmax ( self, list, min, max, function ): # Filter out any items btw min and max (INCLUSIVE)
        # keeps values within [min, max], compacting in place, then sorts;
        # the `function` parameter is unused -- TODO confirm intent
        count = 0
        last = -1#len(list)
        for current in range ( len (list)):
            # print(list[current])
            if ( list[current] < min or list[current] > max ):
                # list[current], list[last] = list[last], list[current]
                # print("before modifying last item: ",last)
                last += 1
                # print(" after modifying last item: ", last)
                list[current] = list[last]
            else:
                # print (count, current)
                count += 1
                # print (count)
        list = list[:count]
        list.sort()
        return list
# Arithmetics
# ===========
class Arithmatic:
    """Thin wrappers around arithmetic operators and ``math`` functions.

    Bug fixes relative to the original:
    * ``mult`` returned ``x - y`` instead of the product,
    * ``log2`` and ``log10`` both returned the NATURAL log (``math.log``),
    * ``ln`` returned ``math.log1p(x)`` (= log(1 + x)) instead of log(x).
    """
    def add(self, x, y): return x + y
    def subs(self, x, y): return x - y
    def mult(self, x, y): return x * y        # was: x - y (copy/paste bug)
    def divFloat(self, x, y): return x / y
    def divInt(self, x, y): return x // y
    def square(self, x): return x * x
    def sqrt(self, x): return math.sqrt(x)
    def ceil(self, x): return math.ceil(x)
    def floor(self, x): return math.floor(x)
    def mod(self, x, y): return math.fmod(x, y)
    # NOTE(review): nRoot is identical to power; a true n-th root would be
    # math.pow(co, 1.0 / ex). Kept as-is to avoid changing callers -- TODO confirm intent.
    def nRoot(self, co, ex): return math.pow(co, ex)
    def power(self, co, ex): return math.pow(co, ex)
    def log2(self, x): return math.log2(x)    # was: math.log(x) (natural log)
    def log10(self, x): return math.log10(x)  # was: math.log(x) (natural log)
    def ln(self, x): return math.log(x)       # was: math.log1p(x) (= log(1+x))
    def logx(self, x, base): return math.log(x,base)
    def exp(self, e=1): return math.exp(e)

    # => Predicates for special float values (finite / infinite / NaN)
    def isFinite(self, num): return math.isfinite(num)   # True for finite numbers
    def isInfinity(self, num): return math.isinf(num)    # True for +/- infinity
    def isNan(self, num): return math.isnan(num)         # True for NaN
if __name__ == "__main__":
    # Manual smoke tests for Underscore; most scenarios are commented out.
    # NOTE(review): the commented calls below reference names that do not
    # exist (map.map(), reduce.reduce(), filter.filter_btw_minmax(3, 6)).
    # ================================================
    # AddingTwoNum = Arithmatic()
    myL = [2,4,6,3,9,5,1,8,7]
    _ = Underscore()
    _.map(myL, lambda num: num+(2*num))
    _.reduce(myL, lambda num1,num2: num1-num2)
    _.filter_btw_minmax(myL, 3, 6, None) # lambda num: num
    # Filtering list by getting rid of unrequested elements
    # -----------------------------------------------------
    myL = [1,2,3,4,5,6]
    print ("\n Given: List of available ID's:", myL)
    # print (f"List of all <Even> ID's: {_.findId( myL, lambda id: id > 4)}")
    _.filter(myL, lambda x: x%2==0) # should return [2,4,6]
    _.reject(myL, lambda x: x%2==0) # should return [1,3,5]
    # Mapping...
    # -------
    # myL = [1,2,3,4,5,6]
    # print ("\n Given: List of available ID's:", myL)
    # print( map.map() )
    # print ("Before: ", myL)
    # print (" After: ", reduce.reduce() )
    # filter i-btw items given min & max
    # ----------------------------------
    # myL = [1,2,3,4,5,6]
    # print ("\n Given: List of available ID's:", myL)
    # print ("Before: ", myL)
    # print (" After: ", filter.filter_btw_minmax(3, 6) )
    # Find target ID's > 4
    # --------------------
    # print ()
    # print ("List of available ID's:", myL)
    # print ("1st matching ID#: %d" % _.findId( myL, lambda id: id > 4))
    #
    # Find target ID match
    # --------------------
    # myL = [1,2,3,4,5,6]
    # print ("\n Given: List of available ID's:", myL)
    # print ("List of available ID's:", myL)
    # myItem = input("::> Enter ID#: ")
    # _.matchId(myL, myItem, lambda id1, id2: id1 == id2)
| true |
f18c0258d288e4a64816378b9c9534f18e84f0a8 | Python | zhang0123456789/python_study | /homework/four.py | UTF-8 | 2,345 | 4.21875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time :2018/11/6 20:27
# 1: A football team is looking for girls aged 10 to 12 (inclusive).
#    Ask the user for gender (m = male, f = female) and age, report whether
#    the person may join, and after 10 queries print how many qualified.
sum=0
counter=0
while counter <10:
    a=input("请输入性别:")
    b=int(input("请输入你的年龄"))
    if a=='f'and 12 >= b >=10:
        print("这个人可以加入球队")
        # NOTE(review): several bugs here -- `sum+=counter` accumulates the
        # loop index instead of adding 1 per qualifying person; `counter`
        # is only advanced in THIS branch, so the loop never ends unless
        # ten qualifying people are entered; the "total" prints every time
        # instead of once after 10 queries; and `sum` shadows the builtin.
        sum+=counter
        counter += 1
        print("满足条件的总人数{}".format(sum))
    else:
        print("这个人不可以加入球队")
# 2:利用for循环,完成a=[1,7,4,89,34,2]的冒泡排序:
# 冒泡排序:小的排前面,大的排后面。
a=[1,7,4,89,34,2]
for i in range(0,len(a)-1):
for j in range(0,len(a)-i-1):
if a[j]>a[i]:
a[i],a[j]=a[j],a[i]
print([a])
# 3: Login information is stored in a dict, e.g.
#    login_info = {"admin": "root", "user_1": "123456"}
#    where the key is the user name and the value the password.
#    Requirements:
#    1) a login program backed by that dict;
#    2) keep prompting until an existing, non-empty user name is entered;
#    3) once the user name is right, prompt for the password and re-prompt
#       while it does not match;
#    4) abort after more than three wrong passwords;
#    5) on a wrong password, show how many attempts remain;
#    6) on a correct user name + password, report a successful login.
login_info={"admin":"root","user_1":"123456"}
count=3
while 1:
    n=input("请输入用户名")
    if n in login_info.keys():
        print("输入正确户名正确")
        while count>0:
            m=input("请输入密码")
            # NOTE(review): `m in login_info[n]` is a SUBSTRING test, so
            # any substring of the stored password (e.g. "123") logs in;
            # this should be an equality comparison (m == login_info[n]).
            if m in login_info[n]:
                print("登录成功")
                break
            else:
                count-=1
                print("登录失败,您还有{}次机会".format(count))
        # NOTE(review): this break exits the outer loop after the first
        # valid user name, and the else-branch below aborts on a WRONG
        # user name instead of re-prompting as requirement 2 asks.
        break
    else:
        print("中断程序运行")
| true |
90983a34d9eb5d5c90c8ac3d69f6737a8f5d70e5 | Python | RobertFirouzi/PythonGameFramework | /source/debug.py | UTF-8 | 9,338 | 3.015625 | 3 | [] | no_license | import os
import threading
from debug_constants import * #contains the strings for console printing
#Class to run debug mode - allows user programmer to change in game variables to test different areas of code
class DebugLooper(threading.Thread):
    """Daemon thread offering a console menu to tweak live game state
    (player speed, scenery panoramas, tilemaps) while the game runs.

    Menu option values and prompt strings (DEBUG_MENU, QUIT, TILEMAP, ...)
    come from ``debug_constants`` via the star import above.
    """
    def __init__(self, game):
        threading.Thread.__init__(self, daemon=True)
        self.game = game #need a reference to main game to be able to tweak game variables
    def run(self):
        """Top-level menu loop; any exception ends the debugger cleanly."""
        result = 0
        keepGoing = True
        try:
            while keepGoing:
                devInput = getInput(DEBUG_MENU, INT, [QUIT,TILEMAP])
                if devInput == EXIT_DEBUGGER:
                    keepGoing = False
                elif devInput == QUIT:
                    keepGoing = False
                elif devInput == CHAR_SPEED:
                    result = self.changePlayerSpeed()
                elif devInput == SCENERY:
                    result = self.changeScenery()
                elif devInput == TILEMAP:
                    # NOTE(review): changeTilemap is a @property, so this
                    # attribute access actually runs the whole sub-menu
                    result = self.changeTilemap
                if result == EXIT_DEBUGGER:
                    keepGoing = False
        except Exception as e:
            print('debug loop failed with exception')
            print(e)
        print('Exiting Debugger...')
    def changeScenery(self):
        """Sub-menu: pick background or foreground panoramas to edit.

        Returns EXIT_DEBUGGER to propagate a full exit, otherwise None/0.
        """
        keepGoing = True
        result = 0
        while keepGoing:
            devInput = getInput(SCENERY_MENU, INT, [QUIT,FOREGROUND])
            if devInput == QUIT:
                keepGoing = False
            elif devInput == EXIT_DEBUGGER:
                return EXIT_DEBUGGER
            elif devInput == BACKGROUND:
                result = self.editScenery(background = True)
            elif devInput == FOREGROUND:
                result = self.editScenery(background = False)
            if result == EXIT_DEBUGGER:
                return EXIT_DEBUGGER
    # NOTE(review): declaring this a @property lets run() invoke it without
    # parentheses; a regular method would be clearer and is worth a refactor.
    @property
    def changeTilemap(self):
        """Sub-menu for tilemap layers; editing is not implemented yet."""
        keepGoing = True
        while keepGoing:
            devInput = getInput(TILEMAP_MENU, INT, [QUIT,BARRIER])
            if devInput == QUIT:
                keepGoing = False
            elif devInput == EXIT_DEBUGGER:
                return EXIT_DEBUGGER
            elif devInput == LOWER:
                print('edit lower')
            elif devInput == UPPER:
                print('edit upper')
            elif devInput == BARRIER:
                print('edit barrier')
    def changePlayerSpeed(self):
        """Prompt for a new movement speed and apply it to the player."""
        devInput = getInput(MOVESPEED_MENU, INT, [MIN_MOVE_SPEED,MAX_MOVE_SPEED])
        if devInput == EXIT_DEBUGGER:
            return EXIT_DEBUGGER
        self.game.player.moveSpeed = devInput
        print('Players speed changed to: ' + str(devInput))
        return 0
    def editScenery(self, background = True):
        """Interactive editor for one panorama: image path, visible
        sections, scrolling factors and motion/animation flags.

        Several options only print 'not iplemented yet' (sic).
        """
        if background:
            scenery = self.game.levelData.backgrounds
        else:
            scenery = self.game.levelData.foregrounds
        if len(scenery) == 0:
            print('No panoramas of this type on this level')
            return 0
        print('Which panorama will you edit?')
        i = 0
        for panorama in scenery:
            print(str(i+1) + ': ' + str(panorama.filePath))
            i+=1
        devInput = getInput(PANORAMA_PROMPT, INT, [QUIT,i])
        if devInput == QUIT:
            return 0
        if devInput == EXIT_DEBUGGER:
            return EXIT_DEBUGGER
        index = devInput -1 #index into array of scenery objects to edit
        keepGoing = True
        while keepGoing:
            devInput = getInput(SCENERY_EDIT_MENU, INT, [QUIT, ANIMATED_FPS])
            if devInput == QUIT:
                keepGoing = False
            elif devInput == EXIT_DEBUGGER:
                return EXIT_DEBUGGER
            elif devInput == FILEPATH: #Change the image
                devInput = getInput(GET_FILEPATH, STRING,[2,1000])
                allowedType = False
                for imageType in ALLOWED_IMAGETYPES:
                    if imageType in devInput:
                        allowedType = True
                if allowedType:
                    if os.path.isfile(devInput):
                        scenery[index].filePath = devInput
                        self.game.renderer.loadPanorama(scenery[index])
                    else:
                        print('File not found')
                else:
                    print('That is not an allowed image type in pygame')
            elif devInput == VISIBILE_SECTIONS:
                devInput = getInput(VISIBILE_MENU, INT, [QUIT, ADD_VISIBILITY])
                print('Current Visibility:')
                count = 0
                for visibleSection in scenery[index].visibleSections:
                    count += 1
                    print(str(count) + ') ' + str(visibleSection))
                if devInput == QUIT:
                    keepGoing = False
                elif devInput == EXIT_DEBUGGER:
                    return EXIT_DEBUGGER
                elif devInput == DELETE_VISIBILITY:
                    print('Delete which visible section?')
                    devInput = getInput('>>>', INT, [1, count])
                    # visibleSections is stored as a tuple; round-trip via
                    # list to mutate it
                    scenery[index].visibleSections = list(scenery[index].visibleSections)
                    del(scenery[index].visibleSections[devInput-1])
                    scenery[index].visibleSections = tuple(scenery[index].visibleSections)
                elif devInput == ADD_VISIBILITY:
                    print('Enter ints for the 4 values')
                    newVisibility = ['xmin', 'xmax', 'ymin', 'ymax'] # placeholders tags
                    for i in range(4):
                        newVisibility[i] = getInput('Value for ' + newVisibility[i] + '\n>>>', INT, [-500000, 500000])
                    scenery[index].visibleSections = list(scenery[index].visibleSections)
                    scenery[index].visibleSections.append(newVisibility)
                    scenery[index].visibleSections = tuple(scenery[index].visibleSections)
            elif devInput == SCROLLING:
                print('Current scrolling: ' + str(scenery[index].scrolling))
                print('Enter ints for the 4 values')
                scrolling = [['xmult', 'xdiv'],['ymult','ydiv']] #placeholders tags
                for i in range(2):
                    for j in range(2):
                        scrolling[i][j] = getInput('Value for ' + scrolling[i][j]+'\n>>>',
                                                   INT, [-10000, 10000])
                scenery[index].scrolling = scrolling
            elif devInput == ALPHA:
                print('not iplemented yet')
            elif devInput == LAYER:
                print('not iplemented yet')
            elif devInput == MOTIONX:
                devInput = getInput(GET_MOTION, INT,[FALSE,TRUE])
                if devInput:
                    scenery[index].isMotion_X = True
                else:
                    scenery[index].isMotion_X = False
            elif devInput == MOTIONY:
                devInput = getInput(GET_MOTION, INT,[FALSE,TRUE])
                if devInput:
                    scenery[index].isMotion_Y = True
                else:
                    scenery[index].isMotion_Y = False
            elif devInput == MOTION_X_PXS:
                print('not iplemented yet')
            elif devInput == MOTION_Y_PXS:
                print('not iplemented yet')
            elif devInput == ANIMATED:
                devInput = getInput(GET_MOTION, INT,[FALSE,TRUE])
                if devInput:
                    scenery[index].isAnimated = True
                else:
                    scenery[index].isAnimated = False
            elif devInput == ANIMATED_FPS:
                print('not iplemented yet')
        # force the renderer to re-draw with the edited scenery
        self.game.renderer.camera.moveFlag = True
        return 0
### STATIC METHODS ###
#get a user input type within a range, loop until propper input recieved. Always return QUIT or EXIT values
def getInput(prompt, dataType=INT, inputRange=(-10000,10000)):
    """Prompt the user until a value of *dataType* inside *inputRange* is read.

    For STRING the range limits apply to the string's length.  The QUIT and
    EXIT_DEBUGGER sentinels are returned immediately even when they fall
    outside the requested range, so every menu can always be escaped.
    Exceptions raised while parsing the input are printed and re-prompted.
    """
    low = inputRange[0]
    high = inputRange[1]
    if dataType == INT:
        value = ''
        while type(value) != int or value < low or value > high:
            try:
                value = int(input(prompt))
            except Exception as e:
                print('Exception, caught on user input')
                print(e)
            if value == QUIT or value == EXIT_DEBUGGER:
                return value
        return value
    if dataType == STRING:
        value = 0
        while type(value) != str or len(value) < low or len(value) > high:
            try:
                value = str(input(prompt))
            except Exception as e:
                print('Exception, caught on user input')
                print(e)
            if value == EXIT_DEBUGGER_STR:
                return EXIT_DEBUGGER
        return value
    if dataType == FLOAT:
        value = ''
        while type(value) != float or value < low or value > high:
            try:
                value = float(input(prompt))
            except Exception as e:
                print('Exception, caught on user input')
                print(e)
            if value == float(QUIT) or value == float(EXIT_DEBUGGER):
                return int(value)
        return value
    return 0
| true |
c49a0d650235e2811d0455668ce715a5f633d2f3 | Python | methane/arc012 | /gomoku.py | UTF-8 | 916 | 2.953125 | 3 | [] | no_license | import sys
def count_clears(board, c):
    """Count runs of five consecutive *c* stones in the flattened board.

    The 19x19 board is flattened row-by-row with ',' separators (see
    check() below), so each row occupies 20 characters: a step of 20
    walks a column, while steps of 21 and 19 walk the two diagonals.
    """
    target = c * 5
    total = 0
    for start in range(len(board)):
        tail = board[start:]
        total += tail[:5] == target          # horizontal
        total += board[start::21][:5] == target  # diagonal, down-right
        total += board[start::20][:5] == target  # vertical
        total += board[start::19][:5] == target  # diagonal, down-left
    return total
def check_last(board, c):
    """Return True if removing some single cell leaves colour *c* with no
    five-in-a-row -- i.e. the winning stone could have been the last move.

    NOTE(review): Python 2 semantics.  ``bytearray(board)`` from a str,
    assigning the one-character str ``b'.'`` into it, and comparing str
    slices against bytearray slices inside count_clears all rely on
    Python 2; under Python 3 this breaks.
    """
    for i in range(len(board)):
        ba = bytearray(board)
        ba[i] = b'.'
        if not count_clears(ba, c):
            return True
    return False
def check():
    """Read a 19x19 gomoku board from stdin and decide whether it is a
    reachable final position: returns 'YES' or 'NO'.
    """
    YES = 'YES'
    NO = 'NO'
    # rows are joined with ',' so every row is 20 characters wide; the
    # slice steps (20/21/19) in count_clears rely on that width
    board = ','.join([sys.stdin.readline().strip() for _ in range(19)])
    no = board.count('o')
    nx = board.count('x')
    nd = no - nx
    # 'o' moves first: it has the same number of stones or exactly one more
    if not (0 <= nd <= 1):
        return NO
    last, before = 'ox' if nd == 1 else 'xo'
    # the player who did NOT move last can never already have five in a row
    if count_clears(board, before):
        return NO
    if not count_clears(board, last):
        return YES
    # the last mover has five in a row: only legal if removing one stone
    # eliminates every such line (the winning move just happened)
    if check_last(board, last):
        return YES
    return NO
# Python 2 print statement: this module targets Python 2
print check()
| true |
e9f1a29d2e817bc07d60123379edfccdb22e74cb | Python | YasminaKerkeb/Chatbot-ECL | /predict.py | UTF-8 | 1,804 | 2.828125 | 3 | [] | no_license | from model import train_model_factory, val_model_factory, predict_model_factory
from src.preprocess import normalizeString, prepareData, TrimWordsSentence
from sklearn.model_selection import train_test_split
from config import DATA_PATH, TEST_SIZE
import pandas as pd
import re
import torch
class ChatBot(object):
    """Wraps a trained seq2seq model plus the vocabularies needed to turn
    free-text questions into answers."""
    def __init__(self,model_path):
        self.prepare_data()
        # vocabulary sizes fix the model's input/output dimensions
        input_size=self.train_input_lang.n_words
        output_size=self.train_output_lang.n_words
        self.model=predict_model_factory(model_path,input_size,output_size)
        self.model.eval()
    def reply(self, input_text):
        """Answer *input_text*: split it on . , ? ! and run each piece
        through the model.

        NOTE(review): only the answer to the LAST sentence is returned
        (``answer`` is overwritten every iteration), and the ``[:-1]``
        slice drops the final split segment -- correct for text ending in
        punctuation, but it discards real content otherwise (the
        empty-list fallback only rescues inputs with NO delimiter at all).
        """
        with torch.no_grad():
            sentences = [s.strip() for s in re.split('[\.\,\?\!]' , input_text)]
            sentences = sentences[:-1]
            if sentences==[]:
                sentences=[input_text]
            for sentence in sentences :
                trimmed_sentence= TrimWordsSentence(normalizeString(sentence))
                print(trimmed_sentence)
                answer_words, _ =self.model(trimmed_sentence,self.train_input_lang,self.train_output_lang)
                answer = ' '.join(answer_words)
        return answer
    def test_run(self,ques):
        """Convenience wrapper around reply() for manual testing."""
        answer=self.reply(ques)
        return answer
    def prepare_data(self):
        """Load the Q/A csv, normalize both columns, and build the
        train-split vocabularies the model needs."""
        #Loading data
        data=pd.read_csv(DATA_PATH ,encoding="latin-1",header=None,names=["Question","Answer"])
        data["Question"]=data["Question"].apply(normalizeString)
        data["Answer"]=data["Answer"].apply(normalizeString)
        #Split into train, test set
        train_data,_ = train_test_split(data, test_size=TEST_SIZE,random_state=11)
        self.train_input_lang, self.train_output_lang,_ = prepareData(train_data,'questions', 'answers', False)
| true |
52fbd86ff762db7f7336628e4d4fa399c1bd595b | Python | kitakou0313/cracking-the-code-interview | /cracking-the-code-interview/chap8_4.py | UTF-8 | 1,070 | 3.203125 | 3 | [] | no_license | import unittest
def power_set(sets):
    """Return the power set of *sets* as a set of frozensets.

    Bug fix: the original drained the caller's set with ``sets.pop()``,
    leaving it empty after the call; this version leaves the input intact.

    Each subset is selected by the bits of a counter: bit i of ``mask``
    decides whether ``factors[i]`` is included, so all 2**n masks together
    enumerate every subset exactly once.
    """
    factors = sorted(sets)  # stable order for bit indexing; input untouched
    n = len(factors)
    result = set()
    for mask in range(2 ** n):
        subset = {factors[i] for i in range(n) if (mask >> i) & 1}
        result.add(frozenset(subset))
    return result
class Test(unittest.TestCase):
    """Checks power_set against all 16 subsets of a 4-element set."""
    def test_power_set(self):
        s = {'a', 'b', 'c', 'd'}
        ps = power_set(s)
        self.assertEqual(len(ps), 16)
        subsets = [set(), {'a'}, {'b'}, {'c'}, {'d'},
                   {'a', 'b'}, {'a', 'c'}, {'a', 'd'}, {
                       'b', 'c'}, {'b', 'd'}, {'c', 'd'},
                   {'a', 'b', 'c'}, {'a', 'b', 'd'}, {'a', 'c', 'd'}, {'b', 'c', 'd'}, s]
        # membership relies on set == frozenset comparing equal in Python
        for subset in subsets:
            self.assertEqual(subset in ps, True)
        #self.assertEqual(ps, set([frozenset(s) for s in subsets]))
if __name__ == "__main__":
    unittest.main()
| true |
677e793f5a3290f9a2c829cae758d1514296c030 | Python | ethan4540/Hacker-Ranks | /Hackerranks/Extra_Long_Factorials.py | UTF-8 | 305 | 2.9375 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the extraLongFactorials function below.
def extraLongFactorials(n):
    """Print (and return) n!, relying on Python's arbitrary-precision ints.

    Bug fix: the original looped ``while n != 1`` which never terminates
    for n <= 0; iterating over 2..n handles 0! = 1! = 1 naturally.
    """
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    print(result)   # the judge expects the value on stdout
    return result   # returned as well for testability (original returned None)
if __name__ == '__main__':
    # HackerRank harness: read n from stdin and print n!.
    n = int(input())
    extraLongFactorials(n)
| true |
2a8d76b15d16a5a1529c8369fa9ed4221a34d7a6 | Python | sbrylka/learning-python | /Chapter 3 - introducing lists/bicycles.py | UTF-8 | 224 | 3.625 | 4 | [] | no_license | bicycles = ['trekingowy', 'górski', 'miejski', 'szosowy']
# Basic list-indexing demo: print the whole list, single elements
# (including a negative index for the last one), then a composed sentence.
print(bicycles)
print(bicycles[0])
print(bicycles[3])
print(bicycles[-1])
# .title() capitalizes the bike name inside the (Polish) sentence
message = "Moim pierwszym rowerem był rower " + bicycles[1].title() + "."
print(message)
| true |
b4d0be3efae909dccdd49d9985f1a20d7d75e8a5 | Python | Zsantapala/justAtesT | /Crossin_CountWords.py | UTF-8 | 447 | 3.109375 | 3 | [] | no_license | #-*-coding:utf-8-*-
#!/usr/bin/python
import re
# Count words in words.txt: total occurrences and distinct words.
file = 'words.txt'
try:
    with open(file, 'r') as f:
        content = f.read()
except FileNotFoundError:
    content = ''
    print('Can\'t find the %s file' % file)
if content:
    content = content.lower()
    # Bug fix: the original pattern used [A-z], which also matches the
    # ASCII punctuation characters [ \ ] ^ _ ` lying between 'Z' and 'a'.
    # The text is already lower-cased, so [a-z] is the correct class.
    result = re.findall(r'\b[a-z]+\b', content)
    print('There are %d words in %s ' % (len(result), file))
    print('There are %d words in %s(without repeat word) ' % (len(set(result)), file))
f911d09c290a89b02ae72a44712564834608720a | Python | AnikaLegal/clerk | /app/utils/uploads.py | UTF-8 | 583 | 2.828125 | 3 | [] | no_license | import hashlib
from django.utils.text import slugify
def get_s3_key(model, filename: str):
    """
    Build the S3 key for *model*'s file.

    The key embeds an MD5 digest of the file contents so that keys are
    unique per content and not guessable from the filename alone.
    Assumes model has a FileField named 'file' and an attribute UPLOAD_KEY.
    """
    upload = model.file
    contents = upload.read()
    upload.seek(0)  # rewind so the file can still be read/saved afterwards
    digest = hashlib.md5(contents).hexdigest()
    slug_parts = (slugify(part) for part in filename.split("."))
    safe_name = ".".join(slug_parts).lower()
    return f"{model.UPLOAD_KEY}/{digest}/{safe_name}"
| true |
e8eaad3c8bf36daed751e841cf3fc2a1c50a353c | Python | junjianglin/leetcode_solution | /reorder_list.py | UTF-8 | 1,526 | 3.390625 | 3 | [] | no_license | # Definition for singly-linked list.
import collections
class ListNode:
    """Singly-linked list node: a value plus a pointer to the next node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    # @param head, a ListNode
    # @return nothing
    def reorderList(self, head):
        """Reorder L0->L1->...->Ln into L0->Ln->L1->Ln-1->... in place.

        Python 2 only: ``l / 2`` below relies on integer division (it
        would be a float under Python 3 and break range()), and printList
        uses the print statement.
        """
        # first pass: count the nodes
        l = 0
        node = head
        while node != None:
            l += 1
            node = node.next
        # lists of length <= 2 are already in reordered form
        if any([l == 0, l == 1, l == 2]):
            return head
        numOfInsert = l / 2      # nodes to splice in from the tail half
        numOfKeep = l - l/2      # nodes kept in place in the head half
        # collect the tail half so nodes can be popped from the back
        cur = head
        ls_insert = []
        for i in range(numOfKeep):
            cur = cur.next
        while cur != None:
            ls_insert.append(cur)
            cur = cur.next
        # weave: after each kept node, splice in the current last tail node
        cur = head
        for i in range(numOfInsert):
            node = ls_insert.pop()
            if i == numOfInsert-1:
                # final splice: terminate the list; even/odd lengths end
                # at different positions
                if l%2 == 0:
                    node.next = None
                    cur.next = node
                    return head
                else:
                    node.next = cur.next
                    cur.next = node
                    cur = cur.next.next
                    cur.next = None
                    return head
            node.next = cur.next
            cur.next = node
            cur = cur.next.next
    def printList(self,head):
        """Print one node value per line (Python 2 print statement)."""
        cur = head
        while cur!= None:
            print cur.val
            cur = cur.next
# Smoke test: 1->2->3->4 should print, then become 1->4->2->3.
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
a.next = b
b.next = c
c.next = d
q = Solution()
q.printList(a)
q.reorderList(a)
q.printList(a)
| true |
e270492fcbca450a5609ee6918bc83dcf7f501c7 | Python | vipul2001/Machine-Learninng-and-Deep-Learning | /Tensorflow_neural_network/code.py | UTF-8 | 8,690 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# # Building Linear Regression Model in Tensorflow
# In[2]:
# Toy regression data: x = 0..9 (one feature per row) with hand-picked targets.
X_train = np.arange(10).reshape((10, 1))
y_train = np.array([1.0, 1.3, 3.1,
                    2.0, 5.0, 6.3,
                    6.6, 7.4, 8.0,
                    9.0])
plt.plot(X_train, y_train, 'o', markersize=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
# In[3]:
# Standardize inputs and wrap (x, y) pairs into a tf.data pipeline.
X_train_norm = (X_train - np.mean(X_train))/np.std(X_train)
ds_train_orig = tf.data.Dataset.from_tensor_slices(
    (tf.cast(X_train_norm, tf.float32),
     tf.cast(y_train, tf.float32)))
# In[4]:
class MyModel(tf.keras.Model):
    """Univariate linear model y = w*x + b with trainable scalars w and b."""
    def __init__(self):
        super(MyModel, self).__init__()
        self.w = tf.Variable(0.0, name='weight')
        self.b = tf.Variable(0.0, name='bias')
    def call(self, x):
        return self.w*x + self.b
model = MyModel()
model.build(input_shape=(None, 1))
model.summary()
# In[5]:
def loss_fn(y_true, y_pred):
    """Mean squared error."""
    return tf.reduce_mean(tf.square(y_true - y_pred))
## testing the function:
yt = tf.convert_to_tensor([1.0])
yp = tf.convert_to_tensor([1.5])
loss_fn(yt, yp)
# In[6]:
def train(model, inputs, outputs, learning_rate):
    """One manual SGD step: record MSE gradients w.r.t. w and b with a
    GradientTape and apply them in place via assign_sub."""
    with tf.GradientTape() as tape:
        current_loss = loss_fn(model(inputs), outputs)
    dW, db = tape.gradient(current_loss, [model.w, model.b])
    model.w.assign_sub(learning_rate * dW)
    model.b.assign_sub(learning_rate * db)
# In[7]:
# Manual training: 200 epochs of single-sample SGD over the shuffled,
# endlessly repeated dataset; Ws/bs record the parameter trajectory.
tf.random.set_seed(1)
num_epochs = 200
log_steps = 100
learning_rate = 0.001
batch_size = 1
steps_per_epoch = int(np.ceil(len(y_train) / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=len(y_train))
ds_train = ds_train.repeat(count=None)
ds_train = ds_train.batch(1)
Ws, bs = [], []
for i, batch in enumerate(ds_train):
    # the repeated dataset is infinite; stop after the planned step count
    if i >= steps_per_epoch * num_epochs:
        break
    Ws.append(model.w.numpy())
    bs.append(model.b.numpy())
    bx, by = batch
    loss_val = loss_fn(model(bx), by)
    train(model, bx, by, learning_rate=learning_rate)
    if i%log_steps==0:
        print('Epoch {:4d} Step {:2d} Loss {:6.4f}'.format(
            int(i/steps_per_epoch), i, loss_val))
# In[8]:
# Evaluate the fitted line on a dense grid (normalized with the TRAINING
# mean/std) and plot data + fit beside the parameter trajectories.
print('Final Parameters:', model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training examples', 'Linear Reg.'], fontsize=15)
ax.set_xlabel('x', size=15)
ax.set_ylabel('y', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['Weight w', 'Bias unit b'], fontsize=15)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Value', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
#plt.savefig('ch13-linreg-1.pdf')
plt.show()
# # Model Training Via .compile() and .fit()
# In[9]:
# Same model trained through the high-level Keras API instead of the
# manual loop above.
model = MyModel()
#model.build((None, 1))
model.compile(optimizer='sgd',
              loss=loss_fn,
              metrics=['mae', 'mse'])
# In[10]:
model.fit(X_train_norm, y_train,
          epochs=num_epochs, batch_size=batch_size,
          verbose=1)
# In[11]:
# NOTE(review): the Ws/bs plotted below are the trajectories from the
# MANUAL run above -- this freshly compiled model does not refresh them.
print(model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training Samples', 'Linear Regression'], fontsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['W', 'bias'], fontsize=15)
plt.show()
# # Building Multi Layer Preception for Iris Dataset
# In[12]:
import tensorflow_datasets as tfds
# Load the 150-sample iris dataset and split it 100/50 train/test after a
# fixed shuffle (reshuffle_each_iteration=False keeps the split disjoint).
iris, iris_info = tfds.load('iris', with_info=True)
print(iris_info)
# In[13]:
tf.random.set_seed(1)
ds_orig = iris['train']
ds_orig = ds_orig.shuffle(150, reshuffle_each_iteration=False)
print(next(iter(ds_orig)))
ds_train_orig = ds_orig.take(100)
ds_test = ds_orig.skip(100)
# In[14]:
# Re-shape each example dict into (features, label) tuples for model.fit.
ds_train_orig = ds_train_orig.map(
    lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
    lambda x: (x['features'], x['label']))
next(iter(ds_train_orig))
# In[15]:
# 4 -> 16 (sigmoid) -> 3 (softmax) multi-layer perceptron.
iris_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='sigmoid',
                          name='fc1', input_shape=(4,)),
    tf.keras.layers.Dense(3, name='fc2', activation='softmax')])
iris_model.summary()
# In[16]:
iris_model.compile(optimizer='adam',
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])
# In[17]:
training_size = 100
batch_size = 2
steps_per_epoch = np.ceil(training_size / batch_size)
ds_train = ds_train_orig.shuffle(buffer_size=training_size)
ds_train = ds_train.repeat()
ds_train = ds_train.batch(batch_size=batch_size)
ds_train = ds_train.prefetch(buffer_size=1000)
history = iris_model.fit(ds_train, epochs=num_epochs,
                         steps_per_epoch=steps_per_epoch,
                         verbose=0)
# In[18]:
# Learning curves, held-out evaluation, and an HDF5 save/load round trip.
hist = history.history
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(hist['loss'], lw=3)
ax.set_title('Training loss', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(hist['accuracy'], lw=3)
ax.set_title('Training accuracy', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
#plt.savefig('ch13-cls-learning-curve.pdf')
plt.show()
# In[19]:
results = iris_model.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
# In[20]:
iris_model.save('iris-classifier.h5',
                overwrite=True,
                include_optimizer=True,
                save_format='h5')
# In[21]:
iris_model_new = tf.keras.models.load_model('iris-classifier.h5')
iris_model_new.summary()
# In[22]:
# the reloaded model should reproduce the metrics of the saved one
results = iris_model_new.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
# In[23]:
labels_train = []
for i,item in enumerate(ds_train_orig):
    labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
    labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
# # logistic Activation Function
# In[24]:
import numpy as np
# Example single-unit input: X[0] is the constant bias input 1,
# w holds the unit's weights (bias weight first).
X = np.array([1, 1.4, 2.5]) ## first value must be 1
w = np.array([0.4, 0.3, 0.5])
def net_input(X, w):
    """Weighted sum of the inputs; X[0] is expected to be the bias input 1."""
    return X @ w  # identical to np.dot for 1-D operands

def logistic(z):
    """Element-wise sigmoid: 1 / (1 + e^-z)."""
    return 1.0 / (np.exp(-z) + 1.0)

def logistic_activation(X, w):
    """Sigmoid applied to the net input of a single unit."""
    return logistic(net_input(X, w))
print('P(y=1|x) = %.3f' % logistic_activation(X, w))
# In[25]:
# Weight matrix of a 3-unit output layer fed by 3 hidden units + bias.
W = np.array([[1.1, 1.2, 0.8, 0.4],
              [0.2, 0.4, 1.0, 0.2],
              [0.6, 1.5, 1.2, 0.7]])
# A : data array with shape = (n_hidden_units + 1, n_samples)
# note that the first column of this array must be 1
A = np.array([[1, 0.1, 0.4, 0.6]])
Z = np.dot(W, A[0])
y_probas = logistic(Z)
# NOTE: independent per-unit sigmoids do not sum to 1 -- the softmax
# section below fixes that.
print('Net Input: \n', Z)
print('Output Units:\n', y_probas)
# # Class Probability via Softmax Function
# In[26]:
def softmax(z):
    """Normalize exp(z) so the outputs form a probability distribution."""
    exps = np.exp(z)
    return exps / exps.sum()
# softmax outputs sum to 1, unlike the independent sigmoids above
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
np.sum(y_probas)
# In[27]:
import tensorflow as tf
# same computation via Keras; expand_dims adds the batch dimension
Z_tensor = tf.expand_dims(Z, axis=0)
tf.keras.activations.softmax(Z_tensor)
# # Using Hyperbolic tanh function
# In[29]:
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
def tanh(z):
    """Hyperbolic tangent from its definition: (e^z - e^-z) / (e^z + e^-z)."""
    pos, neg = np.exp(z), np.exp(-z)
    return (pos - neg) / (pos + neg)
# Compare tanh (range -1..1) with the logistic sigmoid (range 0..1).
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('Net input $z$')
plt.ylabel('Activation $\phi(z)$')
plt.axhline(1, color='black', linestyle=':')
plt.axhline(0.5, color='black', linestyle=':')
plt.axhline(0, color='black', linestyle=':')
plt.axhline(-0.5, color='black', linestyle=':')
plt.axhline(-1, color='black', linestyle=':')
plt.plot(z, tanh_act,
         linewidth=3, linestyle='--',
         label='Tanh')
plt.plot(z, log_act,
         linewidth=3,
         label='Logistic')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
# In[ ]:
| true |
aedc83c39bdf9c593da4aacf1bd8cef60942e630 | Python | apanariello4/lab-sac | /1-02_03_20/Exercises_Dicts.py | UTF-8 | 380 | 3.046875 | 3 | [] | no_license | import sys
# CLI usage: script.py --count|<anything else> ; argv echoed for debugging.
print(sys.argv)
option = sys.argv[1]
# NOTE(review): the file argument is hard-coded for testing; restore
# sys.argv[2] to honour the command line.
file = "test.txt"#sys.argv[2]
def count(file):
    """Return the number of whitespace-separated words in *file*.

    Bug fixes: the original called ``line.split("")``, which always raises
    ``ValueError: empty separator`` (and discarded the result anyway),
    counted characters (``len(line)``) instead of words, and never closed
    the file handle.
    """
    total = 0
    with open(file, "rt") as f:
        for line in f:
            # split() with no argument splits on any run of whitespace
            total += len(line.split())
    return total
def topcount(file):
    """Return the 20 most frequent words in *file* as (word, count) pairs.

    Bug fix: the original sliced the integer returned by ``count`` --
    ``count(file)[:21]`` raises ``TypeError``.  The ``[:21]`` slice
    suggests a "top N" listing was intended -- TODO confirm N;
    Counter.most_common gives exactly that.
    """
    from collections import Counter  # local import keeps the script header untouched
    with open(file, "rt") as f:
        words = f.read().split()
    return Counter(words).most_common(20)
# Dispatch on the requested mode: total word count vs. top-word listing.
if option == "--count":
    print(count(file))
else:
    print(topcount(file))
| true |
07fab36f697c81030a14abe87410ae1a802ed131 | Python | realjul/cipher_project | /cipher_project/keyword_cipher.py | UTF-8 | 1,158 | 3.578125 | 4 | [] | no_license | from cipher import Cipher
class Keyword(Cipher):
    """Keyword (Kryptos) substitution cipher.

    Each plaintext letter is replaced by the letter at the same position
    in the 'kryptos'-keyed alphabet; decryption applies the inverse map.
    Characters with no mapping (digits, punctuation) are silently dropped,
    matching the original nested-loop behaviour.
    """
    Plaintext = list('abcdefghijklmnopqrstuvwxyz')
    Encrypted = list('kryptosabcdefghijlmnquvwxz')
    keyword_encrypted = [(x, y) for x, y in zip(Plaintext, Encrypted)]
    # O(1) lookup tables built once at class creation; the original
    # scanned all 26 pairs for every character.
    _encode_table = dict(keyword_encrypted)
    _decode_table = {cipher: plain for plain, cipher in keyword_encrypted}

    def __init__(self, message):
        super().__init__(message)
        # lower-cased message with spaces removed, as a list of characters
        self.new_message = list(message.replace(" ", "").lower())
        self.decrypted_message = ''
        self.encrypted = ''

    def encrypt(self):
        """Encrypt the stored message; stores and returns the ciphertext."""
        table = Keyword._encode_table
        self.encrypted = ''.join(table[ch] for ch in self.new_message if ch in table)
        return self.encrypted

    def decrypted(self, to_be_decrypted):
        """Decrypt *to_be_decrypted*; stores and returns the plaintext."""
        table = Keyword._decode_table
        self.decrypted_message = ''.join(table[ch] for ch in to_be_decrypted if ch in table)
        return self.decrypted_message
| true |
8a514c55ea9ea40739f079b8689d402dba63e535 | Python | alextag/Redstone-Simulator | /world.py | UTF-8 | 6,533 | 3.375 | 3 | [] | no_license | from tile import *
import wx
#Map Size
MAP_SIZE = 7
class world():
def __init__(self,filename):
self.torches = []
if filename == "NEW":
self.create_world()
else:
self.load_world(filename)
def create_world(self):
'''(world) -> NoneType
Create a 2-D list by creating a list that has lists in it.'''
#Create a simple empty list
self.map = []
i = 0
while i<MAP_SIZE:
k = 0
#Add a new element in the list, this element is also an empty list
self.map.append([])
while k<MAP_SIZE:
#Add a new element in the list that you just created.
#so our list will look somewhat like this
#self.map = [[...],[...],...,[...],[...]]
self.map[i].append(tile())
k+=1
i+=1
def load_world(self, filename):
'''(world, string) -> NoneType
Load a saved world'''
#Files must be named *.map
#MAP_SIZE X MAP_SIZE matrix with letters {A/B/R/P/T}
#After a "P" for repeater you must enter the direction it is facing
# "a" --> left, "s" --> down, "d" --> right, "w" --> up
mapfile = open(filename + ".map", "r")
assert mapfile.readline() == "MAPSTART\n"
currline = mapfile.readline()
self.map = []
i = 0
while currline != "MAPFINISH\n":
self.map.append([])
times = 1
l = 0
rep = False
tor = False
for o in currline:
if tor:
if o in ['a','w','s','d','n']:
self.map[i][l-1].onbox = o
self.torches.append((i,l-times))
times +=1
tor = False
if rep:
x=i
y=l-times
times +=1
to = {'a':(x,y-1),'w':(x-1,y),'d':(x,y+1),'s':(x+1,y)}
rev = {'a':'d','d':'a','w':'s','s':'w'}
self.map[i].append(repeater(to[o],to[rev[o]]))
rep = False
elif o == "A":
self.map[i].append(tile())
elif o == "B":
self.map[i].append(block())
elif o == "R":
self.map[i].append(redstone())
elif o == "T":
self.map[i].append(torch())
tor = True
elif o == "P":
rep = True
l+=1
i+=1
currline = mapfile.readline()
def __str__(self):
'''(world) -> string'''
#This function is called whenever someone tries to print an object of type world
i = 0
while i<MAP_SIZE:
k = 0
while k<MAP_SIZE:
#print(self.map[i][k],end='')
k+=1
print()
i+=1
return ''
def _clear(self):
'''(world) -> NoneType
Fills the map with air blocks'''
i = 0
while i<MAP_SIZE:
k = 0
while k<MAP_SIZE:
self.map[i][k] = tile()
self.torches = []
k+=1
i+=1
def depower(self):
'''(world) -> NoneType
Since "resolve" change the type of the tiles by adding a "*" to show that they are powered, we have to remove the star and shut the power down'''
i = 0
while i<MAP_SIZE:
k = 0
while k<MAP_SIZE:
self.map[i][k].depower()
k+=1
i+=1
    def change(self,x,y,to):
        '''(world,int,int,string) -> NoneType
        Replace the tile at (x, y) with a new object selected by the letter
        'to': "A" air, "B" block, "T" torch, "R" redstone, "P" repeater.
        Torches and repeaters pop up a wx text dialog asking for a direction.'''
        # If a torch is being overwritten, drop it from the torch registry.
        if self.map[x][y].type == "T":
            if (x,y) in self.torches:
                self.torches.remove((x,y))
        if to=="A":
            self.map[x][y] = tile()
        elif to=="B":
            self.map[x][y] = block()
        elif to=="T":
            self.map[x][y] = torch()
            self.torches.append((x,y))
            # Ask which neighbouring block (if any) the torch is mounted on;
            # an empty answer means free-standing.
            way = ["w","a","s","d",""]
            direction = ' '
            while not direction in way:
                box=wx.TextEntryDialog(None,str(way),"On Block?","")
                if box.ShowModal()==wx.ID_OK:
                    direction=box.GetValue()
            if direction == "w":
                temp = (x-1,y)
            elif direction == "a":
                temp = (x,y-1)
            elif direction == "s":
                temp = (x+1,y)
            elif direction == "d":
                temp = (x,y+1)
            else:
                return
            # Only attach if the chosen neighbour really is a solid block.
            if self.map[temp[0]][temp[1]].type == "B":
                self.map[x][y].onbox = direction
                self.map[x][y].box = self.map[temp[0]][temp[1]]
        elif to=="R":
            self.map[x][y] = redstone()
        elif to=="P":
            direction = ""
            way = ["w","a","s","d"]
            # A repeater needs neighbours on both sides: corners are rejected,
            # and on an edge it may only face along the edge's free axis.
            if ((x==MAP_SIZE-1 and y==MAP_SIZE-1) or (x==0 and y==0) or (x==0 and y==MAP_SIZE-1) or (x==MAP_SIZE-1 and y==0)):
                print ("Can't place repeater there")
                return
            if (y==MAP_SIZE-1 or y==0):
                way = ["w","s"]
            elif (x==0 or x==MAP_SIZE-1):
                way = ["a","d"]
            while not direction in way:
                box=wx.TextEntryDialog(None,str(way),"Facing?",way[0])
                if box.ShowModal()==wx.ID_OK:
                    direction=box.GetValue()
            # temp is the tile in the facing direction, temp2 the opposite
            # side (argument order matches repeater(to, rev) in load_world).
            if direction == "w":
                temp = (x-1,y)
                temp2 = (x+1,y)
            elif direction == "a":
                temp = (x,y-1)
                temp2 = (x,y+1)
            elif direction == "s":
                temp = (x+1,y)
                temp2 = (x-1,y)
            elif direction == "d":
                temp = (x,y+1)
                temp2 = (x,y-1)
            self.map[x][y] = repeater(temp,temp2)
def show(self):
i = 0
while i<MAP_SIZE:
k = 0
while k<MAP_SIZE:
temp = str(self.map[i][k])
if self.map[i][k].pwr:
temp += "*"
#print(temp,end='')
k+=1
print()
i+=1
print()
print()
return
if __name__=="__main__":
    # Manual smoke test: create a fresh world, print it, place a block at
    # (1, 1), and print the (currently blank) rendering again.
    w = world("NEW")
    print(w)
    w.change(1,1,"B")
    print(w)
| true |
491cf522cbc80f81f4dfd5427025568f588326a3 | Python | mihaivalentistoica/shoping-paradise-sda | /shopping_paradise/products/form.py | UTF-8 | 2,372 | 2.640625 | 3 | [] | no_license | import datetime
from django import forms
class CouponForm(forms.Form):
    """Form used to create a discount coupon.

    The field declarations already enforce presence, lengths and 1..100
    ranges; clean() repeats those checks to attach the project's custom
    error messages.
    """
    name = forms.CharField(label='Name', max_length=20)
    creator = forms.CharField(label="Creator", max_length=50)
    use_count = forms.IntegerField(label='Use count', min_value=1, max_value=100)
    percent_amount = forms.IntegerField(min_value=1, max_value=100)
    expire_date = forms.DateField(label="Expire date", widget=forms.SelectDateWidget(attrs={'class': 'dateInput'}))

    def clean(self):
        """Cross-field validation.

        Bug fix: the original ran ``len(name)`` and the numeric range
        comparisons even when the field was absent (``cleaned_data.get``
        returns ``None`` for missing/invalid fields), raising ``TypeError``
        instead of recording a form error.  Each follow-up check is now an
        ``elif`` so it only runs when the value is present.
        """
        super(CouponForm, self).clean()
        name = self.cleaned_data.get('name')
        creator = self.cleaned_data.get('creator')
        use_count = self.cleaned_data.get('use_count')
        percent_amount = self.cleaned_data.get('percent_amount')
        expire_date = self.cleaned_data.get('expire_date')
        if not name:
            self._errors['name'] = self.error_class(
                ["The field name is required"])
        elif len(name) > 20:
            self._errors['name'] = self.error_class(
                ["The field name must contain maximum of 20 characters"])
        if not creator:
            self._errors['creator'] = self.error_class(
                ["The field creator is required"])
        elif len(creator) > 50:
            self._errors['creator'] = self.error_class(
                ["The field creator must contain maximum of 50 characters"])
        if not use_count:
            self._errors['use_count'] = self.error_class(
                ["The field Use count is required"])
        elif use_count < 1 or use_count > 100:
            self._errors['use_count'] = self.error_class(
                ["Introduce a value between 1 and 100"])
        if not percent_amount:
            self._errors['percent_amount'] = self.error_class(
                ["The field Percent amount is required"])
        elif percent_amount < 1 or percent_amount > 100:
            self._errors['percent_amount'] = self.error_class(
                ["Introduce a value between 1 and 100"])
        if not expire_date:
            self._errors['expire_date'] = self.error_class(
                ["The field Expire date is required"])
        # NOTE(review): past-date rejection was left disabled in the original:
        # if datetime.datetime.now().time() > datetime.datetime(expire_date).time():
        #     self._errors['expire_date'] = self.error_class(
        #         ["Can't set expire date in past"])
        return self.cleaned_data
| true |
ac8abbcf5db6a38cdce3f57b32ca3576dbc71de2 | Python | PPSantos/Image-Colorization-with-Deep-Learning | /src/UNET/models/UNet.py | UTF-8 | 4,395 | 2.609375 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
class UNet:
    # U-Net style encoder/decoder built on the TensorFlow 1.x layers API,
    # with skip connections concatenated on the channel axis.
    def __init__(self, seed, is_training=True):
        """
        Architecture:
            Encoder:
            [?, 32, 32, input_ch] => [?, 32, 32, 64]
            [?, 32, 32, 64] => [?, 16, 16, 128]
            [?, 16, 16, 128] => [?, 8, 8, 256]
            [?, 8, 8, 256] => [?, 4, 4, 512]
            [?, 4, 4, 512] => [?, 2, 2, 512]
            Decoder:
            [?, 2, 2, 512] => [?, 4, 4, 512]
            [?, 4, 4, 512] => [?, 8, 8, 256]
            [?, 8, 8, 256] => [?, 16, 16, 128]
            [?, 16, 16, 128] => [?, 32, 32, 64]
            [?, 32, 32, 64] => [?, 32, 32, out_ch]

        Args:
            seed: RNG seed for weight initialisation and dropout.
            is_training: when True, dropout layers are active.
        """
        self.name = 'UNet'
        self.seed = seed
        self.initializer = tf.glorot_uniform_initializer(self.seed)
        self.is_training = is_training
        self.kernel_size = 4
        # (num_filters, strides, dropout)
        self.kernels_encoder = [
            (128, 2, 0),
            (256, 2, 0),
            (512, 2, 0),
            (512, 2, 0),
        ]
        # (num_filters, strides, dropout)
        self.kernels_decoder = [
            (512, 2, 0.5),
            (256, 2, 0.5),
            (128, 2, 0),
            (64, 2, 0),
        ]
    def forward(self, X, reuse_vars=None):
        # Builds the graph for one forward pass; returns a 2-channel tanh
        # output with the same spatial size as X.
        with tf.variable_scope(self.name, reuse=reuse_vars):
            # Encoder activations are stacked here and consumed in reverse
            # order by the decoder's skip connections.
            layers = []
            output = tf.layers.Conv2D(
                name='enc_conv_1',
                filters=64,
                strides=1,
                kernel_size=self.kernel_size,
                padding='same',
                kernel_initializer=self.initializer)(X)
            output = tf.layers.BatchNormalization(name='enc_bn_1')(output)
            output = tf.nn.leaky_relu(output, name='enc_leaky_ReLu_1')
            layers.append(output)
            # Downsampling path: conv(stride 2) -> BN -> leaky ReLU.
            for i, kernel in enumerate(self.kernels_encoder):
                output = tf.layers.Conv2D(
                    name='enc_conv_'+str(i+2),
                    filters=kernel[0],
                    strides=kernel[1],
                    kernel_size=self.kernel_size,
                    padding='same',
                    kernel_initializer=self.initializer)(output)
                output = tf.layers.BatchNormalization(name='enc_bn_'+str(i+2))(output)
                output = tf.nn.leaky_relu(output, name='enc_leaky_ReLu'+str(i+2))
                layers.append(output)
                if kernel[2] != 0:
                    output = tf.keras.layers.Dropout(
                        name='enc_dropout_' + str(i),
                        rate=kernel[2],
                        seed=self.seed)(output, training=self.is_training)
            # Upsampling path: transposed conv -> BN -> ReLU -> (dropout)
            # -> concat with the matching encoder activation.
            # NOTE(review): the decoder BN/conv names reuse `i`, which is the
            # leftover index from the encoder loop above — confirm the naming
            # offset is intentional before renaming anything (checkpoints
            # depend on these variable names).
            for j, kernel in enumerate(self.kernels_decoder):
                output = tf.layers.Conv2DTranspose(
                    name='dec_conv_t_'+str(j+1),
                    filters=kernel[0],
                    strides=kernel[1],
                    kernel_size=self.kernel_size,
                    padding='same',
                    kernel_initializer=self.initializer)(output)
                output = tf.layers.BatchNormalization(name='dec_bn_' + str(i+3+j))(output)
                output = tf.nn.relu(output, name='dec_ReLu_'+str(j+1))
                if kernel[2] != 0:
                    output = tf.layers.Dropout(
                        name='dec_dropout_' + str(j),
                        rate=kernel[2],
                        seed=self.seed)(output, training=self.is_training)
                # Skip connection: concatenate the encoder layer of the same
                # resolution along the channel axis.
                output = tf.concat([layers[len(layers) - j - 2], output], axis=3)
            # Final 1x1 conv projects to 2 output channels in [-1, 1].
            output = tf.layers.Conv2D(
                name='dec_conv_' + str(i+3),
                filters=2,
                strides=1,
                kernel_size=1,
                padding='same',
                activation=tf.nn.tanh,
                kernel_initializer=self.initializer)(output)
        return output
| true |
0cd25a6ee122672cf8a0cecd02bbc172458a653f | Python | zhualice/AV-study | /viapoint_editor/data/parameters/accurate_lpf.py | UTF-8 | 1,003 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from parameters import *
from scipy.fftpack import rfft, irfft, rfftfreq
import numpy as np
import matplotlib.pylab as plt
# Registered type id for this parameter and its default cutoff frequency (Hz).
LPF_TYPE = 1001
DEFAULT_LPF_FILTER_FREQUENCY_HZ = 20
class AccurateLPF(IParameter):
    # Ideal ("brick-wall") low-pass filter: zeroes every FFT bin at/above the
    # configured cutoff frequency and transforms back.  (Python 2 module.)
    def __init__(self):
        #Initializes class as a parameter
        IParameter.__init__(self, "Ideal LPF", LPF_TYPE)
        # User-tunable cutoff frequency in Hz.
        self.setVariable("FREQUENCY", DEFAULT_LPF_FILTER_FREQUENCY_HZ)
    def applyParameter(self, data):
        # Sample rate is supplied by the host as a hidden variable.
        rate = self.getHiddenVariable('AUDIO_FRAME_RATE')
        print rate, self.getVariable("FREQUENCY")
        #low pass filter
        # NOTE(review): clip(0) zeroes negative sample values BEFORE the
        # transform, which alters the signal itself, not only the spectrum —
        # confirm that is intended.
        fftdata = rfft(data.clip(0))
        frequencies = rfftfreq(fftdata.size, (1.0/rate))
        # Index of the first bin at or above the cutoff; everything past it
        # is zeroed out.
        index = np.where(frequencies >= self.getVariable("FREQUENCY"))[0][0]
        fftdata[index+1:] = 0
        ##removes negative values
        filtered_data = irfft(fftdata)
        return filtered_data
def loadParameter():
    # Plugin entry point: hand the host a fresh ideal-LPF parameter instance.
    return AccurateLPF()
| true |
979cc8ada3d6e2ef23a6f0aff6848b3c9f0ac2ea | Python | 100ballovby/CRUD_sqlite_tkinter | /app.py | UTF-8 | 4,392 | 3.078125 | 3 | [] | no_license | from tkinter import *
from tkinter import messagebox
from db import Database
# Open (or create) the SQLite-backed store database file.
db = Database('store.db')
# Create the application's main window...
app = Tk()
# ...and configure its size and title.
app.geometry('750x450')
app.title('Store Manager')
########## application logic starts here ##########
def populate_list():
    """Refresh the listbox: clear it, then re-read every row from the DB."""
    parts_list.delete(0, END)
    for row in db.read():
        parts_list.insert(END, row)
    # clear the order output area
    # read the database contents
    # fill the order output area with rows from the DB
def add_item():
    """Validate the four entry fields, insert a new row, and refresh the UI."""
    if part_text.get() == '' or customer_text.get() == '' or retailer_text.get() == '' or price_text.get() == '':
        messagebox.showwarning('Required Fields', 'Please fill all fields!')
        return
    db.create(part_text.get(), customer_text.get(),
              retailer_text.get(), price_text.get())
    # ^ collected the data
    parts_list.delete(0, END)  # clear the product list
    parts_list.insert(END, (part_text.get(), customer_text.get(),
                            retailer_text.get(), price_text.get()))
    # NOTE(review): the manual delete/insert above is immediately redone by
    # populate_list() below — confirm it can be removed.
    clear_text()
    populate_list()
def clear_text():
    """Blank out all four input fields."""
    for widget in (part_entry, customer_entry, retailer_entry, price_entry):
        widget.delete(0, END)
def select_item(event):
    """Listbox selection handler: copy the selected row into the entries."""
    try:
        global selected_item
        index = parts_list.curselection()[0]  # take the first selected entry
        selected_item = parts_list.get(index)
        # ^ keep the tuple with the product's data
        # then clear every input field and fill it with the selected row
        part_entry.delete(0, END)  # clear the field
        part_entry.insert(END, selected_item[1])  # insert the product name
        customer_entry.delete(0, END)
        customer_entry.insert(END, selected_item[2])
        retailer_entry.delete(0, END)
        retailer_entry.insert(END, selected_item[3])
        price_entry.delete(0, END)
        price_entry.insert(END, selected_item[4])
    # curselection() is empty when the selection event fires with nothing
    # selected; silently ignore that case.
    except IndexError:
        pass
def update_item():
    """Write the edited field values back to the currently selected row."""
    db.update(selected_item[0], part_text.get(), customer_text.get(),
              retailer_text.get(), price_text.get())
    populate_list()
def remove_item():
    """Delete the currently selected row from the DB and refresh the UI."""
    db.delete(selected_item[0])
    clear_text()
    populate_list()
# ---- Widget construction: a 2x2 grid of labelled entries, the order
# ---- listbox with scrollbar, and four action buttons. ----
# Part
part_text = StringVar()
part_label = Label(app, text='Part Name', font=('bold', 16), pady=20)
part_entry = Entry(app, textvariable=part_text)
part_label.grid(row=0, column=0)
part_entry.grid(row=0, column=1)
# Customer
customer_text = StringVar()
customer_label = Label(app, text='Customer name', font=('bold', 16), pady=20)
customer_entry = Entry(app, textvariable=customer_text)
customer_label.grid(row=0, column=2)
customer_entry.grid(row=0, column=3)
# Retailer
retailer_text = StringVar()
retailer_label = Label(app, text='Retailer name', font=('bold', 16), pady=20)
retailer_entry = Entry(app, textvariable=retailer_text)
retailer_label.grid(row=1, column=0)
retailer_entry.grid(row=1, column=1)
# Price
price_text = DoubleVar()
price_label = Label(app, text='Price', font=('bold', 16), pady=20)
price_entry = Entry(app, textvariable=price_text)
price_label.grid(row=1, column=2)
price_entry.grid(row=1, column=3)
# Parts list (ListBox)
parts_list = Listbox(app, height=10, width=50, border=1)
parts_list.grid(row=3, column=0, columnspan=4, rowspan=6, pady=20, padx=20)
# Scrollbar
scrollbar = Scrollbar(app)
scrollbar.grid(row=3, column=4)
# setting up Scrollbar
parts_list.configure(yscrollcommand=scrollbar.set)
scrollbar.configure(command=parts_list.yview)
# bind list selection to the select_item handler
parts_list.bind('<<ListboxSelect>>', select_item)
# Buttons
add_btn = Button(app, text='Add item', width=12, command=add_item)
remove_btn = Button(app, text='Remove item', width=12, command=remove_item)
update_btn = Button(app, text='Update item', width=12, command=update_item)
clear_btn = Button(app, text='Clear fields', width=12, command=clear_text)
add_btn.grid(row=2, column=0, pady=20)
remove_btn.grid(row=2, column=1, pady=20)
update_btn.grid(row=2, column=2, pady=20)
clear_btn.grid(row=2, column=3, pady=20) | true |
7e84b32094e23b86cd525f86f9d695286a0bb884 | Python | 981377660LMT/algorithm-study | /11_动态规划/dp分类/线性dp/E. Sending a Sequence Over the Network.py | UTF-8 | 800 | 3.5625 | 4 | [] | no_license | # https://zhuanlan.zhihu.com/p/572692304
# Given an array, split it into consecutive segments.
# !A value v heading (leftmost) or trailing (rightmost) a segment determines
# that segment's extent: the segment consists of v additional elements on the
# other side of it, i.e. v+1 elements in total.
# Decide whether the whole array can be split into valid segments.
# n <= 1e5
# dp[i] = whether the first i numbers can be fully split into valid segments
# each incoming a[i] may act as either the leftmost or the rightmost element
# of some segment
from typing import List
def split(nums: List[int]) -> bool:
    """Return True if *nums* can be partitioned into consecutive segments,
    where a segment of k+1 elements is valid when its first or last
    element equals k.

    Linear DP: reachable[i] means the prefix of length i can be fully
    partitioned.
    """
    total = len(nums)
    reachable = [False] * (total + 1)
    reachable[0] = True
    for pos, value in enumerate(nums, start=1):
        # value as the LEFT end: it opens a segment covering the next
        # `value` elements after itself.
        if pos + value <= total and reachable[pos - 1]:
            reachable[pos + value] = True
        # value as the RIGHT end: it closes a segment of value+1 elements
        # ending at this position.
        if pos - value - 1 >= 0 and reachable[pos - value - 1]:
            reachable[pos] = True
    return reachable[total]
assert split([1, 1, 3, 4, 1, 3, 2, 2, 3])
| true |
1940e9e83f010333bc7123a9f39033d39602dcab | Python | Gelbpunkt/aioscheduler | /aioscheduler/scheduler.py | UTF-8 | 8,404 | 2.609375 | 3 | [
"MIT"
] | permissive | """
MIT License
Copyright (c) 2020 Jens Reidel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import asyncio
import heapq
import warnings
from datetime import datetime
from functools import partial
from typing import Any, Awaitable, List, Optional, Set, Tuple
from uuid import UUID, uuid4
from .task import Task
class TimedScheduler:
    """
    A clever scheduler for scheduling coroutine execution
    at a specific datetime within a single task
    """

    def __init__(
        self, max_tasks: Optional[int] = None, prefer_utc: bool = True
    ) -> None:
        """Create a scheduler.

        max_tasks caps how many tasks may be pending at once; prefer_utc
        selects whether deadlines are compared against UTC or local time.
        """
        # A list of all tasks
        self._tasks: List[Task] = []
        # The internal loop task
        self._task: Optional[asyncio.Task[None]] = None
        self._task_count = 0
        # All running tasks
        self._running: List[Tuple[Task, asyncio.Task[Any]]] = []
        # The next task to run, (datetime, coro)
        self._next: Optional[Task] = None
        # Event fired when a initial task is added
        self._added = asyncio.Event()
        # Event fired when the loop needs to reset
        self._restart = asyncio.Event()
        # Maximum tasks to schedule
        self._max_tasks = max_tasks
        if prefer_utc:
            self._datetime_func = datetime.utcnow
        else:
            self._datetime_func = datetime.now

    @property
    def is_started(self) -> bool:
        # True while the internal loop task exists and has not finished.
        return self._task is not None and not self._task.done()

    def start(self) -> None:
        """Start the scheduler's internal loop (requires a running event loop)."""
        self._task = asyncio.create_task(self.loop())

    async def loop(self) -> None:
        """Main loop: sleep until the next deadline, then fire its callback.

        NOTE(review): asyncio.wait() is given bare coroutines here, which is
        deprecated since Python 3.8 and rejected in 3.11+; also the losing
        waiter of each race is never cancelled — confirm against the
        targeted Python versions.
        """
        while True:
            if self._next is None:
                # Wait for a task
                await self._added.wait()
            next_ = self._next
            assert next_ is not None and isinstance(
                next_.priority, datetime
            )  # mypy fix
            # Sleep until task will be executed
            done, _ = await asyncio.wait(
                [
                    asyncio.sleep(
                        (next_.priority - self._datetime_func()).total_seconds()
                    ),
                    self._restart.wait(),
                ],
                return_when=asyncio.FIRST_COMPLETED,
            )
            # _restart.wait() resolves to True; sleep() resolves to None, so
            # the result distinguishes "restart requested" from "deadline hit".
            fut = done.pop()
            if fut.result() is True:  # restart event
                continue
            # Run it
            task = asyncio.create_task(next_.callback)
            self._running.append((next_, task))
            task.add_done_callback(partial(self._callback, task_obj=next_))
            # Get the next task sorted by time
            try:
                self._next = heapq.heappop(self._tasks)
                self._task_count -= 1
            except IndexError:
                self._next = None
                self._task_count = 0

    def _callback(self, task: asyncio.Task[Any], task_obj: Task) -> None:
        # Done-callback: drop the finished task from the running list.
        for idx, (running_task, _) in enumerate(self._running):
            if running_task.uuid == task_obj.uuid:
                del self._running[idx]

    def cancel(self, task: Task) -> bool:
        """Cancel a scheduled or running task; return True if it was found."""
        # asyncio does not like cancelling coroutines
        # so just suppress it
        # NOTE(review): catch_warnings() without simplefilter("ignore") does
        # not actually suppress anything — confirm the intent.
        with warnings.catch_warnings():
            # Case 1: it is the task the loop is currently waiting on.
            if self._next is not None and task.uuid == self._next.uuid:
                if self._tasks:
                    self._next = heapq.heappop(self._tasks)
                else:
                    self._next = None
                self._task_count -= 1
                self._restart.set()
                self._restart.clear()
                return True
            # Case 2: its callback is already running.
            for idx, (running_task, asyncio_task) in enumerate(self._running):
                if running_task.uuid == task.uuid:
                    del self._running[idx]
                    asyncio_task.cancel()
                    return True
            # Case 3: it is still queued in the heap.
            for idx, scheduled_task in enumerate(self._tasks):
                if scheduled_task.uuid == task.uuid:
                    del self._tasks[idx]
                    self._task_count -= 1
                    heapq.heapify(self._tasks)
                    return True
            return False

    def schedule(self, coro: Awaitable[Any], when: datetime) -> Task:
        """Schedule *coro* to run at datetime *when*; returns a Task handle.

        Raises ValueError when the task cap is reached or *when* is in
        the past.
        """
        if self._max_tasks is not None and self._task_count >= self._max_tasks:
            raise ValueError(f"Maximum tasks of {self._max_tasks} reached")
        if when < self._datetime_func():
            raise ValueError("May only be in the future.")
        self._task_count += 1
        task = Task(priority=when, uuid=uuid4(), callback=coro)
        if self._next:
            assert isinstance(self._next.priority, datetime)  # mypy fix
            # An earlier deadline pre-empts the task the loop is waiting on.
            if when < self._next.priority:
                heapq.heappush(self._tasks, self._next)
                self._next = task
                self._restart.set()
                self._restart.clear()
            else:
                heapq.heappush(self._tasks, task)
        else:
            self._next = task
        self._added.set()
        self._added.clear()
        return task
class QueuedScheduler:
    """
    A dumb scheduler for scheduling coroutine execution
    in a queue of infinite length
    """

    def __init__(self, max_tasks: Optional[int] = None) -> None:
        """Create a FIFO scheduler; max_tasks caps pending tasks."""
        # A list of all tasks, elements are (coro, datetime)
        self._tasks: asyncio.Queue[Task] = asyncio.Queue()
        # The internal loop task
        self._task: Optional[asyncio.Task[None]] = None
        self._task_count = 0
        # current running task
        self._current_uuid: Optional[UUID] = None
        self._current_task: Optional[asyncio.Task[Any]] = None
        # cancelled UUIDs
        self._cancelled: Set[UUID] = set()
        # Maximum tasks to schedule
        self._max_tasks = max_tasks

    @property
    def is_started(self) -> bool:
        # True while the internal loop task exists and has not finished.
        return self._task is not None and not self._task.done()

    def start(self) -> None:
        """Start the scheduler's internal loop (requires a running event loop)."""
        self._task = asyncio.create_task(self.loop())

    async def loop(self) -> None:
        """Drain the queue, running callbacks one at a time, in order.

        Tasks whose uuid was pre-cancelled are skipped without running.
        """
        while True:
            task = await self._tasks.get()
            if task.uuid in self._cancelled:
                continue
            # Run it in the current task
            # else this scheduler would be pointless
            self._current_task = asyncio.create_task(task.callback)
            self._current_uuid = task.uuid
            try:
                await self._current_task
            except asyncio.CancelledError:
                # The running callback was cancelled via cancel(); keep the
                # loop itself alive and move on to the next queued task.
                self._task_count -= 1
                continue
            self._task_count -= 1

    def cancel(self, task: Task) -> bool:
        """Cancel *task*: stop it if currently running, else blacklist it."""
        if task.uuid == self._current_uuid and self._current_task:
            self._current_task.cancel()
        else:
            self._cancelled.add(task.uuid)
        return True

    def schedule(self, coro: Awaitable[Any]) -> Task:
        """Append *coro* to the queue; returns a Task handle for cancel()."""
        if self._max_tasks is not None and self._task_count >= self._max_tasks:
            raise ValueError(f"Maximum tasks of {self._max_tasks} reached")
        task = Task(priority=0, uuid=uuid4(), callback=coro)
        self._task_count += 1
        self._tasks.put_nowait(task)
        return task
class LifoQueuedScheduler(QueuedScheduler):
    """
    A dumb scheduler like QueuedScheduler,
    but uses a Last-in-first-out queue
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Reuse all QueuedScheduler bookkeeping, then swap the FIFO queue
        # for a LIFO one so the most recently scheduled task runs first.
        super().__init__(*args, **kwargs)
        self._tasks: asyncio.LifoQueue[Task] = asyncio.LifoQueue()
| true |
4af14eed791f98a188a0f012612cc555597e25ed | Python | lingxueli/CodingBook | /Python/file_path.py | UTF-8 | 2,376 | 3.015625 | 3 | [] | no_license | import os
# Demonstration of os / os.path path handling relative to __file__ and the
# current working directory; the inline comments record the output observed
# on the author's Windows machine.
print('getcwd: ', os.getcwd())
print('__file__', __file__)
# getcwd: C:\Users\lingx\OneDrive\Documents\CodingBook\Python
# __file__ C:/Users/lingx/OneDrive/Documents/CodingBook/Python/file_path.py
print('basename: ', os.path.basename(__file__))
print('dirname: ', os.path.dirname(__file__))
# basename: file_path.py
# dirname: C:/Users/lingx/OneDrive/Documents/CodingBook/Python
print('abspath: ', os.path.abspath(__file__))
print('abs dirname: ', os.path.dirname(os.path.abspath(__file__)))
# abspath: C:\Users\lingx\OneDrive\Documents\CodingBook\Python\file_path.py
# abs dirname: C:\Users\lingx\OneDrive\Documents\CodingBook\Python
print('[set target path 1]')
target_path_1 = os.path.join(os.path.dirname(__file__), 'target_1.txt')
print('target_path_1: ', target_path_1)
print('read target file:')
with open(target_path_1) as f:
    print(f.read())
# [set target path 1]
# target_path_1: C:/Users/lingx/OneDrive/Documents/CodingBook/Python\target_1.txt
# read target file:
# test
# The upper directory is represented by ../
print('[set target path 2]')
target_path_2 = os.path.join(os.path.dirname(__file__), '../dst/target_2.txt')
print('target_path_2: ', target_path_2)
print('normalize : ', os.path.normpath(target_path_2))
print('read target file:')
with open(target_path_2) as f:
    print(f.read())
# target_path_2: C:/Users/lingx/OneDrive/Documents/CodingBook/Python\../dst/target_2.txt
# normalize : C:\Users\lingx\OneDrive\Documents\CodingBook\dst\target_2.txt
# read target file:
# test2
print('[change directory]')
os.chdir(os.path.dirname(os.path.abspath(__file__)))
print('getcwd: ', os.getcwd())
# [change directory]
# getcwd: C:\Users\lingx\OneDrive\Documents\CodingBook\Python
# After chdir, bare relative paths resolve against the script's directory.
print('[set target path 1 (after chdir)]')
target_path_1 = 'target_1.txt'
print('target_path_1: ', target_path_1)
print('read target file:')
with open(target_path_1) as f:
    print(f.read())
print()
print('[set target path 2 (after chdir)]')
target_path_2 = '../dst/target_2.txt'
print('target_path_2: ', target_path_2)
print('read target file:')
with open(target_path_2) as f:
    print(f.read())
##[set target path 1 (after chdir)]
##target_path_1: target_1.txt
##read target file:
##test
##
##[set target path 2 (after chdir)]
##target_path_2: ../dst/target_2.txt
##read target file:
##test2
| true |
bdc075b8af8d8b5d9264662e4256ad46fdd0492a | Python | wabyking/830 | /data_split.py | UTF-8 | 3,965 | 2.765625 | 3 | [] | no_license | import os
import pandas as pd
import numpy as np
import datetime
# Dataset/date-window selected when the script runs as __main__.
data="netflix_six_month"
# Each window: ratings with timestamp in (start, split) become the training
# set and (split, end) the test set.  Day "00" / month "13" act as open
# bounds because timestamps are compared lexicographically as strings.
netflix_month={"start":"2005-06-00",
               "split":"2005-12-00",
               "end"  :"2005-13-00"
               }
netflix_year={"start":"2004-06-00",
              "split":"2005-06-00",
              "end"  :"2005-07-00"
              }
netflix_full={"start":"1999-12-00",
              "split":"2005-12-00",
              "end"  :"2005-13-00"
              }
movieslen100k={"start":"1000-12-00",
               "split":"1998-03-08",
               "end"  :"3005-13-00"
               }
date_dict={"netflix_six_month":netflix_month,"netflix_year":netflix_year,"netflix_full":netflix_full,"movieslen100k":movieslen100k}
def split_data(data):
    """Build a filtered, re-indexed ratings subset for the named dataset.

    Reads data/<dataset>/ratings.csv (tab separated: uid, itemid, rating,
    timestamp), splits it into train/test windows by date, keeps only users
    with enough 5-star ratings in each window, remaps user/item ids to dense
    integer codes, and writes data/<dataset>/ratings_subset.csv.
    """
    splited_date = date_dict[data]
    if not data.startswith("netflix"):
        filename="data/"+data+"/ratings.csv"
    else:
        filename="data/netflix/ratings.csv"
    df=pd.read_csv(filename,names=["uid","itemid","rating","timestamp"], sep="\t")
    # df =df[ (df.timestamp > "2005-08-31") & (df.timestamp < "2005-13") ]
    # Non-netflix datasets store Unix epoch timestamps; convert them to the
    # same "YYYY-MM-DD" strings the date windows use.
    if not data.startswith("netflix"):
        stamp2date = lambda stamp :datetime.datetime.fromtimestamp(stamp)
        df["timestamp"]= df["timestamp"].apply(stamp2date).dt.strftime(date_format="%Y-%m-%d")
    # pd.to_datetime(df['c'],format='%Y-%m-%d %H:%M:%S')#
    test =df[ (df.timestamp > splited_date["split"]) & (df.timestamp < splited_date["end"]) ]
    train =df[ (df.timestamp > splited_date["start"]) & (df.timestamp < splited_date["split"])]
    # Per-user counts of (effectively) 5-star ratings in each window.
    train_user_count=train.groupby("uid").apply(lambda group: len(group[group.rating>4.99])).to_dict()
    test_user_count=test.groupby("uid").apply(lambda group: len(group[group.rating>4.99])).to_dict()
    #print(len(df[df.rating>3.99]))
    # Dead branch kept from experimentation: a random 80/20 split.  Note the
    # code after it assumes `whole` exists, so only the else-branch is valid.
    if False:
        index=np.random.random(len(df))<0.8
        train=df[index]
        test=df[~index]
    else:
        # Keep users with >20 five-star train ratings; test users also need
        # >40 five-star test ratings and must appear in train.
        train_users = {user for user,cnt in train_user_count.items() if cnt>20}
        test_users = {user for user,cnt in test_user_count.items() if cnt>40} & train_users
        whole_users=(test_users & train_users)
        test1=test[test.uid.isin(whole_users)]
        train1=train[train.uid.isin(train_users)]
        whole=pd.concat([train1,test1])
        # Remap raw ids to dense 0..n-1 category codes.
        whole['u_original'] = whole['uid'].astype('category')
        whole['i_original'] = whole['itemid'].astype('category')
        whole['uid'] = whole['u_original'].cat.codes
        whole['itemid'] = whole['i_original'].cat.codes
        # NOTE(review): positional axis (drop(label, 1)) is deprecated in
        # newer pandas; axis=1 is the keyword form.
        whole = whole.drop('u_original', 1)
        whole = whole.drop('i_original', 1)
    # print (len(users))
    # print (len(items))
    print (len(whole.uid.unique()))
    print (len(whole.itemid.unique()))
    # test1 =whole[ (whole.timestamp > "2005-11-31") & (whole.timestamp < "2005-13") ]
    # train1 =whole[ (whole.timestamp > "2005-08-31") & (whole.timestamp < "2005-12")]
    # train1.to_csv("netflix_dir/train.csv",index=False,header=None,sep="\t")
    # test1.to_csv("netflix_dir/test.csv",index=False,header=None,sep="\t")
    path_dir="data/"+data
    if not os.path.exists(path_dir):
        os.makedirs(path_dir)
    whole.to_csv(path_dir+"/ratings_subset.csv",index=False,header=None,sep="\t")
def processNetflix():
    """Flatten the Netflix Prize per-movie files into one ratings.csv.

    Each file in training_set/ starts with a "<movieid>:" header line
    followed by "uid,rating,date" rows; the movie id is appended to every
    row, and the result is rewritten tab-separated in
    (uid, itemid, rating, timestamp) column order.
    """
    root="training_set"
    with open("ratings.csv","w") as out:
        for i in os.listdir(root):
            if os.path.isfile(os.path.join(root,i)):
                with open(os.path.join(root,i)) as f:
                    lines=f.readlines()
                    # Header is "<movieid>:"; strip the trailing colon.
                    itemid= (lines[0].strip()[:-1])
                    print (itemid)
                    for line in lines[1:]:
                        line=line.strip()
                        tokens=line.split(",")
                        tokens.append(itemid)
                        out.write(",".join(tokens)+"\n")
    # Re-read the merged file and rewrite it with reordered columns.
    df=pd.read_csv("ratings.csv",names=["uid","rating","timestamp","itemid"])
    df[["uid","itemid","rating","timestamp"]].to_csv("ratings.csv",index=False,header=None,sep="\t")
if __name__=="__main__":
split_data(data) | true |
ecbf6ef1f9e290cd9e72cb78b5aa6ba523ad42e9 | Python | dborowy/pp1 | /10-SoftwareTesting/zbiory.py | UTF-8 | 206 | 2.546875 | 3 | [] | no_license | from prob4 import Zbiory
# Demo of the Zbiory ("sets") helper: intersection, union and difference
# of two sample sets.
zbior1 = set([2,3,4])
zbior2 = set([1,3,5])
ilocz = Zbiory.iloczyn(zbior1,zbior2)   # intersection
suma = Zbiory.suma(zbior1,zbior2)       # union
roznica = Zbiory.roznica(zbior1,zbior2) # difference
print(ilocz,suma,roznica) | true |
649200660be30951fc2849d835743f4275e0e2ce | Python | nickac597/AutoClickerProject | /venv/autoClick.py | UTF-8 | 787 | 3.125 | 3 | [] | no_license | #Author: Nicholas Catalano
import pyautogui
import random
# Simple double-click autoclicker with randomised jitter around the starting
# mouse position.
avgRand = 0  # NOTE(review): never used below — confirm it can be removed
mousePos = pyautogui.position()
i = 1
# loop 160 clicks
# NOTE(review): with i starting at 1 and "while i < 160" this performs 159
# iterations, not 160 — confirm the intended count.
while i < 160:
    print("click: ", i)
    i += 1
    # End of range of both randoms added = 60 seconds
    # Takes each random pause and adds them together
    randPause = random.uniform(0.01, 49.24)
    randPause2 = random.uniform(0.01, 10.76)
    totalPause = randPause + randPause2
    # NOTE(review): totalPause is computed but never slept on; the actual
    # delay comes from the fixed pyautogui.PAUSE below — confirm intent.
    # click at initial position and pause for the totalPause found earlier
    pyautogui.doubleClick(x=mousePos.x, y=mousePos.y)
    pyautogui.PAUSE = 2
    # move the mouse by positive or negative 8 pixels on each axis
    # relative to the positon of the mouse
    randMove = random.uniform(-8, 8)
    randMove2 = random.uniform(-8, 8)
    pyautogui.moveRel(randMove, randMove2, )
| true |
13d32c11ec89b2e07855a14677da65d14e31d844 | Python | chanpham97/aamm | /analysis/colorIdentifier.py | UTF-8 | 3,092 | 2.96875 | 3 | [] | no_license | from math import sqrt
import sys
'''
sources: https://www.compuphase.com/cmetric.htm
'''
class ColorBucketer:
    """Map BGR pixel values and HSV hues onto a small palette of color names.

    Distance weighting follows https://www.compuphase.com/cmetric.htm.
    """
    def __init__(self):
        self.BIAS = 0  # widened so only very white / very black colors land in those buckets
        self.MAX_DISTANCE = sqrt(3 * 255 ** 2)
        # Reference colors in OpenCV-style BGR order.  Dict order matters:
        # bucket_color keeps the LAST base that ties on distance.
        self.color_bases = {
            'black': [0, 0, 0],
            'gray': [127, 127, 127],
            'white': [255, 255, 255],
            'red': [0, 0, 255],
            'blue': [255, 0, 0],
            'green': [0, 255, 0],
            'yellow': [0, 255, 255],
            'orange': [0, 127, 255],
            'purple': [255, 0, 127],
            'pink': [255, 0, 255],
            'brown': [25, 50, 100]
        }
        self.HUE_DIST = 10
        self.HUE_SV_MIN = 100
        self.HUE_SV_MAX = 255
        self.WHITE_S_MAX = 50  # kept for compatibility; white hue fallback is disabled
        self.BLACK_V_MAX = 50  # kept for compatibility; black hue fallback is disabled
        # Hue circle is 0..179 (OpenCV convention), so red wraps around 0.
        self.hue_bases = {
            'red': 0,
            'yellow': 30,
            'green': 60,
            'cyan': 90,
            'blue': 120,
            'magenta': 150
        }
    def set_bias(self, val=30):
        """Push the white/black reference points outside the RGB cube."""
        self.BIAS = val
        self.color_bases['white'] = [255 + self.BIAS, 255 + self.BIAS, 255 + self.BIAS]
        self.color_bases['black'] = [0 - self.BIAS, 0 - self.BIAS, 0 - self.BIAS]
    def reset_bias(self):
        """Restore the unbiased white/black reference points."""
        self.BIAS = 0
        self.color_bases['white'] = [255, 255, 255]
        self.color_bases['black'] = [0, 0, 0]
    def d(self, c1, c2):
        """Weighted Euclidean distance between two BGR colors.

        Red presence in both colors shifts weight from the blue channel to
        the red channel (compuphase heuristic).
        """
        b1, g1, r1 = c1
        b2, g2, r2 = c2
        dr, dg, db = r1 - r2, g1 - g2, b1 - b2
        if r1 >= 128 and r2 >= 128:
            wr, wg, wb = 3, 4, 2
        else:
            wr, wg, wb = 2, 4, 3
        return sqrt(wr * dr ** 2 + wg * dg ** 2 + wb * db ** 2)
    def bucket_color(self, color_in):
        """Return the palette name closest to *color_in* (a BGR triple)."""
        self.set_bias()
        best_name, best_dist = '', self.MAX_DISTANCE
        for name, base in self.color_bases.items():
            dist = self.d(base, color_in)
            if dist <= best_dist:  # <= so later dict entries win ties, as before
                best_name, best_dist = name, dist
        self.reset_bias()
        return best_name
    def bucket_hue(self, hue_in):
        """Return the hue-family name for an HSV triple, or None.

        Only pixels with both saturation and value inside
        [HUE_SV_MIN, HUE_SV_MAX] are classified; the white/black fallbacks
        from the original implementation remain disabled.
        """
        h, s, v = hue_in
        if self.HUE_SV_MIN <= s <= self.HUE_SV_MAX and self.HUE_SV_MIN <= v <= self.HUE_SV_MAX:
            # Red straddles 0 on the hue circle, so test the wrap-around first.
            if h >= (self.hue_bases['red'] - self.HUE_DIST) % 180 or h <= self.hue_bases['red'] + self.HUE_DIST:
                return 'red'
            for name, center in self.hue_bases.items():
                if center - self.HUE_DIST <= h <= center + self.HUE_DIST:
                    return name
        return None
def main():
    # CLI helper: argv[1..3] are the B, G, R components of the input color,
    # argv[4] is a palette name to compare against.  Prints the chosen
    # bucket, then the distance to the named base and to the chosen base.
    b = ColorBucketer()
    input_col = [int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])]
    col = b.bucket_color(input_col)
    print(col)
    print(b.d(input_col, b.color_bases[sys.argv[4]]), b.d(input_col, b.color_bases[col]))
| true |
de2c7bda2d6d85d84d2e2c9961ffce31238580c9 | Python | AaronTho/Python_Notes | /code_exercise_list_comprehension.py | UTF-8 | 169 | 3.234375 | 3 | [] | no_license | def list_comprehension():
numbers = [1, 2, 3, 4, 5, 6]
result = [number + 1 for number in numbers]
print(result)
return(result)
list_comprehension()
| true |
641f1fd316e7f3d2dfacb7336944790dab64ae3f | Python | lbvf12321lbvf/infa_2021_shchigarev | /lab3/gun.py | UTF-8 | 20,067 | 2.75 | 3 | [] | no_license | from random import randrange as rnd, choice
import tkinter as tk
import math
import numpy as np
import time
from collections import defaultdict
# Main application window (800x600) with a white drawing canvas.
root = tk.Tk()
fr = tk.Frame(root)  # NOTE(review): created but never packed or used in view — confirm it is needed
root.geometry('800x600')
canv = tk.Canvas(root, bg='white')
canv.pack(fill=tk.BOTH, expand=1)
def on_key_press(event):
    """Keyboard handler: WASD (plus Russian-layout equivalents) set the gun's
    velocity components; "1"/"2" switch the projectile type on the global
    gun ``g1``.

    Bug fix: the original tested ``event.keysym == '1' or '2'``, which is
    always truthy (the non-empty string '2'), so ``g1.shot_type`` was called
    on every key press.  ``shot_type`` ignores other keys, so the observable
    behaviour is unchanged, but the condition now says what it means.
    """
    global vx, vy
    if event.keysym in ('a', 'ф', 'A', 'Ф'):
        vx = -5
    elif event.keysym in ('d', 'в', 'D', 'В'):
        vx = 5
    elif event.keysym in ('w', 'ц', 'W', 'Ц',):
        vy = -5
    elif event.keysym in ('s', 'ы', 'S', 'Ы'):
        vy = 5
    if event.keysym in ('1', '2'):
        g1.shot_type(event.keysym)
def on_key_release(event):
    """Stop the gun's horizontal/vertical motion when a movement key goes up."""
    global vx, vy
    horizontal_keys = ('a', 'd', 'ф', 'A', 'Ф', 'в', 'D', 'В')
    vertical_keys = ('w', 's', 'ц', 'W', 'Ц', 'ы', 'S', 'Ы')
    if event.keysym in horizontal_keys:
        vx = 0
    elif event.keysym in vertical_keys:
        vy = 0
class Board:
    """A wall segment on the canvas.

    Either horizontal (hy == 0, extent hx) or vertical (hx == 0, extent hy).
    check() bounces moving objects off the wall and flags targets that have
    escaped far off-screen.
    """
    def __init__(self, x, y, hx, hy):
        self.x = x
        self.y = y
        self.hx = hx
        self.hy = hy
        self.k = 1   # velocity damping applied on bounce (3/4 for balls)
        self.id = canv.create_line(x, y, x + hx, y + hy, width=2)
        self.kx = 1  # widened hit-box factor for "carrier" targets
    def check(self, obj, typ=''):
        """
        Collision test between *obj* and this wall: reflects the object's
        velocity (with damping for balls), and counts targets that leave the
        play field while still alive as escaped.
        """
        global successful_targets
        y = obj.y
        x = obj.x
        r = obj.r
        if isinstance(obj, Balls):
            self.k = 3 / 4
        if isinstance(obj, Target):
            if obj.type == 'carrier':
                self.kx = 2
        if self.hy == 0:
            # Horizontal wall: reflect the vertical velocity component.
            if (self.y + r) >= y >= (self.y - r) and self.x < x < self.x + self.hx:
                obj.y = self.y + (r + 1) * np.sign(obj.vy)
                obj.vy = - obj.vy * self.k
                obj.vx *= self.k
        if self.hx == 0:
            # Vertical wall: reflect the horizontal velocity component.
            if (self.x + r * self.kx) >= x >= (self.x - r * self.kx) and self.y < y < self.y + self.hy:
                obj.x = self.x - (r * self.kx + 1) * np.sign(obj.vx)
                obj.vx = - obj.vx * self.k
                obj.vy *= self.k
        # Bug fix: the original tested "obj.y <= -100" twice and never
        # checked the left edge, so targets escaping leftwards were missed.
        if obj.x >= 1000 or obj.y >= 1000 or obj.y <= -100 or obj.x <= -100:
            if isinstance(obj, Target) and obj.live > 0:
                successful_targets += 1
                print('lol')
                obj.live = - 1
        self.k = 1
        self.kx = 1
class Balls:
    def __init__(self, x=40, y=450):
        """Ball constructor.
        Args:
        x - initial horizontal position of the ball
        y - initial vertical position of the ball
        """
        self.x = x
        self.y = y
        self.r = 10
        self.vx = 0
        self.vy = 0
        self.color = choice(['blue', 'green', 'red', 'brown'])
        self.id = canv.create_oval(
            self.x - self.r,
            self.y - self.r,
            self.x + self.r,
            self.y + self.r,
            fill=self.color
        )
        # Remaining lifetime in frames once the ball is (almost) at rest.
        self.live = 30
    def rot(self, fi):
        """Rotate the ball's velocity vector by angle fi.

        NOTE(review): the second line uses the ALREADY-UPDATED self.vx, so
        this is not an exact rotation matrix application — confirm intent.
        """
        self.vx = self.vx * math.cos(fi) + self.vy * math.sin(fi)
        self.vy = - self.vx * math.sin(fi) + self.vy * math.cos(fi)
    def set_coords(self):
        """
        Sync the canvas oval with the current (x, y) position.
        """
        canv.coords(
            self.id,
            self.x - self.r,
            self.y - self.r,
            self.x + self.r,
            self.y + self.r
        )
    def move(self, boards):
        """Move the ball over one unit of time.

        Describes the ball's displacement per redraw frame: updates self.x
        and self.y from the velocities self.vx and self.vy, applies gravity
        and drag, and bounces off the walls (window size 800x600).
        """
        self.x += self.vx
        self.y -= self.vy
        # Gravity pulls vy down; both components get 1% drag per frame.
        self.vy -= 0.5
        self.vy *= 0.99
        self.vx *= 0.99
        for bord in boards:
            bord.check(self, typ='ball')
        # A slow ball burns down its lifetime; once expired it is removed
        # from the global balls list and erased from the canvas.
        if self.vx ** 2 + self.vy ** 2 <= 3:
            if self.live < 0:
                balls.pop(balls.index(self))
                canv.delete(self.id)
            else:
                self.live -= 1
                if self.live < 0:
                    balls.pop(balls.index(self))
                    canv.delete(self.id)
        self.set_coords()
    def hit_test(self, obj):
        """Check whether this ball collides with the target described by obj.
        Args:
            obj: The object to test the collision against.
        Returns:
            True if the ball and the target collide, otherwise False.
        """
        if abs(obj.x - self.x) <= (self.r + obj.r) and abs(obj.y - self.y) <= (self.r + obj.r) and obj.live >= 1:
            return True
        else:
            return False
class Gun:
    """Player-controlled gun: aims at the mouse, charges power, fires balls."""
    def __init__(self):
        self.power = 5        # shot power, grows while the button is held
        self.live = 3         # remaining lives
        self.inviz_time = 0   # invincibility ticks left after being hit
        self.type = 'ball'    # current ammo type: 'ball' or 'shotgun'
        self.on = 0           # 1 while the mouse button is held (charging)
        self.an = 1           # last aiming angle (radians)
        self.ou = 1           # aiming direction sign: 1 = right, -1 = left
        self.vx = 0
        self.vy = 0
        self.r = 5
        self.x = 20
        self.y = 450
        self.color = choice(['blue', 'green', 'red', 'brown'])
        self.color2 = self.color  # remembered normal colour (restored after hit flash)
        self.id = canv.create_line(20, 450, 50, 420, width=7)
        self.id2 = canv.create_oval(
            self.x - self.r,
            self.y - self.r,
            self.x + self.r,
            self.y + self.r,
            fill=self.color
        )
    def minus_live(self):
        """
        Lose one life when hit, flash white and start the invincibility timer.
        """
        if self.inviz_time <= 0:
            self.live -= 1
            self.color = 'white'
            self.inviz_time = 100
    def shot_type(self, x):
        """
        Switch the ammo type: '1' selects single ball, '2' selects shotgun.
        """
        if x == '1':
            self.type = 'ball'
            print(1)
        if x == '2':
            self.type = 'shotgun'
            print(2)
    def fire2_start(self, event):
        # Mouse button pressed: start charging power.
        self.on = 1
    def fire2_end(self, event):
        """Fire on mouse-button release.

        The initial velocity components vx/vy of each new ball depend on
        the mouse position at release; shotgun mode spawns three smaller
        balls fanned out by +/-0.3 rad.
        """
        global balls, bullet
        bullet += 1
        if self.type == 'ball':
            new_ball = Balls(self.x, self.y)
            new_ball.r += 5
            if (event.x - self.x) >= 0:
                self.an = math.atan((event.y - self.y) / (event.x - self.x))
            else:
                self.an = - math.atan((event.y - self.y) / (event.x - self.x))
            if (event.x - self.x) >= 0:
                new_ball.vx = self.power * math.cos(self.an)
                new_ball.vy = - self.power * math.sin(self.an)
            else:
                new_ball.vx = - self.power * math.cos(self.an)
                new_ball.vy = - self.power * math.sin(self.an)
            balls += [new_ball]
            self.on = 0
            self.power = 10
        if self.type == 'shotgun':
            for i in range(3):
                new_ball = Balls(self.x, self.y)
                new_ball.r -= 2
                if (event.x - self.x) >= 0:
                    self.an = math.atan((event.y - self.y) / (event.x - self.x))
                else:
                    self.an = - math.atan((event.y - self.y) / (event.x - self.x))
                if (event.x - self.x) >= 0:
                    new_ball.vx = self.power * math.cos(self.an)
                    new_ball.vy = - self.power * math.sin(self.an)
                else:
                    new_ball.vx = - self.power * math.cos(self.an)
                    new_ball.vy = - self.power * math.sin(self.an)
                new_ball.rot(-0.3 + 0.3 * i)
                balls += [new_ball]
            self.on = 0
            self.power = 10
    def hit_test(self, obj):
        """Return True if this gun overlaps `obj` and `obj` is still alive."""
        if abs(obj.x - self.x) <= (self.r + obj.r) and abs(obj.y - self.y) <= (self.r + obj.r) and obj.live >= 1:
            return True
        else:
            return False
    def targeting(self, event=0):
        """Aim the barrel line. With an event, follow the mouse; without,
        redraw using the last stored angle (self.an) and side (self.ou)."""
        if event:
            if (event.x - self.x) >= 0:
                self.an = math.atan((event.y - self.y) / (event.x - self.x))
                self.ou = 1
            else:
                self.an = - math.atan((event.y - self.y) / (event.x - self.x))
                self.ou = -1
            if (event.x - self.x) >= 0:
                canv.coords(self.id, self.x, self.y,
                            self.x + max(self.power, 20) * math.cos(self.an),
                            self.y + max(self.power, 20) * math.sin(self.an)
                            )
            else:
                canv.coords(self.id, self.x, self.y,
                            self.x - max(self.power, 20) * math.cos(self.an),
                            self.y + max(self.power, 20) * math.sin(self.an)
                            )
        else:
            if self.ou == 1:
                canv.coords(self.id, self.x, self.y,
                            self.x + max(self.power, 20) * math.cos(self.an),
                            self.y + max(self.power, 20) * math.sin(self.an)
                            )
            else:
                canv.coords(self.id, self.x, self.y,
                            self.x - max(self.power, 20) * math.cos(self.an),
                            self.y + max(self.power, 20) * math.sin(self.an)
                            )
        if self.on:
            canv.itemconfig(self.id, fill='orange')
        else:
            canv.itemconfig(self.id, fill='black')
    def move(self, v_x, v_y):
        """
        Move the gun body by (v_x, v_y) and bounce it off the walls.
        """
        self.vx = v_x
        self.vy = -v_y
        self.x += v_x
        self.y += v_y
        canv.coords(
            self.id2,
            self.x - self.r,
            self.y - self.r,
            self.x + self.r,
            self.y + self.r,
        )
        for bord in boards:
            bord.check(self)
    def chek_color(self):
        """
        Tick down the invincibility timer and restore the normal colour
        once it expires; repaint the gun body each tick.
        """
        self.inviz_time -= 1
        if self.inviz_time <= 0:
            self.color = self.color2
        canv.itemconfig(self.id2, fill=self.color)
    def power_up(self):
        """
        Increase shot power (capped at 50) while the mouse button is held.
        """
        if self.on:
            if self.power < 50:
                self.power += 0.5
            canv.itemconfig(self.id, fill='orange')
        else:
            canv.itemconfig(self.id, fill='black')
class Target:
    """Base class for all shootable targets."""
    def __init__(self):
        self.points = 0   # score awarded when the target is destroyed
        self.live = 1     # 1 = alive, <= 0 = destroyed
        self.tik = 100    # cooldown counter (used by carrier targets)
    def move(self, boards):
        """Advance the target one tick, bouncing off `boards` first."""
        for board in boards:
            board.check(self)
        self.x += self.vx
        self.y -= self.vy
        if self.live <= 0:
            canv.delete(self.id)
        else:
            self.set_coords()
    def set_coords(self):
        """Hook: subclasses position their canvas item here."""
        pass
    def hit(self):
        """Hook: subclasses react to being hit here."""
        pass
    def delete_new(self):
        """
        Remove this target's canvas item.
        """
        canv.delete(self.id)
class Target_s(Target):
    """Standard round target worth 1 point."""
    def __init__(self):
        super().__init__()
        self.type = 'standard'
        self.id = canv.create_oval(0, 0, 0, 0)
        self.new_target()
    def hit(self):
        """Handle being hit: hide the oval and award this target's points."""
        canv.coords(self.id, -10, -10, -10, -10)
        self.points = 1
        canv.delete(self.id)
    def set_coords(self):
        """Place the oval around the target's current centre."""
        canv.coords(
            self.id,
            self.x - self.r,
            self.y - self.r,
            self.x + self.r,
            self.y + self.r
        )
    def new_target(self, x1=None, y1=None):
        """(Re)spawn the target; x1/y1 optionally pin the spawn point.

        Fixed: the old defaults `x1=rnd(...)` were evaluated once at
        definition time and then ignored by the body; a fresh random
        position is now drawn per call unless coordinates are given.
        """
        x = self.x = rnd(600, 750) if x1 is None else x1
        y = self.y = rnd(300, 550) if y1 is None else y1
        r = self.r = rnd(7, 50)
        color = self.color = 'red'
        canv.coords(self.id, x - r, y - r, x + r, y + r)
        canv.itemconfig(self.id, fill=color)
        self.vy = rnd(-10, 10)
        self.vx = rnd(-10, 10)
class Target_r(Target):
    """Rare rectangular target worth 3 points (also shot by carriers)."""
    def __init__(self):
        super().__init__()
        self.type = 'rare'
        self.id = canv.create_rectangle(0, 0, 0, 0)
        self.new_target()
    def set_coords(self):
        """Place the rectangle around the target's current centre."""
        canv.coords(
            self.id,
            self.x - self.r,
            self.y - self.r,
            self.x + self.r,
            self.y + self.r
        )
    def hit(self):
        """Handle being hit: hide the rectangle and award this target's points."""
        canv.coords(self.id, -10, -10, -10, -10)
        self.points = 3
        canv.delete(self.id)
    def new_target(self, x1=None, y1=None):
        """(Re)spawn the target; x1/y1 optionally pin the spawn point.

        Fixed: the old defaults `x1=rnd(600, 759)` were evaluated once at
        definition time, so every default-spawned rare target appeared at
        the same fixed position; a fresh random position is now drawn per
        call unless coordinates are given.
        """
        x = self.x = rnd(600, 759) if x1 is None else x1
        y = self.y = rnd(300, 550) if y1 is None else y1
        r = self.r = rnd(7, 15)
        color = self.color = 'blue'
        canv.coords(self.id, x - r, y - r, x + r, y + r)
        canv.itemconfig(self.id, fill=color)
        self.vy = rnd(-15, 15)
        self.vx = rnd(-15, 15)
class Target_c(Target):
    """Carrier target: a wide green oval worth 5 points that launches rares."""
    def __init__(self):
        super().__init__()
        self.type = 'carrier'
        self.id = canv.create_oval(0, 0, 0, 0)
        self.new_target()
    def hit(self):
        """Handle being hit: hide the oval and award this target's points."""
        canv.coords(self.id, -10, -10, -10, -10)
        self.points = 5
        canv.delete(self.id)
    def new_target(self, x1=None, y1=None):
        """(Re)spawn the carrier; x1/y1 optionally pin the spawn point.

        Fixed: the old defaults were evaluated once at definition time and
        then ignored by the body; a fresh random position is now drawn per
        call unless coordinates are given.
        """
        x = self.x = rnd(600, 750) if x1 is None else x1
        y = self.y = rnd(300, 550) if y1 is None else y1
        r = self.r = rnd(20, 30)
        color = self.color = 'green'
        canv.coords(self.id, x - 2 * r, y - r, x + 2 * r, y + r)
        canv.itemconfig(self.id, fill=color)
        self.vy = rnd(-5, 5)
        self.vx = rnd(-5, 5)
    def set_coords(self):
        """Place the wide (2r x r) oval around the target's current centre."""
        canv.coords(
            self.id,
            self.x - 2 * self.r,
            self.y - self.r,
            self.x + 2 * self.r,
            self.y + self.r)
    def spawn(self, n):
        """Launch a new rare target from this carrier every 50 ticks."""
        global t_rare, num
        if self.live > 0 and self.tik <= 0:
            num += 1
            t_rare[n - 2 + num] = Target_r()
            t_rare[n - 2 + num].new_target(x1=self.x, y1=self.y)
            self.tik = 50
        self.tik -= 1
class Texts:
    """Thin wrapper around a canvas text item (score / lives HUD)."""
    def __init__(self, x=30, y=30, tex=''):
        self.id = canv.create_text(x, y, text=tex, font='28')
    def peretext(self, sc, x=30, y=30, tex=''):
        """
        Redraw the text item at (x, y) showing `tex` followed by `sc`.
        """
        canv.delete(self.id)
        self.id = canv.create_text(x, y, text=(tex + str(sc)), font='28')
# Lazily-populated pools of targets, keyed by index.
t_standard = defaultdict(lambda: Target_s())
t_rare = defaultdict(lambda: Target_r())
t_carrier = defaultdict(lambda: Target_c())
# Central message area used for win/lose announcements.
screen1 = canv.create_text(400, 300, text='', font='28')
g1 = Gun()
bullet = 0  # shots fired in the current round
score = 0
vx = 0  # current gun velocity, set by the key handlers
vy = 0
k = 2
tik = 100
balls = []  # projectiles currently in flight
# Slot 0 doubles as the round's "running" flag; its canvas item is removed.
t_standard[0] = Target_s()
t_standard[0].delete_new()
successful_targets = 0
num = 0  # extra rare targets spawned by carriers this round
tx = Texts()
tx_live = Texts(x=70, y=30, tex='live: 0')
# Four outer walls plus two interior obstacles.
boards = [Board(4, 4, 800, 0), Board(4, 596, 800, 0), Board(4, 4, 0, 600), Board(796, 4, 0, 600), Board(300, 4, 0, 200),
          Board(500, 400, 0, 200)]
def new_game(n):
    """Run one round with `n` standard, `n-1` rare and `n-1` carrier targets.

    Owns the frame loop: moves balls and targets, resolves collisions,
    updates the HUD and redraws, then schedules the next (bigger) round.
    """
    global t_standard, screen1, balls, bullet, score, successful_targets, num, tik, k
    # Spawn this round's targets.
    for i in range(n):
        t_standard[i + 1] = Target_s()
        t_standard[i + 1].new_target()
    for i in range(n - 1):
        t_rare[i] = Target_r()
        t_rare[i].new_target()
    for i in range(n - 1):
        t_carrier[i] = Target_c()
        t_carrier[i].new_target()
    bullet = 0
    balls = []
    g1.live = 3
    k = 3
    # Wire up mouse/keyboard controls (the original bound <Motion> three
    # times; one binding suffices as later binds replace earlier ones).
    canv.bind('<Button-1>', g1.fire2_start)
    canv.bind('<ButtonRelease-1>', g1.fire2_end)
    canv.bind('<Motion>', g1.targeting)
    root.bind('<KeyPress>', on_key_press)
    root.bind('<KeyRelease>', on_key_release)
    tick = 0.01
    successful_targets = 0
    num = 0
    t_standard[0].live = 1
    # Frame loop: runs until the round flag is cleared and no balls remain.
    while (t_standard[0].live or balls) and k > 0:
        for b in balls:
            b.move(boards)
            # Ball-vs-target collisions.
            for i in range(n):
                if b.hit_test(t_standard[i + 1]):
                    t_standard[i + 1].live = 0
                    t_standard[i + 1].hit()
                    successful_targets += 1
            for i in range(n - 1 + num):
                if b.hit_test(t_rare[i]):
                    t_rare[i].live = 0
                    t_rare[i].hit()
                    successful_targets += 1
            for i in range(n - 1):
                if b.hit_test(t_carrier[i]):
                    t_carrier[i].live = 0
                    t_carrier[i].hit()
                    successful_targets += 1
            # All targets down: announce the win and end the round.
            if successful_targets == 3 * n - 2 + num and n > 0:
                canv.bind('<Button-1>', '')
                canv.bind('<ButtonRelease-1>', '')
                canv.itemconfig(screen1, text='Вы уничтожили цели за ' + str(bullet) + ' выстрелов')
                t_standard[0].live = 0
        # Move the surviving targets and recompute the score.
        score = 0
        for i in range(n):
            t_standard[i + 1].move(boards)
            score += t_standard[i + 1].points
        for i in range(n - 1 + num):
            t_rare[i].move(boards)
            score += t_rare[i].points
        for i in range(n - 1):
            t_carrier[i].move(boards)
            score += t_carrier[i].points
            t_carrier[i].spawn(n)
        # Rare targets damage the gun on contact; losing all lives ends the game.
        for i in range(n - 1 + num):
            if g1.hit_test(t_rare[i]):
                g1.minus_live()
                tx_live.peretext(x=70, y=30, sc=str(g1.live), tex='live:')
                if g1.live == 0:
                    canv.itemconfig(screen1, text='you lose, score:' + str(score))
                    for i in range(n):
                        t_standard[i + 1].live = 0
                        t_standard[i + 1].hit()
                    for i in range(n - 1 + num):
                        t_rare[i].live = 0
                        t_rare[i].hit()
                    for i in range(n - 1):
                        t_carrier[i].live = 0
                        t_carrier[i].hit()
                    t_standard[0].live = 0
                    n = 0
                    canv.update()
                    time.sleep(2)
        tx.peretext(score)
        canv.update()
        time.sleep(tick)
        g1.move(vx, vy)
        g1.targeting()
        g1.power_up()
        g1.chek_color()
    canv.itemconfig(screen1, text='')
    # NOTE(review): deleting by the Gun class object is almost certainly a
    # no-op canvas tag delete -- confirm intent.
    canv.delete(Gun)
    # Fixed: `after` expects a callback; the old code called new_game(n + 1)
    # immediately (unbounded recursion) and scheduled its None result.
    root.after(750, new_game, n + 1)
# Start the first round, then enter the Tk event loop.
new_game(1)
root.mainloop()
| true |
4539ce7c5d866dd10a91460780478c75e96fec0a | Python | Tej780/Interview_Questions | /NumberLetterMappingQuestion.py | UTF-8 | 971 | 3.515625 | 4 | [] | no_license | NumbersToLettersMap = {'1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g', '8': 'h', '9': 'i',
'10': 'j', '11': 'k', '12': 'l', '13': 'm','14': 'n', '15': 'o', '16': 'p', '17': 'q', '18': 'r',
'19': 's', '20': 't', '21': 'u', '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'}
keys = NumbersToLettersMap.keys()
def NumberOfUniqueStrings(InputNumberString):
    """Return how many ways the digit string decodes to letters (1='a' .. 26='z')."""
    number = NumberOfSubStrings(InputNumberString, len(InputNumberString))
    return number


def NumberOfSubStrings(InputString, Length):
    """Count the decodings of InputString.

    `Length` is unused but kept for backward compatibility.
    Fixed: the two-digit test now uses an explicit 10..26 range instead of
    a membership probe against the module-level NumbersToLettersMap keys,
    removing the hidden global coupling.
    """
    if InputString == '':
        return 1
    elif InputString[0] == '0':
        # A leading zero can never start a letter code.
        return 0
    # A two-digit prefix decodes iff it is 10..26 (the first digit is
    # already known to be non-zero here).
    if len(InputString) > 1 and 10 <= int(InputString[:2]) <= 26:
        return NumberOfUniqueStrings(InputString[2:]) + NumberOfUniqueStrings(InputString[1:])
    return NumberOfUniqueStrings(InputString[1:])
# Demo: "1111" has 5 possible decodings.
string = '1111'
print(NumberOfUniqueStrings(string))
| true |
9a3c129256f261b3ee68a9b0d519f250d8315c31 | Python | MoravianCollege/faculty-door-sensor | /tests/ClientSideDoorSensor/MockDoorDisplay.py | UTF-8 | 4,754 | 2.953125 | 3 | [] | no_license | from tkinter import *
from ClientSideDoorSensor import *
# Mock data status of door states
status_dct = {'coleman': 'NULL', 'bush': 'NULL', 'schaper': 'NULL', 'mota': 'NULL'}
coleman = status_dct['coleman']
bush = status_dct['bush']
schaper = status_dct['schaper']
mota = status_dct['mota']
door_states = [coleman, bush, schaper, mota]
# Initial label captions; DoorDisplay.update() rebuilds these from the globals.
coleman_text = "Dr. Coleman's door is:\n" + coleman
bush_text = "Dr. Bush's door is:\n" + bush
schaper_text = "Dr. Schaper's door is:\n" + schaper
mota_text = "Dr. Mota's door is:\n" + mota
# Mock data color of door states
# "orange" marks an unknown state; data_change() switches to red/green.
top_left_color = "orange"
top_right_color = "orange"
bottom_left_color = "orange"
bottom_right_color = "orange"
door_color = [top_left_color, top_right_color, bottom_left_color, bottom_right_color]
class DoorDisplay:
    """Full-screen 2x2 grid of labels, one per professor's door state."""
    def __init__(self, master):
        self.master = master
        # Two stacked frames hold the labels; thin black frames act as borders.
        top_frame = Frame(master)
        bottom_frame = Frame(master)
        border_x_frame = Frame(master, bg="black", width=2, height=2)
        border_y1_frame = Frame(top_frame, bg="black", width=2, height=2)
        border_y2_frame = Frame(bottom_frame, bg="black", width=2, height=2)
        # Pack frames onto display
        top_frame.pack(side=TOP, fill=BOTH, expand=True)
        bottom_frame.pack(side=BOTTOM, fill=BOTH, expand=True)
        border_x_frame.pack(fill=BOTH, expand=False)
        # Setup labels with text, background color, and width
        self.top_left = Label(top_frame, text=coleman_text, bg=top_left_color, width=20)
        self.top_right = Label(top_frame, text=bush_text, bg=top_right_color, width=20)
        self.bottom_left = Label(bottom_frame, text=schaper_text, bg=bottom_left_color, width=20)
        self.bottom_right = Label(bottom_frame, text=mota_text, bg=bottom_right_color, width=20)
        # Set text font and size
        self.top_left.config(font=("Courier", 45))
        self.top_right.config(font=("Courier", 45))
        self.bottom_left.config(font=("Courier", 45))
        self.bottom_right.config(font=("Courier", 45))
        # Pack labels and frames onto display
        self.top_left.pack(side=LEFT, fill=BOTH, expand=True)
        border_y1_frame.pack(side=LEFT, fill=BOTH, expand=False)
        self.top_right.pack(side=RIGHT, fill=BOTH, expand=True)
        self.bottom_left.pack(side=LEFT, fill=BOTH, expand=True)
        border_y2_frame.pack(side=LEFT, fill=BOTH, expand=False)
        self.bottom_right.pack(side=RIGHT, fill=BOTH, expand=True)
        # Initiate update method calls
        self.update()
    def update(self):
        """Refresh label texts/colors from the module-level globals, then reschedule.

        NOTE(review): reschedules via the module-level `root`, not self.master.
        """
        # Changes label text to new global assigned variables
        self.top_left['text'] = "Dr. Coleman's door is:\n" + coleman
        self.top_right['text'] = "Dr. Bush's door is:\n" + bush
        self.bottom_left['text'] = "Dr. Schaper's door is:\n" + schaper
        self.bottom_right['text'] = "Dr. Mota's door is:\n" + mota
        # Changes label background color to new global assigned color
        self.top_left['bg'] = top_left_color
        self.top_right['bg'] = top_right_color
        self.bottom_left['bg'] = bottom_left_color
        self.bottom_right['bg'] = bottom_right_color
        # Repeatedly calls itself every 5000 milliseconds to update display
        root.after(5000, self.update)
def data_change(root):
    """Read mock door states from mock_status.txt, update the global state
    and colour variables, then reschedule itself every 4.5 seconds.

    Fixed: the status file is now opened with a `with` block so the handle
    is closed even if a line fails to parse.
    """
    global coleman, bush, schaper, mota, top_left_color, top_right_color, bottom_left_color, bottom_right_color
    # Mock door status: one "professor,STATE" pair per line.
    door_status = {}
    with open('mock_status.txt', 'r') as infile:
        for line in infile:
            professor, door_state = line.strip().split(',')
            door_status[professor] = door_state
    coleman = door_status['coleman']
    bush = door_status['bush']
    schaper = door_status['schaper']
    mota = door_status['mota']
    # Map each state to its display colour (unknown states stay orange).
    colors = []
    for state in (coleman, bush, schaper, mota):
        if state == "CLOSED":
            colors.append("red")
        elif state == "OPEN":
            colors.append("green")
        else:
            colors.append("orange")
    top_left_color, top_right_color, bottom_left_color, bottom_right_color = colors
    root.after(4500, data_change, root)
# Initializes display, sets to full-screen mode
root = Tk()
root.attributes("-fullscreen", True)
app = DoorDisplay(root)
# Kick off the periodic status polling, then enter the Tk event loop.
data_change(root)
root.mainloop()
| true |
def lcm(x, y):
    """Return the least common multiple of x and y."""
    from math import gcd
    return x * y // gcd(x, y)
def main():
    """Read integers A and B from stdin and print their least common multiple."""
    a, b = map(int, input().split())
    print(lcm(a, b))
main() | true |
f0345c9a5b2378bbfc16174cd7839af7ac05b13d | Python | kaushik2000/python_programs | /ex_12/request_response.py | UTF-8 | 1,573 | 3.671875 | 4 | [] | no_license | # Using socket(), A connection can be made using connect(), encode(), send(), recv(), decode() & close() methods, thus extracting data.
'''
Fetch a document from data.pr4e.org by speaking raw HTTP/1.0 over a TCP socket.

The cycle for requesting data is as follows:
socket()
connect()
encode() - send() - recv() - decode()
close()
'''
# Output similar to that of 'telnet' is received
# Establish socket
import socket
my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Establish connection
domain = "data.pr4e.org"
try:
    my_socket.connect( (domain, 80) )
# NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
# `except OSError:` would be the targeted choice here.
except:
    print('Domain doesn\'t exist:', domain)
    quit()
print('>>>> Connection successful to domain:', domain)
document = input('Enter the document name: ')
# Sending a GET request. The 2nd part includes complete url-directory of the file.
# The 3rd part of the string describes the protocol version + two carriage return & new line characters (i.e. EOL-End Of Line) similar to telnet
doc_cmd = 'GET http://data.pr4e.org/' + document + ' HTTP/1.0\r\n\r\n'
doc_cmd = doc_cmd.encode() # Encoding the sent data (UNICODE to UTF-8)
my_socket.send(doc_cmd)
print('>>>> Request sent')
# Receiving the required file data, 512 bytes per recv() until the server closes.
#count = 0
lines = str()
while True:
    data = my_socket.recv(512) # 512 is the buffer_size. At a time the socket receives 512 characters
    if len(data) < 1 : break
    #count += 1
    #print('>>>> Request cycle number:', count)
    lines = lines + data.decode() # Converting UTF-8 to UNICODE for python
print(lines)
print('>>>> Data extracted successfully')
# Closing the connection
my_socket.close()
print('>>>> Connection terminated')
| true |
0c06067a186b8897f9a78245338bb87042aec3b7 | Python | Meisterlala/Online-Computer-Science-degree | /From Nand to Tetris/projects/07/VMTranslator/Parser.py | UTF-8 | 2,787 | 2.953125 | 3 | [] | no_license | from typing import List
import Instructions as op
from concurrent.futures import ProcessPoolExecutor, wait
from colorama import Fore
def Translate(ops: List[op.Operation]) -> List[str]:
    """Translate parsed VM operations into Hack assembly lines.

    Each operation is translated on a worker process; the translated
    snippets are concatenated in the original operation order.

    Fixed: the executor is now a context manager so worker processes are
    shut down even on error (the old code leaked the pool), and the loop
    variable no longer shadows the `op` module alias.
    """
    print("Translating")
    translated: List[str] = []
    with ProcessPoolExecutor() as pool:
        futures = [pool.submit(operation.translate) for operation in ops]
        wait(futures)
        for future in futures:
            translated.extend(future.result())
    return translated
def Parse(filename: str) -> List[op.Operation]:
    """Parse a VM source file into a list of Operation objects.

    Lines are classified on a process pool in parallel; comment and
    invalid lines are counted and dropped from the result.

    Fixed: both the source file and the executor are now managed with
    `with` blocks so they are released even on error.
    """
    # Derive the bare file name (no directory, no extension); it is used
    # to namespace symbols generated per file.
    index = filename.rfind("/")
    if index == -1:
        index = filename.rfind("\\")
    if index == -1:
        activeFile = filename
    else:
        activeFile = filename[index + 1:]
    activeFileName = activeFile.split(sep=".")[0]
    print(f"Parsing {activeFile}")

    with open(filename, "r") as file:
        lines = file.readlines()

    successfullyParsed = []
    invalidCounter = 0
    commentCounter = 0
    # Classify every line on the pool, preserving source-line order.
    with ProcessPoolExecutor() as pool:
        futures = [
            pool.submit(_ParseLine, line, lineNumber, activeFileName)
            for lineNumber, line in enumerate(lines)
        ]
        wait(futures)
        for future in futures:
            result = future.result()
            # Drop invalid lines and comments, counting each for reporting.
            if isinstance(result, op.Invalid):
                invalidCounter += 1
            elif isinstance(result, op.Comment):
                commentCounter += 1
            else:
                successfullyParsed.append(result)

    if commentCounter > 0:
        print(f"Ignoring {commentCounter} comments")
    if invalidCounter > 0:
        print(Fore.YELLOW + f"WARNING: {invalidCounter} invalid lines")

    return successfullyParsed
def _PreParse(line: str) -> str:
""" Remove comments and new line """
line = line.rstrip("\n")
commentIndex = line.find("/")
# no comment found
if commentIndex == - 1:
return line
# truncate
return line[0:commentIndex]
def _ParseLine(line: str, lineNumber: int, FileName: str):
    """Classify one source line as Comment, Stack, Arithmetic or Invalid."""
    cleaned = _PreParse(line)
    if not cleaned:
        return op.Comment()
    # Try each concrete operation type in turn; the first successful parse wins.
    for operation_class in (op.Stack, op.Arithmetic):
        candidate = operation_class(cleaned, FileName, lineNumber)
        if candidate.parse():
            return candidate
    return op.Invalid()
| true |
3df4ee0b5fa515aeb75cc4965fcc07023aab073d | Python | zoearon/calculator-2 | /arithmetic1.py | UTF-8 | 1,699 | 4.46875 | 4 | [] | no_license | """Math functions for calculator."""
def add(num_list):
    """Return the sum of a list of numbers (or numeric strings), truncated to int.

    Fixed: the old implementation shadowed the built-in `sum` with a local
    accumulator; the built-in now performs the accumulation directly.
    """
    return int(sum(float(value) for value in num_list))
def subtract(num_list):
    """Return the first value minus every subsequent value, truncated to int.

    Fixed: the old code started from 0 and subtracted *every* element,
    returning the negated sum instead of the difference
    (e.g. subtract([10, 3]) gave -13 instead of 7).
    An empty list yields 0.
    """
    if not num_list:
        return 0
    diff = float(num_list[0])
    for value in num_list[1:]:
        diff -= float(value)
    return int(diff)
def multiply(num_list):
    """Return the product of a list of numbers (or numeric strings), truncated to int."""
    product = 1.0
    for value in num_list:
        product = product * float(value)
    return int(product)
def divide(num_list):
    """Sequentially divide the first value by each following one (float result).

    Fixed: the first element is now coerced with float() like every other
    element here, so numeric-string input works and the result is always
    a float, as the docstring promises.
    """
    quotient = float(num_list[0])
    for value in num_list[1:]:
        quotient = quotient / float(value)
    return quotient
def square(num_list):
    """Return a new list with each value squared (as floats)."""
    return [float(value) ** 2 for value in num_list]
def cube(num_list):
    """Return a new list with each value cubed (as floats)."""
    return [float(value) ** 3 for value in num_list]
def power(num_list):
    """Left-fold exponentiation: ((a ** b) ** c) ... as a float.

    Fixed: the first element is now coerced with float() for consistency
    with the other functions, so numeric-string input works.
    """
    result = float(num_list[0])
    for value in num_list[1:]:
        result = result ** float(value)
    return result
def mod(num_list):
    """Left-fold modulo: ((a % b) % c) ... as a float.

    Fixed: the first element is now coerced with float() for consistency
    with the other functions, so numeric-string input works.
    """
    remainder = float(num_list[0])
    for value in num_list[1:]:
        remainder = remainder % float(value)
    return remainder
def add_mult(num1, num2, num3):
    """Add the first two values, then multiply the sum by the third.

    Fixed: the old code called add()/multiply() with multiple positional
    arguments although both take a single list, so every call raised
    TypeError.  The truncation semantics of add() and multiply() are kept.
    """
    total = int(float(num1) + float(num2))  # same truncation as add()
    return int(total * float(num3))         # same truncation as multiply()
def add_cubes(num1, num2):
    """Cube both values and return the truncated integer sum.

    Fixed: the old code passed bare numbers to cube()/add(), which expect
    lists, so every call raised TypeError.
    """
    return int(float(num1) ** 3 + float(num2) ** 3)
| true |
55b62cb5d6ea8062db05eb229a921708d5bc48d6 | Python | m1kra/LatteCompiler | /src/peephole_optimizer.py | UTF-8 | 3,503 | 2.859375 | 3 | [] | no_license | from assembly_writer import AssemblyWriter
class PeepholeOptimizer:
    """
    Peephole optimizer over an AssemblyWriter's instruction list.

    Each `mov_*` / `jmp_*` pass scans fixed-size windows of sanitized
    instructions and collects absolute indices of redundant lines, which
    are then removed through writer.remove().
    """

    def __init__(self, writer: 'AssemblyWriter'):
        # String annotation: forward reference, so the class definition
        # does not require AssemblyWriter to be resolved at import time.
        self.writer = writer

    def iter_instructions(self, k: int):
        """Yield (start_index, [sanitized instruction] * k) for every window.

        Fixed: the window count was `len - k`, which skipped the final
        window (e.g. two instructions with k=2 produced no window at all);
        it is now `len - k + 1` so every k-wide window is examined.
        """
        t = len(self.writer.instructions) - k + 1
        if t > 0:
            for i in range(t):
                chunk = []
                for j in range(k):
                    chunk.append(
                        self.sanitize(self.writer.instructions[i + j])
                    )
                yield i, chunk

    @staticmethod
    def sanitize(instruction):
        """Normalize one instruction line.

        Returns the raw string for label lines (contain ':'), a 3-tuple
        (opcode, dst, src) for two-operand instructions, or a list of
        whitespace-split tokens otherwise.
        """
        if ':' in instruction:
            return instruction
        instruction = instruction.replace('dword ', '').strip()
        if ',' in instruction:
            x, c = instruction.split(',')
            y = x.find(' ')
            return x[:y].strip(), x[y:].strip(), c.strip()
        return instruction.split(' ')

    def optimize(self):
        """Run all peephole passes in a fixed order."""
        self.mov__eax_c__mem_eax()
        self.mov_ab_xd_ba()
        self.mov_ab_ac()
        self.mov_ab_ab()
        self.jmp_lbl_lbl()
        self.mov_ab_ba()

    def mov_ab_ba(self):
        """Drop `mov b,a` immediately following `mov a,b`."""
        to_remove = []
        for i, (ab, ba) in self.iter_instructions(2):
            if len(ab) == len(ba) == 3:
                if ab[0] == ba[0] == 'mov':
                    if ab[1] == ba[2] and ab[2] == ba[1]:
                        to_remove.append(i + 1)
        self.writer.remove(to_remove)

    def mov_ab_xd_ba(self):
        """Drop `mov b,a` two after `mov a,b` when the middle op leaves `a` intact."""
        to_remove = []
        for i, (ab, xd, ba) in self.iter_instructions(3):
            # xd is a raw string only for label lines; for tuples/lists the
            # membership test is over elements and never matches ':'.
            if ':' in xd:
                continue
            if len(ab) == len(ba) == 3:
                if ab[0] == ba[0] == 'mov':
                    if ab[1] == ba[2] and ab[2] == ba[1]:
                        # Safe if the middle op does not touch ab's dest,
                        # or only writes a different destination.
                        if ab[1] not in xd or (
                            len(xd) == 3 and xd[1] != ab[1]
                        ):
                            to_remove.append(i + 2)
        self.writer.remove(to_remove)

    def mov_ab_ab(self):
        """Drop the second of two identical consecutive `mov a,b` instructions."""
        to_remove = []
        for i, (a, b) in self.iter_instructions(2):
            if len(a) == len(b) == 3:
                if a[0] == b[0] == 'mov':
                    if a[2] == b[2] and a[1] == b[1] and a[1] not in b[2]:
                        to_remove.append(i + 1)
        self.writer.remove(to_remove)

    def mov_ab_ac(self):
        """Drop `mov a,b` when it is immediately overwritten by `mov a,c`."""
        to_remove = []
        for i, (ab, ac) in self.iter_instructions(2):
            if len(ab) == len(ac) == 3:
                if ab[0] == ac[0] == 'mov':
                    if ab[1] == ac[1] and ab[1] not in ac[2]:
                        to_remove.append(i + 1)
        self.writer.remove(to_remove)

    def mov__eax_c__mem_eax(self):
        """Collapse `mov EAX, c` + `mov [m], EAX` into `mov dword [m], c`."""
        to_remove = []
        for i, (a, b) in self.iter_instructions(2):
            if len(a) == len(b) == 3 and a[0] == b[0] == 'mov':
                if a[1] == 'EAX' and b[2] == 'EAX' and '[' not in a[2]:
                    self.writer.instructions[i] =\
                        f'   mov dword {b[1]}, {a[2]}'
                    to_remove.append(i + 1)
        self.writer.remove(to_remove)

    def jmp_lbl_lbl(self):
        """Drop a `jmp lbl` that lands on the immediately following `lbl:`."""
        to_remove = []
        for i, (jmp, lbl) in self.iter_instructions(2):
            if 'jmp' in jmp and ':' in lbl:
                if jmp[1] == lbl.split(':')[0]:
                    to_remove.append(i)
        self.writer.remove(to_remove)
| true |