| blob_id (stringlengths 40–40) | language (stringclasses 1 value) | repo_name (stringlengths 5–133) | path (stringlengths 2–333) | src_encoding (stringclasses 30 values) | length_bytes (int64 18–5.47M) | score (float64 2.52–5.81) | int_score (int64 3–5) | detected_licenses (listlengths 0–67) | license_type (stringclasses 2 values) | text (stringlengths 12–5.47M) | download_success (bool 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
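The rows below render one file per record; programmatic access is easier. A minimal sketch of iterating rows with this schema, assuming the dump is published as a Hugging Face dataset (the repository id below is a placeholder, not the actual source):

```python
from datasets import load_dataset

# "user/python-code-corpus" is a placeholder id, not the actual source of this dump.
ds = load_dataset("user/python-code-corpus", split="train", streaming=True)
for row in ds:
    # each record carries the metadata columns above plus the full file in "text"
    if row["download_success"] and row["license_type"] == "permissive":
        print(row["repo_name"], row["path"], row["length_bytes"])
```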
0e916e2c5c21fa90619c5a498a3f02adb866766d
|
Python
|
NewFrenchDev/Coding-Project
|
/Code python/Script/Trier_fichier.py
|
UTF-8
| 828
| 2.890625
| 3
|
[] |
no_license
|
import shutil
import glob
import os
import time
# path of the folder containing this script
CUR_DIR = os.path.dirname(__file__)

# file extensions mapped to their destination folders
extensions = {".mp3": "Musique",
              ".wav": "Musique",
              ".mp4": "Videos",
              ".avi": "Videos",
              ".jpeg": "Images",
              ".jpg": "Images",
              ".png": "Images",
              ".pdf": "Documents",
              ".ipynb": "Notebooks",
              ".exe": "Software"}

fichiers = glob.glob(os.path.join(CUR_DIR, "*"))

for fichier in fichiers:
    extension = os.path.splitext(fichier)[-1]
    folder = extensions.get(extension)
    if folder:
        folder_path = os.path.join(CUR_DIR, folder)
        os.makedirs(folder_path, exist_ok=True)
        shutil.move(fichier, folder_path)

print("Files have been moved successfully!")
time.sleep(2)
| true
|
ae210e9cc2991c3923baf123520b633ffc85a1b3
|
Python
|
jm-avila/REST-APIs-Python
|
/Refresher/06_advanced_set_operations/code.py
|
UTF-8
| 431
| 3.875
| 4
|
[] |
no_license
|
art = {"Bob", "Jen", "Rolf", "Charlie"}
science = {"Bob", "Jen", "Adam", "Anne"}
# difference returns the items of one set that are not found in the passed set.
onlyArt = art.difference(science)
onlyScience = science.difference(art)
print("only Art", onlyArt)
print("only Science", onlyScience)
print()
# intersection returns only the items found in both sets.
both = art.intersection(science)
print("take both", both)
| true
|
0e15b6e8a15f34ec7cdb7e7c3c13ef38d955352d
|
Python
|
JGilstrap1/Sportsbook
|
/exe/SportsBook.py
|
UTF-8
| 37,040
| 2.765625
| 3
|
[] |
no_license
|
import pandas as pd
from pandas import DataFrame
import numpy as np
from tkinter import *
from tkinter import ttk
import statistics
import cmath
root = Tk()
root.title('NHL Prediction Calculator')
root.iconbitmap('/Users/jimbo/Documents/Sportsbook/exe/Skating.ico')
root.geometry("600x1000")
def webScrapeTeamStatsUrl(StatUrl):
    dfs = pd.read_html(StatUrl)
    for df in dfs:
        df_subset = DataFrame(df, columns = ['Team', 'GP', 'W', 'L', 'OTL', 'Points', 'SF', 'SF/GP', 'SA', 'SA/GP', 'GF', 'GF/GP', 'GF/GP_Rank',
                                             'xGF', 'xGF_Rank', 'xGA', 'xGA_Rank', 'GA', 'GA/GP', 'GA/GP_Rank', 'CompareGF', 'OffensiveRank',
                                             'CompareGA', 'DefensiveRank', 'SCF', 'SCGF', 'SCO', 'SCO_Rank', 'SCA', 'SCGA', 'SCD', 'SCD_Rank'])
        # per-game rates
        for idx, row in df_subset.iterrows():
            df_subset.loc[idx, 'GF/GP'] = row['GF'] / row['GP']
            df_subset.loc[idx, 'GA/GP'] = row['GA'] / row['GP']
            df_subset.loc[idx, 'SF/GP'] = row['SF'] / row['GP']
            df_subset.loc[idx, 'SA/GP'] = row['SA'] / row['GP']
        # league-relative rates and scoring-chance percentages
        for idx, row in df_subset.iterrows():
            df_subset.loc[idx, 'CompareGF'] = row['GF/GP'] / df_subset['GF/GP'].mean()
            df_subset.loc[idx, 'CompareGA'] = row['GA/GP'] / df_subset['GA/GP'].mean()
            df_subset.loc[idx, 'SCO'] = (row['SCGF'] / row['SCF']) * 100
            df_subset.loc[idx, 'SCD'] = (row['SCGA'] / row['SCA']) * 100
        df_subset['GF/GP_Rank'] = df_subset['CompareGF'].rank(ascending=False)
        df_subset['GA/GP_Rank'] = df_subset['CompareGA'].rank()
        df_subset['xGF_Rank'] = df_subset['xGF'].rank(ascending=False)
        df_subset['xGA_Rank'] = df_subset['xGA'].rank()
        df_subset['SCO_Rank'] = df_subset['SCO'].rank(ascending=False)
        df_subset['SCD_Rank'] = df_subset['SCD'].rank()
        df_subset['OffensiveRank'] = ((df_subset['xGF_Rank'] + df_subset['GF/GP_Rank'] + df_subset['SCO_Rank']) / 3)
        df_subset['DefensiveRank'] = ((df_subset['xGA_Rank'] + df_subset['GA/GP_Rank'] + df_subset['SCD_Rank']) / 3)
        df_subset['GF/GP'] = df_subset['GF/GP'].map('{:,.2f}'.format)
        df_subset['GA/GP'] = df_subset['GA/GP'].map('{:,.2f}'.format)
        df_subset['OffensiveRank'] = df_subset['OffensiveRank'].map('{:,.0f}'.format)
        df_subset['DefensiveRank'] = df_subset['DefensiveRank'].map('{:,.0f}'.format)
    return df_subset


def webScrapeGoalieStatsUrl(StatUrl):
    dfs = pd.read_html(StatUrl)
    for df in dfs:
        df_subset = DataFrame(df, columns = ['Player', 'Team', 'GP', 'Shots Against', 'Saves', 'Goals Against', 'SV%', 'SV% Rank', 'GAA', 'GAA Rank'])
        df_subset['SV% Rank'] = df_subset['SV%'].rank(ascending=False)
        df_subset['GAA Rank'] = df_subset['GAA'].rank()
    return df_subset


def webScrapePowerPlayStatsUrl(StatUrl):
    dfs = pd.read_html(StatUrl)
    for df in dfs:
        df_subset = DataFrame(df, columns = ['Team', 'GF', 'PP', 'GF_Rank', 'PP_Rank', 'xGF', 'xGF_Rank', 'SCF', 'SCGF', 'SCO', 'SCO_Rank'])
        for idx, row in df_subset.iterrows():
            df_subset.loc[idx, 'PP'] = row['GF'] / df_subset['GF'].mean()
            df_subset.loc[idx, 'SCO'] = (row['SCGF'] / row['SCF']) * 100
        # rank before formatting PP into a display string
        df_subset['GF_Rank'] = df_subset['PP'].rank(ascending=False)
        df_subset['xGF_Rank'] = df_subset['xGF'].rank(ascending=False)
        df_subset['SCO_Rank'] = df_subset['SCO'].rank(ascending=False)
        df_subset['PP'] = df_subset['PP'].map('{:,.2f}'.format)
        df_subset['PP_Rank'] = ((df_subset['GF_Rank'] + df_subset['xGF_Rank'] + df_subset['SCO_Rank']) / 3)
        df_subset['PP_Rank'] = df_subset['PP_Rank'].map('{:,.0f}'.format)
    return df_subset


def webScrapePenaltyKillStatsUrl(StatUrl):
    dfs = pd.read_html(StatUrl)
    for df in dfs:
        df_subset = DataFrame(df, columns = ['Team', 'GA', 'PK', 'GA_Rank', 'PK_Rank', 'xGA', 'xGA_Rank', 'SCA', 'SCGA', 'SCD', 'SCA_Rank'])
        for idx, row in df_subset.iterrows():
            df_subset.loc[idx, 'PK'] = row['GA'] / df_subset['GA'].mean()
            df_subset.loc[idx, 'SCD'] = (row['SCGA'] / row['SCA']) * 100
        # rank before formatting PK into a display string
        df_subset['GA_Rank'] = df_subset['PK'].rank()
        df_subset['xGA_Rank'] = df_subset['xGA'].rank()
        df_subset['SCA_Rank'] = df_subset['SCD'].rank()
        df_subset['PK'] = df_subset['PK'].map('{:,.2f}'.format)
        df_subset['PK_Rank'] = ((df_subset['GA_Rank'] + df_subset['xGA_Rank'] + df_subset['SCA_Rank']) / 3)
        df_subset['PK_Rank'] = df_subset['PK_Rank'].map('{:,.0f}'.format)
    return df_subset


def webScrapeStreakStatsUrl(StatUrl):
    dfs = pd.read_html(StatUrl)
    for df in dfs:
        df_subset = DataFrame(df, columns = ['Team', 'Wins', 'Losses', 'Home Wins', 'Home Losses', 'Road Wins', 'Road Losses'])
    return df_subset
def homeTeamSelected(homeTeam):
    global parsedHomeStats
    # homeTeam = homeTeamCombo.get()
    homeFilter = homeTeamStats['Team'] == homeTeam
    parsedHomeStats = homeTeamStats[homeFilter]
    global parsedHomePpStats
    homePpFilter = homePowerPlayStats['Team'] == homeTeam
    parsedHomePpStats = homePowerPlayStats[homePpFilter]
    global parsedHomePkStats
    homePkFilter = homePenaltyKillStats['Team'] == homeTeam
    parsedHomePkStats = homePenaltyKillStats[homePkFilter]
    global parsedHomeStreakStats
    homeStreakFilter = homeStreakStats['Team'] == homeTeam
    parsedHomeStreakStats = homeStreakStats[homeStreakFilter]


def awayTeamSelected(awayTeam):
    global parsedAwayStats
    awayFilter = awayTeamStats['Team'] == awayTeam
    parsedAwayStats = awayTeamStats[awayFilter]
    global parsedAwayPpStats
    awayPpFilter = awayPowerPlayStats['Team'] == awayTeam
    parsedAwayPpStats = awayPowerPlayStats[awayPpFilter]
    global parsedAwayPkStats
    awayPkFilter = awayPenaltyKillStats['Team'] == awayTeam
    parsedAwayPkStats = awayPenaltyKillStats[awayPkFilter]
    global parsedAwayStreakStats
    awayStreakFilter = awayStreakStats['Team'] == awayTeam
    parsedAwayStreakStats = awayStreakStats[awayStreakFilter]


def homeGoalieSelected(homeGoalie):
    global parsedHomeGoalieStats
    homeFilter = homeGoalieStats['Player'] == homeGoalie
    parsedHomeGoalieStats = homeGoalieStats[homeFilter]


def awayGoalieSelected(awayGoalie):
    global parsedAwayGoalieStats
    awayFilter = awayGoalieStats['Player'] == awayGoalie
    parsedAwayGoalieStats = awayGoalieStats[awayFilter]
def homeGoalieDisplay():
    # home goalie GP
    gpLabelHome = Label(goalieStatsFrame, text = "GP")
    gpLabelHome.grid(row = 0, column = 0, padx = 10, pady = 10)
    # home goalie GAA
    gaaLabelHome = Label(goalieStatsFrame, text = "GAA")
    gaaLabelHome.grid(row = 1, column = 0, padx = 10, pady = 10)
    # home goalie GAA Rank
    gaaRankLabelHome = Label(goalieStatsFrame, text = 'GAA Rank')
    gaaRankLabelHome.grid(row = 2, column = 0, padx = 10, pady = 10)
    # home goalie SV%
    svLabelHome = Label(goalieStatsFrame, text = "SV%")
    svLabelHome.grid(row = 3, column = 0, padx = 10, pady = 10)
    # home goalie SV% Rank
    svRankLabelHome = Label(goalieStatsFrame, text = 'SV% Rank')
    svRankLabelHome.grid(row = 4, column = 0, padx = 10, pady = 10)


def awayGoalieDisplay():
    # away goalie GP
    gpLabelAway = Label(goalieStatsFrame, text = "GP")
    gpLabelAway.grid(row = 0, column = 2, padx = 10, pady = 10)
    # away goalie GAA
    gaaLabelAway = Label(goalieStatsFrame, text = "GAA")
    gaaLabelAway.grid(row = 1, column = 2, padx = 10, pady = 10)
    # away goalie GAA Rank
    gaaRankLabelAway = Label(goalieStatsFrame, text = 'GAA Rank')
    gaaRankLabelAway.grid(row = 2, column = 2, padx = 10, pady = 10)
    # away goalie SV%
    svLabelAway = Label(goalieStatsFrame, text = "SV%")
    svLabelAway.grid(row = 3, column = 2, padx = 10, pady = 10)
    # away goalie SV% Rank
    svRankLabelAway = Label(goalieStatsFrame, text = 'SV% Rank')
    svRankLabelAway.grid(row = 4, column = 2, padx = 10, pady = 10)
def homeTeamDisplay():
    # home team wins
    winsLabelhome = Label(teamStatsFrame, text = "Wins")
    winsLabelhome.grid(row = 0, column = 0, padx = 10, pady = 10)
    # home team losses
    lossLabelhome = Label(teamStatsFrame, text = "Losses")
    lossLabelhome.grid(row = 1, column = 0, padx = 10, pady = 10)
    # home team OTL
    otlLabelhome = Label(teamStatsFrame, text = "OTL")
    otlLabelhome.grid(row = 2, column = 0, padx = 10, pady = 10)
    # home team GF/GP
    gfgpLabelhome = Label(teamStatsFrame, text = "GF/GP")
    gfgpLabelhome.grid(row = 3, column = 0, padx = 10, pady = 10)
    # home team GA/GP
    gagpLabelhome = Label(teamStatsFrame, text = "GA/GP")
    gagpLabelhome.grid(row = 4, column = 0, padx = 10, pady = 10)
    # # home team offensive strength
    # osLabelhome = Label(teamStatsFrame, text = "Offensive Strength")
    # osLabelhome.grid(row = 5, column = 0, padx = 10, pady = 10)
    # home team offensive rank
    orLabelhome = Label(teamStatsFrame, text = "Offensive Rank")
    orLabelhome.grid(row = 5, column = 0, padx = 10, pady = 10)
    # # home team defensive strength
    # dsLabelhome = Label(teamStatsFrame, text = "Defensive Strength")
    # dsLabelhome.grid(row = 7, column = 0, padx = 10, pady = 10)
    # home team defensive rank
    drLabelhome = Label(teamStatsFrame, text = "Defensive Rank")
    drLabelhome.grid(row = 6, column = 0, padx = 10, pady = 10)
    # # home team powerplay strength
    # ppsLabelhome = Label(teamStatsFrame, text = "Powerplay Strength")
    # ppsLabelhome.grid(row = 9, column = 0, padx = 10, pady = 10)
    # home team powerplay ranking
    pprLabelhome = Label(teamStatsFrame, text = "Powerplay Ranking")
    pprLabelhome.grid(row = 7, column = 0, padx = 10, pady = 10)
    # # home team penalty kill strength
    # pksLabelhome = Label(teamStatsFrame, text = "Penalty Kill Strength")
    # pksLabelhome.grid(row = 11, column = 0, padx = 10, pady = 10)
    # home team penalty kill ranking
    pkrLabelhome = Label(teamStatsFrame, text = "Penalty Kill Ranking")
    pkrLabelhome.grid(row = 8, column = 0, padx = 10, pady = 10)
    # home team win streak
    wsLabelhome = Label(teamStatsFrame, text = "Win Streak")
    wsLabelhome.grid(row = 9, column = 0, padx = 10, pady = 10)
    # home team loss streak
    lsLabelhome = Label(teamStatsFrame, text = "Loss Streak")
    lsLabelhome.grid(row = 10, column = 0, padx = 10, pady = 10)


def awayTeamDisplay():
    # away team wins
    winsLabelAway = Label(teamStatsFrame, text = "Wins")
    winsLabelAway.grid(row = 0, column = 3, padx = 10, pady = 10)
    # away team losses
    lossLabelAway = Label(teamStatsFrame, text = "Losses")
    lossLabelAway.grid(row = 1, column = 3, padx = 10, pady = 10)
    # away team OTL
    otlLabelAway = Label(teamStatsFrame, text = "OTL")
    otlLabelAway.grid(row = 2, column = 3, padx = 10, pady = 10)
    # away team GF/GP
    gfgpLabelAway = Label(teamStatsFrame, text = "GF/GP")
    gfgpLabelAway.grid(row = 3, column = 3, padx = 10, pady = 10)
    # away team GA/GP
    gagpLabelAway = Label(teamStatsFrame, text = "GA/GP")
    gagpLabelAway.grid(row = 4, column = 3, padx = 10, pady = 10)
    # # away team offensive strength
    # osLabelAway = Label(teamStatsFrame, text = "Offensive Strength")
    # osLabelAway.grid(row = 5, column = 3, padx = 10, pady = 10)
    # away team offensive rank
    orLabelAway = Label(teamStatsFrame, text = "Offensive Rank")
    orLabelAway.grid(row = 5, column = 3, padx = 10, pady = 10)
    # # away team defensive strength
    # dsLabelAway = Label(teamStatsFrame, text = "Defensive Strength")
    # dsLabelAway.grid(row = 7, column = 3, padx = 10, pady = 10)
    # away team defensive rank
    drLabelAway = Label(teamStatsFrame, text = "Defensive Rank")
    drLabelAway.grid(row = 6, column = 3, padx = 10, pady = 10)
    # # away team powerplay strength
    # ppsLabelAway = Label(teamStatsFrame, text = "Powerplay Strength")
    # ppsLabelAway.grid(row = 9, column = 3, padx = 10, pady = 10)
    # away team powerplay ranking
    pprLabelAway = Label(teamStatsFrame, text = "Powerplay Ranking")
    pprLabelAway.grid(row = 7, column = 3, padx = 10, pady = 10)
    # # away team penalty kill strength
    # pksLabelAway = Label(teamStatsFrame, text = "Penalty Kill Strength")
    # pksLabelAway.grid(row = 11, column = 3, padx = 10, pady = 10)
    # away team penalty kill ranking
    pkrLabelAway = Label(teamStatsFrame, text = "Penalty Kill Ranking")
    pkrLabelAway.grid(row = 8, column = 3, padx = 10, pady = 10)
    # away team win streak
    wsLabelAway = Label(teamStatsFrame, text = "Win Streak")
    wsLabelAway.grid(row = 9, column = 3, padx = 10, pady = 10)
    # away team loss streak
    lsLabelAway = Label(teamStatsFrame, text = "Loss Streak")
    lsLabelAway.grid(row = 10, column = 3, padx = 10, pady = 10)
def populateHomeGoalieStats():
    homeGp = IntVar()
    # GAA and SV% are floating point values
    homeGaa = DoubleVar()
    homeGaaRank = IntVar()
    homeSV = DoubleVar()
    homeSvRank = IntVar()
    homeGp.set(parsedHomeGoalieStats.iloc[0]['GP'])
    homeGaa.set(parsedHomeGoalieStats.iloc[0]['GAA'])
    homeGaaRank.set(parsedHomeGoalieStats.iloc[0]['GAA Rank'])
    homeSV.set(parsedHomeGoalieStats.iloc[0]['SV%'])
    homeSvRank.set(parsedHomeGoalieStats.iloc[0]['SV% Rank'])
    gpHome = Label(goalieStatsFrame, textvariable = homeGp)
    gpHome.grid(row = 0, column = 1, padx = 10, pady = 10)
    gaaHome = Label(goalieStatsFrame, textvariable = homeGaa)
    gaaHome.grid(row = 1, column = 1, padx = 10, pady = 10)
    gaaRankHome = Label(goalieStatsFrame, textvariable = homeGaaRank)
    gaaRankHome.grid(row = 2, column = 1, padx = 10, pady = 10)
    svHome = Label(goalieStatsFrame, textvariable = homeSV)
    svHome.grid(row = 3, column = 1, padx = 10, pady = 10)
    svRankHome = Label(goalieStatsFrame, textvariable = homeSvRank)
    svRankHome.grid(row = 4, column = 1, padx = 10, pady = 10)


def populateAwayGoalieStats():
    awayGp = IntVar()
    # GAA and SV% are floating point values
    awayGaa = DoubleVar()
    awayGaaRank = IntVar()
    awaySV = DoubleVar()
    awaySvRank = IntVar()
    awayGp.set(parsedAwayGoalieStats.iloc[0]['GP'])
    awayGaa.set(parsedAwayGoalieStats.iloc[0]['GAA'])
    awayGaaRank.set(parsedAwayGoalieStats.iloc[0]['GAA Rank'])
    awaySV.set(parsedAwayGoalieStats.iloc[0]['SV%'])
    awaySvRank.set(parsedAwayGoalieStats.iloc[0]['SV% Rank'])
    gpAway = Label(goalieStatsFrame, textvariable = awayGp)
    gpAway.grid(row = 0, column = 3, padx = 10, pady = 10)
    gaaAway = Label(goalieStatsFrame, textvariable = awayGaa)
    gaaAway.grid(row = 1, column = 3, padx = 10, pady = 10)
    gaaRankAway = Label(goalieStatsFrame, textvariable = awayGaaRank)
    gaaRankAway.grid(row = 2, column = 3, padx = 10, pady = 10)
    svAway = Label(goalieStatsFrame, textvariable = awaySV)
    svAway.grid(row = 3, column = 3, padx = 10, pady = 10)
    svRankAway = Label(goalieStatsFrame, textvariable = awaySvRank)
    svRankAway.grid(row = 4, column = 3, padx = 10, pady = 10)
def populateHomeTeamStats():
    homeWins = IntVar()
    homeLoss = IntVar()
    homeOTL = IntVar()
    # GF/GP, GA/GP and the rank columns were formatted as strings upstream
    homeGFGP = StringVar()
    homeGAGP = StringVar()
    homeOS = IntVar()
    homeOR = StringVar()
    homeDS = IntVar()
    homeDR = StringVar()
    homePP = IntVar()
    homePPR = StringVar()
    homePK = IntVar()
    homePKR = StringVar()
    homeStreakWins = IntVar()
    homeStreakLosses = IntVar()
    homeWins.set(parsedHomeStats.iloc[0]['W'])
    homeLoss.set(parsedHomeStats.iloc[0]['L'])
    homeOTL.set(parsedHomeStats.iloc[0]['OTL'])
    homeGFGP.set(parsedHomeStats.iloc[0]['GF/GP'])
    homeGAGP.set(parsedHomeStats.iloc[0]['GA/GP'])
    homeOR.set(parsedHomeStats.iloc[0]['OffensiveRank'])
    homeDR.set(parsedHomeStats.iloc[0]['DefensiveRank'])
    homePPR.set(parsedHomePpStats.iloc[0]['PP_Rank'])
    homePKR.set(parsedHomePkStats.iloc[0]['PK_Rank'])
    homeStreakWins.set(parsedHomeStreakStats.iloc[0]['Wins'])
    homeStreakLosses.set(parsedHomeStreakStats.iloc[0]['Losses'])
    winsHome = Label(teamStatsFrame, textvariable = homeWins)
    winsHome.grid(row = 0, column = 1, padx = 10, pady = 10)
    lossesHome = Label(teamStatsFrame, textvariable = homeLoss)
    lossesHome.grid(row = 1, column = 1, padx = 10, pady = 10)
    otlHome = Label(teamStatsFrame, textvariable = homeOTL)
    otlHome.grid(row = 2, column = 1, padx = 10, pady = 10)
    gfgpHome = Label(teamStatsFrame, textvariable = homeGFGP)
    gfgpHome.grid(row = 3, column = 1, padx = 10, pady = 10)
    gagpHome = Label(teamStatsFrame, textvariable = homeGAGP)
    gagpHome.grid(row = 4, column = 1, padx = 10, pady = 10)
    orHome = Label(teamStatsFrame, textvariable = homeOR)
    orHome.grid(row = 5, column = 1, padx = 10, pady = 10)
    drHome = Label(teamStatsFrame, textvariable = homeDR)
    drHome.grid(row = 6, column = 1, padx = 10, pady = 10)
    PprHome = Label(teamStatsFrame, textvariable = homePPR)
    PprHome.grid(row = 7, column = 1, padx = 10, pady = 10)
    PkrHome = Label(teamStatsFrame, textvariable = homePKR)
    PkrHome.grid(row = 8, column = 1, padx = 10, pady = 10)
    winsStreakHome = Label(teamStatsFrame, textvariable = homeStreakWins)
    winsStreakHome.grid(row = 9, column = 1, padx = 10, pady = 10)
    lossStreakHome = Label(teamStatsFrame, textvariable = homeStreakLosses)
    lossStreakHome.grid(row = 10, column = 1, padx = 10, pady = 10)


def populateAwayTeamStats():
    awayWins = IntVar()
    awayLoss = IntVar()
    awayOTL = IntVar()
    # GF/GP, GA/GP and the rank columns were formatted as strings upstream
    awayGFGP = StringVar()
    awayGAGP = StringVar()
    awayOS = IntVar()
    awayOR = StringVar()
    awayDS = IntVar()
    awayDR = StringVar()
    awayPP = IntVar()
    awayPPR = StringVar()
    awayPK = IntVar()
    awayPKR = StringVar()
    awayStreakWins = IntVar()
    awayStreakLosses = IntVar()
    awayWins.set(parsedAwayStats.iloc[0]['W'])
    awayLoss.set(parsedAwayStats.iloc[0]['L'])
    awayOTL.set(parsedAwayStats.iloc[0]['OTL'])
    awayGFGP.set(parsedAwayStats.iloc[0]['GF/GP'])
    awayGAGP.set(parsedAwayStats.iloc[0]['GA/GP'])
    awayOR.set(parsedAwayStats.iloc[0]['OffensiveRank'])
    awayDR.set(parsedAwayStats.iloc[0]['DefensiveRank'])
    awayPPR.set(parsedAwayPpStats.iloc[0]['PP_Rank'])
    awayPKR.set(parsedAwayPkStats.iloc[0]['PK_Rank'])
    awayStreakWins.set(parsedAwayStreakStats.iloc[0]['Wins'])
    awayStreakLosses.set(parsedAwayStreakStats.iloc[0]['Losses'])
    winsAway = Label(teamStatsFrame, textvariable = awayWins)
    winsAway.grid(row = 0, column = 4, padx = 10, pady = 10)
    lossesAway = Label(teamStatsFrame, textvariable = awayLoss)
    lossesAway.grid(row = 1, column = 4, padx = 10, pady = 10)
    otlAway = Label(teamStatsFrame, textvariable = awayOTL)
    otlAway.grid(row = 2, column = 4, padx = 10, pady = 10)
    gfgpAway = Label(teamStatsFrame, textvariable = awayGFGP)
    gfgpAway.grid(row = 3, column = 4, padx = 10, pady = 10)
    gagpAway = Label(teamStatsFrame, textvariable = awayGAGP)
    gagpAway.grid(row = 4, column = 4, padx = 10, pady = 10)
    orAway = Label(teamStatsFrame, textvariable = awayOR)
    orAway.grid(row = 5, column = 4, padx = 10, pady = 10)
    drAway = Label(teamStatsFrame, textvariable = awayDR)
    drAway.grid(row = 6, column = 4, padx = 10, pady = 10)
    PprAway = Label(teamStatsFrame, textvariable = awayPPR)
    PprAway.grid(row = 7, column = 4, padx = 10, pady = 10)
    PkrAway = Label(teamStatsFrame, textvariable = awayPKR)
    PkrAway.grid(row = 8, column = 4, padx = 10, pady = 10)
    winsStreakAway = Label(teamStatsFrame, textvariable = awayStreakWins)
    winsStreakAway.grid(row = 9, column = 4, padx = 10, pady = 10)
    lossStreakAway = Label(teamStatsFrame, textvariable = awayStreakLosses)
    lossStreakAway.grid(row = 10, column = 4, padx = 10, pady = 10)
def calculateStatistics():
    # home goalie stats
    # goalie terms are divided by 2.5 since there are far more goalies than teams
    numOfHomeGoalies = len(homeGoalieStats)
    homeGoalieGaaCalc = (((numOfHomeGoalies - parsedHomeGoalieStats.iloc[0]['GAA Rank']) * 0.10) / 2.5)
    homeGoalieSvCalc = (((numOfHomeGoalies - parsedHomeGoalieStats.iloc[0]['SV% Rank']) * 0.10) / 2.5)
    # home strength calcs
    numOfHomeTeams = len(homeTeamStats)
    homeOffensiveStrengthCalc = ((numOfHomeTeams - int(parsedHomeStats.iloc[0]['OffensiveRank'])) * 0.30)
    homeDefensiveStrengthCalc = ((numOfHomeTeams - int(parsedHomeStats.iloc[0]['DefensiveRank'])) * 0.20)
    homePowerPlayStrengthCalc = ((numOfHomeTeams - int(parsedHomePpStats.iloc[0]['PP_Rank'])) * 0.15)
    homePenaltyKillStrengthCalc = ((numOfHomeTeams - int(parsedHomePkStats.iloc[0]['PK_Rank'])) * 0.15)
    # away goalie stats
    # away goalie terms are likewise divided by 2.5
    numAwayGoalies = len(awayGoalieStats)
    awayGoalieGaaCalc = (((numAwayGoalies - parsedAwayGoalieStats.iloc[0]['GAA Rank']) * 0.10) / 2.5)
    awayGoalieSvCalc = (((numAwayGoalies - parsedAwayGoalieStats.iloc[0]['SV% Rank']) * 0.10) / 2.5)
    # away strength calcs
    numOfAwayTeams = len(awayTeamStats)
    awayOffensiveStrengthCalc = ((numOfAwayTeams - int(parsedAwayStats.iloc[0]['OffensiveRank'])) * 0.30)
    awayDefensiveStrengthCalc = ((numOfAwayTeams - int(parsedAwayStats.iloc[0]['DefensiveRank'])) * 0.20)
    awayPowerPlayStrengthCalc = ((numOfAwayTeams - int(parsedAwayPpStats.iloc[0]['PP_Rank'])) * 0.15)
    awayPenaltyKillStrengthCalc = ((numOfAwayTeams - int(parsedAwayPkStats.iloc[0]['PK_Rank'])) * 0.15)
    # calculate totals
    homeTotalCount = homeGoalieGaaCalc + homeGoalieSvCalc + homeOffensiveStrengthCalc + homeDefensiveStrengthCalc + homePowerPlayStrengthCalc + homePenaltyKillStrengthCalc
    awayTotalCount = awayGoalieGaaCalc + awayGoalieSvCalc + awayOffensiveStrengthCalc + awayDefensiveStrengthCalc + awayPowerPlayStrengthCalc + awayPenaltyKillStrengthCalc
    totalCount = homeTotalCount + awayTotalCount
    homePercentage = ((homeTotalCount / totalCount) * 100)
    awayPercentage = ((awayTotalCount / totalCount) * 100)
    # display calculations
    finalHomePercentage = StringVar()
    finalHomePercentage.set("{:.2f}".format(homePercentage) + " %")
    homeTeam = Label(totalCalcFrame, text = parsedHomeStats.iloc[0]['Team'])
    homeTeam.grid(row = 0, column = 0, padx = 10, pady = 10)
    homePercentageLabel = Label(totalCalcFrame, textvariable = finalHomePercentage)
    homePercentageLabel.grid(row = 1, column = 0, padx = 10, pady = 10)
    finalAwayPercentage = StringVar()
    finalAwayPercentage.set("{:.2f}".format(awayPercentage) + " %")
    awayTeam = Label(totalCalcFrame, text = parsedAwayStats.iloc[0]['Team'])
    awayTeam.grid(row = 0, column = 1, padx = 10, pady = 10)
    awayPercentageLabel = Label(totalCalcFrame, textvariable = finalAwayPercentage)
    awayPercentageLabel.grid(row = 1, column = 1, padx = 10, pady = 10)
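# Worked example of the weighting above (illustrative numbers): with 31 teams and
# roughly 78 goalies, a 1st-ranked offense contributes (31 - 1) * 0.30 = 9.0,
# a 5th-ranked defense (31 - 5) * 0.20 = 5.2, a 10th-ranked power play
# (31 - 10) * 0.15 = 3.15, and an 8th-ranked goalie GAA ((78 - 8) * 0.10) / 2.5 = 2.8;
# the home and away totals built this way are then normalized into win percentages.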
def calculateGoalsScored():
    homeOffensiveAdjust = 0
    awayOffensiveAdjust = 0
    if int(parsedHomeStats.iloc[0]['OffensiveRank']) <= 7:
        homeOffensiveAdjust = 1.0
    elif 7 < int(parsedHomeStats.iloc[0]['OffensiveRank']) <= 15:
        homeOffensiveAdjust = 0.5
    if int(parsedAwayStats.iloc[0]['OffensiveRank']) <= 7:
        awayOffensiveAdjust = 1.0
    elif 7 < int(parsedAwayStats.iloc[0]['OffensiveRank']) <= 15:
        awayOffensiveAdjust = 0.5
    # calculate score method one: average of own GF/GP and opponent GA/GP
    homeMethodOne = (statistics.mean([float(parsedHomeStats.iloc[0]['GF/GP']), float(parsedAwayStats.iloc[0]['GA/GP'])]) + homeOffensiveAdjust)
    awayMethodOne = (statistics.mean([float(parsedAwayStats.iloc[0]['GF/GP']), float(parsedHomeStats.iloc[0]['GA/GP'])]) + awayOffensiveAdjust)
    # calculate score method two: expected shots times the opposing goalie's miss rate
    homeExpectedShots = statistics.mean([float(parsedHomeStats.iloc[0]['SF/GP']), float(parsedAwayStats.iloc[0]['SA/GP'])])
    awayExpectedShots = statistics.mean([float(parsedAwayStats.iloc[0]['SF/GP']), float(parsedHomeStats.iloc[0]['SA/GP'])])
    homeMethodTwo = (((1.0 - float(parsedAwayGoalieStats.iloc[0]['SV%'])) * homeExpectedShots) + homeOffensiveAdjust)
    awayMethodTwo = (((1.0 - float(parsedHomeGoalieStats.iloc[0]['SV%'])) * awayExpectedShots) + awayOffensiveAdjust)
    # calculate score method three: expected shots divided by average shots per goal
    homeAvgShotsPerGoal = (float(parsedHomeStats.iloc[0]['SF/GP']) / float(parsedHomeStats.iloc[0]['GF/GP']))
    awayAvgShotsPerGoal = (float(parsedAwayStats.iloc[0]['SF/GP']) / float(parsedAwayStats.iloc[0]['GF/GP']))
    homeMethodThree = ((homeExpectedShots / homeAvgShotsPerGoal) + homeOffensiveAdjust)
    awayMethodThree = ((awayExpectedShots / awayAvgShotsPerGoal) + awayOffensiveAdjust)
    # method four: opponent's goals against per game
    homeMethodFour = (float(parsedAwayStats.iloc[0]['GA/GP']) + homeOffensiveAdjust)
    awayMethodFour = (float(parsedHomeStats.iloc[0]['GA/GP']) + awayOffensiveAdjust)
    # method five: own goals for per game
    homeMethodFive = (float(parsedHomeStats.iloc[0]['GF/GP']) + homeOffensiveAdjust)
    awayMethodFive = (float(parsedAwayStats.iloc[0]['GF/GP']) + awayOffensiveAdjust)
    homeAverage = statistics.mean([homeMethodOne, homeMethodTwo, homeMethodThree, homeMethodFour, homeMethodFive])
    awayAverage = statistics.mean([awayMethodOne, awayMethodTwo, awayMethodThree, awayMethodFour, awayMethodFive])
    homeScore = homeAverage
    awayScore = awayAverage
    # scale each side's projection by the opposing team's defensive rank
    if int(parsedAwayStats.iloc[0]['DefensiveRank']) < (len(awayTeamStats) / 2):
        homeScore = ((1 - (((len(awayTeamStats) / 2) - int(parsedAwayStats.iloc[0]['DefensiveRank'])) / 100)) * homeAverage)
    elif int(parsedAwayStats.iloc[0]['DefensiveRank']) > (len(awayTeamStats) / 2):
        homeScore = ((1 + ((int(parsedAwayStats.iloc[0]['DefensiveRank']) - (len(awayTeamStats) / 2)) / 100)) * homeAverage)
    if int(parsedHomeStats.iloc[0]['DefensiveRank']) < (len(homeTeamStats) / 2):
        awayScore = ((1 - (((len(homeTeamStats) / 2) - int(parsedHomeStats.iloc[0]['DefensiveRank'])) / 100)) * awayAverage)
    elif int(parsedHomeStats.iloc[0]['DefensiveRank']) > (len(homeTeamStats) / 2):
        awayScore = ((1 + ((int(parsedHomeStats.iloc[0]['DefensiveRank']) - (len(homeTeamStats) / 2)) / 100)) * awayAverage)
    # display final calculated goals
    finalHomeGoals = StringVar()
    finalHomeGoals.set("Goals: {:.2f}".format(homeScore))
    homeGoalsLabel = Label(totalCalcFrame, textvariable = finalHomeGoals)
    homeGoalsLabel.grid(row = 2, column = 0, padx = 10, pady = 10)
    finalAwayGoals = StringVar()
    finalAwayGoals.set("Goals: {:.2f}".format(awayScore))
    awayGoalsLabel = Label(totalCalcFrame, textvariable = finalAwayGoals)
    awayGoalsLabel.grid(row = 2, column = 1, padx = 10, pady = 10)
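# Worked example of the rank adjustment above (illustrative numbers): with 31 teams
# the midpoint is 15.5, so facing a 5th-ranked defense scales projected goals by
# 1 - (15.5 - 5) / 100 = 0.895, while facing a 25th-ranked defense scales them by
# 1 + (25 - 15.5) / 100 = 1.095.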
def computeStats():
    populateHomeGoalieStats()
    populateAwayGoalieStats()
    populateHomeTeamStats()
    populateAwayTeamStats()
    calculateStatistics()
    calculateGoalsScored()


def updateHomeListBox(homeTeamValues):
    homeTeamList.delete(0, END)
    for item in homeTeamValues:
        homeTeamList.insert(END, item)


def fillOutHomeTeamList(e):
    homeTeamSearchBox.delete(0, END)
    homeTeamSearchBox.insert(0, homeTeamList.get(ANCHOR))
    homeTeamSelected(homeTeamList.get(ANCHOR))


def checkHomeTeam(e):
    typed = homeTeamSearchBox.get()
    if typed == "":
        data = homeTeamValues
    else:
        data = []
        for item in homeTeamValues:
            if typed.lower() in item.lower():
                data.append(item)
    updateHomeListBox(data)


def updateAwayListBox(awayTeamValues):
    awayTeamList.delete(0, END)
    for item in awayTeamValues:
        awayTeamList.insert(END, item)


def fillOutAwayTeamList(e):
    awayTeamSearchBox.delete(0, END)
    awayTeamSearchBox.insert(0, awayTeamList.get(ANCHOR))
    awayTeamSelected(awayTeamList.get(ANCHOR))


def checkAwayTeam(e):
    typed = awayTeamSearchBox.get()
    if typed == "":
        data = awayTeamValues
    else:
        data = []
        for item in awayTeamValues:
            if typed.lower() in item.lower():
                data.append(item)
    updateAwayListBox(data)


def updateHomeGoalieListBox(homeGoalieValues):
    homeGoalieList.delete(0, END)
    for item in homeGoalieValues:
        homeGoalieList.insert(END, item)


def fillOutHomeGoalieList(e):
    homeGoalieSearchBox.delete(0, END)
    homeGoalieSearchBox.insert(0, homeGoalieList.get(ANCHOR))
    homeGoalieSelected(homeGoalieList.get(ANCHOR))


def checkHomeGoalie(e):
    typed = homeGoalieSearchBox.get()
    if typed == "":
        data = homeGoalieValues
    else:
        data = []
        for item in homeGoalieValues:
            if typed.lower() in item.lower():
                data.append(item)
    updateHomeGoalieListBox(data)


def updateAwayGoalieListBox(awayGoalieValues):
    awayGoalieList.delete(0, END)
    for item in awayGoalieValues:
        awayGoalieList.insert(END, item)


def fillOutAwayGoalieList(e):
    awayGoalieSearchBox.delete(0, END)
    awayGoalieSearchBox.insert(0, awayGoalieList.get(ANCHOR))
    awayGoalieSelected(awayGoalieList.get(ANCHOR))


def checkAwayGoalie(e):
    typed = awayGoalieSearchBox.get()
    if typed == "":
        data = awayGoalieValues
    else:
        data = []
        for item in awayGoalieValues:
            if typed.lower() in item.lower():
                data.append(item)
    updateAwayGoalieListBox(data)
#web scrape links
# homeTeamStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20202021&thruseason=20202021&stype=2&sit=5v5&score=all&rate=n&team=all&loc=H&gpf=410&fd=&td='
# awayTeamStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20202021&thruseason=20202021&stype=2&sit=5v5&score=all&rate=n&team=all&loc=A&gpf=410&fd=&td='
# homeGoalieStatsUrl = 'http://www.naturalstattrick.com/playerteams.php?fromseason=20202021&thruseason=20202021&stype=2&sit=5v5&score=all&stdoi=g&rate=n&team=ALL&pos=S&loc=H&toi=0&gpfilt=none&fd=&td=&tgp=410&lines=single&draftteam=ALL'
# awayGoalieStatsUrl = 'http://www.naturalstattrick.com/playerteams.php?fromseason=20202021&thruseason=20202021&stype=2&sit=5v5&score=all&stdoi=g&rate=n&team=ALL&pos=S&loc=A&toi=0&gpfilt=none&fd=&td=&tgp=410&lines=single&draftteam=ALL'
# homePowerPlayStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20202021&thruseason=20202021&stype=2&sit=pp&score=all&rate=n&team=all&loc=H&gpf=410&fd=&td='
# awayPowerPlayStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20202021&thruseason=20202021&stype=2&sit=pp&score=all&rate=n&team=all&loc=A&gpf=410&fd=&td='
# homePenaltyKillStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20202021&thruseason=20202021&stype=2&sit=pk&score=all&rate=n&team=all&loc=H&gpf=410&fd=&td='
# awayPenaltyKillStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20202021&thruseason=20202021&stype=2&sit=pk&score=all&rate=n&team=all&loc=A&gpf=410&fd=&td='
homeTeamStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20192020&thruseason=20202021&stype=2&sit=5v5&score=all&rate=n&team=all&loc=H&gpf=410&fd=&td='
awayTeamStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20192020&thruseason=20202021&stype=2&sit=5v5&score=all&rate=n&team=all&loc=A&gpf=410&fd=&td='
homeGoalieStatsUrl = 'http://www.naturalstattrick.com/playerteams.php?fromseason=20192020&thruseason=20202021&stype=2&sit=5v5&score=all&stdoi=g&rate=n&team=ALL&pos=S&loc=H&toi=0&gpfilt=none&fd=&td=&tgp=410&lines=single&draftteam=ALL'
awayGoalieStatsUrl = 'http://www.naturalstattrick.com/playerteams.php?fromseason=20192020&thruseason=20202021&stype=2&sit=5v5&score=all&stdoi=g&rate=n&team=ALL&pos=S&loc=A&toi=0&gpfilt=none&fd=&td=&tgp=410&lines=single&draftteam=ALL'
homePowerPlayStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20192020&thruseason=20202021&stype=2&sit=pp&score=all&rate=n&team=all&loc=H&gpf=410&fd=&td='
awayPowerPlayStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20192020&thruseason=20202021&stype=2&sit=pp&score=all&rate=n&team=all&loc=A&gpf=410&fd=&td='
homePenaltyKillStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20192020&thruseason=20202021&stype=2&sit=pk&score=all&rate=n&team=all&loc=H&gpf=410&fd=&td='
awayPenaltyKillStatsUrl = 'http://www.naturalstattrick.com/teamtable.php?fromseason=20192020&thruseason=20202021&stype=2&sit=pk&score=all&rate=n&team=all&loc=A&gpf=410&fd=&td='
streakStatsUrl = 'http://www.naturalstattrick.com/teamstreaks.php'
#web scrape to get dataframe tables in Pandas
homeTeamStats = webScrapeTeamStatsUrl(homeTeamStatsUrl)
awayTeamStats = webScrapeTeamStatsUrl(awayTeamStatsUrl)
homeGoalieStats = webScrapeGoalieStatsUrl(homeGoalieStatsUrl)
awayGoalieStats = webScrapeGoalieStatsUrl(awayGoalieStatsUrl)
homePowerPlayStats = webScrapePowerPlayStatsUrl(homePowerPlayStatsUrl)
awayPowerPlayStats = webScrapePowerPlayStatsUrl(awayPowerPlayStatsUrl)
homePenaltyKillStats = webScrapePenaltyKillStatsUrl(homePenaltyKillStatsUrl)
awayPenaltyKillStats = webScrapePenaltyKillStatsUrl(awayPenaltyKillStatsUrl)
homeStreakStats = webScrapeStreakStatsUrl(streakStatsUrl)
awayStreakStats = webScrapeStreakStatsUrl(streakStatsUrl)
mainFrame = LabelFrame(root)
mainFrame.pack(fill = BOTH, expand = 1)
myCanvas = Canvas(mainFrame)
myCanvas.pack(side = LEFT, fill = BOTH, expand = 1)
yscrollbar = ttk.Scrollbar(mainFrame, orient = VERTICAL, command = myCanvas.yview)
yscrollbar.pack(side = RIGHT, fill = Y)
myCanvas.configure(yscrollcommand = yscrollbar.set)
myCanvas.bind('<Configure>', lambda e: myCanvas.configure(scrollregion = myCanvas.bbox('all')))
myFrame = Frame(myCanvas)
myCanvas.create_window((0,0), window = myFrame, anchor = "nw")
#create user input frames
selectionFrame = LabelFrame(myFrame, padx = 10, pady = 10)
selectionFrame.pack(padx = 75, pady = 5)
#goalie statistics frame
goalieStatsFrame = LabelFrame(myFrame, padx = 10, pady = 10)
goalieStatsFrame.pack(padx = 75, pady = 5)
#team statistics frame
teamStatsFrame = LabelFrame(myFrame, padx = 10, pady = 10)
teamStatsFrame.pack(padx = 75, pady = 5)
#calculation frame
totalCalcFrame = LabelFrame(myFrame, padx = 10, pady = 10, width = 250, height = 150)
totalCalcFrame.pack(padx = 75, pady = 5)
homeGoalieDisplay()
awayGoalieDisplay()
homeTeamDisplay()
awayTeamDisplay()
#home team selection label
homeTeamLabel = ttk.Label(selectionFrame, text = 'Select Home Team')
homeTeamLabel.grid(row = 0, column = 0, padx = 5, pady = 10)
#home team searchbox
homeTeamValues = list(homeTeamStats['Team'].unique())
homeTeamSearchBox = Entry(selectionFrame)
homeTeamSearchBox.grid(row = 1, column = 0)
homeTeamList = Listbox(selectionFrame)
homeTeamList.grid(row = 2, column = 0)
updateHomeListBox(homeTeamValues)
homeTeamList.bind("<<ListboxSelect>>", fillOutHomeTeamList)
homeTeamSearchBox.bind("<KeyRelease>", checkHomeTeam)
#away team selection label
awayTeamLabel = ttk.Label(selectionFrame, text = 'Select Away Team')
awayTeamLabel.grid(row = 0, column = 1, padx = 5, pady = 10)
#away team searchbox
awayTeamValues = list(awayTeamStats['Team'].unique())
awayTeamSearchBox = Entry(selectionFrame)
awayTeamSearchBox.grid(row = 1, column = 1)
awayTeamList = Listbox(selectionFrame)
awayTeamList.grid(row = 2, column = 1)
updateAwayListBox(awayTeamValues)
awayTeamList.bind("<<ListboxSelect>>", fillOutAwayTeamList)
awayTeamSearchBox.bind("<KeyRelease>", checkAwayTeam)
#home goalie selection label
homeGoalieLabel = ttk.Label(selectionFrame, text = 'Select Home Goalie')
homeGoalieLabel.grid(row = 3, column = 0, padx = 5, pady = 10)
#home goalie combobox
homeGoalieValues = list(homeGoalieStats['Player'].unique())
homeGoalieSearchBox = Entry(selectionFrame)
homeGoalieSearchBox.grid(row = 4, column = 0)
homeGoalieList = Listbox(selectionFrame)
homeGoalieList.grid(row = 5, column = 0)
updateHomeGoalieListBox(homeGoalieValues)
homeGoalieList.bind("<<ListboxSelect>>", fillOutHomeGoalieList)
homeGoalieSearchBox.bind("<KeyRelease>", checkHomeGoalie)
#away goalie selection label
awayGoalieLabel = ttk.Label(selectionFrame, text = 'Select Away Goalie')
awayGoalieLabel.grid(row = 3, column = 1, padx = 5, pady = 10)
#away goalie combobox
awayGoalieValues = list(awayGoalieStats['Player'].unique())
awayGoalieSearchBox = Entry(selectionFrame)
awayGoalieSearchBox.grid(row = 4, column = 1)
awayGoalieList = Listbox(selectionFrame)
awayGoalieList.grid(row = 5, column = 1)
updateAwayGoalieListBox(awayGoalieValues)
awayGoalieList.bind("<<ListboxSelect>>", fillOutAwayGoalieList)
awayGoalieSearchBox.bind("<KeyRelease>", checkAwayGoalie)
# awayGoalieCombo = ttk.Combobox(selectionFrame, value = awayGoalieValues)
# awayGoalieCombo.grid(row = 4, column = 1)
# awayGoalieCombo.bind("<<ComboboxSelected>>", awayGoalieSelected)
button = ttk.Button(myFrame, text = 'Run', command=lambda : computeStats())
button.pack(pady = 10)
#GUI mainloop
root.mainloop()
| true
|
ab3478fbda20466fc7ea7e0121b3867724b60175
|
Python
|
dantin/daylight
|
/dcp/045/solution.py
|
UTF-8
| 584
| 3.53125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
import random
def rand5():
    return random.randint(1, 5)


def rand7():
    while True:
        num = 5 * (rand5() - 1) + (rand5() - 1)
        if num < 21:
            return num % 7 + 1
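# Why the rejection step works: 5 * (rand5() - 1) + (rand5() - 1) is uniform over
# 0..24 (25 equally likely outcomes). Discarding values >= 21 leaves 21 outcomes,
# exactly three in each residue class mod 7, so num % 7 + 1 is uniform over 1..7;
# a draw is accepted with probability 21/25, about 1.19 attempts per call.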
if __name__ == '__main__':
    count = 10 ** 6
    result_dict = {}
    for _ in range(count):
        num = rand7()
        if num not in result_dict:
            result_dict[num] = 0
        result_dict[num] += 1
    print('\n Output:')
    for num, freq in sorted(result_dict.items()):
        print(' {}: {}, probability: {}'.format(num, freq, freq / count))
| true
|
5786c068e4f55b1e2d683103a1d2f43ac54dc00a
|
Python
|
claying/state-space-energy
|
/period.py
|
UTF-8
| 1,725
| 3.4375
| 3
|
[] |
no_license
|
import numpy as np
def weekends_array(T):
    T1 = 95
    T_we = 48
    T_wd = 120
    weekends = []
    for i in range(T):
        if i > T1 and (i - T1 - 1) % 168 < T_we:
            weekends.append(True)
        else:
            weekends.append(False)
    weekends = np.array(weekends)
    workdays = np.invert(weekends)
    return weekends, workdays


def days_array(T):
    T_day = 11
    T_night = 24 - T_day
    T1_night = 7
    days = []
    for i in range(T):
        if i >= T1_night and (i - T1_night) % 24 < T_day:
            days.append(True)
        else:
            days.append(False)
    days = np.array(days)
    return days


def nights_array(T):
    TT_day = 14
    TT_night = 24 - TT_day
    TT1_night = 6
    nights = []
    for i in range(T):
        if i >= TT1_night and (i - TT1_night) % 24 < TT_day:
            nights.append(False)
        else:
            nights.append(True)
    nights = np.array(nights)
    complet_days = np.invert(nights)
    return nights, complet_days


def regimes(T, t0=1, di=(8, 19), ni=(21, 7), tw0=95, wl=48):
    """
    t0: hour of day at index 0
    di: (start, end) hours of the day regime
    ni: (start, end) hours of the night regime
    tw0: start position of the first weekend
    wl: weekend length
    """
    regimes = np.zeros(T, dtype=int)
    for i in range(T):
        time = (t0 + i) % 24
        if i >= tw0 and (i - tw0) % 168 < wl:
            # weekend
            regimes[i] = 4
        elif (time >= ni[0] and time < 24) or (time + 24 >= ni[0] and time < ni[1]):
            # night
            regimes[i] = 1
        elif time >= di[0] and time < di[1]:
            # day
            regimes[i] = 0
        elif time >= di[1] and time < ni[0]:
            # day-to-night
            regimes[i] = 2
        else:
            # night-to-day
            regimes[i] = 3
    return regimes
# a = np.ones(200)*3
# a[days_array(200)] = 0
# a[nights_array(200)[0]] = 1
# a[weekends_array(200)[0]] = 1
# print(a)
# b = regimes(200)
# print(b)
# print(regimes(200, t0=1, di=(10,20), ni=(23,5)))
# print(regimes(200, t0=1, di=(7,18), ni=(21,5)))
# print(a==b)
# print (regimes(100, di=(8,19), ni=(21,6), tw0=0, wl=48))
| true
|
dc5d54f56c22785f00a277b3730982ca70fe654b
|
Python
|
demongolem/MultilevelSentiment
|
/CharLSTMSentiment.py
|
UTF-8
| 9,001
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Sep 19, 2018
@author: g.werner
'''
import Config
import json
from lib_model.bidirectional_lstm import LSTM
import logging
import nltk
from nltk import Tree
from nltk.tokenize import sent_tokenize, word_tokenize
import os
from os import listdir
from os.path import isfile, join
from queue import Queue
from stanfordcorenlp import StanfordCoreNLP
nltk.download('punkt')
# for testing only! Use the server created in Entry => StanfordSentiment for deployment usage
def getCoreNlpInstance(config_item):
    # don't need sentiment, however the stanford annotator does need it
    props = {'annotators': 'tokenize,ssplit,pos,lemma,ner,parse,coref,sentiment',
             'pipelineLanguage': 'en',
             'outputFormat': 'json',
             'parse.model': 'edu/stanford/nlp/models/srparser/englishSR.ser.gz',
             'sentiment.model': os.path.realpath(__file__) + '/../model/stanford/model-0000-70.74.ser.gz'
             }
    # we do not provide the same level of recovery as in StanfordSentiment. Please manually start your server first
    return StanfordCoreNLP(config_item.STANFORD_SERVER, port=config_item.STANFORD_PORT, logging_level=logging.DEBUG, max_retries=5, memory='8g')


def convert_scale(positive):
    # map a positive-class probability in [0, 1] to a score in [-1, 1]
    return 2 * positive - 1


def flatten(input_list):
    return [val for sublist in input_list for val in sublist]


def tree_to_str(tree):
    return ' '.join([w for w in tree.leaves()])


def get_rep_mention(coreference):
    for reference in coreference:
        if reference['isRepresentativeMention']:
            pos = (reference['startIndex'], reference['headIndex'])
            text = reference['text']
            return text, pos


def get_subtrees(tree):
    """ Return chunked sentences """
    subtrees = []
    queue = Queue()
    queue.put(tree)
    while not queue.empty():
        node = queue.get()
        for child in node:
            if isinstance(child, Tree):
                queue.put(child)
        if node.label() == "S":
            # if children are (respectively) 'NP' and 'VP'
            # convert subtree to string, else keep looking
            # TODO: MAKE SURE NP IS A PERSON
            child_labels = [child.label() for child in node]
            if "NP" in child_labels and "VP" in child_labels:
                sentence = tree_to_str(node)
                for child in node:
                    if child.label() == "NP":
                        # look for NNP
                        subchild_labels = [subchild.label() for subchild in child]
                        if "NNP" in subchild_labels:
                            noun = ""
                            for subchild in child:
                                if subchild.label() == "NNP":
                                    noun = ' '.join([noun, subchild.leaves()[0]])
                            subtrees.append((noun, sentence))
    return subtrees


class CharLSTMSentiment(object):

    def __init__(self):
        self.network = LSTM()
        self.network.build()
        self.server_on = False

    def config(self, config, nlp):
        self.nlp = nlp
        self.server_on = True

    def init_dict(self):
        local_dict = {}
        for k, _ in self.contexts:
            if k not in local_dict:
                local_dict[k] = None
        self.entities = local_dict

    def evaluate_single_document(self, document, mode):
        if mode == 'document':
            document = document[0:1000]
            p = self.network.predict_sentences([document])
            positive = p[0][0][0]
            return [convert_scale(positive)]
        elif mode == 'sentence':
            return self.evaluate_sentences(sent_tokenize(document))
        elif mode == 'entity':
            return self.get_entity_sentiment(document)
        else:
            return ['UNKNOWN MODE']

    # sentence sentiment function
    def evaluate_sentences(self, sentences):
        scores = []
        p = self.network.predict_sentences(sentences)
        for i in range(0, len(sentences)):
            positive = p[0][i][0]
            scores.append(convert_scale(positive))
        return scores

    # the following in this class all have to do with entity sentiment
    # we need to make sure it is serializable to json (i.e. beware of float32)
    def get_entity_sentiment(self, document):
        """ Create a dict of every entity with its associated sentiment """
        print('Parsing Document...')
        self.parse_doc(document)
        print('Done Parsing Document!')
        self.init_dict()
        # sentences = [sentence.encode('utf-8') for _, sentence in self.contexts]
        sentences = [sentence for _, sentence in self.contexts]
        print('Predicting!')
        predictions = self.network.predict_sentences(sentences)
        for i, c in enumerate(self.contexts):
            key = c[0]
            if self.entities[key] is not None:
                # running average of (positive - negative) over an entity's contexts
                self.entities[key] += (predictions[0][i][0] - predictions[0][i][1])
                self.entities[key] /= 2
            else:
                self.entities[key] = (predictions[0][i][0] - predictions[0][i][1])
        for e in self.entities.keys():
            # conversion for json purposes
            self.entities[e] = str(self.entities[e])
            print('Entity: %s -- sentiment: %s' % (e, self.entities[e]))
        return self.entities

    def parse_doc(self, document):
        """ Extract relevant entities in a document """
        print('Tokenizing sentences...')
        # why are we mixing nlp pipelines here?
        # nltk
        sentences = sent_tokenize(document)
        print('Done Sentence Tokenize!')
        # Context of all named entities
        ne_context = []
        for sentence in sentences:
            # change pronouns to their respective nouns
            print('Anaphora resolution for sentence: %s' % sentence)
            (output, modified_sentence) = self.coreference_resolution(sentence)
            tree = self.parse_sentence(output, modified_sentence)
            print('Done Anaphora Resolution!')
            # get context for each noun
            print('Named Entity Clustering:')
            context = get_subtrees(tree)
            for n, s in context:
                print('%s' % s)
            ne_context.append(context)
        self.contexts = flatten(ne_context)

    def coreference_resolution(self, sentence):
        # coreference resolution via corenlp
        print('Starting document annotation for ' + sentence)
        output_string = self.nlp.annotate(sentence)
        print('Done document annotation')
        output = json.loads(output_string)
        coreferences = output['corefs']
        entity_keys = coreferences.keys()
        tokens = word_tokenize(sentence)
        for k in entity_keys:
            # skip non PERSON NP
            if coreferences[k][0]['gender'] == 'MALE' or coreferences[k][0]['gender'] == 'FEMALE':
                rep_mention, pos = get_rep_mention(coreferences[k])
                for reference in coreferences[k]:
                    if not reference['isRepresentativeMention']:
                        start, end = reference['startIndex'] - 1, reference['headIndex'] - 1
                        if start == end:
                            tokens[start] = rep_mention
                        else:
                            tokens[start] = rep_mention
                            del tokens[start + 1: end]
        sentence = ' '.join(tokens)
        print('Ending coref function')
        return (output, sentence.encode('utf-8'))

    def parse_sentence(self, output, sentence):
        """ sentence --> named-entity chunked tree """
        try:
            return Tree.fromstring(output['sentences'][0]['parse'])
        except TypeError as e:
            import pdb; pdb.set_trace()


side_effect = []


def fetch_files(directory):
    global side_effect
    filelines = []
    onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]
    for onlyfile in onlyfiles:
        side_effect.append(onlyfile)
        with open(join(directory, onlyfile), 'r', encoding="utf-8") as f:
            filelines.append(f.readlines())
    return filelines


if __name__ == '__main__':
    cls = CharLSTMSentiment()
    config_item = Config.DevelopmentConfig
    cls.config(config_item, getCoreNlpInstance(config_item))
    document = 'Bob talked with the great ruler John yesterday. John mentioned how horrible Tesla is. The nefarious Bob agreed.'
    print('Fetching files')
    filelines = fetch_files('input/test')
    print(len(filelines))
    limit_files_to = 10
    for i in range(0, len(filelines)):
        if i == limit_files_to:
            break
        print(i)
        fileline = filelines[i]
        document = '\n'.join(fileline)
        result = cls.evaluate_single_document(document, 'entity')
        print(result)
| true
|
8bb363f298f22a6c70291075d6e6769d698a3ca5
|
Python
|
Jonsm/Data_analysis
|
/T2R_loop_fit.py
|
UTF-8
| 2,618
| 2.609375
| 3
|
[] |
no_license
|
from matplotlib import pyplot as plt
import numpy as np
import h5py
from scipy.optimize import curve_fit
from scipy.optimize import OptimizeWarning
import warnings
warnings.simplefilter("error", OptimizeWarning)
warnings.simplefilter("error", RuntimeWarning)
def func(x, a, b, c, d, g):
    return a * np.exp(-x / b) * np.cos(2 * np.pi * c * (x - g)) + d
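# The model is the damped cosine of a Ramsey fringe: amplitude a, decay time b
# (T2), oscillation frequency c (fR), offset d, and time shift g; the loop below
# extracts b and c, with uncertainties, from each repetition of the experiment.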
directory = r'D:\Data\Fluxonium #10_7.5GHzCav\T2R'
fname = '062217_T2R_YOKO_24.49mA_Cav7.3644GHz_-7dBm_Qubit4.1335GHz_25dBm_PiPulse420ns_Count40_TimeStep20.h5'
path = directory + '\\' + fname

T2_array = []
T2_err_array = []
fR_array = []
fR_err_array = []
pts_num = 40
time_step = 20
t2_guess = 1e-6
f2_guess = 2e6
time = np.linspace(0, pts_num * time_step, pts_num)
time_nice = np.linspace(0, pts_num * time_step, pts_num * 100)
loop_count = 90

# Read data and fit
with h5py.File(path, 'r') as hf:
    print('List of arrays in this file: \n', hf.keys())
    count = np.array(hf.get('count'))
    phase_raw = hf.get('PHASEMAG_Phase0')
    # print(phase_raw)
    for idx in range(loop_count):
        phase = phase_raw[idx, 0]
        phase = np.unwrap(phase) * 180 / np.pi
        phase = phase - np.min(phase)
        phase = abs(phase)
        guess = [np.max(phase) - np.min(phase), t2_guess, f2_guess, 0, 0]
        try:
            popt, pcov = curve_fit(func, time * 1e-9, phase, guess)
        except (RuntimeError, RuntimeWarning, OptimizeWarning):
            print("Doesn't fit well entry " + str(idx))
            continue
        a, b, c, d, g = popt  # b is T2, c is fR
        phase_fit = func(time_nice * 1e-9, a, b, c, d, g)
        perr = np.sqrt(abs(np.diag(pcov)))
        T2 = b * 1e6
        T2_err = perr[1] * 1e6
        fR = c * 1e-3  # in kHz
        fR_err = perr[2] * 1e-3
        if T2 < time_step * 1e-3:
            continue
        T2_array = np.append(T2_array, T2)
        T2_err_array = np.append(T2_err_array, T2_err)
        fR_array = np.append(fR_array, fR)
        fR_err_array = np.append(fR_err_array, fR_err)
        plt.figure(1)
        plt.plot(time, phase, 'g-o', alpha = 0.25)
        plt.plot(time_nice, phase_fit, 'k-')
        # print(T2)
        # print(T2_err)

count = np.linspace(0, len(T2_array), len(T2_array))
plt.figure(2)
plt.errorbar(count, T2_array, yerr = T2_err_array, fmt = 'h', mfc = 'none', mew = 2.0, mec = 'y', ecolor = 'y')
plt.figure(3)
plt.errorbar(count, fR_array, yerr = fR_err_array, fmt = 'h', mfc = 'none', mew = 2.0, mec = 'k', ecolor = 'k')
plt.show()
| true
|
fff810740eac3d131a1e6ab3f968ede46381b7c8
|
Python
|
gonzalob24/Learning_Central
|
/Python_Programming/BootCamp/regx.py
|
UTF-8
| 5,314
| 4.125
| 4
|
[] |
no_license
|
import re

patterns = ['term1', 'term2']
text = 'this is a string with term1, but not other term'

# print(re.search('hello', 'hello world'))
for pattern in patterns:
    print('Search for "%s" in: \n"%s"' % (pattern, text))
    # Check for a match
    if re.search(pattern, text):
        print("\n")
        print("match was found")
    else:
        print("\n")
        print("Match was not found")
print("\n\n")

# Splitting with regex
split_term = '@'
phrase = "what is your email, is it hello@gmail.com"
print(re.split(split_term, phrase))
print("\n\n")

# Finding all instances of a pattern
print(re.findall("match", "here is one match, here is another match"))


def multi_re_find(patterns, phrase):
    """
    Takes in a list of regex patterns and
    prints a list of all matches.
    :param patterns:
    :param phrase:
    :return:
    """
    for pattern in patterns:
        print("Searching the phrase using the re check: %r" % pattern)
        print(re.findall(pattern, phrase))
        print("\n")


print("\n\nUsing meta characters:")
test_phrase = "sdsd..sssddd...sdddsddd...dsds...dsssss...sdddd"
test_patterns = ['sd*',      # s followed by zero or more d's
                 'sd+',      # s followed by one or more d's
                 'sd?',      # s followed by zero or one d's
                 'sd{3}',    # s followed by 3 d's
                 'sd{2,3}',  # s followed by two to three d's
                 ]
multi_re_find(test_patterns, test_phrase)

"""
There are five ways to express repetition in a pattern:
1. A pattern followed by the meta-character * is repeated zero or more times.
2. Replace the * with + and the pattern must appear at least once.
3. Using ? means the pattern appears zero or one time.
4. For a specific number of occurrences, use {m} after the pattern, where m is
   replaced with the number of times the pattern should repeat.
5. Use {m,n} where m is the minimum number of repetitions and n is the maximum.
   Leaving out n ({m,}) means the value appears at least m times, with no maximum.
"""

"""
Character sets:
Are used when you wish to match any one of a group of characters at a
point in the input. Brackets are used to construct character set inputs.
For example: the input [ab] searches for occurrences of either a or b. Let's see
some examples:
"""
test_patterns2 = ['[sd]',    # either s or d
                  's[sd]+']  # s followed by one or more s or d
multi_re_find(test_patterns2, test_phrase)

"""
Exclusion (^)
We can use ^ to exclude terms by incorporating it into the bracket syntax notation.
For example: [^...] will match any single character not in the brackets. Let's see
some examples:
"""
'''
Use [^!.? ] to check for matches that are not a !, ., ?, or space.
Add a + to check that the match appears at least once. This basically translates
into finding the words.
'''
test_phrase3 = 'This is a string! But it has punctuation. How can we remove it?'
print(re.findall('[^!.? ]+', test_phrase3))

print("\n\nCharacter Ranges")
"""
Character Ranges
As character sets grow larger, typing every character that should (or should not)
match could become very tedious. A more compact format using character ranges
lets you define a character set to include all of the contiguous characters between
a start and stop point. The format used is [start-end].
Common use cases are to search for a specific range of letters in the alphabet.
For instance, [a-f] would return matches with any occurrence of letters between a and f.
Let's walk through some examples:
"""
test_phrase4 = 'This is an example sentence. Lets see if we can find some letters.'
test_patterns4 = ['[a-z]+',       # sequences of lower case letters
                  '[A-Z]+',       # sequences of upper case letters
                  '[a-zA-Z]+',    # sequences of lower or upper case letters
                  '[A-Z][a-z]+']  # one upper case letter followed by lower case letters
multi_re_find(test_patterns4, test_phrase4)

print("\n\nEscape Codes")
r"""
Escape Codes
You can use special escape codes to find specific types of patterns in your data,
such as digits, non-digits, whitespace, and more. For example:
Code  Meaning
\d    a digit
\D    a non-digit
\s    whitespace (tab, space, newline, etc.)
\S    non-whitespace
\w    alphanumeric
\W    non-alphanumeric
Escapes are indicated by prefixing the character with a backslash \. Unfortunately,
a backslash must itself be escaped in normal Python strings, and that results in
expressions that are difficult to read. Using raw strings, created by prefixing the
literal value with r, eliminates this problem and maintains readability.
Personally, I think this use of r to escape a backslash is probably one of the things
that blocks someone who is not familiar with regex in Python from being able to read
regex code at first. Hopefully after seeing these examples this syntax will become clear.
"""
test_phrase5 = 'This is! a string!.? with some numbers 1233 and a symbol #hashtag'
test_patterns5 = [r'\d+',  # sequence of digits
                  r'\D+',  # sequence of non-digits
                  r'\s+',  # sequence of whitespace
                  r'\S+',  # sequence of non-whitespace
                  r'\w+',  # alphanumeric characters
                  r'\W+',  # non-alphanumeric
                  ]
multi_re_find(test_patterns5, test_phrase5)
| true
|
b8fe3d4664a2ffa4c2d642a0dd6bab1801902142
|
Python
|
rajlath/rkl_codes
|
/codechef/INTY2018_INF1803.py
|
UTF-8
| 898
| 2.875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Date : 2018-10-21 18:32:13
# @Author : raj lath (oorja.halt@gmail.com)
# @Link : link
# @Version : 1.0.0
#learned from solution by huggy_hermit
from sys import stdin
max_val=int(10e12)
min_val=int(-10e12)
def read_int() : return int(stdin.readline())
def read_ints() : return [int(x) for x in stdin.readline().split()]
def read_str() : return input()
def read_strs() : return [x for x in stdin.readline().split()]
limit = 100001
mods = 1000000007
def build_series():
    series = [0]
    for v in range(5, limit, 3):
        series.append((series[-1] + pow(v, v, mods)) % mods)
    return series
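# series[k] holds the prefix sum (mod 1e9+7) of pow(v, v) over the progression
# v = 5, 8, 11, ..., so each query below reduces to the prefix difference
# series[rite] - series[left], answered in O(1) after the one-time build.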
nb_test = read_int()
series = build_series()
for _ in range(nb_test):
    left, rite = read_ints()
    left = max(0, (left // 3) - 1)
    rite = (rite - 2) // 3
    resu = series[rite] - series[left]
    if resu < 0:
        resu += mods
    print(resu)
| true
|
e428e5a679e68058f0d5f55a4d602803f14ef919
|
Python
|
kenneth-miura/Drive-Syncer
|
/syncer.py
|
UTF-8
| 3,108
| 2.671875
| 3
|
[] |
no_license
|
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import glob
import os
import re
import argparse
# TODO: alter so this can take N directories & targets
def set_up_drive(settings_file, credentials_file):
    print(
        f'Settings File Location: {settings_file} Credentials File Location: {credentials_file}'
    )
    gauth = GoogleAuth(settings_file=settings_file)
    # Try to load saved client credentials
    gauth.LoadCredentialsFile(credentials_file)
    if gauth.credentials is None:
        # Authenticate if they're not there
        gauth.LocalWebserverAuth()
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.GetFlow()
        gauth.flow.params.update({'access_type': 'offline'})
        gauth.flow.params.update({'approval_prompt': 'force'})
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()
    # Save the current credentials to a file
    gauth.SaveCredentialsFile(credentials_file)
    return GoogleDrive(gauth)


def get_drive_target_dir_id(drive, drive_folder):
    folder_query = "'root' in parents and trashed=false and mimeType='application/vnd.google-apps.folder'"
    folder_list = drive.ListFile({"q": folder_query}).GetList()
    for folder in folder_list:
        if folder['title'] == drive_folder:
            drive_target_dir_id = folder['id']
            return drive_target_dir_id


def backup_dir(drive, drive_folder, folder_to_sync):
    drive_target_dir_id = get_drive_target_dir_id(drive, drive_folder)
    # Clean all old files in target (the sync is destructive: delete, then re-upload)
    query = f"'{drive_target_dir_id}' in parents and trashed=false"
    file_list = drive.ListFile({'q': query}).GetList()
    for file1 in file_list:
        file1.Delete()
    # Upload local files to drive
    for file_path in glob.glob(os.path.join(folder_to_sync, '*')):
        file_name = re.split('/', file_path)[-1]
        print(file_name)
        g_file = drive.CreateFile({"parents": [{"id": drive_target_dir_id}]})
        g_file.SetContentFile(file_path)
        g_file['title'] = file_name
        g_file.Upload()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("setting_file",
                        nargs='?',
                        default='settings.yaml',
                        help="location of the settings.yaml file")
    parser.add_argument("credentials_file",
                        nargs='?',
                        default='credentials.txt',
                        help="location of the credentials.txt file")
    parser.add_argument("drive_folder", help="Path to Folder in Drive that you want to backup to")
    parser.add_argument("folder_to_sync", help="Path to Local Folder you want to sync")
    args = parser.parse_args()
    drive = set_up_drive(args.setting_file, args.credentials_file)
    backup_dir(drive, args.drive_folder, args.folder_to_sync)


if __name__ == '__main__':
    main()
| true
|
104dd8ebea76a7037dbefff2f51774689eacd740
|
Python
|
fabiocaccamo/python-benedict
|
/tests/dicts/io/test_io_dict_xls.py
|
UTF-8
| 10,772
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
from unittest.mock import patch
from decouple import config
from benedict.dicts.io import IODict
from benedict.exceptions import ExtrasRequireModuleNotFoundError
from .test_io_dict import io_dict_test_case
class io_dict_xls_test_case(io_dict_test_case):
"""
This class describes an IODict / xls test case.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._extensions = [
"xlsx",
"xlsm",
"xls",
]
def test_from_xls_with_valid_file_valid_content(self):
expected_dict = {
"values": [
{
"mon": 10,
"tue": 11,
"wed": 12,
"thu": 13,
"fri": 14,
"sat": 15,
"sun": 16,
},
{
"mon": 20,
"tue": 21,
"wed": 22,
"thu": 23,
"fri": 24,
"sat": 25,
"sun": 26,
},
{
"mon": 30,
"tue": 31,
"wed": 32,
"thu": 33,
"fri": 34,
"sat": 35,
"sun": 36,
},
]
}
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_file_valid_content"
):
filepath = self.input_path(f"valid-content.{extension}")
# static method
d = IODict.from_xls(filepath)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
# constructor explicit format
d = IODict(filepath, format=extension)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
# constructor implicit format
d = IODict(filepath)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
@patch("benedict.serializers.xls.xls_installed", False)
def test_from_xls_with_valid_file_valid_content_but_xls_extra_not_installed(self):
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_file_valid_content_but_xls_extra_not_installed"
):
filepath = self.input_path(f"valid-content.{extension}")
# static method
with self.assertRaises(ExtrasRequireModuleNotFoundError):
_ = IODict.from_xls(filepath)
# constructor explicit format
with self.assertRaises(ExtrasRequireModuleNotFoundError):
_ = IODict(filepath, format=extension)
# constructor implicit format
with self.assertRaises(ExtrasRequireModuleNotFoundError):
_ = IODict(filepath)
def test_from_xls_with_valid_url_valid_content(self):
expected_dict = {
"values": [
{
"mon": 10,
"tue": 11,
"wed": 12,
"thu": 13,
"fri": 14,
"sat": 15,
"sun": 16,
},
{
"mon": 20,
"tue": 21,
"wed": 22,
"thu": 23,
"fri": 24,
"sat": 25,
"sun": 26,
},
{
"mon": 30,
"tue": 31,
"wed": 32,
"thu": 33,
"fri": 34,
"sat": 35,
"sun": 36,
},
]
}
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_url_valid_content"
):
# url = f"https://github.com/fabiocaccamo/python-benedict/raw/s3/tests/dicts/io/input/valid-content.{extension}"
url = f"https://github.com/fabiocaccamo/python-benedict/raw/main/tests/dicts/io/input/valid-content.{extension}"
# static method
d = IODict.from_xls(url)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
# constructor explicit format
d = IODict(url, format=extension)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
# constructor implicit format
d = IODict(url)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
def test_from_xls_with_valid_s3_url_valid_content(self):
aws_access_key_id = config("AWS_ACCESS_KEY_ID", default=None)
aws_secret_access_key = config("AWS_SECRET_ACCESS_KEY", default=None)
if not all([aws_access_key_id, aws_secret_access_key]):
# don't use s3 on GH CI
return
s3_options = {
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
}
expected_dict = {
"values": [
{
"mon": 10,
"tue": 11,
"wed": 12,
"thu": 13,
"fri": 14,
"sat": 15,
"sun": 16,
},
{
"mon": 20,
"tue": 21,
"wed": 22,
"thu": 23,
"fri": 24,
"sat": 25,
"sun": 26,
},
{
"mon": 30,
"tue": 31,
"wed": 32,
"thu": 33,
"fri": 34,
"sat": 35,
"sun": 36,
},
]
}
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_s3_url_valid_content"
):
url = f"s3://python-benedict/valid-content.{extension}"
# static method
d = IODict.from_xls(url, s3_options=s3_options)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
# constructor explicit format
d = IODict(url, format=extension, s3_options=s3_options)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
# constructor implicit format
d = IODict(url, s3_options=s3_options)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
def test_from_xls_with_valid_file_valid_content_custom_sheet_by_index_and_columns(
self,
):
expected_dict = {
"values": [
{
"name": "Red",
"hex": "#FF0000",
},
{
"name": "Green",
"hex": "#00FF00",
},
{
"name": "Blue",
"hex": "#0000FF",
},
]
}
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_file_valid_content_custom_sheet_by_index_and_columns"
):
filepath = self.input_path(f"valid-content.{extension}")
# static method
d = IODict.from_xls(
filepath,
sheet=1,
columns=["name", "hex"],
columns_row=False,
)
self.assertTrue(isinstance(d, dict))
self.assertEqual(d, expected_dict)
def test_from_xls_with_invalid_file(self):
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_file_valid_content"
):
filepath = self.input_path(f"invalid-file.{extension}")
# static method
with self.assertRaises(ValueError):
IODict.from_xls(filepath)
# constructor explicit format
with self.assertRaises(ValueError):
IODict(filepath, format=extension)
# constructor implicit format
with self.assertRaises(ValueError):
IODict(filepath)
def test_from_xls_with_valid_url_invalid_content(self):
for extension in self._extensions:
with self.subTest(
msg=f"test_from_xls_({extension})_with_valid_url_invalid_content"
):
url = "https://github.com/fabiocaccamo/python-benedict"
# static method
with self.assertRaises(ValueError):
IODict.from_xls(url)
# constructor explicit format
with self.assertRaises(ValueError):
IODict(url, format=extension)
# constructor implicit format
with self.assertRaises(ValueError):
IODict(url)
def test_from_xls_with_invalid_url(self):
for extension in self._extensions:
with self.subTest(msg=f"test_from_xls_({extension})_with_invalid_url"):
url = "https://github.com/fabiocaccamo/python-benedict-invalid"
# static method
with self.assertRaises(ValueError):
IODict.from_xls(url)
# constructor explicit format
with self.assertRaises(ValueError):
IODict(url, format=extension)
# constructor implicit format
with self.assertRaises(ValueError):
IODict(url)
def test_to_xls(self):
d = IODict(
{
"values": [
{"x": "1"},
{"x": "2"},
{"x": "3"},
{"x": "4"},
{"x": "5"},
],
}
)
with self.assertRaises(NotImplementedError):
_ = d.to_xls()
| true
|
587785f798ac61ed031c2d1ab2b77144b160392b
|
Python
|
feulf/ecc-mul-pow-jsong-pb-denver-070918
|
/test/index_test.py
|
UTF-8
| 475
| 2.765625
| 3
|
[] |
no_license
|
from unittest import TestCase
from ipynb.fs.full.index import *
class FieldElementTest(TestCase):
def test_mul(self):
a = FieldElement(24, 31)
b = FieldElement(19, 31)
self.assertEqual(a*b, FieldElement(22, 31))
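        # check: 24 * 19 = 456 = 14 * 31 + 22, so the product is 22 in F_31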
def test_pow(self):
a = FieldElement(17, 31)
self.assertEqual(a**3, FieldElement(15, 31))
a = FieldElement(5, 31)
b = FieldElement(18, 31)
self.assertEqual(a**5 * b, FieldElement(16, 31))
| true
|
13016854e8029f2766ebe175a061bef3b15ca74d
|
Python
|
pranu46/Pythontrials
|
/Class_HW/Praveena_Homework3.1.py
|
UTF-8
| 1,620
| 4.40625
| 4
|
[] |
no_license
|
'''
Addition and subtraction of quadratic expressions by using operator overloading.
Check the equality of the quadratic expressions.
Check the coefficients of the quadratic expressions to put + or - in the returned string
'''
class Quadratic:
def __init__(self, Q1, Q2, Q3):
self.Q1 = Q1
self.Q2 = Q2
self.Q3 = Q3
    def __str__(self):
        # Build the string from local variables so repeated str() calls
        # do not overwrite the numeric coefficients stored on the instance.
        if self.Q1 == -1:
            Q1 = '-x^2'
        elif self.Q1 == 1:
            Q1 = 'x^2'
        else:
            Q1 = '%sx^2' % self.Q1
        if self.Q2 < 0:
            Q2 = ' - %sx' % (self.Q2 * -1)
        elif self.Q2 == 0:
            Q2 = ''
        elif self.Q2 == 1:
            Q2 = ' + x'
        else:
            Q2 = ' + %sx' % self.Q2
        if self.Q3 < 0:
            Q3 = ' - %s' % (self.Q3 * -1)
        elif self.Q3 == 0:
            Q3 = ''
        else:
            Q3 = ' + %s' % self.Q3
        return Q1 + Q2 + Q3
def __add__(self, other):
return Quadratic(self.Q1 + other.Q1, self.Q2 + other.Q2, self.Q3 + other.Q3)
def __sub__(self, other):
return Quadratic(self.Q1 - other.Q1, self.Q2 - other.Q2, self.Q3 - other.Q3)
def __eq__(self,other):
if self.Q1 == other.Q1 and self.Q2 == other.Q2 and self.Q3 == other.Q3:
return True
else:
return False
Q1 = Quadratic(3, 8, -5)
Q2 = Quadratic(2, 3, 7)
print('Sum of 2 quadratic expressions:', Q1 + Q2)
print('Difference between 2 quadratic expressions:', Q1 - Q2)
print('Are the 2 given quadratic expressions the same?', Q1 == Q2)
| true
|
426215825b247cf9fa6432c18ee688cd1be21e9d
|
Python
|
vishal-1codes/python
|
/INTRODUCTION_TO_CLASSES/Class_Syntax.py
|
UTF-8
| 90
| 2.609375
| 3
|
[] |
no_license
|
#user-defined Python class names start with a capital letter.
class Animal(object):
pass
| true
|
99cb5d839933b8df7b2d77d703f342546782bd46
|
Python
|
rk-exxec/micropython
|
/tests/extmod/vfs_posix.py
|
UTF-8
| 2,447
| 2.828125
| 3
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
# Test for VfsPosix
try:
import gc
import os
os.VfsPosix
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
# We need a directory for testing that doesn't already exist.
# Skip the test if it does exist.
temp_dir = "micropy_test_dir"
try:
import os
os.stat(temp_dir)
print("SKIP")
raise SystemExit
except OSError:
pass
# getcwd and chdir
curdir = os.getcwd()
os.chdir("/")
print(os.getcwd())
os.chdir(curdir)
print(os.getcwd() == curdir)
# stat
print(type(os.stat("/")))
# listdir and ilistdir
print(type(os.listdir("/")))
# mkdir
os.mkdir(temp_dir)
# file create
f = open(temp_dir + "/test", "w")
f.write("hello")
f.close()
# close on a closed file should succeed
f.close()
# construct a file object with a raw fileno
f = open(2)
print(f)
# file read
f = open(temp_dir + "/test", "r")
print(f.read())
f.close()
# file finaliser, also see vfs_fat_finaliser.py
names = [temp_dir + "/x%d" % i for i in range(4)]
basefd = temp_dir + "/nextfd1"
nextfd = temp_dir + "/nextfd2"
with open(basefd, "w") as f:
base_file_no = f.fileno()
for i in range(1024): # move GC head forwards by allocating a lot of single blocks
[]
def write_files_without_closing():
for n in names:
open(n, "w").write(n)
sorted(list(range(128)), key=lambda x: x) # use up Python and C stack so f is really gone
write_files_without_closing()
gc.collect()
with open(nextfd, "w") as f:
next_file_no = f.fileno()
print("next_file_no <= base_file_no", next_file_no <= base_file_no)
for n in names + [basefd, nextfd]:
os.remove(n)
# rename
os.rename(temp_dir + "/test", temp_dir + "/test2")
print(os.listdir(temp_dir))
# construct new VfsPosix with path argument
vfs = os.VfsPosix(temp_dir)
print(list(i[0] for i in vfs.ilistdir(".")))
# stat, statvfs (statvfs may not exist)
print(type(vfs.stat(".")))
if hasattr(vfs, "statvfs"):
assert type(vfs.statvfs(".")) is tuple
# check types of ilistdir with str/bytes arguments
print(type(list(vfs.ilistdir("."))[0][0]))
print(type(list(vfs.ilistdir(b"."))[0][0]))
# remove
os.remove(temp_dir + "/test2")
print(os.listdir(temp_dir))
# remove with error
try:
import os
os.remove(temp_dir + "/test2")
except OSError:
print("remove OSError")
# rmdir
os.rmdir(temp_dir)
print(temp_dir in os.listdir())
# rmdir with error
try:
import os
os.rmdir(temp_dir)
except OSError:
print("rmdir OSError")
| true
|
4c4615dde5b56c2dac8a11f5cab49c72691edea9
|
Python
|
redstarkeT/CS-115-Assignments
|
/CS 115 Assignments/lab1-TimothyStephens.py
|
UTF-8
| 1,170
| 4
| 4
|
[] |
no_license
|
"""Timothy Stephens, I pledge my honor that I have abided by the Stevens Honor System."""
from math import factorial
from cs115 import reduce
import math
def inverse(n):
"""This function returns the inverse of the number plugged in."""
return 1/n
def add(x,y): return x+y
def e(n):
"""This function returns the summation of 1+1/n!"""
Step1=list(range(1,n+1))
Step2=list(map(factorial,Step1))
Step3=list(map(inverse,Step2))
Step4=reduce(add,Step3)
Step5=1+Step4
return Step5
def error(n):
"""This function returns the absolute value of the difference between math.e and e(n)."""
diff=math.e - e(n)
return abs(diff)
#e(n):
#Algorithm copied down from written stuff (sort of like proofs from discrete working backwards)
#Step 1: Make a range from 1 to n+1
#Step 2: Apply factorial to all of list using map
#Step 3: Apply an inverse function on all of list using map
#Step 4: Apply add to all the numbers using reduce
#Step 5: Add answer to +1 for final answer
#goal is to return 1+1/1!+1/2!+1/3!+...+1/n!
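#e.g. e(3) = 1 + 1/1! + 1/2! + 1/3! = 2.666...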
#>>>import math
#>>> math.e
#2.718281828459045
#>>> math.factorial(2)
#2
#>>> abs(-1)
#1
| true
|
23538c4646ce239f3893ffbfd3181c034ae1ad42
|
Python
|
lsx137946009/bandparser
|
/sensparser/sensomics_utils.py
|
UTF-8
| 1,933
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 15:45:47 2019
@author: lsx
"""
import numpy as np
import time
def function_date_parse(frame, date0_loc, date1_loc, date2_loc, date3_loc):
date0 = '{:08b}'.format(frame[date0_loc])
date1 = '{:08b}'.format(frame[date1_loc])
date2 = '{:08b}'.format(frame[date2_loc])
date3 = '{:08b}'.format(frame[date3_loc])
date = (date0+date1+date2+date3)[::-1]
date_sec = int(date[0 : 5], 2)
date_min = int(date[6 :11], 2)
date_hour = int(date[12:16], 2)
date_day = int(date[17:21], 2)
date_mon = int(date[22:25], 2)
date_year = int(date[26:31], 2) + 2000
time_ = time.struct_time(
tm_year=date_year,
tm_mon =date_mon,
tm_mday=date_day,
tm_hour=date_hour,
tm_min =date_min,
tm_sec =date_sec)
return time_
def function_byte_shift(byte_list):
byte_list.reverse()
data_len = len(byte_list)
shift = np.array(list(map(lambda x: 2**x, range(0, data_len*8, 8)))) # [1, 256, 65536, ...]
data = np.array(byte_list)
value = float(np.dot(shift, data))
return value
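# e.g. function_byte_shift([0x01, 0x00]) returns 256.0 (bytes are read big-endian: 0x0100)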
def function_byte_shift_sign(byte_list):
    sign_byte = byte_list.pop(0)
    if sign_byte == 0:
        sign = -1
    elif sign_byte == 1:
        sign = 1
    else:
        # an unknown sign byte cannot produce a meaningful value
        return None
    value = function_byte_shift(byte_list)
    return float(sign * value)
kind_field_mapping = [
[int(0xff), int(0xa1)],
[int(0xff), int(0x84)]
]
def function_kind_parse(kind_list):
    # function_byte_shift returns a float, so cast to int before indexing
    byte_kind = int(function_byte_shift(kind_list))
    kind = kind_field_mapping[byte_kind]
    return kind
def function_time_ts2sec(time):
return time
time_func_mapping = {'sec': function_time_ts2sec}
| true
|
cc9eea6bb4ff801b076b262e8a4e22bfbfa4ed33
|
Python
|
BuyankinM/JetBrainsAcademyProjects
|
/Rock-Paper-Scissors/Problems/Writing to a file immediately/task.py
|
UTF-8
| 196
| 2.921875
| 3
|
[] |
no_license
|
long_list = list(range(1000000))
file_name = "my_file.txt"
opened_file = open(file_name, 'w')
for _item in long_list:
command = "print(_item, file=opened_file, flush=True)"
opened_file.close()
| true
|
7d18c5412a5559876ba0c732b3e71006c7f5d734
|
Python
|
hzhcui/ScrapingNBA
|
/Test 1 for basketball-reference.py
|
UTF-8
| 773
| 2.84375
| 3
|
[] |
no_license
|
import urllib
from urllib2 import urlopen
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def make_soup(url):
thepage = urllib.urlopen(url)
soupdata = BeautifulSoup(thepage, "html.parser")
return soupdata
playerdatasaved=""
soup = make_soup("https://www.basketball-reference.com/players/a/")
for record in soup.find_all('tr'):
playerdata=""
for data in record.find_all(['th','td']):
playerdata=playerdata+","+data.text
if len(playerdata)!=0:
playerdatasaved=playerdatasaved+"\n"+playerdata[1:]
#header="Player, From, To, Pos, Ht, Wt, Birth Date, College"
file=open('C:/users/hzhcui/desktop/Basketball.csv',"wb")
#file.write(bytes(header))
file.write(bytes(playerdatasaved))
print(playerdatasaved)
| true
|
e481bc9df7fc980fd0c5f997a3edc159d565a47f
|
Python
|
lucasdpn/query_builder
|
/model/operations.py
|
UTF-8
| 15,477
| 2.71875
| 3
|
[] |
no_license
|
from sqlalchemy.sql import select, and_, or_
from sqlalchemy import Table, cast, Integer, func
from sqlalchemy.sql.expression import literal_column, between
from utils.db import dal
from model import sql_operations
"""
An operation represents a query that is built based on the input configuration
-params- and optionally, it can depend on intermediate tables -sub_operations-.
This way, a single operation can be used to compose many queries.
Basically, when a new operation must be written, we inherit from the
IOperation class and override the method get_statement (a commented
sketch follows below).
"""
class IOperation():
"""
Abstract class that defines the interface to create new operations.
"""
def get_statement(self, params, sub_operations):
"""
This method defines the operation. *args:
params - a dictionary that has specific information about the
operation.
sub_operations - It has a list of operations in which this new
operation depends.
It must return a SQLAlchemy select statement.
"""
raise NotImplementedError("Implement this method")
class GreatEqual(IOperation):
OPERATION = "great_equal"
def get_statement(self, params, sub_operations):
table = Table(params['db'], dal.metadata, autoload=True,
schema=params['schema_input'])
stm = select(
[table]).where(table.c.signal >= literal_column(params['value']))
return stm
class CombinedMaps(IOperation):
OPERATION = 'join'
def get_statement(self, params, sub_operations):
# load tables.
sub_tables = []
for table in sub_operations.values():
sub_tables.append(Table(table.save_at(), dal.metadata,
schema=dal.schema_output, autoload=True))
# join statement
stm = select([sub_tables[0]])
stm_join = sub_tables[0]
for i in range(1, len(sub_tables)):
stm_join = stm_join.join(sub_tables[i], sub_tables[i-1].c.pixel ==
sub_tables[i].c.pixel)
stm = stm.select_from(stm_join)
return stm
class BadRegions(IOperation):
OPERATION = "bad_regions"
def get_statement(self, params, sub_operations):
table = Table(params['db'], dal.metadata, autoload=True,
schema=params['schema_input'])
stm = select([table]).where(sql_operations.BitwiseAnd(
cast(table.c.signal, Integer),
literal_column(params['value'])) >
literal_column('0'))
return stm
class Footprint(IOperation):
OPERATION = 'footprint'
def get_statement(self, params, sub_operations):
inner_join = ["exposure_time", "depth_map", "mangle_map"]
left_join = ["bad_regions"]
inner_join_ops = []
left_join_ops = []
# divide operations accordingly
if sub_operations:
for k, v in list(sub_operations.items()):
if k in inner_join:
inner_join_ops.append(v)
elif k in left_join:
left_join_ops.append(v)
else:
raise("operations does not exist.")
# load tables.
# review from data.
table_footprint = Table(params['db'], dal.metadata,
autoload=True, schema=params['schema_input'])
sub_tables_inner = []
for table in inner_join_ops:
sub_tables_inner.append(Table(table.save_at(), dal.metadata,
autoload=True, schema=dal.schema_output))
sub_tables_left = []
for table in left_join_ops:
sub_tables_left.append(Table(table.save_at(), dal.metadata,
autoload=True, schema=dal.schema_output))
stm = select([table_footprint])
# join statement
stm_join = table_footprint
# Inner join
for table in sub_tables_inner:
stm_join = stm_join.join(table, table_footprint.c.pixel ==
table.c.pixel)
# Left Join
for table in sub_tables_left:
stm_join = stm_join.join(table, table_footprint.c.pixel ==
table.c.pixel, isouter=True)
if len(sub_tables_inner) > 0 or len(sub_tables_left) > 0:
stm = stm.select_from(stm_join)
if len(sub_tables_left) > 0:
for table in sub_tables_left:
stm = stm.where(table.c.pixel == None)
return stm
class Reduction(IOperation):
OPERATION = 'reduction'
def get_statement(self, params, sub_operations):
# load tables.
t_footprint = Table(sub_operations['footprint'].save_at(),
dal.metadata, autoload=True,
schema=dal.schema_output)
t_objects_ring = Table(params['table_coadd_objects_ring'],
dal.metadata, autoload=True,
schema=params['schema_input'])
# join statement
stm_join = t_footprint
stm_join = stm_join.join(t_objects_ring, t_footprint.c.pixel ==
t_objects_ring.c.pixel)
stm = select([t_objects_ring.c.coadd_objects_id]).\
select_from(stm_join)
return stm
class Cuts(IOperation):
OPERATION = 'cuts'
BANDS = ['g', 'r', 'i', 'z', 'y']
def get_statement(self, params, sub_operations):
t_reduction = Table(sub_operations['reduction'].save_at(),
dal.metadata, autoload=True,
schema=dal.schema_output)
t_coadd = Table(params['table_coadd_objects'], dal.metadata,
autoload=True, schema=params['schema_input'])
# join statement
stm_join = t_reduction
stm_join = stm_join.join(t_coadd, t_reduction.c.coadd_objects_id ==
t_coadd.c.coadd_objects_id)
_where = []
# cuts involving only coadd_objects_columns
# sextractor flags
if 'sextractor_bands' in params and\
'sextractor_flags' in params:
# combine_flags
queries = []
sum_flags = sum(params['sextractor_flags'])
for band in params['sextractor_bands']:
query = []
col = getattr(t_coadd.c, 'flags_%s' % band)
if 0 in params['sextractor_flags']:
query.append(col == literal_column('0'))
if sum_flags > 0:
and_op = sql_operations.BitwiseAnd(
col,
literal_column(str(sum_flags)))
query.append((and_op) > literal_column('0'))
queries.append(or_(*query))
_where.append(and_(*queries))
# bbj
if 'remove_bbj' in params['additional_cuts']:
_where.append(or_(
t_coadd.c.nepochs_g > literal_column('0'),
t_coadd.c.magerr_auto_g > literal_column('0.05'),
t_coadd.c.mag_model_i - t_coadd.c.mag_auto_i >
literal_column('-0.4')
))
# niter model
if 'niter_model' in params['additional_cuts']:
tmp = []
            for band in Cuts.BANDS:
col = getattr(t_coadd.c, 'niter_model_%s' % band)
tmp.append(col > literal_column('0'))
_where.append(and_(*tmp))
# spreaderr model
if 'spreaderr_model' in params['additional_cuts']:
tmp = []
for band in Cuts.BANDS:
col = getattr(t_coadd.c, 'spreaderr_model_%s' % band)
tmp.append(col > literal_column('0'))
_where.append(and_(*tmp))
# bad astronomic color
if 'bad_astronomic_colors' in params['additional_cuts']:
_where.append(and_(
and_(
func.abs(t_coadd.c.alphawin_j2000_g -
t_coadd.c.alphawin_j2000_i) <
literal_column('0.0003'),
func.abs(t_coadd.c.deltawin_j2000_g -
t_coadd.c.deltawin_j2000_i) <
literal_column('0.0003')
),
or_(
t_coadd.c.magerr_auto_g > literal_column('0.05')
)
))
# REVIEW: zero_point is not beeing applied. mag_auto is hardcoded.
# signal to noise cuts
if 'sn_cuts' in params:
tmp = []
for element in params['sn_cuts'].items():
band, value = element
col = getattr(t_coadd.c, 'magerr_auto_%s' % band)
tmp.append(and_(
col > literal_column('0'),
literal_column('1.086')/col >
literal_column(str(value))
))
_where.append(and_(*tmp))
# magnitude limit
if 'magnitude_limit' in params:
tmp = []
for element in params['magnitude_limit'].items():
band, value = element
col = getattr(t_coadd.c, 'mag_auto_%s' % band)
tmp.append(col < literal_column(str(value)))
_where.append(and_(*tmp))
# bright magnitude limit
if 'bright_magnitude' in params:
tmp = []
for element in params['bright_magnitude'].items():
band, value = element
col = getattr(t_coadd.c, 'mag_auto_%s' % band)
tmp.append(col > literal_column(str(value)))
_where.append(and_(*tmp))
# color cuts
if 'color_cuts' in params:
tmp = []
for element in params['color_cuts'].items():
band, value = element
col_max = getattr(t_coadd.c, 'mag_auto_%s' % band[0])
col_min = getattr(t_coadd.c, 'mag_auto_%s' % band[1])
tmp.append(between(literal_column(str(col_max - col_min)),
literal_column(str(value[0])),
literal_column(str(value[1]))))
_where.append(and_(*tmp))
stm = select([t_coadd.c.coadd_objects_id]).\
select_from(stm_join).where(and_(*_where))
return stm
class Bitmask(IOperation):
OPERATION = 'bitmask'
def get_statement(self, params, sub_operations):
sub_op = list(sub_operations.values())[0]
# load tables.
t_sub_op = Table(sub_op.save_at(), dal.metadata, autoload=True,
schema=dal.schema_output)
_where = []
# bitmask
alias_table = None
t_coadd_molygon = Table(params['table_coadd_objects_molygon'],
dal.metadata, autoload=True,
schema=params['schema_input'])
t_molygon = Table(params['table_molygon'], dal.metadata,
autoload=True, schema=params['schema_input'])
stm_join = t_sub_op
stm_join = stm_join.join(t_coadd_molygon,
t_sub_op.c.coadd_objects_id ==
t_coadd_molygon.c.coadd_objects_id)
for band in params['mangle_bitmask']:
# give the str column and retrieve the attribute.
alias_table = t_molygon.alias('molygon_%s' % band)
col = getattr(t_coadd_molygon.c, 'molygon_id_%s' % band)
stm_join = stm_join.join(alias_table,
col == alias_table.c.id)
_where.append(alias_table.c.hole_bitmask != literal_column('1'))
stm = select([t_sub_op.c.coadd_objects_id]).\
select_from(stm_join).where(and_(*_where))
return stm
class ObjectSelection(IOperation):
OPERATION = 'object_selection'
def get_statement(self, params, sub_operations):
sub_op = list(sub_operations.values())[0]
# load tables.
t_sub_op = Table(sub_op.save_at(), dal.metadata, autoload=True,
schema=dal.schema_output)
stm = select([t_sub_op.c.coadd_objects_id])
return stm
class SgSeparation(IOperation):
OPERATION = 'sg_separation'
def get_statement(self, params, sub_operations):
# load tables.
t_obj_selection = Table(sub_operations['object_selection'].save_at(),
dal.metadata, autoload=True,
schema=dal.schema_output)
t_sg = []
for table in params['tables_sg']:
t_sg.append(Table(table, dal.metadata, autoload=True,
schema=params['schema_input']))
_where = []
# join statement
stm_join = t_obj_selection
for table in t_sg:
stm_join = stm_join.join(
table, t_obj_selection.c.coadd_objects_id ==
table.c.coadd_objects_id)
col = getattr(table.c, '%s' % params['ref_band'])
_where.append(col == literal_column('0'))
stm = select([t_obj_selection.c.coadd_objects_id]).\
select_from(stm_join).where(and_(*_where))
return stm
class PhotoZ(IOperation):
OPERATION = 'photoz'
def get_statement(self, params, sub_operations):
sub_op = list(sub_operations.values())[0]
# load tables.
t_sub_op = Table(sub_op.save_at(), dal.metadata, autoload=True,
schema=dal.schema_output)
t_pz = []
for table in params['tables_zp']:
t_pz.append(Table(table, dal.metadata, autoload=True,
schema=params['schema_input']))
_where = []
# join statement
stm_join = t_sub_op
for table in t_pz:
stm_join = stm_join.join(
table, t_sub_op.c.coadd_objects_id ==
table.c.coadd_objects_id)
_where.append(and_(table.c.z_best >
literal_column(str(params['zmin'])),
table.c.z_best <
literal_column(str(params['zmax']))))
stm = select([t_sub_op.c.coadd_objects_id]).\
select_from(stm_join).where(and_(*_where))
return stm
class GalaxyProperties(IOperation):
OPERATION = 'galaxy_properties'
def get_statement(self, params, sub_operations):
sub_op = list(sub_operations.values())[0]
# load tables.
t_sub_op = Table(sub_op.save_at(), dal.metadata, autoload=True,
schema=dal.schema_output)
t_gp = []
for table in params['tables_gp']:
t_gp.append(Table(table, dal.metadata, autoload=True,
schema=params['schema_input']))
# join statement
stm_join = t_sub_op
for table in t_gp:
stm_join = stm_join.join(
table, t_sub_op.c.coadd_objects_id == table.c.coadd_objects_id)
stm = select([t_sub_op.c.coadd_objects_id]).\
select_from(stm_join)
return stm
| true
|
e3c759c6c4cdde58c5b8cde67bebfb8c388e4b8c
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03402/s814954157.py
|
UTF-8
| 324
| 3.171875
| 3
|
[] |
no_license
|
import sys
input = sys.stdin.readline
A,B=map(int,input().split())
a=[["#"]*100 for i in range(50)]
b=[["."]*100 for i in range(50)]
for i in range(A-1):
a[2*(i//50)][2*(i%50)]="."
for i in range(B-1):
b[2*(i//50)+1][2*(i%50)]="#"
print(100,100)
for i in a:
print("".join(i))
for i in b:
print("".join(i))
| true
|
a85fccbb18a4d5a2592c1d98793b460ef465d873
|
Python
|
Cyxapic/arcade
|
/core/parts/commons/miniature.py
|
UTF-8
| 919
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
from abc import ABC, abstractmethod
from pygame import image
class Miniature(ABC):
""" Parent class for menu, gameover etc
Arguments:
screen -- Main display surface
image_file -- image file path
"""
def __init__(self, screen, image_file):
self.screen = screen
        self.width, self.height = screen.get_size()
self._img = image.load(image_file)
self._get_rect()
self._titles = self._create_titles()
def _get_rect(self):
self._rect = self._img.get_rect()
self._rect.x = self.width // 2 - self._rect.width // 2
self._rect.y = 50
@abstractmethod
def _create_titles(self):
""" Render titles method
must return tuple
((Surface, dest), ... )
"""
def run(self):
"""Entry point method"""
self.screen.blits(((self._img, self._rect), *self._titles))
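# Illustrative sketch only: a minimal concrete subclass. The font, title text
# and destination coordinates below are assumptions, not part of this module.
#
# from pygame import font
#
# class GameOverScreen(Miniature):
#     def _create_titles(self):
#         font.init()
#         title = font.SysFont(None, 48).render('GAME OVER', True, (255, 255, 255))
#         dest = (self.width // 2 - title.get_width() // 2, 200)
#         return ((title, dest),)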
| true
|
be02294595732e464de715633722a3dba33a3841
|
Python
|
pbudzyns/BigDataITMO2018
|
/SparkTask.py
|
UTF-8
| 15,161
| 2.828125
| 3
|
[] |
no_license
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.functions import collect_set, array_contains, col, max, mean, desc, sum
from pyspark.sql.types import ArrayType
import os
os.environ["PYSPARK_PYTHON"] = "/home/pawel/PycharmProjects/HPC/venv/bin/python3.5"
os.environ["PYSPARK_DRIVER_PYTHON"] = "/home/pawel/PycharmProjects/HPC/venv/bin/python3.5"
"""
Process data and build user profile vector with the following characteristics:
1) count of comments, posts (all), original posts, reposts and likes made by user
2) count of friends, groups, followers
3) count of videos, audios, photos, gifts
4) count of "incoming" (made by other users) comments, max and mean "incoming" comments per post
5) count of "incoming" likes, max and mean "incoming" likes per post
6) count of geo tagged posts
7) count of open / closed (e.g. private) groups a user participates in
Medium:
1) count of reposts from subscribed and not-subscribed groups
2) count of deleted users in friends and followers
3) aggregate (e.g. count, max, mean) characteristics for comments and likes (separately) made by (a) friends
and (b) followers per post
4) aggregate (e.g. count, max, mean) characteristics for comments and likes (separately) made by (a) friends
and (b) followers per user
5) find emoji (separately, count of: all, negative, positive, others) in (a) user's posts (b) user's comments
"""
class SparkTask:
def __init__(self):
"""
Configuration for Spark:
master: address to Master node or local
path: path to folder with *.parquet files
"""
self.path = "/home/pawel/bd_parquets/"
self.master = "local"
self.app_name = "Spark Task"
self.spark = SparkSession.builder \
.master(self.master) \
.appName(self.app_name) \
.getOrCreate()
def read_parquet_file(self, filename):
return self.spark.read.parquet(self.path + filename)
def task1a(self):
"""1) count of comments, posts (all), original posts, reposts and likes made by user"""
user_wall_likes = self.read_parquet_file("userWallLikes.parquet")
user_wall_posts = self.read_parquet_file("userWallPosts.parquet")
user_wall_comments = self.read_parquet_file("userWallComments.parquet")
likes_count = user_wall_likes \
.groupBy('likerId') \
.count() \
.withColumnRenamed('likerId', 'UserId') \
.withColumnRenamed('count', 'likes')
posts_count = user_wall_posts \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'posts(all)')
original_posts_count = user_wall_posts \
.filter(user_wall_posts['is_reposted'] == 'false') \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'original_posts')
reposts_count = user_wall_posts \
.filter(user_wall_posts['is_reposted'] == 'true') \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'reposts')
comments_cout = user_wall_comments \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'comments')
final_table = comments_cout \
.join(posts_count, 'UserId') \
.join(original_posts_count, 'UserId') \
.join(reposts_count, 'UserId') \
.join(likes_count, 'UserId')
return final_table
def task2a(self):
"""2) count of friends, groups, followers"""
followers = self.read_parquet_file("followers.parquet")
friends = self.read_parquet_file("friends.parquet")
groupsSubs = self.read_parquet_file("userGroupsSubs.parquet")
friends_count = friends \
.groupBy('profile') \
.count() \
.withColumnRenamed('profile', 'UserId') \
.withColumnRenamed('count', 'friends')
groups_count = groupsSubs \
.groupBy('user') \
.count() \
.withColumnRenamed('user', 'UserId') \
.withColumnRenamed('count', 'groups')
followers_count = followers \
.groupBy('profile') \
.count() \
.withColumnRenamed('profile', 'UserId') \
.withColumnRenamed('count', 'followers')
result_table = friends_count.join(groups_count, 'UserId').join(followers_count, 'UserId')
return result_table
def task3a(self):
"""3) count of videos, audios, photos, gifts"""
friends_profiles = self.read_parquet_file("followerProfiles.parquet")
result_table = friends_profiles \
.filter(friends_profiles.counters.isNotNull()) \
.select(friends_profiles.id.alias("UserId"),
friends_profiles.counters.videos.alias("videos"),
friends_profiles.counters.audios.alias("audios"),
friends_profiles.counters.photos.alias("photos"),
friends_profiles.counters.gifts.alias("gifts"))
return result_table
def task4a(self):
"""4) count of "incoming" (made by other users) comments, max and mean "incoming" comments per post"""
user_wall_comments = self.read_parquet_file("userWallComments.parquet")
comments_by_post_count = user_wall_comments \
.filter(user_wall_comments['from_id'] != user_wall_comments['post_owner_id']) \
.select('post_id', 'post_owner_id') \
.groupBy('post_id') \
.count()
comment_to_user = user_wall_comments \
.filter(user_wall_comments['from_id'] != user_wall_comments['post_owner_id']) \
.select('post_id', 'post_owner_id') \
.dropDuplicates()
result_table = comment_to_user\
.join(comments_by_post_count, "post_id")\
.groupBy("post_owner_id").agg(sum("count"), max("count"), mean("count"))
return result_table
def task5a(self):
"""5) count of "incoming" likes, max and mean "incoming" likes per post"""
userWallLikes = self.read_parquet_file("userWallLikes.parquet")
likes_per_post = userWallLikes \
.filter(userWallLikes['ownerId'] != userWallLikes['likerId']) \
.groupBy('itemId') \
.count()
post_to_user = userWallLikes \
.filter(userWallLikes['ownerId'] != userWallLikes['likerId']) \
.select('itemId', 'ownerId') \
.dropDuplicates()
result_table = post_to_user\
.join(likes_per_post, 'itemId')\
.groupBy('ownerId')\
.agg(sum('count'), max('count'), mean('count'))
return result_table
def task6a(self):
"""6) count of geo tagged posts"""
userWallPosts = self.read_parquet_file("userWallPosts.parquet")
geo_tagged_posts_count = userWallPosts \
.filter(userWallPosts['geo.coordinates'] != 'null') \
.groupBy('owner_id') \
.count() \
            .withColumnRenamed('owner_id', 'UserId') \
            .withColumnRenamed('count', 'geo_tagged_posts')
result_table = geo_tagged_posts_count
return result_table
def task7a(self):
"""7) count of open / closed (e.g. private) groups a user participates in"""
groupsProfiles = self.read_parquet_file("groupsProfiles.parquet")
userGroupsSubs = self.read_parquet_file("userGroupsSubs.parquet")
invert_id = UserDefinedFunction(lambda x: -int(x))
user_to_group = userGroupsSubs \
.select("user", invert_id("group")) \
.withColumnRenamed("<lambda>(group)", "group")\
.dropDuplicates()
group_type = groupsProfiles\
.select("id", "is_closed")\
.withColumnRenamed("id", "group")\
.dropDuplicates()
user_to_group_type = user_to_group\
.join(group_type, "group")\
opened_groups = user_to_group_type\
.filter(user_to_group_type['is_closed'] == 0)\
.groupBy("user")\
.count()\
.withColumnRenamed("count", "opened")
closed_groups = user_to_group_type\
.filter(user_to_group_type['is_closed'] > 0)\
.groupBy("user")\
.count()\
.withColumnRenamed("count", "closed")
result_table = opened_groups\
.join(closed_groups, "user", how="full_outer")\
.fillna(0)
return result_table
def task1b(self):
"""1) count of reposts from subscribed and not-subscribed groups"""
userWallPosts = self.read_parquet_file("userWallPosts.parquet")
userGroupsSubs = self.read_parquet_file("userGroupsSubs.parquet")
reposts_t = userWallPosts \
.filter(userWallPosts.is_reposted) \
.select('owner_id', 'repost_info.orig_owner_id')\
.withColumnRenamed("owner_id", "user")
reposts = reposts_t.filter(reposts_t["orig_owner_id"] < 0)
user_to_group_sub = userGroupsSubs\
.select("user", "group")\
.groupBy("user")\
.agg(collect_set("group"))\
.withColumnRenamed("collect_set(group)", "groups")
def contains(id, groups):
if not groups:
return False
if str(id) in groups:
return True
else:
return False
contains_udf = UserDefinedFunction(contains)
temp = reposts.join(user_to_group_sub, "user", how="left_outer")
reposts_from = temp\
.withColumn("from_subscribed", contains_udf(temp.orig_owner_id, temp.groups))
reposts_from_subscribed = reposts_from\
.filter(reposts_from.from_subscribed == 'true')\
.select('user')\
.groupBy('user')\
.count()\
.withColumnRenamed("count", "from_subscribed")
reposts_not_from_subscribed = reposts_from \
.filter(reposts_from['from_subscribed'] == 'false') \
.select('user')\
.groupBy("user")\
.count()\
.withColumnRenamed("count", "not_from_subscribed")
result_table = reposts_from_subscribed\
.join(reposts_not_from_subscribed, 'user', how="full_outer")\
.fillna(0)
return result_table
def task2b(self):
"""2) count of deleted users in friends and followers"""
friends = self.read_parquet_file("friends.parquet")
followers = self.read_parquet_file("followers.parquet")
friendsProfiles = self.read_parquet_file("friendsProfiles.parquet")
followersProfiles = self.read_parquet_file("followerProfiles.parquet")
deleted_friends_profiles = friendsProfiles\
.filter(friendsProfiles.deactivated == "deleted")\
.select("id", "deactivated")\
.withColumnRenamed("id", "follower")
deleted_follower_profiles = followersProfiles\
.filter(followersProfiles.deactivated == "deleted")\
.select("id", "deactivated")\
.withColumnRenamed("id", "follower")
deleted_friends = friends\
.join(deleted_friends_profiles, "follower", how="inner")\
.select('profile', 'deactivated')\
.dropDuplicates()\
.groupBy('profile')\
.count()\
.withColumnRenamed('count', 'deleted_fiends_acc')
deleted_followers = followers\
.join(deleted_follower_profiles, "follower", how="inner")\
.select("profile", "deactivated")\
.dropDuplicates()\
.groupBy("profile")\
.count()\
.withColumnRenamed("count", "deleted_followers_acc")
result_table = deleted_friends\
.join(deleted_followers, "profile", how="full_outer")\
.fillna(0)
return result_table
def task3b(self):
"""3) aggregate (e.g. count, max, mean) characteristics for comments and likes (separtely) made by (a) friends
and (b) followers per post"""
pass
def task4b_friends(self):
"""3) aggregate (e.g. count, max, mean) characteristics for comments and likes (separtely) made by (a) friends
and (b) followers per user"""
friends = self.read_parquet_file("friends.parquet")
# userWallPosts = self.read_parquet_file("userWallPosts.parquet")
userWallComments = self.read_parquet_file("userWallComments.parquet")
userWallLikes = self.read_parquet_file("userWallLikes.parquet")
user_friends = friends\
.groupBy("profile")\
.agg(collect_set("follower"))\
.withColumnRenamed("collect_set(follower)", "friends")\
.select("profile", "friends")
comments = userWallComments.select("post_owner_id", "from_id", "post_id")
def contains(id, groups):
if not groups:
return False
if str(id) in groups:
return True
else:
return False
contains_udf = UserDefinedFunction(contains)
post_comment_to_relation = comments\
.withColumnRenamed("post_owner_id", "profile")\
.join(user_friends, "profile", how="left_outer")\
.withColumn("is_from_friend", contains_udf(col("from_id"), col("friends")))\
.select("profile", "is_from_friend", "post_id")\
.filter(col("is_from_friend") == "true")\
comments_from_friends_per_post = post_comment_to_relation\
.groupBy("post_id")\
.count()
result_table = post_comment_to_relation\
.select("profile", "post_id")\
.join(comments_from_friends_per_post, "post_id")\
.groupBy("profile")\
.agg(max("count"), mean("count"), sum("count"))\
.sort(desc("sum(count)"))
result_table.show()
def task4b_followers(self):
followers = self.read_parquet_file("followers.parquet")
user_followers = followers\
.groupBy("profile")\
.agg(collect_set("follower"))\
.withColumnRenamed("collect_set(follower)", "followers")\
.select("profile", "followers")
def task5b(self):
"""5) find emoji (separately, count of: all, negative, positive, others) in
(a) user's posts (b) user's comments """
pass
if __name__ == "__main__":
spark = SparkTask()
# spark.task1a().show()
# spark.task2a().show()
# spark.task3a().show()
spark.task4a().show()
# spark.task5a().show()
# spark.task6a().show()
# spark.task7a().show()
# spark.task1b().show()
# spark.task2b().show()
# spark.task4b_friends()
# print(res.show())
| true
|
3a5f5a8a5084b16341f89ab254c224c11149da94
|
Python
|
QuantEcon/QuantEcon.py
|
/quantecon/optimize/tests/test_lcp_lemke.py
|
UTF-8
| 3,218
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
"""
Tests for lcp_lemke
"""
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
from quantecon.optimize import lcp_lemke
def _assert_ray_termination(res):
# res: lcp result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 2, "failed to report ray termination status")
def _assert_success(res, M, q, desired_z=None, rtol=1e-15, atol=1e-15):
if not res.success:
msg = "lcp_lemke status {0}".format(res.status)
raise AssertionError(msg)
assert_equal(res.status, 0)
if desired_z is not None:
assert_allclose(res.z, desired_z,
err_msg="converged to an unexpected solution",
rtol=rtol, atol=atol)
assert_((res.z >= -atol).all())
w = M @ res.z + q
assert_((w >= -atol).all())
assert_allclose(w * res.z, np.zeros_like(res.z), rtol=rtol, atol=atol)
class TestLCPLemke:
def test_Murty_Ex_2_8(self):
M = [[1, -1, -1, -1],
[-1, 1, -1, -1],
[1, 1, 2, 0],
[1, 1, 0, 2]]
q = [3, 5, -9, -5]
M, q = map(np.asarray, [M, q])
res = lcp_lemke(M, q)
_assert_success(res, M, q)
def test_Murty_Ex_2_9(self):
M = [[-1, 0, -3],
[1, -2, -5],
[-2, -1, -2]]
q = [-3, -2, -1]
M, q = map(np.asarray, [M, q])
res = lcp_lemke(M, q)
_assert_ray_termination(res)
def test_Kostreva_Ex_1(self):
# Cycling without careful tie breaking
M = [[1, 2, 0],
[0, 1, 2],
[2, 0, 1]]
q = [-1, -1, -1]
M, q = map(np.asarray, [M, q])
res = lcp_lemke(M, q)
_assert_success(res, M, q)
def test_Kostreva_Ex_2(self):
# Cycling without careful tie breaking
M = [[1, -1, 3],
[2, -1, 3],
[-1, -2, 0]]
q = [-1, -1, -1]
M, q = map(np.asarray, [M, q])
res = lcp_lemke(M, q)
_assert_ray_termination(res)
def test_Murty_Ex_2_11(self):
M = [[-1.5, 2],
[-4, 4]]
q = [-5, 17]
d = [5., 16.]
M, q, d = map(np.asarray, [M, q, d])
res = lcp_lemke(M, q, d=d)
_assert_ray_termination(res)
res = lcp_lemke(M, q, d=np.ones_like(d))
_assert_success(res, M, q, atol=1e-13)
def test_bimatrix_game(self):
A = [[3, 3],
[2, 5],
[0, 6]]
B = [[3, 2, 3],
[2, 6, 1]]
A, B = map(np.asarray, [A, B])
m, n = A.shape
I = np.cumsum([0, m, n, m, m, n, n])
M = np.zeros((3*m+3*n, 3*m+3*n))
M[I[0]:I[1], I[1]:I[2]] = -A + A.max()
M[I[0]:I[1], I[2]:I[3]], M[I[0]:I[1], I[3]:I[4]] = 1, -1
M[I[1]:I[2], I[0]:I[1]] = -B + B.max()
M[I[1]:I[2], I[4]:I[5]], M[I[1]:I[2], I[5]:I[6]] = 1, -1
M[I[2]:I[3], I[0]:I[1]], M[I[3]:I[4], I[0]:I[1]] = -1, 1
M[I[4]:I[5], I[1]:I[2]], M[I[5]:I[6], I[1]:I[2]] = -1, 1
q = np.zeros(3*m+3*n)
q[I[2]:I[3]], q[I[3]:I[4]] = 1, -1
q[I[4]:I[5]], q[I[5]:I[6]] = 1, -1
res = lcp_lemke(M, q)
_assert_success(res, M, q)
| true
|
275229f22a2979a0c26aab289d6dedd4a7af5998
|
Python
|
HyunSeungBum/sbhyun_python_lib
|
/sbhyun_utils.py
|
UTF-8
| 5,909
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/local/python2.7/bin/python
# -*- coding: UTF-8 -*-
''' Useful function packages '''
__author__ = "Seung-Bum Hyun <orion203@gmail.com>"
__date__ = "27 March 2012"
__version__ = "0.1"
__License__ = "GPL"
import os
import time
import sys
import socket
import fcntl
import struct
import locale
import logging
def GetIpAddress(ifname):
'''get the IP address associated with a network interface (linux only)
http://code.activestate.com/recipes/439094'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def GetHwAddress(ifname):
'''get the Network Mac Address associated with a network interface (linux only)
http://code.activestate.com/recipes/439094'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
def GetHostname():
'''get hostname '''
return os.uname()[1]
''' GetHumanReadableSize lambda function '''
GetHumanReadableSize = lambda s: [(s % 1024 ** i and "%.1f" % (s / 1024.0 ** i) or str(s / 1024 ** i)) + x.strip()
for i, x in enumerate(' KMGTPEZY') if s < 1024 ** (i + 1) or i == 8][0]
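# e.g. GetHumanReadableSize(273675342365) -> '254.9G' (the caller below appends 'iB')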
def human_num(num, divisor=1, power=""):
"""Convert a number for human consumption
http://www.pixelbeat.org/scripts/human.py"""
locale.setlocale(locale.LC_ALL, '')
num = float(num)
if divisor == 1:
return locale.format("%ld", int(num), 1)
elif divisor == 1000:
powers = [" ", "K", "M", "G", "T", "P"]
elif divisor == 1024:
powers = [" ", "Ki", "Mi", "Gi", "Ti", "Pi"]
else:
raise ValueError("Invalid divisor")
if not power:
power = powers[0]
while num >= 1000: # 4 digits
num /= divisor
power = powers[powers.index(power) + 1]
if power.strip():
return "%6.1f%s" % (num, power)
else:
return "%4ld %s" % (num, power)
def GetLoadAverage():
"""get LoadAverage """
load = 'load average: %.2f %.2f %.2f' % os.getloadavg()
return load
def logs(logdir=None):
""" Cumtomizing logging """
if logdir is None:
logdir = os.environ['HOME'] + '/logs/' + time.strftime("%Y") + '/' + time.strftime("%m%d")
if os.path.exists(logdir) is False:
os.makedirs(logdir, 0755)
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
filename = os.path.basename(sys.argv[0])
    # create file handler which logs info messages and above
fh = logging.FileHandler(logdir + '/' + filename + '_' + time.strftime("%Y%m%d%H%M%S") + '.log')
fh.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.FileHandler(logdir + '/' + filename + '_' + time.strftime("%Y%m%d%H%M%S") + '_error.log')
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('[%(asctime)s] - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
return logger
""" require python 2.7 above"""
from collections import namedtuple
def DiskPartitions(all=False):
"""Return all mountd partitions as a nameduple.
If all == False return phyisical partitions only."""
phydevs = []
disk_ntuple = namedtuple('partition', 'device mountpoint fstype')
f = open("/proc/filesystems", "r")
for line in f:
if not line.startswith("nodev"):
phydevs.append(line.strip())
retlist = []
f = open('/etc/mtab', "r")
for line in f:
if not all and line.startswith('none'):
continue
fields = line.split()
device = fields[0]
mountpoint = fields[1]
fstype = fields[2]
if not all and fstype not in phydevs:
continue
if device == 'none':
device = ''
ntuple = disk_ntuple(device, mountpoint, fstype)
retlist.append(ntuple)
return retlist
def DiskUsage(path):
"""Return disk usage associated with path."""
usage_ntuple = namedtuple('usage', 'total used free percent')
st = os.statvfs(path)
free = (st.f_bavail * st.f_frsize)
total = (st.f_blocks * st.f_frsize)
used = (st.f_blocks - st.f_bfree) * st.f_frsize
try:
        percent = (float(used) / total) * 100
except ZeroDivisionError:
percent = 0
# NB: the percentage is -5% than what shown by df due to
# reserved blocks that we are currently not considering:
# http://goo.gl/sWGbH
return usage_ntuple(total, used, free, round(percent, 1))
if __name__ == '__main__':
# get Ip, Mac Address for Network Interface Card
print GetIpAddress('eth0')
print GetHwAddress('eth0')
for part in DiskPartitions():
print part
print " %s\n" % str(DiskUsage(part.mountpoint))
print GetHumanReadableSize(273675342365) + 'iB'
print GetLoadAverage()
print human_num(378682763, 1024) + 'B'
log = logs()
log.error('Error: asd')
log.info('INFO: ad')
| true
|
ae08da179786ac815f8d5fbf495990f85574c185
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02582/s956413300.py
|
UTF-8
| 133
| 3.171875
| 3
|
[] |
no_license
|
s = input()
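# find the longest run of consecutive 'R' (rainy) days in the 3-character string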
cnt = 0
ans = [0]
for i in range(3):
if s[i] == 'R':
cnt+=1
ans.append(cnt)
else:
cnt = 0
print(max(ans))
| true
|
af43a3ccfc523ffee7e0f818b9c24e6f63f989aa
|
Python
|
robdunn220/List_Exercises
|
/sum_num_list.py
|
UTF-8
| 92
| 3.375
| 3
|
[] |
no_license
|
numbers = [1, 2, 3, 4, 5, 6]
num_sum = 0
for x in numbers:
num_sum += x
print num_sum
| true
|
c9de04b35b67d01a9ddde260d8e103195fc33de8
|
Python
|
xiaodongdreams/Random-Forest
|
/DataClean.py
|
UTF-8
| 908
| 2.6875
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import csv
import pandas as pd
import scipy as sp
from sklearn.preprocessing import *
'''
dataSet=[]
with open('all.csv', 'r') as file:
csvReader = csv.reader(file)
for line in csvReader:
dataSet.append(line)
#print dataSet
Data=np.mat(dataSet)
Data=Data.T
m,n=np.shape(Data)
print m,n
#Data[2]=Data[2]*10
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
nums=np.arange(0,n,step=1)
nums=np.mat(nums)
print np.shape(nums.T)
print np.shape(Data[0].T)
ax.plot(nums.T,Data[1].T,label="CO")
ax.plot(nums.T,Data[3].T,label="HC")
ax.plot(nums.T,Data[5].T,label="NO")
ax.set_xlabel("numbers")
ax.set_ylabel("")
ax.legend(loc="upper left")
plt.suptitle("Exhaust")
plt.show()
#plt.savefig("Exhaust.jpg")
'''
dataSet=pd.read_csv('all.csv')
dataSet=np.mat(dataSet)
m,n=dataSet.shape
print m,n
for i in range(n):
print i,sp.sum(sp.isnan(dataSet[:,i]))
| true
|
852d1dce8e8552f5f0cdd93065a8c92a8ca3fc54
|
Python
|
ScienceMan117/BabyPython
|
/Chapter_10/addition.py
|
UTF-8
| 531
| 4.0625
| 4
|
[] |
no_license
|
import math
while True:
# Allows user to input values to be added together
try:
first_number = input ("Enter the first number: ")
if first_number == 'q':
break
second_number = input ("\nEnter the second number: ")
if second_number == 'q':
break
addition = int(first_number) + int(second_number)
    # Handles inputs that cannot be converted to integers
except Exception as x:
print("Get your shit together!")
else:
print(addition)
| true
|
9c317b83451deedee4d80a4f11a03423039d067a
|
Python
|
winstonplaysfetch/binf2111-woo-python
|
/LotteryGenerator.py
|
UTF-8
| 229
| 3.34375
| 3
|
[] |
no_license
|
#! /usr/bin/env python
import random
def lottery():
for i in xrange(6):
yield random.randint(1,40)
yield random.randint(1,15)
for random_number in lottery():
print "Next lottery number: %d" %random_number
| true
|
5b40405315171a3d46d7def37ef8edb99022bb69
|
Python
|
Deepakdv15/Selenium10
|
/Prectice/square_test.py
|
UTF-8
| 352
| 2.890625
| 3
|
[] |
no_license
|
import math
import pytest
@pytest.mark.great
def test_sqr():
assert 5==math.sqrt(25)
@pytest.mark.great
def testequal():
assert 7*7==40
@pytest.mark.great
def test_num():
assert 10==11
@pytest.mark.others
def test_greater_number():
num=20
assert 21>num
@pytest.mark.others
def test_less_number():
num=20
assert 12<num
| true
|
a25fd25bbb217f6e7aaed9ddc913519f900401d2
|
Python
|
ritchie46/concatPDF
|
/test/test.py
|
UTF-8
| 800
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from concatPDF.builder import Build, str_to_flt, natural_keys
class TestFileOrder(unittest.TestCase):
def test_str_to_flt(self):
self.assertEqual(str_to_flt("2.20"), 2.2)
self.assertEqual(str_to_flt("2.2.0"), "2.2.0")
self.assertEqual(natural_keys("1.1_you"), [1.1, "_you"])
self.assertEqual(natural_keys("1_1_you"), [1, '_', 1, "_you"])
self.assertEqual(natural_keys("_1.1_1foo2"), ['_', 1.1, '_', 1.0, 'foo', 2.0])
def test_file_order(self):
b = Build("./test/source/config.ini")
self.assertEqual(b._def_path_order(".txt"), ['./test/source/1_file.txt', './test/source/1.3-file.txt',
'./test/source/1.3_file.txt'])
if __name__ == "__main__":
unittest.main()
| true
|
54b2621d608942bb715acd8332e7f478f1121cfb
|
Python
|
nnnazek/ICT
|
/ict/19.py
|
UTF-8
| 279
| 3.703125
| 4
|
[] |
no_license
|
import math
height = float(input("Please enter a height from which an object is dropped from in meters: "))
acceleration = 9.8
finalVelocity = math.sqrt(2*acceleration*height)
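# worked check: for a 10 m drop, sqrt(2 * 9.8 * 10) = sqrt(196.0) = 14.0 m/s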
print("The final velocity when the object hits the ground is {}m/s^2.".format(finalVelocity))
| true
|
7b948fe8e21adb9a023cff0b5fca18d10fdfe76b
|
Python
|
roije/portal_scraper
|
/portal.py
|
UTF-8
| 9,079
| 2.828125
| 3
|
[] |
no_license
|
import requests
import hashlib
from config import Config
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from hashchecker import HashChecker
from db import DatabaseConnector
import datetime
class PortalScraper():
def __init__(self, person, init_page):
self.person = person
self.init_soup = self.request_init_page(init_page)
self.db = DatabaseConnector(Config.DATABASE)
def request_init_page(self, init_page):
# request portal.fo
res = requests.get(init_page)
#read text and using bs4 html parser
soup = BeautifulSoup(res.text, 'html.parser')
return soup
def get_person_article_links(self):
"""
        Reads through the init page (http://portal.fo/seinastu+vidmerkingarnar.html)
        and gets every comment by the search person, storing the link to the article
        in which the comment was made in a set.
Returns: Set if page is changed, and None if nothing has changed.
"""
comment_items = self.init_soup.find_all("div", class_="comment_item")
hash_checker = HashChecker()
hash_checker.generate_hash(comment_items)
# Will compare with the hash saved in prev_hash.txt
page_has_changed = hash_checker.hash_compare()
if page_has_changed:
hash_checker.save_new_hash()
search_person_article_links = set()
for comment in comment_items:
commenter_name = comment.find(class_="comment_profilename").text
if(commenter_name == self.person):
search_person_article_links.add(comment.parent.get('href'))
return search_person_article_links
else:
return None
def scrape_articles(self, articles):
# test = 0
for article in articles:
# test_file = "test-" + str(test) + ".txt"
comment_section_soup = self.get_comment_section(article)
self.extract_comment_data(comment_section_soup, article)
'''
with open(test_file, "w", encoding="utf-8") as fo:
fo.write(str(comment_section_soup.prettify()))
test = test + 1
'''
def get_comment_section(self, article):
"""
-- This method is only meant to be used in this file --
The Facebook Comments Plugin is loaded with Javascript, so we can't use the
request module to read the articles, because it only gets static server HTML.
This method uses Selenium, so we can wait for the plugin to have been loaded
Returns: Soup for each article comment section (BeautifulSoup object)
"""
driver = Config.get_driver()
driver.get(article)
timeout = 10
try:
# First we have to wait until the page is fully loaded. Using selenium and WebDriverWait to do that
# Facebook Comments plugin is loaded via Javascript, so we cant use the request module to simply read the page
element_present = EC.presence_of_element_located((By.CLASS_NAME, 'fb_iframe_widget'))
WebDriverWait(driver, timeout).until(element_present)
# wait for fb_iframe_widget_loader to disappear
self.wait_until_disappeared(driver, 'fb_iframe_widget_loader')
# Now the Facebook plugin has been loaded
# First get innerHTML of the page and use BeautifulSoup HTML parser so that we can work with it
innerHTML = driver.execute_script("return document.body.innerHTML") #returns the inner HTML as a string
soup_comments = BeautifulSoup(innerHTML, 'html.parser')
# This is the Facebook comments plugin which is an iframe
facebook_plugin_iframe = soup_comments.find('iframe', class_="fb_ltr")
frame_id = facebook_plugin_iframe.get('id')
# Because we need to work with another iframe, we need to change the frame
# First set the current frame of the driver to the default
# Then switch to iframe with the id we got from the Facebook comments plugin (line 29)
# Then get innerHTML of the iframe and use BeautifulSoup so that we can work with it
driver.switch_to_default_content()
driver.switch_to.frame(frame_id)
self.press_load_more_comments_if_present(driver)
self.press_open_replies_if_present(driver)
iframe_innerhtml = driver.execute_script("return document.body.innerHTML") #returns the inner HTML as a string
iframe_soup = BeautifulSoup(iframe_innerhtml, 'html.parser')
return iframe_soup
except TimeoutException:
print("Timed out waiting for page to load")
def wait_until_disappeared(self, driver ,element):
timeout = 10
try:
element = WebDriverWait(driver, timeout).until(EC.invisibility_of_element_located((By.CLASS_NAME, element)))
except TimeoutException:
print("Timed out waiting for element to disappear")
def press_load_more_comments_if_present(self, driver):
load_more_buttons = driver.find_elements_by_xpath("//*[contains(text(), 'more comments')]")
for load_button in load_more_buttons:
            # Scroll the button into view before clicking it
driver.execute_script("arguments[0].scrollIntoView();", load_button)
load_button.click()
    def press_open_replies_if_present(self, driver):
"""
-- This method is only meant to be used in this file --
"""
span_show_more_replies = driver.find_elements_by_xpath("//*[contains(text(), 'more replies in this thread') or contains(text(), 'more reply in this thread')]")
for span_tag in span_show_more_replies:
# Navigate one level up to the anchor tag
anchor_clickable = span_tag.find_element_by_xpath('..')
driver.execute_script("arguments[0].scrollIntoView();", anchor_clickable)
anchor_clickable.click()
# Wait until all loading spans are gone.
# The presence of them means that the plugin is loading the comments
timeout = 10
try:
element = WebDriverWait(driver, timeout).until(EC.invisibility_of_element_located((By.XPATH, "//span[@aria-valuetext='Loading...']")))
except TimeoutException:
print("Timed out waiting for element to disappear")
def extract_comment_data(self, comment_section_soup, article):
comment_divs = comment_section_soup.find_all(class_='UFICommentActorName')
for comment_div in comment_divs:
# Get commenter name and compare it with the person we are searching for
commenter_name = comment_div.text
if(commenter_name == self.person):
print('This is ', self.person)
person_dict = {}
# Traverse to parent span, so that we can traverse to the other divs from here
# PARENT
parent_span = comment_div.parent
# GO TO TOP SIBLING OF PARENT
# Go to the next sibling of the parent span. This is where the comment is located
comment_sibling_div = parent_span.find_next_sibling()
# print(comment_sibling_div)
comment_text = comment_sibling_div.text
# GO TO TOP SIBLING OF COMMENT_SIBLING
# Div that contains lin to comment and time of comment
like_time_sibling_div = comment_sibling_div.find_next_sibling()
# print('Hey', like_time_sibling_div.prettify())
# Check if the i tag exists. Then there are likes
likes = ''
for child in like_time_sibling_div.children:
itag = child.find('i')
if itag:
likes = child.text
comment_utime = like_time_sibling_div.find("abbr", { "class" : "UFISutroCommentTimestamp"}).get('data-utime')
                comment_timestamp = self.utime_to_timestamp(comment_utime)
person_dict['name'] = commenter_name
person_dict['text'] = comment_text
person_dict['article'] = article
person_dict['likes'] = likes
person_dict['comment_timestamp'] = comment_timestamp
self.db.insert_comment(person_dict)
    def utime_to_timestamp(self, utime):
return datetime.datetime.fromtimestamp(int(utime)).strftime('%Y-%m-%d %H:%M:%S')
def __repr__(self):
return "Search person: %s" % (self.init_soup)
| true
|
dea484075682bce5707bc3cb61eef2c2d6bcfb72
|
Python
|
varun3108/Agent-Selector-based-on-issue
|
/AgentSelect.py
|
UTF-8
| 5,430
| 3.203125
| 3
|
[] |
no_license
|
import datetime
import random
agent_list= [1001, True, datetime.time(8, 0), 'Support', 1002, True, datetime.time(10, 0), 'Sales', 1003, True, datetime.time(11, 0), 'Spanish speaker', 1004, True, datetime.time(12, 0), 'Sales', 1005, True, datetime.time(11, 0), 'Support', 1006, True, datetime.time(12, 0), 'Spanish speaker', 1007, True, datetime.time(15, 0), 'Sales', 1008, True, datetime.time(16, 0), 'Spanish speaker', 1009, True, datetime.time(9, 0), 'Sales', 1010, True, datetime.time(16, 0), 'Support']
roles = ['Support', 'Sales', 'Spanish speaker']
"""
LIST OF AGENTS
The following code is to enter details of agents to create an agent list.
It also takes into consideration the types of roles by making a list of roles.
AGENT LIST CREATION (OPTIONAL)
i = 'Y'
agent_list = []
roles = []
while i == 'Y':
add_detail = input('Do you want to enter agent details (Y/N):')
if add_detail == 'Y':
agent_id = int(input('Agent ID:'))
        is_available = input('Agent Availability (True/False):') == 'True'  # bool() on a non-empty string is always True
available = input('specify time since agent is available in HH,MM format:')
available = available.split(',')
available_since = datetime.time(int(available[0]),int(available[1]))
role = str(input('Enter the Role:'))
agent_list.append(agent_id)
agent_list.append(is_available)
agent_list.append(available_since)
agent_list.append(role)
if role not in roles:
roles.append(role)
else:
break
"""
issues = [9001, 'Technical problems with the platform', datetime.time(13, 38), 'Support', 9002, 'Want to know about features in spanish language.', datetime.time(8, 49), 'Sales,Spanish speaker', 9003, 'Want an AI chatbot for my website', datetime.time(11, 39), 'Sales,Support', 9004, 'Want to know about features in Japanese Language', datetime.time(10, 40), 'Japanese speaker,Sales']
"""
LIST OF ISSUES
The following code is to enter the issue and the type of role/roles of agents related to the issue.
issues = []
j = 'Y'
while j == 'Y':
add_detail = input('Do you want to enter issues details (Y/N):')
if add_detail == 'Y':
issue_id = int(input('Issue ID:'))
issue_desp = input('Issue:')
role_issue = str(input('Enter the Roles involved separated by comma:'))
timenow = ((datetime.datetime.now()).strftime("%X")).split(":")
#timenow = timenow.split(":")
time_now = datetime.time(int(timenow[0]),int(timenow[1]))
issues.append(issue_id)
issues.append(issue_desp)
issues.append(time_now)
issues.append(role_issue)
else:
break
"""
print ('AGENT LIST: ',agent_list)
print ('AGENT ROLES: ',roles)
print ('ISSUES LIST: ',issues)
select_mode = ''
the_issue = []
def agent_select(agent_list,select_mode,the_issue):
agent_reqd = []
types_roles= (the_issue[3]).split(',')
for k in range(len(types_roles)):
for l in range(int(len(agent_list)/4)):
if types_roles[k] == agent_list[(4*l +3)]:
agent_reqd.append(agent_list[(4*l)])
agent_reqd.append(agent_list[(4*l+1)])
agent_reqd.append(agent_list[(4*l+2)])
agent_reqd.append(agent_list[(4*l+3)])
if types_roles[k] not in agent_list:
print ('The role '+ types_roles[k] + ' is not available')
# print('LIST OF AGENTS WITH ROLES RELATED TO THE ISSUE: ',agent_reqd)
if select_mode == 'All available':
print('For the issue:')
print(the_issue)
print('with selection mode:')
print(select_mode)
print('Here are the agents:')
print(agent_reqd)
if select_mode == 'Least busy':
o = 0
Diff = []
timeDiff = 0
for n in range(int(len(agent_reqd)/4)):
A = datetime.datetime.combine(datetime.date.today(), the_issue[2])
B = datetime.datetime.combine(datetime.date.today(), agent_reqd[(4*n+2)])
timeDiff = A-B
timeDiff = int(timeDiff.total_seconds())
Diff.append(agent_reqd[(4*n)])
Diff.append(timeDiff)
if timeDiff > o :
o = timeDiff
# print('HIGHEST TIME DIFFERENCE BETWEEN THE ISSUE AND AVAILABILITY OF THE AGENT: ',o)
# print('LIST OF TIME DIFFERENCES: ',Diff)
for p in range(int(len(Diff)/2)):
if o == Diff[2*p+1]:
agent_reqd = agent_reqd[(4*p):(4*p+4)]
print('For the issue:')
print(the_issue)
print('with selection mode:')
print(select_mode)
print('Here is the agent:')
if o >0:
print(agent_reqd)
else:
print('NIL')
if select_mode == 'Random':
q = random.randrange(int(len(agent_reqd)/4))
agent_reqd = agent_reqd[(4*q):(4*q+4)]
print('For the issue:')
print(the_issue)
print('with selection mode:')
print(select_mode)
print('Here is the agent:')
print(agent_reqd)
for m in range(int(len(issues)/4)):
the_issue = issues[(4*m):(4*m+4)]
print ('THE ISSUE: ',the_issue)
select_mode = input('Select a selection mode (All available/Least busy/Random):')
agent_select(agent_list,select_mode,the_issue)
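# --- Illustrative helper (appended, not part of the original): the flat agent
# --- list stores 4 fields per agent, so grouping it into tuples makes the
# --- encoding explicit. ---
def agents_as_records(flat_list):
    """Group the flat list into (id, available, available_since, role) tuples."""
    return [tuple(flat_list[i:i + 4]) for i in range(0, len(flat_list), 4)]
# e.g. agents_as_records(agent_list)[0] -> (1001, True, datetime.time(8, 0), 'Support')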
| true
|
729e3657584d0f00c4c4681672e5c5c1e892e754
|
Python
|
Audi-Un-Autre/TheTranslators
|
/COGS/errorHandling.py
|
UTF-8
| 1,248
| 2.796875
| 3
|
[] |
no_license
|
# This cog listens to all command calls and reports error directly to the user in the channel
import discord
from discord.ext import commands
from botmain import config
class ErrorHandling(commands.Cog):
def __init__(self, bot):
self.bot = bot
# General error response
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
errorFormat = self.bot.get_cog('Formatting')
if isinstance(error, commands.CommandNotFound):
await errorFormat.formatGeneral(ctx, 'Command not recognized.')
if isinstance(error, commands.CheckFailure):
await errorFormat.formatGeneral(ctx, 'You do not have permission to do this.')
if isinstance(error, commands.BadArgument):
await errorFormat.formatGeneral(ctx, 'Invalid argument. Command canceled.')
if isinstance(error, commands.MissingRequiredArgument):
await errorFormat.formatGeneral(ctx, 'Nothing to translate. Please retry in the format of: ```!french + word``````!english + word```')
if isinstance(error, discord.Forbidden):
await errorFormat.formatGeneral(ctx, 'I don\'t have permission to do this.')
def setup(bot):
bot.add_cog(ErrorHandling(bot))
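# --- Load sketch (appended for illustration; assumes the commands.Bot instance
# --- lives in botmain.py, as the import above suggests):
# ---     bot.load_extension('COGS.errorHandling')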
| true
|
6a8266f7275c8f233ef9efb4c1f1eba9886d2e71
|
Python
|
Tom-Adamski/PythonPlayground
|
/matplotlib/triangle.py
|
UTF-8
| 1,374
| 2.953125
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
width = 50
height = 50
fig, ax = plt.subplots()
ax.set_xlim([0,width])
ax.set_ylim([0,height])
patches = []
N = 100
ratio = 0.01
ratioInv = 1 - ratio
colors = []
point = []
#triangle
point += [[0,0]]
point += [[25, 50]]
point += [[50,0]]
pt = np.array(point)
polygon = Polygon(pt, True)
patches.append(polygon)
for nb in range(N):
nPoint = []
for i in range(len(point) - 1):
x = ratio * point[i][0] + ratioInv * point[i+1][0]
y = ratio * point[i][1] + ratioInv * point[i+1][1]
nPoint += [[ x , y ]]
x = ratio * point[i+1][0] + ratioInv * point[0][0]
y = ratio * point[i+1][1] + ratioInv * point[0][1]
nPoint += [[ x , y ]]
npt = np.array(nPoint)
polygon = Polygon(npt, True)
patches.append(polygon)
point = nPoint
#ratio += 0.001
ratioInv = 1 - ratio
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_color([1,0,1])
#for i in range(N):
# if i%2 == 0 :
# colors += [ 50 ]
# else:
# colors += [ 100 ]
for i in range(N):
colors += [ i ]
p.set_array(np.array(colors))
ax.add_collection(p)
plt.show()
| true
|
33cc09f486e7f0c4620b261dfd84f56a4c32ef49
|
Python
|
BenGreenDev/ATDeepLearningAssetGeneration
|
/ConvNet.py
|
UTF-8
| 3,266
| 2.640625
| 3
|
[] |
no_license
|
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import pickle
import time
import tensorflow.keras
NAME = "Satellite-Image-Classifier-cnn-64x2-{}".format(int(time.time()))
# Create the TensorBoard logging callback
tensorboard = TensorBoard(log_dir='logs\\{}'.format(NAME))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
pickle_in = open("Y.pickle", "rb")
Y = pickle.load(pickle_in)
Y = tf.keras.utils.to_categorical(Y, 8)
#X = X/255.0
# dense_layers = [1, 2]
# layer_sizes = [32, 64, 128, 256]
# conv_layers = [1, 2, 3]
# kernal_sizes = [3, 7]
# denselayer_sizes = [8]
# num_epochs = [50, 100, 200, 300, 400, 500, 800, 1000, 1200]
dense_layers = [1]
dense_layer_sizes = [256]
conv_layers = [2]
conv_layer_sizes = [128]
kernal_sizes = [7]
num_epochs = [550]
for dense_layer in dense_layers:
for dense_layer_size in dense_layer_sizes:
for conv_layer in conv_layers:
for conv_layer_size in conv_layer_sizes:
for kernal_size in kernal_sizes:
for num_epoch in num_epochs:
NAME = "{}-conv-{}-nodes-{}-dense-{}-kernal-{}-numepoch-{}".format(conv_layer, dense_layer_size, dense_layer, kernal_size, num_epoch, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(conv_layer_size, (kernal_size, kernal_size), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.15))
for l in range(conv_layer-1):
model.add(Conv2D(conv_layer_size, (kernal_size, kernal_size)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.15))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(dense_layer_size))
model.add(Activation('relu'))
model.add(Dropout(0.35))
model.add(Dense(8))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir="logs\\{}".format(NAME))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.fit(X, Y,
batch_size=32,
# steps_per_epoch=50,
# validation_steps=25,
epochs=num_epoch,
validation_split=0.3,
callbacks=[tensorboard],
shuffle=True)
model.save('CNN.model')
| true
|
218d6ac49e4668947b8f563ee82bf3ee00ec49e9
|
Python
|
Sohailsaifi/Internet-Speed-Test
|
/test.py
|
UTF-8
| 374
| 2.96875
| 3
|
[] |
no_license
|
import speedtest
s = speedtest.Speedtest()
option = int(input('''What do you want to know :
1 - upload speed
2 - download speed
3 - ping \n'''))
if option == 1:
print(s.upload())
elif option == 2:
print(s.download())
elif option == 3:
server = []
s.get_servers(server)
print(s.results.ping)
else:
print("Invalid Option!!...Please try again.")
| true
|
e0f3d984d34775a40246acc75fa859013fa4f37f
|
Python
|
gagan1510/PythonBasics
|
/PythonRef/file.py
|
UTF-8
| 509
| 3.65625
| 4
|
[] |
no_license
|
fName = input("Please enter the file name with extension to be read: ")
try:
    # Read the file line by line with readline(); end='' avoids doubling the
    # newlines that readline() already keeps.
    with open(fName) as f:
        line = f.readline()
        while line:
            print(line, end='')
            line = f.readline()
    print("This is being printed using the for loop.\n")
    # The same file read again, this time by iterating over the file object.
    with open(fName) as f:
        for line in f:
            print(line, end='')
    year = 20
    principle = 2000
    # Overwrite the file with a formatted record.
    with open(fName, "w") as f:
        f.write("%3d %.2f\n" % (year, principle))
except OSError:
    print("Couldn't find file " + fName)
| true
|
6f16c076fad7413890d76074098c478fb1866aea
|
Python
|
mina0805/Programming-with-Python
|
/Programming_Basics_with_Python/02.ПРОСТИ ПРЕСМЯТАНИЯ/10.Radians_To_degrees.py
|
UTF-8
| 105
| 3.296875
| 3
|
[] |
no_license
|
import math
rad = float(input())
deg = (180/math.pi)*rad
deg_round = round(deg, 2)
print(deg_round)
| true
|
7cf1928e181b0781f79fc5148dd42522a40eac3d
|
Python
|
RichardLaBella/PyLessons
|
/ex6
|
UTF-8
| 668
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/python
# using %d format character to reference the 10
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
# using the %s format character twice to reference the variables in parentheses
y = "Those who know %s and those who %s." % (binary, do_not)
# Just printing x and then printing y
print x
print y
# printing this to screen. %r is referencing x which is "there are 10 types of people"
print "I said: %r. " % x
print "I also said: '%s'. " % y
hilarious = False
joke_evaluation = "isn't that joke so funny?! %r"
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a sting with a right side."
print w + e
| true
|
0878226013543f04a6b03b6bf2f3301139bcbdc7
|
Python
|
gtmkr1234/learn-python39
|
/learn-python39/set_Python/practice_questions_sheet.py
|
UTF-8
| 482
| 4.125
| 4
|
[] |
no_license
|
"""
Write a Python program to remove an item from a set if it is present in the set. HereBothItem
and Set is enter by the User.
"""
# user data is space separated
h = set(map(int, input().split()))
itm = int(input('enter the item '))  # int() instead of eval(): the set holds ints and eval is unsafe
# h.remove(itm)
h.discard(itm)
print(h)
# 2nd
st1 = set('hello')
st2 = set('hi')
re = st1 - st2
# re = st1.difference(st2)
print(re)
# 3rd
st1 = set('hello')
st2 = set('hi')
re = st1 ^ st2
# re = st1.symmetric_difference(st2)
print(re)
| true
|
654f0ce008a1716dfbdc396b7328d0de2418085c
|
Python
|
ishantk/ENC2020PYAI1
|
/Session58C.py
|
UTF-8
| 417
| 3.515625
| 4
|
[] |
no_license
|
import nltk
# nltk.download('punkt') -> Required for word_tokenize
# nltk.download('averaged_perceptron_tagger') -> Required for POS Tagging
from nltk import word_tokenize, pos_tag
sentence = "A very Happy Navratras to All. Code Well. Be at Home. Stay Safe :)"
tokens = word_tokenize(sentence)
print(tokens)
# POS is Parts of Speech
# POS Tagging is tagging the token with language grammar
print(pos_tag(tokens))
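# --- Appended illustration (not in the original): Penn Treebank noun tags all
# --- start with 'NN', so nouns can be filtered straight from the tag pairs.
nouns = [word for word, tag in pos_tag(tokens) if tag.startswith('NN')]
print(nouns)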
| true
|
e36286ce329ec9eee60b25f950f32548070c16dc
|
Python
|
ljw0096/Python_300_practice
|
/py121_130.py
|
UTF-8
| 261
| 3.015625
| 3
|
[] |
no_license
|
import requests
btc =requests.get("https://api.bithumb.com/public/ticker/").json()['data']
# Add the day's price range (max - min) to the opening price
variation = int(btc['max_price'])-int(btc['min_price'])
res = variation+int(btc['opening_price'])
if res>int(btc['max_price']):
print("up")
else:
print("down")
| true
|
e775fa42a75450c2e4b675e886b26bf8e0d5a651
|
Python
|
jaimegildesagredo/booby
|
/booby/inspection.py
|
UTF-8
| 1,674
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Jaime Gil de Sagredo Luna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The :mod:`inspection` module provides users and 3rd-party library
developers a public api to access :mod:`booby` objects and classes internal
data, such as defined fields, and some low-level type validations.
This module is based on the Python :py:mod:`inspect` module.
"""
from booby import models
def get_fields(model):
"""Returns a `dict` mapping the given `model` field names to their
`fields.Field` objects.
:param model: The `models.Model` subclass or instance you want to
get their fields.
:raises: :py:exc:`TypeError` if the given `model` is not a model.
"""
if not is_model(model):
raise TypeError(
'{} is not a {} subclass or instance'.format(model, models.Model))
return dict(model._fields)
def is_model(obj):
"""Returns `True` if the given object is a `models.Model` instance
or subclass. If not then returns `False`.
"""
try:
return (isinstance(obj, models.Model) or
issubclass(obj, models.Model))
except TypeError:
return False
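# --- Illustrative usage (appended; the User model below is an assumption made
# --- for demonstration, not part of this module) ---
if __name__ == '__main__':
    from booby import fields

    class User(models.Model):
        name = fields.String()

    print(get_fields(User))   # {'name': <booby.fields.String object at ...>}
    print(is_model(User))     # True
    print(is_model(object))   # False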
| true
|
3a2dfc3cf3ba23889f2658b175a95e195e65d7d6
|
Python
|
AkaiTobira/TetrisAgents
|
/Libraries/game.py
|
UTF-8
| 1,604
| 2.625
| 3
|
[] |
no_license
|
import pygame
import time
from Libraries.consts import *
from Libraries.Structures.tetrisGame import Tetris
from Libraries.Structures.displayers import FPSDisplayer
from Libraries.Structures.tetrominoSpawner import RandomSpawnTetromino, SimpleSpawnTetrimino
from Libraries.Structures.playerList import PlymodeController
from Libraries.Structures.meansures import Meansures
class Game:
screen = None
resolution = None
name = ""
fpsRate = None
def __init_pygame(self, resolution, name):
pygame.display.set_caption(name)
self.screen = pygame.display.set_mode(resolution)
def reset_resolution(self):
self.__init_pygame(self.resolution,self.name)
    def __init__(self, resolution, name):
        self.name = name
        self.resolution = resolution
        self.running = True  # flag read by is_running(); set False to stop the loop
        self.reset_resolution()
        self.tetris = Tetris(self.screen, [OFFSET/2 + 6, OFFSET/2 + 6], SimpleSpawnTetrimino(), PlymodeController())
        self.fpsRate = FPSDisplayer(self.screen, [OFFSET/2 + 6 + 100, OFFSET/2 + 6 + (GRID_HEIGHT + 15) * SQUARE_SIZE])
def is_running(self):
return self.running
def process(self, event):
self.tetris.process(event)
def draw(self):
self.screen.fill(get_color(Colors.BLACK))
self.tetris.draw()
self.fpsRate.draw_text(19)
pygame.display.flip()
def update(self, delta):
if self.tetris.is_game_over: self.tetris.reset()
self.tetris.update(delta)
Meansures.tick()
self.fpsRate.update(delta)
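# --- Minimal run-loop sketch (appended for illustration; the resolution and
# --- title are placeholders, and the Libraries package must be importable) ---
if __name__ == '__main__':
    pygame.init()
    game = Game((800, 600), 'Tetris Agents')
    clock = pygame.time.Clock()
    while game.is_running():
        delta = clock.tick(60)
        for event in pygame.event.get():
            game.process(event)
        game.update(delta)
        game.draw()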
| true
|
9195d7c958e1e050e22466a523e6f4c18a70a23c
|
Python
|
rjnp2/deep_learning_from_scratch
|
/loss/loss.py
|
UTF-8
| 3,194
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 15:57:07 2020
@author: rjn
"""
# importing required library
import cupy as cp
class Loss(object):
    def loss(self, y: cp.array,
             y_pred: cp.array):
        '''
        Parameters
        ----------
        y : cp.array
            Ground truth value.
        y_pred : cp.array
            Predicted values.
        Raises
        ------
        NotImplementedError
            Subclasses must override this method.
        '''
        raise NotImplementedError()
    def gradient(self, y: cp.array,
                 y_pred: cp.array):
        '''
        Parameters
        ----------
        y : cp.array
            Ground truth value.
        y_pred : cp.array
            Predicted values.
        Raises
        ------
        NotImplementedError
            Subclasses must override this method.
        '''
        raise NotImplementedError()
    def acc(self, y: cp.array,
            y_pred: cp.array):
        '''
        Parameters
        ----------
        y : cp.array
            Ground truth value.
        y_pred : cp.array
            Predicted values.
        Returns
        -------
        float
            Accuracy metric; this base implementation always returns 0.
        '''
        return 0
class mean_absolute_loss(Loss):
    '''
    Mean Absolute Error (MAE) is a commonly used regression loss function.
    MAE is the mean of the absolute differences between the target variable
    and the predicted values:
        L(y, y_pred) = (1/N) * sum(|y - y_pred|)
    `loss = mean(abs(y_true - y_pred), axis=-1)`
    MAE is not sensitive towards outliers: given several examples with the same
    input feature values, the optimal prediction is their median target value.
    This should be compared with Mean Squared Error, where the optimal
    prediction is the mean.
    '''
def __init__(self):
pass
def loss(self, y: cp.array ,
y_pred: cp.array )-> cp.array:
'''
Parameters
----------
y : cp.array
Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred : cp.array
The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns
-------
cp.array
Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
'''
return cp.mean(cp.abs(y - y_pred))
    def gradient(self, y: cp.array,
                 y_pred: cp.array) -> cp.array:
        '''
        Parameters
        ----------
        y : cp.array
            Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred : cp.array
            The predicted values. shape = `[batch_size, d0, .. dN]`.
        Returns
        -------
        cp.array
            Gradient of the mean absolute error w.r.t. y_pred.
        '''
        # d/dy_pred of mean(|y - y_pred|) is the sign of the error, scaled by 1/N
        return cp.sign(y_pred - y) / y.size
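# --- Quick numeric check (appended for illustration; needs a CUDA device since
# --- the module is built on cupy) ---
if __name__ == '__main__':
    mae = mean_absolute_loss()
    y_true = cp.array([1.0, 2.0, 3.0])
    y_hat = cp.array([1.5, 2.0, 2.0])
    print(mae.loss(y_true, y_hat))      # mean([0.5, 0.0, 1.0]) = 0.5
    print(mae.gradient(y_true, y_hat))  # sign([0.5, 0.0, -1.0]) / 3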
| true
|
4d8737f2a14db091ee464476a7abc71547b38d5c
|
Python
|
Conor12345/misc
|
/challenges/venv/challenge7maybe.py
|
UTF-8
| 145
| 3.375
| 3
|
[] |
no_license
|
str1 = "abcdefghijklmnopqrstuvwxyz"
for i in range(0, 26):
foo = str1[i:10]
if foo == "":
break
print(foo)
| true
|
65b081ea259df0caab212231a2ed29da08b1a244
|
Python
|
roxor05/Programs
|
/Python-scripts/ALL-snowflake_tables-arrived-today.py
|
UTF-8
| 3,688
| 2.625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
#### Requirement for running this code ####
# First install pip3 then install snowflake connector
# pip3 install asn1crypto==1.3.0
# pip3 install snowflake-connector-python
#########################################################################
import snowflake.connector
from datetime import datetime
from datetime import date, timedelta
date_today = date.today()
Current_date=date_today.strftime('%Y-%m-%d')
print("--------------------------------------------------------------------------")
print(Current_date + " --- Today's date")
print("--------------------------------------------------------------------------")
####### For a specific date uncomment the below line with the Date required #############
# Current_date='2020-02-19'
#########################################################################################
open("snowflake_tables.txt", "w").close()
Username = ''
Password = ''
account = ''
conn = snowflake.connector.connect(
user= Username,
password= Password,
account= account,
warehouse='COMPUTE_WH_XS',
database='AVNR_STAGE',
schema='INFORMATION_SCHEMA'
)
# Create cursor
cur = conn.cursor()
#### Tables in L1 schema ####
cur.execute("SELECT DISTINCT TABLE_NAME FROM TABLES WHERE TABLE_SCHEMA LIKE '%L1%' ORDER BY TABLE_NAME ASC")
for row in cur:
print(f'{row[0]}',file=open("snowflake_tables.txt", "a"))
cur.close()
conn.close()
print("Tables in L1 schema are copied to snowflake_tables.txt")
print("--------------------------------------------------------------------------")
##### To get the count for L1 tables #####
try:
conn = snowflake.connector.connect(
user= Username,
password= Password,
account= account,
warehouse='COMPUTE_WH_XS',
database='AVNR_STAGE',
schema='L1'
)
cur = conn.cursor()
# Execute SQL statement
with open('snowflake_tables.txt','r') as f:
for line in f:
            # VEEVA_CRM tables have no EXTRACT_DT column, so use last_modified_time for them instead
if 'VEEVA_CRM' in line:
try:
cur.execute("select max(last_modified_time) from {};".format(line))
for (last_modified_time,) in cur:
print('{}------'.format(line.strip()) + 'last_modified_time: {0}'.format(datetime.date(last_modified_time)))
# else:
# pass
except Exception as e:
print(e)
# this prints all the others in the data
else:
try:
cur.execute("select MAX(EXTRACT_DT),count(*) from {} where EXTRACT_DT='{}';".format(line,Current_date))
#cur.execute("select MAX(EXTRACT_DT) from {};".format(line))
for (EXTRACT_DT,count) in cur:
if count == 0:
pass
else:
#for EXTRACT_DT in cur:
print('{}------'.format(line.strip()) + 'EXTRACT_DT:{0}------Count:{1}'.format(EXTRACT_DT, count))
#print('{}------'.format(line.strip()) + 'EXTRACT_DT:{0}'.format(EXTRACT_DT),file=open("snowflake_count.txt", "a"))
                except Exception:
                    # Tables without an EXTRACT_DT column are skipped on purpose
                    pass
except Exception as e:
    print('Error: {}'.format(e))  # surface connection failures instead of hiding them
cur.close()
conn.close()
| true
|
af0ddd3696971ae804471efae6cee7614c1fd13c
|
Python
|
ElijahEldredge/Turtlebot_Navigation
|
/src/navigation/Node.py
|
UTF-8
| 1,451
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# Team 14: Brandon Knox, Elijah Eldredge, Jon Andrews
# RBE 3002, Unified Robotics IV
# Assignment: Final Term Project
import math, geometry_msgs, time
from geometry_msgs.msg import Point
# Node(object)
# This class generates node objects, giving them coordinates, a parent,
# and a distance. It also instantiates node neighbors and node contents.
class Node(object):
# definitions:
def __init__(self, coord, parent, dist):
self.coord = coord
self.parent = parent
self.dist = dist
# getNeighbors returns node neighbors given a resolution,
# creating a new list and appending each new neighbor to it
def getNeighbors(self, res):
myNeighbors =[]
for x in [-res,0,res]:
for y in [-res,0,res]:
time.sleep(.001)
pos = Point()
pos.x = self.coord.x + x
pos.y = self.coord.y + y
pos.z = self.coord.z
newNode = Node(pos, self, self.dist + math.sqrt(x**2 + y**2))
# if statement to add only new nodes
if self.contains(pos, res):
pass
else:
myNeighbors.append(newNode)
return myNeighbors
# contains checks if the res/2 is less than or greater to the
# individual contents of the node in the x,y,z or parent
def contains(self, pos, res):
if self.coord.x + res/2 > pos.x and self.coord.x - res/2 < pos.x and self.coord.y + res/2 > pos.y and self.coord.y - res/2 < pos.y:
return True
elif not self.parent:
return False
else:
return self.parent.contains(pos, res)
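# --- Minimal usage sketch (appended for illustration; assumes a ROS install
# --- providing geometry_msgs) ---
if __name__ == '__main__':
    start = Point()
    start.x, start.y, start.z = 0.0, 0.0, 0.0
    root = Node(start, None, 0.0)
    neighbors = root.getNeighbors(1.0)
    print(len(neighbors))  # 8: the centre cell is filtered out by contains()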
| true
|
aa1d3baff0aef1666687b2b08b5123ce366e1bf5
|
Python
|
threexc/WeatherVane
|
/vane/weathervane.py
|
UTF-8
| 5,749
| 3.015625
| 3
|
[] |
no_license
|
import requests
import sys
import re
import datetime
import os
import errno
import xml.etree.ElementTree as ET
# WeatherCollector will collect and tidy the necessary METAR and TAF data for
# a specified aerodrome. This is currently hard-coded to work with the NAV
# CANADA website only. This object-oriented implementation is still under
# development.
class WeatherCollector:
def __init__(self, station, page_format='raw', language='anglais', region='can'):
# The page of interest. Use GET calls to provide it the parameters for
# each aerodrome
self.url = "https://flightplanning.navcanada.ca/cgi-bin/Fore-obs/metar.cgi"
self.station = station
# Default headers to mimic for scraping the page
self.headers = {
"user-agent": "Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0"
}
# The parameters to be encoded and passed to the page as part of the
# GET request. Note that the only parameter that should ever be
# modified (and is in fact intended to be) is the station (aerodrome).
# The others are provided as inputs for testing and for potential
# future-proofing
self.params = {'Stations': station, 'format': page_format, 'Langue': language, 'Region': region}
# class variables to hold the data for each aerodrome once it is parsed
self.parsed_data = None
self.metar = None
self.taf = None
# The core component of the WeatherCollector class. This function takes the
# input URL and parameters, then performs the following:
#
# 1. Encodes the base URL and parameters while sending a GET request to the
# page
# 2. Removes all of the HTML formatting from the text data (string)
# 3. Removes whitespace and leftover formatting (" ...")
# 4. Creates a baseline string containing all of the data before further
# isolating the METAR and TAF content
# 5. Splits out the TAF data and truncates it following the final "=", which
# is found in every aerodrome reading tested
# 6. Similarly splits out the METAR data and truncates it following the
# final "=" character
# 7. Sets the class variables self.parsed_data, self.metar, and self.taf
# equal to their corresponding strings
# TODO: Write extensive error handling into this function
def gather_weather_data(self):
response = self.send_query()
self.parsed_data = self.clean_query(response)
self.taf = self.format_taf(self.parsed_data)
self.metar = self.format_metar(self.parsed_data)
return
def send_query(self):
# Send the GET request with the base URL and parameters
response = requests.get(self.url, params=self.params, verify=False, headers=self.headers)
return response
def clean_query(self, response):
# Strip out all of the HTML formatting
stripped = re.sub("<.*?>", "", response.text)
# Get rid of newlines
cleaned = stripped.replace('\n', '')
# Remove the mysterious substring
tidied = cleaned.replace(' ', ' ')
# Ignore the text before the first occurrence of the substring "TAF"
split_data = tidied.split('TAF ', 1)[1]
return split_data
def format_taf(self, cleaned_query):
# Pluck the TAF from the data and remove the trailing "="
taf = cleaned_query.split('TAF ', 1)[1]
fixed_taf = taf[:taf.rfind('=')]
return fixed_taf
def format_metar(self, cleaned_query):
# Pluck the METAR from the data and remove the trailing "="
metar = cleaned_query.split('TAF ', 1)[0]
fixed_metar = metar[:metar.rfind('=')]
return fixed_metar
def get_dromeID(self):
return self.station
# This class writes the METAR and TAF to respective time-stamped files, and
# does so in a clean format
class WeatherWriter:
# The root_path input is the root directory that the data will be
# written to. The station argument refers to the aerodrome code, e.g. "CYOW"
# for Ottawa. All of the directory and filename setup is done in this
# constructor; all other functions handle and write to these directories
# and/or filenames
def __init__(self, station, root_path, collector):
self.station = station
self.root_path = root_path
self.collector = collector
self.date = None
# Create separate subdirectories for the station's METAR and TAF
self.metar_dir = ("{}/{}".format(self.station, "METAR"))
self.taf_dir = ("{}/{}".format(self.station, "TAF"))
# Create a hyphen-delimited timestamp to concatenate with the aerodrome
# ID to make the filename for the data. The whitespace is replaced with
# an underscore to make sure the filenames will play nice with Unix and
# Linux systems
date = str(datetime.datetime.utcnow().strftime("%Y-%m-%d %H-%M-%S"))
self.date = date.replace(' ', '_')
# Create a string to use for each text file's name
self.metar_file = ("{}_{}_{}".format(self.station, "METAR", self.date))
self.taf_file = ("{}_{}_{}".format(self.station, "TAF", self.date))
# Combine the parts of the filename. Might be overkill
# TODO: Determine if there is a simpler way to do this
self.full_metar_path = ("{}/{}/{}".format(self.root_path, self.metar_dir, self.metar_file))
self.full_taf_path = ("{}/{}/{}".format(self.root_path, self.taf_dir, self.taf_file))
def write_metar(self, metar):
if not os.path.exists(self.root_path + '/' + self.metar_dir):
os.makedirs(self.root_path + '/' + self.metar_dir)
with open(self.full_metar_path, 'w') as f:
f.write(metar)
f.close()
def write_taf(self, taf):
if not os.path.exists(self.root_path + '/' + self.taf_dir):
os.makedirs(self.root_path + '/' + self.taf_dir)
with open(self.full_taf_path, 'w') as f:
f.write(taf)
f.close()
# The analysis class. Reads from the files for a specific aerodrome and does
# various statistical analyses depending on inputs
class WeatherAnalyzer:
pass
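# --- Usage sketch (appended; the station code and output directory are
# --- illustrative placeholders) ---
if __name__ == '__main__':
    collector = WeatherCollector('CYOW')
    collector.gather_weather_data()
    writer = WeatherWriter('CYOW', './data', collector)
    writer.write_metar(collector.metar)
    writer.write_taf(collector.taf)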
| true
|
6d6ca8449fd692bf2ee32c6714d665295b5c81d5
|
Python
|
CS26-BW1-Javascript-Is-Bad/FE-Client
|
/core/domain/map.py
|
UTF-8
| 3,554
| 2.625
| 3
|
[] |
no_license
|
import math
import core.util.constants as constants
import os.path as path
import pygame as pg
import pytmx
from core.util.colors import *
from core.util.functions import draw_text
from core.util.settings import *
class Map:
def __init__(self, rooms):
self.rooms = rooms
self.size = math.sqrt(len(rooms))
self.visited_list = []
self.game = None
self.mini_map = None
def show_map(self):
grid_size = math.sqrt(len(self.rooms))
x, y = self.game.screen.get_size()
room_size = int(x / grid_size * .2)
pos = (x//2 - (room_size * (grid_size//2)), y//2 - (room_size * (grid_size//2)))
#self.game.screen.fill(BLACK)
while True:
offsety = 0
offsetx = 0
i = 1
counter = 0
events = pg.event.get()
for event in events:
print(event)
if event.type == pg.QUIT:
pg.quit()
quit()
keys = pg.key.get_pressed()
if keys[pg.K_n]:
return
offset_y = 150
offset_x = 50
room_dimension = 12
room_scale = 30
n_to_x_offset = 4
n_to_y_offset = 17
n_to_line_width = 4
n_to_line_height = 18
for room in self.rooms:
room_color = PURPLE
if room.x == self.game.room.x and room.y == self.game.room.y:
room_color = GREEN
pg.draw.rect(self.game.screen, room_color, (offset_y + room.y * room_scale, offset_x + room.x * room_scale,
room_dimension, room_dimension))
if room.n_to != 0:
pg.draw.rect(self.game.screen, RED, (offset_y + room.y * room_scale + n_to_x_offset,
offset_x + room.x * room_scale - n_to_y_offset, n_to_line_width,
n_to_line_height))
if room.e_to != 0:
pg.draw.rect(self.game.screen, RED, (offset_y + room.y * room_scale + n_to_y_offset - 4,
offset_x + room.x * room_scale + n_to_x_offset, n_to_line_height,
n_to_line_width))
pg.display.update()
pg.display.flip()
class MiniMap():
def __init__(self, game, map):
self.game = game
self.map = map
def build_map(self):
self.map_group = pg.sprite.Group()
col = 0
room_index = 0
self.map_node_list = []
while col < self.map.size:
row = 0
while row < self.map.size:
room = self.map.rooms[room_index]
if room == self.game.room:
contains_player = True
else:
contains_player = False
node = MiniMapNode(row, col, room.visited, contains_player)
self.map_node_list.append(node)
self.map_group.add(node)
row += 1
room_index += 1
col += 1
return self.map_group
class MiniMapNode(pg.sprite.Sprite):
def __init__(self, x, y, visited, contains_player):
self.image = pg.image.load(path.join(sprite_folder, PLATFORM_IMG))
self.rect = self.image.get_rect()
pg.sprite.Sprite.__init__(self)
| true
|
92c32f115940ef9d24b0b0815d5dc0d9424c9cc4
|
Python
|
HardyYao/hardy_python
|
/project/004 国内三大交易所数据爬虫程序/scraw_shanghai_data.py
|
UTF-8
| 5,966
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Author HardyYao
@Time 2017/10/2 7:28
'''
import requests
import xlwt
import time
import random
from urllib.error import URLError, HTTPError
from json.decoder import JSONDecodeError
from conn import headers
class getShangHaiFutures(object):
def scraw_shanghai_data(self, place_shanghai, variety, contract_id, year, month, day, url, path):
req = requests.get(url.format(year, month, day), headers=headers())
if req.status_code == 200:
try:
jsonData = req.json()
except(URLError, HTTPError, JSONDecodeError) as e:
print('Error:{}'.format(e))
if len(jsonData['o_cursor']) > 10:
                # Create an empty Excel workbook
                workbook = xlwt.Workbook()
                # Create a sheet
                worksheet = workbook.add_sheet('Sheet1')
                # All 16 columns share the same width
                for col_idx in range(16):
                    worksheet.col(col_idx).width = 256 * 16
                # Write the header row
                header_titles = ['Date', 'Exchange', 'Variety', 'Contract',
                                 'Volume rank', 'Volume member', 'Volume', 'Volume change',
                                 'Long position rank', 'Long position member',
                                 'Long positions', 'Long position change',
                                 'Short position rank', 'Short position member',
                                 'Short positions', 'Short position change']
                for col_idx, title in enumerate(header_titles):
                    worksheet.write(0, col_idx, title)
for i in range(len(jsonData['o_cursor'])):
if jsonData['o_cursor'][i]['INSTRUMENTID'].replace(' ', '') == contract_id:
if jsonData['o_cursor'][i]['RANK'] != -1 and jsonData['o_cursor'][i]['RANK'] != 0 and jsonData['o_cursor'][i]['RANK'] != 999:
                            worksheet.write(int(jsonData['o_cursor'][i]['RANK']), 0, '{}/{}/{}'.format(year, month, day))  # date
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 1, place_shanghai)  # exchange
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 2, variety)  # variety
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 3, contract_id)  # contract
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 4, jsonData['o_cursor'][i]['RANK'])  # volume rank
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 5, jsonData['o_cursor'][i]['PARTICIPANTABBR1'])  # volume member abbreviation
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 6, jsonData['o_cursor'][i]['CJ1'])  # volume
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 7, jsonData['o_cursor'][i]['CJ1_CHG'])  # volume change
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 8, jsonData['o_cursor'][i]['RANK'])  # long position rank
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 9, jsonData['o_cursor'][i]['PARTICIPANTABBR2'])  # long position member abbreviation
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 10, jsonData['o_cursor'][i]['CJ2'])  # long positions
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 11, jsonData['o_cursor'][i]['CJ2_CHG'])  # long position change
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 12, jsonData['o_cursor'][i]['RANK'])  # short position rank
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 13, jsonData['o_cursor'][i]['PARTICIPANTABBR3'])  # short position member abbreviation
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 14, jsonData['o_cursor'][i]['CJ3'])  # short positions
                            worksheet.write(jsonData['o_cursor'][i]['RANK'], 15, jsonData['o_cursor'][i]['CJ3_CHG'])  # short position change
workbook.save('{}\\{}.{}.{}.xls'.format(path, year, month, day))
                print('Data for {}.{}.{} ({}-{}) collected and saved to an Excel file'.format(year, month, day, variety, contract_id))
time.sleep(random.randint(0, 1))
| true
|
346c15e9d335d5ef4873cdc58a2f27928178c686
|
Python
|
faemiyah/dnload
|
/dnload/glsl_name_strip.py
|
UTF-8
| 5,314
| 2.78125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import re
from dnload.glsl_name import is_glsl_name
# Assumed import (module path inferred from the repo's glsl_block_* layout):
# isUniform() below calls is_glsl_block_uniform, which was not imported here.
from dnload.glsl_block_uniform import is_glsl_block_uniform
########################################
# GlslNameStrip ########################
########################################
class GlslNameStrip:
"""Strip of names used for renaming purposes."""
def __init__(self, block, name):
"""Constructor."""
self.__blocks = [block]
self.__names = []
self.addName(name)
def addBlock(self, op):
"""Add block to the list of blocks."""
self.__blocks += [op]
def addName(self, name):
"""Append one name to the list."""
if not is_glsl_name(name):
raise RuntimeError("not a GLSL name: %s" % (str(name)))
if (self.getNameCount() >= 1) and (name != self.__names[0]):
raise RuntimeError("trying to append unrelated names: %s != %s" % (str(self.__names[0]), str(name)))
# Used and declared name lists may contain the exact same name.
for ii in self.__names:
if ii is name:
return
self.__names += [name]
def appendTo(self, op):
"""Appends all names into another GLSL name strip."""
for ii in self.__blocks:
op.addBlock(ii)
for ii in self.__names:
op.addName(ii)
def collectMemberAccesses(self):
"""Collect all member name accesses from the blocks."""
# First, collect all uses from members.
uses = {}
for ii in self.__blocks:
collect_member_uses(ii, uses)
# Then collect all uses from names.
for ii in self.__names:
aa = ii.getAccess()
# Might be just declaration.
if not aa:
continue
aa.disableSwizzle()
name_object = aa.getName()
name_string = name_object.getName()
if not (name_string in uses):
raise RuntimeError("access '%s' not present outside members" % (str(aa)))
uses[name_string] += [name_object]
# Expand uses, set types and sort.
ret = []
for kk in uses.keys():
name_list = uses[kk]
if 1 >= len(name_list):
print("WARNING: member '%s' of '%s' not accessed" % (name_list[0].getName(), str(block)))
typeid = name_list[0].getType()
if not typeid:
raise RuntimeError("name '%s' has no type" % (name_list[0]))
for ii in name_list[1:]:
current_typeid = ii.getType()
# Check that there is no conflicting type.
if current_typeid:
if current_typeid != typeid:
raise RuntimeError("member access %s type %s does not match base type %s" % (str(ii), str(current_typeid), str(typeid)))
continue
# No existing type, fill it in.
ii.setType(typeid)
ret += [name_list]
return sorted(ret, key=len, reverse=True)
def getBlock(self):
"""Gets the block that declared the original name."""
return self.__blocks[0]
def getBlockCount(self):
"""Gets the number of blocks."""
return len(self.__blocks)
def getBlockList(self):
"""Accessor."""
return self.__blocks
def getName(self):
"""Gets the declared name associated with this name strip."""
return self.__names[0]
def getNameCount(self):
"""Gets the number of names in this name strip."""
return len(self.__names)
def getNameList(self):
"""Accessor."""
return self.__names
def isUniform(self):
"""Tells if this name strip originates from an uniform block."""
return is_glsl_block_uniform(self.__blocks[0])
def lockNames(self, op):
"""Lock all names to given string."""
for ii in self.__names:
ii.lock(op)
def updateNameTypes(self):
"""Update all name types and check for errors."""
typeid = self.getName().getType()
if not typeid:
raise RuntimeError("declared name in GlslNameStrip has no type id")
for ii in self.__names[1:]:
found_type = ii.getType()
if found_type:
if typeid != found_type:
raise RuntimeError("conflicting type found for %s: %s vs. %s" % (str(ii), str(typeid), str(found_type)))
else:
ii.setType(typeid)
def __lt__(lhs, rhs):
"""Comparison operator."""
return lhs.getNameCount() < rhs.getNameCount()
def __str__(self):
"""String representation."""
return "GlslNameStrip('%s', %i)" % (self.getName().getName(), self.getNameCount())
########################################
# Functions ############################
########################################
def collect_member_uses(block, uses):
"""Collect member uses from inout struct block."""
for ii in block.getMembers():
name_object = ii.getName()
name_string = name_object.getName()
if name_string in uses:
uses[name_string] += [name_object]
else:
uses[name_string] = [name_object]
def is_glsl_name_strip(op):
"""Tells if given object is a GLSL name strip."""
return isinstance(op, GlslNameStrip)
| true
|
44d3b1b32fe317b75dc84bcf1a34576766e91631
|
Python
|
jquintus/PiProject
|
/Feather/button_and_led_matrix/code.py
|
UTF-8
| 1,168
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
import time
import digitalio
import board
import adafruit_matrixkeypad
import simpleio
cols = [digitalio.DigitalInOut(x) for x in (board.D9, board.D10, board.D11, board.D12, board.D13)]
rows = [digitalio.DigitalInOut(x) for x in (board.D6, board.D7)]
keys = ((1, 2, 4, 8, 16),
(32, 64, 128, 256, 512))
# rowsx = [digitalio.DigitalInOut(board.D6)]
# keysx = (("Blue 1", "White 1", "Yellow 1", "Black 1", "Red 1"))
keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)
print ("setting up LEDs")
data = digitalio.DigitalInOut(board.D2)
data.direction = digitalio.Direction.OUTPUT
latch = digitalio.DigitalInOut(board.D4)
latch.direction = digitalio.Direction.OUTPUT
clk = digitalio.DigitalInOut(board.D3)
clk.direction = digitalio.Direction.OUTPUT
print ("Start pressing buttons")
while True:
keys = keypad.pressed_keys
print("Pressed: ", keys)
byte = sum(keys)
# if (last_byte != byte and byte > 0):
# write to 595 chip
latch.value = False
simpleio.shift_out(data, clk, byte, bitcount=10)
print("sending: {0:#020b} {0}".format(byte),end="\n")
latch.value = True
last_byte = byte
time.sleep(0.1)
| true
|
192b37702cddbcc526c206775140d87cfb4b4be0
|
Python
|
jbrusey/cogent-house
|
/tests/model_tests/testRoom.py
|
UTF-8
| 3,664
| 2.875
| 3
|
[] |
no_license
|
"""
Test for the Sensor Type Classes
"""
#from datetime import datetime
import datetime
#Python Module Imports
import sqlalchemy.exc
import cogent.base.model as models
import tests.base as base
import json
class TestRoom(base.ModelTestCase):
def _serialobj(self):
"""Helper Method to provde an object to serialise"""
theItem = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
return theItem
def _dictobj(self):
"""Helper method to provide a dictionay representaiton of the object
generated by _serialobj()"""
theDict = {"__table__":"Room",
"id":1,
"name":"Test Room",
"roomTypeId" : 1 }
return theDict
def testEq(self):
"""Test for Equality"""
item1 = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
item2 = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
self.assertEqual(item1,item2)
self.assertReallyEqual(item1,item2)
#Not massivly botherered about Id at the moment
item2.id = 5
self.assertEqual(item1,item2)
self.assertReallyEqual(item1,item2)
def testNEQ(self):
item1 = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
item2 = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
self.assertEqual(item1,item2)
item2.name = "FOO"
self.assertNotEqual(item1,item2)
self.assertReallyNotEqual(item1,item2)
item2.name = item1.name
item2.roomTypeId = 2
    def testCmp(self):
        """Test comparison function
        (actually __lt__ for Py3K compat)"""
item1 = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
item2 = models.Room(id=1,
name="Test Room",
roomTypeId = 1)
self.assertEqual(item1,item2)
#Order On Name
item2.name = "A_Test"
self.assertGreater(item1,item2)
item2.name = "Z_Test"
self.assertLess(item1,item2)
item2.name = item1.name
item2.roomTypeId = 0
self.assertGreater(item1,item2)
item2.roomTypeId = 2
self.assertLess(item1,item2)
# def testAssociations(self):
# """Test if backrefs and foriegn keys work correctly"""
# session = self.session
# roomtype = models.RoomType(name = "Foo Type")
# session.add(roomtype)
# session.flush()
# room = models.Room(name = "Test Room",
# roomTypeId = roomtype.id)
# session.add(room)
# session.flush()
# session.commit()
# #And then do the check the backrefs etc
# qryroom = session.query(models.Room).filter_by(name="Test Room").first()
# print "-"
# print "-"*70
# print "OR RM: ",room
# print "NE RM: ",qryroom
# print "OR TY: ",roomtype
# print "TY ID: ",qryroom.roomTypeId
# print "-"*70
# self.assertEqual(room,qryroom)
# #Check the Ids match
# qrytype = qryroom.roomTypeId
# self.assertEqual(qrytype, roomtype.id)
# #And check on the backrefs
# qrytype = qryroom.roomType
# self.assertEqual(qrytype,roomtype)
| true
|
966d056edffa7772267f787a2ad80e4d062bc860
|
Python
|
agdsn/pycroft
|
/tests/helpers/test_functional.py
|
UTF-8
| 1,305
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Copyright (c) 2023. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import pytest
from pycroft.helpers.functional import map_collecting_errors
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
[object(), 0, 7, "foo", None],
list(reversed([object(), 0, 7, "foo", None])),
[],
[None],
[float("NaN")],
],
)
@pytest.mark.parametrize("error_type", [Exception, ZeroDivisionError, ValueError])
def test_identity_no_error_collection(input, error_type):
assert map_collecting_errors(lambda x: x, error_type, input) == (list(input), [])
@pytest.mark.parametrize(
"input, expected_result",
[
([1, 0, 2, 0, 3], [1, 2, 3]),
([1, 0, 2, 0, 3, 0], [1, 2, 3]),
([0, 1, 0, 2, 0, 3], [1, 2, 3]),
([0, 1, 0, 2, 0, 3, 0], [1, 2, 3]),
],
)
def test_error_collection_inversion(input, expected_result):
result, errors = map_collecting_errors(
lambda x: 1 / (1 / x), ZeroDivisionError, input
)
assert result == expected_result
assert len(errors) == len(input) - len(expected_result)
assert all(isinstance(err, ZeroDivisionError) for err in errors)
| true
|
62f959e0b4126bbc963087ca6ec694a464c31533
|
Python
|
vivek111/Pywork
|
/stack.py
|
UTF-8
| 854
| 4.21875
| 4
|
[] |
no_license
|
def push(stack,ele):
stack.append(ele)
return
def pop1(stack):
x=stack.pop()
return x
def display(stack):
print(stack)
stack=[]
top=-1
size=10
while True:
print("1.Push\n2.Pop\n3.Display\n4.Exit")
x=int(input("Enter your choice\n"))
if x==1:
if top==size-1:
print("stack Full")
else:
ele=int(input("Enter the element to push\n"))
top+=1
push(stack,ele)
elif x==2:
if top==-1:
print("Empty stack")
else:
print("Deleted element=",pop1(stack))
top-=1
elif x==3:
if top==-1:
print("Empty stack")
else:
print("Elements of stack are\n")
display(stack)
else:
break
| true
|
f9d4c71610b59a7a0415beb1f520e362f83781c5
|
Python
|
SharujanMuthu/NhlDiscordBot
|
/WebScraper.py
|
UTF-8
| 1,023
| 2.625
| 3
|
[] |
no_license
|
import urllib
import urllib.request
from bs4 import BeautifulSoup
import os
def create_soup(url):
page = urllib.request.urlopen(url)
link = BeautifulSoup(page, 'html.parser')
return link
soup = create_soup('https://www.hockey-reference.com/leagues/NHL_2022_skaters.html#stats::points')
def get_data():
'''
Gets the real time stats of every NHL player who has played a game in the current NHL season and saves it to
nhl_stats.csv
'''
all_data = ""
for records in soup.select('tr[class!="over_header"]'):
team_data = ""
for data in records.findAll('td'):
team_data = team_data + "," + data.text
all_data = all_data + '\n' + team_data[1:]
headers = "Rk,Player,Age,Tm,Pos,GP,G,A,PTS,+/-,PIM,PS,EV,PP,SH,GW,EV,PP,SH,S,S%,TOI,ATOI,BLK,HIT,FOW,FOL,FO%"
    # A context manager guarantees the CSV is flushed and closed on exit.
    with open(os.path.expanduser('nhl_stats.csv'), 'wb') as file:
        file.write(bytes(headers, encoding='ascii', errors='ignore'))
        file.write(bytes(all_data, encoding='ascii', errors='ignore'))
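# --- Illustrative entry point (appended): run the scraper when the module is
# --- executed directly. ---
if __name__ == '__main__':
    get_data()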
| true
|
38aa6ce2c06c64000a2353e4deb5f8b2804f2a38
|
Python
|
VerstraeteBert/algos-ds
|
/test/vraag4/src/isbn/71.py
|
UTF-8
| 3,031
| 3.640625
| 4
|
[] |
no_license
|
def isISBN_13(getal):
    # Only a string of exactly 13 digits can be a valid ISBN-13.
    if not isinstance(getal, str) or len(getal) != 13 or not getal.isdigit():
        return False
    som1 = sum(int(getal[i]) for i in range(0, 12, 2))  # digits at even indices
    som2 = sum(int(getal[i]) for i in range(1, 12, 2))  # digits at odd indices
    # Check digit: weighted sum modulo 10, folded back into the 0..9 range.
    dertiende = (10 - (som1 + 3 * som2) % 10) % 10
    return dertiende == int(getal[12])
def overzicht(codes):
    # Registration-group digit (4th character) -> language area.
    groepen = {
        '0': 'Engelstalige landen', '1': 'Engelstalige landen',
        '2': 'Franstalige landen',
        '3': 'Duitstalige landen',
        '4': 'Japan',
        '5': 'Russischtalige landen',
        '7': 'China',
        '6': 'Overige landen', '8': 'Overige landen', '9': 'Overige landen',
    }
    telling = {}
    for element in codes:
        if isISBN_13(element):
            groep = groepen[element[3]]
        else:
            groep = 'Fouten'
        telling[groep] = telling.get(groep, 0) + 1
    categorieen = ('Engelstalige landen', 'Franstalige landen', 'Duitstalige landen',
                   'Japan', 'Russischtalige landen', 'China', 'Overige landen', 'Fouten')
    for element in categorieen:
        # .get() returns 0 for categories that never occurred
        print(element + ':{0:>2}'.format(telling.get(element, 0)))
| true
|
15c01ca5df97d2e0ff861a3f8cc6bd350f4ab89d
|
Python
|
quantumech3/WUSB-Donor-Monitor
|
/Source/debug.py
|
UTF-8
| 1,223
| 3.59375
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
'''
Created by Scott Burgert on 2/20/2019
Project name: WUSB Donor Monitor ©
Module name: debug.py
Module description:
Has methods used by different modules to log events and warnings.
These logs only show when 'VERBOSE' = True
'''
# Hard coded constant. if true, status, log and warning messages will show in console
VERBOSE = True
def success(msg):
'''
Prints value of ‘msg’ in green if VERBOSE = True
:param msg: Any
:return: None
'''
if VERBOSE:
# Print green message
print("\n\u001b[32m" + "SUCCESS[]: " + msg + "\u001b[0m")
def warn(msg):
'''
Prints value of ‘msg’ in yellow if VERBOSE = True
:param msg: Any
:return: None
'''
if VERBOSE:
# Print yellow message
print("\n\u001b[33m" + "WARN[]: " + msg + "\u001b[0m")
def err(msg):
'''
Prints ‘msg’ in red regardless of VERBOSE value
:param msg: Any
:return: None
'''
# Print red message
print("\n\u001b[31m" + "ERROR[]: " + msg + "\u001b[0m")
def log(msg):
'''
    Prints a log message if VERBOSE = True
:param msg: Any
:return: None
'''
if VERBOSE:
print('\nLOG[]: ' + msg)
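# --- Illustrative self-test (appended; only runs when executed directly) ---
if __name__ == '__main__':
    log('module loaded')
    success('connected to stream')
    warn('donation total looks stale')
    err('could not reach server')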
| true
|
fedbbeb5789f7167280c0130ad18e791785f5498
|
Python
|
stuglaser/advent2020
|
/days/day10.py
|
UTF-8
| 1,162
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
from collections import deque
from collections import namedtuple
import enum
import itertools
import unittest
import sys
from utils import *
INPUT = 'inputs/input10.txt'
class TestToday(unittest.TestCase):
def test_common(self):
pass
def main():
nums = []
with open(INPUT, 'r') as fin:
for line in fin:
line = line.rstrip()
nums.append(int(line))
BUILTIN = max(nums) + 3
CHARGE = 0
nums.sort()
num1 = 0
num3 = 1
for i in range(len(nums)):
last = 0 if i == 0 else nums[i - 1]
if nums[i] - last == 3:
num3 += 1
elif nums[i] - last == 1:
num1 += 1
print('part 1:', num1 * num3)
nums = [0] + nums + [BUILTIN]
ways = [0] * len(nums)
ways[0] = 1
for i in range(1, len(nums)):
j = i - 1
while j >= 0 and nums[j] >= nums[i] - 3:
ways[i] += ways[j]
j -= 1
print('part 2:', ways[-1])
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'test':
unittest.main(argv=sys.argv[:1] + sys.argv[2:])
else:
main()
| true
|
c0c8c7acfbd1d95de93f87e119551a94fe14c644
|
Python
|
simon-zhangmuye/leetcode
|
/83.py
|
UTF-8
| 745
| 3.765625
| 4
|
[] |
no_license
|
# coding=utf-8
__author__ = 'Simon Zhang'
__date__ = '2019/10/12 15:26'
# Given a linked list, remove the n-th node from the end of the list and return its head.
#
# Example:
#
# Given the linked list: 1->2->3->4->5, and n = 2.
#
# After removing the second node from the end, the linked list becomes 1->2->3->5.
# Minimal singly-linked list node (assumed here; LeetCode normally supplies this class).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
end = head
start = head
for i in range(n):
end = end.next
if not end:
return head.next
while end.next:
start = start.next
end = end.next
if start.next.next:
start.next = start.next.next
else:
start.next = None
return head
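# --- Quick demonstration (appended): build 1->2->3->4->5 and remove n=2 ---
if __name__ == '__main__':
    head = ListNode(1)
    node = head
    for v in range(2, 6):
        node.next = ListNode(v)
        node = node.next
    result = Solution().removeNthFromEnd(head, 2)
    vals = []
    while result:
        vals.append(result.val)
        result = result.next
    print(vals)  # [1, 2, 3, 5]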
| true
|
de6d80cd4f822dc2cc6be8d742bc78b5881eee49
|
Python
|
Leoberium/CS
|
/stepik_data_structures/network_packets.py
|
UTF-8
| 942
| 3.296875
| 3
|
[] |
no_license
|
import sys
class Processor:
def __init__(self, size, n):
self.times = [(-1, -1)] * n
self.free = size
self.queue = []
self.cnt = 0
    def add_packet(self, a, d):
        # Processing starts at the arrival time, or when the last queued
        # packet finishes, whichever is later.
        time = a
        while self.queue and self.times[self.queue[0]][1] <= a:
            self.queue.pop(0)
            self.free += 1
        if self.queue:
            time = self.times[self.queue[-1]][1]
        if self.free:
            self.free -= 1
            self.times[self.cnt] = (time, time + d)
            self.queue.append(self.cnt)
        # Advance the slot index even for dropped packets, so their
        # pre-filled (-1, -1) entry stays aligned with the input order.
        self.cnt += 1
def output(self):
for start, end in self.times:
print(start)
def main():
size, n = map(int, sys.stdin.readline().split())
p = Processor(size, n)
for _ in range(n):
arrival, duration = map(int, sys.stdin.readline().split())
p.add_packet(arrival, duration)
p.output()
if __name__ == '__main__':
main()
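# Worked example (stdin format: "buffer_size n", then n "arrival duration"
# lines). With input:
#   1 2
#   0 1
#   0 1
# the first packet starts at 0; the second arrives while the buffer of
# size 1 is still full, so it is dropped and -1 is printed:
#   0
#   -1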
| true
|
21b7e43bb7d5eda5c41ca5496a2d1923ca8efae3
|
Python
|
abrarhamzah/Hearty
|
/models/classification/Naive_Bayes/naiveBayes.py
|
UTF-8
| 2,303
| 3.078125
| 3
|
[] |
no_license
|
#########################################################
# File : naiveBayes.py
# Project : FIT3164 project
#
# Date : 5/10/2020
# Author : Abrar Fauzan Hamzah
#
# Purpose : Implement Naive Bayes classifier
# & test Gaussian, multinomial and Bernoulli
########################################################
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score,f1_score,roc_auc_score,recall_score,precision_score
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.model_selection import cross_val_score
#Read external data
Cad_train = pd.read_csv('./data/filtered_features/heart_train.csv')
Cad_test = pd.read_csv('./data/filtered_features/heart_test.csv')
# extract X and Y
Y_train = Cad_train.CAD_Yes.values
Y_test = Cad_test.CAD_Yes.values
X_train = Cad_train.drop(['CAD_Yes'], axis = 1)
X_test = Cad_test.drop(['CAD_Yes'], axis = 1)
#Classifier Gaussian
Gaus_train = GaussianNB()
Gaus_train.fit(X_train, Y_train)
expected = Y_test
Gaus_predicted = Gaus_train.predict(X_test)
# summarize the fit of the model
#print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, Gaus_predicted))
# Bernoulli
binomial = BernoulliNB()
binomial.fit(X_train, Y_train)
binomial_predicted = binomial.predict(X_test)
# summarize the fit of the model
#print(metrics.classification_report(expected_test, predicted_test))
print(metrics.confusion_matrix(expected, binomial_predicted))
# Bernoulli model accuracy
print(binomial.score(X_test, Y_test))
#cv 10
scores = cross_val_score(binomial, X_test, Y_test, cv=10, scoring="accuracy")
meanScore = scores.mean()
print(meanScore * 100)
# Multinomial
multi = MultinomialNB()
multi.fit(X_train, Y_train)
expected = Y_test
predicted = multi.predict(X_test)
# summarize the fit of the model
#print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
print(multi.score(X_train, Y_train))
scores = cross_val_score(multi, X_test, Y_test, cv=10, scoring="accuracy")
meanScore = scores.mean()
print(scores)
print(meanScore * 100)
# Gaussian : 71.4% (w/o cv) & 80.11% (with cv)
# Bernoulli : 83.52% (w/o cv) & 84.89% (with cv)
# Multinomial : 58.24% (w/o cv) & 67% (with cv)
| true
|
4913910752d8e22574ba640551da0b59adcb2719
|
Python
|
alexlaplap/Pycharm-Exercises
|
/Exercise 06-Character Length/main.py
|
UTF-8
| 204
| 3.9375
| 4
|
[] |
no_license
|
a = input('Please enter name: ')
b = len(a)
if b < 3:
    print('Name must be at least 3 characters long.')
elif b > 50:
print('Name must not exceed 50 characters long.')
else:
print('Name is registered.')
| true
|
b8dae593bb847a64e5fc5755c82dbe83a8c5f44e
|
Python
|
woider/runoob
|
/sorting/python_sort.py
|
UTF-8
| 249
| 2.765625
| 3
|
[] |
no_license
|
'''
Built-in sort
'''
from exec_time import exectime
from random_list import load_random_array
@exectime
def python_sort(array):
array.sort()
return array
array = load_random_array('numbers.json')
print(python_sort(array))
| true
|
d54b40791dd81650f2ebf736c25771fe901c0a52
|
Python
|
christinaWiss/math_projects
|
/math_projects/ShortRatesModels/PythonCode/dothan.py
|
UTF-8
| 965
| 2.75
| 3
|
[] |
no_license
|
import constants
import main
import matplotlib.pyplot as plt
"""The dothan Model model: Here the differential equation is given by dr(t)=βr(t)dt +σr(t)dW∗(t).
"""
beta = -.06
sigma = 0.1
initial_r = 0.08
def short_rate_dothan_model(beta, sigma, initial_r):
Brownian_Motion = [i / (len(constants.gather_unit)) ** 0.5 for i in main.random_walk(prob=0.5, gather=constants.gather_unit)]
short_rate = [initial_r]
last = initial_r
last_b = 0
for i in Brownian_Motion:
short_rate += [last + (beta * last)/constants.width + sigma * last * (i-last_b)]
last = short_rate[-1]
last_b = i
short_rate.pop()
return short_rate
r_path3 = short_rate_dothan_model(beta, sigma, .1)
r_path2 = short_rate_dothan_model(beta, sigma, .1)
r_path = short_rate_dothan_model(beta, .0, .1)
plt.plot(constants.gather_unit, r_path3)
plt.plot(constants.gather_unit, r_path2)
plt.plot(constants.gather_unit, r_path)
plt.show()
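# Note: the Dothan SDE is a geometric Brownian motion, so it has the closed
# form r(t) = r(0) * exp((beta - sigma**2/2) * t + sigma * W(t)).
# A self-contained sketch of the exact solution (independent of the
# constants/main helpers above; the grid and horizon are chosen arbitrarily):
import numpy as np
t_grid = np.linspace(0, 1, 501)
dW = np.random.normal(0, np.sqrt(np.diff(t_grid)))
W = np.concatenate([[0.0], np.cumsum(dW)])
r_exact = 0.1 * np.exp((beta - sigma**2 / 2) * t_grid + sigma * W)
plt.plot(t_grid, r_exact)
plt.show()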
| true
|
6014c4444fb3b89c61b612b4d488848a56b82964
|
Python
|
leeo1116/PyCharm
|
/Algorithms/leetcode_charlie/030_substring_with_concatenation_of_all_words.py
|
UTF-8
| 1,220
| 4.28125
| 4
|
[] |
no_license
|
"""
You are given a string, s, and a list of words, words, that are all of the same length. Find all starting indices of
substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.
For example, given:
s: "barfoothefoobarman"
words: ["foo", "bar"]
You should return the indices: [0,9].
(order does not matter).
"""
from collections import Counter


class Solution(object):
    def find_substring(self, s, words):
        """
        find all indices of words concatenation in a string
        :type s: str
        :type words: list
        :param s: string that contains words
        :param words: words that have the same length
        :return: indices of all concatenation words in string s
        :rtype: list[int]
        """
        if len(words) == 0:
            return []
        word_len = len(words[0])
        total_len = word_len * len(words)
        # Count each word so duplicates in `words` must be matched
        # exactly as many times as they occur.
        word_count = Counter(words)
        index = []
        for i in range(len(s) - total_len + 1):
            seen = Counter()
            for j in range(len(words)):
                word = s[i + j * word_len:i + j * word_len + word_len]
                if word not in word_count:
                    break
                seen[word] += 1
                if seen[word] > word_count[word]:
                    break
            else:
                index.append(i)
        return index
s = Solution()
print(s.find_substring("barfoothefoobarman", ["foo", "bar"]))
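# The Counter check matters when `words` contains duplicates, e.g.:
print(s.find_substring("wordgoodgoodgoodbestword",
                       ["word", "good", "best", "word"]))  # -> []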
| true
|
28dfed963268573039baf80a37e6ef29afe16f7b
|
Python
|
ulillilu/MachineLearning-DeepLearning
|
/01-04.All_Download_From_Link/cr_path.py
|
UTF-8
| 303
| 3.140625
| 3
|
[] |
no_license
|
# Convert relative paths to absolute URLs
from urllib.parse import urljoin
url = "http://example.com/html/a.html"
print( urljoin(url, "b.html") )
print( urljoin(url, "sub/c.html") )
print( urljoin(url, "../index.html") )
print( urljoin(url, "../img/hoge.png") )
print( urljoin(url, "../css/hoge.css") )
| true
|
a6dea4f0755681d6fa631e8d2b1328f10c030e71
|
Python
|
lanking520/CYTON_VETA_FILE
|
/Internet Model/Client side/gamepad (Always send).py
|
UTF-8
| 1,070
| 2.8125
| 3
|
[] |
no_license
|
import time
import pygame
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
import socket
pygame.init()
joy = pygame.joystick.Joystick(0)
joy.init()
out = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
s = socket.socket() # Create a socket object
host = '192.168.32.191' # Server IP address (hardcoded)
port = 12345 # Reserve a port for your service.
s.bind((host, port)) # Bind to the port
s.listen(5) # Now wait for client connection.
c, addr = s.accept() # Establish connection with client.
print 'Got connection from', addr
def joystick():
it = 0 #iterator
pygame.event.pump()
#Read input from the two joysticks
for i in range(0, joy.get_numaxes()):
out[it] = joy.get_axis(i)
it+=1
#Read input from buttons
for i in range(0, joy.get_numbuttons()):
out[it] = joy.get_button(i)
it+=1
out[16] +=1
return out
def network():
package = joystick()
c.send(str(package))
package_timer = simplegui.create_timer(50,network)
package_timer.start()
| true
|
1a38483cd4b38802fe04b121692fee8e0b4f6e85
|
Python
|
stat17-hb/algorithm
|
/geeksforgeeks/n진수 구하기.py
|
UTF-8
| 816
| 3.5625
| 4
|
[] |
no_license
|
#%%
n = 2
number = 32
from collections import deque
deq = deque([number])
res = deque()
while True:
temp = deq.popleft()
    deq.append(temp // n)  # quotient
    res.appendleft(temp % n)  # remainder
    if temp // n == 0: break
res
#%%
n = 2
number = 32
res = []
while True:
    q = number // n  # quotient
    res[:0] = [number % n]  # remainder
    if number // n == 0: break  # stop when the quotient reaches 0
number = q
res
#%%
n = 26
number = 26
res = ""
while True:
    q = number // n  # quotient
    res = str(number % n) + res  # remainder
    if number // n == 0: break  # stop when the quotient reaches 0
number = q
int(res)
#%%
n = 5
number = 10**5
res = ""
while True:
q = number // n # 몫
res = str(number % n) + res # 나머지
if number // n == 0 : break # 몫이 0일 때 멈춤
number = q
int(res)
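#%%
# Sanity check against Python's built-in base-2 formatting:
print(format(32, 'b'))  # '100000', matching the base-2 result above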
| true
|
1cef718723094f8741b730dba5c1b74a47c2d8de
|
Python
|
ken0105/competitive-programming
|
/procon-archive/atcoder.jp/dp/dp_h/Main.py
|
UTF-8
| 846
| 2.78125
| 3
|
[] |
no_license
|
def main():
h, w = map(int, input().split())
a = []
for _ in range(h):
a.append(input())
route = [[0] * w for _ in range(h)]
has_wall_w, has_wall_h = False, False
for i in range(h):
for j in range(w):
if i == 0 and a[i][j] == "." and not has_wall_h:
route[i][j] = 1
elif i == 0 and a[i][j] == "#":
has_wall_h = True
elif j == 0 and a[i][j] == "." and not has_wall_w:
route[i][j] = 1
elif j == 0 and a[i][j] == "#":
has_wall_w = True
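    # Standard grid-path DP: a free cell can only be entered from the left
    # or from above, so route[i][j] = route[i-1][j] + route[i][j-1].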
for i in range(1, h):
for j in range(1, w):
if a[i][j] == "#":
continue
route[i][j] = route[i - 1][j] + route[i][j - 1]
print(route[h - 1][w - 1] % (10 ** 9 + 7))
if __name__ == '__main__':
main()
| true
|
7031537917e2179a3e115f1a16ce02a816232b81
|
Python
|
fandiandian/python_learning_excerise
|
/part1.4习题/minima_in_permutation(最小置换).py
|
UTF-8
| 1,132
| 4.21875
| 4
|
[] |
no_license
|
# minima in permutation
# Generate a 2-D random array with m rows and n columns, and print the
# average number of left-to-right minima in the generated permutations.
# Implemented by defining and then calling a function.
# (My understanding of this problem statement may be off.)
import random
m = int(input('Enter the number of rows\n'))
n = int(input('Enter the number of columns\n'))
# random values are drawn from [1, 20]
# define the function
def minima_in_permutation(a,b):
    # build the random 2-D array
    rand_list = [[random.randrange(1,21) for i in range(b)] for j in range(a)]
    # times stores a (minimum, count) pair for each row
    times = []
    for ii in range(a):
        # count how often the row minimum occurs
        time = 0
        minima = min(rand_list[ii])
        for jj in range(b):
            if rand_list[ii][jj] == minima:
                time += 1
        times += [(minima,time)]
    print(times)
    total = 0
    for iii in range(len(times)):
        total += times[iii][1]
    print('Average count of the minimum per row: {}'.format(total/len(times)))
# call the function
minima_in_permutation(m,n)
| true
|
6cbe8cb84b88dc7f1613e77ed18cfab7bb37bfb6
|
Python
|
jimmyhzuk/morning-stock-market-emails
|
/ScrapeInformation/pe_ratio.py
|
UTF-8
| 3,332
| 2.78125
| 3
|
[] |
no_license
|
import requests
import time
year_month = time.strftime("%Y-%m-%d")
r = requests.get('https://financialmodelingprep.com/api/v4/industry_price_earning_ratio?date=' + year_month +'&exchange=NYSE&apikey=e49e22b0865cfeea71aa0771ddf965a1')
print(year_month)
ratio = r.json()
# for x in ratio:
# print(x)
year = ['2021']
# year_month already contains the year, so only a month-day part is appended here
month_day = time.strftime("%m-%d")
for x in year:
    print('https://financialmodelingprep.com/api/v4/sector_price_earning_ratio?date='+ x + '-'+ month_day +'&exchange=NYSE&apikey=e49e22b0865cfeea71aa0771ddf965a1')
    sectors = requests.get('https://financialmodelingprep.com/api/v4/sector_price_earning_ratio?date='+ x +'-'+ month_day +'&exchange=NYSE&apikey=e49e22b0865cfeea71aa0771ddf965a1')
    sectors = sectors.json()
for i in sectors:
if i['sector'] == 'Technology':
print(x,": ",i)
apple = []
times = []
new_group = []
stocks = ['AAPL','MA','BR','CSGP','GLOB','CDNS','PETS','NVR','TWLO','AMCX','FDX','CMG','TREX','PINS']
# stocks = ['AAPL','MA']
for stock in stocks:
try:
i = 0
price = requests.get('https://financialmodelingprep.com/api/v3/historical-price-full/'+stock+'?from=2021-03-12&to=2021-07-09&apikey=e49e22b0865cfeea71aa0771ddf965a1')
# price = requests.get('https://financialmodelingprep.com/api/v3/historical-chart/30min/'+ stock +'?apikey=e49e22b0865cfeea71aa0771ddf965a1')
price = price.json()
price = price['historical']
# print(price)
total_len = len(price)
for x in price:
if x['date'] > '2021-06-18 09:30:00':
i += 1
# print(i," of ", total_len)
price = x['high']
date = x['date']
json = {'price': price, 'date': date}
apple.append(json)
# if stock == 'PINS' and x['date']:
# print(x)
stock_price = [x['price'] for x in apple]
minimum = min(stock_price)
maximum = max(stock_price)
# print(minimum)
# print(maximum)
for x in apple:
if x['price'] == minimum:
min_value = x
# print(min_value)
# print(min_value['date'])
low = min_value['price']
for x in apple:
if x['price'] == maximum and min_value['date'] < x['date']:
max_value = x
print(max_value['date'])
print(min_value)
print("max",max_value)
high = max_value['price']
        difference = round((1 - low / high) * 100, 2)
print(stock,": ",difference,"%")
print('\n')
apple.clear()
times.clear()
    except Exception:
        pass
# print(low_date)
# for x in times:
# stock_date = x['time']
# unix = requests.get('https://showcase.api.linx.twenty57.net/UnixTime/tounix?date=' + stock_date )
# unix = float(unix.json())
# if unix > float(low_date):
# new_group.append(x['price'])
# highest = max(new_group)
# for x in times:
# if x['price'] == highest:
# print(x)
# apple.clear()
# times.clear()
# difference = float(((-(low/highest)+1)*100).__round__(2))
# print(stock,": ",difference,"%")
# apple.clear()
# times.clear()
# print('\n')
# print(unix.json())
| true
|
fa31f5e2ba22494e270527767180f8155799e129
|
Python
|
SUDARSHANSHARMA1998/WebMaps
|
/Map1.py
|
UTF-8
| 1,218
| 2.828125
| 3
|
[] |
no_license
|
import folium
import pandas
data=pandas.read_excel("Volcanoes.xlsx",sheet_name=0)
data = data.dropna(how='any',axis=0)
lat = list(data["Latitude"])
lon = list(data["Longitude"])
elev = list(data["Elevation"])
name= list(data["Volcano Name"])
def color_producer(elevation):
if elevation < 1000:
return 'green'
elif 1000 <= elevation < 3000:
return 'orange'
else:
return 'red'
map = folium.Map(location=[50.21,8.80], zoom_start=2, tiles="Mapbox Bright")
fgv = folium.FeatureGroup(name="Volcanoes")
for lt, ln, el,s in zip(lat, lon, elev,name):
fgv.add_child(folium.CircleMarker(location=[lt, ln], radius = 4, popup=str(s)+":"+str(el)+" m",
fill_color=color_producer(el), fill=True, color = 'grey', fill_opacity=0.9))
fgp = folium.FeatureGroup(name="Population")
fgp.add_child(folium.GeoJson(data=open('world.json', 'r', encoding='utf-8-sig').read(),
style_function=lambda x: {'fillColor':'green' if x['properties']['POP2005'] < 10000000
else 'orange' if 10000000 <= x['properties']['POP2005'] < 30000000 else 'red','weight':1}))
map.add_child(fgv)
map.add_child(fgp)
map.add_child(folium.LayerControl())
map.save("Map1.html")
| true
|
4e11dce24d9fa6ec16ca8a2f5e41107da40dc5ce
|
Python
|
zerojpyle/learningPy
|
/ex10.py
|
UTF-8
| 502
| 3.921875
| 4
|
[] |
no_license
|
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
print(fat_cat)
# Practice escape character "\b"
test = "{}{}{}{}"
print(test.format('test','\b','\b','?'))
print(test.format('test','\b','','?'))
print(test.format('test','1','2','3'))
print(test.format('test','\b\b\b\b','?','?'))
print('...^...^...^...^')
| true
|
87a6af65ccfc471025da4bb745d333e02488deeb
|
Python
|
dariaserkova/helper_scripts
|
/git-apies/archivation.py
|
UTF-8
| 1,643
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
'''
Docs to be written
'''
import yaml
import gitlab
import sys
#####################
## Reading the configuration
#####################
try:
with open("config.yml", 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)  # safe_load avoids executing arbitrary YAML tags
except IOError:
print('Can not find config.yml in the current directory')
sys.exit(1)
try:
GITLAB_TOKEN = cfg['GITLAB_TOKEN']
GITLAB_ENDPOINT = cfg['GITLAB_ENDPOINT']
except KeyError as exc:
print("No such option in config.yml - {}".format(str(exc)))
sys.exit(1)
#####################
## Connection
#####################
def connection():
global gitlab_conn
gitlab_conn = gitlab.Gitlab(GITLAB_ENDPOINT,GITLAB_TOKEN)
gitlab_conn.auth()
#########################
## Check if group exists
#########################
def check_group(group_name):
try:
group = gitlab_conn.groups.get(group_name)
except (gitlab.GitlabHttpError, gitlab.GitlabGetError, gitlab.GitlabError):
group = None
return group
def main():
global gitlab_group_name
gitlab_group_name = sys.argv[1]
connection()
gitlab_group = check_group(gitlab_group_name)
for group_project in gitlab_group.projects.list(all=True):
try:
project = gitlab_conn.projects.get(group_project.get_id())
print(project.attributes.get('path_with_namespace') + '\n')
project.archive()
        except gitlab.exceptions.GitlabCreateError as exc:
print("Get an error for {}:\n{}".format(project, str(exc)))
continue
if __name__ == "__main__":
main()
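# Example config.yml (hypothetical values), matching the two keys read above:
#
# GITLAB_TOKEN: "glpat-xxxxxxxxxxxxxxxxxxxx"
# GITLAB_ENDPOINT: "https://gitlab.example.com"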
| true
|
cb1bfe7e9ff6642f4f7977bf2609e9ffd3fc57d1
|
Python
|
DQDH/Algorithm_Code
|
/ProgramForLeetCode/LeetCode/75_sortColors.py
|
UTF-8
| 1,413
| 3.34375
| 3
|
[] |
no_license
|
class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
        left, current, right = 0, 0, len(nums) - 1
        while current <= right:
            if left > right:
                break
            if nums[current] == 0:
                nums[left], nums[current] = nums[current], nums[left]
                left += 1
                current += 1
            elif nums[current] == 2:
                nums[right], nums[current] = nums[current], nums[right]
                right -= 1
            else:
                current += 1
return nums
def sortColors1(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
count=[0,0,0]
for nums_i in nums:
if nums_i==0:
count[0]+=1
elif nums_i==1:
count[1]+=1
else:
count[2]+=1
i=0
while count[0]>0:
nums[i]=0
i+=1
count[0]-=1
while count[1]>0:
nums[i] = 1
i += 1
count[1] -= 1
while count[2]>0:
nums[i] = 2
i += 1
count[2] -= 1
return nums
print(Solution().sortColors([2,0,1]))
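# Trace of the three-pointer pass on [2, 0, 1]:
#   current=0 sees 2 -> swap with right: [1, 0, 2], right=1
#   current=0 sees 1 -> current=1
#   current=1 sees 0 -> swap with left:  [0, 1, 2], left=1, current=2
#   current > right, done -> [0, 1, 2]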
| true
|
6d8ce500b8d8845ca66bd1949681e7ad7637b2d2
|
Python
|
barmako/cv_cifar10
|
/BOWPreprocessor.py
|
UTF-8
| 1,362
| 2.875
| 3
|
[] |
no_license
|
import random
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from SIFTPreprocessor import SIFTPreprocessor
class SIFTBOWPreprocessor:
def __init__(self, decorated=SIFTPreprocessor(concat=False), n_words=1000, kmeans_train_size=5000):
self.kmeans_train_size = kmeans_train_size
self.n_words = n_words
self.decorated = decorated
self.kmeans = MiniBatchKMeans(n_words)
self.kmeans_model = None
def preprocess(self, data):
self.decorated.preprocess(data)
base_descs = self.decorated.get_descriptors(data)
sifts = []
for sifts_array in base_descs:
for sift in sifts_array:
sifts.append(sift)
print "Clustering sifts (shape %s) with k-means to build a codebook" % str(np.shape(sifts))
kmeans_train_data = random.sample(sifts, self.kmeans_train_size)
self.kmeans_model = self.kmeans.fit(kmeans_train_data)
def get_descriptors(self, data):
data_sifts = self.decorated.get_descriptors(data)
return [self.quantize_descriptor(sifts) for sifts in data_sifts]
def quantize_descriptor(self, sifts):
image_desc = np.zeros(self.n_words)
words = self.kmeans_model.predict(sifts)
for word in words:
image_desc[word] = image_desc[word] + 1
return image_desc
| true
|
8a0bda9cad85e4c958e677d16855b9e8df7b4d81
|
Python
|
ckoryom/MiningGitHub
|
/Application/menu.py
|
UTF-8
| 3,601
| 2.671875
| 3
|
[] |
no_license
|
'''
Created on May 6, 2014
@author: ckoryom
'''
from Application.mining import Mining
from Model.parameters import Parameters
class Menu(object):
mining = Mining()
def selectMenu (self):
menuId = 0
while (int(menuId) != 1 and int(menuId) != 2 and int(menuId) != 3 and int(menuId) != 4):
print "1) Mine Repository Data to XML"
print "2) Mine Repository Data to R"
print "3) Mine Repository Data"
print "4) Exit"
menuId = raw_input("Selection:")
if (int(menuId) == 1):
self.mining.startMiningProcedure()
elif (int(menuId) == 2):
print "Using R Module..."
parameters = Parameters()
parameters.values["mineCommits"] = False
parameters.values["writeXML"] = False
parameters.values["useR"] = True
parameters.values["issuesLimit"] = 10
parameters.values["labels"] = "bug"
self.mining.menu = self
self.mining.parameters = parameters
self.mining.startMiningProcedure()
elif (int(menuId) == 3):
self.collectDataMenu()
elif (int(menuId) == 4):
return ""
def groupOption(self):
print "Group data size:"
size = raw_input("size:")
return size
def collectDataMenu(self):
menuId = 0
while (int(menuId) != 1 and int(menuId) != 2):
print "1) Export Data to CSV"
print "2) Read CSV File"
menuId = raw_input("Selection:")
if (int(menuId) == 1):
self.exportToCsv()
elif (int(menuId) == 2):
self.readCsvFile()
def exportToCsv(self):
sinceDate = raw_input("Mine Issues Since: (DD/MM/YYYY)")
untilDate = raw_input("Mine Issues Until: (DD/MM/YYYY)")
parameters = Parameters()
parameters.values["sinceDate"] = sinceDate
parameters.values["untilDate"] = untilDate
parameters.values["mineCommits"] = False
parameters.values["writeXML"] = False
parameters.values["useR"] = False
parameters.values["exportToCsv"] = True
parameters.values["issuesLimit"] = None
parameters.values["labels"] = "bug"
self.mining.menu = self
self.mining.parameters = parameters
self.mining.startMiningProcedure()
def collectData(self):
print "Finished"
def readCsvFile(self):
print ""
def RMenu(self, rModule):
menuId = 0
while (int(menuId) != 1 and int(menuId) != 2 and int(menuId) != 3 and int(menuId) != 4):
print "1) Get MTTR"
print "2) Get MTTF"
print "3) Get MTBF"
print "4) Get ALL"
menuId = raw_input("Selection:")
if (int(menuId) == 1):
rModule.calculateMeanTimeToRepair(self.groupOption())
elif (int(menuId) == 2):
rModule.calculateMeanTimeToFailure(self.groupOption())
elif (int(menuId) == 3):
rModule.calculateMeanTimeBetweenFailure(self.groupOption())
elif (int(menuId) == 4):
            # Note: this assignment aliases rModule rather than copying it,
            # so the re-assignments below restore the same object.
            untouchedModule = rModule
MTBF = rModule.calculateMeanTimeBetweenFailure(self.groupOption())
rModule = untouchedModule
MTTR = rModule.calculateMeanTimeToRepair(self.groupOption())
rModule = untouchedModule
MTTF = rModule.calculateMeanTimeToFailure(self.groupOption())
availability = rModule.calculateAvailability(MTBF, MTTR)
| true
|
fd99ccb6903909dc5ead2da77e2e65b44f764e79
|
Python
|
patchiu/math-programmmmm
|
/ml/gradient descent 1d.py
|
UTF-8
| 1,460
| 4.0625
| 4
|
[] |
no_license
|
#gradient descent 1d
import numpy as np
import matplotlib.pyplot as plt
# Objective function: y = x^2
def func(x): return np.square(x)
# First derivative of the objective: dy/dx = 2*x
def dfunc(x): return 2 * x
def GD(x_start, df, epochs, lr):
""" 梯度下降法。給定起始點與目標函數的一階導函數,求在epochs次反覆運算中x的更新值
:param x_start: x的起始點
:param df: 目標函數的一階導函數
:param epochs: 反覆運算週期
:param lr: 學習率
:return: x在每次反覆運算後的位置(包括起始點),長度為epochs+1
"""
xs = np.zeros(epochs+1)
x = x_start
xs[0] = x
for i in range(epochs):
dx = df(x)
        # v is the step applied to x
v = - dx * lr
x += v
xs[i+1] = x
return xs
# Main
# starting point
x_start = 5
# number of iterations
epochs = 15
# learning rate
lr = 0.3
# run gradient descent
x = GD(x_start, dfunc, epochs, lr=lr)
print (x)
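# For f(x) = x^2 the update is x <- x - lr * 2x = (1 - 2*lr) * x, so with
# lr = 0.3 each step multiplies x by 0.4 and the iterates shrink
# geometrically toward the minimum at 0.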
# Sample output for x_start = -5, epochs = 5: [-5. -2. -0.8 -0.32 -0.128 -0.0512]
color = 'r'
#plt.plot(line_x, line_y, c='b')
from numpy import arange
t = arange(-6.0, 6.0, 0.01)
plt.plot(t, func(t), c='b')
plt.plot(x, func(x), c=color, label='lr={}'.format(lr))
plt.scatter(x, func(x), c=color, )
plt.legend()
plt.show()
| true
|
145437ddfadf9c210bf34d37104791ff8d3b3c74
|
Python
|
matt-ankerson/racing_prediction
|
/Scraper/event_scraper.py
|
UTF-8
| 8,526
| 2.890625
| 3
|
[] |
no_license
|
import requests
import re
from datetime import date
from bs4 import BeautifulSoup
from race_event_types import Event
from race_event_types import Race
from race_event_types import Competitor
from race_event_types import Bet
def ScrapeEvent(race_result_url):
month_dict = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
r = requests.get(race_result_url)
# Create soup from web request result content
content = r.content
soup = BeautifulSoup(content, 'html5lib')
# Get the event name
event_element = \
soup.find('div', class_='infoPanelBar navbar').find('strong')
event_name = event_element.contents[0].strip()
# Get the event date
event_date_arr = event_name.split(' ')[-3:]
event_date = date(int(event_date_arr[2]), month_dict[event_date_arr[1]],
int(event_date_arr[0]))
# Create the event
event = Event(name=event_name, date=event_date, races=[])
# Take the race containers from the soup
# - This collection of elements contains info
# about the races at this event.
race_containers = soup.find_all('div', class_='content')
for race_container in race_containers:
# race_container is a tag
try:
race_title = race_container.find('div',
class_='raceTitle')\
.contents[0].strip()
except AttributeError:
race_title = race_container.find('div', class_='header bold')
race_title = race_title.text.strip().replace('index', '')
race_det_arr = [x.rstrip() for x in race_title.split()]
new_race = Race(race_number=race_det_arr[1],
distance=race_det_arr[race_det_arr.index('m') - 1],
stake=race_container.find('th', text='Stake:')
.fetchNextSiblings()[0].contents[0],
track_condition=race_container.find('th', text='Track:')
.fetchNextSiblings()[0].contents[0],
weather=race_container.find('th', text='Weather:')
.fetchNextSiblings()[0].contents[0],
bets=[],
winning_margins=[],
winner_owners=[],
winner_trainer=None,
winner_breeding=None,
sub=None,
winner_time=None,
competitors=[])
# add first 3 results (if there are any)
try:
placing_results = \
race_container.find('table', class_='raceResults')\
.find('tbody').find_all('tr')
win_results = placing_results[0].findChildren()
second_results = placing_results[1].findChildren()
third_results = placing_results[2].findChildren()
# first place
new_race.competitors.append(Competitor(
place_in_race=(
len(new_race.competitors) + 1),
number=win_results[0].contents[0],
name=win_results[1].contents[0],
jockey=win_results[2].contents[0],
win=win_results[3].contents[0].strip(),
place=win_results[4]
.contents[0].strip()))
# second place
new_race.competitors.append(Competitor(
place_in_race=(
len(new_race.competitors) + 1),
number=second_results[0].contents[0],
name=second_results[1].contents[0],
jockey=second_results[2].contents[0],
win=None,
place=second_results[4].contents[0]
.strip()))
# third place
new_race.competitors.append(Competitor(
place_in_race=(
len(new_race.competitors) + 1),
number=third_results[0].contents[0],
name=third_results[1].contents[0],
jockey=third_results[2].contents[0],
win=None,
place=third_results[4]
.contents[0].strip()))
except:
pass
# add remaining competitors (if any are listed)
try:
also_ran = race_container.find('strong', text='ALSO RAN:')\
.parent.text.strip()
# also_ran_arr is quite a dirty array
also_ran_arr = [x.replace('ALSO RAN:\n', '')
for x in also_ran.split(',')]
for competitor in also_ran_arr:
competitor_breakdown = competitor.split('-')
# add other competitors to collection
new_race.competitors.append(
Competitor(number=competitor_breakdown[0],
place_in_race=(len(new_race.competitors) + 1),
name=competitor_breakdown[1],
jockey=re.sub(r'\([^)]*\)', '',
competitor_breakdown[2]),
win=None,
place=None,
lengths_behind_leader=None))
except:
pass
# scrape bet information (of which there are an arbitrary number)
try:
bets = race_container.find('div', text='Bet Type')\
.parent.parent.parent.parent.find('tbody').find_all('tr')
except:
bets = []
for bet_row in bets:
clean_bet_row = [x.text.strip() for x in bet_row.findChildren()]
runners = re.split(r'[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,./<>?]',
clean_bet_row[1])
runners = [x.strip() for x in runners]
new_race.bets.append(Bet(bet_type=clean_bet_row[0],
runners=runners,
dividend=clean_bet_row[2]))
# scrape remaining race information
try:
race_metadata = race_container\
.find('strong', text='Winning Margins:')\
.parent.parent.text.split('\n')
race_metadata = \
[x.strip().split(':') for x in race_metadata if x != '']
race_metadata = \
[item for sublist in race_metadata for item in sublist]
race_metadata = \
[x for x in race_metadata if x != '']
except:
race_metadata = []
# sometimes these attributes are missing
try:
new_race.winning_margins = \
race_metadata[race_metadata.index('Winning Margins') + 1]
except:
pass
try:
new_race.winner_owners = \
race_metadata[race_metadata.index('Owners') + 1]
except:
pass
try:
new_race.winner_trainer = \
race_metadata[race_metadata.index('Trainer') + 1]
except:
pass
try:
new_race.winner_breeding = \
race_metadata[race_metadata.index('Breeding') + 1]
except:
pass
try:
new_race.sub = \
race_metadata[race_metadata.index('SUB') + 1]
except:
pass
try:
winners_time = \
race_metadata[race_metadata.index('Winners Time') + 1:
race_metadata.index('Winners Time') + 3]
new_race.winner_time = winners_time[0] + ':' + winners_time[1]
except:
pass
# append this race to the event
event.races.append(new_race)
return event
# event = ScrapeEvent("https://ebet.tab.co.nz/results/DBAI-reslt03251700.html")
# print(str(event))
| true
|
f86ab10f51621e1828f0e14ded911f9a6ebf2b34
|
Python
|
alexandraback/datacollection
|
/solutions_1483488_1/Python/rfw/actually_c.py
|
UTF-8
| 839
| 3.25
| 3
|
[] |
no_license
|
import math
import multiprocessing
import functools
def rotations(n):
results = []
expn = int(math.log(n, 10))
exp = 10 ** expn
for _ in xrange(0, expn):
n, r = divmod(n, 10)
n += r * exp
results.append(n)
return frozenset(results)
def n_rotations(p, a, b):
results = set()
for r in p.map(functools.partial(rots_for, a, b), xrange(a, b)):
results |= r
return len(results)
def rots_for(a, b, n):
results = set()
for m in rotations(n):
if a <= m <= b and n != m:
results.add(frozenset([n, m]))
return results
if __name__ == "__main__":
p = multiprocessing.Pool(20)
for i in xrange(1, int(raw_input()) + 1):
x, y = [ int(j) for j in raw_input().split(" ") ]
print "Case #{0}: {1}".format(i, n_rotations(p, x, y))
| true
|
0d0176f631237c0ed0be8fe261d80b91276049a3
|
Python
|
00116/TouPy
|
/hai.py
|
UTF-8
| 1,434
| 3.484375
| 3
|
[] |
no_license
|
# Manages tile kinds and related tile data
class Hai:
KIND = {0: '萬', 1: '筒', 2: '索', 3: '東', 4: '南', 5: '西', 6: '北', 7: '白', 8: '発', 9: '中'}
AKADORA = {16: '赤5萬', 52: '赤5筒', 88: '赤5索'}
    # Passing an integer from 0 to 135 as number0to135 generates that tile's data.
    # self.kind runs 0-3, meaning characters (萬子), circles (筒子), bamboo (索子), and honors.
    # self.number is the tile number minus 1 for suited tiles; for honors it counts
    # 0..6 through East, South, West, North, White, Green, Red (東南西北白発中).
    # self.akaari is True on every tile in games that use red fives (red dora).
    # self.akahai is True when this tile itself is a red five.
def __init__(self, number0to135, aka=False):
self.number0to135 = number0to135
self.akaari = aka
self.akahai = False
        # handling for suited (number) tiles
if self.number0to135 < 108:
self.kind = self.number0to135 // 36
self.number = self.number0to135 // 4 - self.kind * 9
if aka and self.number0to135 in self.AKADORA:
self.str = self.AKADORA[self.number0to135]
self.akahai = True
else:
self.str = str(self.number + 1) + self.KIND[self.kind]
        # handling for honor tiles
else:
self.kind = 3
self.number = (self.number0to135 - 108) // 4
self.str = self.KIND[self.kind + self.number]
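# Quick demonstration of the mapping (values follow from the tables above):
#   Hai(0).str            -> '1萬' (first character tile)
#   Hai(108).str          -> '東'  (first honor tile)
#   Hai(16, aka=True).str -> '赤5萬', with akahai == True
print(Hai(0).str, Hai(108).str, Hai(16, aka=True).str)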
| true
|
4de0a47906d8206765327d4100256027f1bef214
|
Python
|
kohei-okazaki/work-3g
|
/ha-selenium/src/main/python/common/util.py
|
UTF-8
| 950
| 2.890625
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
'''
Created on 2020/06/09
Common helper functions used by the health-management app
@version: 1.0.0
'''
from src.main.python.login.login_auth import LoginAuth
from src.main.python.login.login_form import LoginForm
def login_default_selenium_user(driver):
'''
    The health-info screen sits behind a login, so perform the login first
@driver Driver
'''
LoginAuth(driver).doLogin(LoginForm({
"mailAddress": "selenium@gmail.com",
"password": "seleniumuser",
}))
def setTextById(driver, element_id, text):
'''
    Types a string into the specified element in the browser
    @param element_id DOM id of the target element
    @param text string to enter
'''
driver.find_element_by_id(element_id).click()
driver.find_element_by_id(element_id).clear()
driver.find_element_by_id(element_id).send_keys(text)
| true
|
80f6470d558ab11a59b0e120f880ced6c08ce201
|
Python
|
saraselis/ProgramcaoOrientadaObjetosEC
|
/Ativades de Sala/Atividade_06/ControleEstoque.py
|
UTF-8
| 8,404
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
#
# # Assignment 06 - OOP
# Design and implement a system that manages the stock of one of these online marketplaces, recording where items are stored, the accepted payment types, and the shipping price for delivery.
# * Database where we manage product stock
# In[1]:
from pymongo import MongoClient
# In[2]:
cliente = MongoClient('mongodb://localhost:27017/')
db = cliente.sistema
produtos = db.produtos
# In[3]:
produtos
# * Library we use to load the model for predicting future sales
# In[4]:
import joblib
# * Logs for monitoring the system
# In[5]:
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# * Custom exceptions for the system
# In[6]:
class ModeloZoado(Exception):
'''
    Raised when the sales-prediction model cannot be imported.
'''
def __init__(self):
super().__init__("Seu modelo por algum motivo não pode ser importado!")
# In[7]:
class Generica(Exception):
'''
    Raised in generic cases not anticipated by the system.
'''
def __init__(self):
super().__init__("Ocorreu um erro, estamos verificando!")
# * Generic class for instantiating products
# In[8]:
class Produto:
'''
    Represents a product handled by the stock system.
'''
def __init__(self, _id: int, nome: str, lugar: str, tipo_pagamento: str, preco_frete: float,
preco_produto: float, quantidade: int):
self._id = _id
self._nome = nome
self._lugar = lugar
self._tipo_pagamento = tipo_pagamento
self._preco_frete = preco_frete
self._preco_produto = preco_produto
self._quantidade = quantidade
    @property
    def nome(self) -> str:
        ''' Returns the product name. '''
        return self._nome
    @nome.setter
    def nome(self, new_name: str):
        '''
        Changes the product name.
        :new_name: new name
        '''
        self._nome = new_name
    @property
    def lugar(self) -> str:
        ''' Returns where the product will be stored. '''
        return self._lugar
    @lugar.setter
    def lugar(self, new_lugar: str):
        '''
        Changes the product location.
        :new_lugar: new location
        '''
        self._lugar = new_lugar
    @property
    def tipo_pagamento(self) -> str:
        ''' Returns the payment type accepted for this product. '''
        return self._tipo_pagamento
    @tipo_pagamento.setter
    def tipo_pagamento(self, new_pag: str):
        '''
        Changes the product payment type.
        :new_pag: new payment type
        '''
        self._tipo_pagamento = new_pag
    @property
    def preco_frete(self) -> float:
        ''' Returns the shipping price for this product. '''
        return self._preco_frete
    @preco_frete.setter
    def preco_frete(self, new_frete: float):
        '''
        Changes the product shipping price.
        :new_frete: new shipping price
        '''
        self._preco_frete = new_frete
    @property
    def preco_produto(self) -> float:
        ''' Returns the product price. '''
        return self._preco_produto
    @preco_produto.setter
    def preco_produto(self, new_preco: float):
        '''
        Changes the product price.
        :new_preco: new price
        '''
        self._preco_produto = new_preco
    @property
    def quantidade(self) -> int:
        ''' Returns the quantity of this product. '''
        return self._quantidade
    @quantidade.setter
    def quantidade(self, new_qtd: int):
        '''
        Changes the product quantity.
        :new_qtd: new quantity
        '''
        self._quantidade = new_qtd
def _decorator(foo):
        ''' Decorator for the predict function. '''
def mostra(self) :
print("Siga Rexa compras nas redes sociais e não perca as melhores ofertas!")
foo(self)
return mostra
@_decorator
def predict(self) -> str:
'''
        Predicts sales based on the product's stock quantity and price.
        The model was trained using the product price and the quantity in
        stock to predict sales of this product.
'''
try:
logging.info('Iniciando a predicao')
modelo = joblib.load('estoque.joblib')
except FileNotFoundError as error:
logging.warning('Excecao estourada')
mensagem = "Não pudemos finalizar sua compra :( mas já estamos verificando!"
raise ModeloZoado
except Exception as error:
logging.warning('Excecao estourada')
mensagem = "Não pudemos finalizar sua compra :( mas vamos verificar o que ocorreu!"
            raise Generica
else:
estoque = self._quantidade
preco = self._preco_produto
input_ = [[estoque, preco]]
venda = modelo.predict(input_)[0]
logging.info('Predicao finalizada')
mensagem = f"A previsão de vendas do produto {self._nome} é de {int(venda)} unidades, então corra já e garanta a sua!"
finally:
logging.info('Processo finalizado')
print("Obrigada por comprar conosco")
print(mensagem)
return mensagem
def __str__(self) -> str:
return "O produto foi instanciado"
# In[9]:
tv = Produto(3, 'tv da sony', 'Setor A', 'Boleto', 19.0, 10.8, 2)
# In[10]:
print(tv)
# In[11]:
tv._id, tv.nome, tv.lugar, tv.tipo_pagamento, tv.preco_frete, tv.preco_produto, tv.quantidade
# In[12]:
tv.predict()
# In[13]:
class Sistema():
    ''' Represents a stock-management system. '''
def __init__(self, nome_sistema: str):
self._nome_sistema = nome_sistema
@property
def nome_sistema(self) -> str:
        ''' Returns the system name. '''
return self._nome_sistema
@nome_sistema.setter
def nome_sistema(self, new_nome: str):
'''
        Changes the system name.
        :new_name: new name
'''
self._nome_sistema = new_nome
def coloca_produto(self, prod: Produto) -> str:
'''
        Inserts the product into the database.
        Params
        :prod: product to insert
'''
produto = produtos.insert_one(prod.__dict__).inserted_id
logging.info('Produto inserido no banco')
return f'Produto: {prod._nome} inserido!'
def mostra_prod(self, prod: Produto) -> dict:
'''
        Shows the product from the database.
        Params
        :prod: product to show
'''
logging.info('Mostrando dados do banco')
return produtos.find_one({"_id": prod._id})
def retira_produto(self, prod: Produto):
'''
        Removes the product from the database.
        Params
        :prod: product to remove
'''
produtos.delete_one({"_id": prod._id})
logging.info('Produto excluido no banco')
def atualiza_produto(self, prod: Produto, attr, new_attr):
'''
        Updates the product in the database.
        Params
        :prod: product to update
'''
produtos.update_one({'_id': prod._id}, {'$set': {attr: new_attr}})
logging.info('Produto atualizado')
@staticmethod
def mostra_todos() -> dict:
        ''' Shows all products in the database. '''
logging.info('Mostrando dados do banco')
return produtos.find()
def __str__(self) -> str:
return "O sistema foi instanciado"
# In[14]:
sis = Sistema('sistemoso')
# In[15]:
print(sis)
# In[16]:
sis.nome_sistema
# In[17]:
sis.coloca_produto(tv)
# In[18]:
sis.mostra_prod(tv)
# In[19]:
sis.atualiza_produto(tv, '_nome', 'Tevelisao')
# In[20]:
sis.mostra_prod(tv)
# In[21]:
sis.retira_produto(tv)
# In[22]:
sis.mostra_todos()
# In[ ]:
| true
|
f4dd417072d632fe028beef1070a1551aa29ddad
|
Python
|
MCRogersI/Panorama
|
/Users/features.py
|
UTF-8
| 3,860
| 3.1875
| 3
|
[] |
no_license
|
from pony.orm import *
# We should later change the pony import so everything is not imported with * (bad practice).
import os
import hashlib
import pandas
from tabulate import tabulate
def createUser(db,name, level,password):
    ''' This method creates a new entry in the Users table of the database.
    If in the future an administrator (a user with level = 1) wants to create a new user,
    they can create it with a default password (e.g. 0000, like the PIN codes of SIM cards)
    and notify that user so they change their password '''
salt,hashed_password = createSaltHash(password)
with db_session:
u = db.Users(user_name = name, user_level = level,salt = salt, hashed_password = hashed_password)
commit()
# shouldn't there also be an editUser?
def editUserLevel(db, name, new_level, password):
with db_session:
if checkPassEntry(db, name, password):
u = db.Users.get(user_name = name)
u.user_level = new_level
print('\n Usuario editado con éxito.')
else:
print('\n Usuario o contraseña incorrectos.')
commit()
def deleteUser(db,name):
with db_session:
db.Users.get(user_name = name).delete()
def printUsers(db):
with db_session:
print('')
Users = db.Users.select()
data = [u.to_dict() for u in Users]
df = pandas.DataFrame(data, columns = ['user_name','user_level'])
df.columns = ['RUT de usuario', 'Nivel de usuario']
print( tabulate(df, headers='keys', tablefmt='psql'))
def checkPassEntry(db,name_request, password):
    ''' This method checks that the data entered at sign-in is correct.
    Returns True if it is and False otherwise '''
with db_session:
user = db.Users.get(user_name = name_request)
if user == None:
return False
else:
if hashComparison(password,user.salt,user.hashed_password):
return True
else:
return False
def getUserLevel(db,user_name):
    ''' This method returns the level of the user matching the given user name.'''
with db_session:
user = db.Users.get(user_name = user_name)
return user.user_level
def createSaltHash(password):
    ''' This method produces a salt and then a hashed password from the given password.
    Returns both values in a tuple '''
    salt = os.urandom(64)
    # Note that we do not check here whether the salt was already used for another user's hash.
    # Ideally salts are unique, but given the company's number of users, the probability of a
    # repeated salt is very low (and even a repeat should not be a problem).
    # We chose this approach for simplicity
encoded_pass = password.encode('utf-8')
hashed_password = hashlib.sha256(salt + encoded_pass).digest()
return (salt, hashed_password)
def changePassword(db,user_name, password):
    ''' This method updates the password of the given user '''
with db_session:
user = db.Users.get(user_name = user_name)
salt, hashed_password = createSaltHash(password)
user.salt = salt
user.hashed_password = hashed_password
def hashComparison(password,salt,hashed_password):
    ''' This method checks whether the given password (run through the hash algorithm) produces the expected hashed_password value.
    Returns True if the value matches and False otherwise'''
encoded_pass = password.encode('utf-8')
auxiliar_hashed_password = hashlib.sha256(salt + encoded_pass).digest()
if auxiliar_hashed_password == hashed_password:
return True
else:
return False
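# Round-trip sketch of the salt/hash helpers above (no database needed;
# the password string is illustrative):
_salt, _hashed = createSaltHash("hunter2")
assert hashComparison("hunter2", _salt, _hashed)
assert not hashComparison("wrong", _salt, _hashed)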
| true
|
0bcaf54b4debd112f858f07d8bba80b0ee79c2ad
|
Python
|
syool/sketchbook
|
/Recurrenet/cellkit.py
|
UTF-8
| 4,130
| 3.09375
| 3
|
[] |
no_license
|
# cellkit :: recurrent cells module #
# Based on Numpy #
# Austin Hyeon, 2020. "Sociology meets Computer Science" #
import numpy as np
from funckit import *
class VanillaCell:
''' the vanilla cell '''
def __init__(self, Wx, Wh, b):
self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]  # same order as params
self.cache = None
def forward(self, x, h_prev):
Wx, Wh, b = self.params
t = np.matmul(h_prev, Wh) + np.matmul(x, Wx) + b
h_next = np.tanh(t)
self.cache = (x, h_prev, h_next) # tuple: the unamendable list
return h_next
def backward(self, dh_next):
Wx, Wh, b = self.params
x, h_prev, h_next = self.cache
dt = dh_next * (1 - h_next ** 2) # derivative of t; h before tanh()
db = np.sum(dt, axis=0) # derivative of bias
dWh = np.matmul(h_prev.T, dt) # derivative of Wh
dh_prev = np.matmul(dt, Wh.T) # derivative of h_prev
dWx = np.matmul(x.T, dt) # derivative of Wx
dx = np.matmul(dt, Wx.T) # derivative of x
self.grads[0][...] = dWx # input dWx into grads[0][0] to [0][n]
self.grads[1][...] = dWh # input dWh into grads[1][0] to [1][n]
self.grads[2][...] = db # input db into grads[2][0] to [2][n]
# grads = 3 X n matrix
return dx, dh_prev
class LSTMCell:
''' the long short-term memory cell '''
def __init__(self, Wx, Wh, b):
self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]  # same order as params
self.cache = None
def forward(self, x, h_prev, c_prev):
Wx, Wh, b = self.params
N, H = h_prev.shape
f = np.matmul(x, Wx) + np.matmul(h_prev, Wh) + b
''' slice 'ƒ' into four pieces for each gate '''
f1 = f[:, :H]
f2 = f[:, H:2*H]
f3 = f[:, 2*H:3*H]
f4 = f[:, 3*H:]
''' set gates '''
sig1 = sigmoid(f1) # sigmoid dam 1
tanh_f = np.tanh(f2) # tanh(ƒ) dam
sig2 = sigmoid(f3) # sigmoid dam 2
sig3 = sigmoid(f4) # sigmoid dam 3
''' set the flow '''
c_next = (c_prev * sig1) + (tanh_f * sig2)
h_next = np.tanh(c_next) * sig3
# cache for bptt
self.cache = (x, h_prev, c_prev, sig1, tanh_f, sig2, sig3, c_next)
return h_next, c_next
def backward(self, dh_next, dc_next):
Wx, Wh, b = self.params
x, h_prev, c_prev, sig1, tanh_f, sig2, sig3, c_next = self.cache
tanh_c_next = np.tanh(c_next)
ds = dc_next + (dh_next * sig3) * (1 - tanh_c_next ** 2)
dc_prev = ds * sig1
d_sig1 = ds * c_prev # derivative of sigmoid dam 1
d_tanh_f = ds * sig2 # derivative of tanh(ƒ) dam
d_sig2 = ds * tanh_f # derivative of sigmoid dam 2
d_sig3 = dh_next * tanh_c_next # derivative of sigmoid dam3
d_sig1 *= sig1 * (1 - sig1)
d_tanh_f *= (1 - tanh_f ** 2)
d_sig2 *= sig2 * (1 - sig2)
d_sig3 *= sig3 * (1 - sig3)
df = np.hstack((d_sig1, d_tanh_f, d_sig2, d_sig3)) # merge all derivatives(= gradients) of each gate
''' distribute dƒ '''
dWh = np.dot(h_prev.T, df) # derivative of Wh
dWx = np.dot(x.T, df) # derivative of Wx
db = df.sum(axis=0) # derivative of b
''' record gradients '''
self.grads[0][...] = dWx
self.grads[1][...] = dWh
self.grads[2][...] = db
dx = np.dot(df, Wx.T)
dh_prev = np.dot(df, Wh.T)
return dx, dh_prev, dc_prev
class GRUCell:
''' the gated recurrent unit cell '''
def __init__(self):
        pass  # not implemented yet
class EmbeddingCell:
''' the embedding recurrent cell '''
def __init__(self, EM):
self.params = [EM]
self.grads = [np.zeros_like(EM)]
self.idx = None
def forward(self, idx):
EM, = self.params
self.idx = idx
out = EM[idx]
return out
def backward(self, dstream):
dEM, = self.grads
dEM[...] = 0
np.add.at(dEM, self.idx, dstream)
return None
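# Shape-check sketch for VanillaCell (hypothetical sizes: batch N=2,
# input dim D=3, hidden dim H=4); verifies the forward/backward array shapes.
if __name__ == '__main__':
    N, D, H = 2, 3, 4
    rng = np.random.randn
    cell = VanillaCell(rng(D, H), rng(H, H), rng(H))
    h = cell.forward(rng(N, D), rng(N, H))
    dx, dh_prev = cell.backward(np.ones_like(h))
    assert h.shape == (N, H) and dx.shape == (N, D) and dh_prev.shape == (N, H)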
| true
|
a352abcc3291a02b1c3791b4a7e7f020d9170d59
|
Python
|
shi-kejian/nyu
|
/csuy-1114/lab/2/lab2.py
|
UTF-8
| 2,164
| 4.25
| 4
|
[] |
no_license
|
from math import *
from turtle import *
from datetime import *
def kilo_pound():
kilo = int(input('Please put in the weight in kilograms: '))
KILO_POUND = 2.2046
POUND_OUNCE = 16
pounds_total = kilo * KILO_POUND
    pounds = int(pounds_total)
    ounces = (pounds_total - pounds) * POUND_OUNCE
print('The answer is %s pounds and %s ounces' % (pounds, ounces))
def circular():
radius = input('Please put in the radius of the circle: ')
area = (int(radius) ** 2) * pi
circumference = int(radius) * 2 * pi
print('The area of the circle is %s and the circumeference is %s' % (area, circumference))
def draw_hexagon():
setup(500,500)
pendown()
for x in range(0, 6):
forward(50)
right(60)
penup()
def get_dob():
dob = input('Please input your date of birth (YYYYMMDD): ')
dob_year = int(dob[0:4:])
dob_month = int(dob[4:6:])
dob_day = int(dob[6::])
dob_dateobject = date(dob_year, dob_month, dob_day)
today_datetime = datetime.today()
today_list = str(today_datetime).split(' ')[0].split('-')
today_dateobject = date(int(today_list[0]), int(today_list[1]), int(today_list[2]))
print('Today\'s date is %s' % today_dateobject)
yes_no = input('Is that correct? (yes/no) \n')
if yes_no == 'yes':
        days_dif = (today_dateobject - dob_dateobject).days
years_between = days_dif // 365
months_between = (days_dif % 365) // 30
days_between = (days_dif % 365) % 30
print('You are %s years %s months and %s days old' % (years_between, months_between, days_between))
else:
get_dob()
def add_length():
first_feet = input('Feet of the first length: ')
first_inch = input('Inches of the first length: ')
second_feet = input('Feet of the second length: ')
second_inch = input('Inches of the second length: ')
first_total = int(first_feet) * 12 + int(first_inch)
second_total = int(second_feet) * 12 + int(second_inch)
total = first_total + second_total
print('The total is %s feet and %s inches' % (total//12,total%12))
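# Worked example for add_length: 1 ft 8 in + 2 ft 6 in
# -> 20 in + 30 in = 50 in = 4 ft 2 in.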
| true
|
90629a407ca0f504f4451dd73cd0b9271f53a200
|
Python
|
alancleetus/Linux-Task-Manager
|
/processStats.py
|
UTF-8
| 7,944
| 2.546875
| 3
|
[] |
no_license
|
import os
import re
import pwd
import time
import json
import subprocess
from stat import *
from process import Process
from helperFunctions import readFile, round2, BasicCounter
processDict = {}
inodeDict = {}
sysWideCpuTime = 0
vMemTotal = None
phyMemTotal = None
pageSize = None
def setSysWideCpuTime(time):
global sysWideCpuTime
sysWideCpuTime = time
def setPhyMemTotal(valInMB):
"""
This function calculates the number of pages in the physical memory of the system.
The size of physical memory is in MB, but we convert it to byte then to pages because the rss value is given in pages.
Note: This calculation is only done once because of the if statement.
Parameters:
valInMB: actual size of physical memory in MB. This value is passed in from the taskManager.py module.
Returns:
int: total number of physical memory pages
"""
global phyMemTotal
try:
if phyMemTotal ==None:
phyMemTotal = (int(valInMB)*1024*1024)/int(pageSize) #convert to bytes then to total number of pages
#print("Phy memtotal = ", phyMemTotal)
except:
pass
return phyMemTotal
def getPageSize():
    global pageSize
    if pageSize:
        return pageSize
    else:
        # cache the page size as an int so later arithmetic does not re-parse it
        pageSize = int(subprocess.check_output(["getconf", "PAGE_SIZE"]).decode("utf-8"))
        return pageSize
getPageSize()
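# Example of the pages arithmetic used in setPhyMemTotal: with 4096-byte
# pages, 8192 MB of RAM is 8192 * 1024 * 1024 / 4096 = 2,097,152 pages.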
def calculateVMemTotal():
"""
This function calculates the max virtual memory size of the system.
Note: This calculation is only done once because of the if statement.
Returns:
int: total size of virutal memory in bytes
"""
global vMemTotal
try:
if vMemTotal ==None:
architecture = subprocess.check_output(["uname","-m"]).decode("utf-8")
if "64" in architecture:
vMemTotal = 2**64
else:
vMemTotal = 2**32
#print(vMemTotal)
except:
print("Error: getting vmem total")
return vMemTotal
calculateVMemTotal()
def getAllPids():
"""
This function gets all the pids from the /proc subdirectory.
Returns:
set: a set of all pids currently runnning on the system
"""
try:
return set(filter(lambda dir: str.isdigit(dir), os.listdir("/proc")))
except:
print("Error: Unable to get all PIDs")
def getInodeDict():
global inodeDict
return inodeDict
def parseInodeNumber(pid):
inodeVal = ""
try:
path = "/proc/"+pid+"/fd"
inner = os.listdir(path)
for fileName in inner:
if str.isdigit(fileName):
innerpath="/proc/"+pid+"/fd/"+fileName
if S_ISSOCK(os.stat(innerpath).st_mode):
global inodeDict
inodeDict[str(os.stat(innerpath).st_ino)] = pid
inodeVal = str(os.stat(innerpath).st_ino)
except:
#print("No inode number found for PID:", pid)
return inodeVal
return inodeVal
def parseInfo(pid, statFile, statusFile, readTime):
try:
try:
statFile = statFile.split()
"""
bug: if program name = (Web content)
split() will split break it into two parts
"""
if statFile[1][-1] != ')':
statFile[1] = statFile[1]+" "+statFile[2]
statFile.pop(2)
name = statFile[1][1:-1]
userMode = statFile[13]
sysMode = statFile[14]
vmem = statFile[22]
rss = statFile[23]
except:
print("Error: parsing /proc/{}/stat file".format(pid))
return None
userName= ""
try:
uid = re.findall(r'Uid:\s+\d+\s',statusFile)[0].split()[1]
userName = pwd.getpwuid(int(uid)).pw_name
except:
print("Error: trying to get userName")
inodeNumber = parseInodeNumber(pid)
if pid in processDict:
processDict[pid].updateAll(name, userName, inodeNumber, userMode, sysMode, vmem, rss, readTime)
return processDict[pid]
else:
temp_process = Process(pid)
temp_process.updateAll(name, userName, inodeNumber, userMode, sysMode, vmem, rss, readTime)
return temp_process
except:
print("Error: process parsing information")
return None
def removeProc(pidSet):
"""
This function removes all processes from processDict if the process is complete.
For each process, check if pid is in pidSet(passed in from fetchAll()), if not remove that process from dict.
Parameters:
pidSet: set of pid's that are currenlty running
"""
try:
global processDict
pids = processDict.keys()
processDict = {k: v for k, v in processDict.items() if k in pidSet}
except:
print("Error removing process from processDict")
def fetchAll():
"""
    This function reads each process's stat files and sends the contents to parseInfo() to be processed.
    This function first gets all the pids currently running in the system. Then it passes the contents of /proc/pid/stat and /status files to parseInfo() to be processed. This is done for every single process in the system.
Returns:
dictionary: A dictionary holding information for each currently active process.
"""
pidSet = getAllPids()
removeProc(pidSet)
try:
global processDict
global inodeDict
for pid in pidSet:
if os.path.exists("/proc/"+pid):
readTime = time.time()
statFile = readFile("/proc/"+pid+"/stat")
statusFile = readFile("/proc/"+pid+"/status")
temp_process = parseInfo(pid, statFile, statusFile, readTime)
#print(temp_process)
if temp_process:
processDict[pid]=temp_process
return processDict
except:
print("Error: FetchAll error in processStat.py")
return {}
def toJSON():
global sysWideCpuTime
global phyMemTotal
global vMemTotal
dataList = []
for pid, eachProcess in fetchAll().items():
cpu = eachProcess.calculateCpuUtilization(sysWideCpuTime)
data = {}
data['pid'] = eachProcess.pid
data['name'] = eachProcess.name
data['userName'] = eachProcess.userName
data['inodeNumber'] = eachProcess.inodeNumber
data['userMode'] = cpu["userMode"]
data['sysMode'] = cpu["sysMode"]
data['total'] = cpu["total"]
data['vMemUtil'] = eachProcess.calculateVMemUtil(vMemTotal)
data['vMemAvg'] = eachProcess.calculateVMemAvg()
data['phyMemUtil'] = eachProcess.calculatePhyMemUtil(phyMemTotal)
dataList.append(data)
json_data = json.dumps(dataList)
return json_data
def printAll():
global sysWideCpuTime
global phyMemTotal
global vMemTotal
print("\n|{:>6}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|".format(
"pid", "Program", "UserName", "Inode Number", "User Util", "Sys Util", "Total Util", "VMem Avg", "Phy Mem Util"))
print("|{:>6}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|".format("","","", "", "", "", "", "", ""))
for pid, eachProcess in fetchAll().items():
cpu = eachProcess.calculateCpuUtilization(sysWideCpuTime)
print("|{:>6}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|{:>15}|".format(
eachProcess.pid,
eachProcess.name,
eachProcess.userName,
eachProcess.inodeNumber,
cpu["userMode"],
cpu["sysMode"],
cpu["total"],
eachProcess.calculateVMemAvg(),
eachProcess.calculatePhyMemUtil(phyMemTotal)))
#print(sysWideCpuTime)
| true
|
c1922f6d97050d45ad1e8db7c9fec4babfa226ba
|
Python
|
jorgeOmurillo/Python
|
/intro/control_structures.py
|
UTF-8
| 448
| 3.6875
| 4
|
[] |
no_license
|
import math
wordlist = ['conejo', 'perro', 'raton', 'gato']
letterlist = []
count = 0
contar = 0
for aword in wordlist:
count += 1
for aletter in aword:
        contar += 1
letterlist.append(aletter)
print letterlist
print count
print contar
n = 2
if n<0:
print "Sorry"
else:
print math.sqrt(n)
sqlist = []
for x in range(1,11):
sqlist.append(x*x)
print sqlist
newlist = [x*3 for x in range(1,11)]
print newlist
| true
|
37f57c64d81060a5ee82aeec2e304f9a78c68697
|
Python
|
lilianluong16/cogworks_team4
|
/Face_Rec_Package/Face_Rec/__init__.py
|
UTF-8
| 13,979
| 2.59375
| 3
|
[] |
no_license
|
from os import path, makedirs
from pathlib import Path
from camera import take_picture
import os
import pickle
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import skimage.io as io
import dlib_models
from dlib_models import load_dlib_models
from dlib_models import models
import numpy as np
from camera import save_camera_config
import cloudinary
import cloudinary.uploader
import cloudinary.api
load_dlib_models()
save_camera_config(port=1, exposure=0.7)
_path = Path(path.dirname(path.abspath(__file__)))
__all__ = ['get_img_from_camera', 'get_img_from_file', 'display_img', 'find_faces', 'find_descriptors',
'describe', 'calc_dist', 'is_face', 'identify_face', 'compare_faces', 'new_database', 'retrieve_database',
'write_database', 'add_image', 'initialize', 'clear_database' , 'del_person', 'identify', 'draw_faces', 'go', 'go_friend', 'add_file']
# uncomment for the first time running on a new machine
"""from dlib_models import download_model, download_predictor
download_model()
download_predictor()"""
# TO CHANGE DEFAULT DATA FILE, CHANGE STRING BELOW
DATABASE_FR = "data/facial_features.txt"
# creates data file if it doesn't exist
if not os.path.exists(DATABASE_FR):
os.makedirs('/'.join(str.partition(DATABASE_FR, "/")[:-1]))
with open(DATABASE_FR, "w+"):
pass
db = {}
def get_img_from_camera():
"""
Gets an image numpy array from the default camera
Parameters:
-----------
None
Returns:
--------
img (numpy array):
the (H,W,3) rgb values of the image
"""
img_array = take_picture()
return img_array
def get_img_from_file(filepath):
"""
Gets an image numpy array from the default camera
Parameters:
-----------
the string file path of the picture
Returns:
--------
img (numpy array):
the (H,W,3) rgb values of the image
"""
img_array = io.imread(filepath)
return img_array
def display_img(img_array):
"""
For testing. Shows the image based on it's numpy array
Parameters:
-----------
None
Returns:
--------
None; shows the image
"""
fig, ax = plt.subplots()
ax.imshow(img_array)
def find_faces(img_array):
"""
Finds all faces in an image
Parameters:
-----------
img_array (numpy array):
the array (H,W,3) of rgb values for the image
Returns:
--------
detections (list):
each element has the corners of the bounding box for that detected face
"""
face_detect = models["face detect"]
# Number of times to upscale image before detecting faces.
# When would you want to increase this number?
upscale = 1
detections = face_detect(img_array, upscale) # returns sequence of face-detections
detections = list(detections)
    if len(detections) > 0:
        det = detections[0]  # first detected face in image
        # bounding box dimensions for detection (illustrative; unused below)
        l, r, t, b = det.left(), det.right(), det.top(), det.bottom()
return detections
def find_descriptors(img_array, detections):
"""
Provides descriptors of the faces bounded by the detection boxes in the img array
Parameters:
-----------
img_array (numpy array):
the array (H,W,3) of rgb values for the image
detections (list):
each element has the corners of the bounding box for that detected face
Returns:
--------
descriptors (list of numpy arrays):
a list of descriptors for each face in the image (has shape (128,))
"""
descriptors = []
for det in detections:
shape_predictor = models["shape predict"]
shape = shape_predictor(img_array, det)
face_rec_model = models["face rec"]
descriptor = np.array(face_rec_model.compute_face_descriptor(img_array, shape))
descriptors.append(descriptor)
return descriptors
def describe():
"""
Takes a picture and finds the descriptors of each face in it
Parameters:
-----------
None; will use configured camera
Returns:
--------
descriptors (list of numpy arrays):
a list of descriptors for each face in the image (has shape (128,))
"""
img = get_img_from_camera()
rects = find_faces(img)
descriptors = find_descriptors(img, rects)
return descriptors
def calc_dist(test, data):
"""
Calculates the L2 distance between two feature vectors.
Parameters
----------
test: numpy array, shape (N,)
data: numpy array, shape (N,)
Returns
-------
float
"""
return np.sqrt(np.sum((test - data) ** 2))
def is_face(test_desc, profile_mean, threshold):
"""
Determines whether or not a descriptor is close enough to a face,
returning False if the L2 distance is greater than the threshold.
Parameters
----------
test_desc: numpy array, shape (N,)
The descriptor of the unknown face being tested.
profile_mean: numpy array, shape (N,)
The mean of descriptors for the profile being tested.
threshold: numerical value (int, float)
The maximum L2 distance accepted as a match.
Returns
-------
float, if L2 distance is less than the threshold
None, otherwise
"""
l2d = calc_dist(test_desc, profile_mean)
if l2d < threshold:
return l2d
return None
def identify_face(desc, database, threshold=0.5, face_thres=0):
"""
Compares a test descriptor to all faces in a database and determines the best match, if any.
Parameters
----------
desc: numpy array, shape (N,)
The descriptor of the unknown face being tested.
database: dictionary
The database containing name keys and a list of descriptor vectors as well as the mean.
threshold: numerical value (int, float)
The maximum L2 distance accepted as a face match.
face_thres: numerical value (int, float)
The minimum distance between the top two matches to count a match.
Returns
-------
string, representing the name/key if a match is found
None, otherwise
"""
matches = []
    for key, data in database.items():
i_f = is_face(desc, data[1], threshold)
if i_f is not None:
matches.append((key, i_f))
if len(matches) == 0:
return None
if len(matches) == 1:
return matches[0][0]
matches = sorted(matches, key=lambda x: x[1])
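    # Accept the closest match only if it beats the runner-up by more than face_thres.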
if matches[1][1] - matches[0][1] > face_thres:
return matches[0][0]
return None
def compare_faces(descriptors, database, threshold=0.45):
"""
Compares each face with the database and returns a list of detected people.
Parameters
----------
descriptors: list of numpy arrays
List of descriptor vectors corresponding to the features of each face.
database: dictionary
The database containing name keys and a list of descriptor vectors as well as the mean.
Returns
-------
list of strings, or None if match not found for that unit
"""
people = []
for d in descriptors:
result = identify_face(d, database, threshold=threshold)
people.append(result)
return people
def new_database(filepath=DATABASE_FR):
"""
    Creates a new text file and its parent folder at the given filepath.
    If creating additional databases, pass the same path via the filepath
    kwarg to every function that accepts one.
"""
if not os.path.exists(filepath):
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w+"):
pass
def retrieve_database():
global db
with open(DATABASE_FR, "rb") as f:
db = pickle.load(f)
return db
def write_database(filepath=DATABASE_FR):
"""
Simple function that writes to the Database
"""
with open(filepath, "wb") as f:
global db
pickle.dump(db, f)
# Add image to database
def add_image(descriptor, name=None):
"""
Assigns a descritpor to a name depending on whether the name is already in the Database or not.
Parameters
----------
descriptor: numpy.array, shape (128,)
The descriptor of the face whose image is to be added to the Database
name= string
If available, the name of the face is passed to added the corresponding descriptor to the Database
Returns
-------
Nothing. The purpose of this function is to associate the incoming descriptor with the right name (if present)
or to ask the user to input a new name and associate it with the incoming descriptor
"""
global db
    if name is not None:
        old_descriptor_list = list(db.get(name))[0]
        old_descriptor_list.append(descriptor)
        new_list = old_descriptor_list
        num_descriptors = len(new_list)
        temp_arr = np.array(new_list)
        # Average across descriptors (axis 0) so the mean keeps shape (128,).
        new_mean = np.sum(temp_arr, axis=0) / num_descriptors
        db[name] = [new_list, new_mean]
    else:
the_name = input("Please enter your name: ")
if the_name in db:
add_image(descriptor, name=the_name)
else:
db[the_name] = [[descriptor], descriptor]
def clear_database(password):
"""
Clears everything in the database given the incoming parameter 'password'
"""
if password.lower() == "yes i am sure":
if input("Are you very sure?").lower() == "y":
global db
db = {}
# Start
def initialize():
"""
Initializes the Database
"""
cloudinary.config(
cloud_name="luong44976",
api_key="165891819185365",
api_secret="p2ib0QA6Rl2nK8CNxlBFQeJmoaM"
)
global db
db = retrieve_database()
def del_person(name):
"""
Deletes a person and their descriptors and mean from the Database.
Parameters
----------
name= string
The name of the individual whose descriptors are to be deleted from the Database
Returns
-------
Nothing. The incoming name parameter is simply deleted, along with its accompanying descriptor(s) and mean
"""
del db[name]
def identify(save=True, force_input=False, from_file=False):
"""
Takes a picture with configured camera and identifies all of the faces in the picture
Parameters:
-----------
save (boolean):
whether or not to add the captured image to the database
from_file(boolean):
whether or not expect a filename instead of taking a picture
Returns:
--------
names (list)
the list of the name of each person in the picture
"""
if not from_file:
img = get_img_from_camera()
dets = find_faces(img)
descs = find_descriptors(img, dets)
else:
filepath = input('Please enter the location (filepath) of the image: ')
img = get_img_from_file(filepath)
dets = find_faces(img)
descs = find_descriptors(img, dets)
names = compare_faces(descs, db, threshold=0.4)
if save:
if len(descs) > 1:
print("Cannot add multiple people at once.")
elif len(descs) < 1:
print("There's no one there!")
else:
if force_input:
add_image(descs[0])
else:
add_image(descs[0], name=names[0])
draw_faces(dets, names, img)
return names
# In[4]:
def draw_faces(detections, people, img):
"""
Draws bounding boxes over image, and labels them with people.
Parameters
----------
detections: list of rectangles
List of bounding box rectangles corresponding to the position of each detected face.
people: list of strings
List of the keys/names of people as found by compare_faces(), or None if no match is found.
img: numpy array, shape (480, 640, 3)
The array representing the image.
Returns:
--------
None
"""
fig, ax = plt.subplots()
ax.imshow(img)
for i in range(len(detections)):
d = detections[i]
rect = patches.Rectangle((d.left(), d.top()), d.width(), d.height(), fill=False, linewidth=1.2, color='#57FF36')
ax.add_patch(rect)
if people[i] is not None:
ax.text(d.left() + 8, d.top() + d.height() + 15, people[i], backgroundcolor='#57FF36', fontsize='5', color='black', weight='bold')
# plt.show()
plt.savefig('static/img.png')
return cloudinary.uploader.upload('static/img.png')['secure_url']
def go():
"""
Takes a picture from the configured camera and displays the image with recognized faces and labels
Parameters:
-----------
None
Returns:
--------
compared: list of strings
Names of everyone found in photo.
img: numpy array
The image itself.
url: string
URL of location for img file
descs: list of numpy arrays
Face descriptors.
"""
img = get_img_from_camera()
dets = find_faces(img)
descs = find_descriptors(img, dets)
compared = compare_faces(descs, db)
url = draw_faces(dets, compared, img)
return compared, img, url, descs
def go_friend():
"""
Takes a picture from the configured camera and returns descs, names
Parameters:
-----------
None
Returns:
--------
compared: list of strings
Names of everyone found in photo.
descs: list of numpy arrays
Face descriptors.
"""
img = get_img_from_camera()
dets = find_faces(img)
descs = find_descriptors(img, dets)
compared = compare_faces(descs, db)
return compared, descs
def add_file(filepath):
"""
Adds a person to the database given a picture of their face
Will ask for their name
Parameters
----------
filepath (string):
the location of the file that is the picture of the person's face
Returns:
--------
None
"""
img = get_img_from_file(filepath)
det = find_faces(img)
    descriptors = find_descriptors(img, det)
    add_image(descriptors[0])  # find_descriptors returns a list; use the first face
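
# A minimal usage sketch (hypothetical; assumes a working camera, downloaded
# dlib models, and an existing pickled database at DATABASE_FR):
if __name__ == "__main__":
    initialize()                   # load the pickled database into the global db
    print(identify(save=False))    # take a picture and print any recognized names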
| true
|
caec9dc5aef8d0e7b90c14f7279598cbf41b4316
|
Python
|
StevenWang30/tx2_sync
|
/gpio_sync.py
|
UTF-8
| 1,053
| 2.734375
| 3
|
[] |
no_license
|
import Jetson.GPIO as GPIO
import time  # import the libraries we need
lidar_trigger = 11
camera_trigger = 13
GPIO.setmode(GPIO.BOARD)
# GPIO.setup(lidar_trigger, GPIO.OUT)
# GPIO.setup(camera_trigger, GPIO.OUT)
#
# trig = 0
#
# try:
# while (True):
# if trig % 10 == 0:
# GPIO.output(lidar_trigger, GPIO.HIGH)
# GPIO.output(camera_trigger, GPIO.HIGH)
# time.sleep(0.1)
#
# GPIO.output(lidar_trigger, GPIO.LOW)
# GPIO.output(camera_trigger, GPIO.LOW)
#
# trig += 1
# except KeyboardInterrupt:
# GPIO.cleanup()
#
GPIO.setup(lidar_trigger, GPIO.OUT, initial=GPIO.HIGH)
l_t = GPIO.PWM(lidar_trigger, 50) # 50Hz
l_t.start(25) # 25% duty cycle
GPIO.setup(camera_trigger, GPIO.OUT, initial=GPIO.HIGH)
c_t = GPIO.PWM(camera_trigger, 50) # 50Hz
c_t.start(25) # 25% duty cycle
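# At 50 Hz each PWM period is 20 ms, so a 25% duty cycle holds each trigger
# high for 5 ms per cycle; both channels run at the same frequency, keeping
# the lidar and camera triggers nominally in step (software PWM, so the
# relative phase is approximate).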
print("PWM running. Press CTRL+C to exit.")
try:
while True:
        # l_t.ChangeDutyCycle(dc) / c_t.ChangeDutyCycle(dc), where 0.0 <= dc <= 100.0
time.sleep(1)
finally:
l_t.stop()
c_t.stop()
GPIO.cleanup()
| true
|
b658e9e83b22b571a34b10002b6b637f2f852387
|
Python
|
Anusha-A-R/code-library
|
/codechefmay1.py
|
UTF-8
| 142
| 2.984375
| 3
|
[] |
no_license
|
# cook your dish here
t=int(input())
for i in range(t):
x,a,b=list(map(int,input().split(" ")))
ans=a+(100-x)*b
print(ans*10)
| true
|
f14ec513d81e84b7c54e077bc849d2aa64753429
|
Python
|
ruthvik4215/openCV
|
/scripts/face_detection.py
|
UTF-8
| 1,435
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import cv2
# loading the trained data set from the opencv.
trained_face_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
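# If the XML file is not next to the script, pip's opencv-python also bundles it:
# cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')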
# getting live feed form the default webcam or any other video processing programs in your system .
default_v_p = cv2.VideoCapture(0)
# continues the live feed video for ever until we shut it down.
while True:
# reading the frames from the live feed.
successful_frame_read, frame = default_v_p.read()
# changing the color for the live feed video to enable the facial detection.
gray_scale_video = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# pointing out the coordinate location for face in the image for drawing a rectangle around it.
face_coordinates = trained_face_data.detectMultiScale(gray_scale_video)
# looping through multiple human faces in a single frame.
for (x, y, w, h) in face_coordinates:
# drawing a rectangle around a face
cv2.rectangle(frame, (x, y), (x+w,y+h), (0, 255, 0), 4)
# pulling out the frame
cv2.imshow("Ruthvik", frame)
    # waitKey(1) pauses for 1 millisecond between frames so the window can refresh.
key = cv2.waitKey(1)
    # If the Q key is pressed, the live feed is terminated.
    # In ASCII, 'Q' is 81 and 'q' is 113.
    if key == 81 or key == 113:
break
# Release the default_v_p capture and close the preview window.
default_v_p.release()
cv2.destroyAllWindows()
| true
|
c727d51bf36256e09c66aed30ed9dc5c38bbf95e
|
Python
|
AppDaemon/appdaemon
|
/conf/example_apps/yr.py
|
UTF-8
| 2,128
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import hassapi as hass
import requests
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
"""
Get detailed Yr weather data
Arguments:
- event: Entity name when publishing event
- interval: Update interval, in minutes. Must be at least 10
- source: Yr xml source
- hours: Number of hours to forecast, at most 48
"""
disclaimer = "Weather forecast from Yr, delivered by the Norwegian Meteorological Institute and NRK"
user_agent = "HomeAssistant/Appdaemon Python/requests"
class Yr(hass.Hass):
def initialize(self):
self.url = self.args["source"]
self.entity = self.args["event"]
self.hours = self.args["hours"]
inOneMinute = datetime.now() + timedelta(minutes=1)
interval = int(self.args["interval"])
if interval < 10:
raise Exception("Update interval ({}) must be at least 10 minutes".format(interval))
# delay first launch with one minute, run every 'interval' minutes
self.run_every(self.updateState, inOneMinute, interval * 60)
def updateState(self, kwargs):
forecast = self.fetchForecast()
self.set_app_state(self.entity, {"state": "", "attributes": forecast})
def fetchData(self):
res = requests.get(self.url, headers={"User-Agent": user_agent})
return res.text
def fetchForecast(self):
data = self.fetchData()
root = ET.fromstring(data)
periods = root.find(".//tabular")
return {
"disclaimer": disclaimer,
"forecast": [
{
"from": x.get("from"),
"to": x.get("to"),
"weather": x.find("symbol").get("name"),
"symbol": x.find("symbol").get("var"),
"precip": x.find("precipitation").get("value"),
"windSpeed": x.find("windSpeed").get("mps"),
"windDirection": x.find("windDirection").get("deg"),
"temp": x.find("temperature").get("value"),
}
for x in periods[: self.hours]
],
}
| true
|
6c3e80eb8f9fa561a06869b27be0ede07008e673
|
Python
|
ollema/AoC
|
/day3/day3_part2.py
|
UTF-8
| 489
| 3.515625
| 4
|
[] |
no_license
|
def is_triangle(triangle):
    sorted_triangle = sorted(triangle)
    return sorted_triangle[2] < sorted_triangle[0] + sorted_triangle[1]

count = 0
numbers = []
with open('input.txt') as file:
    for line in file:
        numbers.extend([int(x) for x in line.rstrip('\n').split()])
        # Triangle sides are listed column-wise across groups of three rows,
        # so evaluate the three vertical triangles once nine numbers accumulate.
        if len(numbers) == 9:
            for i in range(0, 3):
                if is_triangle([numbers[i], numbers[i + 3], numbers[i + 6]]):
                    count += 1
            numbers = []
print(count)
| true
|
ad106d4fc0d60f278c0e34cc5f9b0b09f11eebb8
|
Python
|
ZJXD/DropBoxFile
|
/Python/ShiyanLou/WeatherAnalysis/Pic9-1.py
|
UTF-8
| 2,043
| 3.28125
| 3
|
[] |
no_license
|
# coding:utf-8
# Temperature line chart for each city
import numpy as np
import pandas as pd
import datetime
df_asti = pd.read_csv('./WeatherData/asti_270615.csv')
df_bologna = pd.read_csv('./WeatherData/bologna_270615.csv')
df_cesena = pd.read_csv('./WeatherData/cesena_270615.csv')
df_faenza = pd.read_csv('./WeatherData/faenza_270615.csv')
df_ferrara = pd.read_csv('./WeatherData/ferrara_270615.csv')
df_mantova = pd.read_csv('./WeatherData/mantova_270615.csv')
df_milano = pd.read_csv('./WeatherData/milano_270615.csv')
df_piacenza = pd.read_csv('./WeatherData/piacenza_270615.csv')
df_ravenna = pd.read_csv('./WeatherData/ravenna_270615.csv')
df_torino = pd.read_csv('./WeatherData/torino_270615.csv')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from dateutil import parser
# Read the temperature and date data for each city
y1 = df_ravenna['temp']
x1 = df_ravenna['day']
y2 = df_faenza['temp']
x2 = df_faenza['day']
y3 = df_cesena['temp']
x3 = df_cesena['day']
y4 = df_milano['temp']
x4 = df_milano['day']
y5 = df_asti['temp']
x5 = df_asti['day']
y6 = df_torino['temp']
x6 = df_torino['day']
# Convert the dates from strings to standard datetime objects
day_ravenna = [parser.parse(x) for x in x1]
day_faenza = [parser.parse(x) for x in x2]
day_cesena = [parser.parse(x) for x in x3]
day_milano = [parser.parse(x) for x in x4]
day_asti = [parser.parse(x) for x in x5]
day_torino = [parser.parse(x) for x in x6]
# Call subplots() to define the fig and ax variables: fig is the figure object, ax the axes object
fig, ax = plt.subplots()
#plt.figure('Weather')
# Rotate the x-axis tick labels by 70 degrees for readability
plt.xticks(rotation=70)
# Set the time format
hours = mdates.DateFormatter('%H:%M')
# Set the display format of the x axis
ax.xaxis.set_major_formatter(hours)
ax.set_title('Test')
ax.set_xlabel('Time')
# Each call draws three lines, so it needs three groups of parameters; 'g' stands for 'green'
ax.plot(day_ravenna,y1,'r',day_faenza,y2,'r',day_cesena,y3,'r')
ax.plot(day_milano,y4,'g',day_asti,y5,'g',day_torino,y6,'g')
plt.show()
| true
|