blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a2894adf35af90e98848ade70a858d4557552402
|
Python
|
vagueGM/GamersPlane
|
/api/src/helpers/endpoint.py
|
UTF-8
| 225
| 2.90625
| 3
|
[] |
no_license
|
def require_values(data_obj: object, fields: list) -> list:
    """Return the subset of *fields* that are absent from, or falsy in, *data_obj*."""
    return [key for key in fields if key not in data_obj or not data_obj[key]]
| true
|
c2e51ac045202c5400fd2a3d179255e928213be2
|
Python
|
Himanshu-jn20/PythonNPysparkPractice
|
/Practise_beginner/test_vbasic.py
|
UTF-8
| 558
| 3.3125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 00:06:49 2020
@author: Himanshu
"""
import sys

print("himsanshu")
# Tutorial prompts kept disabled:
# name=input("What's your name? ")
# color=input("Color? ")
# print(name + ' likes ' + color)

# Basic list-indexing and integer-arithmetic drills.
currency = [1, 2, 5, 10, 50, 100]
print(currency[0])
val = currency[2] // currency[1]   # floor division: 5 // 2 == 2
val2 = currency[2] % currency[2]   # a number modulo itself is 0
print("{0}-{1}".format(val, val2))

cnt = 2
cnt2 = 5
cnt -= 1
print(cnt)

n_perms = [1] + [0] * 5
print(n_perms)

res = sys.maxsize
print(res)

table = [0] * (5 + 1)
print(table)
print(5 + -1)
| true
|
3a2b60e97c54333f5b3239c4d3dfce087b47aabe
|
Python
|
resurgo-genetics/scATAC-seq
|
/BernoulliMixture_generatedata.py
|
UTF-8
| 2,846
| 2.875
| 3
|
[] |
no_license
|
#code to generate data using generative model for infinite bernoulli mixture
import scipy.stats
import numpy as np
import pymc3 as pm
import math

# NOTE(review): exploratory script. It mixes Python 2 syntax (`print i`) with
# names never defined in this file (z, conditprob_zj, hyperbeta, hypergamma,
# hyperalpha, update_bernoullip, sample_z_log, Beta, Bernoulli, Categorical,
# traceplot, tt, y, SEED), so it cannot run as-is.

#number of cell types = K
#number of sites = D
#number of cells = N
#proportion of each cluster in the data is a vector pi
K = 5
D = 500
N=500
pi = [.1,.25,.4,.07,.18]
clusters = [0,1,2,3,4]
#1: set separate hyperparameters for each site
beta1 = {}
gamma = {}
for d in range(D):
    beta1[d] = np.random.exponential(5)
    # NOTE(review): the chained assignment also rebinds a stray global `beta`;
    # presumably only gamma[d] was intended.
    gamma[d] = beta = np.random.exponential(5)
#sample cluster parameters from prior distributions
p = {}
for k in range(K):
    p[k] = [0]*D
    for d in range(D):
        # Per-site Bernoulli success probability for cluster k.
        p[k][d] = np.random.beta(beta1[d],gamma[d])
#2: sample cell-specific scaling factors for technical variation
alpha = {}
for n in range(N):
    alpha[n] = np.random.beta(180,75)
#3: for each cell generate data according to the model
data = np.zeros(shape=(500,500))
for n in range(N):
    #choose a cluster
    k = np.random.choice(clusters,p=pi)
    # Observed site vector: Bernoulli draws scaled by the cell's alpha.
    data[n] = np.random.binomial([1]*D,np.multiply(alpha[n],p[k]))
#now run algorithm
#attempt 1: my implementation without scaling factor
# NOTE(review): Gibbs-style sweep; `z` and the conditional-probability helper
# used below are undefined in this file.
for i in range(50):
    #for each data point
    print i
    for n in range(data.shape[0]):
        #unassign data point
        x_n = data[n]
        x_minusn = np.concatenate((data[0:n],data[n+1:]),axis=0)
        zs_minusn = np.concatenate((z[0:n],z[n+1:]))
        #compute conditional probabilities of z
        z_conditionals, zvals = conditprob_zj(zs_minusn,x_minusn,x_n,hyperbeta,hypergamma,hyperalpha)
        #workaround for now
        z[n] = sample_z_log(zvals, z_conditionals)
    #update parameter values
    p = update_bernoullip(z,data)
#attempt 2: specified as in BISCUIT
alphaprime=10
model = pm.Model()
with model: # model specifications in PyMC3 are wrapped in a with-statement
    pi1 = pm.Dirichlet('pi', a=[alphaprime]*k)
    # Define priors
    pk = Beta('pk', 1,1,shape=k)
    alpha1 = Beta('alpha',1,.1,shape=N)
    z = Categorical("z",p=pi,shape=N)
    # Define likelihood
    likelihood =Bernoulli('y', p=pk[z]*alpha1[y],observed=data)
    step1 = pm.Metropolis(vars=[pk, pi1, alpha1])
    step2 = pm.ElemwiseCategorical(vars=[z], values=[0, 1, 2])
    tr = pm.sample(10000, step=[step1, step2])
traceplot(trace)
#attempt 3: specified as in BISCUIT
def stick_breaking(beta):
    # Truncated stick-breaking construction for Dirichlet-process weights.
    portion_remaining = tt.concatenate([[1], tt.extra_ops.cumprod(1 - beta)[:-1]])
    return beta * portion_remaining
with pm.Model() as model:
    alphaprime = pm.Gamma('alpha', 1., 1.)
    beta1 = pm.Beta('beta1', 1., alphaprime, shape=K)
    w = pm.Deterministic('w', stick_breaking(beta))
    pk = Beta('pk', 1,1,shape=K)
    alpha = Beta('alphaprime',1,.1,shape=N)
    likelihood =Bernoulli('y', p=pk[w]*alpha[data],observed=data)
    trace = pm.sample(2000, n_init=50000, random_seed=SEED)
traceplot(trace)
| true
|
6b814106906e3919fe0e77e6405968f297f89107
|
Python
|
jpxiong/platform_cts
|
/tools/selinux/SELinuxNeverallowTestGen.py
|
UTF-8
| 2,066
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import re
import sys
import SELinuxNeverallowTestFrame
usage = "Usage: ./gen_SELinux_CTS_neverallows.py <input policy file> <output cts java source>"
# extract_neverallow_rules - takes an intermediate policy file and pulls out the
# neverallow rules by taking all of the non-commented text between the 'neverallow'
# keyword and a terminating ';'
# returns: a list of strings representing these rules
def extract_neverallow_rules(policy_file):
    """Return every 'neverallow ... ;' rule found in *policy_file*."""
    with open(policy_file, 'r') as in_file:
        policy_str = in_file.read()
        # Strip comments. BUG FIX: the original pattern '#.+?$' required at
        # least one character after '#', so a bare '#' survived; '#.*?$'
        # removes every comment including empty ones.
        no_comments = re.sub(r'#.*?$', r'', policy_str, flags=re.M)
        # A rule starts with 'neverallow' at a line start and runs (possibly
        # across lines, hence re.S) up to the first ';'.
        return re.findall(r'(^neverallow\s.+?;)', no_comments, flags=re.M | re.S)
# neverallow_rule_to_test - takes a neverallow statement and transforms it into
# the output necessary to form a cts unit test in a java source file.
# returns: a string representing a generic test method based on this rule.
def neverallow_rule_to_test(neverallow_rule, test_num):
    """Render one neverallow rule as a numbered CTS test method."""
    flattened = neverallow_rule.replace("\n", " ")
    numbered = "testNeverallowRules" + str(test_num) + "()"
    method = SELinuxNeverallowTestFrame.src_method.replace(
        "testNeverallowRules()", numbered)
    return method.replace("$NEVERALLOW_RULE_HERE$", flattened)
# NOTE(review): Python 2 script (print statement below).
if __name__ == "__main__":
    # check usage: expects <input policy file> <output cts java source>
    if len(sys.argv) != 3:
        print usage
        exit()
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    # Java source skeleton pieces provided by the frame module.
    src_header = SELinuxNeverallowTestFrame.src_header
    src_body = SELinuxNeverallowTestFrame.src_body
    src_footer = SELinuxNeverallowTestFrame.src_footer
    # grab the neverallow rules from the policy file and transform into tests
    neverallow_rules = extract_neverallow_rules(input_file)
    i = 0
    for rule in neverallow_rules:
        # One numbered test method per rule, appended to the class body.
        src_body += neverallow_rule_to_test(rule, i)
        i += 1
    with open(output_file, 'w') as out_file:
        out_file.write(src_header)
        out_file.write(src_body)
        out_file.write(src_footer)
| true
|
e4c2d48ba6338d37d8c31fa936629b450bbc787b
|
Python
|
openZH/covid_19
|
/scrapers/scrape_sz_districts.py
|
UTF-8
| 1,678
| 2.515625
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from bs4 import BeautifulSoup
import scrape_common as sc
# Kanton Schwyz links the per-district case counts as a PDF from this page.
url = 'https://www.sz.ch/behoerden/information-medien/medienmitteilungen/coronavirus.html/72-416-412-1379-6948'
content = sc.download(url, silent=True)
soup = BeautifulSoup(content, 'html.parser')
# Find the anchor whose text mentions per-municipality corona cases.
pdf_url = soup.find('a', text=re.compile(r'Coronafälle pro Gemeinde')).get('href')
content = sc.pdfdownload(pdf_url, layout=True, silent=True)
# "Stand DD.MM.20YY" gives the reporting date.
date = sc.find(r'Stand\W+(\d+\.\d+\.20\d{2})', content)
date = sc.date_from_text(date).isoformat()
# Rows look like "Bezirk <name> <count>"; small counts may be censored as "≤ n".
district_data = re.findall(r'^Bezirk\W+(\w+)\s+(≤?\s?\d+)', content, re.MULTILINE)
# https://www.bfs.admin.ch/bfs/de/home/statistiken/kataloge-datenbanken/karten.assetdetail.5688189.html
district_ids = {
    'Einsiedeln': 501,
    'Gersau': 502,
    'Höfe': 503,
    'Küssnacht': 504,
    'March': 505,
    'Schwyz': 506,
}
# https://www.sz.ch/kanton/bezirke/schwyz.html/72-210-112-106
population = {
    'Einsiedeln': 16027,
    'Gersau': 2314,
    'Höfe': 29123,
    'Küssnacht': 13270,
    'March': 43528,
    'Schwyz': 55390,
}
assert len(district_data) == len(district_ids), f'expected {len(district_ids)} districts available, but got {len(district_data)}: {district_data}'
for district, total_cases in district_data:
    assert district in district_ids, f'District {district} is unknown'
    dd = sc.DistrictData(canton='SZ', district=district)
    dd.url = pdf_url
    dd.district_id = district_ids[district]
    dd.population = population[district]
    dd.date = date
    # skip total_cases for ≤ entries (censored values are not exact counts)
    if not sc.find(r'(≤)', total_cases):
        dd.total_cases = total_cases
    print(dd)
| true
|
e3d9ce8cd5f2e0ce010b215e89fa5ec8a3ea6578
|
Python
|
shinys88/challenges-python
|
/Day_04_Requests_reg_legit/main.py
|
UTF-8
| 1,586
| 3.3125
| 3
|
[] |
no_license
|
import os
import requests, re
# Response Status Codes
# https://2.python-requests.org/en/master/user/quickstart/#response-status-codes
# URL validation function.
def legit():
    """Prompt for comma-separated URLs and report whether each one is up."""
    print("Welcome to UsUtDown.py!")
    url_arr = input(str("Please write a URL or URLs you want to check. (separated by comma)\n")).split(",")
    print("----------------------------------------")
    for url in url_arr:
        # Normalise whitespace inside each URL.
        url = url.strip().replace("\t", "")
        url = re.sub(' +', '', url)
        # Regex reference - https://wikidocs.net/4308
        p = re.compile('^http(s)?://', re.I)
        # Prepend a scheme when none was given ('is None' over '== None').
        if p.match(url) is None:
            url = "http://" + url
        try:
            rq = requests.get(url)
            if rq.status_code == requests.codes.ok:
                print(f"{rq.url} is Up!")
            else:
                print(f"{rq.url} is Down!")
        except requests.exceptions.HTTPError:
            print(f'{url} is HTTPError!')
        except requests.exceptions.MissingSchema:
            print(f'{url} is MissingSchema!')
        except requests.exceptions.ConnectionError:
            print(f'{url} is ConnectionError!')
        # BUG FIX: a second `except requests.exceptions.HTTPError` clause was
        # unreachable (duplicate of the first) and has been removed.
        except requests.exceptions.InvalidSchema:
            print(f'{url} is InvalidSchema!')
#Start Main.
running = True
while running:
    legit()
    # Re-prompt until the user gives a definite yes/no answer.
    while True:
        print("----------------------------------------")
        choice = input("Do you want to start over? y/n ")
        if choice == 'y':
            os.system('clear')
            # os.system('cls')
            break
        if choice == 'n':
            print('Bye.')
            running = False
            break
| true
|
18d5dd9a3be370aaec63d727990a43a43003f9ad
|
Python
|
JosephLevinthal/Research-projects
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4145/codes/1836_2603.py
|
UTF-8
| 155
| 2.578125
| 3
|
[] |
no_license
|
from numpy import*
from numpy.linalg import*

# Read a 4x4 matrix given as a Python literal, e.g. [[1,2,3,4],...].
# NOTE(review): eval() on raw user input executes arbitrary code; kept for
# compatibility with the exercise grader, but ast.literal_eval would be safer.
m = array(eval(input("matriz4x4: ")))
# BUG FIX: removed the useless no-op `m=m` line.
# Sort each column independently in descending order.
for i in range(4):
    m[:, i] = sorted(m[:, i], reverse=True)
print(m)
| true
|
77a5987f276bcca6c1b0ebb0d37085a29750285e
|
Python
|
asleniovas/keywordAnalysis
|
/test/test_main.py
|
UTF-8
| 859
| 2.96875
| 3
|
[] |
no_license
|
import unittest
import os
from main import cleanTextFiles
class TestMainMethods(unittest.TestCase):
    """Unit tests for main.cleanTextFiles."""

    def setUp(self):
        # Fixture data lives under the user's Documents checkout.
        home = os.path.expanduser("~")
        self.data_folder = os.path.join(home,
                                        "Documents/repos/keywordAnalysis/data")
        self.stop_words = {"one", "two"}

    # test with no file names
    def test_emptyInputs(self):
        cleaned = cleanTextFiles(self.data_folder, [], self.stop_words)
        self.assertEqual(len(cleaned), 0)

    # test return with 1 file
    def test_oneFile(self):
        cleaned = cleanTextFiles(self.data_folder,
                                 ["Apple_Event_2017_09.txt"],
                                 self.stop_words)
        self.assertEqual(len(cleaned), 1)
| true
|
05690c0215eb794136878e3b3ea6f99b32e372ad
|
Python
|
HyeminNoh/Coding-Test-Study
|
/Programmers/Lv2/NextMaxNum.py
|
UTF-8
| 135
| 2.875
| 3
|
[] |
no_license
|
def solution(n):
    """Return the smallest integer greater than n with the same count of set bits.

    Generalized: the original stopped at a hard-coded 1,000,000 bound and
    implicitly returned None past it. A result always exists no later than
    2*n (a left shift preserves the popcount), so an unbounded scan is safe.
    """
    cnt = bin(n).count('1')
    candidate = n + 1
    while bin(candidate).count('1') != cnt:
        candidate += 1
    return candidate
| true
|
de4a5e41e5a72f1b5a740e240838c077efb0c2f9
|
Python
|
Mistery03/mainMenu
|
/Main Menu/menu.py
|
UTF-8
| 5,373
| 3
| 3
|
[] |
no_license
|
import pygame;
class Menu():
    """Base class: shared cursor drawing and frame flipping for all menus."""

    def __init__(self, game):
        self.game = game
        # Display centre, used by subclasses to lay out their entries.
        self.mid_w = self.game.DISPLAY_W / 2
        self.mid_h = self.game.DISPLAY_H / 2
        self.runDisplay = True
        self.cursorRect = pygame.Rect(0, 0, 20, 20)
        self.offset = -100

    def drawCursor(self):
        # The cursor is a '*' glyph drawn at the rect's top-left corner.
        self.game.drawText("*", 20, self.cursorRect.x, self.cursorRect.y)

    def blitScreen(self):
        # Push the off-screen surface to the window and clear key state.
        self.game.window.blit(self.game.display, (0, 0))
        pygame.display.update()
        self.game.resetKeys()
class mainMenu(Menu):
    """Top-level menu offering Start / Options / Credits."""

    def __init__(self, game):
        Menu.__init__(self, game)
        self.state = "Start"
        self.startx, self.starty = self.mid_w, self.mid_h + 30
        self.optionx, self.optiony = self.mid_w, self.mid_h + 50
        self.creditx, self.credity = self.mid_w, self.mid_h + 70
        self.cursorRect.midtop = (self.startx + self.offset, self.starty)

    def displayMenu(self):
        self.runDisplay = True
        while self.runDisplay:
            self.game.checkEvents()
            self.checkInput()
            self.game.display.fill(self.game.BLACK)
            self.game.drawText('Main Menu', 20, self.game.DISPLAY_W / 2, self.game.DISPLAY_H / 2 - 20)
            self.game.drawText('Start', 20, self.startx, self.starty)
            self.game.drawText('Options', 20, self.optionx, self.optiony)
            self.game.drawText('Credits', 20, self.creditx, self.credity)
            self.drawCursor()
            self.blitScreen()

    def moveCursor(self):
        # Cycle through the entries; DOWN walks forward, UP walks backward.
        if self.game.DOWN_KEY:
            order = {"Start": "Options", "Options": "Credits", "Credits": "Start"}
        elif self.game.UP_KEY:
            order = {"Start": "Credits", "Options": "Start", "Credits": "Options"}
        else:
            return
        self.state = order[self.state]
        anchors = {
            "Start": (self.startx, self.starty),
            "Options": (self.optionx, self.optiony),
            "Credits": (self.creditx, self.credity),
        }
        x, y = anchors[self.state]
        self.cursorRect.midtop = (x + self.offset, y)

    def checkInput(self):
        self.moveCursor()
        if self.game.START_KEY:
            if self.state == "Start":
                self.game.playing = True
            elif self.state == "Options":
                self.game.currMenu = self.game.options
            elif self.state == "Credits":
                self.game.currMenu = self.game.credits
            self.runDisplay = False
class OptionsMenu(Menu):
    """Options screen with Volume and Controls entries."""

    def __init__(self, game):
        Menu.__init__(self, game)
        self.state = "Volume"
        self.volx, self.voly = self.mid_w, self.mid_h + 20
        self.controlx, self.controly = self.mid_w, self.mid_h + 40
        self.cursorRect.midtop = (self.volx + self.offset, self.voly)

    def displayMenu(self):
        self.runDisplay = True
        while self.runDisplay:
            self.game.checkEvents()
            self.checkInput()
            self.game.display.fill(self.game.BLACK)
            self.game.drawText('Options', 20, self.game.DISPLAY_W / 2, self.game.DISPLAY_H / 2 - 30)
            self.game.drawText('Volume', 15, self.volx, self.voly)
            self.game.drawText('Controls', 15, self.controlx, self.controly)
            self.drawCursor()
            self.blitScreen()

    def checkInput(self):
        if self.game.BACK_KEY:
            self.game.currMenu = self.game.main_menu
            self.runDisplay = False
        elif self.game.UP_KEY or self.game.DOWN_KEY:
            # Only two entries, so any vertical move toggles between them.
            if self.state == "Volume":
                self.state = "Controls"
                self.cursorRect.midtop = (self.controlx + self.offset, self.controly)
            elif self.state == "Controls":
                self.state = "Volume"
                self.cursorRect.midtop = (self.volx + self.offset, self.voly)
        elif self.game.START_KEY:
            # TODO: implement the Volume / Controls sub-screens.
            pass
class CreditsMenu(Menu):
    """Static credits screen; confirm or back returns to the main menu."""

    def __init__(self, game):
        Menu.__init__(self, game)

    def displayMenu(self):
        self.runDisplay = True
        while self.runDisplay:
            self.game.checkEvents()
            self.checkInput()
            self.game.display.fill(self.game.BLACK)
            self.game.drawText('Credits', 20, self.game.DISPLAY_W / 2, self.game.DISPLAY_H / 2 - 20)
            self.game.drawText('Made by Mistery', 15, self.game.DISPLAY_W / 2, self.game.DISPLAY_H / 2)
            self.blitScreen()

    def checkInput(self):
        if self.game.START_KEY or self.game.BACK_KEY:
            self.game.currMenu = self.game.main_menu
            self.runDisplay = False
| true
|
f9001c241655b00ac02468e341a25bc3859195af
|
Python
|
seymasultan/HIT-SONG-PREDICTION
|
/SVM.py
|
UTF-8
| 2,319
| 2.96875
| 3
|
[] |
no_license
|
import pickle
import joblib
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.svm import SVC
import SpotifyConnection
import dataset
def main():
    """Load the dataset, normalise the features and train/evaluate the SVM."""
    allSong, targetList = dataset.main()
    allSong = np.array(allSong)
    targetList = np.array(targetList)
    model(listNormalizer(allSong), targetList)
def listNormalizer(mylist: np.ndarray):
    """Scale every column of *mylist* to [0, 1] by its column maximum."""
    scaled = mylist / mylist.max(axis=0)
    print("NORMALIZE EDILDI.")
    return scaled
def model(allSong, targetList):
    """Train an SVC on an 80/20 split, persist it to SVM.pkl and report metrics."""
    predicted = []
    X_train, X_test, y_train, y_test = train_test_split(allSong, targetList, test_size=0.2)
    # Different kernel tricks can be passed as a parameter and change the
    # accuracy ('linear', 'poly', 'rbf', 'sigmoid', 'precomputed').
    # The default kernel is 'rbf'.
    svc = SVC()
    svc.fit(X_train, y_train)
    joblib.dump(svc, 'SVM.pkl')
    # Predict each test row individually (reshaped to a single-sample matrix).
    for i in range(len(X_test)):
        predict_me = np.array(X_test[i].astype(float))
        predict_me = predict_me.reshape(-1, len(predict_me))
        prediction = svc.predict(predict_me)
        predicted.append(prediction)
    print(confusion_matrix(y_test, predicted))
    print(classification_report(y_test, predicted))
    # BUG FIX: these report lines claimed "Decision Tree classifier" although
    # the trained model is an SVM.
    print("Accuracy of SVM classifier on training set: {:.2f}".format(svc.score(X_train, y_train)))
    print("Accuracy of SVM classifier on test set: {:.2f}".format(svc.score(X_test, y_test)))
    predictionSong()
def predictionSong():
    """Score one hard-coded Spotify track with the persisted SVM model."""
    songUri = "spotify:track:4WQWrSXYLnwwcmdNk8dYqN"
    # Strip the "spotify:track:" prefix when the full URI form is given.
    if "spotify" in songUri:
        songUri = songUri[14:]
    artistName, songName, songInfo = SpotifyConnection.getSongInfo(songUri)
    allSong, targetList = dataset.main()
    # Normalise the new song together with the dataset so the scales match.
    allSong.append(songInfo)
    allSong = np.array(allSong)
    allSong = allSong / allSong.max(axis=0)
    mySong = allSong[-1:]
    model = joblib.load('SVM.pkl', mmap_mode='r')
    y_pred = model.predict(mySong)
    print(y_pred)
    print("Sanatçı:" + artistName)
    print("Şarkı Adı:" + songName)
    if y_pred == [0]:
        print("THIS SONG IS NOT HIT")
    else:
        print("THIS SONG IS HIT")
# Script entry point: run the full train/evaluate pipeline.
if __name__ == '__main__':
    main()
| true
|
774c1651efef6b366c891f99281db3a9eea603aa
|
Python
|
diegopso/hybrid-urban-routing-tutorial-sbrc
|
/smaframework/common/hashing.py
|
UTF-8
| 116
| 2.71875
| 3
|
[] |
no_license
|
import hashlib
def md5(string):
    """Return the hex MD5 digest of *string*, encoded as UTF-8."""
    digest = hashlib.md5(string.encode('utf-8'))
    return digest.hexdigest()
| true
|
1e42d8e560631c82b5cb392c9838481105fe8344
|
Python
|
lilei8630/leetcode
|
/68_Text_Justification.py
|
UTF-8
| 1,133
| 2.796875
| 3
|
[] |
no_license
|
# NOTE(review): Python 2 script (`print` statement; integer `/` division in
# the `each` computations). Greedy full-justification of the words in `s`
# into lines of width `l`; extra spaces are spread across the gaps with the
# remainder going to the first gap. The inputs below (s = [""], l = 2) look
# like leftover debugging values.
s = [""]
l = 2
res = []
line=""
i=0
while i < len(s):
    if(len(line)+len(s[i])<=l):
        # Word fits on the current line: append it plus a separator space.
        line = line + s[i]
        line = line +" "
        if(i==(len(s)-1)):
            # Last word overall: emit the final line.
            len1 = len(line.replace(' ',''))
            len2 = len(line.strip())
            remain = l - len1
            temp = line.strip().split(" ")
            num_words = len(temp)
            num_slot = num_words-1
            more = remain if num_slot==0 else remain % num_slot
            each = 0 if num_slot==0 else remain / num_slot
            if(len2<l):
                # Final line is left-justified, padded with trailing spaces.
                res.append(line+" "*(l-len2))
            else:
                newline = ""
                for j in range(0,num_words):
                    newline += temp[j]
                    if(j==0):
                        # First gap absorbs the remainder spaces.
                        newline +=' '*(each+more)
                    else:
                        newline +=' '*each
                res.append(newline[0:l])
    else:
        # Line is full: fully justify it, then reprocess the current word.
        len1 = len(line.replace(' ',''))
        remain = l - len1
        temp = line.strip().split(" ")
        num_words = len(temp)
        num_slot = num_words-1
        more = remain if num_slot==0 else remain % num_slot
        each = 0 if num_slot==0 else remain / num_slot
        newline = ""
        for j in range(0,num_words):
            newline += temp[j]
            if(j==0):
                newline +=' '*(each+more)
            else:
                newline +=' '*each
        res.append(newline[0:l])
        line=""
        # Step back so the word that did not fit is processed again.
        i = i - 1
    i = i+1
print res
| true
|
d89c3102daf0acc46b99cfb7d9084b5991c6bfe9
|
Python
|
BouzasLab25/Curso_LaboratorioVirtualenPython
|
/Viernes - Distribución Normal y Teoría de Detección de Señales/CursoPython_Normal_Funciones.py
|
UTF-8
| 710
| 3.171875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 11:07:06 2017
@author: Adriana
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.stats

# Standard normal: mean 0, variance 1.
mu = 0
varianza = 1
sigma = math.sqrt(varianza)
x = np.linspace(-6, 6, 100)
valor = 3

# BUG FIX: matplotlib.mlab.normpdf was removed in matplotlib 3.1; compute the
# same curve with scipy.stats, which this script already imports.
plt.plot(x, scipy.stats.norm(mu, sigma).pdf(x))
plt.plot([valor, valor], [0, 0.55], 'red')
plt.show()

"""Obteniendo PDF's"""
pdf = scipy.stats.norm(0, 1).pdf(valor)
print(pdf)
print(scipy.stats.norm(0, 1).pdf(valor))

"""Obtener CDF's"""
cumulative = scipy.stats.norm(0, 1).cdf(valor)
print(cumulative)
print(scipy.stats.norm(0, 1).cdf(valor))

"""Obtener Puntajes Z"""
# ppf inverts cdf, so Z recovers the original value (a z-score here).
Z = scipy.stats.norm(0, 1).ppf(cumulative)
print(Z)
| true
|
4a3bb004554719cd949c8caf9f04651124aea86e
|
Python
|
danielsada/100daysofalgorithms
|
/algorithms/sliding-window/sliding-window-max-sum.py
|
UTF-8
| 1,076
| 4.1875
| 4
|
[] |
no_license
|
"""
Maximum Sum Subarray of Size K (easy)
Problem Statement
Given an array of positive numbers and a positive number ‘k,’ find the maximum sum of any contiguous subarray of size ‘k’.
Example 1:
Input: [2, 1, 5, 1, 3, 2], k=3
Output: 9
Explanation: Subarray with maximum sum is [5, 1, 3].
Example 2:
Input: [2, 3, 4, 1, 5], k=2
Output: 7
Explanation: Subarray with maximum sum is [3, 4].
"""
def maximum_sum_subarray(input:list[int], k:int) -> int:
    """Return the maximum sum over all contiguous windows of size k.

    Positive numbers are assumed, per the problem statement above.
    """
    best = 0
    running = 0
    left = 0
    for right, value in enumerate(input):
        running += value
        if right - left == k:
            # Window grew past size k: drop the leftmost element.
            running -= input[left]
            left += 1
        best = max(best, running)
    return best
import unittest
class SumKUnitTests(unittest.TestCase):
    """Regression tests for maximum_sum_subarray."""

    def test_sumk(self):
        cases = [(([2, 1, 5, 1, 3, 2], 3), 9), (([2, 3, 4, 1, 5], 2), 7)]
        for (arr, k), expected in cases:
            self.assertEqual(maximum_sum_subarray(arr, k), expected)
| true
|
ce19c50540358752b096ca6034855ae9278bfdf6
|
Python
|
minhduc9699/mx-game-logic
|
/fsm.py
|
UTF-8
| 6,160
| 2.578125
| 3
|
[] |
no_license
|
import random
from datetime import date, datetime, timedelta
# Seed data (read-only from the admin's perspective): demo player records.
# Each player tracks issued quizzes, answer results and bonus quiz quota.
players = [
    {
        "player_name": "huy",
        "quizzes": [{
            'question': 'Học viên đang pick tướng',
            'choices': [0, 0, 0, 2, 0, 0, 0, 0, 0],
            'time_allowed': 12,
            'right_choice_indexes': [3],
            # BUG FIX: this module does `from datetime import date, datetime`,
            # so the original `datetime.date(...)` / `datetime.datetime(...)`
            # raised at import time; call the imported constructors directly.
            'date_sent': date(2019, 3, 27),
            'time_sent': datetime(2019, 3, 27, 0, 44, 28, 400551)
        }],
        "results": [
            {
                "correct": True,
                "open_times": 3,
                "rewards": ["Ao phong mindx", "Sex toy"],
            }
        ],
        "extra_quota": 0,
    },
    {
        "player_name": "huy",
        "quizzes": [],
        "results": [],
        "extra_quota": 0,
    },
]
# Choice-value encoding used by the quiz configs below:
# Hoc: 0
# Facebook: 1
# LOL: 2
# Bug: 3
# Youtube: 4

# Admin-managed quiz templates (CRUD): each entry supplies question wordings,
# the time limit, and how many right/wrong choice slots a quiz gets.
quiz_configs = [
    {
        "questions": [
            "Học viên đang vào fb",
            "Học viên đang lướt facebook",
            "Học viên đang xem newsfeed",
            "Học viên xem face.book"
        ],
        "time_allowed": 12,
        "right_choices_count": 2,
        "wrong_choice_values": [0],
        "right_choice_values": [1]
    },
    {
        "questions": [
            "Học viên đang vào youtube",
            "Học viên đang lướt youtube",
            "Học viên đang xem video",
            "Học viên xem youtube"
        ],
        "time_allowed": 12,
        "right_choices_count": 3,
        "wrong_choice_values": [0],
        "right_choice_values": [4]
    },
    {
        "questions": [
            "Học viên đang vào lol",
            "Học viên đang choi lol",
            "Học viên đang xem lien minh",
            "Học viên đang pick tướng"
        ],
        "time_allowed": 12,
        "right_choices_count": 1,
        "wrong_choice_values": [0],
        "right_choice_values": [2]
    }
]

# Admin-editable tuning knobs: reward drop rate and starting bonus quota.
settings = {
    "reward_frequency": 0.5,
    "initial_quota": 4,
}

# Admin-managed reward stock (CRUD); `given` counts units already handed out.
reward_configs = [
    {
        "name": "thẻ cào 20k",
        "quantity": 50,
        "given": 0,
    },
    {
        "name": "áo mindX",
        "quantity": 30,
        "given": 0,
    },
    {
        "name": "Vé xem phim",
        "quantity": 20,
        "given": 0,
    },
]
def generate_quiz(player, config):
    """Build one quiz for *player* from *config*, enforcing the daily quota.

    Side effect: consumes one unit of extra_quota when the free daily quiz
    has already been used.
    """
    # One free quiz per day; extra_quota covers anything beyond that.
    sent_today = [q for q in player["quizzes"] if q["date_sent"] == date.today()]
    daily_allowance = 1 if not sent_today else 0
    quota = daily_allowance + player["extra_quota"]
    if quota <= 0:
        return {"quota": 0, "questions": "Fuck off", "choices": []}
    if daily_allowance == 0:
        # The free quiz is spent, so this one consumes bonus quota.
        player["extra_quota"] -= 1
    question = random.choice(config["questions"])
    n_right = config["right_choices_count"]
    right_choices = config["right_choice_values"] * n_right
    # Wrong slots first, then right slots, shuffled into a 9-choice board.
    choices = config["wrong_choice_values"] * (9 - n_right) + right_choices
    random.shuffle(choices)
    return {
        "quota": quota - 1,
        "questions": question,
        "choices": choices,
        "time_allowed": config["time_allowed"],
        "right_choice_indexes": [i for i, c in enumerate(choices) if c in right_choices],
        "date_sent": date.today(),
        "time_sent": datetime.now(),
    }
def open_reward(open_times):
    """Roll *open_times* reward draws; returns a list of {"name": ...} dicts.

    Side effect: increments the winning reward's `given` counter.
    """
    granted = []
    for _ in range(open_times):
        if random.random() < settings["reward_frequency"]:
            # Build a pool weighted by the remaining stock of each reward.
            pool = []
            for cfg in reward_configs:
                remaining = cfg["quantity"] - cfg["given"]
                if remaining > 0:
                    pool += [cfg] * remaining
            won = random.choice(pool)
            won["given"] += 1
            granted.append({
                "name": won["name"]
            })
        else:
            granted.append({
                "name": "chúc bạn may mắn lần sau"
            })
    return granted
def check_answer(player, player_choice_indexes):
    """Grade the player's most recent quiz issued today.

    Returns a status string ("Get /quiz first", "Already answer", "too late")
    or a result dict with correctness, reward draws and the right answer.
    Side effect: records the submitted answer on the quiz dict.
    """
    today_quizzes = [quiz for quiz in player["quizzes"] if quiz["date_sent"] == date.today()]
    if len(today_quizzes) == 0:
        return "Get /quiz first"
    else:
        # Grade only the most recently issued quiz.
        today_quiz = today_quizzes[-1]
        player_time_spent = datetime.now() - today_quiz["time_sent"]
        if "player_choice_indexes" in today_quiz:
            # Presence of the key marks the quiz as already answered.
            return "Already answer"
        elif player_time_spent > timedelta(seconds=today_quiz["time_allowed"]):
            return "too late"
        else:
            today_quiz["player_choice_indexes"] = player_choice_indexes
            # Order-insensitive comparison of chosen vs. correct indexes.
            if set(player_choice_indexes) == set(today_quiz["right_choice_indexes"]):
                player_seconds_spent = player_time_spent.total_seconds()
                # speed is 1 for an instant answer, 0 at the deadline.
                speed = 1 - (player_seconds_spent / today_quiz["time_allowed"])
                if speed > 0.7: # answered within ~30% of the allowed time
                    open_times = 3
                elif speed > 0.3: # answered within ~70% of the allowed time
                    open_times = 2
                else: # answered close to the deadline
                    open_times = 1
                rewards = open_reward(open_times)
                return {
                    "correct": True,
                    "open_times": open_times,
                    "rewards": rewards,
                    "right_choice": today_quiz["right_choice_indexes"]
                }
            else:
                return {
                    "correct": False,
                    "right_choice": today_quiz["right_choice_indexes"]
                }
# Interactive REPL driving the quiz game.
# BUG FIX: `player` was unbound until the first "login", so the "quiz" and
# "answer" commands crashed with NameError; it is now initialised to None and
# guarded.
player = None
while True:
    cmd = input("cmd: ").lower().strip()
    if cmd == "login":
        player_name = input("playername").lower()
        found_players = [p for p in players if p["player_name"] == player_name]
        if len(found_players) == 0:
            # First-time player: seed the record with the configured quota.
            new_player = {"player_name": player_name, "extra_quota": settings["initial_quota"], "quizzes": [], "results": []}
            players.append(new_player)
            player = new_player
            print("Welcome new player")
        else:
            player = found_players[0]
            print("Welcome")
    elif cmd == "quiz":
        if player is None:
            print("Please login first")
            continue
        quiz_config = random.choice(quiz_configs)
        quiz = generate_quiz(player, quiz_config)
        player["quizzes"].append(quiz)
        player_quiz = quiz.copy()
        print(player_quiz)
    elif cmd == "answer":
        if player is None:
            print("Please login first")
            continue
        answer = input("Answer? ").strip().split(" ")
        choice_indexes = [int(choice_str) for choice_str in answer if choice_str.isdigit()]
        result = check_answer(player, choice_indexes)
        player["results"].append(result)
        print(result)
    elif cmd == "exit":
        break
| true
|
8798ce524341933d5596b8b7a90ded9c87621417
|
Python
|
hanpiness/history_study
|
/历史文件/爬取经纬度.py
|
UTF-8
| 1,107
| 2.9375
| 3
|
[] |
no_license
|
import json
from urllib.request import urlopen,quote
import requests,csv
import time
# Resolve an address string to coordinates via the Baidu geocoding API.
def getlnglat(address):
    """Return the parsed JSON geocoding response for *address*."""
    url = 'http://api.map.baidu.com/geocoding/v3/'
    output = 'json'
    ak = 'txrm69lvmWHa66jgClsR1F8yuVfhKNkK'
    # Addresses are Chinese text, so percent-encode them for the URL.
    encoded = quote(address)
    uri = url + '?' + 'address=' + encoded + '&output=' + output + '&ak=' + ak
    payload = urlopen(uri).read().decode()
    return json.loads(payload)
# Batch-geocode every city listed in the CSV and dump the coordinates.
# BUG FIX: the original ended with `file.close` (missing parentheses), which
# never closed the output handle; with-blocks now close both files reliably.
with open(r'.\\point.txt', 'w') as out_file:
    with open(r'C:\\Users\\92149\\Desktop\\BaiduMap_cityCode_1102.csv', 'r', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            b = line[1].strip()
            c = getlnglat(b)
            # NOTE(review): the Baidu API documents status == 0 as success,
            # so this `!= 1` test looks like it should be `== 0` -- verify.
            if (c['status'] != 1):
                lng = c['result']['location']['lng']
                lat = c['result']['location']['lat']
                time.sleep(0.1)  # throttle to respect the API rate limit
                str_temp = '{"lat":' + str(lat) + ',"lng":' + str(lng) + "loc:" + b + '},' + '\n'
                print(str_temp)
                out_file.write(str_temp)
| true
|
c4ee13fa23519552402efdefd5657b2633b4ab10
|
Python
|
tks3210/autoJudge
|
/regiater_cmd.py
|
UTF-8
| 1,339
| 2.828125
| 3
|
[] |
no_license
|
import os
import sys
import argparse
if __name__ == '__main__':
    # Build the parser that receives the command name to register.
    parser = argparse.ArgumentParser()
    # Default to 'atjudge' when no argument is given.
    parser.add_argument('command_name', help='commmand name to register', type=str, nargs='*', default='atjudge')
    args = parser.parse_args()
    # The registered command is a symlink to this repo's run.sh.
    targetfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'run.sh')
    # Detect the OS to pick the symlink location.
    # NOTE(review): with nargs='*', args.command_name is a *list* whenever an
    # argument is actually passed, so the string concatenations below look
    # like they would raise TypeError -- confirm intended usage.
    if os.name == 'nt':
        cmd = [targetfilepath, 'C:/commands/' + args.command_name]
        # cmd = ['mklink', + args.command_name, targetfilepath]
    elif os.name == 'posix':
        cmd = [targetfilepath, '/usr/local/bin/' + args.command_name]
    else:
        print('This OS is not supported\nPlease create a symbolic link or register an alias manually\n')
        print('ファイルパス:')
        print(targetfilepath)
        sys.exit()
    print('unlink path:' + ' '.join(cmd))
    try:
        # os.symlink(src, dst): creates cmd[1] pointing at cmd[0].
        os.symlink(*cmd)
    except FileExistsError as e:
        print(e)
    except OSError:
        print('Permission denied\nPlease run it again with administrative privileges')
    except Exception as e:
        print(e)
    else:
        print('The command was successfully registered.')
        print('We can run with "{}"'.format(args.command_name))
| true
|
c3935acbf2b781ab5c5e30452647390d901b3d2b
|
Python
|
jicruz96/AirBnB_clone_v2
|
/3-deploy_web_static.py
|
UTF-8
| 1,892
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
""" generates a .tgz archive of web_stack folder """
from fabric.api import local, run, put, env
from os.path import exists
from datetime import datetime as time
# Public IP addresses of the two deployment targets; Fabric runs the remote
# commands on every host listed in env.hosts.
web_01 = '35.190.188.58'
web_02 = '52.23.162.134'
env.hosts = [web_01, web_02]
def do_pack():
    """Archive web_static/ into versions/web_static_<timestamp>.tgz.

    Returns the archive path on success, or None when tar fails.
    """
    time_and_date = time.now().strftime("%Y%m%d%H%M%S")
    archive_name = "versions/web_static_{}.tgz".format(time_and_date)
    # Ensure the versions/ directory exists before archiving into it.
    local("if [ ! -d versions ]; then mkdir versions; fi")
    try:
        local("tar -czvf {} web_static/".format(archive_name))
        return archive_name
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        return None
def do_deploy(archive_path):
    """Upload *archive_path* to every host in env.hosts and activate it.

    Returns True when the deploy succeeds, False otherwise (missing archive
    or any remote command failure).
    """
    if archive_path is None or not exists(archive_path):
        return False
    # Create strings for archive name, link path, and target directory.
    archive_name = archive_path.split('/')[-1]
    link_path = '/data/web_static/current'
    # Renamed from `dir`, which shadowed the builtin dir().
    release_dir = '/data/web_static/releases/{}/'.format(archive_name.split('.')[0])
    try:
        # Transfer archive
        put(archive_path, '/tmp/')
        # Make directory
        run('mkdir -p {}'.format(release_dir))
        # Extract contents of archive
        run('tar -xzf /tmp/{} -C {}'.format(archive_name, release_dir))
        # Delete archive
        run('rm -rf /tmp/{}'.format(archive_name))
        # Move files from unzipped archive folder to the release directory
        run('mv {}web_static/* {}'.format(release_dir, release_dir))
        # Delete unzipped archive folder
        run('rm -rf {}web_static/'.format(release_dir))
        # Replace the old symbolic link with one pointing at the new release
        run('rm -rf {}'.format(link_path))
        run('ln --symbolic {} {}'.format(release_dir, link_path))
        # If we made it here, print this message
        print('New version deployed!')
        return True
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        return False
def deploy():
    """Pack the current web_static tree and push it to all hosts."""
    archive = do_pack()
    return do_deploy(archive)
| true
|
cb9ada82985019326278f18c00b6287bb552f0d9
|
Python
|
lade043/grade-program
|
/user_IO.py
|
UTF-8
| 3,135
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
import exceptions
def user_input():
    """Interactive menu: returns a command tuple for the caller to dispatch.

    Return shapes:
      ("output", subject)                                  -- show a subject
      ("input", "subject", subject, main_subject, oral)    -- add a subject
      ("input", "category", subject, category, rating)     -- add a category
      ("input", "grade", subject, category, grade, name)   -- add a grade
      "exit"                                               -- quit
    NOTE(review): unrecognised answers (and failed `convertible` checks) fall
    through and implicitly return None -- callers must handle that.
    """
    todo = input("Ok, do you want to see them or to edit them [see/edit/exit]?\n")
    if todo == "see":
        print("Which subject do you want to know the information?")
        subject = input()
        return "output", subject
    elif todo == "edit":
        print("Do you want to add a subject, a category or a grade [subject/category/grade]?")
        user_add = input()
        if user_add == "subject":
            subject = input("Which subject do you want to add? \n")
            main_subject = input("Is this subject a main subject [y/n]? \n")
            # Map the y/n answer to a bool; anything else is an error.
            if main_subject == 'y':
                main_subject = True
            elif main_subject == 'n':
                main_subject = False
            else:
                raise exceptions.WrongCaseException
            oral_exam = input("Will there be an oral exam in this subject [y/n]? \n")
            if oral_exam == 'y':
                oral_exam = True
            elif oral_exam == 'n':
                oral_exam = False
            else:
                raise exceptions.WrongCaseException
            return "input", "subject", subject, main_subject, oral_exam
        elif user_add == "category":
            subject = input("To which subject do you want to add the category? \n")
            category = input("Which category do you want to add? \n")
            # NOTE(review): float() already raises on non-numeric input, so
            # the convertible() check afterwards looks redundant -- verify.
            rating = float(input("Rating? \n"))
            if exceptions.convertible(rating):
                return "input", "category", subject, category, rating
        elif user_add == "grade":
            subject = input("To which subject do you want to add the grade? \n")
            category = input("To which category do you want to add it? \n")
            grade = input("Grade? \n")
            grade_name = input("What's the name of the grade? \n")
            # NOTE(review): unlike the rating above, `grade` stays a string
            # here and is only validated via convertible -- confirm intended.
            if exceptions.convertible(grade):
                return "input", "grade", subject, category, grade, grade_name
    elif todo == "exit":
        return "exit"
def user_output(subject, categories, average, average_subject):
    """Print a per-category grade report for *subject*.

    categories maps a category name to a tuple whose first item is a list of
    grade objects (each exposing a ``.grade`` attribute); *average* holds the
    category averages in iteration order; *average_subject* is the overall one.
    """
    print("\n\n In the subject {} are the following categories:".format(subject))
    for index, name in enumerate(categories):
        print("The average in {} is {} with these grades:".format(name, average[index]))
        # Reproduce the historical "1, 2, " trailing-comma formatting.
        grade_line = "".join(str(entry.grade) + ", " for entry in categories[name][0])
        print(grade_line + "\n")
    print("The average of {} is {}.".format(subject, average_subject))
def exception_raised(exception):
    """Print a human-readable explanation for a known exception class.

    *exception* is compared by identity against the classes declared in the
    project's ``exceptions`` module.
    """
    print("Dear user you've raised an exception. The cause of the exception is:")
    if exception is exceptions.SubjectNotExistingException:
        print("The subject you tried to access is not created")
    # BUG FIX: this branch previously compared the *exceptions module itself*
    # ("elif exceptions is exceptions.CategoryNotExistingException"), so it
    # could never match; compare the passed-in exception instead.
    elif exception is exceptions.CategoryNotExistingException:
        print("The category you tried to add something is non-existent in this subject")
    elif exception is exceptions.NotANumberException:
        print("The number you gave the program is not a number")
    elif exception is exceptions.WrongCaseException:
        print("This was not one of the available choices")
| true
|
87d22e4c599cd62f6dfe94fe33417973b8878d6f
|
Python
|
chenshanghao/LeetCode_learning
|
/Problem_31/learning_solution.py
|
UTF-8
| 879
| 2.859375
| 3
|
[] |
no_license
|
class Solution(object):
    """LeetCode 31: next lexicographic permutation, computed in place."""

    def nextPermutation(self, nums):
        """Rearrange *nums* into its next permutation; returns None.

        If *nums* is already the largest permutation, it is reset to the
        smallest (ascending) one.
        """
        size = len(nums)
        if size <= 1:
            return
        # Walk left past the non-increasing suffix: pivot-1 is the rightmost
        # index with nums[pivot-1] < nums[pivot] (pivot == 0 if none exists).
        pivot = size - 1
        while pivot - 1 >= 0 and nums[pivot] <= nums[pivot - 1]:
            pivot -= 1
        if pivot > 0:
            # Swap nums[pivot-1] with the rightmost element greater than it.
            for swap_idx in range(size - 1, pivot - 1, -1):
                if nums[swap_idx] > nums[pivot - 1]:
                    nums[swap_idx], nums[pivot - 1] = nums[pivot - 1], nums[swap_idx]
                    break
        # The suffix is non-increasing; reversing it yields the minimal tail.
        nums[pivot:] = nums[pivot:][::-1]
| true
|
e0bf749a61fd0a6e342944553f05ad2e21732f19
|
Python
|
nordugrid/arc
|
/src/services/acix/core/test/test_bloomfilter.py
|
UTF-8
| 1,301
| 2.640625
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
from twisted.trial import unittest
from acix.core import bloomfilter
KEYS = ['one', 'two', 'three', 'four']
FALSE_KEYS = ['five', 'six', 'seven' ]
SIZE = 160
class BloomFilterTestCase(unittest.TestCase):
    """Exercise BloomFilter membership, serialization and reconstruction."""

    def setUp(self):
        self.bf = bloomfilter.BloomFilter(SIZE)

    def testContains(self):
        for key in KEYS:
            self.bf.add(key)
        for key in KEYS:
            self.failUnlessIn(key, self.bf)
        for key in FALSE_KEYS:
            self.failIfIn(key, self.bf)

    def testSerialization(self):
        for key in KEYS:
            self.bf.add(key)
        payload = self.bf.serialize()
        restored = bloomfilter.BloomFilter(SIZE, payload)
        for key in KEYS:
            self.failUnlessIn(key, restored)
        for key in FALSE_KEYS:
            self.failIfIn(key, restored)

    def testReconstruction(self):
        # Build a filter with some non-standard hash functions.
        source = bloomfilter.BloomFilter(SIZE, hashes=['js', 'dek', 'sdbm'])
        for key in KEYS:
            source.add(key)
        # Sanity-check the source filter before serializing it.
        for key in KEYS:
            self.failUnlessIn(key, source)
        for key in FALSE_KEYS:
            self.failIfIn(key, source)
        # Rebuild from the serialized bits plus the same hash set.
        clone = bloomfilter.BloomFilter(SIZE, bits=source.serialize(), hashes=source.get_hashes())
        for key in KEYS:
            self.failUnlessIn(key, clone)
        for key in FALSE_KEYS:
            self.failIfIn(key, clone)
| true
|
c80e24c688b9407efe0902af617ad6e00f855baf
|
Python
|
slad99/pythonscripts
|
/Application/Check if the Application is Running or Not/check-if-the-application-is-running-or-not.py
|
UTF-8
| 465
| 2.828125
| 3
|
[] |
no_license
|
#To define a particular parameter, replace the 'parameterName' inside itsm.getParameter('parameterName') with that parameter's name
# NOTE(review): `itsm` is injected by the hosting RMM platform at runtime -- it is not importable locally.
appName =itsm.getParameter('parameterName')
import os
def IsAppRunning(appName):
    # Ask Windows for every task in the "running" state and report whether
    # appName appears anywhere in that listing (plain substring match).
    task_listing = os.popen('TASKLIST /FI "STATUS eq running"').read()
    return appName in task_listing
# Report the check result (this file uses Python 2 print-statement syntax).
if IsAppRunning(appName):
    print 'Success: '+appName+' is running'
else:
    print 'Fail: '+appName+' is not running'
| true
|
c659081c42aaf020ab3e8d3390bf6a6c57a14f22
|
Python
|
kelvinfan001/mini-programs
|
/MarsTime Converter/MarsTime.py
|
UTF-8
| 5,148
| 3.984375
| 4
|
[] |
no_license
|
"""
MarsTime Converter Module
Instructions:
Excel dates in a plain text file named 'excel_time.txt' will be converted into
Mars time and written on a plain text file named 'marstime.txt'
"""
from typing import List
import math
# Input: one Excel serial date per line; output: one Mars date per line.
# Both files are opened at import time and closed in the __main__ block.
path = 'excel_time.txt'
new_path = 'marstime.txt'
time_file = open(path, 'r')
time_file_string = time_file.read()
time_list = time_file_string.split('\n')
marstime_file = open(new_path, 'w')
def remove_time(timelist: list) -> None:
    """Truncate each Excel timestamp in *timelist* to its whole-day part, in place.

    >>> time = ['42839.33194', '42843.4436', '42844.10072']
    >>> remove_time(time)
    >>> time
    [42839, 42843, 42844]
    """
    # Slice-assign so the caller's list object is mutated, not rebound.
    timelist[:] = [int(float(stamp)) for stamp in timelist]
def create_mars_format(mars_time_info: list) -> str:
    """Return a date string in MarsTime format.

    Precondition:
        mars_time_info contains exactly four items:
        index 0 year, index 1 period, index 2 week, index 3 day.

    Raises:
        ValueError: if mars_time_info does not hold exactly four items.

    >>> create_mars_format([2018, 6, 2, 6])
    'Y2018P6W2D6'
    """
    if len(mars_time_info) != 4:
        # Previously a bare ``raise Exception`` -- callers had nothing
        # specific to catch and no message to act on.
        raise ValueError(
            "expected [year, period, week, day], got {!r}".format(mars_time_info))
    year, period, week, day = mars_time_info
    return 'Y{}P{}W{}D{}'.format(year, period, week, day)
def convert_2017_base(original_base: int) -> int:
    """Rebase an Excel serial date to days after December 31, 2016.

    >>> convert_2017_base(42839)
    104
    >>> convert_2017_base(42736)
    1
    """
    # Excel serial 42736 corresponds to January 1, 2017, which maps to day 1.
    excel_dec_31_2016 = 42735
    return original_base - excel_dec_31_2016
def convert_mars_time_info(excel_date: int) -> List[int]:
    """Return [year, period, week, day] in MarsTime for a 2017-based day count.

    Precondition: excel_date is converted to 2017 base (number of days after
    December 31, 2016).

    >>> convert_mars_time_info(1)
    [2017, 1, 1, 1]
    >>> convert_mars_time_info(104)
    [2017, 4, 3, 6]
    >>> convert_mars_time_info(366)
    [2018, 1, 1, 2]
    >>> convert_mars_time_info(2555)
    [2023, 13, 4, 7]
    >>> convert_mars_time_info(728)
    [2018, 13, 4, 7]
    """
    # (exclusive upper bound, Mars year). Most Mars years span 364 days;
    # 2020 and 2026 span 371.
    year_spans = [
        (365, 2017), (729, 2018), (1093, 2019), (1464, 2020),
        (1828, 2021), (2192, 2022), (2556, 2023), (2920, 2024),
        (3284, 2025), (3655, 2026),
    ]
    if excel_date < 1:
        raise ValueError
    for upper_bound, candidate in year_spans:
        if excel_date < upper_bound:
            year = candidate
            break
    else:
        raise ValueError
    # Period = 4-week slice of the Mars year; week and day follow suit.
    period = math.ceil(convert_to_days_after_year(excel_date) / (4 * 7))
    week = math.ceil(convert_to_days_after_period(excel_date) / 7)
    day = convert_to_days_after_week(excel_date)
    return [year, period, week, day]
def convert_to_days_after_year(excel_date: int) -> int:
    """Return the number of days after a full Mars year.

    Mars years 2017-2019 and 2021-2025 are 364 days; 2020 and 2026 are 371.

    >>> convert_to_days_after_year(2)
    2
    >>> convert_to_days_after_year(364)
    364
    >>> convert_to_days_after_year(365)
    1
    """
    if excel_date < 1093:
        # Years 2017-2019: three consecutive 364-day years.
        if excel_date % 364 == 0:
            return 364
        else:
            return excel_date % 364
    elif 1093 <= excel_date < 1464:
        # 2020 is a long year (371 days): days 1093..1463 map to 1..371.
        return excel_date - 1092
    elif 1464 <= excel_date < 3284:
        # Years 2021-2025: 364-day years starting after day 1463.
        if (excel_date - 1463) % 364 == 0:
            return 364
        else:
            return (excel_date - 1463) % 364
    elif 3284 <= excel_date < 3655:
        # BUG FIX: 2026 (the other 371-day year, supported by
        # convert_mars_time_info) previously fell through every branch and
        # returned None, crashing the period/week computation downstream.
        return excel_date - 3283
def convert_to_days_after_period(excel_date: int) -> int:
    """Return the number of days after a full Mars period (28-day block).

    >>> convert_to_days_after_period(2)
    2
    >>> convert_to_days_after_period(366)
    2
    >>> convert_to_days_after_period(2191)
    28
    """
    remainder = convert_to_days_after_year(excel_date) % 28
    # A zero remainder means the last day of a period, i.e. day 28.
    if remainder == 0:
        return 28
    return remainder
def convert_to_days_after_week(excel_date: int) -> int:
    """
    Return the number of days after a full Mars week.

    >>> convert_to_days_after_week(2)
    2
    >>> convert_to_days_after_week(366)
    2
    >>> convert_to_days_after_week(2191)
    7
    """
    # NOTE(review): the year conversion is applied twice here -- once directly
    # and once inside convert_to_days_after_period. The doctests pass, but
    # confirm this nesting is intentional rather than simply
    # convert_to_days_after_period(excel_date).
    days_after_period = convert_to_days_after_period(convert_to_days_after_year(excel_date))
    # Remainder 0 means the last day of the week, i.e. day 7.
    return 7 if days_after_period % 7 == 0 else days_after_period % 7
if __name__ == '__main__':
    # Pipeline: strip times -> rebase to 2017 -> split into Mars fields -> format.
    remove_time(time_list)
    rebased = [convert_2017_base(stamp) for stamp in time_list]
    mars_fields = [convert_mars_time_info(stamp) for stamp in rebased]
    formatted = [create_mars_format(fields) for fields in mars_fields]
    # One Mars date per line, each line newline-terminated.
    marstime_file.writelines(entry + '\n' for entry in formatted)
    time_file.close()
    marstime_file.close()
| true
|
003b61fed0f7b5af5e1fed3fafb9c563536f6786
|
Python
|
UtkrishtDhankar/cubinator
|
/rotation.py
|
UTF-8
| 1,208
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
from point import *
import math
def rotate_about_x_clockwise(point):
    """Return *point* rotated 90 degrees clockwise about the X axis."""
    return point.return_rotation([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
def rotate_about_x_counter_clockwise(point):
    """Return *point* rotated 90 degrees counter-clockwise about the X axis."""
    return point.return_rotation([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
def rotate_about_y_clockwise(point):
    """Return *point* rotated 90 degrees clockwise about the Y axis."""
    return point.return_rotation([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
def rotate_about_y_counter_clockwise(point):
    """Return *point* rotated 90 degrees counter-clockwise about the Y axis."""
    return point.return_rotation([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
def rotate_about_z_clockwise(point):
    """Return *point* rotated 90 degrees clockwise about the Z axis."""
    return point.return_rotation([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
def rotate_about_z_counter_clockwise(point):
    """Return *point* rotated 90 degrees counter-clockwise about the Z axis."""
    return point.return_rotation([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
| true
|
c970e1a9cba99cb609cf00b4911034208edf9d11
|
Python
|
montellasebastien/resume
|
/education.py
|
UTF-8
| 5,983
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from manimlib.imports import *
class Education:
    """Manim scene section that animates the resume's Education chapter.

    *seb_resume* is the enclosing Scene-like object; all animation calls
    (play/wait) are delegated to it.
    """
    def __init__(self, seb_resume, title='Education', color=YELLOW):
        """Store the scene and play its transition into the Education section."""
        self.seb_resume = seb_resume
        self.seb_resume.apply_transition(title=title,
                                         color=color)
    def show_universities(self):
        """Animate both universities side by side, then their course lists."""
        # COUNTRIES
        france_txt = TextMobject("France")
        france_txt.scale(0.8)
        france_txt.set_color(BLUE)
        taiwan_txt = TextMobject("Taiwan")
        taiwan_txt.scale(0.8)
        taiwan_txt.set_color(BLUE)
        # SCHOOLS (full name plus the acronym it later morphs into)
        utbm_name_txt = TextMobject('University of Technology of Belfort-Montbeliard')
        utbm_name_txt.scale(0.65)
        utbm_name_txt.set_color(WHITE)
        utbm_txt = TextMobject('UTBM')
        utbm_txt.scale(0.75)
        utbm_txt.set_color(WHITE)
        ncu_name_txt = TextMobject('National Central University')
        ncu_name_txt.scale(0.65)
        ncu_name_txt.set_color(WHITE)
        ncu_txt = TextMobject('NCU')
        ncu_txt.scale(0.75)
        ncu_txt.set_color(WHITE)
        # DATES
        utbm_date = TextMobject("(2013 - 2019)")
        utbm_date.scale(0.6)
        utbm_date.set_color(GREEN)
        ncu_date = TextMobject('(2016 - 2019)')
        ncu_date.scale(0.6)
        ncu_date.set_color(GREEN)
        # RELATIVE POSITION (stack name / country / date vertically)
        france_txt.next_to(utbm_name_txt, DOWN)
        utbm_date.next_to(france_txt, DOWN)
        taiwan_txt.next_to(ncu_name_txt, DOWN)
        ncu_date.next_to(taiwan_txt, DOWN)
        # GROUPS (UTBM on the left, NCU on the right)
        utbm_group = VGroup(utbm_name_txt,
                            france_txt,
                            utbm_date)
        ncu_group = VGroup(ncu_name_txt,
                           taiwan_txt,
                           ncu_date)
        coordinate_utbm = 3 * LEFT + 0.5 * DOWN
        coordinate_ncu = 3 * RIGHT + 0.5 * DOWN
        utbm_group.move_to(coordinate_utbm)
        ncu_group.move_to(coordinate_ncu)
        utbm_txt.move_to(utbm_name_txt.get_center())
        ncu_txt.move_to(ncu_name_txt.get_center())
        # Fade each school in, then collapse its full name into the acronym.
        self.seb_resume.play(FadeIn(utbm_group))
        self.seb_resume.wait(2)
        self.seb_resume.play(Transform(utbm_name_txt, utbm_txt))
        self.seb_resume.wait(3)
        self.seb_resume.play(FadeIn(ncu_group))
        self.seb_resume.wait(2)
        self.seb_resume.play(Transform(ncu_name_txt, ncu_txt))
        self.seb_resume.wait(3)
        # Split the screen and push both acronyms up to make room for courses.
        separate_line = Line(np.asarray([0, -5, 0]), np.asarray([0, 2, 0]))
        self.seb_resume.play(Write(separate_line),
                             ApplyMethod(utbm_txt.move_to, utbm_txt.get_center() + 2 * UP),
                             ApplyMethod(ncu_txt.move_to, ncu_txt.get_center() + 2 * UP),
                             FadeOut(utbm_group),
                             FadeOut(ncu_group))
        self.seb_resume.wait(3)
        scale_txt = 0.75
        # UTBM course list (each item placed one step below the previous)
        programming_basics = TextMobject("Programming Fundamentals")
        programming_basics.move_to(utbm_txt.get_center() + DOWN)
        programming_basics.scale(scale_txt)
        programming_basics.set_color(BLUE)
        mathematics_txt = TextMobject("Mathematics")
        mathematics_txt.move_to(programming_basics.get_center() + DOWN)
        mathematics_txt.scale(scale_txt)
        mathematics_txt.set_color(GREEN)
        management_txt = TextMobject('Management')
        management_txt.move_to(mathematics_txt.get_center() + DOWN)
        management_txt.scale(scale_txt)
        management_txt.set_color(RED)
        marketing_txt = TextMobject('Marketing')
        marketing_txt.move_to(management_txt.get_center() + DOWN)
        marketing_txt.scale(scale_txt)
        marketing_txt.set_color(WHITE)
        # NCU course list
        machine_learning_txt = TextMobject("Machine Learning")
        machine_learning_txt.move_to(ncu_txt.get_center() + DOWN)
        machine_learning_txt.scale(scale_txt)
        machine_learning_txt.set_color(BLUE)
        deep_learning_txt = TextMobject("Deep Learning")
        deep_learning_txt.move_to(machine_learning_txt.get_center() + DOWN)
        deep_learning_txt.scale(scale_txt)
        deep_learning_txt.set_color(GREEN)
        nlp_txt = TextMobject("NLP")
        nlp_txt.move_to(deep_learning_txt.get_center() + DOWN)
        nlp_txt.scale(scale_txt)
        nlp_txt.set_color(RED)
        ir_txt = TextMobject('Information Retrieval')
        ir_txt.move_to(nlp_txt.get_center() + DOWN)
        ir_txt.scale(scale_txt)
        ir_txt.set_color(WHITE)
        # PLAY ANIMATION UTBM (waits shorten as the list progresses)
        self.seb_resume.play(FadeInFromDown(programming_basics))
        self.seb_resume.wait(1.5)
        self.seb_resume.play(FadeInFromDown(mathematics_txt))
        self.seb_resume.wait(1.0)
        self.seb_resume.play(FadeInFromDown(management_txt))
        self.seb_resume.wait(0.5)
        self.seb_resume.play(FadeInFromDown(marketing_txt))
        self.seb_resume.wait(3)
        # PLAY ANIMATION NCU
        self.seb_resume.play(FadeInFromDown(machine_learning_txt))
        self.seb_resume.wait(1.5)
        self.seb_resume.play(FadeInFromDown(deep_learning_txt))
        self.seb_resume.wait(1.0)
        self.seb_resume.play(FadeInFromDown(nlp_txt))
        self.seb_resume.wait(0.5)
        self.seb_resume.play(FadeInFromDown(ir_txt))
        self.seb_resume.wait(3)
        # REMOVE EDUCATION (clear everything before the next resume section)
        self.seb_resume.play(FadeOut(utbm_txt),
                             FadeOut(ncu_txt),
                             FadeOut(separate_line),
                             FadeOut(programming_basics),
                             FadeOut(mathematics_txt),
                             FadeOut(management_txt),
                             FadeOut(marketing_txt),
                             FadeOut(machine_learning_txt),
                             FadeOut(deep_learning_txt),
                             FadeOut(nlp_txt),
                             FadeOut(ir_txt))
        self.seb_resume.wait(3)
| true
|
5d31f63c6e947d5d9021f02233b4002045dac529
|
Python
|
nstarman/templates
|
/python/script.py
|
UTF-8
| 3,924
| 2.859375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE :
# AUTHOR :
# PROJECT :
#
# ----------------------------------------------------------------------------
"""**DOCSTRING**.
This script can be run from the command line with the following parameters:
Parameters
----------
"""
__author__ = ""
# __copyright__ = "Copyright 2019, "
# __credits__ = [""]
# __license__ = ""
# __version__ = "0.0.0"
# __maintainer__ = ""
# __email__ = ""
# __status__ = "Production"
# __all__ = [
# ""
# ]
##############################################################################
# IMPORTS
# BUILT-IN
import argparse
import typing as T
import warnings
##############################################################################
# PARAMETERS
# General
# Module-level defaults; consumed by make_parser() as argparse defaults.
_PLOT: bool = True  # Whether to plot the output
# Log file
_VERBOSE: int = 0  # Degree of logfile verbosity
##############################################################################
# CODE
##############################################################################
class ClassName(object):
    """Template class: keeps a single constructor argument on the instance."""

    def __init__(self, arg):
        """Store *arg* for later use."""
        super().__init__()
        self.arg = arg

    # /def


# /class
# -------------------------------------------------------------------
def function():
    """Template function placeholder; intentionally does nothing."""
    return None


# /def
##############################################################################
# Command Line
##############################################################################
def make_parser(
    *, inheritable: bool = False, plot: bool = _PLOT, verbose: int = _VERBOSE
) -> argparse.ArgumentParser:
    """Expose ArgumentParser for ``main``.

    Parameters
    ----------
    inheritable: bool, optional, keyword only
        whether the parser can be inherited from (default False).
        if True, sets ``add_help=False`` and ``conflict_hander='resolve'``
    plot : bool, optional, keyword only
        Whether to produce plots, or not (default for ``--plot``).
    verbose : int, optional, keyword only
        Script logging verbosity (default for ``-v/--verbose``).

    Returns
    -------
    parser: |ArgumentParser|
        The parser with arguments:

        - plot
        - verbose

    ..
      RST SUBSTITUTIONS

    .. |ArgumentParser| replace:: `~argparse.ArgumentParser`

    """
    parser = argparse.ArgumentParser(
        description="",
        # BUG FIX: ``~inheritable`` is *bitwise* NOT on a bool (-1 or -2, both
        # truthy), so add_help was always True and the handler always
        # "resolve". Use logical negation, matching the docstring contract.
        add_help=not inheritable,
        conflict_handler="resolve" if inheritable else "error",
    )
    # plot or not
    # BUG FIX: this option was previously registered as a second "--verbose"
    # (shadowed by the one below) and ignored the *plot* parameter.
    parser.add_argument("--plot", action="store", default=plot, type=bool)
    # script verbosity (BUG FIX: use the *verbose* parameter, not a literal 0)
    parser.add_argument("-v", "--verbose", action="store", default=verbose, type=int)

    return parser


# /def
# ------------------------------------------------------------------------
def main(
    args: T.Union[list, str, None] = None,
    opts: T.Optional[argparse.Namespace] = None,
):
    """Script Function.

    Parameters
    ----------
    args : list or str or None, optional
        sys.argv-style arguments without the script name (argv[1:]).
    opts : `~argparse.Namespace` or None, optional
        pre-parsed options; consulted ONLY when *args* is None.
    """
    use_preparsed = opts is not None and args is None
    if not use_preparsed:
        if opts is not None:
            warnings.warn("Not using `opts` because `args` are given")
        # A string of arguments is tokenized shell-style on whitespace.
        tokens = args.split() if isinstance(args, str) else args
        opts = make_parser().parse_args(tokens)
    # /if


# /def
# ------------------------------------------------------------------------
# Entry point: parse the real CLI arguments when executed directly.
if __name__ == "__main__":

    # call script
    main(args=None, opts=None)  # all arguments except script name

# /if
##############################################################################
# END
| true
|
227a79a67c2b1f2416b7996f856b317cf414e4fd
|
Python
|
wgf5544/wugaofeng
|
/python/matplotlib_learning.py
|
UTF-8
| 3,261
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
__author__ = 'wgf'
__date__ = ' 下午11:54'
# Overview (translated): in a quantitative trading system, plotting is the most
# direct way to visualize data. Matplotlib is Python's most popular plotting
# library; it supports two styles: function-style and object-style plotting.
'''
量化交易系统中,绘图是数据可视化最直接的方法,也是直观分析数据必不可少的步骤。
Matplotlib是Python中专门用于数据可视化操作的第三方库,也是最流行的会图库。
两种绘图方式:函数式绘图和对象式绘图。
'''
# Function-style plotting: Matplotlib's pyplot mirrors MATLAB's command API.
'''
MATLAB是数据绘图领域广泛使用的语言和工具,调用函数命令可以轻松绘图。、
Matplotlib是受NATLAB的启发而构建,设计了一套完全仿照MATLAB函数形式的绘图API。
'''
import matplotlib.pyplot as plt  # pyplot: the MATLAB-like plotting API
import numpy as np
import matplotlib
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # render CJK labels correctly (works on macOS)
# plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# Demo data: 200 random y values over an integer x axis.
y_value = np.random.randn(200)
x_value = np.arange(200)
ylim_min = y_value.min()-1
ylim_max = y_value.max()+1
yticks_min = y_value.min()+0.5
yticks_max = y_value.max()-0.5
ylim_setp = (yticks_max - yticks_min)/2.1
# xlim(min,max) / ylim(min,max) set the display range of the X and Y axes.
plt.xlim(0,len(x_value))
plt.ylim(ylim_min,ylim_max)
# xticks(location,labels) / yticks(location,labels) set the tick positions and
# their display labels; `rotation` tilts crowded labels to avoid overlap.
plt.xticks(np.arange(0, len(x_value), 20),
           ['2015-02-01', '2015-03-01', '2015-04-02', '2015-05-02',
            '2015-06-02', '2015-07-02', '2015-08-02', '2015-09-02',
            '2015-10-02', '2015-11-02'],rotation=45)
plt.yticks(np.arange(yticks_min, yticks_max, ylim_setp), [u'上限预警值', u'标准值', u'下限预警值'])
# note (4): title() adds the figure title; loc may be center/left/right.
plt.title(u"函数式编程")# note (4)
# note (5): xlabel()/ylabel() label the X and Y axes.
plt.xlabel(u"日期")# note (5)
plt.ylabel(u"数值")# note (5)
# note (6): grid() draws a background grid for easier value reading.
plt.grid(True)# note (6)
# note (7): legend() shows the legend; labels come from each plot()'s `label`.
plt.legend(loc='best')# note (7)
# note (8): plot() draws the line; ls=style, c=color, lw=width.
plt.plot(x_value,y_value,label=u"随机误差",ls='-',c='r',lw=1) # note (8)
plt.show()
| true
|
1a8e5e3682f8514ea2ae16d6adb424e7754e13ae
|
Python
|
Jnewgeek/handson-ml
|
/tackle_titanic.py
|
UTF-8
| 8,047
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 11:19:39 2019
@author: Administrator
# Tackle The Titanic datasets
"""
import os
os.chdir(os.getcwd())
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc("axes",labelsize=14)
mpl.rc("xtick",labelsize=12)
mpl.rc("ytick",labelsize=12)
plt.rcParams["font.sans-serif"]=["SimHei"]
plt.rcParams["axes.unicode_minus"]=False
import seaborn as sns
sns.set(font="SimHei")
chapter_id="titanic"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a 300-dpi PNG under ./images/<chapter_id>/."""
    target = os.path.join(".", "images", chapter_id, fig_id + ".png")
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format="png", dpi=300)
####################################### load data ###########################################
TITANIC_PATH = os.path.join("datasets", "titanic")
import pandas as pd
import time
def load_titanic_data(filename, titanic_path=TITANIC_PATH):
    """Read one Titanic CSV from *titanic_path* into a pandas DataFrame."""
    return pd.read_csv(os.path.join(titanic_path, filename))
# Load the train/test CSVs from disk, timing the read for user feedback.
print(">> Starting loading data...")
time1=time.time()
train_data = load_titanic_data("train.csv")
test_data = load_titanic_data("test.csv")
time2=time.time()
print("finished! use time %.2fs."%(time2-time1))
#train_data.head()
#train_data.info()
#train_data.describe()
#train_data["Survived"].value_counts()
################################ Prepare the data ####################################
from sklearn.base import BaseEstimator, TransformerMixin
# A class to select numerical or categorical columns
# since Scikit-Learn doesn't handle DataFrames yet
def get_preprocess_pipeline(num_columns=["Age", "SibSp", "Parch", "Fare"],
                            cat_columns=["Pclass", "Sex", "Embarked"]):
    """Build a FeatureUnion that imputes + encodes the given columns.

    Numeric columns are median-imputed; categorical columns are mode-imputed
    and one-hot encoded. The sklearn imports are local because the available
    class names differ across sklearn versions (<0.20 vs 0.20+).
    """
    class DataFrameSelector(BaseEstimator, TransformerMixin):
        # Column-subset transformer (sklearn pipelines predate DataFrame support).
        def __init__(self, attribute_names):
            self.attribute_names = attribute_names
        def fit(self, X, y=None):
            return self
        def transform(self, X):
            return X[self.attribute_names]
    from sklearn.pipeline import Pipeline
    try:
        from sklearn.impute import SimpleImputer # Scikit-Learn 0.20+
    except ImportError:
        from sklearn.preprocessing import Imputer as SimpleImputer
    # numeric features: fill missing values with the column median
    #num_columns=["Age", "SibSp", "Parch", "Fare"]
    num_pipeline = Pipeline([
            ("select_numeric", DataFrameSelector(num_columns)),
            ("imputer", SimpleImputer(strategy="median")),
        ])
    #num_pipeline.fit_transform(train_data)
    # categorical features: fill missing values with the column mode
    class MostFrequentImputer(BaseEstimator, TransformerMixin):
        # Learns each column's most frequent value in fit(), fills it in transform().
        def fit(self, X, y=None):
            self.most_frequent_ = pd.Series([X[c].value_counts().index[0] for c in X],
                                            index=X.columns)
            return self
        def transform(self, X, y=None):
            return X.fillna(self.most_frequent_)
    try:
        from sklearn.preprocessing import OrdinalEncoder # just to raise an ImportError if Scikit-Learn < 0.20
        from sklearn.preprocessing import OneHotEncoder
    except ImportError:
        from future_encoders import OneHotEncoder # Scikit-Learn < 0.20
    cat_pipeline = Pipeline([
            ("select_cat", DataFrameSelector(cat_columns)),
            ("imputer", MostFrequentImputer()),
            ("cat_encoder", OneHotEncoder(sparse=False)),
        ])
    #cat_pipeline.fit_transform(train_data)
    # concatenate the numeric and categorical feature blocks
    from sklearn.pipeline import FeatureUnion
    preprocess_pipeline = FeatureUnion(transformer_list=[
            ("num_pipeline", num_pipeline),
            ("cat_pipeline", cat_pipeline),
        ])
    return preprocess_pipeline
# prepared data finally
# Fit the preprocessing pipeline on the training set and extract the target.
preprocess_pipeline=get_preprocess_pipeline()
X_train = preprocess_pipeline.fit_transform(train_data)
y_train = train_data["Survived"]
################################## Train model ######################################
def select_model(model_name="SVC",X_train=X_train,y_train=y_train):
    """Build the requested classifier and score it with 10-fold CV.

    Parameters: model_name is "SVC" or "RF"; X_train/y_train default to the
    module-level prepared data (bound at definition time).
    Returns (model, scores); both are None for an unknown model_name.
    """
    print(">> %s model...\n"%model_name+"-"*40)
    time.sleep(0.5)
    time1=time.time()
    if model_name=="SVC":
        # SVC
        from sklearn.svm import SVC
        model = SVC(gamma="auto")
        #model.fit(X_train, y_train)
    elif model_name=="RF":
        from sklearn.ensemble import RandomForestClassifier
        model = RandomForestClassifier(n_estimators=100, random_state=42)
    else:
        # BUG FIX: callers unpack two values (``clf, scores = select_model(...)``);
        # returning a bare None raised "cannot unpack non-iterable NoneType".
        return None, None
    # cross_val_score
    from sklearn.model_selection import cross_val_score
    model_scores = cross_val_score(model, X_train, y_train, cv=10)
    time2=time.time()
    print("finished! use time %.2fs,%s mean score:"%(time2-time1,model_name),model_scores.mean())
    # test check
    # X_test = preprocess_pipeline.transform(test_data)
    # y_pred = svm_clf.predict(X_test)
    return model,model_scores
# Baseline models: SVC and random forest, each scored with 10-fold CV.
svm_clf,svm_scores=select_model()
forest_clf,forest_scores=select_model("RF")
def plot_modelScores():
    """Scatter both CV score sets side by side and overlay box plots."""
    plt.figure(figsize=(8, 4))
    for position, scores in ((1, svm_scores), (2, forest_scores)):
        plt.plot([position] * 10, scores, ".")
    plt.boxplot([svm_scores, forest_scores], labels=("SVM", "Random Forest"))
    plt.ylabel("Accuracy", fontsize=14)
#plot_modelScores()
#################### add more feature
# Bucket ages into 15-year bands and combine the two relative counts.
train_data["AgeBucket"] = train_data["Age"] // 15 * 15
#train_data[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean()
train_data["RelativesOnboard"] = train_data["SibSp"] + train_data["Parch"]
#train_data[["RelativesOnboard", "Survived"]].groupby(['RelativesOnboard']).mean()
# new pipeline (swap raw Age/SibSp/Parch for the engineered features)
preprocess_pipeline=get_preprocess_pipeline(num_columns=["AgeBucket", "RelativesOnboard", "Fare"])
X_train = preprocess_pipeline.fit_transform(train_data)
y_train = train_data["Survived"]
# new models (re-evaluate both classifiers on the engineered features)
svm_clf,svm_scores=select_model("SVC",X_train,y_train)
forest_clf,forest_scores=select_model("RF",X_train,y_train)
plot_modelScores()
# Grid
# Randomized hyperparameter search over the random forest (10 draws, 5-fold CV).
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint
time1=time.time()
param_distribs = {
        'n_estimators': randint(low=1, high=200),
        'max_features': randint(low=1, high=8),
    }
forest_reg = RandomForestClassifier(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,
                                n_iter=10, cv=5, scoring='accuracy', random_state=42,
                                verbose=5,n_jobs=-1)
rnd_search.fit(X_train, y_train)
time2=time.time()
print("\n>> Grid Search sucessfully,use time %.2fs\n"%(time2-time1))
final_model=rnd_search.best_estimator_
# Predictions on the test set (apply the same feature engineering first).
test_data["AgeBucket"] = test_data["Age"] // 15 * 15
#train_data[["AgeBucket", "Survived"]].groupby(['AgeBucket']).mean()
test_data["RelativesOnboard"] = test_data["SibSp"] + test_data["Parch"]
X_test_prepared = preprocess_pipeline.transform(test_data)
final_predictions = final_model.predict(X_test_prepared)
submission=load_titanic_data("gender_submission.csv")
# Confusion matrix against the reference submission labels.
from sklearn.metrics import confusion_matrix
true_survive=submission["Survived"].values
print("混淆矩阵:\n",confusion_matrix(true_survive,final_predictions))
# Precision / recall / F1 against the same reference labels.
from sklearn.metrics import precision_score, recall_score,f1_score
print("精确度:",precision_score(true_survive,final_predictions))
print("召回率:",recall_score(true_survive,final_predictions))
print("F1分数:",f1_score(true_survive,final_predictions))
# ROC
from sklearn.metrics import roc_curve
fpr,tpr,thresholds=roc_curve(true_survive,final_predictions)
#
def plot_roc_curve(fpr, tpr, label=None):
    """Draw an ROC curve plus the diagonal chance line on the unit square."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # random-classifier baseline
    plt.axis([0, 1, 0, 1])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
# Plot the ROC curve, print the AUC, and write the final submission CSV.
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
from sklearn.metrics import roc_auc_score
print("ROC值:",roc_auc_score(true_survive,final_predictions))
submission["Survived"]=final_predictions
submission.to_csv("./datasets/titanic/gender_submission_new.csv",index=False,encoding="utf-8")
| true
|
d517cf84b3fb6b346397b26174491b3c0c7995b0
|
Python
|
closcruz/wallbreakers-code
|
/week1/reverseWords.py
|
UTF-8
| 305
| 3.875
| 4
|
[] |
no_license
|
# Reverse words in a string while preserving spaces and word order
class ReverseWords:
    """Reverse the characters of each word while preserving word order."""

    def reverseWords(self, s):
        """Return *s* with every whitespace-separated word reversed in place."""
        return " ".join(word[::-1] for word in s.split())
# Smoke test: prints "s'teL ekat edoCteeL tsetnoc"
t1 = ReverseWords().reverseWords("Let's take LeetCode contest")
print(t1)
| true
|
1c13d15c727f9939858e8072c32de5bc5f8ea044
|
Python
|
ipunk007/Blog_TaufikSutanto
|
/TSutantoSMA.py
|
UTF-8
| 9,517
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 11:25:43 2018
MIT License with Acknowledgement
@author: Taufik Sutanto
Simple Social Media Analytics ver 0.11.1
https://taufiksutanto.blogspot.com/2018/01/easiest-social-media-analytics.html
"""
from pattern.web import Twitter, URL
from nltk.tokenize import TweetTokenizer; Tokenizer = TweetTokenizer(reduce_len=True)
from tqdm import tqdm
from wordcloud import WordCloud
from sklearn.feature_extraction.text import CountVectorizer
from textblob import TextBlob
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from bs4 import BeautifulSoup as bs
from sklearn.decomposition import LatentDirichletAllocation as LDA
import re, networkx as nx, matplotlib.pyplot as plt, operator, numpy as np,community
def crawl(topic, N=100, Nbatch = 25):
    """Collect up to N tweets about *topic* in batches of Nbatch, then fetch
    each tweet's full text by scraping its web page.

    Returns the list of tweet objects, each augmented with a 'fullTxt' key.
    NOTE(review): relies on pattern.web's Twitter search and Twitter's HTML
    markup ('TweetTextSize' paragraphs) -- both are fragile external contracts.
    """
    t = Twitter() # language='en','id'
    M = N//Nbatch #integer
    # i carries the last seen tweet id so each batch resumes where the
    # previous one stopped.
    i, Tweets, keepCrawling = None, [], True
    for j in tqdm(range(M)):
        if keepCrawling:
            for tweet in t.search(topic, start=i, count=Nbatch):
                try:
                    Tweets.append(tweet)
                    i = tweet.id
                except:
                    print("Twitter Limit reached")
                    keepCrawling = False # Second Break (outer loop)
                    break
        else:
            break
    print('Making sure we get the full tweets, please wait ...')
    for i, tweet in enumerate(tqdm(Tweets)):
        try:
            webPage = URL(tweet.url).download()
            soup = bs(webPage,'html.parser')
            full_tweet = soup.find_all('p',class_='TweetTextSize')[0] #modify this to get all replies
            full_tweet = bs(str(full_tweet),'html.parser').text
            Tweets[i]['fullTxt'] = full_tweet
        except:
            # Fall back to the (possibly truncated) API text on any failure.
            Tweets[i]['fullTxt'] = tweet.txt
    print('Done!... Total terdapat {0} tweet'.format(len(Tweets)))
    return Tweets
def strip_non_ascii(string, symbols):
    """Return *string* without non-ASCII characters or characters in *symbols*."""
    kept = []
    for ch in string:
        # Keep only printable-range ASCII (excludes NUL and DEL) that is not
        # an explicitly banned symbol.
        if 0 < ord(ch) < 127 and ch not in symbols:
            kept.append(ch)
    return ''.join(kept)
def cleanTweets(Tweets):
    """Normalize each tweet's 'fullTxt' into 'cleanTxt' (lowercased, URL-free,
    ASCII-only) and 'nlp' (additionally stemmed, stopword-free Indonesian text).

    Mutates and returns the same Tweets list.
    """
    factory = StopWordRemoverFactory(); stopwords = set(factory.get_stop_words()+['rt','pic','com','yg','ga'])
    factory = StemmerFactory(); stemmer = factory.create_stemmer()
    for i,tweet in enumerate(tqdm(Tweets)):
        txt = tweet['fullTxt'] # if you want to ignore retweets ==> if not re.match(r'^RT.*', txt):
        txt = txt.lower() # Lowercase
        txt = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+','',txt)# clean urls
        txt = Tokenizer.tokenize(txt)
        symbols = set(['@']) # Add more if you want
        txt = [strip_non_ascii(t,symbols) for t in txt] #remove all non ASCII characters
        # Drop single-character tokens left over after stripping.
        txt = ' '.join([t for t in txt if len(t)>1])
        Tweets[i]['cleanTxt'] = txt # this is not a good Python practice, only for learning.
        txt = stemmer.stem(txt).split()
        Tweets[i]['nlp'] = ' '.join([t for t in txt if t not in stopwords])
    return Tweets
def translate(txt,language='en'): # txt is a TextBlob object
    """Translate *txt* to *language*, returning it unchanged on any failure.

    TextBlob raises (e.g. already-translated or network errors) when
    translation is unavailable; this keeps the pipeline best-effort.
    """
    try:
        return txt.translate(to=language)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception preserves the fallback behaviour
        # without masking interpreter shutdown.
        return txt
def sentiment(Tweets): #need a clean tweets
    """Score every tweet's polarity and subjectivity, plot the class
    distributions as pie charts, and return the scored tweets.

    Returns (Sen, Sub): two lists of (score, fullTxt) tuples sorted ascending
    by score -- polarity in Sen, subjectivity in Sub.
    """
    print("Calculating Sentiment and Subjectivity Score: ... ")
    # Translate to English first because TextBlob sentiment is English-based.
    T = [translate(TextBlob(tweet['cleanTxt'])) for tweet in tqdm(Tweets)]
    Sen = [tweet.sentiment.polarity for tweet in tqdm(T)]
    Sub = [float(tweet.sentiment.subjectivity) for tweet in tqdm(T)]
    Se, Su = [], []
    for score_se, score_su in zip(Sen,Sub):
        # Asymmetric thresholds: > 0.1 positive, < -0.05 negative (deliberate).
        if score_se>0.1:
            Se.append('pos')
        elif score_se<-0.05: #I prefer this
            Se.append('neg')
        else:
            Se.append('net')
        if score_su>0.5:
            Su.append('Subjektif')
        else:
            Su.append('Objektif')
    label_se = ['Positif','Negatif', 'Netral']
    score_se = [len([True for t in Se if t=='pos']),len([True for t in Se if t=='neg']),len([True for t in Se if t=='net'])]
    label_su = ['Subjektif','Objektif']
    score_su = [len([True for t in Su if t=='Subjektif']),len([True for t in Su if t=='Objektif'])]
    PieChart(score_se,label_se); PieChart(score_su,label_su)
    # Pair each score with the original full text and sort ascending by score.
    Sen = [(s,t['fullTxt']) for s,t in zip(Sen,Tweets)]
    Sen.sort(key=lambda tup: tup[0])
    Sub = [(s,t['fullTxt']) for s,t in zip(Sub,Tweets)]
    Sub.sort(key=lambda tup: tup[0])
    return (Sen, Sub)
def printSA(SA, N = 2, emo = 'positif'):
    """Print the N most extreme tweets for one sentiment/subjectivity class.

    SA is the (Sen, Sub) pair returned by sentiment(); both lists are
    sorted ascending by score, so slicing either end yields the extremes.
    Accepts Indonesian or English class names in *emo*.
    """
    Sen, Sub = SA
    e = emo.lower().strip()
    if e=='positif' or e=='positive':
        tweets = Sen[-N:]  # highest polarity
    elif e=='negatif' or e=='negative':
        tweets = Sen[:N]  # lowest polarity
    elif e=='netral' or e=='neutral':
        # Neutral = smallest absolute polarity; re-rank by |score|.
        net = [(abs(score),t) for score,t in Sen if abs(score)<0.01]
        net.sort(key=lambda tup: tup[0])
        tweets = net[:N]
    elif e=='subjektif' or e=='subjective':
        tweets = Sub[-N:]
    elif e=='objektif' or e=='objective':
        tweets = Sub[:N]
    else:
        print('Wrong function input parameter = "{0}"'.format(emo)); tweets=[]
    print('"{0}" Tweets = '.format(emo))
    for t in tweets:
        print(t)
def wordClouds(Tweets):
    """Render a word cloud from the preprocessed ('nlp') text of all tweets."""
    # 'nlp' holds the stemmed, stop-word-free text produced upstream.
    txt = [t['nlp'] for t in Tweets]; txt = ' '.join(txt)
    wc = WordCloud(background_color="white")
    wordcloud = wc.generate(txt)
    plt.figure(num=1, facecolor='w', edgecolor='k')
    plt.imshow(wordcloud, cmap=plt.cm.jet, interpolation='nearest', aspect='auto'); plt.xticks(()); plt.yticks(())
    plt.show()
def PieChart(score,labels):
    """Draw one pie chart of *score* slices annotated with *labels*."""
    figure = plt.figure()
    figure.add_subplot(111)
    plt.pie(score, labels=labels, autopct='%1.1f%%', startangle=140)
    plt.axis('equal')
    plt.show()
    return None
def drawGraph(G, Label = False):
    """Plot *G* with a spring layout; node labels are drawn when Label is True."""
    figure = plt.figure()
    figure.add_subplot(111)
    layout = nx.spring_layout(G)
    nx.draw_networkx_nodes(G, layout, alpha=0.2, node_color='blue', node_size=600)
    if Label:
        nx.draw_networkx_labels(G, layout)
    nx.draw_networkx_edges(G, layout, width=4)
    plt.show()
def Graph(Tweets, Label = True): # Need the Tweets Before cleaning
    """Build a mention graph: nodes are users, edges run author -> mentioned user.

    NOTE(review): tweets are read both as objects (tweet.author) and as
    mappings (tweet['fullTxt']) — confirm the tweet type supports both
    access styles; elsewhere in this file tweets are indexed like dicts.
    """
    print("Please wait, building Graph .... ")
    G=nx.Graph()
    for tweet in tqdm(Tweets):
        G.add_node(tweet.author)
        # Twitter handles are 1-15 alphanumerics after '@'.
        mentionS = re.findall("@([a-zA-Z0-9]{1,15})", tweet['fullTxt'])
        for mention in mentionS:
            if "." not in mention: #skipping emails
                usr = mention.replace("@",'').strip()
                G.add_node(usr); G.add_edge(tweet.author,usr)
    Nn=G.number_of_nodes();Ne=G.number_of_edges()
    print('Finished. There are %d nodes and %d edges in the Graph.' %(Nn,Ne))
    if Label:
        drawGraph(G, Label = True)
    else:
        drawGraph(G)
    return G
def Centrality(G, N=10):
    """Select influential users by Katz centrality and draw their subgraph.

    Keeps the score outliers lying more than one standard deviation from
    the mean, capped at N nodes, and returns the induced subgraph.
    """
    phi = 1.618033988749895 # largest eigenvalue of adj matrix
    # Attenuation alpha = 1/phi keeps the Katz series convergent for that
    # assumed largest eigenvalue.
    ranking = nx.katz_centrality_numpy(G,1/phi)
    # Sort node->score pairs by score, descending.
    important_nodes = sorted(ranking.items(), key=operator.itemgetter(1))[::-1]#[0:Nimportant]
    Mstd = 1 # 1 standard Deviation CI
    data = np.array([n[1] for n in important_nodes])
    out = len(data[abs(data - np.mean(data)) > Mstd * np.std(data)]) # outlier within m stDev interval
    if out>N:
        dnodes = [n[0] for n in important_nodes[:N]]
        print('Influencial Users: {0}'.format(str(dnodes)))
    else:
        dnodes = [n[0] for n in important_nodes[:out]]
        print('Influencial Users: {0}'.format(str(important_nodes[:out])))
    Gt = G.subgraph(dnodes)
    drawGraph(Gt, Label = True)
    return Gt
def Community(G):
    """Detect communities with Louvain partitioning and draw the colored graph.

    Prints the community count and modularity, then returns the community
    id of each node (ordered as G.nodes()).
    """
    part = community.best_partition(G)
    values = [part.get(node) for node in G.nodes()]
    mod, k = community.modularity(part,G), len(set(part.values()))
    print("Number of Communities = %d\nNetwork modularity = %.2f" %(k,mod)) # https://en.wikipedia.org/wiki/Modularity_%28networks%29
    fig2 = plt.figure(); fig2.add_subplot(111)
    nx.draw_shell(G, cmap = plt.get_cmap('gist_ncar'), node_color = values, node_size=30, with_labels=False)
    # BUG FIX: ``plt.show`` was referenced but never called, so the figure
    # never appeared under non-interactive backends.
    plt.show()
    return values
def print_Topics(model, feature_names, Top_Topics, n_top_words):
    """Print the n_top_words highest-weight terms of the first Top_Topics topics."""
    for index, weights in enumerate(model.components_[:Top_Topics]):
        # argsort ascending, then take the tail reversed = top weights first.
        top_terms = [feature_names[i] for i in weights.argsort()[:-n_top_words - 1:-1]]
        print("Topic #%d:" %(index+1))
        print(" ".join(top_terms))
def getTopics(Tweets,n_topics=5, Top_Words=7):
    """Fit an online LDA topic model over the preprocessed tweet text.

    Plots a histogram of each document's dominant topic, prints the top
    words per topic, and returns (fitted LDA model, document-term matrix,
    fitted CountVectorizer).
    """
    Txt = [t['nlp'] for t in Tweets] # cleaned: stopwords, stemming
    # Alphabetic tokens of >=3 chars; drop terms in >95% or <2 documents.
    tf_vectorizer = CountVectorizer(strip_accents = 'unicode', token_pattern = r'\b[a-zA-Z]{3,}\b', max_df = 0.95, min_df = 2)
    dtm_tf = tf_vectorizer.fit_transform(Txt)
    tf_terms = tf_vectorizer.get_feature_names()
    # random_state=0 makes repeated runs reproducible.
    lda_tf = LDA(n_components=n_topics, learning_method='online', random_state=0).fit(dtm_tf)
    # Dominant topic per document, 1-based for display.
    vsm_topics = lda_tf.transform(dtm_tf); doc_topic = [a.argmax()+1 for a in tqdm(vsm_topics)] # topic of docs
    print('In total there are {0} major topics, distributed as follows'.format(len(set(doc_topic))))
    fig4 = plt.figure(); fig4.add_subplot(111)
    plt.hist(np.array(doc_topic), alpha=0.5); plt.show()
    print('Printing top {0} Topics, with top {1} Words:'.format(n_topics, Top_Words))
    print_Topics(lda_tf, tf_terms, n_topics, Top_Words)
    return lda_tf, dtm_tf, tf_vectorizer
| true
|
ae5f9bbbe93ec33df57dfab1b8914b7e6d9f69a7
|
Python
|
floor66/fsr
|
/main.py
|
UTF-8
| 26,687
| 2.515625
| 3
|
[] |
no_license
|
"""
main.py
Created by Floris P.J. den Hartog, 2018
Main file for the GUI / processing of Force Sensitive Resistor data
Used in conjunction with Arduino for analog-digital conversion
"""
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as plt
import tkinter as Tk
import time, serial, calculations, logger
from utils import millis, timerunning, touch
class FSR:
    """Tk GUI that live-plots and records Force Sensitive Resistor data
    streamed over a serial connection from an Arduino.

    One instance owns the whole application: the Tk widgets, the matplotlib
    figure embedded in the window, the serial connection, and the on-disk
    data/annotation files for each recording session.
    """

    def __init__(self):
        self.__start__ = time.time()

        ####### User defined variables ##############################################################
        self.INIT_TIMEOUT = 5 # The amount of seconds to wait for Arduino to initialize
        self.NUM_ANALOG = 6 # 6 max possible analog pins
        self.MEASURE_FRQ = 10 # Measurement frequency (Hz)
        #############################################################################################

        # Misc. variable setup, don't touch
        self.recordings = 0
        self.curr_rec_count = 0
        self.logger = logger.logger("logs/log_%i.txt" % self.__start__, self.__start__)
        self.recording = False
        # Indices into the y-axis unit dropdown (self.y_unit_opts).
        self.OPT_RAW = 0
        self.OPT_VOLTAGE = 1
        self.OPT_RESISTANCE = 2
        self.OPT_CONDUCTANCE = 3
        self.OPT_VOLTAGE_AVG = 4
        self.OPT_RESISTANCE_AVG = 5
        self.OPT_CONDUCTANCE_AVG = 6
        self.SHOW_PINS = [] # Linked to checkbuttons
        self.REC_PINS = [] # Linked to checkbuttons
        self.logger.log("Logging started @ %s (GMT)" % time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))
        self.init_gui()
        self.reset_vars()

    # At-a-glance status label
    def status(self, txt):
        self.status_lbl.configure(text="%s" % txt)

    # Update the GUI
    def update_gui(self):
        # Manual pump instead of Tk.mainloop(): the serial read loop in
        # record() calls this so the window stays responsive.
        self.root.update_idletasks()
        self.root.update()

    # Appending to data file
    def save_data(self, data):
        try:
            file = open(self.SAVE_FILE, "a")
            file.write(data)
            file.close()
        except Exception as e:
            self.logger.log("Error saving data %s" % e)

    # Reset variables for plotting
    def reset_vars(self):
        # Recreate the calculator so edits to Vcc/pulldown in the GUI
        # take effect on the next recording.
        self.calc = calculations.calculations(self.Vcc.get(), self.pulldown.get())
        self.times = []
        self.annotations = []
        self.resistor_data_raw = []
        self.resistor_data = []
        for i in range(0, self.NUM_ANALOG):
            self.times.append([])
            self.resistor_data_raw.append([]) # Raw sensor readouts, these are used for calculations
            self.resistor_data.append([]) # Processed sensor readouts (voltage, resistance, etc.), these are drawn
            self.plot_lines[i].set_data([], [])

    def check_rec_pins(self):
        """Refresh status/log with which pins are being saved; warn when none."""
        if self.recording:
            if len(self.REC_PINS) > 0:
                self.logger.log("Recording from pin%s A%s" % ("s" if len(self.REC_PINS) > 1 else "", ", A".join(str(pin) for pin in self.REC_PINS)))
                self.status("Recording #%i active...\nSaving: A%s" % (self.recordings, ", A".join(str(pin) for pin in self.REC_PINS)))
            else:
                self.logger.log("Warning: no data is being saved! Please check 'Save data' for the pin(s) you wish to record.")
                self.status("Recording #%i active...\nWarning: no data is being saved!" % self.recordings)

    def rec_stop(self):
        """Stop the current recording, close the serial port, reset plot state."""
        self.recording = False
        self.logger.log("Stopping recording, saved %i measurements" % self.curr_rec_count)
        self.reset_vars()
        try:
            self.ser.close() # Close the serial connection
        except AttributeError:
            pass # Occurs when the serial connection was never established
        except Exception as e:
            self.logger.log(e)
        self.rec_stop_btn.configure(state="disabled")
        self.rec_start_btn.configure(state="normal")
        self.root.focus() # Remove focus from the start button, could cause problems when trying to annotate
        self.status("Recording stopped")

    def rec_start(self):
        """Open the serial connection, create fresh data files, enter record()."""
        self.recording = True
        self.status("Initiating connection")
        self.rec_start_btn.configure(state="disabled")
        self.rec_stop_btn.configure(state="normal")
        self.root.focus() # Remove focus from the start button, could cause problems when trying to annotate

        # Check if we can initiate the serial communication
        if self.init_serial():
            self.status("Connection initiated (COM port: %s)" % self.COM_PORT)
            self.recordings += 1
            self.SAVE_FILE = "sensordata/data_%i_%i.txt" % (self.__start__, self.recordings)
            self.ANNOTATION_FILE = "sensordata/annotations_%i_%i.txt" % (self.__start__, self.recordings)

            # Generate new, empty data files
            touch(self.SAVE_FILE)
            touch(self.ANNOTATION_FILE)

            self.logger.log("Arduino initialized, starting recording #%i of this session" % self.recordings)
            self.logger.log("Currently recording to file: %s" % self.SAVE_FILE)
            # Header lines are prefixed with ';' so downstream parsers can skip them.
            self.save_data("; Recording @ %i Hz, Baud rate %i\n" % (self.MEASURE_FRQ, self.BAUD_RATE.get()))
            self.save_data("; Vcc = %.02f V, pulldown = %i Ohm\n" % (self.Vcc.get(), self.pulldown.get()))
            self.save_data("; Key: time (ms), pin (A0-5), readout (0-1023)\n")
            self.check_rec_pins()
            self.__rec_start__ = time.time()
            self.record()
        else:
            self.recording = False
            self.rec_start_btn.configure(state="normal")
            self.rec_stop_btn.configure(state="disabled")
            self.status("Connection failed")
            self.logger.log("Connection failed")

    def quit_gui(self):
        """Confirm with the user, then tear down the Tk main window."""
        if Tk.messagebox.askokcancel("Quit", "Do you want to quit?"):
            self.root.quit()
            self.root.destroy()
            self.logger.log("GUI exit")

    def toggle_sensor_display(self):
        """Sync SHOW_PINS with the 'Display in graph' checkbuttons.

        Any pin whose checkbox changed gets its buffered plot data wiped so
        the line restarts cleanly.
        """
        for i in range(0, self.NUM_ANALOG):
            changed = False
            state = self.sensor_display_vars[i].get()
            if state == 1:
                if not i in self.SHOW_PINS:
                    self.SHOW_PINS.append(i)
                    changed = True
            elif state == 0:
                if i in self.SHOW_PINS:
                    self.SHOW_PINS.pop(self.SHOW_PINS.index(i))
                    changed = True
            if changed:
                self.logger.log("Reset display data for Pin A%i" % i)
                self.times[i] = []
                self.resistor_data_raw[i] = []
                self.resistor_data[i] = []
                self.plot_lines[i].set_data([], [])

    def toggle_sensor_record(self):
        """Sync REC_PINS with the 'Save data' checkbuttons."""
        for i in range(0, self.NUM_ANALOG):
            changed = False
            state = self.sensor_record_vars[i].get()
            if state == 1:
                if not i in self.REC_PINS:
                    self.REC_PINS.append(i)
                    changed = True
            elif state == 0:
                if i in self.REC_PINS:
                    self.REC_PINS.pop(self.REC_PINS.index(i))
                    changed = True
            if changed:
                self.check_rec_pins()

    def y_unit_change(self, val):
        """Dropdown callback: relabel the y-axis and reset the plot buffers."""
        # NOTE(review): ``i`` is computed only to validate ``val``; the
        # index itself is unused.
        try:
            i = self.y_unit_opts.index(val)
        except ValueError:
            val = self.y_unit_opts[0]
        self.data_plot.set_ylabel(val)
        self.reset_vars()

    def add_annotation(self, e):
        """Spacebar handler: mark the current time with a labelled vertical line
        and append (time, message) to the annotation file.
        """
        # Only possible while data is on screen, since the annotation is
        # anchored to the newest timestamp of the first displayed pin.
        check = sum([len(s) for s in self.times])
        if check == 0:
            self.logger.log("Can't add an annotation if no data is being shown")
            return

        t = self.times[self.SHOW_PINS[0]][-1]
        msg = Tk.simpledialog.askstring("Add annotation", "Message (optional):", parent=self.root)
        if msg is not None:
            ln = self.data_plot.axvline(x=t, color="#000000", linewidth=2)
            txt = self.data_plot.text(t, 0, " %s" % msg, fontsize=16)
            self.annotations.append((t, msg, ln, txt))
            data = "%s,%s\n" % (t, msg)
            try:
                file = open(self.ANNOTATION_FILE, "a")
                file.write(data)
                file.close()
            except Exception as e:
                self.logger.log("Error saving data %s" % e)

    def init_gui(self):
        """Build the entire Tk window: left control panel, embedded plot,
        right sensor-selection panel, and all linked Tk variables.
        """
        # Initialize Tk, create layout elements
        self.root = Tk.Tk()
        self.root.wm_title("Sensor Data (%i)" % self.__start__)
        self.root.protocol("WM_DELETE_WINDOW", self.quit_gui)

        # Required to make the plot resize with the window, row0 col1 (= the plot) gets the "weight"
        self.root.rowconfigure(0, weight=1)
        self.root.columnconfigure(1, weight=1)

        # So that we lose Entry focus on clicking anywhere
        self.root.bind_all("<1>", lambda event:event.widget.focus_set())

        # For adding timestamps
        self.root.bind("<space>", self.add_annotation)

        self.panel_left = Tk.Frame(master=self.root)
        self.panel_right = Tk.Frame(master=self.root)
        self.canvas_container = Tk.Frame(master=self.root)

        # Left panel
        # Status label+frame
        self.status_frame = Tk.LabelFrame(master=self.panel_left, text="Status")
        self.status_lbl = Tk.Label(master=self.status_frame)
        self.status_lbl.pack()
        self.status("Disconnected")

        # Start/stop buttons+frame
        self.controls_frame = Tk.LabelFrame(master=self.panel_left, text="Controls", pady=10)
        self.rec_start_btn = Tk.Button(master=self.controls_frame, text="Start Recording", command=self.rec_start)
        self.rec_stop_btn = Tk.Button(master=self.controls_frame, text="Stop Recording", command=self.rec_stop)
        self.rec_stop_btn.configure(state="disabled")

        # Graph refresh scale
        self.REFRESH_MS = Tk.IntVar()
        self.REFRESH_MS.set(500)
        self.refresh_entry = Tk.Scale(master=self.controls_frame, length=150, from_=1, to=1000, resolution=25, label="Graph refreshrate (ms)", orient=Tk.HORIZONTAL, variable=self.REFRESH_MS)

        # The amount of data points to show on screen
        self.POP_CUTOFF = Tk.IntVar()
        self.POP_CUTOFF.set(1000)
        self.cutoff_entry = Tk.Scale(master=self.controls_frame, length=150, from_=100, to=2500, resolution=100, label="Datapoints to show", orient=Tk.HORIZONTAL, variable=self.POP_CUTOFF)

        # Y-axis unit selection
        self.y_unit = Tk.StringVar()
        self.y_unit_opts = ["Raw value (0-1023)", "Voltage (mV)", "Resistance (Ohm)", "Conductance (uS)", \
            "Avg. voltage (mV)", "Avg. resistance (Ohm)", "Avg. conductance (uS)"]
        self.y_unit.set(self.y_unit_opts[self.OPT_RAW])
        self.unit_select_label = Tk.Label(master=self.controls_frame, text="Y-axis unit:")
        self.unit_select_opts = Tk.OptionMenu(self.controls_frame, self.y_unit, *self.y_unit_opts, command=self.y_unit_change)

        # Y-axis scaling
        self.Y_RANGE_LOW = Tk.IntVar()
        self.Y_RANGE_HIGH = Tk.IntVar()
        self.Y_RANGE_LOW.set("")
        self.Y_RANGE_HIGH.set("")
        self.scaling_label = Tk.Label(master=self.controls_frame, text="Y-axis scale:")
        self.y_low_label = Tk.Label(master=self.controls_frame, text="Minimum")
        self.y_high_label = Tk.Label(master=self.controls_frame, text="Maximum")
        self.y_low_entry = Tk.Entry(master=self.controls_frame, textvariable=self.Y_RANGE_LOW, width=6)
        self.y_high_entry = Tk.Entry(master=self.controls_frame, textvariable=self.Y_RANGE_HIGH, width=6)
        self.scaling_label_under = Tk.Label(master=self.controls_frame, text="(Empty = auto-scaling)")

        # Misc. settings
        self.settings_frame = Tk.LabelFrame(master=self.panel_left, text="Misc. settings", pady=10)
        self.COM_PORT = Tk.StringVar()
        self.COM_PORT.set("COM4")
        self.com_label = Tk.Label(master=self.settings_frame, text="COM port:")
        self.com_entry = Tk.Entry(master=self.settings_frame, textvariable=self.COM_PORT, width=8)
        self.BAUD_RATE = Tk.IntVar()
        self.BAUD_RATE.set(128000)
        self.baud_label = Tk.Label(master=self.settings_frame, text="Baud rate:")
        self.baud_entry = Tk.Entry(master=self.settings_frame, textvariable=self.BAUD_RATE, width=8)
        self.Vcc = Tk.DoubleVar()
        self.Vcc.set(5.06)
        self.Vcc_label = Tk.Label(master=self.settings_frame, text="Vcc:")
        self.Vcc_entry = Tk.Entry(master=self.settings_frame, textvariable=self.Vcc, width=8)
        self.pulldown = Tk.IntVar()
        self.pulldown.set(10000)
        self.pulldown_label = Tk.Label(master=self.settings_frame, text="Pulldown:")
        self.pulldown_entry = Tk.Entry(master=self.settings_frame, textvariable=self.pulldown, width=8)

        # Setup the grid within panel_left
        self.rec_start_btn.grid(row=0, column=0, columnspan=2)
        self.rec_stop_btn.grid(row=1, column=0, columnspan=2)
        self.refresh_entry.grid(row=3, column=0, pady=10, columnspan=2)
        self.cutoff_entry.grid(row=4, column=0, columnspan=2)
        self.scaling_label.grid(row=5, column=0, pady=(10, 0), columnspan=2)
        self.y_low_label.grid(row=6, column=0)
        self.y_low_entry.grid(row=6, column=1)
        self.y_high_label.grid(row=7, column=0)
        self.y_high_entry.grid(row=7, column=1)
        self.scaling_label_under.grid(row=8, column=0, columnspan=2)
        self.unit_select_label.grid(row=9, column=0, columnspan=2, pady=(10, 0))
        self.unit_select_opts.grid(row=10, column=0, columnspan=2)
        self.com_label.grid(row=0, column=0)
        self.com_entry.grid(row=0, column=1)
        self.baud_label.grid(row=1, column=0)
        self.baud_entry.grid(row=1, column=1)
        self.Vcc_label.grid(row=2, column=0)
        self.Vcc_entry.grid(row=2, column=1)
        self.pulldown_label.grid(row=3, column=0)
        self.pulldown_entry.grid(row=3, column=1)
        self.status_frame.grid(row=0, column=0, sticky="nsew")
        self.controls_frame.grid(row=1, column=0, sticky="nsew", pady=(10,0))
        self.settings_frame.grid(row=2, column=0, sticky="nsew", pady=10)
        self.panel_left.grid(row=0, column=0, sticky="nw", padx=10, pady=10)

        # Quit button
        self.quit_btn = Tk.Button(master=self.root, text="Quit", command=self.quit_gui)
        self.quit_btn.grid(row=0, column=0, sticky="s", pady=5)

        # Init matplotlib graph at this point
        self.init_mpl()

        # Right panel
        # Display selection frame
        self.sensor_select_frame = Tk.LabelFrame(master=self.panel_right, padx=5, text="Sensor selection")
        self.sensor_select_labels = [Tk.Label(master=self.sensor_select_frame, text="Pin A%i:" % i) for i in range(0, self.NUM_ANALOG)]
        self.sensor_record_boxes = []
        self.sensor_display_boxes = []
        self.sensor_record_vars = [Tk.IntVar() for i in range(0, self.NUM_ANALOG)]
        self.sensor_display_vars = [Tk.IntVar() for i in range(0, self.NUM_ANALOG)]
        # Two grid rows per pin: display checkbox above its record checkbox.
        j = 0
        for i in range(0, self.NUM_ANALOG):
            self.sensor_select_labels[i].grid(row=j, column=0)
            self.sensor_display_boxes.append(Tk.Checkbutton(master=self.sensor_select_frame, text="Display in graph", \
                command=self.toggle_sensor_display, variable=self.sensor_display_vars[i]))
            self.sensor_record_boxes.append(Tk.Checkbutton(master=self.sensor_select_frame, text="Save data", \
                command=self.toggle_sensor_record, variable=self.sensor_record_vars[i]))
            self.sensor_display_boxes[i].grid(row=j, column=1, sticky="w")
            self.sensor_record_boxes[i].grid(row=(j+1), column=1, sticky="w", pady=(0, (5 if i < (self.NUM_ANALOG - 1) else 0)))
            j += 2
        self.sensor_select_frame.grid(row=0, column=0, padx=10, pady=10, sticky="nsew")

        # Sensor readouts frame
        self.sensor_readout_frame = Tk.LabelFrame(master=self.panel_right, padx=5, text="Live readouts")

        # Create 1 label per pin
        self.sensor_readouts = [Tk.Label(master=self.sensor_readout_frame, text=("Pin A%i: 0 mV / 0.00 N" % i)) for i in range(0, self.NUM_ANALOG)]
        for i in range(0, self.NUM_ANALOG):
            self.sensor_readouts[i].pack(side=Tk.TOP, anchor="w")
        self.sensor_readout_frame.grid(row=1, column=0, sticky="nsew", padx=10, pady=(0, 10))

        # Apply grid to right panel
        self.panel_right.grid(row=0, column=2, sticky="n")

        # Instantiate Tk window for the first time
        self.update_gui()

    def init_mpl(self):
        """Create the matplotlib figure, one (initially empty) line per pin,
        and embed the canvas into the Tk window.
        """
        # Initialize matplotlib
        self.plot_lines = []
        self.cols = ["b-", "r-", "g-", "b-", "m-", "c-"]
        self.fig = plt.figure()
        self.data_plot = self.fig.add_subplot(111)
        self.data_plot.set_autoscale_on(True)
        self.data_plot.set_title("Sensor Data\n")
        self.data_plot.set_ylabel(self.y_unit.get())
        self.data_plot.set_xlabel("Time")

        # Instantiate a line in the graph for every pin we could potentially read
        for i in range(0, self.NUM_ANALOG):
            tmp, = self.data_plot.plot([], [], self.cols[i])
            self.plot_lines.append(tmp)

        self.canvas = FigureCanvasTkAgg(self.fig, master=self.canvas_container)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(fill=Tk.BOTH, expand=1)
        self.canvas_container.grid(row=0, column=1, sticky="nesw")

    def init_serial(self):
        """Open the serial port and wait for the Arduino's INIT_COMPLETE
        handshake line.  Returns True on success, False on abort/timeout.
        """
        self.can_start = False # To wait for Arduino to give the go-ahead

        # Wait for serial connection
        timer = millis()
        while True:
            self.update_gui()
            if not self.recording:
                return False

            try:
                self.ser = serial.Serial(self.COM_PORT.get(), self.BAUD_RATE.get())
                break
            except serial.SerialException as e:
                if (millis() - timer) >= 1000: # Give an error every second
                    self.status("Connect Arduino to USB!")
                    self.logger.log("Connect Arduino to USB!")
                    timer = millis()

        # Wait for the go-ahead from Arduino
        timer = millis()
        while True:
            self.update_gui()
            if not self.recording:
                return False

            try:
                data_in = self.ser.readline()
            except Exception as e:
                self.logger.log(e)

            # NOTE(review): if readline() raised on the very first pass,
            # data_in is unbound here and this raises NameError — confirm.
            if len(data_in) > 0:
                try:
                    data_in = data_in.decode().rstrip()
                    if data_in == "INIT_COMPLETE":
                        self.can_start = True
                        return True
                except Exception as e:
                    self.logger.log(e)

            if (millis() - timer) >= (self.INIT_TIMEOUT * 1000):
                self.logger.log("Arduino failed to initialize after %i sec" % self.INIT_TIMEOUT)
                return False

    # Main loop
    def record(self):
        """Main acquisition loop: read 'time,pin,value' lines from serial,
        save/plot them per the checkbox selections, refresh the GUI.
        Runs until self.recording is cleared by rec_stop().
        """
        if not self.can_start:
            return False

        self.draw_timer = millis()
        while self.recording:
            self.update_gui()

            try:
                data_in = self.ser.readline()
            except serial.serialutil.SerialException as e:
                self.logger.log("Reading from the serial port failed: %s" % e)
            finally:
                if not self.recording:
                    return

            # Check the received data
            # NOTE(review): data_in may be unbound if readline() raised — confirm.
            if len(data_in) > 1:
                data_in = data_in.decode()
                unpack = data_in.rstrip().split(",")

                if len(unpack) == 3: # We expect 3 variables. No more, no less
                    try:
                        timestamp = int(unpack[0])
                        pin = int(unpack[1])
                        res_val = int(unpack[2])
                    except ValueError:
                        self.logger.log("Faulty serial communication: %s" % ",".join(unpack))
                        continue

                    if pin in self.REC_PINS:
                        self.curr_rec_count += 1
                        self.save_data(data_in) # Save the data to file

                    # Display readout in the proper label
                    self.sensor_readouts[pin].config(text="Pin A%i: %i mV / %.02f N" % (pin, self.calc.val_to_volt(res_val) * 1000, self.calc.val_to_N(res_val)))

                    if not pin in self.SHOW_PINS: # Skip the pins we don't want/need to read
                        continue

                    self.times[pin].append(timestamp)
                    self.resistor_data_raw[pin].append(res_val)

                    # Here we can interject and do calculations based on which y-axis unit we want to see
                    opt = self.y_unit_opts.index(self.y_unit.get())
                    if opt == self.OPT_RAW:
                        self.resistor_data[pin].append(res_val)
                    elif opt == self.OPT_VOLTAGE:
                        a = self.calc.val_to_volt(res_val) * 1000
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_RESISTANCE:
                        a = self.calc.volt_to_Rfsr(self.calc.val_to_volt(res_val))
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_CONDUCTANCE:
                        a = 10**6 / self.calc.volt_to_Rfsr(self.calc.val_to_volt(res_val)) if res_val > 0 else 0
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_VOLTAGE_AVG:
                        a = sum([self.calc.val_to_volt(v) * 1000 for v in self.resistor_data_raw[pin]]) / len(self.resistor_data_raw[pin]) if len(self.resistor_data_raw[pin]) > 0 else 0
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_RESISTANCE_AVG:
                        a = sum([self.calc.volt_to_Rfsr(self.calc.val_to_volt(v)) for v in self.resistor_data_raw[pin]]) / len(self.resistor_data_raw[pin]) \
                            if len(self.resistor_data_raw[pin]) > 0 else 0
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_CONDUCTANCE_AVG:
                        a = sum([10**6 / self.calc.volt_to_Rfsr(self.calc.val_to_volt(v)) if v > 0 else 0 for v in self.resistor_data_raw[pin]]) / len(self.resistor_data_raw[pin]) \
                            if len(self.resistor_data_raw[pin]) > 0 else 0
                        self.resistor_data[pin].append(a)

                    self.plot_lines[pin].set_data(self.times[pin], self.resistor_data[pin])

                    # Keep only the most recent POP_CUTOFF samples per pin.
                    if len(self.times[pin]) > self.POP_CUTOFF.get():
                        self.times[pin] = self.times[pin][-self.POP_CUTOFF.get():]
                        self.resistor_data_raw[pin] = self.resistor_data_raw[pin][-self.POP_CUTOFF.get():]
                        self.resistor_data[pin] = self.resistor_data[pin][-self.POP_CUTOFF.get():]

            self.draw()

    # Adjust scale of axes according to data/entries
    def do_auto_scale(self):
        """Set the y-axis limits from the Entry fields when given, otherwise
        from the min/max of the currently displayed data (plus 5% margin).
        """
        # Required to properly scale axes
        self.data_plot.relim()
        self.data_plot.autoscale_view(True, True, True)

        # Empty/non-numeric entries mean "auto" for that bound.
        try:
            low_entry = int(self.Y_RANGE_LOW.get())
        except Exception as e:
            low_entry = None

        try:
            high_entry = int(self.Y_RANGE_HIGH.get())
        except Exception as e:
            high_entry = None

        low_data = None
        high_data = None
        for i in range(0, self.NUM_ANALOG):
            try:
                min_ = min(self.resistor_data[i])
                max_ = max(self.resistor_data[i])

                if (low_data is None) or (min_ < low_data):
                    low_data = min_

                if (high_data is None) or (max_ > high_data):
                    high_data = max_
            except ValueError:
                pass # min()/max() of an empty (undisplayed) pin buffer
            except Exception:
                raise

        if low_entry is not None:
            if high_entry is not None:
                self.data_plot.set_ylim(low_entry, high_entry)
            else:
                self.data_plot.set_ylim(low_entry, high_data + ((high_data if high_data > 0 else 1) * 0.05))
        else:
            if high_entry is not None:
                self.data_plot.set_ylim(low_data - ((low_data if low_data > 0 else 1) * 0.05), high_entry)
            else:
                self.data_plot.set_ylim(low_data - ((low_data if low_data > 0 else 1) * 0.05), \
                    high_data + ((high_data if high_data > 0 else 1) * 0.05))

    def draw(self):
        """Redraw the plot, rate-limited to one refresh per REFRESH_MS."""
        # Draw when it's time to draw!
        if (millis() - self.draw_timer) >= self.REFRESH_MS.get():
            self.draw_timer = millis()

            # Remove annotations that are no longer in the current time window
            # NOTE(review): deleting from self.annotations while indexing it
            # shifts later entries; the IndexError break masks this — confirm.
            for i in range(0, len(self.annotations)):
                try:
                    t, msg, ln, txt = self.annotations[i]
                    if (t <= self.data_plot.get_xlim()[0]):
                        ln.remove()
                        del ln
                        txt.remove()
                        del txt
                        del self.annotations[i]
                except IndexError:
                    break

            self.data_plot.set_title("Sensor data\nRecording: %s\n" % timerunning(time.time() - self.__rec_start__))
            self.data_plot.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: timerunning(x / 1000)))
            self.do_auto_scale()

            # Speeds up drawing tremendously
            self.data_plot.draw_artist(self.data_plot.patch)
            for i in range(0, self.NUM_ANALOG):
                if i in self.SHOW_PINS:
                    self.data_plot.draw_artist(self.plot_lines[i])
            self.fig.canvas.draw_idle()
            self.fig.canvas.flush_events()
if __name__ == "__main__":
    # BUG FIX: if FSR() itself raised, ``fsr`` was unbound and both except
    # branches crashed with NameError; also ``fsr.log`` does not exist —
    # logging goes through ``fsr.logger.log``.
    fsr = None
    try:
        fsr = FSR()
    except (KeyboardInterrupt, SystemExit): # Doesn't function yet
        if fsr is not None:
            fsr.quit_gui()
        raise
    except Exception as e:
        if fsr is not None:
            fsr.logger.log(e)
        else:
            raise
| true
|
a122fcc01f2961735c8e4e03ef17e86b257be884
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03400/s119273461.py
|
UTF-8
| 93
| 2.71875
| 3
|
[] |
no_license
|
# Competitive-programming solution: read all of stdin (fd 0) at once.
# First three ints are n, d, x; the remaining n ints land in list a.
n, d, x, *a = map(int, open(0).read().split())
for i in a:
    # 1 + (d - 1) // i counts 1, 1+i, 1+2i, ... up to day d, i.e. how many
    # times the i-interval event fires within d days; accumulate onto x.
    x += 1 + (d - 1) // i
print(x)
| true
|
60ddf5ffce56a17c0f2782f58a55b48493dbdc44
|
Python
|
TeodorStefanPintea/Sentiment-mining-of-the-bioinformatics-literature
|
/AnaliseSentiment/sentiment_AnalyseSentiment.py
|
UTF-8
| 1,596
| 3.21875
| 3
|
[] |
no_license
|
'''
This method uses VADER to measure the sentiment score.
The thresholds can be changed.
'''
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
class AnalyseSentiment:
    """Score text sentiment with VADER.

    Analyse() returns the VADER compound polarity score; the classification
    thresholds (+/-0.50) are exposed as class attributes so they can be
    tuned without editing the method.
    """

    # Compound-score cutoffs for the Positive/Negative/Neutral labels.
    POS_THRESHOLD = 0.50
    NEG_THRESHOLD = -0.50

    def __init__(self):
        # Build the analyzer once and reuse it: the original constructed a
        # new SentimentIntensityAnalyzer on every Analyse() call.
        self._analyzer = SentimentIntensityAnalyzer()

    def Analyse(self, sentence):
        """Return the VADER compound score (-1.0 .. 1.0) for *sentence*.

        *sentence* may be a sentence or a whole paragraph.  A labelled
        result dict is still assembled (as before) but only the compound
        score is returned, preserving the original interface.
        """
        sentiment_dict = self._analyzer.polarity_scores(sentence)
        compound = sentiment_dict['compound']

        # The three original branches were identical except for the label;
        # collapse the duplication.
        if compound >= self.POS_THRESHOLD:
            label = "Positive"
        elif compound <= self.NEG_THRESHOLD:
            label = "Negative"
        else:
            label = "Neutral"

        sentiment = {
            "sentence or paragraph": sentence,
            "overall_sentiment": label,
            "overall_sentiment_score": compound,
            "scores": [{
                "positive": sentiment_dict['pos'],
                "negative": sentiment_dict['neg'],
                "neutral": sentiment_dict['neu'],
            }],
        }
        return sentiment['overall_sentiment_score']
| true
|
8eded6328c694c8272b32c29794189523581a585
|
Python
|
one-last-time/python
|
/NLTk/TF-IDF.py
|
UTF-8
| 3,086
| 3.109375
| 3
|
[] |
no_license
|
# Build a TF-IDF matrix over the sentences of a speech, by hand.
import nltk
import re
import nltk  # NOTE(review): duplicate import of nltk (harmless)
import heapq
import numpy as np

paragraph="""Thank you all so very much. Thank you to the Academy. Thank you to all of you in this room. I have to congratulate the
other incredible nominees this year. The Revenant was the product of the tireless efforts of an unbelievable cast and crew. First off,
to my brother in this endeavor, Mr. Tom Hardy. Tom, your talent on screen can only be surpassed by your friendship off screen … thank you
for creating a transcendent cinematic experience. Thank you to everybody at Fox and New Regency … my entire team. I have to thank everyone
from the very onset of my career … To my parents; none of this would be possible without you. And to my friends, I love you dearly; you know
who you are.
And lastly, I just want to say this: Making The Revenant was about man's relationship to the natural world.
A world that we collectively felt in 2015 as the hottest year in recorded history. Our production needed to move
to the southern tip of this planet just to be able to find snow. Climate change is real, it is happening right now. It is
the most urgent threat facing our entire species, and we need to work collectively together and stop procrastinating. We need
to support leaders around the world who do not speak for the big polluters, but who speak for all of humanity, for the indigenous people
of the world, for the billions and billions of underprivileged people out there who would be most affected by this. For our children’s
children, and for those people out there whose voices have been drowned out by the politics of greed. I thank you all for this amazing award
tonight. Let us not take this planet for granted. I do not take tonight for granted. Thank you so very much. """

sentences = nltk.sent_tokenize(paragraph)

# Count word occurrences over the lowercased, punctuation-stripped sentences.
wordCount={}
for i in range(len(sentences)):
    sentences[i]=sentences[i].lower()
    sentences[i]=re.sub(r'\W',' ',sentences[i])   # non-word chars -> space
    sentences[i]=re.sub(r'\s+',' ',sentences[i])  # collapse whitespace
    words = nltk.word_tokenize(sentences[i])
    for word in words:
        if word not in wordCount.keys():
            wordCount[word]=1
        else:
            wordCount[word]+=1

# Restrict the vocabulary to the 100 most frequent words.
frequent_words=heapq.nlargest(100,wordCount,key=wordCount.get)

#IDF
# Smoothed inverse document frequency: log(total/doc_count + 1).
# Every frequent word appears in at least one sentence, so doc_count >= 1.
idf={}
for word in frequent_words:
    doc_count=0
    total= len(sentences)
    for sentence in sentences:
        if word in nltk.word_tokenize(sentence):
            doc_count+=1
    #print(word,' doc count =',doc_count,' word count=',wordCount[word])
    idf[word]=np.log((total/doc_count)+1)
#print(idf)

#tf
# Term frequency of each vocabulary word per sentence (count / sentence length).
tf={}
for word in frequent_words:
    vc=[]
    for sentence in sentences:
        count=0
        for w in nltk.word_tokenize(sentence):
            if w==word:
                count+=1
        vc.append(count/len(nltk.word_tokenize(sentence)))
    tf[word]=vc
#print(tf)

#tf-idf
# Element-wise product; transposed so rows = sentences, columns = words.
tf_idf=[]
for w in tf.keys():
    vc=[]
    for val in tf[w]:
        vc.append(val*idf[w])
    tf_idf.append(vc)
#print(tf_idf)
tfidf_matrix=np.asarray(tf_idf)
tfidf_matrix=np.transpose(tfidf_matrix)
print(tfidf_matrix)
| true
|
f66cdd81d52308315933415bedc1a6b45df342ca
|
Python
|
swift-fox/ml-demos
|
/ml-fundementals/pytorch_mnist_validation.py
|
UTF-8
| 858
| 2.515625
| 3
|
[] |
no_license
|
"""Validate a saved MNIST classifier ('mnist.pt') on the official test split."""
import torch, torchvision
from torch import nn, optim
from torchvision import datasets, transforms

# Normalize to roughly [-1, 1]; must match the transform used at training time.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

validate_set = datasets.MNIST('data', download=True, train=False, transform=transform)
validate_loader = torch.utils.data.DataLoader(validate_set, shuffle=True)

model = torch.load('mnist.pt')
# BUG FIX: switch to inference mode so dropout/batch-norm layers (if any)
# behave deterministically during validation.
model.eval()

all_count = 0
correct_count = 0
for image, label in validate_loader:
    image = image.view(1, 784)  # flatten the 28x28 image for an MLP input

    with torch.no_grad():
        pred = model(image)

    # argmax of the raw outputs equals argmax of exp(outputs), so the
    # original exp/list/index round-trip is unnecessary.
    pred_label = torch.argmax(pred, dim=1).item()
    true_label = label.item()
    if true_label == pred_label:
        correct_count += 1
    all_count += 1

print("all = {}, correct = {}, {}%".format(all_count, correct_count, float(correct_count) / all_count * 100))
| true
|
54678f6f657f8dad5aa6f6e8eca6e06805338758
|
Python
|
leeanna96/Python
|
/Chap03/숫자 맞추기 게임_도전문제.py
|
UTF-8
| 315
| 3.796875
| 4
|
[] |
no_license
|
# Number-guessing game (prompts and messages are in Korean).
answer=5  # the fixed secret number
print("숫자 게임에 오신 것을 환영합니다")
while True:
    n=int(input("숫자를 맞춰보세요: "))
    if n==answer:
        # Correct guess: announce the win and leave the loop.
        print("사용자가 이겼습니다.")
        break
    elif n>answer:
        print("너무 큼")   # guess too high
    else:
        print("너무 작음")  # guess too low
print("게임 종료")
| true
|
977c8ba9d54dfd5808726fc3bc7fdbf645c3fc16
|
Python
|
nricklin/leafpy
|
/leafpy/leaf.py
|
UTF-8
| 1,606
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
from .auth import login
import requests
# Nissan Connect gateway endpoint; every dynamic Leaf call appends
# '<MethodName>.php' to this base.
# NOTE(review): the 'api_v200413_NE' path segment is version-pinned —
# confirm it is still the current API revision.
BASE_URL = 'https://gdcportalgw.its-mo.com/api_v200413_NE/gdc/'
class Leaf(object):
    """Make requests to the Nissan Connect API to get Leaf Info"""

    custom_sessionid = None
    VIN = None
    region_code = None

    def __init__(self, username=None, password=None, custom_sessionid=None, VIN=None, region_code='NNA'):
        """Authenticate with username/password, or reuse an existing session."""
        self.region_code = region_code
        have_credentials = username and password
        have_session = custom_sessionid and VIN
        if have_credentials:
            self.custom_sessionid, self.VIN = login(username, password, self.region_code)
        elif have_session:
            self.custom_sessionid = custom_sessionid
            self.VIN = VIN
        else:
            raise Exception('Need either username & password or custom_sessionid & VIN.')

    def __getattr__(self, name):
        """
        Top secret magic. Calling Leaf.<some_function_name>() hits <some_function_name>.php
        """
        if name.startswith('__'):
            raise AttributeError(name)

        def call(**kwargs):
            # Session fields first; caller-supplied kwargs may extend or
            # override them, exactly as the per-key copy loop did.
            payload = {
                "RegionCode": self.region_code,
                "custom_sessionid": self.custom_sessionid,
                "VIN": self.VIN,
            }
            payload.update(kwargs)
            response = requests.post(BASE_URL + name + '.php', data=payload)
            response.raise_for_status()
            body = response.json()
            if body['status'] != 200:
                raise Exception('Error making request. Perhaps the session has expired.')
            return body
        return call
| true
|
9e1fc4ab3006a73cfbb96ea5292d214d1c4b2b93
|
Python
|
RavingSmurfGB/MuteOnMuteOff
|
/setup.py
|
UTF-8
| 7,579
| 2.5625
| 3
|
[] |
no_license
|
import os, shutil, pathlib, ctypes, time, sys, glob, subprocess, stat
current_file_path = pathlib.Path(__file__).parent.absolute() #This will get the current file path but will not update if you move the setup.py, move the setup.py last
print(current_file_path)
#-1. Relaunch program as admin if not: Done (with error)
#0. Install pip requirements!!! Done
#1. Move files to setup at launch: Done
#2. Move files to start menu: Done
#3. Move all files to program files in permanent location Done
#4. Launch program Not started
#*. Perhaps work on gui showing what is happening
#*. Recreate the shortcuts under programfiles...
#*. If already installed perhaps delete and reinstall
reinstall = False
#-1.#////////////////////////////////Admin Check///////////////////////////////
#Is ran to determine if the program was started with admin rights, if so continues, if not uac prompt
###DOES NOT WORK..................................................................................................................................
def is_admin():
    """Return truthy when the process has Windows Administrator rights.

    Uses the Win32 shell API via ctypes; any failure (including running on a
    non-Windows platform where ctypes.windll does not exist) is treated as
    "not admin".
    """
    try:
        return ctypes.windll.shell32.IsUserAnAdmin()
    except:
        return False
if is_admin():
# Code of your program here
print("Setup already initialised with Administrator rights")
else:
# Re-run the program with admin rights
print("Setup was not started with Administrator rights, restarting...")
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1)
print("\n")
#0.#////////////////////////////////Installing Requirements///////////////////////////////
print("Installing requirments")
subprocess.call('cmd /c "pip install -r Requirtements.txt"') ## doesnt wooooooooooooooooooooooooooooooooooooooooooooooooooooooork!
import progressbar
#1.#////////////////////////////////Setting launch at Startup///////////////////////////////
print("Setting program to start on boot")
#Get's current username
username = os.getlogin()
dst_launch_startup_path = ("C:\\Users\\" + username + "\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup") #Creates the path to startup, including the current user.
src_launch_startup_path = current_file_path.joinpath("support_files\\startup") #Adds support_files\startup to the current file path
check_dst = dst_launch_startup_path + "\\MuteOnMuteOff.lnk" #Creates a full file path to startup file, to check if it exists already
def startup_copy(): # Copies the bundled startup shortcut(s); used later in the install logic
    """Copy every file from the bundled startup folder into the user's Startup directory.

    Reads the module-level `src_launch_startup_path` / `dst_launch_startup_path`
    at call time (they are reassigned between install phases); on any failure
    prints a warning pointing at `check_dst`.
    """
    file_names = pathlib.Path.iterdir(src_launch_startup_path)
    try:
        for file_name in file_names:
            shutil.copy(pathlib.PurePath.joinpath(src_launch_startup_path, file_name), dst_launch_startup_path)
    except:
        print("Warning: Something went wrong during moving startup file... \n" + "Double check that file exists under \n" + check_dst)
if pathlib.Path(check_dst).is_file() == False:
# If there isnt a file in starup then:
print("Moving file to startup")
startup_copy()
elif pathlib.Path(check_dst).is_file() == True:
#If there is a file in startup then:
if reinstall == False:
print("ERROR: Startup file already exsists under : \n" + " " + check_dst + "\n Please select reinstall from the menu if you would like to continue")
if reinstall == True:
#insert code to delete file here
print("not yet implemented")
#///////////////////////////////
print("\n")
#2.#////////////////////////////////Adding to start menu///////////////////////////////
dst_launch_startup_path = ("C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs") #Creates the path to startup, including the current user.
src_launch_startup_path = current_file_path.joinpath("support_files\\start_menu") #Adds support_files\startup to the current file path
check_dst = dst_launch_startup_path + "\\MuteOnMuteOff.lnk" #Creates a full file path to startup file, to check if it exists already
def start_menu_copy():
    """Copy the Start-Menu shortcut(s) into the all-users Start Menu Programs folder.

    Like startup_copy(), reads the module-level source/destination paths at
    call time (they are reassigned just before this phase); prints a warning
    pointing at `check_dst` on any failure.
    """
    try:
        file_names = pathlib.Path.iterdir(src_launch_startup_path)
        for file_name in file_names:
            shutil.copy(pathlib.PurePath.joinpath(src_launch_startup_path, file_name), dst_launch_startup_path)
    except:
        print("Warning: Something went wrong during moving start menu file... \n" + "Double check that file exists under \n" + check_dst)
if pathlib.Path(check_dst).is_file() == False:
print("Moving file to start_menu")
start_menu_copy()
elif pathlib.Path(check_dst).is_file() == True:
#If there is a file in startup then:
if reinstall == False:
print("ERROR: Start Menu file already exsists under : \n" + " " + check_dst + "\n Please select reinstall from the menu if you would like to continue")
if reinstall == True:
#insert code to delete file here
print("not yet implemented")
#///////////////////////////////
print("\n")
#3. ////////////////////////////////Moving Main Files///////////////////////////////
maindir = "C:\\Py_Ormolu"
projectname = "\\MuteOnMuteOff"
target_dir = maindir + projectname
if pathlib.Path(maindir).is_dir() == False: # We check if our main directory is in place ("This is used for multiple projects")
pathlib.Path(maindir).mkdir() # if does not exist create it!!
source_dir = current_file_path
def on_rm_error(func, path, exc_info):
    """shutil.rmtree onerror hook: clear the read-only bit, then delete the file.

    From: https://stackoverflow.com/questions/4829043/how-to-remove-read-only-attrib-directory-with-python-in-windows
    """
    target = pathlib.Path(path)
    target.chmod(stat.S_IWRITE)  # make writable (drops the read-only attribute on Windows)
    target.unlink()
def move_main_files():
    """Move the installer's working directory into the permanent install location.

    First unhides (via `attrib -H`) and force-deletes any `.git` folder — its
    read-only object files need the `on_rm_error` hook — then moves every
    remaining entry from the source directory into the module-level
    `target_dir`, creating it on first use.  Prints a warning on any failure.
    """
    source_dir = current_file_path
    try:
        for i in os.listdir(source_dir):
            if i.endswith('.git'):
                tmp = os.path.join(source_dir, i)
                # We want to unhide the .git folder before unlinking it.
                # NOTE(review): the while/break wrapper runs exactly once —
                # it is effectively a plain call.
                while True:
                    subprocess.call(['attrib', '-H', tmp])
                    break
                shutil.rmtree(tmp, onerror=on_rm_error)
        source_dir = current_file_path
        file_names = os.listdir(source_dir)
        for file_name in file_names:
            # Lazily create the destination on the first moved file.
            if pathlib.Path(target_dir).is_dir() == False:
                pathlib.Path(target_dir).mkdir()
            shutil.move(os.path.join(source_dir, file_name), target_dir)
    except:
        print("Warning: Something went wrong during moving main files... \n" + "Double check that files exists under \n" + target_dir)
if pathlib.Path(target_dir).is_dir() == False:
print("Moving main files")
move_main_files()
elif pathlib.Path(target_dir).is_dir() == True:
#If there is a file in startup then:
if reinstall == False:
print("ERROR: Main files already exsists under : \n" + " " + target_dir + "\n Please select reinstall from the menu if you would like to continue")
if reinstall == True:
#insert code to delete file here
print("not yet implemented")
#///////////////////////////////
#3. ////////////////////////////////Moving Main Files///////////////////////////////
print("Starting Program")
start_script = current_file_path.joinpath("support_files\\relaunch.vbs") #Adds the relaunch script to the current directory path
subprocess.call("cmd /c " + str(start_script)) #str() is needed to convert the windows_path to a string for subproccess
input("Press Enter to continue...") # Makes the user hit enter to conitnue
exit()
#///////////////////////////////
| true
|
23ae85c8f83c6c26eb082460b08f31356b243895
|
Python
|
wansang93/Algorithm
|
/SW Expert Academy/Python/Python D3/10505. 소득 불균형.py
|
UTF-8
| 257
| 3.671875
| 4
|
[] |
no_license
|
# SWEA 10505 "income inequality": for each of T test cases, read N incomes
# and count how many are at or below the mean, printing "#<case> <count>".
T = int(input())
for t in range(1, T + 1):
    N = int(input())
    income = [int(tok) for tok in input().split()]
    average = sum(income) / N
    answer = sum(1 for i in income if i <= average)
    print(f'#{t} {answer}')
| true
|
6c34300f4eb44654b15ef2399901e0437ee808dc
|
Python
|
Free0xFF/DbtLock
|
/Redlock-python/lock_utility.py
|
UTF-8
| 2,006
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
'''
@author: yongmao.gui
'''
from Redlock import Redlock,Lock
import logging
key = None
redis_connection = ["redis://localhost:6379/0"]
'''
get lock
'''
def lock(name, validity, retry_count=3, retry_delay=500, **kwargs):
    """Acquire the distributed lock `name` for `validity` milliseconds.

    retry_count < 0 means "retry forever" (the outer while loops); otherwise
    the Redlock client is given retry_count+1 attempts.  retry_delay is in
    milliseconds (converted to seconds for Redlock).  On success the lock
    token is stored in the module-global `key` (consumed later by unlock())
    and 0 is returned; returns 1 when the lock could not be obtained and 3
    on an unexpected error.
    """
    global key
    if retry_count < 0:
        retry_count = 0
        is_blocking = True # unlimited retry times
    else:
        is_blocking = False # limited retry times
    while True:
        try:
            dlm = Redlock(redis_connection, retry_count=retry_count+1, retry_delay=retry_delay/1000.0)
            lock = dlm.lock(name, validity)
            if lock is False:
                logging.info("Obtain lock failed!")
                err = 1
            else:
                logging.info("Obtain lock successfully!")
                key = lock.key.decode()
                logging.info("lock.key: {}".format(key))
                return 0
        except Exception as ex:
            logging.error("Error occurred while obtain lock: %s" % str(ex))
            err = 3
        # Blocking mode keeps retrying forever; otherwise report the error code.
        if is_blocking:
            continue
        else:
            return err
'''
release lock
'''
def unlock(name):
    """Release the lock `name` using the token stored by lock().

    Returns 0 on success and 3 on error.  NOTE(review): relies on the
    module-global `key`, so only the most recently acquired lock can be
    released — concurrent use from one process would clobber the token.
    """
    global key
    try:
        dlm = Redlock(redis_connection)
        lock = Lock(0, name, key)
        dlm.unlock(lock)
    except Exception as err:
        logging.error("Error occurred while release lock: %s" % str(err))
        return 3
    return 0
return 0
if __name__ == '__main__':
#simulate clients
import redis
server = redis.StrictRedis.from_url(redis_connection[0])
def incr(name):
v = server.get(name)
if v is None:
v = 1
else:
v = int(v.decode()) + 1
server.set(name, v)
for i in range(50000):
retcode = lock("test_dbt_lock", 3000)
print("lock:retcode="+str(retcode))
incr("key")
retcode = unlock("test_dbt_lock")
print("unlock:retcode="+str(retcode))
| true
|
f093ec7a51346f267692a8b322cf6e40a136cd79
|
Python
|
eecheve/Gaussian-2-Blender
|
/gui/IonRegion.py
|
UTF-8
| 4,439
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
import tkinter as tk
import CreateTooltip
tooltip = CreateTooltip.CreateTooltip
import SelectedIon
class IonRegion(object):
    """Section of the app that receives information about possible ions present"""
    def __init__(self, parent):
        """Build the scrollable "Ion information" frame inside `parent`."""
        self.ionCount = 0   # number of SelectedIon input rows currently shown
        self.lst_ions = []  # the SelectedIon widgets, newest last
        self.var_ionNames = tk.StringVar()
        self.int_hasIons = tk.IntVar()   # checkbox state: "check for ionic radii"
        self.int_unitCell = tk.IntVar()  # checkbox state: "unit cell boundaries"
        self.frm_ions = tk.LabelFrame(master=parent,
                                      padx=5,
                                      text="Ion information",
                                      fg="blue",
                                      relief=tk.GROOVE,
                                      borderwidth=2)
        self.frm_ions.grid(row=2, column=0, padx=2, pady=2, sticky="W", rowspan=2)
        # Canvas + inner frame + scrollbar: the standard tk scrollable-region idiom.
        self.canvas = tk.Canvas(self.frm_ions)
        self.frm_inside = tk.Frame(self.canvas)
        self.scrl_frame = tk.Scrollbar(master=self.frm_ions,
                                       orient="vertical",
                                       command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=self.scrl_frame.set)
        self.scrl_frame.pack(side="right",fill="y")
        self.canvas.pack(side="left")
        self.canvas.create_window((0,0),
                                  window=self.frm_inside,
                                  anchor='nw')
        # Re-fit the scrollregion whenever the inner frame resizes.
        self.frm_inside.bind("<Configure>", self.canvasConfig)
        self.chk_hasIons = tk.Checkbutton(master=self.frm_inside,
                                          text="check for ionic radii",
                                          variable=self.int_hasIons,
                                          command=self.activator)
        self.ttp_hasIons = tooltip(self.chk_hasIons,
                                   "Check if some elements radii are ionic radii instead of covalent radii")
        self.btn_addIon = tk.Button(text="add",
                                    master=self.frm_inside,
                                    command=self.addIon,
                                    state=tk.DISABLED)
        self.ttp_addIon = tooltip(self.btn_addIon,
                                  "Click here to add another ion to specify")
        self.btn_removeIon = tk.Button(master=self.frm_inside,
                                       text="remove",
                                       command=self.removeIon,
                                       state=tk.DISABLED)
        self.ttp_removeIon = tooltip(self.btn_removeIon,
                                     "Click here to remove the last added ion")
        self.chk_unitCell = tk.Checkbutton(master=self.frm_inside,
                                           text="unit cell boundaries",
                                           variable=self.int_unitCell,
                                           state=tk.DISABLED)
        # NOTE(review): this rebinds self.ttp_hasIons (set above for
        # chk_hasIons) — presumably meant to be self.ttp_unitCell.
        self.ttp_hasIons = tooltip(self.chk_unitCell,
                                   "Check to replace dashed bonds with solid lines")
        self.chk_hasIons.grid(row=0, column=0)
        self.chk_unitCell.grid(row=0, column=1)
        self.btn_addIon.grid(row=1, column=0)
        self.btn_removeIon.grid(row=1, column=1)
    def addIon(self):
        """Append a new SelectedIon input row below the buttons."""
        ion = SelectedIon.SelectedIon(self.frm_inside, self.ionCount + 2, 0)
        self.lst_ions.append(ion)
        self.ionCount += 1
    def removeIon(self):
        """Remove the most recently added ion row.

        NOTE(review): pops unconditionally — raises IndexError when the list
        is empty; only the counter decrement is guarded.
        """
        last_element = self.lst_ions.pop()
        last_element.delete()
        if self.ionCount > 0:
            self.ionCount -= 1
    def removeAllIons(self):
        """Delete every ion row and reset the counter."""
        for ion in self.lst_ions:
            ion.delete()
        self.lst_ions.clear()
        self.ionCount = 0
    def activator(self):
        """Checkbox handler: enable the add/remove/unit-cell controls, or
        disable them and clear all ion rows when toggled off."""
        if self.btn_addIon['state'] == tk.DISABLED:
            self.btn_addIon['state'] = tk.NORMAL
            self.btn_removeIon['state'] = tk.NORMAL
            self.chk_unitCell['state'] = tk.NORMAL
            print("##### ACTIVATING IONS INFORMATION INPUT ####")
        else:
            self.btn_addIon['state'] = tk.DISABLED
            self.btn_removeIon['state'] = tk.DISABLED
            self.chk_unitCell['state'] = tk.DISABLED
            self.removeAllIons()
            print("#### DEACTIVATING ION INFORMATION INPUT ####")
    def canvasConfig(self, event):
        """<Configure> callback: clamp the scrollregion to the content size."""
        self.canvas.configure(scrollregion=self.canvas.bbox("all"),
                              width=325, height=125)
| true
|
ea1e046d3ca712c6f9fc74adc0c9f3bcd7fe8019
|
Python
|
tms1337/tensorflow-tutorial
|
/intro.py
|
UTF-8
| 1,848
| 2.859375
| 3
|
[] |
no_license
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
def main():
    """Walk through TensorFlow 1.x graph-mode basics.

    Demonstrates constants, placeholders, variables, a linear model, a
    sum-of-squares loss, and ten steps of gradient descent, printing the
    intermediate results.  NOTE(review): tf.Session / tf.placeholder are
    TF1-only APIs; this does not run on TensorFlow 2.x.
    """
    node1 = tf.constant(3.0, tf.float32)
    node2 = tf.constant(4.0)  # dtype inferred as float32
    print(node1, node2)
    sess = tf.Session()
    print(sess.run([node1, node2]))
    print(sess.run(node1))
    node3 = tf.add(node1, node2)
    print(sess.run(node3))
    # Placeholders are fed at run time via a feed_dict.
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    print(a, b)
    c = a + b
    print(sess.run(c, {a: 3, b: 4.5}))
    # Trainable linear model: y = W*x + b.
    W = tf.Variable([-3.0], tf.float32)
    b = tf.Variable([-0.4], tf.float32)
    x = tf.placeholder(tf.float32)
    linear_model = W * x + b
    init = tf.global_variables_initializer()  # TF1 variables need explicit init
    sess.run(init)
    result = sess.run(linear_model, {x: 2.0})
    print(result)
    result = sess.run(linear_model, {x: [1, 2, 3, 4]})
    print(result)
    # Sum-of-squares loss against the target placeholder y.
    y = tf.placeholder(tf.float32)
    deltas_sq = tf.square(y - linear_model)
    loss = tf.reduce_sum(deltas_sq)
    # Manually assign W=1, b=-1 before evaluating the loss once.
    fix_W = tf.assign(W, [1.0])
    fix_b = tf.assign(b, [-1.0])
    sess.run([fix_W, fix_b])
    io = {x: [1, 2, 3, 4], y: [1, 2, 3, 4]}
    result = sess.run(loss, io)
    print(result)
    # Ten steps of plain gradient descent, printing params before/after.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train = optimizer.minimize(loss)
    print(sess.run([W, b]))
    for i in range(10):
        sess.run(train, io)
    print(sess.run([W, b]))
def contrib():
    """Fit a one-feature linear regressor with the tf.contrib.learn
    canned-estimator API and print its evaluation metrics.

    NOTE(review): tf.contrib was removed in TensorFlow 2.x — TF 1.x only.
    """
    features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
    estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
    # Training data follows y = -x + 1 exactly.
    x = np.array([1., 2., 3., 4.])
    y = np.array([0., -1., -2., -3.])
    input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, batch_size=4, num_epochs=1000)
    estimator.fit(input_fn=input_fn, steps=1000)
    print(estimator.evaluate(input_fn=input_fn))
if __name__ == "__main__":
contrib()
| true
|
a96dcbc864d6ce6e61ae7c71fbfa40f54e832992
|
Python
|
BetulCengiz/pythonProject
|
/Harmonik Toplam.py
|
UTF-8
| 292
| 3.453125
| 3
|
[] |
no_license
|
def harmonik_toplam(n):
    """Return the n-th harmonic number H(n) = 1 + 1/2 + ... + 1/n, recursively."""
    if n == 1:
        return 1
    else:
        return 1/n + harmonik_toplam(n-1)


def harmoniktoplamiteratif(n):
    """Return the n-th harmonic number H(n) iteratively.

    Bug fix: the original accumulated `toplam + 1` on every step — returning
    n instead of the harmonic sum — rather than adding the term 1/i.
    """
    toplam = 0
    for i in range(1, n + 1):
        toplam = toplam + 1 / i
    return toplam


print(harmonik_toplam(4))
print(harmoniktoplamiteratif(4))
| true
|
086ddb804bb6c3b866c839d126614f1effb8745f
|
Python
|
Zt-1021/ztpython
|
/study/mogugu/unit/testHomePageSuite.py
|
UTF-8
| 1,017
| 2.515625
| 3
|
[] |
no_license
|
"""初学测试集"""
import unittest
from study.mogugu.unit_test.home_page_test import homepage
import HTMLTestRunner
# 加载用例
suite = unittest.TestSuite()
# 通过对象加载用例
# suite.addTest(testMathMethod.TestMathMethodAdd('test_two_zero')) # TestMathMethodAdd('test_two_zero')
loader = unittest.TestLoader()
# 通过类加载用例
# suite = loader.loadTestsFromTestCase(testMathMethod.TestMathMethodAdd) # TestMathMethodAdd
# 通过模块加载
suite = loader.loadTestsFromModule(homepage)
# 执行报告
# with open('math.txt', 'w', encoding='utf-8') as file:
with open('math.html', 'wb') as file:
# 执行用例
# runner = unittest.TextTestRunner(stream=file, descriptions=True, verbosity=2)
runner = HTMLTestRunner.HTMLTestRunner(stream=file,
verbosity=2,
title='测试mogugu首页',
description="第一份测试报告")
runner.run(suite)
| true
|
f7760f34e905e3fbcab992e2f1cbdae4503c2a75
|
Python
|
entn-at/rnnt-speech-recognition
|
/debug/get_common_voice_stats.py
|
UTF-8
| 1,401
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
from argparse import ArgumentParser
from scipy.io.wavfile import read as read_wav
import glob
import os
def main(args):
    """Print total/min/max/average clip duration (seconds) for one split of a
    Common Voice dataset whose clips have already been converted to .wav."""
    max_length = 0
    min_length = 0
    total_length = 0
    count = 0
    with open(os.path.join(args.data_dir, args.split + '.tsv'), 'r') as f:
        next(f)  # skip the TSV header row
        for line in f:
            line_split = line.split('\t')
            audio_fn = line_split[1]  # second column holds the clip filename
            # Swap the original extension for .wav in the clips/ directory.
            filepath = os.path.join(args.data_dir, 'clips', audio_fn[:-4] + '.wav')
            sr, data = read_wav(filepath)
            length = len(data) / sr  # duration in seconds
            if length > max_length:
                max_length = length
            # min_length == 0 doubles as "not set yet".
            if length < min_length or min_length == 0:
                min_length = length
            total_length += length
            count += 1
    avg_length = total_length / count
    print('Total: {:.4f} s'.format(total_length))
    print('Min length: {:.4f} s'.format(min_length))
    print('Max length: {:.4f} s'.format(max_length))
    print('Average length: {:.4f} s'.format(avg_length))
def parse_args():
    """Parse the statistics script's command-line arguments."""
    parser = ArgumentParser()
    parser.add_argument(
        '-d', '--data_dir', required=True, type=str,
        help='Directory of common voice dataset.')
    parser.add_argument(
        '-s', '--split', type=str, default='train',
        help='Split to get statistics for.')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
args = parse_args()
main(args)
| true
|
15f4841840d1e636d9ab8c0f059f183e16dc13d4
|
Python
|
mikochou/leetcode_record
|
/FlattenBinaryTreetoLinkedList.py
|
UTF-8
| 556
| 3.1875
| 3
|
[] |
no_license
|
class Solution(object):
    def flatten(self, root):
        """
        :type root: TreeNode
        :rtype: None Do not return anything, modify root in-place instead.
        """
        # Iteratively rewire the tree into a right-leaning linked list in
        # pre-order, parking right subtrees on an explicit stack.
        deferred = []
        while root and (root.left or root.right or deferred):
            if root.right:
                # The right subtree comes after the entire left subtree.
                deferred.append(root.right)
                root.right = None
            if root.left:
                # Promote the left child into the right slot.
                root.right, root.left = root.left, None
                root = root.right
            elif deferred:
                # Left side exhausted: resume the most recently parked subtree.
                root.right = deferred.pop()
                root = root.right
| true
|
db9789a91a878710c9ec91db541bee65b28396f0
|
Python
|
piazentin/programming-challenges
|
/hacker-rank/implementation/absolute_permutation.py
|
UTF-8
| 541
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
# https://www.hackerrank.com/challenges/absolute-permutation
def absolute_permutation(n, k):
    """Return the lexicographically smallest permutation P of 1..n with
    |P[i] - i| == k at every position, as a space-separated string, or -1
    when no such permutation exists."""
    if not k:
        # k == 0: only the identity works.
        return ' '.join(str(i) for i in range(1, n + 1))
    if n % (k * 2) != 0:
        # Positions pair up in blocks of 2k, so n must divide evenly.
        return -1
    # Within each 2k-block the first k values and last k values swap places.
    block = [k + i for i in range(1, k + 1)] + [i for i in range(1, k + 1)]
    values = [2 * k * m + i for m in range(n // (k * 2)) for i in block]
    return ' '.join(str(v) for v in values)
T = int(input())
for _ in range(T):
n, k = [int(i) for i in input().split()]
print(absolute_permutation(n, k))
| true
|
46094f7f22b056b7487a4f9c73e71703dce3e313
|
Python
|
tarsioonofrio/PySDDP
|
/PySDDP/dessem/script/bateria.py
|
UTF-8
| 3,367
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
from PySDDP.dessem.script.templates.bateria import BateriaTemplate
import pandas as pd
from typing import IO
import os
COMENTARIO = '&'
class Bateria(BateriaTemplate):
    """
    Everything common to any version of the DESSEM "Bateria" (battery) file.

    Provides duck typing for the Dessem class and adds a level of
    specialization inside the factory; implementing the read/write methods
    is this class's responsibility.
    """
    def __init__(self):
        super().__init__()
        # Column-oriented buffers for ARMAZENAMENTO-CAD records
        # (one list per fixed-width field).
        self.cad = dict()
        self.cad['mneumo'] = list()
        self.cad['num'] = list()
        self.cad['nome'] = list()
        self.cad['capac'] = list()
        self.cad['carreg'] = list()
        self.cad['descarreg'] = list()
        self.cad['eficiencia'] = list()
        self.cad['barra'] = list()
        self.cad['subm'] = list()
        # Column-oriented buffers for ARMAZENAMENTO-INIC records.
        self.inic = dict()
        self.inic['mneumo'] = list()
        self.inic['num'] = list()
        self.inic['carreg'] = list()
        self.comentarios = list()  # raw '&' comment lines, in file order
    def ler(self, file_name: str) -> None:
        """Read a latin-1 Bateria file, slicing each line's fixed-width
        columns into `cad` / `inic`.

        The loop ends via the StopIteration raised by next_line at EOF, at
        which point both dicts are frozen into DataFrames on bloco_cad /
        bloco_inic; any other exception is re-raised.
        """
        try:
            with open(file_name, 'r', encoding='latin-1') as f:  # type: IO[str]
                continua = True
                while continua:  # exits only via StopIteration from next_line
                    self.next_line(f)
                    linha = self.linha.strip()
                    if linha[0] == COMENTARIO:
                        self.comentarios.append(linha)
                        continue
                    if linha[0:17] == 'ARMAZENAMENTO-CAD':
                        self.cad['mneumo'].append(linha[0:17])
                        self.cad['num'].append(linha[18:22])
                        self.cad['nome'].append(linha[23:35])
                        self.cad['capac'].append(linha[36:46])
                        self.cad['carreg'].append(linha[47:57])
                        self.cad['descarreg'].append(linha[58:68])
                        self.cad['eficiencia'].append(linha[69:79])
                        self.cad['barra'].append(linha[80:85])
                        self.cad['subm'].append(linha[86:89])
                        continue
                    if linha[0:18] == 'ARMAZENAMENTO-INIC':
                        self.inic['mneumo'].append(linha[0:18])
                        self.inic['num'].append(linha[19:23])
                        self.inic['carreg'].append(linha[24:34])
                        continue
        except Exception as err:
            if isinstance(err, StopIteration):
                # Normal end of file: freeze the buffers into DataFrames.
                self.bloco_inic['df'] = pd.DataFrame(self.inic)
                self.bloco_cad['df'] = pd.DataFrame(self.cad)
                print("OK! Leitura do", os.path.split(file_name)[1], "realizada com sucesso.")
            else:
                raise
    def escrever(self, file_out: str) -> None:
        """Write both record blocks to `file_out` (latin-1), formatting each
        DataFrame row with the block's fixed-width `formato` template."""
        try:
            with open(file_out, 'w', encoding='latin-1') as f:  # type: IO[str]
                for idx, value in self.bloco_cad['df'].iterrows():
                    linha = self.bloco_cad['formato'].format(**value)
                    f.write(linha)
                for idx, value in self.bloco_inic['df'].iterrows():
                    linha = self.bloco_inic['formato'].format(**value)
                    f.write(linha)
        except Exception:
            raise
| true
|
63494e10404b75704f19d9381eac743d3ec1bd2f
|
Python
|
mac389/phikal
|
/src/old/calculate-effect-matrix.py
|
UTF-8
| 1,037
| 2.703125
| 3
|
[] |
no_license
|
import json
import numpy as np
from progress.bar import Bar
from awesome_print import ap
# create effect matrix
# Load the survey database, the effect-class list, and the drug -> effects
# taxonomy from disk.
# NOTE(review): binary-mode 'rb' JSON opens and `db.itervalues()` later in
# this script indicate it targets Python 2.
db = json.load(open('../data/db.json','rb'))
effects = open('../data/master-class-list','rb').read().splitlines()
taxonomy = json.load(open('../data/drug-taxonomy.json','rb'))
def process(effect, entry):
    """Return +1 when `effect` appears among the effects of any drug listed
    in `entry`, else -1; ticks the module-level progress bar and echoes the
    intermediate values via ap().

    Fix: the original built the flattened effect list with comprehensions
    whose loop variable was also named `effect` — under Python 2 the list
    comprehension leaks and rebinds the parameter after the first ap() call,
    so subsequent comparisons used the wrong value.  It also recomputed the
    same list three times.
    """
    bar.next()
    ap('Looking for whether %s in' % effect)
    # Flatten every effect of every drug in this entry, exactly once.
    entry_effects = [eff for drug in entry["drugs"]
                     for eff in taxonomy[drug]["effects"]]
    ap(entry_effects)
    flag = 1 if effect in entry_effects else -1
    ap(flag)
    return flag
# Progress bar sized for the full effects x entries sweep.
bar = Bar("Filling occurence matrix",max =len(db)*len(effects))
# Rows = effects, columns = database entries; cells are +/-1 occurrence flags.
m = np.array([[process(effect,entry)
    for entry in db.itervalues()]
    for effect in effects],dtype=int)
bar.finish()
np.savetxt('../data/effect-matrix.tsv', m, fmt='%d',delimiter='\t')
# m . m^T counts signed co-occurrence between every pair of effects.
np.savetxt('../data/effect-correlation-matrix.tsv', m.dot(m.T), fmt='%d',delimiter='\t')
| true
|
6ec1532cf6d78c660f368ce694d15a9895d6b743
|
Python
|
woosikyang/NLP
|
/torchtext_tutorial.py
|
UTF-8
| 1,971
| 3.015625
| 3
|
[] |
no_license
|
'''
Creating Dataset
'''
import torch
import torchtext.data as data
import pandas as pd
import os
import pickle
with open('data/data1.txt', 'rb') as f:
train = pickle.load(f)
train = train.iloc[:,:-1]
train = train[['business_goal','class_code']]
train.columns = ['text','label']
with open('data/data2.txt', 'rb') as f:
test = pickle.load(f)
test = test.iloc[-5000:,:-1]
test = test[['business_goal','class_code']]
test.columns = ['text','label']
# step1
TEXT = data.Field(sequential=True,
use_vocab=True,
tokenize=str.split,
lower=True,
batch_first=True)
LABEL = data.LabelField()
# step2 - Dataset
class DataFrameDataset(data.Dataset):
    """torchtext Dataset built directly from a pandas DataFrame that has
    `text` and `label` columns."""
    def __init__(self, df, text_field, label_field, is_test=False, **kwargs):
        # One torchtext Example per row; test-set rows get label None.
        fields = [('text', text_field), ('label', label_field)]
        examples = []
        for i, row in df.iterrows():
            label = row.label if not is_test else None
            text = row.text
            examples.append(data.Example.fromlist([text, label], fields))
        super().__init__(examples, fields, **kwargs)
    @staticmethod
    def sort_key(ex):
        # Bucket/sort examples by text length (torchtext batching convention).
        return len(ex.text)
    @classmethod
    def splits(cls, text_field, label_field, train_df, val_df=None, test_df=None, **kwargs):
        """Build (train, val, test) datasets from the given DataFrames,
        returning a tuple containing only the splits that were provided."""
        train_data, val_data, test_data = (None, None, None)
        if train_df is not None:
            train_data = cls(train_df.copy(), text_field, label_field, **kwargs)
        if val_df is not None:
            val_data = cls(val_df.copy(), text_field, label_field, **kwargs)
        if test_df is not None:
            test_data = cls(test_df.copy(), text_field, label_field, True, **kwargs)
        return tuple(d for d in (train_data, val_data, test_data) if d is not None)
train_ds, test_ds = DataFrameDataset.splits(
text_field=TEXT, label_field=LABEL, train_df=train, test_df=test)
TEXT.build_vocab(train_ds)
len(TEXT.vocab)
| true
|
c3c97d7fb642b1d68b5a7499b5f8eea5a22fb31c
|
Python
|
screnary/Algorithm_python
|
/sort_mian17_14_smallestK.py
|
UTF-8
| 1,116
| 3.328125
| 3
|
[] |
no_license
|
# 最小K个数: 堆排序
# 维护K维大顶堆
# Smallest K numbers via a hand-rolled k-element max-heap.
class Solution:
    def smallestK(self, arr: "List[int]", k: int) -> "List[int]":
        """Return the k smallest elements of `arr` (in arbitrary order).

        Maintains a max-heap of size k: every remaining element smaller than
        the heap root replaces it.  O(n log k) time, O(k) extra space.

        Fix: the List annotations are quoted so defining this class does not
        raise NameError — `typing.List` is never imported in this file (the
        original LeetCode harness injects it).
        """
        if k == 0:
            return []

        def sift_down(heap, root, n):
            # Restore the max-heap property for heap[:n] starting at `root`.
            cur_val = heap[root]
            while (2 * root + 1) < n:  # while `root` still has a child
                child = 2 * root + 1
                if child + 1 < n and heap[child + 1] > heap[child]:
                    child = child + 1  # pick the larger child
                if cur_val < heap[child]:
                    heap[root] = heap[child]  # bubble the larger child up
                    root = child
                else:
                    break
            heap[root] = cur_val

        # Floyd build-heap over the first k items, from the last parent down.
        k_max_heap = arr[:k]
        for i in range((k - 1 - 1) // 2, -1, -1):
            sift_down(k_max_heap, i, k)

        # Each remaining item smaller than the current max replaces the root.
        for num in arr[k:]:
            if num < k_max_heap[0]:
                k_max_heap[0] = num
                sift_down(k_max_heap, 0, k)
        return k_max_heap
| true
|
00f839b6003b83c16d3697f30fae4759ada94a18
|
Python
|
rootid23/fft-py
|
/heap/meeting-rooms-ii.py
|
UTF-8
| 1,809
| 3.90625
| 4
|
[] |
no_license
|
class Interval:
    """A meeting interval with a start and end time (defaults 0, 0)."""
    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e
#W/ Sorting
# Very similar with what we do in real life. Whenever you want to start a meeting,
# you go and check if any empty room available (available > 0) and
# if so take one of them ( available -=1 ). Otherwise,
# you need to find a new room someplace else ( numRooms += 1 ).
# After you finish the meeting, the room becomes available again ( available += 1 ).
def minMeetingRooms(self, intervals):
    """Return the minimum number of rooms needed so no two of the given
    intervals (objects with .start / .end) overlap in the same room.

    Two-pointer sweep over independently sorted start and end times: a
    meeting starting before the earliest unfinished one ends needs a room
    (reuse a free one if possible); every meeting that ends frees a room.
    NOTE(review): `self` is unused — this looks like a LeetCode method
    pasted at module level.
    """
    start_times = sorted(interval.start for interval in intervals)
    end_times = sorted(interval.end for interval in intervals)

    rooms = free_rooms = 0
    s = e = 0
    while s < len(start_times):
        if start_times[s] < end_times[e]:
            # A meeting starts before the earliest one finishes.
            if free_rooms:
                free_rooms -= 1
            else:
                rooms += 1
            s += 1
        else:
            # The earliest-ending meeting is over: its room is free again.
            free_rooms += 1
            e += 1
    return rooms
#W/ heap
import heapq
def minMeetingRooms(intvs):
    """Heap-based variant: minimum number of rooms for interval objects
    (with .start / .end attributes).

    The min-heap holds the end time of one ongoing meeting per occupied
    room; the answer is the largest heap size ever reached.
    """
    if not intvs:
        return 0
    # Sweep meetings in order of start time.
    ordered = sorted(intvs, key=lambda item: item.start)
    end_heap = [ordered[0].end]
    busiest = 1
    for meeting in ordered[1:]:
        if end_heap[0] <= meeting.start:
            # The earliest-ending meeting is already over: reuse its room.
            heapq.heappop(end_heap)
        heapq.heappush(end_heap, meeting.end)
        busiest = max(busiest, len(end_heap))
    return busiest
#Given [[15, 20],[0, 30],[5, 10]],
lst = [ Interval(0, 30), Interval(5, 10), Interval(15, 20)]
print ( minMeetingRooms(lst) )
#Meeting Rooms II
#Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the
#minimum number of conference rooms required.
#For example,
#Given [[0, 30],[5, 10],[15, 20]],
#return 2.
| true
|
bf3a19938b8d7220b0f74a4a3a5f6d19d08fc143
|
Python
|
openforcefield/openff-interchange
|
/openff/interchange/_tests/unit_tests/components/test_toolkit.py
|
UTF-8
| 3,291
| 2.578125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
] |
permissive
|
import pytest
from openff.toolkit import Molecule, Topology
from openff.toolkit.topology._mm_molecule import _SimpleMolecule
from openff.utilities.testing import skip_if_missing
from openff.interchange._tests import _BaseTest
from openff.interchange.components.toolkit import (
_check_electrostatics_handlers,
_combine_topologies,
_get_14_pairs,
_get_num_h_bonds,
_simple_topology_from_openmm,
)
@pytest.fixture()
def simple_methane():
    """A graph-only (_SimpleMolecule) methane built from SMILES."""
    return _SimpleMolecule.from_molecule(Molecule.from_smiles("C"))
@pytest.fixture()
def simple_water(water):
    """A graph-only (_SimpleMolecule) copy of the shared `water` fixture."""
    return _SimpleMolecule.from_molecule(water)
def test_simple_topology_uniqueness(simple_methane, simple_water):
    """Duplicate _SimpleMolecules should collapse into per-species groups."""
    topology = Topology.from_molecules(
        [
            simple_methane,
            simple_water,
            simple_methane,
            simple_methane,
            simple_water,
        ],
    )

    # Two distinct chemistries (methane, water) -> two identical-molecule groups.
    assert len(topology.identical_molecule_groups) == 2
class TestToolkitUtils(_BaseTest):
    """Unit tests for the helpers in openff.interchange.components.toolkit."""
    @pytest.mark.parametrize(
        ("smiles", "num_pairs"),
        [
            ("C#C", 1),
            ("CCO", 12),
            ("C1=CC=CC=C1", 24),
            ("C=1=C=C1", 0),
            ("C=1=C=C=C1", 0),
            ("C=1(Cl)-C(Cl)=C1", 1),
            ("C=1=C(Cl)C(=C=1)Cl", 5),
        ],
    )
    def test_get_14_pairs(self, smiles, num_pairs):
        """_get_14_pairs gives the same count for a Molecule and its
        single-molecule Topology."""
        mol = Molecule.from_smiles(smiles)
        assert len([*_get_14_pairs(mol)]) == num_pairs
        assert len([*_get_14_pairs(mol.to_topology())]) == num_pairs
    def test_check_electrostatics_handlers(self, tip3p):
        """LibraryCharges alone still counts as an electrostatics source;
        removing it too makes the check fail."""
        tip3p.deregister_parameter_handler("Electrostatics")
        assert _check_electrostatics_handlers(tip3p)
        tip3p.deregister_parameter_handler("LibraryCharges")
        assert not _check_electrostatics_handlers(tip3p)
    @pytest.mark.parametrize(
        ("smiles", "num_h_bonds"),
        [("C", 4), ("C#C", 2), ("O", 2)],
    )
    def test_get_num_h_bonds(self, smiles, num_h_bonds):
        """_get_num_h_bonds counts the bonds involving a hydrogen atom."""
        topology = Molecule.from_smiles(smiles).to_topology()
        assert _get_num_h_bonds(topology) == num_h_bonds, smiles
    def test_combine_topologies(self, water):
        """Atom and bond counts of a combined topology are additive."""
        ethanol = Molecule.from_smiles("CCO")
        ethanol.name = "ETH"
        ethanol_topology = ethanol.to_topology()
        water.name = "WAT"
        water_topology = water.to_topology()
        combined = _combine_topologies(ethanol_topology, water_topology)
        for attr in (
            "atoms",
            "bonds",
        ):
            attr = "n_" + attr
            assert getattr(combined, attr) == getattr(ethanol_topology, attr) + getattr(
                water_topology,
                attr,
            )
    @skip_if_missing("openmm")
    def test_simple_topology_from_openmm(self):
        """Round-tripping through OpenMM yields _SimpleMolecules with the
        expected atom counts (water=3, ethanol=9)."""
        simple_topology = _simple_topology_from_openmm(
            Topology.from_molecules(
                [
                    Molecule.from_smiles("O"),
                    Molecule.from_smiles("CCO"),
                ],
            ).to_openmm(),
        )
        assert all(
            isinstance(molecule, _SimpleMolecule)
            for molecule in simple_topology.molecules
        )
        assert sorted(molecule.n_atoms for molecule in simple_topology.molecules) == [
            3,
            9,
        ]
| true
|
3857855d2b80b4437a16f34eb270752f4658e367
|
Python
|
Zagrebelin/django-voice-machine
|
/voice_machine/models.py
|
UTF-8
| 5,440
| 2.671875
| 3
|
[] |
no_license
|
import datetime
from django.db import models
from django.template import Context, Template
from django.utils import timezone
from . import humanize
class Holiday(models.Model):
    """A single calendar date that should be treated as a holiday."""
    date = models.DateField()
    year = models.IntegerField()
    def __str__(self):
        return self.date.strftime('%d.%m.%Y')
class ScheduleItemManager(models.Manager):
    # Index matches datetime.weekday(): 0 == Monday.
    weekdaynames = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
    def for_date(self, dt: datetime.datetime = None):
        """Return the ScheduleItems due at `dt` (default: now): items whose
        holiday/workday flag, weekday flag, and exact HH:MM all match,
        ordered by `order`."""
        if not dt:
            dt = datetime.datetime.now()
        dt = dt.replace(second=0, microsecond=0)  # schedule granularity is one minute
        # Any date present in Holiday counts as a holiday.
        is_holiday = Holiday.objects.filter(date=dt.date()).count() > 0
        qs = super().get_queryset()
        if is_holiday:
            qs = qs.filter(use_holiday=True)
        else:
            qs = qs.filter(use_workday=True)
        weekday_field = 'use_' + self.weekdaynames[dt.weekday()]
        qs = qs.filter(**{weekday_field: True})
        qs = qs.filter(time=dt.time())
        qs = qs.order_by('order')
        return qs
def list_to_choices(*items):
    """Turn values into Django-style (value, label) choice pairs where the
    label equals the value."""
    return list(zip(items, items))
class ScheduleItem(models.Model):
    """One scheduled voice announcement: day-type/weekday/time filters,
    voice selection, and a Django-template message body."""
    # Day-type filters: fire on holidays and/or working days.
    use_holiday = models.BooleanField()
    use_workday = models.BooleanField()
    # Per-weekday filters (all enabled by default).
    use_monday = models.BooleanField(default=True)
    use_tuesday = models.BooleanField(default=True)
    use_wednesday = models.BooleanField(default=True)
    use_thursday = models.BooleanField(default=True)
    use_friday = models.BooleanField(default=True)
    use_saturday = models.BooleanField(default=True)
    use_sunday = models.BooleanField(default=True)
    # Which voice and emotion to synthesize with.
    voice_type = models.CharField(max_length=50, choices=list_to_choices('primary', 'secondary', 'random'),
                                  default='primary')
    voice_emotion = models.CharField(max_length=50, choices=list_to_choices('neutral', 'evil', 'good'),
                                     default='neutral')
    time = models.TimeField()
    message = models.TextField(help_text='Возможные замены: {{time}}, {{date}}, {{weekday}}, {{weather_today}}, {{weather_tomorrow}}.')
    order = models.IntegerField(default=0)  # playback order within one minute
    objects = ScheduleItemManager()
    def render(self, dt):
        """Render `message` as a Django template for the moment `dt`,
        exposing humanized date/time/weekday and today's/tomorrow's weather."""
        tomorrow = dt + datetime.timedelta(days=1)
        t = Template(self.message)
        context = {
            'date': humanize.date_as_string(dt),
            'time': humanize.time_as_string(dt),
            'weekday': humanize.weekday_as_string(dt),
            'weather_today': humanize.weather_for_day(Weather.objects.for_morning(dt),
                                                      Weather.objects.for_day(dt),
                                                      Weather.objects.for_evening(dt)),
            'weather_tomorrow': humanize.weather_for_day(Weather.objects.for_morning(tomorrow),
                                                         Weather.objects.for_day(tomorrow),
                                                         Weather.objects.for_evening(tomorrow))
        }
        ret = t.render(Context(context))
        return ret
    @property
    def display_date(self):
        """Human-readable (Russian) description of when this item fires."""
        wd = []
        if self.use_monday:
            wd.append('пн')
        if self.use_tuesday:
            wd.append('вт')
        if self.use_wednesday:
            wd.append('ср')
        if self.use_thursday:
            wd.append('чт')
        if self.use_friday:
            wd.append('пт')
        if self.use_saturday:
            wd.append('сб')
        if self.use_sunday:
            wd.append('вс')
        if not wd:
            return 'Никогда'
        if len(wd) == 7:
            a1 = 'В любой день'
        else:
            a1 = 'В ' + ' '.join(wd)
        if self.use_workday and not self.use_holiday:
            a2 = 'если это рабочий день'
        elif not self.use_workday and self.use_holiday:
            a2 = 'если это выходной или праздник'
        elif self.use_workday and self.use_holiday:
            a2 = 'если это выходной, праздник или рабочий день'
        else:
            return 'Никогда'
        if len(wd) == 7 and self.use_workday and self.use_holiday:
            return 'Каждый день'
        return f'{a1}, {a2}'
class WeatherManager(models.Manager):
    """Queryset helpers returning forecasts for parts of a day."""

    def for_morning(self, dt: datetime.datetime):
        """Forecasts between 05:00 and 10:00 of *dt*'s day."""
        return self.for_hour_range(dt, 5, 10)

    def for_day(self, dt: datetime.datetime):
        """Forecasts between 11:00 and 16:00 of *dt*'s day."""
        return self.for_hour_range(dt, 11, 16)

    def for_evening(self, dt: datetime.datetime):
        """Forecasts between 16:00 and 20:00 of *dt*'s day."""
        return self.for_hour_range(dt, 16, 20)

    def for_hour_range(self, dt: datetime.datetime, from_hour: int, to_hour: int):
        """Forecasts whose timestamp lies within [from_hour, to_hour] on *dt*'s day."""
        d1 = dt.replace(hour=from_hour, minute=0, second=0, microsecond=0)
        d2 = dt.replace(hour=to_hour, minute=0, second=0, microsecond=0)
        if timezone.is_naive(d1):
            d1 = timezone.make_aware(d1)
        if timezone.is_naive(d2):
            # BUG FIX: make_aware() was called without the datetime argument,
            # which raised TypeError whenever d2 was naive.
            d2 = timezone.make_aware(d2)
        qs = super().get_queryset()
        qs = qs.filter(when__range=(d1, d2))
        return qs
class Weather(models.Model):
    """A single weather forecast entry for a specific timestamp."""

    # Forecast timestamp; unique so there is at most one entry per moment.
    when = models.DateTimeField(unique=True)
    wind = models.CharField(max_length=200)
    temperature = models.IntegerField()
    description = models.CharField(max_length=200)

    objects = WeatherManager()

    def __str__(self):
        return str(self.when)
| true
|
26e715b219dcd5284406fbda597ade4f11af03e9
|
Python
|
xRame/PyMaster
|
/web/Json parse/parse.py
|
UTF-8
| 1,523
| 3.046875
| 3
|
[] |
no_license
|
import json
def del_elem(fromd, key):
    """Remove *key* from the dict *fromd* (raises KeyError when absent)."""
    del fromd[key]
def check_type(data, key):
    """Print the Python type of data[key] and recurse into containers.

    BUG FIX: the original used ``issubclass(<builtin>, type(value))``, which
    inverts the intended test — every plain int matched the bool branch
    because ``issubclass(bool, int)`` is True.  ``isinstance`` performs the
    intended check.  bool is still tested before int, because bool is a
    subclass of int.
    """
    value = data[key]
    if isinstance(value, list):
        print("list: ", key)
        size_list(data, key, value)
    elif isinstance(value, tuple):
        print("tuple: ", key)
    elif isinstance(value, dict):
        print("dict: ", key)
        size_dict(data, key)
    elif isinstance(value, bool):
        print("bool: ", key)
    elif isinstance(value, int):
        print("int: ", key)
    elif isinstance(value, str):
        print("str: ", key, 'size', len(value))
    elif isinstance(value, float):
        print("float: ", key)
def size_dict(data, keym):
    """Print the size of the nested dict data[keym] and inspect each of its entries."""
    print('\n\n', keym)
    nested = data[keym]
    print('size: ', len(nested.keys()))
    if len(nested.keys()) == 0:
        return False
    print(nested.keys())
    for inner_key in nested.keys():
        check_type(nested, inner_key)
def size_list(data, key, l):
    """Print the list's size; collapse a single-element list to its only element.

    Returns False for an empty list, None otherwise.

    BUG FIX: this function contained unresolved git merge-conflict markers
    (``<<<<<<<`` / ``=======`` / ``>>>>>>>``), which made the whole module
    unparseable.  The conflict is resolved in favour of the stashed version,
    which also logs the in-place replacement.
    """
    print('size: ', len(l))
    if len(l) == 0:
        return False
    if len(l) == 1:
        # Replace the single-element list with the element itself, in place.
        data[key] = l[0]
        print('!!!', key, ' was replaced ')
def size_tuple(t):
    """Print the tuple's size; return False for an empty tuple, None otherwise.

    BUG FIX: the original referenced an undefined name ``l`` instead of the
    parameter ``t``, raising NameError for every empty tuple.
    """
    print('size: ', len(t))
    if len(t) == 0:
        return False
# Script entry: load the JSON document and inspect every top-level entry.
with open('data.json', 'r') as data_file:
    data = json.load(data_file)

keys = data.keys()  # NOTE(review): unused below — looks like leftover scratch code
newd = {"tesas1t": "1"}  # NOTE(review): scratch dict, never used afterwards
newd.update({"tesast": "2"})
print(len(data.keys()))

# First pass: report types (size_list may collapse single-element lists in place).
for key in data.keys():
    check_type(data, key)
    #print(key, ' ',data[key], end = '\n\n')

# Second pass: re-inspect after any in-place replacements from the first pass.
for key in data.keys():
    check_type(data, key)
| true
|
d637cd6cf51c2e1dd3961b4623685133ab8e4d43
|
Python
|
sdwivedi19/inventory_management_system
|
/create_db1.py
|
UTF-8
| 1,721
| 2.59375
| 3
|
[
"CC0-1.0"
] |
permissive
|
import sqlite3
def create_db(db_path=r'pntb.db'):
    """Create the inventory-management SQLite schema if it does not already exist.

    Args:
        db_path: Path of the SQLite database file.  The default keeps the
            original hard-coded ``pntb.db`` for backward compatibility.

    Creates the employee, supplier, category, products and users tables,
    each with an autoincrementing integer primary key.
    """
    con = sqlite3.connect(database=db_path)
    try:
        cur = con.cursor()  # cursor used to execute the DDL statements
        cur.execute("CREATE TABLE IF NOT EXISTS employee(eid INTEGER PRIMARY KEY AUTOINCREMENT,"
                    "name TEXT,email TEXT,gender TEXT,contact TEXT,dob TEXT,doj TEXT,pass TEXT,utype TEXT,"
                    "address TEXT,salary TEXT)")
        cur.execute("CREATE TABLE IF NOT EXISTS supplier(invoice INTEGER PRIMARY KEY AUTOINCREMENT,"
                    "name TEXT,contact TEXT,description TEXT)")
        cur.execute("CREATE TABLE IF NOT EXISTS category(cid INTEGER PRIMARY KEY AUTOINCREMENT,name TEXT)")
        cur.execute("CREATE TABLE IF NOT EXISTS products(pid INTEGER PRIMARY KEY AUTOINCREMENT,"
                    "product TEXT UNIQUE,category TEXT ,supplier TEXT,price TEXT,quantity TEXT,status TEXT)")
        cur.execute("CREATE TABLE IF NOT EXISTS users(uid INTEGER PRIMARY KEY AUTOINCREMENT,"
                    "fname TEXT, lname TEXT, contact INT UNIQUE, email TEXT UNIQUE, securityque VARCHAR(100), securityans TEXT,"
                    "username TEXT UNIQUE, password TEXT)")
        # One commit suffices: all DDL ran in a single transaction (the
        # original committed after every statement).
        con.commit()
    finally:
        # BUG FIX: the original never closed the connection (resource leak).
        con.close()
# Allow running this module directly to (re)create the schema.
if __name__=="__main__":
    create_db()
| true
|
46116bd167fe6041f4ee907cf094144e5a3db487
|
Python
|
hugovk/mass_shoot_bot
|
/mass_shoot_bot.py
|
UTF-8
| 9,114
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
"""
There is a mass shooting on average every day in the United States.
Here are the shootings on this day last year.
https://twitter.com/mass_shoot_bot
"""
import argparse
import csv
import datetime
import os.path
import sys
import webbrowser
import inflect # pip install inflect
import twitter # pip install twitter
import yaml # pip install pyyaml
from dateutil.parser import parse # pip install python-dateutil
from dateutil.relativedelta import relativedelta
from pytz import timezone # pip install pytz
# from pprint import pprint
def timestamp():
    """Print the current date/time followed by this script's filename."""
    now = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
    print(f"{now} {__file__}")
def load_yaml(filename):
    """
    Load bot configuration from a YAML file.

    File should contain:
        consumer_key: TODO_ENTER_YOURS
        consumer_secret: TODO_ENTER_YOURS
        access_token: TODO_ENTER_YOURS
        access_token_secret: TODO_ENTER_YOURS
        wordnik_api_key: TODO_ENTER_YOURS

    Exits the process when any Twitter credential is missing; ensures the
    'last_shooting' cursor key exists (defaulting to None).
    """
    with open(filename) as fh:
        config = yaml.safe_load(fh)

    required = {
        "access_token",
        "access_token_secret",
        "consumer_key",
        "consumer_secret",
    }
    if not required.issubset(config.keys()):
        sys.exit("Twitter credentials missing from YAML: " + filename)

    config.setdefault("last_shooting", None)
    return config
def save_yaml(filename, data):
    """Serialize *data* to YAML (block style) and write it to *filename*."""
    serialized = yaml.dump(data, default_flow_style=False)
    with open(filename, "w") as yaml_file:
        yaml_file.write(serialized)
def tweet_it(string, credentials, image=None, location=None):
    """Tweet *string* (optionally with an image and a geo place) using *credentials*.

    No-op for an empty string.  Honours the module-level ``args.test`` dry-run
    flag and opens the resulting tweet in a browser unless ``args.no_web``.
    """
    if len(string) <= 0:
        return

    # Create and authorise an app with (read and) write access at:
    # https://dev.twitter.com/apps/new
    # Store credentials in YAML file
    auth = twitter.OAuth(
        credentials["access_token"],
        credentials["access_token_secret"],
        credentials["consumer_key"],
        credentials["consumer_secret"],
    )
    t = twitter.Twitter(auth=auth)

    # BUG FIX: place_id was only assigned inside the branch below, so the
    # 'if place_id:' checks later raised UnboundLocalError whenever no
    # location was given.
    place_id = None
    if location and not args.test:
        place_id = place_id_for_location(t, location)

    print("TWEETING THIS:\n" + string)

    if args.test:
        print("(Test mode, not actually tweeting)")
    else:
        if image:
            print("Upload image")
            # Send images along with your tweets.
            # First just read images from the web or from files the regular way
            with open(image, "rb") as imagefile:
                imagedata = imagefile.read()
            t_up = twitter.Twitter(domain="upload.twitter.com", auth=auth)
            id_img = t_up.media.upload(media=imagedata)["media_id_string"]
            if place_id:
                result = t.statuses.update(
                    status=string,
                    media_ids=id_img,
                    display_coordinates=True,
                    place_id=place_id,
                )
            else:
                result = t.statuses.update(status=string, media_ids=id_img)
        elif place_id:
            result = t.statuses.update(
                status=string, display_coordinates=True, place_id=place_id
            )
            print(place_id)
        else:
            result = t.statuses.update(status=string)

        url = (
            "http://twitter.com/"
            + result["user"]["screen_name"]
            + "/status/"
            + result["id_str"]
        )
        print("Tweeted:\n" + url)
        if not args.no_web:
            webbrowser.open(url, new=2)  # 2 = open in a new tab, if possible
def place_id_for_location(t, location):
    """Look up place_id from Twitter using a city/state info"""
    # https://dev.twitter.com/rest/reference/get/geo/search
    usa_container = "96683cc9126741d1"  # place_id of the USA
    response = t.geo.search(
        query=location,
        granularity="city",
        contained_within=usa_container,
        max_results=1,
    )
    place_id = response["result"]["places"][0]["id"]
    print("Location:", location)
    print("Place ID:", place_id)
    return place_id
def filename_for_year(year, version):
    """Build the CSV path for a given year and dataset version under args.csv."""
    filename = f"{year}{version}.csv"
    print("Filename:", filename)
    filename = os.path.join(args.csv, filename)
    print("Filename:", filename)
    return filename
def get_location(shooting):
    """
    Return a city-and-state style location for a shooting record.

    Old format CSV has a single Location field, e.g. "San Francisco, CA".
    New format CSV has State and "City Or County" fields instead; those are
    joined into the same "City, State" shape.  Street-level detail is not
    needed.
    """
    try:
        return shooting["Location"]
    except KeyError:
        return shooting["City Or County"] + ", " + shooting["State"]
def format_shooting(shooting):
    """Render one CSV row as a human-readable sentence with date and article link.

    Handles both the old ("Dead"/"Injured"/"Date") and the 2016
    ("# Killed"/"# Injured"/"Incident Date") column layouts.
    """
    try:
        dead = int(shooting["Dead"])
        injured = int(shooting["Injured"])
    except KeyError:  # 2016 format is different
        dead = int(shooting["# Killed"])
        injured = int(shooting["# Injured"])

    if dead > 0:
        d = p.number_to_words(dead, threshold=10)
        pd = p.plural("person", dead)
    if injured > 0:
        i = p.number_to_words(injured, threshold=10)
        pi = p.plural("person", injured)

    if dead > 0 and injured > 0:
        shot = f"{d} {pd} shot dead and {i} injured"
    elif dead > 0 and injured == 0:
        shot = f"{d} {pd} shot dead"
    elif dead == 0 and injured > 0:
        shot = f"{i} {pi} shot and injured"
    else:
        # BUG FIX: with zero dead and zero injured the original left 'shot'
        # unbound and raised UnboundLocalError; fall back to a neutral phrase.
        shot = "no one reported shot"

    location = get_location(shooting)

    try:
        date = shooting["Date"]
    except KeyError:  # 2016 format is different
        date = shooting["Incident Date"]

    text = f"{date}: {shot} in {location}"
    if "Article1" in shooting and shooting["Article1"] != "":
        text += " " + shooting["Article1"]
    return text
def massshooting():
    """Pick the next shooting from this day last year and format it for tweeting.

    Reads last year's CSV (MASTER, falling back to CURRENT), collects the rows
    whose date matches today-minus-one-year, and steps through them one call
    at a time using the module-level ``data['last_shooting']`` cursor (the
    caller persists ``data`` to YAML).  Returns (text, location), or
    (None, None) when nothing is left to tweet today.
    """
    pacific = timezone("US/Pacific")
    now = datetime.datetime.now(pacific)  # the bot's "today" is anchored to US/Pacific
    # now = eastern.localize(now)
    # TEMP TEST this year
    # now = now + relativedelta(years=1)
    # now = now + relativedelta(days=5)
    # TEMP TEST this year
    print("US/Pacific now:", now)
    last_year = str(now.year - 1)
    print("This year:", now.year)
    print("Last year:", last_year)
    this_day_last_year = now - relativedelta(years=1)
    print("this_day_last_year:", this_day_last_year)

    # Prefer the finalized MASTER dataset; fall back to the in-progress CURRENT one.
    filename = filename_for_year(last_year, "MASTER")
    if not os.path.isfile(filename):
        filename = filename_for_year(last_year, "CURRENT")

    with open(filename) as infile:
        reader = csv.DictReader(infile)
        # shootings = list(reader)
        todays_shootings = []
        for rownum, row in enumerate(reader):
            try:
                indate = parse(row["Date"])
            except KeyError:  # 2016 format is different
                indate = parse(row["Incident Date"])
            if indate.date() == this_day_last_year.date():
                todays_shootings.append(row)

    if not todays_shootings:
        print("No shootings today")
        return None, None

    # Already had one today?
    if data["last_shooting"] in todays_shootings:
        # Yes. Which one?
        already_today = todays_shootings.index(data["last_shooting"])
        # Which next?
        next_today = already_today + 1
        if next_today >= len(todays_shootings):
            print("No more shootings today")
            return None, None
        next_shooting = todays_shootings[next_today]
    else:
        print("This is the first today")
        next_shooting = todays_shootings[0]

    # Update YAML (advance the persistent cursor; saved to disk by the caller)
    data["last_shooting"] = next_shooting

    print("Next:", next_shooting)
    print("Next:", format_shooting(next_shooting))
    location = get_location(next_shooting)
    return format_shooting(next_shooting), location
if __name__ == "__main__":
    timestamp()

    # Command-line options: CSV data directory, credentials YAML path, and
    # flags controlling browser opening and dry-run mode.
    parser = argparse.ArgumentParser(
        description="There is a mass shooting on average every day in the United "
        "States. Here are the shootings on this day last year.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-c", "--csv", default="data/", help="Directory for CSV file")
    parser.add_argument(
        "-y",
        "--yaml",
        default="/Users/hugo/Dropbox/bin/data/mass_shoot_bot.yaml",
        help="YAML file location containing Twitter keys and secrets",
    )
    parser.add_argument(
        "-nw",
        "--no-web",
        action="store_true",
        help="Don't open a web browser to show the tweeted tweet",
    )
    parser.add_argument(
        "-x",
        "--test",
        action="store_true",
        help="Test mode: go through the motions but don't tweet anything",
    )
    args = parser.parse_args()

    # Module-level state read by the helper functions above.
    data = load_yaml(args.yaml)
    p = inflect.engine()

    tweet, location = massshooting()
    if tweet:
        tweet_it(tweet, data, location=location)
        if not args.test:
            # Persist the advanced 'last_shooting' cursor only after a real tweet.
            save_yaml(args.yaml, data)

# End of file
| true
|
09c2596c53eb41fe76800868ee347a1dc793a1bc
|
Python
|
Cloudxtreme/vpnease-l2tp
|
/src/python/codebay/l2tpserver/config/daemon.py
|
UTF-8
| 3,969
| 2.546875
| 3
|
[
"WTFPL"
] |
permissive
|
"""Configuration and start/stop wrapper for a system daemon."""
__docformat__ = 'epytext en'
import os, time
from codebay.common import logger
from codebay.l2tpserver import helpers
from codebay.l2tpserver import constants
from codebay.l2tpserver import runcommand
from codebay.l2tpserver import daemonstart
run_command = runcommand.run_command
class DaemonConfig:
    """L2TP system daemon configuration.

    Writes configuration files based on configuration root taken as input
    and takes care of stopping and starting of a specific daemon.

    Subclasses are expected to override create_config, start and *_stop methods
    as well as the optional post_start, pre_stop and get_args method.

    Subclasses are also required to define class variables 'name',
    'command', 'pidfile', 'cleanup_files'.
    """

    # overwrite in subclass
    name = None
    command = None
    pidfile = None

    # overwrite in subclass when required
    def get_args(self):
        """Extra command-line arguments for the daemon; None means none."""
        return None

    def get_name(self):
        """Return the daemon's name (the 'name' class variable)."""
        return self.name

    def __init__(self):
        self.configs = []
        self._log = logger.get(self.name + '-daemon')
        self.d = daemonstart.DaemonStart(self._log)

    def write_config(self):
        """Write all queued configuration files to disk."""
        for i in self.configs:
            # BUG FIX: the original used the Python 2 octal literal 0644,
            # which is a SyntaxError on Python 3; 0o644 is valid on 2.6+ and 3.
            mode = 0o644
            try:
                mode = i['mode']
            except Exception:  # narrowed from a bare 'except:' clause
                pass
            helpers.write_file(i['file'], i['cont'], perms=mode)

    def check_process(self):
        """Check existence of the daemon process using pidfile."""
        if self.pidfile is None:
            # XXX: we should warn here if/when all processes use pidfile
            # self._log.warning('check_process: no pidfile, checking based on process name')
            [rv, out, err] = run_command([constants.CMD_PIDOF, self.name])
            if rv != 0 or out is None:
                return False
            pids = out.split(' ')
            if len(pids) != 1:
                # zero or several processes with that name: ambiguous, treat as absent
                return False
            try:
                os.kill(int(pids[0]), 0)  # signal 0 = existence check only
            except OSError:
                return False
            return True

        try:
            if not os.path.exists(self.pidfile):
                self._log.warning('missing pidfile: %s, assume process exited' % self.pidfile)
                return False
            f = open(self.pidfile, 'rb')
            self._log.debug('check_process: fd=%s' % f.fileno())
            pid = int(f.read())
            f.close()
            os.kill(pid, 0)  # 0 = just check existence
        except OSError:
            return False
        except Exception:
            # Narrowed from a bare 'except:' clause, which would also have
            # swallowed KeyboardInterrupt/SystemExit.
            self._log.error('check_process failed unexpectedly')
            return False
        return True

    # implement in subclass
    def create_config(self, cfg, resinfo):
        raise Exception('not implemented')

    # overwrite in subclass when required
    def pre_start(self):
        pass

    def start(self):
        """Default daemon start."""
        self.d.start_daemon(command=self.command, pidfile=self.pidfile, args=self.get_args())

    def post_start(self, *args):
        pass

    def pre_stop(self):
        pass

    def soft_stop(self, silent=False):
        """Default soft stop daemon."""
        ret = self.d.stop_daemon(command=self.command, pidfile=self.pidfile)
        if ret != 0:
            if not silent:
                # XXX: if process was not started, this generates non-relevant
                # warning message. Override in specific daemon config to prevent
                # that.
                self._log.warning('Process soft stop failed: %d' % ret)
            else:
                self._log.debug('Process soft stop failed (silent): %d' % ret)

    def hard_stop(self):
        """Default hard stop daemon."""
        self.d.hard_stop_daemon(command=self.command, pidfile=self.pidfile)
        self.d.cleanup_daemon(pidfile=self.pidfile, cleanup_files=self.cleanup_files)

    def post_stop(self):
        pass
| true
|
0e15ea1b8090b6f8c069f652472021d1e0651d9c
|
Python
|
martinMutuma/ah-cli
|
/utils/files.py
|
UTF-8
| 1,616
| 2.875
| 3
|
[] |
no_license
|
import click
import json
import os
import random
import csv
import logging
def create_imports_folder():
    """Ensure the ./imports output directory exists.

    BUG FIX: the original passed the decimal literal 755 as the mode, which
    is 0o1363 in octal — not the intended rwxr-xr-x permissions (0o755).
    """
    if not os.path.isdir('./imports'):
        os.mkdir("./imports", 0o755)
def export_to_json(filename="ah_cli", data={}):
    """Write *data* as pretty-printed JSON under ./imports without clobbering.

    When the target file already exists, retry with a random numeric suffix.
    (The mutable default for *data* is kept for interface compatibility.)
    """
    create_imports_folder()
    target = "./imports/" + filename + '.json'
    if os.path.isfile(target):
        return export_to_json(filename + str(random.randint(1, 101)), data)
    click.secho("Exporting data to json file ...", fg="blue")
    with click.open_file(target, "w") as exportFile:
        exportFile.write(json.dumps(data, indent=4))
    click.secho("success open: {}".format(target), fg="green")
def export_to_csv(filename="ah_cli", data={}):
    """Write *data* (a dict or a list of dicts) as CSV under ./imports.

    Existing files are never overwritten: a random numeric suffix is appended
    and the export retried.  (The mutable default for *data* is kept for
    interface compatibility.)
    """
    create_imports_folder()
    target = "./imports/" + filename + '.csv'
    if os.path.isfile(target):
        return export_to_csv(filename + str(random.randint(1, 101)), data)
    click.secho("Exporting data to csv file ...", fg="blue")

    def _dump(headers, write_rows):
        # Shared writer setup for both the list and the dict case.
        with click.open_file(target, "w") as exportFile:
            writer = csv.DictWriter(exportFile, headers)
            writer.writeheader()
            write_rows(writer)

    if isinstance(data, list):
        try:
            _dump(data[0].keys(), lambda w: w.writerows(data))
        except Exception as error:
            logging.error(error)
    if isinstance(data, dict):
        _dump(data.keys(), lambda w: w.writerow(data))
    click.secho("success open: {}".format(target), fg="green")
| true
|
777239c6fe1afa6220808faf73a1ac9edeb674e6
|
Python
|
SBen-IV/TP3-Algo2
|
/grafo.py
|
UTF-8
| 2,301
| 2.984375
| 3
|
[] |
no_license
|
from random import choice
class Grafo:
    """Graph backed by an adjacency dictionary; supports directed and undirected modes."""

    def __init__(self, grafo_dirigido):
        self.dic_vertice = {}
        self.dirigido = grafo_dirigido

    def agregar_vertice(self, vertice):
        """Add a vertex; return False when it already exists."""
        if vertice in self.dic_vertice:
            return False
        self.dic_vertice[vertice] = {}
        return True

    def borrar_vertice(self, vertice):
        """Remove a vertex and every edge pointing at it; False when missing."""
        if vertice not in self.dic_vertice:
            return False
        del self.dic_vertice[vertice]
        for adyacencias in self.dic_vertice.values():
            adyacencias.pop(vertice, None)
        return True

    def adyacentes_vertice(self, vertice):
        """Return the list of neighbours, or None when the vertex is unknown."""
        if vertice not in self.dic_vertice:
            return None
        return list(self.dic_vertice[vertice])

    def agregar_arista(self, vertice_a, vertice_b, peso):
        """Add an edge with weight *peso* (both directions when undirected)."""
        if vertice_a not in self.dic_vertice or vertice_b not in self.dic_vertice:
            return False
        if vertice_b in self.dic_vertice[vertice_a]:
            return False
        self.dic_vertice[vertice_a][vertice_b] = peso
        if not self.dirigido:
            self.dic_vertice[vertice_b][vertice_a] = peso
        return True

    def borrar_arista(self, vertice_a, vertice_b):
        """Remove an edge and return its weight, or None when it does not exist."""
        if vertice_a not in self.dic_vertice or vertice_b not in self.dic_vertice:
            return None
        if vertice_b not in self.dic_vertice[vertice_a]:
            return None
        peso = self.dic_vertice[vertice_a].pop(vertice_b)
        if not self.dirigido:
            self.dic_vertice[vertice_b].pop(vertice_a)
        return peso

    def pertenece_vertice(self, vertice):
        """True when the vertex exists in the graph."""
        return vertice in self.dic_vertice

    def peso_arista(self, vertice_a, vertice_b):
        """Return an edge's weight, or None when the edge does not exist."""
        if vertice_a not in self.dic_vertice or vertice_b not in self.dic_vertice:
            return None
        if vertice_b not in self.dic_vertice[vertice_a]:
            return None
        return self.dic_vertice[vertice_a][vertice_b]

    def obtener_vertice_aleatorio(self):
        """Return a random vertex of the graph."""
        return choice(list(self.dic_vertice))

    def obtener_todos_vertices(self):
        """Return every vertex in a list."""
        return list(self.dic_vertice)

    def cantidad_vertice(self):
        """Return the number of vertices."""
        return len(self.dic_vertice)

    def __str__(self):
        return "{}".format(self.dic_vertice)
| true
|
f5ba1fc7fa6bc06e57a4b1174c90cf1e7d84fbdb
|
Python
|
dFoiler/a-password-manager
|
/JASocket/jasocket.py
|
UTF-8
| 2,147
| 3.484375
| 3
|
[] |
no_license
|
''' Just another socket '''
import json # loads, dumps
import socket # socket
import string # printable
class JASocket:
    ''' This is a simple socket wrapper class '''

    @staticmethod
    def is_printable(s):
        '''
        Determines if s contains printable characters

        Parameters
        ----------
        s : str
            String to test

        Returns
        -------
        bool, true iff s is printable
        '''
        # FIX: this was a plain function in the class body but is invoked as
        # JASocket.is_printable(...); @staticmethod makes that contract
        # explicit.  all() over a generator replaces the manual loop.
        return all(c in string.printable for c in s)

    def __init__(self, host, port, is_server=False, queuelength=5, sd=None):
        '''
        Parameters
        ----------
        host : str
            Name of host to connect to
        port : str
            Name of port of host to connect to
        is_server : bool, optional
            Whether or nor this is a server
        queuelength : int, optional
            Length of the queue
        sd : socket
            Socket to wrap around if provided
        '''
        self.is_server = is_server
        # Pass in a socket through client
        if sd:
            self.socket = sd
            return
        # Keep going
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if not is_server:
            self.socket.connect((host, port))
        else:
            self.socket.bind((host, port))
            self.socket.listen(queuelength)

    def accept(self):
        '''
        Function to accept connections
        '''
        if not self.is_server:
            raise Exception('client cannot accept')
        client, addr = self.socket.accept()
        return JASocket(None, None, sd=client), addr

    def send(self, message):
        '''
        Function sending along the socket

        Parameters
        ----------
        message : str
            Message to send
        '''
        if not isinstance(message, str):
            raise TypeError('can only send strings')
        if not JASocket.is_printable(message):
            raise Exception('message not printable')
        if len(message) >= 4096:
            raise Exception('message too long')
        self.socket.sendall(message.encode())

    def recv(self):
        '''
        Function to receive from the socket

        Returns
        -------
        str, the first 4096 characters of the received message
        '''
        message = self.socket.recv(4096).decode()
        if not JASocket.is_printable(message):
            raise Exception('message corrupted')
        return message

    def close(self):
        '''
        Function to close the connection
        '''
        self.socket.close()
| true
|
3e158338fb838fc448c342fc2babd96235478656
|
Python
|
afgane/slurmscale
|
/slurmscale/nodes/nodes.py
|
UTF-8
| 7,773
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
"""Represent and manage nodes of the target cluster."""
import re
from bunch import Bunch
import pyslurm
from .node import Node
from slurmscale.util.config_manager import ConfigManagerFactory
from slurmscale.util.provision_manager import ProvisionManagerFactory
import slurmscale as ss
import logging
log = logging.getLogger(__name__)
class Nodes(object):
    """A service object to inspect and manage worker nodes."""

    def __init__(self, provision_manager_name=None, config_manager_name=None):
        """
        Initialize manager names.

        Nodes are managed by a provision manager and a config manager. Supply
        the class names for the respective managers.

        :type provision_manager_name: ``str``
        :param: provision_manager_name: Class name for the manager to be used
                                        when provisioning nodes. Only
                                        ``JetstreamIUProvisionManager`` is
                                        supported at the moment.

        :type config_manager_name: ``str``
        :param config_manager_name: Class name for the manager to be used
                                    when provisioning nodes. Only
                                    ``GalaxyJetstreamIUConfigManager`` is
                                    supported at the moment.
        """
        # BUG FIX: the constructor arguments were accepted but silently
        # ignored; an explicitly supplied name now takes precedence over
        # the configured/default value.
        self._provision_manager_name = provision_manager_name or \
            ss.config.get_config_value(
                'provision_manager_name', 'JetstreamIUProvisionManager')
        self._config_manager_name = config_manager_name or \
            ss.config.get_config_value(
                'config_manager_name', 'GalaxyJetstreamIUConfigManager')
        self._provision_manager = ProvisionManagerFactory.get_provision_manger(
            self._provision_manager_name)
        self._config_manager = ConfigManagerFactory.get_config_manager(
            self._config_manager_name)

    @property
    def _nodes(self):
        """Fetch fresh data."""
        return pyslurm.node().get()

    def list(self, only_idle=False):
        """
        List the nodes available on the cluster.

        :type only_idle: ``bool``
        :param only_idle: If set, return only IDLE nodes.

        :rtype: ``list`` of :class:`.Node`
        :return: A list of ``Node`` objects.
        """
        slurm_nodes = self._nodes
        current_nodes = []
        for n in slurm_nodes:
            if only_idle:
                if slurm_nodes.get(n).get('state') == 'IDLE':
                    current_nodes.append(Node(slurm_nodes[n]))
            else:
                current_nodes.append(Node(slurm_nodes[n]))
        return current_nodes

    def get(self, name=None, ip=None):
        """
        Return a object representing the node identified by one of the args.

        It's necessary to supply only one argument. If both are supplied, the
        name takes precedence.

        :type name: ``str``
        :param name: Name of the node to try and get.

        :type ip: ``str``
        :param ip: IP address of the node to try and get.

        :rtype: object of :class:`.Node` or ``None``
        :return: An object representing the node, or None if a matching node
                 cannot be found.
        """
        for node in self.list():
            if name == node.name or ip == node.ip:
                return node
        return None

    def _next_node_name(self, prefix):
        """
        Get the next logical node name.

        The returned name will be based on the supplied prefix with the
        number incremented from the largest available suffix. For example, if
        the following is a current list of nodes: ``jetstream-iu-large[0-5]``,
        the method will return ``jetstream-iu-large6``.

        :type prefix: ``str``
        :param prefix: Common prefix for the name across existing nodes.

        :rtype: ``str``
        :return: The next logical name with the supplied prefix.
        """
        largest_suffix = 0
        for node in self.list():
            if prefix in node.name:
                suffix = re.sub('^{0}'.format(prefix), '', node.name)
                try:
                    suffix = int(suffix)
                    if suffix > largest_suffix:
                        largest_suffix = suffix
                except ValueError as e:
                    log.warn("Value error figuring out suffix {0} for node "
                             "{1}: {2}".format(suffix, node.name, e))
        # First node number starts at 0
        # NOTE(review): for any int, `largest_suffix or largest_suffix == 0`
        # is always True, so this always yields largest_suffix + 1 — when no
        # node exists yet, the first name ends in 1, not 0. Kept as-is to
        # preserve existing naming; confirm the intended behaviour.
        suffix = largest_suffix + 1 if largest_suffix or largest_suffix == 0 \
            else 0
        name = "{0}{1}".format(prefix, suffix)
        log.debug("Next node name: {0}".format(name))
        return name

    def add(self):
        """
        Add a new node into the cluster.

        This method will provision a new server from a cloud provider and
        configure it for use with the cluster.

        TODO:
        - Allow a number of nodes to be added in one request

        :rtype: object of :class:`.Node` or None
        :return: Return a handle to the new node that was added.
        """
        instance_name = self._next_node_name(
            prefix=ss.config.get_config_value('node_name_prefix',
                                              'jetstream-iu-large'))
        instance = self._provision_manager.create(instance_name=instance_name)
        inst = Bunch(name=instance.name, ip=instance.private_ips[0])
        ret_code, _ = self.configure(self.list() + [inst])
        if ret_code == 0:
            return self.get(name=instance_name)
        return None

    def remove(self, nodes, delete=True):
        """
        Remove nodes from the cluster.

        This will disable the specified nodes and terminate the underlying
        machine.

        :type nodes: list of :class:`.Node` or a single :class:`.Node` object
        :param nodes: Node(s) to remove from the cluster.

        :type delete: ``bool``
        :param delete: If ``True``, also delete VMs used by the removed nodes.

        :rtype: ``bool``
        :return: ``True`` if removal was successful.
        """
        log.debug("Removing nodes {0}".format(nodes))
        if not isinstance(nodes, list):
            nodes = [nodes]
        existing_nodes = set(self.list())
        keep_set = [node for node in existing_nodes if node not in nodes]
        delete_nodes = []  # Keep a copy (node info no longer available later)
        for node in nodes:
            delete_nodes.append(Bunch(name=node.name, ip=node.ip))
            node.disable(state=pyslurm.NODE_STATE_DOWN)
        ret_code, _ = self.configure(servers=keep_set)
        if ret_code == 0 and delete:
            log.debug("Reconfigured the cluster without node(s) {0}; deleting "
                      "the node(s) now.".format(nodes))
            self._provision_manager.delete(delete_nodes)
            return True
        return False

    def configure(self, servers):
        """
        (Re)configure the supplied servers as cluster nodes.

        This step will will run the configuration manager over the supplied
        servers and configure them into the current cluster.

        Note that the supplied list should contain any existing cluster nodes
        in addition to any new nodes. Only the supplied list of nodes will be
        configured as the cluster nodes.

        :type servers: list of objects with ``name`` and ``ip`` properties
        :param servers: A list of servers to configure. Each element of the
                        list must be an object (such as ``Node`` or ``Bunch``)
                        that has ``name`` and ``ip`` fields.

        :rtype: tuple of ``str``
        :return: A tuple with the process exit code and stdout.
        """
        return self._config_manager.configure(servers)
| true
|
7a87c43b79d27ff5e64235dbde04e233c93174cb
|
Python
|
Chadyka/python-projects
|
/4_muveletek/diamond.py
|
UTF-8
| 412
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
# coding: utf-8
def diamond(num):
    """Print a centered ASCII diamond of width *num*; reject even inputs."""
    if num % 2 == 0:
        print("Diamond failed! Input has to be an odd number.")
        return
    # Row widths grow 1, 3, ..., num, then shrink back down to 1.
    widths = list(range(1, num + 1, 2)) + list(range(num - 2, -1, -2))
    for width in widths:
        print(("*" * width).center(num))
def main():
    # Prompt the user and draw a diamond for the entered (odd) number.
    diamond(int(input("Give me an odd number: ")))

if __name__ == "__main__":
    main()
| true
|
332cecc09eca15451a3537ad26b803a39b030f42
|
Python
|
biglukefish/krystallion
|
/game.py
|
UTF-8
| 2,695
| 3.078125
| 3
|
[] |
no_license
|
import pygame
import pytmx
import characters
import platforms
import leveldata
"""
holds game object and level objects
"""
class Game(object):
    '''class for new instances of game'''

    def __init__(self):
        # Create level objects.  Delegates to go_to so the two code paths
        # cannot drift apart — the original duplicated the level-loading logic.
        self.go_to(0)
        self.krystal = characters.Krystal(self)

    def go_to(self, level):
        """Switch the game to the level with the given index."""
        self.current_level_number = level
        self.current_level = Level(leveldata.level_data[level])
class Scene(object):
    # Base class for displayable scenes (levels, menus); currently a stub.
    def __init__(self):
        pass
class Level(Scene):
    def __init__(self, level_data):
        """Build a playable level from a level_data dict (see leveldata.level_data).

        Loads the background image and looping music, builds collision
        rectangles from the Tiled (.tmx) map, and spawns enemy sprites at the
        coordinates listed in level_data.
        """
        super(Level, self).__init__()
        # Background image and looping background music.
        self.image = pygame.image.load(level_data['bg_image']).convert()
        self.level_rect = self.image.get_rect()
        pygame.mixer.music.load(level_data['bg_tunes'])
        pygame.mixer.music.set_endevent(pygame.constants.USEREVENT)
        pygame.mixer.music.play(loops=-1)
        self.tmx_file = level_data['tmx_file']
        # Spawn coordinates per enemy kind.
        self.bee_coords = level_data['bee_coords']
        self.vulture_coords = level_data['vulture_coords']
        # create mushrooms, each tile in background is 64x64 pixels.
        # params --> x, bottom, left bound, right bound
        self.shroom_coords = level_data['shroom_coords']
        # create collision rects for level and change them
        # into sprites
        self.terrain_rects = self.create_tmx_rects('Terrain', self.tmx_file)
        self.all_collision_rects = []
        # TODO can I delete the two lines below? Answer is NO
        for terrain in self.terrain_rects:
            self.all_collision_rects.append(terrain)
        self.platform_sprites = pygame.sprite.Group()
        for rect in self.terrain_rects:
            plat = platforms.Platforms(rect.x, rect.y, rect.width, rect.height)
            self.platform_sprites.add(plat)
        # initialize enemies
        self.bee_sprites = pygame.sprite.Group()
        self.shroom_sprites = pygame.sprite.Group()
        self.vulture_sprites = pygame.sprite.Group()
        for element in self.shroom_coords:
            shroom = characters.Shroom(element)
            self.shroom_sprites.add(shroom)
        for element in self.bee_coords:
            bee = characters.Bee(element)
            self.bee_sprites.add(bee)
        # Combined group for bulk update/draw of every enemy sprite.
        self.all_enemy_sprites = pygame.sprite.Group()
        self.all_enemy_sprites.add(
            self.bee_sprites, self.shroom_sprites,
            self.vulture_sprites
        )
        #create sprite group for dying sprites
        self.dead_sprites = pygame.sprite.Group()

    def create_tmx_rects(self, layer_name, level_map):
        '''create list of rectangles for use in collision.
        :param layer_name: string
        :param level_map: path of the .tmx map file to read
        :return: list of rect objects
        '''
        rectangles = []
        tiled_map = pytmx.TiledMap(level_map)
        group = tiled_map.get_layer_by_name(layer_name)
        for obj in group:
            objrect = pygame.Rect(obj.x, obj.y, obj.width, obj.height)
            rectangles.append(objrect)
        return rectangles
| true
|
1b1ec3d019933e0da8d10ee4ff164bd604318978
|
Python
|
RaskurSevenflame/masterneuralgaswithcnn
|
/self_organizing_maps/TrainSelfOrganizingMap.py
|
UTF-8
| 1,686
| 2.640625
| 3
|
[] |
no_license
|
import pickle
from self_organizing_maps.Base import Base
from errorcalculations.DistributedCrossEntropy import DistributedCrossEntropy
from errorcalculations.CrossEntropy import CrossEntropy
import numpy as np
from self_organizing_maps.growing_neural_gas.GNG import GNG
class TrainSelfOrganizingMap:
    @staticmethod
    def train_self_organizing_maps_algorithm(data, label, algorithm, x_axis_length, y_axis_length, number_of_iterations,
                                             start_learning_rate, start_radius_multiplikator, end_learning_rate,
                                             random_type, optimized, file_to_load, amount_of_differen_labels):
        """Train the SOM, report its distributed cross-entropy, and pickle its neuron weights.

        The trained neuron information is saved in ``saves/`` as a list of
        ``[[x, y], weights]`` entries so the map can be reloaded later.
        """
        base = Base(x_axis_length, y_axis_length, algorithm, data, number_of_iterations, start_learning_rate,
                    start_radius_multiplikator, end_learning_rate, random_type)
        neurons = base.train()

        error = DistributedCrossEntropy()
        error_value = error.measure_error(data, neurons, base, label, amount_of_differen_labels, False)
        print(algorithm.get_name() + " has an error value of: " + str(error_value) + " " + error.get_name())

        # Persist each neuron as [[x, y], weights].
        information = [[[neuron.x_axis_counter, neuron.y_axis_counter], neuron.weights]
                       for neuron in neurons]

        name_tag = "NeuronWeights" + "_Datasetsize" + str(len(data))
        if optimized:
            name_tag = name_tag + "optimized"
        file_name = file_to_load + algorithm.get_name() + name_tag
        # BUG FIX: the original passed an open() result straight to
        # pickle.dump and never closed the file handle.
        with open("saves/" + file_name, "wb") as handle:
            pickle.dump(information, handle)
| true
|
998649baa7285122e041cdaf4a5dfbe984bc7c86
|
Python
|
vishnuap/Algorithms
|
/Chapter-03-Arrays/Zip-It/Zip-It.py
|
UTF-8
| 1,449
| 5.125
| 5
|
[] |
no_license
|
# Chapter-3: Arrays
# Zip-It
# 1. Create a function that accepts two arrays and combines their values sequentially into a new array at alternating indices starting with the first array. Extra values of either array should be included afterwards. Given [1,2] and [10,20,30], return [1,10,2,20,30]
# 2. Combine the two arrays in the same way but in the first array instead of creating a new array
# Assume the arguments being passed are both arrays
# Assume use of built in functions (for doing this without builtin functions, use the approach from the Array-Insert-At solution earlier in this chapter)
# 1
def zipIt(arr1, arr2):
    """Interleave arr1 and arr2 into a new list, starting with arr1;
    leftover elements of the longer array are appended at the end."""
    merged = []
    for a, b in zip(arr1, arr2):
        merged.append(a)
        merged.append(b)
    shorter = min(len(arr1), len(arr2))
    merged.extend(arr1[shorter:])
    merged.extend(arr2[shorter:])
    return merged
# 2
def zipIt2(arr1, arr2):
    """Splice arr2's values into arr1 in place at alternating positions;
    any extra values from arr2 are appended to the end of arr1."""
    original_len = len(arr1)
    for offset, value in enumerate(arr2):
        if offset < original_len:
            arr1.insert(offset * 2 + 1, value)
        else:
            arr1.append(value)
# Demonstration: zip two sample arrays both ways, then in place.
myArr1 = [1, 2, 3, 4, 5]
myArr2 = [10, 20, 30, 40, 50]
# BUG FIX: the original called print(...).format(...), which prints the raw
# template and then calls .format on print()'s return value (None), raising
# AttributeError.  Format the string first, then print it.
print("The original arrays are {} and {}".format(myArr1, myArr2))
print("The zipped array is {}".format(zipIt(myArr1, myArr2)))
print("The zipped array is {}".format(zipIt(myArr2, myArr1)))
zipIt2(myArr1, myArr2)
print("The zipped array is {}".format(myArr1))
| true
|
2afee61559f61ea0344f2ae911d3df7aaf5881e5
|
Python
|
devwill77/Python
|
/Ex01.py
|
UTF-8
| 312
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
'''
CHALLENGE 01
- Write a Python script that reads a person's name and shows
  a welcome message based on the typed value.
'''
# Prompt for the name; the \033[...m sequences are ANSI colour codes.
nome = str(input('\033[36mQual o seu nome?\033[m '))
# Greet the user, rendering the typed name in bold blue.
print('\033[31mOlá\033[m {}{}{}\033[31m, muito prazer em te conhecer!\033[m'.format('\033[1;34m', nome, '\033[m'))
| true
|
80e0fce75467513da62c7ee9a65ce8761a5cb7dc
|
Python
|
smckay/TicTacToe
|
/Player.py
|
UTF-8
| 641
| 3.15625
| 3
|
[] |
no_license
|
class Player:
    """A connected participant: identity, connection details, match
    availability, and the character (mark) they play with."""

    def __init__(self, user_id, arrival_time, address, char):
        self.user_id = user_id
        self.arrival_time = arrival_time
        self.address = address
        self.char = char
        self.status = "Available"  # every new player starts out available

    def get_user_id(self):
        """Return the player's identifier."""
        return self.user_id

    def get_arrival_time(self):
        """Return when the player arrived."""
        return self.arrival_time

    def get_address(self):
        """Return the player's network address."""
        return self.address

    def get_status(self):
        """Return the current availability status."""
        return self.status

    def set_status(self, status):
        """Replace the availability status."""
        self.status = status

    def get_char(self):
        """Return the character the player uses on the board."""
        return self.char

    def set_char(self, char):
        """Assign a new character to the player."""
        self.char = char
| true
|
3eeb4b146a8459dd2b18e27882f883284382c92f
|
Python
|
EEExphon/Basic_Codes_for_Python
|
/LIST/Add element.py
|
UTF-8
| 289
| 3.96875
| 4
|
[] |
no_license
|
# Build a list interactively, then print every element with its index.
print("Make a list!")
BB = []
YON = "y"
while YON == "y":
    YU = input("Add an element:")
    BB.append(YU)
    YON = input("Enter 'y' in order to add a new element.")
input("(press enter to look at the whole list)")
# BUG FIX: the original printed BB[i-1], so index 0 showed the LAST element
# (Python's index -1) and every other row was shifted by one.
for i, element in enumerate(BB):
    print(i, "---", element)
| true
|
08f8da6091bab58d711878d3ab166ce8a1bf479d
|
Python
|
souzajunior/URI
|
/uri - 1235.py
|
UTF-8
| 1,190
| 3.375
| 3
|
[] |
no_license
|
# URI 1235: for each test case, reverse the left half and the right half of
# the input string independently and print the result.
N = int(input())  # number of test cases
for i in range(N):
    entrada = input()
    if (len(entrada) % 2 == 0):
        # Even length: left half is chars [0, mid), right half [mid, end);
        # the extended slices take each half already reversed.
        primeira_parte = entrada[len(entrada)//2 -1::-1]
        segunda_parte = entrada[:len(entrada)//2 -1:-1]
    else:
        # Odd length: the middle character stays with the first half.
        primeira_parte = entrada[len(entrada)//2::-1]
        segunda_parte = entrada[:len(entrada)//2:-1]
    # Re-glue the word that was broken at the halfway point: the last word
    # of the reversed left half is prefixed onto the first word of the
    # reversed right half.
    # NOTE(review): this assumes both halves split into at least one word
    # (non-empty, not all whitespace) -- confirm against the judge's input.
    primeira_parte = primeira_parte.split()
    segunda_parte = segunda_parte.split()
    segunda_parte[0] = primeira_parte[-1] + segunda_parte[0]
    del primeira_parte[-1]
    if ((len(primeira_parte) > 1) and (len(segunda_parte) > 1)):
        # multiple words on both sides: keep a separating space between halves
        primeira_parte = ' '.join(primeira_parte)
        primeira_parte += ' '
        segunda_parte = ' '.join(segunda_parte)
    else:
        primeira_parte = ' '.join(primeira_parte)
        segunda_parte = ' '.join(segunda_parte)
    resultado_final = primeira_parte + segunda_parte
    print(resultado_final)
'''
N = int(input())
for h in range(N):
    Entrada = input()
    Meio = len(Entrada)//2
    Fim = len(Entrada)
    String = ""
    for i in reversed(range(Meio)):
        String += Entrada[i]
    for j in reversed(range(Meio, Fim)):
        String += Entrada[j]
    print(String)
'''
| true
|
a3a454a15f2116e23eb438c4340a16fa3b873997
|
Python
|
chiubor/VPhysics
|
/VPhthon進階練習/03_2_simple_projectile_spring.py
|
UTF-8
| 1,469
| 2.875
| 3
|
[] |
no_license
|
from visual import *

# Two spring-coupled balls plus an independent free projectile (ball3),
# all launched under gravity above a floor, rendered with VPython.
size = 0.2
scene = display(center = vector(0, 4, 0), background = vector(0.5, 0.5, 0))
ball1 = sphere(radius = size, color = color.red, make_trail = True)
ball2 = sphere(radius = size, color = color.blue, make_trail = True)
ball3 = sphere(radius = size, color = color.yellow, make_trail = True)
cubec = box(length = 0.32, width = 0.32, height = 0.32, color= color.green, make_trail = True)
ball1.pos = vector(-5, size, 0)
ball2.pos = vector(-6, size, 0)
ball3.pos = (ball1.pos + ball2.pos)/2.0
cubec.pos = (ball1.pos + ball2.pos)/2.0
floor = box(length=12, width = 10, height = 0.1)
# Spring connecting ball1 and ball2; L is its natural (rest) length.
spring = helix(radius = 0.1, coils = 10, thickness = 0.05)
spring.pos = ball1.pos
spring.axis = ball2.pos - ball1.pos
spring.L = abs(spring.axis)
spring.k = 100000
ball1.m = 1
ball2.m = 1
all_ball = [ball1, ball2, ball3]
dt = 0.001
g = vector(0, -9.8, 0)
ball1.v = vector(4, 12, 0)
ball2.v = vector(10, 4, 0)
ball3.v = vector(7, 8, 0)
# Free projectile: integrate until it comes down to the floor.
while ball3.y >= size:
    rate(500)
    ball3.v = ball3.v + g*dt
    ball3.pos = ball3.pos + ball3.v*dt
# Coupled pair: Hooke's-law force along the spring axis; the two balls feel
# equal and opposite forces (Newton's third law).
while ball1.y >= size and ball2.y >= size:
    rate(100)
    spring.pos = ball1.pos
    spring.axis = ball2.pos - ball1.pos
    F = - spring.k * (abs(spring.axis)-spring.L) * spring.axis.norm()
    ball1.v = ball1.v + g*dt - F / ball1.m * dt
    ball1.pos = ball1.pos + ball1.v*dt
    # BUG FIX: the original divided by ball1.m here; the reaction force must
    # be divided by ball2's own mass (identical values today, but wrong as
    # soon as the two masses differ).
    ball2.v = ball2.v + g*dt + F / ball2.m * dt
    ball2.pos = ball2.pos + ball2.v*dt
    cubec.pos = (ball1.pos + ball2.pos)/2.0
| true
|
9e1e7525e9e9f9eb614430282ece159e10bd843d
|
Python
|
kushanjanith/Python-Exercises-from-www.practicepython.org-website
|
/Exercises/04Divisors.py
|
UTF-8
| 162
| 3.609375
| 4
|
[] |
no_license
|
import math

num = int(input("Number: "))
# Collect every proper divisor of num (every divisor other than num itself).
# BUG FIX: the original iterated range(1, math.floor(num / 2)), which stops
# one short and misses num // 2 (e.g. 5 is a divisor of 10 but was skipped).
# Also renamed the result so it no longer shadows the builtin `list`.
divisors = []
for i in range(1, num // 2 + 1):
    if num % i == 0:
        divisors.append(i)
print(divisors)
| true
|
573d430a0fd110321109d73d100dcf8c3ead2511
|
Python
|
akshay-bhagdikar/FLASK_REST_API
|
/Create_DB.py
|
UTF-8
| 3,133
| 2.96875
| 3
|
[] |
no_license
|
## Created by: Akshay Bhagdikar
## Date modified: 11/02/2018
## Application to create a database and tables if they do not exist
import mysql.connector
from mysql.connector import errorcode
def create_connection_cursor(host='data-challenge.cqc9xz3gmhnl.us-west-2.rds.amazonaws.com',
                             port=3306, user='bhagdikara', password='maxocoil12'):
    """Connect to the remote MySQL service and return (connection, cursor).

    BUG FIX: the default host contained a space ("us-west 2"), which is not
    a valid hostname; AWS region segments are hyphenated ("us-west-2").
    """
    cnx = mysql.connector.connect(host=host, port=port,
                                  user=user, password=password)
    cursor = cnx.cursor()
    return cnx, cursor
def create_database(cursor, DB_NAME):
    """Create database DB_NAME with the utf8 charset.

    On failure the MySQL error is printed rather than propagated.
    """
    statement = "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME)
    try:
        cursor.execute(statement)
    except mysql.connector.Error as err:
        print("Failed creating database: {}".format(err))
def check_and_execute_creation(cursor, cnx, DB_NAME):
    """Switch to DB_NAME, creating the database first if it does not exist.

    Always closes the cursor and connection before returning, so callers
    must open a fresh connection afterwards.  Removed the original's no-op
    `DB_NAME = DB_NAME` self-assignment.
    """
    try:
        cursor.execute("USE {}".format(DB_NAME))
    except mysql.connector.Error as err:
        print("Database {} does not exists.".format(DB_NAME))
        if err.errno == errorcode.ER_BAD_DB_ERROR:
            create_database(cursor, DB_NAME)
            print("Database {} created successfully.".format(DB_NAME))
        else:
            print("Database creation unsuccessful: {}".format(err))
    finally:
        cursor.close()
        cnx.close()
def create_table(tables, cursor, cnx, DB_NAME):
    """Create every table described in `tables` inside database DB_NAME.

    `tables` maps table names to complete CREATE TABLE statements.  Tables
    that already exist are reported and skipped.  The connection and cursor
    are always closed before returning.
    """
    try:
        cursor.execute("USE {}".format(DB_NAME))
        for table_name in tables:
            table_description = tables[table_name]
            try:
                print("Creating table {}: ".format(table_name), end='')
                cursor.execute(table_description)
            except mysql.connector.Error as err:
                if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
                    print("already exists.")
                else:
                    print(err.msg)
            else:
                print("Table {} successfully created".format(table_name))
    except mysql.connector.Error as err:
        print("Failed to connect to the database: {}".format(err))
    finally:
        # BUG FIX: close the cursor before its connection; the original
        # closed cnx first and then closed a cursor on a dead connection.
        cursor.close()
        cnx.close()
# DDL for every table to create: keys are table names, values are the
# complete CREATE TABLE statements.
tables = {}
tables['transactions_table'] = (
    "CREATE TABLE `transactions_table` ("
    " `row_id` int(6) NOT NULL AUTO_INCREMENT,"
    " `user` VARCHAR(5) NOT NULL,"
    " `transaction_date` date,"
    " `sales_amount` DECIMAL(6,2),"
    " `joining_date` date,"
    " `region` CHAR(1),"
    " PRIMARY KEY (`row_id`)"
    ") ENGINE=InnoDB")
DB_NAME = 'transactions'
# First connection: ensure the database exists (this closes the connection).
cnx,cursor = create_connection_cursor()
check_and_execute_creation(cursor,cnx,DB_NAME)
# Second connection: create the tables inside the (possibly new) database.
cnx,cursor = create_connection_cursor()
create_table(tables, cursor,cnx, DB_NAME)
| true
|
b6c79d5fd799e24a60c065010f4cd2e9426bcf96
|
Python
|
chenfeng125078/Test
|
/work_tips/tensorflow2.1固化模型以及c++上预测/jsonToImg.py
|
UTF-8
| 1,927
| 2.765625
| 3
|
[] |
no_license
|
import numpy as np
import json
import os
import sys
import glob
import cv2
def clip_image(current_image, x1, x2, y1, y2, image_number):
    """Crop rows [x1, x2) and columns [y1, y2) from the image file at
    `current_image` and save the crop as "<image_number>.bmp" in the
    working directory."""
    img = cv2.imread(current_image)
    if img is None:
        # ROBUSTNESS FIX: cv2.imread returns None for a missing/unreadable
        # file; the original then crashed on the slice below.
        print("can not write")
        return
    cut_img = img[x1:x2, y1:y2, :]
    try:
        cv2.imwrite("%s.bmp" % image_number, cut_img)
    except cv2.error:
        # narrowed from a bare `except` so unrelated bugs are not swallowed
        print("can not write")
    # cv2.imshow("image", img)
    # cv2.waitKey(0)
if __name__ == '__main__':
    # Walk every labelme JSON annotation in ./1_json and crop the annotated
    # rectangle out of the matching .bmp image in ./1.
    json_dir = "1_json"
    base_dir = os.path.join("./", json_dir)
    images = glob.glob(os.path.join(base_dir, "*.json"))
    # print(images)
    for item in images[:]:
        # print(item)
        # matching .bmp file name (stem of the JSON file)
        image_number = item.split("\\")[-1].split(".")[0]
        if len(image_number) >= 2:
            image_number_source = image_number[:-1]
        # folder holding the matching image
        # NOTE(review): if len(image_number) < 2, image_number_source is
        # never assigned and the elif below raises NameError -- confirm
        # that stems are always at least two characters long.
        if os.path.exists(os.path.join("./1", "%s.bmp" % image_number)):
            current_image = os.path.join("./1", "%s.bmp" % image_number)
        elif os.path.exists(os.path.join("./1", "%s.bmp" % image_number_source)):
            current_image = os.path.join("./1", "%s.bmp" % image_number_source)
        else:
            continue
        # print(current_image)
        with open(item, "r") as f:
            data = json.load(f)
        # print(data)
        # first labelled shape: two corner points of the crop rectangle
        point_1 = data["shapes"][0]["points"][0]
        point_2 = data["shapes"][0]["points"][1]
        x1, y1 = point_1[0], point_1[1]
        x2, y2 = point_2[0], point_2[1]
        # order the corners so (x1, y1) <= (x2, y2) componentwise
        if x1 < x2:
            pass
        else:
            x1, x2 = x2, x1
        if y1 < y2:
            pass
        else:
            y1, y2 = y2, y1
        # shrink to whole pixels fully inside the annotated box
        x1, y1 = int(np.ceil(x1)), int(np.ceil(y1))
        x2, y2 = int(np.floor(x2)), int(np.floor(y2))
        print(x1, y1, x2, y2)
        # NOTE(review): clip_image slices img[x1:x2, y1:y2]; OpenCV indexes
        # rows first, so x/y may be transposed relative to labelme's
        # (x = column) convention -- verify the crops look right.
        clip_image(current_image, x1, x2, y1, y2, image_number)
        # print(point_1, point_2)
| true
|
669de0fe16ba945a67922f60e30ac9c2cf213ab2
|
Python
|
IIioneR/VM_HM_14
|
/VM_HM_9.py
|
UTF-8
| 3,463
| 3.921875
| 4
|
[] |
no_license
|
import random
class Fighter(object):
    """A combatant with health, armour, power and wearable equipment.

    ``health`` (property) is the max-clamped base health restored by
    heal(); ``currenthealth`` is the running value decremented during a
    Fight.
    """
    max_health = 100
    max_armor = 50

    def __init__(self, name, health, armour, power):
        self.name = name
        self.__health = health
        self.armour = armour
        self.power = power
        self.currenthealth = health
        self.equipments = []

    def put_on(self, equipment):
        """Wear `equipment`, adding its bonuses.

        Raises Exception if it is already worn.
        """
        if equipment in self.equipments:
            raise Exception
        self.armour = self.armour + equipment.armour
        self.power = self.power + equipment.power
        self.equipments.append(equipment)

    def remove(self, equipment):
        """Take off worn `equipment`, subtracting its bonuses.

        Raises Exception if it is not currently worn.
        """
        if equipment in self.equipments:
            self.armour = self.armour - equipment.armour
            self.power = self.power - equipment.power
            self.equipments.remove(equipment)
        else:
            raise Exception

    def get_health(self):
        return self.__health

    def set_health(self, value):
        # BUG FIX: the original only acted when value exceeded max_health,
        # so ordinary assignments (e.g. fighter.health = 40) were silently
        # ignored.  Store the value, clamped to the class maximum.
        self.__health = min(value, self.max_health)

    health = property(get_health, set_health)

    def get_armor(self):
        # BUG FIX: the original read self.__armor, which is never
        # initialised (__init__ stores self.armour), so every read raised
        # AttributeError.  Delegate to the real attribute instead.
        return self.armour

    def set_armor(self, value):
        # BUG FIX: clamp and actually store the value (see set_health).
        self.armour = min(value, self.max_armor)

    armor = property(get_armor, set_armor)

    def heal(self):
        """Restore base health to the class maximum and return it."""
        self.__health = self.max_health
        return self.__health
class Equipment(object):
    """A wearable item granting (possibly negative) power/armour bonuses."""

    def __init__(self, _type, power, armour):
        self._type, self.power, self.armour = _type, power, armour
class Fight(object):
    """Runs a random battle between two fighters."""

    @staticmethod
    def fight(first_fighter, second_fighter):
        """Trade random blows until one fighter's currenthealth drops to
        zero or below; return the surviving fighter."""
        while first_fighter.currenthealth > 0 and second_fighter.currenthealth > 0:
            # random battle: a coin flip decides who takes the hit
            if random.choice([0, 1]) == 0:
                first_fighter.currenthealth -= second_fighter.power
            else:
                second_fighter.currenthealth -= first_fighter.power
        if first_fighter.currenthealth > 0:
            return first_fighter
        return second_fighter
class FighterFirstRang(Fighter):
    # highest rank: fighters may keep at most two pieces of equipment
    max_health = 100
    rang_f = 0  # rank tag compared before a fight is allowed

    def put_on(self, equipment):
        """Wear equipment, evicting the oldest piece once two are worn."""
        if len(self.equipments) >= 2:
            # NOTE(review): appending here puts the item into
            # self.equipments *before* super().put_on() runs, and the
            # parent raises Exception for already-worn items -- so this
            # eviction branch always ends in an exception; confirm intent.
            del self.equipments[0]
            self.equipments.append(equipment)
        super().put_on(equipment)
class FighterSecondRang(Fighter):
    # middle rank: same eviction policy, triggered above one worn item
    rang_f = 1  # rank tag compared before a fight is allowed
    max_health = 100

    def put_on(self, equipment):
        """Wear equipment, evicting the oldest piece once more than one is worn."""
        if len(self.equipments) > 1:
            # NOTE(review): the append below happens before super().put_on(),
            # whose already-worn check then raises Exception -- this eviction
            # branch therefore always ends in an exception; confirm intent.
            del self.equipments[0]
            self.equipments.append(equipment)
        super().put_on(equipment)
class FighterThirtRang(Fighter):
    # lowest rank: reduced armour cap and no equipment allowed
    rang_f = 2  # rank tag compared before a fight is allowed
    max_health = 100
    max_armor = 10

    def put_on(self, equipment):
        """Third-rank fighters may not wear equipment."""
        raise Exception("low rang")
# Build two rank-0 fighters and let them battle if their ranks match.
fighter_1 = FighterFirstRang("Fighter 1", 100, 30, 30)
fighter_2 = FighterFirstRang("Fighter 2", 100, 30, 30)
if fighter_1.rang_f == fighter_2.rang_f:  # check that the ranks match
    shield = Equipment('shield', -3, 2)
    sword = Equipment('sword', 10, -5)  # NOTE(review): created but never worn
    fighter_1.put_on(shield)
    fighter_2.put_on(shield)
    fight = Fight()
    winner = fight.fight(fighter_1, fighter_2)
    print(winner.name)
else:
    raise Exception("differents rangs")

# Demonstrate healing: start below max health, then restore to the cap.
fighter_1 = FighterFirstRang("Fighter 1", 10, 30, 30)
print(fighter_1.health)
fighter_1.heal()  # heal the fighter back to max_health
print(fighter_1.health)
| true
|
18c0206e8b9f754ceb8e6bf718bd834cb991b136
|
Python
|
jeMATHfischer/Bayesian_Data_Assimilation
|
/Sheet8Ex1.py
|
UTF-8
| 791
| 2.578125
| 3
|
[] |
no_license
|
import numpy as np
def likilihood(z):
    """Unnormalised Gaussian likelihood exp(-(1 - z)^2 / 2) of state z."""
    return np.exp(-0.5 * (1 - z) ** 2)
def normalizer(L, v):
    """Return 1 / sum(L @ v): the normalising constant for the weights."""
    total = np.dot(L, v).sum()
    return 1 / total
def resampler(M, pi):
    """Draw M multinomial samples from the weight vector `pi` and return
    the empirical (normalised) distribution of the draws.

    BUG FIX: the original picked the index as (pi.cumsum() > u).sum(),
    which counts bins from the wrong end and indexes out of range whenever
    u falls below the first cumulative weight.  Inverse-CDF sampling needs
    the number of cumulative weights *below* u.
    """
    counts = np.zeros(len(pi))
    cdf = pi.cumsum()
    for _ in range(M):
        u = np.random.rand()
        ind = (cdf < u).sum()
        counts[ind] += 1
    return counts / counts.sum()
# Three-state model: observation grid z, symmetric transition matrix P,
# initial distribution p0 (all mass on the third state), and the diagonal
# likelihood matrix L built from the observation model.
z = np.array([1,2,3])
P = np.array([[1/2,1/4,1/4],[1/4,1/2,1/4],[1/4,1/4,1/2]])
p0 = np.array([0,0,1])
L = np.diag(likilihood(z))
def exact_filter(P, L, p):
    """One exact filtering step: predict with P, then weight with L."""
    predicted = np.dot(P, p)
    return np.dot(L, predicted)
def sequential_MC(P, L, M, p):
    """One sequential-Monte-Carlo step: predict with P, resample M
    particles from the prediction, then weight with the likelihood L."""
    prediction = np.dot(P, p)
    return np.dot(L, resampler(M, prediction))
# Candidate particle counts; run 100 SMC filter steps with M = 10.
M = [10,100,1000]
p_seq = p0
for i in range(100):
    # re-normalise the weighted vector (via L * normalizer) before each step
    p_seq = sequential_MC(P,L*normalizer(L,p_seq), M[0], p_seq)
| true
|
e0f669e4a2da9c192a03d0e2608950068e5804fa
|
Python
|
luoxc613/dronedeploy
|
/dronedeploy.py
|
UTF-8
| 2,768
| 2.71875
| 3
|
[] |
no_license
|
import math
import cv2
import numpy as num

# Load the calibration-pattern photo and normalise it to a fixed size.
imgpath = 'Camera Localization/IMG_6726.jpg'
img = cv2.imread(imgpath)
print("Processing image: ", imgpath)
img = cv2.resize(img, (500, 900))
# Threshold to a binary image and extract all contours.
# NOTE(review): the 3-value findContours return is the OpenCV 3.x API;
# OpenCV 4 returns only (contours, hierarchy).
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
re, threshold = cv2.threshold(imgray, 150, 255, cv2.THRESH_BINARY)
contourimage, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
cv2.imshow("contoured image", img)
cv2.imwrite("process/during.PNG", img)
# The pattern is taken to be the largest contour in the frame.
areas = [cv2.contourArea(contour) for contour in contours]
max_area_index = num.argmax(areas)
patterncontour = contours[max_area_index]
# Count how many contours share each integer centroid.
dirc = {}
for c in contours:
    value = cv2.moments(c)
    if value["m00"] == 0:
        continue  # degenerate contour: zero area, centroid undefined
    cX = int(value["m10"] / value["m00"])
    cY = int(value["m01"] / value["m00"])
    if dirc.get((cX, cY), 0):
        dirc[(cX, cY)] += 1
    else:
        dirc[(cX, cY)] = 1
# Bucket the centroids by multiplicity (3 and 2 overlapping contours).
three_hits = []
two_hits = []
for key in dirc:
    if dirc[key] == 3:
        three_hits.append(key)
    if dirc[key] == 2:
        two_hits.append(key)
# NOTE(review): all three branches below are empty placeholders.
if len(three_hits) == 3:
    pass
elif len(three_hits) == 2:
    pass
else:
    pass
# Fit the minimum-area rotated rectangle around the pattern contour.
rect = cv2.minAreaRect(patterncontour)
box = cv2.boxPoints(rect)
box = num.int0(box)
cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
rotatedangle = rect[2]
print("Rotation Angle : {0:.2f} ".format(rotatedangle))
def mid_point(point_X, point_Y):
    """Return the midpoint of two 2-D points as an [x, y] list."""
    mx = (point_X[0] + point_Y[0]) / 2
    my = (point_X[1] + point_Y[1]) / 2
    return [mx, my]
def distance(point_X, point_Y):
    """Return the Euclidean distance between two 2-D points."""
    dx = point_X[0] - point_Y[0]
    dy = point_X[1] - point_Y[1]
    return math.sqrt(dx ** 2 + dy ** 2)
# Distance from the first pattern-contour point to the nearest box corner.
# NOTE(review): the label "Degree" is misleading -- this is a pixel distance.
min_dist = 1000
for point in box:
    temp_dist = distance(patterncontour[0][0], point)
    min_dist = min(min_dist, temp_dist)
print("Degree : {0:.2f} ".format(min_dist))

# Midpoints of the four box edges give the box's two axis lengths.
P = box[0]
Q = box[1]
R = box[2]
S = box[3]
P_Q = mid_point(P, Q)
R_S = mid_point(R, S)
P_S = mid_point(P, S)
Q_R = mid_point(Q, R)
PS_QR_dist = distance(P_S, Q_R)
PQ_RS_dist = distance(P_Q, R_S)
# BUG FIX: both branches of the original assigned PS_QR_dist to BOTH width
# and height, so PQ_RS_dist was computed but never used.  The reference
# pattern is taller than it is wide (ratio constants 300x600 below), so
# take the longer measured axis as the height and the shorter as the width.
if PS_QR_dist > PQ_RS_dist:
    height = PS_QR_dist
    width = PQ_RS_dist
else:
    height = PQ_RS_dist
    width = PS_QR_dist
# Pixel sizes of the pattern when photographed from 1 ft and 2 ft away.
width_ratio1 = 300
height_ratio1 = 600
width_ratio2 = 130
height_ratio2 = 250
one_foot_height = 1 / (height / height_ratio1)
one_foot_width = 1 / (width / width_ratio1)
two_foot_height = 2 / (height / height_ratio2)
two_foot_width = 2 / (width / width_ratio2)
# Average the four independent estimates of the camera's distance.
distance_away = (one_foot_width + one_foot_height + two_foot_width + two_foot_height) / 4
print("Image move {0:.2f} feet ".format(distance_away, min_dist, rotatedangle))
cv2.drawContours(img, contours, max_area_index, (0, 125, 0), 3)
cv2.imshow("result image", img)
cv2.imwrite("process/result.PNG", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
174b143960437085286e1536ca3b577d78c8bcd7
|
Python
|
ruzhaa/SDA_SI_2015
|
/project/IndianaJones/validation.py
|
UTF-8
| 662
| 2.96875
| 3
|
[] |
no_license
|
def validation_max_weight(number):
    """Raise TypeError("Error type!") unless `number` converts to an int.

    Returns None when the value is acceptable.
    """
    try:
        # IDIOM FIX: the original wrote `number == int(number)` and threw
        # away the comparison; only the int() conversion matters here.
        int(number)
    except (TypeError, ValueError):
        raise TypeError("Error type!")
def validation_commands(split_text):
    """Validate an "<item> <weight> <value>" command split into tokens.

    Returns True for the exit command; returns None for a valid triple.
    Raises Exception on a wrong token count, and TypeError when the item
    is purely numeric or weight/value are not integers.
    """
    if split_text[0] == "exit" or split_text[0] == "EXIT":
        return True
    if len(split_text) != 3:
        raise Exception("Your input is not validation!")
    item, weight, value = split_text
    if item.isdigit():
        raise TypeError("Error type!")
    try:
        # IDIOM FIX: the original compared each token against its converted
        # copy and discarded the result; converting is all that is needed
        # to surface bad numeric tokens (str(item) could never raise).
        int(weight)
        int(value)
    except (TypeError, ValueError):
        raise TypeError("Error type!")
| true
|
5acd369ebd03515411da37111f7d6807f225ffd2
|
Python
|
ryansalsbury1/NCAA-Tournament-Modeling
|
/Pre_Tournament_Data_Scrape.py
|
UTF-8
| 50,928
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:11:27 2020
@author: ryansalsbury
"""
#import libraries
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import pandas as pd
import re
########## get all schools ##########
# Scrape the 2020 school-stats index and build {url_slug: display_name}.
url = "https://www.sports-reference.com/cbb/seasons/" + str(2020) + "-school-stats.html"
page = urlopen(url).read()
#print(page)
soup = BeautifulSoup(page)
count = 0
table = soup.find("tbody")
school_dict = dict()
for row in table.findAll('td', {"data-stat": "school_name"}):
    school_name = row.getText()
    for a in row.find_all('a', href=True):
        link = a['href'].strip()
        # href looks like "/cbb/schools/<slug>/..."; slice off the prefix
        name = link[13:].split("/")[0]
        school_dict[name] = school_name
########## get rosters and total player stats for each season ##########
# BUG FIX: bs4's Comment class is used below to pull the totals table out
# of a commented-out HTML block, but it was never imported -- the original
# raised NameError at runtime.
from bs4 import Comment

roster_df_all=pd.DataFrame()
totals_df_all=pd.DataFrame()
season = ['2019']
for s in season:
    for school in school_dict:
        url = "https://www.sports-reference.com/cbb/schools/" + school + "/" + str(s) + ".html"
        try:
            urllib2.urlopen(url)
        except HTTPError as err:
            if err.code == 404:
                continue  # this school has no page for the season
        page = urlopen(url).read()
        soup = BeautifulSoup(page)
        count = 0
        # get totals table (it is embedded inside an HTML comment)
        table = soup.find_all('div', {'id':'all_totals'}, {'class':'table_wrapper setup_commented commented'})[0]
        comment = table(text=lambda x: isinstance(x, Comment))[0]
        newsoup = BeautifulSoup(comment, 'html.parser')
        table = newsoup.find('table')
        totals_body = table.find("tbody")
        totals_rows = totals_body.find_all('tr')
        totals_dict={}
        totals_cols = {'player','g', 'mp', 'pts'}
        for row in totals_rows:
            # data rows carry a scoped <th>; header/filler rows do not
            if (row.find('th', {"scope":"row"}) != None):
                for t in totals_cols:
                    if t == 'player':
                        # player name plus the slug from the profile href
                        cell = row.find("td",{"data-stat": t})
                        a = cell.text.strip().encode()
                        text=a.decode("utf-8")
                        href = row.find("a").attrs['href']
                        player_id = re.findall(r'(?<=cbb/players/)(.*)(?=\.html)', href)[0]
                        if t in totals_dict:
                            totals_dict[t].append(text)
                            totals_dict['player_id'].append(player_id)
                        else:
                            totals_dict[t] = [text]
                            totals_dict['player_id'] = [player_id]
                    else:
                        cell = row.find("td",{"data-stat": t})
                        a = cell.text.strip().encode()
                        text=a.decode("utf-8")
                        if t in totals_dict:
                            totals_dict[t].append(text)
                        else:
                            totals_dict[t]=[text]
        totals_dict['season'] = s
        totals_dict['url_school'] = school
        totals_dict['school'] = school_dict[school]
        totals_df = pd.DataFrame.from_dict(totals_dict)
        totals_df_all=pd.concat([totals_df_all,totals_df])
        # get roster table
        roster_table = soup.find_all("table", id="roster")
        roster_body = roster_table[0].find("tbody")
        roster_dict = {}
        roster_cols = {'player', 'rsci'}
        roster_rows = roster_body.find_all('tr')
        for row in roster_rows:
            if (row.find('th', {"scope":"row"}) != None):
                for r in roster_cols:
                    if r == 'player':
                        cell = row.find("th",{"data-stat": r})
                        a = cell.text.strip().encode()
                        text=a.decode("utf-8")
                        href = row.find("a").attrs['href']
                        player_id = re.findall(r'(?<=cbb/players/)(.*)(?=\.html)', href)[0]
                        if r in roster_dict:
                            roster_dict[r].append(text)
                            roster_dict['player_id'].append(player_id)
                        else:
                            roster_dict[r]=[text]
                            roster_dict['player_id'] = [player_id]
                        roster_dict['season'] = s
                        roster_dict['url_school'] = school
                        roster_dict['school'] = school_dict[school]
                    else:
                        # rsci may be absent; fall back to 'NA'
                        if (row.find("td",{"data-stat": r}) != None):
                            cell = row.find("td",{"data-stat": r})
                            a = cell.text.strip().encode()
                            text=a.decode("utf-8")
                            if r in roster_dict:
                                roster_dict[r].append(text)
                            else:
                                roster_dict[r]=[text]
                        else:
                            roster_dict[r]= 'NA'
        roster_df = pd.DataFrame.from_dict(roster_dict)
        roster_df_all=pd.concat([roster_df_all,roster_df])
########### get tournament player data by season ##########
# Page through the tournament player-game finder, 100 rows per request.
tourney_df_all=pd.DataFrame()
for r in range(0, 8400, 100):
    url = "https://www.sports-reference.com/cbb/play-index/tourney_pgl_finder.cgi?request=1&match=single&year_min=2008&year_max=2019&round=&school_id=&opp_id=&person_id=&game_month=&game_result=&is_starter=&pos_is_g=Y&pos_is_gf=Y&pos_is_f=Y&pos_is_fg=Y&pos_is_fc=Y&pos_is_c=Y&pos_is_cf=Y&c1stat=&c1comp=&c1val=&c2stat=&c2comp=&c2val=&c3stat=&c3comp=&c3val=&c4stat=&c4comp=&c4val=&is_dbl_dbl=&is_trp_dbl=&order_by=mp&order_by_asc=&offset=" + str(r) +""
    page = urlopen(url).read()
    soup = BeautifulSoup(page)
    count = 0
    body = soup.find("tbody")
    tourney_rows = body.find_all('tr')
    tourney_dict={}
    tourney_cols = {'player', 'school_name', 'year_id', 'mp', 'pts'}
    for row in tourney_rows:
        # data rows carry a scoped <th>; header/filler rows do not
        if (row.find('th', {"scope":"row"}) != None):
            for t in tourney_cols:
                if t == 'player':
                    # player name plus the slug from the profile href
                    cell = row.find("td",{"data-stat": t})
                    a = cell.text.strip().encode()
                    text=a.decode("utf-8")
                    href = row.find("a").attrs['href']
                    player_id = re.findall(r'(?<=cbb/players/)(.*)(?=\.html)', href)[0]
                    if t in tourney_dict:
                        tourney_dict[t].append(text)
                        tourney_dict['player_id'].append(player_id)
                    else:
                        tourney_dict[t] = [text]
                        tourney_dict['player_id'] = [player_id]
                elif t == 'school_name':
                    # NOTE(review): this inner th-check repeats the outer
                    # one and is always true here.
                    if (row.find('th', {"scope":"row"}) != None):
                        cell = row.find("td",{"data-stat": t})
                        a = cell.text.strip().encode()
                        text1=a.decode("utf-8")
                        href = cell.find("a").attrs['href']
                        text2 = re.findall(r'(?<=cbb/schools/)(.*)(?=\/)', href)[0]
                        if t in tourney_dict:
                            tourney_dict[t].append(text1)
                            tourney_dict['url_school'].append(text2)
                        else:
                            tourney_dict[t] = [text1]
                            tourney_dict['url_school'] = [text2]
                else:
                    cell = row.find("td",{"data-stat": t})
                    a = cell.text.strip().encode()
                    text=a.decode("utf-8")
                    if t in tourney_dict:
                        tourney_dict[t].append(text)
                    else:
                        tourney_dict[t] = [text]
    tourney_df = pd.DataFrame.from_dict(tourney_dict)
    tourney_df_all=pd.concat([tourney_df_all,tourney_df])
#export to csv
#tourney_df_all.to_csv("tourney_df_all_2008_2019.csv", index=False)
########## get coaches by season ##########
# One coaches page per season, 1975-2019; per-season and career columns
# all live on the same row.
coaches_df_all=pd.DataFrame()
for s in range(1975, 2020, 1):
    url = "https://www.sports-reference.com/cbb/seasons/" + str(s) + "-coaches.html"
    page = urlopen(url).read()
    soup = BeautifulSoup(page)
    table = soup.find_all("table", id="coaches")[0]
    body = table.find_all("tbody")
    rows = body[0].find_all('tr')
    coach_dict = {}
    cols = {'coach', 'school', 'since_cur_schl', 'ap_pre', 'tourney_note', 'w_car', 'l_car', 'ncaa_apps_car', 'sweet16_apps_car','final4_apps_car', 'natl_champs_car'}
    for row in rows:
        for c in cols:
            if c == 'coach':
                # coach name plus the slug from the profile href;
                # the scoped-<th> test skips header/filler rows
                if (row.find('th', {"scope":"row"}) != None):
                    cell = row.find("th",{"data-stat": c})
                    a = cell.text.strip().encode()
                    text1=a.decode("utf-8")
                    href = cell.find("a").attrs['href']
                    text2 = re.findall(r'(?<=cbb/coaches/)(.*)(?=\.html)', href)[0]
                    if c in coach_dict:
                        coach_dict[c].append(text1)
                        coach_dict['coach_id'].append(text2)
                    else:
                        coach_dict[c] = [text1]
                        coach_dict['coach_id'] = [text2]
            elif c == 'school':
                # school display name plus the slug from its href
                if (row.find('th', {"scope":"row"}) != None):
                    cell = row.find("td",{"data-stat": c})
                    a = cell.text.strip().encode()
                    text1=a.decode("utf-8")
                    href = cell.find("a").attrs['href']
                    text2 = re.findall(r'(?<=cbb/schools/)(.*)(?=\/)', href)[0]
                    if c in coach_dict:
                        coach_dict[c].append(text1)
                        coach_dict['url_school'].append(text2)
                    else:
                        coach_dict[c] = [text1]
                        coach_dict['url_school'] = [text2]
            else:
                if (row.find('th', {"scope":"row"}) != None):
                    cell = row.find("td",{"data-stat": c})
                    a = cell.text.strip().encode()
                    text= a.decode("utf-8")
                    if c in coach_dict:
                        coach_dict[c].append(text)
                    else:
                        coach_dict[c] = [text]
    coach_dict['season'] = s
    coaches_df = pd.DataFrame.from_dict(coach_dict)
    coaches_df_all=pd.concat([coaches_df_all,coaches_df])
#coaches_df_all.to_csv("coaches_1975_2019.csv", index=False)
########## get tournament results/location ##########
# Page through the tournament game finder, 100 rows per request; seeds are
# parsed out of the school cell text and slugs out of the hrefs.
# BUG FIX: both query strings below contained the mangled text "®ion="
# (the "&reg" of "&region=" collapsed into a (R) character), which breaks
# the request parameters; restored "&region=".
games_df_all=pd.DataFrame()
for r in range(0, 1600, 100):
    url = "https://www.sports-reference.com/cbb/play-index/tourney.cgi?request=1&match=single&year_min=2008&year_max=&round=&region=&location=&school_id=&conf_id=&opp_id=&opp_conf=&seed=&seed_cmp=eq&opp_seed=&opp_seed_cmp=eq&game_result=&pts_diff=&pts_diff_cmp=eq&order_by=date_game&order_by_single=date_game&order_by_combined=g&order_by_asc=&offset=" + str(r) +""
    #url = "https://www.sports-reference.com/cbb/play-index/tourney.cgi?request=1&match=single&year_min=2008&year_max=&round=&region=&location=&school_id=&conf_id=&opp_id=&opp_conf=&seed=&seed_cmp=eq&opp_seed=&opp_seed_cmp=eq&game_result=&pts_diff=&pts_diff_cmp=eq&order_by=date_game&order_by_single=date_game&order_by_combined=g&order_by_asc=&offset=0"
    page = urlopen(url).read()
    soup = BeautifulSoup(page)
    count = 0
    body = soup.find("tbody")
    games_rows = body.find_all('tr')
    games_dict={}
    games_cols = {'year_id', 'region', 'round', 'school_name', 'pts', 'opp_name', 'opp_pts', 'overtimes', 'pts_diff', 'location'}
    t = 'school_name'  # NOTE(review): leftover assignment; overwritten by the loop below
    for row in games_rows:
        # data rows carry a scoped <th>; header/filler rows do not
        if (row.find('th', {"scope":"row"}) != None):
            for t in games_cols:
                if t == 'school_name':
                    cell = row.find("td",{"data-stat": t})
                    seed = cell.get_text().split()[0]
                    href = cell.find_all("a")[0]
                    text1 = href.text
                    href2 = href.attrs['href']
                    text2 = re.findall(r'(?<=cbb/schools/)(.*)(?=\/)', href2)[0]
                    if 'school' in games_dict:
                        games_dict['school'].append(text1)
                        games_dict['url_school'].append(text2)
                        games_dict['seed'].append(seed)
                    else:
                        games_dict['school'] = [text1]
                        games_dict['url_school'] = [text2]
                        games_dict['seed'] = [seed]
                elif t == 'opp_name':
                    if (row.find('th', {"scope":"row"}) != None):
                        cell = row.find("td",{"data-stat": t})
                        opp_seed = cell.get_text().split()[0]
                        href = cell.find_all("a")[0]
                        text1 = href.text
                        href2 = href.attrs['href']
                        text2 = re.findall(r'(?<=cbb/schools/)(.*)(?=\/)', href2)[0]
                        if 'opp_school' in games_dict:
                            games_dict['opp_school'].append(text1)
                            games_dict['opp_url_school'].append(text2)
                            games_dict['opp_seed'].append(opp_seed)
                        else:
                            games_dict['opp_school'] = [text1]
                            games_dict['opp_url_school'] = [text2]
                            games_dict['opp_seed'] = [opp_seed]
                else:
                    cell = row.find("td",{"data-stat": t})
                    a = cell.text.strip().encode()
                    text=a.decode("utf-8")
                    if t in games_dict:
                        games_dict[t].append(text)
                    else:
                        games_dict[t] = [text]
    games_df = pd.DataFrame.from_dict(games_dict)
    games_df_all=pd.concat([games_df_all,games_df])
#export to csv
#games_df_all.to_csv("games_2008_2019.csv", axis=False)
########## get school locations ##########
# Single index page mapping every school slug/name to its location text.
url = "https://www.sports-reference.com/cbb/schools/"
page = urlopen(url).read()
soup = BeautifulSoup(page)
count = 0
body = soup.find("tbody")
school_rows = body.find_all('tr')
school_loc_dict={}
school_loc_cols = {'school_name', 'location'}
for row in school_rows:
    # data rows carry a scoped <th>; header/filler rows do not
    if (row.find('th', {"scope":"row"}) != None):
        for t in school_loc_cols:
            if t == 'school_name':
                cell = row.find("td",{"data-stat": t})
                href = cell.find_all("a")[0]
                text1 = href.text
                href2 = href.attrs['href']
                text2 = re.findall(r'(?<=cbb/schools/)(.*)(?=\/)', href2)[0]
                if 'school' in school_loc_dict:
                    school_loc_dict['school'].append(text1)
                    school_loc_dict['url_school'].append(text2)
                else:
                    school_loc_dict['school'] = [text1]
                    school_loc_dict['url_school'] = [text2]
            else:
                cell = row.find("td",{"data-stat": t})
                a = cell.text.strip().encode()
                text=a.decode("utf-8")
                if t in school_loc_dict:
                    school_loc_dict[t].append(text)
                else:
                    school_loc_dict[t] = [text]
school_loc_df = pd.DataFrame.from_dict(school_loc_dict)
#export to csv
#school_loc_df.to_csv("school_loc.csv", index=False)
################Scrape Player Win Shares##############
# Second phase: pull the NCAA tournament team index from realgm.com to
# learn every school's slug, conference, and tournament seasons.
try:
    import urllib.request as urllib2
except ImportError:
    import urllib2
import urllib.parse
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import pandas as pd
import re

url = "https://basketball.realgm.com/ncaa/tournaments/Post-Season/NCAA-Tournament/1/teams"
page = urlopen(url).read()
soup = BeautifulSoup(page)
table = soup.find("tbody")
school_names = []
school_ids = []
conference_names = []
conference_ids = []
tournaments = []
for row in table.findAll('tr'):
    # ids/slugs are sliced out of the first anchor's href path segments
    school_name = row.findAll('a')[0].text
    school_id = str(row.findAll('a')[0]).split('/')[6]
    conference_name = str(row.findAll('a')[0]).split('/')[3]
    conference_id = str(row.findAll('a')[0]).split('/')[4]
    school_names.append(school_name)
    conference_names.append(conference_name)
    school_ids.append(school_id)
    conference_ids.append(conference_id)
    tourn_seasons = []
    # NOTE(review): the inner loop rebinds `row`; harmless because the
    # outer loop reassigns it, but easy to trip over when editing.
    for row in row.findAll('a')[1:]:
        tourn_seasons.append(row.text.split('-')[1])
    tournaments.append(tourn_seasons)
#school_df = pd.DataFrame.from_dict(school_dict)
####get schools duplicated for each tournament season
# Flatten to one row per (school, tournament season) pair.
snames = []
sids = []
cnames = []
cids = []
seasons = []
for (school_name, school_id, conference_name ,conference_id, tournament) in zip(school_names, school_ids, conference_names, conference_ids, tournaments):
    for year in tournament:
        snames.append(school_name)
        seasons.append(year)
        sids.append(school_id)
        cnames.append(conference_name)
        cids.append(conference_id)
school_tournaments = pd.DataFrame(list(zip(snames, sids, cnames, cids, seasons)),
                                  columns =['school', 'school_id', 'conference', 'conference_id', 'Season'])
###########full win share scrape
win_shares = {}
for (school_id, school_name, conference_id, conference_name, year) in zip(sids, snames, cids, cnames, seasons):
if year in ['2013', '2014', '2015', '2016', '2017', '2018', '2019']:
try:
school = urllib.parse.quote(school_name)
url = "https://basketball.realgm.com/ncaa/conferences/" + conference_name + "/" + conference_id + "/" + school + "/" + school_id + "/stats/" + year + "/Misc_Stats/All/All/Season/All/per/desc/1/"
page = urlopen(url).read()
soup = BeautifulSoup(page)
try:
table = soup.find("tbody")
except(TypeError, KeyError) as e:
table = soup.find("tbody")
for row in table.findAll('tr'):
player = row.find_all('td')[1].text
player_id = str(row.find_all('a')).split('/')[4].split('">')[0]
win_share = row.find_all('td')[-1].text
if len(win_shares) >= 6:
win_shares['year'].append(year)
win_shares['player'].append(player)
win_shares['player_id'].append(player_id)
win_shares['school_id'].append(school_id)
win_shares['school_name'].append(school_name)
win_shares['win_shares'].append(win_share)
else:
win_shares['year'] = [year]
win_shares['player'] = [player]
win_shares['player_id'] = [player_id]
win_shares['school_id'] = [school_id]
win_shares['school_name'] = [school_name]
win_shares['win_shares'] = [win_share]
except ConnectionResetError:
print('Handle Exception')
# Build the season win-share frame and deduplicate it.
win_share_df = pd.DataFrame.from_dict(win_shares)
# BUG FIX: the original assigned the deduplicated frame back to the *dict* name
# (`win_shares = win_share_df.drop_duplicates()`), so the dedup result was
# discarded and the un-deduplicated frame flowed into the merge below. Mirror
# the tournament branch, which correctly reassigns its own frame.
win_share_df = win_share_df.drop_duplicates()
win_share_df['win_shares'] = win_share_df['win_shares'].astype(float)
#get tournament win shares
# same scrape as above, restricted to Post-Season NCAA Tournament stats so that
# tournament production can be backed out of the season totals below
tourn_win_shares = {}
for (school_id, school_name, conference_id, conference_name, year) in zip(sids, snames, cids, cnames, seasons):
    if year in ['2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']:
        try:
            school = urllib.parse.quote(school_name)
            url = "https://basketball.realgm.com/ncaa/conferences/" + conference_name + "/" + conference_id + "/" + school + "/" + school_id + "/stats/" + year + "/Misc_Stats/All/All/Post-Season_NCAA_Tournament/All/desc/1/"
            page = urlopen(url).read()
            soup = BeautifulSoup(page)
            # NOTE(review): identical branches, as in the season-long scrape above
            try:
                table = soup.find("tbody")
            except(TypeError, KeyError) as e:
                table = soup.find("tbody")
            for row in table.findAll('tr'):
                player = row.find_all('td')[1].text
                player_id = str(row.find_all('a')).split('/')[4].split('">')[0]
                win_share = row.find_all('td')[-1].text
                # 6 parallel lists once initialized (see season scrape above)
                if len(tourn_win_shares) >= 6:
                    tourn_win_shares['year'].append(year)
                    tourn_win_shares['player'].append(player)
                    tourn_win_shares['player_id'].append(player_id)
                    tourn_win_shares['school_id'].append(school_id)
                    tourn_win_shares['school_name'].append(school_name)
                    tourn_win_shares['win_shares'].append(win_share)
                else:
                    tourn_win_shares['year'] = [year]
                    tourn_win_shares['player'] = [player]
                    tourn_win_shares['player_id'] = [player_id]
                    tourn_win_shares['school_id'] = [school_id]
                    tourn_win_shares['school_name'] = [school_name]
                    tourn_win_shares['win_shares'] = [win_share]
        except ConnectionResetError:
            print('Handle Exception')
tourn_win_share_df = pd.DataFrame.from_dict(tourn_win_shares)
tourn_win_share_df = tourn_win_share_df.drop_duplicates()
tourn_win_share_df['win_shares'] = tourn_win_share_df['win_shares'].astype(float)
# join season and tournament win shares per (player, school, year)
player_win_shares = pd.merge(win_share_df, tourn_win_share_df, how = "left", on=['player_id', 'school_id', 'year'], suffixes=['', '_tourn'])
player_win_shares = player_win_shares.drop(['player_tourn', 'school_name_tourn'], axis=1)
player_win_shares['win_shares_tourn'] = player_win_shares['win_shares_tourn'].fillna(0)
# back tournament production out of the season total -> pre-tournament win shares
player_win_shares['win_shares'] = player_win_shares['win_shares'] - player_win_shares['win_shares_tourn']
player_win_shares = player_win_shares.drop(['win_shares_tourn'], axis=1)
player_win_shares['win_shares'] = player_win_shares['win_shares'].astype(float)
#write to csv
player_win_shares.to_csv("player_win_shares.csv", index=False)
########## Manipulate Data ##########
#read in data files
# one roster and one totals csv per season, produced by the scrapes above
#rosters
#roster_2020 = pd.read_csv("roster_2020.csv", na_values=['NA'])
roster_2019 = pd.read_csv("roster_2019.csv", na_values=['NA'])
roster_2018 = pd.read_csv("roster_2018.csv", na_values=['NA'])
roster_2017 = pd.read_csv("roster_2017.csv", na_values=['NA'])
roster_2016 = pd.read_csv("roster_2016.csv", na_values=['NA'])
roster_2015 = pd.read_csv("roster_2015.csv", na_values=['NA'])
roster_2014 = pd.read_csv("roster_2014.csv", na_values=['NA'])
roster_2013 = pd.read_csv("roster_2013.csv", na_values=['NA'])
roster_2012 = pd.read_csv("roster_2012.csv", na_values=['NA'])
roster_2011 = pd.read_csv("roster_2011.csv", na_values=['NA'])
roster_2010 = pd.read_csv("roster_2010.csv", na_values=['NA'])
roster_2009 = pd.read_csv("roster_2009.csv", na_values=['NA'])
roster_2008 = pd.read_csv("roster_2008.csv", na_values=['NA'])
roster_2007 = pd.read_csv("roster_2007.csv", na_values=['NA'])
#player totals
#totals_2020 = pd.read_csv("totals_2020.csv", na_values=['NA'])
totals_2019 = pd.read_csv("totals_2019.csv", na_values=['NA'])
totals_2018 = pd.read_csv("totals_2018.csv", na_values=['NA'])
totals_2017 = pd.read_csv("totals_2017.csv", na_values=['NA'])
totals_2016 = pd.read_csv("totals_2016.csv", na_values=['NA'])
totals_2015 = pd.read_csv("totals_2015.csv", na_values=['NA'])
totals_2014 = pd.read_csv("totals_2014.csv", na_values=['NA'])
totals_2013 = pd.read_csv("totals_2013.csv", na_values=['NA'])
totals_2012 = pd.read_csv("totals_2012.csv", na_values=['NA'])
totals_2011 = pd.read_csv("totals_2011.csv", na_values=['NA'])
totals_2010 = pd.read_csv("totals_2010.csv", na_values=['NA'])
totals_2009 = pd.read_csv("totals_2009.csv", na_values=['NA'])
totals_2008 = pd.read_csv("totals_2008.csv", na_values=['NA'])
totals_2007 = pd.read_csv("totals_2007.csv", na_values=['NA'])
#tourney player totals - rename year_id to season, school_name to school and pts/mp to tourn_pts, tourn_mp
tourney_player_totals = pd.read_csv("tourney_2008_2019.csv")
tourney_player_totals = tourney_player_totals.rename({'year_id': 'season', 'school_name': 'school', 'pts': 'tourn_pts', 'mp': 'tourn_mp'}, axis=1)
#coaches
coaches = pd.read_csv("coaches_1975_2019.csv")
# Strip the trailing "-YYYY" span from tenure values like "2005-2019", keeping
# the start year. FIX: use a raw string (avoids the invalid '\d' escape warning)
# and pass regex=True explicitly — pandas < 2.0 defaulted str.replace to
# regex=True, but 2.0+ defaults to regex=False, which would silently make this
# a no-op literal replacement.
coaches['since_cur_schl'] = coaches['since_cur_schl'].str.replace(r'-\d+', '', regex=True)
#only get one coach per school per year
#remove mike hopkins as interim coach when Boeheim got suspended
coaches = coaches[~((coaches['coach_id'] == 'mike-hopkins-1') & (coaches['url_school'] == 'syracuse'))]
coaches_subset = coaches[['season', 'url_school', 'since_cur_schl', 'coach_id']]
# max per (season, school) keeps one coach row — the longest-tenured one
coaches_subset = coaches_subset.groupby(['season','url_school']).max().reset_index()
#confirm that each season/school only has one coach
# NOTE(review): inspection-only expression; its result is discarded when run as a script
coaches_subset.groupby(['season','url_school']).size().sort_index(ascending=True)
#join to coaches
coaches = pd.merge(coaches_subset, coaches, how='left',on=['url_school', 'season', 'coach_id'], suffixes=('', '_y'))
#drop extra column from merge
coaches = coaches.drop('since_cur_schl_y', axis=1)
#games - rename year_id to season
games = pd.read_csv("games_2008_2019.csv")
games = games.rename({'year_id': 'season'}, axis=1)
#states abbreviations file to convert game locations
states = pd.read_csv("states.csv")
#school locations
school_loc = pd.read_csv("school_loc.csv")
#some schools have the wrong city or spelled differently than cities table, so need to update them to be able to join to cities table
school_loc['location'] = school_loc['location'].replace({'Villanova, Pennsylvania': 'Philadelphia, Pennsylvania',
                                                         'Mississippi State, Mississippi': 'Starkville, Mississippi',
                                                         'University, Mississippi': 'Oxford, Mississippi',
                                                         'St. Bonaventure, New York': 'Allegany, New York',
                                                         'Washington, D.C.': 'Washington, District of Columbia',
                                                         'University Park, Pennsylvania': 'State College, Pennsylvania'
                                                         })
#cities file with lat long coordninates
cities = pd.read_csv("worldcities.csv")
#kaggle school spellings - need to specify encoding as it had a decoding error
school_spellings = pd.read_csv("MTeamSpellings.csv", encoding = "ISO-8859-1")
#rename columns
school_spellings = school_spellings.rename({'TeamNameSpelling': 'school_spelling', 'TeamID': 'team_id'}, axis=1)
# align the kaggle spelling with the sports-reference slug
school_spellings['school_spelling'] = school_spellings['school_spelling'].str.replace('chicago-st', 'chicago-state')
#T-Rank Stats
# one snapshot per month plus the full-season table
trank_november = pd.read_csv("trank_november.csv")
trank_december = pd.read_csv("trank_december.csv")
trank_january = pd.read_csv("trank_january.csv")
trank_febmarch = pd.read_csv("trank_febmarch.csv")
trank_all = pd.read_csv("trank_fullseason.csv")
#combine all roster and totals data into 2 separate data frames (1 for all roster data & 1 for all totals data)
#create list of all rosters
roster_list = [roster_2019, roster_2018, roster_2017,
               roster_2016, roster_2015, roster_2014, roster_2013,
               roster_2012, roster_2011, roster_2010, roster_2009,
               roster_2008, roster_2007]
#create list of all totals
totals_list = [totals_2019, totals_2018, totals_2017,
               totals_2016, totals_2015, totals_2014, totals_2013,
               totals_2012, totals_2011, totals_2010, totals_2009,
               totals_2008, totals_2007]
#combine all
# NOTE: lists are ordered newest->oldest, so within each (school, player)
# later seasons come first after concat — the shift(-1) continuity logic
# further down appears to rely on this ordering (TODO confirm)
rosters = pd.concat(roster_list)
totals = pd.concat(totals_list)
#grab just the ranking from rsci rosters column and remove the (year)
#convert null values to 0
rosters['rsci'] = rosters['rsci'].fillna(0).astype(str)
#create function to split on comma, reverse string list, and return just the ranking
#this was because I wanted the second ranking for players with multiple rankings
def clean_ranking(x):
    """Extract the ranking number from an rsci string.

    Uses the last comma-separated entry (players with multiple rankings keep
    the most recent one) and returns its first whitespace-delimited token,
    e.g. '25 (2017), 10 (2018)' -> '10'.
    """
    latest_entry = x.split(',')[-1]
    return latest_entry.split()[0]
rosters['rsci'] = rosters['rsci'].apply(clean_ranking)
#convert to int
rosters['rsci'] = rosters['rsci'].astype(int)
#check if duplicates
# NOTE(review): the next four expressions are inspection-only; their results
# are discarded when this is run as a script
rosters.duplicated(['player', 'season', 'url_school']).sum()
totals.duplicated(['player', 'season', 'url_school']).sum()
#3 duplicates in each. Let's see what they are
rosters[rosters.duplicated(['player', 'season', 'url_school'])]
totals[totals.duplicated(['player', 'season', 'url_school'])]
#the duplicate rows have incorrectly formatted player_id's (lastname-firstname instead of firstname-lastname or 2 instead of 1)
#These can be removed from the rosters
rosters = rosters[rosters['player_id'] != 'funtarov-georgi-1']
rosters = rosters[rosters['player_id'] != 'battle-joseph-1']
rosters = rosters[rosters['player_id'] != 'anthony-horton-2']
#for totals, I will combine the rows into one by renaming each player_id and adding the totals
totals['player_id'] = totals['player_id'].replace('funtarov-georgi-1', 'georgi-funtarov-1').replace('battle-joseph-1', 'joseph-battle-1').replace('anthony-horton-2', 'anthony-horton-1')
totals = totals.groupby(['player_id','player', 'season', 'url_school', 'school']).sum().reset_index()
#totals = totals.drop_duplicates(['player', 'season', 'url_school'])
#join the two data frames together.
rosters_totals = pd.merge(rosters, totals, how='left', on=['player_id', 'player', 'url_school', 'school', 'season'])
#need to rename a player who has a new id and is different from id in tournament data
rosters_totals['player_id'] = rosters_totals['player_id'].replace('anthony-oliverii-1', 'aj-oliverii-1')
#join to tourney data to get pre tourney totals
rosters_totals = pd.merge(rosters_totals, tourney_player_totals, how='left',on=['player_id', 'url_school', 'school', 'season'], suffixes=('', '_y'))
#drop additional player column that was created during the merge
rosters_totals = rosters_totals.drop('player_y', axis=1)
#fill null values with 0 in tourn_pts and tourn_mp
# players with no tournament rows contributed nothing in the tournament
rosters_totals['tourn_mp'] = rosters_totals['tourn_mp'].fillna(0)
rosters_totals['tourn_pts'] = rosters_totals['tourn_pts'].fillna(0)
#subtract tournament stats from season stats
# -> mp/pts become *pre-tournament* numbers from here on
rosters_totals['mp'] = rosters_totals['mp'] - rosters_totals['tourn_mp']
rosters_totals['pts'] = rosters_totals['pts'] - rosters_totals['tourn_pts']
#create new table to work with in calculating rating for team recruiting weighted by minutes
# NOTE(review): this is an alias, not a copy — the rsci transform two lines
# below also mutates rosters_totals['rsci'] in place
rsci_rosters_totals = rosters_totals
#calculate team recruiting ranking
#reverse numbers for rsci column so that 100 is best rating and 1 is worst
rsci_rosters_totals['rsci'] = (rsci_rosters_totals['rsci']-101).abs()
#replace all values of 101 with 0
# unranked players were coded 0, which became |0 - 101| = 101 above
rsci_rosters_totals.loc[rsci_rosters_totals['rsci']== 101, 'rsci'] = 0
#multiply rsci by mp
rsci_rosters_totals['rsci_mp'] = rsci_rosters_totals['rsci'] * rsci_rosters_totals['mp']
#group by school/season and sum the rsci_mp and mp columns
# NOTE(review): list-selection after groupby with a bare tuple-style list is
# deprecated in newer pandas (use [['rsci_mp', 'mp']])
rsci_rosters_totals = rsci_rosters_totals.groupby(['season', 'url_school'])['rsci_mp', 'mp'].sum().reset_index()
#create rsci_rating by dividing rsci_mp by mp
# -> minutes-weighted average recruiting rank per team-season
rsci_rosters_totals['rsci_rating'] = rsci_rosters_totals['rsci_mp'] / rsci_rosters_totals['mp']
#convert season to int
rsci_rosters_totals['season'] = rsci_rosters_totals['season'].astype(int)
#calculate mp/scoring continuity
#ceate new mp table
mp_cont = rosters_totals[['url_school', 'season', 'player_id', 'mp']]
#calculate if a player is a returning player from prior year
# shift(-1) pulls the next row's minutes within each (school, player) group;
# rows are ordered newest-season-first (see the concat order above), so this is
# the prior season's minutes — TODO confirm the ordering survives the merges
mp_cont['returning'] = mp_cont.groupby(['url_school','player_id'])['mp'].shift(-1)
#group by season and school and create new column with the total number of minutes of school for that season
mp_cont['total'] = mp_cont.groupby(['season', 'url_school'])['mp'].transform(sum)
#fill na vaues with 0
mp_cont['returning'] = mp_cont['returning'].fillna(0)
#filter out non returning players and low impact player by excluding players who played less than 150 minutes
mp_cont = mp_cont.loc[mp_cont['returning'] >= 150 , ['season', 'url_school', 'player_id', 'mp', 'total']]
#calculate the % of team minutes that each player accounter for
mp_cont['pct'] = mp_cont['mp'] / mp_cont['total']
#create continuity column by adding up the pct column
mp_cont['continuity'] = mp_cont.groupby(['season', 'url_school'])['pct'].transform(sum)
#remove everything but season, url_school and continuity columns
mp_cont = mp_cont[['season', 'url_school', 'continuity']]
#remove duplicate rows
mp_cont = mp_cont.drop_duplicates()
#convert season to int
mp_cont['season'] = mp_cont['season'].astype(int)
#calculate scoring continuity
#ceate new table
pts_cont = rosters_totals[['url_school', 'season', 'player_id', 'pts']]
#calculate if a player is a returning player from prior year
# same shift(-1) trick as mp_cont above (prior-season points per player)
pts_cont['returning'] = pts_cont.groupby(['url_school','player_id'])['pts'].shift(-1)
#group by season and school and create new column with the total number of minutes of school for that season
pts_cont['total'] = pts_cont.groupby(['season', 'url_school'])['pts'].transform(sum)
#filter out non returning players by excluding null values
pts_cont = pts_cont.loc[pts_cont['returning'].isnull() == False, ['season', 'url_school', 'player_id', 'pts', 'total']]
#calculate the % of team minutes that each player accounter for
pts_cont['pct'] = pts_cont['pts'] / pts_cont['total']
#create continuity column by adding up the pct column
pts_cont['continuity'] = pts_cont.groupby(['season', 'url_school'])['pct'].transform(sum)
#remove everything but season, url_school and continuity columns
pts_cont = pts_cont[['season', 'url_school', 'continuity']]
# NOTE(review): pts_cont is computed but never used downstream in this chunk
# (the games joins below use mp_cont only)
##get team id/location for each school from kaggle id's
#confirm number of rows for each year in games table is correct and that I have all the data
# inspection-only expression
games.groupby(['season'])['season'].count()
##get all schools that have played in tournament
schools = games[['url_school', 'school']]
#get unique schools
school_ids = schools.drop_duplicates()
#get school id from kaggle spellings data
school_ids = pd.merge(school_ids, school_spellings, how='left', left_on='url_school', right_on='school_spelling')
#get location and coordinates for each school to calculate distance
#get location of each school
school_ids = pd.merge(school_ids, school_loc, on=['url_school'], suffixes=('', '_y'))
#remove extra school column created in the merge
school_ids = school_ids.drop('school_y', axis=1)
#create new column with underscore separting city/state so that can be joined to cities
# e.g. "Durham, North Carolina" -> "durham_north_carolina"
school_ids['loc_id'] = school_ids['location'].str.replace(' ', '_').str.replace(',', '').str.replace('.', '').str.lower()
#prepare cities file to join to school_ids to get coordinates
#exclude all non-us cities
cities = cities[cities['country'] == 'United States']
#filter out unnecessary columns
cities = cities[['city_ascii', 'admin_name', 'lat', 'lng']]
#rename city_asci to city and admin_name to state
cities = cities.rename({'city_ascii': 'city', 'admin_name': 'state'}, axis=1)
#create new cities not found in table
new_cities = [pd.Series(['Boiling Springs', 'North Carolina', 35.2543, -81.6670], index=cities.columns),
              pd.Series(['Hamilton', 'New York', 42.8270, -75.5447], index=cities.columns),
              pd.Series(['South Orange', 'New Jersey', 40.7490, -74.2613], index=cities.columns),
              pd.Series(['Allegany', 'New York', 42.0901, -78.4942], index=cities.columns),
              pd.Series(['Moon Township','Pennsylvania', 40.5201, -80.2107], index=cities.columns),
              pd.Series(['Riverdale', 'New York', 40.9005, -73.9064], index=cities.columns),
              pd.Series(['Itta Bena', 'Mississippi', 33.4951, -90.3198], index=cities.columns),
              pd.Series(['Loudonville', 'New York', 42.7048, -73.7548], index=cities.columns),
              pd.Series(['Chestnut Hill', 'Massachusetts', 42.6362, -72.2009], index=cities.columns),
              pd.Series(['Northridge', 'California', 34.2283, -118.5368], index=cities.columns)]
#append new cities to cities table
# NOTE(review): DataFrame.append was removed in pandas 2.0 — pd.concat is the
# modern equivalent if this script is ever run on a newer pandas
cities = cities.append(new_cities , ignore_index=True)
#concatenate city and state column
cities['city_id'] = cities['city'] +'_' + cities['state']
#clean up city_id column to the same format in school_ids table
cities['city_id'] = cities['city_id'].str.replace(' ', '_').str.replace(',', '').str.replace('.', '').str.lower()
#get jiust the city_id, lat and lng columns
cities = cities[['city_id', 'lat', 'lng']]
#join cities to school_ids
school_ids = pd.merge(school_ids, cities, how='left', left_on = 'loc_id', right_on= 'city_id')
########## update coaches to only include pre tourney stats ##########
#replace null values with 0
# coaches with no tournament history get zeroed career appearance counts
coaches.loc[coaches['ncaa_apps_car'].isnull(), 'ncaa_apps_car'] = 0
coaches.loc[coaches['sweet16_apps_car'].isnull(), 'sweet16_apps_car'] = 0
coaches.loc[coaches['final4_apps_car'].isnull(), 'final4_apps_car'] = 0
coaches.loc[coaches['natl_champs_car'].isnull(), 'natl_champs_car'] = 0
def tourney_win(x):
    """Map a sports-reference tourney_note string to the number of tournament
    wins it implies (0 for anything unrecognized, e.g. a first-round exit)."""
    wins_by_note = {
        'Lost Second Round': 1,
        'Lost Regional Semifinal': 2,
        'Lost Regional Final': 3,
        'Lost National Semifinal': 4,
        'Lost National Final': 5,
        'Won National Final': 6,
    }
    return wins_by_note.get(x, 0)

def sweet16(x):
    """1 if a win count of x reached the Sweet 16 (2+ wins), else 0."""
    return 1 if x >= 2 else 0

def elite8(x):
    """1 if a win count of x reached the Elite 8 (3+ wins), else 0."""
    return 1 if x >= 3 else 0

def final4(x):
    """1 if a win count of x reached the Final 4 (4+ wins), else 0."""
    return 1 if x >= 4 else 0
# derive this-season tournament results per coach row
coaches['tourney_wins'] = coaches['tourney_note'].apply(tourney_win)
coaches['sweet16'] = coaches['tourney_wins'].apply(sweet16)
coaches['elite8'] = coaches['tourney_wins'].apply(elite8)
coaches['final4'] = coaches['tourney_wins'].apply(final4)
#Calculate elite 8's
#Get just coachid, season, and elite 8
coach_subset = coaches[['coach_id', 'season', 'elite8']]
#create cumsum of elite 8 wins
# running career Elite-8 count per coach; assumes rows are ordered
# oldest-season-first within each coach — TODO confirm the csv ordering
coach_subset['elite8_apps_car'] = coach_subset.groupby(['coach_id'])['elite8'].cumsum()
#remove negative numbers
# NOTE(review): a cumsum of 0/1 flags can never be -1, so this line is a no-op
coach_subset.loc[coach_subset['elite8_apps_car']== -1, 'elite8_apps_car'] = 0
#remove elite 8 column
coach_subset = coach_subset.drop('elite8', axis=1)
#join to coaches
coaches = pd.merge(coaches, coach_subset, on=['coach_id', 'season'])
# back this season's tournament results out so career columns are pre-tournament
coaches['w_car'] = coaches['w_car'].astype(int) - coaches['tourney_wins'].astype(int)
coaches['sweet16_apps_car'] = coaches['sweet16_apps_car'].astype(int) - coaches['sweet16'].astype(int)
coaches['elite8_apps_car'] = coaches['elite8_apps_car'].astype(int) - coaches['elite8'].astype(int)
coaches['final4_apps_car'] = coaches['final4_apps_car'].astype(int) - coaches['final4'].astype(int)
#change sweet16_apps_car from negative 1 to 0
# BUG FIX: the code replaced -1 with 1, contradicting its own comment and the
# elite8 handling above (which zeroes out -1); clamp to 0 as intended.
coaches['sweet16_apps_car'] = coaches['sweet16_apps_car'].replace(-1, 0)
#reduce to only the columns I need
coaches = coaches[['season', 'url_school', 'coach_id', 'ap_pre', 'w_car', 'l_car', 'sweet16_apps_car', 'elite8_apps_car']]
#fill na values with 26
# unranked teams get 26, i.e. one worse than the 25-team AP preseason poll
coaches['ap_pre'] = coaches['ap_pre'].fillna(26)
#manipulate t-rank data
#get only tounament teams(string includes seed number)
#pd.options.mode.chained_assignment = None
def update_trank(df):
    """Normalize one T-Rank table in place and return it.

    Cleans school names (tournament teams carry an embedded seed number) and
    strips the trailing rank token from the rating columns.
    """
    # split on the seed digits, keep the leading name, trim and lowercase
    df['school'] = df['school'].str.split(r'\d+').str[0].str.rstrip().str.lower()
    df['school'] = df['school'].astype(str)
    # align T-Rank spellings with the sports-reference url_school spellings
    spelling_fixes = {
        'arkansas little rock': 'ark little rock',
        'louisiana lafayette': 'ull',
        'cal st. bakersfield': 'cal-state-bakersfield',
        'mississippi valley st.': 'mississippi-valley-state',
        'arkansas pine bluff': 'ark pine bluff',
    }
    for trank_name, sref_name in spelling_fixes.items():
        df['school'] = df['school'].str.replace(trank_name, sref_name)
    # ratings arrive as "value rank"; keep only the leading value token
    for rating_col in ('adjoe', 'adjde', 'barthag', 'wab'):
        df[rating_col] = df[rating_col].str.split(r'\s+').str[0]
    return df
trank_november = update_trank(trank_november)
trank_december = update_trank(trank_december)
trank_january = update_trank(trank_january)
trank_febmarch = update_trank(trank_febmarch)
trank_all = update_trank(trank_all)
# map each monthly T-Rank table to kaggle team ids, then to the schools table,
# and suffix the rating columns with the month index (1=Nov ... 5=full season)
trank_november_new = pd.merge(trank_november, school_spellings, how='left', left_on='school', right_on = ('school_spelling'))
trank_november_new = pd.merge(school_ids, trank_november_new, how='left', on=['team_id'])
trank_november_new = trank_november_new.rename({'adjoe': 'adjoe1', 'adjde': 'adjde1', 'wab': 'wab1'}, axis=1)
trank_december_new = pd.merge(trank_december, school_spellings, how='left', left_on='school', right_on = ('school_spelling'))
trank_december_new = pd.merge(school_ids, trank_december_new, how='left', on=['team_id'])
trank_december_new = trank_december_new.rename({'adjoe': 'adjoe2', 'adjde': 'adjde2', 'wab': 'wab2'}, axis=1)
trank_january_new = pd.merge(trank_january, school_spellings, how='left', left_on='school', right_on = ('school_spelling'))
trank_january_new = pd.merge( school_ids, trank_january_new, how='left', on=['team_id'])
trank_january_new = trank_january_new.rename({'adjoe': 'adjoe3', 'adjde': 'adjde3', 'wab': 'wab3'}, axis=1)
trank_febmarch_new = pd.merge(trank_febmarch, school_spellings, how='left', left_on='school', right_on = ('school_spelling'))
trank_febmarch_new = pd.merge(school_ids, trank_febmarch_new, how='left', on=['team_id'])
trank_febmarch_new = trank_febmarch_new.rename({'adjoe': 'adjoe4', 'adjde': 'adjde4', 'wab': 'wab4'}, axis=1)
trank_all_new = pd.merge(trank_all, school_spellings, how='left', left_on='school', right_on = ('school_spelling'))
trank_all_new = pd.merge(school_ids, trank_all_new, how='left', on=['team_id'])
trank_all_new = trank_all_new.rename({'adjoe': 'adjoe5', 'adjde': 'adjde5', 'wab': 'wab5'}, axis=1)
# stitch the five snapshots into one wide frame per (school, season)
trank = pd.merge(trank_november_new, trank_december_new, on=['url_school', 'season'])
trank = pd.merge(trank, trank_january_new, on=['url_school', 'season'])
#drop extra season column (wouldn't let me sort)
trank = trank.drop('school_x', axis=1)
trank = pd.merge(trank, trank_febmarch_new, on=['url_school', 'season'])
#drop extra season column (wouldn't let me sort)
trank = trank.drop('school_x', axis=1)
trank = pd.merge(trank, trank_all_new, on=['url_school', 'season'])
#get games table ready to join with other tables
#convert season to string
games['season'] = games['season'].astype(str)
#remove duplicate games
#create new column that has the same game combination string for both lines of games
# sorting (season, school, opponent) makes the key identical for both rows of a game
games['dup_games'] = games.apply(lambda row: ''.join(sorted([row['season'], row['url_school'], row['opp_url_school']])), axis=1)
#remove duplicate games and drop the extra column
games = games.drop_duplicates('dup_games')
games = games.drop('dup_games', axis=1)
#remove unnecessary columns
games = games[['season', 'location', 'url_school', 'opp_url_school', 'pts_diff']]
##convert location column to same format in other tables
#convert The Pit, Al to Albuquerque, New Mexico
games['location'] = games['location'].str.replace('The Pit, Al', 'Albuquerque, NM')
#split location column on comma
games[['city', 'state']] = games['location'].str.split(', ', expand=True)
#get full state name
games = pd.merge(games, states, how='left', left_on = 'state', right_on = 'abbreviation', suffixes=('', '_y'))
#concatenate columns together
games['loc_id'] = games['city'] + ' ' + games['state_y']
#update formate
# normalize to the same key format as cities['city_id']
games['loc_id'] = games['loc_id'].str.replace(' ', '_').str.replace('.', '').str.lower()
#get lat long coordinates for tournament site
games = pd.merge(games, cities, how='left', left_on='loc_id', right_on = 'city_id')
#rename lat/lng
games = games.rename({'lat': 'tourn_lat', 'lng': 'tourn_lng'}, axis=1)
#get lat long coordinates for first school
games = pd.merge(games,school_ids[['url_school', 'lat', 'lng']],on=['url_school'], how='left')
#games = pd.merge(games, school_ids, how='left', on=['url_school'], suffixes=['url_school_', 'url_school_'])
#rename lat/lng
games = games.rename({'lat': 's1_lat', 'lng': 's1_lng'}, axis=1)
#get lat long coordinates for second school
games = pd.merge(games, school_ids, how='left', left_on = 'opp_url_school', right_on='url_school', suffixes=('', '_y'))
#rename lat/lng
games = games.rename({'lat': 's2_lat', 'lng': 's2_lng'}, axis=1)
#reduce columns
games = games[['season', 'url_school', 's1_lat', 's1_lng', 'opp_url_school', 's2_lat', 's2_lng', 'pts_diff', 'loc_id', 'tourn_lat', 'tourn_lng']]
##calculate distance
#pip install pyproj
from pyproj import Geod
#Distance will be measured on this ellipsoid - more accurate than a spherical method
wgs84_geod = Geod(ellps='WGS84')
#Get distance between pairs of lat-lon points
def Distance(lat1,lon1,lat2,lon2):
    """Geodesic distance in meters between lat/lon point pairs (WGS84).

    Accepts scalars or equal-length sequences; azimuths from Geod.inv are
    discarded.
    """
    forward_azimuth, back_azimuth, meters = wgs84_geod.inv(lon1, lat1, lon2, lat2)
    return meters
#Add/update a column to the data frame with the distances (in meters)
# distance each team travels from campus to the game site
games['s1_dist'] = Distance(games['s1_lat'].tolist(),games['s1_lng'].tolist(),games['tourn_lat'].tolist(),games['tourn_lng'].tolist())
games['s2_dist'] = Distance(games['s2_lat'].tolist(),games['s2_lng'].tolist(),games['tourn_lat'].tolist(),games['tourn_lng'].tolist())
##add recruiting numbers
#convert season to int
games['season'] = games['season'].astype(int)
#join url_school to recruiting table
games = pd.merge(games, rsci_rosters_totals, how='left', on = ['url_school', 'season'])
#rename recruiting column for url_school
games = games.rename({'rsci_rating': 's1_rsci_rating'}, axis=1)
#join opp_url_school to recruiting table
games = pd.merge(games, rsci_rosters_totals, how='left', left_on = ['opp_url_school', 'season'], right_on = ['url_school', 'season'], suffixes=('', '_y'))
#rename recruiting column for opp_url_school
games = games.rename({'rsci_rating': 's2_rsci_rating'}, axis=1)
# add roster continuity numbers
#join url_school to continuity table
games = pd.merge(games, mp_cont, how='left', on = ['url_school', 'season'])
#rename continuity column for url_school
games = games.rename({'continuity': 's1_cont'}, axis=1)
#join opp_url_school to continuity table
games = pd.merge(games, mp_cont, how='left', left_on = ['opp_url_school', 'season'], right_on = ['url_school', 'season'], suffixes=('', '_y'))
#rename continuity column for opp_url_school
games = games.rename({'continuity': 's2_cont'}, axis=1)
#join to coaches
#join url_school to coaches table
games = pd.merge(games, coaches, how='left', on = ['url_school', 'season'])
#rename coaches column for url_school
games = games.rename({'coach_id': 's1_coach_id', 'w_car': 's1_coach_wins', 'l_car': 's1_coach_losses', 'ap_pre': 's1_ap_pre', 'sweet16_apps_car': 's1_coach_sweet16', 'elite8_apps_car': 's1_coach_elite8'}, axis=1)
#join opp_url_school to coaches table
games = pd.merge(games, coaches, how='left', left_on = ['opp_url_school', 'season'], right_on = ['url_school', 'season'], suffixes=('', '_y'))
#rename coaches column for opp_url_school
games = games.rename({'coach_id': 's2_coach_id', 'w_car': 's2_coach_wins', 'l_car': 's2_coach_losses', 'ap_pre': 's2_ap_pre', 'sweet16_apps_car': 's2_coach_sweet16', 'elite8_apps_car': 's2_coach_elite8'}, axis=1)
#rename url_school and opp_url_school
games = games.rename({'url_school': 's1', 'opp_url_school': 's2'}, axis=1)
#reduce columns
games = games[['season', 'loc_id', 's1', 's1_dist', 's1_cont', 's1_ap_pre', 's1_coach_id', 's1_coach_wins', 's1_coach_sweet16', 's1_coach_elite8', 's1_rsci_rating', 's2', 's2_dist', 's2_cont', 's2_rsci_rating', 's2_ap_pre', 's2_coach_id', 's2_coach_wins', 's2_coach_sweet16', 's2_coach_elite8', 'pts_diff']]
#join to t-rank
# once for each side of the matchup, prefixing columns s1_/s2_
games = pd.merge(games, trank, how='left', left_on = ['s1', 'season'], right_on = ['url_school', 'season'], suffixes=('', '_y'))
games = games.rename({'wins': 's1_wins','adjoe1': 's1_adjoe1', 'adjoe2': 's1_adjoe2',
                      'adjoe3': 's1_adjoe3','adjoe4': 's1_adjoe4', 'adjoe5': 's1_adjoe5',
                      'adjde1': 's1_adjde1', 'adjde2': 's1_adjde2',
                      'adjde3': 's1_adjde3', 'adjde4': 's1_adjde4',
                      'adjde5': 's1_adjde5', 'wab1': 's1_wab1',
                      'wab2': 's1_wab2', 'wab3': 's1_wab3', 'wab4': 's1_wab4',
                      'wab5': 's1_wab5'
                      }, axis=1)
games = pd.merge(games, trank, how='left', left_on = ['s2', 'season'], right_on = ['url_school', 'season'], suffixes=('', '_y'))
games = games.rename({'wins': 's2_wins', 'adjoe1': 's2_adjoe1', 'adjoe2': 's2_adjoe2',
                      'adjoe3': 's2_adjoe3','adjoe4': 's2_adjoe4', 'adjoe5': 's2_adjoe5',
                      'adjde1': 's2_adjde1', 'adjde2': 's2_adjde2',
                      'adjde3': 's2_adjde3', 'adjde4': 's2_adjde4',
                      'adjde5': 's2_adjde5', 'wab1': 's2_wab1',
                      'wab2': 's2_wab2', 'wab3': 's2_wab3', 'wab4': 's2_wab4',
                      'wab5': 's2_wab5'
                      }, axis=1)
# final feature set
games = games[['season', 'loc_id', 's1', 's1_dist', 's1_cont', 's1_ap_pre', 's1_wins', 's1_coach_id', 's1_coach_wins', 's1_coach_sweet16', 's1_coach_elite8', 's1_rsci_rating',
               's1_adjoe1', 's1_adjoe2', 's1_adjoe3', 's1_adjoe4', 's1_adjoe5',
               's1_adjde1', 's1_adjde2', 's1_adjde3', 's1_adjde4', 's1_adjde5',
               's1_wab1', 's1_wab2', 's1_wab3', 's1_wab4', 's1_wab5',
               's2', 's2_dist', 's2_cont', 's2_rsci_rating', 's2_ap_pre', 's2_wins', 's2_coach_id', 's2_coach_wins', 's2_coach_sweet16', 's2_coach_elite8',
               's2_adjoe1', 's2_adjoe2', 's2_adjoe3', 's2_adjoe4', 's2_adjoe5',
               's2_adjde1', 's2_adjde2', 's2_adjde3', 's2_adjde4', 's2_adjde5',
               's2_wab1', 's2_wab2', 's2_wab3', 's2_wab4', 's2_wab5', 'pts_diff']]
#manually enter roster continuity for north-dakota-state who has a broken link for 2009
games.loc[(games['s1'] == 'north-dakota-state') & (games['season'] == 2009), 's1_cont'] = .8685
#check to make sure no null values
games.info()
| true
|
dfd5dc7ab0bb9c4344ca0304b06f262bc7447486
|
Python
|
CorcovadoMing/PuzzleGameRobot
|
/module/imageprocess.py
|
UTF-8
| 1,361
| 3.125
| 3
|
[] |
no_license
|
from __future__ import print_function
import Image
def cluster(r, g, b):
    """Classify an RGB pixel into a puzzle-orb color id.

    Returns 0 red, 1 yellow, 2 green, 3 blue, 4 heart, 5 purple (fallback).
    Checks are ordered, so the first matching band wins.
    """
    # FIX: replaced `x in range(a, b)` membership tests with comparison
    # chains — `in range(...)` only matches exact ints (and on Python 2,
    # where this module's xrange usage suggests it runs, builds a full list
    # per call in the hot per-pixel path); chained comparisons are O(1) and
    # behave identically for the int pixel values used here.
    if 70 <= r < 160 and 0 <= g < 50: return 0      # red
    elif 70 <= r < 160 and 50 <= g < 120: return 1  # yellow
    elif 170 <= g < 230 and 0 <= b < 100: return 2  # green
    elif 0 <= r < 70 and 60 <= b < 256 and 60 <= g < 256: return 3  # blue
    elif 200 <= r < 256 and 100 <= b < 200: return 4  # heart
    else: return 5  # purple
def get_color_map():
    """Sample the 5x6 orb grid from puzzle.png and return the colour
    indices as a space-separated string, row-major.

    The board occupies the (698, 326)-(1108, 668) region of the original
    screenshot; puzzle.png is that region cropped out, so sample points
    are shifted by (-698, -326) into image coordinates.
    """
    offsetx = (1108-698)/12
    offsety = (668-326)/10
    # First sample point sits half an orb in from the board's corner.
    first_pointx = 698 + offsetx
    first_pointy = 326 + offsety
    offsetx *= 2
    offsety *= 2
    # Open and decode the screenshot ONCE; the original re-opened the file
    # on every one of the 5 row iterations.
    pix = Image.open("puzzle.png").load()
    result = []
    for row in range(5):
        puzzle_map = [(first_pointx, first_pointy+row*offsety)]
        for num in xrange(5):
            puzzle_map.append((puzzle_map[num][0]+offsetx, puzzle_map[num][1]))
        for (x, y) in puzzle_map:
            # Shift from screen coordinates into the cropped image.
            (r, g, b) = pix[x-698, y-326]
            result.append(cluster(r, g, b))
    return ' '.join([str(c) for c in result])
# The original used `__name__ in '__main__'`, a substring test that also
# matches e.g. '_' or 'main'; equality is the intended check.
if __name__ == '__main__':
    print(get_color_map(), end='')
| true
|
c8ccd2f8c87f00dbff3edc9c52a7b7640818c98a
|
Python
|
guangyi/Algorithm
|
/pacscal's_Triangle.py
|
UTF-8
| 849
| 3.28125
| 3
|
[] |
no_license
|
class Solution:
    # @return a list of lists of integers
    def generate(self, numRows):
        """Return the first numRows rows of Pascal's triangle.

        Row k is built from row k-1: each interior entry is the sum of
        the two entries above it, flanked by 1s.
        """
        if numRows == 0:
            return []
        result = [[1]]
        # The original also kept a dead `index = 2` assignment and paired
        # adjacent entries by index; zip(prev, prev[1:]) does the pairing.
        for _ in range(2, numRows + 1):
            prev = result[-1]
            result.append([1] + [a + b for a, b in zip(prev, prev[1:])] + [1])
        return result
# Smoke-test a few row counts (Python 2 print statements); the expected
# output is recorded in the string literal below.
print Solution().generate(0)
print Solution().generate(2)
print Solution().generate(3)
print Solution().generate(4)
print Solution().generate(5)
'''
[]
[[1], [1, 1]]
[[1], [1, 1], [1, 2, 1]]
[[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
[[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
'''
| true
|
95ff8ebebe842569f41d2006ff7fb73a4d65c446
|
Python
|
nasa/giant
|
/giant/ufo/clearable_queue.py
|
UTF-8
| 4,695
| 3.1875
| 3
|
[
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Copyright 2021 United States Government as represented by the Administrator of the National Aeronautics and Space
# Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved.
from multiprocessing.queues import Queue
from multiprocessing import Value, get_context
from queue import Empty
from typing import Any
class SharedCounter:
    """A synchronized integer counter shared across processes.

    ``multiprocessing.Value`` locks individual reads and writes, but
    ``n += 1`` is a read followed by a write, so a second process can
    read the stale value between the two steps.  Doing the update while
    holding the value's own lock makes the increment atomic.

    This class comes almost entirely from Eli Bendersky's blog:
    http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/
    """

    def __init__(self, n: int = 0):
        # 'i' = C int, stored in shared memory.
        self.count = Value('i', n)

    def increment(self, n: int = 1):
        """Atomically add ``n`` (default 1) to the counter."""
        with self.count.get_lock():
            self.count.value += n

    @property
    def value(self) -> int:
        """Current value of the counter."""
        return self.count.value
class ClearableQueue(Queue):
    """
    A portable implementation of multiprocessing.Queue.
    Because of multithreading / multiprocessing semantics, Queue.qsize() may
    raise the NotImplementedError exception on Unix platforms like Mac OS X
    where sem_getvalue() is not implemented. This subclass addresses this
    problem by using a synchronized shared counter (initialized to zero) and
    increasing / decreasing its value every time the put() and get() methods
    are called, respectively. This not only prevents NotImplementedError from
    being raised, but also allows us to implement a reliable version of both
    qsize() and empty().
    It also adds put_retry()/flush_holder(), which park items that do not
    fit in a local "holder" list and re-queue them as capacity frees up,
    plus clear() for draining the queue.
    Borrowed from https://github.com/keras-team/autokeras/issues/368 and https://stackoverflow.com/a/36018632/3431189
    """

    # Process-safe running count of queued items, kept in sync by put()/get().
    size: SharedCounter

    def __init__(self, *args: list, **kwargs: dict):
        ctx = get_context()
        super().__init__(*args, **kwargs, ctx=ctx)
        self.size = SharedCounter(0)
        # Overflow buffer for put_retry(): items that did not fit are parked
        # here and flushed opportunistically by get()/get_nowait().
        self.holder = []

    @property
    def maxsize(self) -> int:
        """Capacity of the queue, or -1 when the base class exposes none."""
        if hasattr(self, '_maxsize'):
            return self._maxsize
        else:
            return -1

    def put(self, *args, **kwargs):
        """Put an item and increment the shared size counter."""
        super().put(*args, **kwargs)
        self.size.increment(1)

    def get(self, *args, **kwargs) -> Any:
        """
        Gets the results and tries to flush from the holder if anything is in it
        """
        res = super().get(*args, **kwargs)
        try:
            self.size.increment(-1)
        except AttributeError:
            # NOTE(review): `size` is set in __init__/__setstate__, so this
            # should be unreachable -- confirm which pickling path hits it.
            print('something is real wrong')
        self.flush_holder()
        return res

    def __getstate__(self):
        # Append our extra fields to the base Queue's pickle state tuple.
        return super().__getstate__() + (self.size, self.holder)

    def __setstate__(self, state):
        # Peel our two fields off the end of the tuple, in the order
        # __getstate__ appended them, before handing the rest to the base.
        self.size = state[-2]
        self.holder = state[-1]
        super().__setstate__(state[:-2])

    def flush_holder(self):
        """
        Flushes the holder into the queue if it can be
        """
        removes = []
        for ind, held in enumerate(self.holder):
            # Stop once the queue is at capacity (maxsize <= 0 = unbounded).
            if 0 < self.maxsize <= self.qsize():
                break
            self.put(held)
            removes.append(ind)
        # Pop flushed items back-to-front so earlier indices stay valid.
        for rm in removes[::-1]:
            self.holder.pop(rm)

    def get_nowait(self) -> Any:
        """Non-blocking get; decrements the counter and flushes the holder."""
        res = super().get_nowait()
        self.size.increment(-1)
        self.flush_holder()
        return res

    def put_nowait(self, item: Any) -> None:
        """Non-blocking put; increments the counter on success."""
        res = super().put_nowait(item)
        self.size.increment(1)
        return res

    def put_retry(self, item: Any):
        """
        Attempts to put a value unless the queue is full, in which case it will hold onto it until its not full and
        then put it.
        :param item: The thing to be put
        """
        self.holder.append(item)
        self.flush_holder()

    def qsize(self) -> int:
        """ Reliable implementation of multiprocessing.Queue.qsize() """
        # Held-back items count toward the logical size of the queue.
        return self.size.value + len(self.holder)

    def empty(self) -> bool:
        """ Reliable implementation of multiprocessing.Queue.empty() """
        return not self.qsize()

    def clear(self):
        """
        Clear out any data from the queue
        """
        # Drain with non-blocking gets until the queue reports Empty.
        try:
            while True:
                self.get_nowait()
        except Empty:
            pass
| true
|
33666de00d3f24eb66c0a6154dc8672f36924902
|
Python
|
xingyunsishen/pixiu_runoob
|
/69-二分查找.py
|
UTF-8
| 830
| 4.28125
| 4
|
[] |
no_license
|
#-*- coding: utf-8 -*-
# Return the index of x in arr, or -1 when it is not present.
def binarySearch(arr, l, r, x):
    """Recursively binary-search the sorted slice arr[l..r] for x.

    Returns the index of x, or -1 if x is not in the slice.
    """
    if r >= l:
        # Midpoint as l + (r - l) // 2: floor division directly, instead of
        # the original int() around true division; written this way (rather
        # than (l + r) // 2) out of the overflow-safe habit from fixed-width
        # integer languages.
        mid = l + (r - l) // 2
        # x sits exactly at the midpoint.
        if arr[mid] == x:
            return mid
        # x is smaller than the middle element: search the left half.
        elif arr[mid] > x:
            return binarySearch(arr, l, mid-1, x)
        # x is larger than the middle element: search the right half.
        else:
            return binarySearch(arr, mid+1, r, x)
    else:
        # Empty slice: x is not present.
        return -1
# Test array (must already be sorted for binary search).
arr = [2, 3, 4, 5, 10, 30]
x = 10
# Search the whole array for x.
result = binarySearch(arr, 0, len(arr)-1, x)
if result != -1:
    print("元素在数组中的索引为%d" % result)
else:
    print("元素不在数组中!")
| true
|
e492316a99fe822407cf24d9ca2ad39bda87d6d1
|
Python
|
hu279318344/Python-work
|
/PycharmProjects/Task/tread/tread2.py
|
UTF-8
| 469
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: Python 3.6
@author: Admin
@license: Apache Licence
@contact: yang.hu@live.com
@software: PyCharm
@file: tread2.py
@time: 2017/4/1 15:21
"""
import threading
import time
class MyThread(threading.Thread):
    """Thread that announces its name and then sleeps for two seconds."""

    def __init__(self, name):
        # The original read `def_init_(self,name)` with no colon and a
        # mangled dunder name -- a SyntaxError, so the class never loaded.
        threading.Thread.__init__(self)
        # threading.Thread exposes `name` as a property; this overrides the
        # auto-generated "Thread-N" name.
        self.name = name

    def run(self):
        print('Hi ,Iam threda', self.name)
        time.sleep(2)
# Start ten worker threads.  The original wrote `rang(10)` -- a NameError
# (typo for the builtin `range`), so the loop never ran.
for i in range(10):
    t = MyThread(i)
    t.start()
| true
|
cda5f2d46758fcc29f397ebf055932633f744a75
|
Python
|
jiachen247/CodeIt2018-CreditSussie
|
/codeitsuisse/routes/tallyexpense.py
|
UTF-8
| 2,033
| 2.578125
| 3
|
[] |
no_license
|
import logging
import operator
from flask import request, jsonify;
from codeitsuisse import app;
logger = logging.getLogger(__name__)
@app.route('/tally-expense', methods=['POST','GET'])
def evaluate_tally_expense():
    """Split shared expenses among persons and return settling transactions.

    Expects JSON of the form
        {"persons": [...],
         "expenses": [{"amount": ..., "paidBy": ..., "exclude": ...}, ...]}
    and responds with {"transactions": [{"from", "to", "amount"}, ...]}
    that zero out everyone's balance.
    """
    data = request.get_json();
    print("input: {}".format(data))
    logging.info("data sent for evaluation {}".format(data))
    list_of_persons = data.get("persons");
    # tally[p] > 0 means p owes money; tally[p] < 0 means p is owed money.
    tally = {}
    for x in list_of_persons:
        tally[x] = 0
    expenses = data.get("expenses");
    for x in expenses:
        amount = x.get("amount")
        paidBy = x.get("paidBy")
        exclude = x.get("exclude")
        # "exclude" may be a single name or a list of names; everyone not
        # excluded shares the expense equally.
        if isinstance(exclude, list):
            payable = [a for a in list_of_persons if (not a in exclude)]
        else:
            payable = [a for a in list_of_persons if not a == exclude]
        eachpay = amount/len(payable)
        for x in payable:
            tally[x] += eachpay
        # Credit the payer with the full amount fronted.
        tally[paidBy] -= amount
    balancer = {"transactions": []}
    # Greedy settlement: repeatedly pair the largest debtor with the largest
    # creditor until the biggest outstanding balance is within a small float
    # tolerance of zero (balances are floats, so exact zero is not reliable).
    while sorted(tally.items(), key=operator.itemgetter(1),reverse=True)[0][1]>0.0001:
        sorted_tally = sorted(tally.items(), key=operator.itemgetter(1),reverse=True)
        balances = {}
        # Sum of the most-positive and most-negative balances; its sign tells
        # which of the two parties is fully settled by this transaction.
        diff_highest_lowest = sorted_tally[0][1] + sorted_tally[-1][1]
        if diff_highest_lowest > 0:
            # Debtor still owes after paying the creditor in full.
            balances["from"] = sorted_tally[0][0]
            balances["to"] = sorted_tally[-1][0]
            balances["amount"] = round(abs(sorted_tally[-1][1]), 2)
            tally[sorted_tally[-1][0]] = 0
            tally[sorted_tally[0][0]] = diff_highest_lowest
        else:
            # Debtor is wiped out; creditor keeps the remainder.
            balances["from"] = sorted_tally[0][0]
            balances["to"] = sorted_tally[-1][0]
            balances["amount"] = round(abs(sorted_tally[0][1]), 2)
            tally[sorted_tally[-1][0]] = diff_highest_lowest
            tally[sorted_tally[0][0]] = 0
        balancer["transactions"].append(balances)
    result = balancer
    logging.info("My result :{}".format(result))
    print("output: {}".format(result))
    return jsonify(result);
| true
|
1621d7dcce28e92cac82779a1197bdd7ebf0e987
|
Python
|
yigalirani/leetcode
|
/20_valid_parentheses.py
|
UTF-8
| 1,003
| 3
| 3
|
[] |
no_license
|
class Solution(object):
    def isValid(self, s):
        """Return True when s is a correctly matched bracket sequence.

        Iterative stack formulation of the original recursive-descent
        parser: an opener pushes its expected closer; a closer must match
        the top of the stack; anything else is a syntax error.  The empty
        string is valid.
        """
        closers = {
            '{': '}',
            '[': ']',
            '(': ')'
        }
        pending = []
        for ch in s:
            if ch in closers:
                # Remember which closer this opener demands.
                pending.append(closers[ch])
            elif pending and pending[-1] == ch:
                # Expected closer found; this pair is done.
                pending.pop()
            else:
                # Unexpected character or mismatched/premature closer.
                return False
        # Valid only if every opener was closed.
        return not pending
def run(*test_cases):
    """Print the validity verdict for each sample next to the sample itself."""
    for sample in test_cases:
        print(Solution().isValid(sample), sample)
# Exercise one balanced pair, one nested sequence, and one invalid mix.
run(
    "()",
    "[([])]",
    "(((((}}}"
)
| true
|
d09727386f0b66307c9293c379374c1f57054d99
|
Python
|
ericrommel/codenation_python_web
|
/Week01/Chapter05/Exercises/ex_5-11.py
|
UTF-8
| 1,036
| 4.90625
| 5
|
[] |
no_license
|
# Write a function is_rightangled which, given the length of three sides of a triangle, will determine whether the
# triangle is right-angled. Assume that the third argument to the function is always the longest side. It will return
# True if the triangle is right-angled, or False otherwise.
#
# Hint: Floating point arithmetic is not always exactly accurate, so it is not safe to test floating point numbers for
# equality. If a good programmer wants to know whether x is equal or close enough to y, they would probably code it up
# as:
# if abs(x-y) < 0.000001: # If x is approximately equal to y
def is_rightangled(a, b, c):
    """Return True if sides (a, b, c) form a right-angled triangle.

    c is expected to be the hypotenuse (the longest side).  When c is
    not strictly the longest side, the triangle cannot be right-angled
    with c as hypotenuse, so False is returned.
    """
    if not (c > a and c > b):
        # The original returned an explanatory *string* here, which is
        # truthy and therefore read as "is right-angled" in any boolean
        # context -- the opposite of the intended answer.
        return False
    # Floating point arithmetic is inexact, so compare with a small
    # tolerance rather than testing for exact equality.
    return abs(c**2 - (a**2 + b**2)) < 0.000001
# Gather the three side lengths interactively (whole numbers only) and
# report whether they form a right-angled triangle.
side1 = int(input("Type the length of side 1: "))
side2 = int(input("Type the length of side 2: "))
side3 = int(input("Type the length of side 3: "))
print("Is this a right-angle triangle? ", is_rightangled(side1, side2, side3))
| true
|
7ac965a38cf19be224e0026f4ec768d9d4e3896e
|
Python
|
jdrese/PyFlow
|
/PyFlow/UI/Canvas/SelectionRect.py
|
UTF-8
| 2,330
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
from Qt import QtGui, QtWidgets, QtCore
class SelectionRect(QtWidgets.QGraphicsWidget):
    """Rubber-band selection rectangle drawn while dragging on the canvas.

    The fill colour reflects the active keyboard modifiers: plain select,
    Shift = add, Ctrl = toggle, Ctrl+Shift = subtract.
    """

    # Fill colours per modifier state (RGBA, mostly transparent) and the
    # dashed white outline pen.
    __backgroundColor = QtGui.QColor(100, 100, 100, 50)
    __backgroundAddColor = QtGui.QColor(0, 100, 0, 50)
    __backgroundSubColor = QtGui.QColor(100, 0, 0, 50)
    __backgroundSwitchColor = QtGui.QColor(0, 0, 100, 50)
    __pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 1.0, QtCore.Qt.DashLine)

    def __init__(self, graph, mouseDownPos, modifiers):
        """graph: owning canvas; mouseDownPos: scene position where the drag
        started; modifiers: keyboard modifiers active at mouse-down."""
        super(SelectionRect, self).__init__()
        self.setZValue(2)  # draw above ordinary scene items
        self.__graph = graph
        self.__graph.scene().addItem(self)
        self.__mouseDownPos = mouseDownPos
        self.__modifiers = modifiers
        # Start as a zero-size rect anchored at the press position.
        self.setPos(self.__mouseDownPos)
        self.resize(0, 0)
        # When True, only items wholly inside the rectangle count as hit.
        self.selectFullyIntersectedItems = False

    def collidesWithItem(self, item):
        # In "fully intersected" mode, require containment of the item's
        # whole bounding rect instead of any overlap.
        if self.selectFullyIntersectedItems:
            return self.sceneBoundingRect().contains(item.sceneBoundingRect())
        return super(SelectionRect, self).collidesWithItem(item)

    def setDragPoint(self, dragPoint, modifiers):
        """Resize so the rect spans from the mouse-down point to dragPoint."""
        self.__modifiers = modifiers
        topLeft = QtCore.QPointF(self.__mouseDownPos)
        bottomRight = QtCore.QPointF(dragPoint)
        # Normalize so topLeft really is top-left even when dragging
        # upward or leftward past the anchor point.
        if dragPoint.x() < self.__mouseDownPos.x():
            topLeft.setX(dragPoint.x())
            bottomRight.setX(self.__mouseDownPos.x())
        if dragPoint.y() < self.__mouseDownPos.y():
            topLeft.setY(dragPoint.y())
            bottomRight.setY(self.__mouseDownPos.y())
        self.setPos(topLeft)
        self.resize(bottomRight.x() - topLeft.x(),
                    bottomRight.y() - topLeft.y())

    def paint(self, painter, option, widget):
        """Fill with the modifier-specific colour and draw a dashed outline."""
        rect = self.windowFrameRect()
        if self.__modifiers == QtCore.Qt.NoModifier:
            painter.setBrush(self.__backgroundColor)
        # NOTE(review): this second chain starts with `if`, not `elif`; the
        # result is the same here since NoModifier excludes the other cases,
        # but an unmatched modifier combo leaves the previous brush in use.
        if self.__modifiers == QtCore.Qt.ShiftModifier:
            painter.setBrush(self.__backgroundAddColor)
        elif self.__modifiers == QtCore.Qt.ControlModifier:
            painter.setBrush(self.__backgroundSwitchColor)
        elif self.__modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier:
            painter.setBrush(self.__backgroundSubColor)
        painter.setPen(self.__pen)
        painter.drawRect(rect)

    def destroy(self):
        """Remove the rectangle from the scene (call when the drag ends)."""
        self.__graph.scene().removeItem(self)
| true
|
37c14cbd197fc30b72e14761ec65045f54186608
|
Python
|
buzhdiao/deep-learning-with-python-notebooks
|
/tensorflow_version/3.6-classifying-newswires.py
|
UTF-8
| 8,165
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
import tensorflow as tf
tf.__version__
# '2.0.0-alpha0'
# This section uses the Reuters dataset: short newswires and their topics,
# released by Reuters in 1986.  It is a simple, widely used text
# classification dataset with 46 distinct topics; some topics have more
# samples, but every topic has at least 10 training samples.
from tensorflow.keras.datasets import reuters
# Load the dataset; num_words=10000 keeps only the 10,000 most frequent
# words of the training data so all samples share one bounded vocabulary.
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)
# train_data has shape (8982,), train_labels has shape (8982,);
# test_data has shape (2246,), test_labels has shape (2246,).
len(train_data)
# 8982
len(test_data)
#2246
train_data[10][:10]
#[1, 245, 273, 207, 156, 53, 74, 160, 26, 14]
# Fetch the Reuters word -> integer-index mapping, e.g. {'the': 1, 'at': 20054}.
word_index = reuters.get_word_index()
# Invert the mapping into index -> word, e.g. {1: 'the', 2: 'of', ...}.
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Decode train_data[0]: look each index up in reverse_word_index and join
# with spaces.  dict.get(key, default) falls back to '?' for unknown ids.
# The -3 offset accounts for the reserved indices 0, 1 and 2 (padding,
# start-of-sequence, unknown).
decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
decoded_newswire
#? ? ? said as a result of its december acquisition of space co it expects earnings per share in 1987 of 1 15 to 1 30 dlrs per share up from 70 cts in 1986 the company
train_labels[10]
# 3
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer sequences into a (len(sequences), dimension)
    float matrix: row i has 1. in every column listed in sequences[i]."""
    encoded = np.zeros((len(sequences), dimension))
    for row, indices in enumerate(sequences):
        # Fancy indexing sets all listed columns of this row at once.
        encoded[row, indices] = 1.
    return encoded
# Shape (8982, 10000): vectorize the training data.
x_train = vectorize_sequences(train_data)
# Shape (2246, 10000): vectorize the test data.
x_test = vectorize_sequences(test_data)
# One-hot encode the labels.
def to_one_hot(labels, dimension=46):
    """One-hot encode integer class labels into a (len(labels), dimension)
    float matrix: row i has 1. in column labels[i]."""
    encoded = np.zeros((len(labels), dimension))
    for row, label in enumerate(labels):
        encoded[row, label] = 1.
    return encoded
one_hot_train_labels = to_one_hot(train_labels)
one_hot_test_labels = to_one_hot(test_labels)
from tensorflow.python.keras.utils.np_utils import to_categorical
# Keras' built-in one-hot encoder; this overwrites the hand-rolled
# encoding above with an equivalent result.
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
# Import the model container ...
from tensorflow.keras import models
# ... and the layer classes.
from tensorflow.keras import layers
# A Sequential model is a plain linear stack of layers; see
# https://keras-cn.readthedocs.io/en/latest/getting_started/sequential_model/
model = models.Sequential()
# Hidden layer: input (10000,), output (64,), relu activation.
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
# Hidden layer: input (64,), output (64,), relu activation.
model.add(layers.Dense(64, activation='relu'))
# Output layer: (46,) with softmax -- one probability per topic.
model.add(layers.Dense(46, activation='softmax'))
model.summary()
#Model: "sequential_4"
#_________________________________________________________________
#Layer (type)                 Output Shape              Param #
#=================================================================
#dense_12 (Dense)             (None, 64)                640064
#_________________________________________________________________
#dense_13 (Dense)             (None, 64)                4160
#_________________________________________________________________
#dense_14 (Dense)             (None, 46)                2990
#=================================================================
#Total params: 647,214
#Trainable params: 647,214
#Non-trainable params: 0
#_________________________________________________________________
# compile() configures the learning process: the optimizer, the loss
# (categorical cross-entropy for multi-class one-hot labels), and the
# metrics to track during training.
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Hold out the first 1000 training samples as a validation set.
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
# Train for 20 epochs in mini-batches of 512, monitoring loss and accuracy
# on the held-out samples via validation_data.  fit() returns a History
# object whose .history dict records every quantity measured per epoch.
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
history_dict = history.history
history_dict.keys()
import matplotlib.pyplot as plt
# Plot training vs validation loss per epoch.
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# clf() clears the current figure before the next plot.
plt.clf() # clear figure
# Plot training vs validation accuracy per epoch.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Retrain the same architecture from scratch, but stop at 8 epochs -- the
# point where the curves above begin to overfit -- then evaluate on test.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(partial_x_train,
          partial_y_train,
          epochs=8,
          batch_size=512,
          validation_data=(x_val, y_val))
# evaluate() returns the loss value and metric values in test mode.
results = model.evaluate(x_test, one_hot_test_labels)
# Baseline sanity check: accuracy of a random-permutation "classifier".
import copy
test_labels_copy = copy.copy(test_labels)
np.random.shuffle(test_labels_copy)
float(np.sum(np.array(test_labels) == np.array(test_labels_copy))) / len(test_labels)
# 0.1861086375779163
# Predict: one probability distribution over the 46 topics per sample.
predictions = model.predict(x_test)
#(46,)
predictions[0].shape
#0.99999994
np.sum(predictions[0])
#3
np.argmax(predictions[0])
# Alternative labelling: keep labels as integer tensors and pair them with
# the sparse categorical cross-entropy loss instead of one-hot vectors.
y_train = np.array(train_labels)
y_test = np.array(test_labels)
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['acc'])
# Demonstrate an information bottleneck: squeezing 46-way class
# information through a 4-unit hidden layer noticeably hurts accuracy.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(4, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(partial_x_train,
          partial_y_train,
          epochs=20,
          batch_size=128,
          validation_data=(x_val, y_val))
# Accuracy drops by roughly 8%.
| true
|
84c48fae3189dc47b766d44397d9afbcdbdd1c40
|
Python
|
LaraCalvo/TalkAboutSeries
|
/text_preprocess.py
|
UTF-8
| 1,352
| 3.34375
| 3
|
[] |
no_license
|
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
###########################
#Text preprocessing methods
###########################
def noise_removal(words):
    """Drop punctuation/noise tokens from a list of words."""
    # A set gives O(1) membership tests; the original scanned a list for
    # every token.
    noise = {'?', '!', '.', ',', '[', ']', '-', '_'}
    return [word for word in words if word not in noise]
def tokenize(sentence):
    """Split a sentence into word/punctuation tokens via NLTK."""
    return nltk.word_tokenize(sentence)
def stem(word):
    """Lower-case a word and reduce it to its Porter stem."""
    return stemmer.stem(word.lower())
#Filter stop words and duplicates
def filter_stop_words(words):
    """Remove English stop words, deduplicate, and return a sorted list."""
    stop_words = set(stopwords.words('english'))
    # A set comprehension both filters and deduplicates in one pass.
    kept = {word for word in words if word not in stop_words}
    return sorted(kept)
def bag_of_words(tokenized_sentence, words):
    """Return a float32 0/1 vector marking which vocabulary words occur.

    tokenized_sentence is stemmed first; slot i of the result is 1 when
    words[i] appears among the stems, 0 otherwise.
    """
    # Stem the sentence once up front; a set makes the per-word test O(1).
    stems = {stem(token) for token in tokenized_sentence}
    bag = np.zeros(len(words), dtype=np.float32)
    for position, vocab_word in enumerate(words):
        if vocab_word in stems:
            # Mark the slot for every vocabulary word present.
            bag[position] = 1
    return bag
def preprocess(words):
    """Full pipeline: strip punctuation, stem each token, drop stop words
    (which also deduplicates and sorts)."""
    stemmed = [stem(token) for token in noise_removal(words)]
    return filter_stop_words(stemmed)
| true
|
00b8fbaa4f5e47d5f66b4f49cfda887da32d6dc5
|
Python
|
zmxhdu/excel
|
/新建文件夹/color.py
|
UTF-8
| 287
| 3.078125
| 3
|
[] |
no_license
|
import os
import openpyxl
from openpyxl.styles import Color, Fill
wb = openpyxl.load_workbook('工作簿1.xlsx')
sheet = wb.get_active_sheet()
# Worksheet rows/columns are 1-based and range() excludes its upper bound,
# so +1 is required to reach max_row/max_column; the original loop
# silently skipped the last row and the last column.
for i in range(1, sheet.max_row + 1):
    for j in range(1, sheet.max_column + 1):
        cell = sheet.cell(row=i, column=j)
        print(cell.fill)
| true
|
5541cf7623e5dad8851e9beac7f1fc186fd42aea
|
Python
|
hxperl/hackerrank
|
/python3/Strings/SwapCase.py
|
UTF-8
| 267
| 3.453125
| 3
|
[] |
no_license
|
def swap_case(s):
    """Return s with the case of every character inverted.

    Uppercase characters become lowercase; everything else (including
    uncased characters like digits) goes through upper(), exactly as the
    original per-character loop did.
    """
    # Generator expression + join instead of building a temporary list
    # with repeated append() calls.
    return ''.join(c.lower() if c.isupper() else c.upper() for c in s)
if __name__=='__main__':
    # Ad-hoc demo; expected output: FSDOJFsfqSODIFOQF
    string = 'fsdojfSFQsodifoqf'
    print(swap_case(string))
| true
|
16086581953ca38f17b934064fe4cce57696924e
|
Python
|
kvbik/lightweight-virtualenv
|
/tests/test_virtualenv.py
|
UTF-8
| 2,991
| 2.515625
| 3
|
[] |
no_license
|
import sys, os
from os import path
from shutil import rmtree, copytree
from unittest import TestCase
from tempfile import mkdtemp
from subprocess import Popen, PIPE
class TestRunCase(TestCase):
    """Integration tests for the lightweight virtualenv.

    Each test copies the bundled ./py/ environment into a fresh temporary
    directory and drives its bin/python.py shim through subprocesses.
    """

    def setUp(self):
        # store curr path
        self.oldcwd = os.getcwd()
        # create test dir structure
        self.directory = mkdtemp(prefix='test_virtualenv_')
        self.virtualenv = path.join(self.directory, 'py')
        self.python = path.join(self.virtualenv, 'bin', 'python.py')
        # copy virtualenv there
        copytree('./py/', self.virtualenv)
        # test modules
        self.imported = []

    def run_command(self, cmd):
        """Run cmd in a subprocess and return its (stdout, stderr) pair."""
        # On win32 the command string must not be run through a shell.
        shell = sys.platform != 'win32'
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=shell)
        return p.communicate()

    def test_python_itself(self):
        """The shim should execute inline -c code."""
        cmd = '%s %s -c "print 128"' % (sys.executable, self.python)
        stdout, stderr = self.run_command(cmd)
        self.failUnlessEqual('128', stdout.strip())

    def test_run_python_script(self):
        """A script run with no arguments prints an empty argument list."""
        script = path.join(self.oldcwd, 'tests', 'scripts','print.py')
        cmd = '%s %s %s' % (sys.executable, self.python, script)
        stdout, stderr = self.run_command(cmd)
        self.failUnlessEqual('', stdout)

    def test_run_python_script_with_args(self):
        """Command-line arguments must be forwarded to the script."""
        script = path.join(self.oldcwd, 'tests', 'scripts','print.py')
        cmd = '%s %s %s a b c' % (sys.executable, self.python, script)
        stdout, stderr = self.run_command(cmd)
        self.failUnlessEqual("['a', 'b', 'c']", stdout.strip())

    def install_some_way(self, inst_type, inst_command='install'):
        """Install the venvtest-<inst_type> fixture package into the env,
        then verify it imports with the expected version and that its
        module file lives inside the virtualenv."""
        os.chdir(path.join(self.oldcwd, 'tests', 'installs', 'venvtest-%s' % inst_type))
        inst = '%s %s setup.py %s' % (sys.executable, self.python, inst_command)
        stdout, stderr = self.run_command(inst)
        os.chdir(self.oldcwd)
        print 'stdout:'
        print stdout
        print 'stderr:'
        print stderr
        self.failUnlessEqual('', stderr)
        cmd = '%s %s -c "import venvtest; print venvtest.__versionstr__"' % (sys.executable, self.python)
        stdout, stderr = self.run_command(cmd)
        expected = '0.1.0'
        self.failUnlessEqual(expected, stdout.strip())
        cmd = '%s %s -c "import venvtest; print venvtest.__file__"' % (sys.executable, self.python)
        stdout, stderr = self.run_command(cmd)
        # Split the reported module path into env prefix / inner path /
        # filename to prove the module was imported from the virtualenv.
        a = len(self.virtualenv)
        b = -len('venvtest.pyc')
        env = stdout.strip()[:a]
        mod = stdout.strip()[b:]
        pth = stdout.strip()[a:b]
        print pth
        self.failUnlessEqual(self.virtualenv, env)
        self.failUnlessEqual('venvtest.pyc', mod)

    def test_install_distutils_way(self):
        self.install_some_way('distutils')

    def test_install_setuptools_way(self):
        self.install_some_way('setuptools')

    def tearDown(self):
        # go back
        os.chdir(self.oldcwd)
        # dir cleanup
        rmtree(self.directory, True)
| true
|
8013d64de8c454a30b6a7a6ba772d57a506d8b7e
|
Python
|
Harrisonsam932/Python
|
/Previous Courses/KITS/Samples/sample66.py
|
UTF-8
| 176
| 3.46875
| 3
|
[] |
no_license
|
class SampleDemo:
    def display(self, *var):
        """Print the sum of all positional arguments (0 when none given)."""
        # The builtin sum() replaces the manual accumulator loop.
        print(sum(var))
# Demonstrate *args: display() sums however many values are passed.
obj = SampleDemo()
obj.display(10,20,30,40,50)
| true
|
5fe16d4a30306fec5dd3e8899ef59604595fb75c
|
Python
|
IdanErgaz/test-Delete
|
/pingByCsvInput.py
|
UTF-8
| 1,485
| 3.1875
| 3
|
[] |
no_license
|
#ping to destination number of times after reading and using csv as input
import csv, time, subprocess
# Module-level defaults; count/destination are re-read from the CSV in the
# main loop below.
count=0
destination=0
# Input configuration file and the per-run output file suffix.
csvFile='EnvVars.csv'
resFile='pingRes.txt'
#Function to read details from csv
def readFromCsv(csvFileName):
with open (csvFileName) as csvfile:
reader = csv.reader(csvfile,delimiter=',')
line=0
for row in reader:
if line==0:
line+=1
pass
else:
count, destination = int(row[0]), row[1]
print("count:", count)
print("destination:", destination)
return count, destination
#function which ping to the given cesination using the count and runNumber
def sendPing(destination, count, runNumber):
# subprocess.run('ping -n '+str(count)+ ' '+destination+ ' '+ '>'+str(runNumber)+'pingRes.text', shell=True)
subprocess.run('ping -n '+ str(count) + ' '+ destination + ' > ' +str(runNumber) + resFile, shell=True)
############################################################################################################
#Main:
runNumber=0
loop_times=2
while runNumber<loop_times:
print("Starting with ping test...")
vars=readFromCsv(csvFile)
count=vars[0]
destination=vars[1]
print('count is:{}'.format(count))
print('destination is:{}'.format(destination))
sendPing(destination, count, runNumber)
print('Finish with the pint test iteration')
time.sleep(3)
runNumber+=1
| true
|
91cc671593aeaa567cabc4dc21ea9e6a9000b691
|
Python
|
ducdh-dev/python_quiz
|
/python_quiz/bitwise_operators.py
|
UTF-8
| 1,024
| 4.03125
| 4
|
[] |
no_license
|
a = int("00111100", 2)  # parse the string as base 2 (binary)
b = int("00001101", 2)
# a >> 2 => 00001111 => shift right by 2 bits
# a << 2 => 11110000 => shift left by 2 bits
# a & b  => 00001100 => bit is 1 when set in BOTH operands
# a | b  => 00111101 => bit is 1 when set in EITHER operand
# ~a     => 11000011 => invert every bit
# a ^ b  => 00110001 => bit is 1 when set in exactly one operand
arr1 = [1, 1, 2, 2, 3, 5, 8, 13, 21, 34, 55, 89]
arr2 = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# On sets the same operators perform set algebra:
print(set(arr1) & set(arr2)) # intersection of the two arrays
print(set(arr1) | set(arr2)) # union of the two arrays
print(set(arr1) ^ set(arr2)) # elements in exactly one of the arrays
from collections import Counter
def commonCharacterCount(s1, s2):
    """Count how many elements s1 and s2 share, respecting multiplicity.

    Counter & Counter keeps, per element, the minimum of the two counts,
    so summing those counts gives the size of the multiset intersection.
    """
    counter1 = Counter(s1)
    counter2 = Counter(s2)
    intersection = counter1 & counter2
    # Debug output; the `{name = }` f-string form (3.8+) prints both the
    # expression text and its value.
    print(f"{counter1 = }")
    print(f"{counter2 = }")
    print(f"{intersection = }")
    return sum(intersection.values())
# Despite the name, this works on any hashables: here it counts the
# elements common to the two integer lists above, with multiplicity.
print(commonCharacterCount(arr1, arr2))
| true
|
38f9fc72423c5d44704e123c8aa116d7de96cd11
|
Python
|
Fraxinus/stock
|
/pythonProject/class/matlabtest.py
|
UTF-8
| 7,878
| 2.75
| 3
|
[] |
no_license
|
#_*_coding:utf-8_*_
from PyQt4 import QtGui, QtCore, uic
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as figureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import sys
import prettyplotlib as ppl
class DrawWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(DrawWidget, self).__init__(parent)
figure = plt.gcf()
x = [1, 2, 3, 3]
y = [4, 5, 5, 6]
t = np.arange(0., 5., 0.2)
# plt.plot(t, t, 'g--', t, t*2, 'bs', t, t**2, 'r^')
# plt.axis([-2, 10, -2, 30])
# # 是指定xy坐标的起始范围,它的参数是列表[xmin, xmax, ymin, ymax]。
# plt.text(2, .25, r'$\mu=100,\ \sigma=15$')
# plt.title('example')
# plt.xlabel('x')
# plt.ylabel('y')
self.xxlineH = None
self.xxlineV = None
self.xxax = figure.gca()
# fig, ax = plt.subplots(1)
# np.random.seed(14)
x = ppl.plot(figure.gca(), t, t, '--', color=(255/255.,150/255.,250/255.), label=str('t, t'), pickradius=28.0)
ppl.plot(figure.gca(), t, t*2, label=str(' t, t*2'), pickradius=8.0)
ppl.plot(figure.gca(), t, t**2, label=str('t, t**2'), pickradius=8.0)
ppl.legend(figure.gca(), loc='upper left', ncol=3)
# figure.gca().lines.remove(x[0])
# ax = plt.gca()#移动坐标轴
# ax.spines['right'].set_color('none')#去除右边的轴
# ax.spines['top'].set_color('none')#去除顶轴
# ax.xaxis.set_ticks_position('bottom')
# #下轴移至数据0点,理想状态下0点为中心点,具体跟数据位置有关
# ax.spines['bottom'].set_position(('data', 0))
# ax.yaxis.set_ticks_position('left')
# ax.spines['left'].set_position(('data', 0))
# plt.xlim(t.min()*1.1, t.max()*1.1)#X轴的范围
# plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi],#从新定义刻度
# [r'$-\pi$',r'$-\pi/2$',r'$0$',r'$\pi/2$',r'$\pi$'])#X轴的刻度值
# plt.ylim(s.min()*1.1,s.max()*1.1)#Y轴的范围
# plt.yticks([-1,0,1],[r'$-1$',r'$0$',r'$+1$']) #设置Y轴的刻度值,第二个参数对其进行格式化
plt.annotate(r'$sin(\frac{2\pi}{3})=(\frac{\sqrt{3}}{2})$',
xy=(5, 5), xycoords='data',
xytext=(15, 200), textcoords='offset points', fontsize=16,
arrowprops = dict(arrowstyle='->', connectionstyle='arc3,rad=.1'))
plt.plot([5, 5], [0, 5], 'ro', color='black', linewidth=1.0, linestyle='--', label='$cos(x)$')
# plt.plot([5, 5], [0, 5],'ro', linewidth=5.0, label='$sin(x)$')
# plt.scatter([5, 5], [0, 5], 50, color='red')
# for i in ax.get_xticklabels() + ax.get_yticklabels():#从新设置所有bbox
# i.set_fontsize(15)
# i.set_bbox(dict(facecolor='white',edgecolor='none',alpha=0.65))
# 'button_press_event':鼠标按键按下时触发
# 'button_release_event':鼠标按键释放时触发
# 'motion_notify_event':鼠标移动时触发
# 当前的所有注册的响应函数可以通过Figure.canvas.callbacks.callbacks
for key, funcs in figure.canvas.callbacks.callbacks.iteritems():
print key
for cid, wrap in sorted(funcs.items()):
func = wrap.func
print " {0}:{1}.{2}".format(cid, func.__module__, func)
self.text = figure.gca().text(0.5, 10.5, "event", ha="center", va="center", fontdict={"size":20})
self.canvas = figureCanvas(figure)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus) ##qt4需要加这两句,否者信号被qt拦截,无法到达matplot
self.canvas.setFocus()
figure.canvas.mpl_connect('key_press_event', self.on_key_press)
# figure.canvas.mpl_disconnect(figure.canvas.manager.key_press_handler_id)
figure.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
self.canvas.draw()
figure2 = plt.figure(2, figsize=(8, 4), facecolor='green', edgecolor='red')
#figsize = (8,4)表示figure的大小,屏幕显示 640 * 320 , 输出显示 800*400,这个要注意。
#显示色和外框线条颜色设置。
self.canvas2 = figureCanvas(figure2)
plt.subplot(311)# 子区,3行,1列, 第1个
y = [1, 2, 3, 4]
x = [4, 5, 5, 6]
plt.plot(x, y, 'bo', x, y, 'r')
plt.title('examrple2')
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(323)# 子区,3行,2列, 第3个
x = [1, 2, 3]
y = [4, 5, 6]
plt.bar(x, y)
plt.title('Example3')
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(336)# 子区,3行,3列, 第6个
x = [1, 2, 3]
y = [4, 5, 6]
plt.scatter(x, y)
plt.title('Example4')
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(313)# 子区,3行,1列, 第3个
mu, sigma = 100, 15
x = mu + sigma*np.random.randn(10000)
# the histogram of the data
n, bins, patches = plt.hist(x, 150, normed=1, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
# import prettyplotlib as ppl
# fig, ax = plt.subplots(1)
# np.random.seed(14)
# n = 10
# ppl.bar(plt.gca(), np.arange(n), np.abs(np.random.randn(n)), annotate=True, grid='y')
layout = QtGui.QHBoxLayout(self)
layout.addWidget(self.canvas)
layout.addWidget(self.canvas2)
self.canvas2.draw()
def on_mouse_move(self, event):
    """Track the mouse over the figure: dump the event to stdout, update
    the info text, highlight the line under the cursor and redraw the
    crosshair at the cursor position.

    NOTE(review): Python 2 code (print statements). Assumes self.text,
    self.xxax, self.xxlineH and self.xxlineV were initialized in
    __init__ — confirm against the constructor (not visible here).
    """
    print event.name, ',', event.x, ',', event.y, ',', event.xdata, ',', event.ydata
    # xdata/ydata are None when the cursor is outside the axes (a data
    # coordinate of exactly 0 is also skipped by this truthiness test).
    if event.xdata and event.ydata:
        info = "{}\nButton:{}\nFig x,y:{}, {}\nData x,y:{:3.2f}, {:3.2f}".format(
            event.name, event.button, event.x, event.y, event.xdata, event.ydata)
        self.text.set_text(info)
        # Highlight the first line containing the event; the for-else
        # clears the highlight when no line is under the cursor.
        for line in self.xxax.lines:
            if line.contains(event)[0]:
                self.highlight(line)
                break
        else:
            self.highlight(None)
        # draw the crosshair
        if not self.xxlineH:
            print 'draw line'
            self.xxlineH = self.xxax.plot([0, event.xdata], [event.ydata, event.ydata], 'k')[0]
            self.xxlineV = self.xxax.plot([event.xdata, event.xdata], [0, event.ydata], 'k')[0]
        else:
            # Remove the previous crosshair before drawing a new one so
            # the axes do not accumulate stale lines.
            self.xxax.lines.remove(self.xxlineH)
            self.xxax.lines.remove(self.xxlineV)
            self.xxlineH = self.xxax.plot([0, event.xdata], [event.ydata, event.ydata], 'k')[0]
            self.xxlineV = self.xxax.plot([event.xdata, event.xdata], [0, event.ydata], 'k')[0]
        # Anchor the info text at the cursor and repaint the canvas.
        self.text.set_x(event.xdata)
        self.text.set_y(event.ydata)
        self.canvas.draw()
def on_key_press(self, event):
    """Key handler: log every key, close the widget on Escape.

    NOTE(review): Python 2 print statement.
    """
    print event.key
    # sys.stdout.flush()
    if event.key == 'escape':
        self.close()
def highlight(self, target):
    """Emphasize one line on the axes by thickening it; reset all others.

    Parameters
    ----------
    target : matplotlib Line2D or None
        The line to emphasize; ``None`` clears any existing highlight.

    Bug fix: the original reset ``set_alpha(1.0)`` (presumably a visual
    no-op, since 1.0 is the default alpha) instead of the linewidth when
    switching targets, so a previously highlighted line kept its 20pt
    width forever. Both branches now reset the linewidth.
    """
    need_redraw = False
    for line in self.xxax.lines:
        # Reset both properties so a formerly highlighted line shrinks
        # back regardless of which branch set it.
        line.set_linewidth(1.0)
        line.set_alpha(1.0)
        need_redraw = True
    if target is not None:
        target.set_linewidth(20.0)
    if need_redraw:
        # draw_idle defers the repaint to the GUI event loop.
        self.xxax.figure.canvas.draw_idle()
if __name__ == '__main__':
    # Bootstrap the Qt application, show the demo widget on top of the
    # window stack, and hand control to the event loop.
    qt_app = QtGui.QApplication(sys.argv)
    widget = DrawWidget()
    widget.show()
    widget.raise_()
    sys.exit(qt_app.exec_())
| true
|
2c6755bd084a2398bc9087975b7321239e4df8ec
|
Python
|
TechInTech/algorithmsAnddataStructure
|
/numberArray/interview60b.py
|
UTF-8
| 1,354
| 3.421875
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
"""n个骰子的点数(基于循环求骰子点数)
"""
import copy
G_MAXVALUE = 6 # 骰子点数可自定义
class Solution_60b(object):
    """Distribution of the total shown by ``number`` dice.

    Each die shows 1..G_MAXVALUE with equal probability; the counts are
    built with the classic dynamic programme that adds one die at a time.

    Improvements over the original: the counting is factored into a
    testable helper, the two-row flag buffer and the dead
    ``del probabilities`` are gone, and the ratio uses float division so
    the output is correct under Python 2 as well (the original's
    ``count / total`` truncated to 0 there).
    """

    @staticmethod
    def _dice_counts(number, max_value):
        """Return ways to roll each total with ``number`` fair dice of
        ``max_value`` faces, for totals number .. number*max_value.

        counts[s] holds the number of ways the dice added so far sum to s.
        """
        size = max_value * number + 1
        counts = [0] * size
        for face in range(1, max_value + 1):
            counts[face] = 1  # distribution of a single die
        for k in range(2, number + 1):  # add the k-th die
            nxt = [0] * size
            # With k dice only totals k .. max_value*k are reachable.
            for total in range(k, max_value * k + 1):
                # A roll reaching ``total`` ends with some face 1..max_value.
                nxt[total] = sum(counts[total - face]
                                 for face in range(1, min(total, max_value) + 1))
            counts = nxt
        return counts[number:size]

    def print_probability(self, number):
        """Print 'total: probability' for every reachable total of
        ``number`` dice with G_MAXVALUE faces. No-op when number < 1.
        """
        if number < 1:
            return
        counts = self._dice_counts(number, G_MAXVALUE)
        # float(): keep true division under Python 2.
        total = float(G_MAXVALUE ** number)
        for offset, ways in enumerate(counts):
            print('%d: %.4f' % (number + offset, ways / total))
def main():
    """Demo entry point: print the distribution for two dice."""
    dice_count = 2
    Solution_60b().print_probability(dice_count)


if __name__ == '__main__':
    main()
| true
|
a284b468b3f1037b8f7016ffc468b7365053f43b
|
Python
|
511753317/algorithm
|
/mechinelearn/kmeans/sklearniris.py
|
UTF-8
| 549
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Overlaid per-class histograms of one iris feature.

@time: 2018/06/29 13:52
@author: 柴顺进
@file: sklearniris.py
@software: machineline
@note: Loads the iris dataset and plots a histogram of feature column
       ``x_index`` for each of the three classes, one color per class.

Fixes: the shebang was missing its leading slash, and the loop variable
``color`` shadowed the ``color`` list (it only worked because ``zip``
evaluates its arguments before the first rebinding) — the list is now
named ``colors``.
"""
from matplotlib import pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()

x_index = 3  # feature column to plot (index 3 = petal width for iris)
colors = ['blue', 'red', 'green']  # one color per iris class

# One histogram per class label, overlaid on the same axes.
for label, color in zip(range(len(iris.target_names)), colors):
    plt.hist(iris.data[iris.target == label, x_index],
             label=iris.target_names[label],
             color=color)

plt.xlabel(iris.feature_names[x_index])
plt.legend(loc='upper right')
plt.show()
| true
|