seq_id
stringlengths 7
11
| text
stringlengths 156
1.7M
| repo_name
stringlengths 7
125
| sub_path
stringlengths 4
132
| file_name
stringlengths 4
77
| file_ext
stringclasses 6
values | file_size_in_byte
int64 156
1.7M
| program_lang
stringclasses 1
value | lang
stringclasses 38
values | doc_type
stringclasses 1
value | stars
int64 0
24.2k
⌀ | dataset
stringclasses 1
value | pt
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
8833474558
|
########################################################
# Rodrigo Leite - drigols #
# Last update: 17/12/2021 #
########################################################
import pandas as pd
from matplotlib import pyplot as plt

# Small, hand-written employee dataset used to demonstrate a box plot
# of the salary column (one salary, 189000, is a deliberate outlier).
df = pd.DataFrame(
    {
        'Name': ['Dan', 'Joann', 'Pedro', 'Rosie', 'Ethan', 'Vicky', 'Frederic'],
        'Salary':[50000, 54000, 50000, 189000, 55000, 40000, 59000],
        'Hours':[41, 40, 36, 17, 35, 39, 40],
        'Grade':[50, 50, 46, 95, 50, 5,57]
    }
)
# showfliers=False suppresses outliers, i.e. discrepant data points are
# not drawn on the box plot.  (Translated from the Portuguese original.)
df['Salary'].plot(kind='box', title='Salary Distribution', figsize=(10,8), showfliers=False)
plt.savefig('../images/first-boxplot-03.png', format='png')
plt.show()
|
drigols/studies
|
modules/math-codes/modules/statistics-and-probability/src/outliers-v2.py
|
outliers-v2.py
|
py
| 804
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21437122618
|
import kivy
from kivy.app import App
from kivy.uix.label import Label
# 2: KivyMD variants of the same widgets
from kivymd.app import MDApp
from kivymd.uix.label import MDLabel
from kivymd.uix.screen import Screen
kivy.require('2.1.0')  # abort early if an older Kivy is installed
class MyFirstApp(App):
    """Plain-Kivy app showing a single Label rendered with BBCode-style markup."""

    def build(self):
        # Earlier iterations, kept for reference:
        # lbl = Label(text='Hello World')
        # lbl = Label(text='Hello World and Good Morning', font_size='20sp', color=[0.41, 0.42, 0.74, 1])
        lbl = Label(text="[color=ff3333][b]'Hello World'[/b][/color]\n[color=3333ff]Good Morning[/color]",
                    font_size='20sp', markup=True)
        """
        Markup reference (translated from the Japanese original):
        [b][/b] -> bold text
        [i][/i] -> italic text
        [u][/u] -> underlined text
        [s][/s] -> strikethrough text
        [font=][/font] -> change the font
        [size=][/size] -> change the font size
        [color=#][/color] -> change the text colour
        [ref=][/ref] -> add an interactive zone; the reference plus its bounding box become available via Label.refs
        [anchor=] -> put an anchor in the text; anchor positions are available via Label.anchors
        [sub][/sub] -> show text as a subscript relative to the preceding text
        [sup][/sup] -> show text as a superscript relative to the preceding text
        """
        return lbl
class Demo(MDApp):
    """KivyMD app rendering the same text at three positions / font styles."""

    def build(self):
        root = Screen()
        # (vertical position, KivyMD font style) for each label, top to bottom.
        placements = [(0.8, 'Caption'), (0.5, 'H2'), (0.2, 'H1')]
        for center_y, style in placements:
            root.add_widget(MDLabel(
                text="Welcome",
                pos_hint={'center_x': 0.8, 'center_y': center_y},
                theme_text_color='Custom',
                text_color=(0.5, 0, 0.5, 1),
                font_style=style,
            ))
        return root
if __name__ == '__main__':
    # MyFirstApp().run()  # plain-Kivy variant, kept for reference
    Demo().run()
|
gonzales54/python_script
|
kivy/kivy1(text)/main1.py
|
main1.py
|
py
| 2,528
|
python
|
ja
|
code
| 0
|
github-code
|
6
|
5609431554
|
import gym
class SparseRewardWrapper(gym.Wrapper):
    """Accumulate per-step rewards and release them only at sparse intervals.

    sparse_level semantics:
      *  0 -> pass rewards through unchanged
      * -1 -> emit the accumulated reward only when the episode ends
      *  k -> emit the accumulated reward every k steps (and at episode end)
    timestep_limit > 0 additionally forces `done` after that many steps.
    """

    def __init__(self, env, sparse_level=-1, timestep_limit=-1):
        super(SparseRewardWrapper, self).__init__(env)
        self.sparse_level = sparse_level
        self.timestep_limit = timestep_limit
        self.acc_reward = 0
        self.acc_t = 0

    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        self.acc_t += 1
        # Force episode termination once the step budget is exhausted.
        if self.timestep_limit > 0 and self.acc_t >= self.timestep_limit:
            done = True
        # Level 0 means "no sparsification": forward the raw reward.
        if self.sparse_level == 0:
            return obs, rew, done, info
        self.acc_reward += rew
        # Emit at episode end, or every `sparse_level` steps when periodic.
        emit_now = done or (
            self.sparse_level != -1
            and self.acc_t > 0
            and self.acc_t % self.sparse_level == 0
        )
        ret_rew = 0
        if emit_now:
            ret_rew = self.acc_reward
            self.acc_reward = 0
        return obs, ret_rew, done, info

    def reset(self, **kwargs):
        # A new episode starts with empty accumulators.
        self.acc_t = 0
        self.acc_reward = 0
        return self.env.reset(**kwargs)
|
pfnet-research/piekd
|
sparse_wrapper.py
|
sparse_wrapper.py
|
py
| 1,118
|
python
|
en
|
code
| 6
|
github-code
|
6
|
36154798504
|
import streamlit as st
st.set_option('deprecation.showPyplotGlobalUse', False)
# for manipulation
import pandas as pd
import numpy as np
# for data visualization
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
plt.style.use("dark_background")
#sns.set_style('whitegrid')
# to filter warnings
import warnings
warnings.filterwarnings('ignore')
# for interactivity
from ipywidgets import interact
st.title("Agricultural Production Optimization Engine")
# Reading the dataset
data= pd.read_csv('data.csv')
x= data.drop(['label'], axis=1)
y= data['label']
# let's create training and testing sets for validation of results
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test= train_test_split(x,y, test_size=0.2, random_state=42)
# let's create predictive model
from sklearn.linear_model import LogisticRegression
LogReg = LogisticRegression()
LogReg.fit(x_train,y_train)
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
random_forest= RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train)
from sklearn.tree import DecisionTreeClassifier
DecTree= DecisionTreeClassifier()
DecTree.fit(x_train,y_train)
from sklearn.neighbors import KNeighborsClassifier
KNN= KNeighborsClassifier()
KNN.fit(x_train, y_train)
from sklearn.naive_bayes import GaussianNB
NB= GaussianNB()
NB.fit(x_train, y_train)
from sklearn.svm import SVC
svm = SVC()
svm.fit(x_train, y_train)
Nv = st.sidebar.radio("Navigator", ["Home","Prediction","Contribute"])
if Nv== "Home":
    #st.write("### Home")
    st.image("app.png", width= 700)
    if st.checkbox("Show Dataset"):
        st.table(data)

    def _plot_requirement(column, ylabel):
        """Render a per-crop bar chart of one soil/climate requirement."""
        plt.figure(figsize=(5, 3))
        sns.barplot(data['label'], data[column])
        plt.xlabel('\nCrops', fontsize=14)
        plt.xticks(rotation=90)
        plt.ylabel(ylabel, fontsize=12)
        st.pyplot()

    def _plot_distribution(column, xlabel):
        """Render the distribution of one condition with min/mean/max markers."""
        plt.figure(figsize=(5, 3))
        sns.distplot(data[column])
        plt.xlabel("\n" + xlabel, fontsize=14)
        plt.ylabel('Density',fontsize=14)
        plt.axvline(data[column].min(), color='y', label='Minimum')
        plt.axvline(data[column].mean(), color='orange', label='Mean')
        plt.axvline(data[column].max(), color='grey', label='Maximum')
        plt.legend()
        st.pyplot()

    # selectbox label -> dataframe column (dict order defines selectbox order);
    # replaces seven near-identical if-blocks from the original.
    _requirements = {
        'Nitrogen Requirement': 'N',
        'Phosphorous Requirement': 'P',
        'Potassium Requirement': 'K',
        'Temperature Requirement': 'temperature',
        'PH Requirement': 'ph',
        'Humidity Requirement': 'humidity',
        'Rainfall Requirement': 'rainfall',
    }
    st.subheader("\nSoil Requirement of Each Crop")
    if st.checkbox("Show Soil Requirement Graphs"):
        condition = st.selectbox("Conditions", list(_requirements))
        _plot_requirement(_requirements[condition], condition)

    # selectbox option -> (dataframe column, x-axis label)
    _distributions = {
        'N': ('N', 'Nitrogen'),
        'P': ('P', 'Phosphourous'),  # original UI spelling kept unchanged
        'K': ('K', 'Potassium'),
        'Temperature': ('temperature', 'Temperature'),
        'PH': ('ph', 'PH'),
        'Humidity': ('humidity', 'Humidity'),
        'Rainfall': ('rainfall', 'Rainfall'),
    }
    st.subheader("\nDistribution of Agricultural Conditions")
    if st.checkbox("Show Distribution Graphs"):
        con = st.selectbox("Conditions", list(_distributions))
        column, label = _distributions[con]
        _plot_distribution(column, label)
if Nv == "Prediction":
    st.subheader("\nCrop Predictor\n")
    # Inputs must match the training column order: N, P, K, temperature,
    # humidity, ph, rainfall.  ("Tempreture" label kept — changing a widget
    # label changes its Streamlit identity.)
    N = st.number_input("\nNitrogen Value: ",50.00, step=0.10)
    P = st.number_input("Phosphorous Value: ", 50.00 ,step=0.10)
    K = st.number_input("Potassium Value: ", 50.00 ,step=0.10)
    T = st.number_input("Tempreture: ", 25.00 ,step=0.10)
    H = st.number_input("Humidity: ", 50.00 ,step=0.10)
    PH = st.number_input("PH Value: ", 7.00 ,step=0.10)
    R = st.number_input("Rainfall: ", 200.00 ,step=0.10)
    st.write("\n\n\n")
    op=st.selectbox("Choose ML Algorithm",['Random Forest','Logistic Regression', 'Decision Tree','KNN', 'Naive Bayes', 'SVM'])
    st.write("\n\n\n")
    if st.button("Predict"):
        # Algorithm label -> fitted model (all trained above on the same features).
        # BUG FIX: the KNN option previously called DecTree.predict, so the
        # "KNN" prediction was actually the decision tree's.
        _models = {
            'Logistic Regression': LogReg,
            'Random Forest': random_forest,
            'Decision Tree': DecTree,
            'KNN': KNN,
            'Naive Bayes': NB,
            'SVM': svm,
        }
        prediction = _models[op].predict([[N, P, K, T, H, PH, R]])
        st.subheader(f"\nPredicted Crop by using {op} is:")
        st.success(prediction)
if Nv == "Contribute":
    # Collect one labelled sample from the user and append it to app.csv.
    st.subheader("Contribute to our Dataset")
    N = st.number_input("Nitrogen Value: ", 0.00, 150.00, 50.00, step=0.5)
    P = st.number_input("Phosphorous Value: ", 0.00, 150.00, 50.00, step=0.5)
    K = st.number_input("Potassium Value: ", 0.00, 120.00, 50.00, step=0.5)
    T = st.number_input("Tempreture: ", 0.00, 60.00, 25.00, step=0.5)
    H = st.number_input("Humidity: ", 10.00, 100.00, 50.00, step=0.5)
    PH = st.number_input("PH Value: ", 0.00, 10.00, 7.00, step=0.5)
    R = st.number_input("Rainfall: ", 20.00, 300.00, 200.00, step=0.5)
    crop = st.text_input("Crop: ")
    if st.button("Contribute"):
        new_row = pd.DataFrame({
            "N": [N], "P": [P], "K": [K], "temperature": [T],
            "humidity": [H], "ph": [PH], "rainfall": [R], "label": [crop],
        })
        # Append without header/index so the file keeps a single header row.
        new_row.to_csv("app.csv", mode='a', header=False, index=False)
        st.success("Thanks for Your Contribution")
|
Jkauser/Agricultural-Production-Optimization-Engine
|
app.py
|
app.py
|
py
| 10,888
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8380997732
|
import os
from flask import Flask, jsonify, request
from math import sqrt
app = Flask(__name__)

@app.route('/')
def nao_entre_em_panico():
    """Render the first 50 Fibonacci numbers (starting from 0+1) as HTML."""
    parts = ["Essa é sequencia dos 50 primeiros números da razão de Fibonacci: <br> Desenvolvido por Jefferson Alves. <br> <br>"]
    previous, current = 0, 1
    for _ in range(50):
        # Same recurrence as the original while-loop: each step appends
        # the freshly computed term.
        previous, current = current, previous + current
        parts.append(str(current) + "<br>")
    return "".join(parts)
if __name__ == "__main__":
    # Container/PaaS friendly: honour $PORT, default to 5000 locally.
    port = int(os.environ.get("PORT", 5000))
    # Bind to 0.0.0.0 so the app accepts connections from outside the host.
    app.run(host='0.0.0.0', port=port)
|
jeffersonpedroza/Docker
|
fibonacci.py
|
fibonacci.py
|
py
| 606
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
36837090213
|
import streamlit as st
from streamlit_option_menu import option_menu
import math
import datetime
from datetime import date
import calendar
from PIL import Image
from title_1 import *
from img import *
# Inject the project stylesheet into the Streamlit page.
with open('final.css') as f:
    st.markdown(f"<style>{f.read()}</style>",unsafe_allow_html=True)
def average():
    """Render the 'find the average' page: a comma-separated number input,
    Clear/Submit buttons and the computed average.

    NOTE(review): despite the "Enter the Date" label, the input is parsed as
    a comma-separated list of integers — confirm the intended wording.
    """
    image()
    st.markdown(" <h1 style='text-align: center; color: Black;font-size: 25px;'>Application to Find the Average</h1>", unsafe_allow_html=True)
    # Two column grids: one for the prompt/input row, one for the buttons.
    w1,col1,col2,w2=st.columns((1,2,2,1))
    us1,bc1,bc2,us2=st.columns((4,1.5,1.8,6))
    with col1:
        st.markdown("")
        st.write("# Enter the Date ")
    # ------------to create the function to clear the input-----------#
    with bc2:
        st.markdown("")
        st.markdown("")
        def clear_text():
            # Clearing the session-state entry empties the text_input below.
            st.session_state["text"] = ""
        st.button("Clear", on_click=clear_text)
    with col2:
        vAR_input_num=st.text_input("",key="text")
        vAR_list=[]
    #----- Average -------#
    with bc1:
        st.markdown("")
        st.markdown("")
        if st.button("Submit"):
            with col2:
                if vAR_input_num != '':
                    # Parse "1,2,3" into a list of ints; int() raises on
                    # non-numeric input, which Streamlit surfaces as an error.
                    vAR_input_data = vAR_input_num.split(",")
                    for i in vAR_input_data:
                        num=int(i)
                        vAR_list.append(num)
                    def Average(vAR_list):
                        # Mean rounded to 4 decimals, shown in a success box.
                        vAR_avg= sum(vAR_list) / len(vAR_list)
                        vAR_avg=round(vAR_avg,4)
                        st.success(vAR_avg)
                    Average(vAR_list)
                else:
                    st.error("Error")
            with col1:
                st.write("# Answer is ")
|
Deepsphere-AI/AI-lab-Schools
|
Grade 08/Application/find_avg.py
|
find_avg.py
|
py
| 1,787
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19686018633
|
#Caesar Cipher Technique
print ("\nCaesar Cipher Technique")
def encrypt(text,s):
    """Caesar-shift `text` by `s` positions.

    Upper- and lower-case letters rotate within their own alphabet; any
    other character (spaces, digits, punctuation) passes through unchanged.
    Decryption is encrypt(text, 26 - key).
    """
    result = ""
    for char in text:
        if char.isupper():
            # BUG FIX: this branch previously used the global `Shift`
            # instead of the `s` parameter, so uppercase letters were
            # shifted with the wrong key when decrypting.
            result += chr((ord(char) - 65 + s) % 26 + 65)
        elif char.islower():
            result += chr((ord(char) - 97 + s) % 26 + 97)
        else:
            # Previously non-letters fell into the lowercase formula and
            # were mangled; leave them untouched instead.
            result += char
    return result
#check the above function
# Interactive driver: asks for the mode, the text and the key, then prints
# the transformed text.
Technique = input("\nEnter the Caesar Cipher Technique, \ni.e. Either Encrypt or Decrypt, Type ('Encrypt' or 'Decrypt').\n")
if Technique =='Encrypt':
    Text = input("\nEnter the Plain Text : ")
    Shift = int(input("\nEnter Key : "))
    E = encrypt(Text,Shift)
    print ("\nYour Text : " + Text)
    print ("\nShift : " + str(Shift))
    print ("\nEncrypted Text : " + E)
elif Technique =='Decrypt':
    Text = input("\nEnter the Cipher Text : ")
    Shift = int(input("\nEnter the key : "))
    # Decryption is encryption with the complementary key (26 - key).
    D = encrypt(Text,(26 - Shift))
    print ("\nYour Cipher Text : " + Text)
    print ("\nShift : " + str(Shift))
    print ("\nDecrypted Text : " + D)
else :
    print ("Wrong Choice Please Try Again ... ")
|
hsenhgiv/i
|
Practical 1.1 Caesar Cipher Technique.py
|
Practical 1.1 Caesar Cipher Technique.py
|
py
| 1,256
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35021100800
|
from .base import BaseEnvironment
import os
import subprocess
class K3dEnvironment(BaseEnvironment):
    """Test environment driving a local k3d (k3s-in-docker) cluster."""

    name = "k3d"

    def load_images(self, images):
        """Import container images into the cluster.

        `images` is an iterable of (image_path, is_latest) pairs; for each
        image a sidecar "<image_path>.txt" file provides the image id on the
        first line and the repo:tag on the second.
        """
        for img, _is_latest in images:
            # `with` guarantees the metadata file is closed (it was
            # previously opened and never closed); the unused `loaded`
            # accumulator was removed.
            with open(img + ".txt") as md:
                image_id = md.readline().strip()
                image_repo_tag = md.readline().strip()
            self.load_image(image_repo_tag, image_id)

    def load_image(self, repo_tag, id):
        """Import a single image into the default k3d cluster."""
        print(f"Loading image {repo_tag} ({id})")
        cmd = f"k3d image import {repo_tag} -c k3s-default"
        print(cmd)
        subprocess.check_call(cmd, shell=True)

    def start_cluster(self, nodes, version):
        """Create the default k3d cluster; custom k8s versions unsupported."""
        assert version is None
        #regpath = os.path.join(os.path.dirname(__file__), "k3d-registries.yaml")
        # , "--volume", regpath+":/etc/rancher/k3s/registries.yaml"]
        args = ["k3d", "cluster", "create", "k3s-default"]
        subprocess.check_call(args)
        # connect network of the cluster to the local image registry
        #subprocess.call(["docker", "network", "connect", "k3d-k3s-cluster", "registry.localhost"])

    def stop_cluster(self):
        """Stop (but keep) the default cluster."""
        subprocess.check_call(["k3d", "cluster", "stop", "k3s-default"])

    def delete_cluster(self):
        """Delete the default cluster entirely."""
        subprocess.check_call(["k3d", "cluster", "delete", "k3s-default"])
|
mvvitorsilvati/mysql-operator
|
tests/utils/ote/k3d.py
|
k3d.py
|
py
| 1,357
|
python
|
en
|
code
| null |
github-code
|
6
|
2061469568
|
from sklearn.preprocessing import StandardScaler
from sklearn import svm
class OneClassSVM:
    """Anomaly detector wrapping sklearn's one-class SVM with optional scaling."""

    def __init__(self, scaling=True):
        # When True, inputs are standardised before fitting and scoring.
        self._scaling = scaling

    def fit(self, X):
        """Fit on (at most the first 4096 rows of) X; returns self."""
        if self._scaling:
            self._scaler = StandardScaler()
            X = self._scaler.fit_transform(X)
        # Cap the training-set size to keep SVM fitting tractable.
        X = X[:4096]
        self._svm = svm.OneClassSVM().fit(X)
        return self

    def anomaly_scores(self, batch):
        """Return scores where higher = more anomalous (negated decision fn)."""
        scored = self._scaler.transform(batch) if self._scaling else batch
        return -self._svm.decision_function(scored)
|
rom1mouret/cheatmeal
|
benchmarks/baselines/one_class_svm.py
|
one_class_svm.py
|
py
| 559
|
python
|
en
|
code
| 2
|
github-code
|
6
|
41014218939
|
#coding=utf-8
import numpy as np
import pyten
from scipy import stats
from pyten.method.PoissonAirCP import PoissonAirCP
from pyten.method import AirCP
from pyten.tools import tenerror
from pyten.method import cp_als
from pyten.method import falrtc,TNCP
import matplotlib.pyplot as plt
# Experiment parameters (translated from the Chinese original).
missList = [0.7]        # missing-data ratios to evaluate
duplicate=1             # number of replicates per ratio
prespecifyrank = 5      # CP rank used for tensor completion
para_alpha = [1,1,1]    # presumably per-mode regularisation weights — confirm in AirCP
para_lmbda = 1          # regularisation strength (passed as `lmbda`)
def normalize(mat):
    """Centre each column of `mat` by subtracting its column mean.

    Only centring is performed — there is no division by the standard
    deviation.  (Translated intent of the original Chinese docstring:
    without this, cosine similarities between rows come out nearly
    identical.)
    """
    return mat - mat.mean(axis=0)
from sklearn.metrics.pairwise import cosine_similarity
def cons_similarity(dat):
    """Build the three auxiliary similarity matrices for a 3-way tensor.

    dat: 3-D array — presumably (entity x time x topic); TODO confirm axes.
    Returns [aux0, aux1, aux2], one similarity matrix per tensor mode.
    """
    siz = dat.shape
    temp = np.sum(dat, axis=1)
    # Mode 0: entity similarity from the cosine distance of each entity's
    # (centred) time-aggregated profile, mapped through a Gaussian kernel.
    tagvector = normalize(np.sum(dat, axis=1))
    cos_dist = 1 - cosine_similarity(tagvector)
    aux0 = np.exp(-(cos_dist**2))
    # Mode 1 (time): use the ACF of an AR(1) model fitted to the total
    # series (translated: "time similarity via the AR(1) model's ACF").
    from statsmodels.tsa.arima_model import ARMA
    ts = np.sum(np.sum(dat, axis=0),axis = 1)
    order = (1,0)
    tempModel = ARMA(ts,order).fit()
    rho = np.abs(tempModel.arparams)
    # Toeplitz-like matrix: similarity decays as rho**|lag|.
    aux1 = np.diag(np.ones(siz[1]))
    for nn in range(1, siz[1]):
        aux1 = aux1 + np.diag(np.ones(siz[1] - nn), -nn) * rho ** nn + np.diag(np.ones(siz[1] - nn), nn) * rho ** nn
    # Mode 2: pairwise similarity of the last-mode slices (translated:
    # "correlation between topics"), weighted by each entity's share Pl.
    aux2 = np.diag(np.ones(siz[2]))
    Pl = np.sum(temp, axis=1) / np.sum(temp)
    for i in range(siz[2]):
        for j in range(siz[2]):
            aux2[i,j] = np.exp(-np.sum((((temp[:, i] - temp[:, j]) / np.max(temp, 1)) ** 2) * Pl))
    aux = [aux0, aux1, aux2]
    return (aux)
def convertMon(mat):
    """Aggregate daily data into 12 pseudo-months of 30 days each.

    mat has shape (n, 365, k); the result has shape (n, 12, k).  Each
    "month" is the sum over a consecutive 30-day window, so the last five
    days of the year (360-364) are dropped.  (Docstring translated from
    the original Chinese.)
    """
    starts = range(0, 365, 30)
    monthly = [np.sum(mat[:, starts[m]:starts[m + 1]], axis=1) for m in range(12)]
    # Stack as (12, n, k), then reorder the axes to (n, 12, k).
    return np.array(monthly).transpose((1, 0, 2))
dat =np.load('newbuild_tensor.npy')
# Pre-filter countries (translated): entries with too many zeros are meant
# to be excluded, leaving 195 of 235.
# NOTE(review): the mask *keeps* rows whose zero-count exceeds 1000, which
# contradicts the stated intent — verify the comparison direction.
idx = np.sum(np.sum(dat ==0,axis = 1),axis=1)>1000
dat = dat[idx]
# Optional adjustment (translated): aggregate into monthly data.
dat = convertMon(dat)
siz = dat.shape
true_data = dat.copy()
true_data = pyten.tenclass.tensor.Tensor(true_data)
# Result lists, kept for plotting comparisons (translated).
finalList1 = []
finalList22 = []
finalList2 = []
finalListTNCP=[]
finalListfal = []
for miss in missList:
    # Auxiliary matrices start as identities, one per tensor mode.
    aux = [np.diag(np.ones(siz[0])), np.diag(np.ones(siz[1])), np.diag(np.ones(siz[2]))]
    RE2 = []
    RE22 = []
    for dup in range(duplicate):
        np.random.seed(dup*4)
        # Use the same data for every replicate (translated).
        data = dat.copy()
        # Observations: entries dropped with probability `miss` (translated).
        Omega = (np.random.random(siz) > miss) * 1
        data[Omega == 0] -= data[Omega == 0]
        data = pyten.tenclass.tensor.Tensor(data)
        print('missing ratio: {0}'.format(miss))
        # Rank used for the completion (translated).
        com_rank = prespecifyrank
        # This section alternates completion with refreshing the auxiliary
        # similarity matrices (translated).
        simerror = 1
        Iter = 1
        while (simerror > 1e-2 and Iter < 10):
            self2 = PoissonAirCP(data, omega=Omega, rank=com_rank, max_iter=3000, tol=1e-5,
                                 OnlyObs=True, TrueValue=true_data, sim_mats=aux, alpha=para_alpha, lmbda=para_lmbda)
            self2.run()
            temp_aux = cons_similarity(self2.X.data)
            # Convergence measure: largest Frobenius change across the
            # three auxiliary matrices.
            simerror = np.max((np.linalg.norm(aux[0] - temp_aux[0]),
                               np.linalg.norm(aux[1] - temp_aux[1]),
                               np.linalg.norm(aux[2] - temp_aux[2])))
            aux = temp_aux
            Iter = Iter + 1
            print('ExpAirCP loop with similarity error: {0}'.format(simerror))
            [EEr, EReEr1, EReEr2] = tenerror(self2.X, true_data, Omega)
            # Record the error from the very first completion pass as well.
            if Iter ==2:
                RE22.append(EReEr1)
            print(EReEr1)
        # End of the auxiliary-matrix update loop (translated: "up to here").
        [EErr, EReErr1, EReErr2] = tenerror(self2.X, true_data, Omega)
        print ('ExpAirCP Completion Error: {0}, {1}, {2}'.format(EErr, EReErr1, EReErr2))
        RE2.append(EReErr1)
    finalList22.append(np.mean(RE22))
    finalList2.append(np.mean(RE2))
# Same experiment as above, but with the plain AirCP baseline.
for miss in missList:
    aux = [np.diag(np.ones(siz[0])), np.diag(np.ones(siz[1])), np.diag(np.ones(siz[2]))]
    RE1 = []
    RE11 = []
    for dup in range(duplicate):
        np.random.seed(dup*4)
        # Use the same data for every replicate (translated).
        data = dat.copy()
        # Observations: entries dropped with probability `miss` (translated).
        Omega = (np.random.random(siz) > miss) * 1
        data[Omega == 0] -= data[Omega == 0]
        data = pyten.tenclass.tensor.Tensor(data)
        print('missing ratio: {0}'.format(miss))
        # Rank used for the completion (translated).
        com_rank = prespecifyrank
        # Alternate completion with refreshing the auxiliary matrices
        # (translated).
        simerror = 1
        Iter = 1
        while (simerror > 1e-2 and Iter < 10):
            self = AirCP(data, omega=Omega, rank=com_rank, max_iter=3000, tol=1e-5, sim_mats=aux, alpha=para_alpha, lmbda=para_lmbda)
            self.run()
            temp_aux = cons_similarity(self.X.data)
            simerror = np.max((np.linalg.norm(aux[0] - temp_aux[0]),
                               np.linalg.norm(aux[1] - temp_aux[1]),
                               np.linalg.norm(aux[2] - temp_aux[2])))
            aux = temp_aux
            Iter = Iter + 1
            print('AirCP loop with similarity error: {0}'.format(simerror))
            [EEr, EReEr1, EReEr2] = tenerror(self.X, true_data, Omega)
            print(EReEr1)
        # End of the update loop (translated: "up to here").
        # Check how accurately the original data were recovered (translated).
        [Err, ReErr1, ReErr2] = tenerror(self.X, true_data, Omega)
        print ('AirCP Completion Error: {0}, {1}, {2}'.format(Err, ReErr1, ReErr2))
        RE1.append(ReErr1)
    finalList1.append(np.mean(RE1))
# Commented-out baseline runs (TNCP and falrtc), kept for reference.
# for miss in missList:
#     RETNCP = []
#
#     for dup in range(duplicate):
#         np.random.seed(dup*4)
#         # use the same data for every replicate (translated)
#         data = dat.copy()
#         # observations: entries dropped with probability `miss` (translated)
#         Omega = (np.random.random(siz) > miss) * 1
#         data[Omega == 0] -= data[Omega == 0]
#         data = pyten.tenclass.tensor.Tensor(data)
#
#         # rank used for the completion (translated)
#         print('missing ratio: {0}'.format(miss))
#         # rank used for the completion (translated)
#         com_rank = prespecifyrank
#         self3 = TNCP(data, Omega, rank=com_rank,alpha = para_alpha, lmbda=para_lmbda)
#         self3.run()
#         [EErrr, EReErrr1, EReErrr2] = tenerror(self3.X, true_data, Omega)
#         print ('TNCP Completion Error: {0}, {1}, {2}'.format(EErrr, EReErrr1, EReErrr2))
#         RETNCP.append(EReErrr1)
#     finalListTNCP.append(np.mean(RETNCP))
#
#
# # falrtc is unaffected by the rank choice, so it is run separately
# # (translated).
# for miss in missList:
#     REfal = []
#     for dup in range(duplicate):
#         np.random.seed(dup*4)
#         # use the same data for every replicate (translated)
#         data = dat.copy()
#         # observations: entries dropped with probability `miss` (translated)
#         Omega = (np.random.random(siz) > miss) * 1
#         data[Omega == 0] -= data[Omega == 0]
#         data = pyten.tenclass.tensor.Tensor(data)
#         print('missing ratio: {0}'.format(miss))
#         rX1 = falrtc(data, Omega, max_iter=100)
#         [Errfal, ReErrfal, ReErr2fal] = tenerror(rX1, true_data, Omega)
#         print ('falrtc Completion Error: {0}, {1}, {2}'.format(Errfal, ReErrfal, ReErr2fal))
#         REfal.append(ReErrfal)
#     finalListfal.append(np.mean(REfal))
#
# Summary: print the per-method mean relative errors collected above.
print(finalList1)
print(finalList2)
print(finalListTNCP)
print(finalListfal)
result = [finalList1,finalList2,finalListTNCP]
result_name = 'prerank='+str(prespecifyrank)+'.csv'
# Optional CSV export of the results, currently disabled.
#np.savetxt(result_name,result,fmt='%.4f',delimiter=',')
|
yangjichen/ExpCP
|
realdata/GDELT_step3.py
|
GDELT_step3.py
|
py
| 7,907
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38026626999
|
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from Utils import Calendar
class App:
    """Tkinter month-view calendar with navigation and a go-to-date page.

    Relies on the project-local `Utils.Calendar` helper for today's date,
    month/weekday name lists and the per-month grid (`getCalendarArray`).
    Constructing the class builds the UI and enters the Tk main loop.
    """

    def __init__(self):
        # Main window: fixed 600x500, non-resizable.
        self.root = Tk()
        self.root.title("Calendar App")
        self.root.geometry("600x500")
        self.root.resizable(False,False)
        # Cache today's date parts; they drive the initial view and the
        # "today" highlighting on redraws.
        self.today = Calendar.getToday()
        self.year = self.today.year
        self.monthIndex = self.today.month
        self.day = self.today.day
        self.weekDay = self.today.weekday()
        self.dayName = Calendar.weekdays[self.today.weekday()]
        # Labels currently displayed as dates (rebuilt on every redraw).
        self.dateLabelsOnWindow = []
        self.frame = Frame(self.root,background="#ececec")
        self.move_to_date_frame = Frame(self.root)
        self.menu = Menu(self.root)
        self.setUpCalendar()
        self.root.mainloop()

    def selectDate(self,e):
        """Click handler: highlight the clicked date and restyle the rest."""
        for items in self.dateLabelsOnWindow:
            # Today keeps its blue highlight; every other date reverts to grey.
            if int(items.cget("text")) == self.today.day and self.year == self.today.year and self.monthIndex == self.today.month:
                items.config(background="#19668A",foreground="white")
            else:
                items.config(background="#D3D3D3",foreground="black")
                items.selected = False
        e.widget.config(background="#3f3f3f",foreground="white")
        e.widget.selected = True

    def nextMonth(self):
        """Advance the view one month, rolling the year forward as needed."""
        self.monthIndex +=1
        self.dateLabelsOnWindow = []
        if self.monthIndex > 12:
            self.monthIndex = 1
            self.year+=1
        self.setUpCalendar()

    def previousMonth(self):
        """Move the view back one month, rolling the year back as needed."""
        self.monthIndex -=1
        self.dateLabelsOnWindow = []
        if self.monthIndex < 1:
            self.monthIndex = 12
            self.year-=1
        self.setUpCalendar()

    def getToday(self):
        """Jump the view back to the current month."""
        self.monthIndex = self.today.month
        self.year = self.today.year
        self.setUpCalendar()

    def moveToDate(self,year,month):
        """Jump to `month`/`year` (strings from the go-to form); warn on bad input."""
        try:
            if month =="" or year == "":
                raise Exception()
            self.year = int(year)
            self.monthIndex=Calendar.months.index(month.capitalize())+1
            self.setUpCalendar()
        except Exception as e:
            # Covers empty fields, non-numeric years and unknown month names.
            messagebox.showwarning(title="Warning",message="Fill all the entries carefully")

    def createMenuBar(self):
        """(Re)build the Home/About menu bar and attach it to the window."""
        self.menu = Menu(self.root)
        home = Menu(self.menu,tearoff=False)
        about = Menu(self.menu,tearoff=False)
        self.menu.add_cascade(label="Home",menu=home)
        self.menu.add_cascade(label="About",menu=about)
        home.add_cascade(label="Today",command=self.getToday)
        home.add_cascade(label="Move to Date",command=self.moveToDatePage)
        home.add_separator()
        home.add_cascade(label="Exit",command=self.root.destroy)
        about.add_cascade(label="About",command=lambda:messagebox.askokcancel(title="About",message="This is a calendar app made using python by Paras Punjabi"))
        self.root.config(menu=self.menu)

    def moveToDatePage(self):
        """Swap the calendar grid for the 'Go To Date' form."""
        self.frame.pack_forget()
        month = StringVar()
        year = StringVar()
        # Defaults: current year, January.
        year.set(self.year)
        month.set("January")
        Label(self.move_to_date_frame,text="Go To Date",font=("monospace",25,"bold")).grid(row=0,column=0,columnspan=2,pady=20)
        Label(self.move_to_date_frame,text="Month",font=("monospace",15,"bold")).grid(row=1,column=0,pady=10)
        combobox = ttk.Combobox(self.move_to_date_frame,textvariable=month,width=40,font=("monospace",15,"bold"))
        combobox["values"] = Calendar.months
        combobox.grid(row=1,column=1,pady=10)
        Label(self.move_to_date_frame,text="Year",font=("monospace",15,"bold")).grid(row=2,column=0,pady=10)
        Entry(self.move_to_date_frame,textvariable=year,font=("monospace",15,"bold"),width=40).grid(row=2,column=1,pady=10)
        Button(self.move_to_date_frame,text="Go to Date",font=("monospace",15,"bold"),command=lambda:self.moveToDate(year.get(),month.get())).grid(row=3,column=1,pady=10)
        Button(self.move_to_date_frame,text="Home",font=("monospace",15,"bold"),command=self.setUpCalendar).grid(row=4,column=1,pady=10)
        self.move_to_date_frame.pack()

    def setUpCalendar(self):
        """Redraw the calendar grid for self.year / self.monthIndex."""
        self.move_to_date_frame.pack_forget()
        self.dateLabelsOnWindow = []
        self.frame.destroy()
        self.createMenuBar()
        self.frame = Frame(self.root,background="#ececec")
        heading = Label(self.frame,text=Calendar.months[self.monthIndex-1].upper() + " " + str(self.year),font=("monospace",20,"bold"),anchor="center")
        heading.grid(row=0,column=2,columnspan=5,padx=10,pady=10)
        # \u25C4 / \u25BA are the left/right arrow glyphs.
        Button(self.frame,text="\u25C4",font=("monospace",15,"bold"),command=self.previousMonth).grid(row=0,column=0,pady=10,padx=10)
        Button(self.frame,text="\u25BA",font=("monospace",15,"bold"),command=self.nextMonth).grid(row=0,column=8,pady=10,padx=10)
        # all days of calendar
        Label(self.frame,text=" ",font=("monospace",10,"bold")).grid(row=2,column=0,padx=10)
        for i in range(7):
            label = Label(self.frame,text=Calendar.weekdays[i],font=("monospace",10,"bold"))
            label.grid(row=2,column=i+1,padx=10)
        Label(self.frame,text="",font=("monospace",10,"bold")).grid(row=2,column=8,padx=10)
        # all dates of calendar
        columnIndex = 1
        rowIndex = 3
        todaysCalendar = Calendar().getCalendarArray(self.year,self.monthIndex)
        for items in todaysCalendar:
            if columnIndex>7:
                columnIndex =1
                rowIndex +=1
            # NOTE(review): columnIndex is immediately overwritten from
            # items["day"], so the wrap logic above only advances rowIndex.
            columnIndex = items["day"]+1
            # Zero-pad single-digit dates for a uniform grid.
            label = Label(self.frame,text=str(f'0{items["date"]}') if items["date"] <= 9 else str(items["date"]) ,font=("monospace",13,"bold"),background="#D3D3D3",border="2")
            label.grid(row=rowIndex,column=items["day"]+1,padx=10,pady=5,ipadx=10,ipady=10)
            label.selected = False
            # Highlight today's cell in blue.
            if items["date"] == self.today.day and items["day"] == self.today.weekday() and self.today.year == self.year and self.today.month == self.monthIndex:
                label.config(background="#19668A",foreground="white")
            self.dateLabelsOnWindow.append(label)
            label.bind("<Button-1>",lambda e:self.selectDate(e))
            columnIndex+=1
        self.frame.pack(ipadx=10,ipady=10,side="top")
if __name__ == '__main__':
    # Constructing App builds the UI and blocks in Tk's main loop.
    App()
'''
To convert into exe:-
pyinstaller --onefile -w Calendar.py
'''
|
Paras-Punjabi/Calendar-App-in-Python
|
Calendar.py
|
Calendar.py
|
py
| 6,488
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25849292828
|
# imports
import socket
import json
def extractData(ledger):
    """Pull the display fields out of a {'ledger': {...}} payload.

    Returns (title, date, people_csv, summary, html_rows) where html_rows
    is the concatenated <tr> markup for every transaction.
    """
    body = ledger['ledger']
    names = ', '.join(person['name'] for person in body['people'])
    rows = ''.join(
        f"<tr><td>{txn['item']}</td><td>{txn['amount']}</td>"
        f"<td>{txn['date']}</td><td>{txn['paid_by']}</td></tr>"
        for txn in body['transactions']
    )
    return body['title'], body['date'], names, body['summary'], rows
def generateHTML(title, date, people, summary, table):
    """Build the HTML email body for a ledger snapshot.

    `table` is pre-rendered <tr> markup (see extractData); the remaining
    arguments are interpolated directly into the template.
    Fix: corrects the user-facing typo "snapshop" -> "snapshot".
    """
    html = f'''<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Email</title>
</head>
<body>
    <h1>Subject: {title}</h1>
    <p>You have been sent a record of shared expenses between {people}. The following is a snapshot of that ledger from {date}</p>
    <table style="width:80%">
        <tr>
            <th>Item</th>
            <th>Amount</th>
            <th>Date</th>
            <th>Paid By</th>
        </tr>
        {table}
    </table>
    <p>Note that items with an " * " means they have been edited. Items with "del" have been removed from the summary.</p>
    <h2>Ledger summary: </h2>
    <p>{summary}</p>
</body>
</html>'''
    return html
# setup: accept one client at a time, receive a length-prefixed JSON ledger,
# and reply with the rendered HTML email body.
HOST = "localhost"
PORT = 65432
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
while True:
    server.listen(1)
    print(f'Server listening on port: {PORT} \n')
    commSocket, addr = server.accept()
    print(f'Connected by {addr} \n')
    # First message is the payload length as a decimal string.
    dataLen = int(commSocket.recv(1024).decode())
    print(f'length of data to receive: {dataLen} \n')
    # LENGTH VERIFICATION: echo the length back to the client.
    commSocket.send(str(dataLen).encode())
    ledgerData = ''
    while True:
        data = commSocket.recv(1024).decode()
        ledgerData += data
        # NOTE(review): this compares *character* count to the advertised
        # length; if the client sends a byte count and the JSON contains
        # multi-byte UTF-8, the condition may never hit — confirm the
        # client-side protocol.
        if len(ledgerData) == dataLen:
            print(f'Server received: {ledgerData}\n')
            ledgerData = json.loads(ledgerData)
            title, date, people, summary, htmlTable = extractData(ledgerData)
            html = generateHTML(title, date, people, summary, htmlTable)
            commSocket.send(html.encode())
            print('sending html')
            commSocket.close()
            print('Connection closed.')
            break
|
alexcw08/email-microservice
|
server.py
|
server.py
|
py
| 2,524
|
python
|
en
|
code
| 0
|
github-code
|
6
|
15751603227
|
from elasticsearch import Elasticsearch, exceptions
import json, time
import itertools
from project import config
class SelectionAnalytics():
    '''
    SelectionAnalytics class
    data analytics - elasticsearch
    '''
    # Elasticsearch connection settings, taken from the project config.
    DOMAIN = config.DOMAIN
    LOGIN = config.LOGIN
    PASSWORD = config.PASSWORD
    PORT = config.PORT
    index_name = 'news_analysis'
    client = None

    def __init__(self):
        '''
        Create an Elasticsearch connection object.

        On connection failure, self.client is reset to None so callers can
        detect the unusable state.
        '''
        self.client = Elasticsearch([self.DOMAIN],
                                    http_auth=(self.LOGIN, self.PASSWORD),
                                    scheme="https",
                                    port=self.PORT)
        # Confirming there is a valid connection to Elasticsearch
        try:
            # use the JSON library's dump() method for indentation
            info = json.dumps(self.client.info(), indent=4)
            print("Elasticsearch client info():", info)
        except exceptions.ConnectionError as err:
            print("\nElasticsearch info() ERROR:", err)
            # BUG FIX: the original referenced an undefined name `host`
            # here, raising NameError instead of reporting the failed host.
            print("\nThe client host:", self.DOMAIN, "is invalid or cluster is not running")
            # change the client's value to 'None' if ConnectionError
            self.client = None

    def get_elements_list(self, element_name):
        '''
        get_elements_list
        :param element_name: name of the document field to aggregate on
        :type element_name: str
        :return: elements_list
        :rtype: list of dicts (doc_count & key)
        '''
        res = self.client.search(index=self.index_name, body={
            "size": 0,
            "aggs": {
                "Articles": {
                    "filter": {
                        "range": {
                            "date": {
                                "gte": "2020-01-01T00:00:00.00"
                            }
                        }
                    },
                    "aggs": {
                        "GroupBy": {
                            "terms": {"field": element_name + ".keyword", "size": 10000}
                        }
                    }
                }
            }
        })
        elements_docs = res['aggregations']['Articles']['GroupBy']['buckets']
        # sorting by doc_count desc (sorted() already returns a list)
        elements_list = sorted(elements_docs, key=lambda i: i['doc_count'], reverse=True)
        # remove empty sections (bug to fix)
        sections_to_exclude = ['les-decodeurs', 'm-le-mag', 'm-perso', 'm-styles', 'series-d-ete']
        # iterate over a slice copy so removal is safe during the loop
        for item in elements_list[:18]:
            if (item['key'] in sections_to_exclude):
                elements_list.remove(item)
        # list of dicts (doc_count & key)
        return elements_list[:18]

    def get_custom_corpus(self, section_name, query_size):
        '''
        get_custom_corpus
        :param section_name: section_name
        :type section_name: str
        :param query_size: query_size
        :type query_size: int
        :return: (custom_corpus, total_hits)
        :rtype: tuple (list of tokens, int)
        '''
        res = self.client.search(index=self.index_name, body={
            "size": query_size,
            "query": {
                "bool": {
                    "must": {
                        "term": {"section": section_name}
                    },
                },
            },
            "_source": ["doc_token"]
        })
        # total hits
        total_hits = res['hits']['total']['value']
        # concat doc_token fields from documents
        results_list = [item["_source"]['doc_token'] for item in res['hits']['hits']]
        # merge lists into a single flat list of tokens
        custom_corpus = list(itertools.chain.from_iterable(results_list))
        return (custom_corpus, total_hits)

    def get_documents(self, string_search, nb_wanted):
        '''
        get_documents
        :param string_search: tokens to search
        :type string_search: str
        :param nb_wanted: total docs wanted
        :type nb_wanted: int
        :return: (hits, nb_wanted, documents_list)
        :rtype: tuple
        '''
        res = self.client.search(index=self.index_name, body={
            "size": nb_wanted,
            "query": {
                "match": {
                    "doc_token": string_search
                },
            },
            "_source": {
                "include": ["author", "date", "link", "section", "title"]
            },
        })
        hits = res['hits']['total']['value']
        documents_list = res['hits']['hits']
        return (hits, nb_wanted, documents_list)

    def get_document_by_id(self, id_doc):
        '''
        get_document_by_id
        :param id_doc: document id
        :type id_doc: str
        :return: doc
        :rtype: dict
        '''
        res = self.client.search(index=self.index_name, body={
            "size": 1,
            "query": {
                "terms": {
                    "_id": [id_doc]
                },
            },
            "_source": {
                "include": ["author", "content_html", "date", "doc_token", "link", "teaser", "section", "title"]
            },
        })
        doc = res['hits']['hits'][0]
        return doc

    def get_custom_corpus_list(self, section_name, query_size):
        '''
        get_custom_corpus_list
        :param section_name: section_name
        :type section_name: str
        :param query_size: query_size
        :type query_size: int
        :return: custom_corpus
        :rtype: list of lists
        '''
        # Consistency fix: use self.index_name instead of a hard-coded
        # 'news_analysis' literal (same value, single source of truth).
        res = self.client.search(index=self.index_name, body={
            "size": query_size,
            "query": {
                "bool": {
                    "must": {
                        "term": {"section": section_name}
                    },
                },
            },
            "_source": ["doc_token"]
        })
        # keep one token list per document (not flattened, unlike
        # get_custom_corpus)
        results_list = [item["_source"]['doc_token'] for item in res['hits']['hits']]
        return (results_list)

    def count_by_sections(self):
        '''
        count docs by sections
        :return: sections_list
        :rtype: list of dicts (score, percent, section)
        '''
        res = self.client.search(index=self.index_name, body={
            "aggs": {
                "sections": {
                    "terms": {"field": "section.keyword"}
                }
            },
            "_source": {
                "include": ["_id", "date", "section"]
            },
        })
        result = res['aggregations']['sections']
        buckets = result['buckets'][:9]
        sections_list = []
        # get total docs
        total_docs = 0
        for item in buckets:
            total_docs += item['doc_count']
        # get percent
        for item in buckets:
            doc_percent = round(item['doc_count']/total_docs*100)
            sections_list.append({'score': item['doc_count'], 'percent': doc_percent, 'section': item['key']})
        # Rename sections
        # NOTE(review): 'Societe' lacks the accent that count_by_dates uses
        # ('Société') — confirm which spelling consumers expect.
        sections_names = {
            'international': 'International',
            'economie': 'Economie',
            'planete': 'Planète',
            'idees': 'Idées',
            'afrique': 'Afrique',
            'politique': 'Politique',
            'societe': 'Societe',
            'culture': 'Culture',
            'sport': 'Sport'
        }
        for item in sections_list:
            if item['section'] in sections_names:
                item['section'] = sections_names[item['section']]
        return(sections_list)

    def count_by_dates(self):
        '''
        count docs per week, broken down by section
        :return: data_list — one dict per week: {'date': 'YYYYMMDD', <Section>: score, ...}
        :rtype: list of dicts
        '''
        res = self.client.search(index=self.index_name, body={
            "aggs": {
                "amount_per_week": {
                    "date_histogram": {
                        "field": "date",
                        "interval": "week",
                        "format": "yyyy-MM-dd"
                    },
                    "aggs": {
                        "sections": {
                            "terms": {"field": "section.keyword"}
                        }
                    },
                }
            },
        })
        res_list = res['aggregations']['amount_per_week']['buckets']
        # dict for sections selection & renaming
        sections_names = {
            'international': 'International',
            'economie': 'Economie',
            'planete': 'Planète',
            'idees': 'Idées',
            'afrique': 'Afrique',
            'politique': 'Politique',
            'societe': 'Société',
            'culture': 'Culture',
            'sport': 'Sport'
        }
        # build data list
        data = []
        for item in res_list:
            nb_docs = item['doc_count']
            # skip the 2019 buckets, keeping only year 2020 onwards
            year = item['key_as_string'][0:4]
            if (year != '2019'):
                # get & substring date
                date = item['key_as_string'][0:10]
                buckets = item['sections']['buckets']
                sections_scores = []
                # select sections and rename
                for i in buckets:
                    if i['key'] in sections_names:
                        sections_scores.append({'section': sections_names[i['key']], 'score': i['doc_count']})
                # set empty sections to zero
                listed_sections = [element['section'] for element in sections_scores]
                for name in sections_names.values():
                    if name not in listed_sections:
                        sections_scores.append({'section': name, 'score': 0})
                # data list to return
                data.append({'date': date, 'nb_docs': nb_docs, 'sections_scores': sections_scores})
        # reformat data into one flat dict per week
        data_list = []
        for item in data:
            item_dict = {'date': item['date'].replace('-', '')}
            for element in item['sections_scores']:
                item_dict[element['section']] = element['score']
            data_list.append(item_dict)
        return data_list
class SelectionRelational():
    '''
    SelectionRelational class
    data statistics - Azure SQL
    '''

    def __init__(self):
        '''
        Initialize the Azure SQL selection helper.

        Fix: the original returned a string from __init__, which raises
        TypeError("__init__() should return None, not 'str'") on every
        instantiation; a constructor must return None.
        '''
        # No connection logic implemented yet.
        pass
|
flabastie/news-analysis
|
project/queries/selection.py
|
selection.py
|
py
| 11,437
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22088681014
|
from helpers import setup_logger
menu_name = "Hardware test"
from threading import Event, Thread
from traceback import format_exc
from subprocess import call
from time import sleep
import sys
import os
from ui import Menu, Printer, PrettyPrinter, GraphicsPrinter
from helpers import ExitHelper, local_path_gen
logger = setup_logger(__name__, "warning")
# Input/output device handles; populated by init_app().
i = None
o = None
# Assets for the audio-jack test; the track originates from
# http://freemusicarchive.org/ and is downloaded on first run.
downloaded = Event()  # set once the music file is available locally
url = "http://wiki.zerophone.org/images/b/b5/Otis_McMusic.mp3"
music_filename = "test.mp3"
local_path = local_path_gen(__name__)
music_path = local_path(music_filename)
def init_app(input, output):
    """Store the UI handles and make sure the test music is available.

    If the track is missing, it is fetched in a background daemon thread;
    ``downloaded`` is set once the file exists locally.
    """
    global i, o
    i, o = input, output
    if music_filename in os.listdir(local_path('.')):
        # Already present — nothing to fetch.
        downloaded.set()
        return

    def fetch_music():
        downloaded.clear()
        logger.debug("Downloading music for hardware test app!")
        call(["wget", url, "-O", music_path])
        downloaded.set()

    worker = Thread(target=fetch_music)
    worker.daemon = True
    worker.start()
def callback():
    """Run the interactive hardware self-test sequence.

    Checks, in order: keypad presence on I2C (0x12), IO expander (0x20),
    splashscreen, keypad keys, then — only if the expander answered —
    charger detection, the RGB LED and audio-jack playback.  Any failure
    shows the traceback on screen instead of crashing the app.
    """
    try:
        # Testing I2C - 0x12 should answer, 0x20 should raise IOError with busy errno
        from smbus import SMBus
        bus = SMBus(1)
        try:
            bus.read_byte(0x12)
        except IOError:
            PrettyPrinter("Keypad does not respond!", i, o)
        else:
            PrettyPrinter("Keypad found!", i, o)
        # Checking IO expander
        expander_ok = False
        try:
            bus.read_byte(0x20)
        except IOError as e:
            if e.errno == 16:
                # errno 16 = EBUSY: a kernel driver owns the device, so it is present
                PrettyPrinter("IO expander OK!", i, o)
                expander_ok = True
            elif e.errno == 121:
                # errno 121 = EREMOTEIO: nothing answered at that address
                PrettyPrinter("IO expander not found!", i, o)
        else:
            # A raw read succeeded — the device exists but no driver claims it
            PrettyPrinter("IO expander driver not loaded!", i, o)
        # Launching splashscreen
        GraphicsPrinter("splash.png", i, o, 2)
        # Launching key_test app from app folder, that's symlinked from example app folder
        PrettyPrinter("Testing keypad", i, o, 1)
        import key_test
        key_test.init_app(i, o)
        key_test.callback()
        # Following things depend on I2C IO expander,
        # which might not be present:
        if expander_ok:
            # Testing charging detection
            PrettyPrinter("Testing charger detection", i, o, 1)
            from zerophone_hw import is_charging
            eh = ExitHelper(i, ["KEY_LEFT", "KEY_ENTER"]).start()
            if is_charging():
                PrettyPrinter("Charging, unplug charger to continue \n Enter to bypass", None, o, 0)
                while is_charging() and eh.do_run():
                    sleep(1)
            else:
                PrettyPrinter("Not charging, plug charger to continue \n Enter to bypass", None, o, 0)
                while not is_charging() and eh.do_run():
                    sleep(1)
            # Testing the RGB LED
            PrettyPrinter("Testing RGB LED", i, o, 1)
            from zerophone_hw import RGB_LED
            led = RGB_LED()
            for color in ["red", "green", "blue"]:
                led.set_color(color)
                Printer(color.center(o.cols), i, o, 3)
            led.set_color("none")
            # Testing audio jack sound
            PrettyPrinter("Testing audio jack", i, o, 1)
            if not downloaded.is_set():
                PrettyPrinter("Audio jack test music not yet downloaded, waiting...", i, o)
                downloaded.wait()
            # Fix: the original was missing a comma after "Track used:",
            # which silently concatenated the first two strings.
            disclaimer = ["Track used:", "", "Otis McDonald", "-", "Otis McMusic", "YT AudioLibrary"]
            Printer([s.center(o.cols) for s in disclaimer], i, o, 3)
            PrettyPrinter("Press C1 to restart music, C2 to continue testing", i, o)
            import pygame
            pygame.mixer.init()
            pygame.mixer.music.load(music_path)
            pygame.mixer.music.play()
            continue_event = Event()

            def restart():
                pygame.mixer.music.stop()
                pygame.mixer.init()
                pygame.mixer.music.load(music_path)
                pygame.mixer.music.play()

            def stop():
                pygame.mixer.music.stop()
                continue_event.set()

            i.clear_keymap()
            i.set_callback("KEY_F1", restart)
            i.set_callback("KEY_F2", stop)
            i.set_callback("KEY_ENTER", stop)
            continue_event.wait()
        # Self-test passed, it seems!
    except Exception:
        # Fix: a bare `except:` would also swallow KeyboardInterrupt and
        # SystemExit, making the app impossible to stop on failure.
        exc = format_exc()
        PrettyPrinter(exc, i, o, 10)
    else:
        PrettyPrinter("Self-test passed!", i, o, 3, skippable=False)
|
LouisPi/piportablerecorder
|
apps/test_hardware/main.py
|
main.py
|
py
| 4,539
|
python
|
en
|
code
| 1
|
github-code
|
6
|
37379251526
|
import os
import glob
import numpy as np
import time
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from configs import *
def merge_shp(shp_list, save_dir):
    """Merge the shapefiles in *shp_list* into a single shapefile.

    Runs GDAL's ``ogrmerge.py`` and writes ``merged.shp`` into *save_dir*,
    created under the directory of the first input file.

    Args:
        shp_list (list): paths of the shapefiles to merge
        save_dir (str): name of the output directory

    Returns:
        str: path of the merged shapefile
    """
    files_string = " ".join(shp_list)
    print(files_string)
    shp_dir = os.path.join(os.path.dirname(shp_list[0]), save_dir)
    if not os.path.exists(shp_dir):
        os.makedirs(shp_dir)
    # ogrmerge.py must be on PATH (ships with GDAL's Python tools).
    command = "ogrmerge.py -single -o {}/merged.shp ".format(shp_dir) + files_string
    print(os.popen(command).read())
    # Give the filesystem a moment before callers read the output.
    time.sleep(1)
    return shp_dir + "/merged.shp"
def trans_shp(fn):
    """Populate an integer 'My_class' field on every feature of a shapefile.

    Maps each feature's 'DLBM' land-use code to one of 7 coarse classes
    (0-5, or 6 for anything unmapped) using the category sets imported
    from configs, creating the field first when it is missing.

    Args:
        fn (str): path to the shapefile, opened in update mode
    """
    driver = ogr.GetDriverByName("ESRI Shapefile")
    dataSource = driver.Open(fn, 1)
    layer = dataSource.GetLayer()
    feature = layer.GetNextFeature()
    # counts features that fall into the catch-all class 6
    sum = 0
    newField = ogr.FieldDefn('My_class', ogr.OFTInteger)
    if layer.GetLayerDefn().GetFieldIndex('My_class') == -1:
        layer.CreateField(newField)
    while feature:
        DLBM = feature.GetField('DLBM')
        # (earlier, finer-grained 17-class mapping kept below for reference)
        # if DLBM in 水田:
        #     feature.SetField('My_class', 0)
        # elif DLBM in 旱地:
        #     feature.SetField('My_class', 1)
        # elif DLBM in 果园:
        #     feature.SetField('My_class', 2)
        # elif DLBM in 茶园:
        #     feature.SetField('My_class', 3)
        # elif DLBM in 乔木林地:
        #     feature.SetField('My_class', 4)
        # elif DLBM in 灌木林地:
        #     feature.SetField('My_class', 5)
        # elif DLBM in 苗圃:
        #     feature.SetField('My_class', 6)
        # elif DLBM in 草地:
        #     feature.SetField('My_class', 7)
        # elif DLBM in 工矿用地:
        #     feature.SetField('My_class', 8)
        # elif DLBM in 公共建筑:
        #     feature.SetField('My_class', 9)
        # elif DLBM in 城镇住宅:
        #     feature.SetField('My_class', 10)
        # elif DLBM in 农村住宅:
        #     feature.SetField('My_class', 11)
        # elif DLBM in 公路用地:
        #     feature.SetField('My_class', 12)
        # elif DLBM in 农村道路:
        #     feature.SetField('My_class', 13)
        # elif DLBM in 河流:
        #     feature.SetField('My_class', 14)
        # elif DLBM in 裸地:
        #     feature.SetField('My_class', 15)
        # else:
        #     feature.SetField('My_class', 16)
        #     sum += 1
        if DLBM in 田地:
            feature.SetField('My_class', 0)
        elif DLBM in 园地:
            feature.SetField('My_class', 1)
        elif DLBM in 林地:
            feature.SetField('My_class', 2)
        elif DLBM in 建筑用地:
            feature.SetField('My_class', 3)
        elif DLBM in 道路:
            feature.SetField('My_class', 4)
        elif DLBM in 水体:
            feature.SetField('My_class', 5)
        else:
            feature.SetField('My_class', 6)
            sum += 1
        layer.SetFeature(feature)
        feature = layer.GetNextFeature()
    # number of features that landed in the unmapped class
    print(sum)
    return
def trans_shp_all_class(fn):
    """Write the fine-grained class code into a 'My_class' integer field.

    Each feature's 'DLBM' value is looked up through the configs mappings
    (CORRESPOND then CORRESPOND_LABEL); unmapped values get code 56.

    Args:
        fn (str): path to the shapefile, opened in update mode
    """
    driver = ogr.GetDriverByName("ESRI Shapefile")
    source = driver.Open(fn, 1)
    layer = source.GetLayer()
    feature = layer.GetNextFeature()
    if layer.GetLayerDefn().GetFieldIndex('My_class') == -1:
        layer.CreateField(ogr.FieldDefn('My_class', ogr.OFTInteger))
    while feature:
        dlbm = feature.GetField('DLBM')
        code = CORRESPOND_LABEL[CORRESPOND[dlbm]] if dlbm in CORRESPOND else 56
        feature.SetField('My_class', code)
        layer.SetFeature(feature)
        feature = layer.GetNextFeature()
def shp2raster(shapename, output_raster, pixel_size, colormap=None):
    """convert shapefile to raster

    Burns each feature's 'My_class' attribute into a single-band GeoTIFF
    covering the layer extent, with 255 as the NoData value and the output
    georeferenced to EPSG:4524.

    Args:
        shapename (str): the path of shapefile
        output_raster (str): the path of output raster
        pixel_size (float): the pixel size of output raster
        colormap: the color map of output raster (currently unused — see note)
    """
    input_shp = ogr.Open(shapename)
    shp_layer = input_shp.GetLayer()
    extent = shp_layer.GetExtent()
    x_min = extent[0]
    x_max = extent[1]
    y_min = extent[2]
    y_max = extent[3]
    # Raster size in pixels, truncated to cover the extent at pixel_size.
    x_res = int((x_max - x_min) / pixel_size)
    y_res = int((y_max - y_min) / pixel_size)
    image_type = "GTiff"
    driver = gdal.GetDriverByName(image_type)
    new_raster = driver.Create(output_raster, x_res, y_res, 1, gdal.GDT_Byte)
    # North-up geotransform anchored at the top-left corner of the extent.
    new_raster.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))
    band = new_raster.GetRasterBand(1)
    ct = colormap
    # NOTE(review): the color table is assigned but the call below stays
    # disabled, so `colormap` currently has no effect on the output.
    # band.SetRasterColorTable(ct)
    band.SetNoDataValue(255)
    band.FlushCache()
    # Burn the integer class attribute into band 1.
    gdal.RasterizeLayer(new_raster, [1], shp_layer, options=["Attribute=My_class"])
    new_rasterSRS = osr.SpatialReference()
    new_rasterSRS.ImportFromEPSG(4524)
    new_raster.SetProjection(new_rasterSRS.ExportToWkt())
    return
def count_features_by_field(shp_file, field_name):
    """Return {field value: number of features} for *field_name* in *shp_file*."""
    driver = ogr.GetDriverByName('ESRI Shapefile')
    data_source = driver.Open(shp_file, 0)
    counts = {}
    for feature in data_source.GetLayer():
        value = feature.GetField(field_name)
        counts[value] = counts.get(value, 0) + 1
    return counts
def area_features_by_field(shp_file):
    """Return {DLBM value: summed SHAPE_Area} over all features of *shp_file*."""
    driver = ogr.GetDriverByName('ESRI Shapefile')
    data_source = driver.Open(shp_file, 0)
    areas = {}
    for feature in data_source.GetLayer():
        value = feature.GetField("DLBM")
        areas[value] = areas.get(value, 0) + feature.GetField("SHAPE_Area")
    return areas
def gdb_to_shp(gdb_file, output_folder):
    """Convert a GDB dataset to UTF-8 shapefiles, reprojecting 4490 -> 4524."""
    command = (
        "ogr2ogr -f 'ESRI Shapefile' -lco ENCODING=UTF-8 "
        "-s_srs EPSG:4490 -t_srs EPSG:4524 {} {}".format(output_folder, gdb_file)
    )
    os.system(command)
def rename_lcpa_copy(shp_dir, target_dir):
    """Copy every file containing "LCPA" in its name into *target_dir*,
    replacing "LCPA" with the name of the directory it was found in."""
    for current_dir, _, names in os.walk(shp_dir):
        parent = current_dir.split('/')[-1]
        for name in names:
            if "LCPA" not in name:
                continue
            source_file = os.path.join(current_dir, name)
            target_file = os.path.join(target_dir, name.replace("LCPA", parent))
            os.popen('cp {} {}'.format(source_file, target_file))
if __name__ == "__main__":
    # All pipeline steps below are intentionally disabled; the module is
    # normally imported for its functions rather than run directly.
    a=0
    # gdb_dir = "/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/分区/"
    # output_dir = "/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/shape/"
    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)
    # gdb_list = os.listdir(gdb_dir)
    # for gdb_name in gdb_list:
    #     print(gdb_name)
    #     gdb_path = os.path.join(gdb_dir, gdb_name)
    #     output_shp_dir = os.path.join(output_dir, gdb_name.split('.')[0])
    #     if not os.path.exists(output_shp_dir):
    #         os.makedirs(output_shp_dir)
    #     gdb_to_shp(gdb_path, output_shp_dir)
    # rename_lcpa_copy("/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/shape/", "/media/dell/DATA/wy/data/guiyang/地理国情监测/2021/LCPA/")
    # merge_shp()
    # data_dir = "J:/GuangdongSHP/splitSHP/merge_shp/"
    # file_list = glob.glob(('{}*.shp'.format(data_dir)))
    # for i, file_name in enumerate(file_list):
    #     print("{}/{}".format(str(i+1), str(len(file_list))))
    #     output_raster = file_name.split(".")[0] + '.tif'
    #     pixel_size = 7.516606439032443e-06
    #     shp2raster(file_name, output_raster, pixel_size)
|
faye0078/RS-ImgShp2Dataset
|
make_dataset/shp_functions.py
|
shp_functions.py
|
py
| 8,144
|
python
|
en
|
code
| 1
|
github-code
|
6
|
10434320461
|
"""
.. moduleauthor:: Martí Congost <marti.congost@whads.com>
"""
from woost.models import ExtensionAssets, Page, CustomBlock
def install():
    """Creates the assets required by the googlesearch extension."""
    assets = ExtensionAssets("googlesearch")
    # The results page embeds a single custom block that renders the
    # Google CSE results view.
    results_block = assets.require(
        CustomBlock,
        "results_block",
        view_class = "woost.extensions.googlesearch.GoogleCSEResults"
    )
    assets.require(
        Page,
        "results_page",
        title = assets.TRANSLATIONS,
        blocks = [results_block]
    )
|
marticongost/woost.extensions.googlesearch
|
woost/extensions/googlesearch/installation.py
|
installation.py
|
py
| 574
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21334940324
|
import logging
import re
import urlparse
# Matches href attributes, skipping any value that contains "mailto:".
# NOTE(review): the (?!.*mailto:) lookahead is duplicated — harmless but redundant.
find_href = re.compile(r'\bhref\s*=\s*(?!.*mailto:)(?!.*mailto:)("[^"]*"|\'[^\']*\'|[^"\'<>=\s]+)')
# FYI: added a workaround to not to break inline akavita counter script
# (unquoted src values must be at least 2 chars and may not contain ';').
find_src = re.compile(r'\bsrc\s*=\s*("[^"\']*"|\'[^"\']*\'|[^"\'<>=\s;]{2,})')
# Patterns applied, in order, to every rendered page by the subdir hook.
PATTERNS = [find_href, find_src]
def fix_urls(document, base_url, pattern):
    """Prefix relative URLs in *document* with "/<base_url>".

    Every match of *pattern* (an href/src attribute) whose value has no
    scheme and no netloc is rewritten to live under the subdirectory;
    matched values are re-emitted double-quoted regardless of original
    quoting.

    :param document: the HTML text to rewrite
    :param base_url: subdirectory name, without surrounding slashes
    :param pattern: compiled regex whose group 1 is the attribute value
    :return: the rewritten document
    """
    # NOTE(review): Python 2 module layout (`urlparse`); under Python 3
    # this would be `urllib.parse`.
    ret = []
    last_end = 0
    for match in pattern.finditer(document):
        url = match.group(1)
        logging.info("Checking url: %s" % url)
        # Strip surrounding quotes (either style) before parsing.
        if url[0] in "\"'":
            url = url.strip(url[0])
        parsed = urlparse.urlparse(url)
        # Relative URL: no scheme and no host.
        if parsed.scheme == parsed.netloc == '':
            if not url.startswith('/' + base_url) and not url.startswith(base_url):
                logging.info("Processing url: %s" % url)
                url = '/%s%s' % (base_url, url)
                logging.info("Processed url: %s" % url)
        # Copy the text between matches, then the (re-quoted) value.
        ret.append(document[last_end:match.start(1)])
        ret.append('"%s"' % (url))
        last_end = match.end(1)
    ret.append(document[last_end:])
    return ''.join(ret)
def add_subdir_hook():
    """Return the page hooks that rewrite relative URLs into the subdir."""
    def replace_hook(options, page):
        subdir = options.get('url_subdir')
        if not subdir:
            return
        for pattern in PATTERNS:
            page.rendered = fix_urls(page.rendered, subdir, pattern)
    return [replace_hook]
|
stachern/bseu_fm
|
hooks/subdir.py
|
subdir.py
|
py
| 1,410
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20543259826
|
"""
TIC TAC TOE
"""
#variables
board = ["*", "*", "*",
"*", "*", "*",
"*", "*", "*"]
player = "X" #first player is always X
winner = None
game_running = True
print("\033[95m-- TIC TAC TOE --\033[m\n")
# creating the board game
def print_board(board):
    """Render the 3x3 grid with dividers between rows."""
    divider = "-" * 10
    for row_start in (0, 3, 6):
        print(divider)
        print(" | ".join(board[row_start:row_start + 3]))
    print(divider)
# taking player input
def player_input(board):
    """Prompt the current player for a move (1-9) and apply it.

    A move is accepted only when it is inside the board and the target
    cell is still empty ("*"); otherwise an error message is printed and
    the board is left untouched.
    """
    choice = int(input("Enter your move: "))
    # Fix: the original `choice >= 1 or choice <= 9 and ...` was always true
    # due to operator precedence, and it also tested for an *occupied* cell;
    # both defects allowed overwriting any square.
    if 1 <= choice <= 9 and board[choice - 1] == "*":
        board[choice - 1] = player
    else:
        print("Ops, try again!")
# checking for winners horizontally
def check_horiz(board):
    """Return True (and record the winner) when a row holds three equal marks."""
    global winner
    for start in (0, 3, 6):
        mark = board[start]
        if mark != "*" and board[start + 1] == mark and board[start + 2] == mark:
            winner = mark
            return True
# checking for winners vertically
def check_vert(board):
    """Return True (and record the winner) when a column holds three equal marks."""
    global winner
    for col in range(3):
        mark = board[col]
        if mark != "*" and board[col + 3] == mark and board[col + 6] == mark:
            winner = mark
            return True
# checking for winners diagonally
def check_diag(board):
    """Return True (and record the winner) when a diagonal holds three equal marks."""
    global winner
    for a, b, c in ((0, 4, 8), (2, 4, 6)):
        mark = board[a]
        if mark != "*" and board[b] == mark and board[c] == mark:
            winner = mark
            return True
# main function to check the winner
def check_winner():
    """End the game and announce the winner once any line of three is complete."""
    global game_running
    if not (check_horiz(board) or check_vert(board) or check_diag(board)):
        return
    print_board(board)
    print(f"The winner is {winner}")
    game_running = False
# checking for tie
def chack_tie(board):
    """Declare a tie and stop the game once no empty cell remains."""
    global game_running
    if "*" in board:
        return
    print_board(board)
    print("It is a tie!")
    game_running = False
# switching players
def switch_player():
    """Alternate the active mark between "X" and "O"."""
    global player
    player = "O" if player == "X" else "X"
#all functions
# Main loop: draw, take the move, evaluate win/tie, then swap players.
# NOTE(review): an invalid move still passes the turn to the other player
# (player_input does not retry) — confirm this is intended.
while game_running:
    print_board(board)
    player_input(board)
    check_winner()
    chack_tie(board)
    switch_player()
print("\033[93mGame Over\033[m")
|
rlorimier/tictactoe
|
tictactoe.py
|
tictactoe.py
|
py
| 2,603
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42496516652
|
class Node:
    """One element of a singly linked list: a payload plus a forward link."""

    def __init__(self, data):
        # `next` stays None until the node is linked into a list.
        self.data, self.next = data, None
class LinkedList:
    """Minimal singly linked list built from Node objects."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def append(self, data):
        """Add a node holding *data* at the tail (O(n))."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        last_node = self.head
        while last_node.next:
            last_node = last_node.next
        last_node.next = new_node

    def prepend(self, data):
        """Add a node holding *data* at the head (O(1))."""
        new_node = Node(data)
        new_node.next = self.head
        self.head = new_node

    def print_list(self):
        """Print each node's data on its own line, head to tail."""
        current_node = self.head
        while current_node:
            print(current_node.data)
            current_node = current_node.next

    def insert_after_node(self, prev_node, data):
        """Insert a node holding *data* immediately after *prev_node*."""
        new_node = Node(data)
        new_node.next = prev_node.next
        prev_node.next = new_node

    def length_of_list(self):
        """Print the number of nodes and also return it.

        Fix: the original only printed the length, making the result
        unusable by callers; the count is now returned as well (0 for an
        empty list) while the printed output is preserved.
        """
        if self.head is None:
            print("List is empty.")
            return 0
        current_node = self.head
        length = 1
        while current_node.next:
            length += 1
            current_node = current_node.next
        print(length)
        return length
# Demo: build A-B-E-D, insert C after B, then print the list and its length.
llist = LinkedList()
llist.append("A")
llist.append("B")
llist.append("E")
llist.append("D")
# llist.print_list()
llist.insert_after_node(llist.head.next, "C")
llist.print_list()
llist.length_of_list()
|
kedarjk44/basic_python
|
linkedlist_insertion.py
|
linkedlist_insertion.py
|
py
| 1,379
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70780596348
|
from flask import Flask, render_template, request
from modelo import modelagemPredicao
from data import gerarNovosDados
# Template/static folders are explicit here, though both are Flask's defaults.
app = Flask(__name__, template_folder='templates', static_folder='static')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Main page: renders the form and, on POST, runs a match prediction.

    Two prediction paths: heroes + medals (model 1) when all four fields
    carry 5 comma-separated entries, or heroes only (model 2).
    """
    # helper variables (defaults shown when no prediction is made)
    partidas = 0
    precisaomedalha = 0
    precisaotaxavitoria = 0
    probabilidade = 0
    team = 'RADIANT'
    if request.method == 'POST':
        radiantheroes = request.form.get('radiantheroes')
        direheroes = request.form.get('direheroes')
        radiantmedals = request.form.get('radiantmedals')
        diremedals = request.form.get('diremedals')
        if radiantheroes and direheroes:
            # split the hero lists
            radiantheroes = radiantheroes.split(',')
            direheroes = direheroes.split(',')
            # split the medal lists
            # NOTE(review): if the medal fields are absent from the form,
            # these are None and .split raises — confirm the form always
            # submits them.
            radiantmedals = radiantmedals.split(',')
            diremedals = diremedals.split(',')
            if len(radiantheroes) == 5 and len(direheroes) == 5 and len(radiantmedals) == 5 and len(diremedals) == 5:
                # transform the inputs for the medal-aware model
                dados = modelagemPredicao.preprocessamentomedalha(radiantheroes, radiantmedals, direheroes, diremedals)
                print(dados)
                # model prediction (model 1: heroes + medals)
                team, probabilidade = modelagemPredicao.predicao(dados, 1)
            elif len(radiantheroes) == 5 and len(direheroes) == 5:
                # transform the inputs for the win-rate model
                dados = modelagemPredicao.preprocessamentotaxavitoria(radiantheroes, direheroes)
                print(dados)
                # model prediction (model 2: heroes only)
                team, probabilidade = modelagemPredicao.predicao(dados, 2)
    # accuracy metrics of both models
    precisaomedalha, precisaotaxavitoria, partidas = modelagemPredicao.precisaomodelos()
    # accuracy as integer percentages
    precisaomedalha = int(precisaomedalha*100)
    precisaotaxavitoria = int(precisaotaxavitoria*100)
    # probability as an integer percentage
    probabilidade = int(probabilidade*100)
    # uppercase team name for display
    team = team.upper()
    # template color matching the predicted team
    color = ''
    if team == 'RADIANT':
        color = '#64FF56'
    else:
        color = '#d84a4a'
    # render the page
    return render_template("index.html", partidas=partidas, precisaotaxavitoria=precisaotaxavitoria, precisaomedalha=precisaomedalha, chance=probabilidade, team=team, color=color)
@app.route('/atualizarDados')
def atualizarDados():
    """Fetch and insert fresh match data, then render the status page."""
    # call the routine that inserts new match data
    gerarNovosDados.gerarNovosDadosPartidas()
    # render the page
    return render_template("atualizarDados.html")
|
stardotwav/Dota2Predictor
|
web service/app.py
|
app.py
|
py
| 2,788
|
python
|
pt
|
code
| 2
|
github-code
|
6
|
70110821308
|
from copy import deepcopy
# The eight (row, col) offsets of a cell's neighbours, diagonals included.
CHECK_DIRECTION = [
    (-1, -1),
    (-1, 0),
    (-1, 1),
    (0, -1),
    (0, 1),
    (1, -1),
    (1, 0),
    (1, 1)
]
# When True, the grid is printed after every simulation step.
DEBUG = False
def load_data(filename):
    """Parse *filename* into a grid of single-digit ints, one row per line."""
    with open(filename, 'r') as handle:
        return [[int(ch) for ch in row.strip()] for row in handle]
def print_data(data):
    """Print the 10x10 grid, rendering zeros as '*'."""
    for row in range(10):
        line = ''.join(str(value) for value in data[row])
        print(line.replace('0', '*'))
def check_flashes(data):
    """Reset every cell above 9 to 0 and return the coordinates reset, in scan order."""
    flashed = []
    for row in range(10):
        for col in range(10):
            if data[row][col] > 9:
                data[row][col] = 0
                flashed.append((row, col))
    return flashed
def step1(data):
    """Advance one step: every octopus gains one energy."""
    for row in data:
        for col in range(10):
            row[col] += 1
def step2(data, indices):
    """Propagate flashes: bump each in-bounds neighbour of every flashed cell.

    Cells currently at 0 (they flashed this step) are skipped, so a cell
    cannot flash twice within the same step.
    """
    rows = len(data)
    cols = len(data[0])
    for j, i in indices:
        for dj, di in CHECK_DIRECTION:
            nj, ni = j + dj, i + di
            if 0 <= nj < rows and 0 <= ni < cols and data[nj][ni] != 0:
                data[nj][ni] += 1
def pt_1(data):
    """Simulate 100 steps and return the total number of flashes (mutates data)."""
    total = 0
    if DEBUG: print_data(data)
    for step in range(100):
        step1(data)
        flashed = check_flashes(data)
        # Keep cascading until no new cell crosses the threshold.
        while flashed:
            total += len(flashed)
            step2(data, flashed)
            flashed = check_flashes(data)
        if DEBUG:
            print('\nAfter step {}:'.format(step + 1))
            print_data(data)
    return total
def pt_2(data):
    """Return the first step on which all 100 octopuses flash at once (mutates data)."""
    step = 0
    if DEBUG: print_data(data)
    while True:
        flash_count = 0
        step1(data)
        flashed = check_flashes(data)
        # Keep cascading until no new cell crosses the threshold.
        while flashed:
            flash_count += len(flashed)
            step2(data, flashed)
            flashed = check_flashes(data)
        step += 1
        if DEBUG:
            print('\nAfter step {}:'.format(step))
            print_data(data)
        if flash_count == 100:
            return step
if __name__ == '__main__':
    data = load_data('input.txt')
    # Both parts mutate the grid in place, so each gets its own deep copy.
    print(pt_1(deepcopy(data)))
    print(pt_2(deepcopy(data)))
|
eVen-gits/advent_of_code_2021
|
day_11/code.py
|
code.py
|
py
| 2,004
|
python
|
en
|
code
| 1
|
github-code
|
6
|
10420612333
|
from __future__ import annotations
from typing import TYPE_CHECKING
from randovania.exporter.hints import guaranteed_item_hint
from randovania.exporter.hints.hint_exporter import HintExporter
from randovania.exporter.hints.joke_hints import JOKE_HINTS
from randovania.game_description.db.hint_node import HintNode
from randovania.games.common.prime_family.exporter.hint_namer import colorize_text
from randovania.games.prime2.patcher import echoes_items
if TYPE_CHECKING:
from random import Random
from randovania.exporter.hints.hint_namer import HintNamer
from randovania.game_description.db.node_identifier import NodeIdentifier
from randovania.game_description.db.region_list import RegionList
from randovania.game_description.game_patches import GamePatches
from randovania.game_description.resources.resource_database import ResourceDatabase
from randovania.games.prime2.exporter.hint_namer import EchoesHintNamer
from randovania.interface_common.players_configuration import PlayersConfiguration
def create_simple_logbook_hint(asset_id: int, hint: str) -> dict:
    """Build a logbook string-patch entry that shows *hint* (the middle
    string is intentionally left blank)."""
    return {"asset_id": asset_id, "strings": [hint, "", hint]}
def create_patches_hints(
    all_patches: dict[int, GamePatches],
    players_config: PlayersConfiguration,
    region_list: RegionList,
    namer: HintNamer,
    rng: Random,
) -> list:
    """Build the logbook string patches for every hint node in *region_list*.

    Each hint placed in this player's patches is rendered to text via
    HintExporter (jokes drawn from JOKE_HINTS); hint nodes with no
    corresponding entry get a fallback message.
    """
    exporter = HintExporter(namer, rng, JOKE_HINTS)
    hints_for_asset: dict[NodeIdentifier, str] = {}
    for identifier, hint in all_patches[players_config.player_index].hints.items():
        hints_for_asset[identifier] = exporter.create_message_for_hint(hint, all_patches, players_config, True)
    return [
        create_simple_logbook_hint(
            logbook_node.extra["string_asset_id"],
            hints_for_asset.get(region_list.identifier_for_node(logbook_node), "Someone forgot to leave a message."),
        )
        for logbook_node in region_list.iterate_nodes()
        if isinstance(logbook_node, HintNode)
    ]
def hide_patches_hints(region_list: RegionList) -> list:
    """
    Creates the string patches entries that replace the Lore scans in the
    game with completely useless text, hiding all hints.
    :return:
    """
    placeholder = "Some item was placed somewhere."
    patches = []
    for node in region_list.iterate_nodes():
        if isinstance(node, HintNode):
            patches.append(create_simple_logbook_hint(node.extra["string_asset_id"], placeholder))
    return patches
# Scan-string asset ids for the nine Sky Temple Gateway key hints,
# ordered so index 0 corresponds to Sky Temple Key 1.
_SKY_TEMPLE_KEY_SCAN_ASSETS = [
    0xD97685FE,
    0x32413EFD,
    0xDD8355C3,
    0x3F5F4EBA,
    0xD09D2584,
    0x3BAA9E87,
    0xD468F5B9,
    0x2563AE34,
    0xCAA1C50A,
]
def create_stk_hints(
    all_patches: dict[int, GamePatches],
    players_config: PlayersConfiguration,
    resource_database: ResourceDatabase,
    namer: HintNamer,
    hide_area: bool,
) -> list:
    """
    Creates the string patches entries that changes the Sky Temple Gateway hint scans with hints for where
    the STK actually are.
    :param all_patches:
    :param players_config:
    :param resource_database:
    :param namer:
    :param hide_area: Should the hint include only the db?
    :return: one logbook patch per Sky Temple Key, in key order
    """
    resulting_hints = guaranteed_item_hint.create_guaranteed_hints_for_resources(
        all_patches,
        players_config,
        namer,
        hide_area,
        [resource_database.get_item(index) for index in echoes_items.SKY_TEMPLE_KEY_ITEMS],
        # NOTE(review): trailing True flag — presumably enables colored/rich
        # text; confirm against create_guaranteed_hints_for_resources.
        True,
    )
    return [
        create_simple_logbook_hint(
            _SKY_TEMPLE_KEY_SCAN_ASSETS[key_number],
            resulting_hints[resource_database.get_item(key_index)],
        )
        for key_number, key_index in enumerate(echoes_items.SKY_TEMPLE_KEY_ITEMS)
    ]
def hide_stk_hints(namer: EchoesHintNamer) -> list:
    """
    Creates the string patches entries that replace the Sky Temple Gateway
    hint scans with completely useless text.
    :return:
    """
    patches = []
    for key_number in range(9):
        key_name = colorize_text(namer.color_item, f"Sky Temple Key {key_number + 1}", True)
        patches.append(
            create_simple_logbook_hint(
                _SKY_TEMPLE_KEY_SCAN_ASSETS[key_number],
                "{} is lost somewhere in Aether.".format(key_name),
            )
        )
    return patches
|
randovania/randovania
|
randovania/games/prime2/exporter/hints.py
|
hints.py
|
py
| 4,216
|
python
|
en
|
code
| 165
|
github-code
|
6
|
32653169006
|
from flask import Flask, send_file, send_from_directory, safe_join, abort
app = Flask(__name__)
# The absolute path of the directory containing image files served to clients
# app.config["CLIENT_IMAGES"] = "/home/mahima/console/static/client/img"
app.config["CLIENT_IMAGES"] = "/home/lenovo/SEproject/OpsConsole/api/static"
# The absolute path of the directory containing CSV files for users to download
app.config["CLIENT_CSV"] = "/home/mahima/console/static/client/csv"
# The absolute path of the directory containing PDF files for users to download
app.config["CLIENT_PDF"] = "/home/mahima/console/static/client/pdf"
# NOTE(review): these are machine-specific absolute paths — consider reading
# them from the environment; only CLIENT_IMAGES is used in this file.
@app.route("/getimg/<img_name>")
def get_img(img_name):
try:
return send_from_directory(app.config["CLIENT_IMAGES"], filename = img_name, as_attachment=True)
except FileNotFoundError:
abort(404)
@app.route('/hello')
def hello():
    # Simple liveness-check endpoint.
    return "Hello Lifeeee"
# Dev-server entry point; debug mode is for local use only.
if __name__ == "__main__":
    app.run(debug = True)
|
trishu99/Platypus
|
api/static/fileserver.py
|
fileserver.py
|
py
| 882
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41123532534
|
from tkinter import *
import random
def resetSuggestion():
    """Restore the suggestion to its initial state and redraw the canvas."""
    global currentSuspect
    global currentWeapon
    global currentLocation
    global xChar, yChar
    global count
    xChar, yChar = 200, 400
    count = 0
    currentSuspect = "White"
    currentWeapon = "Dagger"
    currentLocation = "Dining"
    drawSuggestion()
def suspectParade():
    """Pick a random suspect, weapon and location, then start the walk animation."""
    global currentSuspect, currentWeapon, currentLocation
    global xChar, count
    global window
    global foo, man, chu
    currentSuspect = random.choice(foo)
    currentWeapon = random.choice(chu)
    currentLocation = random.choice(man)
    anim()
def anim():
    """Advance the parade one frame; reschedules itself until 8 frames have run."""
    global xChar
    global count
    if count >= 8:
        return
    window.after(100, anim)
    count += 1
    xChar += 50
    drawSuggestion()
# Filenames use only lowercase letters; spaces become underscores and
# dots become the letter 'd'.
def fixFilename(name):
    """Normalize a display name into the stem of its .gif asset filename."""
    return name.lower().translate(str.maketrans({" ": "_", ".": "d"}))
# Backdrop rendering.
def drawLocation():
    """Load the current location's .gif and paint it as the backdrop."""
    global currentLocation
    global currentLocationImage
    global canvas
    # Keep a module-level reference so tkinter doesn't garbage-collect the image.
    currentLocationImage = PhotoImage(file=fixFilename(currentLocation) + ".gif")
    canvas.create_image(400, 300, image=currentLocationImage)
def change_coord(event):
    """Move the suspect sprite 15 pixels in the direction of the arrow key."""
    global window
    global xChar, yChar
    moves = {'Up': (0, -15), 'Down': (0, 15), 'Right': (15, 0), 'Left': (-15, 0)}
    if event.keysym in moves:
        dx, dy = moves[event.keysym]
        xChar += dx
        yChar += dy
        drawSuggestion()
def drawSuspect():
    """Load the current suspect's .gif and draw it at (xChar, yChar)."""
    global currentSuspect
    global currentSuspectImage
    global canvas
    # Keep a module-level reference so tkinter doesn't garbage-collect the image.
    currentSuspectImage = PhotoImage(file=fixFilename(currentSuspect) + ".gif")
    canvas.create_image(xChar, yChar, image=currentSuspectImage)
def drawSuggestion():
    """Redraw the whole scene: backdrop first, then the suspect sprite on top."""
    drawLocation()
    drawSuspect()
def setLocation(location):
    """Switch the backdrop to *location* and redraw immediately."""
    global currentLocation
    currentLocation = location
    drawSuggestion()
def drawWeapon():
    """Draw the current weapon's image, then re-draw the suspect sprite."""
    global currentWeapon
    global currentWeaponImage
    global canvas
    weaponFile= fixFilename(currentWeapon) + ".gif"
    currentWeaponImage = PhotoImage(file=weaponFile)
    canvas.create_image(300, 300, image=currentWeaponImage)
    # NOTE(review): this re-draws the suspect using the module-level
    # currentSuspectImage -- looks like a copy-paste from drawSuspect();
    # confirm it is intentional (drawWeapon is never called by drawSuggestion).
    canvas.create_image(xChar, yChar, image=currentSuspectImage)
# NOTE(review): exact duplicate of setLocation() defined above; this
# identical re-definition harmlessly shadows the first one.
def setLocation(location):
    global currentLocation
    currentLocation = location
    drawSuggestion()
# Per-room button callbacks: each simply delegates to setLocation(),
# which updates the global and redraws.
def setDining():
    setLocation("dining")
def setLounge():
    setLocation("lounge")
def setConservatory():
    setLocation("conservatory")
def setLibrary():
    setLocation("Library")
def setBilliard():
    setLocation("Billiard")
def setHall():
    setLocation("Hall")
def setStudy():
    setLocation("Study")
def _setSuspect(name):
    """Shared body for the suspect button callbacks."""
    global currentSuspect
    currentSuspect = name
    drawSuggestion()
def setWhite():
    _setSuspect("White")
def setMustard():
    _setSuspect("Mustard")
def setGreen():
    _setSuspect("Green")
def setPeacock():
    _setSuspect("Peacock")
def _setWeapon(name):
    """Shared body for the weapon button callbacks."""
    global currentWeapon
    currentWeapon = name
    drawSuggestion()
def setWeapon():
    _setWeapon("Dagger")
def setRope():
    _setWeapon("Rope")
def setCandlestick():
    _setWeapon("Candlestick")
def setLeadpipe():
    _setWeapon("Leadpipe")
def setRevolver():
    _setWeapon("Revolver")
def setSpanner():
    _setWeapon("Spanner")
def setupGameControls(w):
    """Create all control buttons and the drawing canvas inside window *w*."""
    global canvas
    button_specs = [
        ("Reset Suggestion", resetSuggestion, 10, 5),
        ("Suspect Parade", suspectParade, 200, 5),
        # Rooms
        ("Dining", setDining, 10, 650),
        ("Lounge", setLounge, 80, 650),
        ("Conservatory", setConservatory, 160, 650),
        ("Library", setLibrary, 270, 650),
        ("Billiard", setBilliard, 340, 650),
        ("Hall", setHall, 410, 650),
        ("Study", setStudy, 460, 650),
        # Characters
        ("Mrs.White", setWhite, 10, 700),
        ("Colonel Mustard", setMustard, 110, 700),
        ("Reverend Green", setGreen, 240, 700),
        ("Mrs. Peacock", setPeacock, 370, 700),
        # Weapons
        ("Dagger", setWeapon, 10, 750),
        ("Rope", setRope, 80, 750),
        ("Candlestick", setCandlestick, 140, 750),
        ("Leadpipe", setLeadpipe, 240, 750),
        ("Revolver", setRevolver, 330, 750),
        ("Spanner", setSpanner, 410, 750),
    ]
    for text, command, x, y in button_specs:
        Button(w, text=text, command=command).place(x=x, y=y)
    canvas = Canvas(w, width=800, height=600)
    canvas.place(x=10, y=30)
    canvas.create_rectangle(0, 0, 800, 600, fill="white")
#NEW
window = Tk()
window.geometry("900x800")
# Route all arrow-key presses to the sprite-movement handler.
window.bind_all('<Up>', change_coord)
window.bind_all('<Down>', change_coord)
window.bind_all('<Left>', change_coord)
window.bind_all('<Right>', change_coord)
# Initial sprite position and animation frame counter.
xChar = 200
yChar = 400
count = 0
# Asset-name pools used by suspectParade() (gif filename stems).
foo = ['colonel_mustard', 'miss_white', 'reverend_green', 'miss_peacock', 'miss_scarlet', 'professor_plum']
man = ['ballroom', 'billard', 'conservatory', 'dinroom', 'hall', 'kitchen', 'library', 'lounge', 'study']
chu = ['candlestick', 'dagger', 'lead_pipe', 'revolver', 'rope', 'spanner']
setupGameControls(window)
resetSuggestion()
drawSuggestion()
window.mainloop()
|
xantin/code-examples
|
python/Project - Haya_py/cluedo.py
|
cluedo.py
|
py
| 7,073
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16164892137
|
from flask import Flask, request, jsonify, abort, Response, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from os import environ
import sys
import os
import asyncio
import requests
from invokes import invoke_http
import pika
import amqp_setup
import json
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'pool_recycle': 299}
db = SQLAlchemy(app)
CORS(app)
# Downstream microservice endpoints; environment variables override the
# localhost defaults used for local development.
verification_URL = environ.get(
    'verificationURL') or "http://localhost:6001/verification/"
account_URL = environ.get('accountURL') or "http://localhost:6003/account/"
epayment_URL = environ.get('epaymentURL') or "http://localhost:6203/epayment"
loyalty_URL = environ.get('loyaltyURL') or "http://localhost:6301/loyalty/"
promo_URL = environ.get('promoURL') or "http://localhost:6204/promo/"
queue_URL = environ.get('queueURL') or "http://localhost:6202/queueticket/"
order_URL = environ.get('orderURL') or "http://localhost:6201/order/"
@app.route('/order/get_payment_method/<int:account_id>', methods=['POST'])
async def select_payment_method(account_id):
    """Create a queue ticket for *account_id*, paid via the method in the
    JSON body: "external" (checkout session), "promo" or "loyalty".

    NOTE(review): declared async but contains no awaits -- every
    invoke_http call is synchronous.
    """
    payment_method1 = request.get_json()
    payment_method = payment_method1['payment_method']
    # Next queue number = current queue length + 1 (1 when the queue is
    # empty or the queue service is unreachable).
    check_qid = invoke_http(
        queue_URL, method='GET')
    if check_qid["code"] == 200:
        if len(check_qid["data"]["queues"]) == 0:
            queue_id = 1
        else:
            queue_id = len(check_qid["data"]["queues"]) + 1
    else:
        queue_id = 1
    data = {
        "account_id": account_id,
        "queue_id": queue_id,
        "payment_method": payment_method
    }
    if (payment_method == "external"):
        response = invoke_http(epayment_URL + 'create_checkout_session',
                               method="POST", json={"account_id": data["account_id"]})
        if response:
            response["queue_id"] = data["queue_id"]
            ini_create_ticket = invoke_http(
                order_URL + str(account_id) + "/paying", method='POST', json=data)
            if ini_create_ticket["code"] == 201:
                return jsonify({
                    "code": 200,
                    "data": response,
                    "queue_id": data["queue_id"]
                }), 200
            else:
                return jsonify({
                    "code": 405,
                    "data": response,
                    "message": "Failed to create ticket"
                }), 405
        else:
            return jsonify({'status': 'error', 'message': 'Failed to create checkout session', 'data': response})
    elif (payment_method == "promo"):
        promo_json = {
            "is_used": 1,
            "promo_code": payment_method1["promo_code"]
        }
        update_promo = invoke_http(
            promo_URL + str(account_id), method="PATCH", json=promo_json)
        if update_promo["code"] == 200:
            ini_create_ticket = invoke_http(
                order_URL + str(account_id) + "/paying", method='POST', json=data)
            if ini_create_ticket["code"] == 201:
                return jsonify({
                    "code": 200,
                    "message": "Promo code has been redeemed",
                    "data": update_promo["data"],
                    "queue_id": data["queue_id"]
                }), 200
            # NOTE(review): when ticket creation fails here the function
            # falls through and implicitly returns None (a 500 in Flask).
        else:
            return jsonify({
                "code": 405,
                "message": update_promo["message"]
            }), 405
    elif (payment_method == "loyalty"):
        points = {
            "points": 500
        }
        update_loyalty = invoke_http(
            loyalty_URL + str(account_id) + "/redeem", method='PATCH', json=points)
        if update_loyalty["code"] == 200:
            ini_create_ticket = invoke_http(
                order_URL + str(account_id) + "/paying", method='POST', json=data)
            if ini_create_ticket["code"] == 201:
                return jsonify({
                    "code": 200,
                    "message": "Loyalty points have been redeemed",
                    "data": update_loyalty["data"],
                    "queue_id": data["queue_id"],
                    "available_points": update_loyalty["data"]["available_points"]
                }), 200
            else:
                return jsonify({
                    "code": 405,
                    "message": update_loyalty["message"],
                    "available_points": update_loyalty["data"]["available_points"]
                }), 405
        # NOTE(review): when the loyalty redemption itself fails (code !=
        # 200) the function falls through and implicitly returns None.
    else:
        return "Cannot find payment method"
@app.route("/order/<int:account_id>/paying", methods=['POST'])
def ini_create_ticket(account_id):
    # this function initialises the create ticket post
    # invoked by one of 3 payment microservice to indicate that it has been paid
    if (not request.is_json):
        return jsonify({
            "code": 404,
            "message": "Invalid JSON input: " + str(request.get_data())
        }), 404
    data = request.get_json()
    create_ticket = invoke_http(
        queue_URL, method='POST', json=data)
    if create_ticket["code"] == 201:
        # For User Scenario 3, Update Challenge Status
        # Publish a persistent (delivery_mode=2) "challenge complete" event.
        challenge_message = {
            "mission_id": 2,
            "code": 201
        }
        challenge_message.update(create_ticket["data"])
        message = json.dumps(challenge_message)
        amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename1, routing_key="challenge.challenge_complete", body=message, properties=pika.BasicProperties(delivery_mode=2))
        return jsonify({
            "code": 201,
            "message": "Queueticket being created",
            "data": create_ticket["data"]
        }), 201
    else:
        return jsonify({
            "code": 405,
            "message": "Queueticket not being created",
            "error": create_ticket,
        }), 405
@app.patch("/order/<int:account_id>/paid")
def update_order(account_id):
    # this function is being invoked by post queue ticket
    # indicates that the ticket has been created
    if (not request.is_json):
        return jsonify({
            "code": 404,
            "message": "Invalid JSON input: " + str(request.get_data())
        }), 404
    data = request.get_json()
    update_account = invoke_http(
        account_URL + str(account_id), method='PATCH', json=data)
    if update_account["code"] == 200:
        # Look up contact details so the SMS notification can be addressed.
        account_result = invoke_http(
            verification_URL + "account/" + str(data["account_id"]), method='GET')
        notification_message = {
            "type": "queueticket",
            "account_id": data["account_id"],
            "first_name": account_result["data"]["first_name"],
            "phone_number": account_result["data"]["phone"],
            "payment_method": data["payment_method"],
            "queue_id": data["queue_id"],
            "message": "You have successfully created a queueticket."
        }
        message = json.dumps(notification_message)
        # Persistent publish (delivery_mode=2) to the SMS notification queue.
        amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key="notification.sms",
                                         body=message, properties=pika.BasicProperties(delivery_mode=2))
        return jsonify({
            "code": 200,
            "message": "Account updated successfully (is express)",
            "queue_id": data["queue_id"]
        }), 200
    else:
        return jsonify({
            "code": 405,
            "message": "Order not updated"
        }), 405
@app.route("/order/<int:queue_id>/used", methods=['PATCH'])
def ticket_used(queue_id):
    """Redeem a queue ticket: mark it used, clear the owner's priority flag
    and publish an SMS notification over AMQP.
    """
    if (not request.is_json):
        return jsonify({
            "code": 404,
            "message": "Invalid JSON input: " + str(request.get_data())
        }), 404
    data = request.get_json()
    ticket_update = invoke_http(
        queue_URL + str(data["queue_id"]), method='PATCH', json=data)
    if ticket_update["code"] == 200:
        update_is_prio = {
            "is_priority": 0
        }
        # BUG FIX: previously this PATCHed "account_URL + str(account_URL)"
        # (the URL concatenated with itself); target the account that owns
        # the redeemed ticket instead.
        account_res = invoke_http(
            account_URL + str(ticket_update["data"]["account_id"]),
            method='PATCH', json=update_is_prio)
        if account_res["code"] == 200:
            # NOTE(review): this early return skips the SMS notification
            # below on the success path -- confirm that is intended.
            return jsonify({
                "code": 200,
                "message": "Ticket used successfully"
            }), 200
        account_result = invoke_http(
            verification_URL + "account/" + str(ticket_update["data"]["account_id"]), method='GET')
        notification_message = {
            "type": "use_queue",
            "account_id": ticket_update["data"]["account_id"],
            "first_name": account_result["data"]["first_name"],
            "phone_number": account_result["data"]["phone"],
            "payment_method": ticket_update["data"]["payment_method"],
            "queue_id": ticket_update["data"]["queue_id"],
            "message": "You have redeemed your queue ticket."
        }
        message = json.dumps(notification_message)
        amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key="notification.sms",
                                         body=message, properties=pika.BasicProperties(delivery_mode=2))
        return jsonify({
            "code": 200,
            "message": "Ticket used successfully",
            "data": ticket_update["data"]
        }), 200
    else:
        return jsonify({
            "code": 405,
            "message": ticket_update["message"]
        }), 405
if __name__ == '__main__':
    # Bind on all interfaces so container port mapping works.
    app.run(host='0.0.0.0', port=6201, debug=True)
|
ESDeezknee/ESDeezknee
|
order/order.py
|
order.py
|
py
| 9,606
|
python
|
en
|
code
| 1
|
github-code
|
6
|
27257763451
|
#! /usr/bin/python -u
import fileinput
import getopt
import os
import re
import string, sys, time,random
# Characters to strip: high bytes plus the control block, except TAB (\x09).
printablestringre = re.compile('[\x80-\xFF\x00-\x08\x0A-\x1F]')
def safestring(badstring):
    """Return *badstring* with non-printable characters removed (keeps TAB)."""
    return re.sub(printablestringre, '', badstring)
def main():
    "The 'heart' of the program, executes all other functions"
    global options
    options = read_options()
    if options["file"] != '':
        fp = open(options['file'], 'r')
    while 1:
        if options["file"] != '':
            line = safestring(fp.readline().strip())
            if not line:
                break
        else:
            # NOTE(review): 'file' is a required option, so this branch is
            # effectively dead; if it ever ran, `line` below would be
            # undefined (NameError) on the first iteration.
            time.sleep(random.uniform(0.1,1.0))
        sys.stdout.write(line + '\n')
    # NOTE(review): fp is never closed; a `with open(...)` would be safer.
def read_options():
    """Read options from the command line, merged over the defaults.

    Prints usage and exits on --help; exits when a required option is
    missing.  The original exec()-based option dispatch has been replaced
    with a plain comparison (same matching rule: short form is '-' plus the
    option's first letter, long form is '--' plus the option name).

    :return: dict mapping option names to their values.
    """
    # required options
    required = ['file']
    # defaults
    options = {'file' : ''}
    # read from command line
    helpstr = 'Usage: ' + sys.argv[0] + ' [OPTIONS]' + """\n
Options:
* -f,  --file <filename>      the log file to replay to stdout
A '*' means this option is required."""
    optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['file='])
    # parse options
    for o, a in optlist:
        if (o == '-h' or o == '--help'):
            print(helpstr)
            sys.exit()
        else:
            for option in options:
                if o == '-' + option[0] or o == '--' + option:
                    options[option] = a
    for option in required:
        if not options[option]:
            print( "Required option '%s' is not set!" % option)
            sys.exit()
    # return all options
    return options
main()
|
jeffbryner/blendersecviz
|
logs/logreplay.py
|
logreplay.py
|
py
| 1,832
|
python
|
en
|
code
| 4
|
github-code
|
6
|
50847831
|
class Solution:
    def makeLargestSpecial(self, s: str) -> str:
        """Return the lexicographically largest special binary string
        obtainable by recursively swapping adjacent special substrings."""
        def rebuild(chunk):
            parts = []
            start = depth = 0
            for end, ch in enumerate(chunk):
                depth += 1 if ch == '1' else -1
                if depth == 0:
                    # '1' + maximized-inner + '0' is one top-level special piece.
                    parts.append('1' + rebuild(chunk[start + 1:end]) + '0')
                    start = end + 1
            # Sorting the pieces descending yields the largest concatenation.
            parts.sort(reverse=True)
            return "".join(parts)
        return rebuild(s)
if __name__ == "__main__":
    # Smoke test from the LeetCode 761 example.
    sol = Solution()
    s = "11011000"
    assert sol.makeLargestSpecial(s) == "11100100"
|
code-cp/leetcode
|
solutions/761/main.py
|
main.py
|
py
| 680
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72510037949
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML, Div, Row, Column, Fieldset
from crispy_forms.bootstrap import InlineRadios
from django.contrib.auth.forms import PasswordResetForm
class EmailValidationOnForgotPassword(PasswordResetForm):
    """Password-reset form that rejects e-mail addresses without an active user."""
    def clean_email(self):
        email = self.cleaned_data['email']
        user_exists = User.objects.filter(email__iexact=email, is_active=True).exists()
        if not user_exists:
            self.add_error('email', ("There is no user registered with the specified E-Mail address."))
        return email
class UserRegisterForm(UserCreationForm):
    # NOTE(review): Meta.fields lists 'password1' but not 'password2';
    # UserCreationForm declares both as form fields, so confirm the
    # confirmation field renders/validates as intended.
    class Meta():
        model = User
        fields = ['username', 'email', 'password1']
    # fields = ['username', 'email', 'password1']
    # def __init__(self, *args, **kwargs):
    #     super(UserRegisterForm, self).__init__(*args, **kwargs)
    #     self.fields['username'].widget.attrs.update({'class': 'form-control', 'placeholder': 'username'})
    #     self.fields['email'].widget.attrs.update({'class': 'form-control', 'placeholder': 'email'})
    #     self.fields['password1'].widget.attrs.update({'class': 'form-control', 'placeholder': 'password'})
    #     self.fields['password2'].widget.attrs.update({'class': 'form-control', 'placeholder': 'repeat password'})
""" Update user profile fields """
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'email', 'first_name', 'last_name']
""" Update user profile image """
class ProfileUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-group'
self.helper.form_tag = False
self.helper.layout = Layout(
'phone_number'
)
class Meta:
model = Profile
fields = ['image', 'phone_number']
class ExampleForm(forms.Form):
    # Demo form showcasing a crispy-forms Layout: fieldset legend, raw HTML
    # (with template-context access to {{ username }}), a three-column Row
    # with inline radios, and a styled Submit button.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'first arg is the legend of the fieldset',
            'favorite_number',
            'favorite_color',
            'favorite_food',
            HTML("""<p>We use notes to get better, <strong>please help us {{ username }}</strong></p>"""),
            'notes',
            Row(
                Column('name'),
                Column('email'),
                Column(InlineRadios('like_website')),
            ),
            Submit('submit', 'Submit', css_class='button white'),
        )
    # Radio pair posted as "1"/"0" and coerced to bool.
    like_website = forms.TypedChoiceField(
        label = "Do you like this website?",
        choices = ((1, "Yes"), (0, "No")),
        coerce = lambda x: bool(int(x)),
        widget = forms.RadioSelect,
        initial = '1',
        required = True,
    )
    favorite_food = forms.CharField(
        label = "What is your favorite food?",
        max_length = 80,
        required = True,
    )
    favorite_color = forms.CharField(
        label = "What is your favorite color?",
        max_length = 80,
        required = True,
    )
    favorite_number = forms.IntegerField(
        label = "Favorite number",
        required = False,
    )
    notes = forms.CharField(
        label = "Additional notes or feedback",
        required = False,
    )
    name = forms.CharField(
        label = 'What is your name?',
        required=False
    )
    email = forms.EmailField(
        label='What is your email?',
        required=False
    )
|
userksv/carsbay
|
users/forms.py
|
forms.py
|
py
| 3,773
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16566979883
|
# 파티
# N개의 마을에 학생이 각 한 명씩 있음. 모두들 특정 마을로 모이기로 함.
# 학생들이 모였다가 다시 본인들의 마을로 돌아가야 한다고 할 때, 가장 많은 시간을 소비하는 학생을 구하라.
# 내 답안1
import sys
import heapq
input = sys.stdin.readline  # faster input reads for competitive programming
INF = 987654321  # sentinel "infinite" distance
N, M, X = map(int, input().split())  # N villages, M one-way roads, X = party village
graph = [[]for _ in range(N+1)]  # adjacency list: graph[a] = [(b, cost), ...]
for i in range(M):
    a,b,c = map(int, input().split())
    graph[a].append((b, c))
def dijkstra(start):
    """Single-source shortest paths over the global directed graph.

    Returns a list where index v holds the minimum cost from *start*
    to village v (INF when unreachable)."""
    dist = [INF for _ in range(N + 1)]
    dist[start] = 0
    heap = [(0, start)]
    while heap:
        cost_so_far, node = heapq.heappop(heap)
        if dist[node] < cost_so_far:
            continue  # stale heap entry; a shorter path was already found
        for nxt, weight in graph[node]:
            candidate = dist[node] + weight
            if candidate < dist[nxt]:
                dist[nxt] = candidate
                heapq.heappush(heap, (candidate, nxt))
    return dist
# dijkstra(X) gives the distances from the party village back home; it does
# not depend on the loop variable, so compute it once instead of N times
# (the original recomputed it on every iteration).
backward = dijkstra(X)
ans = 0
for i in range(1, N+1):
    forward = dijkstra(i)
    ans = max(ans, backward[i] + forward[X])
print(ans)
|
dngus1683/codingTestStudy
|
알고리즘/dijkstra/백준/python/1238.py
|
1238.py
|
py
| 1,028
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
14992716515
|
#!/usr/bin/env python
# coding: utf-8
# In[37]:
# Questions for 10/28 meeting:
# Test set -> Should the test be just one game? Answer: Leave it the way it is for now.
# Train set -> Should we duplicate previous games to add weighting? Answer: Yes.
## November 6th, 2020 Backend Meeting ##
# 4 Factors to include for opponent: efg, tov_pct, orb_pct, ftr ... - Done
# Add win (boolean) column for each game -> predict on that instead of points - Done
# Later on: Using most recent games???
## November 10th, 2020 Backend Meeting ##
# Next Steps:
# Get it on the dashboard
# Other functionality?
# Imports
import numpy as np
import pandas as pd
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import SelectKBest
from matplotlib import pyplot
# Widen pandas display limits for notebook inspection.
pd.set_option("display.max_rows", None, "display.max_columns", None)
# In[38]:
# Read in box score data provided by Ludis
df = pd.read_csv("team_boxscores_v3.csv")
df = df.fillna(0)
# pd.set_option('display.max_columns', None)
pd.set_option('display.max_columns', 59)
# In[39]:
### Hard-coded teamIDs from dataset for testing purposes ###
# Kentucky
team1 = '2267a1f4-68f6-418b-aaf6-2aa0c4b291f1'
# LSU
team2 = '70e2bedd-3a0a-479c-ac99-e3f58aa6824b'
# Ohio State
team3 = '857462b3-0ab6-4d26-9669-10ca354e382b'
# Florida
team4 = '912f8837-1d81-4ef9-a576-a21f271d4c64'
# Michigan State
team5 = 'a41d5a05-4c11-4171-a57e-e7a1ea325a6d'
# Stat groupings used by compTeams(): ratio stats vs integer counts, with
# "neg*" groups holding stats where improvement means decreasing the value.
floatArr = ["efg","orb_pct","ftr"]
negFloatArr = ["tov_pct"]
intArr = ["assists", "blocks","defensive_rebounds", "fast_break_pts", "points_in_paint","points_off_turnovers","rebounds","steals"]
negIntArr = ["turnovers","opponent_drb"]
# In[40]:
# Returns all game records for a given teamID
def getAllTeamMatchRecords(teamID, df):
    """Return the subset of *df* whose team_id column equals *teamID*."""
    mask = df["team_id"] == teamID
    return df[mask]
# In[41]:
# Returns win/loss ratio for a given team across entire dataset
# Add functionality for filtering by season?
def statWinLoss(teamID, df):
    """Win/loss ratio for *teamID* over every game in *df*.

    A game counts as a win when points > points_against (ties are losses,
    as in the original row loop).  When the team has no losses this returns
    1, preserving the original behaviour.  Vectorised replacement for the
    original iterrows() loop.
    """
    team_stats = df[df["team_id"] == teamID]
    wins = int((team_stats["points"] > team_stats["points_against"]).sum())
    losses = len(team_stats) - wins
    if losses == 0:
        return 1
    return wins / losses
# In[42]:
# Return all gameIDs for a given team
def getGameIDs(teamID, df):
    """Series of game_id values for every record belonging to *teamID*."""
    return df.loc[df["team_id"] == teamID, "game_id"]
# In[43]:
# Returns common game IDs between two teams
def getMatchupGameIDs(team1, team2, df):
    """Game ids present for both teams, i.e. their head-to-head games."""
    ids_a = getGameIDs(team1, df)
    ids_b = getGameIDs(team2, df)
    return pd.merge(ids_a, ids_b)
# In[44]:
# Returns average of a given statistic for a given teamID
def getAvgStatForTeam(teamID, statistic, df):
    """Mean value of *statistic* over all of *teamID*'s games.

    Vectorised replacement for the original running-sum loop; still raises
    ZeroDivisionError when the team has no games, matching the original.
    The original also contained a duplicated, unreachable `return`
    statement, removed here.
    """
    team_stats = df[df["team_id"] == teamID]
    return team_stats[statistic].sum() / len(team_stats)
print(getAvgStatForTeam(team1, "rebounds", df))
# In[45]:
# This function will get the record of a team by a specific year and can also calculate some avg
def getTeamRecordByYear(teamID, year, df):
    team_record = df[df["team_id"] == teamID]
    sum_two_pts_made = 0
    count = 0
    avg_two_pts_made = 0
    sum_field_goals_made =0
    count2 = 0
    avg_field_goals_made = 0
    for index, row in team_record.iterrows():
        if (row["season"] == year):
            # NOTE(review): this boolean mask comes from the module-level
            # `df`, not the `df` parameter's filtered subset -- confirm the
            # intended frame (index misalignment risk if they differ).
            team_record1 = team_record[df["season"] == row["season"]]
            for index, row in team_record1.iterrows():
                sum_two_pts_made += row["two_points_made"]
                sum_field_goals_made += row["field_goals_made"]
                count +=1
                count2 +=1
    # NOTE(review): when *year* never matches, count is 0 (ZeroDivisionError)
    # and team_record1 is unbound (NameError); return_value is computed but
    # never used.
    avg_two_pts_made = sum_two_pts_made / count
    avg_field_goals_made = sum_field_goals_made / count2
    return_value = "%f %f" %(avg_two_pts_made,avg_field_goals_made)
    return team_record1
# In[46]:
# Return dataframe with selected features
def filterRowsFS(df):
    """Project *df* onto the 14 model feature columns, in model order."""
    feature_columns = [
        "assists", "blocks", "defensive_rebounds", "opponent_drb",
        "fast_break_pts", "points_in_paint", "points_off_turnovers",
        "rebounds", "steals", "turnovers", "efg", "tov_pct", "orb_pct", "ftr",
    ]
    return df[feature_columns]
# In[105]:
# Calculate correct predictions -> wins/losses
def calcPredError(df):
    """Percentage of rows where 'Predicted (int)' matches 'Actual'.

    Vectorised replacement for the original per-row .loc loop; still raises
    ZeroDivisionError on an empty frame, matching the original.
    """
    correct = int((df['Actual'] == df['Predicted (int)']).sum())
    return (correct / len(df)) * 100
# In[48]:
# Calculate win percentage
def winPct(teamPred):
    """Mean of the float win predictions, scaled to a percentage."""
    preds = teamPred['Predicted (float)']
    return float(preds.sum() / len(preds) * 100)
# In[49]:
# feature selection
def select_features(X_train, y_train, X_test):
    """Fit an all-feature SelectKBest (f_regression scoring) on the training
    split and transform both splits with it.

    Returns (transformed train, transformed test, fitted selector)."""
    fs = SelectKBest(score_func=f_regression, k='all')
    fs.fit(X_train, y_train)
    return fs.transform(X_train), fs.transform(X_test), fs
# In[50]:
def overallFeatures(df):
    # Rank all 14 feature columns by their f_regression score against 'win'
    # over the whole dataset, and show a bar chart of the scores.
    datasetForFS = df
    # NOTE(review): fillna(0) without assignment is a no-op here (df is
    # already filled at load time, so this is harmless).
    datasetForFS.fillna(0)
    # X1 = datasetForFS[["assists","personal_fouls","ftr","orb_pct", "tov_pct", "points_in_paint", "blocks"]]
    # X1 = datasetForFS[["assists","blocks","personal_fouls"]]
    X1 = datasetForFS[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = datasetForFS['win']
    X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=0)
    X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)
    colList = X1.columns.values.tolist()
    statScoreDF = pd.DataFrame(data={'Stat': pd.Series(colList), 'Score': pd.Series(fs.scores_.tolist())})
    statScoreDF = statScoreDF.sort_values(by=['Score'], ascending=False)
    # plot the scores
    pyplot.bar([i for i in range(len(fs.scores_))], fs.scores_)
    pyplot.show()
    return statScoreDF
# print(overallFeatures(df))
# In[122]:
def teamFeatures(team1, team2, df):
    # Same feature ranking as overallFeatures(), but restricted to team1's
    # head-to-head games against team2 (via the game-id merge).
    datasetForFS = getAllTeamMatchRecords(team1, df).merge(getMatchupGameIDs(team1, team2, df))
    # NOTE(review): fillna(0) without assignment is a no-op.
    datasetForFS.fillna(0)
    # X1 = datasetForFS[["assists","personal_fouls","ftr","orb_pct", "tov_pct", "points_in_paint", "blocks"]]
    # X1 = datasetForFS[["assists","blocks","personal_fouls"]]
    X1 = datasetForFS[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = datasetForFS['win']
    X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=0)
    X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)
    colList = X1.columns.values.tolist()
    statScoreDF = pd.DataFrame(data={'Stat': pd.Series(colList), 'Score': pd.Series(fs.scores_.tolist())})
    statScoreDF = statScoreDF.sort_values(by=['Score'], ascending=False)
    # Plot the scores - PyPlot
    # pyplot.bar([i for i in range(len(fs.scores_))], fs.scores_)
    # pyplot.show()
    return statScoreDF
# teamFeatures(team1, team2, df)
# In[123]:
def learn(dataset):
    """Train a linear win-probability model on *dataset* and evaluate it.

    Fix: the *dataset* argument is now actually used -- the original body
    discarded it by unconditionally re-reading team_boxscores_v3.csv.
    Also removes the unused coeff_df local and the redundant second
    predict() call.

    :param dataset: boxscore DataFrame with the 14 feature columns and 'win'.
    :return: (fitted LinearRegression, DataFrame with 'Actual',
        'Predicted (int)' and 'Predicted (float)' for the held-out 20%).
    """
    dataset = dataset.fillna(0)
    # Shuffle so the manual 80/20 split below is random.
    dataset = dataset.sample(frac = 1)
    X1 = dataset[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = dataset['win']
    # First fifth is the test split, the rest trains.
    X_train = X1[int(len(X1)/5):]
    X_test = X1[:int(len(X1)/5)]
    y_train = y1[int(len(y1)/5):]
    y_test = y1[:int(len(y1)/5)]
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)
    y_pred = regressor.predict(X_test)
    y_pred_round = np.around(y_pred)
    # print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
    # print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
    # print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
    return regressor, pd.DataFrame({'Actual': y_test, 'Predicted (int)': y_pred_round, 'Predicted (float)': y_pred})
# reg, pred = learn(pd.read_csv("team_boxscores_v3.csv"))
# print(calcPredError(pred), winPct(pred))
# df1 = filterRowsFS(getAllTeamMatchRecords(team1, df))
# df2 = getAllTeamMatchRecords(team1, df)["win"]
# dfPred = reg.predict(df1)
# dfPredRound = np.around(dfPred)
# temp = pd.DataFrame({'Actual': df2, 'Predicted (int)': dfPredRound, 'Predicted (float)': dfPred})
# print(calcPredError(temp), winPct(temp))
# In[124]:
def learnMatchup(team1, team2):
    # Train a linear win model for team1, up-weighting games against team2
    # by duplicating the head-to-head rows before the train/test split.
    dataset = pd.read_csv("team_boxscores_v3.csv")
    dataset = dataset.fillna(0)
    dfTeam1 = getAllTeamMatchRecords(team1, dataset)
    # NOTE(review): uses the module-level `df` here rather than `dataset`;
    # both come from the same CSV, but confirm that is intentional.
    matchups = getMatchupGameIDs(team1, team2, df)["game_id"].tolist()
    dfTeam1 = dfTeam1.reset_index()
    # Elijah - Save rows for later and append to train set
    # NOTE(review): DataFrame.append() was removed in pandas 2.0; this loop
    # needs pd.concat under newer pandas.
    for index, row in dfTeam1.iterrows():
        for i in range(0, len(matchups)):
            if str(dfTeam1.loc[index, "game_id"]) == matchups[i]:
                dfTeam1 = dfTeam1.append(dfTeam1.loc[index], ignore_index=True)
    dfTeam1 = dfTeam1.sample(frac = 1)
    X1 = dfTeam1[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = dfTeam1['win']
    # rng = np.random.randint(0, 42)
    rng = 0
    # X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=rng)
    # W/ shuffle
    X_train = X1[int(len(X1)/5):]
    X_test = X1[:int(len(X1)/5)]
    y_train = y1[int(len(y1)/5):]
    y_test = y1[:int(len(y1)/5)]
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)
    coeff_df = pd.DataFrame(regressor.coef_, X1.columns, columns=['Coefficient'])
    y_pred = regressor.predict(X_test)
    y_pred_round = np.around(regressor.predict(X_test))
    print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
    print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
    print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
    return regressor, pd.DataFrame({'Actual': y_test, 'Predicted (int)': y_pred_round, 'Predicted (float)': y_pred})
reg, pred = learnMatchup(team1, team2)
# In[125]:
def avgDataRow(df):
    """Collapse *df* into a single-row frame holding each column's mean.

    Uses DataFrame.items() -- the original DataFrame.iteritems() was
    deprecated and removed in pandas 2.0.
    """
    return pd.DataFrame({col: [series.mean()] for col, series in df.items()})
# In[128]:
# Notebook cell: predict on team1's average stat line, then again after
# bumping average assists by 10 to see the effect on the prediction.
stats = teamFeatures(team1, team2, df).head()['Stat'].tolist()
df1 = getAllTeamMatchRecords(team1, df)
df2 = avgDataRow(filterRowsFS(getAllTeamMatchRecords(team1, df)))
df3 = df1["win"]
dfPred = reg.predict(df2)
dfPredRound = np.around(dfPred)
dfFinal = pd.DataFrame({'Actual': df3.mean(), 'Predicted (int)': dfPredRound, 'Predicted (float)': dfPred})
print(dfFinal)
# print(df2)
df2.at[0,"assists"] = df2.at[0,"assists"] + 10
dfPred = reg.predict(df2)
dfPredRound = np.around(dfPred)
dfFinal = pd.DataFrame({'Actual': df3.mean(), 'Predicted (int)': dfPredRound, 'Predicted (float)': dfPred})
print(dfFinal)
# print(df2)
# In[54]:
# Return win percentage as stat changes
# df - dataframe, e.g. getAllTeamMatchRecords(team1, df)
# reg - regressor from above
# var - the feature to change
# val - the value to add to the feature
def predOnStat(df, reg, var, val):
    """Predict the team's win percentage after shifting one feature.

    Args:
        df: all match records for a team (getAllTeamMatchRecords output).
        reg: fitted regressor (learnMatchup output).
        var (str): feature column to perturb.
        val: amount added to every row of *var*.

    Returns:
        float: win percentage (via winPct) predicted on the perturbed data.
    """
    features = ["assists", "blocks", "defensive_rebounds", "opponent_drb",
                "fast_break_pts", "points_in_paint", "points_off_turnovers",
                "rebounds", "steals", "turnovers", "efg", "tov_pct",
                "orb_pct", "ftr"]
    # Copy so the caller's frame is never mutated (the original wrote into
    # a slice of *df*, triggering pandas chained-assignment warnings), and
    # apply the offset vectorised instead of a per-row iterrows loop.
    df1 = df[features].copy()
    df1[var] = df1[var] + val
    temp_pred = reg.predict(df1)
    temp_pred_round = np.around(temp_pred)
    test = pd.DataFrame({'Actual': df['win'],
                         'Predicted (int)': temp_pred_round,
                         'Predicted (float)': temp_pred})
    return float(winPct(test))
# In[ ]:
# df -> dataframe
# reg -> regressor
# Return new win pct
def updateWinPct(df, reg):
    """Placeholder: recompute a win percentage for *df* using *reg*.

    NOTE(review): unfinished stub — ``reg.predict()`` is called with no
    input and would raise if executed; nothing in this chunk calls it.
    Confirm intent before relying on it.
    """
    reg.predict()
# In[28]:
# statList = ["assists", "blocks", "orb_pct"]
def compTeams(df, teamID, opponentID, win_percent):
    """Estimate how much each top-five stat must change for *teamID* to
    reach a target win percentage against *opponentID*.

    Bug fix: the original ended with ``print(val)`` / ``return temp``,
    both undefined names (NameError); the per-stat adjustments are now
    collected and returned.

    Args:
        df: full match-record DataFrame.
        teamID: team being improved.
        opponentID: opposing team.
        win_percent (float): target win percentage.

    Returns:
        dict: {stat name: signed adjustment that reached the target}.
    """
    topFive = teamFeatures(teamID, opponentID, df)["Stat"].head().tolist()
    print(topFive)
    reg, pred = learnMatchup(teamID, opponentID)
    records = getAllTeamMatchRecords(teamID, df)
    originalPct = predOnStat(records, reg, 'assists', 0)
    adjustments = {}
    for stat in topFive:
        currentPct = originalPct
        print(stat)
        # Step direction and size depend on the stat family (the module
        # arrays intArr/negIntArr/floatArr/negFloatArr are defined
        # elsewhere in this file).
        if stat in intArr:
            step = 1
        elif stat in negIntArr:
            step = -1
        elif stat in floatArr:
            step = 0.1
        elif stat in negFloatArr:
            step = -0.1
        else:
            continue
        delta = 0
        while currentPct <= win_percent:
            delta = delta + step
            currentPct = predOnStat(records, reg, stat, delta)
            print(stat, delta)
        adjustments[stat] = delta
    print(adjustments)
    return adjustments
# Target win percentage to search for (see compTeams above).
win_percent = 80.5
compTeams(df, team1, team2, win_percent)
# In[19]:
# testey = getAllTeamMatchRecords(team1, df)
# prediction_acc, win_percent = predOnStat(testey, reg, "assists", 0)
# print("Prediction accuracy:", prediction_acc, "\nWin Percent:", win_percent)
# prediction_acc, win_percent = predOnStat(testey, reg, "assists", 5)
# print("Prediction accuracy:", prediction_acc, "\nWin Percent:", win_percent)
# prediction_acc, win_percent = predOnStat(testey, reg, "assists", 10)
# print("Prediction accuracy:", prediction_acc, "\nWin Percent:", win_percent)
# In[ ]:
|
oohshan/SmartGameGoalsGenerator
|
passenger.py
|
passenger.py
|
py
| 15,323
|
python
|
en
|
code
| 1
|
github-code
|
6
|
72650289467
|
#
# @lc app=leetcode id=148 lang=python3
#
# [148] Sort List
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Return the list sorted ascending (values copied into new nodes)."""
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        dummy = ListNode()
        tail = dummy
        for v in sorted(values):
            tail.next = ListNode(v)
            tail = tail.next
        return dummy.next
# @lc code=end
|
hieun314/leetcode_NguyenKimHieu
|
148.sort-list.py
|
148.sort-list.py
|
py
| 636
|
python
|
en
|
code
| 0
|
github-code
|
6
|
32644084947
|
import maya.cmds as cmds
import pymel.core as pm
from mgear.core import attribute
ATTR_SLIDER_TYPES = ["long", "float", "double", "doubleLinear", "doubleAngle"]
DEFAULT_RANGE = 1000
# TODO: filter channel by color. By right click menu in a channel with color
def init_table_config_data():
    """Build an empty channel-master table configuration.

    ``channels`` is the ordered list of attribute full names;
    ``channels_data`` maps each full name to its per-channel settings.

    Returns:
        dict: fresh, empty configuration dictionary.
    """
    return {"channels": [], "channels_data": {}}
def init_channel_master_config_data():
    """Build an empty channel-master tabs configuration.

    Returns:
        dict: fresh configuration with no tabs and tab 0 current.
    """
    return {"tabs": [], "tabs_data": {}, "current_tab": 0}
def get_keyable_attribute(node):
    """Return the keyable attributes of *node*.

    blendShape nodes expose their weights through the ``w`` multi
    attribute, so they are listed differently.

    Args:
        node (str): name of the node to inspect.

    Returns:
        list: keyable attribute names (None when Maya finds none).
    """
    if cmds.nodeType(node) != "blendShape":
        return cmds.listAttr(node, ud=False, k=True)
    return cmds.listAttr("{}.w".format(node), m=True)
def get_single_attribute_config(node, attr):
    """Collect the channel-master configuration for one attribute.

    Args:
        node (str): name of the node that has the attribute.
        attr (str): attribute name.

    Returns:
        dict or None: attribute configuration, or None when the attribute
        type cannot be queried (e.g. it no longer exists on the node).
    """
    config = {}
    # config["ctl"] = pm.NameParser(node).stripNamespace().__str__()
    config["ctl"] = node
    config["color"] = None  # place holder for the channel UI color
    try:
        config["type"] = cmds.attributeQuery(attr, node=node, attributeType=True)
    except Exception:
        # Was a bare `except:` — keep the best-effort behaviour but stop
        # swallowing SystemExit/KeyboardInterrupt.
        return None
    # Alias attributes (e.g. blendShape weights) can't be queried for
    # nice/long names, so reuse the alias itself.
    alias = cmds.aliasAttr(node, q=True)
    if alias and attr in alias:
        config["niceName"] = attr
        config["longName"] = attr
    else:
        config["niceName"] = cmds.attributeQuery(
            attr, node=node, niceName=True
        )
        config["longName"] = cmds.attributeQuery(
            attr, node=node, longName=True
        )
    config["fullName"] = config["ctl"] + "." + config["longName"]
    if config["type"] in ATTR_SLIDER_TYPES:
        # Numeric channels get explicit slider bounds, falling back to a
        # wide default range when the attribute declares none.
        if cmds.attributeQuery(attr, node=node, maxExists=True):
            config["max"] = cmds.attributeQuery(attr, node=node, max=True)[0]
        else:
            config["max"] = DEFAULT_RANGE
        if cmds.attributeQuery(attr, node=node, minExists=True):
            config["min"] = cmds.attributeQuery(attr, node=node, min=True)[0]
        else:
            config["min"] = DEFAULT_RANGE * -1
        config["default"] = cmds.attributeQuery(
            attr, node=node, listDefault=True
        )[0]
    elif config["type"] in ["enum"]:
        items = cmds.attributeQuery(attr, node=node, listEnum=True)[0]
        config["items"] = [x for x in items.split(":")]
    # Value at channel creation time; may differ from the default value.
    config["creationValue"] = cmds.getAttr("{}.{}".format(node, attr))
    return config
def get_attributes_config(node):
    """Gather the configuration of every keyable attribute on *node*.

    Args:
        node (str): name of the node to inspect.

    Returns:
        dict: table configuration (see init_table_config_data), one entry
        per attribute whose configuration could be read.
    """
    config_data = init_table_config_data()
    for attr in get_keyable_attribute(node) or []:
        config = get_single_attribute_config(node, attr)
        if not config:
            continue
        full_name = config["fullName"]
        config_data["channels"].append(full_name)
        config_data["channels_data"][full_name] = config
    return config_data
def get_table_config_from_selection():
    """Read the attribute configuration from the current selection.

    Returns:
        tuple: (attrs_config or None, namespace or None) taken from the
        last node in the current PyMEL selection.
    """
    attrs_config, namespace = None, None
    selection = pm.selected()
    if selection:
        last = selection[-1]
        namespace = last.namespace()
        attrs_config = get_attributes_config(last.name())
    return attrs_config, namespace
def get_ctl_with_namespace(attr_config, namespace=None):
    """Return the control name stored in *attr_config*, re-namespaced.

    Args:
        attr_config (dict): attribute configuration (needs "ctl").
        namespace (str, optional): namespace to prepend after stripping
            any namespace already on the stored control name.

    Returns:
        str: control name, with *namespace* applied when given.
    """
    if not namespace:
        return attr_config["ctl"]
    stripped = pm.NameParser(attr_config["ctl"]).stripNamespace().__str__()
    return namespace + stripped
def reset_attribute(attr_config, namespace=None):
    """Reset the attribute described by *attr_config* to its default.

    Args:
        attr_config (dict): attribute configuration.
        namespace (str, optional): namespace of the target control.
    """
    # Bug fix: the caller's namespace was previously discarded
    # (`namespace=None` was hard-coded), so namespaced rigs reset the
    # wrong node.
    ctl = get_ctl_with_namespace(attr_config, namespace=namespace)
    obj = pm.PyNode(ctl)
    attr = attr_config["longName"]
    attribute.reset_selected_channels_value(objects=[obj], attributes=[attr])
def reset_creation_value_attribute(attr_config, namespace=None):
    """Reset the attribute to the value it had when the channel was created.

    Args:
        attr_config (dict): attribute configuration.
        namespace (str, optional): namespace of the target control.
    """
    # Bug fix: forward the caller's namespace instead of hard-coding None.
    ctl = get_ctl_with_namespace(attr_config, namespace=namespace)
    attr = attr_config["longName"]
    fullname_attr = "{}.{}".format(ctl, attr)
    if "creationValue" in attr_config.keys():
        val = attr_config["creationValue"]
        cmds.setAttr(fullname_attr, val)
    else:
        pm.displayWarning(
            "Initial Creation Value was not originally stored for {}".format(
                fullname_attr
            )
        )
def sync_graph_editor(attr_configs, namespace=None):
    """Sync the given channels into the Graph Editor.

    Selects the channels' host controls, then filters the Graph Editor
    outliner so only those attribute curves are shown.

    Args:
        attr_configs (list): list of attribute configuration dicts.
        namespace (str, optional): namespace to apply to stored names.
    """
    # select channel host controls (deduplicated, re-namespaced if needed)
    # NOTE(review): the duplicate check compares the *un*-namespaced name
    # against the already-namespaced list, so duplicates are possible
    # when a namespace is given — confirm intent.
    ctls = []
    for ac in attr_configs:
        ctl = ac["ctl"]
        if ctl not in ctls:
            if namespace:
                ctl = namespace + pm.NameParser(ctl).stripNamespace().__str__()
            ctls.append(ctl)
    pm.select(ctls, r=True)
    # collect the attribute full names to show as curves
    cnxs = []
    for ac in attr_configs:
        attr = ac["fullName"]
        if namespace:
            attr = namespace + pm.NameParser(attr).stripNamespace().__str__()
        cnxs.append(attr)

    def ge_update():
        # Replace the Graph Editor outliner selection with our channels.
        pm.selectionConnection("graphEditor1FromOutliner", e=True, clear=True)
        for c in cnxs:
            cmds.selectionConnection(
                "graphEditor1FromOutliner", e=True, select=c
            )

    # we need to evalDeferred to allow grapheditor update the selection
    # highlight in grapheditor outliner
    pm.evalDeferred(ge_update)
################
# Keyframe utils
################
def current_frame_has_key(attr):
    """Check whether *attr* is keyed on the current frame.

    Args:
        attr (str): attribute full name.

    Returns:
        True when a key exists at the current time, otherwise None.
    """
    keys = pm.keyframe(attr, query=True, time=pm.currentTime())
    return True if keys else None
def channel_has_animation(attr):
    """Check whether *attr* has any keyframes at all.

    Args:
        attr (str): attribute full name.

    Returns:
        True when the attribute is animated, otherwise None.
    """
    return True if cmds.keyframe(attr, query=True) else None
def get_anim_value_at_current_frame(attr):
    """Evaluate the animation curve of *attr* at the current frame.

    Args:
        attr (str): attribute full name.

    Returns:
        The evaluated value, or None when the attribute has no curve.
    """
    values = cmds.keyframe(attr, query=True, eval=True)
    return values[0] if values else None
def set_key(attr):
    """Set a keyframe on *attr* at the current frame.

    Args:
        attr (str): attribute full name.
    """
    cmds.setKeyframe(attr)
def remove_key(attr):
    """Remove the keyframe of *attr* at the current frame.

    Args:
        attr (str): attribute full name.
    """
    pm.cutKey(attr, clear=True, time=pm.currentTime())
def remove_animation(attr):
    """Remove all animation from *attr*.

    Args:
        attr (str): attribute full name.
    """
    pm.cutKey(attr, clear=True)
def _go_to_keyframe(attr, which):
    # Move the timeline to the adjacent key of *attr*; `which` is "next"
    # or "previous", as accepted by cmds.findKeyframe.
    frame = cmds.findKeyframe(attr, which=which)
    cmds.currentTime(frame, e=True)
def next_keyframe(attr):
    """Move the current time to the next keyframe of *attr*."""
    _go_to_keyframe(attr, which="next")
def previous_keyframe(attr):
    """Move the current time to the previous keyframe of *attr*."""
    _go_to_keyframe(attr, which="previous")
def value_equal_keyvalue(attr, current_time=False):
    """Compare the current value of *attr* against its animation value.

    Args:
        attr (str): attribute full name.
        current_time: when truthy, a time at which getAttr samples the
            value (presumably a frame number — confirm with callers).

    Returns:
        True when both values match, otherwise None.
    """
    anim_val = get_anim_value_at_current_frame(attr)
    if current_time:
        current = cmds.getAttr(attr, time=current_time)
    else:
        current = cmds.getAttr(attr)
    return True if anim_val == current else None
|
mgear-dev/mgear4
|
release/scripts/mgear/animbits/channel_master_utils.py
|
channel_master_utils.py
|
py
| 9,005
|
python
|
en
|
code
| 209
|
github-code
|
6
|
15142385428
|
import requests
from flask import redirect, url_for, flash
from app.github import bp
from app.github.functions import request_interface
@bp.route('/update-database', methods=['GET', 'POST'])
async def update_database():
    """Fetch the 100 most-starred Python repos from GitHub and store them.

    Redirects home on success (or after flashing an error message);
    returns an error string when the downstream update fails.
    """
    # GitHub search API, sorted by stars; 100 is the per-page maximum.
    url = 'https://api.github.com/search/repositories?q=language:python&sort=stars&per_page=100'
    response = requests.get(url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        # Bug fix: every HTTP error used to be reported as a rate limit.
        # 403/429 are GitHub's rate-limit responses; report anything else
        # honestly with its status code.
        if response.status_code in (403, 429):
            flash('Rate Limit Exceeded, please wait a little while and try again')
        else:
            flash('GitHub request failed with status {}'.format(response.status_code))
        return redirect(url_for('main.home'))
    response_dict = response.json()
    status = await request_interface(response_dict)
    if status == 200:
        return redirect(url_for('main.home'))
    return 'Oh no! Something is amiss!'
|
Red-Hammer/most-starred-python-repos
|
app/github/routes.py
|
routes.py
|
py
| 872
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37226233681
|
class flag():
    """Definition of one command-line flag for the parser below.

    Attributes:
        short_flag: one-character short name (used as "-x"), or None.
        long_flag: long name (used as "--name"); 'header' is reserved.
        args_num: number of argument words the flag consumes (>= 0).
        args_name_list: keys under which parsed values are stored.
        if_force_num: if True, exactly args_num arguments must follow.
    """

    def __init__(self, short_flag, long_flag, args_num, args_name_list, if_force_num):
        self.short_flag = short_flag
        self.long_flag = long_flag
        self.args_num = args_num
        self.args_name_list = args_name_list
        self.if_force_num = if_force_num
        # Best-effort validation: an invalid definition only prints a
        # message; the (possibly inconsistent) object is still built.
        # NOTE(review): `assert` is stripped under -O and the bare
        # `except` also hides unrelated errors (e.g. TypeError) — consider
        # raising ValueError instead.
        try:
            assert args_num >= 0
            assert args_name_list != []
            assert short_flag is None or len(short_flag) == 1
            assert long_flag != 'header'
        except:
            print('attribute error in class flag')
class parser():
    """Tiny state-machine parser for flag-style argument strings.

    Args:
        header_flag (flag, optional): flag whose arguments may appear at
            the start of the string, before any named flag.
        flags_list (list[flag], optional): the recognised flags.
    """

    def __init__(self, header_flag=None, flags_list=None):
        self.header = header_flag
        # Bug fix: the mutable default ([]) was shared across every
        # parser instance; create a fresh list per instance instead.
        self.flags = [] if flags_list is None else flags_list

    def parse_args(self, string):
        """Parse *string* into {arg_name: value}.

        Zero-argument flags store True under their single arg name;
        names never consumed stay None.  Returns the string
        'unknown_flags_or_too_many_arguments' on malformed input.
        """
        args = dict()
        arg_name_list = [name for flag in self.flags for name in flag.args_name_list]
        if self.header is not None:
            arg_name_list += self.header.args_name_list
        for key in arg_name_list:
            args[key] = None
        args_cnt = 0
        args_num = 0
        if_force_num = None
        # The header (if any) may contribute leading positional arguments.
        if self.header is not None:
            wait_status = 'arg_or_flag'
            flag_status = self.header.args_name_list
            args_num = self.header.args_num
            if_force_num = self.header.if_force_num
        else:
            wait_status = 'flag'
            flag_status = None
        words = string.split(' ')
        words = [word.replace(' ', '') for word in words]
        words = [word for word in words if word != '' and word != ' ']
        for word in words:
            if wait_status == 'arg_or_flag' or wait_status == 'flag':
                this_flag = None
                for flag in self.flags:
                    # Bug fix: guard against short_flag=None before
                    # concatenating ('-' + None raised TypeError).
                    if flag.short_flag is not None and word == '-' + flag.short_flag:
                        this_flag = flag
                        break
                    elif word == '--' + flag.long_flag:
                        this_flag = flag
                        break
                if this_flag is None:
                    if wait_status != 'arg_or_flag':
                        return 'unknown_flags_or_too_many_arguments'
                if this_flag is not None:
                    if this_flag.args_num == 0:
                        # Boolean flag: presence simply stores True.
                        args[this_flag.args_name_list[0]] = True
                        wait_status = 'flag'
                        continue
                    wait_status = 'arg'
                    flag_status = this_flag.args_name_list
                    args_num = this_flag.args_num
                    if_force_num = this_flag.if_force_num
                    args_cnt = 0
                    continue
            # wait_status == 'arg' (or an 'arg_or_flag' word that matched
            # no flag): consume the word as the next expected argument.
            args[flag_status[args_cnt]] = word
            args_cnt += 1
            if args_cnt == args_num:
                wait_status = 'flag'
                flag_status = None
                args_num = None
                args_cnt = 0
                if_force_num = None
                continue
            if if_force_num == False:
                wait_status = 'arg_or_flag'
            else:
                wait_status = 'arg'
        return args
|
Ntimesp/AkinaChann
|
utils/arg_parser.py
|
arg_parser.py
|
py
| 3,295
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21099702516
|
from sklearn import svm
import sklearn.linear_model.stochastic_gradient as sg
from sklearn.model_selection import GridSearchCV as grid
import numpy
#linear kernel support vector machine using tf-idf vectorizations
# Linear support-vector-machine style text classifier over tf-idf vectors.
class SVM:
    # NOTE(review): these class-level attributes are immediately shadowed
    # by the instance attributes set in __init__ and are shared mutable
    # state — consider removing them.
    train_X = []
    train_Y = []
    test_X = []
    test_Y = []

    def __init__(self, train_X, train_Y, test_X, test_Y, n_iter, alpha):
        # n_iter / alpha are forwarded to SGDClassifier in predict().
        self.n_iter = n_iter
        self.alpha = alpha
        # Inputs are pandas Series of token lists; join into raw documents.
        self.train_X = train_X.apply(lambda x: ' '.join(x)).tolist()
        self.train_Y = train_Y
        self.test_X = test_X.apply(lambda x: ' '.join(x)).tolist()
        self.test_Y = test_Y

    # Convert text to tf-idf vectors and return accuracy obtained from SVM
    def predict(self):
        """Fit an SGD linear classifier on tf-idf features and return the
        test-set accuracy (mean of exact label matches).

        Side effect: self.train_X / self.test_X are replaced by their
        tf-idf matrices, so this is effectively single-use.
        """
        from sklearn.feature_extraction.text import TfidfVectorizer
        # Fit the vocabulary on the training set only, then reuse it to
        # transform the test set.
        tf_idf = TfidfVectorizer()
        self.train_X = tf_idf.fit_transform(self.train_X)
        self.test_X = tf_idf.transform(raw_documents=self.test_X)
        # SVC was tried first but does not scale to large datasets:
        # SVM = svm.SVC(kernel='linear', verbose=True)
        # SVM.fit(X=self.train_X, y=self.train_Y)
        # prediction = SVM.predict(self.test_X)
        # accuracy = numpy.mean(prediction == self.test_Y)
        # param_grid = [
        #     {'alpha': [.00001, .0001, .001, .01]}
        # ]  # best results for lowest alpha
        # NOTE(review): SGDClassifier's `n_iter` was renamed `max_iter`
        # in newer scikit-learn — confirm the pinned version.
        SGD = sg.SGDClassifier(verbose=True, n_iter=self.n_iter, alpha=self.alpha)
        # clf = grid(SGD, param_grid, cv=3)
        SGD.fit(X=self.train_X, y=self.train_Y)
        prediction = SGD.predict(self.test_X)
        accuracy = numpy.mean(prediction == self.test_Y)
        return accuracy
|
hadarohana/Tweets
|
Tweets/SVM.py
|
SVM.py
|
py
| 1,710
|
python
|
en
|
code
| 0
|
github-code
|
6
|
34196758257
|
#/usr/bin/env python3
# Parse the puzzle input into one flat row-major string plus dimensions.
with open("day18_in.txt") as f:
    lines = [l.strip() for l in f]
grid = ''.join(lines)
Y_END = len(lines)
X_END = len(lines[0])
def run(grid, times):
    """Advance *grid* by `times` minutes; return trees * lumberyards.

    Keeps a grid -> minute cache so that once a repeated state appears,
    the remaining iterations are skipped in whole-cycle jumps (cycle
    detection, required for the 10**9-minute part 2).
    """
    cache = {grid: 0}
    i = 0
    while i < times:
        grid = next_grid(grid)
        if grid in cache:
            # Cycle found: jump forward by whole cycle lengths, then
            # restart the cache for the short remainder.
            i = iterations_fast_jump(i, cache[grid], times)
            cache.clear()
        cache[grid] = i
        i += 1
    return grid.count('|') * grid.count('#')
def next_grid(grid):
    """Return the grid advanced one minute, as a new flat string."""
    return ''.join(next_grid_generator(grid))
def next_grid_generator(grid):
    """Yield each cell of the next-minute grid, row-major.

    Rules: open (.) with >= 3 adjacent trees becomes a tree (|); a tree
    with >= 3 adjacent lumberyards becomes a lumberyard (#); a lumberyard
    survives only next to at least one lumberyard and one tree.
    """
    for y in range(Y_END):
        for x in range(X_END):
            cell = grid[y * X_END + x]
            neighbours = list(adjacents(grid, y, x))
            trees = neighbours.count('|')
            yards = neighbours.count('#')
            if cell == '.' and trees >= 3:
                cell = '|'
            elif cell == '|' and yards >= 3:
                cell = '#'
            elif cell == '#' and (yards == 0 or trees == 0):
                cell = '.'
            yield cell
def adjacents(grid, y, x):
    """Yield the up-to-8 neighbours of cell (y, x), clamped to the grid."""
    for y2 in range(max(y - 1, 0), min(y + 2, Y_END)):
        for x2 in range(max(x - 1, 0), min(x + 2, X_END)):
            if y2 != y or x2 != x:
                yield grid[y2 * X_END + x2]
def iterations_fast_jump(i, i_prev, i_max):
    """Given a cycle first seen at minute *i_prev* and seen again at *i*,
    return the largest minute <= i_max - 1 reachable from *i* by whole
    cycle-length jumps."""
    cycle = i - i_prev
    return i + cycle * ((i_max - 1 - i) // cycle)
# Part 1 is 10 straight minutes; part 2 relies on run()'s cycle skipping.
print(f"Part 1: trees * lumberyards = {run(grid, 10)}")
print(f"Part 2: trees * lumberyards = {run(grid, 1000000000)}")
|
naggety/adventofcode2018.py
|
day18.py
|
day18.py
|
py
| 1,518
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19325512904
|
from statsmodels.tsa.seasonal import seasonal_decompose
from dateutil.parser import parse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the hourly series as a Series indexed by timestamp.
# Bug fix: `squeeze=True` in read_csv was deprecated in pandas 1.4 and
# removed in 2.0 — squeeze the single-column frame explicitly instead.
df = pd.read_csv('../timeserie_train.csv',
                 parse_dates=['data'],
                 index_col='data').squeeze("columns")
# Multiplicative Decomposition (hourly data, yearly seasonality)
result_mul = seasonal_decompose(df, model='multiplicative', period=365*24, extrapolate_trend='freq')
# Additive Decomposition
result_add = seasonal_decompose(df, model='additive', period=365*24, extrapolate_trend='freq')
# Extract the Components ----
# Actual Values = Product of (Seasonal * Trend * Resid)
df_reconstructed_mul = pd.concat([result_mul.seasonal, result_mul.trend, result_mul.resid, result_mul.observed], axis=1)
df_reconstructed_mul.columns = ['seas', 'trend', 'resid', 'actual_values']
df_reconstructed_mul.to_csv('timeserie_decom_mul_train.csv')
# Actual Values = Sum of (Seasonal + Trend + Resid)
df_reconstructed_add = pd.concat([result_add.seasonal, result_add.trend, result_add.resid, result_add.observed], axis=1)
df_reconstructed_add.columns = ['seas', 'trend', 'resid', 'actual_values']
df_reconstructed_add.to_csv('timeserie_decom_add_train.csv')
# Plot both decompositions.
result_mul.plot().suptitle('Multiplicative Decompose', fontsize=22)
result_add.plot().suptitle('Additive Decompose', fontsize=22)
plt.show()
|
gsilva49/timeseries
|
H/python_code/decom.py
|
decom.py
|
py
| 1,379
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19066210421
|
from utils.crawling import *
# Province (Ctcd) and period (Pcd1) code -> Korean name lookup tables.
Ctcd_name = {"11": "서울", "21": "부산", "22": "대구", "23": "인천", #4
             "24": "광주", "25": "대전", "26": "울산", "45": "세종", #4
             "31": "경기", "32": "강원", "33": "충북", "34": "충남", #4
             "35": "전북", "36": "전남", "37": "경북", "38": "경남", "50": "제주"} #5
Pcd1_name = {"03": "석기", "05": "청동기", "07": "철기", #3
             "11": "고구려", "12": "백제", "13": "신라", "14": "가야", "20": "통일신라", #5
             "30": "고려", "40": "조선", "45": "대한제국", "50": "일제강점기"} #4
heritage_df = csv_to_df("heritage.csv")
# Vectorised decode: Series.map replaces the per-row .loc loop and is
# much faster.  NOTE: codes missing from the tables become NaN instead of
# raising KeyError — every code is expected to be present.
heritage_df["ccbaCtcdNm"] = heritage_df["ccbaCtcd"].map(Ctcd_name)
heritage_df["ccbaPcd1Nm"] = heritage_df["ccbaPcd1"].map(Pcd1_name)
# Reorder columns before saving the decoded table back out.
col = ['ccbaCpno', 'ccbaPcd1', 'ccbaPcd1Nm', 'longitude', 'latitude',
       'ccbaCtcd', 'ccbaCtcdNm', 'ccbaKdcd', 'ccmaName', 'crltsnoNm',
       'ccbaMnm1', 'ccbaMnm2', 'imageUrl', 'content', 'ccceName', 'ccbaLcad',
       'ccbaAdmin', 'ccbaPoss', 'ccbaAsdt']
save_df(heritage_df[col], "heritage.csv")
|
Park-Min-Jeong/Interface-Project-DataPreProcessing
|
7 decode_data.py
|
7 decode_data.py
|
py
| 1,185
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72994074107
|
import torch.nn as nn
import torch
class NetworksFactory:
    """Factory that builds a network wrapper from its registered name."""

    def __init__(self):
        pass

    @staticmethod
    def get_by_name(network_name, *args, **kwargs):
        """Instantiate the network registered under *network_name*.

        Extra positional/keyword arguments are forwarded to the Net
        constructor.  Raises ValueError for unknown names.
        """
        ################ Ours #################
        if network_name == 'Ours_Reconstruction':
            from networks.Ours_Reconstruction import Net
            network = Net(*args, **kwargs)
        elif network_name == 'Ours_DeblurOnly':
            from networks.Ours_DeblurOnly import Net
            network = Net(*args, **kwargs)
        else:
            raise ValueError("Network %s not recognized." % network_name)
        # print(network)
        print("Network %s was created: " % network_name)
        # NOTE(review): assumes the wrapper exposes `.network` (an
        # nn.Module) — confirm against the Net classes.
        print('Network parameters: {}'.format(sum([p.data.nelement() for p in network.network.parameters()])))
        return network
class NetworkBase(nn.Module):
    """Common base class for project networks; exposes a readable name."""

    def __init__(self):
        super().__init__()
        self._name = 'BaseNetwork'

    @property
    def name(self):
        """Return the network's display name."""
        return self._name
|
Lynn0306/LEDVDI
|
CODES/networks/networks.py
|
networks.py
|
py
| 1,012
|
python
|
en
|
code
| 20
|
github-code
|
6
|
13437468850
|
# Display a runtext with double-buffering.
import sys
sys.path.append("matrix/bindings/python/samples")
from samplebase import SampleBase
from rgbmatrix import graphics
import time
from PIL import Image
import requests
import json
import threading
from threading import Thread
from queue import Queue
import traceback
import logging
# Log to a file so failures survive headless/cron runs of the display.
LOG_FILENAME = "Logs/mtatext.log"
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
logging.debug("Startup test in mtatext.py")
# GPIO
PIN = 35
DIRECTIONS = ["N", "S"]
### MATRIX HELPER FUNCTIONS ###
def fillRectangle(gx, canvas, xUL=0, yUL=0, xBR=63, yBR=31, color=graphics.Color(0,0,0)):
    """Fill the rectangle (xUL, yUL)-(xBR, yBR) on *canvas* with *color*
    by drawing one vertical line per column.

    Prints an error (and draws nothing) when the corners are not
    strictly ordered.
    """
    if xUL >= xBR or yUL >= yBR:
        print("ERROR, bad rectangle boundaries.")
        return
    for column in range(xUL, xBR + 1):
        gx.DrawLine(canvas, column, yUL, column, yBR, color)
def scrollText(gx, canvas, leftBoundary, rightBoudary, height, color, text):
    # NOTE(review): broken/unused — references names (offscreen_canvas,
    # font, pos, textColor, my_text) that are not defined at module scope
    # and ignores its own parameters; calling it would raise NameError.
    # Confirm intent or remove.
    text_length = graphics.DrawText(offscreen_canvas, font, pos, 20, textColor, my_text)
#hardcoded now, update for different trains
def printTrainBulletId(canvas, x, y, route_id):
    """Draw the route's bullet icon (pre-rendered ppm file) at (x, y)."""
    # printTrainBullet(canvas, x, y, 0, 106, 9)
    bullet = Image.open("pixelMaps/%strain.ppm" % (route_id)).convert('RGB')
    canvas.SetImage(bullet, x, y)
#position is 0 or 1
# position is 0 or 1 (upper or lower line of the 64x32 panel)
def printTrainLine(gx, canvas, route_id, font, min_font, destination, mins_left, position, text_frame):
    """Render one arrival line: route bullet, scrolling destination,
    minutes-to-arrival.

    Args:
        gx: graphics module; canvas: offscreen canvas to draw into.
        route_id: subway route id (selects the bullet image).
        font / min_font: fonts for the destination and the minutes.
        destination (str): text that scrolls when too wide.
        mins_left: minutes until arrival (rendered as "<n>m").
        position (int): 0 = upper line, 1 = lower line.
        text_frame (int): current scroll offset in pixels.

    Returns:
        int: remaining scroll width (goes negative once fully scrolled).
    """
    height = 8 + position * 17
    bullet_position = (0, height - 7)  # was 6,height
    destination_position = (bullet_position[0] + 16, height + int(font.baseline / 2) - 1)
    mins_left_position = (48, height + int(font.baseline / 2) - 1)
    text_color = gx.Color(100, 100, 100)
    left_boundary = destination_position[0] - 1
    right_boundary = mins_left_position[0] - 2
    # Draw the (possibly off-screen) destination first, then mask the
    # overflow on both sides with black rectangles.
    text_width = gx.DrawText(canvas, font, destination_position[0] - text_frame, destination_position[1], text_color, destination)
    fillRectangle(gx, canvas, xBR=left_boundary, yUL=position * 16, yBR=16 + position * 16)
    fillRectangle(gx, canvas, xUL=right_boundary, yUL=position * 16, yBR=16 + position * 16)
    printTrainBulletId(canvas, bullet_position[0], bullet_position[1], route_id)
    gx.DrawText(canvas, min_font, mins_left_position[0], mins_left_position[1], text_color, "%sm" % (mins_left))
    return text_width - text_frame
def getTrains(stations):
    """Query the local schedule service for upcoming trains.

    Args:
        stations (list[str]): stop ids, joined into one request.

    Returns:
        list | None: parsed train dicts, or None when the service is
        unreachable or the payload is empty/invalid.
    """
    station_string = ",".join(stations) if len(stations) > 1 else stations[0]
    try:
        response = requests.get("http://localhost:5000/train-schedule/%s" % (station_string))
        trains = json.loads(response.text)
        # A valid payload has at least one train with a destination.
        valid = trains and len(trains) > 0 and trains[0]["destination"] is not None
        if valid:
            logging.debug("Valid response returning trains:")
            logging.debug(str(trains))
            return trains
        logging.debug("Not valid returning NONE")
        return None
    except:
        # Broad on purpose: any failure is logged and treated as "no data".
        logging.exception("Ex in getTrains:")
        return None
# Event set once the schedule server has answered with valid data.
server_live = threading.Event()


class ServerLiveThread(Thread):
    """Background thread that polls the schedule server until it is up,
    then sets the module-level `server_live` event."""

    def __init__(self):
        Thread.__init__(self)

    def run(self):
        # Bug fix: the original retried by recursing into self.run() from
        # the except handler, growing the stack without bound while the
        # server stayed down.  Retry in a loop instead.
        while True:
            try:
                trains = getTrains(["F21"])
                valid = trains and len(trains) > 0 and trains[0]["destination"] is not None
                while not valid:
                    logging.debug("Startup server still not valid, pinging again")
                    trains = getTrains(["F21"])
                    valid = trains and len(trains) > 0 and trains[0]["destination"] is not None
                logging.debug("Server online, starting UI")
                server_live.set()
                return
            except Exception:
                logging.exception("Ex in ServerLiveThread:")
                time.sleep(2)
class GetTrainsThread(Thread):
    """Worker thread that fetches arrivals for *stations* and, when a
    valid result comes back, pushes it onto *queue*."""

    def __init__(self, stations, queue):
        Thread.__init__(self)
        self.stations = stations
        self.queue = queue
        self.trains = []

    def setTrains(self, trains):
        # Manual override, mainly useful for testing.
        self.trains = trains

    def run(self):
        self.trains = getTrains(self.stations)
        if self.trains:
            self.queue.put(self.trains)
class RunText(SampleBase):
    """Matrix UI loop: shows upcoming arrivals for the configured
    stations, scrolling long destination names and rotating through
    secondary trains on the bottom line."""

    def __init__(self, *args, **kwargs):
        super(RunText, self).__init__(*args, **kwargs)
        # Extra CLI option on top of SampleBase's matrix options.
        self.parser.add_argument("-s", "--stations", help="List of stations", nargs="*", default=["F21"])

    def run(self):
        offscreen_canvas = self.matrix.CreateFrameCanvas()
        font = graphics.Font()
        font.LoadFont("matrix/fonts/6x12.bdf")
        min_font = graphics.Font()
        min_font.LoadFont("matrix/fonts/5x8.bdf")
        textColor = graphics.Color(200, 200, 200)
        black = graphics.Color(0, 0, 0)
        pos = offscreen_canvas.width
        stations = self.args.stations
        # Timing knobs (seconds): frame period, pause before scrolling,
        # schedule refresh period, secondary-line rotation period.
        time_step = 0.09
        freeze_time = 3
        train_update_time = 25
        secondary_switch_time = 10
        trains_queue = Queue()
        # pos1/pos2: pixel scroll offsets; freeze1/freeze2: frames left
        # to hold the text still before scrolling resumes.
        pos1 = 0
        freeze1 = int(freeze_time / time_step)
        pos2 = 0
        freeze2 = int(freeze_time / time_step)
        train_update = 0
        switch_time = int(secondary_switch_time / time_step)
        trains = None
        secondary_train = 1
        primary_train = 0
        # Block until the schedule server has answered at least once.
        t = ServerLiveThread()
        t.start()
        server_live.wait()
        while True:
            now = time.time()
            offscreen_canvas.Clear()
            # Kick off a background refresh when the timer has elapsed
            # and no fetch thread is currently running.
            if train_update == 0 and trains_queue.qsize() == 0 and threading.active_count() == 1:
                train_thread = GetTrainsThread(stations, trains_queue)
                train_thread.start()
                train_update = int(train_update_time / time_step)
            if (trains_queue.qsize() > 0):
                trains = trains_queue.get()
            if trains:
                # Rotate which train occupies the secondary (bottom) line.
                if switch_time == 0:
                    secondary_train = max(1, (secondary_train + 2) % len(trains))
                    primary_train = secondary_train - 1
                    switch_time = int(secondary_switch_time / time_step)
                else:
                    switch_time -= 1
                reset1 = printTrainLine(graphics, offscreen_canvas, trains[primary_train]["route_id"], font, min_font, trains[primary_train]["destination"], trains[primary_train]["mins_left"], 0, pos1)
                if len(trains) > 1:
                    reset2 = printTrainLine(graphics, offscreen_canvas, trains[secondary_train]["route_id"], font, min_font, trains[secondary_train]["destination"], trains[secondary_train]["mins_left"], 1, pos2)
                else:
                    reset2 = -1
            offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)
            if trains:
                # Hold the text for freeze_time, then scroll one pixel
                # per frame; restart once fully scrolled (reset < 0).
                if pos1 == 0 and freeze1 > 0:
                    freeze1 -= 1
                else:
                    pos1 += 1
                    freeze1 = int(freeze_time / time_step)
                if pos2 == 0 and freeze2 > 0:
                    freeze2 -= 1
                else:
                    pos2 += 1
                    freeze2 = int(freeze_time / time_step)
                if reset1 < 0:
                    pos1 = 0
                if reset2 < 0:
                    pos2 = 0
            train_update = max(0, train_update - 1)
            # Keep the frame rate steady regardless of work done.
            elapsed = time.time() - now
            time.sleep(max(0, time_step - elapsed))
# Main function
# Entry point: SampleBase.process() parses CLI args and runs the loop.
if __name__ == "__main__":
    run_text = RunText()
    if (not run_text.process()):
        run_text.print_help()
|
aqwesd8/MTAProject
|
mtatext.py
|
mtatext.py
|
py
| 7,270
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1396103450
|
from django.shortcuts import render, redirect, reverse
from django.http import JsonResponse
from django.forms import ValidationError
from .models import *
import pyshorteners
def index(request):
    """Landing page: on POST, shorten the submitted URL and redirect to
    the JSON `encode` view for the new Link; on GET render the form.

    Validation errors are re-rendered on the form via data["error"].
    """
    data = {}
    if request.method == "POST":
        try:
            l = Link()
            s = pyshorteners.Shortener()
            l.original_url = request.POST["url"]
            # TinyURL performs the actual shortening.
            l.short_url = s.tinyurl.short(l.original_url)
            l.full_clean()
            l.save()
            return redirect(reverse("encode", args=(l.id,)))
        except ValidationError as v:
            data["error"] = v.message_dict
    return render(request, 'pages/index.html', data)
def encode(request, link_id):
    """Return the shortened URL of the given Link as JSON."""
    link = Link.objects.get(id=link_id)
    return JsonResponse({"short_url": link.short_url})
def decode(request, link_id):
    """Return the original URL of the given Link as JSON."""
    link = Link.objects.get(id=link_id)
    return JsonResponse({"original_url": link.original_url})
|
jennytoc/url-shortener
|
url_shortener_app/views.py
|
views.py
|
py
| 976
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10117546059
|
import pytest
from dao.genre import GenreDAO
from service.genre import GenreService
class TestGenreService:
    """Unit tests for GenreService, backed by the genre_Dao fixture."""

    @pytest.fixture(autouse=True)
    def genre_service(self, genre_Dao: GenreDAO):
        # Build a fresh service around the DAO fixture for every test.
        self.genre_service = GenreService(genre_Dao)

    def test_get_one(self):
        # Fixture data is expected to contain genre id=1 named 'horror'.
        certain_genre = self.genre_service.get_one(1)
        assert certain_genre is not None
        assert certain_genre.id == 1
        assert certain_genre.name == 'horror'

    def test_get_all(self):
        all_genres = self.genre_service.get_all(None)
        assert all_genres is not None
        assert type(all_genres) == list
|
AgzigitovOskar/CR_4_Agzigitov
|
tests/service_tests/genre_service.py
|
genre_service.py
|
py
| 618
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8236657823
|
# sets: unordered, mutable, no duplicates
myset = {1, 2, 3, 1, 2}  # duplicates are collapsed
print(myset)  # {1, 2, 3}
myset1 = set([1, 2, 3, 4, 1, 2, 3, 4])  # set() on a list removes duplicates
print(myset1)  # {1, 2, 3, 4}
myset2 = set("Hello")  # one element per distinct character; unordered
print(myset2)  # {'e', 'H', 'l', 'o'}
myset3 = set()  # empty set ({} would be an empty dict)
myset3.add(1)  # add 1 to set
myset3.add(2)  # add 2 to set
myset3.add(3)  # add 3 to set
myset3.discard(3)  # remove 3 from set (no error if absent)
print(myset3)
myset3.add(3)  # add 3 to set
print(myset3.pop())  # remove and return an arbitrary element from set
odds = {1, 3, 5, 7, 9}
evens = {0, 2, 4, 6, 8}
primes = {2, 3, 5, 7}
u = odds.union(evens)  # union of two sets (all elements from both)
print(u)  # {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
i = odds.intersection(evens)  # intersection (only common elements)
print(i)  # set() — empty
i1 = odds.intersection(primes)  # intersection of two sets (only common elements)
print(i1)  # {3, 5, 7}
a = {1, 2, 3, 4, 5, 6, 7, 8, 9}
b = {1, 2, 3, 10, 11, 12}
diff = a.difference(b)  # elements in a but not in b
print(diff)  # {4, 5, 6, 7, 8, 9}
symdiff = a.symmetric_difference(b)  # elements in either a or b but not both
print(symdiff)  # {4, 5, 6, 7, 8, 9, 10, 11, 12}
a.update(b)  # add all elements from b to a (in place)
print(a)  # {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
a = {1, 2, 3, 4, 5, 6, 7, 8, 9}
b = {1, 2, 3, 10, 11, 12}
a.intersection_update(b)  # keep only elements of a also in b
print(a)  # {1, 2, 3}
a = {1, 2, 3, 4, 5, 6 }
b = {1, 2, 3}
print(b.issubset(a))  # b is a subset of a: True
print(b.issuperset(a))  # b is a superset of a: False
# to copy a set, use the copy() method
# frozenset is immutable (cannot add or remove elements)
|
bhagya2002/python
|
Intermediate/4_sets.py
|
4_sets.py
|
py
| 1,703
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37974828359
|
'''
Program to be called from cron for working with lights - on and off
This is a wrapper for the Client, handling command line parameters
Author: Howard Webb
Date: 2/10/2021
'''
import argparse
from exp import exp
from GrowLight import GrowLight
parser = argparse.ArgumentParser()
# list of acceptable arguments
parser.add_argument("-a", help="Send a light command (on, off, ...", type=str)
args = parser.parse_args()
# print(args)
gl = GrowLight()
# Dispatch: any value other than "on"/"off" is silently ignored.
if args.a == "on":
    gl.on()
elif args.a == "off":
    gl.off()
|
webbhm/GBE-Digital
|
python/Light_Switch.py
|
Light_Switch.py
|
py
| 538
|
python
|
en
|
code
| 1
|
github-code
|
6
|
72708100028
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/8/13 15:04
# @Author : 0x00A0
# @File : main.py
# @Description : TKINTER窗口程序
import asyncio
import multiprocessing
import os
import re
import shutil
import sys
import threading
import time
import tkinter
from tkinter import filedialog, messagebox
from tkinter.ttk import Progressbar
import paddle
import predict
from ProgressBar import ProgressBar
from ppgan.apps import StyleGANv2EditingPredictor
from ppgan.apps import Pixel2Style2PixelPredictor
from tkinter import *
import tkinter as tk
from PIL import Image, ImageTk
# Recreate the scratch image directory on every launch.
# NOTE(review): rmtree raises if "img" does not exist yet — consider
# shutil.rmtree("img", ignore_errors=True).
shutil.rmtree("img")
os.mkdir("img")
"""
=================================================
主窗口
=================================================
"""
root = Tk()
root.title("人脸时光机")
# root.resizable(False,False)  # disable window resizing
root.minsize(1000, 750)
# Each image panel is capped at a quarter of the screen width (square box).
h_box = w_box = root.winfo_screenwidth() / 4
window = tk.Frame(root)
prog = None
# NOTE(review): `global` at module level is a no-op; these lines only
# document that photo0/photo1/done are shared with the callbacks below.
global photo0
global photo1
global done
done = False
"""
=================================================
控件定义
=================================================
"""
work_path = os.getcwd()
ori_path = ""  # path of the currently loaded source image
# ===原始图片框===
def resize(w, h, w_box, h_box, pil_image):
    """Scale a PIL image to fit inside a w_box x h_box rectangle, preserving aspect ratio.

    w, h         -- current width/height of the image
    w_box, h_box -- bounding box to fit into
    pil_image    -- PIL.Image.Image instance to resize
    Returns a new, resized PIL image.
    """
    # Use the smaller of the two scale factors so both dimensions fit
    f1 = 1.0 * w_box / w
    f2 = 1.0 * h_box / h
    factor = min([f1, f2])
    width = int(w * factor)
    height = int(h * factor)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the long-standing
    # alias for the same high-quality resampling filter.
    return pil_image.resize((width, height), Image.LANCZOS)
img0 = Image.open(os.path.join("statics", "L.png"))
w, h = img0.size
img0 = resize(w, h, w_box, h_box, img0)
photo0 = ImageTk.PhotoImage(img0)
imgbox_ori = Label(window, image=photo0)
lbl_ori = Label(window, text="原始图像", font=("微软雅黑", 19))
# ===生成图片框===
img1 = Image.open(os.path.join("statics", "R.png"))
w, h = img1.size
img1 = resize(w, h, w_box, h_box, img1)
photo1 = ImageTk.PhotoImage(img1)
imgbox_dst = Label(window, image=photo1)
lbl_dst = Label(window, text="生成图像", font=("微软雅黑", 19))
# ===打开图片按钮===
def openImg():
    """Prompt for a source image, show it in the left box, and reset the result box."""
    global ori_path, photo0, photo1
    chosen = filedialog.askopenfilename()
    if not chosen:
        messagebox.showinfo("提示", "您没有选择任何文件")
        return
    ori_path = chosen
    # Left panel: the freshly selected source image, scaled to fit
    src = Image.open(ori_path)
    sw, sh = src.size
    photo0 = ImageTk.PhotoImage(resize(sw, sh, w_box, h_box, src))
    imgbox_ori.configure(image=photo0)
    # Right panel: back to the placeholder until a new result is generated
    placeholder = Image.open(os.path.join("statics", "R.png"))
    pw, ph = placeholder.size
    photo1 = ImageTk.PhotoImage(resize(pw, ph, w_box, h_box, placeholder))
    imgbox_dst.configure(image=photo1)
func_openImg = window.register(openImg)  # necessary Tcl callback wrapper
btn_OpenImg = Button(window, text="打开图片", font=("微软雅黑", 19), command=func_openImg)
# === Age input box ===
lbl_Age = Label(window, text="需要增长的年龄(负数表示年轻):", font=("微软雅黑", 19))
def checkInput(content):
    """Entry validator: allow only an optional sign, digits, and one decimal point.

    `content` is the prospective full text of the entry (the Tk '%P'
    substitution). Returns True to accept the edit, False to reject it.
    Intermediate typing states such as "", "-" and "1." are accepted.
    """
    # fullmatch (unlike the previous unanchored re.match) rejects trailing
    # garbage such as "12abc", which the old pattern silently accepted.
    if re.fullmatch(r"-?(\d+\.?\d*|\.\d*)?", content):
        return True
    # NOTE(review): decimals are accepted here, but genImg() feeds the value
    # to int(), which raises on "1.5" - confirm whether fractional ages are wanted.
    return False
val_var = tk.StringVar(value="0")  # backing variable for the age entry, default 0
func_checkInput = window.register(checkInput)  # necessary Tcl callback wrapper
# Validate on every keystroke; '%P' passes the prospective full entry text
txt_Age = Entry(window, font=("微软雅黑", 19), textvariable=val_var, validate="key",
                validatecommand=(func_checkInput, '%P'))
# === Generate-image button ===
def getDistance(age):
    """Map a requested age delta (years) to the latent-direction distance.

    Positive (aging) deltas are scaled by 1/10, non-positive (rejuvenating)
    deltas by 1/15; zero maps to 0.0.
    """
    divisor = 10 if age > 0 else 15
    return age / divisor
def getResult():
    """Poll (once per second) for the worker thread to finish, then show the result.

    When `done` is set by pred(), load the generated image into the right-hand
    box, close the progress dialog, re-enable the button, and copy the result
    into the source image's directory.
    """
    global done
    if done:
        result_png = os.path.join(work_path, "img", "dst.editing.png")
        if os.path.exists(result_png):
            img1 = Image.open(result_png)
            w, h = img1.size
            img1 = resize(w, h, w_box, h_box, img1)
            global photo1
            photo1 = ImageTk.PhotoImage(img1)
            imgbox_dst.configure(image=photo1)
            prog.quit()
            btn_genImg["state"] = "normal"
            messagebox.showinfo("提示", "图片生成完成", parent=root)
            # Copy the result next to the source image. The previous code
            # split the normalized path on a literal "\\" string and joined
            # the pieces without separators, producing a broken destination
            # on every platform; dirname() is the correct operation.
            shutil.copy(result_png, os.path.dirname(os.path.normpath(ori_path)))
            done = False
            return
    root.after(1000, getResult)
def pred():
    """Worker-thread body: run the face-aging predictor on the selected image.

    On success sets the module-level `done` flag so getResult() picks up the
    output; on failure reports the error and leaves `done` unset.
    """
    global done
    try:
        predictor = predict.predictor(os.path.normpath(ori_path), os.path.join(work_path, "img"),
                                      getDistance(int(txt_Age.get())))
        predictor.predict()
    except Exception as e:
        prog.quit()
        btn_genImg["state"] = "normal"
        # NOTE(review): assumes every element of e.args is a string; str(e) would be safer
        messagebox.showerror("错误", ''.join(e.args), parent=root)
        # Bail out here: previously `done` was set even after a failure, so
        # the getResult() poller popped a success dialog right after the error.
        return
    done = True
def genImg():
    """Button handler: validate the age input, then run prediction in a daemon thread."""
    shutil.rmtree("img")  # start from a clean scratch directory
    os.mkdir("img")
    btn_genImg["state"] = "disabled"
    if txt_Age.get() == "" or txt_Age.get() == "-":
        messagebox.showinfo("提示", "未填写年龄", parent=root)
        # Previously execution fell through here and int("") raised ValueError;
        # re-enable the button and abort instead.
        btn_genImg["state"] = "normal"
        return
    print((os.path.normpath(ori_path), os.path.join(work_path, "img"), getDistance(int(txt_Age.get()))))
    th = threading.Thread(target=pred)
    th.daemon = True  # daemon thread so it cannot outlive the GUI (setDaemon() is deprecated)
    th.start()
    root.after(1000, getResult)  # start polling for the result
    global prog
    prog = ProgressBar()
    prog.start(root)
func_genImg = window.register(genImg)  # necessary Tcl callback wrapper
btn_genImg = Button(window, text="生成照片", font=("微软雅黑", 19), command=func_genImg)
# === About info ===
lbl_about = Label(window, text="浙大宁波理工学院 计算机与数据工程学院" +
                  "大数据201 万昕睿" +
                  "技术支持: 百度Paddle提供深度学习框架与模型\n" +
                  " 开源地址: https://github.com/PaddlePaddle/Paddle\n" +
                  "本项目已开源至GitHub\n" +
                  " 开源地址: https://github.com/0x00A0/FaceAge",
                  font=("微软雅黑", 9),
                  foreground="#45AA48"
                  )
"""
=================================================
放置所有控件
=================================================
"""
window.place(anchor="center", relx=.50, rely=.50)
lbl_ori.grid(row=0, column=0, rowspan=1, columnspan=4)
imgbox_ori.grid(row=1, column=0, rowspan=4, columnspan=4, sticky="NSWE")
lbl_dst.grid(row=0, column=5, rowspan=1, columnspan=4)
imgbox_dst.grid(row=1, column=5, rowspan=4, columnspan=4, sticky="NSWE")
btn_OpenImg.grid(row=6, column=0, rowspan=1, columnspan=10, sticky="WE")
lbl_Age.grid(row=7, column=0, rowspan=1, columnspan=4, sticky="WE")
txt_Age.grid(row=7, column=5, rowspan=1, columnspan=4, sticky="WE")
btn_genImg.grid(row=8, column=0, rowspan=1, columnspan=10, sticky="WE")
lbl_about.grid(row=9, column=0, rowspan=1, columnspan=10, sticky="WE")
"""
=================================================
调用mainloop()显示主窗口
=================================================
"""
window.mainloop()
|
0x00A0/FaceAge
|
main.pyw
|
main.pyw
|
pyw
| 6,807
|
python
|
en
|
code
| 2
|
github-code
|
6
|
38928831481
|
import os
from dotenv import load_dotenv
import requests
from lxml import etree
import re
from postgres import cursor, connection
from slugify import slugify
load_dotenv()  # pull configuration from the local .env file
# --------------------------
# Base URL of the comic site; used to build the chapter-list API endpoint
nettruyen = os.getenv("PUBLIC_NETTRUYEN_URL")
def openWebsite(domain: str):
    """Issue a GET request to `domain` and return the raw requests.Response."""
    headers = {
        "Accept": "*/*",
        "User-Agent": "Thunder Client (https://www.thunderclient.com)"
    }
    return requests.request("GET", domain, data="", headers=headers)
def crawlChapters(crawl_id: str):
    """Fetch the chapter list for the comic identified by `crawl_id`.

    Calls the site's chapter-list service and returns the decoded JSON payload.
    """
    endpoint = (nettruyen
                + "Comic/Services/ComicService.asmx/ProcessChapterList?comicId="
                + crawl_id)
    # The endpoint is assumed to answer with JSON
    return openWebsite(endpoint).json()
def updateChapter(comic_id: str, crawl_id: str):
    """Insert newly crawled chapters for one comic, then clear its update flags.

    comic_id -- primary key of the row in the `comics` table
    crawl_id -- `chapter_id` value from the `crawls` table, used to query the site
    """
    # Collect the chapter data from the remote API
    data = crawlChapters(crawl_id)
    if len(data['chapters']):
        for chap in data['chapters']:
            # Insert the chapter; ON CONFLICT keeps already-stored rows untouched
            cursor.execute("INSERT INTO public.chapters(comic_id, title, crawl_id) "
                           "VALUES (%s, %s, %s) ON CONFLICT (crawl_id) DO NOTHING",
                           # Guarantees inserted rows are never duplicated
                           (comic_id, chap['name'], chap['url']))
            # Persist the change to the database
            connection.commit()
            # print("Chapter added to db successfully:")
            print(chap['name'])
        # Mark the comic (and its crawl record) as no longer pending an update
        cursor.execute("UPDATE public.comics SET is_updated = false WHERE id = %s", (comic_id,))
        connection.commit()
        cursor.execute("UPDATE public.crawls SET is_updated = false WHERE chapter_id = %s", (crawl_id,))
        connection.commit()
    return
# func auto update
def autoUpdateChapter():
    """Process up to five comics flagged `is_updated`, refreshing their chapter lists."""
    # Grab the next batch of comics awaiting an update
    cursor.execute("SELECT id, crawl_id FROM public.comics WHERE is_updated = true ORDER BY id ASC limit 5")
    results = cursor.fetchall()
    if results is not None:
        for comic_id, crawl_fk in results:
            # Resolve the crawl record to the site-side chapter id
            cursor.execute("SELECT chapter_id FROM public.crawls WHERE id = %s", (crawl_fk,))
            crawl_row = cursor.fetchone()
            if crawl_row is not None:
                updateChapter(comic_id, crawl_row[0])
    return
autoUpdateChapter()  # run one update batch immediately when the script is executed
|
baocuns/BCunsAutoCrawls
|
crawlChaptersNettruyenToPostgres.py
|
crawlChaptersNettruyenToPostgres.py
|
py
| 2,686
|
python
|
vi
|
code
| 0
|
github-code
|
6
|
31008546048
|
from flask import Flask
from flask_pymongo import PyMongo
from flask import Response
import random
import requests
from flask import request
import json
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
from flask import jsonify
from bson.objectid import ObjectId
from functools import wraps
import uuid
from bson import json_util
app = Flask(__name__)
# NOTE(review): hard-coded secret key - move to environment configuration before deploying
app.config['SECRET_KEY'] = "secret"
app.config['MONGO_DBNAME'] = "tasker_db"
app.config['MONGO_URI'] = "mongodb://localhost:27017"
mongo = PyMongo(app)
def to_json(data):
    """Serialize Mongo document(s) to a JSON string (handles ObjectId, dates, etc.)."""
    serialized = json.dumps(data, default=json_util.default)
    return serialized
#region Security
def generate_auth_token(id, expiration=600):
    """Create a signed, time-limited token embedding the user's ObjectId as text.

    `expiration` is the token lifetime in seconds (default 600).
    """
    serializer = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
    return serializer.dumps({'id': str(id)})
def authorize(f):
    """Decorator: require a valid Authorization token and pass the user to the view."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        token = request.headers.get('Authorization')
        if token is None:
            # No credentials supplied at all
            return Response(status="401")
        user = verify_auth_token(token)
        if user:
            return f(user)
        # Token present but invalid/expired, or the user no longer exists
        return Response(status="404")
    return wrapper
def verify_auth_token(token):
    """Decode an Authorization header value and load the matching user document.

    Returns the user dict, or None when the token is expired or invalid.
    """
    s = Serializer(app.config['SECRET_KEY'])
    # The client sends the *textual repr* of the bytes token produced by
    # generate_auth_token (e.g. "b'abc...'"); stripping the quotes and the
    # leading "b" recovers the raw token. NOTE(review): fragile - confirm the
    # client really sends the bytes repr before changing this.
    t = token.replace('\'', '')[1:]
    try:
        data = s.loads(t)
    except SignatureExpired:
        return None  # valid token, but expired
    except BadSignature:
        return None  # invalid token
    dd = data['id']
    user = mongo.db.users.find_one({'_id': ObjectId(dd)})
    return user
#endregion
@app.route('/')
def home_page():
    """Health-check endpoint for the service root.

    The previous body also evaluated `mongo.db.users` and discarded the
    result; that dead attribute access has been removed.
    """
    return "Success!"
# region Registration
@app.route('/register', methods=['POST'])
def register():
    """Start phone registration: store a 4-digit code and (normally) SMS it out.

    Expects JSON {"phone": ...}. Returns 409 if the phone is already
    registered. The early `return` below short-circuits the real SMS send
    for development; remove it to re-enable delivery.
    """
    req = request.get_json(silent=True)
    # randrange's upper bound is exclusive, so use 10000 to include code 9999
    rnd = random.randrange(1000, 10000)
    if mongo.db.users.find_one({'phone': req['phone']}):
        # Fixed: `status_code=` is not a Response argument (it raised
        # TypeError at runtime); the keyword is `status`, and content_type
        # must be a full MIME type, not just a charset.
        return Response(
            "Указанный номер зарегистрирован",
            status=409,
            content_type="text/plain; charset=utf-8")
    tmp_users = mongo.db.template_users
    # Upsert the pending code; insert explicitly if no previous doc existed
    if not tmp_users.find_one_and_update({'phone': req['phone']}, {'$set': {'code': rnd}}, upsert=True):
        tmp_users.insert({'phone': req['phone'], 'code': rnd})
    # remove this (development stub: skips the paid SMS gateway call below)
    return Response(status="200")
    # SECURITY(review): the API key is hard-coded in the URL - move it to config.
    r = requests.post('https://sms.ru/sms/send?api_id=840B3593-66E9-5AB4-4965-0B9589019F3A&to=' + str(
        req['phone']) + '&msg=Код%20для%20регистрации:%20' + str(rnd) + '&json=1')
    return Response(
        r.text,
        status=r.status_code,
        content_type=r.headers['content-type'])
@app.route('/register/confirm', methods=['POST'])
def finish_registration():
    """Complete registration by checking the SMS code and creating the user.

    Expects JSON {"phone": ..., "code": ...}. Returns the new user profile as
    JSON, 409 if the phone is already registered, 404 on a wrong code.
    """
    req = request.get_json(silent=True)
    if mongo.db.users.find_one({'phone': req['phone']}):
        # status must be an int/str status, content_type a full MIME type
        return Response("Указанный номер уже зарегистирован", status=409,
                        content_type="text/plain; charset=utf-8")
    tmp_users = mongo.db.template_users
    if tmp_users.find_one({'phone': req['phone'], 'code': int(req['code'])}):
        to_insert = {'phone': req['phone'], 'profile': {'first_name': '', 'last_name': '', 'birth_date': None}}
        # Insert a *copy*: insert_one mutates its argument by adding the
        # ObjectId _id, which json.dumps below cannot serialize. The old code
        # achieved the same effect by duplicating the dict literal.
        mongo.db.users.insert_one(dict(to_insert))
        return Response(json.dumps(to_insert), status="200", content_type='application/json')
    else:
        return Response(status="404")
@app.route('/login', methods=['POST'])
def tasks():
    # NOTE(review): the function name says "tasks" but this is the login
    # endpoint - renaming would change the Flask endpoint name, so it is
    # documented here instead of being changed.
    """Log in by phone number and return a signed auth token.

    Expects JSON {"phone": ...}; 404 when the phone is unknown.
    """
    req = request.get_json(silent=True)
    user = mongo.db.users.find_one({'phone': req['phone']})
    if user:
        token = generate_auth_token(user['_id'])
        # str(token) yields the bytes repr ("b'...'"); verify_auth_token
        # deliberately strips that wrapper back off - keep the two in sync.
        return Response(json.dumps({'token': str(token)}), status="200", content_type='application/json')
    else:
        return Response(status="404")
# endregion
#region Task
@app.route('/tasks', methods=['GET'])
@authorize
def get_all_task(user):
    """Return every task belonging to the authenticated user as a JSON array."""
    found = mongo.db.tasks.find({'user_id': str(user['_id'])})
    payload = to_json([task for task in found])
    return Response(payload, status="200", content_type='application/json')
@app.route('/task', methods=['POST'])
@authorize
def add_task(user):
    """Create a task for the authenticated user from JSON {data, date, guid}."""
    body = request.get_json(silent=True)
    document = {
        'data': body['data'],
        'date': body['date'],
        'guid': body['guid'],
        'user_id': str(user['_id']),
    }
    mongo.db.tasks.insert(document)
    return Response(status="200")
@app.route('/task', methods=['PUT'])
@authorize
def update_task(user):
    """Update the data/date of the task identified by JSON field `guid`."""
    body = request.get_json(silent=True)
    changes = {'data': body['data'], 'date': body['date']}
    mongo.db.tasks.update({'guid': body['guid']}, {"$set": changes})
    return Response(status="200")
@app.route('/task', methods=['GET'])
@authorize
def get_task(user):
    """Fetch one task by its `id` query-string parameter (the task guid).

    The previous body also parsed the request JSON into an unused local;
    this endpoint reads its input from the query string, so that call was dropped.
    """
    task_id = request.args.get('id')
    task = mongo.db.tasks.find_one({'guid': task_id})
    # NOTE(review): no ownership check - any authenticated user can read any
    # task by guid; confirm whether filtering on user_id is intended.
    return Response(to_json(task), status="200", content_type='application/json')
@app.route('/task', methods=['DELETE'])
@authorize
def delete_task(user):
    """Delete the task whose guid matches JSON field `guid`."""
    body = request.get_json(silent=True)
    mongo.db.tasks.delete_one({'guid': body['guid']})
    return Response(status="200")
#endregion
if __name__ == "__main__":
app.run()
|
SvTitov/tasker
|
SRV/tasker_srv/application.py
|
application.py
|
py
| 5,493
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18711654900
|
import argparse
import logging
from pathlib import Path
from typing import List
import yaml
from topaz3.conversions import phase_remove_bad_values, phase_to_map
from topaz3.database_ops import prepare_labels_database, prepare_training_database
from topaz3.delete_temp_files import delete_temp_files
from topaz3.get_cc import get_cc
from topaz3.mtz_info import mtz_get_cell
from topaz3.space_group import mtz_find_space_group, textfile_find_space_group
def prepare_training_data(
    phase_directory: str,
    cell_info_directory: str,
    cell_info_path: str,
    space_group_directory: str,
    space_group_path: str,
    xyz_limits: List[int],
    output_directory: str,
    database: str = None,
    delete_temp: bool = True,
):
    """Convert the original and inverse hands of each structure into regular map files.

    For every structure folder under *phase_directory*, read its cell info and
    space group, filter bad values from both phase files (.phs), convert them
    to map files of the given xyz dimensions, and optionally record
    correlation-coefficient training labels in a sqlite3 database.

    Args:
        phase_directory: top-level directory containing one folder per structure
        cell_info_directory: top-level directory with per-structure cell info
        cell_info_path: mtz file name inside each cell-info structure folder
        space_group_directory: top-level directory with per-structure space group info
        space_group_path: file (mtz or text) inside each space-group structure folder
        xyz_limits: three integers giving the output map dimensions
        output_directory: where the generated .map files are written
        database: optional path to an existing sqlite3 database for labels
        delete_temp: remove temporary files from the output directory afterwards

    Returns:
        True if every structure was converted without raising.
    """
    logging.info("Preparing training data")
    # Check all directories exist
    try:
        phase_dir = Path(phase_directory)
        assert phase_dir.exists()
    except Exception:
        logging.error(f"Could not find phase directory at {phase_directory}")
        raise
    try:
        cell_info_dir = Path(cell_info_directory)
        assert cell_info_dir.exists()
    except Exception:
        logging.error(f"Could not find cell info directory at {cell_info_directory}")
        raise
    try:
        space_group_dir = Path(space_group_directory)
        assert space_group_dir.exists()
    except Exception:
        logging.error(
            f"Could not find space group directory at {space_group_directory}"
        )
        raise
    try:
        output_dir = Path(output_directory)
        assert output_dir.exists()
    except Exception:
        logging.error(f"Could not find output directory at {output_directory}")
        raise
    # Check xyz limits are of correct format
    try:
        assert type(xyz_limits) == list or type(xyz_limits) == tuple
        assert len(xyz_limits) == 3
        assert all(type(values) == int for values in xyz_limits)
    except AssertionError:
        # (fixed typos in this user-facing message: "muste"/"tupls")
        logging.error(
            "xyz_limits must be provided as a list or tuple of three integer values"
        )
        raise
    # Get lists of child directories
    phase_structs = [struct.stem for struct in phase_dir.iterdir()]
    cell_info_structs = [struct.stem for struct in cell_info_dir.iterdir()]
    space_group_structs = [struct.stem for struct in space_group_dir.iterdir()]
    # Compare sorted copies - iterdir() order is filesystem-dependent, so a
    # plain list comparison could fail even when the same structures exist.
    assert (
        sorted(phase_structs) == sorted(cell_info_structs) == sorted(space_group_structs)
    ), "Same structures not found in all given directories"
    phase_structs = sorted(phase_structs)
    logging.debug(f"Following structures found to transform: {phase_structs}")
    # Get cell info and space group
    cell_info_dict = {}
    space_group_dict = {}
    # Set up function to get space group depending on suffix
    if Path(space_group_path).suffix == ".mtz":
        find_space_group = mtz_find_space_group
    else:
        find_space_group = textfile_find_space_group
    for struct in phase_structs:
        logging.info(
            f"Collecting info from {struct}, {phase_structs.index(struct)+1}/{len(phase_structs)}"
        )
        try:
            cell_info_file = cell_info_dir / Path(struct) / Path(cell_info_path)
            assert cell_info_file.exists()
        except Exception:
            logging.error(f"Could not find cell info file at {cell_info_dir}")
            raise
        try:
            cell_info_dict[struct] = mtz_get_cell(cell_info_file)
        except Exception:
            logging.error(f"Could not get cell info from {cell_info_file}")
            raise
        try:
            space_group_file = space_group_dir / Path(struct) / Path(space_group_path)
            assert space_group_file.exists()
        except Exception:
            logging.error(f"Could not find space group file at {space_group_dir}")
            raise
        try:
            space_group_dict[struct] = find_space_group(space_group_file)
        except Exception:
            logging.error(f"Could not get space group from {space_group_file}")
            raise
    logging.info("Collected cell info and space group")
    # Begin transformation
    for struct in phase_structs:
        logging.info(
            f"Converting {struct}, {phase_structs.index(struct)+1}/{len(phase_structs)}"
        )
        # Create original and inverse hands
        try:
            original_hand = Path(
                phase_dir / struct / space_group_dict[struct] / (struct + ".phs")
            )
            inverse_hand = Path(
                phase_dir / struct / space_group_dict[struct] / (struct + "_i.phs")
            )
            # Catch a weird situation where some space groups RXX can also be called RXX:H
            if (space_group_dict[struct][0] == "R") and (
                original_hand.exists() is False
            ):
                original_hand = Path(
                    phase_dir
                    / struct
                    / (space_group_dict[struct] + ":H")
                    / (struct + ".phs")
                )
                inverse_hand = Path(
                    phase_dir
                    / struct
                    / (space_group_dict[struct] + ":H")
                    / (struct + "_i.phs")
                )
            assert original_hand.exists(), f"Could not find original hand for {struct}"
            assert inverse_hand.exists(), f"Could not find inverse hand for {struct}"
        except Exception:
            logging.error(
                f"Could not find phase files of {struct} in space group {space_group_dict[struct]}"
            )
            raise
        # Convert original
        # Check the phase file first
        original_hand_good = phase_remove_bad_values(
            original_hand, output_dir.parent / (original_hand.stem + "_temp.phs")
        )
        # Log the result
        # NOTE(review): this identity (`is not`) comparison relies on
        # phase_remove_bad_values returning the *same object* when nothing was
        # filtered - confirm that contract before refactoring.
        if original_hand is not original_hand_good:
            logging.info(
                f"Filtered bad values from {original_hand.stem} and stored results in {original_hand_good}"
            )
        try:
            phase_to_map(
                original_hand_good,
                cell_info_dict[struct],
                space_group_dict[struct],
                xyz_limits,
                output_dir / (struct + ".map"),
            )
        except Exception:
            logging.error(f"Could not convert original hand for {struct}")
            raise
        # Convert inverse
        # Check the phase file first
        inverse_hand_good = phase_remove_bad_values(
            inverse_hand, output_dir.parent / (inverse_hand.stem + "_temp.phs")
        )
        # Log the result
        if inverse_hand is not inverse_hand_good:
            logging.info(
                f"Filtered bad values from {inverse_hand.stem} and stored results in {inverse_hand_good}"
            )
        try:
            phase_to_map(
                inverse_hand_good,
                cell_info_dict[struct],
                space_group_dict[struct],
                xyz_limits,
                output_dir / (struct + "_i.map"),
            )
        except Exception:
            logging.error(f"Could not convert inverse hand for {struct}")
            raise
        logging.info(f"Successfully converted {struct}")
    logging.info("Finished conversions")
    # If a database file is given, attempt to provide the training and labels table
    if database is not None:
        logging.info(f"Adding to database at {database}")
        # Build up database - collect all cc information first then put it into database
        logging.info("Collecting CC information")
        # Dictionary of correlation coefficients
        cc_original_dict = {}
        cc_inverse_dict = {}
        for struct in phase_structs:
            # Create original and inverse hands
            try:
                original_hand = Path(
                    phase_dir / struct / space_group_dict[struct] / (struct + ".lst")
                )
                inverse_hand = Path(
                    phase_dir / struct / space_group_dict[struct] / (struct + "_i.lst")
                )
                # Catch a weird situation where some space groups RXX can also be called RXX:H
                if (space_group_dict[struct][0] == "R") and (
                    original_hand.exists() is False
                ):
                    original_hand = Path(
                        phase_dir
                        / struct
                        / (space_group_dict[struct] + ":H")
                        / (struct + ".lst")
                    )
                    inverse_hand = Path(
                        phase_dir
                        / struct
                        / (space_group_dict[struct] + ":H")
                        / (struct + "_i.lst")
                    )
                assert (
                    original_hand.exists()
                ), f"Could not find original hand for {struct}"
                assert (
                    inverse_hand.exists()
                ), f"Could not find inverse hand for {struct}"
            except Exception:
                logging.error(
                    f"Could not find lst files of {struct} in space group {space_group_dict[struct]}"
                )
                raise
            try:
                cc_original_dict[struct] = get_cc(original_hand)
                cc_inverse_dict[struct] = get_cc(inverse_hand)
            except Exception:
                logging.error(
                    f"Could not get CC info of {struct} in space group {space_group_dict[struct]}"
                )
                raise
        try:
            database_path = Path(database)
            assert database_path.exists()
        except Exception:
            logging.error(f"Could not find database at {database}")
            raise
        # Generate list of results
        cc_results = []
        for struct in phase_structs:
            cc_results.append(
                (
                    struct,
                    cc_original_dict[struct],
                    cc_inverse_dict[struct],
                    (cc_original_dict[struct] > cc_inverse_dict[struct]),
                    (cc_original_dict[struct] < cc_inverse_dict[struct]),
                )
            )
        # Put in database
        prepare_training_database(str(database_path), cc_results)
        prepare_labels_database(str(database_path))
    # Delete temporary files if requested
    if delete_temp is True:
        delete_temp_files(output_directory)
        logging.info("Deleted temporary files in output directory")
    return True
def params_from_yaml(args):
    """Load prepare_training_data parameters from the yaml file named in `args`.

    Missing optional keys (db_path, delete_temp) are filled with defaults.
    """
    # Make sure the config file exists before trying to parse it
    try:
        config_file_path = Path(args.config_file)
        assert config_file_path.exists()
    except Exception:
        logging.error(f"Could not find config file at {args.config_file}")
        raise
    # Parse the yaml document into a plain dict
    try:
        with open(config_file_path, "r") as f:
            params = yaml.safe_load(f)
    except Exception:
        logging.error(
            f"Could not extract parameters from yaml file at {config_file_path}"
        )
        raise
    # Optional keys default to "no database" and "clean up temp files"
    params.setdefault("db_path", None)
    params.setdefault("delete_temp", True)
    return params
def params_from_cmd(args):
    """Build the prepare_training_data parameter dict from parsed CLI arguments."""
    return {
        "phase_dir": args.phase_dir,
        "cell_info_dir": args.cell_info_dir,
        "cell_info_path": args.cell_info_path,
        "space_group_dir": args.space_group_dir,
        "space_group_path": args.space_group_path,
        "xyz_limits": args.xyz,
        "db_path": args.db,
        "output_dir": args.output_dir,
        # A truthy --keep_temp flag suppresses temp-file deletion
        "delete_temp": not args.keep_temp,
    }
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(name="debug_log")
userlog = logging.getLogger(name="usermessages")
# Parser for command line interface
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
yaml_parser = subparsers.add_parser("yaml")
yaml_parser.add_argument(
"config_file",
type=str,
help="yaml file with configuration information for this program",
)
yaml_parser.set_defaults(func=params_from_yaml)
cmd_parser = subparsers.add_parser("cmd")
cmd_parser.add_argument(
"phase_dir", type=str, help="top level directory for phase information"
)
cmd_parser.add_argument(
"cell_info_dir", type=str, help="top level directory for cell info"
)
cmd_parser.add_argument(
"cell_info_path", type=str, help="cell info file within each structure folder"
)
cmd_parser.add_argument(
"space_group_dir", type=str, help="top level directory for space group"
)
cmd_parser.add_argument(
"space_group_path",
type=str,
help="space group file within each structure folder",
)
cmd_parser.add_argument(
"xyz", type=int, nargs=3, help="xyz size of the output map file"
)
cmd_parser.add_argument(
"output_dir", type=str, help="directory to output all map files to"
)
cmd_parser.add_argument(
"db",
type=str,
help="location of the sqlite3 database to store training information",
)
cmd_parser.add_argument(
"--keep_temp",
action="store_false",
help="keep the temporary files after processing",
)
cmd_parser.set_defaults(func=params_from_cmd)
# Extract the parameters based on the yaml/command line argument
args = parser.parse_args()
parameters = args.func(args)
print(parameters)
# Execute the command
try:
prepare_training_data(
parameters["phase_dir"],
parameters["cell_info_dir"],
parameters["cell_info_path"],
parameters["space_group_dir"],
parameters["space_group_path"],
parameters["xyz_limits"],
parameters["output_dir"],
parameters["db_path"],
parameters["delete_temp"],
)
except KeyError as e:
logging.error(f"Could not find parameter {e} to prepare training data")
|
mevol/python_topaz3
|
topaz3/prepare_training_data.py
|
prepare_training_data.py
|
py
| 14,661
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8042412809
|
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
# Define command-line/config options, e.g. --port=9000 or list=a,b,c
tornado.options.define("port", default=8000, type=None)
tornado.options.define("list", default=[], type=str, multiple=True)  # comma-separated values parsed into a list
class IndexHandler(tornado.web.RequestHandler):
    """Handler for "/": replies with a fixed greeting."""
    def get(self, *args, **kwargs):
        self.write("hello customer server.")
if __name__ == '__main__':
    tornado.options.options.logging = None  # turn off logging
    # Read option values from the local "config" file instead of argv
    tornado.options.parse_config_file("config")
    print(tornado.options.options.list)
    app = tornado.web.Application([
        (r"/", IndexHandler)
    ])
    httpserver = tornado.httpserver.HTTPServer(app)
    # use parameter value
    httpserver.bind(tornado.options.options.port)
    httpserver.start(1)  # single server process
    tornado.ioloop.IOLoop.current().start()  # block in the event loop
|
zuohd/python-excise
|
tornado/server04.py
|
server04.py
|
py
| 847
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4786996440
|
# Still figuring this out. (sliding window: longest span built while fewer than k distinct values)
from collections import defaultdict
n,k = map(int,input().split())
a = list(map(int,input().split()))
d = defaultdict(int)  # value -> count within the current window [left, right)
right = 0
ans = 0  # best (longest) window length seen so far
kinds = 0  # number of distinct values currently in the window
for left in range(n):
    # Extend the window while fewer than k distinct values are inside.
    # NOTE(review): the guard `kinds < k` lets the loop add one more element
    # that may push the distinct count to k (or beyond) - confirm whether the
    # intended condition is "at most k distinct" (usually `kinds <= k` with a
    # check after insertion). The author's own header marks this as unresolved.
    while right < n and kinds < k:
        d[a[right]] += 1
        right += 1
        kinds = len(d)
        print("whileループの中",kinds,d)
    """
    if left == right:
        right += 1
        continue
    """
    print(left,right,"  ",right-left)
    if ans < right - left:
        ans = right-left
        print("ansを更新しました!",ans)
    # Shrink: drop the left element before advancing the window start
    d[a[left]] -= 1
    if d[a[left]] == 0:
        print("削除します",d)
        kinds -= 1
        del d[a[left]]
print(ans)
|
K5h1n0/compe_prog_new
|
typical90/034/main.py
|
main.py
|
py
| 725
|
python
|
ja
|
code
| 0
|
github-code
|
6
|
34859170758
|
#####
# Remove "warn" logs from spark
#####
from os.path import abspath
from pyspark.sql import SparkSession
# warehouse_location points to the default location for managed databases and tables
warehouse_location = abspath('spark-warehouse')
spark = SparkSession \
.builder \
.appName("Pyspark integration with Hive") \
.config("spark.sql.warehouse.dir", warehouse_location) \
.enableHiveSupport() \
.getOrCreate()
# enableHiveSupport() option in spark session supports the connection with Hive
# Queries are expressed in HiveQL
spark.sql("SELECT * FROM company.employees").show()
employees_df = spark.sql("SELECT id, first_name, last_name, age, gender \
FROM company.employees \
WHERE age < 30 \
ORDER BY first_name")
employees_df.show(50)
|
zaka-ai/data-engineer-track
|
Big_data_warehousing_in_hadoop/hive_hands_on/2_hive_partitioning_pyspark_integration/2_2_hive_with_pyspark.py
|
2_2_hive_with_pyspark.py
|
py
| 828
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35164168406
|
#!/usr/bin/python3
import os
import json
import html
import random
import string
import threading
import subprocess
from bottle import app, error, post, request, redirect, route, run, static_file
from beaker.middleware import SessionMiddleware
# Beaker session configuration: file-backed sessions stored under ./cfg/
session_opts = {
    'session.type': 'file',
    'session.data_dir': './cfg/',
    'session.auto': True,
}
sapp = SessionMiddleware(app(), session_opts)
# NOTE(review): request.environ is only populated during a request; at module
# import time this is effectively always None - confirm this line is needed
# at all (index() re-fetches the session itself).
sess = request.environ.get('beaker.session')
ipfspath = '/usr/local/bin/ipfs'  # absolute path to the ipfs binary used by subprocess calls
# Cached operator e-mail, editable from the web UI
with open('cfg/email.cfg', 'r') as ecf:
    email = ecf.read()
# Read this node's peer ID out of the IPFS repo config, if it exists
ipfs_id = ''
if os.path.exists('ipfs/config'):
    with open('ipfs/config', 'r') as ipcfg:
        ipconfig = ipcfg.read()
    jtxt = json.loads(ipconfig)
    ipfs_id = jtxt['Identity']['PeerID']
@route('/')
def index():
sess = request.environ.get('beaker.session')
sess['csrf'] = ''.join(random.choice(string.ascii_letters) for i in range(12))
sess.save()
htmlsrc = '<html><head>'
htmlsrc += '<title>IPFS Podcast Node</title>'
htmlsrc += '<meta name="viewport" content="width=device-width, initial-scale=1.0" />'
htmlsrc += '<link rel="icon" href="/favicon.png">'
htmlsrc += '<style>'
htmlsrc += 'body { background-image: url("ipfspod.png"); background-repeat: no-repeat; background-position: 50% 50%; font-family: "Helvetica Neue",Helvetica,Arial,sans-serif; font-size: 14px; margin: 1em; } '
htmlsrc += '.nfo { border-radius: 20px; background-color: darkcyan; color: white; opacity: 0.6; padding: 10px; } '
htmlsrc += 'label { display: inline-block; width: 65px; text-align: right; } '
htmlsrc += 'form#ecfg { margin-bottom: 0; } '
htmlsrc += 'form#ecfg input { margin: 4px; width: calc(100% - 150px); max-width: 200px; } '
htmlsrc += 'form#frst button { background-color: pink; border-color: indianred; margin: 4px; padding: 3px 13px; font-weight: bold; border-radius: 10px; display: inline-block; font-size: 9pt; white-space: nowrap; } '
htmlsrc += 'form#igc { display: inline-block; margin-left: 5px; } '
htmlsrc += 'div.prog { height: 5px; background-color: gray; border-radius: 0.25rem; } '
htmlsrc += 'div.prog div.used { height: 5px; background-color: lime; border-radius: 0.25rem; } '
htmlsrc += 'pre { overflow: auto; height: 50%; display: flex; flex-direction: column-reverse; white-space: break-spaces; } '
htmlsrc += 'div#links a { background-color: lightgray; margin: 4px; padding: 5px 13px; font-weight: bold; border-radius: 10px; display: inline-block; font-size: 9pt; text-decoration: none; } '
htmlsrc += 'a.ppass, a.pwarn, a.pfail { padding: 3px 8px 1px 8px; border-radius: 8px; display: inline-block; font-size: 9pt; font-weight: bold; text-decoration: none; } '
htmlsrc += 'a.ppass { background-color: lightgreen; color: green; } '
htmlsrc += 'a.pwarn { background-color: palegoldenrod; color: darkorange; } '
htmlsrc += 'a.pfail { background-color: pink; color: red; } '
htmlsrc += 'div#tmr { height: 3px; margin-bottom: 0.5em; background-color: lightblue; animation: tbar 60s linear; } '
htmlsrc += '@keyframes tbar { 0% { width: 0%; } 90% { background-color: cornflowerblue; } 100% { width: 100%; background-color: red; } } '
htmlsrc += '</style>'
htmlsrc += '</head>'
htmlsrc += '<body>'
htmlsrc += '<h2>IPFS Podcasting Node</h2>'
htmlsrc += '<div class="nfo" style="background-color: #222; overflow: hidden;">'
if ipfs_id != '':
htmlsrc += '<div style="white-space: nowrap;"><label>IPFS ID : </label> <b>' + str(ipfs_id) + '</b></div>'
htmlsrc += '<form id="ecfg" action="/" method="post">'
htmlsrc += '<input id="csrf" name="csrf" type="hidden" value="' + sess['csrf'] + '" />'
htmlsrc += '<label title="E-mail Address (optional)">E-Mail : </label><input id="email" name="email" type="email" placeholder="user@example.com" title="E-mail Address (optional)" value="' + email + '" />'
htmlsrc += '<button>Update</button><br/>'
htmlsrc += '</form>'
htmlsrc += '<label>Network : </label> '
httpstat = 'pfail'
hstat = subprocess.run('timeout 1 bash -c "</dev/tcp/ipfspodcasting.net/80"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if hstat.returncode == 0:
httpstat = 'ppass'
htmlsrc += '<a class="' + httpstat + '" href="https://ipfspodcasting.net/Help/Network" title="Port 80 Status" target="_blank">HTTP</a> '
httpsstat = 'pfail'
hsstat = subprocess.run('timeout 1 bash -c "</dev/tcp/ipfspodcasting.net/443"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if hsstat.returncode == 0:
httpsstat = 'ppass'
htmlsrc += '<a class="' + httpsstat + '" href="https://ipfspodcasting.net/Help/Network" title="Port 443 Status" target="_blank">HTTPS</a> '
peercnt = 0
speers = subprocess.run(ipfspath + ' swarm peers|wc -l', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if speers.returncode == 0:
peercnt = int(speers.stdout.decode().strip())
if peercnt > 400:
ipfsstat = 'ppass'
elif peercnt > 100:
ipfsstat = 'pwarn'
else:
ipfsstat = 'pfail'
htmlsrc += '<a class="' + ipfsstat + '" href="https://ipfspodcasting.net/Help/Network" title="Port 4001 Status" target="_blank">IPFS <span style="font-weight: normal; color: #222;">- ' + str(peercnt) + ' Peers</span></a><br/>'
repostat = subprocess.run(ipfspath + ' repo stat -s|grep RepoSize', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if repostat.returncode == 0:
repolen = repostat.stdout.decode().strip().split(':')
used = int(repolen[1].strip())
else:
used = 0
df = os.statvfs('/')
avail = df.f_bavail * df.f_frsize
percent = round(used/(used+avail)*100, 1)
if used < (1024*1024*1024):
used = str(round(used/1024/1024, 1)) + ' MB'
elif used < (1024*1024*1024*1024):
used = str(round(used/1024/1024/1024, 1)) + ' GB'
else:
used = str(round(used/1024/1024/1024/1024, 2)) + ' TB'
if avail < (1024*1024*1024):
avail = str(round(avail/1024/1024, 1)) + ' MB'
elif avail < (1024*1024*1024*1024):
avail = str(round(avail/1024/1024/1024, 1)) + ' GB'
else:
avail = str(round(avail/1024/1024/1024/1024, 2)) + ' TB'
htmlsrc += '<label>Storage : </label>'
htmlsrc += '<div style="display: inline-block; margin-left: 5px; position: relative; top: 5px; width: calc(100% - 150px);">'
htmlsrc += '<div class="prog"><div class="used" style="width: ' + str(percent) + '%; min-width: 4px;"></div></div>'
htmlsrc += '<div style="display: flex; margin-top: 3px;"><span style="width: 33.3%; text-align: left;">' + str(used) + ' Used</span><span style="width: 33.3%; text-align: center;">' + str(percent) + '%</span><span style="width: 33.3%; text-align: right;">' + str(avail) + ' Available</span></div>'
htmlsrc += '</div>'
#don't allow gc while pinning (or already running)
gctxt = ''
gcrun = subprocess.run('ps x|grep -E "(repo gc|ipfs pin)"|grep -v grep', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if gcrun.returncode == 0:
gctxt = gcrun.stdout.decode().strip()
if gctxt == '':
disabled = ''
title = 'Run IPFS Garbage Collection'
else:
disabled = 'disabled="disabled"'
title = 'Not available while pinning or GC already running...'
htmlsrc += '<form id="igc" action="/" method="post">'
htmlsrc += '<input id="csrf" name="csrf" type="hidden" value="' + sess['csrf'] + '" />'
htmlsrc += '<input id="rungc" name="rungc" type="hidden" value="1" />'
htmlsrc += '<button ' + disabled + ' title="' + title + '">Clean Up</button>'
htmlsrc += '</form>'
htmlsrc += '</div>'
htmlsrc += '<h3 style="margin-bottom: 0;">Activity Log</h3>'
htmlsrc += '<pre class="nfo" style="margin-top: 0;">'
with open('ipfspodcastnode.log', 'r') as pcl:
logtxt = pcl.read()
htmlsrc += html.escape(logtxt)
htmlsrc += '</pre>'
htmlsrc += '<div id="tmr"></div>'
htmlsrc += '<form id="frst" action="/" method="post" style="float: right;">'
htmlsrc += '<input id="csrf" name="csrf" type="hidden" value="' + sess['csrf'] + '" />'
htmlsrc += '<input id="reset" name="reset" type="hidden" value="1" />'
htmlsrc += '<button title="Hard reset the IPFS app (when "it\'s just not working")">Restart IPFS</button>'
htmlsrc += '</form>'
htmlsrc += '<div id="links"><a href="https://ipfspodcasting.net/Manage" target="_blank">Manage</a><a href="https://ipfspodcasting.net/faq" target="_blank">FAQ</a></div>'
#<a id="ipfsui" href="http://umbrel.local:5001/webui" target="_blank">IPFS WebUI</a><a id="ipfspn" href="http://umbrel.local:5001/webui/#/pins" target="_blank">Pinned Files</a>
htmlsrc += '<script>window.setTimeout( function() { window.location.reload(); }, 60000); </script>'
#document.getElementById("ipfsui").href=window.location.href; document.getElementById("ipfsui").href=document.getElementById("ipfsui").href.replace("8675", "5001/webui"); document.getElementById("ipfspn").href=window.location.href; document.getElementById("ipfspn").href=document.getElementById("ipfspn").href.replace("8675", "5001/webui/#/pins");
htmlsrc += '</body></html>'
return htmlsrc
@post('/')
def do_email():
    """Handle form posts from the web UI: update the configured e-mail
    address, hard-restart the app, or trigger IPFS garbage collection.

    Always redirects back to '/' so a browser refresh cannot re-submit.
    """
    csrf = request.forms.get('csrf')
    sess = request.environ.get('beaker.session')
    # Only act when the submitted CSRF token matches the session's token.
    if csrf == sess['csrf']:
        if request.forms.get('email') is not None:
            global email
            email = request.forms.get('email')
            # Persist the address so it survives restarts.
            with open('cfg/email.cfg', 'w') as ecf:
                ecf.write(email)
        if request.forms.get('reset') == '1':
            # Kill PID 1 — inside the container this terminates the app so
            # the supervisor restarts it (the "Restart IPFS" hard reset).
            suicide = subprocess.run('kill 1', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if request.forms.get('rungc') == '1':
            # "Clean Up" button: reclaim unpinned blocks from the IPFS repo.
            gcrun = subprocess.run(ipfspath + ' repo gc --silent', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    redirect('/')
@route('/ipfspod.png')
def server_static():
    # Serve the logo image from the app's working directory. (Bottle binds
    # the handler at decoration time, so reusing the function name for the
    # favicon route below is harmless.)
    return static_file('ipfspod.png', root='')
@route('/favicon.png')
def server_static():
    # Serve the favicon from the app's working directory.
    return static_file('favicon.png', root='')
#run(host='0.0.0.0', port=8675, debug=True)
# Run the Bottle server on all interfaces (port 8675) in a background thread
# so the caller's main loop keeps running alongside the web UI.
threading.Thread(target=run, kwargs=dict(host='0.0.0.0', port=8675, app=sapp, debug=False)).start()
|
Cameron-IPFSPodcasting/podcastnode-Umbrel
|
webui.py
|
webui.py
|
py
| 9,972
|
python
|
en
|
code
| 4
|
github-code
|
6
|
71552358588
|
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os, os.path
import smtplib
import random
import win32gui
import win32con
try:
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
print(voices[0].id)
engine.setProperty('voice',voices[0].id)
    def speak(audio):
        """Speak *audio* aloud via the shared SAPI5 engine; blocks until done."""
        engine.say(audio)
        engine.runAndWait()
def chrome_webbrowser(chrome_path, url):
webbrowser.get(chrome_path).open(url)
def wishme():
hour=int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak('Good Morning Vicky')
elif hour>=12 and hour>=17:
speak('Good Afternoon Vicky')
else:
speak('Good Evening')
print('Hellow I am Computer, How can I help you!')
speak('Hellow I am Computer, How can I help you!')
    def takecommand():
        """Listen on the default microphone and return the recognised phrase.

        Returns the transcript string, or the literal string 'None' when
        recognition fails (callers only do keyword matching, so the string
        'None' simply matches nothing).
        """
        r=sr.Recognizer()
        with sr.Microphone() as source:
            print('Listening....')
            r.pause_threshold=1
            # Wait at most 1s for speech to begin; cap each phrase at 3s.
            audio=r.listen(source,timeout=1,phrase_time_limit=3)
        try:
            print('Recognizing....')
            # Online recognition via Google's API, Indian-English model.
            querry=r.recognize_google(audio,language='en-in')
            print(f'You said {querry}')
        except Exception:
            print('Say That Again Please!')
            return 'None'
        return querry
    if __name__ == "__main__":
        chrome_path="C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s"
        webbrowser.get(chrome_path)
        # Outer loop: passively listen for system/power phrases until the
        # wake word "command(s)" switches into the full assistant loop below.
        while True:
            querry=takecommand().lower()
            # Start: direct system-control commands, always available.
            # Several near-duplicate spellings tolerate speech-recognition
            # variants ("poweroff" / "power off" / "power of", ...).
            if "poweroff computer" in querry:
                speak("Computer has been closed")
                os.system("shutdown /s /t 1")
            elif "power off computer" in querry:
                speak("Computer has been closed")
                os.system("shutdown /s /t 1")
            elif "power of computer" in querry:
                speak("Computer has been closed")
                os.system("shutdown /s /t 1")
            elif "shutdown computer" in querry or "shut down computer" in querry:
                speak("Computer has been closed")
                os.system("shutdown /s /t 1")
            elif "computer shutdown" in querry or "computer shut down" in querry:
                speak("Computer has been closed")
                os.system("shutdown /s /t 1")
            elif "quit computer" in querry:
                speak("Computer has been power off")
                os.system("shutdown /s /t 1")
            elif "restartcomputer" in querry:
                speak("We restarting your PC")
                os.system("shutdown /r /t 1")
            elif "restart computer" in querry:
                speak("We restarting your PC")
                os.system("shutdown /r /t 1")
            elif "rstart computer" in querry:
                speak("We restarting your PC")
                os.system("shutdown /r /t 1")
            # NOTE(review): duplicate of the "restart computer" branch two
            # above — unreachable, candidate for removal.
            elif "restart computer" in querry:
                speak("We restarting your PC")
                os.system("shutdown /r /t 1")
            elif "hybrid" in querry or "hybernate" in querry or "hybernation" in querry or "hibernation" in querry:
                speak("We set your PC to sleeping mode")
                os.system("Rundll32.exe Powrprof.dll,SetSuspendState Sleep")
            elif "sleep" in querry or "sleap" in querry:
                speak("We set your PC to sleeping mode or turn off your screen")
                win32gui.SendMessage(win32con.HWND_BROADCAST,win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, 2)
            elif "open screen" in querry or "openscreen" in querry or "screen" in querry:
                speak("We open your screen")
                win32gui.SendMessage(win32con.HWND_BROADCAST,
                    win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, -1)
            elif "vscode" in querry or "vs code" in querry:
                speak("Vs code open to you Vicky")
                os.system("code .")
            # End of system power commands.
            # Inner assistant loop: entered on the "command(s)" wake word;
            # wishme() greets once (i guards the first iteration only).
            if "commands" in querry or "command" in querry:
                i=0
                while True:
                    if i==0:
                        i=1
                        wishme()
                    querry=takecommand().lower()
                    if 'wikipedia' in querry:
                        speak('Searching wikipedia...')
                        querry=querry.replace('wikipedia','')
                        querry=querry.replace('please','')
                        # Read out a two-sentence summary of the topic.
                        results=wikipedia.summary(querry,sentences=2)
                        speak('According to wikipedia, ')
                        print(results)
                        speak(results)
                    elif "vscode" in querry or "vs code" in querry:
                        speak("Vs code open to you Vicky")
                        os.system("code .")
                    elif 'who are you' in querry:
                        print('I am Computer Sir!')
                        speak('I am Computer Sir!')
                    elif 'made you' in querry:
                        print('I am made by you Sir Waqas powered by Vicky World Production')
                        speak('I am made by you Sir Waqas powered by Vicky World Production')
                    elif "sleep" in querry or "sleap" in querry:
                        speak("We set your PC to sleeping mode")
                        # os.system("Powercfg -H OFF")
                        win32gui.SendMessage(win32con.HWND_BROADCAST,win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, 2)
                    elif 'open youtube' in querry:
                        url=('youtube.com')
                        chrome_webbrowser(chrome_path,url)
                        # webbrowser.open('youtube.com')
                        speak('Youtube has been opened dear Vicky')
                    elif 'open google' in querry or 'open chrome' in querry:
                        # webbrowser.open('google.com')
                        url=('google.com')
                        chrome_webbrowser(chrome_path,url)
                        speak('Google Has been opened dear Vicky')
                    elif 'stack overflow' in querry:
                        # webbrowser.open('stackoverflow.com')
                        url=('stackoverflow.com')
                        chrome_webbrowser(chrome_path,url)
                    elif 'stackoverflow' in querry:
                        url=('stackoverflow.com')
                        chrome_webbrowser(chrome_path,url)
                    elif 'time' in querry:
                        # NOTE(review): ``str`` shadows the builtin here —
                        # harmless locally but worth renaming.
                        str=datetime.datetime.now().strftime('%H:%M:%S')
                        print(f"Time is{str}")
                        speak(f"Time is {str}")
                    elif 'search' in querry:
                        querry=querry.replace('search','')
                        querry=querry.replace('please','')
                        chrome_webbrowser(chrome_path,querry)
                    elif 'song' in querry or 'songs' in querry:
                        music_dir=r'E:\D\New folder (2)'
                        songs=os.listdir(music_dir)
                        print(songs)
                        # NOTE(review): this counts files in the CURRENT
                        # directory, not music_dir — the random index may
                        # exceed len(songs); presumably a bug, verify.
                        files_len= len([name for name in os.listdir('.') if os.path.isfile(name)])
                        print(files_len)
                        r= random.randint(0, files_len-1)
                        print(songs[r])
                        os.startfile(os.path.join(music_dir,songs[r]))
                    elif 'stop' in querry:
                        # Leave the assistant loop, back to passive listening.
                        print('Commands has been stopped Thank You Sir!')
                        speak('Commands has been stopped Thank You Sir!')
                        break
                    elif 'quit' in querry or 'exit' in querry:
                        print('Commands has been stopped. Thank You Sir!')
                        speak('Commands has been stopped. Thank You Sir!')
                        break
                    elif "shutdown computer" in querry or "shut down computer" in querry:
                        speak("Computer has been closed")
                        os.system("shutdown /s /t 1")
                    elif "computer shutdown" in querry or "computer shut down" in querry:
                        speak("Computer has been closed")
                        os.system("shutdown /s /t 1")
                    elif "poweroff computer" in querry:
                        speak("Computer has been closed")
                        os.system("shutdown /s /t 1")
                    elif "power off computer" in querry:
                        speak("Computer has been closed")
                        os.system("shutdown /s /t 1")
                    elif "power of computer" in querry:
                        speak("Computer has been closed")
                        os.system("shutdown /s /t 1")
                    # NOTE(review): ``querry`` is lower-cased above, so the
                    # capital C in "quit Computer" can never match.
                    elif "quit Computer" in querry:
                        speak("Computer has been power off")
                        os.system("shutdown /s /t 1")
                    elif "restartcomputer" in querry:
                        speak("We restarting your PC")
                        os.system("shutdown /r /t 1")
                    elif "restart computer" in querry:
                        speak("We restarting your PC")
                        os.system("shutdown /r /t 1")
                    elif "rstart computer" in querry:
                        speak("We restarting your PC")
                        os.system("shutdown /r /t 1")
                    # NOTE(review): unreachable — same lower-cased pattern as
                    # the "restart computer" branch above.
                    elif "restart Computer" in querry:
                        speak("We restarting your PC")
                        os.system("shutdown /r /t 1")
                    elif "sleep" in querry or "sleap" in querry:
                        speak("We set your PC to sleeping mode or turn off your screen")
                        win32gui.SendMessage(win32con.HWND_BROADCAST,win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, 2)
                    elif "open screen" in querry or "openscreen" in querry:
                        speak("Screen has been turn")
                        win32gui.SendMessage(win32con.HWND_BROADCAST,
                            win32con.WM_SYSCOMMAND, win32con.SC_MONITORPOWER, -1)
except Exception as e:
speak('An unknown Error has been occured Check Your Connection Please')
|
IamVicky90/Desktop-AI
|
task.py
|
task.py
|
py
| 10,566
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24260931224
|
# Earlier interactive version (kept for reference):
#   first = input("Please input your first name:")
#   last = input("Please input your last name:")
#   user = {"name": first, "lname": last}
#   print("My name is, " + user["lname"] + " " + user["name"])

# Activity 2: assemble a nested user record from hard-coded values.
first_name = "Wesley"
last_name = "Kolar"
home_address = {"street": "1200 Richmond Ave", "city": "Houston"}
vacation_home_address = {"street": "5533 Stiener Ranch", "city": "Austin"}

# Both addresses are grouped into a list, then embedded in the user record.
addresses = [home_address, vacation_home_address]
user = dict(
    first_name=first_name,
    last_name=last_name,
    addresses=addresses,
)
print(user)
|
wesleyjkolar/week1
|
day4/dictionary.py
|
dictionary.py
|
py
| 550
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23476634886
|
import joblib
# Vocabulary (tokenised words) and intent classes for the Twitter-bot chat
# model, persisted with joblib so the bot can load them at runtime.
wordsTB = ["'s", ',', 'keywords', 'Twitter', 'account', 'a', 'all', 'anyone', 'are', 'awesome', 'be', 'behavior', 'by', 'bye', 'can', 'chatting', 'check', 'could', 'data', 'day', 'detail', 'do', 'dont', 'find', 'for', 'give', 'good', 'goodbye', 'have', 'hello', 'help', 'helpful', 'helping', 'hey', 'hi', 'history', 'how', 'i', 'id', 'is', 'later', 'list', 'load', 'locate', 'log', 'looking', 'lookup', 'management', 'me', 'module', 'next', 'nice', 'of', 'offered', 'open', 'provide', 'reaction', 'related', 'result', 'search', 'searching', 'see', 'show', 'support', 'task', 'thank', 'thanks', 'that', 'there', 'till', 'time', 'to', 'transfer', 'up', 'want', 'what', 'which', 'with', 'you']
classesTB = ['goodbye', 'greeting', 'options', 'thanks', 'no_response']
joblib.dump(wordsTB, 'wordsTB.pkl')
joblib.dump(classesTB, 'classesTB.pkl')
'''
x = joblib.load('x.pkl')
print(x)
'''
|
kaitong-li/Twitter-Bot
|
Twitter Bot/generatePkl.py
|
generatePkl.py
|
py
| 906
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27579907019
|
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
# Spark sized for a large single job: 32 cores, ~50g driver/executor memory,
# bigger result limit and a longer heartbeat to tolerate slow stages.
conf = SparkConf().set("spark.cores.max", "32") \
    .set("spark.driver.memory", "50g") \
    .set("spark.executor.memory", "50g") \
    .set("spark.executor.memory_overhead", "50g") \
    .set("spark.driver.maxResultsSize", "16g")\
    .set("spark.executor.heartbeatInterval", "30s")
sc = SparkContext(conf=conf).getOrCreate();
spark = SparkSession(sc)
# read baskets_prior
baskets = spark.read.csv('./data/baskets_prior.csv',header=True, inferSchema=True)
baskets.createOrReplaceTempView("baskets")
baskets.show(5)
print(baskets.count())
# transform string to list: the "items" column is a JSON array stored as a
# string; parse it into a real array<string> column for FPGrowth.
import pyspark.sql.functions as F
df2 = baskets.withColumn(
    "new_items",
    F.from_json(F.col("items"), "array<string>")
)
df2 = df2.drop('items')
df2.show(5)
from pyspark.ml.fpm import FPGrowth
import time
start = time.time()
local_time = time.ctime(start)
print("Start time:", local_time)
# Mine frequent itemsets and association rules with FP-Growth.
fpGrowth = FPGrowth(itemsCol="new_items", minSupport=0.000015, minConfidence=0.7)
model = fpGrowth.fit(df2)
model.associationRules.show()
print(model.associationRules.count())
assoRules = model.associationRules
freqItems = model.freqItemsets
end = time.time()
print("run time: ", (end-start)/60)
local_time = time.ctime(end)
print("End time:", local_time)
# freq to pandas, sorted by support count, then dumped to CSV.
freq_pd =freqItems.toPandas()
freq_pd = freq_pd.sort_values('freq', ascending=False)
print(freq_pd.head(5))
freq_pd.to_csv('./data/freqItems_baskets3M.csv', index=False)
# save rules: array columns must be rendered as strings before CSV export.
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
def array_to_string(my_list):
    # Render an array column as "[a,b,c]" so it survives the CSV writer.
    return '[' + ','.join([str(elem) for elem in my_list]) + ']'
array_to_string_udf = udf(array_to_string, StringType())
assoRules = assoRules.withColumn('antecedent', array_to_string_udf(assoRules["antecedent"]))
assoRules = assoRules.withColumn('consequent', array_to_string_udf(assoRules["consequent"]))
print('after convert string to save: ', assoRules.show(7))
assoRules.coalesce(1).write.csv('./data/assoRules_baskets3M_50_70%')
|
thuy4tbn99/spark_instacart
|
baskets.py
|
baskets.py
|
py
| 2,095
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1584371171
|
import ctypes
import ctypes.util
import threading
# this is mostly copied from https://bugs.python.org/issue15500#msg230736
def patch():
    """Monkey-patch ``threading.Thread.start`` so each thread's Python name
    is propagated to the OS via ``pthread_setname_np`` (Linux/glibc).

    Returns True on success; returns None (no-op) when already patched or
    when the pthread library / symbol is unavailable on this platform.
    """
    if getattr(threading.Thread.start, "_namedthreads_patched", None):
        # threading module is already patched
        return

    pthread_path = ctypes.util.find_library("pthread")
    if not pthread_path:
        # pthread library not found, not patching
        return

    pthread = ctypes.CDLL(pthread_path)
    if not hasattr(pthread, "pthread_setname_np"):
        # pthread library does not have pthread_setname_np, not patching
        return

    setname = pthread.pthread_setname_np
    setname.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    setname.restype = ctypes.c_int

    original_start = threading.Thread.start

    def start(self):
        original_start(self)
        try:
            thread_name = self.name
            if thread_name:
                if hasattr(thread_name, "encode"):
                    thread_name = thread_name.encode('ascii', 'replace')
                thread_id = getattr(self, "ident", None)
                if thread_id is not None:
                    # Kernel thread names are limited to 15 chars + NUL.
                    setname(thread_id, thread_name[:15])
        except Exception:
            pass  # naming is best-effort; never break Thread.start

    start._namedthreads_patched = True
    start._namedthreads_orig = original_start
    threading.Thread.start = start
    return True
def unpatch():
    """Undo ``patch()``: restore the original ``threading.Thread.start``.

    No-op when the threading module is not currently patched.
    """
    if not getattr(threading.Thread.start, "_namedthreads_patched", None):
        # threading module is not patched
        return
    # BUG FIX: the original read ``threading.Threading.start``, which raises
    # AttributeError (the class is ``threading.Thread``), so unpatching was
    # impossible.
    patched_start = threading.Thread.start
    threading.Thread.start = patched_start._namedthreads_orig
|
beniwohli/namedthreads
|
namedthreads.py
|
namedthreads.py
|
py
| 1,681
|
python
|
en
|
code
| 1
|
github-code
|
6
|
27458445454
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import vector_diffeomixture as vector_diffeomixture_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_diag as linop_diag_lib
from tensorflow.contrib.linalg.python.ops import linear_operator_identity as linop_identity_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class VectorDistributionTestHelpers(object):
  """VectorDistributionTestHelpers helps test vector-event distributions."""
  def linop(self, num_rows=None, multiplier=None, diag=None):
    """Helper to create non-singular, symmetric, positive definite matrices."""
    # Exactly one operator flavour is selected by which kwargs are given:
    # (num_rows, multiplier) -> scaled identity; num_rows -> identity;
    # diag -> diagonal. Mixed argument groups are rejected.
    if num_rows is not None and multiplier is not None:
      if any(p is not None for p in [diag]):
        raise ValueError("Found extra args for scaled identity.")
      return linop_identity_lib.LinearOperatorScaledIdentity(
          num_rows=num_rows,
          multiplier=multiplier,
          is_positive_definite=True)
    elif num_rows is not None:
      if any(p is not None for p in [multiplier, diag]):
        raise ValueError("Found extra args for identity.")
      return linop_identity_lib.LinearOperatorIdentity(
          num_rows=num_rows,
          is_positive_definite=True)
    elif diag is not None:
      if any(p is not None for p in [num_rows, multiplier]):
        raise ValueError("Found extra args for diag.")
      return linop_diag_lib.LinearOperatorDiag(
          diag=diag,
          is_positive_definite=True)
    else:
      raise ValueError("Must specify at least one arg.")
  def run_test_sample_consistent_log_prob(
      self,
      sess,
      dist,
      num_samples=int(1e5),
      radius=1.,
      center=0.,
      seed=42,
      rtol=1e-2,
      atol=0.):
    """Tests that sample/log_prob are mutually consistent.
    "Consistency" means that `sample` and `log_prob` correspond to the same
    distribution.
    The idea of this test is to compute the Monte-Carlo estimate of the volume
    enclosed by a hypersphere, i.e., the volume of an `n`-ball. While we could
    choose an arbitrary function to integrate, the hypersphere's volume is nice
    because it is intuitive, has an easy analytical expression, and works for
    `dimensions > 1`.
    Technical Details:
    Observe that:
    ```none
    int_{R**d} dx [x in Ball(radius=r, center=c)]
    = E_{p(X)}[ [X in Ball(r, c)] / p(X) ]
    = lim_{m->infty} m**-1 sum_j^m [x[j] in Ball(r, c)] / p(x[j]),
        where x[j] ~iid p(X)
    ```
    Thus, for fixed `m`, the above is approximately true when `sample` and
    `log_prob` are mutually consistent.
    Furthermore, the above calculation has the analytical result:
    `pi**(d/2) r**d / Gamma(1 + d/2)`.
    Note: this test only verifies a necessary condition for consistency--it does
    does not verify sufficiency hence does not prove `sample`, `log_prob` truly
    are consistent. For this reason we recommend testing several different
    hyperspheres (assuming the hypersphere is supported by the distribution).
    Furthermore, we gain additional trust in this test when also tested `sample`
    against the first, second moments
    (`run_test_sample_consistent_mean_covariance`); it is probably unlikely that
    a "best-effort" implementation of `log_prob` would incorrectly pass both
    tests and for different hyperspheres.
    For a discussion on the analytical result (second-line) see:
      https://en.wikipedia.org/wiki/Volume_of_an_n-ball.
    For a discussion of importance sampling (fourth-line) see:
      https://en.wikipedia.org/wiki/Importance_sampling.
    Args:
      sess: Tensorflow session.
      dist: Distribution instance or object which implements `sample`,
        `log_prob`, `event_shape_tensor` and `batch_shape_tensor`. The
        distribution must have non-zero probability of sampling every point
        enclosed by the hypersphere.
      num_samples: Python `int` scalar indicating the number of Monte-Carlo
        samples to draw from `dist`.
      radius: Python `float`-type indicating the radius of the `n`-ball which
        we're computing the volume.
      center: Python floating-type vector (or scalar) indicating the center of
        the `n`-ball which we're computing the volume. When scalar, the value is
        broadcast to all event dims.
      seed: Python `int` indicating the seed to use when sampling from `dist`.
        In general it is not recommended to use `None` during a test as this
        increases the likelihood of spurious test failure.
      rtol: Python `float`-type indicating the admissible relative error between
        actual- and approximate-volumes.
      atol: Python `float`-type indicating the admissible absolute error between
        actual- and approximate-volumes. In general this should be zero since
        a typical radius implies a non-zero volume.
    """
    def actual_hypersphere_volume(dims, radius):
      # https://en.wikipedia.org/wiki/Volume_of_an_n-ball
      # Using tf.lgamma because we'd have to otherwise use SciPy which is not
      # a required dependency of core.
      radius = np.asarray(radius)
      dims = math_ops.cast(dims, dtype=radius.dtype)
      return math_ops.exp(
          (dims / 2.) * np.log(np.pi)
          - math_ops.lgamma(1. + dims / 2.)
          + dims * math_ops.log(radius))
    def is_in_ball(x, radius, center):
      # Indicator (as 0./1. float) of membership in Ball(radius, center).
      return math_ops.cast(linalg_ops.norm(x - center, axis=-1) <= radius,
                           dtype=x.dtype)
    def monte_carlo_hypersphere_volume(dist, num_samples, radius, center):
      # https://en.wikipedia.org/wiki/Importance_sampling
      x = dist.sample(num_samples, seed=seed)
      return math_ops.reduce_mean(
          math_ops.exp(-dist.log_prob(x)) * is_in_ball(x, radius, center),
          axis=0)
    # Evaluate analytical and Monte-Carlo volumes in one session run, then
    # compare them (the analytical value is tiled across the batch shape).
    [
        batch_shape_,
        actual_volume_,
        sample_volume_,
    ] = sess.run([
        dist.batch_shape_tensor(),
        actual_hypersphere_volume(
            dims=dist.event_shape_tensor()[0],
            radius=radius),
        monte_carlo_hypersphere_volume(
            dist,
            num_samples=num_samples,
            radius=radius,
            center=center),
    ])
    self.assertAllClose(np.tile(actual_volume_, reps=batch_shape_),
                        sample_volume_,
                        rtol=rtol, atol=atol)
  def run_test_sample_consistent_mean_covariance(
      self,
      sess,
      dist,
      num_samples=int(1e5),
      seed=24,
      rtol=1e-2,
      atol=0.,
      cov_rtol=None,
      cov_atol=None):
    """Tests that sample/mean/covariance are consistent with each other.
    "Consistency" means that `sample`, `mean`, `covariance`, etc all correspond
    to the same distribution.
    Args:
      sess: Tensorflow session.
      dist: Distribution instance or object which implements `sample`,
        `log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
      num_samples: Python `int` scalar indicating the number of Monte-Carlo
        samples to draw from `dist`.
      seed: Python `int` indicating the seed to use when sampling from `dist`.
        In general it is not recommended to use `None` during a test as this
        increases the likelihood of spurious test failure.
      rtol: Python `float`-type indicating the admissible relative error between
        analytical and sample statistics.
      atol: Python `float`-type indicating the admissible absolute error between
        analytical and sample statistics.
      cov_rtol: Python `float`-type indicating the admissible relative error
        between analytical and sample covariance. Default: rtol.
      cov_atol: Python `float`-type indicating the admissible absolute error
        between analytical and sample covariance. Default: atol.
    """
    def vec_osquare(x):
      """Computes the outer-product of a vector, i.e., x.T x."""
      return x[..., :, array_ops.newaxis] * x[..., array_ops.newaxis, :]
    # Empirical moments from num_samples draws...
    x = dist.sample(num_samples, seed=seed)
    sample_mean = math_ops.reduce_mean(x, axis=0)
    sample_covariance = math_ops.reduce_mean(
        vec_osquare(x - sample_mean), axis=0)
    sample_variance = array_ops.matrix_diag_part(sample_covariance)
    sample_stddev = math_ops.sqrt(sample_variance)
    # ...compared against the distribution's analytical statistics.
    [
        sample_mean_,
        sample_covariance_,
        sample_variance_,
        sample_stddev_,
        mean_,
        covariance_,
        variance_,
        stddev_
    ] = sess.run([
        sample_mean,
        sample_covariance,
        sample_variance,
        sample_stddev,
        dist.mean(),
        dist.covariance(),
        dist.variance(),
        dist.stddev(),
    ])
    self.assertAllClose(mean_, sample_mean_, rtol=rtol, atol=atol)
    self.assertAllClose(covariance_, sample_covariance_,
                        rtol=cov_rtol or rtol,
                        atol=cov_atol or atol)
    self.assertAllClose(variance_, sample_variance_, rtol=rtol, atol=atol)
    self.assertAllClose(stddev_, sample_stddev_, rtol=rtol, atol=atol)
class VectorDiffeomixtureTest(VectorDistributionTestHelpers, test.TestCase):
  """Tests the VectorDiffeomixture distribution."""
  def testSampleProbConsistentBroadcastMix(self):
    # Two-component mixture over a 4-dim event: scaled-identity scale for
    # component 0, diagonal scale for component 1.
    with self.test_session() as sess:
      dims = 4
      vdm = vector_diffeomixture_lib.VectorDiffeomixture(
          mix_loc=[[0.], [1.]],
          mix_scale=[1.],
          distribution=normal_lib.Normal(0., 1.),
          loc=[
              None,
              np.float32([2.]*dims),
          ],
          scale=[
              linop_identity_lib.LinearOperatorScaledIdentity(
                  num_rows=dims,
                  multiplier=np.float32(1.1),
                  is_positive_definite=True),
              linop_diag_lib.LinearOperatorDiag(
                  diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                  is_positive_definite=True),
          ],
          validate_args=True)
      # Ball centered at component0's mean.
      self.run_test_sample_consistent_log_prob(
          sess, vdm, radius=2., center=0., rtol=0.005)
      # Larger ball centered at component1's mean.
      self.run_test_sample_consistent_log_prob(
          sess, vdm, radius=4., center=2., rtol=0.005)
  def testSampleProbConsistentBroadcastMixNonStandardBase(self):
    # Same structure as above, but with a non-standard base Normal(1, 1.5),
    # so the ball centers shift accordingly.
    with self.test_session() as sess:
      dims = 4
      vdm = vector_diffeomixture_lib.VectorDiffeomixture(
          mix_loc=[[0.], [1.]],
          mix_scale=[1.],
          distribution=normal_lib.Normal(1., 1.5),
          loc=[
              None,
              np.float32([2.]*dims),
          ],
          scale=[
              linop_identity_lib.LinearOperatorScaledIdentity(
                  num_rows=dims,
                  multiplier=np.float32(1.1),
                  is_positive_definite=True),
              linop_diag_lib.LinearOperatorDiag(
                  diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                  is_positive_definite=True),
          ],
          validate_args=True)
      # Ball centered at component0's mean.
      self.run_test_sample_consistent_log_prob(
          sess, vdm, radius=2., center=1., rtol=0.006)
      # Larger ball centered at component1's mean.
      self.run_test_sample_consistent_log_prob(
          sess, vdm, radius=4., center=3., rtol=0.009)
  def testMeanCovariance(self):
    # Checks sample moments against analytical mean/covariance (3-dim event).
    with self.test_session() as sess:
      dims = 3
      vdm = vector_diffeomixture_lib.VectorDiffeomixture(
          mix_loc=[[0.], [4.]],
          mix_scale=[10.],
          distribution=normal_lib.Normal(0., 1.),
          loc=[
              np.float32([-2.]),
              None,
          ],
          scale=[
              linop_identity_lib.LinearOperatorScaledIdentity(
                  num_rows=dims,
                  multiplier=np.float32(1.5),
                  is_positive_definite=True),
              linop_diag_lib.LinearOperatorDiag(
                  diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                  is_positive_definite=True),
          ],
          validate_args=True)
      self.run_test_sample_consistent_mean_covariance(
          sess, vdm, rtol=0.02, cov_rtol=0.06)
  def testMeanCovarianceUncenteredNonStandardBase(self):
    # As above, with a non-standard base Normal(-1, 1.5) and both component
    # locations supplied explicitly; more samples for tighter tolerances.
    with self.test_session() as sess:
      dims = 3
      vdm = vector_diffeomixture_lib.VectorDiffeomixture(
          mix_loc=[[0.], [4.]],
          mix_scale=[10.],
          distribution=normal_lib.Normal(-1., 1.5),
          loc=[
              np.float32([-2.]),
              np.float32([0.]),
          ],
          scale=[
              linop_identity_lib.LinearOperatorScaledIdentity(
                  num_rows=dims,
                  multiplier=np.float32(1.5),
                  is_positive_definite=True),
              linop_diag_lib.LinearOperatorDiag(
                  diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
                  is_positive_definite=True),
          ],
          validate_args=True)
      self.run_test_sample_consistent_mean_covariance(
          sess, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025)
# TODO(jvdillon): We've tested that (i) .sample and .log_prob are consistent,
# (ii) .mean, .stddev etc... and .sample are consistent. However, we haven't
# tested that the quadrature approach well-approximates the integral.
#
# To that end, consider adding these tests:
#
# Test1: In the limit of high mix_scale, this approximates a discrete mixture,
# and there are many discrete mixtures where we can explicitly compute
# mean/var, etc... So test1 would choose one of those discrete mixtures and
# show our mean/var/etc... is close to that.
#
# Test2: In the limit of low mix_scale, the a diffeomixture of Normal(-5, 1),
# Normal(5, 1) should (I believe...must check) should look almost like
# Uniform(-5, 5), and thus (i) .prob(x) should be about 1/10 for x in (-5, 5),
# and (ii) the first few moments should approximately match that of
# Uniform(-5, 5)
#
# Test3: If mix_loc is symmetric, then for any mix_scale, our
# quadrature-based diffeomixture of Normal(-1, 1), Normal(1, 1) should have
# mean zero, exactly.
# TODO(jvdillon): Add more tests which verify broadcasting.
if __name__ == "__main__":
  # Discover and run all TensorFlow test cases defined in this module.
  test.main()
|
playbar/tfandroid
|
tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py
|
vector_diffeomixture_test.py
|
py
| 14,527
|
python
|
en
|
code
| 7
|
github-code
|
6
|
8765318097
|
""" Faça um programa que pergunte ao usuário se ele quer passar uma temperatura de Fahrenheit
para Celsius ou de Celsius para Fahrenheit, e que, a partir da resposta do usuário, faça a devida
conversão. """
conversao = input("Digite F (De C° para F°) ou C (De F° para C°)")
temp = int(input("Digite a temperatura : "))
if conversao == "f":
print(f"A temperatura em F° é {(temp*9)/5}")
else:
print(f"A temperatura em F° é {temp - 32 * 5/9}")
|
AndreDosSantosMaier/Liguagem_Programacao
|
Lista de Exercicios/Exer-16.py
|
Exer-16.py
|
py
| 473
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
20123867357
|
import requests
import json
from collections import OrderedDict
# Fetch the word list from the mnemonic API and cache the raw response.
target = "static/data/words_cached.json"
words_cached = json.loads(requests.get("http://mnemonic-si.appspot.com/api/words").text)
open(target, "w").write(json.dumps(words_cached, indent=4))
print("wrote to %s" % target)
# Index the words by name for direct lookup.
words = OrderedDict({})
for word in words_cached["result"]:
    words[word["name"]] = word
target = "static/data/words.json"
open(target, "w").write(json.dumps(words, indent=4))
print("wrote to %s" % target)
# Reverse lookup table: binary representation -> word name.
binary_to_name = OrderedDict({})
for word in words_cached["result"]:
    binary_to_name[word["binary"]] = word["name"]
target = "static/data/binary_to_name.json"
open(target, "w").write(json.dumps(binary_to_name, indent=4))
print("wrote to %s" % target)
# Reverse lookup table: decimal representation -> word name.
decimal_to_name = OrderedDict({})
for word in words_cached["result"]:
    decimal_to_name[word["decimal"]] = word["name"]
target = "static/data/decimal_to_name.json"
open(target, "w").write(json.dumps(decimal_to_name, indent=4))
print("wrote to %s" % target)
|
flowcoin/mnemonic
|
frontend/scripts/dump_words.py
|
dump_words.py
|
py
| 1,010
|
python
|
en
|
code
| 1
|
github-code
|
6
|
8056801684
|
"""
Static Pipeline representation to create a CodePipeline dedicated to building
Lambda Layers
"""
from troposphere import (
Parameter,
Template,
GetAtt,
Ref,
Sub
)
from ozone.handlers.lambda_tools import check_params_exist
from ozone.resources.iam.roles.pipeline_role import pipelinerole_build
from ozone.resources.devtools.pipeline import (
SourceAction,
BuildAction,
DeployAction,
InvokeAction,
CodePipeline
)
from ozone.outputs import object_outputs
def template(**kwargs):
    """Build the CloudFormation template for a Lambda-layer CodePipeline.

    Required kwargs: BucketName, Source, LayerBuildProjects,
    LayersMergeProject, LayerName, GeneratorFunctionName,
    CloudformationRoleArn. Returns a troposphere Template.
    """
    required_params = [
        'BucketName',
        'Source', 'LayerBuildProjects', 'LayersMergeProject',
        'LayerName', 'GeneratorFunctionName', 'CloudformationRoleArn'
    ]
    check_params_exist(required_params, kwargs)
    tpl = Template()
    token = tpl.add_parameter(Parameter(
        'GitHubOAuthToken',
        Type="String",
        NoEcho=True
    ))
    role = pipelinerole_build(
        UseCodeCommit=True,
        UseCodeBuild=True,
        UseLambda=True,
        UseCloudformation=True,
        Bucket=kwargs['BucketName']
    )
    # GitHub sources authenticate via the (NoEcho) OAuth token parameter.
    if kwargs['Source']['Provider'].lower() == 'github':
        kwargs['Source']['Config']['OAuthToken'] = Ref(token)
    source = SourceAction(
        name='SourceCode',
        provider=kwargs['Source']['Provider'],
        config=kwargs['Source']['Config']
    )
    # One build action per layer project, each consuming the source output.
    build_actions = [
        BuildAction(project_name, source.outputs, project_name)
        for project_name in kwargs['LayerBuildProjects']
    ]
    # Flatten every build artifact so the merge stage receives all of them.
    build_outputs = [
        artifact
        for action in build_actions
        for artifact in action.outputs
    ]
    merge_action = BuildAction(
        'MergeAction',
        build_outputs,
        kwargs['LayersMergeProject']
    )
    # A Lambda turns the merged artifact into a CFN template...
    invoke = InvokeAction(
        'GenerateTemplateForCfn',
        merge_action.outputs,
        function_name=kwargs['GeneratorFunctionName']
    )
    generated_artifact = invoke.outputs[0].Name
    # ...which CloudFormation then deploys as the layer stack.
    deploy = DeployAction(
        'DeployToCfn',
        invoke.outputs,
        'CloudFormation',
        StackName=f'layer-{kwargs["LayerName"]}',
        RoleArn=kwargs['CloudformationRoleArn'],
        TemplatePath=f'{generated_artifact}::tmp/template.json'
    )
    stages = [
        ('Source', [source]),
        ('BuildLayers', build_actions),
        ('MergeLayers', [merge_action]),
        ('GenerateCfnTemplate', [invoke]),
        ('DeployWithCfn', [deploy]),
    ]
    pipeline = CodePipeline(
        'Pipeline',
        GetAtt(role, 'Arn'),
        kwargs['BucketName'],
        stages
    )
    tpl.add_resource(role)
    tpl.add_resource(pipeline)
    tpl.add_output(object_outputs(pipeline, True))
    return tpl
|
lambda-my-aws/ozone
|
ozone/templates/awslambdalayer_pipeline.py
|
awslambdalayer_pipeline.py
|
py
| 2,782
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6757711914
|
import json
import sys
import os.path
from mutagen.id3 import (ID3, CTOC, CHAP, TIT2, TALB,
TPE1, COMM, USLT, APIC, CTOCFlags)
# Apply podcast metadata (title, description, cover art, chapter marks)
# from a JSON document to an MP3's ID3 tags, then print the resulting tags.
audio = ID3(sys.argv[1])
if len(sys.argv) > 2:
    # Optional second argument: JSON metadata describing the episode.
    # All tag edits are scoped under this guard so running without it
    # only prints the existing tags instead of raising NameError.
    data = json.loads(sys.argv[2])
    chapters = data["chapters"]
    ctoc_ids = list(map(lambda i: i.get("id"), chapters))
    audio.delall('TALB')
    audio["TALB"] = TALB(encoding=3, text=data["podcast_title"])
    audio.delall('TPE1')
    audio["TPE1"] = TPE1(encoding=3, text=data["podcast_title"])
    audio.delall('TIT2')
    audio["TIT2"] = TIT2(encoding=3, text=data["episode_title"])
    audio.delall('COMM')
    audio["COMM"] = COMM(encoding=3,
                         lang=u'eng',
                         text=data["episode_description"])
    audio.delall('USLT')
    audio["USLT"] = USLT(encoding=3,
                         lang=u'eng',
                         text=data["episode_description"])
    if "podcast_cover" in data and os.path.isfile(data["podcast_cover"]):
        audio.delall('APIC')
        # BUGFIX: the cover must be read in binary mode — text mode fails to
        # decode JPEG bytes on Python 3 — and the handle must be closed.
        with open(data["podcast_cover"], 'rb') as cover_file:
            cover_bytes = cover_file.read()
        audio["APIC"] = APIC(encoding=3,
                             mime='image/jpeg',
                             type=3,
                             desc=u'Cover',
                             data=cover_bytes)
    # Rebuild the table of contents (CTOC) and chapter (CHAP) frames.
    audio.delall('CTOC')
    audio.add(CTOC(element_id=u"toc",
                   flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
                   child_element_ids=ctoc_ids,
                   sub_frames=[
                       TIT2(text=[u"TOC"]),
                   ]))
    audio.delall('CHAP')
    for chapter in chapters:
        audio.add(CHAP(element_id=chapter.get("id"),
                       start_time=int(chapter.get("start")),
                       end_time=int(chapter.get("end")),
                       sub_frames=[
                           TIT2(text=[chapter.get("title")]),
                       ]))
    audio.save()
for key, value in audio.items():
    print(value.pprint())
|
lukekarrys/audiobook
|
id3.py
|
id3.py
|
py
| 1,967
|
python
|
en
|
code
| 1
|
github-code
|
6
|
74221349948
|
# Codeforces round 828, problem A: s is reproducible from a iff every
# occurrence of a value in a maps to one single letter of s.
t = int(input())
results = []
for _ in range(t):
    n = int(input())
    values = list(map(int, input().split()))
    s = input()
    first_letter = {}
    rebuilt = []
    for idx in range(n):
        value = values[idx]
        # Remember the letter paired with the first occurrence of each value.
        if value not in first_letter:
            first_letter[value] = s[idx]
        rebuilt.append(first_letter[value])
    results.append("YES" if rebuilt == list(s) else "NO")
for verdict in results:
    print(verdict)
|
El-Medonho/Contests
|
Geral/CodeForces/Contests/rnd 828/a.py
|
a.py
|
py
| 479
|
python
|
en
|
code
| 1
|
github-code
|
6
|
4993994587
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 13:04:11 2019
@author: Diego Wanderley
@python: 3.6
@description: Train script with training class
"""
import tqdm
import argparse
import torch
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from torch.autograd import Variable
from terminaltables import AsciiTable
import utils.transformations as tsfrm
from test_yolo import evaluate
from models.yolo import Darknet
from models.yolo_utils.utils import *
from utils.datasets import OvaryDataset
from utils.helper import gettrainname
class Training:
    """
    Drives YOLO training: per-epoch optimisation over the training set,
    mAP/precision/recall/F1 evaluation on the validation set, checkpoint
    saving and optional TensorBoard logging.
    """
    def __init__(self, model, device, train_set, valid_set, optim,
                class_names, train_name='yolov3', logger=None,
                iou_thres=0.5, conf_thres=0.5, nms_thres=0.5):
        '''
        Training class - Constructor

        Arguments:
            @param model: Darknet network to train
            @param device: torch device for the model and tensors
            @param train_set: training dataset
            @param valid_set: validation dataset
            @param optim: optimizer bound to the model parameters
            @param class_names: class label strings (index == class id)
            @param train_name: run name, expected shape '<date>_<time>_<model...>'
            @param logger: optional TensorBoard SummaryWriter
            @param iou_thres: IoU required to count a detection (evaluation)
            @param conf_thres: objectness confidence threshold (evaluation)
            @param nms_thres: IoU threshold for non-maximum suppression
        '''
        self.model = model
        self.device = device
        self.train_set = train_set
        self.valid_set = valid_set
        self.optimizer = optim
        self.train_name = train_name
        # Drop the leading '<date>_<time>_' prefix to recover the model name.
        self.model_name = "_".join(train_name.split('_')[2:])
        self.logger = logger
        self.class_names = class_names
        # Number of batches accumulated before each optimizer step.
        self.gradient_accumulations = 2
        self.iou_thres = iou_thres
        self.conf_thres = conf_thres
        self.nms_thres = nms_thres
        # Per-YOLO-layer metrics collected during training.
        self.metrics = [
            "grid_size",
            "loss",
            "x",
            "y",
            "w",
            "h",
            "conf",
            "cls",
            "cls_acc",
            "recall50",
            "recall75",
            "precision",
            "conf_obj",
            "conf_noobj",
        ]
        self.epoch = 0
    def _saveweights(self, state, log=None):
        '''
        Save network weights (and optionally a text training report).

        Arguments:
            @state (dict): parameters and metadata of the network
            @log (str): optional ASCII report stored next to the weights
        '''
        path = '../weights/'
        filename = path + self.train_name + '_weights.pth.tar'
        torch.save(state, filename)
        # Save log table alongside the checkpoint, closing the file reliably.
        if isinstance(log, str):
            logname = filename.replace('.pth.tar', '.log')
            logname = logname.replace('_weights', '_train')
            with open(logname, "w") as log_file:
                log_file.write(log)
    def _iterate_train(self, data_loader):
        '''
        Run one full pass over the training data.

        Returns the batch-size-weighted average loss of the epoch and
        accumulates per-layer metrics into self.avg_metrics.
        '''
        # Init loss count
        total_loss = 0
        data_train_len = len(self.train_set)
        # Active train
        self.model.train()
        self.model = self.model.to(self.device)
        # Batch iteration - Training dataset
        for batch_idx, (names, imgs, targets) in enumerate(tqdm.tqdm(data_loader, desc="Training epoch")):
            batches_done = len(data_loader) * self.epoch + batch_idx
            targets = Variable(targets.to(self.device), requires_grad=False)
            imgs = Variable(imgs.to(self.device))
            bs = len(imgs)
            # Forward and loss
            loss, output = self.model(imgs, targets=targets)
            loss.backward()
            if batches_done % self.gradient_accumulations:
                # Accumulates gradient before each step
                self.optimizer.step()
                self.optimizer.zero_grad()
            self.model.seen += imgs.size(0)
            # Log metrics at each YOLO layer, weighted by batch fraction.
            batch_factor = bs / data_train_len
            for metric in self.metrics:
                out_metrics = [(yolo.metrics.get(metric, 0) * batch_factor) for yolo in self.model.yolo_layers]
                # Fill average
                for j in range(len(self.avg_metrics[metric])):
                    self.avg_metrics[metric][j] += out_metrics[j]
            total_loss += loss.item() * batch_factor
        return total_loss
    def _logging(self, epoch, avg_loss_train, val_evaluation):
        '''
        Push scalar metrics and parameter histograms to TensorBoard.

        Arguments:
            @param epoch: zero-based epoch index (logged as epoch + 1)
            @param avg_loss_train: average training loss of the epoch
            @param val_evaluation: list of (tag, value) validation metrics
        '''
        # 1. Log scalar values (scalar summary)
        info = val_evaluation
        info.append(('train_loss_total', avg_loss_train))
        for tag, value in info:
            self.logger.add_scalar(tag, value, epoch+1)
        # 2. Log values and gradients of the parameters (histogram summary)
        for yolo_tag, value in self.model.named_parameters():
            # Define tag name
            tag_parts = yolo_tag.split('.')
            tag = self.model_name + '/' + tag_parts[-2] + '/' + tag_parts[-1]
            # Ignore bias from batch normalization
            if (not 'batch_norm' in tag_parts[-2]) or (not 'bias' in tag_parts[-1]):
                # add data to histogram
                self.logger.add_histogram(tag, value.data.cpu().numpy(), epoch+1)
                # add gradient if exist
                #if not value.grad is None:
                #    self.logger.add_histogram(tag +'/grad', value.grad.data.cpu().numpy(), epoch+1)
    def train(self, epochs=100, batch_size=4):
        '''
        Train network function

        Arguments:
            @param epochs: number of training epochs (int)
            @param batch_size: batch size (int)
        '''
        # Load Dataset
        data_loader_train = DataLoader(self.train_set, batch_size=batch_size, shuffle=True,
                                    collate_fn=self.train_set.collate_fn_yolo)
        data_loader_val = DataLoader(self.valid_set, batch_size=1, shuffle=False,
                                    collate_fn=self.valid_set.collate_fn_yolo)
        # Define parameters
        best_loss = 1000000 # Init best loss with a too high value
        best_ap = 0         # Init best average precision as zero
        # Run epochs
        for e in range(epochs):
            self.epoch = e
            print('Starting epoch {}/{}.'.format(self.epoch + 1, epochs))
            log_str = ''
            # BUGFIX: use self.model here — the original referenced the global
            # 'model', which only exists when this file runs as a script.
            metric_table = [["Metrics", *["YOLO Layer " + str(i) for i in range(len(self.model.yolo_layers))]]]
            self.avg_metrics = { i : [0]*len(self.model.yolo_layers) for i in self.metrics }
            # ========================= Training =============================== #
            loss_train = self._iterate_train(data_loader_train)
            # Log metrics at each YOLO layer (dead 'formats' table removed).
            for metric in self.metrics:
                row_metrics = self.avg_metrics[metric]
                metric_table += [[metric, *row_metrics]]
            log_str += AsciiTable(metric_table).table
            log_str += "\nTotal loss: %0.5f"%loss_train
            print(log_str)
            print('')
            # ========================= Validation ============================= #
            precision, recall, AP, f1, ap_class = evaluate(self.model,
                                                        data_loader_val,
                                                        self.iou_thres,
                                                        self.conf_thres,
                                                        self.nms_thres,
                                                        self.device)
            # Group metrics
            evaluation_metrics = [
                ("val_precision", precision.mean()),
                ("val_recall", recall.mean()),
                ("val_mAP", AP.mean()),
                ("val_f1", f1.mean()),
            ]
            # Print class APs and mAP
            ap_table = [["Index", "Class name", "AP"]]
            for i, c in enumerate(ap_class):
                ap_table += [[c, self.class_names[c], "%.5f" % AP[i]]]
            print(AsciiTable(ap_table).table)
            print("mAP: "+ str(AP.mean()))
            print('\n')
            # ======================== Save weights ============================ #
            best_loss = min(best_loss, loss_train)
            is_best = AP.mean() >= best_ap
            if is_best:
                best_ap = AP.mean()
            # Checkpoint every epoch; best_ap only tracks the best mAP so far.
            self._saveweights({
                'epoch': self.epoch + 1,
                'state_dict': self.model.state_dict(),
                'train_loss_total': loss_train,
                'train_best_loss': best_loss,
                'val_precision': precision.mean(),
                'val_recall': recall.mean(),
                'val_mAP': AP.mean(),
                'val_f1': f1.mean(),
                'batch_size': batch_size,
                'optimizer': str(self.optimizer),
                'optimizer_dict': self.optimizer.state_dict(),
                'device': str(self.device),
                'avg_metrics': self.avg_metrics,
                'iou_thres': self.iou_thres,
                'conf_thres': self.conf_thres,
                'nms_thres': self.nms_thres
                },
                log=log_str )
            print('Model {:s} updated!'.format(self.train_name))
            print('\n')
            # ====================== Tensorboard Logging ======================= #
            if self.logger:
                self._logging(self.epoch, loss_train, evaluation_metrics)
def parse_yolo_name(backbone_name, num_anchors, num_classes):
    """
    Compose the YOLOv3 .cfg base name from the backbone variant,
    anchor count and class count (e.g. 'yolov3-tiny_a6_c3').
    """
    if 'tiny' in backbone_name:
        variant = 'yolov3-tiny'
    elif 'spp' in backbone_name:
        variant = 'yolov3-spp'
    else:
        variant = 'yolov3'
    return '{}_a{}_c{}'.format(variant, num_anchors, num_classes)
# Script entry point: parse CLI options, build the model and datasets,
# then launch the Training loop defined above.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Training parameters
    parser.add_argument("--batch_size", type=int, default=4, help="size of each image batch")
    parser.add_argument("--num_epochs", type=int, default=150, help="size of each image batch")
    parser.add_argument("--model_name", type=str, default="yolov3", help="name of the model definition (used to load the .cfg file)")
    parser.add_argument("--num_anchors", type=int, default=6, help="number of anchors")
    parser.add_argument("--num_classes", type=int, default=3, help="number of classes")
    # Evaluation parameters
    parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
    parser.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
    opt = parser.parse_args()
    print(opt)
    # Classes names
    cls_names = ['background','follicle','ovary']
    # Input parameters
    n_classes = opt.num_classes
    # The third class ('ovary') is only used when more than 2 classes are requested.
    has_ovary = True if n_classes > 2 else False
    n_epochs = opt.num_epochs
    batch_size = opt.batch_size
    network_name = parse_yolo_name(opt.model_name, opt.num_anchors, n_classes)
    train_name = gettrainname(network_name)
    mode_config_path = 'config/'+ network_name +'.cfg'
    # Load network model
    model = Darknet(mode_config_path)
    # Load CUDA if exist
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Transformation parameters (training-time augmentation only)
    transform = tsfrm.Compose([tsfrm.RandomHorizontalFlip(p=0.5),
                    tsfrm.RandomVerticalFlip(p=0.5),
                    tsfrm.RandomAffine(90, translate=(0.15, 0.15),
                    scale=(0.75, 1.5), resample=3, fillcolor=0)
                    ])
    # Dataset definitions (validation uses transform=False: no augmentation)
    dataset_train = OvaryDataset(im_dir='../datasets/ovarian/im/train/',
                        gt_dir='../datasets/ovarian/gt/train/',
                        clahe=False, transform=transform,
                        ovary_inst=has_ovary)
    dataset_val = OvaryDataset(im_dir='../datasets/ovarian/im/val/',
                        gt_dir='../datasets/ovarian/gt/val/',
                        clahe=False, transform=False,
                        ovary_inst=has_ovary)
    # Optmization
    optimizer = optim.Adam(model.parameters())
    # Set logs folder
    log_dir = '../logs/' + train_name + '/'
    writer = SummaryWriter(log_dir=log_dir)
    # Run training
    training = Training(model, device, dataset_train, dataset_val,
                        optimizer,
                        logger=writer,
                        class_names=cls_names[:n_classes],
                        train_name=train_name,
                        iou_thres=opt.iou_thres,
                        conf_thres=opt.conf_thres,
                        nms_thres=opt.nms_thres)
    training.train(epochs=n_epochs, batch_size=batch_size)
    print('')
|
dswanderley/detntorch
|
python/train_yolo.py
|
train_yolo.py
|
py
| 12,881
|
python
|
en
|
code
| 1
|
github-code
|
6
|
21041808334
|
"""Pytorch dataset module"""
import json
from glob import glob
from pathlib import Path
import albumentations as A
import cv2
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
from torch import Tensor
from torch.utils.data import Dataset
from data.config import DataConfig, keypoint_indices
# Validation pipeline: deterministic resize-and-pad to a square IMAGE_SIZE
# canvas, then normalize and convert to a CHW tensor.  Keypoints (xy) and
# pascal_voc bboxes are remapped alongside the image.
val_transforms = A.Compose(
    [
        A.LongestMaxSize(max_size=DataConfig.IMAGE_SIZE),
        A.PadIfNeeded(
            min_height=DataConfig.IMAGE_SIZE,
            min_width=DataConfig.IMAGE_SIZE,
            border_mode=cv2.BORDER_REPLICATE,
        ),
        A.Normalize(),
        ToTensorV2(),
    ],
    keypoint_params=A.KeypointParams(format='xy', remove_invisible=False),
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['classes']),
)
# Training pipeline: random spatial and photometric augmentation followed by
# the same normalization / tensor conversion as validation.
train_transforms = A.Compose(
    [
        # spatial
        A.HorizontalFlip(),
        A.VerticalFlip(),
        A.Affine(mode=cv2.BORDER_REPLICATE),
        A.Perspective(pad_mode=cv2.BORDER_REPLICATE),
        A.Rotate(limit=30, border_mode=cv2.BORDER_REPLICATE),
        A.SmallestMaxSize(max_size=320),
        A.RandomScale(scale_limit=.1),
        A.RandomCrop(
            height=DataConfig.IMAGE_SIZE,
            width=DataConfig.IMAGE_SIZE,
        ),
        # pixel level
        A.RandomBrightnessContrast(p=.15),
        A.AdvancedBlur(p=.15),
        A.ChannelShuffle(p=.15),
        A.MedianBlur(p=.15),
        A.Posterize(p=.15),
        A.Solarize(p=.015),  # NOTE(review): p=.015 is 10x lower than the others — confirm intentional
        # format data
        A.Normalize(),
        ToTensorV2(),
    ],
    keypoint_params=A.KeypointParams(format='xy', remove_invisible=False),
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['classes']),
)
class DeepFashion2Dataset(Dataset):
    """DeepFashion2 detection/keypoint dataset.

    Reads ``image/NNNNNN.jpg`` / ``annos/NNNNNN.json`` pairs under
    *base_path* and returns per-sample tensors (image, classes, bboxes,
    keypoints, visibilities), each padded to *max_objects* entries so
    samples can be stacked into batches.
    """
    def __init__(
            self,
            base_path: str,
            transforms: A.Compose,
            max_objects: int,
    ) -> None:
        super().__init__()
        # NOTE(review): base_path is wrapped in Path twice — the local
        # rebinding below is redundant.
        base_path = Path(base_path)
        self._base_path = Path(base_path)
        # Dataset length = number of jpg files present on disk.
        self._length = len(glob(str(self._base_path / 'image/*.jpg')))
        self._transforms = transforms
        self._max_objects = max_objects
    def __len__(self) -> int:
        return self._length
    def _pad_classes(self, classes: list[int]) -> Tensor:
        # Pad the class-id vector with zeros (background) up to max_objects.
        classes = torch.LongTensor(classes)
        classes = torch.cat(
            [
                classes,
                torch.zeros(
                    self._max_objects - classes.size(0),
                    dtype=torch.int32,
                ),
            ],
        )
        return classes
    def _pad_bboxes(self, bboxes: list[tuple[float]]) -> Tensor:
        # Clip boxes to the canvas, normalize to [0, 1] and zero-pad rows
        # up to max_objects.
        bboxes = torch.FloatTensor(bboxes).clip(0, DataConfig.IMAGE_SIZE)
        bboxes /= DataConfig.IMAGE_SIZE
        bboxes = torch.cat(
            [
                bboxes,
                torch.zeros(
                    (self._max_objects - bboxes.size(0), 4),
                    dtype=torch.float32,
                ),
            ],
        )
        return bboxes
    def _pad_keypoints(
            self,
            keypoints: list[list[tuple[float]]],
            classes: Tensor,
    ) -> Tensor:
        # Clip and normalize each object's keypoints to [0, 1].
        keypoints = [
            (
                torch.FloatTensor(keypoint).clip(0, DataConfig.IMAGE_SIZE)
                / DataConfig.IMAGE_SIZE
            )
            for keypoint
            in keypoints
        ]
        result = torch.zeros(
            (self._max_objects, DataConfig.NUM_KEYPOINTS, 2),
            dtype=torch.float32,
        )
        # Scatter each object's keypoints into the category's slot range;
        # a class id of 0 marks padding, so the loop stops there.
        for i, (class_, keypoint) in enumerate(zip(classes, keypoints)):
            class_ = class_.item()
            if class_ == 0:
                break
            start, end = keypoint_indices[class_]
            result[i, start:end] = keypoint
        return result
    def _pad_visibilities(
            self,
            visibilities: list[np.ndarray],
            classes: Tensor,
    ) -> Tensor:
        # Visibility flags arrive as {0, 1, 2}; dividing by 2 maps them to
        # [0, 1] floats.
        visibilities = [
            torch.FloatTensor(visibility).reshape(-1, 1) / 2.
            for visibility
            in visibilities
        ]
        result = torch.zeros(
            (self._max_objects, DataConfig.NUM_KEYPOINTS, 1),
            dtype=torch.float32,
        )
        # Same scatter scheme as _pad_keypoints; class 0 terminates the loop.
        for i, (class_, visibility) in enumerate(zip(classes, visibilities)):
            class_ = class_.item()
            if class_ == 0:
                break
            start, end = keypoint_indices[class_]
            result[i, start:end] = visibility
        return result
    def _getitem(self, index: int) -> tuple[Tensor]:
        # Load, augment and tensorize sample *index* (0-based; files on
        # disk are numbered from 1).
        # create paths
        image_path = self._base_path / f'image/{index + 1:06d}.jpg'
        annotation_path = self._base_path / f'annos/{index + 1:06d}.json'
        # load image and annotation
        image = cv2.imread(str(image_path), cv2.IMREAD_COLOR)
        # NOTE(review): imread yields BGR; COLOR_RGB2BGR performs the same
        # channel swap as COLOR_BGR2RGB, so this produces RGB — confirm intent.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        with open(annotation_path) as f:
            annotation = json.load(f)
        # restructure annotation: keep only the itemN entries, splitting the
        # flat landmark triplets into (x, y) pairs and visibility flags
        annotation = [
            {
                'bbox': v['bounding_box'],
                'class': v['category_id'],
                'keypoints': np.array(v['landmarks']).reshape(-1, 3)[:, :2],
                'visibilities': np.array(v['landmarks']).reshape(-1, 3)[:, 2],
            }
            for k, v in annotation.items()
            if k.startswith('item')
        ]
        # create keypoint, bbox, and classes lists. (pack keypoints)
        bboxes = [item['bbox'] for item in annotation]
        keypoints = np.concatenate([item['keypoints'] for item in annotation])
        keypoints_border = [item['keypoints'].shape[0] for item in annotation]
        classes = [item['class'] for item in annotation]
        visibilities = [item['visibilities'] for item in annotation]
        # apply transform (keypoints must be packed flat for albumentations)
        transformed = self._transforms(
            image=image,
            bboxes=bboxes,
            keypoints=keypoints,
            classes=classes,
        )
        # separate transformed results
        image = transformed['image']
        bboxes = transformed['bboxes']
        keypoints = transformed['keypoints']
        classes = transformed['classes']
        # unpack keypoints back into per-object groups using the recorded borders
        keypoints_border = np.cumsum([0] + keypoints_border)
        iterator = zip(keypoints_border[:-1], keypoints_border[1:])
        keypoints = [keypoints[start:end] for start, end in iterator]
        # normalize and fix length of classes, bboxes, keypoints,
        # and visibilities
        classes = self._pad_classes(classes)
        bboxes = self._pad_bboxes(bboxes)
        keypoints = self._pad_keypoints(keypoints, classes)
        visibilities = self._pad_visibilities(visibilities, classes)
        return image, classes, bboxes, keypoints, visibilities
    def __getitem__(self, index: int) -> tuple[Tensor]:
        # Best-effort fetch: on any failure fall back to the next sample.
        # NOTE(review): if every sample fails this recurses without bound.
        try:
            return self._getitem(index)
        except Exception:
            return self[(index + 1) % len(self)]
if __name__ == '__main__':
    # Smoke test: build the dataset with training augmentation and dump
    # the first (augmented) image to /tmp for visual inspection.
    ds = DeepFashion2Dataset(
        base_path='/home/aj/data/DeepFashion2/validation',
        transforms=train_transforms,
        # transforms=val_transforms,
        max_objects=10,
    )
    image, classes, bboxes, keypoints, visibilities = ds[0]
    from torchvision.utils import save_image
    save_image(image, '/tmp/tmp.png')
|
mohamad-hasan-sohan-ajini/deep_fashion_2
|
data/data_pt.py
|
data_pt.py
|
py
| 7,291
|
python
|
en
|
code
| 1
|
github-code
|
6
|
69936276028
|
import torch.nn as nn
import torch.optim as optimizers
from nlp.generation.models import CharLSTM
class CharLSTMTrainer:
    """Fits a character-level LSTM with Adam and cross-entropy loss."""
    def __init__(self,
                 model: CharLSTM,
                 vocab_size: int,
                 learning_rate: float = 1e-3,
                 weights_decay: float = 1e-3,
                 epochs: int = 1,
                 logging_level: int = 0):
        self.vocab_size = vocab_size
        self.logging_level = logging_level
        # nn.Module.train() switches to training mode and returns the module.
        self.model = model.train()
        self.epochs = epochs
        self.learning_rate = learning_rate
        self._loss = nn.CrossEntropyLoss()
        self._optimizer = optimizers.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=weights_decay)
    def train(self, text_dataloader):
        """Iterate `epochs` times over the dataloader, one Adam step per batch."""
        for _ in range(self.epochs):
            for batch_inputs, batch_targets in text_dataloader:
                self._optimizer.zero_grad()
                logits = self.model(batch_inputs)
                # transpose(1, 2) moves the class axis to dim 1 as
                # CrossEntropyLoss expects — assumes model output is
                # (batch, seq, vocab); confirm against CharLSTM.
                batch_loss = self._loss(logits.transpose(1, 2), batch_targets)
                batch_loss.backward()
                self._optimizer.step()
|
Danielto1404/bachelor-courses
|
python-backend/projects/nlp.ai/nlp/generation/trainers.py
|
trainers.py
|
py
| 1,129
|
python
|
en
|
code
| 5
|
github-code
|
6
|
25442443781
|
import pygame
from . import view
from . import render
from . import callback
from . import button
HORIZONTAL = 0  # scrollbar orientation along the x axis
VERTICAL = 1    # scrollbar orientation along the y axis
SCROLLBAR_SIZE = 12  # bar thickness / square thumb side, in pixels
class ScrollbarThumbView(view.View):
    """The draggable square thumb inside a scrollbar track."""
    def __init__(self, direction):
        view.View.__init__(
            self, pygame.Rect(0, 0, SCROLLBAR_SIZE, SCROLLBAR_SIZE))
        self.direction = direction
        self.draggable = True
    def key_down(self, key, code):
        """Translate arrow-key presses into one-pixel synthetic drags."""
        if self.direction == VERTICAL:
            key_deltas = {pygame.K_DOWN: (0, 1), pygame.K_UP: (0, -1)}
        else:
            key_deltas = {pygame.K_RIGHT: (1, 0), pygame.K_LEFT: (-1, 0)}
        if key in key_deltas:
            self.mouse_drag((0, 0), key_deltas[key])
class ScrollbarView(view.View):
    """A scrollbar track holding a draggable thumb for a ScrollView."""
    def __init__(self, scroll_view, direction):
        """Create a scrollbar for the given scrollable view."""
        # The track runs along one edge of the scroll view, leaving a
        # SCROLLBAR_SIZE square free in the corner for the other bar.
        if direction == VERTICAL:
            height = scroll_view.frame.h - SCROLLBAR_SIZE
            frame = pygame.Rect(0, 0, SCROLLBAR_SIZE, height)
            frame.right = scroll_view.frame.w
        else:
            width = scroll_view.frame.w - SCROLLBAR_SIZE
            frame = pygame.Rect(0, 0, width, SCROLLBAR_SIZE)
            frame.bottom = scroll_view.frame.h
        view.View.__init__(self, frame)
        self.direction = direction
        self.scroll_view = scroll_view
        self.thumb = ScrollbarThumbView(self.direction)
        self.add_child(self.thumb)
    def layout(self):
        # Recompute the thumb geometry before delegating layout upward.
        self._update_thumb()
        self.thumb.layout()
        view.View.layout(self)
    def _update_thumb(self):
        """Clamp the thumb to the track, push the resulting offset to the
        scroll view, and resize the thumb to the visible/content ratio."""
        # Clamp the thumb inside the track on both axes.
        self.thumb.frame.top = max(0, self.thumb.frame.top)
        self.thumb.frame.bottom = min(self.frame.bottom,
                                      self.thumb.frame.bottom)
        self.thumb.frame.left = max(0, self.thumb.frame.left)
        self.thumb.frame.right = min(self.frame.right, self.thumb.frame.right)
        # Center the thumb across the bar's thickness.
        if self.direction == VERTICAL:
            self.thumb.frame.centerx = SCROLLBAR_SIZE // 2
        else:
            self.thumb.frame.centery = SCROLLBAR_SIZE // 2
        # Derive the fractional offset from the thumb position and forward
        # it to the scroll view, then size the thumb proportionally to the
        # visible fraction of the content.
        if self.direction == VERTICAL:
            self.frame.right = self.scroll_view.frame.w
            off_x = self.scroll_view._content_offset[0]
            off_y = self.thumb.frame.top / float(self.frame.h)
            self.scroll_view.set_content_offset(off_x, off_y)
            percentage = (self.scroll_view.frame.h / float(self.scroll_view.content_view.frame.h))
            self.thumb.frame.h = self.frame.h * percentage
            # self.hidden = (percentage >= 1)
        else:
            self.frame.bottom = self.scroll_view.frame.h
            off_x = self.thumb.frame.left / float(self.frame.w)
            off_y = self.scroll_view._content_offset[1]
            self.scroll_view.set_content_offset(off_x, off_y)
            percentage = (self.scroll_view.frame.w /
                          float(self.scroll_view.content_view.frame.w))
            self.thumb.frame.w = self.frame.w * percentage
            self.hidden = (percentage >= 1)
        # If only this bar is visible, let it occupy the full edge
        # (no corner gap is needed for the other bar).
        if (self.direction == VERTICAL and
                self.scroll_view.hscrollbar.hidden and not self.scroll_view.vscrollbar.hidden):
            self.frame.h = self.scroll_view.frame.h
        elif (self.direction == HORIZONTAL and
                self.scroll_view.vscrollbar.hidden and not self.scroll_view.hscrollbar.hidden):
            self.frame.w = self.scroll_view.frame.w
        self.updated = True
    #def draw(self, force=False):
    #    if view.View.draw(self, force):
    #        print self.__class__.__name__ + '::draw'
    def _child_dragged(self, child):
        # Only the thumb is draggable; a drag means the bar must re-layout.
        assert child == self.thumb
        self.layout()
    # Jump to offset at clicked point; does not allow dragging
    # without reclicking thumb
    def mouse_down(self, button, point):
        if self.direction == VERTICAL:
            self.thumb.frame.top = point[1]
            self._update_thumb()
        else:
            self.thumb.frame.left = point[0]
            self._update_thumb()
class VBar(ScrollbarView):
    """Vertical-only scrollbar variant that reserves btn_size pixels at each
    end of the track (originally for page up/down buttons)."""
    def __init__(self, scroll_view):
        self.btn_size = 45  # vertical space reserved at each end of the track
        height = scroll_view.frame.h - SCROLLBAR_SIZE - self.btn_size * 2
        frame = pygame.Rect(0, self.btn_size, SCROLLBAR_SIZE, height)
        #frame.bottom = frame.bottom - self.btn_size
        frame.right = scroll_view.frame.w
        view.View.__init__(self, frame)
        self.direction = VERTICAL
        self.scroll_view = scroll_view
        self.thumb = ScrollbarThumbView(self.direction)
        self.add_child(self.thumb)
    def _update_thumb(self):
        """Vertical-only version of ScrollbarView._update_thumb with the
        button margins taken into account."""
        self.thumb.frame.top = max(0, self.thumb.frame.top)
        self.thumb.frame.bottom = min(self.frame.bottom-self.btn_size, self.thumb.frame.bottom) #dunno why I had to subtract btn_size here
        self.thumb.frame.left = max(0, self.thumb.frame.left)
        self.thumb.frame.right = min(self.frame.right, self.thumb.frame.right)
        self.thumb.frame.centerx = SCROLLBAR_SIZE // 2
        self.frame.right = self.scroll_view.frame.w
        # Forward the thumb position to the scroll view as a fraction.
        off_x = self.scroll_view._content_offset[0]
        off_y = self.thumb.frame.top / float(self.frame.h)
        self.scroll_view.set_content_offset(off_x, off_y)
        # Guard against empty content (zero height) before dividing.
        percentage = (self.scroll_view.frame.h / float(self.scroll_view.content_view.frame.h)) if (self.scroll_view.content_view.frame.h > 0) else 0
        #print 'VBar::_update_thumb pct:' + str(percentage) + ' h: ' + str(self.thumb.frame.h)
        self.thumb.frame.h = self.frame.h * percentage
        if (self.scroll_view.vscrollbar.hidden): # and not self.scroll_view.hscrollbar.hidden):
            self.frame.h = self.scroll_view.frame.h - self.btn_size * 2
        #print '\t thmb y: ' + str(self.thumb.frame.top)
class ScrollView(view.View):
    """A view that scrolls a content view.

    The given frame is the visible area; the view itself is enlarged by
    SCROLLBAR_SIZE on each axis to make room for both scrollbars.

    Signals
        on_scrolled(scroll_view)
            content offset was updated.
    """
    def __init__(self, frame, content_view):
        width = frame.size[0] + SCROLLBAR_SIZE
        height = frame.size[1] + SCROLLBAR_SIZE
        rect = pygame.Rect(frame.topleft, (width, height))
        view.View.__init__(self, rect)
        self.on_scrolled = callback.Signal()
        self.content_view = content_view
        # (x, y) scroll position, each as a fraction in [0, 1].
        self._content_offset = (0, 0)
        self.add_child(self.content_view)
        self.hscrollbar = ScrollbarView(self, HORIZONTAL)
        self.vscrollbar = ScrollbarView(self, VERTICAL)
        self.add_child(self.hscrollbar)
        self.add_child(self.vscrollbar)
    def update_content_view(self, content_view):
        # Swap the scrolled content for a new view, discarding the old one.
        self.rm_child(self.content_view)
        self.add_child(content_view)
        self.content_view = content_view
        #self.stylize()
    def layout(self):
        self.hscrollbar.layout()
        self.vscrollbar.layout()
        view.View.layout(self)
    def set_content_offset(self, percent_w, percent_h,
                           update_scrollbar_size=True):
        """Scroll to the given fractional offsets (clamped to [0, 1])."""
        self._content_offset = (min(1, max(0, percent_w)),
                                min(1, max(0, percent_h)))
        # Move the content opposite to the scroll direction.
        self.content_view.frame.topleft = (
            -self._content_offset[0] * self.content_view.frame.w,
            -self._content_offset[1] * self.content_view.frame.h)
        if update_scrollbar_size:
            # NOTE(review): these set attributes on the thumb view object,
            # not on thumb.frame as the rest of the code does — confirm
            # whether thumb.frame.centery/centerx was intended.
            self.vscrollbar.thumb.centery = percent_h * self.vscrollbar.frame.h
            self.hscrollbar.thumb.centerx = percent_w * self.hscrollbar.frame.w
        self.on_scrolled(self)
    def draw(self, force=False):
        if not view.View.draw(self, force):
            return False
        #print self.__class__.__name__ + '::draw'
        # Paint the square corner gap where the two visible bars meet.
        if not self.vscrollbar.hidden and not self.hscrollbar.hidden:
            hole = pygame.Rect(self.vscrollbar.frame.left, self.vscrollbar.frame.bottom, SCROLLBAR_SIZE, SCROLLBAR_SIZE)
            render.fillrect(self.surface, self.hole_color, hole)
        return True
"""
ScrollBox
"""
class ScrollBox(ScrollView):
    """A vertical-only scroll container using a VBar; the bar is hidden
    whenever the content fits inside the visible frame."""
    def __init__(self, frame, content_view):
        self.btn_size = 45  # matches VBar's reserved button margin
        width = frame.size[0] + SCROLLBAR_SIZE
        height = frame.size[1] + SCROLLBAR_SIZE
        rect = pygame.Rect(frame.topleft, (width, height))
        view.View.__init__(self, rect)
        self.on_scrolled = callback.Signal()
        self._content_offset = (0, 0)
        self.content_view = content_view
        self.vscrollbar = VBar(self)
        """
        icon_down = button.IconButton(
            pygame.Rect(width - self.btn_size,
                        0,
                        self.btn_size,
                        self.btn_size),
            'chevron-down',
            12)
        icon_up = button.IconButton(
            pygame.Rect(width - self.btn_size,
                        height - self.btn_size,
                        self.btn_size,
                        self.btn_size),
            'chevron-up',
            12)
        icon_down.tag_name = 'Down'
        icon_up.tag_name ='Up'
        icon_down.on_clicked.connect(self.on_page_nav_clicked)
        icon_up.on_clicked.connect(self.on_page_nav_clicked)
        self.icon_down = icon_down
        self.icon_up = icon_up
        self.add_child(self.icon_down)
        self.add_child(self.icon_up)
        """
        self.add_child(self.content_view)
        self.add_child(self.vscrollbar)
        # scrolled: True once the view has moved away from the top.
        self.scrolled = False
        # The bar is only useful when the content overflows the frame.
        self.scrollable = True if self.content_view.frame.height > self.frame.height else False
        if not self.scrollable:
            self.vscrollbar.hidden = True
    def on_page_nav_clicked(self, btn, mouse_btn):
        # Placeholder for the disabled page up/down buttons (see the
        # commented-out construction in __init__).
        pass
        """
        idx = 1
        if btn == self.page_down:
            for button in self.child_btns[self.current_child_index][idx:]:
                btn_y = button.frame.top + self.label_height
                offset = self.current_child.content_view.frame.top
                btn_vy = btn_y + offset
                if btn_vy > self.current_child.frame.height:
                    #self.active_btn.state = 'normal'
                    #self.active_btn = button
                    pct = (self.current_child.frame.height - self.label_height) / float(self.current_child.content_view.frame.h)
                    self.current_child.do_scroll(pct, 'down')
                    #self.active_btn.state = 'focused'
                    #self.active_btn_index = idx
                    #self.sibling_active = False
                    break
                idx += 1
        else:
            for button in self.child_btns[self.current_child_index][idx:]:
                btn_y = button.frame.top + self.label_height
                offset = self.current_child.content_view.frame.top
                btn_vy = btn_y + offset
                if btn_vy < 0:
                    #self.active_btn.state = 'normal'
                    #self.active_btn = button
                    pct = (self.current_child.frame.height - self.label_height) / float(self.current_child.content_view.frame.h)
                    self.current_child.do_scroll(pct, 'up')
                    #self.active_btn.state = 'focused'
                    #self.active_btn_index = idx
                    #self.sibling_active = False
                    break
                idx += 1
        """
    def update_content_view(self, content_view):
        # Replace the content and re-evaluate whether the bar is needed.
        self.rm_child(self.content_view)
        self.add_child(content_view)
        self.content_view = content_view
        self.scrollable = True if self.content_view.frame.height > self.frame.height else False
        if not self.scrollable:
            self.vscrollbar.hidden = True
        else:
            self.vscrollbar.hidden = False
    def layout(self):
        self.vscrollbar.layout()
        view.View.layout(self)
    def do_scroll(self, pct, dir):
        """Move the thumb by pct of the track height in direction 'up'/'down'
        and let the bar propagate the new offset."""
        change = pct * self.vscrollbar.frame.h
        if dir == 'up':
            self.vscrollbar.thumb.frame.top = self.vscrollbar.thumb.frame.top - change
            self.vscrollbar._update_thumb()
        else:
            self.vscrollbar.thumb.frame.top = self.vscrollbar.thumb.frame.top + change
            self.vscrollbar._update_thumb()
        self.updated = True
    def set_content_offset(self, percent_w, percent_h,
                           update_scrollbar_size=True):
        """Vertical variant of ScrollView.set_content_offset that also
        tracks whether the view is scrolled away from the top."""
        self._content_offset = (min(1, max(0, percent_w)),
                                min(1, max(0, percent_h)))
        self.content_view.frame.topleft = (
            -self._content_offset[0] * self.content_view.frame.w,
            -self._content_offset[1] * self.content_view.frame.h)
        if update_scrollbar_size:
            # NOTE(review): sets thumb.centery on the view object, not on
            # thumb.frame — same pattern as ScrollView; confirm intent.
            self.vscrollbar.thumb.centery = percent_h * self.vscrollbar.frame.h
        if self._content_offset[1] != 0:
            self.scrolled = True
        else:
            self.scrolled = False
        self.on_scrolled(self)
    def draw(self, force=False):
        if not view.View.draw(self, force):
            return False
        # Paint the corner square below the vertical bar.
        if not self.vscrollbar.hidden:
            hole = pygame.Rect(self.vscrollbar.frame.left, self.vscrollbar.frame.bottom, SCROLLBAR_SIZE, SCROLLBAR_SIZE)
            render.fillrect(self.surface, self.hole_color, hole)
        return True
return True
"""
ScrollList
"""
class ScrollList(ScrollBox):
    """A ScrollBox specialised for holding a vertical list of item views."""
    def __init__(self, frame, content_frame):
        ScrollBox.__init__(self, frame, view.View(content_frame))
        self.active_idx = 0
    def empty_list(self):
        """Drop every list item and reset the selection/scroll state."""
        del self.content_view.children[:]
        self.active_idx = 0
        self.scrollable = False
        self.vscrollbar.hidden = True
    def add_list_item(self, item):
        """Append *item*; show the scrollbar once the content overflows."""
        self.content_view.add_child(item)
        overflows = self.content_view.frame.height > self.frame.height
        self.scrollable = overflows
        self.vscrollbar.hidden = not overflows
|
jwayneroth/mpd-touch
|
pygameui/scroll.py
|
scroll.py
|
py
| 14,197
|
python
|
en
|
code
| 5
|
github-code
|
6
|
71943493307
|
import csv
import math
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import chi2, f_regression, mutual_info_regression
def mylog(x):
    """Natural log that maps 0 to a large negative sentinel instead of raising."""
    return -10000000000 if x == 0 else math.log(x)
def entropy(probs, neg, pos):
    """Weight-of-evidence-style term for one bin of a binary target.

    probs is a (low_count, high_count) pair for the bin; neg and pos are
    the totals of low/high rows over the whole dataset.
    """
    rate_diff = probs[1] / pos - probs[0] / neg
    odds = (probs[1] * neg) / (probs[0] * pos)
    return rate_diff * mylog(odds)
def get_bin_from_score(score):
    """Map a raw score to a binary bin: 0 = low, 1 = high (threshold 280)."""
    bucket = int(score // 280)
    return min(1, bucket)
def iv(header, data):
    '''
    Information Value based feature selection.

    Counts rows per feature level (values 1-5 of 9 features) split by the
    binary score bin, sums the entropy/WoE term over each feature's
    levels, and shows the scores as a horizontal bar chart.
    '''
    neg,pos = 0,0
    # probs[feature][level][bin]: row counts per feature value (1..5)
    # split by score bin (0 = low, 1 = high).
    probs=np.zeros((9,5,2))
    for datum in data:
        score_bin=get_bin_from_score(float(datum[9]))
        if(score_bin==0):
            neg+=1
        else:
            pos+=1
        for i in range(9):
            probs[i][int(datum[i])-1][score_bin]+=1
    feature_score=[0 for _ in range(9)]
    for i in range(9):
        for j in range(5):
            feature_score[i]+=entropy(probs[i][j], neg, pos)
    fig = plt.figure()
    # Reverse so the first feature appears at the top of the chart.
    plt.barh(header[::-1],feature_score[::-1])
    plt.show()
def anova(header, regressors, target):
    """Plot the ANOVA F-statistic of each feature against the target."""
    f_stats, _p_values = f_regression(regressors, target)
    plt.figure()
    # Reverse so the first feature appears at the top of the chart.
    plt.barh(header[::-1], f_stats[::-1])
    plt.show()
def mutual_info(header, regressors, target):
    """Plot the mutual information of each feature against the target."""
    scores = mutual_info_regression(regressors, target)
    plt.figure()
    # Reverse so the first feature appears at the top of the chart.
    plt.barh(header[::-1], scores[::-1])
    plt.show()
def main():
    '''
    Reads data.csv and dispatches to the feature selector named on the
    command line (iv / anova / mi).
    '''
    rows = []
    with open('data.csv', 'r') as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            rows.append(row)
    header = rows[0]
    rows = rows[1:]
    regressors, target = [], []
    for datum in rows:
        # First 9 columns are integer feature levels, column 9 is the score.
        for i in range(9):
            datum[i] = int(datum[i])
        regressors.append(datum[:9])
        target.append(float(datum[9]))
    usage = 'Usage: python feature_selectors.py [iv/anova/mi]'
    if len(sys.argv) < 2:
        print(usage)
        return
    option = sys.argv[1]
    if option == 'iv':
        iv(header, rows)
    elif option == 'anova':
        anova(header, regressors, target)
    elif option == 'mi':
        mutual_info(header, regressors, target)
    else:
        print(usage)
# Run the CLI dispatcher only when executed as a script.
if __name__=='__main__':
    main()
|
Arnabjana1999/scoring_models
|
feature_selectors.py
|
feature_selectors.py
|
py
| 2,740
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41559834396
|
import string
def day3_part1(file):
    """Part 1: each line is a rucksack; find the item present in both halves
    of the line and print the total of the item priorities (a-z=1..26,
    A-Z=27..52)."""
    priority_sum = 0
    with open(file, "r") as f:
        for line in f.readlines():
            midpoint = len(line) // 2
            first_half = line[:midpoint]
            second_half = line[midpoint:]
            for item in first_half:
                if item in second_half:
                    # position in ascii_letters + 1 is the item's priority
                    priority_sum += string.ascii_letters.index(item) + 1
                    break
    print("The sum of priorities: {}".format(priority_sum))
def day3_part2(file):
    """Part 2: rucksacks come in groups of three; find the badge item common
    to all three lines of each group and print the total badge priority."""
    priority_sum = 0
    with open(file, "r") as f:
        lines = f.readlines()
    # walk the lines three at a time, ignoring any trailing partial group
    for group_start in range(0, (len(lines) // 3) * 3, 3):
        first, second, third = lines[group_start:group_start + 3]
        for item in first:
            if item in second and item in third:
                priority_sum += string.ascii_letters.index(item) + 1
                break
    print("The sum of priorities: {}".format(priority_sum))
# run both puzzle parts against the same input file
day3_part1("day3.txt")
day3_part2("day3.txt")
|
cerneris/Advent_of_code_2022
|
day3.py
|
day3.py
|
py
| 1,082
|
python
|
en
|
code
| 0
|
github-code
|
6
|
39959163393
|
# to build, use "cd (playsong directory)"
# pyinstaller --onefile playSong.py
#lib imports
import keyboard
import threading
import time
import os
import re
#local imports
from settings import SETTINGS,map_velocity,apply_range_bounds
global isPlaying
global midi_action_list
isPlaying = False
storedIndex = 0
conversionCases = {'!': '1', '@': '2', '£': '3', '$': '4', '%': '5', '^': '6', '&': '7', '*': '8', '(': '9', ')': '0'}
"""
#maps a string representing a note to a note index where C0 = 0
note_offsets = {"C":0,"D":2,"E":4,"F":5,"G":7,"A":9,"B":11}
def note_to_index(note):
is_sharp = (note[1] == "#")
note_letter = note[0]
if is_sharp:
note_number = int(note[2:])
else:
note_number = int(note[1:])
index = note_offsets[note_letter] + int(is_sharp) + 12*note_number
return index
octave_note_order = ["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"]
def index_to_note(index):
base_letter = "A"
base_octave = 0
val = 21
#A0 is value 21 in midi
octave = (index - 12) // 12
letter = (index - 12) % 12
return octave_note_order[letter] + str(octave)
"""
def onDelPress(event):
    """Keyboard hook callback: toggle playback each time the pause key fires."""
    global isPlaying
    isPlaying = not isPlaying
    if isPlaying:
        print("Playing...")
        # kick off the self-rescheduling playback loop
        playNextNote()
    else:
        print("Stopping...")
    return True
def isShifted(charIn):
    """Return True when typing charIn requires holding shift.

    True for the literal string "shift", uppercase A-Z, and shifted symbols.
    """
    if "shift" in charIn:
        return True
    code = ord(charIn)
    # 65..90 is 'A'..'Z'
    return 65 <= code <= 90 or charIn in "!@#$%^&*()_+{}|:\"<>?"
def pressLetter(strLetter):
    """Press the keyboard key for strLetter, wrapping it in shift if needed.

    Shifted symbols are first mapped back to their number-row key via
    conversionCases so the unshifted key can be pressed while shift is held.
    The key is released first so repeated notes re-trigger.
    """
    if isShifted(strLetter):
        # we have to convert all symbols to numbers
        if strLetter in conversionCases:
            strLetter = conversionCases[strLetter]
        keyboard.release(strLetter.lower())
        keyboard.press("shift")
        keyboard.press(strLetter.lower())
        keyboard.release("shift")
        if SETTINGS.get("key_instant_release") == True:
            keyboard.release(strLetter.lower())
    else:
        keyboard.release(strLetter)
        keyboard.press(strLetter)
        if SETTINGS.get("key_instant_release") == True:
            keyboard.release(strLetter)
    return
def releaseLetter(strLetter):
    """Release the keyboard key for strLetter (shifted symbols mapped first)."""
    if isShifted(strLetter):
        if strLetter in conversionCases:
            strLetter = conversionCases[strLetter]
        keyboard.release(strLetter.lower())
    else:
        keyboard.release(strLetter)
    return
# Mini class to organize different actions into standard chunks
class Midi_Action:
    """One playback event: which keys, how hard, when, and any tempo change."""

    def __init__(self, offset, note_list, velocity, tempo_change):
        self.offset = offset              # time offset (converted to a delay by set_note_offsets)
        self.note_list = note_list        # keyboard keys to press (or release when velocity is 0)
        self.velocity = velocity          # 0 means "release these notes"
        self.tempo_change = tempo_change  # new tempo value, or None for note events
def processFile(song_path):
    """Parse a macro text file into a list of Midi_Action objects.

    Each non-empty line has the form "<offset> <notes-or-tempo>:<velocity>".
    Tempo lines contain "tempo=<value>" and carry an empty note list; note
    lines have their MIDI note numbers clamped via apply_range_bounds and
    mapped to keyboard keys through SETTINGS["key_map"].
    """
    global playback_speed
    with open(song_path,"r") as macro_file:
        lines = macro_file.read().split("\n")
    processed_notes = []
    for line in lines:
        if len(line.strip()) == 0:
            continue
        try:
            #print(line)
            offset,note_str = line.split(" ",1)
            note_group,velocity = note_str.split(":")
            if "tempo" in note_str:
                tempo_change = int(note_group.split("tempo=")[1])
                note_list = []
            else:
                tempo_change = None
                note_list = note_group.split(" ")
                new_note_list = []
                for n in note_list:
                    # drop notes outside the playable range (None) and map the
                    # rest to keyboard characters
                    v = apply_range_bounds(int(n))
                    if v is not None:
                        new_note_list.append(SETTINGS["key_map"][v])
                note_list = new_note_list
            #print(note_list)
            #input()
            m = Midi_Action( float(offset),
                            note_list,
                            int(velocity),
                            tempo_change)
            processed_notes.append(m)
        except Exception as e:
            # malformed line: report it and wait for the user to acknowledge
            print(f"Error reading line:: '{line}'")
            print(e)
            input()
    return processed_notes
# for this method, we instead use delays as l[0] and work using indexes with delays instead of time
# we'll use recursion and threading to press keys
def set_note_offsets(midi_action_list):
    """Convert absolute offsets into inter-note delays (seconds), consuming
    tempo events in the process. Mutates and returns midi_action_list.

    NOTE(review): `tempo` is only assigned when a tempo event is seen; if the
    file's first events precede any tempo event this raises NameError — verify
    that macro files always start with a tempo line.
    """
    # parse time between each note
    # while loop is required because we are editing the array as we go
    i = 0
    while i < len(midi_action_list)-1:
        note = midi_action_list[i]
        nextNote = midi_action_list[i+1]
        if note.tempo_change:
            tempo = 60/float(note.tempo_change)
            # remove the tempo event; i is NOT advanced so the element that
            # slid into position i is processed on the next pass
            midi_action_list.pop(i)
            note = midi_action_list[i]
            if i < len(midi_action_list)-1:
                nextNote = midi_action_list[i+1]
        else:
            # delay until the next event, scaled by the current tempo
            note.offset = (nextNote.offset - note.offset) * tempo
            i += 1
    # let's just hold the last note for 1 second because we have no data on it
    midi_action_list[-1].offset = 1.00
    return midi_action_list
def playNextNote():
    """Play events from midi_action_list starting at storedIndex.

    Zero-delay events are handled inline; when an event has a delay, a
    threading.Timer re-invokes this function after delay/playback_speed and
    the current call returns, so playback is a chain of timer callbacks.
    """
    global isPlaying
    global storedIndex
    global playback_speed
    while isPlaying and storedIndex < len(midi_action_list):
        note = midi_action_list[storedIndex]
        delay = max(note.offset,0)
        if note.velocity == 0:
            #release notes
            for n in note.note_list:
                releaseLetter(n)
        else:
            #press notes
            if SETTINGS.get("alt_velocity",False) == True:
                # send alt+<velocity key> before the notes to set volume
                vel_key = map_velocity(note.velocity)
                print("alt+",vel_key)
                keyboard.press("alt")
                keyboard.press_and_release(vel_key)
                keyboard.release("alt")
            if SETTINGS.get("hold_to_play",False) == True:
                # block until the user is holding the configured key
                while not keyboard.is_pressed(SETTINGS.get("hold_to_play_key")):
                    time.sleep(.05)
            for n in note.note_list:
                pressLetter(n)
        if(note.tempo_change is None and note.velocity != 0):
            print("%10.2f %15s %d" % (delay,"".join(note.note_list),note.velocity))
            #print("%10.2f %15s" % (delay/playback_speed,noteInfo[1]))
        storedIndex += 1
        if(delay != 0):
            # schedule the next event and unwind this call
            threading.Timer(delay/playback_speed, playNextNote).start()
            return
    if storedIndex > len(midi_action_list)-1:
        # song finished: reset for the next run
        isPlaying = False
        storedIndex = 0
    return
#TODO (BUG)
#Rewind and Fast Forward skip over tempo events
# missing a critical tempo event will change playback significantly.
def rewind(KeyboardEvent):
    """Jump playback back 10 events (clamped at the start of the song)."""
    global storedIndex
    if storedIndex - 10 < 0:
        storedIndex = 0
    else:
        storedIndex -= 10
    print("Rewound to %.2f" % storedIndex)
def skip(KeyboardEvent):
    """Jump playback forward 10 events, or reset if past the end.

    NOTE(review): `isPlaying = False` here binds a local (no `global`
    declaration), so it does not actually stop playback — confirm intent.
    """
    global storedIndex
    if storedIndex + 10 > len(midi_action_list):
        isPlaying = False
        storedIndex = 0
    else:
        storedIndex += 10
    print("Skipped to %.2f" % storedIndex)
def get_file_choice(song_dir):
    """List .txt files in song_dir, prompt for a 1-based number, and return
    (chosen filename, full song list)."""
    fileList = os.listdir(song_dir)
    songList = []
    for f in fileList:
        if(".txt" in f or ".txt" in f.lower()):
            songList.append(f)
    print("\nType the number of a song file then press enter:\n")
    for i in range(len(songList)):
        print(i+1,":",songList[i])
    choice = int(input(">"))
    print()
    choice_index = int(choice)
    return songList[choice_index-1],songList
def mode_play(song_path):
    """Load a song file, register the playback hotkeys, and idle until the
    user presses Ctrl+C to return to the song menu.

    NOTE(review): `storedIndex = 0` binds a local here (no `global`), so the
    module-level playback index is not reset by this line — confirm intent.
    """
    global isPlaying
    global midi_action_list
    global playback_speed
    playback_speed = SETTINGS["playback_speed"]
    isPlaying = False
    storedIndex = 0
    midi_action_list = processFile(song_path)
    set_note_offsets(midi_action_list)
    # playback is driven entirely by these keyboard hooks
    keyboard.on_press_key(SETTINGS["pause_key"], onDelPress)
    keyboard.on_press_key(SETTINGS["rewind_key"], rewind)
    keyboard.on_press_key(SETTINGS["advance_key"], skip)
    print()
    print("Controls")
    print("-"*20)
    print(f"Press {SETTINGS['pause_key'].upper()} to play/pause")
    print(f"Press {SETTINGS['rewind_key'].upper()} to rewind")
    print(f"Press {SETTINGS['advance_key'].upper()} to advance")
    if SETTINGS.get("hold_to_play",False) == True:
        print(f"Hold {SETTINGS['hold_to_play_key'].upper()} while song is unpaused to play notes")
    while True:
        input("Press Ctrl+C to go back\n\n")
def main():
    """Song-selection loop: pick a file, play it, clean up hooks on Ctrl+C.

    NOTE(review): the `storedIndex`/`isPlaying` assignments in `finally` are
    locals, not the module globals — confirm intent.
    """
    song_dir = SETTINGS["song_dir"]
    while True:
        song_choice,_ = get_file_choice(song_dir)
        song_path = os.path.join(song_dir,song_choice)
        try:
            mode_play(song_path)
        except KeyboardInterrupt as e:
            pass
        finally:
            keyboard.unhook_all()
            storedIndex = 0
            isPlaying = False
if __name__ == "__main__":
    main()
|
eddiemunson/nn
|
playSong.py
|
playSong.py
|
py
| 7,532
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2958627650
|
import Algorithmia
import logging
import os
LOG_FOLDER = 'logs'
if os.path.exists(LOG_FOLDER) is False:
os.mkdir(LOG_FOLDER)
logging.basicConfig(filename=LOG_FOLDER + '/' + __name__ + '.log', format='[%(asctime)s] %(message)s\n\n',
level=logging.DEBUG)
api_key = None
def get_emotion(photo: bytes) -> str or None:
    '''Returns emotions by face image
    Args:
        photo: bytes data
    Returns:
        main_emotion: most possible emotion name (lower-cased)
        None: if failed
    '''
    # NOTE(review): the annotation `str or None` evaluates to plain `str`;
    # kept unchanged to avoid altering the module's public surface.
    try:
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/EmotionRecognitionCNNMBP/0.1.3')
        img = bytearray(photo)
        emotions = algo.pipe(img).result['results'][0]['emotion']
        # linear scan for the (confidence, label) pair with max confidence
        main_emotion = str()
        confidence = 0.0
        for emotion in emotions:
            if emotion[0] > confidence:
                confidence = emotion[0]
                main_emotion = emotion[1]
        return main_emotion.lower()
    except Exception as e:
        logging.error(str(e))
        print('Algorithmia:', str(e))
        return None
def celebrities_similarity(photo: bytes) -> str or None:
    '''Returns person similarity to some celebrity
    Args:
        photo: bytes data
    Returns:
        Name of the most possible celebrity (underscores replaced by spaces)
        None: if failed
    '''
    try:
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/DeepFaceRecognition/0.1.1')
        img = bytearray(photo)
        celebrities = algo.pipe(img).result['results']
        # first result's label, e.g. "Some_Name" -> "Some Name"
        return ' '.join(celebrities[0][1].split('_'))
    except Exception as e:
        logging.error(str(e))
        return None
def verify_faces(photo1: bytes, photo2: bytes) -> float or None:
    '''Returns two photos similarity
    Args:
        photo1: bytes data
        photo2: bytes data
    Returns:
        similarity confidence: if data recieved
        None: if failed
    '''
    # NOTE(review): this returns the raw algo.pipe(...) response object, not a
    # float — callers may need `.result`; confirm expected return shape.
    try:
        data = [bytearray(photo1), bytearray(photo2)]
        client = Algorithmia.client(api_key)
        algo = client.algo('zskurultay/ImageSimilarity/0.1.2')
        return algo.pipe(data)
    except Exception as e:
        logging.error(str(e))
        return None
def gender(photo: bytes) -> str or None:
    '''Computes gender probabilities
    Args:
        photo: bytes data
    Returns:
        lower-cased label of the higher-scoring gender
        None: if failed
    '''
    try:
        img = bytearray(photo)
        # (removed unused `data = {'image': img}` dict — the API is called
        # with the raw bytearray, matching the other functions in this module)
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/GenderClassification/1.0.2')
        gender_list = algo.pipe(img).result['results'][0]['gender']
        # gender_list holds (confidence, label) pairs; pick the higher one
        if gender_list[0][0] > gender_list[1][0]:
            return gender_list[0][1].lower()
        else:
            return gender_list[1][1].lower()
    except Exception as e:
        logging.error(str(e))
        return None
def age(photo: bytes) -> str or None:
    '''Returns age groups with probabilies
    Args:
        photo: bytes data
    Returns:
        the most possible age interval : list with structure [min_age, max_age]
        None: if failed
    '''
    try:
        img = bytearray(photo)
        client = Algorithmia.client(api_key)
        algo = client.algo('deeplearning/AgeClassification/1.0.3')
        ages = algo.pipe(img).result['results'][0]['age']
        # pick the "(min, max)" interval string with the highest confidence
        str_age_interval = str()
        age_confidence = 0.0
        # NOTE(review): the loop variable `age` shadows this function's name
        for age in ages:
            if age[0] > age_confidence:
                age_confidence = age[0]
                str_age_interval = age[1]
        # parse "(min, max)" into [min, max] ints
        age_string_interval = str_age_interval.strip('()').split(', ')
        age_interval = [int(age_string_interval[0]), int(age_string_interval[1])]
        return age_interval
    except Exception as e:
        logging.error(str(e))
        print(str(e))
        return None
|
FunRobots/candybot_v2
|
src/coffebot/vision/utils/algorithmia.py
|
algorithmia.py
|
py
| 3,841
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25760910262
|
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Define the RNN model
# (a single-feature LSTM trained on the history of simulated cup numbers)
model = Sequential()
model.add(LSTM(64, input_shape=(1, 1)))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
balance = 100
bet = 1
sim_numbers = []
# interactive loop: "simulate N" trains the model on N random draws,
# "play cups" / "play" plays one betting round against a fresh random cup
while True:
    cup = random.randint(1, 3)
    game = input("Choice ('simulate x' or 'play cups'): ")
    if game.startswith("simulate"):
        input_list = game.split(" ")
        num = int(input_list[1])
        if num >= 100000:
            print("Please wait a few minutes, numbers above 100.000 take longer for the model to simulate...")
        sim_numbers += [random.randint(1, 3) for _ in range(num)]
        print(sim_numbers)
        # reshape history to (samples, timesteps=1, features=1) for the LSTM
        sim_numbers_arr = np.array(sim_numbers)
        sim_numbers_arr = sim_numbers_arr.reshape(sim_numbers_arr.shape[0], 1, 1)
        # predict each number from its predecessor
        model.fit(sim_numbers_arr[:-1], sim_numbers_arr[1:], epochs=10, verbose=0)
        predicted_number = model.predict(sim_numbers_arr[-1].reshape(1, 1, 1))
        print(f'Predicted next number: {predicted_number[0][0]:.0f}')
    elif game == "play cups" or game == "play":
        choice = input("What cup do you want to choose? 1, 2 or 3: ")
        bet_choice = input(f'How much do you want to bet on cup {choice}?: ')
        bet = bet_choice
        if int(choice) == cup:
            balance += int(bet)
            print(f'You won ${bet}! Total balance is now ${balance}!')
        elif int(choice) != cup:
            balance -= int(bet)
            print(f'You lost ${bet}. Total balance is now ${balance}. Correct cup was cup {cup}.')
        else:
            # NOTE(review): unreachable — the two branches above cover all
            # cases; non-numeric input raises in int(choice) before this
            print("Please input a valid number")
            continue
    else:
        print("Please input a valid choice...")
|
atleastimnotgay/python
|
3cups_prediction.py
|
3cups_prediction.py
|
py
| 1,897
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73583270588
|
#!/usr/bin/python
# coding: utf-8
from flask import Flask, Blueprint, flash, g, redirect, render_template, request, url_for, session
import os
app = Flask(__name__)
tests = []
class TestObj:
    """One test directory under the scan root.

    Reads its run count from <path><name>/count.txt and its run/stop flag
    (0 or 1) from <path><name>/run.txt.
    """

    def __init__(self, name, path):
        self.name = name
        self.path = path + self.name
        self.countfile = self.path + "/count.txt"
        self.count = self.read_count()
        self.run_file = self.path + "/run.txt"
        self.running = self.get_run_state()

    def read_count(self):
        """Return the current run count stored in count.txt."""
        # context manager so the handle is closed deterministically
        # (the original left the file object to the garbage collector)
        with open(self.countfile) as f:
            return int(f.read())

    def get_run_state(self):
        """Return the run flag from run.txt: 0 stopped, 1 running."""
        with open(self.run_file) as f:
            return int(f.read())

    def toggle_run(self):
        """Flip the 0/1 run flag in run.txt."""
        newstate = str(int(not bool(self.get_run_state())))
        with open(self.run_file, 'w') as rf:
            rf.write(newstate)
@app.route('/', methods=('GET','POST'))
def main():
    """Render the test dashboard; on POST, toggle the named test's run flag.

    The POST form's first key is the test name whose run.txt gets flipped.
    """
    # `global tests` must precede any use of `tests` in this function —
    # the original declared it after the loop below, which is a SyntaxError
    # ("name used prior to global declaration") in Python 3.
    global tests
    if request.method == 'POST':
        print('got request')
        test = list(dict(request.form))[0]
        testobj = None
        for t in tests:
            if t.name == test:
                testobj = t
        testobj.toggle_run()
        print('test', testobj)
    # re-scan the directory so the page reflects the latest on-disk state
    tests = generate_tests()
    return render_template('index.html', tests=tests)
def generate_tests(a_dir="/home/pi/current_tests/"):
    """Build a TestObj for every subdirectory of a_dir.

    a_dir is now a parameter (defaulting to the original hard-coded path)
    so the scan root can be overridden, e.g. for testing.
    """
    names = [name for name in os.listdir(a_dir)
             if os.path.isdir(os.path.join(a_dir, name))]
    return [TestObj(name, a_dir) for name in names]
if __name__ == '__main__':
print(generate_tests())
app.run(host='0.0.0.0', debug=True)
|
rjames711/automation
|
flaskweb/app.py
|
app.py
|
py
| 1,568
|
python
|
en
|
code
| 0
|
github-code
|
6
|
40411500341
|
#!/usr/bin/env python3
"""
Name: example_ndfc_policy_delete_using_switch_serial_entity_type_entity_name.py
Description: Delete policies matching switch serial number, entity type,
and entity name
"""
import sys
from ndfc_python.log import log
from ndfc_python.ndfc import NDFC, NdfcRequestError
from ndfc_python.ndfc_credentials import NdfcCredentials
from ndfc_python.ndfc_policy import NdfcPolicy
# Load credentials and open an authenticated NDFC session
nc = NdfcCredentials()
logger = log("ndfc_policy_create_log", "INFO", "DEBUG")
ndfc = NDFC()
ndfc.logger = logger
ndfc.username = nc.username
ndfc.password = nc.password
ndfc.ip4 = nc.ndfc_ip
ndfc.login()
# Delete all policies attached to this switch serial number
instance = NdfcPolicy()
instance.logger = logger
instance.ndfc = ndfc
instance.serial_number = "FDO21120U5D"
try:
    instance.delete()
except ValueError as err:
    # bad/missing parameters detected client-side
    msg = f"exiting. Exception detail: {err}"
    instance.logger.error(msg)
    sys.exit(1)
except NdfcRequestError as err:
    # the NDFC controller rejected the request
    msg = f"exiting. Exception detail: {err}"
    instance.logger.error(msg)
    sys.exit(1)
instance.logger.info("Delete request succeeded")
|
allenrobel/ndfc-python
|
examples/ndfc_policy/policy_delete_using_switch_serial.py
|
policy_delete_using_switch_serial.py
|
py
| 1,036
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27216859235
|
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes: home page, Django admin, per-program RSS feeds, and feed listing
urlpatterns = patterns('',
    # Examples:
    (r'^$', 'rss_duna.feed.views.home'),
    # url(r'^myproject/', include('myproject.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    #(r'^feed/$', DunaEntriesFeed()),
    # \D+ captures a non-digit program identifier from the URL
    (r'^duna/(?P<programa_id>\D+)/rss/$', 'rss_duna.feed.views.get_feed_rss'),
    (r'^duna/feeds/$', 'rss_duna.feed.views.list_feeds'),
    #(r'^prueba/$', 'rss_duna.feed.views.prueba')
)
#if settings.DEBUG:
# urlpatterns += patterns('',
# (r'^files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes':False}),
# )
|
yonsing/rss_duna
|
urls.py
|
urls.py
|
py
| 965
|
python
|
en
|
code
| 0
|
github-code
|
6
|
13114754891
|
import requests
import tkinter.messagebox
# one candidate username per line in user.txt
user = open('user.txt','r').read().splitlines()
def checking():
    """Query TikTok's share endpoint for each username in user.txt and
    classify it as available (popup), banned (console), or taken (console).
    """
    for users in user:
        tik = (f'https://m.tiktok.com/node/share/user/@{users}')
        # browser-like headers so the endpoint serves the JSON status payload
        head = {
            'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'accept-encoding':'gzip, deflate, br',
            'accept-language':'en-US,en;q=0.9',
            'cache-control':'max-age=0',
            'cookie':'tt_webid_v2=6930696974879032837; tt_webid=6930696974879032837; tt_csrf_token=d8lRPZdjfD3sgWCKlFHeaq-0',
            'sec-fetch-dest':'document',
            'sec-fetch-mode':'navigate',
            'sec-fetch-site':'none',
            'sec-fetch-user':'?1',
            'upgrade-insecure-requests':'1',
            'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36 OPR/73.0.3856.344',
        }
        Tik = requests.get(tik,headers=head)
        # status markers observed in the response body:
        if ('"statusCode":10202,"statusMsg":""') in Tik.text:
            # 10202 -> no such user: the name is available
            tkinter.messagebox.showinfo(title='NewUser',message=users)
        elif ('statusCode":10221') in Tik.text:
            print(f'Status : Banned >> {users}')
        elif ('"pageId"') in Tik.text:
            print(f'Taken >> {users}')
checking()
|
8-wrk/TikCheck
|
Check.py
|
Check.py
|
py
| 1,463
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21764401402
|
# Approach 1: Merge Sort
# Time: O(n log n)
# Space: O(n)
class Solution:
    def sortArray(self, nums: List[int]) -> List[int]:
        """Sort nums ascending with top-down merge sort.

        Mutates nums in place and returns the same list object.
        Time: O(n log n). Space: O(n).
        """

        def merged(left: List[int], right: List[int]) -> List[int]:
            # merge two sorted lists into a new sorted list (stable: ties
            # take the left element first)
            out = []
            i = j = 0
            while i < len(left) and j < len(right):
                if left[i] <= right[j]:
                    out.append(left[i])
                    i += 1
                else:
                    out.append(right[j])
                    j += 1
            out.extend(left[i:])
            out.extend(right[j:])
            return out

        def sort(seq: List[int]) -> List[int]:
            # recursively split in half, sort each half, then merge
            if len(seq) <= 1:
                return seq
            mid = len(seq) // 2
            return merged(sort(seq[:mid]), sort(seq[mid:]))

        nums[:] = sort(nums)
        return nums
|
jimit105/leetcode-submissions
|
problems/sort_an_array/solution.py
|
solution.py
|
py
| 1,582
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19882566170
|
from jinja2 import Environment, BaseLoader, TemplateNotFound
import importlib_resources
class PackageLoader(BaseLoader):
    """Jinja2 loader that reads templates from the backendService.templates
    package via importlib_resources (works from zipped installs too)."""
    def __init__(self, path):
        self.path = path
    def get_source(self, environment, template):
        from backendService import templates
        try:
            source = importlib_resources.read_text(templates, template)
        except FileNotFoundError as exc:
            raise TemplateNotFound(template) from exc
        # (source, filename, uptodate) — always "up to date": never reload
        return source, self.path, lambda: True
JINJA_ENV = Environment(loader=PackageLoader("backendService.http.templates"))
def get_template(name):
    """Fetch a template by name from the module-level Jinja environment."""
    return JINJA_ENV.get_template(name)
|
bitlogik/guardata
|
backendService/templates/__init__.py
|
__init__.py
|
py
| 648
|
python
|
en
|
code
| 9
|
github-code
|
6
|
10424214131
|
#-*- coding: utf-8 -*-
u"""
@author: Martí Congost
@contact: marti.congost@whads.com
@organization: Whads/Accent SL
@since: October 2008
"""
import cherrypy
from cocktail.modeling import cached_getter
from woost.controllers.publishablecontroller import PublishableController
class DocumentController(PublishableController):
    """A controller that serves rendered pages."""
    def __call__(self, **kwargs):
        # Document specified redirection
        document = self.context["publishable"]
        if document.redirection_mode:
            redirection_target = document.find_redirection_target()
            if redirection_target is None:
                # redirection configured but no target resolvable -> 404
                raise cherrypy.NotFound()
            raise cherrypy.HTTPRedirect(redirection_target.get_uri())
        # No redirection, serve the document normally
        # NOTE(review): **kwargs is not forwarded to the base __call__ —
        # confirm the base class takes no extra arguments
        return PublishableController.__call__(self)
    @cached_getter
    def page_template(self):
        # template of the current publishable; 404 when none is assigned
        template = self.context["publishable"].template
        if template is None:
            raise cherrypy.NotFound()
        return template
    @cached_getter
    def view_class(self):
        # the view class is named by the template's identifier
        return self.page_template.identifier
|
marticongost/woost
|
woost/controllers/documentcontroller.py
|
documentcontroller.py
|
py
| 1,181
|
python
|
en
|
code
| 0
|
github-code
|
6
|
31628139132
|
# fastapi
from fastapi import APIRouter
from fastapi_sqlalchemy import db
# starlette
from starlette.requests import Request
# models
from server.models import User
# Router for all /accounts endpoints
router = APIRouter(
    prefix="/accounts",
    tags=["accounts"],
    dependencies=[],
    responses={
        400: {"description": "Bad request"}
    },
)
@router.get("/profile/")
def profile(request: Request):
    """Return the authenticated user's profile as a dict.

    The user id is read from request.state.user_id (set by auth middleware).
    NOTE(review): .first() returns None for an unknown id, which would raise
    AttributeError on user.dict() — confirm upstream guarantees a valid id.
    """
    user_id = request.state.user_id
    user = db.session.query(User).filter(
        User.id == user_id
    ).first()
    return user.dict()
|
RajeshJ3/arya.ai
|
server/accounts/account_controllers.py
|
account_controllers.py
|
py
| 525
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21729046794
|
import firebase_admin
from firebase_admin import db
from flask import jsonify
from hashlib import md5
from random import randint
from time import time
from time import time, sleep
firebase_admin.initialize_app(options={
'databaseURL': 'https://copy-passed.firebaseio.com',
})
waitlist = db.reference('waitlist')
ids = db.reference('ids')
timeoutSeconds = 120
def put(ref, data):
    """Merge the key/value pairs of data into the value stored at ref.

    Returns False (and writes nothing) when ref currently holds no value,
    True after a successful merge-and-update.
    """
    current = ref.get()
    if not current:
        return False
    for key, value in data.items():
        current[key] = value
    ref.update(current)
    return True
def add_user(request):
    """Register the caller on the waitlist and mint a session id for them.

    The caller's id is written to /waitlist with value "x"; some other party
    replaces that "x" with a uuid, which we then hash into a fresh unique id
    under /ids. Polls for up to timeoutSeconds before giving up with a 408.
    The "blank--" placeholder keys exist only to keep empty Firebase nodes
    alive and are removed before returning.
    """
    delBlankw = False
    delBlanki = False
    if waitlist.get() and request.json["id"] in waitlist.get():
        return "Conflict", 409
    # create placeholder nodes so put() sees a non-empty value
    if not waitlist.get():
        delBlankw = True
        db.reference().update({"waitlist": {"blank--": 0}})
    if not ids.get():
        delBlanki = True
        db.reference().update({"ids": {"blank--": 0}})
    put(waitlist, {request.json["id"]: "x"})
    start = time()
    # print("entering waitlist")
    # poll until someone replaces our "x" with a uuid, or we time out
    while waitlist.child(request.json["id"]).get() == "x":
        sleep(.5)
        if (time() - start) >= timeoutSeconds:
            if delBlanki:
                ids.child("blank--").delete()
            if delBlankw:
                waitlist.child("blank--").delete()
            waitlist.child(request.json["id"]).delete()
            return "Request Timeout", 408
    uuid = waitlist.child(request.json["id"]).get()
    waitlist.child(request.json["id"]).delete()
    if not ids.get():
        delBlanki = True
        db.reference().update({"ids": {"blank--": 0}})
    # re-roll until the derived id is unique under /ids
    idu = get_id(uuid)
    while idu in ids.get():
        idu = get_id(uuid)
    put(ids, {idu: {"uid": uuid, "timestamp": time()}})
    # print(ids)
    if delBlanki:
        ids.child("blank--").delete()
    if delBlankw:
        waitlist.child("blank--").delete()
    return jsonify({"id": idu}), 201
def get_id(oid):
    """Derive a 32-char hex id by MD5-hashing a random salt prepended to oid."""
    salted = str(randint(0, 1e12)) + oid
    return md5(salted.encode()).hexdigest()
def authenticator(request):
    """HTTP entry point: validate the request, handle id revocation, and
    dispatch POST / to add_user.

    Returns (body, status) tuples understood by the hosting framework.
    """
    # every request must carry a JSON body with an "id"
    if not request.json or 'id' not in request.json:
        return "Not Acceptable", 406
    if "revoke" in request.json and request.json["revoke"]:
        # delete the caller's minted id if it exists
        if ids.get() and ids.child(request.json["id"]).get():
            ids.child(request.json["id"]).delete()
            return "Deleted", 200
        else:
            return "Not Found", 204
    if request.path == '/' or request.path == '':
        if request.method == 'POST':
            return add_user(request)
        else:
            return 'Method not supported', 405
    return 'URL not found', 404
|
ocular-data/copy-passed-firebase
|
python_functions/authenticator/main.py
|
main.py
|
py
| 2,575
|
python
|
en
|
code
| 0
|
github-code
|
6
|
15581775407
|
#!/usr/bin/env python
import pygame
import constants
from network import Type
import physical_object
from physical_object import PhysicalObject
import bullet
import math
from pygame.rect import Rect
import play_sound
from pygame import mixer
from pygame.mixer import Sound
TURRET_WIDTH = 24
TURRET_HEIGHT = 28
GUN_CHARGEUP_TIME = 100
class Turret(PhysicalObject):
    """This class represents a turret.

    A defender-owned emplacement that charges up while an attacker ship is
    present and fires a predictively-aimed bullet when fully charged.
    """
    typ = Type.TURRET
    # frames remaining until the gun can fire (counts down from GUN_CHARGEUP_TIME)
    timeLeftToCharge = 0
    def __init__(self, position, level):
        PhysicalObject.__init__(self, position)
        self.level = level
        self.controllingPlayer = physical_object.OWNER_DEFENDER
        self.physicsRect = pygame.rect.Rect(self.r_x, self.r_y, TURRET_WIDTH, TURRET_HEIGHT)
        self.image = pygame.image.load('images/defenses.png')
        self.rect = self.image.get_rect()
        # sprite-sheet sub-rects for the three charge states
        self.actions = {"charged 0": (0, 112, TURRET_WIDTH, TURRET_HEIGHT),
                        "charged 50": (TURRET_WIDTH, 112, TURRET_WIDTH, TURRET_HEIGHT),
                        "charged 100": (2*TURRET_WIDTH, 112, TURRET_WIDTH, TURRET_HEIGHT)}
        self.boundsRect = Rect(level.rect.x,level.rect.y,level.rect.width,constants.SCREEN_HEIGHT)
        self.action = "charged 0"
        self.area = pygame.rect.Rect(self.actions[self.action])
        #print 'turret (x,y) = ', (self.r_x, self.r_y)
        #print 'turret owner = ', self.controllingPlayer
        self.timeLeftToCharge = GUN_CHARGEUP_TIME
    def step(self, scrollPosition):
        """Per-frame update: track the scroll window, pick the charge sprite,
        and fire at the attacker's ship once fully charged."""
        # translate movement boundary
        self.boundsRect.y = scrollPosition
        # update self
        PhysicalObject.step(self, scrollPosition)
        # choose sprite by remaining charge time (less time left = more charged)
        if self.timeLeftToCharge < (1/5.0)*GUN_CHARGEUP_TIME:
            self.action = "charged 100"
        elif self.timeLeftToCharge < (3/5.0)*GUN_CHARGEUP_TIME:
            self.action = "charged 50"
        else:
            self.action = "charged 0"
        self.area = pygame.rect.Rect(self.actions[self.action])
        # only act while the turret is inside the visible play area
        if self.physicsRect.colliderect(self.boundsRect):
            turretSeesShip = False
            target = None
            for o in self.level.physicalObjects:
                if(o.controllingPlayer == physical_object.OWNER_ATTACKER and
                   o.targetType == physical_object.TARGET_TYPE_SHIP):
                    turretSeesShip = True
                    target = o
            if turretSeesShip:
                self.timeLeftToCharge -= 1
                if self.timeLeftToCharge <= 0:
                    # it's the ship! get it!
                    soundEfx = pygame.mixer.Sound(constants.TURRET_BULLET_SFX)
                    soundEfx.set_volume(0.5)
                    play_sound.PlaySounds(soundEfx, 2)
                    theBullet = bullet.Bullet((self.rect.x + TURRET_WIDTH/2 - bullet.BULLET_WIDTH/2, self.rect.y + (bullet.BULLET_HEIGHT + 6)), "tur")
                    theBullet.controllingPlayer = self.controllingPlayer
                    # old velocity code
                    #deltaX = o.r_x - self.r_x
                    #deltaY = o.r_y - self.r_y
                    #distance = math.hypot(deltaX, deltaY)
                    #theBullet.v_x = bullet.DEFAULT_SPEED*(deltaX/distance) # v_x = speed*cos
                    #theBullet.v_y = bullet.DEFAULT_SPEED*(deltaY/distance) # v_y = speed*sin
                    # new velocity code; apparently tries to divide by zero and take the square root of a negative number
                    #timeToImpact = ((o.r_x*o.v_x + o.r_y*o.v_y + math.sqrt(-pow(o.r_y,2)*(-1 + pow(o.v_x, 2)) + o.r_x*(o.r_x + 2*o.r_y*o.v_x*o.v_y - o.r_x*pow(o.v_y, 2))))/(-1 + pow(o.v_x, 2) + pow(o.v_y, 2)))
                    #theBullet.v_x = (o.r_x + timeToImpact*o.v_x)/timeToImpact
                    #theBullet.v_y = (o.r_y + timeToImpact*o.v_y)/timeToImpact
                    # new velocity code, mk. II
                    # iteratively refine the intercept point: guess the target
                    # stays put, then re-estimate using the flight time to the
                    # previous guess (4 fixed-point iterations)
                    futurepos = (target.r_x, target.r_y) # Guess that where they'll be in the future is where they are now
                    MY_SPEED = 1.5 + constants.SCROLL_RATE
                    for i in range(0, 4):
                        dist = (futurepos[0] - self.r_x, futurepos[1] - self.r_y)
                        timetotarget = math.hypot(dist[0], dist[1]) / bullet.DEFAULT_SPEED
                        distcovered = (target.v_x*timetotarget, target.v_y*timetotarget)
                        futurepos = (target.r_x + distcovered[0], target.r_y + distcovered[1])
                    dirNotNormalized = (futurepos[0] - self.r_x, futurepos[1] - self.r_y)
                    dirNormalized = ((dirNotNormalized[0]/math.hypot(dirNotNormalized[0], dirNotNormalized[1]),
                                      dirNotNormalized[1]/math.hypot(dirNotNormalized[0], dirNotNormalized[1])))
                    theBullet.v_x = MY_SPEED*dirNormalized[0]
                    theBullet.v_y = MY_SPEED*dirNormalized[1]
                    # end of velocity code
                    self.childObjects.append(theBullet)
                    self.timeLeftToCharge = GUN_CHARGEUP_TIME
            else: # if the turret doesn't see the ship,
                self.timeLeftToCharge = GUN_CHARGEUP_TIME # then the turret should power down
|
Nayruden/GameDev
|
turret.py
|
turret.py
|
py
| 4,330
|
python
|
en
|
code
| 6
|
github-code
|
6
|
18262922550
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import types
import re
import subprocess
import unitTestUtil
import logging
sensorDict = {}
util_support_map = ['fbttn', 'fbtp', 'lightning', 'minipack', 'fby2', 'yosemite']
multi_host_map = ['fby2', 'yosemite']
lm_sensor_support_map = ['wedge', 'wedge100', 'galaxy100', 'cmm']
def sensorTest(platformType, data, util):
    """
    Check that sensor data is with spec or is just present

    Dispatches to the util-based or lm-sensors-based checker depending on the
    platform, prints PASSED/FAILED, and exits 0 on success / 1 on failure.
    """
    # no drivers present from sensor cmd
    if platformType in util_support_map:
        failed = sensorTestUtil(platformType, data, util)
    # drivers present from sensor cmd
    else:
        failed = sensorTestNetwork(platformType, data, util)
    if len(failed) == 0:
        print("Sensor Readings on " + platformType + " [PASSED]")
        sys.exit(0)
    else:
        print("Sensor Readings on " + platformType + " for keys: " +
              str(failed) + " [FAILED]")
        sys.exit(1)
def sensorTestNetwork(platformType, data, util):
    """Validate lm-sensors-style readings (driver -> sensor -> value) against
    the JSON spec; returns the list of driver/sensor keys that failed."""
    failed = []
    createSensorDictNetworkLmSensors(util)
    logger.debug("Checking values against json file")
    for driver in data:
        if isinstance(data[driver], dict):
            for reading in data[driver]:
                if data[driver][reading] == "yes":
                    # presence-only check: the reading just has to exist
                    try:
                        raw_value = sensorDict[driver][reading]
                    except Exception:
                        failed += [driver, reading]
                        continue
                if isinstance(data[driver][reading], list):
                    # range check: first number in the reading must fall
                    # inside [low, high] from the JSON spec
                    values = re.findall(r"[-+]?\d*\.\d+|\d+", raw_value)
                    if len(values) == 0:
                        failed += [driver, reading]
                        continue
                    rang = data[driver][reading]
                    if float(rang[0]) > float(values[0]) or float(
                            values[0]) > float(rang[1]):
                        failed += [driver, reading]
                else:
                    # otherwise any numeric content counts as a valid reading
                    if bool(re.search(r'\d', raw_value)):
                        continue
                    else:
                        failed += [driver, reading]
    return failed
def sensorTestUtil(platformType, data, util):
    """Validate sensor-util readings (sensor -> list of values) against the
    JSON spec; returns the list of sensor keys (or sensor+value) that failed."""
    failed = []
    createSensorDictUtil(util)
    logger.debug("checking values against json file")
    for sensor in data:
        # skip type argument in json file
        if sensor == "type":
            continue
        try:
            raw_values = sensorDict[sensor]
        except Exception:
            failed += [sensor]
            continue
        # multi-host platforms report one reading per host (4) or one total
        if platformType in multi_host_map:
            if len(raw_values) not in [1, 4]:
                failed += [sensor]
                continue
        elif len(raw_values) not in [1]:
            failed += [sensor]
            continue
        if isinstance(data[sensor], list):
            # range check against [low, high] from the JSON spec
            for raw_value in raw_values:
                values = re.findall(r"[-+]?\d*\.\d+|\d+", raw_value)
                if len(values) == 0:
                    failed += [sensor]
                    continue
                rang = data[sensor]
                if float(rang[0]) > float(values[0]) or float(values[0]) > float(
                        rang[1]):
                    failed += [sensor]
        else:
            # otherwise the util must report an 'ok' status for every value
            for raw_value in raw_values:
                if 'ok' not in raw_value:
                    failed += [sensor + raw_value]
                    break
    return failed
def createSensorDictNetworkLmSensors(util):
"""
Creating a sensor dictionary driver -> sensor -> reading
Supports wedge, wedge100, galaxy100, and cmm
"""
cmd = util.SensorCmd
if cmd is None:
raise Exception("sensor command not implemented")
logger.debug("executing command: ".format(cmd))
f = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
info, err = f.communicate()
if len(err) != 0:
raise Exception(err)
logger.debug("Creating sensor dictionary")
info = info.decode('utf-8')
info = info.split('\n')
currentKey = ''
for line in info:
if ':' in line:
lineInfo = line.split(':')
key = lineInfo[0]
val = ''.join(lineInfo[1:])
sensorDict[currentKey][key] = val
elif len(line) == 0 or line[0] == ' ':
continue
else:
sensorDict[line] = {}
currentKey = line
def createSensorDictUtil(util):
    """
    Creating a sensor dictionary sensor -> reading
    Supports fbtp and fbttn

    Executes the platform sensor command and accumulates every
    ``name: reading`` output line into the module-level sensorDict,
    keeping one list of readings per sensor name. Raises on a missing
    command, on stderr output, or on a 'timed out' line.
    """
    cmd = util.SensorCmd
    if cmd is None:
        raise Exception("sensor command not implemented")
    logger.debug("executing command: " + str(cmd))
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if len(err) != 0:
        raise Exception(err)
    logger.debug("creating sensor dictionary")
    for line in out.decode('utf-8').split('\n'):
        if ':' in line:
            # value is everything past the first ':' with colons removed,
            # matching ''.join(split(':')[1:]) of the original parser
            name, _, rest = line.partition(':')
            sensorDict.setdefault(name, []).append(rest.replace(':', ''))
        if "timed out" in line:
            print(line)
            raise Exception(line)
if __name__ == "__main__":
    """
    Input to this file should look like the following:
    python sensorTest.py wedgeSensor.json
    """
    util = unitTestUtil.UnitTestUtil()
    logger = util.logger(logging.WARN)
    try:
        data = {}
        # Positional arg: the platform sensor json; optional --verbose sets
        # the log level (DEBUG, INFO, WARNING, ERROR).
        args = util.Argparser(['json', '--verbose'], [str, None],
                              ['json file',
                               'output all steps from test with mode options: DEBUG, INFO, WARNING, ERROR'])
        if args.verbose is not None:
            logger = util.logger(args.verbose)
        data = util.JSONparser(args.json)
        # The json's 'type' field selects which platform util module to load.
        platformType = data['type']
        utilType = util.importUtil(platformType)
        sensorTest(platformType, data, utilType)
    except Exception as e:
        # Any failure (bad args, missing json, failed sensors) exits non-zero.
        print("Sensor test [FAILED]")
        print("Error code returned: {}".format(e))
        sys.exit(1)
|
WeilerWebServices/Facebook
|
openbmc/tests/common/sensorTest.py
|
sensorTest.py
|
py
| 6,525
|
python
|
en
|
code
| 3
|
github-code
|
6
|
34172290130
|
import os
import csv


def analyze(data_list):
    """Compute the PyBank financial report from parsed CSV rows.

    Args:
        data_list: list of [month_label, profit_loss] rows (strings),
            in chronological order, header already removed.

    Returns:
        The formatted multi-line report string.
    """
    total_month = len(data_list)
    total_profit_loss = int(data_list[0][1])
    total_change = 0
    greatest_profit_increase = 0
    greatest_profit_decrease = 0
    greatest_profit_increase_month = ""
    greatest_profit_decrease_month = ""
    previous_month_profit = int(data_list[0][1])
    for i in range(1, total_month):
        current_month_profit = int(data_list[i][1])
        change = current_month_profit - previous_month_profit
        total_profit_loss += current_month_profit
        total_change += change
        previous_month_profit = current_month_profit
        if greatest_profit_increase < change:
            greatest_profit_increase = change
            greatest_profit_increase_month = data_list[i][0]
        # Bug fix: the original compared the running minimum against the
        # month's *absolute* profit (current_month_profit) instead of the
        # month-over-month change, so the greatest decrease was wrong.
        if greatest_profit_decrease > change:
            greatest_profit_decrease = change
            greatest_profit_decrease_month = data_list[i][0]
    # Guard against a one-row file (no changes to average).
    average_change = round(total_change / (total_month - 1), 2) if total_month > 1 else 0
    # Typo fix in the report heading ("Ananlysis" -> "Analysis").
    result = f"\
Financial Analysis\n\
-------------------------------\n\
Total Months: {total_month}\n\
Total: ${total_profit_loss}\n\
Average Change: ${average_change}\n\
Greatest Increase in Profits: {greatest_profit_increase_month} (${greatest_profit_increase})\n\
Greatest Decrease in Profits: {greatest_profit_decrease_month} (${greatest_profit_decrease})\n\
"
    return result


def main():
    """Read the budget CSV, print the report, and save it to a text file."""
    file_path = os.path.join(".", "Resources", "budget_data.csv")
    with open(file_path, "r") as csv_file:
        csv_reader_obj = csv.reader(csv_file, delimiter=",")
        # Consume (and show) the header line so data rows start at line 2.
        csv_header = next(csv_file)
        print(csv_header)
        data_list = list(csv_reader_obj)
    result = analyze(data_list)
    print(result)
    with open("PyBank_results.txt", "w") as file:
        file.write(result)


# Guarding the entry point keeps importing this module side-effect free.
if __name__ == "__main__":
    main()
|
Simon-Xu-Lan/python-challenge
|
PyBank/main.py
|
main.py
|
py
| 1,772
|
python
|
en
|
code
| 0
|
github-code
|
6
|
39275871070
|
import sys
import time
import traceback
from datetime import datetime
import random
import re
from decimal import Decimal
import numexpr
from typing import List, Dict
import disnake
from disnake.ext import commands, tasks
from disnake import ActionRow, Button
from disnake.enums import OptionType
from disnake.app_commands import Option
from disnake.enums import ButtonStyle
import copy
import store
from Bot import logchanbot, EMOJI_ZIPPED_MOUTH, EMOJI_ERROR, EMOJI_RED_NO, EMOJI_ARROW_RIGHTHOOK, \
EMOJI_MONEYFACE, NOTIFICATION_OFF_CMD, EMOJI_SPEAK, EMOJI_BELL, EMOJI_BELL_SLASH, EMOJI_HOURGLASS_NOT_DONE, \
EMOJI_INFORMATION, EMOJI_PARTY, SERVER_BOT, seconds_str, RowButtonCloseMessage, RowButtonRowCloseAnyMessage, \
text_to_num, truncate
from cogs.wallet import WalletAPI
from cogs.utils import Utils, num_format_coin
class MyMathBtn(disnake.ui.Button):
    """A single math-tip answer button; forwards label, style and custom_id."""
    def __init__(self, label, _style, _custom_id):
        super().__init__(label=label, style=_style, custom_id=_custom_id)
class MathButton(disnake.ui.View):
    """View holding the shuffled answer buttons of a /mathtip message.

    When the answer window expires, ``on_timeout`` disables the buttons,
    highlights the correct answer, credits each correct responder an equal
    share of the tip, and marks the tip COMPLETED.
    """
    # message the view is attached to (assigned by the creator after send)
    message: disnake.Message
    # index of the correct answer inside the shuffled button list
    a_index: int
    # coin metadata container (attribute access per coin name)
    coin_list: Dict
    def __init__(self, bot, ctx, answer_list, answer_index: int, timeout: float, coin_list):
        super().__init__(timeout=timeout)
        i = 0
        self.bot = bot
        self.utils = Utils(self.bot)
        self.a_index = answer_index
        self.coin_list = coin_list
        self.ctx = ctx
        # One green button per shuffled answer; the custom_id encodes its slot.
        for name in answer_list:
            custom_id = "mathtip_answers_" + str(i)
            self.add_item(MyMathBtn(name, ButtonStyle.green, custom_id))
            i += 1
    async def on_timeout(self):
        """Settle the math tip once the countdown expires."""
        # Disable every button and color the correct one red.
        i = 0
        for child in self.children:
            if isinstance(child, disnake.ui.Button):
                child.disabled = True
                if i == self.a_index:
                    child.style = ButtonStyle.red
            i += 1
        ## Update content
        get_mathtip = None
        try:
            original_message = await self.ctx.original_message()
            get_mathtip = await store.get_discord_mathtip_by_msgid(str(original_message.id))
        except Exception:
            traceback.print_exc(file=sys.stdout)
            return
        if get_mathtip is None:
            await logchanbot(f"[ERROR MATH TIP] Failed timeout in guild {self.ctx.guild.name} / {self.ctx.guild.id}!")
            return
        if get_mathtip['status'] == "ONGOING":
            # Gather responders and split the tip equally among correct ones.
            answered_msg_id = await store.get_math_responders_by_message_id(str(self.message.id))
            amount = get_mathtip['real_amount']
            coin_name = get_mathtip['token_name']
            owner_displayname = get_mathtip['from_username']
            total_answer = answered_msg_id['total']
            coin_decimal = getattr(getattr(self.coin_list, coin_name), "decimal")
            contract = getattr(getattr(self.coin_list, coin_name), "contract")
            token_display = getattr(getattr(self.coin_list, coin_name), "display_name")
            price_with = getattr(getattr(self.coin_list, coin_name), "price_with")
            coin_emoji = getattr(getattr(self.coin_list, coin_name), "coin_emoji_discord")
            coin_emoji = coin_emoji + " " if coin_emoji else ""
            # With no winners the full amount is displayed (nothing is paid out).
            indiv_amount_str = num_format_coin(truncate(amount / len(answered_msg_id['right_ids']), 12)) if \
                len(answered_msg_id['right_ids']) > 0 else num_format_coin(truncate(amount, 12))
            indiv_amount = truncate(amount / len(answered_msg_id['right_ids']), 12) if \
                len(answered_msg_id['right_ids']) > 0 else truncate(amount, 12)
            amount_in_usd = 0.0
            each_amount_in_usd = 0.0
            total_equivalent_usd = ""
            per_unit = None
            if price_with:
                # USD figure is based on the unit price captured at tip creation.
                per_unit = get_mathtip['unit_price_usd']
                if per_unit and per_unit > 0 and len(answered_msg_id['right_ids']) > 0:
                    each_amount_in_usd = per_unit * float(indiv_amount)
                    if each_amount_in_usd > 0.0001:
                        num = len(answered_msg_id['right_ids']) if len(answered_msg_id['right_ids']) > 0 else 1
                        total_equivalent_usd = " ~ {:,.4f} USD".format(each_amount_in_usd * num)
                elif per_unit and per_unit > 0 and len(answered_msg_id['right_ids']) == 0:
                    each_amount_in_usd = per_unit * float(indiv_amount)
                    total_equivalent_usd = " ~ {:,.4f} USD".format(each_amount_in_usd)
            embed = disnake.Embed(
                title=f"🧮 Math Tip {coin_emoji}{num_format_coin(amount)} "\
                    f"{token_display} {total_equivalent_usd} - Total answer {total_answer}",
                description=get_mathtip['eval_content'],
                timestamp=datetime.fromtimestamp(get_mathtip['math_endtime']
                )
            )
            embed.add_field(
                name="Correct answer",
                value=get_mathtip['eval_answer'],
                inline=False
            )
            embed.add_field(
                name="Correct ( {} )".format(len(answered_msg_id['right_ids'])),
                value="{}".format(
                    " | ".join(answered_msg_id['right_names']) if len(answered_msg_id['right_names']) > 0 else "N/A"),
                inline=False
            )
            embed.add_field(
                name="Incorrect ( {} )".format(len(answered_msg_id['wrong_ids'])),
                value="{}".format(
                    " | ".join(answered_msg_id['wrong_names']) if len(answered_msg_id['wrong_names']) > 0 else "N/A"),
                inline=False
            )
            if len(answered_msg_id['right_ids']) > 0:
                embed.add_field(
                    name='Each Winner Receives:',
                    value=f"{coin_emoji}{indiv_amount_str} {token_display}",
                    inline=True
                )
            embed.set_footer(text=f"MathTip by {owner_displayname}")
            if len(answered_msg_id['right_ids']) > 0:
                # Move the balance from the tipper to all correct responders.
                await store.sql_user_balance_mv_multiple(
                    get_mathtip['from_userid'], answered_msg_id['right_ids'],
                    get_mathtip['guild_id'], get_mathtip['channel_id'],
                    float(indiv_amount), coin_name, "MATHTIP", coin_decimal, SERVER_BOT, contract,
                    float(each_amount_in_usd), None
                )
            # Change status
            change_status = await store.discord_mathtip_update(get_mathtip['message_id'], "COMPLETED")
            await original_message.edit(embed=embed, view=self)
        else:
            # Already settled elsewhere — only refresh the disabled buttons.
            await original_message.edit(view=self)
class MathTips(commands.Cog):
    """Cog implementing the /mathtip slash command.

    A math tip posts a math expression with four shuffled answer buttons;
    users who pick the correct answer within the duration split the tip.
    """
    def __init__(self, bot):
        self.bot = bot
        self.wallet_api = WalletAPI(self.bot)
        self.utils = Utils(self.bot)
        # allowed answer-window bounds, in seconds
        self.math_duration_min = 5
        self.math_duration_max = 45
        self.max_ongoing_by_user = self.bot.config['discord']['max_ongoing_by_user']
        self.max_ongoing_by_guild = self.bot.config['discord']['max_ongoing_by_guild']
    async def async_mathtip(self, ctx, amount: str, token: str, duration: str, math_exp: str = None):
        """Validate inputs, evaluate the expression, and post the math tip.

        amount may be a number, "ALL", or a $-prefixed/suffixed USD value;
        duration accepts plain seconds or h/mn/s suffixes; math_exp is a
        restricted arithmetic expression evaluated with numexpr.
        """
        coin_name = token.upper()
        # resolve coin aliases to their canonical ticker
        if len(self.bot.coin_alias_names) > 0 and coin_name in self.bot.coin_alias_names:
            coin_name = self.bot.coin_alias_names[coin_name]
        # Token name check
        if not hasattr(self.bot.coin_list, coin_name):
            msg = f'{ctx.author.mention}, **{coin_name}** does not exist with us.'
            await ctx.response.send_message(msg)
            return
        # End token name check
        await ctx.response.send_message(f"{ctx.author.mention}, /mathtip preparation... ")
        # guild settings may restrict which coins can be tipped here
        serverinfo = self.bot.other_data['guild_list'].get(str(ctx.guild.id))
        if serverinfo and serverinfo['tiponly'] and serverinfo['tiponly'] != "ALLCOIN" and coin_name not in serverinfo[
            'tiponly'].split(","):
            allowed_coins = serverinfo['tiponly']
            msg = f"{ctx.author.mention}, **{coin_name}** is not allowed here. Currently, allowed `{allowed_coins}`. "\
                "You can ask guild owner to allow. `/SETTING TIPONLY coin1,coin2,...`"
            await ctx.edit_original_message(content=msg)
            return
        try:
            # record command usage for stats (best-effort)
            self.bot.commandings.append((str(ctx.guild.id) if hasattr(ctx, "guild") and hasattr(ctx.guild, "id") else "DM",
                                         str(ctx.author.id), SERVER_BOT, "/mathtip", int(time.time())))
            await self.utils.add_command_calls()
        except Exception:
            traceback.print_exc(file=sys.stdout)
        # check lock
        try:
            is_user_locked = self.utils.is_locked_user(str(ctx.author.id), SERVER_BOT)
            if is_user_locked is True:
                await ctx.edit_original_message(
                    content = f"{EMOJI_RED_NO} {ctx.author.mention}, your account is locked for using the Bot. "\
                        "Please contact bot dev by /about link."
                )
                return
        except Exception:
            traceback.print_exc(file=sys.stdout)
        # end check lock
        # Check if there is many airdrop/mathtip/triviatip
        try:
            count_ongoing = await store.discord_freetip_ongoing(str(ctx.author.id), "ONGOING")
            if count_ongoing >= self.max_ongoing_by_user and \
                ctx.author.id != self.bot.config['discord']['owner_id']:
                msg = f"{EMOJI_INFORMATION} {ctx.author.mention}, you still have some ongoing tips. "\
                    f"Please wait for them to complete first!"
                await ctx.edit_original_message(content=msg)
                return
            count_ongoing = await store.discord_freetip_ongoing_guild(str(ctx.guild.id), "ONGOING")
            # Check max if set in guild
            if serverinfo and count_ongoing >= serverinfo['max_ongoing_drop'] and\
                ctx.author.id != self.bot.config['discord']['owner_id']:
                msg = f"{EMOJI_INFORMATION} {ctx.author.mention}, there are still some ongoing drops"\
                    f" or tips in this guild. Please wait for them to complete first!"
                await ctx.edit_original_message(content=msg)
                return
            elif serverinfo is None and count_ongoing >= self.max_ongoing_by_guild and\
                ctx.author.id != self.bot.config['discord']['owner_id']:
                msg = f"{EMOJI_INFORMATION} {ctx.author.mention}, there are still some ongoing drops or"\
                    f" tips in this guild. Please wait for them to complete first!"
                await ctx.edit_original_message(content=msg)
                await logchanbot(f"[MATHTIP] server {str(ctx.guild.id)} has no data in discord_server.")
                return
        except Exception:
            traceback.print_exc(file=sys.stdout)
        # End of ongoing check
        try:
            # pull coin parameters and ensure the tipper has a wallet record
            token_display = getattr(getattr(self.bot.coin_list, coin_name), "display_name")
            contract = getattr(getattr(self.bot.coin_list, coin_name), "contract")
            try:
                coin_emoji = ""
                if ctx.guild.get_member(int(self.bot.user.id)).guild_permissions.external_emojis is True:
                    coin_emoji = getattr(getattr(self.bot.coin_list, coin_name), "coin_emoji_discord")
                    coin_emoji = coin_emoji + " " if coin_emoji else ""
            except Exception:
                traceback.print_exc(file=sys.stdout)
            net_name = getattr(getattr(self.bot.coin_list, coin_name), "net_name")
            type_coin = getattr(getattr(self.bot.coin_list, coin_name), "type")
            deposit_confirm_depth = getattr(getattr(self.bot.coin_list, coin_name), "deposit_confirm_depth")
            coin_decimal = getattr(getattr(self.bot.coin_list, coin_name), "decimal")
            min_tip = getattr(getattr(self.bot.coin_list, coin_name), "real_min_tip")
            max_tip = getattr(getattr(self.bot.coin_list, coin_name), "real_max_tip")
            price_with = getattr(getattr(self.bot.coin_list, coin_name), "price_with")
            get_deposit = await self.wallet_api.sql_get_userwallet(
                str(ctx.author.id), coin_name, net_name, type_coin, SERVER_BOT, 0
            )
            if get_deposit is None:
                get_deposit = await self.wallet_api.sql_register_user(
                    str(ctx.author.id), coin_name, net_name, type_coin, SERVER_BOT, 0
                )
            wallet_address = get_deposit['balance_wallet_address']
            # some chains key balances on a payment id / destination tag instead
            if type_coin in ["TRTL-API", "TRTL-SERVICE", "BCN", "XMR"]:
                wallet_address = get_deposit['paymentid']
            elif type_coin in ["XRP"]:
                wallet_address = get_deposit['destination_tag']
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = f"{EMOJI_RED_NO} {ctx.author.mention}, some internal error. Please try again."
            await ctx.edit_original_message(content=msg)
            return
        height = await self.wallet_api.get_block_height(type_coin, coin_name, net_name)
        # check if amount is all
        all_amount = False
        if not amount.isdigit() and amount.upper() == "ALL":
            all_amount = True
            userdata_balance = await store.sql_user_balance_single(
                str(ctx.author.id), coin_name, wallet_address, type_coin,
                height, deposit_confirm_depth, SERVER_BOT
            )
            amount = float(userdata_balance['adjust'])
        # If $ is in amount, let's convert to coin/token
        elif "$" in amount[-1] or "$" in amount[0]: # last is $
            # Check if conversion is allowed for this coin.
            amount = amount.replace(",", "").replace("$", "")
            if price_with is None:
                msg = f"{EMOJI_RED_NO} {ctx.author.mention}, dollar conversion is not "\
                    f"enabled for this `{coin_name}`."
                await ctx.edit_original_message(content=msg)
                return
            else:
                per_unit = await self.utils.get_coin_price(coin_name, price_with)
                if per_unit and per_unit['price'] and per_unit['price'] > 0:
                    per_unit = per_unit['price']
                    amount = float(Decimal(amount) / Decimal(per_unit))
                else:
                    msg = f"{EMOJI_RED_NO} {ctx.author.mention}, I cannot fetch equivalent price. "\
                        f"Try with different method."
                    await ctx.edit_original_message(content=msg)
                    return
        else:
            # plain number, possibly with suffixes handled by text_to_num
            amount = amount.replace(",", "")
            amount = text_to_num(amount)
            if amount is None:
                msg = f'{EMOJI_RED_NO} {ctx.author.mention}, invalid given amount.'
                await ctx.edit_original_message(content=msg)
                return
        # end of check if amount is all
        # Check if tx in progress
        if str(ctx.author.id) in self.bot.tipping_in_progress and \
            int(time.time()) - self.bot.tipping_in_progress[str(ctx.author.id)] < 150:
            msg = f"{EMOJI_ERROR} {ctx.author.mention}, you have another transaction in progress."
            await ctx.edit_original_message(content=msg)
            return
        try:
            amount = float(amount)
        except ValueError:
            msg = f'{EMOJI_RED_NO} {ctx.author.mention}, invalid amount.'
            await ctx.edit_original_message(content=msg)
            return
        if amount <= 0:
            msg = f'{EMOJI_RED_NO} {ctx.author.mention}, please get more {token_display}.'
            await ctx.edit_original_message(content=msg)
            return
        def hms_to_seconds(time_string):
            # Parse "1h 2mn 3s" style strings (or a bare number of seconds);
            # returns 0 when nothing could be parsed.
            duration_in_second = 0
            if time_string.isdigit():
                return int(time_string)
            try:
                time_string = time_string.replace("hours", "h")
                time_string = time_string.replace("hour", "h")
                time_string = time_string.replace("hrs", "h")
                time_string = time_string.replace("hr", "h")
                time_string = time_string.replace("minutes", "mn")
                time_string = time_string.replace("mns", "mn")
                time_string = time_string.replace("mins", "mn")
                time_string = time_string.replace("min", "mn")
                time_string = time_string.replace("mn", "mn")
                mult = {'h': 60 * 60, 'mn': 60, 's': 1}
                duration_in_second = sum(
                    int(num) * mult.get(val, 1) for num, val in re.findall('(\d+)(\w+)', time_string))
            except Exception:
                traceback.print_exc(file=sys.stdout)
            return duration_in_second
        default_duration = 60
        duration_s = 0
        try:
            duration_s = hms_to_seconds(duration)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = f'{EMOJI_RED_NO} {ctx.author.mention}, invalid duration.'
            await ctx.edit_original_message(content=msg)
            return
        if duration_s == 0:
            # Skip message
            duration_s = default_duration
            # Just info, continue
        elif duration_s < self.math_duration_min or duration_s > self.math_duration_max:
            msg = f"{EMOJI_RED_NO} {ctx.author.mention}, invalid duration. "\
                f"Please use between {str(self.math_duration_min)}s to {str(self.math_duration_max)}s."
            await ctx.edit_original_message(content=msg)
            return
        try:
            amount = float(amount)
        except ValueError:
            msg = f'{EMOJI_RED_NO} {ctx.author.mention}, invalid amount.'
            await ctx.edit_original_message(content=msg)
            return
        result_float = None
        wrong_answer_1 = None
        wrong_answer_2 = None
        wrong_answer_3 = None
        eval_string_original = ""
        if math_exp and len(math_exp) > 0:
            # Whitelist-validate the expression, then evaluate with numexpr
            # (not eval) and derive three plausible wrong answers.
            eval_string_original = math_exp
            math_exp = math_exp.replace(",", "").replace(" ", "")
            supported_function = ['+', '-', '*', '/', '(', ')', '.', ',', '!', '^']
            additional_support = ['exp', 'sqrt', 'abs', 'log10', 'log', 'sinh', 'cosh', 'tanh', 'sin', 'cos', 'tan']
            has_operation = False
            for each_op in ['exp', 'sqrt', 'abs', 'log10', 'log', 'sinh', 'cosh', 'tanh', 'sin', 'cos', 'tan', '+', '-',
                            '*', '/', '!', '^']:
                if each_op in math_exp: has_operation = True
            if has_operation is False:
                msg = f'{EMOJI_RED_NO} {ctx.author.mention}, nothing to calculate.'
                await ctx.edit_original_message(content=msg)
                return
            test_string = math_exp
            for each in additional_support:
                test_string = test_string.replace(each, "")
            if all([c.isdigit() or c in supported_function for c in test_string]):
                try:
                    result = numexpr.evaluate(math_exp).item()
                    listrand = [2, 3, 4, 5, 6, 7, 8, 9, 10]
                    # OK have result. Check it if it's bigger than 10**10 or below 0.0001
                    if abs(result) > 10 ** 10:
                        msg = f'{EMOJI_RED_NO} Result for `{eval_string_original}` is too big.'
                        await ctx.edit_original_message(content=msg)
                        return
                    elif abs(result) < 0.0001:
                        msg = f'{EMOJI_RED_NO} Result for `{eval_string_original}` is too small.'
                        await ctx.edit_original_message(content=msg)
                        return
                    else:
                        # store result in float XX.XXXX
                        if result >= 0:
                            result_float = truncate(float(result), 4)
                            wrong_answer_1 = truncate(float(result * random.choice(listrand)), 4)
                            wrong_answer_2 = truncate(float(result + random.choice(listrand)), 4)
                            wrong_answer_3 = truncate(float(result - random.choice(listrand)), 4)
                        else:
                            result_float = - abs(truncate(float(result), 4))
                            wrong_answer_1 = - abs(truncate(float(result * random.choice(listrand)), 4))
                            wrong_answer_2 = - abs(truncate(float(result + random.choice(listrand)), 4))
                            wrong_answer_3 = - abs(truncate(float(result - random.choice(listrand)), 4))
                except Exception:
                    msg = f'{EMOJI_RED_NO}, invalid result for `{eval_string_original}`.'
                    await ctx.edit_original_message(content=msg)
                    return
            else:
                msg = f'{EMOJI_ERROR} {ctx.author.mention}, unsupported usage for `{eval_string_original}`.'
                await ctx.edit_original_message(content=msg)
                return
        else:
            msg = f'{EMOJI_RED_NO} {ctx.author.mention}, invalid math expression.'
            await ctx.edit_original_message(content=msg)
            return
        userdata_balance = await store.sql_user_balance_single(
            str(ctx.author.id), coin_name, wallet_address, type_coin,
            height, deposit_confirm_depth, SERVER_BOT
        )
        actual_balance = float(userdata_balance['adjust'])
        if amount > max_tip or amount < min_tip:
            msg = f"{EMOJI_RED_NO} {ctx.author.mention}, transactions cannot be bigger "\
                f"than **{num_format_coin(max_tip)} {token_display}** "\
                f"or smaller than **{num_format_coin(min_tip)} {token_display}**."
            await ctx.edit_original_message(content=msg)
            return
        elif amount > actual_balance:
            msg = f"{EMOJI_RED_NO} {ctx.author.mention}, insufficient balance to do a math tip of "\
                f"**{num_format_coin(amount)} {token_display}**."
            await ctx.edit_original_message(content=msg)
            return
        ## add to queue
        if str(ctx.author.id) not in self.bot.tipping_in_progress:
            self.bot.tipping_in_progress[str(ctx.author.id)] = int(time.time())
        equivalent_usd = ""
        total_in_usd = 0.0
        per_unit = None
        if price_with:
            # presumably token price is looked up via a hint -> paprika id;
            # falls back to the bare symbol table
            native_token_name = getattr(getattr(self.bot.coin_list, coin_name), "native_token_name")
            coin_name_for_price = coin_name
            if native_token_name:
                coin_name_for_price = native_token_name
            if coin_name_for_price in self.bot.token_hints:
                id = self.bot.token_hints[coin_name_for_price]['ticker_name']
                per_unit = self.bot.coin_paprika_id_list[id]['price_usd']
            else:
                per_unit = self.bot.coin_paprika_symbol_list[coin_name_for_price]['price_usd']
            if per_unit and per_unit > 0:
                total_in_usd = float(Decimal(amount) * Decimal(per_unit))
                if total_in_usd >= 0.0001:
                    equivalent_usd = " ~ {:,.4f} USD".format(total_in_usd)
        owner_displayname = "{}#{}".format(ctx.author.name, ctx.author.discriminator)
        embed = disnake.Embed(
            title=f"🧮 Math Tip {coin_emoji}{num_format_coin(amount)} {token_display} {equivalent_usd}",
            description=eval_string_original, timestamp=datetime.fromtimestamp(int(time.time()) + duration_s))
        embed.add_field(
            name="Answering",
            value="None",
            inline=False
        )
        embed.set_footer(text=f"Math tip by {owner_displayname}")
        # shuffle the four answers; remember where the correct one landed
        answers = [str(result_float), str(wrong_answer_1), str(wrong_answer_2), str(wrong_answer_3)]
        random.shuffle(answers)
        index_answer = answers.index(str(result_float))
        try:
            view = MathButton(self.bot, ctx, answers, index_answer, duration_s, self.bot.coin_list)
            view.message = await ctx.original_message()
            await store.insert_discord_mathtip(
                coin_name, contract, str(ctx.author.id),
                owner_displayname, str(view.message.id),
                eval_string_original, result_float, wrong_answer_1,
                wrong_answer_2, wrong_answer_3, str(ctx.guild.id),
                str(ctx.channel.id), amount, total_in_usd,
                equivalent_usd, per_unit, coin_decimal,
                int(time.time()) + duration_s, net_name
            )
            await ctx.edit_original_message(content=None, embed=embed, view=view)
        except disnake.errors.Forbidden:
            await ctx.edit_original_message(content="Missing permission! Or failed to send embed message.")
        except Exception:
            traceback.print_exc(file=sys.stdout)
        try:
            # release the per-user tipping lock
            del self.bot.tipping_in_progress[str(ctx.author.id)]
        except Exception:
            pass
    @commands.guild_only()
    @commands.bot_has_permissions(send_messages=True)
    @commands.slash_command(
        dm_permission=False,
        usage='mathtip <amount> <token> <duration> <math expression>',
        options=[
            Option('amount', 'amount', OptionType.string, required=True),
            Option('token', 'token', OptionType.string, required=True),
            Option('duration', 'duration', OptionType.string, required=True),
            Option('math_exp', 'math_exp', OptionType.string, required=True)
        ],
        description="Spread math tip by user's answer"
    )
    async def mathtip(
        self,
        ctx,
        amount: str,
        token: str,
        duration: str,
        math_exp: str
    ):
        """Slash-command entry point; all work happens in async_mathtip."""
        await self.async_mathtip(ctx, amount, token, duration, math_exp)
    @mathtip.autocomplete("token")
    async def mathtip_token_name_autocomp(self, inter: disnake.CommandInteraction, string: str):
        """Suggest up to 10 coin names containing the typed substring."""
        string = string.lower()
        return [name for name in self.bot.coin_name_list if string in name.lower()][:10]
def setup(bot):
    # disnake/discord.py extension entry point: register the cog on the bot.
    bot.add_cog(MathTips(bot))
|
wrkzcoin/TipBot
|
wrkzcoin_tipbot/cogs/mathtip.py
|
mathtip.py
|
py
| 26,213
|
python
|
en
|
code
| 137
|
github-code
|
6
|
21813222206
|
LEFT = 0
RIGHT = 1
DATA = 2
node = [
[1, 2, "38.5℃以上のねつがある?"],
[3, 4, "胸がヒリヒリする"],
[5, 6, "元気がある?"],
[None, None, "速攻病院"],
[None, None, "解熱剤で病院"],
[None, None, "様子を見る"],
[None, None, "氷枕で病院"]
]
MAX = len(node)
a = 0
while True:
print(node[a][DATA], end="")
s = input("(y/n)")
if s == "":
break
if s == "y":
a = node[a][LEFT]
if s == "n":
a = node[a][RIGHT]
if node[a][LEFT] == None and node[a][RIGHT] == None:
print("診断結果:",node[a][DATA])
break
|
itc-s21007/algrithm_class
|
練習問題/YesOrNo.py
|
YesOrNo.py
|
py
| 641
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18195204561
|
#implementation of doubly link list
#-------implementation of node class------
class Node:
    """A doubly linked list node.

    The class attributes ``head`` and ``tail`` act as module-wide pointers
    to the ends of the single shared list that the functions below operate on.
    """
    head = None
    tail = None
    def __init__(self, data):
        self.key = data    # payload value
        self.prev = None   # previous node (None at head)
        self.next = None   # next node (None at tail)
#--------Insert function-------------
def insert(data):
    """Append a node holding *data* at the tail of the shared list."""
    node = Node(data)
    if Node.head is None:
        # empty list: the new node is both head and tail
        Node.head = node
        Node.tail = node
    else:
        node.prev = Node.tail
        Node.tail.next = node
        Node.tail = node
#---------Insert at begin----------------
def insert_at_begin(data):
    """Prepend a node holding *data* at the head of the shared list."""
    if Node.head is None:
        # empty list: regular insert already sets head and tail
        insert(data)
        return
    node = Node(data)
    node.next = Node.head
    Node.head.prev = node
    Node.head = node
#--------Inser at a position------------
def insert_at_pos(data, pos):
    """Insert *data* at 1-based position *pos*; prints on invalid positions."""
    if Node.head is None or pos < 1:
        print("wrong position")
    elif pos == 1:
        insert_at_begin(data)
    else:
        temp = Node(data)
        curr = Node.head
        for index in range(pos - 2):
            curr = curr.next
            if curr is None:
                print("wrong position")
                return
        temp.next = curr.next
        temp.prev = curr
        curr.next = temp
        # Bug fix: the original never updated the displaced successor's
        # ``prev`` back-link, and left ``Node.tail`` stale when inserting
        # at the end of the list.
        if temp.next is not None:
            temp.next.prev = temp
        else:
            Node.tail = temp
#--------delete a node---------------------
def delete():
    """Remove the tail node; prints a message when the list is empty."""
    if Node.head is None:
        print("Nothing to delete")
    elif Node.head is Node.tail:
        # Bug fix: deleting the only node used to set Node.tail to None and
        # then crash on ``None.next``; it also never cleared Node.head.
        Node.head = None
        Node.tail = None
    else:
        Node.tail = Node.tail.prev
        Node.tail.next = None
#---------delete at begin-------------------
def delete_first():
    """Remove the head node; prints a message when the list is empty."""
    if Node.head is None:
        print("Nothing to delete")
    else:
        Node.head = Node.head.next
        if Node.head is None:
            # Bug fix: deleting the only node used to crash on ``None.prev``
            # and left Node.tail pointing at the removed node.
            Node.tail = None
        else:
            Node.head.prev = None
#--------delete at pos----------------------
def delete_at_pos(pos):
    """Delete the node at 1-based position *pos*; prints on invalid positions."""
    if Node.head is None or pos < 1:
        print("wrong pos")
    elif pos == 1:
        delete_first()
    else:
        curr = Node.head
        for index in range(pos - 2):
            curr = curr.next
            # Bug fix: the original walked past the end and crashed on
            # ``None.next`` for out-of-range positions.
            if curr is None:
                print("wrong pos")
                return
        if curr.next is None:
            print("wrong pos")
            return
        nextNode = curr.next.next
        curr.next = nextNode
        if nextNode is not None:
            nextNode.prev = curr
        else:
            # Bug fix: deleting the tail must also move the tail pointer;
            # the original left Node.tail pointing at the removed node.
            Node.tail = curr
#---------traverse function-----------------
def traverse():
    """Print every key from head to tail, one per line."""
    node = Node.head
    while node is not None:
        print(node.key)
        node = node.next
#---------search a node----------------------
def search(data):
    """Print the 1-based index of the first node whose key equals *data*.

    Prints "List is empty" for an empty list and "-1" when not found.
    """
    if Node.head is None:
        print("List is empty")
        return
    index = 0
    curr = Node.head
    while curr is not None:
        index += 1
        if curr.key == data:
            print(index)
            return
        curr = curr.next
    # Bug fix: the original tested ``curr.next == Node.head`` (a circular-list
    # check) inside the loop, so a miss on this non-circular list was never
    # reported; report it once the walk falls off the end.
    print("-1")
#-------reverse the doubly link list---------
def reverse():
    """Reverse the list by draining all keys onto a stack and re-inserting."""
    if Node.head is None or Node.head.next is None:
        # empty or single-element list is already its own reverse
        return
    keys = []
    node = Node.head
    while node is not None:
        keys.append(node.key)
        node = node.next
    # rebuild from scratch; insert() resets head/tail on the first call
    Node.head = None
    while keys:
        insert(keys.pop())
# Demo driver: build the list 10..60, delete the 6th node (60),
# print the list, reverse it, and print again.
insert(10)
insert(20)
insert(30)
insert(40)
insert(50)
insert(60)
delete_at_pos(6)
traverse()
reverse()
traverse()
|
Sjasvin93/datastructures-with-python
|
doubly_linked_list.py
|
doubly_linked_list.py
|
py
| 3,122
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30357800081
|
from math import log10
from pyface.qt import QtCore, QtGui
from traits.api import TraitError, Str, Float, Any, Bool
from .editor_factory import TextEditor
from .editor import Editor
from .constants import OKColor, ErrorColor
from .helper import IconButton
# -------------------------------------------------------------------------
# 'BaseRangeEditor' class:
# -------------------------------------------------------------------------
class BaseRangeEditor(Editor):
    """The base class for Range editors. Using an evaluate trait, if specified,
    when assigning numbers the object trait.
    """
    # -------------------------------------------------------------------------
    # Trait definitions:
    # -------------------------------------------------------------------------
    #: Function to evaluate floats/ints
    evaluate = Any()
    def _set_value(self, value):
        """Assign *value* to the object trait, first coercing it through the
        optional ``evaluate`` callable when one is set.
        """
        if self.evaluate is not None:
            value = self.evaluate(value)
        Editor._set_value(self, value)
class SimpleSliderEditor(BaseRangeEditor):
"""Simple style of range editor that displays a slider and a text field.
The user can set a value either by moving the slider or by typing a value
in the text field.
"""
# -------------------------------------------------------------------------
# Trait definitions:
# -------------------------------------------------------------------------
#: Low value for the slider range
low = Any()
#: High value for the slider range
high = Any()
def init(self, parent):
"""Finishes initializing the editor by creating the underlying toolkit
widget.
"""
factory = self.factory
if not factory.low_name:
self.low = factory.low
if not factory.high_name:
self.high = factory.high
self.evaluate = factory.evaluate
self.sync_value(factory.evaluate_name, "evaluate", "from")
self.sync_value(factory.low_name, "low", "from")
self.sync_value(factory.high_name, "high", "from")
self.control = QtGui.QWidget()
panel = QtGui.QHBoxLayout(self.control)
panel.setContentsMargins(0, 0, 0, 0)
fvalue = self.value
try:
if not (self.low <= fvalue <= self.high):
fvalue = self.low
fvalue_text = self.string_value(fvalue)
except:
fvalue_text = ""
fvalue = self.low
ivalue = self._convert_to_slider(fvalue)
self._label_lo = QtGui.QLabel()
self._label_lo.setAlignment(
QtCore.Qt.AlignmentFlag.AlignRight | QtCore.Qt.AlignmentFlag.AlignVCenter
)
if factory.label_width > 0:
self._label_lo.setMinimumWidth(int(factory.label_width))
panel.addWidget(self._label_lo)
self.control.slider = slider = QtGui.QSlider(QtCore.Qt.Orientation.Horizontal)
slider.setTracking(factory.auto_set)
slider.setMinimum(0)
slider.setMaximum(10000)
slider.setPageStep(1000)
slider.setSingleStep(100)
slider.setValue(ivalue)
slider.valueChanged.connect(self.update_object_on_scroll)
panel.addWidget(slider)
self._label_hi = QtGui.QLabel()
panel.addWidget(self._label_hi)
if factory.label_width > 0:
self._label_hi.setMinimumWidth(int(factory.label_width))
self.control.text = text = QtGui.QLineEdit(fvalue_text)
text.editingFinished.connect(self.update_object_on_enter)
# The default size is a bit too big and probably doesn't need to grow.
sh = text.sizeHint()
sh.setWidth(sh.width() // 2)
text.setMaximumSize(sh)
panel.addWidget(text)
low_label = factory.low_label
if factory.low_name != "":
low_label = self.string_value(self.low)
high_label = factory.high_label
if factory.high_name != "":
high_label = self.string_value(self.high)
self._label_lo.setText(low_label)
self._label_hi.setText(high_label)
self.set_tooltip(slider)
self.set_tooltip(self._label_lo)
self.set_tooltip(self._label_hi)
self.set_tooltip(text)
def update_object_on_scroll(self, pos):
"""Handles the user changing the current slider value."""
value = self._convert_from_slider(pos)
blocked = self.control.text.blockSignals(True)
try:
self.value = value
self.control.text.setText(self.string_value(value))
except TraitError as exc:
from traitsui.api import raise_to_debug
raise_to_debug()
finally:
self.control.text.blockSignals(blocked)
def update_object_on_enter(self):
"""Handles the user pressing the Enter key in the text field."""
# It is possible the event is processed after the control is removed
# from the editor
if self.control is None:
return
try:
value = eval(str(self.control.text.text()).strip())
except Exception as ex:
# They entered something that didn't eval as a number, (e.g.,
# 'foo') pretend it didn't happen
value = self.value
self.control.text.setText(str(value))
# for compound editor, value may be non-numeric
if not isinstance(value, (int, float)):
return
if not self.factory.is_float:
value = int(value)
# If setting the value yields an error, the resulting error dialog
# stealing focus could trigger another editingFinished signal so we
# block signals here
blocked = self.control.text.blockSignals(True)
try:
self.value = value
blocked = self.control.slider.blockSignals(True)
try:
self.control.slider.setValue(
self._convert_to_slider(self.value)
)
finally:
self.control.slider.blockSignals(blocked)
except TraitError:
# They entered something invalid, pretend it didn't happen
value = self.value
self.control.text.setText(str(value))
finally:
self.control.text.blockSignals(blocked)
    def update_editor(self):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        value = self.value
        low = self.low
        high = self.high
        try:
            text = self.string_value(value)
            # Deliberate trick: raises ZeroDivisionError when ``value`` is
            # outside [low, high], funnelling out-of-range values into the
            # except clause below (blank text, value clamped to low).
            1 / (low <= value <= high)
        except:
            text = ""
            value = low
        ivalue = self._convert_to_slider(value)
        self.control.text.setText(text)
        # Block signals so programmatic setValue() does not re-enter
        # update_object_on_scroll().
        blocked = self.control.slider.blockSignals(True)
        try:
            self.control.slider.setValue(ivalue)
        finally:
            self.control.slider.blockSignals(blocked)
def get_error_control(self):
"""Returns the editor's control for indicating error status."""
return self.control.text
    def _low_changed(self, low):
        """Trait handler: the lower bound changed; clamp the value into the
        new range, refresh the low label and redraw the editor."""
        if self.value < low:
            if self.factory.is_float:
                self.value = float(low)
            else:
                self.value = int(low)
        if self._label_lo is not None:
            self._label_lo.setText(self.string_value(low))
        self.update_editor()
    def _high_changed(self, high):
        """Trait handler: the upper bound changed; clamp the value into the
        new range, refresh the high label and redraw the editor."""
        if self.value > high:
            if self.factory.is_float:
                self.value = float(high)
            else:
                self.value = int(high)
        if self._label_hi is not None:
            self._label_hi.setText(self.string_value(high))
        self.update_editor()
def _convert_to_slider(self, value):
"""Returns the slider setting corresponding to the user-supplied value."""
if self.high > self.low:
ivalue = int(
(float(value - self.low) / (self.high - self.low)) * 10000.0
)
else:
ivalue = self.low
if ivalue is None:
ivalue = 0
return ivalue
def _convert_from_slider(self, ivalue):
"""Returns the float or integer value corresponding to the slider
setting.
"""
value = self.low + ((float(ivalue) / 10000.0) * (self.high - self.low))
if not self.factory.is_float:
value = int(round(value))
return value
# -------------------------------------------------------------------------
class LogRangeSliderEditor(SimpleSliderEditor):
    # ---------------------------------------------------------------------
    """Slider editor whose positions are spaced logarithmically.

    The slider still reports integer positions in [0, 10000]; the mapping
    to and from the trait value is done in log10 space, so each slider
    step corresponds to a constant multiplicative factor.
    """

    def _convert_to_slider(self, value):
        """Map *value* to a slider position on the log10 scale."""
        # Clamp below at the low bound: log10 needs a positive argument and
        # values under ``low`` would map to negative positions.
        clamped = max(value, self.low)
        fraction = (log10(clamped) - log10(self.low)) / (
            log10(self.high) - log10(self.low)
        )
        return int(fraction * 10000.0)

    def _convert_from_slider(self, ivalue):
        """Map a slider position back to a float or integer value on the
        log10 scale.
        """
        exponent = float(ivalue) / 10000.0 * (log10(self.high) - log10(self.low))
        # min() guards against floating point error pushing the result past
        # self.high.
        value = min(self.low * 10 ** (exponent), self.high)
        if not self.factory.is_float:
            value = int(round(value))
        return value
class LargeRangeSliderEditor(BaseRangeEditor):
    """A slider editor for large ranges.
    The editor displays a slider and a text field. A subset of the total range
    is displayed in the slider; arrow buttons at each end of the slider let
    the user move the displayed range higher or lower.
    """
    # -------------------------------------------------------------------------
    # Trait definitions:
    # -------------------------------------------------------------------------
    #: Low value for the slider range
    low = Any(0)
    #: High value for the slider range
    high = Any(1)
    #: Low end of displayed range
    cur_low = Float()
    #: High end of displayed range
    cur_high = Float()
    #: Flag indicating that the UI is in the process of being updated
    ui_changing = Bool(False)
    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        factory = self.factory
        # Initialize using the factory range defaults:
        self.low = factory.low
        self.high = factory.high
        self.evaluate = factory.evaluate
        # Hook up the traits to listen to the object.
        self.sync_value(factory.low_name, "low", "from")
        self.sync_value(factory.high_name, "high", "from")
        self.sync_value(factory.evaluate_name, "evaluate", "from")
        self.init_range()
        low = self.cur_low
        high = self.cur_high
        self._set_format()
        self.control = QtGui.QWidget()
        panel = QtGui.QHBoxLayout(self.control)
        panel.setContentsMargins(0, 0, 0, 0)
        fvalue = self.value
        try:
            fvalue_text = self._format % fvalue
            # Deliberate trick: raises ZeroDivisionError when fvalue is
            # outside [low, high], funnelling out-of-range values into the
            # except clause below.
            1 / (low <= fvalue <= high)
        except:
            fvalue_text = ""
            fvalue = low
        if high > low:
            ivalue = int((float(fvalue - low) / (high - low)) * 10000)
        else:
            ivalue = low
        # Lower limit label:
        self.control.label_lo = label_lo = QtGui.QLabel()
        label_lo.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight | QtCore.Qt.AlignmentFlag.AlignVCenter)
        panel.addWidget(label_lo)
        # Lower limit button:
        self.control.button_lo = IconButton(
            QtGui.QStyle.StandardPixmap.SP_ArrowLeft, self.reduce_range
        )
        panel.addWidget(self.control.button_lo)
        # Slider:
        self.control.slider = slider = QtGui.QSlider(QtCore.Qt.Orientation.Horizontal)
        slider.setTracking(factory.auto_set)
        slider.setMinimum(0)
        slider.setMaximum(10000)
        slider.setPageStep(1000)
        slider.setSingleStep(100)
        slider.setValue(ivalue)
        slider.valueChanged.connect(self.update_object_on_scroll)
        panel.addWidget(slider)
        # Upper limit button:
        self.control.button_hi = IconButton(
            QtGui.QStyle.StandardPixmap.SP_ArrowRight, self.increase_range
        )
        panel.addWidget(self.control.button_hi)
        # Upper limit label:
        self.control.label_hi = label_hi = QtGui.QLabel()
        panel.addWidget(label_hi)
        # Text entry:
        self.control.text = text = QtGui.QLineEdit(fvalue_text)
        text.editingFinished.connect(self.update_object_on_enter)
        # The default size is a bit too big and probably doesn't need to grow.
        sh = text.sizeHint()
        sh.setWidth(sh.width() // 2)
        text.setMaximumSize(sh)
        panel.addWidget(text)
        label_lo.setText(str(low))
        label_hi.setText(str(high))
        self.set_tooltip(slider)
        self.set_tooltip(label_lo)
        self.set_tooltip(label_hi)
        self.set_tooltip(text)
        # Update the ranges and button just in case.
        self.update_range_ui()
    def update_object_on_scroll(self, pos):
        """Handles the user changing the current slider value."""
        # Map the 0..10000 slider position onto the *displayed* sub-range
        # [cur_low, cur_high], not the full [low, high] range.
        value = self.cur_low + (
            (float(pos) / 10000.0) * (self.cur_high - self.cur_low)
        )
        self.control.text.setText(self._format % value)
        if self.factory.is_float:
            self.value = value
        else:
            self.value = int(value)
    def update_object_on_enter(self):
        """Handles the user pressing the Enter key in the text field."""
        # It is possible the event is processed after the control is removed
        # from the editor
        if self.control is None:
            return
        try:
            # NOTE(review): eval() of raw user text permits arbitrary
            # expressions; unsafe if the input is untrusted.
            self.value = eval(str(self.control.text.text()).strip())
        except TraitError as excp:
            # NOTE(review): invalid entries are silently ignored and the
            # text field is not reset to the previous value here.
            pass
    def update_editor(self):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        value = self.value
        low = self.low
        high = self.high
        try:
            text = self._format % value
            # Raises ZeroDivisionError when value is out of range (see
            # init()); the value is then clamped to the low bound.
            1 / (low <= value <= high)
        except:
            value = low
            self.value = value
        if not self.ui_changing:
            self.init_range()
        self.update_range_ui()
    def update_range_ui(self):
        """Updates the slider range controls."""
        low, high = self.cur_low, self.cur_high
        value = self.value
        self._set_format()
        self.control.label_lo.setText(self._format % low)
        self.control.label_hi.setText(self._format % high)
        if high > low:
            ivalue = int((float(value - low) / (high - low)) * 10000.0)
        else:
            ivalue = low
        # Block signals so the programmatic setValue() does not re-enter
        # update_object_on_scroll().
        blocked = self.control.slider.blockSignals(True)
        self.control.slider.setValue(ivalue)
        self.control.slider.blockSignals(blocked)
        text = self._format % self.value
        self.control.text.setText(text)
        # Disable an arrow button once the displayed sub-range already
        # touches the corresponding absolute bound.
        self.control.button_lo.setEnabled(low != self.low)
        self.control.button_hi.setEnabled(high != self.high)
    def init_range(self):
        """Initializes the slider range controls."""
        value = self.value
        low, high = self.low, self.high
        # NOTE(review): with only a low bound given, the high bound is set
        # to its negation -- presumably to get a range symmetric about
        # zero; confirm against the factory's defaults.
        if (high is None) and (low is not None):
            high = -low
        mag = abs(value)
        if mag <= 10.0:
            # Small values: show a fixed +/-10 window around the value.
            cur_low = max(value - 10, low)
            cur_high = min(value + 10, high)
        else:
            # Larger values: window size is half the next power of ten
            # above the value's magnitude.
            d = 0.5 * (10 ** int(log10(mag) + 1))
            cur_low = max(low, value - d)
            cur_high = min(high, value + d)
        self.cur_low, self.cur_high = cur_low, cur_high
    def reduce_range(self):
        """Reduces the extent of the displayed range."""
        low, high = self.low, self.high
        if abs(self.cur_low) < 10:
            self.cur_low = max(-10, low)
            self.cur_high = min(10, high)
        elif self.cur_low > 0:
            # Shift down one decade (divide for positive, multiply for
            # negative bounds).
            self.cur_high = self.cur_low
            self.cur_low = max(low, self.cur_low / 10)
        else:
            self.cur_high = self.cur_low
            self.cur_low = max(low, self.cur_low * 10)
        # ui_changing suppresses init_range() while we clamp the value into
        # the new displayed window.
        self.ui_changing = True
        self.value = min(max(self.value, self.cur_low), self.cur_high)
        self.ui_changing = False
        self.update_range_ui()
    def increase_range(self):
        """Increased the extent of the displayed range."""
        low, high = self.low, self.high
        if abs(self.cur_high) < 10:
            self.cur_low = max(-10, low)
            self.cur_high = min(10, high)
        elif self.cur_high > 0:
            # Shift up one decade (mirror image of reduce_range()).
            self.cur_low = self.cur_high
            self.cur_high = min(high, self.cur_high * 10)
        else:
            self.cur_low = self.cur_high
            self.cur_high = min(high, self.cur_high / 10)
        self.ui_changing = True
        self.value = min(max(self.value, self.cur_low), self.cur_high)
        self.ui_changing = False
        self.update_range_ui()
    def _set_format(self):
        """Choose a printf-style display format based on the magnitude of
        the currently displayed sub-range."""
        self._format = "%d"
        factory = self.factory
        low, high = self.cur_low, self.cur_high
        diff = high - low
        if factory.is_float:
            if diff > 99999:
                self._format = "%.2g"
            elif diff > 1:
                # Fewer decimals as the visible span grows.
                self._format = "%%.%df" % max(0, 4 - int(log10(high - low)))
            else:
                self._format = "%.3f"
    def get_error_control(self):
        """Returns the editor's control for indicating error status."""
        return self.control.text
    def _low_changed(self, low):
        # Clamp the current value into the new range, then redraw.
        if self.control is not None:
            if self.value < low:
                if self.factory.is_float:
                    self.value = float(low)
                else:
                    self.value = int(low)
            self.update_editor()
    def _high_changed(self, high):
        # Clamp the current value into the new range, then redraw.
        if self.control is not None:
            if self.value > high:
                if self.factory.is_float:
                    self.value = float(high)
                else:
                    self.value = int(high)
            self.update_editor()
class SimpleSpinEditor(BaseRangeEditor):
    """A simple style of range editor that displays a spin box control."""
    # -------------------------------------------------------------------------
    # Trait definitions:
    # -------------------------------------------------------------------------
    # Low value for the slider range
    low = Any()
    # High value for the slider range
    high = Any()
    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget.
        """
        factory = self.factory
        # Only take the static bounds when no trait name is configured to
        # supply them dynamically.
        if not factory.low_name:
            self.low = factory.low
        if not factory.high_name:
            self.high = factory.high
        self.sync_value(factory.low_name, "low", "from")
        self.sync_value(factory.high_name, "high", "from")
        low = self.low
        high = self.high
        self.control = QtGui.QSpinBox()
        self.control.setMinimum(low)
        self.control.setMaximum(high)
        self.control.setValue(self.value)
        self.control.valueChanged.connect(self.update_object)
        if not factory.auto_set:
            # Without auto_set, only commit on Enter/focus-out rather than
            # on every keystroke.
            self.control.setKeyboardTracking(False)
        self.set_tooltip()
    def update_object(self, value):
        """Handles the user selecting a new value in the spin box."""
        # _locked suppresses the update_editor() echo while the widget's
        # value is pushed into the trait.
        self._locked = True
        try:
            self.value = value
        finally:
            self._locked = False
    def update_editor(self):
        """Updates the editor when the object trait changes externally to the
        editor.
        """
        if not self._locked:
            # Block signals so setValue() does not re-enter update_object().
            blocked = self.control.blockSignals(True)
            try:
                self.control.setValue(int(self.value))
            except Exception:
                from traitsui.api import raise_to_debug
                raise_to_debug()
            finally:
                self.control.blockSignals(blocked)
    def _low_changed(self, low):
        """Trait handler: clamp the value and narrow the widget minimum."""
        if self.value < low:
            if self.factory.is_float:
                self.value = float(low)
            else:
                self.value = int(low)
        if self.control:
            self.control.setMinimum(low)
            self.control.setValue(int(self.value))
    def _high_changed(self, high):
        """Trait handler: clamp the value and widen the widget maximum."""
        if self.value > high:
            if self.factory.is_float:
                self.value = float(high)
            else:
                self.value = int(high)
        if self.control:
            self.control.setMaximum(high)
            self.control.setValue(int(self.value))
class RangeTextEditor(TextEditor):
    """Editor for ranges that displays a text field. If the user enters a
    value that is outside the allowed range, the background of the field
    changes color to indicate an error.
    """
    # -------------------------------------------------------------------------
    # Trait definitions:
    # -------------------------------------------------------------------------
    #: Low value for the slider range
    low = Any()
    #: High value for the slider range
    high = Any()
    #: Function to evaluate floats/ints
    evaluate = Any()
    def init(self, parent):
        """Finishes initializing the editor by creating the underlying toolkit
        widget and syncing the bounds/evaluate traits."""
        TextEditor.init(self, parent)
        factory = self.factory
        # Only take the static bounds when no trait name is configured to
        # supply them dynamically.
        if not factory.low_name:
            self.low = factory.low
        if not factory.high_name:
            self.high = factory.high
        self.evaluate = factory.evaluate
        self.sync_value(factory.evaluate_name, "evaluate", "from")
        self.sync_value(factory.low_name, "low", "from")
        self.sync_value(factory.high_name, "high", "from")
        # force value to start in range
        if self.low is not None and self.low > self.value:
            self.value = self.low
        elif self.high is not None and self.high < self.value:
            # NOTE(review): a too-high starting value is clamped to *low*
            # (when available) rather than to high -- confirm this is the
            # intended behaviour.
            self.value = self.low if self.low is not None else self.high
    def update_object(self):
        """Handles the user entering input data in the edit control."""
        try:
            # NOTE(review): eval() of raw user text permits arbitrary
            # expressions; unsafe if the input is untrusted.
            value = eval(str(self.control.text()))
            if self.evaluate is not None:
                value = self.evaluate(value)
            # Out-of-range entries are clamped and flagged with the error
            # colour rather than rejected outright.
            if self.low is not None and self.low > value:
                value = self.low
                col = ErrorColor
            elif self.high is not None and self.high < value:
                value = self.low if self.low is not None else self.high
                col = ErrorColor
            else:
                col = OKColor
            self.value = value
        except Exception:
            col = ErrorColor
        if self.control is not None:
            pal = QtGui.QPalette(self.control.palette())
            pal.setColor(QtGui.QPalette.ColorRole.Base, col)
            self.control.setPalette(pal)
# -------------------------------------------------------------------------
# 'SimpleEnumEditor' factory adaptor:
# -------------------------------------------------------------------------
def SimpleEnumEditor(parent, factory, ui, object, name, description, **kwargs):
    """Factory adapter that returns a simple-style enumeration editor.

    Extra keyword arguments are accepted for interface compatibility but
    are not forwarded.
    """
    return CustomEnumEditor(
        parent, factory, ui, object, name, description, style="simple"
    )
def CustomEnumEditor(
    parent, factory, ui, object, name, description, style="custom", **kwargs
):
    """Factory adapter that returns a enumeration editor of the specified
    style.
    """
    # Lazily build an EnumEditor factory spanning the integer range
    # [low, high] and cache it on the range factory; the import is deferred
    # to avoid a circular import at module load time.
    if factory._enum is None:
        import traitsui.editors.enum_editor as enum_editor
        factory._enum = enum_editor.ToolkitEditorFactory(
            values=list(range(factory.low, factory.high + 1)),
            cols=factory.cols,
        )
    if style == "simple":
        return factory._enum.simple_editor(
            ui, object, name, description, parent
        )
    return factory._enum.custom_editor(ui, object, name, description, parent)
# -------------------------------------------------------------------------
# Defines the mapping between editor factory 'mode's and Editor classes:
# -------------------------------------------------------------------------
# Mapping between editor factory modes and simple editor classes
SimpleEditorMap = {
    "slider": SimpleSliderEditor,
    "xslider": LargeRangeSliderEditor,
    "spinner": SimpleSpinEditor,
    "enum": SimpleEnumEditor,
    "text": RangeTextEditor,
    "logslider": LogRangeSliderEditor,
}
# Mapping between editor factory modes and custom editor classes
# (identical to SimpleEditorMap except for the 'enum' mode).
CustomEditorMap = {
    "slider": SimpleSliderEditor,
    "xslider": LargeRangeSliderEditor,
    "spinner": SimpleSpinEditor,
    "enum": CustomEnumEditor,
    "text": RangeTextEditor,
    "logslider": LogRangeSliderEditor,
}
|
enthought/traitsui
|
traitsui/qt/range_editor.py
|
range_editor.py
|
py
| 25,173
|
python
|
en
|
code
| 290
|
github-code
|
6
|
3986831730
|
"""The abstract class for http routing"""
from abc import ABCMeta, abstractmethod
from typing import AbstractSet, Any, Mapping, Tuple
from .http_callbacks import HttpRequestCallback
from .http_response import HttpResponse
class HttpRouter(metaclass=ABCMeta):
    """The interface for an HTTP router"""
    @property  # type: ignore
    @abstractmethod
    def not_found_response(self) -> HttpResponse:
        """The response when a handler could not be found for a method/path
        Returns:
            HttpResponse: The response when a route cannot be found.
        """
    @not_found_response.setter  # type: ignore
    @abstractmethod
    def not_found_response(self, value: HttpResponse) -> None:
        # Setter counterpart: replaces the response used when no route
        # matches.
        ...
    @abstractmethod
    def add(
            self,
            methods: AbstractSet[str],
            path: str,
            callback: HttpRequestCallback
    ) -> None:
        """Add an HTTP request handler
        Args:
            methods (AbstractSet[str]): The supported HTTP methods.
            path (str): The path.
            callback (HttpRequestCallback): The request handler.
        """
    @abstractmethod
    def resolve(
            self,
            method: str,
            path: str
    ) -> Tuple[HttpRequestCallback, Mapping[str, Any]]:
        """Resolve a request to a handler with the route matches
        Args:
            method (str): The HTTP method.
            path (str): The path.
        Returns:
            Tuple[HttpRequestCallback, Mapping[str, Any]]: A handler and the route
            matches.
        """
|
rob-blackbourn/bareASGI
|
bareasgi/http/http_router.py
|
http_router.py
|
py
| 1,583
|
python
|
en
|
code
| 26
|
github-code
|
6
|
29180093984
|
"""Scrape a university calendar page and emit a SQL INSERT script.

Prompts for a calendar URL, an output ``.sql`` file name and a student
type, parses the event tables, normalises the various date formats into
``YYYY-MM-DD HH:MM:SS`` ranges and writes one INSERT row per event.
"""
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
from datetime import datetime as dt
import re
import copy
import MySQLdb
dataBase = MySQLdb
userInput1 = str(input("Please Provide with Calendar link: "))
userInput2 = str(input("Please Provide a file name ending in .sql: "))
userInput3 = str(input("Please select student type: (undergrad or graduate): "))
url = userInput1
# get data from the link
client = uReq(url)
pageHtml = client.read()
client.close()
# create parsabel html
html = soup(pageHtml, "html.parser")
header = html.h2
headTables = header.find_next_siblings("table")
list_of_rows = []
# Loop through the sibling tables of h2 and find tr
for i in headTables:
    rows = i.find_all("tr")
    # loop through all the tr's and find td's
    for j in rows:
        list_of_cells = []
        cols = j.find_all("td")
        # loop through all the td's and get data
        for data in cols:
            event = data.text.replace("\r\n\t\t\t",' ')
            # assignment based on regex: single date, "D to D", "D and D",
            # and year-crossing "date to date" formats.
            time = re.match(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4}",event)
            doubleTime = re.match(r"[ADFJMNOS]\w* [\d]{1,2} to [\d]{1,2}, [\d]{4}", event)
            doubleDates = re.match(r"[ADFJMNOS]\w* [\d]{1,2} and [\d]{1,2}, [\d]{4}", event)
            crossYear = re.match(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4} to [ADFJMNOS]\w* [\d]{1,2}, [\d]{4}", event)
            # NOTE(review): if the very first cell has no date, the
            # fallback branch below reads ``updateEvent`` before any
            # branch has assigned it, raising NameError.
            global updateEvent
            if crossYear:
                dates = re.split(r"\ to \ |\ |,\ ",event)
                # get the start date numbers and join
                startDates = " ".join([dates[0],dates[1],dates[2]])
                # get the end date values and join
                endDates = " ".join([dates[4],dates[5],dates[6]])
                startTime = dt.strptime(startDates, "%B %d %Y")
                endTime = dt.strptime(endDates, "%B %d %Y")
                finalDates = ' to '.join([str(startTime), str(endTime)])
                event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4} to [ADFJMNOS]\w* [\d]{1,2}, [\d]{4}",str(finalDates), event)
            # converts Month Day, Year
            elif time:
                timeVal = dt.strptime(time.group(), "%B %d, %Y")
                # global variable for storage of date value
                updateEvent = timeVal
                event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2}, [\d]{4}",str(timeVal), event)
            # convets date in format: Month Day to Day, Year
            elif doubleTime:
                dates = re.split(r"\ to\ |\ |,\ ", event)
                # get the start date numbers and join
                startDates = " ".join([dates[0],dates[1],dates[3]])
                # get the end date values and join
                endDates = " ".join([dates[0],dates[2],dates[3]])
                startTime = dt.strptime(startDates, "%B %d %Y")
                endTime = dt.strptime(endDates, "%B %d %Y")
                #global variable for empty date
                updateEvent = str(startTime)
                finalDates = ' to '.join([str(startTime), str(endTime)])
                event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2} to [\d]{1,2}, [\d]{4}",str(finalDates), event)
            # converts Month Day and Day, Year
            elif doubleDates:
                dates = re.split(r"\ and\ |\ |,\ ", event)
                # get the start date numbers and join
                startDates = " ".join([dates[0],dates[1],dates[3]])
                # get the end date values and join
                endDates = " ".join([dates[0],dates[2],dates[3]])
                startTime = dt.strptime(startDates, "%B %d %Y")
                #global variable for empty dates
                endTime = dt.strptime(endDates, "%B %d %Y")
                updateEvent = str(startTime)
                finalDates = ' to '.join([str(startTime), str(endTime)])
                event = re.sub(r"[ADFJMNOS]\w* [\d]{1,2} and [\d]{1,2}, [\d]{4}",str(finalDates), event)
            else :
                # Fill in all the date values that are empty with date value
                # before it
                event = re.sub(r"\xa0", str(updateEvent), event)
            #append to list of cols
            list_of_cells.append(event)
        newCells = copy.deepcopy(list_of_cells[0])
        toSplit = re.match(r"[\d]{4}-[\d]{1,2}-[\d]{1,2} [\d]{1,2}:[\d]{1,2}:[\d]{1,2} to [\d]{4}-[\d]{1,2}-[\d]{1,2} [\d]{1,2}:[\d]{1,2}:[\d]{1,2}", newCells)
        global endTimes
        if toSplit:
            # End of a range: push the end timestamp to end-of-day.
            newSplit = toSplit.group().split(' to')
            endTimes = newSplit[1].replace("00:00:00","11:59:59")
        else:
            endTimes = newCells.replace("00:00:00","11:59:59")
        list_of_cells.append(endTimes)
        startDate = list_of_cells[0].split('to')
        strParts = list_of_cells[1].split('. ')
        global title, description
        # First sentence is the title; the rest (if any) the description.
        if len(strParts) > 1:
            title = str(strParts[0])
            description = str(strParts[1])
        else:
            title= str(strParts[0])
            description = str(strParts[0])
        # Escape for safe inclusion in the generated SQL literals.
        newTitle =str(dataBase.escape_string(title))
        newDesc =str(dataBase.escape_string(description))
        query = "INSERT INTO tbl_entries ( event_name, event_description, event_categories, event_tags, event_startdate, event_enddate, open_to, location_building, location_room, location_campus, location_other, start_hour, start_minute, start_ampm, end_hour, end_minute, end_ampm, contact_event_firstname, contact_event_lastname, contact_event_phonenumber, contact_event_phoneext, contact_event_email, contact_firstname, contact_lastname, contact_phonenumber, contact_phoneext, contact_email, event_url, event_url_protocol, upload_image, date_submitted, date_approved, repeated, repeat_freq, repeat_day, repeat_until, repeat_until_date, repeat_until_num, clickable, pending, approved, archived, cancelled, frontpage, submission_ip) VALUES "
        registrars = str(dataBase.escape_string("'Registar's'"))[1:]
        global values
        if userInput3 == 'undergrad':
            values = "(" + str(newTitle)[1:] + " ," + str(newDesc)[1:]+ ", '73', '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0', '"+ startDate[0] + "','" + endTimes + "', '29', 0, '', 2, 'Ontario Tech', 0, 0, 'am', 11, 59, 'pm', "+ registrars +", 'Office', '905.721.3190', '', 'connect@uoit.ca', " + registrars + ", 'Office', '905.721.3190', '', 'connect@uoit.ca', '" + url + "', 'https', NULL, '" + str(dt.now())+ "', '" + str(dt.now()) + "', 0, '', '', 0, '" + str(dt.now()) + "', 0, 1, 0, 1, 0, 0, 0, '00.000.0.000'),"
        elif userInput3 == 'graduate':
            values = "(" + str(newTitle)[1:] + " ," + str(newDesc)[1:]+ ", '74', '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0', '"+ startDate[0] + "','" + endTimes + "', '29', 0, '', 2, 'Ontario Tech', 0, 0, 'am', 11, 59, 'pm','School of Graduate', 'and Postdoctoral Studies', '905.721.8668', '6209', 'connect@uoit.ca', 'School of Graduate', 'and Postdoctoral Studies', '905.721.8668', '6209','connect@uoit.ca', '" + url + "', 'https', NULL, '" + str(dt.now())+ "', '" + str(dt.now()) + "', 0, '', '', 0, '" + str(dt.now()) + "', 0, 1, 0, 1, 0, 0, 0, '00.000.0.000'),"
        #append to rows
        list_of_rows.append(values)
# Terminate the final VALUES tuple with ';' instead of ','.
lastString = str(list_of_rows.pop(len(list_of_rows)-1))
lastString = lastString[:-1] +';'
list_of_rows.append(lastString)
# Bug fix: the original called ``outfile.close`` without parentheses, so
# the file was never explicitly closed/flushed.  A context manager closes
# it even if a write fails.
with open(userInput2, "w") as outfile:
    outfile.write(query)
    for item in list_of_rows:
        outfile.write("%s\n" % item)
|
ZbonaL/WebScraper
|
webscraper-Important-Dates.py
|
webscraper-Important-Dates.py
|
py
| 7,088
|
python
|
en
|
code
| 1
|
github-code
|
6
|
37407141564
|
from .core.Backtest import BackTesting
from .core.Feature import ApplyRule
import pandas as pd
import numpy as np
class ESG_rule(ApplyRule):
    """Trading rule driven by the ESG combined score, with optional ROE and
    ROIC thresholds.

    A row is a buy when every configured threshold is met, and a sell when
    any configured threshold is violated.
    """

    def __init__(self, min_buy_score, roe_score=None, roic_score=None):
        self.min_buy_score = min_buy_score
        self.roe_score = roe_score
        self.roic_score = roic_score

    # ROE more than 15
    def buy_rule(self, df):
        """Set ``buy_signal`` to 1 where all configured thresholds pass."""
        mask = df['ESG Combined Score'] >= self.min_buy_score
        if self.roe_score:
            mask = mask & (df['ROE'] >= self.roe_score)
        if self.roic_score:
            mask = mask & (df['ROIC'] >= self.roic_score)
        df['buy_signal'] = mask.map(lambda flag: 1 if flag else None)
        return df

    def sell_rule(self, df):
        """Set ``sell_signal`` to -1 where any configured threshold fails."""
        mask = df['ESG Combined Score'] < self.min_buy_score
        if self.roe_score:
            mask = mask | (df['ROE'] < self.roe_score)
        if self.roic_score:
            mask = mask | (df['ROIC'] < self.roic_score)
        df['sell_signal'] = mask.map(lambda flag: -1 if flag else None)
        return df

    @ApplyRule.get_action
    def run(self, df):
        """Apply the buy, sell and signal rules in sequence."""
        for rule in (self.buy_rule, self.sell_rule, self.get_signal):
            df = rule(df)
        return df
|
etq-quant/etqbankloan
|
Lib/etiqabacktest/ESGRule.py
|
ESGRule.py
|
py
| 1,279
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20162423075
|
# Level 1
# Task 1. Reverse a negative integer and keep the negative sign at the beginning.
def reverse_negative_integer(n: int):
    """Reverse the digits of a negative integer, keeping the leading '-'.

    ``str(n)[:0:-1]`` walks the string backwards, stopping before index 0,
    which both reverses the digits and drops the sign character.
    """
    reversed_digits = str(n)[:0:-1]
    return int('-' + reversed_digits)
print(reverse_negative_integer(-234))
# Task 2. Write a function that takes two strings as input and returns True if they are anagrams of each other, and False otherwise.
# The strings can contain uppercase and lowercase letters, and should be ignored during the comparison.
def are_anagrams(s1: str, s2: str):
    """Return True if *s1* and *s2* are anagrams, ignoring letter case."""
    result = sorted(s1.lower()) == sorted(s2.lower())
    # Bug fix: the original printed "... are anagrams!" unconditionally,
    # even when the strings were not anagrams.
    if result:
        print(f'{s1} and {s2} are anagrams!')
    return result
print(are_anagrams('Rat', 'Tar'))
# Level 2
# Task 3. Given a sentence, reverse the order of characters in each word
def reverse_words(sentence: str):
    """Reverse the characters of each word, keeping the word order.

    Words are split on whitespace, rejoined with single spaces, and a '.'
    is appended to the result.
    """
    flipped = [word[::-1] for word in sentence.split()]
    return ' '.join(flipped) + '.'
print(reverse_words('ehT taC sah caT'))
# Task 4. Given a string made of digits [0-9], return a string where each digit is repeated a number of times equals to its value
def repeat_digits(s: str):
    """Repeat each digit of *s* as many times as its value, space-separated.

    A '0' contributes an empty group, so its trailing separator space
    still appears in the output.
    """
    return ''.join(digit * int(digit) + ' ' for digit in s)
print(repeat_digits('0123456789'))
# Task 5. Create a function called shortcut to remove the lowercase vowels (a, e, i, o, u) in a given string.
# "y" is not considered a vowel for this task. The input string is always in lowercase.
def shortcut(s: str):
    """Return *s* with the lowercase vowels a/e/i/o/u removed ('y' kept)."""
    return ''.join(ch for ch in s if ch not in 'aeiou')
print(shortcut('i love trap music!'))
|
OrbitWon45/git_hw
|
algorrithms_hw_2.py
|
algorrithms_hw_2.py
|
py
| 1,692
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1956715633
|
from collections import deque
# Advent of Code 2020, day 22 ("Crab Combat"): play the two-deck card
# game and score the winning deck.  Variable names are Croatian:
# ulaz = input file, igrači = players, prvi/drugi = first/second deck,
# rezultat = result.
ulaz = open('ulaz.txt', 'r')
sve = ulaz.read()
ulaz.close()
# The input file has one block per player, separated by a blank line;
# the first line of each block is the "Player N:" header and is dropped.
igrači = sve.split('\n\n')
prvi = deque(igrači[0].split('\n')[1:])
drugi = deque(igrači[1].split('\n')[1:])
# Each round: both players reveal their top card; the higher card's owner
# takes both (winner's card first).  Play until one deck is empty.
while len(prvi) != 0 and len(drugi) != 0:
    a = int(prvi.popleft())
    b = int(drugi.popleft())
    if a > b:
        prvi.extend([str(a), str(b)])
    else:
        drugi.extend([str(b), str(a)])
# Pick the non-empty (winning) deck.
if len(prvi) > 0:
    l = prvi
else:
    l = drugi
# Score: bottom card times 1, next times 2, ... top card times deck size.
l.reverse()
rezultat = 0
for i in range(len(l)):
    rezultat += int(l[i]) * (i + 1)
print(rezultat)
|
bonkach/Advent-of-Code-2020
|
22a.py
|
22a.py
|
py
| 582
|
python
|
hr
|
code
| 1
|
github-code
|
6
|
40403637355
|
# Program swaps max and min elements of an array
# Sample data: an unsorted list with duplicates and negative values.
mas = [5, 6, 7, 4, 3, 2, 1, 0, -2, 0, 9, 4, 7, -5, 3, 1]
def find_max(array):
    """Return the largest element of *array*.

    Assumes *array* is non-empty.  (The original hand-rolled scan raised
    IndexError on an empty list; the built-in raises ValueError instead.)
    """
    return max(array)
def find_min(array):
    """Return the smallest element of *array*.

    Assumes *array* is non-empty.  The original hand-rolled scan also used
    a misleadingly named ``maximum`` local; the built-in does the same
    scan in C without that confusion.
    """
    return min(array)
# Locate the first occurrence of the maximum and minimum values, then swap
# them in place via tuple unpacking.
max_va = mas.index(find_max(mas))
min_va = mas.index(find_min(mas))
mas[max_va], mas[min_va] = mas[min_va], mas[max_va]
print(mas)
|
danielsummer044/EDUCATION
|
swap_min_max.py
|
swap_min_max.py
|
py
| 516
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70965223549
|
### THEORETICAL PROBABILITY ###
# For the following problems, use python to simulate the problem and calculate an experimental probability, then compare that to the theoretical probability.
from scipy import stats
import numpy as np
# Grades of State University graduates are normally distributed with a mean of 3.0 and a standard deviation of .3.
# Calculate the following:
#**Theoretical
# What grade point average is required to be in the top 5% of the graduating class?
p_top_5 = 5/100
mean_graduates = 3
sd_graduates = 0.3
grads = stats.norm(mean_graduates, sd_graduates)
grads.isf(p_top_5)
# An eccentric alumnus left scholarship money for students in the third decile from the bottom of their class. Determine the range of the third decile.
p_third_point = 3/10
p_fourth_point = 4/10
third_point = grads.ppf(p_third_point)
fourth_point = grads.ppf(p_fourth_point)
# Would a student with a 2.8 grade point average qualify for this scholarship?
n = 2.8
qualify = n > third_point and n < fourth_point
qualify
# If I have a GPA of 3.5, what percentile am I in?
in_percentile = grads.cdf(3.5)
in_percentile
#**Simulated
sim_no_of_students = 10_000
graduates_data = np.random.normal(mean_graduates, sd_graduates,sim_no_of_students)
top_five_percent = np.percentile(graduates_data, 95)
#30th percentile - 20th percentile
thirtieth_percentile = np.percentile(graduates_data, 30)
twentieth_percentile = np.percentile(graduates_data, 20)
student_gpa = 2.8
student_qualify = student_gpa > twentieth_percentile and student_gpa < thirtieth_percentile
# A marketing website has an average click-through rate of 2%.
# One day they observe 4326 visitors and 97 click-throughs. How likely is it that this many people or more click through?
ctr = 2/100
ctr_observed = 97/4326
website = stats.poisson(ctr).sf(ctr_observed)
website
# You are working on some statistics homework consisting of 100 questions where all of the answers are a probability rounded to the hundreths place. Looking to save time, you put down random probabilities as the answer to each question.
# What is the probability that at least one of your first 60 answers is correct?
questions = 100
p_success = 0.5
homework = stats.binom(questions, p_success).pmf(60) #pmf because being correct or incorrect is discrete?
homework
first_sixty = np.random.choice(["Correct", "Incorrect"],(sim_visitors,60))
((first_sixty == "Incorrect").sum(axis=1) == 60).mean()
# The codeup staff tends to get upset when the student break area is not cleaned up.
# Suppose that there's a 3% chance that any one student cleans the break area when they visit it, and, on any given day, about 90% of the 3 active cohorts of 22 students visit the break area.
# How likely is it that the break area gets cleaned up each day? How likely is it that it goes two days without getting cleaned up? All week?
p_cleaning = .03
n_students = .90 * (3*22)
#success = days the break area is cleaned
cleaning = stats.binom(n_students,p_cleaning).pmf(5) #all week days
cleaning
cleaning = stats.binom(n_students,p_cleaning).pmf(1) #once in a week
cleaning
cleaning = stats.binom(n_students,p_cleaning).pmf(0) #not cleaned at all
cleaning
# You want to get lunch at La Panaderia, but notice that the line is usually very long at lunchtime.
# After several weeks of careful observation, you notice that the average number of people in line when your lunch break starts is normally distributed
# with a mean of 15 and standard deviation of 3.
# If it takes 2 minutes for each person to order, and 10 minutes from ordering to getting your food
# what is the likelihood that you have at least 15 minutes left to eat your food before you have to go back to class?
# Assume you have one hour for lunch, and ignore travel time to and from La Panaderia.
p_of_service = .2
mean_customers = 15
sd_customers = 3
served = stats.norm(mean_customers,sd_customers)
order_time = 12
# Connect to the employees database and find the average salary of current employees, along with the standard deviation. Model the distribution of employees salaries with a normal distribution and answer the following questions:
def get_db_url(u,p,h,d):
url = f'mysql+pymysql://{u}:{p}@{h}/{d}'
return url
# Use your function to obtain a connection to the employees database.
from env import host, user, password
host = host
user = user
password = password
url = get_db_url(user,password,host,"employees")
url
salaries_df = pd.read_sql('SELECT * FROM salaries', url)
# What percent of employees earn less than 60,000?
salaries_df.head()
mean_salary = salaries_df.salary.mean()
sd_salary = salaries_df.salary.std()
salary_dist = stats.norm(mean_salary,sd_salary)
salary_dist.cdf(60_000)
# What percent of employees earn more than 95,000?
salary_dist.sf(95_000)
# What percent of employees earn between 65,000 and 80,000?
lower_bound = salary_dist.sf(65_000)
upper_bound = salary_dist.sf(80_000)
in_between = lower_bound - upper_bound
in_between
# What do the top 5% of employees make?
percentage = .95
salary_dist.isf(percentage)
### EXPERIMENTAL PROBABILITY ###
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(3)  # fixed seed so the Monte-Carlo results below are reproducible
# A marketing website has an average click-through rate of 2%.
# One day they observe 4326 visitors and 97 click-throughs.
# How likely is it that this many people or more click through?
sim_visitors = 100_000_000  # number of trials used by every simulation below
# NOTE(review): `ctr_observed` is never assigned anywhere in this file, so
# running this as a script raises NameError here. It was presumably 97/4326
# in a lost notebook cell — confirm and define it.
ctr_observed
mean_website = 2/100  # the advertised 2% click-through rate
sim_ctr = np.random.normal(mean_website, 0.03, sim_visitors) >= ctr_observed
sim_ctr.mean()
# You are working on some statistics homework consisting of 100 questions where all of the answers are a probability rounded to the hundreths place.
# Looking to save time, you put down random probabilities as the answer to each question.
# What is the probability that at least one of your first 60 answers is correct?
#bunch of data, see if the first 60 answers out of 100 are correct.
# The codeup staff tends to get upset when the student break area is not cleaned up.
# Suppose that there's a 3% chance that any one student cleans the break area when they visit it,
# and, on any given day, about 90% of the 3 active cohorts of 22 students visit the break area.
# How likely is it that the break area gets cleaned up each day? How likely is it that it goes two days without getting cleaned up? All week?
cleaning_probability = ["Cleaned"] * 2 + ["Not Cleaned"] * 64  # 2/66 ~= 3% of the daily visitors clean
# NOTE(review): each line below materializes a (100_000_000, k) array of Python
# strings — tens of GB of memory; consider fewer trials or numeric draws.
weekly_cleaning_streak = (((np.random.choice(cleaning_probability,(sim_visitors,7)) == "Cleaned").sum(axis=1)) == 7).mean()
weekly_cleaning_streak_2 = (((np.random.choice(cleaning_probability,(sim_visitors,2)) == "Not Cleaned").sum(axis=1)) == 2).mean()
weekly_cleaning_streak_7 = (((np.random.choice(cleaning_probability,(sim_visitors,7)) == "Not Cleaned").sum(axis=1)) == 7).mean()
# You want to get lunch at La Panaderia, but notice that the line is usually very long at lunchtime.
# After several weeks of careful observation, you notice that the average number of people in line when your lunch break starts is normally distributed with a mean of 15 and standard deviation of 3.
# If it takes 2 minutes for each person to order, and 10 minutes from ordering to getting your food, what is the likelihood that you have at least 15 minutes left to eat your food before you have to go back to class? Assume you have one hour for lunch, and ignore travel time to and from La Panaderia.
# Bare expressions: notebook cell echoes of the values defined earlier.
mean_customers
sd_customers
panaderia_traffic = np.random.normal(mean_customers,sd_customers,sim_visitors)
panaderia_traffic_with_time = panaderia_traffic * 12
(panaderia_traffic_with_time <= 45).mean()
# Connect to the employees database and find the average salary of current employees, along with the standard deviation.
# Model the distribution of employees salaries with a normal distribution and answer the following questions:
# What percent of employees earn less than 60,000?
# What percent of employees earn more than 95,000?
# What percent of employees earn between 65,000 and 80,000?
# What do the top 5% of employees make?
employees_simulation = np.random.normal(mean_salary,sd_salary,sim_visitors)
(employees_simulation < 60_000).mean()
(employees_simulation > 95_000).mean()
((employees_simulation >= 65_000) & (employees_simulation <= 80_000)).mean()
top_5_employee_salary = np.percentile(employees_simulation,95)
top_5_employee_salary
# A bank found that the average number of cars waiting during the noon hour at a drive-up window follows a Poisson distribution with a mean of 2 cars.
# Make a chart of this distribution and answer these questions concerning the probability of cars waiting at the drive-up window.
# What is the probability that no cars drive up in the noon hour?
# What is the probability that 3 or more cars come through the drive through?
# How likely is it that the drive through gets at least 1 car?
cars = np.random.poisson(2,sim_visitors)
cars
plt.hist(cars)
no_cars = (np.isin(cars,0).sum())/len(cars)
three_or_more_cars = (cars >= 3).mean()
at_least_one = (cars > 0).mean()
|
crisgiovanoni/statistics-exercises
|
probability_distributions.py
|
probability_distributions.py
|
py
| 9,018
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36559869570
|
# Turtle race: six colored turtles race across the screen; the user bets on a color.
from turtle import Turtle, Screen
from layout import Layout
import random
is_race_on = False
screen = Screen()
screen.setup(width=500, height=400)
# textinput returns the entered string, or None if the dialog is cancelled;
# the `if user_bet` guard below relies on that.
user_bet = screen.textinput(title="Make your bet", prompt="Which turtle will win the race? Enter a color: ")
# Create layout
layout = Layout()
layout.draw_end_flag()
# Create Turtles — one per color, stacked vertically 40 px apart at x = -230.
colors = ["red", "orange", "yellow", "green", "blue", "purple"]
all_turtle = []
y_position = -100
for turtle_index in range(0, 6):
    new_turtle = Turtle(shape="turtle")
    new_turtle.penup()
    new_turtle.goto(x=-230, y=y_position)
    new_turtle.color(colors[turtle_index])
    all_turtle.append(new_turtle)
    y_position += 40
# Check if the user bet was successful
if user_bet:
    is_race_on = True
# Start race: each turtle advances a random 0-8 px per round until one crosses x = 190.
while is_race_on:
    for turtle in all_turtle:
        random_distance = random.randint(0, 8)
        turtle.forward(random_distance)
        # Check if turtle cross the end flag
        if turtle.xcor() > 190:
            is_race_on = False
            winner_color = turtle.pencolor()
            # Check if the user wins
            if user_bet == winner_color:
                layout.victory(winner_color)
            else:
                layout.defeat(winner_color)
screen.exitonclick()
|
portoduque/Turtle-Race
|
main.py
|
main.py
|
py
| 1,258
|
python
|
en
|
code
| 1
|
github-code
|
6
|
16799554480
|
# Advent of Code 2022 day 16: parse the valve graph from input.txt/sample.txt.
import argparse
from pathlib import Path
import sys
# Add aoc_common to the python path
file = Path(__file__)
root = file.parent.parent
sys.path.append(root.as_posix())
import re
from functools import lru_cache
from math import inf
parser = argparse.ArgumentParser()
parser.add_argument('--sample', '-s', help='Run with sample data', action='store_true', default=False)
parsed_args = parser.parse_args()
if parsed_args.sample:
    print("Using sample data!")
def dprint(*args, **kwargs):
    """print() only when running with --sample (debug output)."""
    if parsed_args.sample:
        print(*args, **kwargs)
with open('input.txt' if not parsed_args.sample else 'sample.txt') as f:
    input_data = list(map(lambda x: x.replace('\n', ''), f.readlines()))
dprint(input_data)
# valve name -> flow rate, and valve name -> list of directly-connected valves.
valve_flow_rates = {}
valve_tunnels = {}
input_re = re.compile(r"Valve ([A-Z]{2}) has flow rate=(\d+); tunnels? leads? to valves? (.*)")
for line in input_data:
    matcher = input_re.match(line)
    valve, rate, tunnels = matcher.groups()
    tunnels = tunnels.split(', ')
    valve_flow_rates[valve] = int(rate)
    valve_tunnels[valve] = tunnels
dprint(valve_flow_rates)
dprint(valve_tunnels)
# Only valves with non-zero flow are ever worth opening.
potential_valves = sorted([x[0] for x in valve_flow_rates.items() if x[1] != 0])
@lru_cache(maxsize=None)
def get_max_flow_rate(current_position, opened_valves, time_left):
    """Part 1: memoized DFS for the best total pressure released.

    `opened_valves` is a sorted tuple so it is hashable and canonical for the
    cache; `time_left` is the remaining minutes. Returns the maximum pressure
    releasable from this state.
    """
    if time_left <= 0:
        return 0
    # If this valve is useless (zero flow) or already open, only moving matters;
    # otherwise consider both opening it here and skipping it.
    if valve_flow_rates[current_position] == 0 or current_position in opened_valves:
        best = 0
        for adjacent in valve_tunnels[current_position]:
            best = max(best, get_max_flow_rate(adjacent, opened_valves, time_left - 1))
        return best
    else:
        # Opening takes one minute, so the valve contributes flow for time_left - 1 minutes.
        gained_flow = (time_left - 1) * valve_flow_rates[current_position]
        best = 0
        opened = tuple(sorted(opened_valves + (current_position,)))
        for adjacent in valve_tunnels[current_position]:
            # time_left - 2: one minute to open, one to move to `adjacent`.
            best = max(best, gained_flow + get_max_flow_rate(adjacent, opened, time_left - 2))
            best = max(best, get_max_flow_rate(adjacent, opened_valves, time_left - 1))
        return best
valve_distances = {}  # placeholder; rebuilt below with per-valve shortest distances
def djikstra(valve):
    """Single-source shortest hop-count (unit edge weights) from `valve` to
    every valve in the module-level `valve_tunnels` graph."""
    dist = {valve: 0}
    settled = set()
    while len(settled) < len(valve_tunnels):
        # Pick the closest valve that has not been settled yet.
        node = min((v for v in dist if v not in settled), key=dist.get)
        step = dist[node] + 1
        for neighbour in valve_tunnels[node]:
            if dist.get(neighbour, inf) > step:
                settled.discard(neighbour)  # re-open: a shorter path was found
                dist[neighbour] = step
        settled.add(node)
    return dist
# All-pairs shortest hop-counts between valves (used by part 2's travel times).
valve_distances = {k: djikstra(k) for k in valve_tunnels.keys() }
max_flow_seen = 0  # progress tracker printed by run_part_2 when a new best is found
@lru_cache(maxsize=None)
def run_part_2(cur, other, closed_valves):
    """Part 2: best pressure with two agents (you and the elephant).

    Each agent is a `(time_left, position)` tuple; `closed_valves` is a
    frozenset of still-closed useful valves. The agent in `cur` chooses the
    next valve to walk to and open; the recursion then continues with
    whichever agent has the most time remaining.
    """
    cur_time_left, cur_pos = cur
    other_time_left, other_pos = other
    totals = [0]
    for valve in closed_valves:
        # +1 minute to open the valve after walking to it.
        time_to_valve = valve_distances[cur_pos].get(valve) + 1
        time_left = cur_time_left - time_to_valve
        if time_left <= 0:
            continue # Can't get to the valve and open it
        flow_gained = time_left * valve_flow_rates[valve]
        # Move the person that has the most time left
        if time_left > other_time_left:
            totals.append(flow_gained + run_part_2((time_left, valve), other, closed_valves - {valve}))
        else:
            totals.append(flow_gained + run_part_2(other, (time_left, valve), closed_valves - {valve}))
    max_flow = max(totals)
    # Print running progress since the full search can take a while.
    global max_flow_seen
    if max_flow > max_flow_seen:
        print("New max:", max_flow)
        max_flow_seen = max_flow
    return max_flow
def part_1():
    """Part 1: a single agent starting at 'AA' with 30 minutes."""
    return get_max_flow_rate('AA', (), 30)
def part_2():
    """Part 2: two agents starting at 'AA' with 26 minutes each."""
    return run_part_2((26, 'AA'), (26, 'AA'), frozenset(potential_valves))
print(f"Part 1: {part_1()}")
print(f"Part 2: {part_2()}")
|
mrkirby153/AdventOfCode2022
|
day16/day16.py
|
day16.py
|
py
| 3,973
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18810536610
|
from sympy import *
# Derivation exercise: differentiate a user-supplied function of x.
x = Symbol('x')  # the symbol derivatives are taken with respect to
L=[]  # function strings entered by the user
def derivee(y):
    """Print and return the derivative of *y* with respect to x.

    *y* may be a SymPy expression or a plain string such as "x**2 + 3*x";
    strings are converted with sympify(), so raw input() values work directly
    (the original called .diff on the string and crashed with AttributeError).
    Returns the derivative instead of None so callers can use the value.
    """
    expr = sympify(y)  # no-op for expressions, parses strings
    yprime = expr.diff(x)
    print("la dérivée est :", yprime)
    return yprime
# Read one function from the user, then differentiate everything collected.
for i in range(1):
    a = input("Entrez la fonction à derivé :")
    L.append(a)
#print(L)
for a in L:
    # sympify() turns the raw input string into a SymPy expression; passing the
    # bare string to derivee() crashed on str.diff. derivee() already prints the
    # result, so the old print(derivee(a)) (which printed "None") is dropped.
    derivee(sympify(a))
|
Mehdi921NSI/derivation
|
python.py
|
python.py
|
py
| 299
|
python
|
fr
|
code
| 0
|
github-code
|
6
|
26024379550
|
# Conservative (integral-form) Lax-Friedrichs scheme
# Import the required modules
import numpy as np
from fealpy.decorator import cartesian
from scipy.sparse import diags
from scipy.sparse import csr_matrix
from typing import Union,Tuple,List # Union: allows several alternative types in one annotation
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
from fealpy.mesh import UniformMesh1d
# Define the PDE data class
class Hyperbolic1dPDEData:
    """Data for the 1-D advection problem u_t + u_x = 0 on D x T with
    initial profile u(x, 0) = |x - 1| and inflow boundary value 1."""

    def __init__(self, D: Union[Tuple[int, int], List[int]] = (0, 2), T: Union[Tuple[int, int], List[int]] = (0, 4)):
        self._domain = D    # spatial interval
        self._duration = T  # time interval

    def domain(self) -> Union[Tuple[float, float], List[float]]:
        """Return the spatial interval."""
        return self._domain

    def duration(self) -> Union[Tuple[float, float], List[float]]:
        """Return the time interval."""
        return self._duration

    @cartesian
    def solution(self, p: np.ndarray, t: np.float64) -> np.ndarray:
        """Exact solution at points `p`, time `t`: the |x-1| profile advected right by t."""
        val = np.zeros_like(p)
        left_of_kink = p <= t
        right_of_kink = p > t + 1
        between = ~left_of_kink & ~right_of_kink
        val[left_of_kink] = 1
        val[between] = 1 - p[between] + t
        val[right_of_kink] = p[right_of_kink] - t - 1
        return val

    @cartesian
    def init_solution(self, p: np.ndarray) -> np.ndarray:
        """Initial condition u(x, 0) = |x - 1|."""
        return np.abs(p - 1)

    @cartesian
    def source(self, p: np.ndarray, t: np.float64) -> np.ndarray:
        """Homogeneous right-hand side."""
        return 0.0

    @cartesian
    def dirichlet(self, p: np.ndarray, t: np.float64) -> np.ndarray:
        """Inflow (left) boundary value: constant 1."""
        return np.ones(p.shape)

    def a(self) -> np.float64:
        """Advection speed."""
        return 1
# Create the PDE data object
pde =Hyperbolic1dPDEData()
# Spatial discretisation: 40 uniform cells on the domain
domain = pde.domain()
nx = 40
hx = (domain[1] - domain[0]) / nx
mesh = UniformMesh1d([0,nx], h = hx, origin = domain[0])
# Temporal discretisation: 3200 steps over the duration
duration = pde.duration()
nt = 3200
tau = (duration[1] - duration[0]) / nt
# Prepare the initial values at the mesh nodes
uh0 = mesh.interpolate(pde.init_solution, intertype = 'node')
# Assemble the one-step iteration matrix
def hyperbolic_operator_explicity_lax_friedrichs(mesh, tau, a):
    """
    @brief Conservative (integral-form) explicit Lax-Friedrichs scheme.

    Builds the sparse matrix A with (1/2 + r/2) on the sub-diagonal and
    (1/2 - r/2) on the super-diagonal, where r = a*tau/h is the Courant
    number, so that u^{n+1} = A @ u^n.

    @param mesh  1-D uniform mesh providing h, number_of_nodes() and ftype
    @param tau   time step
    @param a     advection speed
    @raises ValueError when the CFL condition r <= 1 is violated
    """
    r = a*tau/mesh.h
    if r > 1.0:
        # The original message said 0.5, but the condition checked (and the
        # scheme's CFL bound) is 1.0 — the message now matches the check.
        raise ValueError(f"The r: {r} should be smaller than 1.0")
    NN = mesh.number_of_nodes()
    k = np.arange(NN)
    A = diags([0], [0], shape=(NN, NN), format='csr')  # start from the zero matrix
    val0 = np.broadcast_to(1/2 - 1/2*r, (NN-1, ))  # super-diagonal weight
    val1 = np.broadcast_to(1/2 + 1/2*r, (NN-1, ))  # sub-diagonal weight
    I = k[1:]
    J = k[0:-1]
    A += csr_matrix((val0, (J, I)), shape=(NN, NN), dtype=mesh.ftype)
    A += csr_matrix((val1, (I, J)), shape=(NN, NN), dtype=mesh.ftype)
    return A
# Time stepping
def hyperbolic_lax(n, *fargs):
    """Advance the Lax-Friedrichs iteration to step n.

    Uses the module-level mesh, pde, tau, duration and solution vector uh0;
    applies the Dirichlet value at the left boundary, prints the max-norm
    error against the exact solution, and returns (uh0, t).
    """
    t = duration[0] + n*tau
    if n == 0:
        return uh0, t
    else:
        # Fixed argument order: the operator signature is (mesh, tau, a).
        # The original call passed (mesh, pde.a(), tau); numerically harmless
        # here since only the product a*tau is used, but wrong by name.
        A = hyperbolic_operator_explicity_lax_friedrichs(mesh, tau, pde.a())
        uh0[:] = A@uh0
        gD = lambda p: pde.dirichlet(p, t)
        mesh.update_dirichlet_bc(gD, uh0, threshold=0)
        solution = lambda p: pde.solution(p, t)
        e = mesh.error(solution, uh0, errortype='max')
        print(f"the max error is {e}")
        return uh0, t
# Create the animation of the numerical solution over time
box = [0, 2, 0, 2]
fig, axes = plt.subplots()
mesh.show_animation(fig, axes, box, hyperbolic_lax, frames=nt+1)
plt.show()
|
suanhaitech/pythonstudy2023
|
python-jovan/Numerical solution of differential equation/hyperbolic/test_exp2.py
|
test_exp2.py
|
py
| 3,132
|
python
|
en
|
code
| 2
|
github-code
|
6
|
28670801665
|
from os import close
from numpy.lib.npyio import NpzFile
import pandas as pd
import os
import tqdm
import re
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from scipy import stats
Folder_Path = r'C:\Users\hp\Desktop\wdpashp\wdpadata1\wdpa_ntl7_count' # folder with the CSVs to join (full path; avoid non-ASCII characters in it)
SaveFile_Path_base = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\BufferCount'
SaveFile_Path = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferCount' # output folder for the joined results
SaveFile_Path_sum = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferSum'
SaveFile_Path_mean = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferMean'
SaveFile_Path_mean_base = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferMean_base'
SaveFile_Path_sum_base = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferSum_base'
# Base_path = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\case.csv'
Base_path = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\case2.csv'
# Base_path = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferSum\bufferIntensityMode.csv'
Base_path_First = r'C:\Users\hp\Desktop\wdpashp\wdpadata3\Picuture\case\RingBufferMean\FirstRingBase.csv'
# Extract the per-ring values for one band/year and join them by feature id.
def SpaticalSelect(pattern, filename):
    """For each of the 51 ring files, pull the `pattern` column (e.g.
    'dmsp2010' or 'viirs2018') and left-join it onto the base table by
    objectid; write the merged result to SaveFile_Path/filename."""
    merged = pd.read_csv(Base_path)
    for ring in range(51):
        ring_df = pd.read_csv(os.path.join(Folder_Path, f'wdpa_ntl7_count{ring}.csv'))
        if pattern not in ring_df.columns:
            ring_df[pattern] = np.nan  # year missing from this file: join NaNs instead
        ring_df = ring_df[['objectid', pattern]]
        merged = merged.merge(ring_df, on='objectid', how='left', suffixes=('', f'_b{ring}'))
    merged.to_csv(os.path.join(SaveFile_Path, filename), index=False)
def getRingSumValue(filename, year):
    """Sum each feature's ring values over its [start, end] ring range.

    Reads ``SaveFile_Path_sum_base/filename`` where column 0 is the objectid,
    columns 1-2 are the (1-based) start/end ring indices and the remaining
    columns are per-ring values. Infinities are treated as missing. Returns a
    DataFrame with columns ['objectid', year].
    """
    df = pd.read_csv(os.path.join(SaveFile_Path_sum_base, filename))
    ntlValue = np.array(df.iloc[:, 1:])
    firstArr = None
    ids = np.array(df.iloc[:, [0]]).ravel()
    for i in range(len(ids)):
        value = ntlValue[i, 2:]             # per-ring values for this feature
        id = ids[i]
        indexmin = int(ntlValue[i, 0]) - 1  # 1-based start ring -> 0-based index
        indexmax = int(ntlValue[i, 1])      # end ring (exclusive slice bound)
        value[np.isinf(value)] = np.nan     # treat infinities as missing
        if indexmax == indexmin:
            # Degenerate range: original single-ring fallback kept as-is.
            # NOTE(review): value[indexmin - 1] looks off by one
            # (value[indexmin] would be the start ring) — confirm intent.
            meanV = value[indexmin - 1]
        else:
            meanV = np.nansum(value[indexmin:indexmax])
        firstLine = np.array([id, meanV])
        if firstArr is None:
            firstArr = firstLine
        else:
            firstArr = np.vstack((firstArr, firstLine))
    pdValue = pd.DataFrame(firstArr, columns=['objectid', year])
    return pdValue
def getIntensityValue(filename,year):
    """For each feature, compute the ring-to-ring increment divided by the
    ring index ("intensity") and pick the value at the stored mode index.

    Reads ``SaveFile_Path/filename`` where column 0 is the objectid, the next
    column holds the mode index, and the following columns hold cumulative
    per-ring values. Returns a DataFrame ['objectid', year].
    """
    df= pd.read_csv(os.path.join(SaveFile_Path,filename))
    ntlValue = np.array(df.iloc[:,1:])
    firstArr = None
    ids = np.array(df.iloc[:,[0]]).ravel()
    for i in range(len(ids)):
        value = ntlValue[i,2:]          # cumulative values for rings 1..50
        id = ids[i]                     # NOTE: shadows the builtin `id`
        mode = int(ntlValue[i,0])       # stored mode ring index
        value_before = ntlValue[i,1:-1] # same series shifted by one column
        ringV = value - value_before    # per-ring increments
        indexArr = np.arange(1,51,1)
        intensity = np.divide(ringV,indexArr)
        # NOTE(review): `mode` appears to be 1-based (taken from a ring-number
        # column) while `intensity` is 0-indexed — intensity[mode] may be off
        # by one ring; confirm against how the mode column was produced.
        modeV = intensity[mode]
        firstLine = np.array([id,modeV])
        if firstArr is None:
            firstArr = firstLine
        else:
            firstArr = np.vstack((firstArr,firstLine))
    pdValue = pd.DataFrame(firstArr,columns=['objectid',year])
    return pdValue
def getRingSumValue2csv(filename, year):
    """Turn cumulative buffer values into per-ring values.

    Reads ``SaveFile_Path_base/filename``; column 0 is the objectid and the
    remaining columns are cumulative per-buffer values. Each ring value is the
    difference between consecutive columns. ``year`` is accepted for signature
    parity with the sibling helpers but unused.
    """
    frame = pd.read_csv(os.path.join(SaveFile_Path_base, filename))
    out_columns = np.delete(frame.columns, 1)  # first value column has no predecessor
    cumulative = np.array(frame.iloc[:, 1:])
    object_ids = np.array(frame.iloc[:, [0]]).ravel()
    stacked = None
    for row_idx in range(len(object_ids)):
        ring_values = cumulative[row_idx, 1:] - cumulative[row_idx, :-1]
        row = np.hstack((object_ids[row_idx], ring_values))
        stacked = row if stacked is None else np.vstack((stacked, row))
    return pd.DataFrame(stacked, columns=out_columns)
def getRingMeanValue2csv(filename,year):
    """Per-ring mean = ring sum / ring count, aligned by objectid.

    Reads the ring counts from ``SaveFile_Path/filename`` and the ring sums
    from ``SaveFile_Path_sum/filename`` (assumed schemas — confirm), divides
    elementwise per feature and returns a DataFrame with the count file's
    columns. ``year`` is accepted but unused.
    """
    df= pd.read_csv(os.path.join(SaveFile_Path,filename))
    columnsArr = df.columns
    ntlValue = np.array(df.iloc[:,1:])
    df_sum= pd.read_csv(os.path.join(SaveFile_Path_sum,filename))
    firstArr = None
    ids = np.array(df.iloc[:,[0]]).ravel()
    for i in range(len(ids)):
        value = ntlValue[i,:]  # per-ring counts for this feature
        id = ids[i]            # NOTE: shadows the builtin `id`
        # Matching row of ring sums, looked up by objectid (drop the id column).
        value_sum = np.array(df_sum.loc[df_sum['objectid'] == id]).ravel()[1:]
        meanV = value_sum/value  # elementwise sum/count -> mean
        firstLine = np.hstack((id,meanV))
        if firstArr is None:
            firstArr = firstLine
        else:
            firstArr = np.vstack((firstArr,firstLine))
    pdValue = pd.DataFrame(firstArr,columns=columnsArr)
    return pdValue
def LinearModel(x, y):
    """Fit y = a*x + b by ordinary least squares.

    Returns (predictions, slope array, intercept, R^2 of the fit).
    """
    reg = LinearRegression().fit(x, y)
    fitted = reg.predict(x)
    return fitted, reg.coef_, reg.intercept_, r2_score(y, fitted)
def writefile(filename, lines):
    """Append each string in *lines* to *filename*, one per line.

    The original left the file handle open (resource leak); `with` now
    guarantees it is flushed and closed even on error.
    """
    with open(filename, 'a') as f:
        for line in lines:
            f.write(line + '\n')
if __name__ == "__main__":
    # # Part I and Part II (swap in the base DataFrame)
    # for year in tqdm.tqdm(range(1992,2014,1)):
    #     pattern = 'dmsp' + str(year)
    #     filename = 'case_' + str(year) + '.csv'
    #     SpaticalSelect(pattern,filename)
    # for year in tqdm.tqdm(range(2014,2019,1)):
    #     pattern = 'viirs' + str(year)
    #     filename = 'case_' + str(year) + '.csv'
    #     SpaticalSelect(pattern,filename)
    # # Part II: build the per-ring buffer values
    # for year in tqdm.tqdm(range(1992,2019,1)):
    #     filename = 'case_' + str(year) + '.csv'
    #     pdValue = getRingSumValue2csv(filename,str(year))
    #     pdValue.to_csv(os.path.join(SaveFile_Path,'case' + str(year) + '.csv'), index=False)
    # # Part II: build the per-ring buffer means
    # for year in tqdm.tqdm(range(1992,2019,1)):
    #     filename = 'case' + str(year) + '.csv'
    #     pdValue = getRingMeanValue2csv(filename,str(year))
    #     pdValue.to_csv(os.path.join(SaveFile_Path_mean,'case' + str(year) + '.csv'), index=False)
    # # Part: merge with the base DataFrame
    # for year in tqdm.tqdm(range(1992,2019,1)):
    #     filename = 'case' + str(year) + '.csv'
    #     basedf = pd.read_csv(Base_path_First)
    #     df= pd.read_csv(os.path.join(SaveFile_Path_sum,filename))
    #     basedf = basedf.merge(df, on='objectid',how='left')
    #     basedf.to_csv(os.path.join(SaveFile_Path_sum_base,filename), index=False)
    # # # Part II: collect the 1992-2018 buffer sums per feature
    # ValueList = []
    # for year in tqdm.tqdm(range(1992,2019,1)):
    #     filename = 'case' + str(year) + '.csv'
    #     pdValue = getRingSumValue(filename,str(year))
    #     ValueList.append(pdValue)
    # basedf = None
    # for value in ValueList:
    #     if basedf is None:
    #         basedf = value
    #     else:
    #         basedf = basedf.merge(value, on='objectid',how='left')
    # basedf.to_csv(os.path.join(SaveFile_Path_sum_base,'bufferSum.csv'), index=False)
    # Part IV: fit a trend (OLS + Kendall tau + Theil-Sen) per feature on the
    # 1992-2018 buffer sums; one figure is shown for the hand-picked feature 40780.
    df= pd.read_csv(os.path.join(SaveFile_Path_sum_base,'bufferSum.csv'))
    ntlValue = np.array(df.iloc[:,1:])
    ids = np.array(df.iloc[:,[0]]).ravel()
    firstArr = None
    lines = []
    for i in tqdm.tqdm(range(len(ids))):
        x = np.arange(1, 28, 1)  # 27 yearly observations (1992-2018)
        x_lable = [str(i) for i in np.arange(1992, 2019, 1)]
        y = ntlValue[i,:]
        y1,a1,b1,r2_1 = LinearModel(x.reshape(-1, 1),y.reshape(-1, 1))
        tau, p_value = stats.kendalltau(x, y)  # monotonic-trend test
        res = stats.theilslopes(y, x, 0.90)    # robust slope with 90% CI
        # print(res[0],res[1],res[2],res[3])
        r2_res = r2_score(y,res[1] + res[0] * x)
        # One-off diagnostic plot for a single feature of interest.
        if int(ids[i]) == 40780:
            plt.rcParams['axes.facecolor'] = '#424242'
            plt.rc('font',family='Times New Roman')
            plt.figure(figsize=(15,8))
            # plt.plot(x.reshape(-1, 1), y1, 'g-')
            plt.xlabel('Year',size = 35)
            plt.ylabel('NTL DN Value', size = 35)
            plt.xticks(x, x_lable, fontproperties = 'Times New Roman', size = 30)
            plt.yticks(fontproperties = 'Times New Roman', size = 30)
            ax= plt.gca()
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            import matplotlib.ticker as ticker
            ax.xaxis.set_major_locator(ticker.MultipleLocator(2))
            plt.scatter(x, y, s=200 ,c='#B36E5F')
            plt.plot(x, res[1] + res[0] * x, '-',color='#E3B96D',linewidth=3)
            # plt.plot(x, res[1] + res[2] * x, 'r--')
            # plt.plot(x, res[1] + res[3] * x, 'r--')
            plt.title("max_index:"+str(ids[i]),size = 15)
            line = str(ids[i]) + ',' + str(a1) + ',' + str(b1) + ',' + str(r2_1) + ',' +str(tau) + ',' + str(p_value) + ',' + str(res[0]) + ',' + str(res[1]) + ',' + str(res[2]) + ',' + str(res[3]) + ',' + str(r2_res)
            print(line)
            plt.tight_layout()
            plt.show()
            # plt.savefig(os.path.join(SaveFile_Path_sum_base ,'Picture1',str(int(ids[i])) + '.png'))
            plt.close()
        # CSV row: id, OLS slope/intercept/R2, Kendall tau/p, Theil-Sen slope,
        # intercept, CI bounds, Theil-Sen R2; brackets from array reprs stripped.
        line = str(ids[i]) + ',' + str(a1) + ',' + str(b1) + ',' + str(r2_1) + ',' +str(tau) + ',' + str(p_value) + ',' + str(res[0]) + ',' + str(res[1]) + ',' + str(res[2]) + ',' + str(res[3]) + ',' + str(r2_res)
        lines.append(line.replace('[','').replace(']',''))
    # writefile(os.path.join(SaveFile_Path_sum_base, 'bufferSumslopsen.csv'),lines)
|
HaoweiGis/EarthLearning
|
tools/LightPollution/LightPollution2.py
|
LightPollution2.py
|
py
| 9,858
|
python
|
en
|
code
| 3
|
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.