blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a9ed423e9c5764389b6b01030d56212aff28f32f | Python | shangeth/Facial-Emotion-Recognition-PyTorch-ONNX | /PyTorch/FER_image.py | UTF-8 | 1,931 | 2.59375 | 3 | [] | no_license | import cv2
import torch
import torchvision.transforms as transforms
from PIL import Image
import matplotlib.pyplot as plt
import argparse
import os
from model import *
def load_trained_model(model_path):
    """Build a Face_Emotion_CNN and populate it with the weights stored at *model_path*.

    The checkpoint is mapped onto CPU storage so it loads on machines
    without a GPU; strict=False tolerates missing/extra keys in the state dict.
    """
    net = Face_Emotion_CNN()
    state = torch.load(model_path, map_location=lambda storage, loc: storage)
    net.load_state_dict(state, strict=False)
    return net
def FER_image(img_path):
    """Detect faces in the image at *img_path*, classify each face's emotion
    and display the annotated image with matplotlib.

    Loads the trained model from ./models/FER_trained_model.pt on every call
    and finds faces with an OpenCV Haar cascade (paths are relative to the
    working directory).
    """
    model = load_trained_model('./models/FER_trained_model.pt')
    # Index -> label mapping used by the classifier's output layer.
    # (Fixed the 'disguest' typo in the displayed label.)
    emotion_dict = {0: 'neutral', 1: 'happiness', 2: 'surprise', 3: 'sadness',
                    4: 'anger', 5: 'disgust', 6: 'fear'}
    val_transform = transforms.Compose([
        transforms.ToTensor()])

    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(img)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # The model was trained on 48x48 grayscale crops.
        resize_frame = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
        # Bug fix: the original computed resize_frame/256 here and
        # immediately discarded the result; ToTensor() already scales
        # pixel values to [0, 1], so the dead statement was removed.
        X = Image.fromarray(resize_frame)
        X = val_transform(X).unsqueeze(0)
        with torch.no_grad():
            model.eval()
            log_ps = model.cpu()(X)
            # The network outputs log-probabilities; exp() recovers probabilities.
            ps = torch.exp(log_ps)
            top_p, top_class = ps.topk(1, dim=1)
            pred = emotion_dict[int(top_class.numpy())]
        cv2.putText(img, pred, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1)

    # OpenCV is BGR; convert for correct matplotlib colours.
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.grid(False)
    plt.axis('off')
    plt.show()
if __name__ == "__main__":
    # CLI entry point: classify the emotions of faces in a single image.
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--path", required=True,
                    help="path of image")
    args = vars(ap.parse_args())
    if not os.path.isfile(args['path']):
        # Fixed the grammar of the original message ("does not exists!!").
        print('The image path does not exist!')
    else:
        print(args['path'])
        FER_image(args['path'])
7bb5b2eb3ec8b300f3adc69c5e2756e7d9955886 | Python | jrefusta/LP-FIB-QuizBot | /bot/bot.py | UTF-8 | 8,110 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import telegram
from telegram import ParseMode
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import networkx as nx
import matplotlib.pyplot as plt
import os
import pickle
def start(bot, update):
    """/start handler: greet the user in Catalan."""
    bot.send_message(chat_id=update.message.chat_id, text="Hola! Soc el QuizBot.")
def help(bot, update):
    """/help handler: send the command reference (Catalan).

    NOTE(review): the name shadows the builtin `help`; kept because the
    handler registration at the bottom of the file refers to it.
    """
    bot.send_message(chat_id=update.message.chat_id, text="QuizBot contesta textualment i gràficament a preguntes relacionades a les enquestes descrites en el compilador.\nComanes:\n/start inicia la conversa amb el Bot.\n/help contesta amb una llista de comanes amb una breu descripció.\n/author nom complet de l'autor del projecte i mail de la FIB.\n/quiz <idEnquesta> inicia un intèrpret de l'enquesta descrita previament en el compilador.\n/bar <idEnquesta> mostra una gràfica de barres mostrant un diagrama de barres a la pregunta donada.\n/pie <idEnquesta> mostra una gràfica de formatget amb el percentatge a la pregunta donada.\n/report el bot ha de donar quelcom tipus taula amb el nombre de respostes obtingudes per cada valor de cada pregunta.")
def author(bot, update):
    """/author handler: send the project author's name and FIB e-mail."""
    bot.send_message(chat_id=update.message.chat_id, text="Joan Manuel Ramos Refusta joan.manuel.ramos@est.fib.upc.edu")
def afegirVot(preg, val):
    """Record one vote for answer *val* of question *preg* in dict.pickle.

    The tally is persisted as {(question, answer): count}. A missing or
    unreadable file starts a fresh tally.

    Fixes over the original: the local no longer shadows the builtin
    `dict`, file handles are closed via `with`, and the duplicated
    load-or-create branches were merged into one increment.
    """
    try:
        with open("dict.pickle", "rb") as fh:
            tally = pickle.load(fh)
    except OSError:  # IOError is an alias of OSError
        tally = {}
    key = (preg, val)
    tally[key] = tally.get(key, 0) + 1
    with open("dict.pickle", "wb") as fh:
        pickle.dump(tally, fh)
def respostes(r):
    """Format the raw answer specification *r* for display and collect the
    valid answer codes into the module-global ``possiblesRes``.

    For every ':' the character two positions back is recorded as a valid
    answer code; ';' becomes a newline, and a space is kept only when it
    does not follow a digit or a ';'.
    """
    global possiblesRes
    possiblesRes = []
    parts = []
    for idx, ch in enumerate(r):
        if ch == ':':
            possiblesRes.append(r[idx - 2])
        if ch == ' ':
            prev = r[idx - 1]
            if not prev.isdigit() and prev != ';':
                parts.append(' ')
        elif ch == ';':
            parts.append('\n')
        else:
            parts.append(ch)
    return ''.join(parts)
def quiz(bot, update, args):
    """/quiz <idEnquesta>: load the compiled survey graph and send the first
    question.

    Walks the gpickle graph produced by the compiler: survey node ->
    question ('pregunta') node -> answer ('resposta') node reached through
    an 'item' edge, collecting 'alternativa' edges as conditional jumps.
    Conversation state is kept in module globals so interaccio() can
    continue the dialogue on the next plain-text message.

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; the nesting chosen mirrors the parallel code in interaccio().
    """
    try:
        global dicN
        global listE
        global preg
        global identificador
        global destiN
        global listAltern
        global origenN
        listAltern = []
        destiN = ""
        G = nx.read_gpickle("../cl/test.gpickle")
        dicN = dict(G.nodes(data=True))
        identificador = str(args[0])
        inici = "Enquesta " + (dicN[identificador]['id']) + ":"
        bot.send_message(chat_id=update.message.chat_id, text=inici)
        listE = list(G.edges(data=True))
        origenN = (dicN[identificador]['id'])
        # Follow the survey entry edge to the first node.
        for i in range(len(listE)):
            if (listE[i][0] == origenN):
                destiN = (listE[i][1])
        preg = identificador
        if (dicN[destiN]['tipus'] == 'pregunta'):
            # Append the question text, then scan its outgoing edges.
            preg = preg + dicN[destiN]['p'] + "\n"
            origenN = destiN
            for i in range(len(listE)):
                if (listE[i][0] == origenN):
                    if (listE[i][2]['tipus'] == 'item'):
                        # Edge to the node holding the answer choices.
                        resposta = (listE[i][1])
                    if (listE[i][2]['tipus'] == 'normal'):
                        # Unconditional next node.
                        destiN = listE[i][1]
                    if (listE[i][2]['tipus'] == 'alternativa'):
                        # Conditional jump keyed by the answer number.
                        listAltern.append(tuple([listE[i][2]['number'], listE[i][1]]))
            if (dicN[resposta]['tipus'] == 'resposta'):
                r = dicN[resposta]['r']
                r = respostes(r)
                preg = preg + r
        bot.send_message(chat_id=update.message.chat_id, text=preg)
    except Exception as e:
        print(e)
        bot.send_message(chat_id=update.message.chat_id, text="Error")
def interaccio(bot, update):
    """Handle a plain-text message as an answer to the current survey question.

    Records the vote, follows an 'alternativa' jump when the answer matches
    one, then advances to the next node: either sends the next question or
    a thank-you message when the 'end' node is reached. Relies on the
    module globals initialised by quiz().

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; nesting is a best guess consistent with quiz() above.
    """
    try:
        global destiN
        global listAltern
        global ptControl
        global origenN
        global identificador
        global possiblesRes
        answer = update.message.text
        # Only accept known answer codes; ignore commands starting with '/'.
        if (answer in possiblesRes and answer[0] != '/'):
            afegirVot(origenN, answer)
            # Take the conditional branch matching this answer, if any.
            for i in range(len(listAltern)):
                if (listAltern[i][0] == answer):
                    ptControl = destiN  # remember the normal continuation
                    destiN = listAltern[i][1]
            listAltern = []
            destiPrev = destiN
            if (dicN[destiN]['tipus'] != 'end'):
                preg = identificador
                if (dicN[destiN]['tipus'] == 'pregunta'):
                    preg = preg + dicN[destiN]['p'] + "\n"
                    origenN = destiN
                    for i in range(len(listE)):
                        if (listE[i][0] == origenN):
                            if (listE[i][2]['tipus'] == 'item'):
                                resposta = (listE[i][1])
                            if (listE[i][2]['tipus'] == 'normal'):
                                destiN = listE[i][1]
                            if (listE[i][2]['tipus'] == 'alternativa'):
                                listAltern.append(tuple([listE[i][2]['number'], listE[i][1]]))
                            if (destiPrev == destiN):
                                # No progress: resume the saved continuation.
                                destiN = ptControl
                    if (dicN[resposta]['tipus'] == 'resposta'):
                        r = dicN[resposta]['r']
                        r = respostes(r)
                        preg = preg + r
                # NOTE(review): `louise` is assigned but never used.
                louise = update.message.text
                bot.send_message(chat_id=update.message.chat_id, text=preg)
            else:
                bot.send_message(chat_id=update.message.chat_id, text=identificador+"> Gràcies pel teu temps!")
                destiN = ""
    except Exception as e:
        print(e)
        bot.send_message(chat_id=update.message.chat_id, text='Error')
def report(bot, update):
    """/report: send a Markdown table with the vote count per (question, value).

    Reads the tally pickled by afegirVot(). Fix: the local no longer
    shadows the builtin `dict`, and the pickle file is closed via `with`.
    """
    try:
        with open("dict.pickle", "rb") as fh:
            tally = pickle.load(fh)
        text = "*pregunta valor respostes* \n"
        for key in tally:
            text = text + str(key[0]) + ' ' + str(key[1]) + ' ' + str(tally[key]) + '\n'
        bot.send_message(chat_id=update.message.chat_id, text=text, parse_mode=ParseMode.MARKDOWN)
    except Exception as e:
        print(e)
        bot.send_message(chat_id=update.message.chat_id, text="Error")
def pie(bot, update, args):
    """/pie <idEnquesta>: send a pie chart of the answer shares for question args[0].

    Reads the tally pickled by afegirVot(), renders it with matplotlib and
    sends the temporary PNG. Fixes: no longer shadows the builtin `dict`,
    closes the pickle file, and builds `explode` without an append loop.
    """
    try:
        with open("dict.pickle", "rb") as fh:
            tally = pickle.load(fh)
        preg = str(args[0])
        labels = []
        sizes = []
        for key in tally:
            if (str(key[0]) == preg):
                labels.append(str(key[1]))
                sizes.append(int(tally[key]))
        # One identical offset per wedge.
        explode = [0.1] * len(sizes)
        fig1, ax1 = plt.subplots()
        plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True)
        plt.axis('equal')
        plt.savefig('pie.png')
        bot.send_photo(chat_id=update.message.chat_id, photo=open('pie.png', 'rb'))
        plt.clf()
        os.remove('pie.png')
    except Exception as e:
        print(e)
        bot.send_message(chat_id=update.message.chat_id, text="Error")
def bar(bot, update, args):
    """/bar <idEnquesta>: send a bar chart of the answer counts for question args[0].

    Same data source and temp-file dance as pie(). Fixes: no longer shadows
    the builtin `dict` and closes the pickle file.
    """
    try:
        with open("dict.pickle", "rb") as fh:
            tally = pickle.load(fh)
        preg = str(args[0])
        labelsb = []
        sizesb = []
        for key in tally:
            if (str(key[0]) == preg):
                labelsb.append(str(key[1]))
                sizesb.append(int(tally[key]))
        plt.bar(labelsb, sizesb)
        plt.savefig('bar.png')
        bot.send_photo(chat_id=update.message.chat_id, photo=open('bar.png', 'rb'))
        plt.clf()
        os.remove('bar.png')
    except Exception as e:
        print(e)
        bot.send_message(chat_id=update.message.chat_id, text="Error")
# --- bot bootstrap ---------------------------------------------------------
# The token is kept out of source control in token.txt.
# NOTE(review): the file handle is never closed; harmless for a one-shot
# read at startup.
TOKEN = open('token.txt').read().strip()
updater = Updater(token=TOKEN)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('help', help))
dispatcher.add_handler(CommandHandler('author', author))
dispatcher.add_handler(CommandHandler('quiz', quiz, pass_args=True))
# Any plain-text message is treated as a quiz answer.
dispatcher.add_handler(MessageHandler(Filters.text, interaccio))
dispatcher.add_handler(CommandHandler('report', report))
dispatcher.add_handler(CommandHandler('pie', pie, pass_args=True))
dispatcher.add_handler(CommandHandler('bar', bar, pass_args=True))
updater.start_polling()
| true |
c5982356cac32ea27d239f0438258f57c4b40f2d | Python | omizu-12/AtCorder | /ABC158/B.py | UTF-8 | 122 | 2.96875 | 3 | [] | no_license | N,A,B = list(map(int,input().split()))
# One cycle paints A blue days then B red days; count full cycles plus the
# blue days in the leftover partial cycle.
C = A + B
# Bug fix: int(N / C) goes through a float and loses precision for large N
# (N can exceed 2**53); use integer floor division instead.
D = N // C
E = A * D
F = N % C
if F > A:
    # The partial cycle contains all A blue days.
    print(E + A)
else:
    print(E + F)
| true |
53a2649e6ca6f660f4cf84e9296c72595146be0f | Python | ucefizi/KattisPython | /conundrum.py | UTF-8 | 210 | 3.265625 | 3 | [] | no_license | # Problem statement: https://open.kattis.com/problems/conundrum
# Count positions where the input differs from the repeating pattern "PER".
s = input()
x = 0
for pos, ch in enumerate(s):
    # Exactly one of the original's three (char, pos%3) clauses applies per
    # position, which is equivalent to comparing against "PER"[pos % 3].
    if ch != "PER"[pos % 3]:
        x += 1
print(x)
| true |
9028b9c75b99dc00b8a8f509aa88519061f8d0f6 | Python | sarguhl/Exyls-Bot-LOL | /lib/cogs/warn.py | UTF-8 | 2,195 | 2.75 | 3 | [] | no_license | import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions
import json
# Load the persisted warning tally at import time; a missing or invalid
# JSON body starts a fresh structure with an empty user list.
with open('./data/db/reports.json', "r", encoding='utf-8') as f:
    try:
        report = json.load(f)
    except ValueError:
        report = {}
        report['users'] = []
class Warn(commands.Cog):
    """Moderation cog: warn members and escalate to kick (3rd) or ban (4th)."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def warn(self, ctx, user: discord.Member, *reason: str):
        """Warn *user* with *reason*; the 3rd warning kicks, the 4th bans.

        Bug fix: the original tested `len(...) >= 3` before `>= 4`, so the
        ban branch was unreachable; checks are now ordered most-severe
        first.
        """
        if not reason:
            await ctx.send(":no_entry: Please provide a reason!")
            return
        reason = ' '.join(reason)
        for current_user in report['users']:
            if current_user['id'] == user.id:
                current_user['reasons'].append(reason)
                if len(current_user['reasons']) >= 4:
                    await user.ban(reason=f"{reason} | 4th warning")
                elif len(current_user['reasons']) >= 3:
                    await user.send("You got kicked for the 3rd warning. The next warning will be an instant ban.")
                    await user.kick(reason=f"{reason} | 3rd warning")
                break
        else:
            # First warning for this user.
            report['users'].append({
                'id': user.id,
                'name': user.name,
                'reasons': [reason, ]
            })
        # Persist the updated tally.
        with open('./data/db/reports.json', 'w') as f:
            json.dump(report, f, indent=4)
        embed = discord.Embed(
            name="Warned",
            description=f"{user.mention} warned by the moderator {ctx.author.mention}. At: {ctx.message.created_at}",
            color=ctx.author.color
        )
        await ctx.send(embed=embed)

    @commands.command(pass_context=True)
    async def warnings(self, ctx, user: discord.Member):
        """Report how many times *user* has been warned, and the reasons."""
        for current_user in report['users']:
            if user.id == current_user['id']:
                await ctx.send(f"{user.name} has been reported `{len(current_user['reasons'])}` times. Reasons: `{','.join(current_user['reasons'])}`")
                break
        else:
            await ctx.send(f":no_entry: {user.name} has never been reported!")
def setup(bot):
    """discord.py extension entry point: attach the Warn cog to *bot*."""
    bot.add_cog(Warn(bot))
ee13bce52a2340fa65efdab7fd811c4df37b99b7 | Python | ZarmeenLakhani/Python-Documentation | /Lists.py | UTF-8 | 1,365 | 4.3125 | 4 | [] | no_license | x=1
# Type-conversion warm-up on the integer x defined above.
print(str(x))
print(float(x))
bool(0) #only 0 is considered as false rest of the integers are considered true.even the negative ones
#sometimes a specific data type is required
# String/list slicing and join exercises (tutorial script).
fruit = 'banana'
len(fruit)  # evaluated and discarded; kept from the original
digits = [11, 0, 1, 2, 3, 9, 5, 4, 6, 18]
digits.sort()
print(digits[-len(digits)])  # first element via negative indexing
print(digits[-1])  # last element
print(digits[0])  # first element
print(digits[-2])  # second-to-last element (original comment said "last")
# Slicing
print(digits[:3])
print(digits[0:3])
print(digits[3])  # fourth item, counting from 0
# Bug fix: the original `digits[0::7:2]` is a SyntaxError (a slice takes at
# most two colons); per the surrounding comment the intent was stepping
# through the list, so use stride 2.
print(digits[0::2])
print(digits[::-1])  # reversed copy
# A negative stride walks right-to-left, so start must be after stop:
print(digits[0:5:-2])  # empty
print(digits[5:0:-2])  # right-to-left instead
for i in range(len(digits)):
    print(digits[0:i])
# Splitting and Joining
hobbies = "reading, chess, coding, sudoku"
z = hobbies.split(", ")
print(z)
# turns string into a list
y = hobbies.split("chess")
print(y)
# the text before and after 'chess' become the two list elements
joined = " and ".join(z)
print(joined)
csv = ','.join(z)
print(csv)
| true |
73710ac828b09153c6ae26ee96fa31c9c59faabe | Python | gilsontm/linguagens-formais | /source/utils/messages.py | UTF-8 | 514 | 2.859375 | 3 | [] | no_license | INVALID_AUTOMATA = "Autômato inválido."
# User-facing error messages (Portuguese) for the formal-languages tool.
INVALID_GRAMMAR = "Gramática inválida."  # invalid grammar
INVALID_REGEX = "Expressão regular inválida."  # invalid regular expression
INVALID_FIRST_OPERAND = "Operando 1 inválido."  # invalid first operand
INVALID_SECOND_OPERAND = "Operando 2 inválido."  # invalid second operand
GRAMMAR_NOT_REGULAR = "A gramática não é regular."  # grammar is not regular
GRAMMAR_NOT_CONTEXT_FREE = "A gramática não é livre de contexto."  # not context-free
GRAMMAR_CONFLICT = "Houve conflito. A gramática não é LL(1)."  # LL(1) conflict
GRAMMAR_UNSUPORTED = "A gramática não é suportada (provavelmente por ser muito extensa)."  # unsupported (probably too large)
| true |
0735b9cf80a69eb310b65ad0e07fae2b21bd67c9 | Python | dongqing7/herb_pairs_netowrk_v4 | /herb_distance_generation.py | UTF-8 | 4,809 | 2.65625 | 3 | [] | no_license |
from proximity_key import *
from collections import defaultdict
class Herb_Distance:
    """Network-based distances between herb pairs, computed from their
    ingredients' protein targets on graph G.

    Depends on the Sets_Lengths / Network_Distance helpers and the
    Ingredients / Herb wrapper objects imported from proximity_key.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; nesting of a few statements is a best guess.
    """

    def __init__(self, G_obj, Ingredients_obj, Herb_obj):
        # (sic) the original attribute name 'G_ogj' is kept.
        self.G_ogj = G_obj
        self.G = self.G_ogj.g
        self.Ingredients = Ingredients_obj
        # Restrict ingredient -> target mapping to nodes present in the graph.
        self.Ingredients.ingredients_target_dict(list(self.G_ogj.G.nodes))
        self.Herb = Herb_obj
        # Build herb -> ingredients and herb -> ingredient-target mappings.
        self.Herb.herb_ingre_dict(self.Ingredients.ingre_tar_dict)
        self.Herb.herb_ingretargets_dic(self.Ingredients.ingre_tar_dict)

    def herb_herb_length_dict(self, nodes_from, nodes_to, distance_method):
        """Pairwise ingredient-to-ingredient distances between two ingredient sets."""
        length_dict = Sets_Lengths(nodes_from, nodes_to).ingre_length(self.length_fuc, distance_method)
        return length_dict

    def herb_herb_dis(self, herb_from, herb_to, distance_method, distance_method_herb_list):
        """Two-level distance: ingredient distances aggregated per herb-level method.

        Returns None when either herb has no ingredient entry; otherwise a
        dict holding the ingredient method, the raw length dict and one
        aggregated distance per entry of *distance_method_herb_list*.
        """
        if any([herb not in self.Herb.herb_ingre_dict.keys() for herb in [herb_from, herb_to]]):
            print('herb {} or {} not in herb_ingre dictionary'.format(herb_from, herb_to))
            return None
        else:
            nodes_from = self.Herb.herb_ingre_dict[herb_from]
            nodes_to = self.Herb.herb_ingre_dict[herb_to]
            length_dict = self.herb_herb_length_dict(nodes_from, nodes_to, distance_method)
            saved_lengthes_dict = defaultdict()
            for distance_method_herb in distance_method_herb_list:
                dis_obj = Network_Distance(nodes_from, nodes_to, length_dict)
                distance = dis_obj.network_distance(distance_method_herb)
                saved_lengthes_dict[distance_method_herb] = distance
            distances = {'ingre_method': distance_method,
                         'two_level': {'length_dict': length_dict,
                                       'distances': saved_lengthes_dict}}
            return distances

    def length_fuc(self, nodes_from, nodes_to, distance_method):
        """Ingredient-to-ingredient distance callback passed to Sets_Lengths."""
        distance = self.Ingredients.ingre_ingre_dis(nodes_from, nodes_to, self.G, distance_method)
        return distance

    def herb_herb_distance_uni(self, herb_from, herb_to, distance_method):
        """One-level distance computed on the union of each herb's ingredient targets."""
        if any([herb not in self.Herb.herb_ingretargets_dic.keys() for herb in [herb_from, herb_to]]):
            print('herb {} or {} not in herb_ingretarget dictionary'.format(herb_from, herb_to))
            return None
        else:
            nodes_from = self.Herb.herb_ingretargets_dic[herb_from]
            nodes_to = self.Herb.herb_ingretargets_dic[herb_to]
            length_dict = Sets_Lengths(nodes_from, nodes_to).target_lengths(self.G)
            dis_obj = Network_Distance(nodes_from, nodes_to, length_dict)
            distance = dis_obj.network_distance(distance_method)
            distances = {'ingre_method': distance_method,
                         'one_level': {'union': distance}}
            return distances

    def herb_herb_dis_all(self, herb_from, herb_to):
        """All method combinations (two-level and one-level) for one herb pair."""
        method_list_ingre = ['separation', 'closest', 'shortest', 'kernel', 'center']
        method_list_herb = ['separation', 'closest', 'shortest', 'kernel', 'center']
        dis_dict = defaultdict()
        for method_ingre in method_list_ingre:
            values_two_level = self.herb_herb_dis(herb_from, herb_to,
                                                  method_ingre, method_list_herb)
            values_one_level = self.herb_herb_distance_uni(herb_from, herb_to, method_ingre)
            dis_dict[method_ingre] = {'two_level': values_two_level['two_level'],
                                      'one_level': values_one_level['one_level']}
        return dis_dict

    def generator_result(self, herb_pairs_list):
        """Build a DataFrame of distances for (id1, name1, id2, name2, freq) tuples.

        Pairs that fail for any reason are skipped silently.
        NOTE(review): the bare `except: continue` also swallows
        KeyboardInterrupt/SystemExit; `except Exception` would be safer.
        """
        herb_pairs_distances = defaultdict()
        n = 0
        k = 1
        for herb_pairs in herb_pairs_list:
            herb1, herb1_name, herb2, herb2_name, frequency = herb_pairs
            try:
                distances = self.herb_herb_dis_all(herb1, herb2)
                print('yes, herb pairs {} and {} are successful'.format(herb1, herb2))
                for ingre_method in distances.keys():
                    dict_1 = distances[ingre_method]['two_level']['distances']
                    #dict_2 = distances[ingre_method]['one_level']
                    #dict_1.update(dict_2)
                    dict_1.update({
                        'Ingredient-level distance type': ingre_method,
                        'herb1': herb1,
                        'herb1_name': herb1_name,
                        'herb2': herb2,
                        'herb2_name': herb2_name,
                        'frequency': frequency
                    })
                    n += 1
                    herb_pairs_distances[n] = dict_1
                k += 1
                print('this is the {}th successful pairs'.format(k))
            except:
                continue
        return pd.DataFrame.from_dict(herb_pairs_distances, orient='index')
| true |
2fb022755844502687c00443db5736ea00236c8d | Python | alekospj/GoogleForecast | /src/sc_1_forecast.py | UTF-8 | 6,900 | 2.796875 | 3 | [
"MIT"
] | permissive | import pandas as pd
import warnings
import time
from datetime import datetime
from datetime import date
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import adfuller
from pmdarima import auto_arima
import statsmodels.api as sm
import plotly.graph_objects as go
import plotly.express as px
warnings.filterwarnings('ignore')
class forecastingGoogle():
    """Forecast a Google Trends search-interest series with a SARIMA model."""

    def __init__(self, df):
        self.df = df                # raw Google Trends export
        self.data_clean = None      # tidy frame built by pre_pro()
        self.model = None
        self.train_dt = None        # 70% training split
        self.test_dt = None         # remaining 30% test split
        self.graphs_show = False    # True -> also fig.show() interactively

    def pre_pro(self):
        """Clean the raw export into week/year/month/day/score columns."""
        df = self.df
        # Rename the columns of the Trends CSV layout.
        self.data_clean = df.reset_index().rename(columns={'Unnamed: 0': 'week', 'Category: All categories': 'score'})
        # Remove the header row.
        self.data_clean = self.data_clean.iloc[1:len(self.data_clean)]
        # Fix dtypes and derive calendar fields.
        self.data_clean.week = pd.to_datetime(self.data_clean.week, format='%Y-%m-%d')
        self.data_clean.score = self.data_clean.score.astype(int)
        self.data_clean['year'] = self.data_clean['week'].dt.year
        self.data_clean['month'] = self.data_clean['week'].dt.month
        self.data_clean['day'] = self.data_clean['week'].dt.day
        self.data_clean = self.data_clean[['week', 'year', 'month', 'day', 'score']]
        return self.data_clean

    def graphs_gen(self):
        """Return (avg-per-year, avg-per-month, timeseries) plotly figures."""
        # Average allocation per calendar month.
        m_dt = self.data_clean.groupby('month').score.mean()
        y = m_dt
        # Bug fix: the original label list had only 11 entries ('Nov' was
        # missing) while groupby('month') yields up to 12 groups, shifting
        # every label after October off by one.
        fig_avg_month = go.Figure(data=[go.Bar(
            y=y,
            x=['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
            text=y,
            textposition='auto',
        )])
        fig_avg_month.update_layout(title_text='Avg Allocation per Month', title_x=0.5)
        if self.graphs_show:
            fig_avg_month.show()

        # Average allocation per year. Assumes the export spans the last
        # five years up to today (six calendar years) -- verify against data.
        todays_date = date.today()
        year = int(todays_date.year)
        m_dt = self.data_clean.groupby('year').score.mean()
        y = m_dt
        fig_avg_year = go.Figure(data=[go.Bar(
            y=y,
            x=[year - 5, year - 4, year - 3, year - 2, year - 1, year],
            text=y,
            textposition='auto',
        )])
        fig_avg_year.update_layout(title_text='Avg Allocation per Year', title_x=0.5)
        if self.graphs_show:
            fig_avg_year.show()

        # The raw weekly time series.
        fig_timeseries = px.line(self.data_clean, x='week', y="score")
        fig_timeseries.update_layout(title_text='Search Score Over Time', title_x=0.5)
        if self.graphs_show:
            fig_timeseries.show()
        return fig_avg_year, fig_avg_month, fig_timeseries

    def train_sarimax_model(self, step):
        """Fit SARIMA(2,1,0)x(2,1,0,step) on 70% of the data and plot the fit.

        Returns (predictions-vs-data figure, residuals figure).
        """
        def adfuler_mets(time_series):
            # Dickey-Fuller stationarity diagnostics (computed, not displayed).
            dftest = adfuller(time_series, autolag='AIC')
            dfoutput = pd.Series(dftest[0:4],
                                 index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
            for key, value in dftest[4].items():
                dfoutput['Critical Value (%s)' % key] = value

        adfuler_mets(self.data_clean['score'])

        # Chronological 70/30 train/test split.
        all_len = len(self.data_clean)
        ts_tr = int(all_len * 0.7)
        ts_te = all_len - ts_tr
        train_data = self.data_clean[0:ts_tr]
        self.train_dt = train_data
        test_data = self.data_clean.tail(ts_te)
        self.test_dt = test_data

        # Orders previously selected offline with pmdarima.auto_arima; the
        # commented-out search loop was removed as dead code.
        my_order = (2, 1, 0)
        my_seasonal_order = (2, 1, 0, step)
        model = SARIMAX(train_data.score, order=my_order, seasonal_order=my_seasonal_order)
        model_fit = model.fit()

        # Out-of-sample forecast aligned to the test index.
        predictions = model_fit.forecast(len(test_data.score))
        predictions = pd.Series(predictions, index=test_data.index)
        residuals = test_data.score - predictions

        # Residual plot with a zero reference line and a mean annotation.
        fig_residual = go.Figure()
        fig_residual.add_trace(go.Scatter(y=residuals, name='residuals'))
        fig_residual.add_hline(y=0, line_width=3, line_dash="dash", line_color="green")
        fig_residual.update_layout(title_text='Residuals from SARIMA model', title_x=0.5)
        fig_residual.add_annotation(
            y=max(residuals),
            xref="x",
            yref="y",
            text='Residual mean is:' + str(residuals.mean()),
            showarrow=False,
            font=dict(
                family="Courier New, monospace",
                size=16,
                color="#ffffff"
            ),
            align="center",
            arrowhead=2,
            arrowsize=1,
            arrowwidth=2,
            arrowcolor="#636363",
            ax=20,
            ay=-30,
            bordercolor="#c7c7c7",
            borderwidth=2,
            borderpad=4,
            bgcolor="#ff7f0e",
            opacity=0.8
        )
        if self.graphs_show:
            fig_residual.show()

        # Original data with the train/test split and predictions overlaid.
        fig_res = go.Figure()
        fig_res.add_trace(go.Scatter(x=self.data_clean['week'], y=self.data_clean['score'], name='Data Original'))
        fig_res.add_trace(go.Scatter(x=self.train_dt['week'], y=self.train_dt['score'], name='Train Data'))
        fig_res.add_trace(go.Scatter(x=self.test_dt['week'], y=self.test_dt['score'], name='Test Data'))
        fig_res.add_trace(go.Scatter(x=self.test_dt['week'], y=predictions, name='Predictions'))
        fig_res.update_layout(title_text='Original Data and Predictions', title_x=0.5)
        if self.graphs_show:
            fig_res.show()
        return fig_res, fig_residual
if __name__ == "__main__":
    # Demo run on the bundled export.
    df = pd.read_csv('data/sun.csv')
    a = forecastingGoogle(df)
    a.pre_pro()
    a.graphs_gen()
    # 54-week seasonal period -- presumably ~one year of weekly samples;
    # verify against the data (52 would be the usual choice).
    a.train_sarimax_model(54)
| true |
82b228035724e542ba0073471b0399428482e944 | Python | Dikzamen/task_nerdysoft | /main.py | UTF-8 | 4,865 | 3.1875 | 3 | [] | no_license | import json
import tkinter as tk
from tkinter.ttk import Button, Entry, Label
from tkinter.filedialog import askopenfilename
from tkinter import LEFT, RIGHT
def contains(word, letters):
    """Return True when *word* holds every character of *letters*, counting
    multiplicity (e.g. 'll' requires at least two 'l's in *word*)."""
    needed = {ch: letters.count(ch) for ch in set(letters)}
    available = {ch: word.count(ch) for ch in set(word)}
    return all(available.get(ch, 0) >= cnt for ch, cnt in needed.items())
class Vocabulary:
    """A word set persisted as a JSON array in a file."""

    def __init__(self, filename):
        self.filename = filename
        self.dataset = set()   # in-memory copy of the word list
        self.read_data()

    def read_data(self):
        """(Re)load the word set from the backing JSON file."""
        with open(self.filename, 'r', encoding='utf8') as file:
            self.dataset = set(json.load(file))

    def update_data(self, data):
        """Overwrite the backing file with *data* and reload the set.

        Fixes: write with the same utf8 encoding used for reading (the
        original used the platform default, breaking non-ASCII words on
        some systems) and drop the leftover debug prints.
        """
        with open(self.filename, 'w', encoding='utf8') as f:
            json.dump(data, f)
        self.read_data()

    def append_word(self, word):
        """Add a single word and persist the vocabulary."""
        data = list(self.dataset)
        data.append(word)
        self.update_data(data)

    def append_data(self, filename):
        """Merge the words from another vocabulary file into this one."""
        vocabulary = Vocabulary(filename)
        data1, data2 = list(self.dataset), list(vocabulary.dataset)
        data1.extend(data2)
        self.update_data(data1)

    def count_occurrences(self, letters):
        """Return every stored word that contains all characters of *letters*."""
        result = []
        for word in self.dataset:
            if contains(word, letters):
                result.append(word)
        return result
class Application(tk.Tk):
    """Main window: load/merge vocabularies, append words, search by letters.

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source.
    """

    def __init__(self):
        super().__init__()
        self.geometry('800x600+0+0')
        # Vocabulary management controls.
        self.dictionary_label = Label(self, text='Current vocabulary contains 0 words')
        self.append_button = Button(self, text='Append vocabulary', command=lambda: self.open_file(False))
        self.replace_button = Button(self, text='Replace vocabulary', command=lambda: self.open_file(True))
        self.dictionary_label.pack()
        self.append_button.pack()
        self.replace_button.pack()
        # Add-a-word controls.
        self.label = Label(self, text='Add word to vocabulary')
        self.label.pack()
        self.word_entry = Entry(self)
        self.word_entry.pack()
        self.new_word_button = Button(self, text='Append word to vocabulary', command=self.add_word)
        self.new_word_button.pack()
        # Search controls.
        self.search_label = Label(self, text='Search for letters')
        self.search_label.pack()
        self.letters_entry = Entry(self)
        self.letters_entry.pack()
        self.search_button = Button(self, text='Start search', command=self.search)
        # NOTE(review): new_word_label is created but never packed or used.
        self.new_word_label = Label(self, text='Current vocabulary contains 0 words')
        self.answer_number = Entry(self)
        self.search_button.pack()
        self.answer_number.pack()
        self.page = 0               # current result page (10 items each)
        self.vocabulary = None      # active Vocabulary, set by open_file()
        self.page_widgets = []      # widgets of the currently shown page

    def destroy_widgets(self):
        # Clear the previous result page before rendering a new one.
        for widget in self.page_widgets:
            widget.destroy()

    def add_word(self):
        # Persist the typed word into the active vocabulary, if one is loaded.
        word = self.word_entry.get()
        if self.vocabulary:
            self.vocabulary.append_word(word)

    def generate_widgets(self, result, page):
        """Render one 10-item page of *result* plus prev/next pager buttons."""
        self.destroy_widgets()
        label = Label(self, text='Results')
        label.pack()
        self.page_widgets.append(label)
        for widget in result[page * 10: page * 10 + 10]:
            entry = Entry(self)
            entry.insert(0, widget)
            entry.pack()
            self.page_widgets.append(entry)
        if page > 0:
            # There is a previous page.
            button = Button(self, text='<',
                            command=lambda: self.generate_widgets(result, page - 1)
                            )
            button.pack(side=LEFT)
            self.page_widgets.append(button)
        if result[page * 10 + 10:]:
            # There is a next page.
            button = Button(self, text='>',
                            command=lambda: self.generate_widgets(result, page + 1)
                            )
            button.pack(side=RIGHT)
            self.page_widgets.append(button)

    def open_file(self, replace):
        """Pick a JSON vocabulary file; replace the current one or merge into it."""
        self.update()
        name = askopenfilename()
        vocabulary = Vocabulary(name)
        if replace or self.vocabulary is None:
            self.vocabulary = vocabulary
            self.dictionary_label.config(text=f'Current vocabulary contains {len(self.vocabulary.dataset)} words')
        else:
            self.vocabulary.append_data(name)
            self.dictionary_label.config(text=f'Current vocabulary contains {len(self.vocabulary.dataset)} words')

    def search(self):
        """Show all words containing the typed letters, plus the match count."""
        search_text = self.letters_entry.get()
        result = self.vocabulary.count_occurrences(search_text)
        self.answer_number.delete(0, 'end')
        self.answer_number.insert(0, str(len(result)))
        self.generate_widgets(result, 0)
def main():
    """Create the application window and enter the Tk event loop."""
    window = Application()
    window.mainloop()


if __name__ == '__main__':
    main()
| true |
5b7537e7f66c92550bc06dcf8d285c09ed6b991c | Python | almazkun/the_hard_way | /Python_3/ex21.py | UTF-8 | 763 | 4.46875 | 4 | [] | no_license | def add(a, b):
print("Adding {} + {}".format(a, b))
return a + b
def subtrackt(a, b):
    """Announce and return the difference a - b.

    (The misspelled name is kept: callers below use it.)
    """
    print("Subtrackting {} - {}".format(a, b))
    difference = a - b
    return difference
def multiply(a, b):
    """Announce and return the product of a and b."""
    print("Multyplying {} * {}".format(a, b))
    product = a * b
    return product
def divide(a, b):
    """Announce and return the true quotient a / b.

    Raises ZeroDivisionError for b == 0, as the original did.
    """
    print("Dividing {} / {}".format(a, b))
    quotient = a / b
    return quotient
# Demonstrate combining the arithmetic helpers, including nested calls.
# (Spelling of the output strings kept exactly as in the original.)
print("Let's make some calculations\n")
age = add(30, 7)
height = subtrackt(190, 4)
weight = multiply(35, 2)
iq = divide(220, 2)
print(
    "Age: {} years, height: {} santimetres, weight: {} kilograms, iq: {} or negligible.\n".format(
        age, height, weight, iq
    )
)
print("It is interesting: ")
what = add(age, subtrackt(height, multiply(weight, divide(iq, 2))))
print("Result: ", what, "Can you calculate that manualy?")
| true |
7e8527474dec9c9f8681f721695d6ed29d5b5c57 | Python | chudacontreras/python_basico_platzi | /poo/dados/dados.py | UTF-8 | 2,450 | 4.21875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
""" Este programa es un juego de azar en el que se lanza un dado y si el valor no es ni 1 ni 6 se pierde.
Para esto creamos la clase Dice con los atributos de un dado su valor y su cantidad de lados así como su comportamiento
como lo es el lanzarlo o girarlo para que obtengamos un valor al azar"""
#Importo libreria Random
import random
class Dice:
    """A six-sided die.

    A fresh die shows face 1; roll() picks a new random face and prints an
    ASCII picture of it. The art and value are name-mangled private, so
    callers read the value only through get_value().

    (Comments translated from Spanish; the ASCII art was re-centred because
    the source arrived with its whitespace mangled.)
    """

    # ASCII picture for each face; index 0 corresponds to face value 1.
    __DADO = ["""
-----
|   |
| o |
|   |
-----
""", """
-----
|o  |
|   |
|  o|
-----
""", """
-----
|o  |
| o |
|  o|
-----
""", """
-----
|o o|
|   |
|o o|
-----
""", """
-----
|o o|
| o |
|o o|
-----
""", """
-----
|o o|
|o o|
|o o|
-----"""]

    # Number of sides on the die.
    __SIDES = 6

    def __init__(self):
        # A freshly created die shows face 1.
        self.__value = 1

    def roll(self):
        """Roll the die: pick a random face in 1..6 and print it.

        Bug fix: the original used random.randint(0, 6), which could
        produce the invalid face 0 (and 7 outcomes instead of 6).
        """
        self.__value = random.randint(1, self.__SIDES)
        self.__display_dice()

    def get_value(self):
        """Return the face currently showing (1..6)."""
        return self.__value

    def __display_dice(self):
        """Print the ASCII art matching the current value.

        Uses direct indexing instead of the original if/elif chain, which
        also compared integers with `is` (an identity check that only
        works by accident for small ints).
        """
        print(self.__DADO[self.__value - 1])
dc4bfd269dcbfbb123af73e2090922ea43380fca | Python | ZSX-JOJO/crawler_html2pdf | /webargstest.py | UTF-8 | 1,709 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | import re
from flask import Flask, jsonify, request
from webargs import fields
from webargs.flaskparser import use_args
app = Flask("hello")
@app.route("/api/login", methods=["POST"])
@use_args({"username": fields.Str(required=True),
           "password": fields.Str(required=True)},
          location="json")
def login(args):
    """Validate a JSON login payload with webargs.

    Bug fix: the original schema declared only "username" while the body
    read args['password'], raising KeyError on every request; "password"
    is now part of the schema, so missing values are rejected with 422.
    """
    name = args['username']
    password = args['password']
    return jsonify({"code": 200, "msg": "ok"})
@app.route("/api/login", methods=["POST"])
def login3():
    """Validate a login payload by hand: email format and password length.

    NOTE(review): this registers the same rule as login() above; Flask
    dispatches to whichever view was registered first.
    """
    data = request.get_json()
    email = data.get("email")
    # Bug fix: the original rejected requests whose email DID match the
    # pattern; the match result must be negated.
    if not email or not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
        return jsonify({"code": 400, "msg": "参数有误"}), 400
    password = data.get("password")
    if not password or len(password) < 6:
        return jsonify({"code": 400, "msg": "参数有误"}), 400
    # 数据库查询 -- a real database lookup would go here.
    return jsonify({"code": 200, "msg": "ok"})
from webargs import fields, ValidationError
def must_exist_in_db(val):
    """webargs validator: only the id 1 is treated as existing in the DB."""
    if val == 1:
        return
    raise ValidationError("id not exist")
# Query-string schema: optional name with a default, id must pass the
# DB-existence validator above.
hello_args = {"name": fields.Str(missing="Friend"),
              "id": fields.Integer(required=True, validate=must_exist_in_db)}


@app.route("/", methods=["GET"])
@use_args(hello_args, location="query")
def hello(args):
    """A welcome page."""
    return jsonify({"message": "Welcome, {}!".format(args["name"])})
@app.errorhandler(422)
@app.errorhandler(400)
def handle_error(err):
    """Return webargs validation failures as JSON instead of Flask's HTML.

    err.data is attached by webargs' flask parser when it aborts.
    """
    headers = err.data.get("headers", None)
    messages = err.data.get("messages", ["Invalid request."])
    if headers:
        return jsonify({"errors": messages}), err.code, headers
    else:
        return jsonify({"errors": messages}), 400
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run(port=5000)  # dev server only; not for production use
| true |
cb3f54d6c871e64d07cb26f2bf56bc0c50717db4 | Python | jlp1701/fbHash | /tests/test_fbHashB.py | UTF-8 | 2,176 | 2.578125 | 3 | [] | no_license | import pytest
from fbHash import fbHashB
def read_file(file_path):
    """Return the raw bytes of the file at *file_path*."""
    with open(file_path, "rb") as fh:
        contents = fh.read()
    return contents
def test_test():
    """Sanity check that the test harness runs at all."""
    assert True
def test_document_weights():
    """compute_document_weights must yield a non-empty weight table."""
    paths = ["./tests/files/testfile_%d.txt" % i for i in (1, 2, 3, 4)]
    weights = fbHashB.compute_document_weights(paths)
    assert len(weights) > 0
def test_chunk_freq():
    """Chunk-frequency maps have the expected sizes and overlaps."""
    expected_sizes = {
        "tests/files/testfile_1.txt": 1,
        "tests/files/testfile_1_1.txt": 2,
        "tests/files/testfile_2.txt": 1,
        "tests/files/testfile_3.txt": 1,
        "tests/files/testfile_4.txt": 187,
    }
    freqs = {path: fbHashB.compute_chunk_freq(read_file(path))
             for path in expected_sizes}
    for path, size in expected_sizes.items():
        assert len(freqs[path].keys()) == size
    # completely different files share no chunk
    assert len(freqs["tests/files/testfile_1.txt"].keys()
               & freqs["tests/files/testfile_2.txt"].keys()) == 0
    # near-duplicates share exactly one chunk
    assert len(freqs["tests/files/testfile_1.txt"].keys()
               & freqs["tests/files/testfile_1_1.txt"].keys()) == 1
def test_unique_chunks():
    """get_chunks vs get_unique_chunks on files with repeated content."""
    cases = [
        ("tests/files/testfile_1.txt", 1, 1),
        ("tests/files/testfile_1_2.txt", 27, 1),
    ]
    for path, n_chunks, n_unique in cases:
        assert len(fbHashB.get_chunks(read_file(path))) == n_chunks
        assert len(fbHashB.get_unique_chunks(path)) == n_unique
def test_comparison():
    """Similarity scores: 0 for unrelated files, ~50 for near-duplicates."""
    paths = ["./tests/files/testfile_1.txt", "./tests/files/testfile_1_1.txt",
             "./tests/files/testfile_2.txt", "./tests/files/testfile_3.txt"]
    weights_db = "test_weights.db"
    fbHashB.doc_weights2sqlite(fbHashB.compute_document_weights(paths), weights_db)
    h1, h1_1, h2 = (fbHashB.hashf(path, weights_db) for path in paths[:3])
    # unrelated documents score zero
    assert fbHashB.compare(h1, h2) == 0
    # near-duplicates land in the middle of the scale
    assert 40 < fbHashB.compare(h1, h1_1) < 60
| true |
61fa05eed59ece280c59e171e44788f0ea627e68 | Python | amarusyak/Telegram-Bot | /api_client/client.py | UTF-8 | 1,995 | 2.640625 | 3 | [] | no_license | import requests
import config
import curlify
from logger.logger import Logger
requests.packages.urllib3.disable_warnings()
class Client:
    """Thin wrapper around a persistent requests.Session.

    All verbs disable TLS verification (verify=False) on purpose — the
    module disables urllib3 warnings accordingly.
    """

    def __init__(self):
        self._session = requests.Session()

    ######################
    # Basic HTTP methods #
    ######################
    def get(self, url, headers, params):
        """GET *url* with query *params*."""
        return self._session.get(url=url,
                                 verify=False,
                                 headers=headers,
                                 params=params)

    def post(self, url, headers, params):
        """POST *url*; note *params* is sent as the request body."""
        return self._session.post(url=url,
                                  verify=False,
                                  headers=headers,
                                  data=params)

    def put(self, url, headers, params):
        """PUT *url* with query *params*."""
        return self._session.put(url=url,
                                 verify=False,
                                 headers=headers,
                                 params=params)

    def delete(self, url, headers, params):
        """DELETE *url* with query *params*.

        Bug fix: this method previously delegated to ``session.put``,
        so DELETE calls were silently issued as PUT requests.
        """
        return self._session.delete(url=url,
                                    verify=False,
                                    headers=headers,
                                    params=params)

    # Unified HTTP call method
    def make_call(self, method, request, params, headers=None):
        """Dispatch *method* ('get'/'post'/'put'/'delete') to *request* URL.

        Bug fixes: the old signature defaulted to the shared
        ``config.DEFAULT_REQUEST_HEADERS`` dict and then ran
        ``headers.update(headers ...)`` — a self-update that mutated the
        global default and raised TypeError when ``headers`` was falsy.
        Defaults are now copied and caller headers merged on top.

        Raises nothing on HTTP errors: failures are logged (with a curl
        reproduction) and the JSON body is still returned.
        """
        http_method = getattr(self, method)
        merged_headers = dict(config.DEFAULT_REQUEST_HEADERS)
        if headers:
            merged_headers.update(headers)
        response = http_method(url=request, headers=merged_headers, params=params)
        try:
            response.raise_for_status()
        except requests.RequestException as e:
            logger = Logger()
            logger.log('\n'.join([
                "Details:",
                "Request: " + curlify.to_curl(response.request),
                "Response status: " + str(response.status_code),
                "Response: " + response.text,
                "Python Exception: " + str(e) + '\n'
            ]))
        return response.json()
| true |
0f6c10384d250f57a1ae090fcc763820e496f6f0 | Python | grantozz/flower_cnn | /src/splitdata.py | UTF-8 | 1,732 | 3.203125 | 3 | [] | no_license | """### Split data into test and train sets"""
import os
import shutil
import sys
import tarfile
import wget
from config import archive
#given a data set of sub dirs of images copy 20% of files to a
# new dir preserving dir structure
# Module-level counters updated as a side effect of split()/copy().
numfiles=0
numclasses=0
def split(src,dest_dir):
    """Mirror each class sub-directory of src under dest_dir and move a
    portion of its files there via copy(). Plain files directly inside
    src are ignored — only directories count as classes."""
    src_files = os.listdir(src)
    for file_name in src_files:
        full_file_name = os.path.join(src, file_name)
        if not (os.path.isfile(full_file_name)): #if it is a dir
            global numclasses
            numclasses+=1
            dest_name = os.path.join(dest_dir, file_name)
            os.makedirs(dest_name)
            copy(full_file_name,dest_name)
#copy 20% of files from src to dest
def copy(src, dest):
    """Move roughly 20% of the files in ``src`` into ``dest``.

    Bug fixes vs. the original:
    * the counter was decremented *before* the break test, so one fewer
      file than intended was moved (and none at all for 5-9 files);
    * directories with fewer than 5 files had *every* entry moved,
      because the quota started at 0 and the break condition never fired.

    Returns the number of files moved and keeps the module-level
    ``numfiles`` counter in sync for callers that still read it.
    """
    names = os.listdir(src)
    quota = len(names) // 5  # 20%, rounded down
    moved = 0
    for name in names:
        if moved >= quota:
            break
        full_name = os.path.join(src, name)
        if os.path.isfile(full_name):
            shutil.move(full_name, dest)
            moved += 1
    # Maintain the historical global counter without crashing when the
    # module-level initializer is absent (e.g. when run in isolation).
    globals()["numfiles"] = globals().get("numfiles", 0) + moved
    return moved
def test_train_split(src,dest_dir):
    """Download, extract, and split the dataset into a test set once.

    No-op when dest_dir already exists, so the split is not applied twice.
    """
    #this function should only be run once or else too many files will be copied to the test set
    if(os.path.exists(dest_dir)):
        return
    # Fetch and unpack the archive, then move a share of each class.
    dl_data()
    extract()
    os.makedirs(dest_dir)
    split(src,dest_dir)
    print('moved files {0} from {1} to {2} '.format(numfiles,src,dest_dir),flush=True)
def count_classes(src):
    """Number of (possibly nested) sub-directories under ``src``."""
    walked = list(os.walk(src))
    return len(walked) - 1  # subtract the root entry itself
def extract():
    """Unpack the dataset archive into the current directory."""
    with tarfile.open(archive) as tar:
        tar.extractall()
def dl_data():
    """Download the TensorFlow flowers archive into the working directory.

    NOTE(review): requires network access and the third-party ``wget``
    package; the download is large.
    """
    url = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
    print('downloading dataset please wait')
    wget.download(url)
eeabb7a700fdc6678c4954d42b36b62165cdbfe4 | Python | AIHackerTest/RachyJ_Py101-004 | /Chap1/project/ex42.py | UTF-8 | 1,816 | 4.40625 | 4 | [] | no_license | ## Animal is-a object, a base class
class Animal(object):
    """Base class for all animals in this exercise; carries no behaviour."""
    ## a placeholder when a statement is required syntactically, but no code needs to be executed
    pass
## Dog is a class, inherit Animal
class Dog(Animal):
    def __init__(self, name):
        """Create a dog, remember its name, and announce it."""
        ## dog has a name
        self.name = name
        print("The dog name is %s" % name)
## cat is a class, inherit Animal
class Cat(Animal):
    def __init__(self, name):
        """Create a cat, remember its name, and announce it."""
        ## cat has a name
        self.name = name
        print("The cat name is %s" % name)
## person is a base class
class Person(object):
    def __init__(self, name):
        """Create a person with a name and, initially, no pet."""
        ## person has a name
        self.name = name
        ## set the default value to None; a pet is assigned later by callers
        self.pet = None
        print("The name of the person is %s" % name)
        #print("He/She has a pet %s" % name.pet)
## employee is a type of person
class Employee(Person):
    """A Person who additionally has a yearly salary."""

    def __init__(self, name, salary):
        ## zero-argument super() is the modern spelling of super(Employee, self)
        super().__init__(name)
        self.salary = salary
        print("%s earns how much each year" % name)
        print(salary)
## fish is an object
class Fish(object):
    """Base class for fish species in this exercise."""
    pass
## salmon is a fish
class Salmon(Fish):
    """A kind of Fish; adds nothing of its own."""
    pass
## halibut is a fish
class Halibut(Fish):
    """A kind of Fish; adds nothing of its own."""
    pass
# Exercise driver: build a few animals/people and wire up pet ownership.
## rover is a Dog
rover = Dog("Rover")
## satan is a cat
satan = Cat("Satan")
## Mary is a person
mary = Person("Mary")
## Mary's pet is satan
mary.pet = satan
print("He/She has a pet %s" % mary.pet.name)
## frank is an employee with salary 120000
frank = Employee("Frank", 120000)
## frank has a rover as pet
frank.pet = rover
print("He/She has a pet %s" % frank.pet.name)
## flipper is a kind of fish
flipper = Fish()
## crouse is a kind of Salmon
crouse = Salmon()
## harry is a kind of halibut
harry = Halibut()
| true |
353b5ddfe89e88188b54b5a97b24c41edbb4c7db | Python | bergercookie/pymendeley | /lmendeley/__init__.py | UTF-8 | 1,406 | 2.765625 | 3 | [] | no_license | """Access document data in the Mendeley sqlite3 database.
.. note:: Currently looks in fixed paths on Linux. Changing
EXPECTED_MENDELEY_SQLITE_DIR and EXPECTED_MENDELEY_CONFIG_PATH
should allow it to work on non-linux or non-standard
installs.
"""
import os
from configparser import ConfigParser
# On Linux we can usually find the Mendeley sqlite3 database
# at this location.
EXPECTED_MENDELEY_SQLITE_DIR = \
os.path.expanduser('~/.local/share/data/Mendeley Ltd./Mendeley Desktop')
EXPECTED_MENDELEY_CONFIG_PATH = \
os.path.expanduser('~/.config/Mendeley Ltd./Mendeley Desktop.conf')
def find_mendeley_sqlite_path():
    """Get the path to the mendeley db file.

    :returns: The path to the Mendeley sqlite3 database if in standard
        location, otherwise returns None.
    :rtype str:
    """
    from configparser import Error as ConfigError  # for the narrowed except

    try:
        # Guard clause: no config file means we cannot discover the email.
        if not os.path.exists(EXPECTED_MENDELEY_CONFIG_PATH):
            return None
        config_parser = ConfigParser()
        config_parser.read(EXPECTED_MENDELEY_CONFIG_PATH)
        email = config_parser.get('MendeleyWeb', 'userEmail')
        candidate_path = os.path.join(EXPECTED_MENDELEY_SQLITE_DIR,
                                      '%s@www.mendeley.com.sqlite' %
                                      (email,))
        if os.path.exists(candidate_path):
            return candidate_path
    except (OSError, ConfigError):
        # Narrowed from a bare `except Exception: pass`, which also hid
        # programming errors. Unreadable files or missing sections/options
        # still just mean "database not found".
        return None
    return None
| true |
c0f71ba30d1aee3e797c4c4f57407798c5839235 | Python | LidiaDomingos/Samurai-Andrew-Desagil | /src/game_screen.py | UTF-8 | 4,201 | 2.75 | 3 | [
"MIT"
] | permissive | from os.path import join
from random import randint
import pygame
from pygame import image
from pygame.sprite import Group
from pygame.mixer import Sound
from pygame.time import delay
from config import *
from Character_assets import Character_assets
from Player import Player
from Fruit import Fruit
from Cursor import Cursor
# Screen Assets
background_path = join(IMAGES_DIR, "scene", "background.png")
background_asset = pygame.image.load(background_path).convert()
logo_path = join(IMAGES_DIR, "game", "logo.png")
logo_asset = pygame.image.load(logo_path).convert_alpha()
platforms_path = join(IMAGES_DIR, "scene", "platforms.png")
platforms_asset = pygame.image.load(platforms_path).convert_alpha()
ingame_path = join(IMAGES_DIR, "scene", "in-game.png")
ingame_asset = pygame.image.load(ingame_path).convert()
# Sounds
playing_path = join(SOUNDS_DIR, "musica_gaming.ogg")
pygame.mixer.music.load(playing_path)
pygame.mixer.music.set_volume(GAME_VOLUME)
# Fonts assets
font_dir = join(FONTS_DIR, 'PressStart2P.ttf')
font = pygame.font.Font(font_dir, FONT_SIZE)
# Player assets (character sprites and sounds)
samurai_andrew_assets = Character_assets("assets/characters/samurai_andrew")
samurai_andrew = Player(samurai_andrew_assets)
# kunoichi_barbara_assets = Character_assets("assets/characters/kunoichi_barbara")
# kunoichi_barbara = Player(kunoichi_barbara_assets)
# ninja_diego_assets = Character_assets("assets/characters/ninja_diego")
# ninja_diego = Player(ninja_diego_assets)
# Main player and its starting position
player = samurai_andrew
player.rect.x = 91
# player.rect.bottom = 0
player.rect.y = 50
apple_assests = Character_assets("assets/characters/apple")
banana_assests = Character_assets("assets/characters/banana")
watermelon_assests = Character_assets("assets/characters/watermelon")
# Fruit assets
# from characters import melancia as melancia_assests
# from characters import melancia as banana_assests
# from characters import melancia as maca_assests
fruits_assets = (apple_assests, banana_assests, watermelon_assests)
# Cursor
cursor_asset = join(ASSETS_DIR, "game", "images", "game", "cursor.png")
cursor_skin = image.load(cursor_asset).convert_alpha()
cursor = Cursor(cursor_skin)
def game_screen(screen, render):
    """Run the main game loop; returns the next screen state constant."""
    # Sprites
    fruits = Group()
    players = Group()
    # Background music for the game screen
    pygame.mixer.music.play(loops=-1)
    # Intro animation: slide the logo down and the platforms up
    for i in range(0, 100, 1):
        screen.blit(background_asset, SCREEN_ORIGIN)
        screen.blit(logo_asset, (117, 11 + 200 * i/100))
        screen.blit(platforms_asset, (41, 103 + 70 * (1 - i/100)))
        render()
    # Add the player
    players.add(player)
    player.get_sound("letsgo").play()
    score = 0
    lives = 3
    # Start the game
    state = GAME_SCREEN
    while state == GAME_SCREEN:
        # Process events (mouse, keyboard, buttons, and the sprites)
        for event in pygame.event.get():
            # Player events
            player.event_handdler(event)
            # Cursor events
            cursor.event_handler(event)
            # Check whether the window was closed.
            if event.type == pygame.QUIT:
                state = LEAVE_GAME
        # Spawn random flying fruit up to FRUITS_MAX
        while len(fruits) <= FRUITS_MAX:
            length = len(fruits_assets)
            random_index = randint(0, length - 1)
            fruit_asset = fruits_assets[random_index]
            fruit = Fruit(fruit_asset)
            fruits.add(fruit)
        # Slice any fruit currently under the cursor
        for fruta in fruits:
            if fruta.rect.collidepoint(cursor.get_position()):
                fruta.kill()
                score += 10
        # Player sprite was killed elsewhere: lose a life or end the game
        if not players:
            lives -= 1
            if lives == 0:
                state = OVER_SCREEN
            else:
                players.add(player)
                player.rect.x = 91
                player.rect.bottom = 0
        # Update the sprites
        fruits.update()
        players.update()
        # Draw the background
        screen.blit(ingame_asset, SCREEN_ORIGIN)
        # Draw the sprites
        fruits.draw(screen)
        players.draw(screen)
        score_surface = font.render(str(score), True, WHITE)
        screen.blit(score_surface, SCREEN_ORIGIN)
        live_surface = font.render(str(lives), True, WHITE)
        screen.blit(live_surface, (300,150))
        cursor.draw(screen)
        render()
    return state
| true |
3a7dce6b6744ab0507ae789edf19112c79b52502 | Python | ChibaniMohamed/fake_faces_DCGAN | /dcgan_model.py | UTF-8 | 3,582 | 2.65625 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
from keras.models import Sequential,Model
from keras.layers import Conv2D,Conv2DTranspose,LeakyReLU,Dropout,Dense,UpSampling2D,Flatten,Reshape,Input
from keras.initializers import RandomNormal
from keras.optimizers import Adam
# Directory holding the training face images.
PATH = './images/'
images = []
for image in os.listdir(PATH):
    img = Image.open(PATH+image)
    img = img.resize((120,120))
    img = np.asarray(img)
    images.append(img)
images = np.array(images)
# Scale pixels from [0, 255] to [-1, 1] to match the generator's tanh output.
images = (images.astype(np.float32) - 127.5) / 127.5
def call_generator():
    """Build and compile the DCGAN generator (100-dim noise -> 120x120x3)."""
    init = RandomNormal(0, 0.02)
    model = Sequential()
    model.add(Dense(128 * 15 * 15, kernel_initializer=init, input_dim=100))
    model.add(LeakyReLU(0.2))
    model.add(Reshape((15, 15, 128)))
    # Three stride-2 transposed convolutions upsample 15 -> 30 -> 60 -> 120.
    for _ in range(3):
        model.add(Conv2DTranspose(128, (4, 4), strides=2, padding="same",
                                  kernel_initializer=init))
        model.add(LeakyReLU(0.2))
    model.add(Conv2D(3, (3, 3), padding="same", activation="tanh",
                     kernel_initializer=init))
    model.compile(optimizer=Adam(0.0002, 0.5), loss="binary_crossentropy")
    return model
def call_discriminator():
    """Build and compile the DCGAN discriminator (120x120x3 -> real/fake)."""
    init = RandomNormal(0, 0.02)
    model = Sequential()
    model.add(Conv2D(64, (3, 3), padding="same", kernel_initializer=init,
                     input_shape=(120, 120, 3)))
    model.add(LeakyReLU(0.2))
    # Stride-2 convolutions progressively downsample the feature maps.
    for filters in (128, 128, 256):
        model.add(Conv2D(filters, (3, 3), strides=2, padding="same",
                         kernel_initializer=init))
        model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(loss="binary_crossentropy", optimizer=Adam(0.0002, 0.5))
    return model
def show_images(noise, epoch=None):
    """Plot a 10x10 grid of generator outputs for *noise*.

    When *epoch* is given the figure is also saved to disk.
    Assumes *noise* holds 100 latent vectors (one per grid cell) — confirm
    against the caller.
    """
    generated_images = gen.predict(noise)
    plt.figure(figsize=(10, 10))
    for i, image in enumerate(generated_images):
        plt.subplot(10, 10, i+1)
        plt.imshow(image.reshape((120, 120, 3)))
        plt.axis('off')
    plt.tight_layout()
    if epoch is not None:  # fix: identity comparison with None (was `!= None`)
        plt.savefig(f'./gan-images_epoch-{epoch}.png')
    plt.show()
gen = call_generator()
desc = call_discriminator()
# Freeze the discriminator inside the stacked model so gan.train_on_batch
# only updates the generator's weights.
desc.trainable = False
gan_input = Input(shape=(100,))
fake_img = gen(gan_input)
gan_output = desc(fake_img)
gan = Model(gan_input, gan_output)
gan.compile(loss="binary_crossentropy", optimizer=Adam(0.0002, 0.5))
batch_size = 16
step_per_epoch = 491
# Fixed noise so the periodic preview grid tracks the same latent points.
s_noise = np.random.normal(0, 1, size=(100, 100))
for epoch in range(800):
    for batch in range(step_per_epoch):
        noise = np.random.normal(0, 1, size=(batch_size, 100))
        fake = gen.predict(noise)
        real = images[np.random.randint(0, images.shape[0], size=batch_size)]
        x = np.concatenate((real, fake))
        # Discriminator targets: real images (first half of x) get the
        # smoothed label 0.9, generated images get 0.
        # Bug fix: the fake half was previously left at 1.0 (labelled
        # "real"), so the discriminator could not learn to separate them.
        labels = np.zeros(2 * batch_size)
        labels[:batch_size] = 0.9
        desc_loss = desc.train_on_batch(x, labels)
        # Generator step: drive D(G(z)) towards the "real" label.
        # Bug fix: the target was previously all zeros, which trains the
        # generator to look *fake* under the convention above.
        gen_loss = gan.train_on_batch(noise, np.ones(batch_size))
    print(f"Epoch : {epoch} / Descriminator Loss : {desc_loss} / Generator Loss : {gen_loss}")
    if epoch % 10 == 0:
        show_images(s_noise, epoch)
a878b079c46405c86305441b5aef3b167d9ef92b | Python | TrevorQuan/C2-Data-Types | /main.py | UTF-8 | 117 | 2.90625 | 3 | [] | no_license | firstName = "Trevor" # String
age = "12" #String (note: the age is stored as text, not a number)
favNumber = 37 #Integer
favChar = 'T' #char (Python has no char type; this is a 1-character string)
isHungry = True #boolean
3508cf6bd5f9209939e2f48af576a6adb2a12a52 | Python | Zulko/moviepy | /moviepy/video/fx/loop.py | UTF-8 | 750 | 3.078125 | 3 | [
"MIT"
] | permissive | from moviepy.decorators import requires_duration
@requires_duration
def loop(clip, n=None, duration=None):
    """Return a copy of *clip* that repeats itself.

    With neither ``n`` nor ``duration`` the result loops indefinitely
    (no set duration) — handy for clips coming from GIFs.

    Parameters
    ----------
    n
      Number of times the clip should be played; overrides ``duration``.

    duration
      Total duration of the looped clip (alternative to ``n``).
    """
    base_duration = clip.duration
    looped = clip.time_transform(
        lambda t: t % base_duration, apply_to=["mask", "audio"]
    )
    if n:
        duration = n * base_duration
    if duration:
        looped = looped.with_duration(duration)
    return looped
| true |
def convert(number):
    """Raindrops: concatenate Pling/Plang/Plong for factors 3/5/7,
    or fall back to the number itself as a string."""
    drops = ((3, 'Pling'), (5, 'Plang'), (7, 'Plong'))
    sound = ''.join(word for factor, word in drops if number % factor == 0)
    return sound if sound else str(number)
ef08e227d2d7a34ac0a32f37309c709dc955fb73 | Python | saqibns/CodeVita | /Practice/brackets.py | UTF-8 | 812 | 3.296875 | 3 | [] | no_license | def main():
    # Process exactly 10 test cases from stdin.
    for i in range(10):
        print('Test', str(i + 1) + ':')
        # The declared length line is consumed to advance stdin but never used.
        length = input()
        expression = list(input())
        operations = int(input())
        for j in range(operations):
            operation = int(input())
            if operation == 0:
                # Operation 0: report whether the brackets are balanced.
                check(expression)
            else:
                # Otherwise: toggle the bracket at 1-based position `operation`.
                bracket = expression[operation - 1]
                if bracket == '(':
                    expression[operation - 1] = ')'
                else:
                    expression[operation - 1] = '('
def check(expression):
    """Print YES when the bracket sequence is balanced, NO otherwise."""
    depth = 0
    for bracket in expression:
        depth += 1 if bracket == '(' else -1
        if depth < 0:
            # A closing bracket appeared before its opener.
            break
    print('YES' if depth == 0 else 'NO')
| true |
7c87fd279338bf1da8418400f9eae4d66605e239 | Python | Aasthaengg/IBMdataset | /Python_codes/p02912/s689541834.py | UTF-8 | 311 | 2.53125 | 3 | [] | no_license | from heapq import heapify, heappop, heappush
# Greedy: repeatedly halve the current maximum, M times in total.
# A stores negated values so heapq behaves as a max-heap.
N, M = map(int, input().split())
A = list(map(lambda x: -int(x), input().split()))
if N == 1:
    # Bug fix: previously printed N >> M (always 1 >> M); the answer is
    # the single element halved M times.
    print((-A[0]) >> M)
    exit()
heapify(A)
while M > 0:
    a = -heappop(A)
    # Keep halving while `a` is still at least the next-largest element.
    while M > 0 and a >= -A[0]:
        a >>= 1
        M -= 1
    heappush(A, -a)
print(-sum(A))
b1ed91e939019cc80cebc6a9b3526852dee9b753 | Python | rugbyprof/4883-Software-Tools | /Lectures/L03/main.py | UTF-8 | 447 | 2.796875 | 3 | [] | no_license | """
"""
import json
from rich import print
def processJson():
    """Print every record from the dwarf family tree JSON file."""
    with open('dwarf_family_tree.json') as handle:
        people = json.load(handle)
    for entry in people:
        print(entry)
def processCsv():
    """Print every row of the dwarf family tree CSV as a list of fields."""
    with open('dwarf_family_tree.csv') as handle:
        for row in handle:
            print(row.strip().split(','))
if __name__ == "__main__":
processJson()
processCsv() | true |
93583cc813edc5d71dc10f4d4a2917846a16d187 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/CJ_16_1/16_1_1_ManojRK_A.py | UTF-8 | 309 | 3.4375 | 3 | [] | no_license | for tc in range(1, int(input()) + 1):
    s = input()
    # `front` holds the letters pushed to the beginning of the word (stored
    # in reverse order, so front[-1] is the word's current first letter);
    # `back` holds letters appended to the end.
    back = []
    front = [s[0]]
    for ch in s[1:]:
        # Greedy rule: a letter >= the current first letter goes to the
        # front; anything smaller goes to the back.
        if ch >= front[-1]:
            front.append(ch)
        else:
            back.append(ch)
    print('Case #{}: '.format(tc), *(front[i] for i in range(len(front) - 1, -1, -1)), *back, sep='')
| true |
5e4a0037357e9860046a86d072a5870936e51b5a | Python | MH-Lee/Gobble-v.1 | /utils/processor_checker.py | UTF-8 | 302 | 3.328125 | 3 | [] | no_license | from time import time
def timeit(method):
    """Decorator that prints how long the wrapped callable took to run."""
    import functools

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original decorator lost.
    @functools.wraps(method)
    def timed(*args, **kwargs):
        ts = time()
        result = method(*args, **kwargs)
        te = time()
        print("Process took " + str(round(te-ts,2)) + " seconds")
        return result
    return timed
| true |
dcb332593d29075a047a07f6db71f05e344f2fe3 | Python | dirk0082/hello-world | /web_scraping_incognito.py | UTF-8 | 746 | 3 | 3 | [] | no_license | import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url = 'https://thegolfnewsnet.com/golfnewsnetteam/2020/02/09/2020-att-pebble-beach-pro-am-money-purse-winners-share-prize-money-payout-118206/'
# Open the connection and download the page.
uClient = uReq(my_url)
# Offload the content into a usable variable.
page_html = uClient.read()
# Bug fix: `uClient.close` only referenced the method; the call operator
# was missing, so the connection was never actually closed.
uClient.close()
# Parse the HTML.
page_soup = soup(page_html, 'html.parser')
# Grab every results table on the page and report how many were found.
containers = page_soup.findAll("table")
print(len(containers))
| true |
cf67833290880d8504992037ecfb730c641d5759 | Python | zeeshanahmad10809/sst-deep-tensorflow | /sst/utils.py | UTF-8 | 1,338 | 2.8125 | 3 | [
"MIT"
] | permissive | import numpy as np
from loguru import logger
from tqdm import tqdm
import os
def get_binary_label(sentiment):
    """Collapse a fine-grained sentiment score to binary: <=1 -> 0, else 1."""
    return 0 if sentiment <= 1 else 1
def loadFastTextModel(path=""):
    """Load FastText-style word vectors from a text file into a dict.

    :param path: path to a ``.vec``-style file (token then floats per line).
    :returns: dict mapping word -> np.ndarray of float32 coefficients.
    """
    logger.info("Loading FastText Model!")
    embeddings_index = dict()
    try:
        with open(path, "r") as f:
            # total=1999996 presumably matches the published 2M-vector
            # FastText file -- TODO confirm against the actual file used.
            with tqdm(total=1999996, desc="loading FastText") as pbar:
                for line in f:
                    values = line.strip().split(" ")
                    word = values[0]
                    coefs = np.asarray(values[1:], dtype="float32")
                    embeddings_index[word] = coefs
                    pbar.update(1)
        return embeddings_index
    except FileNotFoundError:
        logger.error("Embedding file not in path!")
        # NOTE(review): exits the process with status 0 on failure; a
        # non-zero code (or re-raising) would signal the error -- confirm.
        os._exit(0)
def buildEmbeddingMatrix(word_index, vocab_size, embedding_size, embeddings_index):
    """Assemble a (vocab_size x embedding_size) matrix from word vectors.

    Rows for words missing from ``embeddings_index`` (or with an index
    beyond ``vocab_size``) stay all-zero.
    """
    logger.info("Building Embedding Matrix!")
    matrix = np.zeros((vocab_size, embedding_size))
    for word, idx in word_index.items():
        if idx >= vocab_size:
            continue
        vector = embeddings_index.get(word)
        if vector is not None:
            matrix[idx] = vector
    return matrix
| true |
ebca102afbfd43ab1faef5aeaef2b6a203a31856 | Python | flocknroll/text2fb | /text2fb.py | UTF-8 | 1,684 | 2.875 | 3 | [] | no_license | import numpy as np
from datetime import datetime
from PIL import Image, ImageDraw, ImageColor, ImageFont
sRes = (120, 80)
oRes = (480, 320)
def px24_to_16(r, g, b):
    """Pack 8-bit R, G, B into RGB565, returned as 2 little-endian bytes."""
    packed = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3)
    return packed.to_bytes(2, "little")
def rgb24_to_rgb16(bmp):
    """Convert a PIL RGB image to raw RGB565 little-endian bytes (pure Python).

    Pure black and pure white are special-cased for speed.
    """
    pixels = memoryview(bmp.tobytes())
    out = bytearray(oRes[0] * oRes[1] * 2)
    for idx in range(oRes[0] * oRes[1]):
        r = pixels[idx * 3]
        g = pixels[idx * 3 + 1]
        b = pixels[idx * 3 + 2]
        if r + g + b == 0:
            packed = b"\x00\x00"
        elif r + g + b == 768:
            packed = b"\xff\xff"
        else:
            packed = px24_to_16(r, g, b)
        out[idx * 2: idx * 2 + 2] = packed
    return out
def np_rgb24_to_rgb16(bmp):
    """Vectorised RGB888 -> RGB565 conversion; returns little-endian bytes."""
    arr = np.array(bmp, dtype="intc").reshape((oRes[0] * oRes[1], 3))
    r = arr[:, 0] >> 3
    g = arr[:, 1] >> 2
    b = arr[:, 2] >> 3
    packed = (r << 11) | (g << 5) | b
    return packed.astype("uint16").tobytes()
def text_to_fb(text):
    """Render *text* centred on a small canvas and write it to /dev/fb1.

    The text is drawn at the low resolution sRes, then nearest-neighbour
    upscaled to the output resolution oRes before being converted to the
    RGB565 framebuffer format.
    NOTE(review): the TTF path is hard-coded and distro-specific.
    """
    im = Image.new("RGB", sRes)
    draw = ImageDraw.Draw(im)
    font = ImageFont.truetype("/usr/share/fonts/TTF/Inconsolata-Regular.ttf", size=42)
    w, h = font.getsize(text)
    # Centre the text on the small canvas.
    x = (sRes[0] - w) // 2
    y = (sRes[1] - h) // 2
    draw.text((x, y), text, font=font, fill=ImageColor.getrgb("red"))
    #draw.text((x, y), hour, font=font, fill=(32, 20, 32))
    im = im.resize(oRes, Image.NEAREST)
    fbBuffer = np_rgb24_to_rgb16(im)
    with open("/dev/fb1", "wb") as fb:
        fb.write(fbBuffer)
def show_hour():
    """Render the current HH:MM time to the framebuffer."""
    text_to_fb(datetime.now().strftime("%H:%M"))
show_hour() | true |
c671a49ef7ccb5bf6beb4e8d747e59021135dd11 | Python | honoraip/brick_breaking | /breakout.py | UTF-8 | 11,107 | 3.515625 | 4 | [] | no_license | # breakout.py
# Honora Ip, hi52
# December 12, 2014
"""Primary module for Breakout application
This module contains the App controller class for the Breakout application.
There should not be any need for additional classes in this module.
If you need more classes, 99% of the time they belong in either the gameplay
module or the models module. If you are ensure about where a new class should go,
post a question on Piazza."""
from constants import *
from gameplay import *
from game2d import *
# PRIMARY RULE: Breakout can only access attributes in gameplay.py via getters/setters
# Breakout is NOT allowed to access anything in models.py
class Breakout(GameApp):
"""Instance is a Breakout App
This class extends GameApp and implements the various methods necessary
for processing the player inputs and starting/running a game.
Method init starts up the game.
Method update either changes the state or updates the Gameplay object
Method draw displays the Gameplay object and any other elements on screen
Because of some of the weird ways that Kivy works, you SHOULD NOT create an
initializer __init__ for this class. Any initialization should be done in
the init method instead. This is only for this class. All other classes
behave normally.
Most of the work handling the game is actually provided in the class Gameplay.
Gameplay should have a minimum of two methods: updatePaddle(touch) which moves
the paddle, and updateBall() which moves the ball and processes all of the
game physics. This class should simply call that method in update().
The primary purpose of this class is managing the game state: when is the
game started, paused, completed, etc. It keeps track of that in an attribute
called _state.
INSTANCE ATTRIBUTES:
view [Immutable instance of GView, it is inherited from GameApp]:
the game view, used in drawing (see examples from class)
_state [one of STATE_INACTIVE, STATE_COUNTDOWN, STATE_PAUSED, STATE_ACTIVE]:
the current state of the game represented a value from constants.py
_last [GPoint, or None if mouse button is not pressed]:
the last mouse position (if Button was pressed)
_game [GModel, or None if there is no game currently active]:
the game controller, which manages the paddle, ball, and bricks
ADDITIONAL INVARIANTS: Attribute _game is only None if _state is STATE_INACTIVE.
You may have more attributes if you wish (you might need an attribute to store
any text messages you display on the screen). If you add new attributes, they
need to be documented here.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
_mssg ['Press to Play', or none if the game is not inactive]:
This message is displayed on the welcome screen
and instructs user to press the mouse to play.
_lasttouch [GPoint, or none if the game is inactive]:
Holds the previous value of touch. This is used in
GamePlay to detect the first time the user clicks
the mouse to move the paddle.
_timer [int >= 0 ]:
This is a countdown timer that counts frames. It is
used to count COUNTDOWN_SECONDS seconds in state
countdown.
_ballcount [int >= 0]:
The number of balls left (initialized to NUMBER_TURNS).
_pausemssg [pause message string, or none if the game is not paused]:
This message appears when the game is paused between tries.
_finalmssg [final message string, or none if the game is not complete]:
This message appears on the completion screen and tells
the user the game's result.
"""
# GAMEAPP METHODS
def init(self):
"""Initialize the game state.
This method is distinct from the built-in initializer __init__.
This method is called once the game is running. You should use
it to initialize any game specific attributes.
This method should initialize any state attributes as necessary
to statisfy invariants. When done, set the _state to STATE_INACTIVE
and create a message (in attribute _mssg) saying that the user should
press to play a game."""
self._state = STATE_INACTIVE
self._last = None
self._game = None
self._mssg = GLabel(text = 'Press to Play', font_size = 50)
self._lasttouch = None
self._timer = 0
self._ballcount = NUMBER_TURNS
self._pausemssg = None
self._finalmssg = None
def update(self,dt):
"""Animate a single frame in the game.
It is the method that does most of the work. Of course, it should
rely on helper methods in order to keep the method short and easy
to read. Some of the helper methods belong in this class, but most
of the others belong in class Gameplay.
The first thing this method should do is to check the state of the
game. We recommend that you have a helper method for every single
state: STATE_INACTIVE, STATE_COUNTDOWN, STATE_PAUSED,
STATE_ACTIVE, STATE_COMPLETE.
The game does different things in each state.
In STATE_INACTIVE, the method checks to see if the player clicks
the mouse (_last is None, but view.touch is not None). If so, it
(re)starts the game and switches to STATE_COUNTDOWN.
STATE_PAUSED is similar to STATE_INACTIVE. However, instead of
restarting the game, it simply switches to STATE_COUNTDOWN.
In STATE_COUNTDOWN, the game counts down until the ball is served.
The player is allowed to move the paddle, but there is no ball.
Paddle movement should be handled by class Gameplay (NOT in this class).
This state should delay at least one second.
In STATE_ACTIVE, the game plays normally. The player can move the
paddle and the ball moves on its own about the board. Both of these
should be handled by methods inside of class Gameplay (NOT in this class).
Gameplay should have methods named updatePaddle and updateBall.
While in STATE_ACTIVE, if the ball goes off the screen and there
are tries left, it switches to STATE_PAUSED. If the ball is lost
with no tries left, or there are no bricks left on the screen, the
game is over and it switches to STATE_INACTIVE. All of these checks
should be in Gameplay, NOT in this class.
STATE_COMPLETE displays a message when the game is over. The game shows
a message _finalmssg when the game is won or lost.
You are allowed to add more states if you wish. Should you do so,
you should describe them here.
Precondition: dt is the time since last update (a float). This
parameter can be safely ignored. It is only relevant for debugging
if your game is running really slowly. If dt > 0.5, you have a
framerate problem because you are trying to do something too complex."""
assert type(dt) == float
# Each state has a helper method
if self._state == STATE_INACTIVE:
self._inactive()
if self._state == STATE_COUNTDOWN:
self._countdown()
if self._state == STATE_ACTIVE:
self._active()
if self._state == STATE_PAUSED:
self._paused()
if self._state == STATE_COMPLETE:
self._complete()
def draw(self):
"""Draws the game objects to the view.
Every single thing you want to draw in this game is a GObject.
To draw a GObject g, simply use the method g.draw(view). It is
that easy!
Many of the GObjects (such as the paddle, ball, and bricks) are
attributes in Gameplay. In order to draw them, you either need to
add getters for these attributes or you need to add a draw method
to class Gameplay. We suggest the latter. See the example
subcontroller.py from class."""
# Only draw if the object exists (is not None)!
if self._mssg != None:
self._mssg.draw(self.view)
if self._game != None:
self._game.draw(self.view)
if self._pausemssg != None:
self._pausemssg.draw(self.view)
if self._finalmssg != None:
self._finalmssg.draw(self.view)
# HELPER METHODS FOR THE STATES GO HERE
def _inactive(self):
"""Checks if player clicks the mouse. If so, the game starts,
game state is changed to STATE_COUNTDOWN, and the welcome screen is
dismissed."""
if self._last == None and self.view.touch != None:
self._state = STATE_COUNTDOWN
self._last = self.view.touch
self._mssg = None
self._game = Gameplay()
def _paused(self):
"""Checks if player clicks the mouse. If so, the game state is changed
to STATE_COUNTDOWN and the pause message disappears."""
if self.view.touch != None:
self._pausemssg = None
self._state = STATE_COUNTDOWN
def _countdown(self):
"""Updates the paddle movement and counts down seconds until the ball
is to be released. When countdown ends, the state switches to
STATE_ACTIVE, the ball is served, and the ball count is decremented."""
self._game.updatePaddle(self._lasttouch, self.view.touch)
self._lasttouch = self.view.touch
self._timer += 1
# Switch state to active after countdown (60 frames per second)
if self._timer >= COUNTDOWN_SECONDS*60:
self._state = STATE_ACTIVE
self._game.serveBall()
self._ballcount -= 1
def _active(self):
    """Move the paddle and the ball; react to a lost ball or a cleared screen.

    Losing the last ball or clearing every brick ends the game in
    STATE_COMPLETE; losing with balls remaining pauses in STATE_PAUSED.
    """
    self._game.updatePaddle(self._lasttouch, self.view.touch)
    self._lasttouch = self.view.touch
    # React to the ball falling off the bottom of the screen.
    lost = self._game.moveBall()
    if lost:
        if self._ballcount == 0:
            self._finalmssg = GLabel(text='You Lost', font_size=50)
            self._state = STATE_COMPLETE
        elif self._ballcount > 0:
            self._pausemssg = GLabel(text='Click! Try again', font_size=50)
            self._state = STATE_PAUSED
    # A screen with no bricks left means the player won.
    if self._game.checkBricksListEmpty():
        self._finalmssg = GLabel(text='You Win', font_size=50)
        self._state = STATE_COMPLETE
def _complete(self):
    """The game is over; nothing needs updating each frame.

    The final message was created when the state switched to
    STATE_COMPLETE, and draw() keeps rendering it.
    """
    # Intentionally a no-op.
    pass
| true |
8af625aba36e0452ed02c3f9d9ee0aeb36ebc127 | Python | udapy/Hashing-1 | /isIsomorphic2Hash.py | UTF-8 | 897 | 3.609375 | 4 | [] | no_license |
class Solution:
    def isIsomorphic(self, s: str, t: str) -> bool:
        """Return True when a one-to-one character mapping turns s into t.

        Two dictionaries track the forward (s -> t) and reverse (t -> s)
        mappings so each pair is validated in O(1); overall O(n) time and
        O(n) extra space. Assumes len(t) >= len(s), as on LeetCode.
        """
        forward = {}
        backward = {}
        for idx, ch in enumerate(s):
            partner = t[idx]
            if ch in forward:
                if forward[ch] != partner:
                    return False
            elif partner in backward:
                # partner is already claimed by a different source character,
                # so the mapping would not be one-to-one.
                return False
            else:
                forward[ch] = partner
                backward[partner] = ch
        return True
252f9e39e06ecf7989b4822bd02625595b71ccd9 | Python | badr1002/M_R_Task | /app.py | UTF-8 | 2,951 | 2.5625 | 3 | [] | no_license | from os import error
from threading import ThreadError
from flask import Flask, request, jsonify
from flask_mongoengine import MongoEngine
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
app.config['MONGODB_SETTINGS'] = {
'db': 'test',
'host': 'localhost',
'port': 27017
}
db = MongoEngine()
db.init_app(app)
class User(db.Document):
    # MongoDB document for an application user.
    # NOTE(review): the password is stored in plaintext — it should be hashed.
    name = db.StringField()
    email = db.StringField()
    password = db.StringField()
class Values(db.Document):
    # Single numeric value submitted via POST /api/value/add.
    val = db.DecimalField()
@app.route('/api/user/allUsers')
def home():
    """Return every stored user as JSON."""
    try:
        users = User.objects()
        return jsonify({
            'apiStatus': True,
            'msg': "get users successfully",
            'data': users
        }), 200
    # Bug fix: the original `except(error)` only caught OSError (os.error),
    # so real query failures produced an unhandled 500 with no JSON body.
    except Exception:
        return jsonify({
            'apiStatus': False,
            'msg': "get users faild!",
            'data': {}
        }), 500
@app.route('/api/user/register', methods=['POST'])
def register():
    """Create a new user from the JSON body (name, email, password)."""
    name = request.get_json()['name']
    email = request.get_json()['email']
    password = request.get_json()['password']
    try:
        # NOTE(review): the password is persisted in plaintext — hash it.
        User(name=name, email=email, password=password).save()
        return jsonify({
            'apiStatus': True,
            'msg': "add users successfully",
            'data': {}
        }), 200
    # Bug fix: `except(error)` only caught OSError (os.error).
    except Exception:
        return jsonify({
            'apiStatus': False,
            'msg': "add users faild!",
            'data': {}
        }), 500
@app.route('/api/user/login', methods=['POST'])
def login():
    """Authenticate a user by e-mail and password from the JSON body."""
    email = request.get_json()['email']
    password = request.get_json()['password']
    try:
        # NOTE(review): plaintext password comparison — hashing should be
        # added on both the register and login paths.
        user = User.objects(email=email, password=password).first()
        if user:
            return jsonify({
                'apiStatus': True,
                'msg': "login successfully",
                'data': user
            }), 200
        # Bug fix: the original constructed ThreadError(...) without raising
        # or returning it, so a failed login fell through and returned None
        # (a server error). Raise so the except branch answers with JSON.
        raise LookupError("this email not found!")
    # Bug fix: `except(error)` only caught OSError (os.error).
    except Exception as e:
        return jsonify({
            'apiStatus': False,
            'msg': "login faild!",
            # Bug fix: the original put the exception *class* here, which
            # jsonify cannot serialize; report the message instead.
            'data': str(e)
        }), 500
@app.route('/api/value/add', methods=['POST'])
def Value():
    """Store one numeric value taken from the JSON body's 'value' key."""
    # Cleanup: the original passed '' as get_json's positional `force`
    # argument, which is falsy and therefore meaningless.
    val = request.get_json()['value']
    try:
        Values(val=val).save()
        return jsonify({
            'apiStatus': True,
            'msg': "add value successfully",
            'data': {}
        }), 200
    # Bug fix: `except(error)` only caught OSError (os.error).
    except Exception as e:
        return jsonify({
            'apiStatus': False,
            'msg': "add value faild!",
            # Bug fix: the exception class is not JSON-serializable.
            'data': str(e)
        }), 500
@app.route('/api/value/allValues', methods=['GET'])
def getValue():
    """Return every stored value as JSON."""
    try:
        values = Values.objects()
        return jsonify({
            'apiStatus': True,
            'msg': "get values successfully",
            'data': values
        }), 200
    # Bug fix: `except(error)` only caught OSError (os.error).
    except Exception as e:
        return jsonify({
            'apiStatus': False,
            'msg': "get values faild!",
            # Bug fix: the exception class is not JSON-serializable.
            'data': str(e)
        }), 500
if __name__ == "__main__":
app.run(debug=True)
| true |
901f36828202d565a154a94d57bb6254cf670dbb | Python | OntonYakut/python | /mysock.py | UTF-8 | 273 | 2.578125 | 3 | [] | no_license | import socket
# Minimal raw-socket HTTP client (Python 2 syntax: `print data` statement).
# Opens a TCP connection to the web server and issues a hand-written GET.
mysock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
mysock.connect(('www.py4inf.com',80))
# HTTP/1.0 request with an absolute URL; two newlines end the headers.
mysock.send('GET http://www.py4inf.com/code/romeo.txt HTTP/1.0\n\n')
while True:
    data=mysock.recv(512)
    # An empty read means the server has closed the connection.
    if( len(data) < 1 ) :
        break
    print data
mysock.close()
2a089c343481f6862543ddc6ce7e7dd7c44a0ad4 | Python | fedorpashin/physics | /physics/boundary_conditions/third_type_boundary_condition.py | UTF-8 | 579 | 2.546875 | 3 | [
"MIT"
] | permissive | from physics.boundary_conditions.boundary_condition import BoundaryCondition
from dataclasses import dataclass
from final_class import final
from overrides import overrides
__all__ = ['ThirdTypeBoundaryCondition']
@final
@dataclass
class ThirdTypeBoundaryCondition(BoundaryCondition):
    """Third-type (Robin) boundary condition with coefficients ν and κ."""

    # NOTE(review): double-underscore fields are name-mangled to
    # _ThirdTypeBoundaryCondition__ν / __κ, and the explicit __init__ below
    # replaces the one @dataclass would generate — confirm this interplay
    # is intentional.
    __ν: float
    __κ: float

    def __init__(self, ν: float, κ: float):
        self.__ν = ν
        self.__κ = κ

    # Read-only accessor; @overrides asserts the base class declares ν.
    @property  # type: ignore
    @overrides
    def ν(self) -> float:
        return self.__ν

    @property
    def κ(self) -> float:
        return self.__κ
| true |
4d5dc2e7ed3d46e3aa46d1d58a52c7ed45a499fc | Python | zgrzebnickij/test_automation | /src/tic_tac_toe/database.py | UTF-8 | 798 | 3 | 3 | [] | no_license | # tic_tac_toe/database.py
from sqlalchemy import Table, Column, Integer, String, MetaData, select
from .utilities import tic_tac_toe_winner
metadata = MetaData()
history = Table(
'history',
metadata,
Column('game_id', Integer, nullable=False),
Column('move_id', Integer, primary_key=True),
Column('position', Integer, nullable=False),
Column('symbol', String(1), nullable=False)
)
def winner(connection, game_id):
    """Compute the tic-tac-toe winner for *game_id* from its stored moves.

    Replays the moves in order onto a 9-cell board string (blank = ' ')
    and delegates the actual win detection to tic_tac_toe_winner.
    """
    metadata.create_all(connection)
    query = (
        select([history.c.position, history.c.symbol])
        .where(history.c.game_id == game_id)
        .order_by(history.c.move_id.asc())
    )
    # Later moves overwrite earlier ones at the same position, as before.
    board = dict(connection.execute(query))
    cells = ''.join(board.get(cell, ' ') for cell in range(9))
    return tic_tac_toe_winner(cells)
e87a79a8126118ae26f7a60c8bb1dd703b6c3164 | Python | nicolasque/mi-primer-programa | /wile.py | UTF-8 | 236 | 3.359375 | 3 | [] | no_license | numero_inicia = 2000000
# Count down from the starting number, labelling each value as even or odd.
while numero_inicia > 0:
    print(numero_inicia)
    if numero_inicia % 2 == 0:
        print("este numero es par")
    else:
        # Typo fix in the printed message: "inpar" -> "impar" (Spanish: odd).
        print("este numero es impar")
    numero_inicia -= 1
print("he terminado")
| true |
1c659173823e601f8d56a2fc552535f385c2f252 | Python | juangamella/stars | /docs/stars_example.py | UTF-8 | 587 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
import stars
# Define a dummy estimator (returns the same estimate for all subsamples)
def estimator(subsamples, lmbda):
    """Dummy estimator: one random symmetric adjacency shared by all subsamples.

    *subsamples* has shape (n_subsamples, n_obs, p); *lmbda* is ignored.
    Returns a boolean array of shape (n_subsamples, p, p) with a zero
    diagonal, the same estimate repeated for every subsample.
    """
    n_vars = subsamples.shape[2]
    upper = np.triu(np.random.uniform(size=(n_vars, n_vars)), k=1)
    adjacency = (upper + upper.T) > 0.5
    return np.array([adjacency] * len(subsamples))
# Generate data from a neighbourhood graph (page 10 of the paper)
true_precision = stars.neighbourhood_graph(100)
true_covariance = np.linalg.inv(true_precision)
X = np.random.multivariate_normal(np.zeros(100), true_covariance, size=400)
# Run StARS + Graphical lasso
stars.fit(X, estimator)
| true |
def get_top_beam_search_sentences(input_seq, beam=3):
    """Beam-search decode *input_seq*, keeping the *beam* best headlines.

    Returns a dict mapping sentence text -> (probability, decoder_h,
    decoder_c). Relies on module-level encoder_model / decoder_model, the
    y_word_index / y_index_word vocabulary maps, and MAX_HEADLINE_LENGTH.
    """
    # Encode the input as state vectors.
    e_out, e_h, e_c = encoder_model.predict(input_seq)
    top_sentences = {}

    def top_tokens(last_token, out, h, c):
        # One decoder step: return the `beam` most likely next tokens,
        # their probabilities, and the updated decoder states.
        output_tokens, h_new, c_new = decoder_model.predict([[last_token]] + [out, h, c])
        top_token_indexes = np.argsort(output_tokens[0, -1, :])[-beam:]
        top_probabilities = output_tokens[0, -1, top_token_indexes]
        return top_token_indexes, top_probabilities, h_new, c_new

    # First decoder step feeds token 0 together with the encoder states.
    first_tokens, first_probabilities, h, c = top_tokens(0, e_out, e_h, e_c)
    for first_token, first_probability in zip(first_tokens, first_probabilities):
        # Initialize the beam with one-word sentences and their states.
        top_sentences[y_index_word.get(first_token, '')] = (first_probability, h, c)

    # Grow every candidate one word at a time up to the length limit.
    # Bug fix: the loop counter was named `len`, shadowing the builtin.
    length = 1
    while length < MAX_HEADLINE_LENGTH:
        candidate_sentences = {}
        for sentence, (probability, h, c) in top_sentences.items():
            last_word = sentence.split()[-1]
            if last_word != '.':
                token = y_word_index.get(last_word, 0)
                next_tokens, next_probabilities, h_next, c_next = top_tokens(token, e_out, h, c)
                for next_token, next_probability in zip(next_tokens, next_probabilities):
                    new_sentence = sentence.strip() + ' ' + y_index_word.get(next_token, '')
                    candidate_sentences[new_sentence.strip()] = (probability * next_probability, h_next, c_next)
            else:
                # A '.' terminates the sentence; carry it through unchanged.
                candidate_sentences[sentence] = (probability, h, c)
        # Keep only the `beam` highest-probability candidates.
        low_probability_candidates = sorted(candidate_sentences, key=lambda k: candidate_sentences.get(k)[0])[:-beam]
        for low_probability_candidate in low_probability_candidates:
            candidate_sentences.pop(low_probability_candidate)
        top_sentences = candidate_sentences
        length += 1
    return top_sentences
def decode_sequence(input_seq, beam=3):
    """Return the single most probable beam-search sentence for *input_seq*."""
    candidates = get_top_beam_search_sentences(input_seq.reshape(1, -1), beam)
    ranked = [(sentence, prob) for sentence, (prob, _, _) in candidates.items()]
    # Highest probability wins; ties resolve to the first candidate seen,
    # matching the original stable sort.
    return max(ranked, key=lambda pair: pair[1])[0]
| true |
d9243622b6e0647168ca582039e6ad80d67e56a0 | Python | lev2cu/python_test | /pandas/test/movie.py | UTF-8 | 847 | 2.796875 | 3 | [] | no_license |
import numpy as np
from pandas import Series, DataFrame
import pandas as pd

# Parse the IMDB movie dump into a DataFrame (Python 2: `print i`).
path ='/Applications/XAMPP/xamppfiles/htdocs/IMDB/IMDBMovie.txt'
MOVIE_n = ['id', 'name', 'year', 'rank']
idn,rank,year,name = [],[], [], []
with open(path) as f:
    for i, line in enumerate(f):
        fields = line.strip().split(",")
        # First field is the id; last two are rank and year. Whatever
        # remains in the middle is the title, re-joined because titles may
        # themselves contain commas.
        idn.append(fields.pop(0))
        rank_ = fields.pop(-1)
        rank.append(rank_ if rank_ else np.nan)  # empty rank -> NaN
        year.append(fields.pop(-1))
        nae = '{}'.format(",".join(fields))
        name.append(nae)
        print i
MOVIE_d = {'id':idn,'name': name,'year':year,'rank':rank}
MOVIE1 = pd.DataFrame(MOVIE_d,columns =MOVIE_n )
# NOTE(review): .ix is removed in modern pandas; .iloc/.loc would be needed.
MOVIE = MOVIE1.ix[2:]
| true |
def main():
    """Read N points and print a pairwise max-coordinate-gap statistic.

    Judging by the commented brute force at the bottom (which sorts all
    pairwise max(|dx|, |dy|) values and prints the second largest), this
    appears to compute the second-largest such pairwise value by combining
    the extreme x- and y-spreads — TODO confirm against the brute force.
    """
    N = int(input())
    X = []
    Y = []
    ans = 0
    # Z = []
    # Store each coordinate together with its point index so endpoint
    # collisions between the x- and y-extremes can be detected below.
    for i in range(N):
        x, y = map(int, input().split())
        X.append([x, i]), Y.append([y, i])
    X_sort = sorted(X, reverse = True, key=lambda x: x[0])
    Y_sort = sorted(Y, reverse = True, key=lambda x: x[0])
    X_max, X_max2, X_min, X_min2 = X_sort[0], X_sort[1], X_sort[-1], X_sort[-2]
    Y_max, Y_max2, Y_min, Y_min2 = Y_sort[0], Y_sort[1], Y_sort[-1], Y_sort[-2]
    # Case: the x-spread is the largest.
    if (X_max[0] - X_min[0]) >= (Y_max[0] - Y_min[0]) and X_max[1] != Y_max[1] and X_min[1] != Y_min[1]:
        ans = max(X_max[0] - X_min2[0], X_max2[0] - X_min[0], Y_max[0] - Y_min[0])
        print(ans)
    elif (X_max[0] - X_min[0]) >= (Y_max[0] - Y_min[0]) and X_max[1] == Y_max[1] and X_min[1] != Y_min[1]:
        ans = max(X_max[0] - X_min2[0], X_max2[0] - X_min[0], Y_max2[0] - Y_min[0])
        print(ans)
    elif (X_max[0] - X_min[0]) >= (Y_max[0] - Y_min[0]) and X_max[1] != Y_max[1] and X_min[1] == Y_min[1]:
        ans = max(X_max[0] - X_min2[0], X_max2[0] - X_min[0], Y_max[0] - Y_min2[0])
        print(ans)
    elif (X_max[0] - X_min[0]) >= (Y_max[0] - Y_min[0]) and X_max[1] == Y_max[1] and X_min[1] == Y_min[1]:
        ans = max(X_max[0] - X_min2[0], X_max2[0] - X_min[0], Y_max2[0] - Y_min2[0])
        print(ans)
    # Case: the y-spread is the largest.
    elif (X_max[0] - X_min[0]) < (Y_max[0] - Y_min[0]) and Y_max[1] != X_max[1] and Y_min[1] != X_min[1]:
        ans = max(Y_max[0] - Y_min2[0], Y_max2[0] - Y_min[0], X_max[0] - X_min[0])
        print(ans)
    elif (X_max[0] - X_min[0]) < (Y_max[0] - Y_min[0]) and Y_max[1] == X_max[1] and Y_min[1] != X_min[1]:
        ans = max(Y_max[0] - Y_min2[0], Y_max2[0] - Y_min[0], X_max2[0] - X_min[0])
        print(ans)
    elif (X_max[0] - X_min[0]) < (Y_max[0] - Y_min[0]) and Y_max[1] != X_max[1] and Y_min[1] == X_min[1]:
        ans = max(Y_max[0] - Y_min2[0], Y_max2[0] - Y_min[0], X_max[0] - X_min2[0])
        print(ans)
    elif (X_max[0] - X_min[0]) < (Y_max[0] - Y_min[0]) and Y_max[1] == X_max[1] and Y_min[1] == X_min[1]:
        ans = max(Y_max[0] - Y_min2[0], Y_max2[0] - Y_min[0], X_max2[0] - X_min2[0])
        print(ans)
    # Reference brute force kept for comparison:
    # for i in range(N - 1):
    #     for j in range(i + 1, N):
    #         z = max(abs(X[i] - X[j]), abs(Y[i] - Y[j]))
    #         Z.append(z)
    # Z = sorted(Z, reverse = True)
    # print(Z[1])
if __name__ == "__main__":
main() | true |
ef74fc7c414e1f1d7a0ef4f2803847b8643e0f84 | Python | Coby-chan/AID1906 | /7.24homework.py | UTF-8 | 689 | 3.796875 | 4 | [] | no_license | """
给出两个有序的链表L1,L2 .
在不创建新的链表的基础上将两个链表合并为一个
要求合并后的链表仍为有序
"""
from day01.linklist import *
L1 = LinkList()
L2 = LinkList()
L1.init_list([1,5,7,8,10,12,13,19])
L2.init_list([0,3,4,8,14,21,22])
L1.show()
print("=========================")
L2.show()
def merge(L1, L2):
    """Merge sorted linked list L2 into L1 in place, keeping the order.

    Both lists use a dummy head node; after the call L1 holds every
    element of both lists in ascending order and no new nodes are made.
    """
    tail = L1.head
    pending = L2.head.next
    while tail.next is not None:
        if tail.next.val < pending.val:
            tail = tail.next
        else:
            # Splice the pending node in front of tail.next, and continue
            # merging the rest of L1's chain as the new pending stream.
            tail.next, pending = pending, tail.next
            tail = tail.next
    # Attach whatever remains of the pending stream.
    tail.next = pending
merge(L1,L2)
print("=================================")
L1.show() | true |
29052ecbb3a0529315627e947a2f64f35571eae6 | Python | TimVanDyke/Computer-Vision | /load.py | UTF-8 | 1,161 | 2.6875 | 3 | [] | no_license | import numpy as np
import gzip
def load_data(dataset):
    """Load the EMNIST *dataset* split from ./datasets as numpy arrays.

    Returns (train_images, train_labels, test_images, test_labels).
    Images come back as float32 in [0, 1) with shape (N, 1, 28, 28);
    labels are uint8 vectors of integers.
    """
    def read_images(path):
        with gzip.open(path, 'rb') as fh:
            raw = np.frombuffer(fh.read(), np.uint8, offset=16)
        # Reshape the flat byte stream into monochrome 28x28 images and
        # rescale the bytes to [0, 1).
        return raw.reshape(-1, 1, 28, 28) / np.float32(256)

    def read_labels(path):
        with gzip.open(path, 'rb') as fh:
            return np.frombuffer(fh.read(), np.uint8, offset=8)

    prefix = './datasets/emnist-{}-'.format(dataset)
    train_labels = read_labels(prefix + 'train-labels-idx1-ubyte.gz')   # Y
    train_images = read_images(prefix + 'train-images-idx3-ubyte.gz')   # X
    test_labels = read_labels(prefix + 'test-labels-idx1-ubyte.gz')     # Y
    test_images = read_images(prefix + 'test-images-idx3-ubyte.gz')     # X
    return train_images, train_labels, test_images, test_labels
| true |
9b60278886cfdae240762b9491401067f57d43a7 | Python | gorgonun/rpg_dso1 | /playerDao.py | UTF-8 | 1,178 | 2.875 | 3 | [] | no_license | from dao import Dao
class PlayerDao(Dao):
    """DAO for Player objects persisted through the pickle-backed Dao cache.

    ``object_cache`` (inherited from Dao) maps player name -> Player.
    """

    def __init__(self, datasource="players.pickle"):
        super().__init__(datasource)

    def get_all(self):
        """Return every cached player as a list."""
        return list(self.object_cache.values())

    def get_dict(self):
        """Return the raw name -> player cache mapping."""
        return self.object_cache

    def get(self, name):
        """Return the player called *name*, or None if unknown."""
        # Bug fix: the original fetched the player and then returned it from
        # two redundant branches; a single lookup is equivalent.
        return self.object_cache.get(name)

    def get_char(self, player_name, char_name):
        """Return the named character of the named player, or None."""
        for char in self.object_cache[player_name].characters:
            if char.name == char_name:
                return char

    def add_char(self, player, char):
        """Attach *char* to *player* and persist the change."""
        self.object_cache[player.name].new_character(char)
        self.update()

    def add_player(self, player):
        """Insert (or replace) *player* in the cache and persist."""
        self.object_cache[player.name] = player
        self.update()

    def remove_player(self, player):
        """Drop *player* from the cache and persist."""
        self.object_cache.pop(player.name)
        self.update()

    def remove_char(self, player, char):
        """Remove *char* from *player*; drop the player once characterless."""
        player = self.get(player.name)
        # NOTE(review): relies on the Player class exposing remove() — confirm
        # it removes from .characters.
        player.remove(char)
        if len(player.characters) == 0:
            self.remove_player(player)
        self.update()

    def save(self):
        """Persist the cache (alias for Dao.update)."""
        self.update()
| true |
a7e19e4b5efcec43ae432799d052069ad1ed186d | Python | rohun-kulkarni/gastronomy | /manipulation/azure_kinect_calibration/utils/prepare_multi_dir_calibration_data.py | UTF-8 | 2,464 | 2.609375 | 3 | [] | no_license | import numpy as np
import os
import shutil
def main(path_list, dest_path):
    """Merge calibration image pairs from several capture directories.

    Copies N_main_img.png / N_secondary_img.png pairs from each directory
    in *path_list* into dest_path/main and dest_path/secondary, renumbering
    them with one continuous counter.
    """
    img_count = 1
    main_dest_dir = os.path.join(dest_path, 'main')
    secondary_dest_dir = os.path.join(dest_path, 'secondary')
    for p in [main_dest_dir, secondary_dest_dir]:
        if not os.path.exists(p):
            os.makedirs(p)
    for p in path_list:
        main_img_dir = os.path.join(p, 'main')
        secondary_img_dir = os.path.join(p, 'secondary')
        # Images are numbered from 1; stop at the first missing index.
        for img_idx in range(1, 1000):
            img_path = '{}_main_img.png'.format(img_idx)
            if not os.path.exists(os.path.join(main_img_dir, img_path)):
                break
            if 'png' not in img_path:
                continue
            main_img_path = os.path.join(main_img_dir, img_path)
            new_main_img_path = os.path.join(
                main_dest_dir, '{}_main_img.png'.format(img_count))
            shutil.copy2(main_img_path, new_main_img_path)
            # The matching secondary image shares the same source index.
            curr_img_count = int(img_path.split('_')[0])
            secondary_img_path = os.path.join(
                secondary_img_dir, '{}_secondary_img.png'.format(curr_img_count))
            assert os.path.exists(secondary_img_path)
            new_secondary_img_path = os.path.join(
                secondary_dest_dir, '{}_secondary_img.png'.format(img_count)
            )
            shutil.copy2(secondary_img_path, new_secondary_img_path)
            img_count = img_count + 1
        # NOTE(review): img_idx here is the loop value at break, i.e. one
        # past the number of images copied from this directory.
        print("Copied images \t from dir: {}\n"
              " \t to dir: {}\n"
              " \t count: {}".format(
                  p, dest_path, img_idx
              ))
if __name__ == '__main__':
path_list = [
'/home/klz/good_calib_data/main_overhead_sec_front_left/Nov_23_try_1/calib_data_1',
'/home/klz/good_calib_data/main_overhead_sec_front_left/Nov_23_try_2',
# '/home/klz/good_calib_data/main_front_right_sec_overhead/combined_data/Nov_20_7_30/org/calib_data_Nov_19_11_20_PM',
# '/home/klz/good_calib_data/main_front_right_sec_overhead/combined_data/Nov_20_7_30/org/calib_data_Nov_21_12_30_PM_try_5'
]
# dest_path = '/home/klz/good_calib_data/main_front_right_sec_overhead/combined_data/Nov_20_7_30/combined'
dest_path = '/home/klz/good_calib_data/main_overhead_sec_front_left/combined/Nov_23_try_1_2/'
if not os.path.exists(dest_path):
os.makedirs(dest_path)
main(path_list, dest_path) | true |
bec5840fd27bbc271c6a2420fd2448b315bd519c | Python | farukara/Project-Euler-problems | /files/041 - pandigital prime.py | UTF-8 | 1,478 | 4.03125 | 4 | [
"MIT"
] | permissive | #!python3
# coding: utf-8
# We shall say that an *n*-digit number is pandigital if it makes use of all the digits 1 to *n* exactly once. For example, 2143 is a 4-digit pandigital and is also prime.
# What is the largest *n*-digit pandigital prime that exists?
#https://projecteuler.net/problem=30
from time import perf_counter
from itertools import permutations
from math import sqrt
def timeit(func):
    """Decorator that prints *func*'s wall-clock runtime after each call."""
    # Local import so the module-level import block stays untouched.
    from functools import wraps

    # Bug fix: without functools.wraps the decorated function lost its
    # __name__/__doc__ (it reported as 'wrapper').
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = perf_counter()
        result = func(*args, **kwargs)
        finish = perf_counter()
        print(f"{func.__name__} function took {finish - start:.2f} seconds")
        return result
    return wrapper
def is_prime(n):
    """Return True if *n* is a prime number.

    Any n < 2 (including negatives) is not prime. Trial division by odd
    numbers up to sqrt(n) — O(sqrt n).
    """
    # Robustness fix: the original only rejected 0 and 1, so a negative n
    # reached sqrt() and raised ValueError.
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    for i in range(3, int(sqrt(n)) + 1, 2):
        if n % i == 0:
            return False
    return True
@timeit
def main():
    """Find and print the largest pandigital prime (digits 1..n, n <= 7).

    8- and 9-digit pandigitals are divisible by 3 (digit sums 36 and 45),
    so only "1234567" and its shrinking prefixes need checking.
    """
    digits = list("1234567")
    found = []
    for trim in range(9):
        digits = digits[:9 - trim]
        for perm in permutations(digits, len(digits)):
            candidate = int("".join(perm))
            if is_prime(candidate):
                found.append(candidate)
        # Early exit: the first digit-count that yields any prime wins.
        if found:
            break
    print(max(found))
if __name__ == "__main__":
main()
| true |
a2a8208867f30526888067e9bed260a46d25407d | Python | realshovanshah/restura-virtual-assistant | /restura_assistant.py | UTF-8 | 1,631 | 2.84375 | 3 | [] | no_license | import io, random
from gtts import gTTS
#requies ffmeg
from pydub import AudioSegment
from pydub.playback import play
import speech_recognition as sr
from restura_api import ResturaApi
from helper import toStr
class ResturaAssistant:
    """Voice/console restaurant assistant backed by ResturaApi data."""

    all_items = ResturaApi.items
    # A random four-item sample advertised as the "top" items.
    top_items = random.sample(all_items, k=4)
    categories = ResturaApi.categories

    @staticmethod
    def get_audio():
        """Record one utterance from the microphone and return it as text.

        Returns "" when speech recognition fails.
        """
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print('listening')
            audio = r.listen(source)
        spokenText = ""
        try:
            spokenText = r.recognize_google(audio)
        except Exception as e:
            print("Exception: " + str(e))
        return spokenText

    @staticmethod
    def speak(my_text):
        """Synthesize *my_text* with gTTS and play it through pydub."""
        with io.BytesIO() as f:
            gTTS(text=my_text, lang='en').write_to_fp(f)
            f.seek(0)  # rewind before decoding the MP3 bytes
            song = AudioSegment.from_file(f, format="mp3")
            play(song)

    @classmethod
    def executeAction(cls, items):
        """Append each requested item to the shared order list."""
        for item in items:
            print('running')
            ResturaApi.orders.append(item)

    @classmethod
    def getData(cls, action):
        """Print the data requested by *action* (getOrder/item/category)."""
        if action == 'getOrder':
            orders = ResturaApi.orders
            if len(orders)>0:
                print(f'You have ordered {toStr(ResturaApi.orders)}')
            else:
                print('Your order is empty!')
        if action == 'item':
            print(f'Some of the items we offer are {toStr(cls.top_items)}')
        if action == 'category':
            print(f'we have {toStr(cls.categories)} categories.')
| true |
c9278fc416bb1aee53f89b8a296d5dccfd3768ec | Python | Qingchuan-Ma/Photo_Editor | /functions/skin.py | UTF-8 | 1,035 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 27 16:38:07 2019
@author: qingchuan-ma
"""
import numpy as np
import cv2
def beauty_face(img, degree = 5, detail = 1):
    """Skin-smoothing ("beauty") filter built from bilateral filtering.

    *degree* scales the bilateral-filter strength; *detail* controls the
    Gaussian-blur kernel applied to the high-frequency residue. degree < 1
    returns the image unchanged. Assumes a BGR(A) uint8 OpenCV image —
    TODO confirm channel count against the 4-tuple scalars below.
    """
    if degree >= 1:
        dst = np.zeros_like(img)
        # Smoothing level (v1) and detail level (v2) determine the filter
        # parameters; `value` drives the final light bilateral pass.
        value = 5
        v1 = int(degree)
        v2 = int(detail)
        dx = v1 * 5  # bilateral-filter neighborhood diameter
        fc = v1 * 12.5  # bilateral-filter sigma (color and space)
        p = 0.1
        temp4 = np.zeros_like(img)
        temp1 = cv2.bilateralFilter(img,dx,fc,fc)
        # High-frequency residue of the smoothing, lifted by a constant.
        temp2 = cv2.subtract(temp1,img);
        temp2 = cv2.add(temp2,(10,10,10,128))
        temp3 = cv2.GaussianBlur(temp2,(2*v2 - 1,2*v2-1),0)
        temp4 = cv2.add(img,temp3)
        # Blend the original back in and brighten slightly.
        dst = cv2.addWeighted(img,p,temp4,1-p,0.0)
        dst = cv2.add(dst,(10, 10, 10,255))
        dst = cv2.bilateralFilter(dst, value, value * 2, value / 2)
        #cv2.imwrite("4.jpg",dst)
    else:
        dst = img
    return dst
| true |
36d95ce3801f6eee31a7236610da4163ca9288fd | Python | yzl232/code_training | /mianJing111111/Google/interleave iterator of iterators_join iterator of iterators.py | UTF-8 | 2,547 | 3.84375 | 4 | [] | no_license | # encoding=utf-8
#超高频的一道题目了
'''
问的是和java里的iterator有关的问题:
假设有n (下面的例子n=3)个lists:
l1: a1 a2 a3 a4 a5 ...
l2: b1 b2 b3 b4...
l3: c1 c2 ....
要求交替输出:a1 b1 c1 a2 b2 c2....
给的输入是Iterator<Iterator<T>>,要实现一个
class InterleavingIterator<T>
刚开始脑子一团浆糊,太紧张了,过了有好几分钟才想清楚怎么写,(想的过程中漂亮的面试官还问我哪里卡住了,超级nice的),然后就写完了,然后面试官 说你要怎样测试你的code哇? 然后我就给了几个test case,然后看test case的过程中发现代码有bug,没有处理输入为空的情况,改好了之后,又问了hasNext()和next()的时间复杂度,然后又聊了一下我的 work和她的work,然后就到时间了。
最有一轮总算是比较轻松了……第一题是写个iterator of a list of iterators,注意处理list为空和某个iterator为空的情况。代码没啥问题后,就进入了下一题。
6.加试电面, 写jump iterator类, 构造函数传入一个普通的iterator, 然后实现next(), hasNext(). next()返回传入iterator的next().next(), 就是每次跳过一个元素输出.
然后再实现一个rotateIterator(), 构造函数传入List<Iterator<T>>, 实现next(), hasNext(). 例如:
传入的三个iterator里面的值分别是[[1,2,3],[4,5,6], [7,8]], 那rotateIterator的next()应该输出[1,4,7,2,5,8,3,6]. 就是竖着遍历每个iterator输出, 如果当前的iterator没有了, 就跳到下一个.
2,面试官问我既然我在上一题用到了iterator,那接下来就编写一个变形的iterator吧:
给定两个iterator,让两个iterator进行交互输出。: [% m% V; x1 ~# ^
例子:
A:1234
B:abcd
则我编写的iterator输出为:1a2b3c4d,如果一个读完了那就读没读完那个直到两个都读完为止。
'''
#其实比较像用minHeap 那道题目
#用queue来做。 很巧妙
#http://stackoverflow.com/questions/9200080/join-multiple-iterators-in-java
from collections import deque
class FlatIterator:
    """Round-robin ("interleaving") iterator over hasNext()/next() iterators.

    Given iterators [a1 a2 ...], [b1 b2 ...], [c1 ...], yields
    a1 b1 c1 a2 b2 ... Exhausted iterators simply drop out of the rotation.
    """

    def __init__(self, iters):
        # Queue only the non-empty iterators; empty ones never participate.
        self.pending = deque(it for it in iters if it.hasNext())

    def hasNext(self):
        return bool(self.pending)

    def next(self):
        assert self.hasNext()
        current = self.pending.popleft()
        value = current.next()
        # Re-queue only while the iterator still has elements; this guard
        # is what removes exhausted iterators from the rotation.
        if current.hasNext():
            self.pending.append(current)
        return value
7acfcc3c23320b749fae6f90b66e09631e7d9f1f | Python | mdhvkothari/Python-Program | /hackerrank/co.py | UTF-8 | 570 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 18 17:19:49 2018
@author: Madhav
"""
# Collatz (3n+1) chain search, Python 2 style: input() evaluates to int.
# For each test case, find the start value in 1..c with the longest chain
# (ties resolved toward the larger start value — TODO confirm intent).
for _ in range (input()):
    c=input()
    main=0     # longest chain length seen so far
    maxnum=1   # start value achieving it
    for x in range (1,c+1):
        temp=0
        T=x    # remember the starting value; x is mutated below
        while True :
            if x%2 ==0 :
                x=x/2
            else :
                x=3*x+1
            temp=temp+1
            if x==1:
                if temp == main and T>maxnum :
                    maxnum=T
                elif temp>main :
                    main=temp
                    maxnum=T
                break
    print (maxnum)
11e2ca45808c9571f3fe3dca8d1ffb87d6195087 | Python | mrunalruikar/PythonFundamentals | /ex3.py | UTF-8 | 449 | 4.09375 | 4 | [] | no_license | print "I will now count my chickens:"
print "Hens", 25 + 30 /6
print "Roosters", 100 - 25 * 3 % 4
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
print "Is it true that 3 + 2 < 5 - 7 ?"
print 3 + 2 < 5 - 7
print "What is 3 + 2 ?", 3 + 2
print "What is 5 - 7 ?", 5 - 7
print "Oh that's why it is False. "
print "How about some more. "
print "Is it greater? ", 5 > -2
print "Is it grater or equal?", 5 >= -2
print "Is it less or equal?", 5 <= -2
| true |
231581a0d55b46015b1d68d08c5b695396bce852 | Python | cseharshit/Python_Practice_Beginner | /55.Selection_Sort.py | UTF-8 | 297 | 3.65625 | 4 | [
"MIT"
] | permissive | from random import randint
x=int(input("Enter number of elements: "))
arr=[randint(1,100) for i in range(x)]
print(arr)
j=x-1
while j!=0:
k=0
for i in range(1,j+1):
if arr[i] > arr[k]:
k=i
# Swap the values
arr[k],arr[j]=arr[j],arr[k]
j-=1
print(arr) | true |
6bf694170d25bdab3232c85913591c8235e855bb | Python | Puckery-fc/tests | /验证码/Login.py | UTF-8 | 2,645 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# File : Login.py
# Date : 2019/4/28
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time,os,unittest
class JD(unittest.TestCase):
    """Selenium test that logs in to jd.com and captures session cookies."""

    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "https://www.jd.com/"

    def test_Login(self):
        """Log in with fixed credentials, then hand four cookies to cookieLogin."""
        driver = self.driver
        driver.get(self.base_url)
        driver.maximize_window()
        cookieBefore = driver.get_cookies()
        print(cookieBefore)  # cookies captured before logging in
        time.sleep(2)
        driver.find_element_by_class_name("link-login").click()
        driver.find_element_by_link_text("账户登录").click()
        driver.find_element_by_id("loginname").clear()
        driver.find_element_by_id("loginname").send_keys("15088558058")
        driver.find_element_by_id("loginname").send_keys(Keys.TAB)
        driver.find_element_by_id("nloginpwd").send_keys("fc678268")
        driver.find_element_by_id("loginsubmit").click()
        # Sleep so the cookies fetched below belong to the logged-in
        # session rather than the pre-login page.
        driver.implicitly_wait(5)
        time.sleep(5)
        print("登录后")
        cookieAfter = driver.get_cookies()
        print("cookiesAfter:")
        print(cookieAfter)
        len1 = len(cookieAfter)
        print("len:%d"%len1)
        # Pick four cookies by position (first, fourth, and the last two).
        cookie1 = cookieAfter[0]
        cookie2 = cookieAfter[3]
        cookie3 = cookieAfter[-2]
        cookie4 = cookieAfter[-1]
        print("cookie1:%s"%cookie1)
        print("cookie2:%s" % cookie2)
        print("cookie3:%s" % cookie3)
        print("cookie4:%s" % cookie4)
        driver.quit()
        # Pass the four captured cookies to the cookie-based login helper.
        cookieLogin(cookie1,cookie2,cookie3,cookie4)
cookieLogin(cookie1,cookie2,cookie3,cookie4)
def cookieLogin(cookie1,cookie2,cookie3,cookie4):
print("+++++++++++++++++++++++++")
print("cookieLogin")
print("cookie2:%s" % cookie2)
print("cookie4:%s" % cookie4)
driver = self.driver
driver.maximize_window()
driver.delete_all_cookies()
time.sleep(3)
driver.get(self.base_url)
driver.add_cookie(cookie1)
driver.add_cookie(cookie2)
driver.add_cookie(cookie3)
driver.add_cookie(cookie4)
print("cookies")
print(driver.get_cookies())
time.sleep(5)
driver.refresh()
time.sleep(5)
driver.quit()
if __name__ == "__main__":
unittest.main()
| true |
d0ff1020d4ebda8909bc3d1f10837b17091a0c9f | Python | chenchuangc/algorithm | /03.mythink_/09.code_python02/03.all_search_/08_how_to_iterator_/08_01_hannuo_tower.py | UTF-8 | 1,381 | 3.765625 | 4 | [] | no_license | # !/bin/python
# -*- encoding:UTF-8 -*-
# 有三根杆子A,B,C。A杆上有 N 个 (N>1) 穿孔圆盘,盘的尺寸由下到上依次变小。要求按下列规则将所有圆盘移至 C 杆:
# 每次只能移动一个圆盘;
# 大盘不能叠在小盘上面。
# 提示:可将圆盘临时置于 B 杆,也可将从 A 杆移出的圆盘重新移回 A 杆,但都必须遵循上述两条规则。
# 问:如何移?最少要移动多少次?
# 这个是经典的汉诺塔问题,我们可以使用递推的方式进行 假如能够完成f(n)到b上,那就一定能够完成f(n+1)到c上面,
# 因为,完成了f(n),只需要将第n+1个移动到c,后面的问题就等于f(n)了
# 递归的的主体是父亲在还在的基础上移动一个盘子,然后再转化问题
# 问题的描述变量 当前盘子数,当前盘子在哪个杆上,移动的目标是到哪个盘上面,
# that is ok
def han_nuo_tower(n, s_index, target_index, step_list):
    """Append the Tower-of-Hanoi moves for *n* disks onto *step_list*.

    Pegs are numbered 1..3; each move is recorded as "src->dst". Moving n
    disks is: move n-1 to the spare peg, move the largest disk, then move
    the n-1 back on top — 2**n - 1 moves in total.
    """
    if n == 1:
        step_list.append("{}->{}".format(s_index, target_index))
        return
    spare = 6 - s_index - target_index  # the three peg numbers sum to 6
    han_nuo_tower(n - 1, s_index, spare, step_list)
    step_list.append("{}->{}".format(s_index, target_index))
    han_nuo_tower(n - 1, spare, target_index, step_list)
step = []
han_nuo_tower(4, 1, 3, step)
print step
| true |
66d55404659adaad9681eb1b07ffe67453f9de7a | Python | aascode/emotion | /scripts/training/train_multiple.py | UTF-8 | 3,416 | 2.6875 | 3 | [
"MIT"
] | permissive | import argparse
import pickle
from pathlib import Path
import numpy as np
from emotion_recognition.classification import PrecomputedSVC
from emotion_recognition.dataset import CombinedDataset, NetCDFDataset
from sklearn.metrics import (average_precision_score, f1_score, get_scorer,
make_scorer, precision_score, recall_score)
from sklearn.model_selection import (GroupKFold, LeaveOneGroupOut, KFold,
cross_validate)
def main():
    """Train/evaluate a binary emotional-vs-neutral SVC over merged datasets.

    Cross-validates a precomputed-kernel SVC with speaker-, corpus-, or
    plain K-fold splitting, prints the mean scores, and optionally pickles
    a classifier fit on all data.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=Path, nargs='+', required=True,
                        help="Input datasets.")
    parser.add_argument(
        '--cv', type=str, default='speaker',
        help="Cross-validation method. One of {speaker, corpus}."
    )
    parser.add_argument('--norm', type=str, default='speaker',
                        help="Normalisation method. One of {speaker, corpus}.")
    parser.add_argument('--save', type=Path,
                        help="Path to save trained model.")
    args = parser.parse_args()

    # Collapse all emotion labels into a binary emotional/neutral problem.
    dataset = CombinedDataset(*(NetCDFDataset(path) for path in args.input))
    emotion_map = {x: 'emotional' for x in dataset.classes}
    emotion_map['neutral'] = 'neutral'
    dataset.map_classes(emotion_map)
    print(dataset.class_counts)
    dataset.normalise(scheme=args.norm)

    # Choose the CV splitter and its grouping variable.
    cv = LeaveOneGroupOut()
    if args.cv == 'speaker':
        groups = dataset.speaker_group_indices
        # With many speakers, leave-one-out is too slow; use 6 group folds.
        if len(dataset.speakers) > 10:
            cv = GroupKFold(6)
        print("Using speaker-independent cross-validation.")
    elif args.cv == 'corpus':
        groups = dataset.corpus_indices
        print("Using corpus-independent cross-validation.")
    else:
        groups = None
        cv = KFold(10)

    # Inverse-frequency class weights, applied per sample.
    class_weight = (dataset.n_instances
                    / (dataset.n_classes * dataset.class_counts))
    # Necessary until scikeras supports passing in class_weights directly
    sample_weight = class_weight[dataset.y]

    # pos_label=0 treats the first class as the "emotion" positive class.
    scoring = {
        'war': get_scorer('accuracy'),
        'uar': get_scorer('balanced_accuracy'),
        'recall': make_scorer(recall_score, pos_label=0),
        'precision': make_scorer(precision_score, pos_label=0),
        'f1': make_scorer(f1_score, pos_label=0),
        'ap': make_scorer(average_precision_score, pos_label=0)
    }
    clf = PrecomputedSVC(C=1.0, kernel='rbf', gamma=2**-6, probability=True)
    scores = cross_validate(
        clf, dataset.x, dataset.y, cv=cv, scoring=scoring, groups=groups,
        fit_params={'sample_weight': sample_weight}, n_jobs=6, verbose=0
    )
    # Average each test_* metric across folds.
    mean_scores = {k[5:]: np.mean(v) for k, v in scores.items()
                   if k.startswith('test_')}
    print('Accuracy: {:.3f}'.format(mean_scores['war']))
    print('Bal. accuracy: {:.3f}'.format(mean_scores['uar']))
    print('Emotion recall: {:.3f}'.format(mean_scores['recall']))
    print('Emotion precision: {:.3f}'.format(mean_scores['precision']))
    print('F1 score: {:.3f}'.format(mean_scores['f1']))
    print('AP: {:.3f}'.format(mean_scores['ap']))

    if args.save:
        # Refit on all data before persisting the final model.
        clf.fit(dataset.x, dataset.y, sample_weight=sample_weight)
        args.save.parent.mkdir(parents=True, exist_ok=True)
        with open(args.save, 'wb') as fid:
            pickle.dump(clf, fid)
        print("Saved classifier to {}".format(args.save))
if __name__ == "__main__":
main()
| true |
f0229d3524ad8a562caf5676335f117ae07c8515 | Python | zhtea/codejam | /2008/milkshakes/solve.py | UTF-8 | 999 | 2.59375 | 3 | [] | no_license | #coding=utf-8
import sys

# Google Code Jam "Milkshakes": choose malted (1) / unmalted (0) for each
# flavor so every customer likes at least one shake, minimising malted.
# NOTE(review): under Python 2 the list comprehension below leaks its loop
# variable `i`, clobbering the outer case counter used in the output line —
# confirm which interpreter this targeted.
N = int(sys.stdin.readline().strip())
for i in range(1,N+1):
    flavor = int(sys.stdin.readline().strip())
    customer = int(sys.stdin.readline().strip())
    flavors = [0]*flavor   # 0 = unmalted, 1 = malted
    r = True               # False once a customer cannot be satisfied
    j = 0
    while j < customer:
        # Line format: count, then (flavor, malted?) pairs.
        l = [int(i) for i in sys.stdin.readline().strip().split()]
        fl = l[0]
        l = l[1:]
        tmp = -1           # index of a malted flavor this customer likes
        tmp_mark = False   # customer already satisfied by current choices
        for k in range(fl):
            if l[k*2+1] == flavors[l[k*2]-1]:
                tmp_mark = True
                break
            if l[k*2+1] == 1:
                tmp = l[k*2]-1
        if not tmp_mark:
            if tmp == -1:
                # No satisfiable option remains for this customer.
                r = False
            else:
                if flavors[tmp] == 1:
                    r = False
                else:
                    # Greedily promote one liked flavor to malted.
                    flavors[tmp] = 1
        j += 1
    if not r:
        print("Case #%d: IMPOSSIBLE"%i)
    else:
        print("Case #%d: "%i + " ".join([str(j) for j in flavors]))
| true |
697e3b0452c5f3ed28f72e14126e8e92cd7b3aa3 | Python | chrillux/brottsplatskartan | /brottsplatskartan/__init__.py | UTF-8 | 4,775 | 2.65625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | # coding=utf-8
""" Brottsplatskartan API """
import datetime
import time
from json.decoder import JSONDecodeError
from typing import Union
import requests
AREAS = [
"Blekinge län", "Dalarnas län", "Gotlands län", "Gävleborgs län",
"Hallands län", "Jämtlands län", "Jönköpings län", "Kalmar län",
"Kronobergs län", "Norrbottens län", "Skåne län", "Stockholms län",
"Södermanlands län", "Uppsala län", "Värmlands län", "Västerbottens län",
"Västernorrlands län", "Västmanlands län", "Västra Götalands län",
"Örebro län", "Östergötlands län"
]
ATTRIBUTION = "Information provided by brottsplatskartan.se"
BROTTS_URL = "https://brottsplatskartan.se/api"
class BrottsplatsKartan: # pylint: disable=too-few-public-methods
    """Brottsplatskartan API wrapper.

    Queries either by named areas (see AREAS) or by a lat/lng pair;
    when neither is given it defaults to "Stockholms län".
    """

    def __init__(self, app='bpk', areas=None, longitude=None, latitude=None):
        """ Setup initial brottsplatskartan configuration. """
        self.parameters = {"app": app}
        self.incidents = {}

        if areas:
            # Validate every requested area against the known list.
            for area in areas:
                if area not in AREAS:
                    raise ValueError('not a valid area: {}'.format(area))

            self.url = BROTTS_URL + "/events"
            self.parameters["areas"] = areas
        elif longitude and latitude:
            self.url = BROTTS_URL + "/eventsNearby"
            self.parameters["lat"] = latitude
            self.parameters["lng"] = longitude
        else:
            # Missing parameters. Using default values.
            self.url = BROTTS_URL + "/events"
            self.parameters["areas"] = ["Stockholms län"]

    @staticmethod
    def _get_datetime_as_ymd(date: time.struct_time) -> datetime.datetime:
        """Truncate a struct_time to a midnight datetime (year/month/day)."""
        datetime_ymd = datetime.datetime(date.tm_year, date.tm_mon,
                                         date.tm_mday)
        return datetime_ymd

    @staticmethod
    def is_ratelimited(requests_response) -> bool:
        """ Check if we have been ratelimited. """
        # The API signals throttling with an x-ratelimit-reset header
        # holding the epoch second when the limit lifts.
        rate_limited = requests_response.headers.get('x-ratelimit-reset')
        if rate_limited:
            print("You have been rate limited until " + time.strftime(
                '%Y-%m-%d %H:%M:%S%z', time.localtime(int(rate_limited))))
            return True
        return False

    def get_incidents_from_bpk(self, parameters) -> Union[list, bool]:
        """ Make the API calls to get incidents.

        Follows pagination links and collects only incidents published
        today (local date); returns False on rate limiting or bad JSON.
        """
        brotts_entries_left = True
        incidents_today = []
        url = self.url
        while brotts_entries_left:
            requests_response = requests.get(url, params=parameters)
            if self.is_ratelimited(requests_response):
                return False
            try:
                requests_response = requests_response.json()
            except JSONDecodeError:
                print("got JSONDecodeError")
                return False
            incidents = requests_response.get("data")
            if not incidents:
                incidents_today = []
                break
            datetime_today = datetime.date.today()
            datetime_today_as_time = time.strptime(str(datetime_today),
                                                   "%Y-%m-%d")
            today_date_ymd = self._get_datetime_as_ymd(datetime_today_as_time)

            for incident in incidents:
                incident_pubdate = incident["pubdate_iso8601"]
                incident_date = time.strptime(incident_pubdate,
                                              "%Y-%m-%dT%H:%M:%S%z")
                incident_date_ymd = self._get_datetime_as_ymd(incident_date)
                if today_date_ymd == incident_date_ymd:
                    incidents_today.append(incident)
                else:
                    # Results arrive newest-first, so the first older
                    # incident means the rest can be skipped.
                    brotts_entries_left = False
                    break
            if requests_response.get("links"):
                url = requests_response["links"]["next_page_url"]
            else:
                break

        return incidents_today

    def get_incidents(self) -> Union[list, bool]:
        """ Get today's incidents.

        Returns a dict keyed by area name (or "latlng"), or False when
        the last fetch failed.
        """
        areas = self.parameters.get("areas")
        all_incidents = {}
        current_incidents = []
        if areas:
            parameters = {}
            for area in areas:
                parameters["app"] = self.parameters.get("app")
                parameters["area"] = area
                current_incidents = self.get_incidents_from_bpk(parameters)
                all_incidents.update({area: current_incidents})
        else:
            current_incidents = self.get_incidents_from_bpk(self.parameters)
            all_incidents.update({"latlng": current_incidents})

        if current_incidents is False:
            return False

        return all_incidents
| true |
ddcf30bfbe7ad8eeff0aea1c41a812526b694b02 | Python | Rockyzsu/StudyRepo | /python/my_py_notes_万物皆对象/modules_python常用模块/cmath/cmath_test.py | UTF-8 | 262 | 3.28125 | 3 | [] | no_license | # coding = utf-8
__author__ = 'super_fazai'
# @Time  : 17-7-26 2:22 PM
# @File  : cmath_test.py
# Small demo of the complex-math module: prints a few cmath results.
import cmath
print(cmath.exp(2))  # return the exponential value e**x
print(cmath.sqrt(4))  # square root (returns a complex number)
print(cmath.asin(0.5))  # arc sine
print('%.20f' % cmath.pi)  # pi printed to 20 decimal places
ccc6ebc7713b5456ddce86b8dc2933ecbc973bcf | Python | AngelBachler/Proyecto-Algebra | /vec.py | UTF-8 | 3,579 | 3 | 3 | [] | no_license | # Copyright 2013 Philip N. Klein
def getitem(v,k):
    """Return v[k]: the value v.f maps k to, or 0 when the domain
    element has no explicit entry in the sparse representation."""
    assert k in v.D
    # dict.get collapses the original membership-test-then-index pair
    # (and the leftover `#pass`) into a single lookup.
    return v.f.get(k, 0)
def setitem(v,k,val):
    """Bind the domain element k to val in v's sparse representation."""
    assert k in v.D
    v.f[k] = val
def equal(u,v):
    """Return True when u and v agree, as functions, on every domain element
    (missing sparse entries count as zero)."""
    assert u.D == v.D
    return all(u.f.get(d, 0) == v.f.get(d, 0) for d in u.D)
def add(u,v):
    """Return the vector sum of u and v, touching only keys with an
    explicit sparse entry in either operand."""
    assert u.D == v.D
    touched = u.f.keys() | v.f.keys()
    return Vec(u.D, {d: v[d] + u[d] for d in touched})
def dot(u,v):
    """Return the dot product of u and v over their common domain."""
    assert u.D == v.D
    total = 0
    for d in u.D:
        total += getitem(v, d) * getitem(u, d)
    return total
def scalar_mul(v, alpha):
    """Return the vector alpha * v (every domain element scaled)."""
    scaled = {d: alpha * getitem(v, d) for d in v.D}
    return Vec(v.D, scaled)
def neg(v):
    """Return the additive inverse -v."""
    # Negation is just scaling by -1; reuse the shared helper.
    return scalar_mul(v, -1)
###############################################################################################################################
class Vec:
    """
    A vector has two fields:
    D - the domain (a set)
    f - a dictionary mapping (some) domain elements to field elements
        elements of D not appearing in f are implicitly mapped to zero
    """
    def __init__(self, labels, function):
        assert isinstance(labels, set)
        assert isinstance(function, dict)
        self.D = labels
        self.f = function

    # Operator protocol delegates to the module-level helpers above.
    __getitem__ = getitem
    __setitem__ = setitem
    __neg__ = neg
    __rmul__ = scalar_mul #if left arg of * is primitive, assume it's a scalar

    def __mul__(self,other):
        #If other is a vector, returns the dot product of self and other
        if isinstance(other, Vec):
            return dot(self,other)
        else:
            return NotImplemented # Will cause other.__rmul__(self) to be invoked

    def __truediv__(self,other): # Scalar division
        return (1/other)*self

    __add__ = add

    def __radd__(self, other):
        "Hack to allow sum(...) to work with vectors"
        # sum() starts from 0; returning self here makes 0 + vec == vec.
        if other == 0:
            return self

    def __sub__(a,b):
        "Returns a vector which is the difference of a and b."
        return a+(-b)

    __eq__ = equal

    def is_almost_zero(self):
        """True when the squared magnitude of all entries is below 1e-20;
        False for any entry that is not int/float/complex."""
        s = 0
        for x in self.f.values():
            if isinstance(x, int) or isinstance(x, float):
                s += x*x
            elif isinstance(x, complex):
                y = abs(x)
                s += y*y
            else: return False
        return s < 1e-20

    def __str__(v):
        "pretty-printing"
        # Column widths are sized to fit both the label and its value,
        # using %.3G formatting for numeric entries.
        D_list = sorted(v.D, key=repr)
        numdec = 3
        wd = dict([(k,(1+max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))) if isinstance(v[k], int) or isinstance(v[k], float) else (k,(1+max(len(str(k)), len(str(v[k]))))) for k in D_list])
        s1 = ''.join(['{0:>{1}}'.format(str(k),wd[k]) for k in D_list])
        s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k],wd[k],numdec) if isinstance(v[k], int) or isinstance(v[k], float) else '{0:>{1}}'.format(v[k], wd[k]) for k in D_list])
        return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2

    def __hash__(self):
        "Here we pretend Vecs are immutable so we can form sets of them"
        # Zero entries are skipped so sparse representations that differ
        # only in explicit zeros hash identically.
        h = hash(frozenset(self.D))
        for k,v in sorted(self.f.items(), key = lambda x:repr(x[0])):
            if v != 0:
                h = hash((h, hash(v)))
        return h

    def __repr__(self):
        return "Vec(" + str(self.D) + "," + str(self.f) + ")"

    def copy(self):
        "Don't make a new copy of the domain D"
        return Vec(self.D, self.f.copy())

    def __iter__(self):
        raise TypeError('%r object is not iterable' % self.__class__.__name__)
| true |
6ae5d2ccaafae70148ab565df6efe5b10efaa01b | Python | liwit101/HHU_ProjetcSeminar_5.Semester | /p2_will/dataHander.py | UTF-8 | 551 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 23 22:41:36 2013
@author: yuankunluo
"""
def dataBreaker(dataList):
    """Split raw tweet records into parallel field lists.

    Each record is indexed as: [0] timestamp, [2] space-separated words,
    [3] client, [5] space-separated hashtags.
    """
    times, hashtags, words, clients = [], [], [], []
    for record in dataList:
        times.append(record[0])
        words.extend(record[2].split(" "))
        clients.append(record[3])
        hashtags.extend(record[5].split(" "))
    return {u'times': times, u'hashtags': hashtags,
            u'words': words, 'clients': clients}
| true |
269d04dedd1c7018cd9418c6d8043852658b6abd | Python | nishio/atcoder | /abc177/b.py | UTF-8 | 1,313 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import sys
sys.setrecursionlimit(10**6)
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
MOD = 10 ** 9 + 7
def debug(*x):
    """Print diagnostics to stderr so stdout stays clean for the answer."""
    print(*x, file=sys.stderr)
def solve(S, T):
    """Return the fewest characters of S that must be rewritten so that
    T occurs as a contiguous substring of S."""
    costs = [
        sum(1 for j in range(len(T)) if S[off + j] != T[j])
        for off in range(len(S) - len(T) + 1)
    ]
    return min(costs)
def main():
    """Read S and T from stdin and print the answer.

    NOTE: `input` is rebound at module level to sys.stdin.buffer.readline,
    so S and T are bytes objects here; solve() works on them unchanged.
    """
    # parse input
    S = input().strip()
    T = input().strip()
    print(solve(S, T))
# tests
T1 = """
cabacc
abc
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
1
"""
T2 = """
codeforces
atcoder
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
6
"""
T3 = """
aaa
bbb
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
3
"""
def _test():
    """Run module doctests, then every TEST_* docstring example."""
    import doctest
    doctest.testmod()
    # Global TEST_* strings hold doctest sessions for sample cases.
    g = globals()
    for k in sorted(g):
        if k.startswith("TEST_"):
            doctest.run_docstring_examples(g[k], g, name=k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
    "use in test, use given string as input file"
    import io
    f = io.StringIO(s.strip())
    # Rebind the module-level readers so main() consumes the test string
    # (encoded to bytes, matching sys.stdin.buffer behaviour).
    g = globals()
    g["input"] = lambda: bytes(f.readline(), "ascii")
    g["read"] = lambda: bytes(f.read(), "ascii")
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
| true |
08587957415c6be4390cc06853f6ca994a793671 | Python | boschma2702/ContainerStacking | /main/model/dataclass/terminal.py | UTF-8 | 4,254 | 2.671875 | 3 | [] | no_license | from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple, List, Optional
from main.model.dataclass import tuple_long_replace, StackLocation, Container, StackTierLocation
from main.model.dataclass.block import Block
from main.model.dataclass.stack import Stack
@dataclass(order=True)
class Terminal:
    """Immutable container terminal: a tuple of Blocks plus a stacking
    height limit. Mutating operations return new Terminal instances."""
    __slots__ = ['max_height', 'blocks']
    max_height: int
    blocks: Tuple[Block, ...]
    # cache_hash: Any

    def __init__(self, blocks: Tuple[Block, ...], max_height: int):
        self.blocks = blocks
        self.max_height = max_height

    @classmethod
    def empty_single_stack_block(cls, nr_stacks, max_height) -> Terminal:
        """Terminal of nr_stacks single-stack blocks, all empty."""
        return cls(tuple([Block.empty_single_stack() for i in range(nr_stacks)]), max_height)

    @classmethod
    def empty_bay(cls, nr_bays, max_height) -> Terminal:
        """Terminal of nr_bays empty five-stack bay blocks."""
        return cls(tuple([Block((Stack(()), Stack(()), Stack(()), Stack(()), Stack(())), True) for i in range(nr_bays)]), max_height)

    ########################################################################################
    # Basic abstract, store, retrieve, reshuffle and reveal operations
    ########################################################################################
    def abstract(self) -> Terminal:
        """Canonical form: abstract every block, then sort the blocks."""
        return Terminal(tuple(sorted([block.abstract() for block in self.blocks])), self.max_height)

    def store_container(self, location: StackLocation, container: Container) -> Terminal:
        """Return a new Terminal with container stored at (block, stack)."""
        replacement = self.blocks[location[0]].store_container(location[1], container)
        blocks = tuple_long_replace(self.blocks, location[0], replacement)
        return Terminal(blocks, self.max_height)

    def retrieve_container(self, location: StackLocation) -> Tuple[Terminal, Container]:
        """Return (new Terminal, removed container) for (block, stack)."""
        new_block, container = self.blocks[location[0]].retrieve_container(location[1])
        blocks = tuple_long_replace(self.blocks, location[0], new_block)
        return Terminal(blocks, self.max_height), container

    def reshuffle_container(self, from_location: StackLocation, to_location: StackLocation) -> Terminal:
        """Move a container between stack locations (retrieve then store)."""
        new_term, container = self.retrieve_container(from_location)
        return new_term.store_container(to_location, container)

    def reveal_order(self, containers: Tuple[Container, ...]):
        """Return a Terminal with retrieval order 1..n assigned from the
        given container sequence (keyed by container id)."""
        order_dict = dict([(containers[i][0], i + 1) for i in range(len(containers))])
        return Terminal(tuple([block.reveal_order(order_dict) for block in self.blocks]), self.max_height)

    ########################################################################################
    # Misc util operators
    ########################################################################################
    def nr_blocks(self) -> int:
        """Number of blocks in the terminal."""
        return len(self.blocks)

    def block(self, i: int) -> Block:
        """The i-th block."""
        return self.blocks[i]

    def stack_height(self, stack_location: StackLocation) -> int:
        """Number of containers in the addressed stack."""
        return len(self.blocks[stack_location[0]].stacks[stack_location[1]].containers)

    def container_location(self, container: Container):
        """Linear search for the container id; returns
        (block, stack, tier) or raises RuntimeError if absent."""
        container_id = container[0]
        for block_index in range(len(self.blocks)):
            block = self.blocks[block_index]
            for stack_index in range(len(block.stacks)):
                stack = block.stacks[stack_index]
                for tier_index in range(len(stack.containers)):
                    if container_id == stack.containers[tier_index][0]:
                        return block_index, stack_index, tier_index
        raise RuntimeError("Could not find given container")

    # def containers_above(self, stack_tier_location: StackTierLocation) -> Tuple[Container, ...]:
    #     stack = self.blocks[stack_tier_location[0]].stacks[stack_tier_location[1]]
    #     return stack.containers[stack_tier_location[2] + 1:]

    def blocking_containers(self, stack_tier_location: StackTierLocation) \
            -> List[Container]:
        """Containers stacked above the addressed (stack, tier) slot."""
        return self.blocks[stack_tier_location[0]].blocking_containers(stack_tier_location[1:])

    def __repr__(self):
        split = "*" * 20 + "\n"
        return """\n{split}\n{blocks}\n{split}\n""".format(split=split, blocks="**\n".join([str(block) for block in self.blocks]))

    def __hash__(self):
        # Hash only the blocks; max_height is excluded, mirroring how
        # equality-relevant state is carried by the block tuple.
        return hash(self.blocks)
| true |
dfb92bb2387cb118db115e441e1cc067848a04ef | Python | jsleep/cs231n-2017 | /assignment1/cs231n/classifiers/linear_svm.py | UTF-8 | 2,826 | 3.625 | 4 | [] | no_license | import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
  """
  Structured SVM loss function, naive implementation (with loops).

  Inputs have dimension D, there are C classes, and we operate on minibatches
  of N examples.

  Inputs:
  - W: A numpy array of shape (D, C) containing weights.
  - X: A numpy array of shape (N, D) containing a minibatch of data.
  - y: A numpy array of shape (N,) containing training labels; y[i] = c means
    that X[i] has label c, where 0 <= c < C.
  - reg: (float) regularization strength

  Returns a tuple of:
  - loss as single float
  - gradient with respect to weights W; an array of same shape as W
  """
  dW = np.zeros(W.shape) # initialize the gradient as zero

  # compute the loss and the gradient
  num_classes = W.shape[1]
  num_train = X.shape[0]
  loss = 0.0
  # Built-in range replaces the Python-2 xrange shim (past.builtins);
  # behaviour is identical on Python 3 and the extra dependency goes away.
  for i in range(num_train):
    scores = X[i].dot(W)
    correct_class_score = scores[y[i]]
    for j in range(num_classes):
      if j == y[i]:
        continue
      margin = scores[j] - correct_class_score + 1 # note delta = 1
      if margin > 0:
        # Margin violated: it adds to the loss, pushes the wrong-class
        # column toward X[i] and the true-class column away from it.
        loss += margin
        dW[:,j] += X[i]
        dW[:, y[i]] -= X[i]

  # Right now the loss is a sum over all training examples, but we want it
  # to be an average instead so we divide by num_train.
  loss /= num_train

  # Add regularization to the loss.
  loss += reg * np.sum(W * W)

  # Average our gradient across the batch and add gradient of regularization term.
  dW = dW/num_train + 2*reg*W

  return loss, dW
def svm_loss_vectorized(W, X, y, reg):
  """
  Structured SVM loss function, vectorized implementation.

  Inputs and outputs are the same as svm_loss_naive.
  """
  num_train = X.shape[0]
  rows = np.arange(num_train)

  # Score every example against every class in one matrix product.
  scores = X.dot(W)
  correct = scores[rows, y].reshape(num_train, 1)

  # Hinge loss with delta = 1: keep positive margins only, and the
  # correct class never contributes to its own loss.
  margins = np.maximum(0, scores - correct + 1)
  margins[rows, y] = 0
  loss = np.sum(margins) / num_train + reg * np.sum(W * W)

  # Gradient: each violated margin adds X[i] to the offending class
  # column and subtracts it once from the true-class column.
  indicator = np.zeros_like(margins)
  indicator[margins > 0] = 1
  indicator[rows, y] = -np.sum(indicator, axis=1)
  dW = X.T.dot(indicator) / num_train + 2 * reg * W

  return loss, dW
| true |
a0bdac4ac528e3f2f77b100a810daddae799a674 | Python | yoshichulo/RubikSolver | /rubiktools.py | UTF-8 | 2,770 | 3.453125 | 3 | [] | no_license | from PIL import Image, ImageDraw
colors = {
0: '#FF0000', # RED
1: '#0000FF', # BLUE
2: '#FFFF00', # YELLOW
3: '#008000', # GREEN
4: '#FF8C00', # ORANGE
5: '#FFFFFF' # WHITE
}
def show_cube(cube):
    ''' This function shows the actual state of all the Cube faces.

    Renders the six faces in an unfolded cross layout (BACK on top,
    LEFT/DOWN/RIGHT/UP across the middle, FRONT at the bottom) and opens
    the resulting image with the default viewer.
    '''
    n = len(cube.BACK)          # face dimension (n x n cube)
    square_size = 25            # pixel size of each sticker
    padding = 5                 # gap between faces, in pixels
    face_size = n*square_size
    # 4 faces across, 3 faces down, plus padding between and around them.
    width, height = (face_size*4 + padding*5, face_size*3 + padding*4)
    cube_img = Image.new('RGBA', (width, height), (255,255,255,0))
    d = ImageDraw.Draw(cube_img)

    draw_face(d, cube.LEFT, square_size, padding, padding*2 + face_size)
    draw_face(d, cube.DOWN, square_size, padding*2 + face_size, padding*2 + face_size)
    draw_face(d, cube.RIGHT, square_size, padding*3 + face_size*2, padding*2 + face_size)
    draw_face(d, cube.UP, square_size, padding*4 + face_size*3, padding*2 + face_size)
    draw_face(d, cube.BACK, square_size, padding*2 + face_size, padding)
    draw_face(d, cube.FRONT, square_size, padding*2 + face_size, padding*3 + face_size*2)

    cube_img.show()
def draw_face(drawer, face, square_size, x, y):
    '''
    Function that draws the cube square by square
    - drawer: ImageDraw object, linked with the Image that you want to edit
    - face: face of the Cube object (Cube.BACK, Cube.FRONT, Cube.LEFT...)
    - square_size: the size in px of each piece of the cube
    - x: initial x position for drawing
    - y: initial y position for drawing
    '''
    # Walk the face row by row, mapping each colour code through the
    # module-level `colors` table and outlining every sticker in black.
    y1 = y ; y2 = y1 + square_size
    for row in face:
        x1 = x ; x2 = x1+ square_size
        for n in row:
            drawer.rectangle((x1, y1, x2, y2), fill=colors[n], outline='#000000')
            x1 += square_size ; x2 += square_size
        y1 += square_size ; y2 += square_size
def copy_column(face, position, column):
    '''
    Overwrite one column of a face, in place, and return the face.
    - face: face of the Cube object (Cube.BACK, Cube.FRONT, Cube.LEFT...)
    - position: index of the column to overwrite (0...N-1)
    - column: values to write, one per row of the face
    '''
    for row_index in range(len(face)):
        face[row_index][position] = column[row_index]
    return face
def copy_row(face, position, row):
    '''
    Overwrite one row of a face, in place, and return the face.
    - face: face of the Cube object (Cube.BACK, Cube.FRONT, Cube.LEFT...)
    - position: index of the row to overwrite (0...N-1)
    - row: values to write, one per column of the face
    '''
    # Iterate over the target row's own length: the original used
    # len(face) (the number of rows), which is only correct for square
    # faces and silently truncated the copy on wider ones.
    for col_index in range(len(face[position])):
        face[position][col_index] = row[col_index]
    return face
4767476fe56215c49e1f4b4b7055640ee3aa7860 | Python | westgate458/LeetCode | /P0593.py | UTF-8 | 512 | 2.90625 | 3 | [] | no_license | class Solution(object):
def validSquare(self, p1, p2, p3, p4):
"""
:type p1: List[int]
:type p2: List[int]
:type p3: List[int]
:type p4: List[int]
:rtype: bool
"""
# check distances between corners
def l(p1, p2): return (p1[1] - p2[1])**2 + (p1[0] - p2[0])**2
return len(set([tuple(p1),tuple(p2),tuple(p3),tuple(p4)]))==4 and \
len(set([l(p1, p2),l(p1, p3),l(p1, p4),l(p2, p3),l(p2, p4),l(p3, p4)]))==2 | true |
c6739fec30f14af855849b1f09cce60e259a536a | Python | messah/Workspace | /python/Lab çalışmaları/lab2 calısma/soru5.py | ISO-8859-9 | 219 | 2.765625 | 3 | [] | no_license | uzun=input("uzun kenar gir:")
ksa=input("ksa kenar gir:")
pi=3.14
dairealan=(pi*ksa*ksa)/4.0-(ksa**2/2.0)
dikdrtgenalan=ksa*uzun
print "toplam alan=",dairealan+dikdrtgenalan,"dr."
| true |
cd6881d8d17d212177b4031495bc73f8926c9160 | Python | mandar-degvekar/DataEngineeringGCP | /Utility.py | UTF-8 | 215 | 3.328125 | 3 | [] | no_license | class util:
def splitandlist(input):
l = []
for i in input:
try:
l.append(int(i))
except:
print(i + " is not a number")
return l | true |
6eb5cb0c208022350e4de33e4e9a311131f2b321 | Python | Auguste0904/CAESAR | /src/repeating_key_XOR.py | UTF-8 | 1,653 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python3
##
## EPITECH PROJECT, 2020
## B-SEC-500-PAR-5-1-caesar-lucas.moritel
## File description:
## repeating_key_XOR.py
##
import os
import sys
import codecs
def error_gestion_arg(argv):
    """Validate the command line: exactly one argument, which must be an
    existing file. Prints an error and exits with status 84 otherwise."""
    if len(argv) != 2:
        print("Error: Invalid number of arguments")
        exit(84)
    # `not ...` replaces the original `== False` comparison anti-idiom.
    if not os.path.isfile(argv[1]):
        print("Error: The argument is not a file")
        exit(84)
def repeating_key_xor(key, text):
    """XOR every byte of *text* with *key*, repeating the key cyclically."""
    return bytes(byte ^ key[pos % len(key)] for pos, byte in enumerate(text))
def main():
    """Read a hex-encoded key and ciphertext from the file given in
    argv[1], XOR-decrypt, and print the result as uppercase hex.

    File format: line 1 = hex key, line 2 = hex text. Any validation
    failure prints an error and exits with status 84.
    """
    error_gestion_arg(sys.argv)
    file = open(sys.argv[1], "r")
    encoded_key = file.readline().strip('\n')
    encoded_text = file.readline().strip('\n')
    if len(encoded_key) == 0:
        print("Error: There is no key in your file")
        exit(84)
    if len(encoded_text) == 0:
        print("Error: There is no text to decrypt in your file")
        exit(84)
    # A hex string must have an even number of digits to form whole bytes.
    size_key = len(encoded_key) % 2
    if size_key != 0:
        print("Error: Length of the encoded key content is not even but odd")
        exit(84)
    # NOTE(review): this check is unreachable - both empty cases were
    # already handled above.
    if encoded_text == '' or encoded_key == '':
        print("Error: The encoded key or the encoded tesxt is missing")
        exit(84)
    # Decode the hex strings into raw bytes before XOR-ing.
    decoded_text = ''.join(encoded_text).encode()
    decoded_key = ''.join(encoded_key).encode()
    decoded_text = codecs.decode(decoded_text, 'hex')
    decoded_key = codecs.decode(decoded_key, 'hex')
    ciphertext = repeating_key_xor(decoded_key, decoded_text)
    print(ciphertext.hex().upper())
| true |
7e2b0ecad14e730c1199acb78af78bcc815d485a | Python | Sketchjar/url-stacking | /cnn.py | UTF-8 | 2,913 | 2.734375 | 3 | [] | no_license | from tensorflow.keras.layers import Input, ELU, Embedding, BatchNormalization, Convolution1D, MaxPooling1D, concatenate, Dense, Dropout, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
class cnn(Model):
    """Character-level CNN for URL classification (binary output).

    Embeds integer-encoded characters, runs four parallel 1-D
    convolutions with kernel sizes 2-5, concatenates their pooled sums,
    and pushes the result through three dense layers to one sigmoid unit.
    """
    def __init__(self, max_len=80, emb_dim=32, max_vocab_len=128, W_reg=regularizers.l2(1e-4)):
        super(cnn, self).__init__(name='cnn_model')
        self.max_vocab_len = max_vocab_len
        self.emb_dim = emb_dim
        self.max_len = max_len
        self.W_reg = W_reg
        # Embedding layer
        self.emb = Embedding(input_dim=max_vocab_len, output_dim=emb_dim, input_length=max_len, embeddings_regularizer=W_reg)
        self.emb_drop = Dropout(0.2)
        self.h1 = Dense(1024)
        self.h2 = Dense(256)
        self.h3 = Dense(64)
        # NOTE(review): one BatchNormalization/ELU/Dropout instance is
        # shared across layers of different widths below - reusing a
        # BatchNormalization layer on different shapes typically fails in
        # Keras; confirm this is intended.
        self.bn = BatchNormalization()
        self.el = ELU()
        self.dr = Dropout(0.5)
        # Output layer (last fully connected layer)
        # layer that decides the final class
        self.output_layer = Dense(1, activation='sigmoid', name='cnn_output')

    #def call(self, inputs=Input(shape=(80,), dtype='int32', name='cnn_input'), training=None, mask=None):
    def call(self, inputs, training=None, mask=None):
        """Forward pass: embed, multi-kernel convolve, then classify."""
        print('##### input: ', inputs)
        x = self.emb(inputs)
        x = self.emb_drop(x)
        def get_conv_layer(emb, kernel_size=5, filters=256):
            # One conv branch: conv -> ELU -> pool -> sum over time -> dropout.
            def sum_1d(X):
                return K.sum(X, axis=1)
            # Conv layer
            conv = Convolution1D(kernel_size=kernel_size, filters=filters, padding='same')(emb)
            conv = ELU()(conv)
            conv = MaxPooling1D(5)(conv)
            conv = Lambda(sum_1d, output_shape=(filters,))(conv)
            conv = Dropout(0.5)(conv)
            return conv
        # Multiple Conv Layers
        # convolutions with different kernel sizes
        conv1 = get_conv_layer(x, kernel_size=2, filters=256)
        conv2 = get_conv_layer(x, kernel_size=3, filters=256)
        conv3 = get_conv_layer(x, kernel_size=4, filters=256)
        conv4 = get_conv_layer(x, kernel_size=5, filters=256)
        # Fully Connected Layers
        # merge the branch outputs above
        merged = concatenate([conv1, conv2, conv3, conv4], axis=1)
        print('########### merges: ', merged)
        hidden1 = self.h1(merged)
        hidden1 = self.el(hidden1)
        print('########### hidden1: ', hidden1)
        hidden1 = self.bn(hidden1)
        print('########### hidden1: ', hidden1)
        hidden1 = self.dr(hidden1)
        hidden2 = self.h2(hidden1)
        hidden2 = self.el(hidden2)
        hidden2 = self.bn(hidden2)
        hidden2 = self.dr(hidden2)
        hidden3 = self.h3(hidden2)
        hidden3 = self.el(hidden3)
        hidden3 = self.bn(hidden3)
        hidden3 = self.dr(hidden3)
        return self.output_layer(hidden3)
| true |
938243882f6413ea247a308bb8480c05df32ae0f | Python | CyricV/SumofSuffixes | /sos.py | UTF-8 | 1,346 | 3.21875 | 3 | [] | no_license |
def sos(arrayB, arrayC):
    """Return [best, s, t] where sum(arrayB[s:]) + sum(arrayC[t:]) is the
    suffix-pair sum strictly closest to zero.

    The baseline candidate is the pair of one-element suffixes
    (s = len(arrayB)-1, t = len(arrayC)-1); ties keep the baseline.
    """
    zeroest = arrayB[len(arrayB) - 1] + arrayC[len(arrayC) - 1]
    # Fix: initialise the winning indices alongside the baseline value.
    # The original left finalS/finalT unassigned and raised NameError
    # whenever no candidate beat the baseline.
    finalS = len(arrayB) - 1
    finalT = len(arrayC) - 1
    for s in range(len(arrayB)):
        for t in range(len(arrayC)):
            # Inline equivalents of sumArrays() and isCloserToZero().
            newSum = sum(arrayB[s:]) + sum(arrayC[t:])
            if abs(newSum) < abs(zeroest):
                zeroest = newSum
                finalS = s
                finalT = t
    return [zeroest, finalS, finalT]
def ssos(arrayB,arrayC):
    """Faster variant of sos(): returns only the suffix-pair sum closest
    to zero (no indices).

    NOTE(review): fuseArrays() rewrites arrayB and arrayC in place, so the
    caller's lists are destroyed. Also, range(len-2) skips the final
    adjacent pair in the merged list - confirm that is intentional.
    """
    zeroest = arrayB[len(arrayB)-1]+arrayC[(len(arrayC)-1)]
    combineArray = fuseArrays(arrayB,arrayC)
    # After the sort, the best b-suffix/c-suffix pairing must be adjacent
    # with opposite tags, so only neighbouring entries are compared.
    for i in range(len(combineArray)-2):
        if (combineArray[i][1]!=combineArray[i+1][1]):
            if (combineArray[i][1]=='b'):
                if isCloserToZero(combineArray[i][0]-combineArray[i+1][0],zeroest):
                    zeroest = combineArray[i][0]-combineArray[i+1][0]
            if (combineArray[i][1]=='c'):
                # c entries are stored negated, so subtracting recovers b+c.
                if isCloserToZero(combineArray[i+1][0]-combineArray[i][0],zeroest):
                    zeroest = combineArray[i+1][0]-combineArray[i][0]
    return zeroest
def fuseArrays(arrayB, arrayC):
    """Replace each element of both arrays, in place, with its tagged
    suffix sum ([sum, 'b'] for arrayB, [-sum, 'c'] for arrayC), then
    return the two arrays merged and sorted in descending order."""
    running = 0
    for idx in range(len(arrayB) - 1, -1, -1):
        running += arrayB[idx]
        arrayB[idx] = [running, 'b']
    running = 0
    for idx in range(len(arrayC) - 1, -1, -1):
        running += arrayC[idx]
        arrayC[idx] = [-running, 'c']
    merged = arrayB + arrayC
    merged.sort(reverse=True)
    return merged
def sumArrays(arrayA, arrayB):
    """Return the combined sum of every element in both arrays."""
    return sum(arrayA + arrayB)
# True if the first arg is closer to zero, False if the second arg is closer to zero or they are equal
def isCloserToZero(t, f):
    """Return True when |t| < |f|, i.e. t is strictly nearer to zero."""
    return abs(t) < abs(f)
| true |
16c410001ef0083009f98ce8c4fbf30c2ee35075 | Python | DmitriiDes/automate_updating_catalog_information | /health_check.py | UTF-8 | 1,647 | 2.796875 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/env python3
import psutil
import emails
import socket
import time
from ipaddress import ip_address
def check_sys_health():
    """Sample current system metrics.

    Returns a dict with:
    - cpu_usage: CPU utilisation %, measured over a 1-second interval
    - available_disk_space: free space on '/' as a percentage of total
    - available_memory: available RAM in MiB
    - ip_localhost: the address "localhost" resolves to
    """
    sys_health = {
        "cpu_usage" : psutil.cpu_percent(1),
        "available_disk_space" : psutil.disk_usage('/').free / psutil.disk_usage('/').total * 100,
        "available_memory" : psutil.virtual_memory().available / (1024.0 ** 2),
        "ip_localhost" : ip_address(socket.gethostbyname("localhost"))
    }
    return sys_health
def monitor_sys_health(sender, recipient):
    """Check system health once and email an alert if any threshold trips.

    Thresholds: CPU > 80%, free disk < 20%, free RAM < 500 MB, or
    localhost not resolving to 127.0.0.1.

    NOTE(review): each check overwrites subject_line, so only the LAST
    failing check is reported; starttime is assigned but never used.
    """
    subject_line = ""
    email_body = "Please check your system and resolve the issue as soon as possible."
    starttime = time.time()
    sys_health = check_sys_health()
    if sys_health["cpu_usage"] > 80:
        subject_line = "Error - CPU usage is over 80%"
    if sys_health["available_disk_space"] < 20:
        subject_line = "Error - Available disk space is less than 20%"
    if sys_health["available_memory"] < 500:
        subject_line = "Error - Available memory is less than 500MB"
    if sys_health["ip_localhost"] != ip_address("127.0.0.1"):
        subject_line = "Error - localhost cannot be resolved to 127.0.0.1"
    # An empty subject means every check passed - send nothing.
    if subject_line:
        message = emails.generate_email(sender, recipient, subject_line, email_body)
        emails.send_email(sender, message)
def main():
    """Prompt for the username and run one health check/alert cycle."""
    sender = "automation@example.com"
    username = input("Paste your username: ")
    recipient = username + "@example.com" #Replace username with the username given in the Connection Details Panel on the right hand side
    monitor_sys_health(sender, recipient)
if __name__ == '__main__':
main()
| true |
9f73207381b02cd90648d7dfa36bc54c1f0f96bd | Python | antonioml97/DAI-Desarrollo-de-Aplicaciones-para-Internet | /ejercicios/ejercicio1.py | UTF-8 | 615 | 3.875 | 4 | [] | no_license | import random
numero_a_adivinar=random.randint(1, 100)
print(numero_a_adivinar)
print("Estoy pensado un numero... Adivinalo esta entre 1 y 100")
numero_usuario=int(input());
for i in range(0,10):
if numero_a_adivinar == numero_usuario :
print("Lo has adivinado! Es " + str(numero_usuario) )
break
elif numero_a_adivinar > numero_usuario :
print("El numero es más grande")
else:
print("El numero es más pequeño")
if i == 9:
print("Ya no te quendas máss intentos")
break
print("Dime otro número")
numero_usuario=int(input()) | true |
783d1b885c6b091986f03371b0a37c8b520e2820 | Python | johnnoone/cooperate | /cooperate/concurrency.py | UTF-8 | 671 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | import math
__all__ = ['Concurrency']
class Concurrency:
    """Describes how many items of a collection to process at once.

    At most one of ``size`` (an absolute count) or ``part`` (a percentage)
    may be given; with neither, the whole collection forms one batch.
    """

    def __init__(self, *, size=None, part=None):
        if size and part:
            raise ValueError('size and part are mutually exclusive')
        self.size = size
        self.part = part

    def batch(self, collection):
        """Return the batch size to use for *collection*."""
        if self.size:
            return self.size
        total = len(collection)
        if self.part:
            return math.ceil(total / 100 * self.part)
        return total

    def __repr__(self):
        if self.size:
            return f'<Concurrency(size={self.size!r})>'
        if self.part:
            return f'<Concurrency(part={self.part!r})>'
        return '<Concurrency>'
| true |
c334dae2dba5b099f8e2efaf687aa72aa11ad936 | Python | eshthakkar/coding_challenges | /sliding_window.py | UTF-8 | 1,278 | 3.265625 | 3 | [] | no_license | from queue import *
# make it for n seconds
class WebHits(object):
    """Sliding-window hit counter over the last n seconds.

    NOTE(review): this class is work-in-progress pseudocode and does NOT
    parse as Python: 'last_n-1th_hit_count' is not a valid identifier,
    reset() is defined without self yet called as a method, and
    epoch_time() is never defined anywhere. It must be rewritten before
    it can run.
    """
    def __init__(self, n=300):
        self.queue = Queue(maxsize=n - 1)
        self.last_second = 0
        self.sec_hits_count = 0
        self.last_n-1th_hit_count = 0

    def record_hit(self):
        """ Record the hit for the corresponding time whenever there is a visitor on the webpage"""
        self.reset()
        self.sec_hits_count += 1

    def get_last_n_seconds_hit_count(self):
        """ Get the web hit count for the last n seconds"""
        self.reset()
        return self.last_n-1th_hit_count + self.sec_hits_count

    def reset():
        """ Reset the appropriate variables to the most updated values
            before recording a hit and giving out last 5 min hit count"""
        # Advance one second at a time until the bookkeeping catches up
        # with the current wall-clock second, rotating the per-second
        # counts through the queue.
        while epoch_time() != self.last_second:
            oldest_item = self.queue.get()
            self.queue.put(self.sec_hits_count)
            self.last_n-1th_hit_count = self.last_n-1th_hit_count - oldest_item + self.sec_hits_count
            self.sec_hits_count = 0
            self.last_second += 1
# #(time_in_seconds, "type of function", expected value)
# [
# (0.1, "hit", 0),
# (0.2, "hit", 0),
# (0.22, "count", 2),
# (1.2, "hit", 0),
# (1.5, "count", 0)
# ] | true |
cf323ac3fa388a6f8e3a74146f06622b99f7dd96 | Python | margarita-v/PyChain | /3_consensus_and_mining/merkle_tree/another_solve_for_python2.py | UTF-8 | 1,495 | 3.125 | 3 | [] | no_license | # http://pythonfiddle.com/merkle-root-bitcoin/
import hashlib
# Hash pairs of items recursively until a single value is obtained
def merkle(hashList):
    """Reduce a list of hex tx hashes to the Merkle root.

    Pairs are hashed left-to-right; an odd trailing element is paired
    with itself, matching Bitcoin's Merkle-tree convention.
    """
    if len(hashList) == 1:
        return hashList[0]
    parents = [hash2(left, right)
               for left, right in zip(hashList[::2], hashList[1::2])]
    if len(hashList) % 2 == 1:
        parents.append(hash2(hashList[-1], hashList[-1]))
    return merkle(parents)
def hash2(a, b):
    # Reverse inputs before and after hashing
    # due to big-endian / little-endian nonsense
    # NOTE: this is Python-2-only code - str.decode('hex') and the
    # print statements below do not exist/parse under Python 3.
    a1 = a.decode('hex')
    a11 = a1[::-1]
    # print a11.encode('hex')
    b1 = b.decode('hex')[::-1]
    #print b1.encode('hex')
    concat = a11+b1
    #print concat.encode('hex')
    # Bitcoin hashes pairs with double SHA-256.
    concat2 = hashlib.sha256(concat).digest()
    print "hash1:" + concat2.encode('hex')
    h = hashlib.sha256(concat2).digest()
    print "hash2:" + h[::-1].encode('hex')
    print ''
    # Result is byte-reversed back into display (big-endian) order.
    return h[::-1].encode('hex')
# https://blockexplorer.com/rawblock/000000000000030de89e7729d5785c4730839b6e16ea9fb686a54818d3860a8d
txHashes = [
'5eab9fc7bda0017450f05232e8e219df936a4dd787b8e8706622074d5bee9222',
'fd7cbc5db77bd282ea281a02d05b6b3dd0ae9f21659ba23d362aa2b774cdfef1',
'3a0d89d8e0ccc13bfc11af67fe8297b37903415ff9d194e594fb91b985adec13',
'8aa115ab1511601a86a627e3ddd0f2dba53f068d97d098be53f656c9d6495dd6'
]
print merkle(txHashes)
| true |
ae694b971b90d44ba6993731e747f4204c2d02e3 | Python | HarkerJC/python-extend | /package/class.py | UTF-8 | 1,070 | 3.65625 | 4 | [] | no_license | class People:
#base property
name=""
age=0
#private property other object can not visit
__weight=0
def __init__(self,name,age,weight):
self.name=name
self.age=age
self.__weight+weight
pass
#private function
def __run(self):
print("run")
class Person(People):
    """A People subclass that adds a grade/class (`grage`) field."""
    grage=''
    def __init__(self,name,age,weight,grage):
        # Fix: the original called People.__init__(name, age, weight)
        # without passing the instance, so `self` received the name and
        # the call failed with a missing-argument TypeError.
        People.__init__(self,name,age,weight)
        self.grage=grage
'''
__init__ : 构造函数,在生成对象时调用
__del__ : 析构函数,释放对象时使用
__repr__ : 打印,转换
__setitem__ : 按照索引赋值
__getitem__: 按照索引获取值
__len__: 获得长度
__cmp__: 比较运算
__call__: 函数调用
__add__: 加运算
__sub__: 减运算
__mul__: 乘运算
__div__: 除运算
__mod__: 求余运算
__pow__: 乘方
'''
'''
运算符重载
def __add__(self,other):
return self.a+other.a
def __str__(self):
return '这个人的名字是%s,已经有%d岁了!' % (self.name, self.age)
'''
| true |
fc7c028c2e25f12f251b1aa86b9f7d9a8f3a4b48 | Python | nanfeng-dada/leetcode_note | /easy/203.removeElements.py | UTF-8 | 843 | 3.765625 | 4 | [
"MIT"
] | permissive | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Iterative removal.  A dummy node in front of the real head lets every
# deletion (including deleting the head itself) go through the same code
# path: `prev` always points at the predecessor of the node under test.
class Solution:
    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """Remove every node whose value equals `val`; return the new head."""
        dummy = ListNode(None)
        dummy.next = head
        prev = dummy
        while prev.next is not None:
            node = prev.next
            if node.val == val:
                # unlink `node`; keep `prev` in place so the new successor
                # is examined on the next iteration
                prev.next = node.next
            else:
                prev = node
        return dummy.next
# Recursive variant: strip `val` nodes from the tail first, then decide
# whether the current head itself survives.
class Solution1:
    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """Remove every node whose value equals `val`; return the new head."""
        if head is None:
            return None
        cleaned_tail = self.removeElements(head.next, val)
        head.next = cleaned_tail
        return cleaned_tail if head.val == val else head
| true |
3f973c4c886a750e007da7589f9e201e43d24da7 | Python | EDGSCOUT/domain_adapt_segm | /utils/metrics.py | UTF-8 | 1,658 | 2.75 | 3 | [
"MIT"
] | permissive | from collections import OrderedDict
import torch
import torch.nn as nn
def softIoU(out, target, e=1e-6):
    """Differentiable (soft) intersection-over-union of the foreground class.

    `out` holds raw two-channel logits; channel 1 is treated as foreground.
    `target` is the binary ground-truth mask.  `e` guards against a zero
    denominator.
    """
    probs = nn.Softmax(dim=1)(out)
    foreground = probs[:, 1, :, :]
    truth = target.float()
    intersection = (foreground * truth).sum()
    union = (foreground + truth - foreground * truth).sum() + e
    return (intersection / union).mean()
class softIoULoss(nn.Module):
    """Segmentation loss defined as 1 - soft IoU (see `softIoU`)."""

    def __init__(self, e=1e-6):
        """`e` is the numerical-stability epsilon forwarded to softIoU."""
        super(softIoULoss, self).__init__()
        self.e = e

    def forward(self, inputs, targets):
        """Return 1 - softIoU(inputs, targets)."""
        return 1.0 - softIoU(inputs, targets, self.e)
def update_cm(cm, y_pred, y_true):
    """Accumulate predictions into the confusion matrix `cm` (in place).

    `y_pred` holds per-class scores (argmax over dim 1 gives the predicted
    class); `y_true` holds integer labels.  cm[i, j] counts samples
    predicted as class i whose true class is j.
    """
    predicted = torch.argmax(y_pred, 1)
    n_classes = cm.shape[0]
    for pred_cls in range(n_classes):
        for true_cls in range(n_classes):
            matches = (predicted == pred_cls) * (y_true == true_cls)
            cm[pred_cls, true_cls] += matches.sum().float()
    return cm
def compute_metrics(cm, ret_metrics, eps=1e-8):
    """Derive accuracy and per-class IoU from a 2x2 confusion matrix.

    Fills `ret_metrics` in place with 'accuracy', 'iou_perclass_0',
    'iou_perclass_1' and mean 'iou', then returns it.  `eps` avoids a
    division by zero for empty classes.
    """
    true_pos = cm.diag()
    false_pos = cm.sum(1) - true_pos
    false_neg = cm.sum(0) - true_pos
    ret_metrics['accuracy'] = true_pos.sum() / cm.sum()
    iou = true_pos / (true_pos + false_pos + false_neg + eps)
    ret_metrics['iou_perclass_0'] = iou[0]
    ret_metrics['iou_perclass_1'] = iou[1]
    ret_metrics['iou'] = iou.mean()
    return ret_metrics
def print_metrics(init, metrics, time=None):
    """Pretty-print a metrics dict on one line, prefixed by `init`.

    Each value may be a sequence (every element is formatted) or a scalar.
    If `time` is given it is appended as seconds.
    """
    out_str = init
    metrics = OrderedDict(metrics)
    for k in metrics.keys():
        try:
            # sequence value: one '{:.3f}' slot per element
            out_str += (k + ': {:.3f} | ' * len(metrics[k])).format(*metrics[k])
        except TypeError:
            # FIX: narrowed from a bare `except:`.  A scalar has no len()
            # and cannot be unpacked, which raises TypeError; anything else
            # should propagate instead of being silently reformatted.
            out_str += (k + ': {:.3f} | ').format(metrics[k])
    if time is not None:
        out_str += ("time {:.3f}s").format(time)
    print(out_str)
| true |
fc138649a9302760ceda98eecaf87440e3f76552 | Python | pulkitgupta317/Python | /ConstructionOfATowerWithoutSorting.py | UTF-8 | 5,260 | 3.390625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 22:39:41 2020
@author: pulkit
"""
from os import path
# Constants
inputFile = 'inputPS3.txt'
outputFile = 'outputPS3.txt'
# Queue class for handling operations of a queue
class Queue:
    """FIFO queue backed by a Python list.

    NOTE(review): `present()` is not a pure membership test -- it rotates
    the queue while searching and leaves the sought item at the FRONT when
    found.  `diskTower()` relies on that rotation, so do not replace it
    with a non-mutating `in` check.
    """
    def __init__(self):
        self.queue = []
    # append an element in the queue at the end
    def enqueue(self, item):
        self.queue.append(item)
    # remove an element from the front of the queue
    def dequeue(self):
        if len(self.queue) < 1:
            return None
        return self.queue.pop(0)
    # return whether the queue is empty or not
    def empty(self):
        return not (len(self.queue))
    # check if the item is present in the queue or not. dequeue and enqueue the elements till either the element found
    # or the whole queue is traversed
    def present(self, item):
        if not self.empty():
            i = 0
            # rotate at most len(queue) times; if the item is found it is
            # now at the front, otherwise the queue is back in its
            # original order after a full rotation
            while i < len(self.queue):
                if self.queue[0] == item:
                    return True
                else:
                    self.enqueue(self.dequeue())
                    i += 1
        return False
    # generate string from the queue elements
    def __str__(self):
        return ' '.join([str(i) for i in self.queue])
# Construct a tower in N days with N disks
def diskTower(disks, countOfDays):
    """Group arriving disks into per-day placement queues.

    `disks` lists disk sizes in arrival order; `countOfDays` is N (and the
    size of the largest disk).  Returns a Queue of N Queues: entry i holds
    the consecutive run of disks that can be placed on day i+1 (largest
    still-unplaced size first), or is empty when nothing can be placed.
    NOTE(review): `countOfDays` is decremented locally and acts as "the
    largest size not yet placed"; the algorithm depends on
    Queue.present() rotating buffer_list so the sought size is at the
    front before dequeue() -- see the Queue class.
    """
    buffer_list = Queue()
    out_list = Queue()
    # traverse through the disks
    for disk_size in disks:
        # add the disk to the list
        buffer_list.enqueue(disk_size)
        # if we are on the biggest disk
        if disk_size == countOfDays:
            temp_list = Queue()
            # run the loop while the countOfDays ( biggest disk ) present in the buffer list
            while buffer_list.present(countOfDays):
                # dequeue the disk (present() rotated it to the front)
                # and enqueue it in the temp list
                buffer_list.dequeue()
                temp_list.enqueue(countOfDays)
                # reduce the biggest disk by 1 since it has been processed
                countOfDays -= 1
            out_list.enqueue(temp_list)
        else:
            out_list.enqueue(Queue())
    return out_list
# DiskTowerModel class is used for handling and validating the input data
class DiskTowerModel:
    """Holds the parsed problem input: the day count N and the disk sizes.

    After `setValues`, either `error` is False and `countOfDays` /
    `discSizes` are populated, or `error` is True and `errorMessage`
    explains the failure.
    """

    def __init__(self):
        self.countOfDays = None   # N: number of days (= number of disks)
        self.discSizes = None     # list of int disk sizes, each in 1..N
        self.error = None         # None until setValues() has run
        self.errorMessage = None

    def set(self, countOfDays, discSizes):
        """Record a successfully validated input."""
        self.discSizes = discSizes
        self.countOfDays = countOfDays
        self.error = False

    def setErrorMessage(self, message):
        """Record a validation failure."""
        self.error = True
        self.errorMessage = message

    def setValues(self, lines):
        """Validate the raw input lines and populate the model.

        Expected format: line 1 is N; line 2 is at least N space-separated
        disk sizes in the range 1..N (extra sizes beyond N are discarded).
        Flattened from the original nested if/else into guard clauses.
        """
        # At least 2 lines should be there to process the input
        if lines is None or len(lines) < 2:
            self.setErrorMessage('Please enter 2 lines')
            return
        countOfDays = lines[0].strip()
        # First line should be a digit only
        if not countOfDays.isdigit():
            self.setErrorMessage('Invalid count of days')
            return
        countOfDays = int(countOfDays)
        try:
            # 2nd line should have integer values ranging from 1 to N
            discSizes = list(map(int, lines[1].strip().split()))
        except ValueError:
            # FIX: narrowed from a bare `except:`; only the int()
            # conversion can fail here
            self.setErrorMessage('Invalid disc sizes')
            return
        if not all(0 < ele <= countOfDays for ele in discSizes):
            self.setErrorMessage('One or many disc are not in the range')
        elif len(discSizes) < countOfDays:
            self.setErrorMessage('Disc size mentioned are less than N')
        else:
            # keep only the first countOfDays sizes (drop any extras)
            self.set(countOfDays, discSizes[:countOfDays])
# Write the data into the output file
def writeIntoFile(data):
    """Write `data` to the module-level `outputFile`, best-effort.

    FIX: the original used a bare `except:` (which also swallows
    KeyboardInterrupt/SystemExit) and could leak the file handle when
    write() failed; `with` guarantees the handle is closed.
    """
    try:
        with open(outputFile, "w") as f:
            f.write(data)
    except Exception:
        # keep the original best-effort behaviour: report, don't raise
        print('Error occurred in writing the data into the file')
def readFromFile():
    """Read the module-level `inputFile` and return a populated DiskTowerModel."""
    model = DiskTowerModel()
    # guard clause: bail out early when there is nothing to read
    if not path.exists(inputFile):
        model.setErrorMessage('File does not exist')
        return model
    try:
        handle = open(inputFile, 'r')
        raw_lines = handle.readlines()
        handle.close()
        model.setValues(raw_lines)
    except:
        model.setErrorMessage('Error occurred in reading the data from the file')
    return model
# Entry point: parse the input file, run the algorithm, write the result.
data = readFromFile()
if data.error:
    # validation/IO failed: the output file carries the error message
    writeIntoFile(data.errorMessage)
else:
    out_ = diskTower(data.discSizes, data.countOfDays)
    # drain the queue of per-day queues into one line per day
    listOfString = []
    while not out_.empty():
        listOfString.append(str(out_.dequeue()))
    writeIntoFile('\n'.join(listOfString))
| true |
28f9342985caedf7bdabd0108c6f6d32bad536c5 | Python | yangshuqi/FoodAnalysis | /codes/get_volume_main.py | UTF-8 | 3,008 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 27 13:11:48 2018
@author: shuqi
"""
from segment_food import *
from calculate_volume import *
from get_food_region import *
from skimage import data, color, transform, feature, morphology
from skimage import io
def get_volume_main(filepath, plate_color, ref_len, shape_type, additional_info = 0, debug = False, showing = False):
    """Estimate the food volume (cm^3) from a photo of food on a plate.

    filepath: path of the input image.
    plate_color: RGB triple (0..1 floats) used to locate the plate.
    ref_len: real-world plate size in cm, used as the length reference.
    shape type:
        1: cube (e.g. cake)
        2: ball (e.g. apple)
        3: half-ball (e.g. bun)
        4: cone (e.g. fried rice in the plate)
        5: fixed-height (e.g. pizza)
        6: irregular but nearly fixed shape (e.g. banana)
    additional_info:
        height, for type 5
        volume per unit area, for type 6
    debug: render intermediate matplotlib panels.
    showing: save 'original_image.jpg' and 'mid_result.jpg' to the cwd.
    NOTE(review): get_food_region/segment_food/cal_volume_* come from the
    star imports above; `plt` is assumed to be exported by one of them.
    """
    image_rgb = io.imread(filepath)
    # drop the alpha channel, if any
    if (image_rgb.shape[2] == 4):
        image_rgb = color.rgba2rgb(image_rgb)
    # normalise the working size to 100 px wide, preserving aspect ratio
    image_rgb = transform.resize(image_rgb, (int(100*image_rgb.shape[0]/image_rgb.shape[1]), 100))
    # food mask plus the plate's long/short axis lengths in pixels
    food, plate_long, plate_short = get_food_region(image_rgb, plate_color)
    if showing:
        io.imsave('original_image.jpg', image_rgb)
    if debug:
        f, ((ax0, ax1, ax2, ax3), (ax4, ax5, ax6, ax7)) = plt.subplots(ncols=4, nrows=2, figsize=(22, 8))
        ax0.set_title('food')
        ax0.imshow(image_rgb)
    if shape_type == 1:
        # cube-like shapes need a segmentation to recover area and height
        labels, labels2 = segment_food (image_rgb, food)
        area, height = get_height_and_area(labels2)
        volume = cal_volume_1(plate_long, plate_short, ref_len, area, height)
        if debug:
            ax2.set_title('segment')
            ax2.imshow(labels)
            ax3.set_title('segment2')
            ax3.imshow(labels2)
    if shape_type == 2:
        volume = cal_volume_2(plate_long, plate_short, ref_len, food)
    if shape_type == 3:
        volume = cal_volume_3(plate_long, plate_short, ref_len, food)
    if shape_type == 4:
        volume = cal_volume_4(plate_long, plate_short, ref_len, food)
    if shape_type == 5:
        volume = cal_volume_5(plate_long, plate_short, ref_len, food, additional_info)
    if shape_type == 6:
        volume = cal_volume_6(plate_long, plate_short, ref_len, food, additional_info)
    if debug:
        print('The estimated volume is', volume, 'cm^3.\n(Plate size:', ref_len, 'cm; type of shape: #', shape_type, '.)')
        # black out the non-food pixels for the visualisation panel
        for i in range(0, image_rgb.shape[0]):
            for j in range(0, image_rgb.shape[1]):
                if (food[i][j] == 0):
                    image_rgb[i][j] = [0,0,0]
        ax1.set_title('food')
        ax1.imshow(image_rgb)
    if showing:
        if shape_type == 1:
            io.imsave('mid_result.jpg', labels2)
        else:
            io.imsave('mid_result.jpg', food)
    return volume
if (__name__ == '__main__'):
    # Smoke test: greenish plate colour, 20 cm plate, cube-like food.
    # NOTE(review): 140/225 looks like a typo for 140/255 -- confirm.
    v = get_volume_main('../images/test.jpg', [140/225, 175/255, 160/255], 20, 1, debug = False, showing = True)
    print(v)
f9d653460e9a200a6fde061a065acc81d40b75a7 | Python | rjsvaljean/python-testing-tools | /ireton/tests/test_paramtrize.py | UTF-8 | 700 | 2.859375 | 3 | [
"MIT"
] | permissive | from ireton import parametrize
import unittest
class TestParametrize(unittest.TestCase):
    """Exercise the @parametrize decorator on passing and failing cases."""

    def test_parametrized_test(self):
        # every pair satisfies y == x + 1, so decoration must not raise
        @parametrize([(1, 2), (2, 3), (3, 4)])
        def run_test(x, y):
            assert x + 1 == y

    def test_failures(self):
        # two of the three pairs violate y == x + 1; the aggregated
        # AssertionError must name exactly the failing parameter tuples
        try:
            @parametrize([(1, 1), (2, 2), (2, 3)])
            def run_test(x, y):
                assert x + 1 == y
        except AssertionError as err:
            message = err.args[0]
            assert 'Failed on: (1, 1)' in message
            assert 'Failed on: (2, 2)' in message
            assert 'Failed on: (2, 3)' not in message
42a46a03b2bbd2768964f703c755b1c2326177df | Python | cutejiejie/Algorithms | /查找排序/quick_sort.py | UTF-8 | 1,529 | 3.296875 | 3 | [] | no_license | import random
from cal_time import *
import copy
import sys
sys.setrecursionlimit(100000)
@cal_time
def bubble_sort(li):
    """In-place bubble sort with an early exit when a pass makes no swaps."""
    n = len(li)
    for pass_no in range(n - 1):
        swapped = False
        # each pass bubbles the largest remaining element to the end
        for idx in range(n - pass_no - 1):
            if li[idx] > li[idx + 1]:
                li[idx], li[idx + 1] = li[idx + 1], li[idx]
                swapped = True
        if not swapped:
            # already sorted: nothing moved during this pass
            return
def partition(li, left, right):
    """Partition li[left:right+1] around the pivot li[left], in place.

    Returns the pivot's final index; everything to its left is <= pivot
    and everything to its right is >= pivot ("hole-filling" scheme).
    """
    pivot = li[left]
    while left < right:
        # scan from the right for an element smaller than the pivot
        while left < right and li[right] >= pivot:
            right -= 1
        li[left] = li[right]   # move it into the hole on the left
        # scan from the left for an element larger than the pivot
        while left < right and li[left] <= pivot:
            left += 1
        li[right] = li[left]   # move it into the hole on the right
    li[left] = pivot           # pivot settles into its final slot
    return left
def _quick_sort(li, left, right):
    """Recursive quicksort of li[left:right+1], in place."""
    # recurse only while the slice holds at least two elements
    if right <= left:
        return
    pivot_idx = partition(li, left, right)
    _quick_sort(li, left, pivot_idx - 1)
    _quick_sort(li, pivot_idx + 1, right)
@cal_time
def quick_sort(li):
    # Public entry point: sort `li` in place (timed by the cal_time decorator).
    _quick_sort(li, 0, len(li) - 1)
# Benchmark: worst-case (reverse-sorted) input of 10000 elements.
# Quicksort with a first-element pivot degenerates to O(n^2) recursion
# here, hence the raised recursion limit at the top of the file.
li = list(range(10000, 0, -1))
# random.shuffle(li)
#
# li1 = copy.deepcopy(li)
# li2 = copy.deepcopy(li)
#
# print(id(li)) #2385257834944
# print(id(li1)) #2385259085568
# print(id(li1)) #2385259085312
quick_sort(li)
# quick_sort(li1)
# bubble_sort(li2)
#
# print(li1)
# print(li2)
# li = [9,8,7,6,5,4,3,2,1]
# partition(li, 0, len(li) - 1)
# print(li)
c960eff2879c9b7daac82c40547942758bf6347e | Python | pewosas/DuongHieu-C4T6 | /turtle_intro.py | UTF-8 | 202 | 2.796875 | 3 | [] | no_license | from turtle import *
speed(0)
for i in range (500*500):
forward (100)
left (90)
forward (100)
left (90)
forward (100)
left (90)
forward (100)
left (119)
mainloop()
| true |
ed2fe1c8b05a30b0da4ef96e3545ff83de8b0946 | Python | abelatnvidia/IntroTF | /src/module_02/code01.py | UTF-8 | 386 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | import os, tensorflow as tf
# create a constant in default graph
a = tf.constant(10)
with tf.Session() as sess:
# create tensorboard log files
sfw = tf.summary.FileWriter(os.getcwd(), sess.graph)
# dump the graph definition as JSON
print(sess.graph.as_graph_def())
# clean up
sfw.close()
# notice that the constant is actual listed in output as an "op" (?) | true |
ed4cde158a06e6cedea64afc2568aec0597a6f7a | Python | anaiortega/XCmodels | /puente_quintanavides/calculo_dinamico/oscilacion_viga_biapoyada_xcm.py | ISO-8859-1 | 19,140 | 3.15625 | 3 | [] | no_license | '''Devuelve la primera forma modal de la viga biapoyada de
acuerdo con la figura B.5 de la IAPF, siendo:
x: Abcisa para la que se obtiene el valor.
L: Luz entre apoyos de la viga.
'''
def Fi1X(x,L):
    # First mode shape of a simply supported beam (IAPF fig. B.5):
    # phi1(x) = sin(pi*x/L), with x the abscissa and L the span.
    # NOTE(review): relies on `sin` and `PI` being in scope at module
    # level (e.g. via a star import) -- not visible in this chunk.
    return(sin(PI*x/L))
'''Devuelve el valor de la amplitud del movimiento de vibracin para el primer modo de vibracin de
acuerdo con la expresin 3.10 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudCargaAisladaEnPuente(P,m,L,w0,psi,V,t):
    # First-mode vibration amplitude while a single moving load P is still
    # ON the bridge (eq. 3.10 of P. Museros' thesis on vehicle-structure
    # interaction in high-speed railway bridges).
    #   m: mass per unit length, L: span, w0: fundamental circular
    #   frequency, psi: damping ratio, V: load speed, t: time since entry.
    # NOTE(review): depends on `PI()`, `sqr`, `sin`, `exp` from an outer
    # scope not visible in this chunk.
    assert(t<=L/V)
    n0= w0/2/PI()   # fundamental frequency in Hz
    K= V/2/n0/L     # non-dimensional speed parameter
    WOt= w0*t
    return(2*P/m/L/sqr(w0)/(1-sqr(K))*(sin(K*WOt)-K*exp(-psi*WOt)*sin(WOt)))
'''Devuelve el valor de la derivada primera (velocidad) de la amplitud del movimiento de vibracin para el primer modo de vibracin de
acuerdo con la expresin 3.10 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudDotCargaAisladaEnPuente(P,m,L,w0,psi,V,t):
    # First time-derivative (velocity) of the modal amplitude of eq. 3.10
    # while the moving load is on the span; parameters as in
    # amplitudCargaAisladaEnPuente.
    assert(t<=L/V)
    n0= w0/2/PI()   # fundamental frequency in Hz
    K= V/2/n0/L     # non-dimensional speed parameter
    return(2*(w0*K*cos(t*w0*K)+psi*w0*exp(-(psi*t*w0))*sin(t*w0)*K-w0*exp(-(psi*t*w0))*cos(t*w0)*K)*P/(m*sqr(w0)*(1-sqr(K))*L))
'''Devuelve el valor de la derivada segunda (aceleracin) de la amplitud del movimiento de vibracin para el primer modo de vibracin de
acuerdo con la expresin 3.10 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudDotDotCargaAisladaEnPuente(P,m,L,w0,psi,V,t):
assert(t<=L/V)
n0= w0/2/PI()
K= V/2/n0/L
\return{2*(-w0^2*K^2*sin(t*w0*K)-psi^2*w0^2*exp(-(psi*t*w0))*sin(t*w0)*K+w0^2*exp(-(psi*t*w0))*sin(t*w0)*K+2*psi*w0^2*exp(-(psi*t*w0))*cos(t*w0)*K)*P/(m*sqr(w0)*(1-sqr(K))*L)}
'''Devuelve el valor de la amplitud del movimiento de vibracin para el primer modo de vibracin de
acuerdo con la expresin 3.11 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudCargaAisladaTrasPuente(P,m,L,w0,psi,V,t):
    # First-mode free-vibration amplitude AFTER the moving load has left
    # the span (eq. 3.11 of the same thesis); parameters as in
    # amplitudCargaAisladaEnPuente.
    n0= w0/2/PI()   # fundamental frequency in Hz
    K= V/2/n0/L     # non-dimensional speed parameter
    WOt= w0*t
    t2= t-L/V       # time elapsed since the load left the span
    assert(t2>=0.0)
    WOt2= w0*t2
    return(2*P/m/L/sqr(w0)*K/(1-sqr(K))*(exp(-psi*WOt)*sin(WOt)-exp(-psi*WOt2)*sin(WOt2)))
'''Devuelve el valor de la derivada primera de la amplitud (velocidad) del movimiento de vibracin para el primer modo de vibracin de
acuerdo con la expresin 3.11 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudDotCargaAisladaTrasPuente(P,m,L,w0,psi,V,t):
    # First time-derivative (velocity) of the free-vibration amplitude of
    # eq. 3.11, valid once the load has left the span; parameters as in
    # amplitudCargaAisladaEnPuente.
    n0= w0/2/PI()   # fundamental frequency in Hz
    K= V/2/n0/L     # non-dimensional speed parameter
    assert(t>=L/V)
    return(2*K*P*(psi*w0*sin(w0*(t-L/V))*exp(-(psi*w0*(t-L/V)))-w0*cos(w0*(t-L/V))*exp(-(psi*w0*(t-L/V)))-psi*w0*exp(-(psi*t*w0))*sin(t*w0)+w0*exp(-(psi*t*w0))*cos(t*w0))/(m*sqr(w0)*(1-sqr(K))*L))
'''Devuelve el valor de la derivada segunda de la amplitud (aceleracin) del movimiento de vibracin para el primer modo de vibracin de
acuerdo con la expresin 3.11 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudDotDotCargaAisladaTrasPuente(P,m,L,w0,psi,V,t):
n0= w0/2/PI()
K= V/2/n0/L
assert(t>=L/V)
\return{2*K*P*(-psi^2*w0^2*sin(w0*(t-L/V))*exp(-(psi*w0*(t-L/V)))+w0^2*sin(w0*(t-L/V))*exp(-(psi*w0*(t-L/V)))+2*psi*w0^2*cos(w0*(t-L/V))*exp(-(psi*w0*(t-L/V)))+psi^2*w0^2*exp(-(psi*t*w0)*sin(t*w0))-w0^2*exp(-(psi*t*w0))*sin(t*w0)-2*psi*w0^2*exp(-(psi*t*w0))*cos(t*w0))/(m*sqr(w0)*(1-sqr(K))*L)}
'''Devuelve el valor de la amplitud del movimiento de vibracin para el primer modo de vibracin de
acuerdo con las expresiones 3.10 y 3.11 de la tesis titulada Interaccin vehculo-estructura y efectos
de resonancia en puentes isostticos de ferrocarril para lneas de alta velocidad de Pedro
Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudCargaAislada(P,m,L,w0,psi,V,t):
\if
cond(t<=0)
then(return(0))
\else
{
\if
{
cond(t<=L/V)
then(return(amplitudCargaAisladaEnPuente(P,m,L,w0,psi,V,t)))
else(return(amplitudCargaAisladaTrasPuente(P,m,L,w0,psi,V,t)))
}
}
'''Devuelve el valor de la derivada primera de la amplitud (velocidad) del movimiento de vibracin
para el primer modo de vibracin de acuerdo con las expresiones 3.10 y 3.11 de la tesis
titulada Interaccin vehculo-estructura y efectos de resonancia en puentes isostticos de
ferrocarril para lneas de alta velocidad de Pedro Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudDotCargaAislada(P,m,L,w0,psi,V,t):
\if
cond(t<=0)
then(return(0))
\else
{
\if
{
cond(t<=L/V)
then(return(amplitudDotCargaAisladaEnPuente(P,m,L,w0,psi,V,t)))
else(return(amplitudDotCargaAisladaTrasPuente(P,m,L,w0,psi,V,t)))
}
}
'''Devuelve el valor de la derivada segunda de la amplitud (aceleracin) del movimiento de vibracin
para el primer modo de vibracin de acuerdo con las expresiones 3.10 y 3.11 de la tesis
titulada Interaccin vehculo-estructura y efectos de resonancia en puentes isostticos de
ferrocarril para lneas de alta velocidad de Pedro Museros Romero, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
'''
def amplitudDotDotCargaAislada(P,m,L,w0,psi,V,t):
\if
cond(t<=0)
then(return(0))
\else
{
\if
{
cond(t<=L/V)
then(return(amplitudDotDotCargaAisladaEnPuente(P,m,L,w0,psi,V,t)))
else(return(amplitudDotDotCargaAisladaTrasPuente(P,m,L,w0,psi,V,t)))
}
}
'''Devuelve el valor de la flecha dinmica para el punto de acisa x, siendo:
P: Carga aislada que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
x: Abcisa en la que se calcula la flecha.
'''
def flechaDinamicaCargaAislada(P,m,L,w0,psi,V,t,x):
    # Dynamic deflection at abscissa x for a single moving load:
    # mode shape Fi1X times the modal amplitude at time t.
    return(Fi1X(x,L)*amplitudCargaAislada(P,m,L,w0,psi,V,t))
'''Devuelve el valor de la aceleracin para el punto de acisa x, siendo:
P: Carga aislada que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud.
x: Abcisa en la que se calcula la flecha.
'''
def aceleracionCargaAislada(P,m,L,w0,psi,V,t,x):
    # Acceleration at abscissa x for a single moving load:
    # mode shape Fi1X times the second derivative of the modal amplitude.
    return(Fi1X(x,L)*amplitudDotDotCargaAislada(P,m,L,w0,psi,V,t))
'''Devuelve el valor mnimo de la flecha dinmica para el punto de acisa x, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
tIni: Instante inicial.
tFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def flechaDinamicaMinimaCargaAislada(P,m,L,w0,psi,V,x,tIni,tFin):
incT= 2*PI/w0/10 # 10 puntos por ciclo (5 puntos en cada semionda)
instT=
fDinMin= 1e12
fTmp=
\for
inicio(instT=tIni) continua(instT<tFin) incremento(instT=instT+incT)
\bucle
{
fTmp= flechaDinamicaCargaAislada(P,m,L,w0,psi,V,instT,x)
\if
{
cond(fTmp<fDinMin)
then(fDinMin= fTmp)
}
}
return(fDinMin)
'''Devuelve el valor extremo de la aceleracin para el punto de acisa x, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
tIni: Instante inicial.
tFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def aceleracionExtremaCargaAislada(P,m,L,w0,psi,V,x,tIni,tFin):
incT= 2*PI/w0/10 # 10 puntos por ciclo (5 puntos en cada semionda)
instT=
aExtrema= 0
aTmp=
\for
inicio(instT=tIni) continua(instT<tFin) incremento(instT=instT+incT)
\bucle
{
aTmp= aceleracionCargaAislada(P,m,L,w0,psi,V,instT,x)
\if
{
cond(abs(aTmp)>abs(aExtrema))
then(aExtrema= aTmp)
}
}
return(aExtrema)
'''Devuelve el valor mnimo de la flecha dinmica para el punto de acisa x, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
vIni: Instante inicial.
vFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def flechaDinamicaMinimaCargaAisladaRangoVel(P,m,L,w0,psi,x,tIni,tFin,vIni,vFin):
incV= 10/3.6
v=
fDinMinR= 1e12
fTmpR=
\for
inicio(v=vIni) continua(v<vFin) incremento(v=v+incV)
\bucle
{
fTmpR= flechaDinamicaMinimaCargaAislada(P,m,L,w0,psi,v,x,tIni,tFin)
\if
{
cond(fTmpR<fDinMinR)
then(fDinMinR= fTmpR)
}
print("v= ",v*3.6," km/h fDin= ",fTmpR," m fDinMin= ",fDinMinR," m\n")
}
return(fDinMinR)
'''Devuelve el valor extremo de la aceleracin para el punto de acisa x, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
vIni: Instante inicial.
vFin: Instante final.
x: Abcisa en la que se calcula la aceleracin.
'''
def aceleracionExtremaCargaAisladaRangoVel(P,m,L,w0,psi,x,tIni,tFin,vIni,vFin):
incV= 10/3.6
v=
aExtremaR= 0
aTmpR=
\for
inicio(v=vIni) continua(v<vFin) incremento(v=v+incV)
\bucle
{
aTmpR= aceleracionExtremaCargaAislada(P,m,L,w0,psi,v,x,tIni,tFin)
\if
{
cond(abs(aTmpR)>abs(aExtremaR))
then(aExtremaR= aTmpR)
}
print("v= ",v*3.6," km/h a= ",aTmpR," m aExtrema= ",aExtremaR," m\n")
}
return(aExtremaR)
'''Devuelve el valor de la flecha dinmica para el punto de acisa x, siendo:
ejesTren: Lista con las cargas por eje del tren que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud (el instante t=0 corresponde a la entrada del tren en la viga).
x: Abcisa en la que se calcula la flecha.
'''
def flechaDinamicaTren(ejesTren,m,L,w0,psi,V,t,x):
sz= ejesTren.size
i= 0.0
retval= 0.0
xPEje= [0,0]
tEje= 0.0
fEje= 0.0
\for
inicio(i=0 ) continua(i<sz) incremento(i=i+1)
\bucle
{
xPEje= ejesTren[i]
tEje= t-xPEje[0]/V # Tiempo "local" para el eje.
fEje= flechaDinamicaCargaAislada(-xPEje[1],m,L,w0,psi,V,tEje,x) # Flecha para el eje aislado.
retval= retval+fEje
}
return(retval)
'''Devuelve el valor de la aceleracin inducida por el paso de un tren:
ejesTren: Lista con las cargas por eje del tren que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
t: Instante de tiempo en el que se calcula la amplitud (el instante t=0 corresponde a la entrada del tren en la viga).
x: Abcisa en la que se calcula la flecha.
'''
def aceleracionInducidaTren(ejesTren,m,L,w0,psi,V,t,x):
sz= ejesTren.size
i= 0.0
retval= 0.0
xPEje= [0,0]
tEje= 0.0
fEje= 0.0
\for
inicio(i=0 ) continua(i<sz) incremento(i=i+1)
\bucle
{
xPEje= ejesTren[i]
tEje= t-xPEje[0]/V # Tiempo "local" para el eje.
fEje= aceleracionCargaAislada(-xPEje[1],m,L,w0,psi,V,tEje,x) # Flecha para el eje aislado.
retval= retval+fEje
}
return(retval)
'''Devuelve el valor mnimo de la flecha dinmica para el punto de acisa x, siendo:
ejesTren: Lista con las cargas por eje del tren que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
tIni: Instante inicial.
tFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def flechaDinamicaMinimaTren(ejesTren,m,L,w0,psi,V,x):
numEjes= ejesTren.size
tIni= 0
ultEjeTren= ejesTren[numEjes-1]
longTren= ultEjeTren[0]
tFin= 1.5*(longTren+L)/V
incT= 2*PI/w0/10 # 10 puntos por ciclo (5 puntos en cada semionda)
instT=
fDinMin= 1e12
fTmp=
\for
inicio(instT=tIni) continua(instT<tFin) incremento(instT=instT+incT)
\bucle
{
fTmp= flechaDinamicaTren(ejesTren,m,L,w0,psi,V,instT,x)
\if
{
cond(fTmp<fDinMin)
then(fDinMin= fTmp)
}
}
return(fDinMin)
'''Devuelve el valor extremo de la aceleracin inducida por el tren en el punto de acisa x, siendo:
ejesTren: Lista con las cargas por eje del tren que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
tIni: Instante inicial.
tFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def aceleracionExtremaInducidaTren(ejesTren,m,L,w0,psi,V,x):
numEjes= ejesTren.size
tIni= 0
ultEjeTren= ejesTren[numEjes-1]
longTren= ultEjeTren[0]
tFin= 1.5*(longTren+L)/V
incT= 2*PI/w0/10 # 10 puntos por ciclo (5 puntos en cada semionda)
instT=
aExtrema= 0
aTmp=
\for
inicio(instT=tIni) continua(instT<tFin) incremento(instT=instT+incT)
\bucle
{
aTmp= aceleracionInducidaTren(ejesTren,m,L,w0,psi,V,instT,x)
\if
{
cond(abs(aTmp)>abs(aExtrema))
then(aExtrema= aTmp)
}
}
return(aExtrema)
'''Devuelve el valor mnimo de la flecha dinmica para el punto de abcisa x, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
vIni: Instante inicial.
vFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def flechaDinamicaMinimaTrenRangoVel(ejesTren,m,L,w0,psi,x,vIni,vFin, fName):
incV= 10/3.6
v=
fDinMinR= 1e12
fTmpR=
\for
inicio(v=vIni) continua(v<vFin) incremento(v=v+incV)
\bucle
{
fTmpR= flechaDinamicaMinimaTren(ejesTren,m,L,w0,psi,v,x)
\if
{
cond(fTmpR<fDinMinR)
then(fDinMinR= fTmpR)
}
\print[fName]{v*3.6," ",fTmpR,"\n"}
}
return(fDinMinR)
'''Devuelve el valor extremo de la aceleracin inducida por el tren para el punto de abcisa x, siendo:
P: Carga que produce la oscilacin.
m: Masa por unidad de longitud.
L: Luz entre apoyos.
w0: Pulsacin correspondiente al modo fundamental.
psi: Amortiguamiento.
V: Velocidad con que se desplaza la carga.
vIni: Instante inicial.
vFin: Instante final.
x: Abcisa en la que se calcula la flecha.
'''
def aceleracionExtremaTrenRangoVel(ejesTren,m,L,w0,psi,x,vIni,vFin, fName):
incV= 10/3.6
v=
aExtremaR= 0
aTmpR=
\for
inicio(v=vIni) continua(v<vFin) incremento(v=v+incV)
\bucle
{
aTmpR= aceleracionExtremaInducidaTren(ejesTren,m,L,w0,psi,v,x)
\if
{
cond(abs(aTmpR)>abs(aExtremaR))
then(aExtremaR= aTmpR)
}
\print[fName]{v*3.6," ",abs(aTmpR),"\n"}
}
return(aExtremaR)
| true |
a35332e9b4638bcdad54d4f7bf727402175a743a | Python | KeeganRen/SLAMwithCameraIMUforPython | /keypoint_pair.py | UTF-8 | 550 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
keypoint.py
author: Keita Nagara 永良慶太 (University of Tokyo) <nagara.keita()gmail.com>
Class for key points pair between images
"""
class KeyPointPair:
    """A pair of matched key points across two consecutive images.

    Pixel coordinates are parsed from a colon-separated string
    "prevIndex:index:x1:y1:x2:y2" and shifted so the origin sits at the
    camera principal point instead of the image corner.
    """

    def __init__(self, data_):
        # principal point: (image size)/2 plus the calibrated offset
        center_x = 540.0 + 19.840576
        center_y = 960.0 + 9.901855
        fields = data_.split(':')
        self.prevIndex = int(fields[0])
        self.index = int(fields[1])
        self.x1 = float(fields[2]) - center_x
        self.y1 = float(fields[3]) - center_y
        self.x2 = float(fields[4]) - center_x
        self.y2 = float(fields[5]) - center_y
6b73cc0ecbc241e22033c30dc67b38fe49cd9f3e | Python | isaacpena/CS477-Python-Natural-Language-Processing | /Homework1/solutionsB.py | UTF-8 | 16,632 | 3.375 | 3 | [] | no_license | import sys
import nltk
import math
import time
from collections import Counter
START_SYMBOL = '*'
STOP_SYMBOL = 'STOP'
RARE_SYMBOL = '_RARE_'
RARE_WORD_MAX_FREQ = 5
LOG_PROB_OF_ZERO = -1000
# TODO: IMPLEMENT THIS FUNCTION
# Receives a list of tagged sentences and processes each sentence to generate a list of words and a list of tags.
# Each sentence is a string of space separated "WORD/TAG" tokens, with a newline character in the end.
# Remember to include start and stop symbols in yout returned lists, as defined by the constants START_SYMBOL and STOP_SYMBOL.
# brown_words (the list of words) should be a list where every element is a list of the tags of a particular sentence.
# brown_tags (the list of tags) should be a list where every element is a list of the tags of a particular sentence.
def split_wordtags(brown_train):
    """Split "WORD/TAG" sentences into parallel word and tag lists.

    Each input sentence is a string of space-separated WORD/TAG tokens.
    Every returned sentence list is framed by two START_SYMBOLs (for
    trigram context) and one STOP_SYMBOL.
    """
    brown_words, brown_tags = [], []
    for sentence in brown_train:
        words = [START_SYMBOL, START_SYMBOL]
        tags = [START_SYMBOL, START_SYMBOL]
        for token in sentence.split():
            # split on the LAST '/': tags never contain '/', words may
            word, tag = token.rsplit('/', 1)
            words.append(word)
            tags.append(tag)
        words.append(STOP_SYMBOL)
        tags.append(STOP_SYMBOL)
        brown_words.append(words)
        brown_tags.append(tags)
    return brown_words, brown_tags
# TODO: IMPLEMENT THIS FUNCTION
# This function takes tags from the training data and calculates tag trigram probabilities.
# It returns a python dictionary where the keys are tuples that represent the tag trigram, and the values are the log probability of that trigram
def calc_trigrams(brown_tags):
    """Compute log2 trigram probabilities q(t3 | t1, t2) over tag sequences.

    Returns a dict mapping each tag trigram tuple to
    log2(count(t1,t2,t3) / count(t1,t2)).
    """
    all_trigrams = []
    all_bigrams = []
    for sentence_tags in brown_tags:
        all_trigrams.extend(nltk.trigrams(sentence_tags))
        all_bigrams.extend(nltk.bigrams(sentence_tags))
    trigram_counts = nltk.FreqDist(all_trigrams)
    bigram_counts = nltk.FreqDist(all_bigrams)
    q_values = {}
    for trigram, count in trigram_counts.items():
        # condition on the two-tag prefix (t1, t2)
        context_count = bigram_counts.get((trigram[0], trigram[1]))
        q_values[trigram] = math.log(float(count) / context_count, 2)
    return q_values
# This function takes output from calc_trigrams() and outputs it in the proper format
def q2_output(q_values, filename):
    """Write the trigram log-probabilities to *filename* in sorted order.

    Fix: dict.keys() returns a view object in Python 3 which has no
    .sort() method; sorted() gives the same ordering and also works on
    Python 2. A context manager guarantees the file is closed on error.

    Args:
        q_values: dict mapping tag trigrams (3-tuples) to log probabilities.
        filename: output file path.
    """
    with open(filename, "w") as outfile:
        for trigram in sorted(q_values.keys()):
            output = " ".join(['TRIGRAM', trigram[0], trigram[1],
                               trigram[2], str(q_values[trigram])])
            outfile.write(output + '\n')
# TODO: IMPLEMENT THIS FUNCTION
# Takes the words from the training data and returns a set of all of the words that occur more than 5 times (use RARE_WORD_MAX_FREQ)
# brown_words is a python list where every element is a python list of the words of a particular sentence.
# Note: words that appear exactly 5 times should be considered rare!
def calc_known(brown_words):
    """Collect the words that occur more than RARE_WORD_MAX_FREQ times.

    Words appearing exactly RARE_WORD_MAX_FREQ times are still rare.

    Args:
        brown_words: list of sentences, each a list of words.

    Returns:
        set of "known" (non-rare) words.
    """
    word_counts = Counter()
    for sentence in brown_words:
        word_counts.update(sentence)
    return set(word for word, count in word_counts.items()
               if count > RARE_WORD_MAX_FREQ)
# TODO: IMPLEMENT THIS FUNCTION
# Takes the words from the training data and a set of words that should not be replaced for '_RARE_'
# Returns the equivalent to brown_words but replacing the unknown words by '_RARE_' (use RARE_SYMBOL constant)
def replace_rare(brown_words, known_words):
    """Return a copy of brown_words with unknown words mapped to RARE_SYMBOL.

    Args:
        brown_words: list of sentences (lists of words).
        known_words: set of words that should be kept as-is.

    Returns:
        New list of sentences where every word not in known_words has been
        replaced by RARE_SYMBOL; the input lists are left untouched.
    """
    return [
        [word if word in known_words else RARE_SYMBOL for word in sentence]
        for sentence in brown_words
    ]
# This function takes the ouput from replace_rare and outputs it to a file
def q3_output(rare, filename):
    """Write the rare-substituted training sentences to *filename*.

    Each line drops the two leading START symbols and the trailing STOP
    symbol (sentence[2:-1]) so the output matches the raw text format.
    Fix: manage the file with a context manager so the handle is closed
    even if a write raises.

    Args:
        rare: list of sentences (lists of words) from replace_rare().
        filename: output file path.
    """
    with open(filename, 'w') as outfile:
        for sentence in rare:
            outfile.write(' '.join(sentence[2:-1]) + '\n')
# TODO: IMPLEMENT THIS FUNCTION
# Calculates emission probabilities and creates a set of all possible tags
# The first return value is a python dictionary where each key is a tuple in which the first element is a word
# and the second is a tag, and the value is the log probability of the emission of the word given the tag
# The second return value is a set of all possible tags for this data set
def calc_emission(brown_words_rare, brown_tags):
    """Estimate emission log-probabilities and collect the tag set (Q4).

    Args:
        brown_words_rare: sentences (word lists) with rare words replaced.
        brown_tags: parallel sentences of tags.

    Returns:
        (e_values, taglist): e_values maps (word, tag) to
        log2(count(word, tag) / count(tag)); taglist is the set of all
        tags seen in the data.
    """
    emission_counts = Counter()
    tag_counts = Counter()
    taglist = set()
    # The word and tag lists are parallel by construction (split_wordtags),
    # so iterate them in lockstep.
    for words, tags in zip(brown_words_rare, brown_tags):
        for word, tag in zip(words, tags):
            emission_counts[(word, tag)] += 1
            tag_counts[tag] += 1
            taglist.add(tag)
    e_values = {}
    for (word, tag), count in emission_counts.items():
        # P(word | tag) = how often the tag manifests as this word,
        # divided by the overall frequency of the tag.
        e_values[(word, tag)] = math.log(float(count) / tag_counts[tag], 2)
    return e_values, taglist
# This function takes the output from calc_emissions() and outputs it
def q4_output(e_values, filename):
    """Write emission log-probabilities to *filename*, sorted by (word, tag).

    Fix: dict.keys() is a view in Python 3 and has no .sort(); sorted()
    yields the same ordering and works on both Python 2 and 3. The file is
    managed with a context manager so it is closed even on error.

    Args:
        e_values: dict mapping (word, tag) pairs to log probabilities.
        filename: output file path.
    """
    with open(filename, "w") as outfile:
        for item in sorted(e_values.keys()):
            output = " ".join([item[0], item[1], str(e_values[item])])
            outfile.write(output + '\n')
def get_tags(k, tags):
    """Possible tags at position k: only '*' before the sentence starts."""
    return ['*'] if k <= 0 else tags
# TODO: IMPLEMENT THIS FUNCTION
# This function takes data to tag (brown_dev_words), a set of all possible tags (taglist), a set of all known words (known_words),
# trigram probabilities (q_values) and emission probabilities (e_values) and outputs a list where every element is a tagged sentence
# (in the WORD/TAG format, separated by spaces and with a newline in the end, just like our input tagged data)
# brown_dev_words is a python list where every element is a python list of the words of a particular sentence.
# taglist is a set of all possible tags
for tokens in brown_dev_words:
n = len(tags)
t = len(tokens)
vitmatrix = [[[LOG_PROB_OF_ZERO for i in range(n)] for j in range(n)] for k in range(t)]
bp = [[[-1 for i in range(n)] for j in range(n)] for k in range(t)]
# t observations, the * at t = -1(I guess) is implicit
# n states/tags + one * only used at the beginning
# Fill in first column - when t=0 (i.e. first token), the previous symbols must be *, *.
# As such, the "v" here is each state, while "u" and "w" = * (so the vitmatrix[k-1][u][v] term doesn't come into play)
word = tokens[0]
if word not in known_words:
word = RARE_SYMBOL
for v in range(n):
transval = q_values.get((START_SYMBOL, START_SYMBOL, tags[v]), None)
emissval = e_values.get((word, tags[v]), None)
if transval != None:
vitmatrix[0][0][v] = transval
# Fill in second column, when t = 1 (second token). W is still going to be * here;
# therefore we don't need to loop through it. Calculation of values here is
# transition probability *->state u in range(1, n) (excepting *)-> state v in range(1, n) also excepting *
# + emission probability P(tokens[1] | tags[v])
# + previous value for vitmatrix[0][0][v] (the only possible one last observation)
word = tokens[1]
if word not in known_words:
word = RARE_SYMBOL
for v in range(1, n):
for u in range(1, n):
transval = q_values.get((START_SYMBOL, tags[u], tags[v]), None)
emissval = e_values.get((word, tags[v]), None)
if transval != None and emissval != None:
vitmatrix[1][u][v] = vitmatrix[0][0][v] + transval + emissval
bp[1][u][v] = u
# Fill in remaining t-2 columns (final one is indexed t-1).
# This is not exactly simple to loop through, but it /is/ consistent:
# the calculation of vitmatrix[k][u][v] = vitmatrix[k-1][w][u] + q_values[tags[w], tags[u], tags[v]] + e_values[tokens[k], tags[v]] every time
# * is left out in analysis of each token
# if vitmatrix[k-1][w][u] = -1000 or q_values/e_values call returns None, DO NOT CHANGE
k = 2
while k < t:
# for each token from 2 to (including) t-1
word = tokens[k]
if word not in known_words:
word = RARE_SYMBOL
# replacement of rare words with RARE_SYMBOL
flag = 0
for v in range(1, n):
emissval = e_values.get((word, tags[v]), None)
if emissval == None:
continue
# Unseen emissions should not do anything
for u in range(1, n):
# for each state that could have been the previous state, find the maximum
maxim = -1000000
argmaxim = -1
for w in range(1, n):
# finding maximum here
transval = q_values.get((tags[w], tags[u], tags[v]), None)
formval = vitmatrix[k-1][w][u]
# disallow unseen transitions and unreachable previous states
if transval != None:
totalval = formval + transval + emissval
else:
totalval = formval + emissval + LOG_PROB_OF_ZERO
if totalval >= maxim:
maxim = totalval
argmaxim = w
flag = 1
vitmatrix[k][u][v] = maxim
bp[k][u][v] = argmaxim
k += 1
# Of all the states that the final observation (usually a period, or some other punctuation) can be in;
# find the one which has the maximum value of (transition from this state to STOP + this state's value)
maxim = -1000000
argmaxim = (0, 0)
for i in range(1, n):
for j in range(1, n):
transval = q_values.get((tags[j], tags[i], STOP_SYMBOL), None)
if transval != None:
val = transval + vitmatrix[t-1][j][i]
else:
val = LOG_PROB_OF_ZERO + vitmatrix[t-1][j][1]
if val >= maxim:
maxim = val
argmaxim = (j, i)
#(j, i) is the maximum u,v pair for observations t-2 and t-1
fintags = [-1 for i in range(t)]
acttags = ['NOUN' for i in range(t)]
fintags[t-1] = argmaxim[1]
fintags[t-2] = argmaxim[0]
acttags[t-1] = tags[argmaxim[1]]
acttags[t-2] = tags[argmaxim[0]]
# Follow backpointers to get the maximum tag sequence probability for the full sentence
# This part of the algorithm is taken from the Michael Collins notes at Columbia
k = t - 3
while k >= 0:
point = bp[k+2][fintags[k+1]][fintags[k+2]]
fintags[k] = point
acttags[k] = tags[point]
k = k - 1
# Assemble the final string & then append it to the list of tagged sentences
finstr = ""
for m in range(t):
finstr = finstr + tokens[m] + "/" + acttags[m] + " "
strippedstring = finstr.strip() + "\n"
tagged.append(strippedstring)
return tagged
# This function takes the output of viterbi() and outputs it to file
def q5_output(tagged, filename):
    """Write the tagged sentences from viterbi() to *filename*.

    Each element of *tagged* is already a newline-terminated string.
    Fix: use a context manager so the file is closed even if a write
    raises.

    Args:
        tagged: list of "WORD/TAG ..." strings, each ending in a newline.
        filename: output file path.
    """
    with open(filename, 'w') as outfile:
        for sentence in tagged:
            outfile.write(sentence)
# TODO: IMPLEMENT THIS FUNCTION
# This function uses nltk to create the taggers described in question 6
# brown_words and brown_tags is the data to be used in training
# brown_dev_words is the data that should be tagged
# The return value is a list of tagged sentences in the format "WORD/TAG", separated by spaces. Each sentence is a string with a
# terminal newline, not a list of tokens.
def nltk_tagger(brown_words, brown_tags, brown_dev_words):
    """Train NLTK bigram/trigram taggers (question 6) and tag the dev data.

    Args:
        brown_words: list of training sentences, each a list of words.
        brown_tags: parallel list of tag lists.
        brown_dev_words: list of sentences (word lists) to tag.

    Returns:
        list of tagged sentences, each a single "WORD/TAG WORD/TAG ..."
        string terminated by a newline.
    """
    # Fix: materialize the (word, tag) pairs as lists. In Python 3, zip()
    # returns a one-shot iterator and `training` is consumed twice below
    # (once per tagger), so bare zip objects would be exhausted after the
    # first use; list(zip(...)) behaves identically on Python 2 as well.
    # (range replaces the Python-2-only xrange for the same reason.)
    training = [list(zip(brown_words[i], brown_tags[i]))
                for i in range(len(brown_words))]
    # Back-off chain: trigram -> bigram -> default 'NOUN'
    deftag = nltk.DefaultTagger('NOUN')
    bitag = nltk.BigramTagger(training, backoff=deftag)
    tritag = nltk.TrigramTagger(training, backoff=bitag)
    tagged = []
    for tokens in brown_dev_words:
        wordtags = list(tritag.tag(tokens))
        finstr = ""
        for tup in wordtags:
            finstr = finstr + tup[0] + "/" + tup[1] + " "
        tagged.append(finstr.strip() + "\n")
    return tagged
# This function takes the output of nltk_tagger() and outputs it to file
def q6_output(tagged, filename):
    """Write the tagged sentences from nltk_tagger() to *filename*.

    Each element of *tagged* is already a newline-terminated string.
    Fix: use a context manager so the file is closed even if a write
    raises.

    Args:
        tagged: list of "WORD/TAG ..." strings, each ending in a newline.
        filename: output file path.
    """
    with open(filename, 'w') as outfile:
        for sentence in tagged:
            outfile.write(sentence)
DATA_PATH = '/home/classes/cs477/data/'
OUTPUT_PATH = 'output/'
def main():
    """Run the full Part B HMM-tagging pipeline end to end.

    Reads the Brown training data, estimates trigram transition and
    emission probabilities, tags the development data with Viterbi and
    with NLTK's n-gram taggers, and writes the B2-B6 output files.

    NOTE(review): this file is Python 2 (print statement below, xrange
    elsewhere); time.clock() was removed in Python 3.8.
    """
    # start timer
    time.clock()
    # open Brown training data
    infile = open(DATA_PATH + "Brown_tagged_train.txt", "r")
    brown_train = infile.readlines()
    infile.close()
    # split words and tags, and add start and stop symbols (question 1)
    brown_words, brown_tags = split_wordtags(brown_train)
    # calculate tag trigram probabilities (question 2)
    q_values = calc_trigrams(brown_tags)
    # question 2 output
    q2_output(q_values, OUTPUT_PATH + 'B2.txt')
    # calculate list of words with count > 5 (question 3)
    known_words = calc_known(brown_words)
    # get a version of brown_words with rare words replaced with '_RARE_' (question 3)
    brown_words_rare = replace_rare(brown_words, known_words)
    # question 3 output
    q3_output(brown_words_rare, OUTPUT_PATH + "B3.txt")
    # calculate emission probabilities (question 4)
    e_values, taglist = calc_emission(brown_words_rare, brown_tags)
    # question 4 output
    q4_output(e_values, OUTPUT_PATH + "B4.txt")
    # delete unnecessary data to free memory before tagging
    del brown_train
    del brown_words_rare
    # open Brown development data (question 5)
    infile = open(DATA_PATH + "Brown_dev.txt", "r")
    brown_dev = infile.readlines()
    infile.close()
    # format Brown development data here
    brown_dev_words = []
    for sentence in brown_dev:
        # split on spaces; [:-1] drops the trailing newline token
        brown_dev_words.append(sentence.split(" ")[:-1])
    # do viterbi on brown_dev_words (question 5)
    viterbi_tagged = viterbi(brown_dev_words, taglist, known_words, q_values, e_values)
    # question 5 output
    q5_output(viterbi_tagged, OUTPUT_PATH + 'B5.txt')
    # do nltk tagging here
    nltk_tagged = nltk_tagger(brown_words, brown_tags, brown_dev_words)
    # question 6 output
    q6_output(nltk_tagged, OUTPUT_PATH + 'B6.txt')
    # print total time to run Part B
    print "Part B time: " + str(time.clock()) + ' sec'
if __name__ == "__main__": main()
| true |
9fa45df0e2770fcdee59b1deb14d0d5c6bbbb0f8 | Python | fomalhaut88/pybackup2 | /models/base_command.py | UTF-8 | 423 | 2.765625 | 3 | [] | no_license | from models.errors import CommandError
class BaseCommand:
    """Abstract base for commands: checks arity and maps positionals to names.

    Subclasses set `command` (keyword), `args` (tuple of expected argument
    names) and `doc` (help text), and implement execute().
    """

    command = None  # command keyword, set by subclasses
    args = ()       # expected argument names, set by subclasses
    doc = ""        # help text, set by subclasses

    def __init__(self, *args):
        expected = self.__class__.args
        if len(args) != len(expected):
            raise CommandError("invalid number of arguments")
        # Shadow the class-level name tuple with a name -> value mapping.
        self.args = dict(zip(expected, args))

    def execute(self):
        raise NotImplementedError()
| true |
33179ab9b38618c19a15fba5718b030712d1cded | Python | sbacheld/sudoku-solver | /domain.py | UTF-8 | 231 | 3.21875 | 3 | [
"MIT"
] | permissive | class Domain:
_values = []
    def __init__(self, values):
        """Create a domain holding the given candidate values."""
        self._values = values
    @property
    def values(self):
        """The current candidate values of this domain."""
        return self._values
    def update_values(self, values):
        """Replace the domain's candidate values with the given sequence."""
        self._values = values
| true |
83ff9fb44c296b15fd0d4505c8b5745d58ccd752 | Python | OpenFAST/python-toolbox | /pyFAST/input_output/examples/Example_PlotBinary.py | UTF-8 | 638 | 2.828125 | 3 | [] | no_license | """
- Open and OpenFAST binary file
- Convert it to a pandas dataframe
- Plot a given output channel
"""
import os
import matplotlib.pyplot as plt
from pyFAST.input_output import FASTOutputFile
# Get current directory so this script can be called from any location
scriptDir = os.path.dirname(__file__)
fastoutFilename = os.path.join(scriptDir, '../../../data/example_files/fastout_allnodes.outb')
# Parse the OpenFAST binary output file and convert it to a pandas DataFrame
df = FASTOutputFile(fastoutFilename).toDataFrame()
# List the available output channels (column names include units)
print(df.keys())
time = df['Time_[s]']
Omega = df['RotSpeed_[rpm]']
# Plot rotor speed against time
plt.plot(time, Omega)
plt.xlabel('Time [s]')
plt.ylabel('RotSpeed [rpm]')
# Only pop up the plot window when run as a script (not on import)
if __name__ == '__main__':
    plt.show()
| true |
15e82f8de1609de491a4e03e9fefbc7c1a027c14 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2338/60639/244373.py | UTF-8 | 464 | 2.953125 | 3 | [] | no_license | def solution():
    # First input line: n (how many numbers) and the target sum.
    inp=input().split(' ')
    n=int(inp[0])
    # NOTE(review): `sum` shadows the built-in sum() inside this function.
    sum=int(inp[1])
    # Second input line: the n numbers themselves.
    inp=input().split(' ')
    nums=[]
    for i in range(n):
        nums.append(int(inp[i]))
    # Brute-force two-sum: test every unordered pair exactly once.
    for i in range(n-1):
        for j in range(i+1,n):
            if nums[i]+nums[j]==sum:
                # Found a pair adding up to the target: report and stop.
                print("Yes")
                return
            else:
                # Redundant branch (the loop continues anyway); kept as-is.
                continue
    # No pair matched the target sum.
    print("No")
def main():
    """Read the number of test cases from stdin and solve each one."""
    case_count = int(input())
    for _ in range(case_count):
        solution()
main()
| true |
ce3b40f90fd20f96d706403129473582eabd177c | Python | aptend/leetcode-rua | /Python/905 - Sort Array By Parity/905_sort-array-by-parity.py | UTF-8 | 976 | 3.234375 | 3 | [] | no_license | from leezy import Solution, solution
"""
How about returning an inverleaving array?
even, odd, even, odd, ... and so forth
[1, 2, 3, 4] -> [2, 1, 4, 3]
one solution:
N = len(A)
i, j = 0, 1
while True:
while i < N and (i + A[i]) % 2 == 0:
i += 2
while j < N and (j + A[j]) % 2 == 0:
j += 2
if i < N and j < N:
A[i], A[j] = A[j], A[i]
i += 2
j += 2
else:
break
return A
"""
class Q905(Solution):
    @solution
    def sortArrayByParity(self, A):
        """Rearrange A in place so every even number precedes every odd one."""
        # Two-pointer partition, advancing one step per loop iteration:
        # skip elements that are already on the correct side, otherwise
        # swap the out-of-place pair inward. Produces the same sequence
        # of swaps as the classic nested-skip formulation.
        lo, hi = 0, len(A) - 1
        while lo < hi:
            if A[lo] % 2 == 0:
                lo += 1   # already on the even side
            elif A[hi] % 2 == 1:
                hi -= 1   # already on the odd side
            else:
                # A[lo] is odd and A[hi] is even: swap them inward.
                A[lo], A[hi] = A[hi], A[lo]
                lo += 1
                hi -= 1
        return A
def main():
    """Build the Q905 problem, attach a sample input, and run it via leezy."""
    q = Q905()
    q.add_args([3, 1, 2, 4])
    q.run()
if __name__ == "__main__":
main()
| true |
72ba8dca05d9a480f57dc1db308cd9923f2d3d8e | Python | hancse/model_rowhouse | /House model_2R2C_Python/house_model/configurator.py | UTF-8 | 6,553 | 3.0625 | 3 | [] | no_license | """
A certain Python style gives the modules (*.py files) names of a profession:
in this style, the module that encapsulates the parameter configuration can be called
configurator.py
the module performs the following tasks:
1. read the input parameters for the model simulation from a configuration file
"Pythonic" configuration file types are *.ini, *.yml, *.toml and *.json
The *.yml can cope with array parameters. This makes it more useful than the *.ini format
The *.json format can also represent arrays. It is used when the input data comes from a database.
2. convert the input parameters to a dict
3. optionally, convert the dict to a dataclass object
4. get additional parameters from NEN5060
5. perform calculations to prepare for ODE integration
"""
import yaml
"""
The predefined variables are now defined in a configuration file
All parameters read from configuration (*.yml) file
"""
def load_config(config_name: str):
    """Read the house parameters from a YAML configuration file.

    Args:
        config_name: path of the *.yml configuration file.

    Returns:
        dict of house parameters as parsed by yaml.safe_load.
    """
    with open(config_name) as config_file:
        house_params = yaml.safe_load(config_file)
    return house_params
def save_config(hp, config_name="../config2R2C.yml"):
    """Write the house parameter dict back to a YAML configuration file.

    Improvement: the output path is now a parameter instead of being
    hard-coded; the default keeps existing callers working unchanged.

    Args:
        hp: house parameter dictionary to serialize.
        config_name: target file path (defaults to the original location).
    """
    with open(config_name, "w") as config_outfile:
        yaml.dump(hp, config_outfile, indent=4)
# Variables from Simulink model, dwelling mask (dwelling mask???????)
# Floor and internal walls construction.
# It is possible to choose between light, middle or heavy weight construction
"""
# Facade construction
# It is possible to choose between light, middle or heavy weight construction
the parameters c_internal_mass, th_internal_mass and rho_internal_mass
c_facade, th_facade and rho_facade are now lists
the indices to these lists are N_internal_mass an N_facade
"""
# It is assumed that furniture and the surface part of the walls have the same temperature
# as the air and the wall mass is divided between the air and wall mass.
# Thus, the capacity of the air node consists of the air capacity,
# furniture capacity and capacity of a part of the walls.
# Appendix I presents the coefficients in the dwelling model.
# In the resistance Rair_outdoor the influence of heat transmission through the outdoor walls
# and natural ventilation is considered.
def calculateRC(hp: dict):
    """Derive the 2R2C network parameters from the house parameter dict.

    Improvements over the original: removed the leftover debug
    print(A_glass), removed unused locals (CF and the facade capacity
    parameters, which the 2R2C model never uses), and computed the glass
    area by excluding the 'g_value' key instead of summing everything and
    subtracting it afterwards.

    Args:
        hp: house parameter dictionary as loaded from the YAML config;
            the keys read here are 'dimensions', 'thermal', 'ventilation',
            'construction', 'initial' and 'glass'.

    Returns:
        tuple (Rair_wall, Cwall, Rair_outdoor, Cair):
            Rair_wall: resistance indoor air <-> wall node [K/W]
            Cwall: heat capacity of the wall node [J/K]
            Rair_outdoor: resistance indoor air <-> outdoor air [K/W]
            Cair: heat capacity of the indoor air node [J/K]
    """
    # Geometry
    A_facade = hp['dimensions']['A_facade']                # envelope surface (facade + roof + ground) [m2]
    A_internal_mass = hp['dimensions']['A_internal_mass']  # floor + internal walls surface [m2]
    V_dwelling = hp['dimensions']['V_dwelling']            # internal volume [m3]

    # Thermal properties
    Rc_facade = hp['thermal']['Rc_facade']  # envelope R-value [m2K/W]
    Uglass = hp['thermal']['U_glass']       # window U-value [W/m2K]

    n = hp['ventilation']['n']  # ventilation, air changes per hour [1/h]

    # Construction type index for the internal mass (0=light, 1=middle, 2=heavy)
    N_internal_mass = hp['construction']['N_internal_mass']

    rho_air = hp['initial']['rho_air']  # density of air [kg/m3]
    c_air = hp['initial']['c_air']      # specific heat capacity of air [J/kgK]
    alpha_i_facade = hp['initial']['alpha_i_facade']            # indoor facade film coefficient [W/m2K]
    alpha_e_facade = hp['initial']['alpha_e_facade']            # outdoor facade film coefficient [W/m2K]
    alpha_internal_mass = hp['initial']['alpha_internal_mass']  # internal-mass film coefficient [W/m2K]

    c_internal_mass = hp['thermal']['c_internal_mass'][N_internal_mass]          # specific heat capacity [J/kgK]
    th_internal_mass = hp['construction']['th_internal_mass'][N_internal_mass]   # thickness [m]
    rho_internal_mass = hp['construction']['rho_internal_mass'][N_internal_mass] # density [kg/m3]

    # The 'glass' mapping also carries the (dimensionless) g-value, which
    # must not be counted as a surface: sum only the orientation entries.
    A_glass = sum(v for k, v in hp['glass'].items() if k != 'g_value')

    # Volume of floor and internal walls construction [m3]
    V_internal_mass = A_internal_mass * th_internal_mass

    qV = (n * V_dwelling) / 3600  # ventilation volume flow [m3/s]
    qm = qV * rho_air             # ventilation mass flow [kg/s]

    # Resistances
    Rair_wall = 1.0 / (A_internal_mass * alpha_internal_mass)  # indoor air <-> wall
    U = 1.0 / (1.0 / alpha_i_facade + Rc_facade + 1 / alpha_e_facade)  # U-value indoor air <-> facade
    Rair_outdoor = 1.0 / (A_facade * U + A_glass * Uglass + qm * c_air)  # indoor <-> outdoor air

    # Capacities: half of the internal mass is lumped with the air node
    Cwall = rho_internal_mass * c_internal_mass * V_internal_mass / 2.0
    Cair = rho_internal_mass * c_internal_mass * V_internal_mass / 2.0 + rho_air * c_air * V_dwelling

    return Rair_wall, Cwall, Rair_outdoor, Cair
# Time base on 1 hour sampling from NEN
"""
time = Irr.qsunS[0]
# time = first row of Irr.qsunSouth (time axis) in seconds [0, 3600, 7200, ...]
print("ID time: ", id(time), ", ID Irr.qsunS[0]: ", id(Irr.qsunS[0]))
the "new" variable time is NOT at the same memory address as
the "old" variable Irr.qsunS[0]!
because the value of the first element of an array is assigned to a scalar (float)
the instruction now has COPIED the variable
this asks for extreme programmer awareness!
# define window surface in m2
# Windows surface [E,SE,S,SW,W,NW,N,NE] [m2]
# -90 (E), -45 (SE), 0 (S), 45 (SW), 90 (W), 135 (NW), 180 (N), 225 (NE)
# Window solar transmittance, g-value
# Calculate Qsolar on window
Qsolar = (Irr.qsunE[1] * hp['glass']['E'] + Irr.qsunSE[1] * hp['glass']['SE'] +
Irr.qsunS[1] * hp['glass']['S'] + Irr.qsunSW[1] * hp['glass']['SW'] +
Irr.qsunW[1] * hp['glass']['W'] + Irr.qsunNW[1] * hp['glass']['NW'] +
Irr.qsunN[1] * hp['glass']['N'] + Irr.qsunNE[1] * hp['glass']['NE']) * hp['g_value']
# with input NEN5060, glass and g_value, qsun can give a single result Qsolar
""" | true |
b7548ce3cdaa04ca2aaac4ca69db5a09cb685745 | Python | nikita199801/binary_tree | /main.py | UTF-8 | 877 | 3.578125 | 4 | [] | no_license | from bintree import BinTree
new_tree=BinTree()
while True:
choose = int(input("1. Ввести дерево \n"
"2. Вывести дерево\n"
"3. Вывести все возможные слова\n"
"0. Выход\n"
"Выберите пункт: "))
if choose == 1:
val=""
while val !=" ":
val=input("Введите узел: ")
if val == " ":
break
else:
new_tree.insert(val)
elif choose == 2:
new_tree.printTree(new_tree.getRoot())
elif choose == 3:
new_tree.printWrds(new_tree.getRoot())
new_tree.nullFlag(new_tree.getRoot())
elif choose == 0:
break
new_tree.printTree(new_tree.getRoot())
# a=["f","w","b","e","a","t","u","l","d","z","s","q"]
# b=["ф","а","в","б","е","ц","с"]
| true |
39afa0ca203edb0a22fd5ea80fdaa78391cf985e | Python | vr2262/framer | /bin/make_vertical.py | UTF-8 | 1,551 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""The command-line entry point for combining images into a single image."""
from framer import combine_vertical_from_iterable
from PIL import Image
import argparse
import os
def main():
    """Combine images vertically given file names on the command line."""
    parser = argparse.ArgumentParser(description='''Make a vertical image from
    a series of images. They better have the same dimensions...''')
    parser.add_argument('images', nargs='+',
                        help='''The file paths of the images to combine.''')
    parser.add_argument('-n', '--name',
                        help=""""The resultant image's file name. Defaults to
                        the name of the first image plus '_vertical'""")
    parser.add_argument('-d', '--delete', action='store_true',
                        help='Set this flag to delete the component images.')
    parser.add_argument('-t', '--type', choices=['png', 'jpg'], default='png',
                        help='The file type of the snapshots. ')
    args = parser.parse_args()
    result = combine_vertical_from_iterable(args.images)
    if args.delete:
        for image in args.images:
            os.remove(image)
    # args.type can never be None (argparse supplies the 'png' default),
    # so the old fallback to Image.open(args.images[0]).format was
    # unreachable dead code and has been removed.
    fmt = args.type
    name = args.name if args.name is not None \
        else os.path.splitext(args.images[0])[0] + \
        '_vertical.' + fmt
    # Fix: Pillow's save() looks up the format name in upper case and only
    # registers 'JPEG' (not 'JPG'), so format='jpg' raised KeyError.
    pil_format = 'JPEG' if fmt == 'jpg' else 'PNG'
    result.save(name, format=pil_format)
main()
| true |
d8bade4c3171359a3e2c7ee27b0c0ee2858e7e78 | Python | kxie8/cs1113 | /code/idris/examples.py | UTF-8 | 297 | 3.640625 | 4 | [] | no_license | # assignment commands
x = 6
y = 5
z = 5
# conditions
if (True):
x = 3
y = 6
else:
x = 7
z = 6
# { (x,3), (y,6), (z,5) }
if (x > 0 or y == 1):
x = 2
y = 4
else:
x = 1
z = 0
# iteration
times = 10
while (times > 0):
print("Hello")
times = (times - 1)
| true |
fd87533135530d7f7c1cb931869b9e283703dcde | Python | SophiaMVaughn/Pollution-CSC4996 | /Backend/main.py | UTF-8 | 6,360 | 2.765625 | 3 | [] | no_license | from scraperInterface import ScraperInterface
from parse import isArticleEvent
from parse import convertScrapedtoSent
from RNNBinary import readBinary
from officialComm import officialComment
from dateRegex import dateInfo
from textColors import bcolors
from Location import locationsInfo
from mongoengine import connect
from dateutil import parser
from datetime import date
# delete content in error log
errorLog = open("errorLog.txt","r+")
errorLog.truncate(0)
errorLog.close()
# delete text file holding crawled websites
crawlLog = open("crawlLog.txt","r+")
crawlLog.truncate(0)
crawlLog.close()
# delete text file holding article urls scraped
scrapeLog = open("scrapeLog.txt", "r+")
scrapeLog.truncate(0)
scrapeLog.close()
#################### Article scraping ###########################
# set the keywords to use in crawler
keywords = ["pollution", "contamination", "spill"]
# CHANGE THIS TO FALSE AFTER THE FIRST RUN OF THE PROGRAM
isInitialCrawl = True
# instantiate ScraperInterface object, passing the keywords list, setting a search page limit of 10,
# and setting the json file to pull websites/website attributes from to website.json
scraper = ScraperInterface(keywords=keywords, searchPageLimit=10, websitesJsonFile="websites.json",
isInitialCrawl=isInitialCrawl)
print("\n" + bcolors.OKGREEN + "[+] " + str(scraper.getArticleCount()) + " articles scraped" + bcolors.ENDC)
# array to hold article titles
articleTitles = []
# loop through list of article dictionary objects, each dictionary holding scraped values of a
# particular article (title, date, body) and append each article title to articleTitles list
for article in scraper.getScrapedArticles():
articleTitles.append(article['title'])
#################### NLP event recognition ###########################
# list of articles about contamination events
confirmedEventArticles = []
# counter to track number of contamination event articles
confirmedEventCount = 0
count = 0
print("\nParsing event articles")
print("-----------------------")
# for every article found
for article in scraper.getScrapedArticles():
count = count + 1
# if it is determined to be an event
if isArticleEvent(article):
# insert article in the Articles collection
scraper.storeInArticlesCollection(article)
confirmedEventArticles.append(article)
confirmedEventCount = confirmedEventCount + 1
print(bcolors.OKGREEN + "[+] (" + str(count) + "/" + str(len(scraper.getScrapedArticles()))
+ ") " + article['title'] + bcolors.ENDC)
else:
print(bcolors.FAIL + "[-] (" + str(count) + "/" + str(len(scraper.getScrapedArticles()))
+ ") " + article['title'] + bcolors.ENDC)
# deallocated memory taken up by the list of dictionaries for scraped article
scraper.delScrapedArticlesList()
print(bcolors.OKGREEN + "\n[+] " + str(confirmedEventCount) + " event articles found" + bcolors.ENDC)
print("\nRunning NLP analysis")
print("-------------------------")
# open weekly log file to hold events inserted into Incidents collection
count = 0
weeklyRunLogs = open('weeklyRunLogs.txt', 'a+')
# setting and writing the date of the run to the log file
today = date.today()
weeklyRunLogs.write("\n************ " + str(today) + " ************\n\n")
# write the number of incidents retrieved to the log file
weeklyRunLogs.write("Incidents retrieved: " + str(len(confirmedEventArticles)) + "\n\n")
####################### NLP event attributes extraction ########################
# for each confirmed contamination event article
for article in confirmedEventArticles:
count = count + 1
print("\n" + bcolors.OKGREEN + "[+] (" + str(count) + "/" + str(len(confirmedEventArticles)) + ") "
+ article['title'] + bcolors.ENDC)
# parse the body into paragraphs
body = convertScrapedtoSent(article['body'])
# retrieve chemicals from the body
chems = readBinary(body)
# For getting location information
locations = locationsInfo(body)
# for getting official statement
offComm = officialComment(body)
# for pulling date information
dates = dateInfo(body)
# if no date was found
if len(dates) == 0:
# use the publicshiing date of the article
date = article['publishingDate']
else:
date = dates[0]
try:
# attempt to format the date
d = parser.parse(date)
date = d.strftime("%m/%d/%Y")
# if it failed, use the publishing date
except:
date = article['publishingDate']
# if there is not an official comment found
if len(offComm) is None:
offComm = ""
articleLinks = []
articleLinks.append(article['url'])
error = False
# remove bad locations
if len(locations) == 0: # no locations found
location = ""
# some locations found
else:
# for each location
for location in locations:
# if a location is a tuple (bad)
if(type(location) is tuple):
# remove the location
locations.remove(location)
continue
# if it is not a tuple
else:
# make that the location
location = locations[0]
break
# if the type is a tuple, it contains a good location somewhere in there, so find it and use it
if type(location) is tuple:
for t in location:
if (len(t) > 0):
location = t
break
# final level of error handling
try:
print("final location: "+location)
except:
location = ""
# store all attributes of the event (chemicals involved, location, date, official statement, and
# related article links) into Incidents collection
scraper.storeInIncidentsCollection(chems, date, location, offComm, articleLinks)
# insert event ant and it's attributes into the weekly log file
weeklyRunLogs.write("Event #" + str(count) + " - ")
weeklyRunLogs.write("Date: " + str(date) + "; ")
weeklyRunLogs.write("Location: " + str(location) + "; ")
weeklyRunLogs.write("Chems: " + str(chems) + "; ")
weeklyRunLogs.write("Article Links: " + str(articleLinks) + "\n")
# close weekly log file
weeklyRunLogs.write("\nRun complete\n")
weeklyRunLogs.close()
| true |
b05cf969f1e67d620d8240031274fb9aaee82681 | Python | mattesko/torch-toolkit | /torch-toolkit/datasets.py | UTF-8 | 5,202 | 3.03125 | 3 | [
"MIT"
] | permissive | import random
import PIL
from PIL import Image
import pydicom
import numpy as np
import os
import torch
from torch.utils.data import Dataset
class ClassificationDataset(Dataset):
"""Simple input, target classification"""
def __init__(self, X, y, transform=None):
"""
Arguments:
X (sequence): the training examples
y (sequence): the training targets
transform (callable, optional): transform applied on training
examples
"""
assert len(X) == len(y), f"Mismatch in number of instances X ({len(X)}) and y ({len(y)})"
self.X = X
self.y = y
self.transform = transform
def __getitem__(self, key):
x, y = self.X[key], self.y[key]
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.X)
class Segmentation2DDataset(Dataset):
    """
    Dataset for segmentation tasks.

    Supports 2D DICOM format (.dcm) and common image formats (.png, .jpg),
    as well as pre-loaded in-memory arrays/tensors.
    """
    def __init__(self, image_pairs, input_transform=None,
                 mask_transform=None, input_image_handler=None,
                 mask_image_handler=None, cache=False):
        """
        Arguments:
            image_pairs (sequence): sequence of (input, mask) image pairs. Can
                either be pairs of filepaths for the images or pairs of
                in-memory images (np.ndarray / torch.Tensor)
            input_transform (callable, optional): the transform to be applied
                on input images
            mask_transform (callable, optional): the transform to be applied
                on target/mask images
            input_image_handler (callable, optional): the handler to open
                input images. By default, the input file's extension is
                used to select the appropriate handler
            mask_image_handler (callable, optional): the handler to open
                mask images. By default, the mask file's extension is
                used to select the appropriate handler
            cache (bool, optional): if True, will load all images to memory
                if image pairs are filepaths. By default will load the images
                lazily
        """
        assert len(image_pairs) > 0, \
            "Expected non empty sequence for input target pairs"
        # BUG FIX: the original tested `image_pairs[0]` (the pair itself,
        # usually a tuple) against ndarray/Tensor, so a list of
        # (array, array) tuples was misclassified as file paths. Inspect
        # the first image of the first pair instead; this also still works
        # when image_pairs is a stacked (N, 2, ...) array.
        first_input = image_pairs[0][0]
        if isinstance(first_input, (np.ndarray, torch.Tensor)):
            self.are_image_paths = False
        else:
            self.are_image_paths = True
        self.image_pairs = image_pairs
        self.input_transform = input_transform
        self.mask_transform = mask_transform
        self.input_image_handler = input_image_handler
        self.mask_image_handler = mask_image_handler
        self.cache = cache
        # NOTE(review): a single seed drawn at construction means every
        # __getitem__ call re-seeds the RNGs identically, so any *random*
        # transforms produce the same augmentation for every item and
        # every epoch. Confirm this determinism is intended.
        self.seed = np.random.randint(2147483647)
        if cache and self.are_image_paths:
            self._cache_segmentation_pairs()
    def _cache_segmentation_pairs(self):
        """Load all input and mask images to memory as arrays."""
        self.cached_segmentation_pairs = []
        for input_image_fp, mask_image_fp in self.image_pairs:
            # BUG FIX: previously passed the undefined name `input_image`
            # to the loader instead of `input_image_fp` (NameError).
            input_image = self._load_image_array(input_image_fp,
                                                 self.input_image_handler)
            mask_image = self._load_image_array(mask_image_fp,
                                                self.mask_image_handler)
            self.cached_segmentation_pairs.append((input_image, mask_image))
    def _load_image_array(self, image_fp, handler=None):
        """Load the image at `image_fp` as a numpy array.

        An explicit `handler` takes precedence; otherwise the file
        extension selects pydicom for .dcm and PIL for everything else.
        """
        if handler:
            return handler(image_fp)
        _, file_extension = os.path.splitext(image_fp)
        if file_extension == ".dcm":
            dicom_obj = pydicom.dcmread(image_fp)
            image_array = dicom_obj.pixel_array
        else:
            # BUG FIX: Image.open(...).load() returns a PixelAccess object,
            # which numpy cannot turn into a pixel array (it yields a 0-d
            # object array). Converting the PIL Image itself gives the
            # expected (H, W[, C]) array.
            image_array = np.array(Image.open(image_fp))
        return image_array
    def __getitem__(self, key):
        """Return the (input, mask) pair at index `key`, transformed."""
        if self.cache and self.are_image_paths:
            input_image, mask_image = self.cached_segmentation_pairs[key]
        elif not self.cache and self.are_image_paths:
            input_image_fp, mask_image_fp = self.image_pairs[key]
            input_image = self._load_image_array(input_image_fp,
                                                 self.input_image_handler)
            mask_image = self._load_image_array(mask_image_fp,
                                                self.mask_image_handler)
        else:
            input_image, mask_image = self.image_pairs[key]
        if self.input_transform and self.mask_transform:
            # Need to use the same seed for the random package, so that any
            # random properties for both input and target transforms are the same
            random.seed(self.seed)
            torch.manual_seed(self.seed)
            input_image = self.input_transform(input_image)
            mask_image = self.mask_transform(mask_image)
        elif self.input_transform:
            input_image = self.input_transform(input_image)
        elif self.mask_transform:
            mask_image = self.mask_transform(mask_image)
        return input_image, mask_image
    def __len__(self):
        return len(self.image_pairs)
| true |
f84430104cff4f29ab9310bca5ba411a36c40363 | Python | memray/springleaf | /ruimeng/FeatureSelection.py | UTF-8 | 3,055 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from sklearn.svm import LinearSVC
__author__ = 'Memray'
import pandas as pd
import numpy as np
import urllib
import time
from sklearn import datasets
import pandas as pd
from pandas import Series
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFECV, SelectKBest, chi2, SelectFromModel
def read_data(data_path):
    """Load a CSV file into a pandas DataFrame, echoing progress.

    Prints a status message, then the frame's shape and a column summary
    (via ``DataFrame.info``) before returning the loaded frame.

    Arguments:
        data_path (str): path to the CSV file to read.

    Returns:
        pandas.DataFrame: the parsed contents of the file.
    """
    print('Reading CSV...')
    frame = pd.read_csv(data_path)
    print('')
    print(frame.shape)
    frame.info()
    return frame
class RandomForestClassifierWithCoef(RandomForestClassifier):
    """RandomForestClassifier that exposes feature importances as ``coef_``.

    Coefficient-based selectors (e.g. RFECV) expect the wrapped estimator
    to provide a ``coef_`` attribute after fitting; this subclass mirrors
    ``feature_importances_`` into ``coef_`` once ``fit`` completes.
    """
    def fit(self, *args, **kwargs):
        """Fit the forest, then mirror ``feature_importances_`` into ``coef_``.

        Returns:
            self: per the scikit-learn estimator convention. (BUG FIX: the
            original override returned None, breaking ``fit(...).predict``
            style chaining that the base class supports.)
        """
        super(RandomForestClassifierWithCoef, self).fit(*args, **kwargs)
        self.coef_ = self.feature_importances_
        return self
# --- Script: L1-regularised feature selection on the Springleaf data ---
path = 'H:\\Dropbox\\DM\\DataOnFeatureSelect\\'
filename = 'train_65.csv'
new_filename = 'train_65_reduced.csv'

time_reading_start = time.time()
data = read_data(path + filename)
time_reading_end = time.time()
print('Reading time is: {0}'.format(time_reading_end - time_reading_start))

# filling missing values: fillna
data = data.fillna(0)

# get X and y seperately (all columns but the last are features)
column_list = data.columns[:-1]
# NOTE(review): .loc[1:] drops the first data row (index 0) — confirm this
# is intentional and not a mistaken attempt to skip the header row, which
# read_csv already consumed.
X = pd.DataFrame(data.loc[1:, column_list])
print('Size of X:{0}'.format(X.shape))
y = (pd.Series(data.target, name='target'))[1:].astype(int)
print('Size of y:{0}'.format(y.shape))

########### RandomForest failed as the poor performance ##########
# rf = RandomForestClassifierWithCoef(n_estimators=1000, max_features=30, min_samples_leaf=5, n_jobs=-1)
# rfecv = RFECV(estimator=rf, step=1, cv=2, scoring='roc_auc', verbose=2)
# selector=rfecv.fit(x, y)
########### Chi-squared failed as the requirement of positive X value. Also f_classif is not feasible as input matrix must be dense ##########
# X_new = SelectKBest(chi2, k=100).fit_transform(X, y)

# L1-based feature selection: a sparse LinearSVC zeroes out weak features,
# and SelectFromModel keeps only the columns with non-zero coefficients.
time_fs_start = time.time()
lsvc = LinearSVC(C=0.01, penalty="l1", dual=False).fit(X, y)
model = SelectFromModel(lsvc, prefit=True)
X_new = model.transform(X)
time_fs_end = time.time()

# BUG FIX: the original used Python-2-only tuple-parameter lambdas
# (`lambda (i, x): ...`), a SyntaxError under Python 3. Indexing the
# column Index with the selector's boolean support mask is equivalent.
reduced_column = column_list[model.get_support()]
X_new_df = pd.DataFrame(X_new, columns=reduced_column)
# BUG FIX: X_new_df has a fresh 0..n-1 index while y kept index 1..n from
# the earlier `[1:]` slice; joining them directly misaligns every row
# (NaN target in row 0, last label dropped). Realign y first.
data_new = X_new_df.join(y.reset_index(drop=True))
# Note: to_csv writes the row index as an extra first column.
data_new.to_csv(path + new_filename)