Dataset schema (one row per source file):

| Column | Type | Lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
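The records below follow this schema, one per source file. As a quick illustration of how such a dump can be consumed, a minimal sketch, assuming the rows have been exported to a local Parquet file (the path `data/train.parquet` is hypothetical, not given above):

```python
import pandas as pd

# Hypothetical path; the actual file behind this viewer dump is not named above.
df = pd.read_parquet("data/train.parquet")

# Keep permissively licensed, non-vendored, non-generated files.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
for row in df[mask].itertuples():
    print(row.repo_name, row.path, row.length_bytes)
```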
blob_id: e23b1a85f77241d1b76ad020f4ee46f671128a04 | directory_id: c66ad1061400c5830c5236b3a9233aa62176f80d
path: /program5/floater.py | content_id: 52bcad1f83ee12188401b3ef43c0d3408e3ef6f2
detected_licenses: [] | license_type: no_license | repo_name: franchescaleung/ChasingPrey
snapshot_id: cff6b9a11f49908f31eff7ba6cf3780abc6674fd | revision_id: 54900366b3ce73d26a10646bb3adac111261f34f | branch_name: refs/heads/master
visit_date: 2020-04-25T18:53:29.063109 | revision_date: 2019-02-27T22:28:08 | committer_date: 2019-02-27T22:28:08
github_id: 172999825 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1265 | extension: py
content:
# A Floater is Prey; it updates by moving mostly in
# a straight line, but with random changes to its
# angle and speed, and displays as ufo.gif (whose
# dimensions (width and height) are computed by
# calling .width()/.height() on the PhotoImage
from PIL.ImageTk import PhotoImage
from prey import Prey
import random
import math


class Floater(Prey):
    def __init__(self, x, y):
        self._image = PhotoImage(file='ufo.gif')
        Prey.__init__(self, x, y, self._image.width(), self._image.height(),
                      2 * math.pi * random.random(), 5)

    def update(self, model):
        # On 3 of every 10 updates, perturb the angle and speed by a
        # random signed amount in [-0.5, +0.5].
        if random.randint(1, 10) <= 3:
            sign1 = random.choice('+-')
            sign2 = random.choice('+-')
            changedby1 = int(sign1 + str(random.randint(0, 5))) * .1
            changedby2 = int(sign2 + str(random.randint(0, 5))) * .1
            # Only accept a speed change that keeps the speed within [3, 7].
            if 3 <= self.get_speed() + changedby1 <= 7:
                self.set_speed(self.get_speed() + changedby1)
            self.set_angle(self.get_angle() + changedby2)
        self.move()

    def display(self, the_canvas):
        the_canvas.create_image(*self.get_location(), image=self._image)
authors: ["noreply@github.com"] | author_id: franchescaleung.noreply@github.com

blob_id: 4d303baeb39ff6d713494dce4563ee0596a7aacc | directory_id: bd8532378ad2a61240faaa7be8ef44c60c055a2a
path: /rabona/data/leagues/Österreichische Fußball-Bundesliga/SCR Altach/SCR Altach.py | content_id: a4e6c68a03b8e7c93421d8a36b634dbb7c39ef21
detected_licenses: [] | license_type: no_license | repo_name: nosoyyo/rabona
snapshot_id: 278a9dfe158e342261343b211fb39b911e993803 | revision_id: b0af3ab5806675fbf81b038633a74943118a67bb | branch_name: refs/heads/master
visit_date: 2020-03-16T06:56:55.277293 | revision_date: 2018-05-30T11:45:51 | committer_date: 2018-05-30T11:45:51
github_id: 132565989 | star_events_count: 2 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: 2018-05-30T11:45:52 | gha_created_at: 2018-05-08T06:44:11 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9816 | extension: py
content:
club_info = {'club_url': 'https://www.futbin.com///18/leagues/%C3%96sterreichische%20Fu%C3%9Fball-Bundesliga?page=1&club=15009', 'club_logo': 'https://cdn.futbin.com/content/fifa18/img/clubs/15009.png', 'club_name': 'SCR Altach'}
players = {}
players['Lukse'] = {'player_url': 'https://www.futbin.com//18/player/4636/Andreas Lukse', 'player_name': 'Andreas Lukse', 'player_rating': '70', 'player_shortname': 'Lukse', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/181733.png?v=2'}
players['Lienhart'] = {'player_url': 'https://www.futbin.com//18/player/6207/Andreas Lienhart', 'player_name': 'Andreas Lienhart', 'player_rating': '68', 'player_shortname': 'Lienhart', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/173480.png?v=2'}
players['Netzer'] = {'player_url': 'https://www.futbin.com//18/player/6145/Philipp Netzer', 'player_name': 'Philipp Netzer', 'player_rating': '68', 'player_shortname': 'Netzer', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/152107.png?v=2'}
players['Zech'] = {'player_url': 'https://www.futbin.com//18/player/6785/Benedikt Zech', 'player_name': 'Benedikt Zech', 'player_rating': '68', 'player_shortname': 'Zech', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/223426.png?v=2'}
players['Kobras'] = {'player_url': 'https://www.futbin.com//18/player/8035/Martin Kobras', 'player_name': 'Martin Kobras', 'player_rating': '66', 'player_shortname': 'Kobras', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/183646.png?v=2'}
players['Prokopič'] = {'player_url': 'https://www.futbin.com//18/player/8036/Boris Prokopič', 'player_name': 'Boris Prokopič', 'player_rating': '66', 'player_shortname': 'Prokopič', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/183665.png?v=2'}
players['Ngwat Mahop'] = {'player_url': 'https://www.futbin.com//18/player/8084/Ngwat Mahop', 'player_name': 'Ngwat Mahop', 'player_rating': '66', 'player_shortname': 'Ngwat Mahop', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/103.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/188818.png?v=2'}
players['Meilinger'] = {'player_url': 'https://www.futbin.com//18/player/16775/Marco Meilinger', 'player_name': 'Marco Meilinger', 'player_rating': '66', 'player_shortname': 'Meilinger', 'player_position': 'RM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/199470.png?v=2'}
players['Piesinger'] = {'player_url': 'https://www.futbin.com//18/player/8402/Simon Piesinger', 'player_name': 'Simon Piesinger', 'player_rating': '66', 'player_shortname': 'Piesinger', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/210482.png?v=2'}
players['Nutz'] = {'player_url': 'https://www.futbin.com//18/player/8578/Stefan Nutz', 'player_name': 'Stefan Nutz', 'player_rating': '66', 'player_shortname': 'Nutz', 'player_position': 'CAM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/216184.png?v=2'}
players['Honsak'] = {'player_url': 'https://www.futbin.com//18/player/9611/Mathias Honsak', 'player_name': 'Mathias Honsak', 'player_rating': '65', 'player_shortname': 'Honsak', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/232156.png?v=2'}
players['Salomon'] = {'player_url': 'https://www.futbin.com//18/player/9122/Patrick Salomon', 'player_name': 'Patrick Salomon', 'player_rating': '65', 'player_shortname': 'Salomon', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/199197.png?v=2'}
players['Zwischenbrugger'] = {'player_url': 'https://www.futbin.com//18/player/9432/Jan Zwischenbrugger', 'player_name': 'Jan Zwischenbrugger', 'player_rating': '65', 'player_shortname': 'Zwischenbrugger', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/221768.png?v=2'}
players['Dobras'] = {'player_url': 'https://www.futbin.com//18/player/10187/Kristijan Dobras', 'player_name': 'Kristijan Dobras', 'player_rating': '64', 'player_shortname': 'Dobras', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/210490.png?v=2'}
players['Tekpetey'] = {'player_url': 'https://www.futbin.com//18/player/11371/Bernard Tekpetey', 'player_name': 'Bernard Tekpetey', 'player_rating': '63', 'player_shortname': 'Tekpetey', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/117.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/234787.png?v=2'}
players['Janeczek'] = {'player_url': 'https://www.futbin.com//18/player/10906/Bernhard Janeczek', 'player_name': 'Bernhard Janeczek', 'player_rating': '63', 'player_shortname': 'Janeczek', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/199654.png?v=2'}
players['Aigner'] = {'player_url': 'https://www.futbin.com//18/player/10750/Johannes Aigner', 'player_name': 'Johannes Aigner', 'player_rating': '63', 'player_shortname': 'Aigner', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/152116.png?v=2'}
players['Schreiner'] = {'player_url': 'https://www.futbin.com//18/player/10838/Emanuel Schreiner', 'player_name': 'Emanuel Schreiner', 'player_rating': '63', 'player_shortname': 'Schreiner', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/189237.png?v=2'}
players['Gebauer'] = {'player_url': 'https://www.futbin.com//18/player/12140/Christian Gebauer', 'player_name': 'Christian Gebauer', 'player_rating': '62', 'player_shortname': 'Gebauer', 'player_position': 'RM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/239357.png?v=2'}
players['Sakic'] = {'player_url': 'https://www.futbin.com//18/player/12595/Emanuel Sakic', 'player_name': 'Emanuel Sakic', 'player_rating': '61', 'player_shortname': 'Sakic', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/235420.png?v=2'}
players['Dmitrovic'] = {'player_url': 'https://www.futbin.com//18/player/13595/Filip Dmitrovic', 'player_name': 'Filip Dmitrovic', 'player_rating': '59', 'player_shortname': 'Dmitrovic', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/51.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/239089.png?v=2'}
players['Grbic'] = {'player_url': 'https://www.futbin.com//18/player/13605/Adrian Grbic', 'player_name': 'Adrian Grbic', 'player_rating': '59', 'player_shortname': 'Grbic', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/239432.png?v=2'}
players['Müller'] = {'player_url': 'https://www.futbin.com//18/player/14107/Valentino Müller', 'player_name': 'Valentino Müller', 'player_rating': '57', 'player_shortname': 'Müller', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/231840.png?v=2'}
players['Ozegovic'] = {'player_url': 'https://www.futbin.com//18/player/14830/Benjamin Ozegovic', 'player_name': 'Benjamin Ozegovic', 'player_rating': '54', 'player_shortname': 'Ozegovic', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/235421.png?v=2'}
players['Nussbaumer'] = {'player_url': 'https://www.futbin.com//18/player/14813/Daniel Nussbaumer', 'player_name': 'Daniel Nussbaumer', 'player_rating': '54', 'player_shortname': 'Nussbaumer', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/233740.png?v=2'}
players['Tartarotti'] = {'player_url': 'https://www.futbin.com//18/player/15044/Johannes Tartarotti', 'player_name': 'Johannes Tartarotti', 'player_rating': '53', 'player_shortname': 'Tartarotti', 'player_position': 'CAM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/239501.png?v=2'}
authors: ["oyyoson@gmail.com"] | author_id: oyyoson@gmail.com

blob_id: 45be7213aeff8794c613f2da74ff5b00f986c9a2 | directory_id: fc5fa0402dfecd408d1c1f78b7d1c041df9f0400
path: /dx/derivatives/blackscholes.py | content_id: 7fa49483c3d3b1026a6612c5a856d56dc635b99d
detected_licenses: ["MIT"] | license_type: permissive | repo_name: ViniciusRibeiroSouza/OptionsPricing
snapshot_id: 0cfe0e9c4a43d3f6226514b2d532c9c0bd51bcfe | revision_id: c63426e8b578ce59e3c0bbadf65481ff56c91fb2 | branch_name: refs/heads/main
visit_date: 2023-06-01T09:52:06.155514 | revision_date: 2021-06-08T03:08:28 | committer_date: 2021-06-08T03:08:28
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9039 | extension: py
content:
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
import pandas as pd


class BlackScholes:
    """
    Class used to store the data of a European option and to compute
    metrics for it using the Black-Scholes model.
    ...
    Properties
    ----------
    d1 : float
    d2 : float
        Probability of the option expiring in the money
    Methods
    -------
    get_user_input()
        Prompts the user for input to build an object with the given parameters
    """

    def __init__(self, asset_price, strike, maturity_time, risk_free_factor, sigma, option_type, time=0):
        self.asset_price = asset_price
        self.strike = strike
        self.maturity_time = maturity_time
        self.risk_free_factor = risk_free_factor
        self.sigma = sigma
        self.opt = option_type
        self.time = time

    @property
    def d1(self):
        return (np.log(self.asset_price / self.strike) + (self.risk_free_factor + 0.5 * self.sigma ** 2) *
                (self.maturity_time - self.time)) / (self.sigma * np.sqrt(self.maturity_time - self.time))

    @property
    def d2(self):
        return self.d1 - self.sigma * np.sqrt(self.maturity_time - self.time)

    def update(self, initial_time, stock_price, strike=None):
        if initial_time is not None:
            self.time = initial_time
        if stock_price is not None:
            self.asset_price = stock_price
        if strike is not None:
            self.strike = strike

    @property
    def price(self):
        """
        Returns the option price, computed from the solution of the
        Black-Scholes equation.
        """
        if self.opt == "eurocall":
            return self.asset_price * scipy.stats.norm.cdf(self.d1, 0.0, 1.0) - self.strike * np.exp(
                -self.risk_free_factor * (self.maturity_time - self.time)) * scipy.stats.norm.cdf(self.d2, 0.0, 1.0)
        elif self.opt == "europut":
            return (self.strike * np.exp(-self.risk_free_factor * (self.maturity_time - self.time)) *
                    scipy.stats.norm.cdf(-self.d2, 0.0, 1.0) - self.asset_price * scipy.stats.norm.cdf(
                        -self.d1, 0.0, 1.0))
        else:
            print("Invalid option type; use 'eurocall' for a call or 'europut' for a put")

    @property
    def delta(self):
        """
        Returns the option's delta, the derivative of the option price
        with respect to the stock price.
        """
        if self.opt == "eurocall":
            return scipy.stats.norm.cdf(self.d1, 0.0, 1.0)
        elif self.opt == "europut":
            return scipy.stats.norm.cdf(self.d1, 0.0, 1.0) - 1
        else:
            print("Invalid option type; use 'eurocall' for a call or 'europut' for a put")

    @property
    def gamma(self):
        """
        Returns the option's gamma, the second derivative of the option
        price with respect to the stock price (standard no-dividend
        Black-Scholes: N'(d1) / (S * sigma * sqrt(T - t))).
        """
        return scipy.stats.norm.pdf(self.d1) / (
                self.asset_price * self.sigma * np.sqrt(self.maturity_time - self.time))

    @property
    def vega(self):
        """
        Returns the option's vega, the derivative of the option price
        with respect to volatility (S * N'(d1) * sqrt(T - t)).
        """
        return scipy.stats.norm.pdf(self.d1) * self.asset_price * np.sqrt(self.maturity_time - self.time)

    def imp_vol(self, sigma0, actual_price, iter=100):
        # Todo: remove from here
        """
        Finds the root of BS_price(sigma) - actual_price using Newton's
        method, where the derivative with respect to volatility is the
        option's vega.
        """
        option = BlackScholes(self.asset_price, self.strike, self.maturity_time, self.risk_free_factor, sigma0,
                              self.opt, self.time)
        for i in range(iter):
            option.sigma = option.sigma - (option.price - actual_price) / option.vega
        return option.sigma

    def gmb_path(self, number_steps=100, seed=2, plot=False):
        np.random.seed(seed)
        n_steps = number_steps
        ts, dt = np.linspace(0, self.maturity_time, n_steps, retstep=True)
        asset_price_vector = np.zeros(n_steps)
        asset_price_vector[0] = self.asset_price
        normal_vector = np.random.normal(0, np.sqrt(dt), n_steps)
        cumulative_step_vector = np.cumsum(normal_vector)
        for i in range(1, n_steps):
            asset_price_vector[i] = self.asset_price * np.exp((self.risk_free_factor - 0.5 * self.sigma * self.sigma) *
                                                              ts[i - 1] + self.sigma * cumulative_step_vector[i])
        if plot:
            plt.plot(ts, cumulative_step_vector)
            plt.show()
        return asset_price_vector, ts

    def delta_hedging(self, number_steps, n_options=1, seed=2, *args):
        """
        Delta-hedging strategy for a sold call option.
        :param number_steps:
        :param n_options:
        :param seed:
        :param args:
        :return:
        """
        if not args:
            path, ts = self.gmb_path(number_steps=number_steps, seed=seed)
        else:
            path, ts = args
        stocks = np.zeros(len(path))
        deltas = np.zeros(len(path))
        delta_position = np.zeros(len(path))
        cash = np.zeros(len(path))
        cash[0] = self.price
        opt_price = np.zeros(len(path))
        ds_list = []
        for i in range(len(path)):
            opt_price[i] = self.price
            deltas[i] = round(self.delta, 3)
            delta_position[i] = deltas[i] * n_options
            stocks[i] = - delta_position[i] / path[i]
            if i != 0:
                ds = round(stocks[i] - stocks[i - 1], 4)
                cash[i] += ds * path[i]
                ds_list.append(ds)
            elif i == 0:
                ds = round(stocks[i], 4)
                cash[i] += ds * path[i]
                ds_list.append(ds)
            self.update(initial_time=ts[i], stock_price=path[i])
        data = {"delta": deltas,
                "shares purchased": ds_list,
                "total_shares": stocks,
                "stock_price": path,
                "delta_position": delta_position}
        df = pd.DataFrame(data=data)
        results = np.array(ds_list) @ path
        return results, df

    def stop_loss(self, pct=0, number_steps=100, seed=2, plot=False, *args):
        """
        Strategy of buying when the price moves above the strike and
        selling when it is below.
        :param pct:
        :param number_steps:
        :param seed:
        :param plot:
        :param args:
        :return:
        """
        if not args:
            path, ts = self.gmb_path(number_steps=number_steps, seed=seed)
        else:
            path, ts = args
        n = len(path)
        cash = np.zeros(n)
        shares = 0
        buy_list = []
        sell_list = []
        results = None
        for i in range(len(path)):
            if (path[i] > (1 + pct / 100) * self.strike) and (shares == 0):
                shares += 1
                cash[i] -= path[i]
                buy_list.append([ts[i], path[i]])
            if (path[i] < (1 - pct / 100) * self.strike) and (shares == 1):
                shares -= 1
                cash[i] += path[i]
                sell_list.append([ts[i], path[i]])
            else:
                pass
        if (shares == 0) and (path[-1] >= self.strike):
            # if the final price is above the strike, the option is exercised and the
            # call writer must buy the asset at its price at the end of the path
            results = cash[-1] + (-path[-1] + self.strike)
            buy_list.append([ts[-1], path[-1]])
        if (shares == 1) and (path[-1] >= self.strike):
            # if the final price is above the strike, the option is exercised and the
            # call writer need not buy the asset, since they already hold it
            results = cash[-1] - self.strike
        if path[-1] < self.strike:  # if the final price is below the strike the option is not exercised
            results = cash[-1]
        buy_list = np.array(buy_list)
        sell_list = np.array(sell_list)
        if plot:
            plt.figure(figsize=(15, 6))
            plt.plot(ts, path)
            plt.plot(buy_list[:, 0], buy_list[:, 1], linestyle="None", marker="o", color="green", label="Buy")
            plt.plot(sell_list[:, 0], sell_list[:, 1], linestyle="None", marker="o", color="red", label="Sell")
            plt.plot(ts, [self.strike] * len(ts), linestyle="dashed", label="Strike")
            if pct != 0:
                plt.plot(ts, [self.strike * (1 + pct / 100)] * len(ts), linestyle="dotted", label="Safety margin up")
                plt.plot(ts, [self.strike * (1 - pct / 100)] * len(ts), linestyle="dotted", label="Safety margin down")
            plt.legend()
            plt.show()
        return results
authors: ["vinirsz@hotmail.com"] | author_id: vinirsz@hotmail.com
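As a quick usage illustration for the `BlackScholes` class in the record above, a minimal sketch (assuming the content is saved as an importable module `blackscholes`; the numbers are illustrative, not from the repo):

```python
# Hypothetical module name: save the content above as blackscholes.py first.
from blackscholes import BlackScholes

# A 1-year European call: S=100, K=105, r=5%, sigma=20%.
call = BlackScholes(asset_price=100, strike=105, maturity_time=1.0,
                    risk_free_factor=0.05, sigma=0.2, option_type="eurocall")

print(call.price)              # Black-Scholes premium
print(call.delta)              # hedge ratio, in (0, 1) for a call
print(call.gamma, call.vega)   # curvature and volatility sensitivity

# Simulate a GBM path and run the delta-hedging experiment on it.
pnl, ledger = call.delta_hedging(number_steps=100)
print(ledger.head())
```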
blob_id: 84873498412aa1c1e0b0640b4262e1d6fa21347b | directory_id: 5c39e166d0c6ec68f71c4c7627956fdd6f28c15e
path: /andrew/stats/exercise_2_20.py | content_id: f4f14361cc2878dbff00bff79fdeb83b94c188a1
detected_licenses: [] | license_type: no_license | repo_name: zero-one-group/zot-internship
snapshot_id: a8ae21029086188f4c8ca348c50e445a86a01179 | revision_id: 9bbc421252b42f1bc85de66d22f48266c59113f0 | branch_name: refs/heads/master
visit_date: 2023-01-12T07:48:39.766136 | revision_date: 2020-10-15T09:03:24 | committer_date: 2020-10-15T09:03:24
github_id: 279190490 | star_events_count: 3 | fork_events_count: 3 | gha_license_id: null | gha_event_created_at: 2020-11-08T21:30:30 | gha_created_at: 2020-07-13T02:32:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1961 | extension: py
content:
'''
In each of the following cases, construct an Accept-Reject algorithm,
generate a sample of the corresponding random variables, and draw the density function
on top of the histogram.
a. Generate normal random variables using a Cauchy candidate in Accept-Reject.
b. Generate gamma G(4.3, 6.2) random variables using a gamma G(4, 7) candidate.
'''
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats


def simulate_normal_with_cauchy():
    grid = np.linspace(-5, 5, 1000)
    candidate = np.random.standard_cauchy()
    g = stats.cauchy.pdf
    f = stats.norm.pdf
    M = np.max(f(grid) / g(grid))
    accept = np.random.random() < (f(candidate) / (M * g(candidate)))
    if accept:
        return candidate
    else:
        return simulate_normal_with_cauchy()


def simulate_gamma_with_gamma():
    # Shape-scale parameterization is assumed: the G(4, 7) candidate then has
    # a heavier tail than the G(4.3, 6.2) target, so f/g is bounded and the
    # bound M below is finite.
    grid = np.linspace(0.01, 200, 2000)
    candidate = np.random.gamma(4, 7)
    def g(x): return stats.gamma.pdf(x, a=4, scale=7)
    def f(x): return stats.gamma.pdf(x, a=4.3, scale=6.2)
    M = np.max(f(grid) / g(grid))
    accept = np.random.random() < (f(candidate) / (M * g(candidate)))
    if accept:
        return candidate
    else:
        return simulate_gamma_with_gamma()


simulated_normal = [simulate_normal_with_cauchy() for _ in range(int(1e4))]
plot_grid = np.linspace(np.min(simulated_normal), np.max(simulated_normal), 100)
plt.figure(0)
plt.hist(simulated_normal, bins=80, density=True, label='Simulated normal')
plt.plot(plot_grid, stats.norm.pdf(plot_grid), label='Normal distribution')
plt.legend()
plt.savefig('cauchy_candidate.png')

simulated_gamma = [simulate_gamma_with_gamma() for _ in range(int(1e4))]
plot_grid = np.linspace(np.min(simulated_gamma), np.max(simulated_gamma), 100)
plt.figure(1)
plt.hist(simulated_gamma, bins=80, density=True, label='Simulated gamma G(4.3, 6.2)')
plt.plot(plot_grid, stats.gamma.pdf(plot_grid, a=4.3, scale=6.2), label='Gamma distribution G(4.3, 6.2)')
plt.legend()
plt.savefig('gamma_candidate.png')
plt.show()
authors: ["AHW@macbook-pro-2.mynet"] | author_id: AHW@macbook-pro-2.mynet
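For part (b) of the exercise above, the accept-reject bound M and the implied acceptance rate can be checked numerically; a short sketch under the same shape-scale assumption as the code:

```python
import numpy as np
from scipy import stats

# Ratio f/g of the G(4.3, 6.2) target to the G(4, 7) candidate (shape-scale).
grid = np.linspace(0.01, 200, 2000)
ratio = stats.gamma.pdf(grid, a=4.3, scale=6.2) / stats.gamma.pdf(grid, a=4, scale=7)
M = ratio.max()
print(M, 1 / M)  # bound M (about 1.1) and the theoretical acceptance rate 1/M
```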
blob_id: 9f821f3895798789551e2f22a22e3636952ec599 | directory_id: 76e8ebacd049766f2c7b550a693e6e5352eb1d4e
path: /polls/tests.py | content_id: 9e0e6dad94b6a07d0d4780cf3e16668b93e3b38d
detected_licenses: [] | license_type: no_license | repo_name: ericzenger/hw4
snapshot_id: e35adaa6f89c8f160d54c05908b016836f6047b8 | revision_id: bef308572fdd22c34a9c010e082df2a4faed41e0 | branch_name: refs/heads/master
visit_date: 2021-01-10T12:39:18.323318 | revision_date: 2016-03-09T03:11:49 | committer_date: 2016-03-09T03:11:49
github_id: 53463654 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 10452 | extension: py
content:
import datetime

from django.utils import timezone
from django.test import TestCase
from polls.models import Question
from polls.views import QuestionModelForm
from django.core.urlresolvers import reverse, resolve
from django.contrib.auth.models import User
from django.db import DataError
from django.core.exceptions import ValidationError
import validators


def create_question(question_text, days):
    """
    Creates a question with the given `question_text` published the given
    number of `days` offset to now (negative for questions published
    in the past, positive for questions that have yet to be published).
    """
    time = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text,
                                   pub_date=time)


class QuestionIndexDetailTests(TestCase):
    def test_detail_view_with_a_future_question(self):
        """
        The detail view of a question with a pub_date in the future should
        return a 404 not found.
        """
        future_question = create_question(question_text='Future question.',
                                          days=5)
        response = self.client.get(reverse('polls:detail',
                                           args=(future_question.id,)))
        self.assertEqual(response.status_code, 404)

    def test_detail_view_with_a_past_question(self):
        """
        The detail view of a question with a pub_date in the past should
        display the question's text.
        """
        past_question = create_question(question_text='Past Question.',
                                        days=-5)
        response = self.client.get(reverse('polls:detail',
                                           args=(past_question.id,)))
        self.assertContains(response, past_question.question_text,
                            status_code=200)


class QuestionMethodTests(TestCase):
    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() should return False for questions whose
        pub_date is in the future
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date=time)
        self.assertEqual(future_question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() should return False for questions whose
        pub_date is older than 1 day.
        """
        time = timezone.now() - datetime.timedelta(days=30)
        old_question = Question(pub_date=time)
        self.assertEqual(old_question.was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() should return True for questions whose
        pub_date is within the last day.
        """
        time = timezone.now() - datetime.timedelta(hours=1)
        recent_question = Question(pub_date=time)
        self.assertEqual(recent_question.was_published_recently(), True)


class QuestionViewTests(TestCase):
    def test_index_view_with_no_questions(self):
        """
        If no questions exist, an appropriate message should be displayed.
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_index_view_with_a_past_question(self):
        """
        Questions with a pub_date in the past should be displayed on the
        index page
        """
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'],
                                 ['<Question: Past question.>'])

    def test_index_view_with_a_future_question(self):
        """
        Questions with a pub_date in the future should not be displayed on
        the index page.
        """
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No polls are available.", status_code=200)
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_index_view_with_future_question_and_past_question(self):
        """
        Even if both past and future questions exist, only past questions
        should be displayed.
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_index_view_with_two_past_questions(self):
        """
        The questions index page may display multiple questions.
        """
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['latest_question_list'],
                                 ['<Question: Past question 2.>', '<Question: Past question 1.>'])


class QuestionURLTests(TestCase):
    # See: https://docs.djangoproject.com/en/1.7/ref/urlresolvers/#resolve
    # for additional information on resolve()
    def test_url_index(self):
        """
        /polls/ should resolve to polls:index
        """
        resolver = resolve('/polls/')
        self.assertEqual(resolver.namespace, 'polls')
        self.assertEqual(resolver.view_name, 'polls:index')

    def test_url_detail(self):
        """
        /polls/1/ should resolve to polls:detail
        """
        resolver = resolve('/polls/1/')
        self.assertEqual(resolver.namespace, 'polls')
        self.assertEqual(resolver.view_name, 'polls:detail')

    def test_url_results(self):
        """
        /polls/1/results/ should resolve to polls:results
        """
        resolver = resolve('/polls/1/results/')
        self.assertEqual(resolver.namespace, 'polls')
        self.assertEqual(resolver.view_name, 'polls:results')

    def test_url_vote(self):
        """
        /polls/1/vote/ should resolve to polls:vote
        """
        resolver = resolve('/polls/1/vote/')
        self.assertEqual(resolver.namespace, 'polls')
        self.assertEqual(resolver.view_name, 'polls:vote')


class QuestionIndexDetailTests(TestCase):
    def test_detail_view_with_a_future_question(self):
        """
        The detail view of a question with a pub_date in the future should
        return a 404 not found.
        """
        future_question = create_question(question_text='Future question.', days=5)
        response = self.client.get(reverse('polls:detail', args=(future_question.id,)))
        self.assertEqual(response.status_code, 404)

    def test_detail_view_with_a_past_question(self):
        """
        The detail view of a question with a pub_date in the past should
        display the question's text.
        """
        past_question = create_question(question_text='Past Question.', days=-5)
        response = self.client.get(reverse('polls:detail', args=(past_question.id,)))
        self.assertContains(response, past_question.question_text, status_code=200)


class QuestionResultsTests(TestCase):
    def test_results_view_without_login(self):
        """
        The results view of a question should redirect (302) to the login page if the user is not
        logged in.
        """
        question = create_question(question_text='Not logged in', days=-1)
        response = self.client.get(reverse('polls:results', args=(question.id,)))
        self.assertEqual(response.status_code, 302)

    def test_detail_view_with_login(self):
        """
        The results view of a question should return the results if the user is
        logged in.
        """
        User.objects.create_superuser('fred', 'fred@fred.fred', 'secret')
        self.client.login(username='fred', password='secret')
        question = create_question(question_text='Loged in', days=-1)
        response = self.client.get(reverse('polls:results', args=(question.id,)))
        self.assertContains(response, 'Loged in', status_code=200)


class QuestionModelTests(TestCase):
    # When using PostgreSQL (via the psycopg2 DBI) max_length is enforced by
    # django.db so we can use a simple unit test for that.
    # This unit test will fail when using sqlite3 because it does not enforce
    # max_length.
    def test_question_text_max_length(self):
        """
        Should not allow question text longer than 200 characters
        """
        with self.assertRaises(DataError):
            question = create_question(question_text=u'a' * 201, days=-1)

    # validators are not enforced by django.db so we need to use an
    # integration test with a ModelForm.
    def test_pub_date_not_future(self):
        """
        Should not allow questions published in the future
        """
        # create an invalid model object
        question = create_question(question_text=u'a' * 200, days=1000)
        # load the invalid object into its corresponding ModelForm
        form = QuestionModelForm(instance=question)
        # assert that the form is not valid
        self.assertFalse(form.is_valid())


class ValidatorTests(TestCase):
    def test_not_future_fails(self):
        """Raise a ValidationError if the value is in the future.
        """
        value = timezone.now() + datetime.timedelta(days=30)
        with self.assertRaises(ValidationError):
            validators.not_future(value)

    def test_not_unauthorized_word(self):
        """Raise a ValidationError if the value is in the unauthorized word list.
        """
        value = 'chipmunk'
        with self.assertRaises(ValidationError):
            validators.not_unauthorized_word(value)
authors: ["1387508569@mil"] | author_id: 1387508569@mil

blob_id: ea7bcfbac2542ccd837ba0a41ce54695564d7b42 | directory_id: 74c9564073f2733eac214396c125ef990df32b36
path: /gstudio/admin/__init__.py | content_id: 5ec479ddbcc6e40f0c5b87a2d44756c1856520d4
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: suruchi/django-gstudio
snapshot_id: 0b6d8b8bdb29e16f431ca59f70620dd22c2843a5 | revision_id: 727eeefe475bb2fd1cf819bdab496f4369d1c991 | branch_name: refs/heads/master
visit_date: 2021-01-15T20:13:23.956721 | revision_date: 2011-11-09T06:30:39 | committer_date: 2011-11-09T06:30:39
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 329 | extension: py
content:
"""Admin of Gstudio"""
from django.contrib import admin
from gstudio.models import Objecttype
from gstudio.models import Metatype
from gstudio.admin.objecttype import ObjecttypeAdmin
from gstudio.admin.metatype import MetatypeAdmin
admin.site.register(Objecttype, ObjecttypeAdmin)
admin.site.register(Metatype, MetatypeAdmin)
authors: ["nagarjun@gnowledge.org"] | author_id: nagarjun@gnowledge.org

blob_id: 9abd6dd46453ed8b0436bf88eee68b31f84592c6 | directory_id: 009cee89a2f3f7733053e5b1d63f87524554758c
path: /graph/core/module_path.py | content_id: 82aa448dc1b92089ec466fd58e1c0b2737e7705b
detected_licenses: [] | license_type: no_license | repo_name: yosu/module-graph-prototyping
snapshot_id: 288af0fb86581b5a7dec0f5e11ddcbf070d72ad0 | revision_id: 765f97287aa346d0ca4b5588f4c907df5b8fec47 | branch_name: refs/heads/master
visit_date: 2022-12-01T16:25:52.759681 | revision_date: 2020-08-17T15:59:11 | committer_date: 2020-08-17T16:00:56
github_id: 288312504 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 647 | extension: py
content:
import dataclasses


@dataclasses.dataclass(frozen=True)
class ModulePath:
    name: str

    def __post_init__(self):
        # not empty
        assert self.name

    @property
    def path_level(self) -> int:
        return len(self.name.split("."))

    def __lt__(self, other: "ModulePath"):
        return self.name < other.name

    def belongs_to(self, other: "ModulePath") -> bool:
        return self.name.startswith(other.name)

    def limit_path_level(self, max_path_level: int) -> "ModulePath":
        assert max_path_level > 0
        new_name = ".".join(self.name.split(".")[:max_path_level])
        return ModulePath(new_name)
authors: ["woodstock830@gmail.com"] | author_id: woodstock830@gmail.com
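A small usage sketch for the `ModulePath` dataclass above (self-contained apart from the import path, which is assumed from the record's file location):

```python
from graph.core.module_path import ModulePath  # package layout assumed from the path above

a = ModulePath("graph.core.module_path")
b = ModulePath("graph.core")

print(a.path_level)           # 3
print(a.belongs_to(b))        # True: the name starts with "graph.core"
print(a.limit_path_level(2))  # ModulePath(name='graph.core')
```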
blob_id: 412de0e58abd888274b2b22e2be969e4cbd8de95 | directory_id: 3cb0441b8dfbe794373650e6ba808222583841db
path: /input_nilai.py | content_id: c5d76353116a10684794c19007bb1adb69bca72c
detected_licenses: [] | license_type: no_license | repo_name: RegaNugraha27/Latihan07
snapshot_id: 48b81a9f3aa7dc624ef9714917ca8aa5654e3a8f | revision_id: bb3745ad62a9be2aed7146b78094c738d53d6a77 | branch_name: refs/heads/master
visit_date: 2020-12-23T17:27:35.752760 | revision_date: 2020-01-30T14:09:13 | committer_date: 2020-01-30T14:09:13
github_id: 237218637 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 641 | extension: py
content:
from Model.Daftar_Nilai import tambah_data


def inputan():
    # Prompt for a student's name, ID number (NIM), and assignment,
    # midterm (UTS), and final exam (UAS) scores, then add the record.
    nama = input("NAMA : ")
    nim = int(input("NIM : "))
    tugas = int(input("TUGAS : "))
    uts = int(input("UTS : "))
    uas = int(input("UAS : "))
    tambah_data(nama, nim, tugas, uts, uas)


def edit():
    from Model.Daftar_Nilai import edit_data
    edit_data(nama=input("Masukan nama untuk edit data : "))  # "Enter a name to edit its data"


def delete():
    from Model.Daftar_Nilai import delete_data
    delete_data(nama=input("Masukan nama untuk menghapus data : "))  # "Enter a name to delete its data"


def cari():
    from Model.Daftar_Nilai import cari_data
    cari_data(nama=input("Masukan nama yang di cari : "))  # "Enter the name to search for"
authors: ["noreply@github.com"] | author_id: RegaNugraha27.noreply@github.com

blob_id: e56415a28830a6d542f79c0b3441f4ca549a1aa0 | directory_id: a592235c7d8e9aa7fc5cd4aeacdca3568c6ba7fc
path: /MZI/3410/Curve.py | content_id: 0d524b42a341782e3a9a33f572d4a14d3bcf9264
detected_licenses: [] | license_type: no_license | repo_name: BlackLIonz/BSUIR_4_1_labs
snapshot_id: e7a3f695e56ccf0db328377ca443830c95699a9e | revision_id: 644c5f9a2e466e8f831885a859ab849fd5d2062d | branch_name: refs/heads/master
visit_date: 2023-06-09T04:37:17.821259 | revision_date: 2019-12-31T14:12:59 | committer_date: 2019-12-31T14:12:59
github_id: 207292851 | star_events_count: 4 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: 2023-05-30T19:17:37 | gha_created_at: 2019-09-09T11:25:25 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1500 | extension: py
content:
from utils import bytes_to_long, mod_invert


class Curve:
    def __init__(self, p, q, a, b, x, y):
        self.p = bytes_to_long(p)
        self.q = bytes_to_long(q)
        self.a = bytes_to_long(a)
        self.b = bytes_to_long(b)
        self.x = bytes_to_long(x)
        self.y = bytes_to_long(y)
        # Check that (x, y) satisfies y^2 = x^3 + a*x + b (mod p).
        r1 = self.y * self.y % self.p
        r2 = ((self.x * self.x + self.a) * self.x + self.b) % self.p
        if r2 < 0:
            r2 += self.p
        if r1 != r2:
            raise ValueError("Invalid parameters")

    def _pos(self, v):
        if v < 0:
            return v + self.p
        return v

    def _add(self, p1x, p1y, p2x, p2y):
        if p1x == p2x and p1y == p2y:
            # point doubling: tangent slope
            t = ((3 * p1x * p1x + self.a) * mod_invert(2 * p1y, self.p)) % self.p
        else:
            # point addition: chord slope
            tx = self._pos(p2x - p1x) % self.p
            ty = self._pos(p2y - p1y) % self.p
            t = (ty * mod_invert(tx, self.p)) % self.p
        tx = self._pos(t * t - p1x - p2x) % self.p
        ty = self._pos(t * (p1x - tx) - p1y) % self.p
        return tx, ty

    def scalar_multiply(self, degree, x=None, y=None):
        x = x or self.x
        y = y or self.y
        tx = x
        ty = y
        degree -= 1
        if degree == 0:
            raise ValueError("Bad degree value")
        while degree != 0:
            if degree & 1 == 1:
                tx, ty = self._add(tx, ty, x, y)
            degree = degree >> 1
            x, y = self._add(x, y, x, y)
        return tx, ty
authors: ["shotaik@mail.ru"] | author_id: shotaik@mail.ru
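`Curve.scalar_multiply` above is a right-to-left double-and-add loop. A standalone sketch of the same idea with plain integers, on the toy curve y^2 = x^3 + 2x + 2 (mod 17), avoiding the repo's `bytes_to_long`/`mod_invert` helpers (the sketch does not handle adding a point to its negation):

```python
def point_add(p, a, p1, p2):
    """Add two affine points on y^2 = x^3 + a*x + b over GF(p)."""
    (x1, y1), (x2, y2) = p1, p2
    if p1 == p2:
        t = (3 * x1 * x1 + a) * pow(2 * y1, -1, p) % p  # tangent slope (doubling)
    else:
        t = (y2 - y1) * pow(x2 - x1, -1, p) % p         # chord slope (addition)
    x3 = (t * t - x1 - x2) % p
    y3 = (t * (x1 - x3) - y1) % p
    return x3, y3


def scalar_multiply(k, point, a, p):
    # Same double-and-add structure as Curve.scalar_multiply above.
    result = point
    k -= 1
    while k:
        if k & 1:
            result = point_add(p, a, result, point)
        k >>= 1
        point = point_add(p, a, point, point)
    return result


# (5, 1) lies on y^2 = x^3 + 2x + 2 (mod 17); doubling it gives (6, 3).
print(scalar_multiply(2, (5, 1), 2, 17))
```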
blob_id: 2aa1bc8cc9ff1a014b06338fba46998daac18e01 | directory_id: 621f2a1e2b78324df165fbbc950e0990f7c8447d
path: /my_env/bin/sqlformat | content_id: 0c36484be7884b21e29876bed597a7585db09137
detected_licenses: [] | license_type: no_license | repo_name: rsjf2020/nest4guest
snapshot_id: 0603f1a824e80e6b3abc734e21ae37069811f471 | revision_id: 392f72bfc0e46cbad72bd2d247199fb79c643009 | branch_name: refs/heads/main
visit_date: 2023-03-19T00:26:43.986877 | revision_date: 2021-03-17T07:45:40 | committer_date: 2021-03-17T07:45:40
github_id: 345608216 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-03-08T09:56:48 | gha_created_at: 2021-03-08T09:56:48 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 263 | extension: null
content:
#!/home/rabin/Desktop/djano/patahao/nest4guest/my_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys

from sqlparse.__main__ import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
authors: ["rabcom07@gmail.com"] | author_id: rabcom07@gmail.com

blob_id: 424ae5f73a6e092007b8050b4151a8491bb1babb | directory_id: 95b1296ebdf322de5e1486acc2e775f712584dbe
path: /day7/day7.py | content_id: 1f9a3851b1a8c075556681c0c3a93bca18c48359
detected_licenses: [] | license_type: no_license | repo_name: mjarrett/adventofcode2015
snapshot_id: 51407b4b4a0b27501a5b337ef50e3242c53578f6 | revision_id: 18210034dadb0f5999ca6690bb1d54adc1f17257 | branch_name: refs/heads/master
visit_date: 2021-06-11T11:39:59.916663 | revision_date: 2017-01-18T05:43:50 | committer_date: 2017-01-18T05:43:50
github_id: 48412064 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4335 | extension: py
content:
#!/usr/bin/python
#title :day7.py
#description :advent of code day7
#author :Mike Jarrett
#date :20151225
#version :1
#usage :python day7.py
#notes :
#python_version :2.6.6
#==============================================================================
import re

f = open('input.txt')
#f = open('test.txt')
wires = {}
connects = []
part2 = True

#this section loads all the lines into a list
for line in f:
    m = re.match(r'(\w+)\s(\w*)\s?(\w*)\s?->\s(\w+)', line)
    #print m.group(1), m.group(2), m.group(3), m.group(4)
    #print line
    #print m.group(1)
    #print m.group(2)
    #print m.group(3)
    #print m.group(4)
    if m.group(2) == '' and m.group(3) == '':
        #direct assignment
        try:
            int(m.group(1))
            connects.append(('start', m.group(1), '', '', m.group(4)))
        except:
            connects.append(('assign', m.group(1), '', '', m.group(4)))
    elif m.group(2) != '' and m.group(3) == '':
        #NOT xx = yy
        #print line
        # notswitch(m.group(2),m.group(4))
        try:
            int(m.group(2))
            connects.append(('not', int(m.group(2)), '', '', m.group(4)))
        except:
            connects.append(('not', m.group(2), '', '', m.group(4)))
    else:
        # xx OR yy = zz
        # comboswitch(m.group(1),m.group(2),m.group(3),m.group(4))
        try:
            x = int(m.group(1))
            t = 'combo_int'
        except:
            x = m.group(1)
            t = 'combo'
        #try:
        #    y = int(m.group(3))
        #    t = 'combo_int'
        #except:
        #    y = m.group(3)
        #    t = 'combo'
        y = m.group(3)
        connects.append((t, x, m.group(2), y, m.group(4)))
#print connects

#now create a key in the dict for each wire
for connect in connects:
    if connect[0] != 'start': wires[connect[1]] = ''
    wires[connect[4]] = ''
    if connect[3] != '':
        try:
            int(connect[3])
        except:
            wires[connect[3]] = ''

# now find starting points in connects and assign wire values
for connect in connects:
    if connect[0] == 'start':
        wires[connect[4]] = int(connect[1])
if part2:
    wires['b'] = 46065

# fxn to find wires that have been assigned and return list
def findempty():
    empty_wires = []
    for k, v in wires.items():
        if v == '':
            empty_wires.append(k)
    return empty_wires

def findfilled():
    filled_wires = []
    for k, v in wires.items():
        if v != '':
            filled_wires.append(k)
    return filled_wires

# fxn to find connections that are ready to be evaluated
def findnext():
    todo = []
    for connect in connects:
        if connect[0] == 'combo' and connect[1] in findfilled() and connect[3] in findfilled() and connect[4] in findempty():
            #print connect
            todo.append(connect)
        elif connect[0] == 'combo_int' and ( connect[1] in findfilled() or connect[3] in findfilled() ) and connect[4] in findempty():
            todo.append(connect)
        elif connect[1] in findfilled() and (connect[2] == 'LSHIFT' or connect[2] == 'RSHIFT') and connect[4] in findempty():
            todo.append(connect)
        elif connect[1] in findfilled() and connect[0] == 'not' and connect[4] in findempty():
            todo.append(connect)
        elif connect[0] == 'assign' and connect[1] in findfilled() and connect[4] in findempty():
            todo.append(connect)
    return todo

def eval(c):
    #print c
    if c[2] == 'AND' and c[0] == 'combo':
        wires[c[4]] = wires[c[1]] & wires[c[3]]
    elif c[2] == 'AND' and c[0] == 'combo_int':
        wires[c[4]] = c[1] & wires[c[3]]
    elif c[2] == 'OR':
        wires[c[4]] = wires[c[1]] | wires[c[3]]
    elif c[2] == 'LSHIFT':
        wires[c[4]] = wires[c[1]] << int(c[3])
    elif c[2] == 'RSHIFT':
        wires[c[4]] = wires[c[1]] >> int(c[3])
    elif c[0] == 'not':
        wires[c[4]] = ~wires[c[1]] & 0xFFFF
    elif c[0] == 'assign':
        wires[c[4]] = wires[c[1]]

# while there are still connects to eval, check for good connections and evaluate
while findnext():
    for c in findnext():
        print c
        # print wires['lx']
        # print wires['a']
        # print sorted(findfilled())
        eval(c)
print wires['a']
#print wires
authors: ["msjarrett@gmail.com"] | author_id: msjarrett@gmail.com
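The repeated `findnext()` rescans in the record above make the evaluation quadratic in the number of wires. A common alternative is a memoized recursive lookup per wire; a minimal Python 3 sketch for the same instruction format:

```python
import sys


def solve(lines):
    """Evaluate wire 'a' from instructions like '123 -> x' or 'x AND y -> z'."""
    rules = {target: expr.split()
             for expr, target in (line.split(' -> ') for line in lines)}
    cache = {}

    def value(token):
        if token.isdigit():
            return int(token)
        if token not in cache:
            rule = rules[token]
            if len(rule) == 1:                 # plain assignment: "x -> y"
                cache[token] = value(rule[0])
            elif rule[0] == 'NOT':             # 16-bit complement: "NOT x -> y"
                cache[token] = ~value(rule[1]) & 0xFFFF
            else:                              # "x OP y -> z"
                lhs, op, rhs = rule
                ops = {'AND': lambda l, r: l & r,
                       'OR': lambda l, r: l | r,
                       'LSHIFT': lambda l, r: (l << r) & 0xFFFF,
                       'RSHIFT': lambda l, r: l >> r}
                cache[token] = ops[op](value(lhs), value(rhs))
        return cache[token]

    sys.setrecursionlimit(10000)  # real inputs can chain deeply
    return value('a')
```

With memoization each wire is computed exactly once, so the whole circuit evaluates in time linear in the number of instructions.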
blob_id: 9070e41bc0704a9b5f22306c46cf02951593f2ae | directory_id: a244a8f515ed76a6a82036e22ae2541eec1e367e
path: /Parsefiles.py | content_id: 8f70cc66926adcc84073196d3d9d05e579b25410
detected_licenses: [] | license_type: no_license | repo_name: Rotorover/FileParser
snapshot_id: f60bbf3c9f846926fa172d5bd3f18686a4214a35 | revision_id: a1ab45787240236fbea05148e98a793b6a73c1a6 | branch_name: refs/heads/main
visit_date: 2023-04-19T18:20:29.286081 | revision_date: 2021-05-20T21:34:42 | committer_date: 2021-05-20T21:34:42
github_id: 317986139 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 698 | extension: py
content:
# This script should parse files and concatenate content
import os

# folder_1 = PureWindowsPath("C:\_Temp\benchmarkstt\GCP Transcribe")
file_orig = input("Enter file name: ")
file_post = (file_orig + "_post.txt")

# GCP uses 27 & -3; AWS uses 38 & -7
trimOffset_start = 27
trimOffset_end = -3

with open(file_orig, "r") as reader:
    for line in reader:
        if "transcript" in line:
            y = line[trimOffset_start:trimOffset_end]
            with open(file_post, 'a') as a_writer:
                a_writer.writelines(y)

# the following is just to view the results in the console, but not strictly needed
with open(file_post, "r") as reader:
    print(reader.read())
authors: ["noreply@github.com"] | author_id: Rotorover.noreply@github.com

blob_id: febaad2d5f1bc6fa75eadf08f50fa265fdec038b | directory_id: c1bb75fc10eab4d79caa0e8d21a7914acff0c09b
path: /polls/admin.py | content_id: 6310df971f0fe79556f13d08f3f2c719eddda48c
detected_licenses: [] | license_type: no_license | repo_name: aarush7/footfall-counter
snapshot_id: 03a94ffa1034c6e9f3688876a6f8aae5d162943a | revision_id: d580caca92c8f19debac92d3aa65a9bce01eea0c | branch_name: refs/heads/main
visit_date: 2023-07-15T08:54:41.400513 | revision_date: 2021-09-02T05:39:26 | committer_date: 2021-09-02T05:39:26
github_id: 402302603 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 184 | extension: py
content:
from django.contrib import admin
from .models import Question, PeopleCount, Choice

admin.site.register(Question)
admin.site.register(Choice)
admin.site.register(PeopleCount)
authors: ["aarushgupta212@gmail.com"] | author_id: aarushgupta212@gmail.com

blob_id: c2b6167f1d110ce7f0facffaea1445620967156f | directory_id: 5574599e3c436d8dd39c89481ff89eb601e13fac
path: /appVote/accounts/models.py | content_id: e4f32b948c409424d9d52148d049f2dba8364129
detected_licenses: [] | license_type: no_license | repo_name: Iyed777/appVote
snapshot_id: 587285e7ab4376d9cac2910abe30b3fae5bc2440 | revision_id: 7dbc611ef3dd120fa9286f9085cb2b5fd6572846 | branch_name: refs/heads/master
visit_date: 2021-01-13T05:21:15.843128 | revision_date: 2017-02-20T18:13:05 | committer_date: 2017-02-20T18:13:05
github_id: 81362651 | star_events_count: 2 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 425 | extension: py
content:
from __future__ import unicode_literals

from django.db import models


# Create your models here
class Member(models.Model):
    username = models.CharField(max_length=50)
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    email = models.EmailField()
    birth_date = models.DateField()
    registration = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.email
authors: ["iyedsla@gmail.com"] | author_id: iyedsla@gmail.com

blob_id: 9e49699608907fbc09a3c28478aeb0136b8c7177 | directory_id: 17cf9ad610ddc492c345e99e13d48c5be10591d6
path: /tests/test_flightReservation.py | content_id: 1ba777d01951be1836be640697bd118c010ad6f0
detected_licenses: [] | license_type: no_license | repo_name: angrycaptain19/pyhton-test
snapshot_id: 37542f36eb3ae4917c93cc655a761203578874fb | revision_id: c8f049c753c635e3f549c0c8030ab8d6de266947 | branch_name: refs/heads/master
visit_date: 2023-03-18T09:39:35.930016 | revision_date: 2021-03-04T07:42:25 | committer_date: 2021-03-04T07:42:25
github_id: 344591908 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-03-05T18:13:44 | gha_created_at: 2021-03-04T19:51:14 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1280 | extension: py
content:
import re

from pagesObjects.BasePage import BasePage
from pagesObjects.ResultPage import ResultPage
from utlilities.BaseClass import BaseClass
from datetime import datetime, timedelta


class TestFlight(BaseClass):
    today_date = datetime.today()
    departureDate = today_date.strftime("%d/%m/%Y")
    returnDate = (today_date + timedelta(2)).strftime("%d/%m/%Y")

    def test_shortFlight(self):
        base_page = BasePage(self.driver)
        flight_page = base_page.goToFlights()
        self.selectOptionByText(flight_page.getOrigin(), "Merida")
        self.selectOptionByText(flight_page.getDestination(), "La Habana")
        flight_page.getDepartingDate().send_keys(self.departureDate)
        flight_page.getReturninDate().send_keys(self.returnDate)
        flight_page.search()
        result_page = ResultPage(self.driver)
        self.verifyElementClickable("sort")
        self.selectOptionByValue(result_page.getSortPrices(), "desc")
        prices = result_page.getFlightPrices()
        pricesList = []
        for price in prices:
            priceText = price.text
            # keep digits only and compare numerically, not lexicographically
            value = int(re.sub('[^0-9]', "", priceText))
            pricesList.append(value)
        assert pricesList == sorted(pricesList, reverse=True), "The price list is not ordered"
authors: ["ruvaz@live.com"] | author_id: ruvaz@live.com

blob_id: b0e7337a7fd1023fc3c80f395a5d6deb8e260357 | directory_id: d3fee129a0a292e45c0170da2f4870f290309a23
path: /sfmutils/state_store.py | content_id: f591e8b674a3a73557e1282d4db259692b01a6bd
detected_licenses: ["MIT"] | license_type: permissive | repo_name: ituethoslab/sfm-utils
snapshot_id: e38bf8526e12d9f49252e5f65d124c38f672a8ed | revision_id: 47ac6b8a894f5b02d947d76c74aa61d59cb5d48d | branch_name: refs/heads/master
visit_date: 2023-02-26T16:12:01.489820 | revision_date: 2020-05-04T12:09:15 | committer_date: 2020-05-04T12:09:15
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4711 | extension: py
content:
import logging
import codecs
import os
import json
import shutil

log = logging.getLogger(__name__)

"""
A harvest state store keeps track of the state of harvesting of different types of resources.
For example, it might be used to keep track of the last tweet fetched from a user timeline.
A harvest state store should implement the signature of DictHarvestStateStore.
"""


class DictHarvestStateStore:
    """
    A harvest state store implementation backed by a dictionary and not persisted.
    """
    def __init__(self, verbose=True):
        self.state = {}
        self.verbose = verbose

    def get_state(self, resource_type, key):
        """
        Retrieves a state value from the harvest state store.
        :param resource_type: Key for the resource that has stored state.
        :param key: Key for the state that is being retrieved.
        :return: Value of the state, or None.
        """
        if resource_type in self.state and key in self.state[resource_type]:
            return self.state[resource_type][key]
        else:
            return None

    def set_state(self, resource_type, key, value):
        """
        Adds a state value to the harvest state store.
        The resource type is used to separate namespaces for keys.
        :param resource_type: Key for the resource that is storing state.
        :param key: Key for the state that is being stored.
        :param value: Value for the state that is being stored. None to delete an existing value.
        """
        if self.verbose:
            log.debug("Setting state for %s with key %s to %s", resource_type, key, value)
        if value is not None:
            if resource_type not in self.state:
                self.state[resource_type] = {}
            self.state[resource_type][key] = value
        else:
            # Clearing value
            if resource_type in self.state and key in self.state[resource_type]:
                # Delete key
                del self.state[resource_type][key]
                # If resource type is empty then delete
                if not self.state[resource_type]:
                    del self.state[resource_type]


class JsonHarvestStateStore(DictHarvestStateStore):
    """
    A harvest state store implementation backed by a dictionary and stored as JSON.
    The JSON is written to <path>/state.json. It is loaded and saved on
    every get and set.
    """
    def __init__(self, path):
        DictHarvestStateStore.__init__(self)
        self.path = path
        self.state_filepath = os.path.join(path, "state.json")
        self.state_tmp_filepath = os.path.join(path, "state.json.tmp")

    def _load_state(self):
        if os.path.exists(self.state_filepath):
            with codecs.open(self.state_filepath, "r") as state_file:
                self.state = json.load(state_file)

    def get_state(self, resource_type, key):
        self._load_state()
        return DictHarvestStateStore.get_state(self, resource_type, key)

    def set_state(self, resource_type, key, value):
        self._load_state()
        DictHarvestStateStore.set_state(self, resource_type, key, value)
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        # This way if the write fails, the original file will still be in place.
        with codecs.open(self.state_tmp_filepath, 'w', encoding="utf-8") as state_file:
            json.dump(self.state, state_file)
        shutil.move(self.state_tmp_filepath, self.state_filepath)


class NullHarvestStateStore:
    """
    A harvest state store that does nothing.
    """
    def __init__(self):
        pass

    def get_state(self, resource_type, key):
        return None

    def set_state(self, resource_type, key, value):
        pass


class DelayedSetStateStoreAdapter:
    """
    An adapter for a state store that keeps track of sets and delays
    passing them on to the underlying state store.
    """
    def __init__(self, state_store):
        self.state_store = state_store
        self.delayed_state = DictHarvestStateStore(verbose=False)

    def get_state(self, resource_type, key):
        return self.delayed_state.get_state(resource_type, key) or self.state_store.get_state(resource_type, key)

    def set_state(self, resource_type, key, value):
        self.delayed_state.set_state(resource_type, key, value)

    def pass_state(self):
        """
        Set the state on the underlying state store.
        """
        for resource_type, key_values in self.delayed_state.state.items():
            for key, value in key_values.items():
                self.state_store.set_state(resource_type, key, value)
        self.delayed_state = DictHarvestStateStore(verbose=False)
authors: ["justinlittman@gwu.edu"] | author_id: justinlittman@gwu.edu
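A short usage sketch for the adapter in the record above, showing buffered sets being flushed to the JSON-backed store (the `/tmp/harvest` path is illustrative):

```python
from sfmutils.state_store import JsonHarvestStateStore, DelayedSetStateStoreAdapter

store = JsonHarvestStateStore("/tmp/harvest")       # persists to /tmp/harvest/state.json
adapter = DelayedSetStateStoreAdapter(store)

adapter.set_state("timeline", "since_id", "12345")  # buffered in the delayed store only
print(store.get_state("timeline", "since_id"))      # None: nothing flushed yet
print(adapter.get_state("timeline", "since_id"))    # '12345', served from the buffer

adapter.pass_state()                                # flush buffered sets through
print(store.get_state("timeline", "since_id"))      # '12345'
```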
blob_id: dfab67d2bd71345b89ff952cc64c981bd1ea045b | directory_id: d605cc55ea1326524fe1450558758184a9520fe8
path: /seeds.py | content_id: ae5179e7bb1d39fc4fddece161ee75309b36abe2
detected_licenses: [] | license_type: no_license | repo_name: fbabauta/python-newsfeed
snapshot_id: 160ea4d3c9fe53fb0e77ab96f8f373f9ade8c3c9 | revision_id: 8fa22b672af497e6e914a546b0624e476ba0fdac | branch_name: refs/heads/main
visit_date: 2023-03-31T14:25:15.787368 | revision_date: 2021-04-03T04:06:14 | committer_date: 2021-04-03T04:06:14
github_id: 353933992 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2021-04-03T04:06:15 | gha_created_at: 2021-04-02T06:55:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2087 | extension: py
content:
from app.db import Session, Base, engine
from app.models import User, Post, Comment, Vote
# drop and rebuild tables
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
db = Session()
# insert users
db.add_all([
User(username='alesmonde0', email='nwestnedge0@cbc.ca', password='password123'),
User(username='jwilloughway1', email='rmebes1@sogou.com', password='password123'),
User(username='iboddam2', email='cstoneman2@last.fm', password='password123'),
User(username='dstanmer3', email='ihellier3@goo.ne.jp', password='password123'),
User(username='djiri4', email='gmidgley4@weather.com', password='password123')
])
db.commit()
# insert posts
db.add_all([
Post(title='Donec posuere metus vitae ipsum', post_url='https://buzzfeed.com/in/imperdiet/et/commodo/vulputate.png', user_id=1),
Post(title='Morbi non quam nec dui luctus rutrum', post_url='https://nasa.gov/donec.json', user_id=1),
Post(title='Donec diam neque, vestibulum eget, vulputate ut, ultrices vel, augue', post_url='https://europa.eu/parturient/montes/nascetur/ridiculus/mus/etiam/vel.aspx', user_id=2),
Post(title='Nunc purus', post_url='http://desdev.cn/enim/blandit/mi.jpg', user_id=3),
Post(title='Pellentesque eget nunc', post_url='http://google.ca/nam/nulla/integer.aspx', user_id=4)
])
db.commit()
# insert comments
db.add_all([
Comment(comment_text='Nunc rhoncus dui vel sem.', user_id=1, post_id=2),
Comment(comment_text='Morbi odio odio, elementum eu, interdum eu, tincidunt in, leo. Maecenas pulvinar lobortis est.', user_id=1, post_id=3),
Comment(comment_text='Aliquam erat volutpat. In congue.', user_id=2, post_id=1),
Comment(comment_text='Quisque arcu libero, rutrum ac, lobortis vel, dapibus at, diam.', user_id=2, post_id=3),
Comment(comment_text='In hac habitasse platea dictumst.', user_id=3, post_id=3)
])
db.commit()
# insert votes
db.add_all([
Vote(user_id=1, post_id=2),
Vote(user_id=1, post_id=4),
Vote(user_id=2, post_id=4),
Vote(user_id=3, post_id=4),
Vote(user_id=4, post_id=2)
])
db.commit()
db.close()
authors: ["francine.babauta@outlook.com"] | author_id: francine.babauta@outlook.com

blob_id: efb346cc440dd11fb0ee33eff6f07c4895f3b34c | directory_id: c887db974d9ec1cc14fdd6a64191bbdfe608f6e2
path: /perfis/migrations/0002_auto_20210608_1430.py | content_id: 458c68fa2bbb5371a770a9ae433a85ba0892bad0
detected_licenses: [] | license_type: no_license | repo_name: DouglasFeliphe/experiencein
snapshot_id: 8c082fab4efd4c119aaef0e44c1f045777b8c380 | revision_id: 4545b57e1f184b3026d7825ab3d1e65485a0bce0 | branch_name: refs/heads/master
visit_date: 2023-06-21T00:27:33.873759 | revision_date: 2021-08-03T22:43:30 | committer_date: 2021-08-03T22:43:30
github_id: 368657412 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 364 | extension: py
content:
# Generated by Django 2.2 on 2021-06-08 17:30

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('perfis', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='perfil',
            old_name='nome_da_empresa',
            new_name='nome_empresa',
        ),
    ]
authors: ["douglasfeliphe@hotmail.com"] | author_id: douglasfeliphe@hotmail.com

blob_id: 61855138635103deb44d02425d6ece284117d88e | directory_id: 72c3fdf70ca0fc8b6ec073f840bb70a9e52a1d10
path: /day2.py | content_id: ec1a81d3722c27169fb28b2fee836e5cc05db170
detected_licenses: [] | license_type: no_license | repo_name: sereneliu/AoC-2016
snapshot_id: c1df58c6ab271aa77528033a7837193f0ddddd7c | revision_id: 1bd4f11c766f27ba5dff772a536e24e843b8a7d2 | branch_name: refs/heads/master
visit_date: 2021-05-12T08:08:47.794767 | revision_date: 2018-02-09T03:08:26 | committer_date: 2018-02-09T03:08:26
github_id: 117267315 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1334 | extension: py
content:
with open('day2.txt') as puzzle_file:
    puzzle_input = puzzle_file.read().split('\n')

example_input = [
    'ULL',
    'RRDDD',
    'LURDL',
    'UUUUD']
keypad = ['123', '456', '789']
actual_keypad = ['  1  ', ' 234 ', '56789', ' ABC ', '  D  ']

def up(x, y):
    return x, y - 1

def down(x, y):
    return x, y + 1

def right(x, y):
    return x + 1, y

def left(x, y):
    return x - 1, y

directions = {
    'U': up,
    'D': down,
    'R': right,
    'L': left
}

def bathroom_code(some_input, some_keypad, start):
    (x, y) = start
    code = []
    for line in some_input:
        for i in xrange(len(line)):
            direction = line[i]
            if (direction == 'U' and y > 0) or (direction == 'D' and y < (len(some_keypad) - 1)) or (direction == 'R' and x < (len(some_keypad) - 1)) or (direction == 'L' and x > 0):
                (a, b) = directions[direction](x, y)
                if some_keypad[b][a] != ' ':
                    (x, y) = (a, b)
            if i == len(line) - 1:
                code.append(some_keypad[y][x])
    return ''.join(code)

# print bathroom_code(example_input, keypad, (1, 1)) # expect: 1985
# print bathroom_code(puzzle_input, keypad, (1, 1))
# print bathroom_code(example_input, actual_keypad, (0, 2)) # expect: 5DB3
print bathroom_code(puzzle_input, actual_keypad, (0, 2))
authors: ["cloud9@sentanother.email"] | author_id: cloud9@sentanother.email

blob_id: ebe87a45c4436ed13085db6a8f31edb73febc833 | directory_id: 6906c88458f6798a71515ea31187b2ccece8c211
path: /codekata/sum of two numbers odd or even.py | content_id: bcbf7002e20951df8a9a83421a3484d7cea40714
detected_licenses: [] | license_type: no_license | repo_name: vsdhanya/guvi
snapshot_id: 369ed2ded9811cd56af4fd457d0c1f6582bc14b9 | revision_id: b62b217db7bfe81e34a2d70cd8c044323e0174fd | branch_name: refs/heads/master
visit_date: 2020-06-03T00:10:19.379834 | revision_date: 2019-07-27T16:53:19 | committer_date: 2019-07-27T16:53:19
github_id: 191355080 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 103 | extension: py
content:
numb1, numb2 = map(int, input().split())
ans = numb1 + numb2
if ans % 2 == 0:
    print("even")
else:
    print("odd")
authors: ["noreply@github.com"] | author_id: vsdhanya.noreply@github.com

blob_id: c1cbd0ef8f401374648df34ef3ae3fdcb2080dac | directory_id: a6d3936eac053b55c600d961ce49c6537e9b1d9f
path: /Python/Simulaciones/Liguilla/jornadas.py | content_id: 5cfcc78f99803aae41eb94fee8f2819cb49190d6
detected_licenses: [] | license_type: no_license | repo_name: dcharua/code
snapshot_id: 233e382ca5c37983565ca6cb134fb5be18b1af82 | revision_id: 781fefb8e87334ec3b060c2dcfa9cf142feea0e7 | branch_name: refs/heads/main
visit_date: 2021-06-23T20:50:35.493288 | revision_date: 2020-12-17T20:16:42 | committer_date: 2020-12-17T20:16:42
github_id: 146336132 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3119 | extension: py
content:
jornada8 = [["Tiburones", "Cruz Azul"],
["Atlas", "Tigres"],
["Queretaro", "Monarcas"],
["America","Lobos"],
["Monterrey", "Puebla"],
["Necaxa","Xolos"],
["Pachuca","Chivas"],
["Pumas","Leon"],
["Santos", "Toluca"]]
jornada9 = [["Monarcas","America"],
["Xolos", "Atlas"],
["Cruz Azul", "Necaxa"],
["Leon","Santos"],
["Tigres","Pachuca"],
["Chivas","Monterrey"],
["Toluca","Tiburones"],
["Lobos","Pumas"],
["Puebla","Queretaro"]]
jornada10 = [["Tiburones","Santos"],
["Atlas","Cruz Azul"],
["Pachuca","Xolos"],
["America","Puebla"],
["Monterrey","Tigres"],
["Necaxa","Toluca"],
["Queretaro","Chivas"],
["Pumas","Monarcas"],
["Lobos","Leon"]]
jornada11 = [["Monarcas","Lobos"],
["Puebla", "Pumas"],
["Cruz Azul","Pachuca"],
["Chivas","America"],
["Tigres","Queretaro"],
["Xolos","Monterrey"],
["Toluca","Atlas"],
["Santos", "Necaxa"],
["Leon", "Tiburones"]]
jornada12 = [["Monarcas","Leon"],
["Atlas", "Santos"],
["Queretaro","Xolos"],
["Pachuca","Toluca"],
["America","Tigres"],
["Monterrey", "Cruz Azul"],
["Necaxa","Tiburones"],
["Pumas","Chivas"],
["Lobos", "Puebla"]]
jornada13 = [["Tiburones","Atlas"],
["Puebla","Monarcas"],
["Cruz Azul","Queretaro"],
["Tigres","Pumas"],
["Leon","Necaxa"],
["Chivas","Lobos"],
["Xolos","America"],
["Toluca","Monterrey"],
["Santos","Pachuca"]]
jornada14 = [["Puebla","Leon"],
["Atlas", "Necaxa"],
["Queretaro","Toluca"],
["Pachuca","Tiburones"],
["Monterrey","Santos"],
["Monarcas", "Chivas"],
["Pumas","Xolos"],
["Lobos","Tigres"],
["America","Cruz Azul"]]
jornada15 = [["Tiburones","Monterrey"],
["Xolos","Lobos"],
["Necaxa","Pachuca"],
["Leon","Atlas"],
["Cruz Azul","Pumas"],
["Tigres","Monarcas"],
["Chivas","Puebla"],
["Toluca","America"],
["Santos","Queretaro"]]
jornada16 = [["Puebla","Tigres"],
["Monarcas","Xolos"],
["Pachuca","Atlas"],
["America","Santos"],
["Monterrey","Necaxa"],
["Chivas","Leon"],
["Pumas","Toluca"],
["Lobos","Cruz Azul"],
["Queretaro","Tiburones"]]
jornada17 = [["Atlas","Monterrey"],
["Cruz Azul","Monarcas"],
["Leon","Pachuca"],
["Tigres","Chivas"],
["Tiburones","America"],
["Necaxa","Queretaro"],
["Toluca","Lobos"],
["Xolos","Puebla"],
["Santos","Pumas"]]
|
[
"danielcharua@hotmail.com"
] |
danielcharua@hotmail.com
|
7387efb8cc731d9261f4170557712bf510d1ac48
|
8a23722c98688f768e3a2749142b14b0e700eeb1
|
/social/social.py
|
758cb6bbeff882aeccef33cbf94b6fc5551aebb8
|
[
"MIT"
] |
permissive
|
leagueofstuds/los-cogs
|
7bf2aaef03de190bed6a0dc872d7f04b2af28d17
|
15720fced75a1c3dc96800592b86e3e8df98cd0d
|
refs/heads/master
| 2021-01-17T19:51:58.349423
| 2016-06-06T05:50:33
| 2016-06-06T05:50:33
| 60,491,272
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,220
|
py
|
import os
import asyncio
from random import randint, sample
import discord
from discord.ext import commands
class Social:
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def kiss(self, context, user: discord.Member):
""" kiss anyone """
msg = '{0} Was KISSED by {1}! :kiss:'.format(user.mention, context.message.author.mention)
folder = "kiss"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def taunt(self, context, user: discord.Member):
""" taunt anyone """
msg = '{0} Was TAUNTED by {1}! :kiss:'.format(user.mention, context.message.author.mention)
folder = "taunt"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def gank(self, context, user: discord.Member):
""" gank anyone """
msg = '{0} Was Ganked by {1}! :kiss:'.format(user.mention, context.message.author.mention)
folder = "gank"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def sit(self, context, user: discord.Member):
""" sit on anyone face"""
msg = '{1}! Sits on {0} face :smiling_imp: '.format(user.mention, context.message.author.mention)
folder = "sit"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def tip(self, context, user: discord.Member):
""" make it rain on anyone """
msg = '{1}! Makes it rain on {0} :money_mouth: :money_with_wings: '.format(user.mention, context.message.author.mention)
folder = "tips"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def shoot(self, context, user: discord.Member):
""" shoot anyone """
msg = '{0} Was shot dead by {1}! :skull: :gun: '.format(user.mention, context.message.author.mention)
folder = "shoot"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def snatch(self, context, user: discord.Member):
""" snatch anyone wig"""
msg = '{0} Wig has been snatched by {1}! r.i.p :scream: '.format(user.mention, context.message.author.mention)
folder = "snatched"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def cuddle(self, context, user: discord.Member):
""" cuddle with anyone """
msg = '{1}! Cuddles {0} so hard! '.format(user.mention, context.message.author.mention)
folder = "cuddle"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def spell(self, context, user: discord.Member):
""" casts a spell on anyone """
msg = '{1}! Casts a spell on {0} ! :dizzy: :comet: '.format(user.mention, context.message.author.mention)
folder = "spell"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def hugs(self, context, user: discord.Member):
""" hugs anyone """
msg = '{1}! Gives {0} a big hug! :hugging: '.format(user.mention, context.message.author.mention)
folder = "hug"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def truth(self, context, user: discord.Member):
""" truth questions """
msg = '{1}! Challenges {0} to tell the truth! '.format(user.mention, context.message.author.mention)
folder = "truth"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def dare(self, context, user: discord.Member):
""" dare questions """
msg = '{1}! Challenges {0} to a dare! '.format(user.mention, context.message.author.mention)
folder = "dare"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def feed(self, context, user: discord.Member):
""" feed anyone """
msg = '{1}! Feeds {0}! :yum: '.format(user.mention, context.message.author.mention)
folder = "feeds"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def drag(self, context, user: discord.Member):
""" drag race persona of a friend """
msg = '{1}! Reveals {0}! true inner drag persona! :princess: '.format(user.mention, context.message.author.mention)
folder = "drag"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def future(self, context, user: discord.Member):
""" check some ones future """
msg = '{1}! Takes a glance at what {0}! will become in the future! :scream: '.format(user.mention, context.message.author.mention)
folder = "future"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def shade(self, context, user: discord.Member):
""" throw some serious shade """
msg = 'It\'s cold in the shade. Isn\'t it {mentioned_user}?'.format(
mentioned_user = user.mention)
folder = "shade"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def adore(self, context, *gif):
""" summon adore (e.g. die, drag, ew, fuck, gasp, idgaf, overit, party, tongue) """
adores = ("die", "drag", "ew", "fuck", "gasp", "idgaf", "overit", "party", "tongue")
        if gif:
            # *gif is captured as a tuple of argument strings; use the first one
            gif = gif[0].lower()
if gif in adores:
return await self.bot.upload("data/gifs/adore/{0}.gif".format(gif))
await self.upload_random_gif(None, "adore")
@commands.command()
async def rr(self):
""" russian roulette... good luck! """
await self.bot.say('You spin the cylinder of the revolver with 1 bullet in it...')
await asyncio.sleep(1)
await self.bot.say('...you place the muzzle against your head and pull the trigger...')
await asyncio.sleep(2)
if randint(1, 6) == 1:
await self.bot.say('...your brain gets splattered all over the wall.')
else:
await self.bot.say('...you live to see another day.')
async def upload_random_gif(self, msg, folder):
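        # Post the message (if any), then upload a gif picked at random
        # from the matching data/gifs/<folder> directory.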
if msg:
await self.bot.say(msg)
folderPath = "data/gifs/" + folder
fileList = os.listdir(folderPath)
gifPath = folderPath + "/" + fileList[randint(0, len(fileList) - 1)]
await self.bot.upload(gifPath)
def setup(bot):
bot.add_cog(Social(bot))
|
[
"William.Humphreys-Cloutier@logmein.com"
] |
William.Humphreys-Cloutier@logmein.com
|
ead37457e9d1ae26a9ddb0c5034ae98ed4005d49
|
69d075f29db8c58df1133eee6af9e5ec40d1ed0b
|
/paten/utils.py
|
f180cae01d042baa6841c6adc98bfe4ce54be30e
|
[] |
no_license
|
gsy0911/paten
|
d985ee27bf7b96a32519a6bc88450a5ba651eb01
|
c121b2558dd523535f44e960b2a92171cc1856a8
|
refs/heads/master
| 2022-12-10T14:58:27.677139
| 2020-09-08T14:10:33
| 2020-09-08T14:10:33
| 268,189,768
| 0
| 0
| null | 2020-09-08T14:02:07
| 2020-05-31T01:37:54
|
Python
|
UTF-8
|
Python
| false
| false
| 536
|
py
|
import re
def validate_function_app_name(function_app_name: str) -> bool:
"""
Args:
function_app_name:
Returns:
        True if function_app_name is correct.
See Also:
https://docs.microsoft.com/ja-jp/azure/azure-functions/functions-create-first-azure-function#create-a-function-app
"""
function_app_name_accept_pattern = r"^[A-Za-z0-9][A-Za-z0-9\-]+[A-Za-z0-9]$"
result = re.match(function_app_name_accept_pattern, function_app_name)
if result:
return True
return False
|
[
"yoshiki0911@gmail.com"
] |
yoshiki0911@gmail.com
|
aa26db8811a44d35425a83b952ee3b5791cf8503
|
3e85167fa5d0277d014a19e16c3864677f53b7c4
|
/ArticleBlog/Article/urls.py
|
e1ce523680b5ebe4b179d7b68e508f1bdb0c2291
|
[] |
no_license
|
ltt1997/Djingo1118
|
af869b891e8f3d2d32a9b49124fa9769c11f30b5
|
5111efb63e56d277109d6431e8f327689517d788
|
refs/heads/master
| 2022-12-11T02:20:27.234561
| 2020-03-06T13:51:40
| 2020-03-06T13:51:40
| 239,662,926
| 0
| 0
| null | 2022-12-06T20:46:16
| 2020-02-11T02:56:49
|
CSS
|
UTF-8
|
Python
| false
| false
| 370
|
py
|
from django.urls import path,re_path
from .views import *
urlpatterns = [
path('index/',index),
path('about/',about),
path('listpic/',listpic),
    re_path(r'newslistpic/(?P<page>\d+)/', newslistpic),
path('addmany/',addmany),
path('ft_text/',ft_text),
    re_path(r'articleinfo/(?P<id>\d*)/', articleinfo),
# path('articleinfo/',articleinfo),
]
|
[
"2231683215@qq.com"
] |
2231683215@qq.com
|
997e2e9eea1be9351075548e571bdd63d66f2003
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_189/ch84_2019_06_06_12_13_25_240848.py
|
5f8c23a29613b7e484e849d377bd558705a2a600
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
def inverte_dicionario(dicionario):
    # Swap keys and values: {key: value} becomes {value: key}
    invertido = {}
    for chave, valor in dicionario.items():
        invertido[valor] = chave
    return invertido
|
[
"you@example.com"
] |
you@example.com
|
70cf5301d23b014507310d0fc82cd2a371d18621
|
f2b4bfeeaecc5fcc919923a94fc7fd613f07b834
|
/my_geek_shop/authapp/migrations/0004_auto_20210902_1337.py
|
7de7d346107096b145c478e4a41c6d3a96a9cf8c
|
[] |
no_license
|
johnk2280/GB_course_Django_Framework_basics
|
54770391739b2523d3813c7d303d38eca0c03ab6
|
dfb715a51337704813fc56111f6263a6d29b902c
|
refs/heads/main
| 2023-09-01T09:00:19.626983
| 2021-09-26T16:33:52
| 2021-09-26T16:33:52
| 385,336,147
| 0
| 0
| null | 2021-09-26T09:57:27
| 2021-07-12T17:52:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 708
|
py
|
# Generated by Django 3.2.5 on 2021-09-02 13:37
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('authapp', '0003_auto_20210901_1603'),
]
operations = [
migrations.AlterField(
model_name='shopuser',
name='activation_key_expires',
field=models.DateTimeField(default=datetime.datetime(2021, 9, 4, 13, 37, 2, 972464, tzinfo=utc)),
),
migrations.AlterField(
model_name='shopuser',
name='age',
field=models.PositiveIntegerField(default=18, verbose_name='возраст'),
),
]
|
[
"johnk2280@gmail.com"
] |
johnk2280@gmail.com
|
28c7d91bddab1ef94b1d0f73ff466fbb9c000e05
|
9ff4e9d9d988f2d44f4bc9cb386e5d9e8a666696
|
/recursion/hanoi.py
|
7ebbdab592b5350f82fcc5968c5e0b55023c084e
|
[] |
no_license
|
ashengtx/algorithms
|
26dd297cbb85149f48b568621dad50685aff0195
|
5231bf11de8b8b9cdd6f8ed765f4c7681a3badd4
|
refs/heads/master
| 2021-01-01T18:45:21.092083
| 2017-08-23T11:31:22
| 2017-08-23T11:31:22
| 98,427,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
def move_tower(height, from_pole, with_pole, to_pole):
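    # Classic Towers of Hanoi recursion: park height-1 disks on the spare
    # pole, move the largest disk, then stack the height-1 disks back on it.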
if height >= 1:
move_tower(height-1, from_pole, to_pole, with_pole)
move_disk(from_pole, to_pole)
move_tower(height-1, with_pole, from_pole, to_pole)
def moveTower(height,fromPole, toPole, withPole):
if height >= 1:
moveTower(height-1,fromPole,withPole,toPole)
move_disk(fromPole,toPole)
moveTower(height-1,withPole,toPole,fromPole)
def move_disk(from_pole, to_pole):
print("move disk from %s to % s"%(from_pole, to_pole))
def main():
move_tower(5, 'A', 'B', 'C')
print("====")
moveTower(5, 'A', 'C', 'B')
if __name__ == '__main__':
main()
|
[
"linliansheng007@163.com"
] |
linliansheng007@163.com
|
7bf0be59131563d40c328fa49f0dc064b87bf83c
|
8e37c0901b4b1295ef45a03882730a53523de50b
|
/lambdas/app/test/choice_writer/__init__.py
|
e6cf93ceadc3600f553b91f150f98b82ff00c8f8
|
[] |
no_license
|
sjawhar/enchanted-brain
|
086a8745939910598cc18b5cefd725ef78b872d9
|
c78e1457b28df289c78dc404afaefa746a095b65
|
refs/heads/master
| 2023-01-12T15:56:02.370278
| 2020-07-19T16:26:55
| 2020-07-19T16:26:55
| 184,348,628
| 1
| 1
| null | 2023-01-05T03:26:09
| 2019-05-01T00:16:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 73
|
py
|
import os
os.environ["DYNAMODB_TABLE_NAME"] = "testing-enchanted-brain"
|
[
"jackson.borchardt@gmail.com"
] |
jackson.borchardt@gmail.com
|
8187cf5ca9e3e21b1645f1c7b4d10858e8355506
|
6a66fffb719e8338621bf00e27e72a2e40050790
|
/MouvementsFouleSegerTamimMartinDedieu/environnement.py
|
df016bdc6d3af162c2ee9c7e548e2711d606fde1
|
[] |
no_license
|
dedieuvalerie/diu-eil-uvsq-1
|
cdc304630336ba8e854bba8d5616b12fef6c333b
|
95714cb8725cb6a85c7e56f95eb64289a0b6f095
|
refs/heads/master
| 2020-06-03T00:44:51.861427
| 2019-06-27T17:18:16
| 2019-06-27T17:18:16
| 191,365,449
| 0
| 0
| null | 2019-06-11T12:17:47
| 2019-06-11T12:17:47
| null |
UTF-8
|
Python
| false
| false
| 6,505
|
py
|
#import grille
from math import *
import random
class Voyageur:
"""
Représente les éléments mobiles qui se déplacent dans la grille donnée à la
construction.
L'attribut couleur peut servir à l'affichage.
L'attribut trajet consiste en un liste de portes par lesquelles doit passer
le voyageur. Lorsque cette liste a été intégralement parcourue,
le voyageur est arrivé et peut disparaître de la grille.
Afin de simuler des vitesses (statistiquement) différentes pour les
voyageurs, un attribut probaDep contient une probabilité de
déplacement (tirée au hasard lors de la construction ou spécifiée lors
de celle-ci). Ainsi, lorsque la grille ordonne au voyageur de se déplacer,
le voyageur ne se déplace pas systématiquement.
Le voyageur possède aussi un compteur de pas qui compte tous les pas
effectués.
Le compteur temps comptabilise lui toutes les trames où on l'a solicité pour
tenter un mouvement.
lors de la simulation, un autre compteur de pas de type 'watch dog'
permet d'enclencher, comme le ferait un vrai voyageur, une procédure de
'perte' qui tente de la ramener à la dernière porte qui a
été vue. Il convient ainsi, bien que ce ne soit pas imposé dans la classe
de positionner une porte à l'endroit exact où le voyageur est déposé dans
la grille (pour qu'il connaisse son point de départ).
Dans le déplacement d'un voyageur, les portes intermédiaires jouent un peu
le rôle des panneau indicateurs placés dans la salle et qui indiquent au
voyageur des buts intermédiaires. Ce serait plus exactement l'équivalent
du marquage au plafond de zones qui guident le voyageur d'étapes en étapes
jusqu'à l'objectif final.
Ces portes intermédiaires semblent indispensables si on ne souhaite pas
(comme c'est indiqué dans les consignes) que le voyageur connaisse
tout son environnement et si, comme cela est souhaitable dans la réalité,
que le voyageur se lance dans une exploration plus ou moins aléatoire
d'un labyrinthe. Les portes du trajet doivent être placées de telle façon
qu'à chaque moment de son déplacement, le voyageur ait 'en visuel' son
prochain but. En fait, le placement des panneaux indicateurs fait partie
intégrante de la conception d'une salle et le programme doit permettre de
tester cela.
"""
def __init__(self, couleur, trajet, grille, probaDep = None):
self.couleur = couleur
self.trajet = trajet
self.indiceTrajet = 0
self.grille = grille
if probaDep == None:
            ### WARNING ###
            # the value of sigma has a huge effect on how smoothly the crowd flows
self.probaDep = random.gauss(0.8, 0.1)
else:
self.probaDep = probaDep
self.podometre = 0
self.temps = 0
self.WATCH_DOG_VAL = 100
self.watchDog = self.WATCH_DOG_VAL
self.perdu = False
def _scruteHorizon(self,position):
"""
        determines the contents of the cells around the traveller (either
        another traveller, an obstacle, or a gate);
        input: the traveller's coordinates (tuple of ints); output: the
        list of coordinates of the reachable cells (tuples of ints)
        together with their contents (gate or empty)
"""
(x_voy, y_voy) = position
horizonRel = [(x, y) for x in [-1, 0, 1] for y in [-1, 0, 1]
if x != 0 or y != 0]
posHorizon = [(x_voy + dep[0], y_voy + dep[1]) for dep in horizonRel]
contHorizon = [(pos, self.grille.getContenuCase(pos)) for
pos in posHorizon]
def estLibre(cont):
return cont[1][0] == None and cont[1][1] == None
cases = [cont for cont in contHorizon if estLibre(cont)]
random.shuffle(cases)
return cases
def seDeplacer(self):
"""
        Moves a traveller.
        The traveller starts by observing the surrounding cells, which
        yields a list of possibilities.
        If one of the reachable cells is a stage of its route, it goes there.
        Otherwise, choose the cell that deviates as little as possible
        from its route (dot product of two vectors)
"""
def initWatchDog():
self.watchDog = (self.WATCH_DOG_VAL if not self.perdu else
self.WATCH_DOG_VAL // 5)
self.temps += 1
maposition = self.grille.getPosition(self)
casesPossibles = self._scruteHorizon(maposition)
assert(self.indiceTrajet >= 0)
        if len(self.trajet) <= self.indiceTrajet:  # arrived
self.grille.deleteVoyageur(self)
elif (self.trajet[self.indiceTrajet] ==
              self.grille.getContenuCase(maposition)[2]):  # gate
self.perdu = False
initWatchDog()
self.indiceTrajet += 1
self.seDeplacer()
elif self.watchDog == 0:
self.perdu = not self.perdu
self.indiceTrajet += -1 if self.perdu else +1
initWatchDog()
self.seDeplacer()
elif casesPossibles != [] and random.random() < self.probaDep:
self.watchDog -= 1
maxi = -1
dirP = self.grille.getDirection(
self, self.trajet[self.indiceTrajet])
normDir = sqrt(dirP[0] * dirP[0] + dirP[1] * dirP[1])
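            # Choose the free neighbouring cell whose step deviates least from
            # the direction of the next gate (largest cosine of the angle).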
for CASE in casesPossibles:
posCase = CASE[0]
vDep = (posCase[0] - maposition[0], posCase[1] - maposition[1])
normVDep = sqrt(vDep[0] * vDep[0] + vDep[1] * vDep[1])
dotP = dirP[0]*vDep[0] + dirP[1]*vDep[1]
cosinus = dotP / (normDir * normVDep)
if cosinus >= maxi :
maxi = cosinus
new_pos = posCase
if maxi > 0:
self.grille.setPosition(self, new_pos)
self.podometre += 1
else:
self.watchDog -= 1
class Obstacle:
def __init__(self, couleur, grille):
self.couleur = couleur
self.grille = grille
class Porte:
def __init__(self, couleur, grille):
self.couleur = couleur
self.grille = grille
|
[
"valerie.dedieu@gmail.com"
] |
valerie.dedieu@gmail.com
|
8a77c9aa0f53c8d7fe001ea13772da74858bd1bd
|
d62e9450fbfbf646e0bd9207b26e6c980628f11c
|
/experimental/test_module.py
|
7a5fd2ce0f9807e8189b28073d3847b0367929e3
|
[] |
no_license
|
Barnettxxf/scrapy_pytest
|
e81ac050f437f7d5de3c891156f663ed9b35fa3e
|
bf913655741a32233edb2ed9176b3c9f24ac59bc
|
refs/heads/master
| 2020-06-03T20:42:51.938470
| 2019-06-29T07:23:42
| 2019-06-29T07:23:42
| 191,723,747
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
# contents of test_module.py with source code and the test
from pathlib import Path
class Exam:
def __init__(self):
self.exam = 'Math'
def examize(self, extra):
return self.exam + extra
class E(Exam):
def examize(self, extra):
self.exam = 'TestMath'
return super().examize(extra)
def getssh():
"""Simple function to return expanded homedir ssh path."""
return Path.home() / ".ssh"
def test_getssh(monkeypatch):
# mocked return function to replace Path.home
# always return '/abc'
def mockreturn():
return Path("/abc")
# Application of the monkeypatch to replace Path.home
# with the behavior of mockreturn defined above.
monkeypatch.setattr(Path, "home", mockreturn)
# Calling getssh() will use mockreturn in place of Path.home
# for this test with the monkeypatch.
x = getssh()
assert x == Path("/abc/.ssh")
def test_exam(monkeypatch):
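    # Patch examize on the base class Exam: E's override still runs first
    # (setting self.exam), and its super().examize() call hits the mock.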
    def mockreturn(self, extra):
        return 'Test' + extra
exam = E()
monkeypatch.setattr(Exam, "examize", mockreturn)
assert exam.examize('extra') == 'Testextra'
assert exam.exam == 'TestMath'
|
[
"15102096586@163.com"
] |
15102096586@163.com
|
076db1487e1ff2cca84679348ac0b202151ba937
|
afcf26bbd84161f2775b879a68b2c163878984d9
|
/detaileddifferences/detaileddifferences.py
|
f7c4b6af3a1a8b50588fd719225475252d108a59
|
[] |
no_license
|
jakobkhansen/KattisSolutions
|
505a0657fa02a5156c853fc0a6566dd51591d36d
|
2869d6c9027515fd41eac6fcaee281aa474810c4
|
refs/heads/master
| 2023-07-07T19:17:37.614836
| 2023-06-28T18:12:50
| 2023-06-28T18:12:50
| 191,001,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
import sys
def detaileddifferences(lines):
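    # Compare the words pairwise: print both words, then a mask line with
    # '.' where the characters match and '*' where they differ.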
retStr = []
lines = lines[1:]
for i in range(0, len(lines), 2):
word1 = lines[i]
word2 = lines[i+1]
result = "{}\n{}\n".format(word1, word2)
for j in range(len(word1)):
if word1[j] == word2[j]:
result += "."
else:
result += "*"
retStr.append(result + "\n")
return "\n".join(retStr).strip()
def main():
lines = [line.strip() for line in sys.stdin]
print(detaileddifferences(lines))
main()
|
[
"jakob.hansen@hotmail.no"
] |
jakob.hansen@hotmail.no
|
dc6f4bfaca71c166477d3a58a9ce15d6366b86b5
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Integration/trend_Lag1Trend/cycle_30/ar_12/test_artificial_128_Integration_Lag1Trend_30_12_100.py
|
a9cbdbe704f76b2f21a5f6800925bfd1a10fd27f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
ab2691b76f501c4a62309bdc68b3d68ff527f83a
|
bcc3d1901bcf9b0c72b654a2be833c77c1db4066
|
/smoke_tests.py
|
022ca4653b215d2326ffb0f94dfb7a239fe88642
|
[] |
no_license
|
lasdolphin/moistatuscheck
|
f588f27358b11123ae3f4a3507af98051f8eca18
|
7399f53523e2ed21abdfad800f4953a9bbbf0919
|
refs/heads/master
| 2020-05-04T17:01:50.039203
| 2019-05-13T10:35:22
| 2019-05-13T10:35:22
| 179,295,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,029
|
py
|
import boto3
import json
import unittest
region = 'eu-central-1'
payload1 = """{
"dry": 1,
"update_id": 4,
"message": {
"message_id": 261,
"from": {
"id": 427355455,
"is_bot": "True",
"first_name": "qwe",
"last_name": "qwe",
"language_code": "en"
},
"chat": {
"id": 61560729,
"first_name": "qwe",
"last_name": "qwe",
"type": "private"
},
"date": 1556618119,
"text": "OAM-47516/ZM-2018"
}
}"""
payload2 ="""{
"dry": 1,
"update_id": 5,
"message": {
"message_id": 261,
"from": {
"id": 427355455,
"is_bot": "True",
"first_name": "qwe",
"last_name": "qwe",
"language_code": "en"
},
"chat": {
"id": 61560729,
"first_name": "qwe",
"last_name": "qwe",
"type": "private"
},
"date": 1556618119,
"text": "OAM-47516-2/ZM-2018"
}
}"""
payload3 ="""{
"dry": 1,
"update_id": 6,
"message": {
"message_id": 261,
"from": {
"id": 427355455,
"is_bot": "True",
"first_name": "qwe",
"last_name": "qwe",
"language_code": "en"
},
"chat": {
"id": 61560729,
"first_name": "qwe",
"last_name": "qwe",
"type": "private"
},
"date": 1556618119,
"text": "OAM-12906/ZM-2018"
}
}"""
def invoke_lambda(payload, region):
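    # Synchronously invoke the deployed function and decode the JSON body
    # it returns.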
client = boto3.client('lambda', region_name = region)
response = client.invoke(
FunctionName="MOIStatusCheck",
InvocationType='RequestResponse',
Payload=payload
)
return json.loads(response['Payload'].read())
class TestLambda(unittest.TestCase):
def test_found(self):
self.assertIn('found in MOI', invoke_lambda(payload1, region))
def test_wrong_format(self):
self.assertIn('Format seems to be incorrect', invoke_lambda(payload2, region))
    def test_not_found(self):
self.assertIn('was not found in file from', invoke_lambda(payload3, region))
if __name__ == '__main__':
unittest.main()
|
[
"lasdolphin@gmail.com"
] |
lasdolphin@gmail.com
|
3785f958355754fc84cc65177e26c696dd4e9cea
|
742dd0d99099c8ef38eb23e4b4cf763226571e72
|
/udfs.py
|
937118d59d16a344281f7815a901129bb58ebe9c
|
[] |
no_license
|
vishwanath79/PigDistributer
|
68f4b3ef780a02228c73f70868526529e32699d6
|
c61026a5dc1567dfab4429a48ed1bfab1ad32a37
|
refs/heads/master
| 2016-08-05T15:31:43.756023
| 2014-03-18T01:08:47
| 2014-03-18T01:08:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
@outputSchema("values:bag{t:tuple(key, value)}")
def bag_of_tuples(mapping):
    # Turn a Pig map into a bag of (key, value) tuples
    return mapping.items()
|
[
"ingvay7@aol.com"
] |
ingvay7@aol.com
|
3245933ea186704baae813d647a605e633ac67cf
|
6500149881382163c430ef2922667633c7f59e81
|
/Object Detection/real_time_face_detection.py
|
6da85414441146b69fcfca01d90c6c75dc52fb0f
|
[] |
no_license
|
V-Run-P/Computer-Vision
|
37604bea0c5e089af9158ab2ecf0495750d81f7d
|
67e59525bb062652bb459a4969c3188e22895ba6
|
refs/heads/main
| 2023-06-11T16:02:02.932715
| 2021-07-09T12:03:56
| 2021-07-09T12:03:56
| 319,770,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
import cv2
import sys
import logging as log
import datetime as dt
from time import sleep
cascPath = "haarcascade_frontalface_alt2.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
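    # Haar cascades operate on grayscale; detectMultiScale scans the frame
    # at several scales and returns (x, y, w, h) boxes for detected faces.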
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
V-Run-P.noreply@github.com
|
4f9c88e2ddbd1acfeb620138840ae3a4972261a0
|
77b998e0e4237ac74649948db3f7729c9740ad4b
|
/Day12/day12_2.py
|
cd5d343e0277ab5e52dd046fb9acc76b06b77b08
|
[] |
no_license
|
sboyett31/AdventOfCode2020
|
94bd64f01bacc55e20fe55d864d0223c280cb141
|
a49aceca123473cb40325494a7f0dc269b7ff2a3
|
refs/heads/master
| 2023-02-15T14:10:58.727243
| 2021-01-11T23:42:10
| 2021-01-11T23:42:10
| 325,432,049
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,405
|
py
|
#######################
# Stephen Boyett
# Advent of Code
# Day 12, Part 2
# 1/3/2021
########################
class ship:
def __init__(self):
self.x = 0
self.y = 0
self.wpy = 1
self.wpx = 10
self.dir = 90
self.wp_quad = 0
def autopilot(self, instr_set):
for i in instr_set:
print(f"Instruction is: {i}")
if "F" in i:
self.x += self.wpx*int(i[1:])
self.y += self.wpy*int(i[1:])
elif "R" in i or "L" in i:
self.rotate_waypoint(i)
else:
self.move_waypoint(i)
print(f"self.waypoint = ({self.wpx}, {self.wpy})")
def rotate_waypoint(self, instr):
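        # Each 90-degree clockwise turn maps the waypoint (x, y) to (y, -x);
        # each counter-clockwise turn maps it to (-y, x).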
        rotations = int(float(instr[1:]) / 360 / .25)  # degrees -> number of 90-degree turns
        self.wp_quad = (self.wp_quad + rotations) % 4
if "R" in instr:
for _ in range(rotations):
tx = self.wpx
self.wpx = self.wpy
self.wpy = -tx
elif "L" in instr:
for _ in range(rotations):
tx = self.wpx
self.wpx = -self.wpy
self.wpy = tx
def move_waypoint(self, instr):
if instr[:1] == "N":
self.wpy += int(instr[1:])
elif instr[:1] == "E":
self.wpx += int(instr[1:])
elif instr[:1] == "W":
self.wpx -= int(instr[1:])
elif instr[:1] == "S":
self.wpy -= int(instr[1:])
def main():
with open("day12_input", "r") as f:
instr = f.read().splitlines()
boat = ship()
boat.autopilot(instr)
print((boat.x, boat.y))
print(f"Manhattan distance is {abs(boat.x)+abs(boat.y)}")
if __name__=="__main__":
main()
|
[
"stephen.boyett@adtran.com"
] |
stephen.boyett@adtran.com
|
b7c1f2d82dc1dd5d62f4507f54d9b21e76f4b8a4
|
fc974aaa610ee31d2ed06ef138d89522faae0b76
|
/pydata/python_repos.py
|
2cd4694bd87f16a00b18e0cfc2b990c9fe83d461
|
[] |
no_license
|
wangxinghust/pyhello
|
9c2a3f31a0d05c1ca06cb60e6b4175b78e4766e4
|
95c81499c94b4118c8ef34cfa5866953c224d5ee
|
refs/heads/master
| 2020-08-14T17:31:15.980140
| 2019-10-25T05:23:03
| 2019-10-25T05:23:03
| 215,208,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,342
|
py
|
"""
Using an API
"""
import requests
import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
# Make the API call and store the response
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print("Status code: ", r.status_code)
# Store the API response in a variable
response_dict = r.json()
# Process the results
# print(response_dict.keys())
print("Total repositories:"+str(response_dict['total_count']))
# Explore information about the repositories
repo_dicts = response_dict['items']
print("Repositories returned:", len(repo_dicts))
# Examine the first repository
#repo_dict = repo_dicts[0]
#print("\tKeys:", len(repo_dict))
# for key in sorted(repo_dict.keys()):
# print(key)
#print("\nSelected information about first repository:")
#print('Name:', repo_dict['name'])
#print('Owner:', repo_dict['owner']['login'])
#print('Stars:', repo_dict['stargazers_count'])
#print('Repositoty:', repo_dict['html_url'])
#print('Created:', repo_dict['created_at'])
#print('Updated:', repo_dict['updated_at'])
#print('Descriprion:', repo_dict['description'])
# print("-----------")
#print("Selected information about each repository:")
# for repo_dict in repo_dicts:
# print('\nName', repo_dict['name'])
# print('Owner', repo_dict['owner']['login'])
# print('Stars', repo_dict['stargazers_count'])
# print('Repository', repo_dict['html_url'])
# print('Description', repo_dict['description'])
names, plot_dicts = [], []
for repo_dict in repo_dicts:
names.append(repo_dict['name'])
plot_dict = {
'value': repo_dict['stargazers_count'],
        'label': str(repo_dict['description']),  # hit a decoding problem; converting back to str fixes it
'xlink': repo_dict['html_url']
}
plot_dicts.append(plot_dict)
# Visualization
my_style = LS('#333366', base_style=LCS)
my_config = pygal.Config()
my_config.x_label_rotation = 45
my_config.show_legend = False
my_config.title_font_size = 24
my_config.label_font_size = 14
my_config.major_label_font_size = 18
my_config.truncate_label = 15
my_config.show_y_guides = False
my_config.width = 1000
chart = pygal.Bar(my_config, style=my_style)
chart.force_uri_protocol = 'http'
chart.title = 'Most-Starred Python Projects on Github'
chart.x_labels = names
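# Each bar carries its description as a tooltip and its html_url as a
# clickable link, via the 'label' and 'xlink' keys built above.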
chart.add('', plot_dicts)
chart.render_to_file('python_repos.svg')
|
[
"wangxinghust@qq.com"
] |
wangxinghust@qq.com
|
f4ad5373ba404d554b1f3dec4d3aec8e6e05174b
|
38248522bf1e2735aaf057f94f080043cf78d1b8
|
/stubs/phantom/vault.py
|
65d04e2189569193bb0522da7a0d2a604fb3b77f
|
[
"Apache-2.0"
] |
permissive
|
mjiyani-crest/phcode42v2
|
94685df9ec07c55dee7763ed3d48898d1503e8f8
|
1e27773df7115845371561956ec0d3433388f6ae
|
refs/heads/main
| 2023-06-23T22:25:14.790298
| 2021-06-28T17:25:34
| 2021-06-28T17:25:34
| 373,809,313
| 0
| 0
|
Apache-2.0
| 2021-06-04T10:43:50
| 2021-06-04T10:43:49
| null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
class Vault:
@classmethod
def create_attachment(
cls, file_contents, container_id, file_name=None, metadata=None
):
pass
|
[
"alan.grgic@code42.com"
] |
alan.grgic@code42.com
|
bfee3d7f0d173050488e73bc4e33f45ce25bed58
|
9972d534eb10c47546385b9a8db520e639d9f029
|
/kernel_svm.py
|
eb30a465b4b27d213723af1dd682c484f9b0c355
|
[] |
no_license
|
Yash482/Classification_Models
|
757e2319d8c0324e4bd0c37e7e368919873936a0
|
6edc2ab6f1e19be152bc7e4e99fd123c6de86bc7
|
refs/heads/master
| 2022-11-13T17:09:58.558384
| 2020-07-03T12:32:28
| 2020-07-03T12:32:28
| 276,892,917
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
#import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#get dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
#splitting data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state =1)
#Feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:, :-1] = sc.fit_transform(X_train[:, :-1])
X_test[:, :-1] = sc.transform(X_test[:, :-1])
#training
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf' , random_state =0)
classifier.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
|
[
"agarwaly482@gmail.com"
] |
agarwaly482@gmail.com
|
d61041193914ae4891ecb1576ae6c66e1b215381
|
1b5b2df047080208d2649c7aae3c79e6f99cc23e
|
/Customer_managements/account/migrations/0002_auto_20210412_0502.py
|
35fb002c0abfa6842283e073f99dd042515a38b6
|
[] |
no_license
|
Hardik0812/Customer_management_project
|
57795936f52c2a51f5644d185373d87958eb5a69
|
39ab7c579af9f0ac85a8155f1750bd226eadb743
|
refs/heads/main
| 2023-04-13T14:31:28.665267
| 2021-04-14T18:57:16
| 2021-04-14T18:57:16
| 357,665,395
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# Generated by Django 3.1.3 on 2021-04-12 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customers',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='order',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='products',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='tag',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"mail2hardikchandegara@gmail.com"
] |
mail2hardikchandegara@gmail.com
|
4aa5e17e1955bc88a72d692a188697cf9d737dc4
|
89c59980b4cb3eccedc56e8c8872bf789fd55787
|
/DBapp/consumers.py
|
975da7bf7147eeba627495aa982cbabf98bb09ef
|
[] |
no_license
|
ChaosXYZ/Kilbearn
|
b6b139d859051b4c41c0a8794229c5d46c15ccbf
|
241eff1945b6cd84f72246b889ed924800510f5b
|
refs/heads/master
| 2023-07-13T01:31:08.196039
| 2021-08-23T19:18:01
| 2021-08-23T19:18:01
| 399,222,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
import json, time
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
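        # Sockets that join the same room share one channel-layer group, so
        # a single group_send reaches every client connected to the room.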
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
# Receive message from room group
async def chat_message(self, event):
ts = time.gmtime()
message = event['message']
# Send message to WebSocket
await self.send(text_data=json.dumps({
'message': "("+str(time.strftime("%Y-%m-%d %H:%M:%S", ts))+message
}))
|
[
"ChaosCodePractice@gmail.com"
] |
ChaosCodePractice@gmail.com
|
9ca2e6337d14d6e649e9d38da30e2d1da7e2ccde
|
2bafce6fa0662a9c4b799a076dc08f1ad07f40be
|
/Contributions/basic-xor-encryptor.py
|
88a365f4e52df83dc9f978fb864cad4e9707bc6a
|
[] |
no_license
|
aryangulati/Hacktoberfest2021
|
70e12df69850dcd057e79e6881e8676f09b91919
|
cdaf3f7082c841c9b03ffcf3521a8c60fd4f7fb9
|
refs/heads/main
| 2023-09-01T03:22:22.884520
| 2021-10-22T07:10:23
| 2021-10-22T07:10:23
| 300,351,216
| 4
| 48
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
text = "flag{be551b494eac9a69a4584b8e2db0bb20}"
def encrypt(text,key):
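    # XOR every character with the single-character key, collecting both
    # the raw ciphertext and a '-'-separated hex dump for display.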
encrypted_txt = ''
hex_enc_txt = ''
for i in range(0,len(text)):
xored = ord(text[i]) ^ ord(key)
encrypted_txt += chr(xored)
hex_enc_txt += hex(xored) + '-'
li = list(hex_enc_txt)
li.pop()
hex_enc_txt = ''.join(li)
return encrypted_txt,hex_enc_txt
def decrypt(ciphered_txt,key):
decrypted_txt = ''
for i in range(0,len(ciphered_txt)):
xored = ord(ciphered_txt[i]) ^ ord(key)
decrypted_txt += chr(xored)
return decrypted_txt
alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Encrypt once with key 'Q', then brute-force the key over the alphabet
ciphered_txt, cip_hex = encrypt(text, 'Q')
# print(ciphered_txt)
print(cip_hex)
for i in alpha:
    deciphered_txt = decrypt(ciphered_txt, i)
    print(deciphered_txt)
|
[
"noreply@github.com"
] |
aryangulati.noreply@github.com
|
b1162025d76acd18de5a6ad6c14cd6f53341538e
|
08b8f76a4d734c24d23d83cc23fa798e8ae9c809
|
/domain/http_status.py
|
4c58268c853c9764d641989775300b0316e439b1
|
[] |
no_license
|
vanchinathan83/ninjablog
|
9e09433bcc0228be42527a4a4ff616e26d3f42a3
|
45e253bf906c0fd29c2f3dd6e4ee3f14749a2131
|
refs/heads/master
| 2016-09-10T18:51:12.966177
| 2015-06-12T04:17:54
| 2015-06-12T04:17:54
| 35,651,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
class http_status():
    def __init__(self, status_code=200, message="Success"):
        self.status_code = status_code
        self.message = message

    def __str__(self):
        return str(self.status_code) + '-' + self.message
|
[
"ac.vanchinathan@gmail.com"
] |
ac.vanchinathan@gmail.com
|
aa0c78b91142b0002b7981f71cf48f00bc2bf1fc
|
9e4e8d69db7f440f4bb0110901bcca96f774b752
|
/HandTracking.py
|
3aebdcccd94c28a1eaa4f5354855ae9b46f24889
|
[] |
no_license
|
ankanp14/virtual-mouse
|
1029344ba56c38f5fa7e1bb06b21d3118ac12dc4
|
38070463459dc630d001967a1dbb675ca78b4470
|
refs/heads/main
| 2023-08-30T23:18:59.908713
| 2021-10-12T05:50:11
| 2021-10-12T05:50:11
| 416,193,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,179
|
py
|
import math
import cv2
import mediapipe as mp
import time
class handDetector():
def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
self.tipids = [4, 8, 12, 16, 20]
def findHandPoints(self, frame, draw=True):
frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(frameRGB)
# print(results.multi_hand_landmarks)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(frame, handLms, self.mpHands.HAND_CONNECTIONS)
return frame
def findPosition(self, frame, handNo=0, draw=True):
self.lmList = []
if self.results.multi_hand_landmarks:
hand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(hand.landmark):
h, w, c = frame.shape
cx, cy = int(lm.x * w), int(lm.y * h)
self.lmList.append([id, cx, cy])
if draw:
# cv2.circle(frame, (cx, cy), 20, (255, 0, 255), cv2.FILLED)
cv2.putText(frame, str(id), (cx, cy), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 3)
return self.lmList
def fingersUp(self):
fingers = []
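        # self.tipids holds the MediaPipe fingertip landmarks; the thumb
        # folds sideways so its tip is compared on x, while the other
        # fingertips are compared on y against a joint two landmarks lower.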
# Thumb
if self.lmList[self.tipids[0]][1] > self.lmList[self.tipids[0] - 1][1]:
fingers.append(1)
else:
fingers.append(0)
# Other Fingers
for id in range(1, 5):
if self.lmList[self.tipids[id]][2] < self.lmList[self.tipids[id] - 2][2]:
fingers.append(1)
else:
fingers.append(0)
return fingers
def findDistance(self, frame, tip1, tip2, minDist=50, draw=True):
x1, y1 = self.lmList[tip1][1:]
x2, y2 = self.lmList[tip2][1:]
cx, cy = int(x1 + x2) // 2, int(y1 + y2) // 2
length = math.hypot(x2 - x1, y2 - y1)
if draw and length < minDist:
cv2.circle(frame, (cx, cy), 15, (0, 255, 0), cv2.FILLED)
return length, frame
def main():
prevTime = 0
currTime = 0
vidCap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
detector = handDetector()
while True:
success, frame = vidCap.read()
frame = detector.findHandPoints(frame)
lmList = detector.findPosition(frame)
if len(lmList) != 0:
print(lmList[4])
currTime = time.time()
fps = 1 / (currTime - prevTime)
prevTime = currTime
cv2.putText(frame, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
vidCap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
[
"apathak@gmail.com"
] |
apathak@gmail.com
|
63e4f8cf5a8098230e5b16812a052b656f55fdcc
|
39dd67658cdf965a789b698af01837af5da17214
|
/hlt7-json/ls.py
|
5df42885cce3f835ec571889eb63dcd75d88629d
|
[] |
no_license
|
aaditya9/zestl_work
|
024284d27704ba6da60989124f9499f4f96760e0
|
3fffeb1b83fe9200e0c99572fa92a64e9abe70e3
|
refs/heads/master
| 2020-03-06T21:58:09.668652
| 2018-08-16T05:49:39
| 2018-08-16T05:49:39
| 127,091,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import os
dirlist = os.listdir("/home/adi/Desktop/zestl/hlt7-json/inputs")
# from pprint import pprint
# pprint(dirlist)
print(dirlist)
|
[
"aditya@zestl.com"
] |
aditya@zestl.com
|
07d7d00cabbdd38048ac62b9367315ed14ec597b
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/mas_nitro-python-1.0/massrc/com/citrix/mas/nitro/resource/config/ns/ns_visualizer_network_bindings.py
|
451bd7a3c98e29df8fc73e2841f2c2d5c5e171c7
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953
| 2017-07-05T18:09:09
| 2017-07-05T18:09:09
| 95,933,896
| 2
| 5
| null | 2017-07-05T16:51:29
| 2017-07-01T01:03:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 9,643
|
py
|
'''
Copyright (c) 2008-2015 Citrix Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from massrc.com.citrix.mas.nitro.resource.Base import *
from massrc.com.citrix.mas.nitro.service.options import options
from massrc.com.citrix.mas.nitro.exception.nitro_exception import nitro_exception
from massrc.com.citrix.mas.nitro.util.filtervalue import filtervalue
from massrc.com.citrix.mas.nitro.resource.Base.base_resource import base_resource
from massrc.com.citrix.mas.nitro.resource.Base.base_response import base_response
'''
Configuration for visualizer network bindings resource
'''
class ns_visualizer_network_bindings(base_resource):
_vlan_nsip_binding=[]
_vlan_interface_binding=[]
_name= ""
_ip_address= ""
_vlan_bridgegroup_binding=[]
__count=""
'''
get the resource id
'''
def get_resource_id(self) :
try:
if hasattr(self, 'id'):
return self.id
else:
return None
except Exception as e :
raise e
'''
get the resource type
'''
def get_object_type(self) :
try:
return "ns_visualizer_network_bindings"
except Exception as e :
raise e
'''
Returns the value of object identifier argument.
'''
def get_object_id(self) :
try:
return self._name
except Exception as e :
raise e
'''
Returns the value of object file path argument.
'''
@property
def file_path_value(self) :
try:
return None
except Exception as e :
raise e
'''
Returns the value of object file component name.
'''
@property
def file_component_value(self) :
try :
return "ns_visualizer_network_bindingss"
except Exception as e :
raise e
'''
get VLAN NetScaler binding
'''
@property
def vlan_nsip_binding(self) :
try:
return self._vlan_nsip_binding
except Exception as e :
raise e
'''
set VLAN NetScaler binding
'''
@vlan_nsip_binding.setter
def vlan_nsip_binding(self,vlan_nsip_binding) :
try :
if not isinstance(vlan_nsip_binding,list):
raise TypeError("vlan_nsip_binding must be set to array of str value")
for item in vlan_nsip_binding :
if not isinstance(item,str):
raise TypeError("item must be set to str value")
self._vlan_nsip_binding = vlan_nsip_binding
except Exception as e :
raise e
'''
get VLAN interface binding
'''
@property
def vlan_interface_binding(self) :
try:
return self._vlan_interface_binding
except Exception as e :
raise e
'''
set VLAN interface binding
'''
@vlan_interface_binding.setter
def vlan_interface_binding(self,vlan_interface_binding) :
try :
if not isinstance(vlan_interface_binding,list):
raise TypeError("vlan_interface_binding must be set to array of str value")
for item in vlan_interface_binding :
if not isinstance(item,str):
raise TypeError("item must be set to str value")
self._vlan_interface_binding = vlan_interface_binding
except Exception as e :
raise e
'''
get Name of network binding
'''
@property
def name(self) :
try:
return self._name
except Exception as e :
raise e
'''
set Name of network binding
'''
@name.setter
def name(self,name):
try :
if not isinstance(name,str):
raise TypeError("name must be set to str value")
self._name = name
except Exception as e :
raise e
'''
get IP Address
'''
@property
def ip_address(self) :
try:
return self._ip_address
except Exception as e :
raise e
'''
set IP Address
'''
@ip_address.setter
def ip_address(self,ip_address):
try :
if not isinstance(ip_address,str):
raise TypeError("ip_address must be set to str value")
self._ip_address = ip_address
except Exception as e :
raise e
'''
get VLAN bridge group binding
'''
@property
def vlan_bridgegroup_binding(self) :
try:
return self._vlan_bridgegroup_binding
except Exception as e :
raise e
'''
set VLAN bridge group binding
'''
@vlan_bridgegroup_binding.setter
def vlan_bridgegroup_binding(self,vlan_bridgegroup_binding) :
try :
if not isinstance(vlan_bridgegroup_binding,list):
raise TypeError("vlan_bridgegroup_binding must be set to array of str value")
for item in vlan_bridgegroup_binding :
if not isinstance(item,str):
raise TypeError("item must be set to str value")
self._vlan_bridgegroup_binding = vlan_bridgegroup_binding
except Exception as e :
raise e
'''
Use this operation to get Network bindings.
'''
@classmethod
def get(cls,client = None,resource="",option_=""):
try:
response=""
if not resource :
ns_visualizer_network_bindings_obj=ns_visualizer_network_bindings()
response = ns_visualizer_network_bindings_obj.get_resources(client,option_)
else:
response = resource.get_resource(client, option_)
return response
except Exception as e :
raise e
'''
Use this API to fetch filtered set of ns_visualizer_network_bindings resources.
filter string should be in JSON format.eg: "vm_state:DOWN,name:[a-z]+"
'''
@classmethod
def get_filtered(cls,service,filter_) :
try:
ns_visualizer_network_bindings_obj = ns_visualizer_network_bindings()
option_ = options()
option_._filter=filter_
return ns_visualizer_network_bindings_obj.getfiltered(service, option_)
except Exception as e :
raise e
'''
* Use this API to count the ns_visualizer_network_bindings resources.
'''
@classmethod
def count(cls,service) :
try:
ns_visualizer_network_bindings_obj = ns_visualizer_network_bindings()
option_ = options()
option_._count=True
response = ns_visualizer_network_bindings_obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
'''
Use this API to count the filtered set of ns_visualizer_network_bindings resources.
filter string should be in JSON format.eg: "vm_state:DOWN,name:[a-z]+"
'''
@classmethod
def count_filtered(cls,service,filter_):
try:
ns_visualizer_network_bindings_obj = ns_visualizer_network_bindings()
option_ = options()
option_._count=True
option_._filter=filter_
response = ns_visualizer_network_bindings_obj.getfiltered(service, option_)
if response :
return response[0].__dict__['_count']
            return 0
except Exception as e :
raise e
'''
Converts API response into object and returns the object array in case of get request.
'''
def get_nitro_response(self,service ,response):
try :
result=service.payload_formatter.string_to_resource(ns_visualizer_network_bindings_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ns_visualizer_network_bindings
except Exception as e :
raise e
'''
Converts API response into object and returns the object array .
'''
def get_nitro_bulk_response(self,service ,response):
try :
result=service.payload_formatter.string_to_resource(ns_visualizer_network_bindings_responses, response, "ns_visualizer_network_bindings_response_array")
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
response = result.ns_visualizer_network_bindings_response_array
i=0
error = [ns_visualizer_network_bindings() for _ in range(len(response))]
for obj in response :
error[i]= obj._message
i=i+1
raise nitro_exception(result.errorcode, str(result.message), error)
response = result.ns_visualizer_network_bindings_response_array
i=0
ns_visualizer_network_bindings_objs = [ns_visualizer_network_bindings() for _ in range(len(response))]
for obj in response :
if hasattr(obj,'_ns_visualizer_network_bindings'):
for props in obj._ns_visualizer_network_bindings:
result = service.payload_formatter.string_to_bulk_resource(ns_visualizer_network_bindings_response,self.__class__.__name__,props)
ns_visualizer_network_bindings_objs[i] = result.ns_visualizer_network_bindings
i=i+1
return ns_visualizer_network_bindings_objs
except Exception as e :
raise e
'''
Performs generic data validation for the operation to be performed
'''
def validate(self,operationType):
try:
super(ns_visualizer_network_bindings,self).validate()
except Exception as e :
raise e
'''
Forms the proper response.
'''
class ns_visualizer_network_bindings_response(base_response):
def __init__(self,length=1) :
self.ns_visualizer_network_bindings= []
self.errorcode = 0
self.message = ""
self.severity = ""
self.ns_visualizer_network_bindings= [ ns_visualizer_network_bindings() for _ in range(length)]
'''
Forms the proper response for bulk operation.
'''
class ns_visualizer_network_bindings_responses(base_response):
def __init__(self,length=1) :
self.ns_visualizer_network_bindings_response_array = []
self.errorcode = 0
self.message = ""
self.ns_visualizer_network_bindings_response_array = [ ns_visualizer_network_bindings() for _ in range(length)]
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
08f7eea4ec4d3f08ad17af56071e9c8f789b1eb4
|
49a15320cc98bf4f609d1ab3cd5090936aabd9c9
|
/match_UI.py
|
85ae765ba50ce0d0df7293a95d275e9915d0294b
|
[] |
no_license
|
sparkbrains/buddymeup-backend
|
6eed295d10665e794257664d3dc44cc05f23cb87
|
80059ea17e546d29d4a5c208b42d3e23f3c78439
|
refs/heads/master
| 2023-06-09T06:24:27.977418
| 2021-07-04T09:57:22
| 2021-07-04T09:57:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
import streamlit as st
import match_algo
import pandas as pd
def display_match_UI():
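    # Streamlit reruns the script on every click, so intermediate results
    # are stashed in st.session_state to survive between button presses.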
st.write("Load data from DB to dataframe:")
if st.button("Prep data"):
st.write("Preparing data...")
st.session_state.data, st.session_state.email_ids, \
st.session_state.fdf, st.session_state.idx_dict = match_algo.prep_data()
st.write("Done!")
if st.button("Assign scores"):
st.write("Assigning scores...")
st.session_state.scores_df = match_algo.score_buddies(
st.session_state.fdf, st.session_state.data, st.session_state.idx_dict
)
st.write("Done!")
if st.button("Match buddies"):
st.write("Matching buddies...")
st.session_state.matched_df = match_algo.match_buddies(
st.session_state.data, st.session_state.scores_df, st.session_state.email_ids,
st.session_state.idx_dict
)
st.write("Done!")
if st.button("Save matches to DB"):
st.write("Saving matches to DB...")
match_algo.save_matches_db(st.session_state.matched_df)
st.write("Done!")
if st.button("Analyze matches"):
st.write("Analyzing matches...")
match_algo.analyze_matches()
st.write("Done!")
if st.button("Send emails"):
match_algo.email_notifications()
st.write("Done!")
|
[
"ryc.mak@gmail.com"
] |
ryc.mak@gmail.com
|
5b90ea66af48cddbcf91b005e887bda3142329f3
|
b06978b6020ce3240912ba5c131c4f38a86d7996
|
/Pycharm_files/Lists/rand_num_list.py
|
5ff3c2ce16b490c9c4438705cc6590bbde38c7c1
|
[] |
no_license
|
mn4774jm/PycharmProjects
|
95dc8ee6b89a85ba02d4134aa5b5bce11004647b
|
886bcf2400abc9a1f797fe98d09241f99fa16322
|
refs/heads/master
| 2021-08-09T10:20:27.907847
| 2020-09-04T15:21:21
| 2020-09-04T15:21:21
| 219,878,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
""" M. Bock 6/17/2019 This program asks the user how many integers they would like to pick for their list,
The program randomly selects that number of integers, and saves each pick in a list. The program sorts and
adds the numbers, then determines min and max. It displays the information in different ways
- with a short cut, a loop & a sentence. """
import random
print('Welcome to our random number game.')
def main():
try:
num_list = inputs()
total, min_num, max_num = processing(num_list)
outputs(num_list, total, min_num, max_num)
restart = input('\nWould you like to pick again? Enter y or n: ').lower()
if restart == 'y':
print('Let\'s play again...')
main()
else:
print('Thanks for using the program.')
except Exception as err:
print(err)
def inputs(): # collect information needed, from user and by random selection
print('How many integers should the computer pick for your list?')
picks = get_posint() # user sets list length
num_list = [] # initialize empty list
for pick in range(picks): # loop runs user-defined number of times
num_picked = random.randint(0, 9) # pick a random int for each trip through the loop
num_list.append(num_picked) # add the number picked to the list
num_list.sort() # sort the list - best to do this here
return num_list
def get_posint(): # request and validate positive integer
posint = input('Please enter a whole number: ')
while posint.isnumeric() is False or int(posint) == 0:
posint = input('You can only enter a number 1 or higher: ')
posint = int(posint)
return posint
def processing(num_list):
total = sum(num_list) # accumulate the total of the numbers counted
min_num = min(num_list) # identify and save the min
max_num = max(num_list) # identify and save the max
return total, min_num, max_num
def outputs(num_list, total, min_num, max_num):
print(f'\nHere is your list of {len(num_list)} integers - randomly selected & sorted:')
print(num_list) # print sorted list without other formatting
print('Here is your list - printed w/shortcut method:')
print(*num_list, sep=", ") # shortcut printing removes brackets
print('Here is your list - printed via a loop, with total: ')
for index in range(len(num_list)): # loop printing for ultimate control
if index < len(num_list) - 1:
print(f'{num_list[index]} + ', end="") # print all nums except last one
else:
print(f'{num_list[index]} = {total}') # print for last num & total
print(f'Your list minimum was {min_num} and maximum was {max_num} this time.')
main()
|
[
"mn4774jm@go.minneapolis.edu"
] |
mn4774jm@go.minneapolis.edu
|
55fd977c44b6f3678d972ff1d4ce0f76dd2fed9f
|
4eff9da9c945bac23383c1911ffa8448bae12ab3
|
/chapter 2/chapter2_problem1.py
|
5da8d9c765ba2e28e82f2692e8355c850e2649b4
|
[] |
no_license
|
SoorejRB/Anandolody-python-exercises
|
0490fe3027dda3f74241ef21dcc11fe83242a79e
|
30397c4926e480a57dbc4c8ff8df6c7236931a7f
|
refs/heads/master
| 2023-08-24T10:39:15.127918
| 2021-08-27T03:00:44
| 2021-08-27T03:00:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
# a list can contain another list as a member
a = [1,2]
b = [3,5,6,a]
print(b)
# builtin range fn
x = range(1, 4)
print(x)
print(x[0])
print(x[1])
print(x[2])
print(len(x))
# + and * operators in list
a = [1,2,3]
b = [4,5,6]
print(a+b)
print(a*2)
# negative index
x = [1,2,3,4]
print(x[-1])
print(x[-2])
#list slicing
x = [1,2,3,4]
print(x[0:2])
print(x[0:-2])
print(x[:2])
print(x[2:])
x = [1,2,3,4,5,6,7,8,9]
print(x[0:6:2])
print(x[::-1]) #reverse list
# Presence of an element in a list can be tested using the in operator
x = [1,2,3,4]
print(2 in x)
print(10 in x)
#append
x = [1,2,3,4]
x.append(100)
print(x)
#problem
x = [0, 1, [2]]
# x[2] = 3
x[2][0] = 3
print(x)
x[2].append(4)
print(x)
# x.append(22)
# print(x)
x[2] = 2
print(x)
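# Illustrative follow-up (not in the original exercise): a shallow copy
# still shares nested lists - the same aliasing shown in the problem above.
a = [1, [2]]
b = a[:]           # shallow copy: new outer list, same inner list
b[1].append(3)
print(a)           # [1, [2, 3]] - the inner list is shared
print(b)           # [1, [2, 3]]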
|
[
"soorejrb@gmail.com"
] |
soorejrb@gmail.com
|
e1d3dba25c73ce79845293cae302e1f57c6b0570
|
88854ce2e1a2c7342a074c5266165ce1934f46cf
|
/practice exercise 3_5.py
|
d470858bd555d652969914e1199995389d74503e
|
[] |
no_license
|
patrick473/TCIT-V1PROG-15
|
0fe6803284a345b31133e4eba3e3ba69ecf9648d
|
2d7279dfd3c5544c6b164aa977a4845694cd5b56
|
refs/heads/master
| 2020-05-29T09:12:41.011103
| 2016-10-11T12:43:44
| 2016-10-11T12:43:44
| 70,230,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
def kwadraten_som(grondgetallen):
    # Removing items from a list while iterating over it skips elements,
    # so filter the negatives out inside the comprehension instead.
    return sum(grondgetal ** 2 for grondgetal in grondgetallen if grondgetal >= 0)
print(kwadraten_som([4,5,3,-81]))
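# Extra illustrative checks, not part of the original exercise:
print(kwadraten_som([-2, -3, 4]))  # 16 - both negatives are ignored
print(kwadraten_som([]))           # 0 - empty input sums to zero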
|
[
"patkottman@gmail.com"
] |
patkottman@gmail.com
|
5c53432d1ffc71cfc8c419bf6bd2c8048d9826e5
|
df025f6d1168fbbe1d643278ca816eed2a25eaef
|
/hct/apps.py
|
69a4f65e2a1771df479a030c45fee5316ac764c8
|
[] |
no_license
|
jorgeeecm/HazteConTodo
|
68e4890b70a021ffebf989af512404ee61855917
|
c759713f2c4993da22291a245d50a31dd12583f4
|
refs/heads/master
| 2020-03-21T13:35:19.200153
| 2018-06-25T15:34:02
| 2018-06-25T15:34:02
| 137,350,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
from django.apps import AppConfig
class HctConfig(AppConfig):
name = 'hct'
|
[
"jorgecastillamaldonado@gmail.com"
] |
jorgecastillamaldonado@gmail.com
|
e898cbf91adc561911b9c2f3d4986ddf8a4dbc1d
|
23fd8d2525706f9679c5d8140363f3eb46444313
|
/DiscOpt/screenname/screenname/solver.py
|
25aa963c755c7eca717e2bbf7e754e308bd3df01
|
[] |
no_license
|
stevehaigh/MiscCourseWork
|
6c8d4fa4b5950e7d4bcb53aeb6ad5f7cc78b7b1f
|
fbd4797ccdd5446187722dbe96d08680bc60661b
|
refs/heads/master
| 2021-01-10T03:27:37.825996
| 2015-12-16T15:56:52
| 2015-12-16T15:56:52
| 48,120,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def solveIt():
# return a string that is the screen name you would like
return 'shaigh'
if __name__ == '__main__':
print 'This script submits the screen name: ' + solveIt() + '\n'
|
[
"steve.haigh@ericsson.com"
] |
steve.haigh@ericsson.com
|
09f337fa9dd14abb02db74e77343480e6f404d70
|
52585c8d95cef15199c18ba1a76899d2c31329f0
|
/05PythonCookbook/ch11Network_and_WebProgramming/2tcp_server/1server_write.py
|
81158a2d9b9a58cdeef3ee8d6799882b0eecdf04
|
[] |
no_license
|
greatabel/PythonRepository
|
c7a952257303a21083ed7d535274c339362bd126
|
836fcdd3f5c1b150122302685104fe51b5ebe1a3
|
refs/heads/master
| 2023-08-30T15:56:05.376391
| 2023-08-26T03:34:14
| 2023-08-26T03:34:14
| 29,392,599
| 33
| 6
| null | 2023-02-14T13:33:21
| 2015-01-17T13:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 733
|
py
|
from socketserver import StreamRequestHandler, TCPServer
class EchoHandler(StreamRequestHandler):
def handle(self):
print('Got connection from', self.client_address)
# self.rfile is a file-like object for reading
for line in self.rfile:
# self.wfile is a file-like object for writing
self.wfile.write(line)
if __name__ == '__main__':
import socket
serv = TCPServer(('', 20000), EchoHandler, bind_and_activate=False)
# Set up various socket options
serv.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
# Bind and activate
serv.server_bind()
serv.server_activate()
print('Echo server running on port 20000')
serv.serve_forever()
|
[
"greatabel1@126.com"
] |
greatabel1@126.com
|
5439dfb116477a4beef14b119635147786c66780
|
6b3003db01237128168cac075f7283c5184cfc1c
|
/plot.py
|
ad7c3e91d9e31725881269bdd2c4c2af5db9ef0d
|
[] |
no_license
|
muhumahucoorp/Data-Mining-Project-2
|
e87cb3d9a223d0cb5622edb6d84201b2317651fb
|
9c6aa09ac0cb6a8c688fd3e6b469b5676bc23d0e
|
refs/heads/master
| 2020-12-03T03:54:20.898179
| 2017-07-26T21:52:14
| 2017-07-26T21:52:14
| 95,788,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,768
|
py
|
import pandas
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import sys
print("Prepare data ...")
names = ["Id", "RI", "Na", "Mg", "Al", "Si", "K", "Ca", "Ba", "Fe"]
values = pandas.read_csv('glass.data', delimiter=',', header=None)
classes = ('building_windows_float_processed', 'building_windows_non_float_processed', 'vehicle_windows_float_processed', 'vehicle_windows_non_float_processed', 'containers', 'tableware', 'headlamps')
classifications = values.iloc[:,-1]
values = values.drop(labels=10, axis=1)
values = values.drop(labels=0, axis=1)
#print(classifications)
#print(values)
maxima = values.max(axis=0)
minima = values.min(axis=0)
f = open('Plots/valueRange.txt', 'w')
print("Start plotting ...")
for i in range(1,len(values.columns)+1):
    # The tenth column held the nominal class of the tuples and was dropped above.
    # Write the maximum and minimum for each attribute.
f.write('Attribute: ' + names[i] + ', max: ' + str(maxima[i]) + ", min: " + str(minima[i]) + "\n")
"""
# Create Histograms
plt.clf()
plt.xlabel('distribution')
plt.ylabel(names[i])
plt.hist(values[i], 100, facecolor='green', alpha=0.95)
plt.savefig("Plots/" + names[i] + "_histogram")
plt.clf()
"""
"""
# Create Boxplots
plt.figure()
plt.boxplot(values[i], 1)
plt.xticks([1], [names[i]])
plt.savefig("Plots/" + names[i] + "_boxplot")
plt.clf()
"""
f.close()
print("Created plots.")
"""
# Create a heatmap that represents the correlation between the values
print("Heatmap creation ...")
plt.clf()
x = values.corr()
x.columns = names[1:]
x.index = names[1:]
sns.set(font_scale=0.7)
sns.heatmap(x, annot=True, linewidths=0.5)
plt.savefig("Plots/" + "correlation_heatmap")
print("Heatmap created.")
"""
|
[
"muhumahu@web.de"
] |
muhumahu@web.de
|
bd953833d28c7cc861dec244fed9c12b796aa056
|
980017a97a0373e7dde5afd100f2b82cbcfa4d18
|
/bot.py
|
54a9fa8ff5d03404765740e6720ccdaf8f883cec
|
[] |
no_license
|
nimert007/aiogram-bot-template
|
6298ee7c89f6ff8eb85b561031713bd7873300db
|
ebab1bf882566007c755684eba8f859c286b3a1f
|
refs/heads/master
| 2023-03-27T13:53:24.380280
| 2021-04-04T15:04:41
| 2021-04-04T15:04:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,557
|
py
|
from typing import List, Tuple
import aiojobs
from aiogram import Bot, Dispatcher
from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.types import ParseMode
from aiohttp import web
from loguru import logger
from data import config
# noinspection PyUnusedLocal
async def on_startup(app: web.Application):
import middlewares
import filters
import handlers
middlewares.setup(dp)
filters.setup(dp)
handlers.errors.setup(dp)
handlers.user.setup(dp)
logger.info('Configure Webhook URL to: {url}', url=config.WEBHOOK_URL)
await dp.bot.set_webhook(config.WEBHOOK_URL)
async def on_shutdown(app: web.Application):
app_bot: Bot = app['bot']
await app_bot.close()
async def init() -> web.Application:
from utils.misc import logging
import web_handlers
logging.setup()
scheduler = await aiojobs.create_scheduler()
app = web.Application()
    subapps: List[Tuple[str, web.Application]] = [
('/health/', web_handlers.health_app),
('/tg/webhooks/', web_handlers.tg_updates_app),
]
for prefix, subapp in subapps:
subapp['bot'] = bot
subapp['dp'] = dp
subapp['scheduler'] = scheduler
app.add_subapp(prefix, subapp)
app.on_startup.append(on_startup)
app.on_shutdown.append(on_shutdown)
return app
if __name__ == '__main__':
bot = Bot(config.BOT_TOKEN, parse_mode=ParseMode.HTML, validate_token=True)
storage = RedisStorage2(**config.redis)
dp = Dispatcher(bot, storage=storage)
web.run_app(init())
|
[
"mr.i.lebedev2@gmail.com"
] |
mr.i.lebedev2@gmail.com
|
43981ac07cd22df75089d79929af3633bc60e54e
|
56863ae0712a937bc469a3f9e50d56170ef2666d
|
/CourseWork/urls.py
|
a15fd8edab056ab907336edd5e9e5213ecdac1c9
|
[] |
no_license
|
BohdanM19/CourseWork
|
f79b5e716366c87583ac575e93e0a6d9dbf9db5d
|
ca3ed39b263b71185e14660f477496ffc0101031
|
refs/heads/master
| 2022-12-15T19:04:43.403578
| 2019-06-02T11:41:57
| 2019-06-02T11:41:57
| 175,252,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
"""CourseWork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from application import views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('application.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"bogdanrichers@gmail.com"
] |
bogdanrichers@gmail.com
|
c8495d65ce9caa0795ab5e3f33e6d9e688f0e37c
|
aa44e7108b8f986e1bd23a3a6132c672cf185047
|
/1_assignment/4_sorting_mapreduce/mapper.py
|
0bb12fec3e4ed0f4eaa20f55d368c028fc5bda2c
|
[] |
no_license
|
jonbichel/hadoopProjects
|
a008bce9917d4b57ef71f07f0d55e83d0b8069df
|
6fe00fc4ded04cf97eec1d870105a078f0941fe3
|
refs/heads/master
| 2020-03-31T14:59:20.812357
| 2018-11-06T02:52:35
| 2018-11-06T02:52:35
| 152,318,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
#!/usr/bin/env python
#mapper.py
import string
import sys
for line in sys.stdin:
line = line.strip()
line = line.lower()
words = line.split()
    for word in words:
        # strip all punctuation characters from the word
        for c in string.punctuation:
            word = word.replace(c, "")
        if word != '':
            # emit the tab-separated key/value pair "<first letter>.<rest>\t1"
            if len(word) == 1:
                print '%s.%s\t%s' % (word[0], "", 1)
            else:
                print '%s.%s\t%s' % (word[0], word[1:], 1)
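# Local smoke test outside Hadoop (illustrative; assumes a POSIX shell):
#   echo "Apple, ant!" | python mapper.py
# emits two tab-separated key/value lines: "a.pple 1" and "a.nt 1"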
|
[
"jon@pkosund.ca"
] |
jon@pkosund.ca
|
7d417ecb524f386139a8882bd6a18ee49c18665e
|
7fbe976af5d3d20d0d4b6130a95f59d7f7dbc6ef
|
/video/app/client/urls.py
|
14821a4dc7be7ff66ec86f2d14dd40ba8ce064d1
|
[] |
no_license
|
wang122300090/muke_video_test
|
0ac3ddf4854d04a13b89ccb947c2ed40b9793468
|
86acb441ecd36ce354530ec20f2262f12561e95a
|
refs/heads/main
| 2023-05-31T04:38:30.352061
| 2021-06-12T07:35:30
| 2021-06-12T07:35:30
| 376,222,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
# coding: utf-8
from django.urls import path
urlpatterns = [
]
|
[
"1561585776@qq.com"
] |
1561585776@qq.com
|
5717ef3da2990fec63c5c42339244d2e9266bc96
|
7a535da0698c3933f6a3b642cf425b47287dd2c9
|
/train_gan.py
|
50f3008d13870bcb729e3a3bf0881dfe4ccef6ab
|
[] |
no_license
|
xuehuiping/VGAN
|
180969505489854ff9d5b88b6bb957ee6e403409
|
9c22bf333d33a13a48eea69b4772b558eb65bf81
|
refs/heads/master
| 2022-01-05T23:37:36.323380
| 2019-03-18T07:18:09
| 2019-03-18T07:18:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,255
|
py
|
#! /usr/bin/env python
# encoding:utf-8
"""
The adversarial training file
"""
import tensorflow as tf
import prodata
from load_data import Dis_dataloader
from reward1 import get_reward
import random
import numpy as np
from generator import Generator
from discriminator1 import Discriminator
import time
import loaddata
import os
import codecs
seed = 123
START_TOKEN = 0
END_TOKEN = 1
UNK_TOKEN = 2
PAD_TOKEN = 3
def generate_samples(sess,generator,batch_size,generated_num,vocab,output_file):
"""
trainable_model: generator
batch_size: the number of sentences generated every time
generated_num: the total number of sentences generated
output_file: the generated examples are saved in the file
"""
generated_samples = []
for _ in range(int(generated_num / batch_size)):
samples = sess.run(generator.gen_x) # batch_size * seq_length
generated_samples.extend(samples.tolist())
generated_samples_noend = []
for line in generated_samples:
list1 = []
for word in line[:-1]:
if word == END_TOKEN or word == PAD_TOKEN:
break
else:
list1.append(word)
if len(list1) == 0:
continue
generated_samples_noend.append(list1)
linelist = []
for x in generated_samples_noend:
line = []
for x1 in x:
line.append(vocab.id2word(x1))
linelist.append(line)
with codecs.open(output_file, 'w',encoding='utf-8') as fout:
print '\n'
print " generated samples in the text !!!!!!! "
for i,line in enumerate(linelist):
try:
buffer = ' '.join(line) + u'\n'
buffer_str = buffer.encode('utf-8')
if i < 10:
print buffer_str
fout.write(buffer)
except:
print 'some errors happen'
continue
print '\n'
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('mode',default_value ='train', docstring = 'train or decode; train: train the adversarial model, decode: just generate samples')
tf.app.flags.DEFINE_string('data_positive',default_value ='data/taobao_traindata.txt',docstring = 'Path expression to positive file')
tf.app.flags.DEFINE_string('data_negative',default_value ='data/data_neg.txt',docstring = 'Path expression to example generated ')
tf.app.flags.DEFINE_string('data_test',default_value ='data/taobao_testdata.txt',docstring = 'Path expression to test data ')
tf.app.flags.DEFINE_string('vocab_path',default_value ='data/dict.txt',docstring = 'Path expression to text vocabulary file.')
tf.app.flags.DEFINE_integer('batch_size',default_value= 64,docstring = 'batch_size')
tf.app.flags.DEFINE_integer('num_gen', default_value= 20000, docstring = 'the number of comment generated')
tf.app.flags.DEFINE_integer('vocab_size',default_value= 3000,docstring = 'number of vocab')
tf.app.flags.DEFINE_integer('embed_size',default_value= 200,docstring = 'dim of embedding')
tf.app.flags.DEFINE_integer('hidden_size',default_value= 100,docstring = 'RNN hidden_size')
tf.app.flags.DEFINE_integer('output_size',default_value= 200,docstring = 'output size')
tf.app.flags.DEFINE_integer('num_layers',default_value= 2,docstring = 'gen layers')
tf.app.flags.DEFINE_integer('seq_length', default_value= 21, docstring = 'max length of sequence, including start_token or end_token (x, y, mask length)')
tf.app.flags.DEFINE_string('pre_gen_model', default_value='model_gen', docstring = 'the pre_trained generator dir')
tf.app.flags.DEFINE_string('pre_dis_model', default_value= 'dis_pre_model', docstring = 'the pre_trained discriminator dir')
tf.app.flags.DEFINE_string('gen_model', default_value='model_1', docstring = 'the directory in which the generator will be saved during adversarial training')
tf.app.flags.DEFINE_string('dis_model', default_value= 'model_2', docstring = 'the directory in which the discriminator will be saved during adversarial training')
tf.app.flags.DEFINE_integer('total_epoch', default_value= 300, docstring = 'the number of adversarial epochs')
tf.app.flags.DEFINE_integer('gen_epoch', default_value= 5, docstring = 'the training numbers of generator every epoch')
tf.app.flags.DEFINE_integer('dis_epoch', default_value= 3, docstring = 'the training numbers of discriminator every epoch')
tf.app.flags.DEFINE_integer('latent_size', default_value= 60, docstring = 'the latent size')
tf.app.flags.DEFINE_string('gpu', default_value= '2', docstring = 'the GPU device to use')
tf.app.flags.DEFINE_float('reward_gamma',default_value = 0.95, docstring = 'reward ')
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
def main(x):
pre_gen_model = FLAGS.pre_gen_model + '/.'
# the pretrained generator
pre_dis_model = FLAGS.pre_dis_model + '/.'
# the pretrained discriminator
gen_model_path = FLAGS.gen_model + '/gen'
dis_model_path = FLAGS.dis_model + '/dis'
vocab_path = FLAGS.vocab_path
batch_size = FLAGS.batch_size
data_neg_new = FLAGS.data_negative
data_pos = FLAGS.data_positive
vocab_size = FLAGS.vocab_size
seq_length = FLAGS.seq_length
dis_filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
embed_dim = FLAGS.embed_size
vocab1 = prodata.Vocab(vocab_path,vocab_size)
data = loaddata.Data_loader(FLAGS.batch_size, FLAGS.seq_length)
vocab_freqs = data.create_batches(data_pos, vocab1)
vocab2 = prodata.Vocab(vocab_path,vocab_size)
dis_data = Dis_dataloader(FLAGS.batch_size,FLAGS.seq_length)
test_data = FLAGS.data_test
test_data_nll = loaddata.Data_loader(FLAGS.batch_size, FLAGS.seq_length)
test_data_nll.create_batches(test_data, vocab1)
# the generator
gen_model = Generator(vocab_size=vocab_size, batch_size=batch_size, emb_dim=embed_dim, hidden_dim=FLAGS.hidden_size,
sequence_length=FLAGS.seq_length, start_token=START_TOKEN, end_token=END_TOKEN,
pad_token=PAD_TOKEN, outputsize=FLAGS.output_size, vocab_freqs=vocab_freqs,
latent_size=FLAGS.latent_size, dropout=0,
num_layers=2, learning_rate=0.001, reward_gamma=0.95, scope='generator')
    # the policy network for reward
gen_rollout = Generator(vocab_size=vocab_size, batch_size=batch_size, emb_dim=embed_dim, hidden_dim=FLAGS.hidden_size,
sequence_length=FLAGS.seq_length, start_token=START_TOKEN, end_token=END_TOKEN,
pad_token=PAD_TOKEN, outputsize=FLAGS.output_size, vocab_freqs=vocab_freqs,
latent_size=FLAGS.latent_size, dropout=0,
num_layers=2, learning_rate=0.001, reward_gamma=0.95, scope='rollout')
# the discriminator
dis_model = Discriminator(seq_length, num_classes=2, vocab_size=vocab_size,
embedding_size=64, filter_sizes=dis_filter_sizes,
num_filters=dis_num_filters, l2_reg_lambda=0.2)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver1 = tf.train.Saver(var_list=gen_model.g_params,max_to_keep=500)
model_file1 = tf.train.latest_checkpoint(pre_gen_model)
saver3 = tf.train.Saver(var_list=gen_model.g_params,max_to_keep=500)
saver1.restore(sess,model_file1)
saver2 = tf.train.Saver(var_list=dis_model.params,max_to_keep=500)
model_file2 = tf.train.latest_checkpoint(pre_dis_model)
saver2.restore(sess,model_file2)
saver4 = tf.train.Saver(var_list=dis_model.params,max_to_keep=500)
gen_rollout.F_update_para(sess,gen_model.g_params)
    print ' start the adversarial training !!!!!!!! '
for i in xrange(FLAGS.total_epoch):
for it in xrange(FLAGS.gen_epoch):
samples_x,samples_y,mask = gen_model.generate_samples(sess)
# get the reward by the policy network
reward = get_reward(sess,samples_x,samples_y,seq_length,3,gen_rollout,dis_model,vocab1)
            # reward1.py gives the reward of the sentence
            # Monte Carlo search is used
# print samples_y
# print reward
feed = {gen_model.rewards: reward,
gen_model.mask: mask,
gen_model.x: samples_x,
gen_model.y: samples_y}
# policy gradient update the generator
loss1, _ = sess.run([gen_model.g_loss,
gen_model.g_updates], feed)
print '\n'
print 'the batch_size samples generated !!!!!!'
        print 'the adversarial epoch:',i
print '\n'
_, samples_batch, _ = gen_model.generate_samples(sess)
for x_i in xrange(batch_size):
comment_batch = []
for x_j in xrange(seq_length):
if samples_batch[x_i, x_j] == END_TOKEN:
break
comment_batch.append(vocab1.id2word(samples_batch[x_i, x_j]))
            buffer = u'adversarial:' + ''.join(comment_batch)
buffer_str = buffer.encode('utf-8')
print 'epoch:',i,'---',buffer_str
print '\n'
        # calculate the NLL on the test dataset
test_data_nll.reset_pointer()
aveloss = 0.0
for j in xrange(test_data_nll.num_batch):
x, y, mask = test_data_nll.next_batch()
feed = {gen_model.x: x,
gen_model.y: y,
gen_model.mask: mask
}
loss_1 = sess.run(gen_model.pre_loss_sen,
feed)
loss_1 = loss_1 / float(batch_size)
aveloss = aveloss + loss_1 / float(test_data_nll.num_batch)
print 'epoch %d, batch %d, nll: %f' % (i, it, aveloss)
print ' updates the rollout parameters !!!!!!!! '
gen_rollout.update_para(sess,gen_model.g_params)
print ' generate new examples file !!!!!!!!! '
saver3.save(sess,gen_model_path,i)
generate_samples(sess,gen_model,batch_size,FLAGS.num_gen,vocab1,data_neg_new)
dis_data.load_train_data(data_pos, data_neg_new, vocab2)
dis_data.reset_pointer()
print '\n'
print ' train the discriminator!!!!!!! '
for _ in xrange(FLAGS.dis_epoch):
for m1 in xrange(dis_data.num_batch):
x,y = dis_data.next_batch()
feed1 = {
dis_model.input_x: x,
dis_model.input_y: y,
dis_model.dropout_keep_prob: 0.75
}
loss2, _ = sess.run([dis_model.loss, dis_model.train_op], feed1)
print 'epoch:%d,batch:%d,loss:%f'%(i,m1,loss2)
print '\n'
saver4.save(sess, save_path=dis_model_path, global_step=i)
if __name__ == '__main__':
tf.app.run()
|
[
"2282821376@qq.com"
] |
2282821376@qq.com
|
cc5f1b8f912672b465b323e9a72150043df31abe
|
d19ef5717555c67398f69f7ae26f8666cd64aa67
|
/flask/bin/pyrsa-decrypt
|
90e280353b3cfe8d8174179375f0436f1a6ea294
|
[
"MIT"
] |
permissive
|
abagh0703/Find-Retail-Stores
|
4d421e1ea5e5bbcf4e45e629a5ddf549e6d16eab
|
cbca3c052523c52935066c5585e5dd2f1c6b4b1e
|
refs/heads/master
| 2020-12-26T00:06:34.856475
| 2016-08-11T16:53:35
| 2016-08-11T16:53:35
| 59,902,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
#!/Users/benjaminnewman/Documents/StuyHacks/RetailTrail/flask/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import decrypt
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(decrypt())
|
[
"nlb9991@gmail.com"
] |
nlb9991@gmail.com
|
|
7217341c53ebd4827a6ee7287feba14cdfbc09c3
|
5d5279a26c58538f47ec74efb7f66d43e07606e9
|
/huffman_helper_tests.py
|
362f371be5acdadfced9dd4332952deb82bccc4c
|
[] |
no_license
|
clau2024/Encoding
|
010e6506c13e4d5234b4332707697d630e34468c
|
d61fc327005e2ce50259ab4399673fac1871a8b9
|
refs/heads/main
| 2023-03-25T16:35:34.490516
| 2021-03-22T19:36:34
| 2021-03-22T19:36:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,150
|
py
|
import unittest
import filecmp
import subprocess
from ordered_list import *
from huffman import *
class TestList(unittest.TestCase):
def test_cnt_freq(self):
freqlist = cnt_freq("file2.txt")
anslist = [2, 4, 8, 16, 0, 2, 0]
self.assertListEqual(freqlist[97:104], anslist)
def test_lt_and_eq(self):
freqlist = cnt_freq("file2.txt")
anslist = [2, 4, 8, 16, 0, 2, 0]
ascii = 97
lst = OrderedList()
for freq in anslist:
node = HuffmanNode(ascii, freq)
lst.add(node)
ascii += 1
self.assertEqual(lst.index(HuffmanNode(101, 0)), 0)
self.assertEqual(lst.index(HuffmanNode(100, 16)), 6)
self.assertEqual(lst.index(HuffmanNode(97, 2)), 2)
self.assertFalse(HuffmanNode(97, 2) == None)
def test_create_huff_tree(self):
freqlist = cnt_freq("file2.txt")
hufftree = create_huff_tree(freqlist)
self.assertEqual(hufftree.freq, 32)
self.assertEqual(hufftree.char, 97)
left = hufftree.left
self.assertEqual(left.freq, 16)
self.assertEqual(left.char, 97)
right = hufftree.right
self.assertEqual(right.freq, 16)
self.assertEqual(right.char, 100)
second_tree = create_huff_tree([])
self.assertEqual(None, second_tree)
def test_create_header(self):
freqlist = cnt_freq("file2.txt")
self.assertEqual(create_header(freqlist), "97 2 98 4 99 8 100 16 102 2")
def test_create_code(self):
freqlist = cnt_freq("file2.txt")
hufftree = create_huff_tree(freqlist)
codes = create_code(hufftree)
self.assertEqual(codes[ord('d')], '1')
self.assertEqual(codes[ord('a')], '0000')
self.assertEqual(codes[ord('f')], '0001')
def test_create_code_2(self):
freq_list_2 = cnt_freq("file_spaces.txt")
huff_tree_2 = create_huff_tree(freq_list_2)
codes_2 = create_code(huff_tree_2)
self.assertEqual(codes_2[ord(' ')], '')
def test_create_code_3(self):
freq_list_2 = cnt_freq("file_spaces_multiline.txt")
huff_tree_2 = create_huff_tree(freq_list_2)
codes_2 = create_code(huff_tree_2)
self.assertEqual(codes_2[ord(' ')], '1')
def test_empty_file(self):
with self.assertRaises(FileNotFoundError):
huffman_encode('does_not_exist.txt', 'does_not_exist_out.txt')
def test_01_textfile(self):
huffman_encode("file1.txt", "file1_out.txt")
# capture errors by running 'diff' on your encoded file with a *known* solution file
err = subprocess.call("diff -wb file1_out.txt file1_soln.txt", shell=True)
self.assertEqual(err, 0)
err = subprocess.call("diff -wb file1_out_compressed.txt file1_compressed_soln.txt", shell=True)
self.assertEqual(err, 0)
def test_02_textfile(self):
huffman_encode("declaration.txt", "declaration_out.txt")
# capture errors by running 'diff' on your encoded file with a *known* solution file
err = subprocess.call("diff -wb declaration_out.txt declaration_soln.txt", shell=True)
self.assertEqual(err, 0)
err = subprocess.call("diff -wb declaration_out_compressed.txt declaration_compressed_soln.txt", shell=True)
self.assertEqual(err, 0)
def test_04_textfile(self):
huffman_encode("multiline.txt", "multiline_out.txt")
# capture errors by running 'diff' on your encoded file with a *known* solution file
err = subprocess.call("diff -wb multiline_out.txt multiline_soln.txt", shell=True)
self.assertEqual(err, 0)
err = subprocess.call("diff -wb multiline_out_compressed.txt multiline_compressed_soln.txt", shell=True)
self.assertEqual(err, 0)
def test_05_textfile(self):
huffman_encode("file2.txt", "file2_out.txt")
# capture errors by running 'diff' on your encoded file with a *known* solution file
err = subprocess.call("diff -wb file2_out.txt file2_soln.txt", shell=True)
self.assertEqual(err, 0)
err = subprocess.call("diff -wb file2_out_compressed.txt file2_compressed_soln.txt", shell=True)
self.assertEqual(err, 0)
def test_06_textfile(self):
huffman_encode("same_freq.txt", "same_freq_out.txt")
# capture errors by running 'diff' on your encoded file with a *known* solution file
err = subprocess.call("diff -wb same_freq_out.txt same_freq_soln.txt", shell=True)
self.assertEqual(err, 0)
def test_simple(self):
t_list = OrderedList()
t_list.add(10)
self.assertEqual(t_list.python_list(), [10])
self.assertEqual(t_list.size(), 1)
self.assertEqual(t_list.index(10), 0)
self.assertTrue(t_list.search(10))
self.assertFalse(t_list.is_empty())
self.assertEqual(t_list.python_list_reversed(), [10])
self.assertTrue(t_list.remove(10))
t_list.add(10)
self.assertEqual(t_list.pop(0), 10)
def test_everything(self):
my_list = OrderedList()
self.assertTrue(my_list.is_empty())
my_list.add(2)
self.assertFalse(my_list.is_empty())
self.assertTrue(my_list.add(4))
self.assertFalse(my_list.add(4))
self.assertTrue(my_list.add(6))
self.assertTrue(my_list.add(3))
self.assertEqual(my_list.index(3), 1)
self.assertTrue(my_list.search(6))
self.assertFalse(my_list.search(10))
self.assertEqual(my_list.python_list(), [2, 3, 4, 6])
self.assertEqual(my_list.python_list_reversed(), [6, 4, 3, 2])
self.assertEqual(my_list.size(), 4)
with self.assertRaises(IndexError):
my_list.pop(-1)
self.assertEqual(my_list.pop(3), 6)
self.assertFalse(my_list.remove(6))
self.assertTrue(my_list.remove(4))
self.assertEqual(my_list.index(4), None)
def test_01a_test_file1_parse_header(self):
f = open('file1_compressed_soln.txt', 'rb')
header = f.readline()
f.close()
        # 256 ASCII frequency slots: space (32) occurs 3 times;
        # 'a'-'d' (97-100) occur 4, 3, 2, 1 times; everything else is 0.
        expected = [0] * 256
        expected[32] = 3
        expected[97:101] = [4, 3, 2, 1]
self.compare_freq_counts(parse_header(header), expected)
def compare_freq_counts(self, freq, exp):
for i in range(256):
stu = 'Frequency for ASCII ' + str(i) + ': ' + str(freq[i])
ins = 'Frequency for ASCII ' + str(i) + ': ' + str(exp[i])
self.assertEqual(stu, ins)
def test_01_test_file1_decode(self):
huffman_decode("file1_compressed_soln.txt", "file1_decoded.txt")
err = subprocess.call("diff -wb file1.txt file1_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_02_test_file1_decode(self):
huffman_decode("letters_out_compressed.txt", "letters_decoded.txt")
err = subprocess.call("diff -wb letters.txt letters_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_04_test_file1_decode(self):
huffman_decode("file_spaces_multiline_out_compressed.txt", "file_spaces_multiline_decoded.txt")
err = subprocess.call("diff -wb file_spaces_multiline.txt file_spaces_multiline_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_05_test_file1_decode(self):
huffman_decode("file2_compressed_soln.txt", "file2_decoded.txt")
err = subprocess.call("diff -wb file2.txt file2_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_06_test_file1_decode(self):
huffman_decode("file_spaces_out_compressed.txt", "file_spaces_decoded.txt")
err = subprocess.call("diff -wb file_spaces.txt file_spaces_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_07_test_file1_decode(self):
huffman_decode("multiline_compressed_soln.txt", "multiline_decoded.txt")
err = subprocess.call("diff -wb multiline.txt multiline_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_08_test_file1_decode(self):
huffman_decode("same_freq_out_compressed.txt", "same_freq_decoded.txt")
err = subprocess.call("diff -wb same_freq.txt same_freq_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_empty_file_02(self):
with self.assertRaises(FileNotFoundError):
huffman_decode('does_not_exist.txt', 'does_not_exist_out.txt')
def test_09_test_file1_decode(self):
huffman_decode("numbers_out_compressed.txt", "numbers_decoded.txt")
err = subprocess.call("diff -wb numbers.txt numbers_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_10_test_file1_decode(self):
huffman_decode("single_char_out_compressed.txt", "single_char_decoded.txt")
err = subprocess.call("diff -wb single_char.txt single_char_decoded.txt", shell=True)
self.assertEqual(err, 0)
def test_11_test_file1_decode(self):
huffman_decode("newline_only_out_compressed.txt", "newline_only_decoded.txt")
err = subprocess.call("diff -wb newline_only.txt newline_only_decoded.txt", shell=True)
self.assertEqual(err, 0)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
clau2024.noreply@github.com
|
aaa2ceea09ad4ade20bef24175dab45ad8a2fc7a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/sieve/99c2a017b3484b88b8aeabf6dbd28122.py
|
780ae85330f2f6d9871630b622399a77d4c92425
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
def sieve(number):
end = number + 1
array = range(end)
array[1] = 0
    # iterate up to sqrt(number); the original bound of number/2 skipped
    # small inputs entirely (e.g. sieve(4) kept 4 as a "prime")
    for i in xrange(2, int(number ** 0.5) + 1):
if array[i] > 0:
for j in xrange(i+i, end, i):
array[j] = 0
return [x for x in range(2, end) if array[x] > 0]
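if __name__ == '__main__':
    # Illustrative check, not part of the original exercise file.
    print sieve(10)  # [2, 3, 5, 7]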
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
9c064cd199d4fefac3d2a9e1087409073e1f8c6c
|
84ed9a53237c703769df75e68f73ebca28900d73
|
/src/union_find/union_find.py
|
28f7e7e18128e8a4f20974cccf74474a3b1bbe2d
|
[
"MIT"
] |
permissive
|
naoki85/algorithm_programs
|
667be4d75f3e6c37fca64f6d7521f5c6058fc68e
|
7da25547bb9eb515091104769912860244860184
|
refs/heads/main
| 2023-07-28T16:03:52.214019
| 2021-09-12T07:48:46
| 2021-09-12T07:48:46
| 334,637,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
class UnionFind(object):
def __init__(self, num_of_nodes=1):
self.__parents = [i for i in range(num_of_nodes + 1)]
self.__rank = [0 for _ in range(num_of_nodes + 1)]
def find(self, x):
if self.__parents[x] == x:
return x
else:
self.__parents[x] = self.find(self.__parents[x])
return self.__parents[x]
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x != y:
if self.__rank[x] < self.__rank[y]:
x, y = y, x
if self.__rank[x] == self.__rank[y]:
self.__rank[x] += 1
self.__parents[y] = x
def is_same(self, x, y):
return self.find(x) == self.find(y)
def get_leaders(self):
leaders = set()
for a in set(self.__parents[1:]):
if a == self.__parents[a]:
leaders.add(a)
return leaders
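if __name__ == '__main__':
    # Illustrative usage, not part of the original module;
    # nodes are numbered 1..num_of_nodes.
    uf = UnionFind(num_of_nodes=5)
    uf.union(1, 2)
    uf.union(4, 5)
    print(uf.is_same(1, 2))  # True
    print(uf.is_same(2, 4))  # False
    print(uf.get_leaders())  # {1, 3, 4}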
|
[
"naoki.yoneyama.85@gmail.com"
] |
naoki.yoneyama.85@gmail.com
|
1a0e9db68c91b85962abc8429d257f6023ba63fb
|
2b9cc67d4bbb5257b4c64bf6437bf7c589300c06
|
/workspaces/Data_unconstrainedFpt/step2
|
889b74e2cadf04723a24cc47bddb47519876fedd
|
[] |
no_license
|
gerbaudo/hlfv-fitmodel
|
81bfe391a4a19af5268fa056319dc552f6b9e1cf
|
17a44604fa860382f72e27a5ee5c1432677e40cd
|
refs/heads/master
| 2020-06-07T09:34:42.487189
| 2015-05-25T09:44:23
| 2015-05-25T09:44:23
| 35,870,053
| 1
| 0
| null | 2015-05-25T09:05:46
| 2015-05-19T08:43:13
|
C
|
UTF-8
|
Python
| false
| false
| 1,033
|
#!/usr/bin/env python
import os
import sys
import ROOT
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-check', action='store_true')
args = parser.parse_args()
models = ["AllSYS",]
for model in models:
fname="results/ws_LFV_combined_%s_model.root"%model
if not os.path.exists(fname): continue
cmd = 'root -l ../../scripts/runAsymptoticsCLs.C+\''\
'("%s","combined","ModelConfig","obsData","","limits_%s","126",0.95)\'' % (fname,model)
os.system(cmd)
cmd = 'root -l ../../scripts/StandardHypoTestDemo.C+\'("%s","combined","ModelConfig","","obsData",2)\'' % (fname)
os.system(cmd)
fr = ROOT.TFile.Open("ws_out.root")
ws = fr.Get("combined")
mc = ws.obj("ModelConfig")
data = ws.data("obsData")
simPdf = mc.GetPdf()
numCats = simPdf.indexCat().numTypes()
#ws.Print()
ws.loadSnapshot("conditionalNuis_0")
nuis = mc.GetNuisanceParameters()
it = nuis.createIterator()
nui=it.Next()
while nui:
print "%30s %7.3f" % (nui.GetName(), nui.getVal())
nui=it.Next()
|
[
"avitald@883ba7d9-fdd0-4202-9341-49aa55999ad8"
] |
avitald@883ba7d9-fdd0-4202-9341-49aa55999ad8
|
|
eb15e5e6b5e442c0e948dd3ec8456dec3d69731c
|
2bac8043ec48f626617f2af625fc1258ddee40d9
|
/mecab_parse_simple.py
|
eaa5f67b234ed991d5f7321bf83d1c79ac7803fd
|
[] |
no_license
|
nagano16425/kadai2016
|
84648dd505096815ed071d84f450c822ebbe7090
|
5e76f3697f5931aedbead759d18796ece892d262
|
refs/heads/master
| 2021-01-21T04:53:58.275897
| 2016-06-29T12:15:31
| 2016-06-29T12:15:31
| 55,683,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import sys # System
import MeCab # MeCab
if __name__=="__main__":
    mecab = MeCab.Tagger("-Ochasen")  # ChaSen-compatible output format
    mecab.parse(" ")  # work around a UnicodeDecodeError (MeCab + Python 3 binding bug)
    sentence = ("「豊工に行っています。」")  # input sentence for morphological analysis
    node = mecab.parseToNode(sentence)  # get the nodes holding morpheme details
    node = node.next  # skip the BOS (beginning-of-sentence) node
    while node:
        pos = []  # holds surface form, part of speech, and base form
        surface = node.surface  # surface form
        array = node.feature.split(",")  # split part-of-speech info and base form
        for i in range(4):
            pos.insert(i, array[i])  # part of speech (pos[0]-pos[3])
        base = array[6]  # base form
        print (surface,"\t",pos[0],pos[1],pos[2],pos[3],base)
        node = node.next  # advance to the next node
|
[
"sd16425@toyota-ti.ac.jp.com"
] |
sd16425@toyota-ti.ac.jp.com
|
bb4a99e49dd0f91bad55759f5e8fbae63e93263c
|
e791c8d861241574264193a0a11d370f9f90725c
|
/node.py
|
cacf608c1a28789a76940b4ebc008499e1e592ff
|
[] |
no_license
|
LinnikPolina/Compiler
|
253f75c0f301c4ba7f04050694cb45658ea1a4eb
|
d1914025a0536f7d35fccf8a040ca27ac185ca06
|
refs/heads/master
| 2020-06-11T03:55:06.596908
| 2019-06-26T06:33:01
| 2019-06-26T06:33:01
| 193,843,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,879
|
py
|
from value import Value
from symbol_table import SymbolTable as SymbolTableClass
class Node():
def __init__(self, value, children):
self.value = value
self.children = children
def Evaluate(self, SymbolTable):
pass
class BinOp(Node):
def same_type(self, value1, value2):
if (value1.type == value2.type):
return True
else:
return False
def Evaluate(self, SymbolTable):
value1_obj = self.children[0].Evaluate(SymbolTable)
value2_obj = self.children[1].Evaluate(SymbolTable)
value1 = value1_obj.getValue()
value2 = value2_obj.getValue()
if (self.value == "+"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_sum = value1 + value2
result = Value("int")
result.setValue(value_sum)
return result
elif (self.value == "-"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_sub = value1 - value2
result = Value("int")
result.setValue(value_sub)
return result
elif (self.value == "or"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_or = value1 or value2
result = Value("boolean")
result.setValue(value_or)
return result
elif (self.value == "*"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_mult = value1 * value2
result = Value("int")
result.setValue(value_mult)
return result
elif (self.value == "/"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_div = value1 // value2
result = Value("int")
result.setValue(value_div)
return result
elif (self.value == "and"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_and = value1 and value2
result = Value("boolean")
result.setValue(value_and)
return result
elif (self.value == ":="):
SymbolTable.setSymbol(value1, value2)
elif (self.value == ">"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_bigger = value1 > value2
result = Value("boolean")
result.setValue(value_bigger)
return result
elif (self.value == "<"):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_smaller = value1 < value2
result = Value("boolean")
result.setValue(value_smaller)
return result
elif (self.value == "="):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_equal = value1 == value2
result = Value("boolean")
result.setValue(value_equal)
return result
elif (self.value == "!="):
if not self.same_type(value1_obj, value2_obj):
raise ValueError("Operands must be the same type")
value_diff = value1 != value2
result = Value("boolean")
result.setValue(value_diff)
return result
elif (self.value == ":"):
SymbolTable.createSymbol(value1, value2)
else:
return
class UnOp(Node):
def Evaluate(self, SymbolTable):
value_obj = self.children[0].Evaluate(SymbolTable)
value = value_obj.getValue()
if (self.value == "-"):
result = Value("int")
result.setValue(value * -1)
return result
elif (self.value == "not"):
if value_obj.type == "boolean":
result = Value("boolean")
result.setValue(not value)
return result
else:
raise ValueError("Operand must be a boolean")
else:
return
class StrVal(Node):
def Evaluate(self, SymbolTable):
value = Value("string")
value.setValue(self.value)
return value
class IntVal(Node):
def Evaluate(self, SymbolTable):
value = Value("int")
value.setValue(self.value)
return value
class BoolVal(Node):
def Evaluate(self, SymbolTable):
value = Value("boolean")
value.setValue(self.value)
return value
class Identifier(Node):
def Evaluate(self, SymbolTable):
value = SymbolTable.getSymbol(self.value)
return value
class NoOp(Node):
def Evaluate(self, SymbolTable):
return None
class Statements(Node):
def Evaluate(self, SymbolTable):
for child in self.children:
child.Evaluate(SymbolTable)
class Print(Node):
def Evaluate(self, SymbolTable):
value = self.children[0].Evaluate(SymbolTable)
print(value.getValue())
class Read(Node):
def Evaluate(self, SymbolTable):
result = input()
value = Value("int")
value.setValue(int(result))
return value
class If(Node):
def Evaluate(self, SymbolTable):
comp = self.children[0].Evaluate(SymbolTable)
if (comp.value):
self.children[1].Evaluate(SymbolTable)
else:
self.children[2].Evaluate(SymbolTable)
class While(Node):
def Evaluate(self, SymbolTable):
comp = self.children[0]
while (comp.Evaluate(SymbolTable).getValue()):
self.children[1].Evaluate(SymbolTable)
class Program(Node):
def Evaluate(self, SymbolTable):
SymbolTable.createSymbol(self.value, None)
for child in self.children:
child.Evaluate(SymbolTable)
class VarDec(Node):
def Evaluate(self, SymbolTable):
for child in self.children:
child.Evaluate(SymbolTable)
class FuncDec(Node):
def Evaluate(self, SymbolTable):
SymbolTable.createSymbol(self.value, "func")
SymbolTable.setSymbol(self.value, self)
class Funcs(Node):
def Evaluate(self, SymbolTable):
for func in self.children:
func.Evaluate(SymbolTable)
class FuncCall(Node):
def Evaluate(self, SymbolTable):
func_name = self.value
func_node = SymbolTable.getSymbol(func_name, "func").getValue()
funcSymbolTable = SymbolTableClass(SymbolTable)
var_dec = func_node.children[0]
args = [x.children[0] for x in var_dec.children]
func_node.children[0].Evaluate(funcSymbolTable)
if (len(args) != len(self.children)):
raise ValueError("Number of arguments must \
be the same as declaration")
for i in range(len(args)):
symbol = args[i].Evaluate(funcSymbolTable).getValue()
symbol_type = funcSymbolTable.getSymbol(symbol).getType()
value_obj = self.children[i].Evaluate(SymbolTable)
if (symbol_type != value_obj.getType()):
raise ValueError("Function argument must be \
the same as declared")
value = value_obj.getValue()
funcSymbolTable.setSymbol(symbol, value)
for i in range(1, len(func_node.children)):
func_node.children[i].Evaluate(funcSymbolTable)
result = funcSymbolTable.getSymbol(func_name)
return result
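if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: constant expressions
    # never consult the symbol table, so None suffices to evaluate them.
    tree = BinOp("*", [BinOp("+", [IntVal(2, []), IntVal(3, [])]), IntVal(4, [])])
    print(tree.Evaluate(None).getValue())  # 20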
|
[
"apoliina@outlook.com"
] |
apoliina@outlook.com
|
1244d88a20ed14f3b94d2619028165589628a562
|
1da5c37c2a356eaa2cdc912120937db9f80dfb1f
|
/costumers/apps.py
|
3938f75d3fce2c4800a604704d1eec3a4090f215
|
[] |
no_license
|
pace-noge/django-bootstrap-kargo
|
4303c296bc83ff03548dced176bd7ae8261572e2
|
5d1947b481c308990f324a8f328ebdef902df9d6
|
refs/heads/master
| 2020-04-07T15:43:04.278103
| 2016-06-27T01:22:23
| 2016-06-27T01:22:23
| 62,012,404
| 0
| 0
| null | 2016-06-26T23:12:00
| 2016-06-26T23:12:00
| null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class CostumersConfig(AppConfig):
name = 'costumers'
|
[
"nasa.freaks@gmail.com"
] |
nasa.freaks@gmail.com
|
16effeca8f4cf8ea3950da947f8f8f576ad45be9
|
60046bdd6ee8bac2494970812f9dc7a34ca4f659
|
/tests/test_database.py
|
4c1a212d6dbf595d99b467f4df201c1db85ea212
|
[
"MIT"
] |
permissive
|
nameone/accounts
|
7f2498aa8570e151281b697e4e9c3b5fa41a338b
|
75ad3ccf849c1c80f380e6db1fcd9a41b8383f3d
|
refs/heads/master
| 2020-12-14T06:11:03.659926
| 2014-10-21T03:23:28
| 2014-10-21T03:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# -*- coding: utf-8 -*-
import psycopg2
from psycopg2.extensions import STATUS_READY
import settings
from accounts.database import connect
def test_connect():
con = connect(settings.TEST_DB_PATH)
assert isinstance(con, psycopg2._psycopg.connection)
assert con.status == STATUS_READY
con.close()
def test_connect_fail():
assert connect('') is None
|
[
"imiric@gmail.com"
] |
imiric@gmail.com
|
e732ce8e858fe101f891c48e34c95e282265a794
|
021d837c38901e6a4371dfa21019b5ce8c9e83e5
|
/indic_glue_dataset_fixed/indic_glue.py
|
e8407084bd863ad303ead5ed05f6ee9044478f17
|
[] |
no_license
|
amankhandelia/roberta_hindi
|
bf9e6086ffcb25a9df32c7800836977d20c124d4
|
be4b082d97443abd40fc61c4cf138a217d6baa16
|
refs/heads/master
| 2023-06-24T14:18:06.183093
| 2021-07-20T12:49:16
| 2021-07-20T12:49:16
| 382,264,438
| 4
| 0
| null | 2021-07-10T19:27:57
| 2021-07-02T07:20:20
|
Python
|
UTF-8
|
Python
| false
| false
| 44,321
|
py
|
"""The IndicGLUE benchmark."""
import csv
import json
import os
import textwrap
import pandas as pd
import datasets
_INDIC_GLUE_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_INDIC_GLUE_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_DESCRIPTIONS = {
"wnli": textwrap.dedent(
"""
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
in which a system must read a sentence with a pronoun and select the referent of that pronoun from
a list of choices. The examples are manually constructed to foil simple statistical methods: Each
one is contingent on contextual information provided by a single word or phrase in the sentence.
To convert the problem into sentence pair classification, we construct sentence pairs by replacing
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
new examples derived from fiction books that was shared privately by the authors of the original
corpus. While the included training set is balanced between two classes, the test set is imbalanced
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
hypotheses are sometimes shared between training and development examples, so if a model memorizes the
training examples, they will predict the wrong label on corresponding development set
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
between a model's score on this task and its score on the unconverted original task. We
    call the converted dataset WNLI (Winograd NLI). This dataset is translated and publicly released for 3
Indian languages by AI4Bharat.
"""
),
"copa": textwrap.dedent(
"""
The Choice Of Plausible Alternatives (COPA) evaluation provides researchers with a tool for assessing
progress in open-domain commonsense causal reasoning. COPA consists of 1000 questions, split equally
into development and test sets of 500 questions each. Each question is composed of a premise and two
alternatives, where the task is to select the alternative that more plausibly has a causal relation
with the premise. The correct alternative is randomized so that the expected performance of randomly
guessing is 50%. This dataset is translated and publicly released for 3 languages by AI4Bharat.
"""
),
"sna": textwrap.dedent(
"""
This dataset is a collection of Bengali News articles. The dataset is used for classifying articles into
6 different classes namely national, international, state, kolkata, entertainment and sports.
"""
),
"csqa": textwrap.dedent(
"""
Given a text with an entity randomly masked, the task is to predict that masked entity from a list of 4
candidate entities. The dataset contains around 239k examples across 11 languages.
"""
),
"wstp": textwrap.dedent(
"""
Predict the correct title for a Wikipedia section from a given list of four candidate titles.
The dataset has 400k examples across 11 Indian languages.
"""
),
"inltkh": textwrap.dedent(
"""
Obtained from inltk project. The corpus is a collection of headlines tagged with their news category.
    Available for languages: gu, ml, mr and ta.
"""
),
"bbca": textwrap.dedent(
"""
This release consists of 4335 Hindi documents with tags from the BBC Hindi News website.
"""
),
"cvit-mkb-clsr": textwrap.dedent(
"""
CVIT Maan ki Baat Dataset - Given a sentence in language $L_1$ the task is to retrieve its translation
from a set of candidate sentences in language $L_2$.
The dataset contains around 39k parallel sentence pairs across 8 Indian languages.
"""
),
"iitp-mr": textwrap.dedent(
"""
IIT Patna Product Reviews: Sentiment analysis corpus for product reviews posted in Hindi.
"""
),
"iitp-pr": textwrap.dedent(
"""
IIT Patna Product Reviews: Sentiment analysis corpus for product reviews posted in Hindi.
"""
),
"actsa-sc": textwrap.dedent(
"""
ACTSA Corpus: Sentiment analysis corpus for Telugu sentences.
"""
),
"md": textwrap.dedent(
"""
The Hindi Discourse Analysis dataset is a corpus for analyzing discourse modes present in its sentences.
It contains sentences from stories written by 11 famous authors from the 20th Century. 4-5 stories by
each author have been selected which were available in the public domain resulting in a collection of 53 stories.
Most of these short stories were originally written in Hindi but some of them were written in other Indian languages
and later translated to Hindi.
"""
),
"wiki-ner": textwrap.dedent(
"""
The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been constructed using
the linked entities in Wikipedia pages for 282 different languages including Danish.
"""
),
}
_CITATIONS = {
"wnli": textwrap.dedent(
"""
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
@inproceedings{Levesque2011TheWS,
title={The Winograd Schema Challenge},
author={H. Levesque and E. Davis and L. Morgenstern},
booktitle={KR},
year={2011}
}
"""
),
"copa": textwrap.dedent(
"""
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
@inproceedings{Gordon2011SemEval2012T7,
title={SemEval-2012 Task 7: Choice of Plausible Alternatives: An Evaluation of Commonsense Causal Reasoning},
author={Andrew S. Gordon and Zornitsa Kozareva and Melissa Roemmele},
booktitle={SemEval@NAACL-HLT},
year={2011}
}
"""
),
"sna": textwrap.dedent(
"""
https://www.kaggle.com/csoham/classification-bengali-news-articles-indicnlp
"""
),
"csqa": textwrap.dedent(
"""
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
),
"wstp": textwrap.dedent(
"""
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
),
"inltkh": textwrap.dedent(
"""
https://github.com/goru001/inltk
"""
),
"bbca": textwrap.dedent(
"""
https://github.com/NirantK/hindi2vec/releases/tag/bbc-hindi-v0.1
"""
),
"cvit-mkb-clsr": textwrap.dedent(
"""
@inproceedings{siripragada-etal-2020-multilingual,
title = "A Multilingual Parallel Corpora Collection Effort for {I}ndian Languages",
author = "Siripragada, Shashank and
Philip, Jerin and
Namboodiri, Vinay P. and
Jawahar, C V",
booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://www.aclweb.org/anthology/2020.lrec-1.462",
pages = "3743--3751",
abstract = "We present sentence aligned parallel corpora across 10 Indian Languages - Hindi, Telugu, Tamil, Malayalam, Gujarati, Urdu, Bengali, Oriya, Marathi, Punjabi, and English - many of which are categorized as low resource. The corpora are compiled from online sources which have content shared across languages. The corpora presented significantly extends present resources that are either not large enough or are restricted to a specific domain (such as health). We also provide a separate test corpus compiled from an independent online source that can be independently used for validating the performance in 10 Indian languages. Alongside, we report on the methods of constructing such corpora using tools enabled by recent advances in machine translation and cross-lingual retrieval using deep neural network based methods.",
language = "English",
ISBN = "979-10-95546-34-4",
}
"""
),
"iitp-mr": textwrap.dedent(
"""
@inproceedings{akhtar-etal-2016-hybrid,
title = "A Hybrid Deep Learning Architecture for Sentiment Analysis",
author = "Akhtar, Md Shad and
Kumar, Ayush and
Ekbal, Asif and
Bhattacharyya, Pushpak",
booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
month = dec,
year = "2016",
address = "Osaka, Japan",
publisher = "The COLING 2016 Organizing Committee",
url = "https://www.aclweb.org/anthology/C16-1047",
pages = "482--493",
abstract = "In this paper, we propose a novel hybrid deep learning archtecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperforms the state-of-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.",
}
"""
),
"iitp-pr": textwrap.dedent(
"""
@inproceedings{akhtar-etal-2016-hybrid,
title = "A Hybrid Deep Learning Architecture for Sentiment Analysis",
author = "Akhtar, Md Shad and
Kumar, Ayush and
Ekbal, Asif and
Bhattacharyya, Pushpak",
booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers",
month = dec,
year = "2016",
address = "Osaka, Japan",
publisher = "The COLING 2016 Organizing Committee",
url = "https://www.aclweb.org/anthology/C16-1047",
pages = "482--493",
abstract = "In this paper, we propose a novel hybrid deep learning archtecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperforms the state-of-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.",
}
"""
),
"actsa-sc": textwrap.dedent(
"""
@inproceedings{mukku-mamidi-2017-actsa,
title = "{ACTSA}: Annotated Corpus for {T}elugu Sentiment Analysis",
author = "Mukku, Sandeep Sricharan and
Mamidi, Radhika",
booktitle = "Proceedings of the First Workshop on Building Linguistically Generalizable {NLP} Systems",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W17-5408",
doi = "10.18653/v1/W17-5408",
pages = "54--58",
abstract = "Sentiment analysis deals with the task of determining the polarity of a document or sentence and has received a lot of attention in recent years for the English language. With the rapid growth of social media these days, a lot of data is available in regional languages besides English. Telugu is one such regional language with abundant data available in social media, but it{'}s hard to find a labelled data of sentences for Telugu Sentiment Analysis. In this paper, we describe an effort to build a gold-standard annotated corpus of Telugu sentences to support Telugu Sentiment Analysis. The corpus, named ACTSA (Annotated Corpus for Telugu Sentiment Analysis) has a collection of Telugu sentences taken from different sources which were then pre-processed and manually annotated by native Telugu speakers using our annotation guidelines. In total, we have annotated 5457 sentences, which makes our corpus the largest resource currently available. The corpus and the annotation guidelines are made publicly available.",
}
"""
),
"md": textwrap.dedent(
"""
@inproceedings{Dhanwal2020AnAD,
title={An Annotated Dataset of Discourse Modes in Hindi Stories},
author={Swapnil Dhanwal and Hritwik Dutta and Hitesh Nankani and Nilay Shrivastava and Y. Kumar and Junyi Jessy Li and Debanjan Mahata and Rakesh Gosangi and Haimin Zhang and R. R. Shah and Amanda Stent},
booktitle={LREC},
year={2020}
}
"""
),
"wiki-ner": textwrap.dedent(
"""
@inproceedings{pan-etal-2017-cross,
title = "Cross-lingual Name Tagging and Linking for 282 Languages",
author = "Pan, Xiaoman and
Zhang, Boliang and
May, Jonathan and
Nothman, Joel and
Knight, Kevin and
Ji, Heng",
booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P17-1178",
doi = "10.18653/v1/P17-1178",
pages = "1946--1958",
abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. Both name tagging and linking results for 282 languages are promising on Wikipedia data and on-Wikipedia data.",
}
"""
),
}
_TEXT_FEATURES = {
"wnli": {"hypothesis": "sentence1", "premise": "sentence2"},
"copa": {"premise": "premise", "choice1": "choice1", "choice2": "choice2", "question": "question"},
"sna": {"text": "text"},
"csqa": {"question": "question", "answer": "answer", "category": "category", "title": "title"},
"wstp": {
"sectionText": "sectionText",
"correctTitle": "correctTitle",
"titleA": "titleA",
"titleB": "titleB",
"titleC": "titleC",
"titleD": "titleD",
"url": "url",
},
"inltkh": {"text": "text"},
"bbca": {"label": "label", "text": "text"},
"cvit-mkb-clsr": {"sentence1": "sentence1", "sentence2": "sentence2"},
"iitp-mr": {"text": "text"},
"iitp-pr": {"text": "text"},
"actsa-sc": {"text": "text"},
"md": {"sentence": "sentence", "discourse_mode": "discourse_mode"},
"wiki-ner": {},
}
_DATA_URLS = {
"wnli": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/wnli-translated.tar.gz",
"copa": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/copa-translated.tar.gz",
"sna": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/soham-articles.tar.gz",
"csqa": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/wiki-cloze.tar.gz",
"wstp": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/wiki-section-titles.tar.gz",
"inltkh": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/inltk-headlines.tar.gz",
"bbca": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/bbc-articles.tar.gz",
"cvit-mkb-clsr": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/cvit-mkb.tar.gz",
"iitp-mr": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/iitp-movie-reviews.tar.gz",
"iitp-pr": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/iitp-product-reviews.tar.gz",
"actsa-sc": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/actsa.tar.gz",
"md": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/midas-discourse.tar.gz",
"wiki-ner": "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/wikiann-ner.tar.gz",
}
_URLS = {
"wnli": "https://indicnlp.ai4bharat.org/indic-glue/#natural-language-inference",
"copa": "https://indicnlp.ai4bharat.org/indic-glue/#natural-language-inference",
"sna": "https://indicnlp.ai4bharat.org/indic-glue/#news-category-classification",
"csqa": "https://indicnlp.ai4bharat.org/indic-glue/#cloze-style-question-answering",
"wstp": "https://indicnlp.ai4bharat.org/indic-glue/#wikipedia-section-title-prediction",
"inltkh": "https://indicnlp.ai4bharat.org/indic-glue/#news-category-classification",
"bbca": "https://indicnlp.ai4bharat.org/indic-glue/#news-category-classification",
"cvit-mkb-clsr": "https://indicnlp.ai4bharat.org/indic-glue/#cross-lingual-sentence-retrieval",
"iitp-mr": "https://indicnlp.ai4bharat.org/indic-glue/#sentiment-analysis",
"iitp-pr": "https://indicnlp.ai4bharat.org/indic-glue/#sentiment-analysis",
"actsa-sc": "https://indicnlp.ai4bharat.org/indic-glue/#sentiment-analysis",
"md": "https://indicnlp.ai4bharat.org/indic-glue/#discourse-analysis",
"wiki-ner": "https://indicnlp.ai4bharat.org/indic-glue/#named-entity-recognition",
}
_INDIC_GLUE_URL = "https://indicnlp.ai4bharat.org/indic-glue/"
_WNLI_LANGS = ["en", "hi", "gu", "mr"]
_COPA_LANGS = ["en", "hi", "gu", "mr"]
_SNA_LANGS = ["bn"]
_CSQA_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
_WSTP_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
_iNLTKH_LANGS = ["gu", "ml", "mr", "ta", "te"]
_BBCA_LANGS = ["hi"]
_CVIT_MKB_CLSR = ["en-bn", "en-gu", "en-hi", "en-ml", "en-mr", "en-or", "en-ta", "en-te", "en-ur"]
_IITP_MR_LANGS = ["hi"]
_IITP_PR_LANGS = ["hi"]
_ACTSA_LANGS = ["te"]
_MD_LANGS = ["hi"]
_WIKI_NER_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
_NAMES = []
for lang in _WNLI_LANGS:
_NAMES.append(f"wnli.{lang}")
for lang in _COPA_LANGS:
_NAMES.append(f"copa.{lang}")
for lang in _SNA_LANGS:
_NAMES.append(f"sna.{lang}")
for lang in _CSQA_LANGS:
_NAMES.append(f"csqa.{lang}")
for lang in _WSTP_LANGS:
_NAMES.append(f"wstp.{lang}")
for lang in _iNLTKH_LANGS:
_NAMES.append(f"inltkh.{lang}")
for lang in _BBCA_LANGS:
_NAMES.append(f"bbca.{lang}")
for lang in _CVIT_MKB_CLSR:
_NAMES.append(f"cvit-mkb-clsr.{lang}")
for lang in _IITP_MR_LANGS:
_NAMES.append(f"iitp-mr.{lang}")
for lang in _IITP_PR_LANGS:
_NAMES.append(f"iitp-pr.{lang}")
for lang in _ACTSA_LANGS:
_NAMES.append(f"actsa-sc.{lang}")
for lang in _MD_LANGS:
_NAMES.append(f"md.{lang}")
for lang in _WIKI_NER_LANGS:
_NAMES.append(f"wiki-ner.{lang}")
class IndicGlueConfig(datasets.BuilderConfig):
"""BuilderConfig for IndicGLUE."""
def __init__(self, data_url, citation, url, text_features, **kwargs):
"""
Args:
data_url: `string`, url to download the zip file from.
citation: `string`, citation for the data set.
url: `string`, url for information about the data set.
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the csv/json file
**kwargs: keyword arguments forwarded to super.
"""
super(IndicGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.data_url = data_url
self.citation = citation
self.url = url
self.text_features = text_features
class IndicGlue(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
IndicGlueConfig(
name=name,
description=_DESCRIPTIONS[name.split(".")[0]],
text_features=_TEXT_FEATURES[name.split(".")[0]],
data_url=_DATA_URLS[name.split(".")[0]],
citation=_CITATIONS[name.split(".")[0]],
url=_URLS[name.split(".")[0]],
)
for name in _NAMES
]
def _info(self):
features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
if self.config.name.startswith("copa"):
features["label"] = datasets.Value("int32")
if self.config.name.startswith("sna"):
features["label"] = datasets.features.ClassLabel(
names=["kolkata", "state", "national", "sports", "entertainment", "international"]
)
if self.config.name.startswith("inltkh"):
features["label"] = datasets.features.ClassLabel(
names=[
"entertainment",
"business",
"tech",
"sports",
"state",
"spirituality",
"tamil-cinema",
"positive",
"negative",
"neutral",
]
)
if self.config.name.startswith("iitp"):
features["label"] = datasets.features.ClassLabel(names=["negative", "neutral", "positive"])
if self.config.name.startswith("wnli"):
features["label"] = datasets.features.ClassLabel(names=["not_entailment", "entailment", "None"])
if self.config.name.startswith("actsa"):
features["label"] = datasets.features.ClassLabel(names=["positive", "negative"])
if self.config.name.startswith("csqa"):
features["options"] = datasets.features.Sequence(datasets.Value("string"))
features["out_of_context_options"] = datasets.features.Sequence(datasets.Value("string"))
if self.config.name.startswith("md"):
features["story_number"] = datasets.Value("int32")
features["id"] = datasets.Value("int32")
if self.config.name.startswith("wiki-ner"):
features["tokens"] = datasets.features.Sequence(datasets.Value("string"))
features["ner_tags"] = datasets.features.Sequence(
datasets.features.ClassLabel(names=["B-LOC", "B-ORG", "B-PER", "I-LOC", "I-ORG", "I-PER", "O"])
)
features["additional_info"] = datasets.features.Sequence(
datasets.features.Sequence(datasets.Value("string"))
)
return datasets.DatasetInfo(
description=_INDIC_GLUE_DESCRIPTION + "\n" + self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=_INDIC_GLUE_CITATION + "\n" + self.config.citation,
)
def _split_generators(self, dl_manager):
if self.config.name.startswith("wnli"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, "train.csv"),
"split": datasets.Split.TRAIN,
"key": "train-split",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, "dev.csv"),
"split": datasets.Split.VALIDATION,
"key": "val-split",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, "test.csv"),
"split": datasets.Split.TEST,
"key": "test-split",
},
),
]
if self.config.name.startswith("copa"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, "train.jsonl"),
"split": datasets.Split.TRAIN,
"key": "train-split",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, "val.jsonl"),
"split": datasets.Split.VALIDATION,
"key": "val-split",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, "test.jsonl"),
"split": datasets.Split.TEST,
"key": "test-split",
},
),
]
if self.config.name.startswith("sna"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, "bn-train.csv"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, "bn-valid.csv"),
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, "bn-test.csv"),
"split": datasets.Split.TEST,
},
),
]
if self.config.name.startswith("csqa"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}.json"),
"split": datasets.Split.TEST,
},
)
]
if self.config.name.startswith("wstp"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.json"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-valid.json"),
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.json"),
"split": datasets.Split.TEST,
},
),
]
if (
self.config.name.startswith("inltkh")
or self.config.name.startswith("iitp")
or self.config.name.startswith("actsa")
):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.csv"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-valid.csv"),
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.csv"),
"split": datasets.Split.TEST,
},
),
]
if self.config.name.startswith("bbca"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.csv"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.csv"),
"split": datasets.Split.TEST,
},
),
]
if self.config.name.startswith("cvit"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": None,
"src": os.path.join(dl_dir, f"mkb.{self.config.name.split('.')[1].split('-')[0]}"),
"tgt": os.path.join(dl_dir, f"mkb.{self.config.name.split('.')[1].split('-')[1]}"),
"split": datasets.Split.TEST,
},
)
]
if self.config.name.startswith("md"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, "train.json"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, "val.json"),
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, "test.json"),
"split": datasets.Split.TEST,
},
),
]
if self.config.name.startswith("wiki-ner"):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
task_name = self._get_task_name_from_data_url(self.config.data_url)
dl_dir = os.path.join(dl_dir, task_name + "/" + self.config.name.split(".")[1])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-train.txt"),
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-valid.txt"),
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"datafile": os.path.join(dl_dir, f"{self.config.name.split('.')[1]}-test.txt"),
"split": datasets.Split.TEST,
},
),
]
def _generate_examples(self, **args):
"""Yields examples."""
filepath = args["datafile"]
if self.config.name.startswith("wnli"):
if args["key"] == "test-split":
with open(filepath, encoding="utf-8") as f:
data = csv.DictReader(f)
for id_, row in enumerate(data):
yield id_, {"hypothesis": row["sentence1"], "premise": row["sentence2"], "label": "None"}
else:
with open(filepath, encoding="utf-8") as f:
data = csv.DictReader(f)
for id_, row in enumerate(data):
label = "entailment" if row["label"] else "not_entailment"
yield id_, {
"hypothesis": row["sentence1"],
"premise": row["sentence2"],
"label": label,
}
if self.config.name.startswith("copa"):
if args["key"] == "test-split":
with open(filepath, "r", encoding="utf-8") as f:
lines = f.readlines()
data = map(lambda l: json.loads(l), lines)
data = list(data)
for id_, row in enumerate(data):
yield id_, {
"premise": row["premise"],
"choice1": row["choice1"],
"choice2": row["choice2"],
"question": row["question"],
"label": 0,
}
else:
with open(filepath, "r", encoding="utf-8") as f:
lines = f.readlines()
data = map(lambda l: json.loads(l), lines)
data = list(data)
for id_, row in enumerate(data):
yield id_, {
"premise": row["premise"],
"choice1": row["choice1"],
"choice2": row["choice2"],
"question": row["question"],
"label": row["label"],
}
if self.config.name.startswith("sna"):
df = pd.read_csv(filepath, names=["label", "text"])
for id_, row in df.iterrows():
yield id_, {"text": row["text"], "label": row["label"]}
if self.config.name.startswith("csqa"):
with open(filepath, encoding="utf-8") as f:
data = json.load(f)
df = pd.DataFrame(data["cloze_data"])
df["out_of_context_options"].loc[df["out_of_context_options"].isnull()] = (
df["out_of_context_options"].loc[df["out_of_context_options"].isnull()].apply(lambda x: [])
)
for id_, row in df.iterrows():
yield id_, {
"question": row["question"],
"answer": row["answer"],
"category": row["category"],
"title": row["title"],
"out_of_context_options": row["out_of_context_options"],
"options": row["options"],
}
if self.config.name.startswith("wstp"):
df = pd.read_json(filepath)
for id_, row in df.iterrows():
yield id_, {
"sectionText": row["sectionText"],
"correctTitle": row["correctTitle"],
"titleA": row["titleA"],
"titleB": row["titleB"],
"titleC": row["titleC"],
"titleD": row["titleD"],
"url": row["url"],
}
if (
self.config.name.startswith("inltkh")
or self.config.name.startswith("bbca")
or self.config.name.startswith("iitp")
):
df = pd.read_csv(filepath, names=["label", "text"])
for id_, row in df.iterrows():
yield id_, {"text": row["text"], "label": row["label"]}
if self.config.name.startswith("actsa"):
df = pd.read_csv(filepath, names=["label", "text"])
for id_, row in df.iterrows():
label = "positive" if row["label"] else "negative"
yield id_, {"text": row["text"], "label": label}
if self.config.name.startswith("cvit"):
source = args["src"]
target = args["tgt"]
src, tgt = open(source, "r", encoding="utf-8"), open(target, "r", encoding="utf-8")
src, tgt = src.readlines(), tgt.readlines()
for id_, row in enumerate(zip(src, tgt)):
yield id_, {"sentence1": row[0], "sentence2": row[1]}
if self.config.name.startswith("md"):
df = pd.read_json(filepath)
for id_, row in df.iterrows():
yield id_, {
"story_number": row["Story_no"],
"sentence": row["Sentence"],
"discourse_mode": row["Discourse Mode"],
"id": row["id"],
}
if self.config.name.startswith("wiki-ner"):
with open(filepath, "r", encoding="utf-8") as f:
data = f.readlines()
tokens = []
labels = []
infos = []
                for id_, row in enumerate(data):
                    row = row.split()
                    if len(row) == 0:
                        yield id_, {"tokens": tokens, "ner_tags": labels, "additional_info": infos}
                        tokens = []
                        labels = []
                        infos = []
                        continue
                    tokens.append(row[0])
                    labels.append(row[-1])
                    infos.append(row[1:-1])
                if tokens:  # flush the last sentence when the file lacks a trailing blank line
                    yield len(data), {"tokens": tokens, "ner_tags": labels, "additional_info": infos}
def _get_task_name_from_data_url(self, data_url):
return data_url.split("/")[-1].split(".")[0]
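
# Usage sketch (an addition, not part of the original script): loading a single
# IndicGLUE config through this builder. It assumes the script is saved locally
# as "indic_glue.py"; config names follow the f"{task}.{lang}" pattern built in
# _NAMES above.
if __name__ == "__main__":
    wnli_en = datasets.load_dataset("./indic_glue.py", "wnli.en")
    print(wnli_en["train"][0])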
|
[
"kartik@deepklarity.com"
] |
kartik@deepklarity.com
|
03114e5bc681884e49cec60bf21af44cc401113f
|
203db2e71e5bdd6cc5b9ba5f3012634c149de00f
|
/pythonex/0807/ex3.py
|
0c309727eb3f016485f45ddc2c122efa4246fa5e
|
[] |
no_license
|
NayoungKwon413/Python-study
|
c8401361585110a761cb02482a4fbd31f169f87a
|
ae8b1aae59f53bfa11c27578dea87c4f81f852fa
|
refs/heads/master
| 2022-12-13T15:35:46.883063
| 2020-09-03T10:24:50
| 2020-09-03T10:24:50
| 292,536,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
'''
Created on 2020. 8. 7.
@author: GDJ24
A simple calculator function
'''
def calc(var1,var2,oper):
if oper=='+':
return var1+var2
if oper=='-':
return var1-var2
if oper=='*':
return var1*var2
if oper=='/':
return var1/var2
oper = input("Select an operator (+,-,*,/) => ")
var1 = int(input("First number => "))
var2 = int(input("Second number => "))
res = calc(var1,var2,oper)
# res can be a float (division) or None (unknown operator), so format it with %s
print("Result: %d %s %d = %s" % (var1,oper,var2,res))
|
[
"monika94@naver.com"
] |
monika94@naver.com
|
ff6d027135f24916c328e61afee061883d54d057
|
f5b7b88c9b2fcfc344f5f11c3fb3ec9a7a9f64ef
|
/34-PCA.py
|
89da2e0a7c33d43aa727b3b33e2e5f2cf28a7af2
|
[] |
no_license
|
vedavd/Machine-Learning-A-Z
|
559c566c273f35daa760f93b5023e490aee50128
|
57b26f33ffa8644c1492ad894db10d66b40dfc3f
|
refs/heads/master
| 2021-01-05T05:25:48.525693
| 2020-04-27T16:01:50
| 2020-04-27T16:01:50
| 240,896,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,159
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 08:58:57 2020
@author: vedav
"""
#import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Read the data
dataset=pd.read_csv("34_PCA_Wine_Data.csv")
#Create the independent variable.(index 0 to index 12)
X=dataset.iloc[:,0:13].values
#Create the dependent variable i.e. Purchased column(index 13)
y=dataset.iloc[:,13].values
#Split the data into training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
#We should apply feature scaling
#Feature Scaling is must before applying Dimensionality Reduction techniques such as PCA or LDA
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
#Apply feature scaling on Xtrain and Xtest
#Use fit_transform for the X training set
X_train=sc_x.fit_transform(X_train)
#Use transform for X test set
X_test=sc_x.transform(X_test)
#Applying PCA
#------------------from sklearn.decomposition import PCA
#------------------pca=PCA(n_components=None)
#------------------X_train=pca.fit_transform(X_train)
#------------------X_test=pca.fit_transform(X_test)
#we are creating explained_variance variable that is containing the percentage of variance explained by
#each of the principal components that we extracted here
#------------------explained_variance=pca.explained_variance_ratio_
#Looking at the explained_variance results, the first two components account for
#(0.359522+0.198206=0.557728) i.e. 55.77% of the variance,
#so we select n_components=2.
#We need to restart the kernel because the existing pca has already transformed
#X_train and X_test; after restarting, execute PCA with n_components=2.
from sklearn.decomposition import PCA
pca=PCA(n_components=2)
X_train=pca.fit_transform(X_train)
X_test=pca.transform(X_test)  # use transform (not fit_transform) so the test set reuses the projection fitted on the training set
#we are creating explained_variance variable that is containing the percentage of variance explained by
#each of the principal components that we extracted here
explained_variance=pca.explained_variance_ratio_
#Fit the logistic regression to the training dataset
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression(random_state=0)
classifier.fit(X_train,y_train)
#Predicting the test set results
y_pred=classifier.predict(X_test)
#Create the confusion matrix
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
#Visualise training set results
from matplotlib.colors import ListedColormap
X_set,y_set=X_train,y_train
X1,X2=np.meshgrid(np.arange(start=X_set[:,0].min()-1,stop=X_set[:,0].max()+1,step=0.01),
np.arange(start=X_set[:,1].min()-1,stop=X_set[:,1].max()+1,step=0.01))
plt.contourf(X1,X2,classifier.predict(np.array([X1.ravel(),X2.ravel()]).T).reshape(X1.shape),
alpha=0.75,cmap=ListedColormap(('red','green','blue')))
plt.xlim(X1.min(),X1.max())
plt.ylim(X2.min(),X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set==j,0],X_set[y_set==j,1],
c=ListedColormap(('red','green','blue'))(i),label=j)
plt.title("Logistic Regression(Training Data)")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.legend()
plt.show()
#Visualise test set results
from matplotlib.colors import ListedColormap
X_set,y_set=X_test,y_test
X1,X2=np.meshgrid(np.arange(start=X_set[:,0].min()-1,stop=X_set[:,0].max()+1,step=0.01),
np.arange(start=X_set[:,1].min()-1,stop=X_set[:,1].max()+1,step=0.01))
plt.contourf(X1,X2,classifier.predict(np.array([X1.ravel(),X2.ravel()]).T).reshape(X1.shape),
alpha=0.75,cmap=ListedColormap(('red','green','blue')))
plt.xlim(X1.min(),X1.max())
plt.ylim(X2.min(),X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set==j,0],X_set[y_set==j,1],
c=ListedColormap(('red','green','blue'))(i),label=j)
plt.title("Logistic Regression(Test Data)")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.legend()
plt.show()
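
# Illustrative addition (not part of the original script): picking n_components
# programmatically from the cumulative explained variance. The sketch re-scales
# the full feature matrix X so it is self-contained.
X_scaled_all = StandardScaler().fit_transform(X)
cumulative = np.cumsum(PCA(n_components=None).fit(X_scaled_all).explained_variance_ratio_)
n_keep = int(np.argmax(cumulative >= 0.55) + 1)  # smallest k explaining at least 55% of variance
print("n_components for >=55% variance:", n_keep)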
|
[
"noreply@github.com"
] |
vedavd.noreply@github.com
|
3339d1eacd12e7530eff357b3cbf3e28a0855845
|
dce96c94b93f8ed99e8ad1202065fc0923fc113b
|
/Question1.py
|
436c131ea50bcd9cccaacccc08797450bb2c4839
|
[] |
no_license
|
dasha-pokutnaya/HPM573S18_Pokutnaya_HW7
|
c2c96e21ebba329307699716b609cfbad815814e
|
0c6cc96fa63f3e970211020b8d2e6eb830d1a3d9
|
refs/heads/master
| 2020-03-07T13:14:04.600736
| 2018-03-31T03:29:08
| 2018-03-31T03:29:08
| 127,495,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,605
|
py
|
from enum import Enum
import numpy as np
import scr.SamplePathClasses as PathCls  # assumed module providing SamplePathBatchUpdate used below
import scr.StatisticalClasses as Stat
class HealthStat(Enum):
""" health status of patients """
ALIVE = 1
DEAD = 0
class Patient:
    def __init__(self, id, mortality_prob):
""" initiates a patient
:param id: ID of the patient
:param mortality_prob: probability of death during a time-step (must be in [0,1])
"""
self._id = id
self._rnd = np.random # random number generator for this patient
self._rnd.seed(self._id) # specifying the seed of random number generator for this patient
self._mortalityProb = mortality_prob
self._healthState = HealthStat.ALIVE # assuming all patients are alive at the beginning
self._survivalTime = 0
self._over5 = []
self._livedover5 = 0
def simulate(self, n_time_steps):
""" simulate the patient over the specified simulation length """
t = 0 # simulation current time
# while the patient is alive and simulation length is not yet reached
while self._healthState == HealthStat.ALIVE and t < n_time_steps:
# determine if the patient will die during this time-step
if self._rnd.sample() < self._mortalityProb:
self._healthState = HealthStat.DEAD
self._survivalTime = t + 1 # assuming deaths occurs at the end of this period
# increment time
t += 1
def get_survival_time(self):
""" returns the patient survival time """
# return survival time only if the patient has died
if self._healthState == HealthStat.DEAD:
return self._survivalTime
else:
return None
    def survived_over_5(self):
        """ returns True if this patient's recorded survival time exceeds 5 time steps """
        return self._survivalTime > 5
class Cohort:
def __init__(self, id, pop_size, mortality_prob):
""" create a cohort of patients
:param id: cohort ID
:param pop_size: population size of this cohort
:param mortality_prob: probability of death for each patient in this cohort over a time-step (must be in [0,1])
"""
self._initialPopSize = pop_size # initial population size
self._patients = [] # list of patients
self._survivalTimes = [] # list to store survival time of each patient
# populate the cohort
for i in range(pop_size):
# create a new patient (use id * pop_size + n as patient id)
patient = Patient(id * pop_size + i, mortality_prob)
# add the patient to the cohort
self._patients.append(patient)
def simulate(self, n_time_steps):
""" simulate the cohort of patients over the specified number of time-steps
:param n_time_steps: number of time steps to simulate the cohort
:returns simulation outputs from simulating this cohort
"""
# simulate all patients
for patient in self._patients:
# simulate
patient.simulate(n_time_steps)
# record survival time
value = patient.get_survival_time()
if not (value is None):
self._survivalTimes.append(value)
# return cohort outcomes for this simulated class
return CohortOutcomes(self)
def get_survival_times(self):
""" :returns the survival times of the patients in this cohort"""
return self._survivalTimes
def get_initial_pop_size(self):
""" :returns the initial population size of this cohort"""
return self._initialPopSize
class CohortOutcomes:
def __init__(self, simulated_cohort):
""" extracts outcomes of a simulated cohort
:param simulated_cohort: a cohort after being simulated"""
self._simulatedCohort = simulated_cohort
# summary statistics on survival times
self._sumStat_patientSurvivalTimes = \
Stat.SummaryStat('Patient survival times', self._simulatedCohort.get_survival_times())
def get_ave_survival_time(self):
""" returns the average survival time of patients in this cohort """
return self._sumStat_patientSurvivalTimes.get_mean()
def get_CI_survival_time(self, alpha):
"""
:param alpha: confidence level
:return: t-based confidence interval
"""
return self._sumStat_patientSurvivalTimes.get_t_CI(alpha)
def get_survival_curve(self):
""" returns the sample path for the number of living patients over time """
# find the initial population size
n_pop = self._simulatedCohort.get_initial_pop_size()
# sample path (number of alive patients over time)
n_living_patients = PathCls.SamplePathBatchUpdate('# of living patients', 0, n_pop)
# record the times of deaths
for obs in self._simulatedCohort.get_survival_times():
n_living_patients.record(time=obs, increment=-1)
return n_living_patients
def get_survival_times(self):
""" :returns the survival times of the patients in this cohort"""
return self._simulatedCohort.get_survival_times()
class MultiCohort:
""" simulates multiple cohorts with different parameters """
def __init__(self, ids, pop_sizes, mortality_probs):
"""
:param ids: a list of ids for cohorts to simulate
:param pop_sizes: a list of population sizes of cohorts to simulate
:param mortality_probs: a list of the mortality probabilities
"""
self._ids = ids
self._popSizes = pop_sizes
self._mortalityProbs = mortality_probs
self._survivalTimes = [] # two dimensional list of patient survival time from each simulated cohort
self._meanSurvivalTimes = [] # list of mean patient survival time for each simulated cohort
self._sumStat_meanSurvivalTime = None
def simulate(self, n_time_steps):
""" simulates all cohorts """
for i in range(len(self._ids)):
# create a cohort
cohort = Cohort(self._ids[i], self._popSizes[i], self._mortalityProbs[i])
# simulate the cohort
output = cohort.simulate(n_time_steps)
# store all patient surival times from this cohort
self._survivalTimes.append(cohort.get_survival_times())
# store average survival time for this cohort
self._meanSurvivalTimes.append(output.get_ave_survival_time())
# after simulating all cohorts
# summary statistics of mean survival time
self._sumStat_meanSurvivalTime = Stat.SummaryStat('Mean survival time', self._meanSurvivalTimes)
def get_cohort_mean_survival(self, cohort_index):
""" returns the mean survival time of an specified cohort
:param cohort_index: integer over [0, 1, ...] corresponding to the 1st, 2ndm ... simulated cohort
"""
return self._meanSurvivalTimes[cohort_index]
def get_cohort_CI_mean_survival(self, cohort_index, alpha):
""" :returns: the confidence interval of the mean survival time for a specified cohort
:param cohort_index: integer over [0, 1, ...] corresponding to the 1st, 2ndm ... simulated cohort
:param alpha: significance level
"""
st = Stat.SummaryStat('', self._survivalTimes[cohort_index])
return st.get_t_CI(alpha)
def get_all_mean_survival(self):
""" :returns a list of mean survival time for all simulated cohorts"""
return self._meanSurvivalTimes
def get_overall_mean_survival(self):
""" :returns the overall mean survival time (the mean of the mean survival time of all cohorts)"""
return self._sumStat_meanSurvivalTime.get_mean()
def get_cohort_PI_survival(self, cohort_index, alpha):
""" :returns: the prediction interval of the survival time for a specified cohort
:param cohort_index: integer over [0, 1, ...] corresponding to the 1st, 2ndm ... simulated cohort
:param alpha: significance level
"""
st = Stat.SummaryStat('', self._survivalTimes[cohort_index])
return st.get_PI(alpha)
def get_PI_mean_survival(self, alpha):
""" :returns: the prediction interval of the mean survival time"""
return self._sumStat_meanSurvivalTime.get_PI(alpha)
#Problem 1: percentage of simulated patients who survived beyond 5 years
#(illustrative cohort parameters; not specified in this file)
my_cohort = Cohort(id=1, pop_size=1000, mortality_prob=0.1)
my_cohort.simulate(n_time_steps=100)
survival_times = my_cohort.get_survival_times()
percent_over_5 = 100 * sum(1 for t in survival_times if t > 5) / len(survival_times)
print('Percentage of patients survived beyond 5 years', percent_over_5)
|
[
"noreply@github.com"
] |
dasha-pokutnaya.noreply@github.com
|
ca5e812fa90e6845444fddb52d48d6d9c7bd9d5c
|
7832e7dc8f1583471af9c08806ce7f1117cd228a
|
/aliyun-python-sdk-ons/aliyunsdkons/request/v20170918/OnsTraceQueryByMsgIdRequest.py
|
19b77d0b56fda4a2a169b69a18c6a06cce57c27e
|
[
"Apache-2.0"
] |
permissive
|
dianplus/aliyun-openapi-python-sdk
|
d6494850ddf0e66aaf04607322f353df32959725
|
6edf1ed02994245dae1d1b89edc6cce7caa51622
|
refs/heads/master
| 2023-04-08T11:35:36.216404
| 2017-11-02T12:01:15
| 2017-11-02T12:01:15
| 109,257,597
| 0
| 0
|
NOASSERTION
| 2023-03-23T17:59:30
| 2017-11-02T11:44:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,122
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class OnsTraceQueryByMsgIdRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ons', '2017-09-18', 'OnsTraceQueryByMsgId')
def get_PreventCache(self):
return self.get_query_params().get('PreventCache')
def set_PreventCache(self,PreventCache):
self.add_query_param('PreventCache',PreventCache)
def get_OnsRegionId(self):
return self.get_query_params().get('OnsRegionId')
def set_OnsRegionId(self,OnsRegionId):
self.add_query_param('OnsRegionId',OnsRegionId)
def get_OnsPlatform(self):
return self.get_query_params().get('OnsPlatform')
def set_OnsPlatform(self,OnsPlatform):
self.add_query_param('OnsPlatform',OnsPlatform)
def get_Topic(self):
return self.get_query_params().get('Topic')
def set_Topic(self,Topic):
self.add_query_param('Topic',Topic)
def get_MsgId(self):
return self.get_query_params().get('MsgId')
def set_MsgId(self,MsgId):
self.add_query_param('MsgId',MsgId)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_BeginTime(self):
return self.get_query_params().get('BeginTime')
def set_BeginTime(self,BeginTime):
self.add_query_param('BeginTime',BeginTime)
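
# Usage sketch (an addition, not part of the SDK file): sending this request
# through AcsClient. The credentials, region and message id below are
# placeholders, not real values.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = OnsTraceQueryByMsgIdRequest()
    request.set_Topic('example-topic')
    request.set_MsgId('example-message-id')
    print(client.do_action_with_exception(request))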
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
d63ebc6be1a001368b725fe8beea41e9b7ee9f53
|
0a40da80ee72f14050c713f9d5372670bef26bad
|
/standard_library/csv_file.py
|
c03db9288772d1ffa5c562b52dc164aa64442eb7
|
[] |
no_license
|
naimishbhagat/python3
|
17430adf788cc2bc6ed31b2b95e522939ed16d09
|
a740229c00db83a2c7c69720340a7a619493139a
|
refs/heads/main
| 2022-12-29T02:57:17.884417
| 2020-10-11T01:03:20
| 2020-10-11T01:03:20
| 303,016,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
import csv
# with open("data.csv", "w") as file:
# writer = csv.writer(file)
# writer.writerow(["transaction_id", "product_id", "price"])
# writer.writerow([1000, 1, 5])
# writer.writerow([1001, 2, 15])
with open("data.csv") as file:
csvreader = csv.reader(file)
# print(list(csvreader))
for row in csvreader:
print(row)
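
# Illustrative addition (not part of the original): reading the same file with
# csv.DictReader, which keys each row by the header names written above.
with open("data.csv") as file:
    for record in csv.DictReader(file):
        print(record["transaction_id"], record["price"])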
|
[
"naimish.bhagat@arq.group"
] |
naimish.bhagat@arq.group
|
e30bbcaf8bb08ef3829ba503bbe5745147e18756
|
0cc4eb3cb54f8394c127ace62d3108fdb5230c85
|
/.spack-env/view/lib/python3.7/test/multibytecodec_support.py
|
bd6b243293ba3fc0c45dc2a673595ef5b4f10719
|
[] |
no_license
|
jacobmerson/spack-develop-env
|
5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8
|
5fca20ca343b1a76f05fc635c87f94ed25417d94
|
refs/heads/master
| 2022-07-04T02:22:50.264727
| 2020-05-06T05:13:50
| 2020-05-06T05:13:50
| 261,657,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
/lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/python-3.7.7-oihhthdoxtgh4krvzpputn5ozwcnq2by/lib/python3.7/test/multibytecodec_support.py
|
[
"mersoj@rpi.edu"
] |
mersoj@rpi.edu
|
ebc05e5c030cb186dcfb2743ad4e25a2601074ce
|
904cbe01b3aa8ba4a5dadb42570cd5ffa0e20ed4
|
/CX_LIFE_NEW/test_case/test_01.py
|
61d95bf4009b73bc27171806d36616d58972ce2f
|
[] |
no_license
|
DoraemonYzx/CX_LIFE_NEW
|
7f09a5b235c5cc40f524a4e8d0d0bf2c094784d0
|
51c83f68aa3d462d8856d860c3efd4d3747a5b24
|
refs/heads/master
| 2021-01-07T10:36:20.333411
| 2020-02-19T16:33:16
| 2020-02-19T16:33:16
| 241,664,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,055
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:余振新
@file: test_01.py
@time: 2020/02/14
"""
import unittest
from selenium import webdriver
from common.base_driver import base_driver
from common.base_action import BaseAction
from pages.wx_page import WxPage
from pages.home_page import HomePage
from pages.commodity_details_page import CommodityDetailsPage
from pages.my_page import MyPage
from pages.navigation_bar_page import NavigationBarPage
from pages.confirm_order_page import ConfirmOrderPage
from pages.pay_complete_page import PayCompletePage
from pages.my_order_page import MyOrderPage
from pages.order_details_page import OrderDetailsPage
from time import sleep
"""
从首页选择商品直接购买:
1、进入小程序
2、从首页选择一个商品
3、进行购买
4、进行付款
5、查看订单
"""
class TestCase01(unittest.TestCase):
"""从首页选择商品直接购买"""
@classmethod
def setUpClass(cls):
cls.driver = base_driver()
cls.baseaction = BaseAction(cls.driver)
cls.wx_page = WxPage(cls.driver)
cls.home_page = HomePage(cls.driver)
cls.commodity_details_page = CommodityDetailsPage(cls.driver)
cls.my_page = MyPage(cls.driver)
cls.navigation_bar_page = NavigationBarPage(cls.driver)
cls.confirm_order_page = ConfirmOrderPage(cls.driver)
cls.pay_complete_page = PayCompletePage(cls.driver)
cls.my_order_page = MyOrderPage(cls.driver)
cls.order_details_page = OrderDetailsPage(cls.driver)
print("开始测试")
@classmethod
def tearDownClass(cls):
cls.driver.quit()
    def test_001(self):
        """Open the CX Life (常笑生活) mini program"""
        for i in range(3):
            self.wx_open = self.wx_page.assert_find_search()
            print(self.wx_open)
            if self.wx_open:
                print("WeChat page is open, moving on to the next step")
                self.baseaction.swipe_up_and_down(0.5, 0.2, 0.8)
                sleep(1.5)
                self.wx_page.click_cxlife()
                break
            else:
                print("WeChat has not opened yet.")
        self.assertTrue(self.wx_open, msg="Timed out entering WeChat")
    def test_002(self):
        """Click on a product"""
        for i in range(4):
            self.home_open = self.home_page.assert_find_must_buy_one()
            print(self.home_open)
            # self.home_page.assert_click_plus_coupon1()
            # self.home_page.click_plus_coupon()
            if self.home_open:
                print("On the CX Life home page, moving on to the next step")
                self.home_page.assert_click_plus_coupon()
                self.baseaction.swipe_up_and_down(0.5, 0.8, 0.3)
                self.home_page.click_commodity_one()
                break
            else:
                print("Home page has not loaded yet.")
        self.assertTrue(self.commodity_details_page.assert_buy(), msg="Did not reach the product details page!")
    def test_003(self):
        """Place the purchase"""
        self.commodity_details_page.click_buy()
        self.not_login_in = self.my_page.assert_confirm_login()
        if self.not_login_in:
            self.my_page.click_confirm_login()
            self.my_page.click_wx_login()
            self.commodity_details_page.click_buy()
        else:
            pass
        self.assertTrue(self.confirm_order_page.assert_pay(), msg="Did not reach the order confirmation page")
    def test_004(self):
        """Pay for the order"""
        self.confirm_order_page.click_pay()
        self.assertTrue(self.pay_complete_page.assert_pay_success(), msg="Payment did not complete")
    def test_005(self):
        """Check the order"""
        self.pay_complete_page.click_return_order()
        self.assertTrue(self.my_order_page.assert_all(), msg="Did not reach the all-orders page")
    def test_006(self):
        """Check the order details"""
        self.my_order_page.click_order_one()
        self.assertTrue(self.order_details_page.assert_cx_life(), msg="Did not reach the order details page")
|
[
"1353520146@qq.com"
] |
1353520146@qq.com
|
ffc6850ee4a7c07bc7fa57c305cebadb10c99d31
|
206a1dc4ba509e85221a755bb18b7e4abef89e84
|
/retrofreud/moodmeter/views.py
|
403cf7ca2ecbb7b1ce195b55f724b62ae76e5088
|
[] |
no_license
|
mmatchyn/retrofreud
|
d32e04fadaf01860b242afab49f2a26a9ad40d3a
|
9923f2179dbf0f647fab8e25af960f1f03ac3aa9
|
refs/heads/master
| 2021-05-01T16:28:34.291006
| 2013-07-29T20:50:04
| 2013-07-29T20:50:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,549
|
py
|
from django.core.context_processors import csrf
from django.views.decorators.csrf import requires_csrf_token
#from django.utils.safestring import mark_safe
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from moodmeter.models import MoodProfile
def index(request):
profiles = MoodProfile.objects.all()
return render(request, 'moodmeter/index.html', {'profiles': profiles})
def vote(request, id):
profile = get_object_or_404(MoodProfile,id=id)
template = 'moodmeter/profile.html'
if request.is_ajax():
template = 'moodmeter/profile_ajax.html'
mood = request.GET.get('mood')
inc = int(request.GET.get('inc'))
    # map the mood keyword onto the matching model field and clamp at zero
    mood_fields = {'happy': 'happiness', 'sad': 'sadness', 'excited': 'excitement',
                   'bored': 'boredom', 'tired': 'tiredness'}
    field = mood_fields.get(mood)
    if field:
        setattr(profile, field, max(getattr(profile, field) + inc, 0))
profile.save()
return render(request, template, {'profile':profile})
def vote_thumb(request, id):
profile = get_object_or_404(MoodProfile,id=id)
template = 'moodmeter/profile.html'
if request.is_ajax():
template = 'moodmeter/profile_thumb_ajax.html'
inc = int(request.GET.get('inc'))
if inc < 0:
profile.thumbs_down -= inc #add positive :)
else:
profile.thumbs_up += inc
profile.save()
return render(request, template, {'profile':profile})
@requires_csrf_token
def update(request, id):
profile = get_object_or_404(MoodProfile,id=id)
c = {}
c.update(csrf(request))
template = "moodmeter/profile_suggestion_ajax.html"
if request.method == 'POST':
content = request.POST["activities"]
profile.improvements = content
profile.save()
return render(request, template, {'profile':profile})
def profile(request, id, action='base'):
profile = get_object_or_404(MoodProfile,id=id)
template = 'moodmeter/profile.html'
if request.is_ajax():
if action == 'mood':
template = "moodmeter/profile_ajax.html"
elif action == 'thumbs':
template = "moodmeter/profile_thumb_ajax.html"
elif action == 'sug':
template = "moodmeter/profile_suggestion_ajax.html"
return render(request, template, {'profile':profile})
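
# Illustrative addition (not part of the original module): a hypothetical
# urls.py wiring for these views; the patterns and names below are assumptions,
# not taken from the project.
#
# from django.conf.urls import url
# from moodmeter import views
#
# urlpatterns = [
#     url(r'^$', views.index, name='index'),
#     url(r'^profile/(?P<id>\d+)/vote/$', views.vote, name='vote'),
#     url(r'^profile/(?P<id>\d+)/vote_thumb/$', views.vote_thumb, name='vote_thumb'),
#     url(r'^profile/(?P<id>\d+)/update/$', views.update, name='update'),
#     url(r'^profile/(?P<id>\d+)/(?P<action>\w+)/$', views.profile, name='profile'),
# ]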
|
[
"mateja.verlic@gmail.com"
] |
mateja.verlic@gmail.com
|
2d1d912843c65dfabe278497ecc3011b961bd530
|
244919911b21743e089d0a995cea7190a3e49ce4
|
/caesar.py
|
2db90c2daf7f61225127629629722c76fabd0b2b
|
[] |
no_license
|
Tomcruseal/sage
|
644683ec5779a2ca63c4d739cbf02e957be8a8ee
|
a66e21f018f73c815a59dd1b5d986c021f635917
|
refs/heads/master
| 2021-01-10T15:22:16.204150
| 2016-03-16T01:51:13
| 2016-03-16T01:51:13
| 53,791,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
letters=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
def get_index(char):
    for i in xrange(len(letters)):
        if char==letters[i]:
            return i
    return None  # char is not an uppercase letter

def caesar(message,x):
    cipher_message=[]
    for char in message:
        index = get_index(char)
        if index is None:
            cipher_message.append(char)  # leave spaces/punctuation unshifted
            continue
        cipher_message.append(letters[(index+x)%26])
    return ''.join(cipher_message)
def get_message():
message=raw_input().upper()
return message
print "enter plaintext:"
m=get_message()
print "enter x"
x=int(input())
print caesar(m,x)
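
# Illustrative addition (not part of the original): decryption is encryption
# with the complementary shift, so a round trip returns the plaintext.
print caesar(caesar("ATTACKATDAWN", 3), 26 - 3)  # prints ATTACKATDAWN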
|
[
"lzb731@hotmail.com"
] |
lzb731@hotmail.com
|
bbd048b8f532b3ed17fdd7db6d0f506df6a58b48
|
0efb5aa14dae6ae061d5d45ab22a8c27de5fb1d6
|
/maringaji/maringaji_api/models.py
|
e6568f77a1af8009d0fd3ad430ef26eec6356761
|
[] |
no_license
|
monabstudios/maringaji-mtc
|
69a568e7699d1d5ebb2db2daee92a3a01fcef4fb
|
a18941e9cdd5673b100405a2de40ff4ad631e084
|
refs/heads/master
| 2020-03-18T00:33:26.691284
| 2018-05-22T14:56:28
| 2018-05-22T14:56:28
| 134,100,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,710
|
py
|
from django.db import models
from django.contrib.auth.models import BaseUserManager # Allows us to alter behaviour of creating UserProfiles
from django.contrib.auth.models import AbstractBaseUser # Base user that we are going to modify
from django.contrib.auth.models import PermissionsMixin # Allows us to easily set up user permissions
# Create your models here.
class UserProfileManager(BaseUserManager):
def create_user(self, email, name, is_tutor, is_student, desc, password=None):
if not email:
raise ValueError('Users must have an email address.')
email = self.normalize_email(email)
user = self.model(
email=email,
name=name,
is_tutor=is_tutor,
is_student=is_student,
desc=desc
)
user.set_password(password) # uses encryption and hashing, bcrypt
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
user = self.create_user(email, name, False, False, None, password)
user.is_superuser = True # allows user to have privileges in Django Admin
user.is_staff = True # allows user to have privileges in Django Admin
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
# More about Django models: https://docs.djangoproject.com/en/1.11/ref/models/fields/#django.db.models.Field
email = models.EmailField(unique=True)
name = models.CharField(max_length=50)
is_tutor = models.BooleanField(default=False)
is_student = models.BooleanField(default=False)
desc = models.TextField(null=True, blank=True)
is_active = models.BooleanField(default=True) # must-have attribute
is_staff = models.BooleanField(default=False) # must-have attribute
objects = UserProfileManager()
USERNAME_FIELD = 'email' # replace Django's default behaviour of using a separate username
REQUIRED_FIELDS = ['name'] # email is already required
def get_full_name(self):
"""Get user's full name."""
return self.name
def get_short_name(self):
"""Get user's short name."""
return self.name
def __str__(self):
"""A string representation of the UserProfile object."""
return self.email
class Applications(models.Model):
"""A student's application for a tutor."""
application_id = models.AutoField(primary_key=True)
student = models.ForeignKey(UserProfile, related_name='student')
tutor = models.ForeignKey(UserProfile, related_name='tutor')
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
def __str__(self):
"""A string representation of the Application object."""
return '{0} + {1}'.format(self.student.name, self.tutor.name)
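
# Illustrative addition (not part of the original models module): Django only
# uses a custom user model if settings.py points at it; the app label below is
# inferred from the file path (maringaji_api) and is an assumption.
#
# AUTH_USER_MODEL = 'maringaji_api.UserProfile'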
|
[
"monabstudios@gmail.com"
] |
monabstudios@gmail.com
|
4cf46c7ea1c390ac0f4372c6439b29c2a2f92330
|
8498b874aa350922e6352254642a87560ebbb564
|
/data/atmosphere_biogeochemistry/HadISDH_humidity_trend/code/process_relative_humidity.py
|
d9bb16ad580d4d88a573d25a3c1534e4a9e9da16
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
RPGroup-PBoC/human_impacts
|
bf023da546f88111b7b4b8717466c4635c57c14d
|
41a1365a9ec01850d59f782de77984e7afad74af
|
refs/heads/master
| 2023-04-10T04:44:07.774700
| 2023-02-17T16:39:07
| 2023-02-17T16:39:07
| 272,572,291
| 9
| 3
|
MIT
| 2023-01-20T06:42:53
| 2020-06-16T00:28:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,144
|
py
|
#################
# This script converts the HadISDH data in .dat format to a CSV file in tidy format
# with the processed time series. The data provided are relative humidity anomalies
# with respect to a 1981-2010 reference in %rh. Uncertainty estimates are 2 sigma.
# The original data file contains 10 columns:
# Column 1 is the date in year.decimal format.
# Column 2 is the relative humidity anomaly.
# Columns 3 and 4 are the lower and upper bounds of the within gridbox
# sampling uncertainty.
# Columns 5 and 6 are the lower and upper bounds of the regional gridbox coverage
# uncertainty.
# Columns 7 and 8 are the lower and upper bounds of the station uncertainty
# (climatology, measurement and homogeneity adjustment).
# Columns 9 and 10 are the lower and upper bounds of the combined uncertainty.
#
# Last updated: March 2021
# Author: Nicholas S. Sarai
#
#################
import pandas as pd
proc_data = pd.read_csv('../source/HadISDH.blendRH.1.0.0.2019fSHIP_global_annual_full_anoms8110_JAN2020.dat',
header=None, delim_whitespace=True, engine = 'python',
names=['year', 'anomaly', 'low. bound within gridbox sampling uncertainty', 'upp.bound within gridbox sampling uncertainty',
'low. bound regional gridbox coverage uncertainty', 'upp. bound regional gridbox coverage uncertainty', 'low. bound station uncertainty (climatology, measurement and homogeneity adjustment)',
'upp. bound station uncertainty (climatology, measurement and homogeneity adjustment)', 'low. bound combined uncertainty', 'upp. bound combined uncertainty']
)
proc_data_clean = proc_data.drop([47,48,49])
proc_data_clean['year'] = pd.to_numeric(proc_data_clean['year']).astype(int)
data_tidy = proc_data_clean.melt(id_vars=proc_data_clean.columns[0],
var_name="Reported value",
value_name="Relative humidity anomaly (%rh)")
data_tidy.to_csv(r'../processed/HadISDH_relative_humidity_1973-2019.csv', index = False)
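
# Illustrative addition (not part of the original script): a quick sanity check
# that the tidy file round-trips, with one row per (year, reported value) pair.
check = pd.read_csv('../processed/HadISDH_relative_humidity_1973-2019.csv')
print(check.shape)
print(check.head())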
|
[
"nsarai@caltech.edu"
] |
nsarai@caltech.edu
|
7b587126968e0ad3238e2e28260af160bc4e6212
|
31bf601e335c0e803b898faca7298d4cdfe0c3bd
|
/q22.py
|
71a03f4f03b107429691c255fb46ff085cd0e03b
|
[] |
no_license
|
ankushngpl2311/decision-tree
|
dbdfa8b35b1a2365b0a0317cba3ecc77333a0bef
|
9c87db0f7e2e79f86b26759dcd1f65531ece9a31
|
refs/heads/master
| 2020-04-17T03:03:42.311615
| 2019-01-23T12:22:47
| 2019-01-23T12:22:47
| 166,164,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,047
|
py
|
import pandas as pd
import math
# from tree import *
import copy
from sklearn.model_selection import train_test_split
import pickle as pk
from sklearn.metrics import precision_score
class node:
def __init__(self):
self.children ={} # {low:pointer to low,high:pointer to high}
def insert(self,name,value,positive,negative):
self.name= name
self.value=value
self.positive=positive
self.negative=negative
data= pd.read_csv('train.csv')[0:100]
label ="left"
true= 1
false=0
def prepare_dict(data):
cols= data.columns
d={}
for i in cols:
d[i]= set(data[i])
print("len= ")
print(len(d[i]))
for i in cols:
s= d[i]
l=list(s)
l.sort()
d[i]=l
return d
def prob(fav,total):
prob = fav/total
return prob
def entropy(positive,negative):
total = positive +negative
prob_yes = prob(positive,total)
prob_no = prob(negative,total)
if(prob_yes != 0 and prob_no != 0):
entr = -prob_yes*math.log(prob_yes,2) - prob_no*math.log(prob_no,2)
elif(prob_yes == 0):
entr = - prob_no*math.log(prob_no,2)
else:
entr = -prob_yes*math.log(prob_yes,2)
return entr
def info(positive,negative):
total =0
feature=[]
for i in positive:
total = total +i
f= [i]
feature.append(f)
for count,j in enumerate(negative):
total = total+j
feature[count].append(j)
# print(feature)
# print(total)
infor=0
for i in feature:
n= i[0] +i[1] #positive +neg
if(n==0):
continue
infor = infor + (n/total)*entropy(i[0],i[1])
return infor
def info_gain(entropy,info):
gain = entropy - info
return gain
def feature_select(data,features,value_dict):
global label
# cols=data.columns
max_gain=-1
# print("features in fselect= ",features)
for f in features:
positive_f= data.loc[data[label]==true]
negative_f= data.loc[data[label]==false]
lpos_f= len(positive_f)
lneg_f= len(negative_f)
entr_f= entropy(lpos_f,lneg_f)
values= value_dict[f]
# positive_list =[]
# negative_list=[]
for value in values:
positive_list=[]
negative_list=[]
# print("feature= ",f)
# print("value= ",value)
data2= data.loc[data[f] > value]
# print("data2= ",data2)
positive=data2.loc[data2[label]==true]
negative = data2.loc[data2[label]==false]
lpos= len(positive)
lneg = len(negative)
# entr_v = entropy(lpos,lneg)
positive_list.append(lpos)
negative_list.append(lneg)
data3= data.loc[data[f] <= value]
positive1=data3.loc[data3[label]==true]
negative1 = data3.loc[data3[label]==false]
lpos1= len(positive1)
lneg1 = len(negative1)
# entr_v = entropy(lpos,lneg)
positive_list.append(lpos1)
negative_list.append(lneg1)
# print("positive list= ",positive_list)
# print("negative list= ",negative_list)
information = info(positive_list,negative_list)
gain = info_gain(entr_f,information)
# print("gain= ",gain)
# print("feature= ",f)
# print("gain= ",gain)
if(gain<0):
print("negative gainnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn")
if(gain>max_gain):
f_selected = f
split=value
max_gain=gain
pos_selected= lpos_f
neg_selected= lneg_f
ret= [f_selected,split,pos_selected,neg_selected]
return ret
def make_tree(data,features,value_dict,n):
# if(len(features)==0):
# print("len of feature ==0")
# return
# print("features= ",features)
lenpos = len(data.loc[data[label]==true])
lenneg = len(data.loc[data[label]==false])
if(len(features)==0) or (lenpos == 0) or (lenpos ==1 and lenneg ==0) or (lenpos==0 and lenneg==1) or (lenneg == 0):
print("len of pos= ",lenpos)
print("len of neg= ",lenneg)
if(lenpos==0 or lenneg >= lenpos):
n.insert(0,"null",lenpos,lenneg)
if(lenneg==0 or lenneg < lenpos):
n.insert(1,"null,",lenpos,lenneg)
# print("len of pos= ",lenpos)
# print("len of neg= ",lenneg)
return
# print("data= ",data)
# print("features after= ",features)
# print("valuedic= ",value_dict)
f= feature_select(data,features,value_dict)
print("feature selected= ",f)
# f= [f_selected,split,pos_selected,neg_selected]
value = f[1]
tempdict= copy.deepcopy(value_dict)
# print(type(tempdict))
# print(type(tempdict[f]))
# print(tempdict)
# print(tempdict[f])
tempdict[f[0]].remove(f[1])
temp = copy.deepcopy(features)
if(len(tempdict[f[0]])==0):
temp=temp.drop(f[0])
rdict=copy.deepcopy(tempdict)
for i in tempdict[f[0]]:
if(i<value):
rdict[f[0]].remove(i)
ldict=copy.deepcopy(tempdict)
for i in tempdict[f[0]]:
if(i>=value):
ldict[f[0]].remove(i)
# print("ldict= ",ldict)
# print("rdict= ",rdict)
# n=node()
n.insert(f[0],f[1],f[2],f[3])
# print("f= ",f)
# values = value_dict[f[0]]
# print("data= ",data)
# print("value dict= ",value_dict)
# print("values= ",values)
# print("value = ",value)
# LEFT CHILD
n1=node()
n.children["low"]=n1
data_tosend= data.loc[data[f[0]]<=value]
# print("data2= \n",data_tosend)
make_tree(data_tosend,temp,ldict,n1)
#RIGHT CHILD
n2=node()
n.children["high"]=n2
data_tosend2= data.loc[data[f[0]]>value]
make_tree(data_tosend2,temp,rdict,n2)
def predict(x,root):
    y=[]
    for index,row in x.iterrows():
        # walk from the root for every row; a per-row cursor keeps the shared
        # tree root intact instead of overwriting it after the first row
        current = root
        while(len(current.children) != 0):
            name = current.name
            val = row[name]
            compare = current.value
            if(val <= compare):
                current = current.children["low"]
            else:
                current = current.children["high"]
        y.append(current.name)
    return y
def accuracy(ytest,ypredict):
c=0
l =len(ytest)
for count,i in enumerate(ytest):
if(ytest[count] == ypredict[count]):
c= c +1
print("count= ",c)
print("total= ",l)
return c/l
def parameters(ytest,ypredict):
tp=0
fp=0
fn=0
for count,i in enumerate(ytest):
if(ypredict[count]==true and ytest[count]==true):
tp= tp+1
if(ypredict[count]==true and ytest[count]==false):
fp = fp+1
if(ypredict[count]==false and ytest[count]== true):
fn = fn +1
l=[tp,fp,fn]
print("tp fp fn= ",l)
return l
def recall(tp,fn):
den = tp+fn
if(den==0):
return 0
return tp/den
def precision(tp,fp):
den= tp+fp
if(den==0):
return 0
return tp/den
def f1_score(precision,recall):
    # harmonic mean of precision and recall; guard the zero cases
    if(precision == 0 or recall == 0):
        return 0
    pr1 = 1/precision
    pr2 = 1/recall
    return 2/(pr1 + pr2)
data3=data
print("data columns= ",data.columns)
data2=[list(x) for x in data.values]
columns= ['satisfaction_level', 'last_evaluation', 'number_project',
'average_montly_hours', 'time_spend_company', 'Work_accident', 'left',
'promotion_last_5years', 'sales', 'salary']
# categorical=['sales', 'salary']
# cat_list=[]
# for j in categorical:
# d={}
# for count,i in enumerate(set(data[j])):
# d[i]=count
# cat_list.append(d)
# print(cat_list)
print("data2= ")
print(data2)
print("typedata2 = ",type(data2))
print(type(data2[0]))
print("data2[0]= ",data2[0])
# [{'support': 0, 'product_mng': 1, 'accounting': 2, 'IT': 3, 'hr': 4, 'marketing': 5, 'RandD': 6,
# 'management': 7, 'sales': 8, 'technical': 9}, {'medium': 0, 'high': 1, 'low': 2}]
sales_map = {'support': 0, 'product_mng': 1, 'accounting': 2, 'IT': 3, 'hr': 4,
             'marketing': 5, 'RandD': 6, 'management': 7, 'sales': 8, 'technical': 9}
salary_map = {'medium': 0, 'high': 1, 'low': 2}
for row in data2:
    row[8] = sales_map.get(row[8], row[8])
    row[9] = salary_map.get(row[9], row[9])
# for i in cat_list:
# data2=data2.replace(i)
# print(data2)
value_dict= prepare_dict(data3)
print("value dict= ",value_dict)
value_dict1={}
# map each column name in value_dict to its positional index in `columns`
for i in value_dict:
    if i in columns:
        value_dict1[columns.index(i)] = value_dict[i]
# features = data2.columns
# features=features.drop(label)
# print("features= ",features)
# # print("data2= ",data2)
# x=data2.drop(columns=label)
# y= data2[label]
# xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size = 0.2)
# l= [xTrain,yTrain]
# data_train=pd.concat(l,axis=1)
# # print("data train= ",data_train)
# l=[xTest,yTest]
# data_test = pd.concat(l,axis=1)
# print("entropy s")
# print(entropy(9,5))
# print("entropy sunny")
# print(entropy(2,3))
# x=info([2,4,3],[3,0,2])
# print(x)
# print(info_gain(entropy(9,5),x))
# features= data.columns
# print(features)
# features=features.drop(label)
# features=["outlook","temp","humidity","windy"]
# x= feature_select(data,features,value_dict)
# print("feature selected= ",x)
# print()
# root= node()
# make_tree(data_train,features,value_dict,root)
########################
#DUMP MODEL
#####################
# f= open("model.pkl","w")
# pk.dump(root,f)
# f.close()
###########################
###########################
# print("postorder")
# print(postorder(root))
# ypredict=predict(xTest,root)
# yTest= yTest.tolist()
# print(yTest)
# print("ypredict= ",ypredict)
# print("type= ",type(yTest[0]))
# print("ytest[0] ",yTest[0])
# print("ypredict [0] ",ypredict[0])
# print("accuracy= ",accuracy(yTest,ypredict))
# l= parameters(yTest,ypredict)
# print("recall= ",recall(l[0],l[2]))
# print("precision= ",precision(l[0],l[1]))
# print(precision_score(yTest,ypredict))
|
[
"ankush.nagpal@students.iiit.ac.in"
] |
ankush.nagpal@students.iiit.ac.in
|
4f39adab863b233b15775cee2fd844f5c6e6acec
|
f1a20cf2d60bf16f0f39ec0e2fa696000f2d286d
|
/external/predictor/python/seldon/pipeline/util.py
|
d9a8cfdbe32875fdef27dc5cceaa77e1fa91dd96
|
[
"Apache-2.0"
] |
permissive
|
gutaljaard/seldon-server
|
2767550fe15f101c4ec81ac103d9fe44619e18fa
|
831ba837a7d66ba4a7eed4c921fade003d937b7a
|
refs/heads/master
| 2021-01-21T19:58:06.349552
| 2015-11-18T11:09:12
| 2015-11-18T11:09:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,477
|
py
|
import seldon.fileutil as fu
import json
import os.path
import logging
import shutil
import unicodecsv
import numpy as np
import pandas as pd
import random
import string
from sklearn.externals import joblib
class Pipeline_wrapper(object):
"""
Wrapper to allow dataframes to be created and saved from external data sources.
Data sources:AWS s3 and file system
Formats: JSON and CSV
Parameters
----------
    work_folder : str
       local work folder used to stage files
aws_key : str, optional
AWS key
aws_secret : str, optional
AWS secret
"""
def __init__(self,work_folder="/tmp",aws_key=None,aws_secret=None):
self.work_folder=work_folder
self.lines_read = 0
self.df_format='json'
self.active_file=None
self.aws_key=aws_key
self.aws_secret=aws_secret
def get_work_folder(self):
return self.work_folder
def create_work_folder(self):
if not os.path.exists(self.work_folder):
print "creating ",self.work_folder
os.makedirs(self.work_folder)
#
# save dataframe to location
#
def save_dataframe(self,df,location,df_format="json",csv_index=True):
"""Save dataframe
Parameters
----------
        df : pandas dataframe
           dataframe to save
location : str
external filesystem location to save to
df_format : str
format to use : json or csv
csv_index : bool
whether to save index when outputing to csv
"""
self.create_work_folder()
tmp_file = self.work_folder+"/df_tmp"
if df_format == 'csv':
print "saving dataframe as csv"
df.to_csv(tmp_file,index=csv_index)
else:
print "saving dataframe as json"
f = open(tmp_file,"w")
for i in range(0, df.shape[0]):
row = df.irow(i).dropna()
jNew = row.to_dict()
jStr = json.dumps(jNew,sort_keys=True)
f.write(jStr+"\n")
f.close()
futil = fu.FileUtil(key=self.aws_key,secret=self.aws_secret)
futil.copy(tmp_file,location)
#
# download data and convert to dataframe
#
def _save_features_local(self,line):
"""save data line to local features file
"""
if not self.df_format == 'csv' and self.lines_read > 0:
self.active_file.write(",")
self.active_file.write(line+"\n")
self.lines_read += 1
def _copy_features_locally(self,locations,local_file,df_format):
self.df_format=df_format
self.create_work_folder()
print "streaming features ",locations," to ",local_file
print "input type is ",self.df_format
self.lines_read = 0
self.active_file = open(local_file,"w")
if not self.df_format == 'csv':
self.active_file.write("[")
futil = fu.FileUtil(key=self.aws_key,secret=self.aws_secret)
futil.stream_multi(locations,self._save_features_local)
if not self.df_format == 'csv':
self.active_file.write("]")
self.active_file.close()
print "finished stream of features"
def _convert_dataframe(self,local_file,df_format,csv_dates=None,index_col=None):
"""load csv or json into pandas dataframe
"""
print "loading data into pandas dataframe"
if df_format == 'csv':
print "loading csv ",csv_dates,"index ",index_col
return pd.read_csv(local_file,parse_dates=csv_dates,index_col=index_col)
else:
print "loading json"
return pd.read_json(local_file,orient='records')
def create_dataframe(self,data=None,df_format="json",csv_dates=None,index_col=None):
"""
Create Pandas dataframe from external source
Parameters
----------
        data : object, list, dict or str
           object : pandas dataframe - will be returned as is
           list : list of locations to load the dataframe from
           str : filename to load the dataframe from
           dict : data in dict
"""
if data is not None:
if isinstance(data, pd.DataFrame):
return data
elif isinstance(data,dict):
return pd.DataFrame([data])
elif isinstance(data,basestring):
local_file= self.work_folder+"/data"
futil = fu.FileUtil(key=self.aws_key,secret=self.aws_secret)
futil.copy(data,local_file)
return self._convert_dataframe(local_file,df_format,csv_dates,index_col)
elif isinstance(data,list):
local_file= self.work_folder+"/data"
self._copy_features_locally(data,local_file,df_format)
return self._convert_dataframe(local_file,df_format,csv_dates,index_col)
else:
raise ValueError("unknown argument type for data")
#
# Upload pipeline
#
def save_pipeline(self,pipeline,location):
"""
Save scikit learn pipeline to external location
Parameters
----------
pipelines : sklearn pipeline
pipeline to be saved
location : str
external folder to save pipeline
"""
self.create_work_folder()
pipeline_folder = self.work_folder+"/pipeline"
if not os.path.exists(pipeline_folder):
print "creating folder ",pipeline_folder
os.makedirs(pipeline_folder)
tmp_file = pipeline_folder+"/p"
joblib.dump(pipeline,tmp_file)
futil = fu.FileUtil(key=self.aws_key,secret=self.aws_secret)
futil.copy(pipeline_folder,location)
def load_pipeline(self,pipeline_folder):
"""
Load scikit learn pipeline from external folder
Parameters
----------
pipeline_folder : str
external folder holding pipeline
"""
self.create_work_folder()
local_pipeline_folder = self.work_folder+"/pipeline"
if not os.path.exists(local_pipeline_folder):
print "creating folder ",local_pipeline_folder
os.makedirs(local_pipeline_folder)
futil = fu.FileUtil(key=self.aws_key,secret=self.aws_secret)
futil.copy(pipeline_folder,local_pipeline_folder)
return joblib.load(local_pipeline_folder+"/p")
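# Minimal usage sketch (illustrative only; paths and column names are
# assumptions, not part of the original module):
#
# wrapper = Pipeline_wrapper(work_folder="/tmp/pw")
# df = wrapper.create_dataframe(data={"user": 1, "item": 2, "value": 5.0})
# wrapper.save_dataframe(df, "/tmp/df.json", df_format="json")
# df2 = wrapper.create_dataframe(data="/tmp/df.json", df_format="json")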
|
[
"cc@seldon.io"
] |
cc@seldon.io
|
6d3c3ed0fcf1a61750bdadbb045b155a0e049615
|
63fd8a67f1a2129544b41edeb26fea68f65c39c3
|
/Connection.py
|
d14ede78517d2a578a372dbeff99234b68de9f13
|
[] |
no_license
|
arkariz/Object-Oriented-Programming-MYSQL
|
a4f00b9265ff7e0d7639a8a0482f3755c213629d
|
2530e5cce2506fd9f8b70305bb3b43a31e912757
|
refs/heads/master
| 2022-11-13T11:31:20.271182
| 2020-06-28T03:23:13
| 2020-06-28T03:23:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
import pymysql
import DB_Config as config
class Connection:
    def open(self):
        try:
            # keep a reference so close() can reach the connection later
            self.connect = pymysql.connect(
                host=config.host,
                user=config.user,
                passwd=config.passwd,
                database=config.database
            )
        except pymysql.Error as err:
            print(err)
        else:
            return self.connect

    def close(self):
        # close the stored connection if one was opened and is still alive
        if getattr(self, 'connect', None) is not None and self.connect.open:
            self.connect.close()
# if __name__ == '__main__':
# c = Connection()
# c.open()
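# DB_Config is assumed to be a sibling module exposing plain attributes;
# a hypothetical example of its contents (placeholder values):
#
# host = "localhost"
# user = "root"
# passwd = "secret"
# database = "mydb"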
|
[
"44420394+MHDRizky0@users.noreply.github.com"
] |
44420394+MHDRizky0@users.noreply.github.com
|
17f42a4ced64f871dce690819201ef6d88280259
|
34ff697bd7ef10e5586875c0f7eadaaaa46c7350
|
/aleph/tests/test_view_util.py
|
8082846ca1490d3f45ae4d1b424e6a7ba6a9c823
|
[
"MIT"
] |
permissive
|
gazeti/aleph
|
0305db3fac56fd914e2e09972d479b859ab403ec
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
refs/heads/master
| 2021-06-27T11:10:40.798378
| 2017-12-26T19:27:40
| 2017-12-26T19:27:40
| 113,964,932
| 0
| 0
|
MIT
| 2019-07-04T04:00:21
| 2017-12-12T08:39:08
|
Python
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
from flask import Request
from aleph.views.util import extract_next_url
from aleph.tests.util import TestCase
class ViewUtilTest(TestCase):
def setUp(self):
super(ViewUtilTest, self).setUp()
def test_extract_next_url_blank(self):
req = Request.from_values('')
self.assertEqual('/', extract_next_url(req))
def test_extract_next_url_unsafe(self):
req = Request.from_values('/?next={}'.format(self.fake.url()))
self.assertEqual('/', extract_next_url(req))
def test_extract_next_url_safe(self):
req = Request.from_values('/?next=/help')
self.assertEqual('/help', extract_next_url(req))
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
d06efe39997e099a93dedae5554911e3a371c562
|
3d06eeebdd598efba25d29d7e3d03d90ede1bfbd
|
/1_lesson/venv/bin/easy_install
|
3d4f5a7a30df321193516cd41c9ac23fceeaa6c7
|
[] |
no_license
|
duk1edev/itproger
|
58bdd16088dec7864585d318935b118ce584874d
|
786f94fff6d816f3f978bd8c24c3d985ffd5ffb2
|
refs/heads/master
| 2021-01-02T02:43:32.684100
| 2020-03-28T18:10:25
| 2020-03-28T18:10:25
| 239,443,309
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
#!/home/duk1e/code/itproger/1_lesson/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"duk1e.ptc.ua@yandex.ru"
] |
duk1e.ptc.ua@yandex.ru
|
|
9ae11956faa91035b5dd81c4086bd1c86acccf07
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/exp-big-1308.py
|
046fc60b5113296ee7329b6975527295dc6151a4
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,179
|
py
|
# Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
    a4 = 1
a5 = 1
return f(y)
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
while i <= n:
print(exp(2, i % 31))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
c3113677f757b0b05779ba4e7daf9073b87b7e03
|
ab752a84ef38e2a30512743da599bf02308512cb
|
/User/admin.py
|
f6eceb282c29175947e99e9eff2f45f1e379b477
|
[] |
no_license
|
mikekeda/Classroom-Page
|
edd517ca0fe826559f1229f67d00e57578ad7141
|
9160a234b26faff3fc7c7c0883201617f5856dd7
|
refs/heads/master
| 2021-01-22T13:04:14.190468
| 2016-09-01T19:40:32
| 2016-09-01T19:40:32
| 66,929,961
| 0
| 0
| null | 2020-10-08T15:33:35
| 2016-08-30T10:13:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 95
|
py
|
from django.contrib import admin
from User.models import Profile
admin.site.register(Profile)
|
[
"mriynuk@gmail.com"
] |
mriynuk@gmail.com
|
55899503ba359db587de9e8e73f80cde9e2c3679
|
c7beeaeb49ba32c673ce6b4772215ed7cc91565a
|
/taobao/orm/__init__.py
|
f6013fa203b4924e68c0a248fec0e12bd9540b0f
|
[] |
no_license
|
py-wuhao/taobao_h5
|
55a074df5f4fd875949cb366ed38c54d32e18b19
|
f8a996ee7caa30f0d88d30468dfe59713a7d3f05
|
refs/heads/master
| 2020-11-24T05:54:28.004208
| 2019-12-14T09:02:30
| 2019-12-14T09:02:30
| 227,994,309
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/7 14:58
# @Author : wuhao
# @Email : 15392746632@qq.com
# @File : __init__.py
# @Software: PyCharm
from .base import DB_Session, BaseModel, engine
from .goods_info import *
|
[
"15392746632@qq.com"
] |
15392746632@qq.com
|
c03b0245cff06e02284f63b62b6dd8cdf67a7e09
|
de40d3fa8d8af0030556d27d6833f6a1a0e7700c
|
/baekjoon/4892py/a.py
|
e279817d4a65188b08c0b752da86103b06d0aba6
|
[] |
no_license
|
NeoMindStd/CodingLife
|
cd6a627209c0353f4855f09fd5dfef8da4bbfef6
|
bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3
|
refs/heads/master
| 2022-12-24T10:42:45.390085
| 2022-12-11T16:27:16
| 2022-12-11T16:27:16
| 191,797,634
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
T=1
while True:
a=int(input())*3
if a==0:break
f=a%2==0
b=(a//2 if f else (a+1)//2)*3//9
print(f'{T}. {"even"if f else"odd"} {b}')
T+=1
|
[
"dwj1996@naver.com"
] |
dwj1996@naver.com
|
874ed34d896808bb93161b5c1af36900567fb962
|
42438f0c9ec4811a9164732fc702f78b5fe3b163
|
/app/computation/risks/core.py
|
27f3b96c360ede694c21d7b6ed4bdb80d00adcf3
|
[] |
no_license
|
jf2/hack
|
c1709fcea02a60d983a4233bc66df168ba365eff
|
10dc38749d82eec7d97a8cd57fa8b610bc55630c
|
refs/heads/master
| 2022-12-10T14:34:37.128855
| 2018-09-16T09:25:49
| 2018-09-16T09:25:49
| 148,686,906
| 0
| 1
| null | 2022-01-21T19:39:52
| 2018-09-13T19:23:51
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
from enum import Enum
class RiskType(Enum):
""" The type of risk """
INVALID = 1
FIRE = 2
FLOODING = 3
EARTHQUAKE = 4
class Risk:
""" A risk """
# general
risk_type = RiskType.INVALID
# region of interest
def __init__(self, risk_type):
self.risk_type = risk_type
self.range_min = 0
self.range_max = 1
self.value = -1
def get_risk_score(self, lon, lat, radius):
return self.value
|
[
"ralph.aeschimann@gmail.com"
] |
ralph.aeschimann@gmail.com
|
63199d388849cff873fe4fb6dc36519835546c23
|
067b4545e638e0e1f36aa31bc9e6a127be8764ea
|
/bubble-sort.py
|
6ec92d155512290645bdabc045c191638d24a05a
|
[
"MIT"
] |
permissive
|
cygnusss/algos
|
41d5d6c1b175196c6ffb95a8aea7c9fbb6949033
|
52d1f3431fbd13ae9571a29aa4393ee78c74b94f
|
refs/heads/master
| 2021-05-10T07:52:22.650150
| 2018-03-10T03:02:55
| 2018-03-10T03:02:55
| 118,862,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
'''
Bubble Sort algorithm for each iteration finds the greatest value and bubbles it up to the last index of the array
Every next iteration will have one less item since the last item becomes sorted
'''
def bubbleSort(items):
    for i in range(0, len(items) - 1):
        for j in range(0, len(items) - 1 - i):
            if items[j] > items[j + 1]:
                # if current element is greater than its right neighbor
                # swap the two elements
                items[j], items[j + 1] = items[j + 1], items[j]
    return items
example = [1, 2, 3, 4, 5]
print(bubbleSort(example))
|
[
"cygnidavid@gmail.com"
] |
cygnidavid@gmail.com
|
12713ea9f2e7d2ba55338d517df888939db8212b
|
5b9eacd5d3af2bde20085931b2ca91b357f8b2e1
|
/.qt_for_python/uic/bookwindow.py
|
3c39a3a3f70395d0ab7b870e90e07b16c51ba728
|
[] |
no_license
|
brtkev/QtMediaPlayer
|
dc10cca7b1e14cfddd42ead34f6b7ac7827d8125
|
2883d63272d87d890b05adfce8a7d13e6d15cee5
|
refs/heads/main
| 2023-08-13T00:24:29.297016
| 2021-09-28T04:37:17
| 2021-09-28T04:37:17
| 352,264,144
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,058
|
py
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'bookwindow.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_BookWindow(object):
def setupUi(self, BookWindow):
if not BookWindow.objectName():
BookWindow.setObjectName(u"BookWindow")
BookWindow.resize(601, 420)
self.centralWidget = QWidget(BookWindow)
self.centralWidget.setObjectName(u"centralWidget")
self.vboxLayout = QVBoxLayout(self.centralWidget)
self.vboxLayout.setSpacing(6)
self.vboxLayout.setObjectName(u"vboxLayout")
self.vboxLayout.setContentsMargins(9, 9, 9, 9)
self.groupBox = QGroupBox(self.centralWidget)
self.groupBox.setObjectName(u"groupBox")
self.vboxLayout1 = QVBoxLayout(self.groupBox)
self.vboxLayout1.setSpacing(6)
self.vboxLayout1.setObjectName(u"vboxLayout1")
self.vboxLayout1.setContentsMargins(9, 9, 9, 9)
self.bookTable = QTableView(self.groupBox)
self.bookTable.setObjectName(u"bookTable")
self.bookTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.vboxLayout1.addWidget(self.bookTable)
self.groupBox_2 = QGroupBox(self.groupBox)
self.groupBox_2.setObjectName(u"groupBox_2")
self.formLayout = QFormLayout(self.groupBox_2)
self.formLayout.setObjectName(u"formLayout")
self.label_5 = QLabel(self.groupBox_2)
self.label_5.setObjectName(u"label_5")
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.label_5)
self.titleEdit = QLineEdit(self.groupBox_2)
self.titleEdit.setObjectName(u"titleEdit")
self.titleEdit.setEnabled(True)
self.formLayout.setWidget(0, QFormLayout.FieldRole, self.titleEdit)
self.label_2 = QLabel(self.groupBox_2)
self.label_2.setObjectName(u"label_2")
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.label_2)
self.authorEdit = QComboBox(self.groupBox_2)
self.authorEdit.setObjectName(u"authorEdit")
self.authorEdit.setEnabled(True)
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.authorEdit)
self.label_3 = QLabel(self.groupBox_2)
self.label_3.setObjectName(u"label_3")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.label_3)
self.genreEdit = QComboBox(self.groupBox_2)
self.genreEdit.setObjectName(u"genreEdit")
self.genreEdit.setEnabled(True)
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.genreEdit)
self.label_4 = QLabel(self.groupBox_2)
self.label_4.setObjectName(u"label_4")
self.formLayout.setWidget(3, QFormLayout.LabelRole, self.label_4)
self.yearEdit = QSpinBox(self.groupBox_2)
self.yearEdit.setObjectName(u"yearEdit")
self.yearEdit.setEnabled(True)
self.yearEdit.setMinimum(-1000)
self.yearEdit.setMaximum(2100)
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.yearEdit)
self.label = QLabel(self.groupBox_2)
self.label.setObjectName(u"label")
self.formLayout.setWidget(4, QFormLayout.LabelRole, self.label)
self.ratingEdit = QSpinBox(self.groupBox_2)
self.ratingEdit.setObjectName(u"ratingEdit")
self.ratingEdit.setMaximum(5)
self.formLayout.setWidget(4, QFormLayout.FieldRole, self.ratingEdit)
self.vboxLayout1.addWidget(self.groupBox_2)
self.vboxLayout.addWidget(self.groupBox)
BookWindow.setCentralWidget(self.centralWidget)
QWidget.setTabOrder(self.bookTable, self.titleEdit)
QWidget.setTabOrder(self.titleEdit, self.authorEdit)
QWidget.setTabOrder(self.authorEdit, self.genreEdit)
QWidget.setTabOrder(self.genreEdit, self.yearEdit)
self.retranslateUi(BookWindow)
QMetaObject.connectSlotsByName(BookWindow)
# setupUi
def retranslateUi(self, BookWindow):
BookWindow.setWindowTitle(QCoreApplication.translate("BookWindow", u"Books", None))
self.groupBox.setTitle("")
self.groupBox_2.setTitle(QCoreApplication.translate("BookWindow", u"Details", None))
self.label_5.setText(QCoreApplication.translate("BookWindow", u"<b>Title:</b>", None))
self.label_2.setText(QCoreApplication.translate("BookWindow", u"<b>Author: </b>", None))
self.label_3.setText(QCoreApplication.translate("BookWindow", u"<b>Genre:</b>", None))
self.label_4.setText(QCoreApplication.translate("BookWindow", u"<b>Year:</b>", None))
self.yearEdit.setPrefix("")
self.label.setText(QCoreApplication.translate("BookWindow", u"<b>Rating:</b>", None))
# retranslateUi
|
[
"kbreto2911@gmail.com"
] |
kbreto2911@gmail.com
|
70b3dd584be3b9acb9555e13597af100f1df7520
|
9bdf9bb38d6ea628c71a901e85527f34a2b08f67
|
/markov_chain_monte_carlo.py
|
063219c6a5a3ad9064403f51122f7eb1167a75d2
|
[] |
no_license
|
physics-machinelearning/Bayes
|
fa493276eae6178108241ad72fe142c5f5fcc935
|
d4d2769d73f88b605426fd1383bf069f5ad0d215
|
refs/heads/master
| 2020-12-04T12:51:36.532387
| 2020-01-13T13:16:33
| 2020-01-13T13:16:33
| 231,771,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,827
|
py
|
import numpy as np
import matplotlib.pyplot as plt
class MetropolisHastings:
def __init__(self, epsilon, theta, NMCS, f):
self.epsilon = epsilon
self.theta = theta
self.NMCS = NMCS
self.f = f
def judgeacceptornot(self, theta, new_theta):
judge = self.f(new_theta) > self.f(theta)
r = self.f(new_theta) / self.f(theta)
return judge, r
def loop(self):
theta = self.theta
self.theta_list = [theta]
for i in range(self.NMCS):
theta_new = theta + self.epsilon*np.random.randn()
judge, r = self.judgeacceptornot(theta, theta_new)
if judge:
theta = theta_new
else:
if np.random.rand() < r:
theta = theta_new
self.theta_list.append(theta)
def plot(self):
theta = np.linspace(min(self.theta_list), max(self.theta_list), num=100)
fig, ax1 = plt.subplots()
ax1.hist(self.theta_list, density=True, bins=40)
ax2 = ax1.twinx()
ax2.plot(theta, self.f(theta))
plt.show()
class MultidimentionalMetropolisHastings:
def __init__(self, epsilon, theta, NMCS, f):
self.epsilon = epsilon
self.theta = theta
self.NMCS = NMCS
self.f = f
def judgeacceptornot(self, theta, new_theta):
judge = self.f(new_theta) > self.f(theta)
r = self.f(new_theta) / self.f(theta)
return judge, r
    def loop(self):
        theta = self.theta
        # np.empty takes the shape as a single tuple argument
        self.theta_list = np.empty((1, theta.shape[0]))
        self.theta_list[0] = theta
for i in range(self.NMCS):
theta_new = theta + self.epsilon*np.random.randn(theta.shape[0])
judge, r = self.judgeacceptornot(theta, theta_new)
if judge:
theta = theta_new
else:
if np.random.rand() < r:
theta = theta_new
self.theta_list = np.vstack((self.theta_list, theta))
def plot(self):
        theta = np.linspace(self.theta_list.min(), self.theta_list.max(), num=100)
fig, ax1 = plt.subplots()
ax1.hist(self.theta_list, density=True, bins=40)
ax2 = ax1.twinx()
ax2.plot(theta, self.f(theta))
plt.show()
class ReplicaExchangeMethod:
"""
This class is for generation of samples from
one dimentional gaussian mixture distribution
"""
def __init__(self, f_prob, epsilon, x_min, x_max, NMCS, e_freq, num_chain):
self.f_prob = f_prob
self.epsilon = epsilon
self.x_min, self.x_max = x_min, x_max
self.NMCS = NMCS
self.e_freq = e_freq
self.num_chain = num_chain
self.beta_list = [i/(num_chain-1) for i in range(num_chain)]
def judgeacceptornot(self, x, new_x, f):
judge = f(new_x) > f(x)
r = f(new_x) / f(x)
return judge, r
def make_energy_func(self):
def energy(x):
return -np.log(self.f_prob(x))
return energy
def make_likelihood(self, beta):
def likelihood(x):
temp = beta*np.log(self.f_prob(x))
return np.exp(temp)
return likelihood
    def loop(self):
        self.x_list = [[np.random.uniform(self.x_min, self.x_max)] for i in range(self.num_chain)]
        distributions = [self.make_likelihood(beta) for beta in self.beta_list]
        for step in range(self.NMCS):
            # the Monte Carlo step counter must not be shadowed by the
            # chain index used in the inner loop below
            for i, x in enumerate(self.x_list):
                x = x[-1]
                x_new = x + self.epsilon*np.random.randn()
                f = distributions[i]
                judge, r = self.judgeacceptornot(x, x_new, f)
                if judge:
                    x = x_new
                else:
                    if np.random.rand() < r:
                        x = x_new
                self.x_list[i].append(x)
            if step % self.e_freq == 0:
                index = int(np.random.uniform(0, len(self.x_list)-1))
                x0 = self.x_list[index][-1]
                x1 = self.x_list[index+1][-1]
                dist0 = distributions[index]
                dist1 = distributions[index+1]
                if np.random.uniform() < (dist0(x1)*dist1(x0))/(dist0(x0)*dist1(x1)):
                    self.x_list[index][-1], self.x_list[index+1][-1] = np.copy(x1), np.copy(x0)
def plot(self):
theta = np.linspace(min(self.x_list[-1]), max(self.x_list[-1]), num=100)
fig, ax1 = plt.subplots()
ax1.hist(self.x_list[-1], density=True, bins=40)
ax2 = ax1.twinx()
ax2.plot(theta, self.f_prob(theta))
plt.show()
# for i in range(len(self.x_list)):
# plt.figure()
# plt.hist(self.x_list[i], alpha=0.5, label=str(i), bins=40)
# plt.legend()
# plt.show()
class HamiltonianMonteCarlo:
def __init__(self, epsilon, T, L, theta, f, h, dhdtheta):
self.epsilon = epsilon
self.T = T
self.L = L
self.theta = theta
self.f = f
self.h = h
self.dhdtheta = dhdtheta
def leapfrog(self, p, theta):
p = p - (self.epsilon*self.dhdtheta(theta)/2)
theta = theta + self.epsilon*p
p = p - (self.epsilon*self.dhdtheta(theta)/2)
return p, theta
def calchamiltonian(self, theta, p):
return self.h(theta) + p**2/2
    def judgeacceptornot(self, new_theta, new_p, old_theta, old_p):
        # Metropolis acceptance: accept with probability min(1, exp(H_old - H_new))
        r = np.exp(self.calchamiltonian(old_theta, old_p) - self.calchamiltonian(new_theta, new_p))
        return np.random.rand() < r
def loop(self):
theta = self.theta
self.p_list = []
self.theta_list = []
accept_list = []
for i in range(self.T):
p = np.random.randn()
old_p = p
old_theta = theta
for j in range(self.L):
p, theta = self.leapfrog(p, theta)
new_p = p
new_theta = theta
if self.judgeacceptornot(new_theta, new_p, old_theta, old_p):
self.p_list.append(p)
self.theta_list.append(theta)
accept_list.append(True)
else:
p = old_p
theta = old_theta
accept_list.append(False)
print('accept ratio: ', sum(accept_list)/len(accept_list))
def plot_transition(self):
plt.figure()
plt.plot(self.p_list, self.theta_list)
plt.show()
def plot(self):
theta = np.linspace(min(self.theta_list), max(self.theta_list), num=100)
fig, ax1 = plt.subplots()
ax1.hist(self.theta_list, density=True, bins=40)
ax2 = ax1.twinx()
ax2.plot(theta, self.f(theta))
plt.show()
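# Minimal usage sketch (illustrative; the target density is an assumption,
# not part of the original module): sample an unnormalised standard normal
# with the one-dimensional Metropolis-Hastings sampler.
#
# def target(theta):
#     return np.exp(-theta**2 / 2)
#
# mh = MetropolisHastings(epsilon=0.5, theta=0.0, NMCS=10000, f=target)
# mh.loop()
# mh.plot()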
|
[
"kuroki.ryo.physics@gmail.com"
] |
kuroki.ryo.physics@gmail.com
|
1f4ef7c4157af9f3e05e20a8406db26ac6a1f60d
|
b345823e0a9df7190c9ffe2522590daa0ab30b6c
|
/MineMap.py
|
fd5a26ec2c7bf9b95cecc5ca6af96983096c0716
|
[] |
no_license
|
nhurtado/group6
|
bf0448909c04b64e9bf9520f75073638772e028d
|
a764d9ee075a4ad79e05375ed2c0179b6d2722ac
|
refs/heads/master
| 2020-05-03T17:03:33.998488
| 2019-04-01T16:01:15
| 2019-04-01T16:01:15
| 178,737,703
| 0
| 0
| null | 2019-03-31T20:18:55
| 2019-03-31T20:18:55
| null |
UTF-8
|
Python
| false
| false
| 5,897
|
py
|
# -*- coding: utf-8 -*-
import sys
import pickle
import os
import glob
model_loaded = None
def marvin_model(filename):
blocks_model_data = {}
with open(filename) as f:
for line in f.readlines():
data = line.strip().split(' ')
blocks_model_data[data[0]] = {
'x': data[1],
'y': data[2],
'z': data[3],
'weight': data[4],
'au': data[5],
'cu': data[6],
'proc_profit': data[7]
}
return blocks_model_data
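# Expected marvin input line (space separated, one block per line); the
# concrete values below are hypothetical, for illustration only:
#   <id> <x> <y> <z> <weight> <au> <cu> <proc_profit>
#   e.g. "0 10 20 3 5500.0 0.8 1.2 145.0"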
def zuck_small_model(filename):
blocks_model_data = {}
with open(filename) as f:
for line in f.readlines():
data = line.strip().split(' ')
blocks_model_data[data[0]] = {
'x': data[1],
'y': data[2],
'z': data[3],
'weight': float(data[6])+float(data[7]),
'cost': data[4],
'value': data[5],
'rock_tonnes': data[6],
'ore_tonnes': data[7]
}
return blocks_model_data
def new_block_model(map_type):
filename = input('please enter the location of the file:\n')
if not os.path.isfile(filename):
input("file not found...")
return None
blocks_model_data = []
if map_type == 1:
blocks_model_data = marvin_model(filename)
elif map_type == 2:
blocks_model_data = zuck_small_model(filename)
return blocks_model_data
def save_to_database(filename, data):
with open('model_files\\'+filename+'.db', 'wb') as handle:
pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def read_database(filename):
with open(filename, 'rb') as handle:
return pickle.load(handle)
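# Illustrative round-trip (assumes the 'model_files' folder already exists):
# save_to_database('demo', {'1': {'x': '0', 'y': '0', 'z': '0'}})
# read_database('model_files\\demo.db')  # -> {'1': {'x': '0', 'y': '0', 'z': '0'}}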
def load_map():
"""
Function for loading maps on the database.
"""
global model_loaded
current_directory = os.path.dirname(os.path.abspath(__file__))
menu_keys = []
os.chdir(current_directory + "\model_files")
for file in glob.glob("*.db"):
menu_keys.append(file)
if len(menu_keys) == 0:
input("you must first create a new map...")
return None
menu_keys.append("back")
while True:
print("load options:")
for option_index in range(len(menu_keys)):
print("{} - {}".format(option_index + 1, menu_keys[option_index]))
selected_option = input("Select Option: ")
try:
selected_option = int(selected_option)
except:
print("You must enter a number...")
continue
if -1 < selected_option < len(menu_keys) + 1:
menu_key = menu_keys[selected_option - 1]
if menu_key == "back":
break
else:
model_loaded = read_database(current_directory + "\model_files\\" + menu_key)
break
else:
print("Unknown Option, please select one of the given ones...")
def new_map():
"""
Function for creating new maps on the database.
"""
global model_loaded
new_map_menu_options = {
"Marvin": 1,
"Zuck small": 2,
"Back": 0
}
    new_map_keys = list(new_map_menu_options.keys())
    new_model = None
while True:
print("Menu options:")
for option_index in range(len(new_map_keys)):
print("{} - {}".format(option_index + 1, new_map_keys[option_index]))
selected_option = input("Select Option: ")
try:
selected_option = int(selected_option)
except:
print("You must enter a number...")
continue
if -1 < selected_option < len(new_map_keys) + 1:
new_menu_key = new_map_keys[selected_option - 1]
            if new_map_menu_options[new_menu_key] == 0:
                break
else:
new_model = new_block_model(new_map_menu_options[new_menu_key])
break
else:
print("Unknown Option, please select one of the given ones...")
if new_model:
filename = input("please enter a name for the model:\n")
save_to_database(filename, new_model)
model_loaded = new_model
def query_map():
"""
Function for query a loaded map
"""
if not model_loaded:
input("you must first load a map...")
return None
while True:
block_id = input("please enter a block id of the currently loaded model: ")
try:
int(block_id)
except:
print("You must enter a number...")
continue
if block_id in model_loaded:
print("block " + block_id + " info: ")
for data in model_loaded[block_id]:
print("{0}: {1}".format(data, model_loaded[block_id][data]))
break
else:
print("Invalid block id...")
def close_program():
"""
Function for closing connection with database before exiting.
"""
print("Closing program...")
sys.exit()
def main_menu():
main_menu_options = {
"Create New Map": new_map,
"Load Map": load_map,
"Query Map": query_map,
"Exit": close_program
}
main_menu_keys = list(main_menu_options.keys())
while True:
print("Menu options:")
for option_index in range(len(main_menu_keys)):
print("{} - {}".format(option_index + 1, main_menu_keys[option_index]))
selected_option = input("Select Option: ")
try:
selected_option = int(selected_option)
except:
print("You must enter a number...")
continue
if -1 < selected_option < len(main_menu_keys) + 1:
main_menu_key = main_menu_keys[selected_option - 1]
main_menu_options[main_menu_key]()
else:
print("Unknown Option, please select one of the given ones...")
main_menu()
|
[
"nhurtado@miuandes.cl"
] |
nhurtado@miuandes.cl
|
8b3724a773360254ffb8e6b3351328324b33ea95
|
30cfdd215a3a9e0490e0603337e0de0efd898a29
|
/pharmAutoML/autoML_util.py
|
8112c67199fe08208614f9f7d944a1faa9567af0
|
[] |
no_license
|
gengbo-genentech/Pharm-AutoML
|
7c3c2f280fb0cc9c2515dfb34ff982252a84ca0d
|
bae09d59ff049b5eadff7dcff9191443579e2d8c
|
refs/heads/main
| 2023-05-05T10:19:35.659844
| 2021-05-22T01:51:24
| 2021-05-22T01:51:24
| 348,850,859
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,498
|
py
|
import os
import shutil
from sklearn.model_selection import ParameterGrid
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from hyperopt import hp
import pickle
from hyperopt import STATUS_OK
import mlflow
def get_preprocessed_data(result_path, para_index):
"""
read in preprocessed data
    Args:
        result_path: str, result path.
        para_index: int, parameter grid index
    Returns:
        X_train: pd dataframe
        y_train: pd dataframe
        X_test: pd dataframe
        y_test: pd dataframe
    """
para_path = result_path + '/' + str(para_index)
X_train_dir = para_path + '/X_train_important.csv'
y_train_dir = para_path + '/y_train.csv'
X_test_dir = para_path + '/X_test_important.csv'
y_test_dir = para_path + '/y_test.csv'
X_train = pd.read_csv(X_train_dir, index_col = 0)
y_train = pd.read_csv(y_train_dir, index_col = 0)
X_test = pd.read_csv(X_test_dir, index_col = 0)
y_test = pd.read_csv(y_test_dir, index_col = 0)
return X_train, y_train, X_test, y_test
def getBestModelfromTrials(trials):
valid_trial_list = [trial for trial in trials
if STATUS_OK == trial['result']['status']]
losses = [float(trial['result']['loss']) for trial in valid_trial_list]
    index_having_minimum_loss = np.argmin(losses)
    best_trial_obj = valid_trial_list[index_having_minimum_loss]
return best_trial_obj['result']['best_model']
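# Illustrative usage (objective and space are assumptions, not part of this
# module): the objective passed to hyperopt must return a dict of the form
# {'status': STATUS_OK, 'loss': ..., 'best_model': ...} for this helper to work.
#
# from hyperopt import fmin, tpe, Trials
# trials = Trials()
# fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=50, trials=trials)
# best_model = getBestModelfromTrials(trials)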
def get_best_model(paras_json, b_model_dict):
bm_paras = paras_json
for i in paras_json.keys():
for j in paras_json[i].keys():
if isinstance(paras_json[i][j], list):
bm_paras[i][j] = bm_paras[i][j][b_model_dict[j]]
return bm_paras
def hp_parameters(json_parameters):
hp_paras = {}
for i in json_parameters.keys():
sub_paras = {}
for j in json_parameters[i].keys():
if j == 'drop_features' or j == 'categorical_features':
sub_paras[j] = json_parameters[i][j]
elif isinstance(json_parameters[i][j], list):
sub_paras[j] = hp.choice(j, json_parameters[i][j])
else:
sub_paras[j] = hp.choice(j, [json_parameters[i][j]])
hp_paras[i] = sub_paras
return hp_paras
def get_parameter_grid(parameter_comparison):
parameter_temp = parameter_comparison.copy()
for i in parameter_comparison.keys():
parameter_temp[i] = list(ParameterGrid(parameter_comparison[i]))
parameter_grid = list(ParameterGrid(parameter_temp))
return parameter_grid
def get_parameter_grid_for_clf(parameter_comparison, impute_missing_mode = True):
parameter_temp = parameter_comparison.copy()
for i in parameter_comparison.keys():
parameter_temp[i] = list(ParameterGrid(parameter_comparison[i]))
parameter_grid = list(ParameterGrid(parameter_temp))
clf_param_list = []
for d in parameter_grid:
for k, v in d.items():
if impute_missing_mode == False and k != "XGboost":
continue
clf_param = {k:v}
if clf_param not in clf_param_list:
clf_param_list.append(clf_param)
return clf_param_list
def get_multiple_option_parameter(parameter):
multiple_option_parameter = ["clfs"]
for i in parameter.keys():
for j in parameter[i].keys():
if len(parameter[i][j]) > 1:
multiple_option_parameter.append(j)
return multiple_option_parameter
def save_model(para_grid_length, clfs, result, result_path):
"""
save the classifier model
Args:
para_grid_length,
clfs,
result,
result_path
"""
for para_index in range(para_grid_length):
bm = result['para_index_'+str(para_index)]['bm']
for clf_string in clfs:
bm_name = 'bm_' + clf_string
model = bm[bm_name]['learner']
filename = result_path + '/' + str(para_index) + 'prepro_' + clf_string + '.sav'
pickle.dump(model, open(filename, 'wb'))
def setup_temp_dir(prepro_num, directory):
"""set up data preprocessing directory
Args:
prepro_num: int, number of parameter grids
directory: str, result path
"""
for i in range (prepro_num):
path = directory + '/' + str(i)
if os.path.exists(path):
shutil.rmtree(path, ignore_errors=True)
os.mkdir(path)
def get_fold_path(path, fold):
"""set up fold path
Args:
path: string, name of path
fold: int, fold number
"""
fold_i = fold + 1
file_fold = path + '/fold_' + str(fold_i)
return file_fold
def missing_stats(X, missing_threshold, axis=1):
"""
Calculate and sort the fraction of missing in each column
Args:
X: dataframe
missing_threshold: float
Returns:
missing_threshold_rows_grid: list of float
"""
a = 1-axis
missing_series = X.isnull().sum(axis = a) / X.shape[a]
# Calculate the fraction of missing in each column
missing_series = X.isnull().sum() / X.shape[0]
if axis == 1:
missing_stats_cols = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})
# Sort with highest number of missing values on top
missing_stats_cols = missing_stats_cols.sort_values('missing_fraction', ascending = False)
missing_threshold_cols_grid = pd.DataFrame(missing_series[missing_series >= missing_threshold]).reset_index().rename(columns = {'index': 'cols', 0: 'missing_fraction'})
return missing_threshold_cols_grid
elif axis == 0:
missing_stats_rows = pd.DataFrame(missing_series).rename(columns = {'index': 'feature', 0: 'missing_fraction'})
# Sort with highest number of missing values on top
missing_stats_rows = missing_stats_rows.sort_values('missing_fraction', ascending = False)
missing_threshold_rows_grid = pd.DataFrame(missing_series[missing_series > missing_threshold]).reset_index().rename(columns = {'index': 'rows', 0: 'missing_fraction'})
return missing_threshold_rows_grid
def get_grid_complement_missing_threshold(x, d_list, missing_threshold_complement_mode=False):
"""
get preprocessing parameter grid for missing imputation pipeline
Args:
X: dataframe
d_list: dictionary of preprocessing parameters
missing_threshold_complement_mode: complement the missing threshold or not
Returns:
grid: list of dictionary, parameter grid
"""
if missing_threshold_complement_mode == True:
min_threshold = min(d_list['missing_threshold'])
output = missing_stats(x, min_threshold)
if list(output['missing_fraction']) == []:
d_list['missing_threshold'] = [0]
else:
d_list['missing_threshold'] = list(output['missing_fraction'])
grid = list(ParameterGrid(d_list))
return grid
def get_grid_allow_missing(x, d_list):
"""
get preprocessing parameter grid for missing allow pipeline
Args:
X: dataframe
d_list: dictionary of preprocessing parameters
Returns:
grid: list of dictionary, parameter grid
"""
del d_list['impute_category_strategy']
del d_list['impute_numerical_strategy']
grid = list(ParameterGrid(d_list))
return grid
def single_roc_plot(y_true, y_probas, text=None, title='ROC Curves', figsize=None, title_fontsize="large", text_fontsize="medium"):
"""
generate single roc auc plot
Args:
y_true: numpy array, 1 * n.
y_probas: dataframe, 1 * n.
"""
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
fpr, tpr, _ = roc_curve(y_true, y_probas, pos_label=1)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, label='ROC curve '
'(area = {0:0.2f})'.format(roc_auc),
color='blue', linewidth=2)
ax.plot([0, 1], [0, 1], 'k--', lw=2)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='lower right', fontsize=text_fontsize)
return fig
def plot_roc(y_true, y_probas, classes = None, title='ROC Curves', average_plot = True,
ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""
generate roc auc plots for each fold of cross validation experiments and average curve
Args:
y_true: list, list of np array.
y_probas: list, list of np array.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
n_fold_roc_auc = []
for i in range(len(y_true)):
fpr, tpr, _ = roc_curve(y_true[i], y_probas[i])
roc_auc = auc(fpr, tpr)
color = plt.cm.get_cmap(cmap)(float(i) / len(y_true))
if classes is None:
s = 'fold'
else:
s = classes[i]
ax.plot(fpr, tpr, lw=2, color=color,
label='ROC curve of {0} {1} (area = {2:0.2f})'
''.format(s, i, roc_auc))
n_fold_roc_auc.append(roc_auc)
average_roc_auc = 0
if classes is None:
if average_plot:
all_y_true = np.concatenate(y_true)
all_probas = np.concatenate(y_probas)
fpr_all, tpr_all, _ = roc_curve(all_y_true, all_probas)
average_roc_auc = auc(fpr_all, tpr_all)
ax.plot(fpr_all, tpr_all,
label='average ROC curve '
'(area = {0:0.2f})'.format(average_roc_auc),
color='blue', linestyle=':', linewidth=4)
ax.plot([0, 1], [0, 1], 'k--', lw=2)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='lower right', fontsize=text_fontsize)
return ax, n_fold_roc_auc, average_roc_auc
def plot_precision_recall(y_true, y_probas, classes = None, title='Precision-Recall Curve',
average_plot = True, ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""
generate pr auc plots for each fold of cross validation experiments and average curve
Args:
y_true: list, list of np array.
y_probas: list, list of np array.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
n_fold_pr_score = []
for i in range(len(y_true)):
precision, recall, _ = precision_recall_curve(y_true[i], y_probas[i])
average_precision = average_precision_score(y_true[i], y_probas[i])
color = plt.cm.get_cmap(cmap)(float(i) / len(y_true))
if classes is None:
s = 'fold'
else:
s = classes[i]
ax.plot(recall, precision, lw=2, color=color,
label='Precision-recall curve of {0} {1} (area = {2:0.2f})'
''.format(s, i, average_precision))
n_fold_pr_score.append(average_precision)
all_average_precision=0
if classes is None:
if average_plot:
all_y_true = np.concatenate(y_true)
all_probas = np.concatenate(y_probas)
precision_all, recall_all, _ = precision_recall_curve(all_y_true, all_probas)
all_average_precision = average_precision_score(all_y_true, all_probas)
ax.plot(recall_all, precision_all,
label='average Precision-recall curve'
'(area = {0:0.2f})'.format(all_average_precision),
color='blue', linestyle=':', linewidth=4)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall', fontsize=text_fontsize)
ax.set_ylabel('Precision', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='best', fontsize=text_fontsize)
return ax, n_fold_pr_score, all_average_precision
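# Minimal usage sketch for plot_roc (synthetic data, illustration only):
#
# rng = np.random.RandomState(0)
# y_true = [rng.randint(0, 2, 100) for _ in range(2)]
# y_probas = [rng.rand(100) for _ in range(2)]
# ax, fold_aucs, avg_auc = plot_roc(y_true, y_probas)
# plt.show()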
|
[
"60203611+YoungGoodman@users.noreply.github.com"
] |
60203611+YoungGoodman@users.noreply.github.com
|
993621681e8265e350db1ce0a5bdf55253dcc21c
|
a29bbae651a831343dc998bbdca64f2e308a239c
|
/analysis_plot.py
|
2bac881752295afb862a5dd922700ffa144de2c6
|
[] |
no_license
|
RuiZhang-ICT/int-cyclegan-brats-energy
|
0ccd644912db6198087e8c2fc1d519b3629ae76b
|
67e4a6345424f7e1e31ec980d0381b5b6ba16c14
|
refs/heads/master
| 2020-03-26T13:33:52.818718
| 2018-08-23T02:56:29
| 2018-08-23T02:56:29
| 144,945,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np

img1 = Image.open('Flair/0001_090.png')
img1_mat = np.array(img1, dtype=float)
img2 = Image.open('T1/0001_090.png')
img2_mat = np.array(img2, dtype=float)

plt.imshow(img1_mat, cmap='gray')
plt.show()

img1_prob = img1_mat / 127.0 - 1
img2_prob = img2_mat / 127.0 - 1

# cell_range must be defined before the intensities are binned with it
cell_range = 16
bins_num = 256 // cell_range
mat_index = np.int32(img1_mat / cell_range)
mat_feature = img2_prob - img1_prob

bins_idx = np.zeros(bins_num)
for idx in range(bins_num):
    num = np.sum(mat_index == idx)
    if num > 0:
        bins_idx[idx] = np.mean(mat_feature[mat_index == idx])
plt.plot(bins_idx)
plt.savefig('tmp.png')
|
[
"zhangrui@ict.ac.cn"
] |
zhangrui@ict.ac.cn
|
ce7b5b0db92a2702183e98f1c974cc8df7a292a3
|
a677159f2b4efa9e99a96fcccfc3c3cd95747eaf
|
/ziguangzhanrui/8955-scripts/v1_runner.py
|
0a593e3d843ba149532b56af0a3fd9a7c07e072f
|
[] |
no_license
|
Taomengyuan/Demo
|
0c1f5c06daf776c22541500b84f0f19b854da2ea
|
8e705c9cb45b9e3dfd3c4756c398b0265079b7e2
|
refs/heads/master
| 2020-09-12T10:52:36.289617
| 2019-03-12T02:14:39
| 2019-03-12T02:14:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from comm.atconfig import atConfig
from comm.atlogger import atLogger
if __name__=="__main__":
print(atConfig.com_port)
atLogger.case_start("AT")
atLogger.command_send("AT")
atLogger.get_res("OK")
atLogger.case_end("AT")
|
[
"limengmeng004@ke.com"
] |
limengmeng004@ke.com
|
361237e78a1186eba941c6d8345459a396a8133d
|
fd5f52c0c5a4844f3f9dc0f8cd12ebed845fea67
|
/change.py
|
5ae993d8ce0fb92cd29157d7e68e8137076d3eb1
|
[] |
no_license
|
TataSatyaPratheek/AlgoToolbox
|
77fbf50cb9e8eac7c6dbbc41c9c9a3212bef045e
|
64674d03197c2406673ab3031e8eb17b38379954
|
refs/heads/master
| 2022-11-30T10:42:11.255288
| 2020-08-11T17:03:52
| 2020-08-11T17:03:52
| 286,720,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
# Uses python3
import sys
def get_change(m):
    # greedy change: as many 10s as possible, then 5s, then 1s
    a = m // 10 + (m % 10) // 5 + (m % 10) % 5
    return a
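# worked example (illustrative): m = 28 -> 2 tens + 1 five + 3 ones,
# so get_change(28) == 6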
if __name__ == '__main__':
m = int(sys.stdin.read())
print(get_change(m))
|
[
"noreply@github.com"
] |
TataSatyaPratheek.noreply@github.com
|
e9dbdafacf420c57b3aa907a62b3ff10c0b637f8
|
2fea90baaf526322ea87d42a64808e9596a0f4c2
|
/1021.py
|
fc1cec4b79ba96c647add46a096c01a3a8935520
|
[] |
no_license
|
claudianoufrpe/URI
|
2e69d6ade5a78333f2e79fa278cdc48513503fab
|
969d23115a812bf4aa73d604fded6c19fbd9f584
|
refs/heads/master
| 2020-05-18T18:02:52.456005
| 2018-04-27T14:18:13
| 2018-04-27T14:18:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
# @author Matheus Alves dos Santos
# TITLE: Notas e Moedas
# ID: 1021
dinheiro = int(float(raw_input()) * 100)
valores = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 25, 10, 5, 1]
resultado = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in range(len(valores)):
if dinheiro >= valores[i]:
resultado[i] = dinheiro / valores[i]
dinheiro -= resultado[i] * valores[i]
print 'NOTAS:'
for i in range(0, 6):
print '%d nota(s) de R$ %.2f' % (resultado[i], float(valores[i])/100)
print 'MOEDAS:'
for i in range(6, 12):
print '%d moeda(s) de R$ %.2f' % (resultado[i], float(valores[i])/100)
|
[
"m.hollygard@gmail.com"
] |
m.hollygard@gmail.com
|
a1c753f9253ae42052bd4f7eee49c088120df1f4
|
0ba6e9398ae8592d84d7925d4947f44fa7550f5c
|
/Snake Game Redevelopment/Source/MainMenu.py
|
511fd6294b1dbe155b3491e284c33cfd1ee0caf1
|
[
"MIT"
] |
permissive
|
MdNaushad12/python-animations
|
20f73139b9461806a9d6fa1f7b980160091c7ec3
|
ee0b4e7fe113f593d5cd9bef78500eb7d2c0707b
|
refs/heads/master
| 2021-05-18T18:50:07.264845
| 2020-03-30T16:22:22
| 2020-03-30T16:22:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,658
|
py
|
from PlayMap import *
import pygame
from GameOver import *
from GamePause import *
from GameInstructions import *
"""
Main Menu Class encapsulates the 'window' of the game
as the logic(backend) and decides what to send to Controller.py to display
"""
class MainMenu :
"""
MainMenu has state variables:
gameMap: PlayMap object
gameOver: GameOver object
state: string
startGameButton: pygame.Rect object
exitGameButton: pygame.Rect object
Assumptions: __init__() is called before any other access program
"""
def __init__(self) :
"""
Constructor for MainMenu class
Transition: initialized to main menu state
exception: none
"""
self.STATES = ['menu','game','gameOver','gamePause','instructions']
self.gameMap = PlayMap()
self.gameOver = GameOver(20)
self.gamePause = GamePause(20)
self.gameInstructions = GameInstructions()
self.pauseStatus = False
# What stage of the interface the game is on
self.state = 'menu'
# Display objects for the GUI
self.startGameButton = pygame.Rect(50,200,200,100)
self.exitGameButton = pygame.Rect(300,200,200,100)
self.diff0Button = pygame.Rect(100,400,50,50)
self.diff1Button = pygame.Rect(200,400,50,50)
self.diff2Button = pygame.Rect(300,400,50,50)
self.updateState()
# Changed representation by python of object for ease of testing
def __repr__(self) :
return (str(self.startGameButton)+str(self.exitGameButton)+str(self.diff0Button)+ str(self.diff1Button) + str(self.diff2Button) + str(self.state))
#Add ValueError exception
#change value of self.state
def changeState(self,newState) :
"""
function to change the current state of the main menu
Transition: self.state is updated to new state
exception: none
input: newState - string value of new state
output: none
"""
        if newState in self.STATES : self.state = newState
if self.state =='menu' : self.pauseStatus = False
if self.state == 'instructions' : self.pauseStatus = False
if self.state=='game' :
if not(self.pauseStatus) :
self.gameMap = PlayMap()
if self.state=='gameOver':
self.pauseStatus = False
self.gameOver = GameOver(self.gameMap.score)
if self.state=='gamePause':
self.pauseStatus = True
self.gamePause = GamePause(self.gameMap.score)
#call necessary functions based on current state
def updateState(self) :
"""
function to return which objects to display on GUI (current state of game)
Transition: an array of objects to display on the screen is returned
exception: none
input: none
output: an array of objects (pygame)
"""
#print "MainMenu.updateState ran"
if self.state=='menu' :
return [self.startGameButton,self.exitGameButton,self.diff0Button,self.diff1Button,self.diff2Button]
if self.state=='instructions' :
return self.gameInstructions.getCurrentState()
if self.state=='game' :
self.gameMap.updateState()
return self.gameMap.getCurrentState()
if self.state=='gameOver' :
self.gameOver.updateState(self.gameMap.score)
return self.gameOver.getCurrentState()
if self.state=='gamePause' :
self.gamePause.updateState(self.gameMap.score)
return self.gamePause.getCurrentState()
|
[
"rabbass@mcmaster.ca"
] |
rabbass@mcmaster.ca
|