blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
40ba500165d51f5a4a42bd3211868cf89e57d901 | Python | voyagerdva/EXERCISES_PYTHON | /EXERCISES_STRINGS_101/Exercise_7_Replace_POOR_and_NOT_to_GOOD/test.py | UTF-8 | 226 | 2.859375 | 3 | [] | no_license | import pytest
import calculate
def test_Calculate():
original_string = 'The lyrics is not that poor!'
result = calculate.replaceToGOOD1(original_string)
ethalon = "The lyrics is GOOD!"
assert result == ethalon | true |
cd830317ee726ec9814d0bcb8077f0a82400ef37 | Python | patrickbald/hadoop-map-reduce | /inlinksMap.py | UTF-8 | 261 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
import io
def main():
    """Hadoop-streaming mapper: invert (host, link) pairs read from stdin.

    Each input line is whitespace-delimited with the host in field 0 and the
    link in field 1; the mapper emits "link host" so records can later be
    grouped by link target.
    """
    # Wrap stdin with a Latin-1 decoder: crawl data may not be valid UTF-8.
    stream = io.TextIOWrapper(sys.stdin.buffer, encoding='iso-8859-1')
    for line in stream:
        # Split once per line (the original called line.split() twice).
        fields = line.split()
        host, link = fields[0], fields[1]
        print(f"{link} {host}")
# Run the mapper only when executed directly (e.g. by Hadoop streaming).
if __name__ == '__main__':
    main()
| true |
a37768e480496bd969d3fad9002ffc8de3d91843 | Python | jonggyup/Grouping-Applications-Using-Geometrical-Information-of-Applications-on-Tabletop-Systems | /tracefiles/user_4_backup/exp1/test.py | UTF-8 | 3,165 | 3.03125 | 3 | [] | no_license | from __future__ import division
import collections
import random
import numpy as np
from sklearn import svm
from sklearn.metrics import f1_score
def number_of_chars(s):
    """Return the total number of characters in *s*."""
    return len(s)
def unique_chars(s):
    """Return how many distinct characters appear in *s*."""
    return len(set(s))
def weighted_unique_chars(s):
    """Fraction of *s*'s characters that are distinct.

    Raises ZeroDivisionError for the empty string, exactly like the original.
    """
    distinct = len(set(s))
    return distinct / len(s)
def words_count(s):
    """Return collections.Counter(s).

    NOTE(review): when *s* is a string this counts individual characters,
    not words -- the name is misleading. It is unused by the __main__
    pipeline below.
    """
    return collections.Counter(s)
def words_counter_object(s):
    """Count the whitespace-separated words of *s* into a Counter."""
    return collections.Counter(s.split())
def total_words(cnt):
    """Sum every count stored in *cnt* (a Counter or plain mapping).

    Values are coerced with int() just as the original did, so string
    counts like "4" are accepted too.
    """
    total = 0
    for value in dict(cnt).values():
        total += int(value)
    return total
def most_common(cnt, n):
    """Iterate over the *n* most common entries of Counter *cnt*.

    Debugging stub: the print is commented out, so this currently does
    nothing observable.
    """
    for k,v in cnt.most_common(n):
        #print "most common k = %s : v = %s" %(k,v)
        pass
def is_repeated(cnt):
    """Return 1 when the single most frequent word accounts for more than
    half of all words in *cnt*, otherwise 0."""
    for _, count in cnt.most_common(1):
        if count / total_words(cnt) > 0.5:
            return 1
    return 0
def make_feature_vector(critique, labels):
    """Construct one feature row per critique text.

    Each row is [n_chars, n_unique_chars, unique_ratio, n_words,
    is_repeated_flag, label], with labels[i] matched by position.
    """
    feature_vector = []
    for i, text in enumerate(critique):
        counter_obj = words_counter_object(text)
        feature_vector.append([
            number_of_chars(text),
            unique_chars(text),
            weighted_unique_chars(text),
            total_words(counter_obj),
            is_repeated(counter_obj),
            labels[i],
        ])
    return feature_vector
def read_data():
    """Read the labelled critiques from disk.

    'bad.txt' and 'good.txt' each hold critiques separated by '|'.
    Returns (texts, labels): the concatenated texts plus 0 for each bad
    critique and 1 for each good one, in matching order.
    """
    # 'with' guarantees the handles are closed even if read() raises; the
    # original's explicit open/close pairs leaked on error.
    with open('bad.txt', 'r') as f:
        bad = f.read().split('|')
    with open('good.txt', 'r') as f:
        good = f.read().split('|')
    return bad+good, [0]*len(bad) + [1]*len(good)
def make_np_array_XY(xy):
    """Split rows of [features..., label] into numpy arrays.

    Returns (X, y): X is every column but the last, y is the last column.
    (Python 2 print statement preserved -- the whole module targets Py2.)
    """
    print "make_np_array_XY()"
    a = np.array(xy)
    x = a[:,0:-1]
    y = a[:,-1]
    return x,y
# Python 2 driver: build features, split 50/50, train a linear SVM and report
# raw accuracy plus macro/micro/weighted F1 scores.
if __name__ == '__main__':
    critiques, labels = read_data()
    features_and_labels= make_feature_vector(critiques, labels)
    number_of_features = len(features_and_labels[0]) - 1
    random.shuffle(features_and_labels)
    # make train / test sets from the shuffled list
    cut = int(len(features_and_labels)/2)
    XY_train = features_and_labels[:cut]
    XY_test = features_and_labels[cut:]
    X_train, Y_train = make_np_array_XY(XY_train)
    X_test, Y_test = make_np_array_XY(XY_test)
    # train set
    C = 1.0 # SVM regularization parameter
    svc = svm.SVC(kernel='linear', C=C).fit(X_train, Y_train)
    print 'type(svc)=', type(svc)
    print 'svc=',svc
    print 'Y_test:\n', Y_test
    Y_predict = svc.predict(X_test)
    print 'Y_predict:\n', Y_predict
    # score: plain count of matching predictions
    test_size = len(Y_test)
    score = 0
    for i in range(test_size):
        if Y_predict[i] == Y_test[i]:
            score += 1
    print 'Got %s out of %s' %(score, test_size)
    # f1 score
    f1 = f1_score(Y_test, Y_predict, average='macro')
    print 'f1 macro = %.2f' %(f1)
    f1 = f1_score(Y_test, Y_predict, average='micro')
    print 'f1 micro = %.2f' %(f1)
    f1 = f1_score(Y_test, Y_predict, average='weighted')
    print 'f1 weighted = %.2f' %(f1)
| true |
65dacd67441098caca7de997f0329d70633ced6f | Python | AlexVlasev/AlexVlasev.github.io | /build.py | UTF-8 | 3,917 | 2.625 | 3 | [] | no_license | import json
def element(head, attributes, content=""):
    """Render an HTML element as a string.

    *attributes* maps attribute names to values; a key literally named
    "head" is skipped (it names the tag itself in the scripts.json data).
    """
    rendered = []
    for key, value in attributes.items():
        if key != "head":
            rendered.append(f'{key}="{value}"')
    config = " ".join(rendered)
    return f'<{head} {config}>{content}</{head}>'
# Bootstrap screen-reader-only marker appended to the active nav entry's title.
sr_only = element("span", {"class": "sr-only"}, "(current)")
# Shared attribute set for Bootstrap dropdown toggle anchors.
dropdown_config = {
    "class": "nav-link dropdown-toggle",
    "href": "#",
    "role": "button",
    "data-toggle": "dropdown",
    "aria-haspopup": "true",
    "aria-expanded": "false"
}
def navbar_item(item, active=False, active_option=""):
    """Render one navbar entry, dispatching on its "type" field.

    Returns None for unknown types, matching the original fall-through.
    """
    kind = item["type"]
    if kind == "link":
        return navbar_link(item, active)
    if kind == "dropdown":
        return navbar_dropdown(item, active, active_option)
    return None
def navbar_link(data, active=False):
    """Render a plain navbar link as a Bootstrap <li><a .../></li> pair."""
    classes = ["nav-item"]
    title = data["title"]
    if active:
        # Active entries get the Bootstrap class plus the sr-only marker.
        classes.append("active")
        title += sr_only
    anchor = element("a", {"class": "nav-link", "href": data["href"]}, title)
    return element("li", {"class": " ".join(classes)}, anchor)
def navbar_dropdown(data, active=False, active_option=""):
    """Render a navbar dropdown entry with all of its menu options.

    The dropdown is marked active (class + sr-only marker) when any of its
    options' titles matches *active_option*. The *active* parameter is kept
    for signature compatibility with navbar_link but is unused here, as in
    the original.
    """
    cls = "nav-item dropdown"
    title = data['title']
    # Bug fix: the original also built a menu_options list inside this loop
    # and then overwrote it below -- that work was dead and has been removed.
    if any(option['title'] == active_option for option in data["options"]):
        cls = "nav-item dropdown active"
        title = data['title'] + sr_only
    config = element("a", dropdown_config, title)
    menu_options = f'\n{6*" "}' + f'\n{6*" "}'.join(navbar_dropdown_item(option, option['title'] == active_option) for option in data["options"])
    menu = element("div", {"class": "dropdown-menu", "aria-labelledby": "navbarDropdown"}, menu_options)
    items = [config, menu]
    return element("li", {"class": cls}, f'\n{5*" "}'.join(items))
def navbar_dropdown_item(data, active=False):
    """Render one option inside a dropdown menu as an <a> element."""
    title = data["title"]
    cls = "dropdown-item active" if active else "dropdown-item"
    if active:
        title += sr_only
    return element("a", {"class": cls, "href": data["href"]}, title)
# Static-site generator driver: read the HTML template and the JSON data
# files, then render one HTML page per entry in pages.json.
if __name__ == '__main__':
    with open("template.html", "r") as infile:
        template = "".join(infile.readlines())
    with open("json/scripts.json", 'r') as infile:
        scripts_data = json.load(infile)
    with open("json/navbar.json", 'r') as infile:
        navbar_data = json.load(infile)["navbar-items"]
    with open("json/pages.json", 'r') as infile:
        pages_data = json.load(infile)
    # Pre-render every known <script>/<link> tag once.
    scripts = {name: element(data["head"], data) for name, data in scripts_data.items()}
    default_script_names = ["jquery.js", "popper.js", "bootstrap.css", "bootstrap.js"]
    navbar_keys = ["home", "software", "math", "lifestyle", "about"]
    for page in pages_data:
        # Per-page script list: defaults plus optional MathJax / p5.js.
        scripts_to_add = [script_name for script_name in default_script_names]
        if page["include-math"]:
            scripts_to_add.append("mathjax.js")
        if page["include-p5"]:
            scripts_to_add.append("p5.js")
            scripts_to_add.append("p5.dom.js")
        scripts_html = '\n    '.join(scripts[name] for name in scripts_to_add)
        title = page["title"]
        navbar_items = (navbar_item(navbar_data[name], name == page["name"], title) for name in navbar_data)
        navbar_html = f'\n{5*" "}'.join(navbar_items)
        with open(f'content/{page["name"]}-content.html', 'r') as infile:
            content_html = f'{4*" "}'.join(infile.readlines())
        # Placeholder values substituted into template.html via str.format.
        page_items = {
            "title": "Alex Vlasev" + f' - {title}',
            "author": "Alexander Vlasev",
            "description": page["description"],
            "brand": "Alex Vlasev",
            "scripts": scripts_html,
            "navbar": navbar_html,
            "content": content_html
        }
        webpage = template.format(**page_items)
        with open(f'{page["name"]}.html', 'w') as outfile:
            outfile.write(webpage)
| true |
b942fe891463983c9fc05f85b4d00cd77bb489e4 | Python | anand-sonawane/30DaysOfCode-Hackerrank | /Python/9:Recursion.py | UTF-8 | 163 | 3.25 | 3 | [] | no_license | N=int(input())
def factorial(fact_n):
    """Return fact_n! (factorial), computed recursively.

    Any fact_n <= 1 is treated as the base case, so factorial(0) == 1;
    the original only stopped at exactly 1 and recursed forever for
    inputs below that.
    """
    if fact_n <= 1:
        return 1
    return fact_n * factorial(fact_n - 1)
# Report N! for the N read from stdin above.
ans = factorial(N)
print(ans)
| true |
08743e0bd8e5b16a42d4ee965cc1c84f29f8a06e | Python | ricardopineda93/Playing-with-API-Calls | /python_repos.py | UTF-8 | 3,806 | 3.578125 | 4 | [] | no_license | import requests
import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
# Make an API call to GitHub's repository search and store the response.
# Bug fix: GitHub's search syntax uses a colon qualifier ("language:python");
# the original "language=python" query silently searched the wrong terms.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print('Status Code: ', r.status_code)  # 200 indicates a successful call

# Store the JSON API response as a dictionary.
response_dict = r.json()
print('Total Repositories: ', response_dict['total_count'])

# 'items' is a list of dicts, one per repository returned.
repo_dicts = response_dict['items']
print('Repositories returned: ', len(repo_dicts))

# Build the plot data: repository names plus one dict per bar describing the
# bar's value, hover tooltip, and click-through link.
names, plot_dicts = [], []
for repo_dict in repo_dicts:
    names.append(repo_dict['name'])

    # Always have a fallback when a project supplies no description.
    description = repo_dict['description']
    if not description:
        description = 'No description provided.'

    plot_dict = {
        'value': repo_dict['stargazers_count'],  # star count -- the quantity plotted
        'label': description,                    # tooltip shown on hover
        'xlink': repo_dict['html_url'],          # clicking a bar opens the repo page
    }
    plot_dicts.append(plot_dict)

# Make the visualization through Pygal.
my_style = LS('#333366', base_style=LCS)
my_style.title_font_size = 24
my_style.label_font_size = 14
my_style.major_label_font_size = 18

my_config = pygal.Config()
my_config.x_label_rotation = 45
my_config.show_legend = False
my_config.truncate_label = 15    # truncate project names on the x-axis beyond 15 chars
my_config.show_y_guides = False  # hide the horizontal guide lines
my_config.width = 1000

chart = pygal.Bar(my_config, style=my_style)
chart.title = 'Most Starred Python Projects on GitHub'
chart.x_labels = names
chart.add('', plot_dicts)
chart.render_to_file('python_repos.svg')
| true |
68fae6dbf3b6d6ab646d865e257a581fe84fe729 | Python | rudyard2021/math-calculator | /main.py | UTF-8 | 300 | 2.625 | 3 | [] | no_license | from source.function import Function
if __name__ == "__main__":
    # Parse and evaluate a sample expression with the project-local parser.
    function = Function()
    err = function.start("raiz(25;-4+2*(-5+8))+summa(x;x;1;5)")
    if err is not None:
        # Parsing failed ("Incompleto" = incomplete expression).
        print("Incompleto => {}".format(err))
    else:
        value = function.f()
        print("{}".format(value))
| true |
8fbb2541eeba6d6cce2b8737279eb1bb7d02c8b3 | Python | julienbgr/email_utilize | /email_utilize.py | UTF-8 | 1,983 | 2.625 | 3 | [] | no_license | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import email.mime.application
import pandas as pd
import time
class Empfänger:
    """A mail recipient: last name, first name, address and salutation."""
    def __init__(self, Nachname, Vorname, Email, Anrede):
        # Anrede is expected to be 'Herr' or 'Frau' (consumed by Anrede()).
        self.nachname = Nachname
        self.vorname = Vorname
        self.email = Email
        self.anrede = Anrede
def GetReceivers():
    """Parse the 'Empfänger' file into a list of Empfänger objects.

    Each line is expected as: Nachname, Vorname, Email, Anrede
    (comma-separated; surrounding whitespace is stripped per field).
    """
    filepath = 'Empfänger'
    Receivers = []
    with open(filepath, 'r') as lines:
        for line in lines:
            line = line.strip('\n')
            result = line.split(',')
            Receivers.append(Empfänger(result[0].strip(), result[1].strip(), result[2].strip(), result[3].strip()))
    return Receivers
def Anrede(Message, Receiver):
    """Fill the salutation placeholders in *Message* for *Receiver*.

    *Message* must contain two '{}' placeholders: the salutation phrase
    and the recipient's last name. Returns None for an unknown Anrede,
    matching the original fall-through.
    """
    if Receiver.anrede == 'Herr':
        return Message.format('geehrter Herr', Receiver.nachname)
    if Receiver.anrede == 'Frau':
        # Bug fix: was 'geehrtee Frau' (typo); correct German is 'geehrte Frau'.
        return Message.format('geehrte Frau', Receiver.nachname)
    return None
def GetMessage():
    """Read the message template from the 'Nachricht' file.

    Uses a context manager so the file handle is closed; the original
    open(...).read() leaked it.
    """
    with open('Nachricht', 'r') as f:
        return f.read()
start = time.time()

# Initialize: credentials and addresses are placeholders to be filled in.
sender = 'YOUR_MAIL'
password = 'PASSWORD'
cc = 'CC_MAIL'
subject = 'SUB'
receiver_list = GetReceivers()

# Start the SMTP session (Office 365, STARTTLS on port 587).
server = smtplib.SMTP('smtp.office365.com', 587)
server.starttls()
server.login(sender, password)

Message = GetMessage()
for receiver in receiver_list:
    # Personalize the template for this recipient.
    message = Anrede(Message, receiver)
    msg = MIMEMultipart()
    msg['From'] = sender
    msg['To'] = receiver.email
    msg['Subject'] = subject
    msg['CC'] = cc
    msg.attach(MIMEText(message, 'plain'))
    filename = 'Anhang.pdf' #path to file
    # 'with' closes the PDF even if building the MIME part fails.
    with open(filename, 'rb') as fo:
        attach = email.mime.application.MIMEApplication(fo.read(), _subtype="pdf")
    attach.add_header('Content-Disposition', 'attachment', filename=filename)
    # Bug fix: the original called msg.attach(attach) twice, so every mail
    # carried the PDF as a duplicate attachment.
    msg.attach(attach)
    text = msg.as_string()
    server.sendmail(sender, receiver.email, text)
server.quit()

fin = time.time()
print('Email is transmitted within %.3f'%(fin-start))
| true |
f2d3c2522343c3edae324fd68fec6bbb4dfdf0e7 | Python | mdietterle/aulas | /listas/listas1.py | UTF-8 | 315 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | lista=["pão","leite","queijo", "café","presunto"]
print(lista)
# Index-based pass over the shopping list (exercise style).
for compras in range(0,len(lista)):
    print(lista[compras])
# Ask for a forgotten item and append it (prompt text is Portuguese).
item = input("Digite o item que você esqueceu de colocar na lista: ")
lista.append(item)
print(lista)
print("----------------------------")
# Direct iteration over the updated list.
for compras in lista:
    print(compras)
| true |
3d8101c01b57f28430864f3000785250364b0a57 | Python | truongquang1993/truongvanquang-Fundamentals-c4e26 | /LAB1/Homework/Exercise1.py | UTF-8 | 1,391 | 2.90625 | 3 | [] | no_license | from urllib.request import urlopen
from bs4 import BeautifulSoup
from collections import OrderedDict
import pyexcel
from youtube_dl import YoutubeDL
## part 1: scrape the iTunes top-songs chart
# 1. Create a connection
url = "https://www.apple.com/itunes/charts/songs/"
conn = urlopen(url)
# 2. Download the page
raw_data = conn.read()
page_content = raw_data.decode("utf8")
# print(page_content)
# 3. Find the ROI (region of interest) in the parsed HTML
soup = BeautifulSoup(page_content, "html.parser")
section = soup.find("section", "section chart-grid")
div = section.find("div", "section-content")
ul = div.find("ul")
# 4. Extract the ROI entries
li_list = ul.find_all("li")
# print(li_list)
top_list = []
for li in li_list[0:10]: # only the first few entries so part 2's downloads stay quick
    strong = li.strong
    oder_number = strong.string
    h3 = li.h3
    a = h3.a
    link_song = a["href"]
    name_song = a.string
    h4 = li.h4
    aa = h4.a
    name_artist = aa.string
    top = OrderedDict({
        "oder number": oder_number,
        "link song": link_song,
        "songs'name": name_song,
        "artist": name_artist
    })
    top_list.append(top)

    # <Part 2>: download each song's audio via a YouTube search on its name
    options = {
        "default_search": "ytsearch",
        "max_downloads": 1
    }
    dl = YoutubeDL(options)
    dl.download([name_song])
    #</Part 2>
# 5. Save the chart to a spreadsheet
pyexcel.save_as(records=top_list, dest_file_name="Top songs.xlsx")
| true |
49ac1a64c03b0c35efeb59f31834032818ff6fdb | Python | CodeChangeTheWorld/bigdata | /Kafka/data-producer.py | UTF-8 | 2,974 | 2.71875 | 3 | [] | no_license | from googlefinance import getQuotes
from kafka import KafkaProducer
from kafka.errors import KafkaTimeoutError
import argparse #used to parse argument
import atexit #clean up when exit
import datetime
import logging
import json
import random
import schedule
import time
# - default kafka topic to write to
topic_name = 'stock-analyzer'
# -default kafka broker location
kafka_broker='192.168.99.100:9092'
logger_format = '%(asctime)-15s %(message)s'
logging.basicConfig(format=logger_format)
logger = logging.getLogger('data-producer')
logger.setLevel(logging.DEBUG)
def fetch_price(producer, symbol):
    """
    Fetch a (currently simulated) stock quote and publish it to Kafka.
    :param producer: instance of a kafka producer
    :param symbol: symbol of the stock
    :return: None
    """
    logger.debug('Start to fetch stock price for %s', symbol)
    try:
        # Real quotes came from googlefinance; a random price keeps the
        # pipeline runnable without the external service.
        # price = json.dumps(getQuotes(symbol))
        price = random.randint(30, 120)
        timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%MZ')
        # Bug fix: the payload hard-coded "AAPL" and ignored the requested symbol.
        payload = ('[{"StockSymbol":"%s", "LastTradePrice":%d, "LastTradeDateTime":"%s"}]' % (symbol, price, timestamp)).encode('utf-8')
        logger.debug('Retrive stock info %s', price)
        # Bug fix: KafkaProducer.send expects timestamp_ms in epoch milliseconds;
        # time.time() alone is seconds (a float).
        producer.send(topic=topic_name, value=payload, timestamp_ms=int(time.time() * 1000))
        logger.debug('Sent stock price for %s to kafka', symbol)
    except KafkaTimeoutError as timeout_error:
        # Bug fix: the original passed a single tuple argument (breaking the
        # two %s placeholders) and used .message, which Python 3 exceptions lack.
        logger.warn('Failed to send stock price for %s to kafka, caused by: %s', symbol, str(timeout_error))
    except Exception:
        logger.warn('Failed to fetch stock price for %s', symbol)
def shutdown_hook(producer):
    """
    A shutdown hook to be called before the process exits: flush any
    pending messages (10s timeout), then close the Kafka connection.
    :param producer: instance of a kafka producer
    :return: None
    """
    try:
        logger.info('Flushing pending messages to kafka, timeout is set to 10s')
        producer.flush(10)
        logger.info('Finish flushing pending messages to kafka')
    except Exception as kafka_error:
        # Bug fix: the original caught `KafkaError`, which was never imported,
        # so any flush failure raised NameError instead of being logged.
        logger.warn('Failed to flush pending messages to kafka, caused by: %s', str(kafka_error))
    finally:
        try:
            logger.info('Closing kafka connection')
            producer.close(10)
        except Exception as e:
            # Bug fix: `e.mesage` was a typo and exceptions have no .message in Py3.
            logger.warn('Failed to close kafka connection, caused by: %s', str(e))
if __name__=='__main__':
    #- setup command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('symbol', help='the symbol of the stock to collect')
    parser.add_argument('topic_name', help='the kafka topic push to')
    parser.add_argument('kafka_broker', help='the location of the kafka broker')
    # - parse arguments (overriding the module-level defaults above)
    args = parser.parse_args()
    symbol = args.symbol
    topic_name = args.topic_name
    kafka_broker = args.kafka_broker
    # - instantiate a simple kafka producer
    producer = KafkaProducer(
        bootstrap_servers=kafka_broker
    )
    # - schedule and run the fetch_price function every second
    schedule.every(1).second.do(fetch_price, producer,symbol)
    # - setup proper shutdown hook so pending messages are flushed on exit
    atexit.register(shutdown_hook, producer)
    # - run the scheduler forever (Ctrl-C to stop; atexit handles cleanup)
    while True:
        schedule.run_pending()
        time.sleep(1)
| true |
3991a9f17aad98681ba9d4a6ebcbb710807fc3d9 | Python | julianosk/ordenaacoes | /controllers/consts.py | UTF-8 | 5,168 | 2.8125 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: latin-1 -*-
"""
TODO:
- Reordenar sem reload - FEITO
- Refiltrar sem reload
- http://code.google.com/appengine/docs/python/config/cron.html - FEITO
- http://code.google.com/appengine/docs/python/backends/overview.html
- http://tablesorter.com/docs/example-ajax.html
"""
stockattrs = ['pl', 'pvp', 'psr', 'divyield', 'pativo', 'pcapgiro', 'pebit',
'pativcircliq','evebit', 'mrgebit', 'mrgliq', 'liqcor', 'roic', 'roe',
'liquidez', 'patrimliq', 'divbrutpatr', 'growth', 'diff']
indicatorsorder = {'pl':False, 'pvp':False, 'psr':False, 'divyield':True, 'pativo':False, 'pcapgiro':False,
'pebit':False,'pativcircliq':False,'evebit':False, 'mrgebit':True, 'mrgliq':True, 'liqcor':True, 'roic':True,
'roe':True, 'liquidez':True, 'patrimliq':True, 'divbrutpatr':False, 'growth':True, 'diff':True}
attrnames = {'pl':'P/L', 'pvp':'P/VP', 'psr':'PSR', 'divyield':'Div. Yield', 'pativo':'P/Ativos', 'pcapgiro':'P/Cap. Giro',
'pebit':'P/EBIT','pativcircliq':'P/Ativ Circ Liq','evebit':'EV/EBIT', 'mrgebit':'Marg. EBIT', 'mrgliq':'Marg. Liquida',
'liqcor':'Liquidez Corr', 'roic':'ROIC', 'roe':'ROE', 'liquidez':'Liquidez 2 meses', 'patrimliq':'Patrimonio Liquido',
'divbrutpatr':'Divida Bruta/Patrimonio', 'growth':'Crescimento', 'diff':'Oscilacao'}
attrtips = { 'pl' : "Preço da ação dividido pelo lucro por ação. O P/L é o número de anos que se levaria para reaver o capital aplicado na compra de uma ação, através do recebimento do lucro gerado pela empresa, considerando que esses lucros permaneçam constantes.",
'pvp' : "Preço da ação dividido pelo Valor Patrimonial por ação. Informa quanto o mercado está disposto a pagar sobre o Patrimônio Líquido da empresa.",
'pebit' : "Preço da ação dividido pelo EBIT por ação. EBIT é o Lucro antes dos Impostos e Despesas Financeiras. É uma boa aproximação do lucro operacional da empresa.",
'psr' : "Price Sales Ratio: Preço da ação dividido pela Receita Líquida por ação.",
'divyield' : "Dividend Yield: Dividendo pago por ação dividido pelo preço da ação. É o rendimento gerado para o dono da ação pelo pagamento de dividendos.",
'pativo' : "Preço da ação dividido pelos Ativos totais por ação.",
'pcapgiro' : "Preço da ação dividido pelo capital de giro por ação. Capital de giro é o Ativo Circulante menos Passivo Circulante.",
'pebit' : "Preço da ação dividido pelo EBIT por ação. EBIT é o Lucro antes dos Impostos e Despesas Financeiras. É uma boa aproximação do lucro operacional da empresa.",
'pativcircliq' : "Preço da ação dividido pelos Ativos Circulantes Líquidos por ação. Ativo Circ. Líq. é obtido subtraindo os ativos circulantes pelas dívidas de curto e longo prazo, ou seja, após o pagamento de todas as dívidas, quanto sobraria dos ativos mais líquidos da empresa (caixa, estoque, etc)",
'evebit' : "Valor da Firma (Enterprise Value) dividido pelo EBIT.",
'mrgebit' : "EBIT dividido pela Receita Líquida: Indica a porcentagem de cada R$1 de venda que sobrou após o pagamento dos custos dos produtos/serviços vendidos, das despesas com vendas, gerais e administrativas.",
'mrgliq' : "Lucro Líquido dividido pela Receita Líquida.",
'liqcor' : "Ativo Circulante dividido pelo Passivo Circulante: Reflete a capacidade de pagamento da empresa no curto prazo.",
'roic' : "Retorno sobre o Capital Investido: Calculado dividindo-se o EBIT por (Ativos - Fornecedores - Caixa). Informa o retorno que a empresa consegue sobre o capital total aplicado.",
'roe' : "Retorno sobre o Patrimônio Líquido: Lucro líquido dividido pelo Patrimônio Líquido.",
'liquidez' : "Volume médio de negociação da ação nos últimos 2 meses (R$).",
'patrimliq' : "O patrimônio líquido representa os valores que os sócios ou acionistas têm na empresa em um determinado momento. No balanço patrimonial, a diferença entre o valor dos ativos e dos passivos e resultado de exercícios futuros representa o PL (Patrimônio Líquido), que é o valor contábil devido pela pessoa jurídica aos sócios ou acionistas.",
'divbrutpatr' : "Dívida Bruta total (Dívida+Debêntures) dividido pelo Patrimônio Líquido.",
'growth' : "Crescimento da Receita Líquida nos últimos 5 anos.",
'diff' : "Diferença do preço da ação com relação ao último dia."}
ibrx = ["AEDU3","ALLL3","AMBV4","AMIL3","BBAS3","BBDC3","BBDC4","BBRK3","BISA3","BRAP4","BRFS3","BRKM5","BRML3",
"BRPR3","BRSR6","BRTO4","BTOW3","BVMF3","CCRO3","CESP6","CIEL3","CMIG4","CPFE3","CPLE6","CRUZ3","CSAN3","CSMG3",
"CSNA3","CTIP3","CYRE3","DASA3","DTEX3","ECOR3","ELET3","ELET6","ELPL4","EMBR3","ENBR3","EVEN3","EZTC3","FIBR3",
"GETI4","GFSA3","GGBR4","GOAU4","GOLL4","HGTX3","HRTP3","HYPE3","ITSA4","ITUB4","JBSS3","KLBN4","LAME4","LIGT3",
"LLXL3","LREN3","MMXM3","MPLU3","MPXE3","MRFG3","MRVE3","MULT3","MYPK3","NATU3","ODPV3","OGXP3","PCAR4","PDGR3",
"PETR3","PETR4","POMO4","POSI3","PSSA3","QGEP3","RADL3","RAPT4","RDCD3","RENT3","RSID3","SANB11","SBSP3","SULA11",
"SUZB5","TAMM4","TBLE3","TCSA3","TIMP3","TNLP3","TNLP4","TOTS3","TRPL4","UGPA3","USIM3","USIM5","VAGR3","VALE3",
"VALE5","VIVT4","WEGE3"]
| true |
44e444bd1270327a904f571d77aeffb9b7d34c5e | Python | Aasthaengg/IBMdataset | /Python_codes/p03608/s731153919.py | UTF-8 | 824 | 2.953125 | 3 | [] | no_license |
from itertools import permutations
def submit():
    """Shortest tour over required towns (AtCoder-style stdin solution).

    Reads n towns, m undirected weighted edges and a list of R required
    towns; runs Warshall-Floyd for all-pairs shortest paths, then tries
    every visiting order of the required towns and prints the cheapest
    total path cost.
    """
    n, m, _ = map(int, input().split())
    rlist = list(map(int, input().split()))
    # Convert 1-based town ids to 0-based indices.
    rlist = [r - 1 for r in rlist]
    # warshall floyd: dp[i][j] = shortest distance between towns i and j
    dp = [[float('inf') for _ in range(n)] for _ in range(n)]
    for i in range(n):
        dp[i][i] = 0
    for _ in range(m):
        a, b, c = map(int, input().split())
        a -= 1
        b -= 1
        dp[a][b] = dp[b][a] = c
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
    # Brute-force every order of the required towns (R is small).
    min_cost = float('inf')
    for pt in permutations(rlist):
        cost = 0
        for i in range(len(pt) - 1):
            cost += dp[pt[i]][pt[i + 1]]
        if min_cost > cost:
            min_cost = cost
    print(min_cost)
submit() | true |
c5c56f984990d0a74b29930f0500404faa0c5250 | Python | ginseng27/robotSoccerDreadnoughts | /00/scripts/networking/client.py | UTF-8 | 793 | 3.046875 | 3 | [] | no_license | import socket
import sys
class Client:
    """Minimal TCP client (Python 2 syntax): connects on construction and
    forwards lines typed on stdin to the server."""
    def __init__(self,host,port):
        self.host = host
        self.port = port
        self.size = 1024
        self.open_socket()
    def open_socket(self):
        """Open the TCP connection; exit the process on failure."""
        try:
            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server.connect((self.host,self.port))
        # Python 2-only except syntax; message is the errno text.
        except socket.error, (value,message):
            if self.server:
                self.server.close()
            print "Could not open socket: " + message
            sys.exit(1)
    def run(self):
        """Read stdin forever, sending each line to the server."""
        while True:
            print "type somethign to send"
            line = sys.stdin.readline()
            self.sendInfo(line)
    def sendInfo(self, info):
        """Send *info* over the open socket."""
        print "Sending: %s" % info
        self.server.send(info)
| true |
0d9dcb87073a15f54098f15582e0f4ae2d53af18 | Python | SharanyaMarathe/Advance-Python | /last_name_sort.py | UTF-8 | 528 | 3.328125 | 3 | [] | no_license | def match_name():
li=list()
lastname=list()
for line in file1:
if len(line.split()) > 2:
li.append(line.split()[2])
else:
li.append(line.split()[1])
li.sort()
for item in li:
print("{}---------->{}".format(item.split(",")[0],item.split(",")[2]))
print("Name and Number of matches")
# Open the data file, skip its header row, then let match_name() consume
# the remaining lines from the (module-global) file1 handle.
with open("captains.txt",mode="r") as file1:
    header=next(file1)
    match_name()
print("==========================")
| true |
d2f3bf369a28a19b987c6b0b81dc95ee62a08679 | Python | 14-Mini/keyloggerforwindows | /keylogger.py | UTF-8 | 604 | 3 | 3 | [] | no_license | import datetime
from pynput.keyboard import Key, Listener
keys = []
def on_press(key):
    """pynput callback: record the pressed key and persist the log."""
    global keys
    keys.append(key)
    date = datetime.datetime.now()
    print(f"{date} {key} pressed.")
    # Persist after every keystroke (passes the full accumulated list).
    write_file(keys)
def write_file(keys):
    """Append one timestamped line per entry of *keys* to keys.txt.

    NOTE(review): as called from on_press, *keys* is the whole history, so
    every call re-appends all earlier keystrokes -- the log grows
    quadratically with duplicates. Confirm whether that is intended.
    """
    date = datetime.datetime.now()
    with open("keys.txt", "a") as f:
        for key in keys:
            f.write(f'{date} {str(key)} pressed.')
            f.write("\n")
def on_release(key):
    """pynput callback: returning False on Esc stops the Listener."""
    if key==Key.esc:
        return False
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join() | true |
7b295f8bd2612abdf6a4b26daebf23fc1ad1df35 | Python | tretyakovr/Lesson-04 | /Task-04.py | UTF-8 | 910 | 3.6875 | 4 | [] | no_license | # Третьяков Роман Викторович
# Факультет Geek University Python-разработки. Основы языка Python
# Урок 4. Задание 4:
# Представлен список чисел. Определить элементы списка, не имеющие повторений. Сформировать
# итоговый массив чисел, соответствующих требованию. Элементы вывести в порядке их следования
# в исходном списке. Для выполнения задания обязательно использовать генератор.
#
# Пример исходного списка: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].
#
# Результат: [23, 1, 3, 10, 4, 11]
# Count each element once (O(n)) instead of calling lst.count() per element
# (O(n^2)), then keep -- via the comprehension the exercise requires -- only
# the non-repeating values in their original order.
from collections import Counter

lst = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11, 15, 15, 2, 1, 777]
occurrences = Counter(lst)
new_lst = [i for i in lst if occurrences[i] == 1]
print(new_lst) | true |
e533a0b364986e4fd6348d4fb75167f430dcd704 | Python | bohdaholas/Lacalut | /create_new_poetry_db.py | UTF-8 | 557 | 2.546875 | 3 | [
"MIT"
] | permissive | import json
import re
import requests
poetry_page = "http://ukrlit.org/tvory/poeziia_poemy_virshi/virshi"
# NOTE(review): the page is fetched twice -- `page` below is never used and
# requests.get() is called again inside findall's argument.
page = requests.get(f"{poetry_page}")
# Capture (relative link, poem title) pairs from the chart's list items.
poem_name_link_pattern = re.compile(r'<li><a href="(\S+?)" title="\S+?">(.+?)</a>.+?</li>')
matches = re.findall(poem_name_link_pattern, requests.get(poetry_page).text)
poem_name_link_dict = {}
for poem_link, poem_name in matches:
    poem_name_link_dict[poem_name] = poem_link
# Persist as UTF-8 JSON, keeping Cyrillic titles readable (ensure_ascii=False).
with open("poetry_db.json", "w", encoding="utf-8") as file:
    json.dump(poem_name_link_dict, file, indent=4, ensure_ascii=False)
| true |
f6636525de76200875930b7344b42ddf95aa8422 | Python | isolde18/Class | /test_scores.py | UTF-8 | 331 | 3.375 | 3 | [] | no_license | #CTI
#20.02.2018
#Silvia
score1 = float (input ("Enter the first score "))
score2 = float ( input ("Enter the second score "))
score3 = float ( input ("Enter the third score "))
average_score= (score1 + score2 + score3)/3
print("The average of the three scores is " , average_score)
| true |
3738330daaab0d66102ef998b5f62f27a0239a08 | Python | nrw505/adventofcode-2019 | /day18/part2.py | UTF-8 | 4,337 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
import math
import re
import operator
from collections import deque
from grid import Grid
# Module state shared by the search routines below: the maze grid, the four
# robot start positions, and the coordinates of every key and door.
infile = open(sys.argv[1])
grid = Grid()
starts = (None, None, None, None)
start = None
keys = {}
doors = {}
def adjacent(pos):
    """Return the 4-neighbours of *pos* that are present in the global grid."""
    x, y = pos
    candidates = ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1))
    return [cell for cell in candidates if cell in grid]
# Parse the maze: record every cell, the '@' start, and key/door positions
# (doors are indexed by their lowercase key letter).
y = 0
for line in infile:
    line = line.strip()
    x = 0
    for c in line:
        grid[(x, y)] = c
        if c == "@":
            start = (x, y)
        if "a" <= c <= "z":
            keys[c] = (x, y)
        if "A" <= c <= "Z":
            doors[c.lower()] = (x, y)
        x += 1
    y += 1
# Part-2 transformation: wall off the original start and its neighbours,
# leaving four robots on the diagonal cells around it.
for pos in adjacent(start):
    grid[pos] = "#"
grid[start] = "#"
starts = (
    (start[0] + 1, start[1] + 1),
    (start[0] + 1, start[1] - 1),
    (start[0] - 1, start[1] + 1),
    (start[0] - 1, start[1] - 1),
)
def find_accessible_keys(pos, held_keys, distance=1, already_done=None):
    """BFS from *pos* given the keys in *held_keys*.

    Returns (paths, blocking_doors, done): distances to each newly
    reachable key, distances at which locked doors were hit, and the set
    of visited cells. When *already_done* is given, the search resumes
    from previously blocking doors instead of from *pos* (callers pass a
    matching non-default *distance* in that case).
    """
    paths = {}
    blocking_doors = {}
    done = set()
    next_iteration = adjacent(pos)
    if already_done:
        # Resume: restart the frontier at doors inside the visited region.
        done = already_done.copy()
        next_iteration = [doors[x] for x in doors if doors[x] in done]
        for x in next_iteration:
            done.remove(x)
    while next_iteration:
        this_iteration = next_iteration
        next_iteration = []
        for check in this_iteration:
            c = grid[check]
            done.add(check)
            if c == "#":
                continue
            if c.isupper() and c.lower() not in held_keys:
                # Locked door: note where the search was cut off.
                blocking_doors[c.lower()] = distance
                continue
            if (
                c.islower()
                and c not in paths
                and c not in held_keys
            ):
                paths[c] = distance
            next_iteration.extend([x for x in adjacent(check) if x not in done])
        distance += 1
    return (paths, blocking_doors, done)
all_paths = {}
def generate_all_paths(pos, held_keys='', distance=1, done=None, already_blocking=''):
    """Populate all_paths[pos] with every key reachable from *pos*.

    Each entry records the walking distance and the door letters
    ("doors", accumulated in *held_keys*) that must be opened first.
    Recurses through each blocking door, resuming the search from the
    door's position via find_accessible_keys' already_done mechanism.
    """
    if pos not in all_paths:
        all_paths[pos] = {}
    if not done:
        done = set()
    (paths, blocking_doors, done) = find_accessible_keys(pos, held_keys, distance, done)
    if not paths and not blocking_doors:
        return
    for path in paths:
        all_paths[pos][path] = { "distance": paths[path], "doors": held_keys }
    for door in blocking_doors:
        # Skip doors already being explored one level up to avoid loops.
        if door not in already_blocking:
            generate_all_paths(pos, held_keys + door, blocking_doors[door], done, blocking_doors.keys())
def accessible_keys(pos_set, held_keys):
    """Map each key any robot in *pos_set* can currently reach to its distance.

    A key counts as reachable when it is not yet held and every door on its
    precomputed path is opened by a held key.
    """
    reachable = {}
    held = set(held_keys)
    for pos in pos_set:
        for key, info in all_paths[pos].items():
            if key in held_keys:
                continue
            if set(info["doors"]) <= held:
                reachable[key] = info["distance"]
    return reachable
def leaf_length(pos_set, path):
    """Total distance of collecting keys in the exact order given by *path*.

    NOTE(review): `pos` is assigned but never used, and pos_set is never
    advanced between steps -- distances are always measured from the
    original positions. This helper appears unused by the solver below.
    """
    length = 0
    held = ""
    for k in path:
        acc = accessible_keys(pos_set, held)
        length += acc[k]
        pos = keys[k]
        held += k
    return length
leaf_lengths = {}
explore_from_cache = {}
def next_pos_set(pos_set, key):
    """Return a copy of the 4-robot *pos_set* with the robot able to reach
    *key* moved onto that key's cell; the other robots stay put."""
    target = keys[key]
    updated = [None] * 4
    for i in range(4):
        robot = pos_set[i]
        updated[i] = target if key in all_paths[robot] else robot
    return tuple(updated)
def explore_from(pos_set, path):
    """Shortest total distance (and the key order achieving it) needed to
    collect all remaining keys, starting from *pos_set* having already
    collected the keys in *path*.

    Memoised on (positions, sorted held-key string) in ``explore_from_cache``
    -- the order keys were collected in does not matter, only the set.
    """
    held = "".join([x for x in sorted(path)])
    cache_key = (pos_set, held)
    if cache_key in explore_from_cache:
        return explore_from_cache[cache_key]
    options = accessible_keys(pos_set, path)
    if not options:
        return (0, "")  # every key collected: nothing left to walk
    dists = {}
    paths = {}
    for option in options:
        nextset = next_pos_set(pos_set, option)
        (d, p) = explore_from(nextset, path + option)
        dists[option] = d + options[option]
        paths[option] = option + p
    # ``operator`` must be imported earlier in the file -- TODO confirm
    shortest = min(dists.items(), key=operator.itemgetter(1))[0]
    ret = (dists[shortest], paths[shortest])
    explore_from_cache[cache_key] = ret
    return ret
# Pre-compute reachability from every start position and from every key tile,
# then search for the cheapest order in which to collect all keys.
# ``starts`` and ``keys`` are built earlier in the file -- not visible here.
for start in starts:
    generate_all_paths(start)
for key in keys:
    generate_all_paths(keys[key])
(m, p) = explore_from(starts, "")
print(f"Length: {m}")
print(f"Path: {p}")
| true |
bd3f0ba50979561e3e16ccca054d2eee39937699 | Python | ryohare/threatsims-june-ctf-2020 | /networking/in_net.py | UTF-8 | 721 | 3.078125 | 3 | [] | no_license | import socket,struct
import ipaddress
def addressInNetwork(ip, net):
    """Return True iff IPv4 address *ip* (e.g. "10.0.0.5") lies inside the
    CIDR network *net* (e.g. "10.0.0.0/8").

    Fixes: the previous bit-twiddling version used the Python 2 ``2L`` long
    literal (a syntax error on Python 3) and compared ``ipaddr & netmask``
    against the mask itself instead of against the masked network address.
    Delegating to the stdlib ``ipaddress`` module (already imported by this
    file) is both correct and portable; ``strict=False`` tolerates host bits
    set in *net*.
    """
    return ipaddress.ip_address(ip) in ipaddress.ip_network(net, strict=False)
# Read "ip,network" pairs from ipaddress.txt and report which addresses fall
# inside their paired network.
with open('ipaddress.txt', 'r') as f:
    pairs = f.readlines()

for p in pairs:
    print(p.strip())
    parts = p.split(',')
    parts[1] = parts[1].strip()
    print("IP: {}".format(parts[0]))
    print("Net: {}".format(parts[1]))
    # Fix: Python 3 str is already unicode; the old ``unicode(..., "utf-8")``
    # calls only existed on Python 2 and raised NameError on Python 3.
    if ipaddress.ip_address(parts[0]) in ipaddress.ip_network(parts[1]):
        print("in net: {} in {}".format(parts[0], parts[1]))
| true |
7f45354a1912c74bef2996ce57df55556aee7922 | Python | bx-lr/android_static_dynamic_apk_test | /stats.py | UTF-8 | 2,718 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
import os
import sys
import sqlite3 as lite
def showformat(recs, sept = ('-' * 40)):
    """Pretty-print a list of record dicts, one "key => value" line per field,
    with *sept* printed between records.

    Fixes: the key-column width was computed as
    ``max(len(key) for key in recs)`` -- iterating the *list of records*, so
    ``len`` counted each dict's number of keys instead of measuring key
    strings -- and it was recomputed on every inner-loop pass.  It is now
    taken from the current record and hoisted out of the inner loop.  Prints
    use the function form so the code runs on both Python 2 and 3.
    """
    print('%d records' % len(recs))
    print(sept)
    for rec in recs:
        # Width of the longest key name in this record, so the "=>" line up.
        maxkey = max(len(key) for key in rec) if rec else 0
        for key in rec:
            print('%-*s => %s' % (maxkey, key, rec[key]))
        print(sept)
def makedicts(cursor, query, params=()):
    """Execute *query* on the DB-API *cursor* and return the result rows as a
    list of {column name: value} dicts, using cursor.description for names.
    """
    cursor.execute(query, params)
    names = [description[0] for description in cursor.description]
    return [dict(zip(names, values)) for values in cursor.fetchall()]
def dumpdb(cursor, table, format=True):
    """Dump every row of *table*: formatted via showformat() by default, or
    as raw tuples (one per line) when *format* is false.
    """
    if format:
        showformat(makedicts(cursor, 'select * from ' + table))
    else:
        cursor.execute('select * from ' + table)
        # fetchone() returns None once the result set is exhausted.
        for rec in iter(cursor.fetchone, None):
            print(rec)
def help():
    """Print usage information.  (Shadows the builtin ``help`` -- the name is
    kept so the existing call in the main block keeps working.)"""
    print('stats.py /path/to/database/directory/')
if __name__ == "__main__":
if len(sys.argv) != 2:
help()
sys.exit()
dirlist = os.listdir(sys.argv[-1])
vulndb = []
dfound = 0
sfound = 0
checkdb = 0
pcpdb = []
for f in dirlist:
if not f.endswith('db'):
continue
checkdb += 1
conn = lite.Connection(sys.argv[-1]+f)
curs = conn.cursor()
recs = makedicts(curs, 'select * from apk_table')
a = sys.argv[-1]+f
do_report = 0
for rec in recs:
if rec['is_test'] == 1:
if rec['category'].find('vulnerability test') > -1:
if not rec['test_result'].find('Not Vulnerable') > -1:
if rec['name'].find('vulndisco') > -1 and rec['name'].find('dynamic') > -1:
dfound += 1
if rec['name'].find('vulndisco') > -1 and rec['name'].find('static') > -1:
sfound += 1
if a:
vulndb.append(a)
print "\n", a
print '\t' + rec['name'] + ' ' + rec['test_result']
a = None
do_report = 1
else:
do_report = 1
print '\t' + rec['name'] + ' ' + rec['test_result']
if rec['category'].find('pcp test') > -1:
if rec['test_result'] == "True":
do_report = 1
pcpdb.append(sys.argv[-1]+f)
if a:
print "\n", a
print '\t' + rec['name'] + ' ' + rec['test_result']
a = None
else:
print '\t' + rec['name'] + ' ' + rec['test_result']
if rec['name'].find('privacy_yara_static') > -1 and do_report:
v = 0.0
tmp = rec['test_result'].split('\\n')
for t in tmp:
p = t[t.find("weight=\""):]
p = p.split(",")[0].replace("weight=\"", "").replace("\"", "")
if len(p) > 1:
v += float(p)
print "\tPrivacy Score:", v
print "\nFound %d vulnerable apk's out of %d successfully parsed files" % (len(vulndb)+1, checkdb)
print "\tConfirmed: %d" % (dfound)
print "\tUnconfirmed: %d" % (sfound - dfound)
print "\nFound %d apk's with Poor Coding Pracitces" % (len(pcpdb))
| true |
41055df919ec471401a6deab0cc01efd1ae2adea | Python | DomChey/FoML | /AML_Project/preprocessing.py | UTF-8 | 3,844 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 12:52:25 2018
@author: Manuel
"""
import numpy as np
from skimage import io, color
import os
from tqdm import tqdm
from imgCrop import cutIntoPieces, createPieces
from accessory import Orientations
from compatibility import compatibility, slices
np.random.seed(100)
def createTrainingData(file):
    """Build balanced positive/negative puzzle-piece pairings from one image.

    The image is cut into 28x28 tiles (11x16 grid plus borders -> 204 pieces
    total -- TODO confirm against createPieces()), a pairwise compatibility
    matrix is computed for all 4 orientations, and for each (piece,
    orientation) the best-matching candidate is labelled positive only when
    it is the true neighbour; runners-up become negatives.  Each instance is
    the stack of the 4 boundary slices returned by slices().
    """
    # Create training data from one single image file which is split into
    # 12 x 17 tiles.
    # Returns several positive and negative instances from this image
    pieceList = createPieces(file, 28, 28, 11, 16)
    compMat = np.ones((204,204,4))
    for i,pi in enumerate(pieceList):
        for k,pj in enumerate(pieceList): #[x for j,x in enumerate(pieceList) if j!=i]):
            for s,orientation in enumerate(Orientations):
                compMat[i,k,s] = compatibility(pi.data,pj.data,orientation)
    posLabeled = []
    negLabeled = []
    for i,pi in enumerate(pieceList):
        for s,orientation in enumerate(Orientations):
            # Best candidate (idx) and runner-up (idx2), skipping the piece itself.
            sorting = np.argsort(compMat[i,:,s])
            if sorting[-1] == i:
                idx = sorting[-2]
                idx2 = sorting[-3]
            else:
                idx = sorting[-1]
                idx2 = sorting[-2]
            if orientation == Orientations.up:
                data = pi.trueNeighborUp
            elif orientation == Orientations.down:
                data = pi.trueNeighborDown
            elif orientation == Orientations.left:
                data = pi.trueNeighborLeft
            elif orientation == Orientations.right:
                data = pi.trueNeighborRight
            if data != [] and (np.equal(pieceList[idx].data, data)).all():
                #print("The most compatible piece is the true neighbor")
                s1, s2, s3, s4 = slices(pi.data, pieceList[idx].data, orientation)
                posLabeled.append( np.stack((s2,s1,s3,s4), axis=1) )
                s1, s2, s3, s4 = slices(pi.data, pieceList[idx2].data, orientation)
                negLabeled.append( np.stack((s2,s1,s3,s4), axis=1) )
            else:
                #print("The most compatible piece is not the true neighbor")
                s1, s2, s3, s4 = slices(pi.data, pieceList[idx2].data, orientation)
                negLabeled.append( np.stack((s2,s1,s3,s4), axis=1) )
    negLabeled, posLabeled = np.array(negLabeled), np.array(posLabeled)
    #reduce the negativeFeatures for a balanced set
    randomMask = np.random.choice(negLabeled.shape[0],posLabeled.shape[0], replace = False)
    negLabeled = negLabeled[randomMask,:,:,:]
    return posLabeled, negLabeled
def scanImagesForTraining(rootdir):
    """Walk *rootdir* and extract neural-net features from every image.

    Each instance is a 28x4x3 matrix produced by createTrainingData().
    Returns (positiveFeatures, negativeFeatures) as plain lists.

    Fix: file paths were built with ``root + image``, which silently breaks
    for any *rootdir* without a trailing separator; ``os.path.join`` handles
    both cases.
    """
    positiveFeatures = []
    negativeFeatures = []
    for root, dirs, files in os.walk(rootdir):
        for image in tqdm(files):
            posLabeled, negLabeled = createTrainingData(os.path.join(root, image))
            positiveFeatures.extend(posLabeled)
            negativeFeatures.extend(negLabeled)
    return positiveFeatures, negativeFeatures
if __name__ == "__main__":
# Get the features from all the extracted images
positiveFeatures, negativeFeatures = scanImagesForTraining("extractedImages/")
positiveFeatures, negativeFeatures = np.array(positiveFeatures), np.array(negativeFeatures)
import gzip
f = gzip.GzipFile("positiveFeatures.npy.gz", "w")
np.save(file = f, arr=positiveFeatures)
f.close()
f = gzip.GzipFile("negativeFeatures.npy.gz", "w")
np.save(file = f, arr=negativeFeatures)
f.close()
# To load the arrays:
# f = gzip.GzipFile("positiveFeatures.npy.gz", "r")
# array = np.load(f)
| true |
8132c0034b21abf5bd67c19ec8b72f5a8e372730 | Python | Aasthaengg/IBMdataset | /Python_codes/p03471/s217393978.py | UTF-8 | 271 | 3.296875 | 3 | [] | no_license | #ABC_085_C_Otoshidama.py
# AtCoder ABC 085 C (Otoshidama): find counts of 10000/5000/1000-yen bills
# summing to N bills worth exactly Y yen; print a solution or "-1 -1 -1".
N, Y = map(int, input().split())
answer = (-1, -1, -1)
for tens in range(N + 1):               # number of 10000-yen bills
    for fives in range(N + 1 - tens):   # number of 5000-yen bills
        ones = N - tens - fives         # number of 1000-yen bills
        if 10000 * tens + 5000 * fives + 1000 * ones == Y:
            answer = (tens, fives, ones)
print(*answer)
2846d7f1e2427ddc73c46290427876027f4671a5 | Python | Jiacli/NLP-QA | /code/siyu/ask.py | UTF-8 | 2,030 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import os
import re
import string
import generateQuestion
# gloabl control variables
verbose = True
os.environ['STANFORD_PARSER'] = '/Users/sirrie/Desktop/11611/project/jars'
os.environ['STANFORD_MODELS'] = '/Users/sirrie/Desktop/11611/project/jars'
# main routine
def main(args):
    """Entry point: args is sys.argv; args[1] is the article file path.
    NOTE(review): args[2] (the questions path accepted at startup) and the
    ``questions`` return value are currently unused.
    """
    # read article, return a list of valid sentences with
    # certain length (50<length<300) and upper-case letter
    # starting.
    sentences = read_article(args[1])
    questions = ask(sentences)
def ask(sentences):
    """Generate one question per sentence via generateQuestion; collect and
    return the non-empty results, printing each with a running counter.
    """
    # generate questions based on valid sentence list
    q_list = []
    counter = 0
    for sentence in sentences:
        rst = generateQuestion.generateEasyQuestion(sentence)
        #rst = generateQuestion.generateWho(sentence)
        if len(rst) > 0:
            counter += 1
            q_list.append(rst)
            print counter, " , ", rst
    return q_list
# article reading control parameters
valid_sents_length_lo = 50   # minimum accepted sentence length (exclusive)
valid_sents_length_hi = 300  # maximum accepted sentence length (exclusive)
def read_article(filename):
    """Split *filename* into candidate sentences on '.', keeping only those
    between the length bounds above that start with an upper-case letter.
    NOTE(review): the loop variable ``str`` shadows the builtin, and
    ``len(line) == 0`` can only be true at EOF since readlines() keeps '\n'.
    """
    sentences = []
    with open(filename) as f:
        for line in f.readlines():
            # for empty lines
            if len(line) == 0:
                continue
            # check invalid line e.g., without any punctuation,
            # maybe it is just a title or something else
            if line.count('.') == 0:
                continue
            sents = line.strip().split('.')
            for str in sents:
                s = str.strip()
                # validation rules
                if len(s) > valid_sents_length_lo \
                    and len(s) < valid_sents_length_hi \
                    and (s[0] in string.ascii_uppercase):
                    sentences.append(s)
    # debug
    if verbose:
        i = 0
        for sent in sentences:
            print i, sent
            i += 1
    return sentences
if __name__ == '__main__':
    # Expect exactly two CLI arguments: the article file and an output name.
    if len(sys.argv) != 3:
        print 'Usage: ./ask article.txt questions'
        exit(-1)
    main(sys.argv)
844c6eb7cb3295704307fe45dba56ffe8e777aed | Python | yoshd/LanguageProcessing100 | /LanguageProcessing100/chapter3/knock20.py | UTF-8 | 441 | 2.9375 | 3 | [] | no_license | import json
if __name__ == "__main__":
output_texts = []
with open("jawiki-country.json") as file:
line = file.readline()
while line:
wiki_json = json.loads(line)
if wiki_json["title"] == "イギリス":
output_texts.append(wiki_json["text"])
line = file.readline()
with open("british_articles.txt", "w") as file:
file.writelines(output_texts)
| true |
415a953e7988f7607e245f952f1af24008db5cfc | Python | Cecilia520/algorithmic-learning-leetcode | /cecilia-python/tree-graph/graph/IsGraphBipartite.py | UTF-8 | 2,869 | 4.15625 | 4 | [] | no_license | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : IsGraphBipartite.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/3/9 22:03 cecilia 1.0 是否是二分图
问题描述:
给定一个无向图graph,当这个图为二分图时返回true。
如果我们能将一个图的节点集合分割成两个独立的子集A和B,并使图中的每一条边的两个节点一个来自A集合,一个来自B集合,我们就将这个图称为二分图。
graph将会以邻接表方式给出,graph[i]表示图中与节点i相连的所有节点。每个节点都是一个在0到graph.length-1之间的整数。这图中没有自环和平行边: graph[i] 中不存在i,并且graph[i]中没有重复的值。
示例 1:
输入: [[1,3], [0,2], [1,3], [0,2]]
输出: true
解释:
无向图如下:
0----1
| |
| |
3----2
我们可以将节点分成两组: {0, 2} 和 {1, 3}。
示例 2:
输入: [[1,2,3], [0,2], [0,1,3], [0,2]]
输出: false
解释:
无向图如下:
0----1
| \ |
| \ |
3----2
我们不能将节点分割成两个独立的子集。
注意:
graph 的长度范围为 [1, 100]。
graph[i] 中的元素的范围为 [0, graph.length - 1]。
graph[i] 不会包含 i 或者有重复的值。
图是无向的: 如果j 在 graph[i]里边, 那么 i 也会在 graph[j]里边。
算法分析:
1. 使用颜色数组记录每一个结点的颜色,颜色可以是0,1,也可以是未着色null或者-1;
2. 贪心思想给图着色。搜索节点时,需要考虑图是非连通的情况。对每个未着色节点,从该节点开始深度优先搜索着色。每个邻接点都可以通过当前节点着相反的颜色。如果存在当前点和邻接点颜色相同,则着色失败。
3.使用栈完成深度优先搜索,存储着下一个要访问节点的顺序。在 graph[node] 中,对每个未着色邻接点,着色该节点并将其放入到栈中。
"""
def isBipartite(graph):
    """Return True if the undirected graph (given as adjacency lists, nodes
    numbered 0..len(graph)-1) is bipartite.

    Greedy two-colouring via iterative depth-first search, restarted from
    every uncoloured node so disconnected graphs are handled.
    Time O(N + E), space O(N).
    """
    color = {}  # node -> 0 or 1
    for start in range(len(graph)):
        if start in color:
            continue
        color[start] = 0
        stack = [start]
        while stack:
            u = stack.pop()
            for v in graph[u]:
                if v in color:
                    # Same colour on both ends of an edge: not bipartite.
                    if color[v] == color[u]:
                        return False
                else:
                    color[v] = color[u] ^ 1  # opposite colour of neighbour
                    stack.append(v)
    return True
if __name__ == '__main__':
    # Sample 2 from the problem statement -- expected output: False.
    print(isBipartite([[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]]))
| true |
2a5051564d2334ae425a70b1b79b2744ac708ad3 | Python | UMD-ENEE408I/ENEE408I_Spring_2021_Team_4 | /pose-recognition/svm_train.py | UTF-8 | 1,140 | 2.59375 | 3 | [] | no_license | import cv2
import argparse
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn import svm
# Parse command-line arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--features", required=True,
	help="path to serialized db of sample pose features")
ap.add_argument("-c", "--classifier", required=True,
	help="path to output model trained to recognize poses")
ap.add_argument("-l", "--le", required=True,
	help="path to output label encoder")
args = vars(ap.parse_args())

# load the pose features
# SECURITY NOTE: pickle.loads executes arbitrary code from the file; only
# load feature files you produced yourself.
print("[INFO] loading sample pose features...")
with open(args["features"], "rb") as fh:
    data = pickle.loads(fh.read())

# encode the string labels as integers
print("[INFO] encoding labels...")
le = LabelEncoder()
labels = le.fit_transform(data["labels"])
features = data["features"]

# Train a linear SVM on the extracted pose features.
# (The unused width/height/dim variables from the original were removed,
# and the output files are now opened with context managers so the handles
# are closed even on error.)
print("[INFO] training model...")
clf = svm.SVC(kernel="linear", probability=True) # classifier
clf.fit(features, labels)

# write the actual pose recognition model to disk
with open(args["classifier"], "wb") as fh:
    fh.write(pickle.dumps(clf))

# write the label encoder to disk
with open(args["le"], "wb") as fh:
    fh.write(pickle.dumps(le))
| true |
930dea042d5a37847c16e13b9a706522ea933b6b | Python | DigDug101/FontMatching | /CNNTrain.py | UTF-8 | 5,516 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 21:33:37 2019
@author: Sathya Bhat
"""
import numpy as np
from tensorflow.keras.applications import VGG16
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
#import os, shutil
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
# ---- configuration -------------------------------------------------------
img_width = 224
img_height = 224
train_dir = './dataset/train'
train_size = 920        # NOTE(review): never used below
val_dir = './dataset/validation'
val_size = 90           # NOTE(review): never used below
no_classes = 10
feature_count = 512     # NOTE(review): never used below
# VGG16 convolutional base, pretrained on ImageNet, without its classifier head.
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(img_width, img_height, 3))
# Freeze the layers except the last 4 layers
for layer in conv_base.layers[:-4]:
    layer.trainable = False
# Check the trainable status of the individual layers
#for layer in conv_base.layers:
#    print(layer, layer.trainable)
# Create the model
model = models.Sequential()
# Add the vgg convolutional base model
model.add(conv_base)
# Add new layers
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(no_classes, activation='softmax'))
# Show a summary of the model. Check the number of trainable parameters
model.summary()
# Augment the training data; validation images are only rescaled.
train_datagen = ImageDataGenerator(
      rescale=1./255,
      rotation_range=20,
      width_shift_range=0.2,
      height_shift_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
# Change the batchsize according to your system RAM
train_batchsize = 5
val_batchsize = 5
train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(img_width, img_height),
        batch_size=train_batchsize,
        class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
        val_dir,
        target_size=(img_width, img_height),
        batch_size=val_batchsize,
        class_mode='categorical',
        shuffle=False)
# Compile the model
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])
# Train the model
# NOTE(review): fit_generator is deprecated in modern Keras (use fit), and
# samples/batch_size is a float here -- TODO confirm the installed version
# accepts non-integer steps.
history = model.fit_generator(
      train_generator,
      steps_per_epoch=train_generator.samples/train_generator.batch_size ,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=validation_generator.samples/validation_generator.batch_size,
      verbose=1)
# Save the model
# NOTE(review): the filename says "resnet" but the base model is VGG16.
model.save('resnet_last4.h5')
# Plot training curves.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Create a generator for prediction
validation_generator = validation_datagen.flow_from_directory(
        val_dir,
        target_size=(img_width, img_height),
        batch_size=val_batchsize,
        class_mode='categorical',
        shuffle=False)
# Get the filenames from the generator
fnames = validation_generator.filenames
# Get the ground truth from generator
ground_truth = validation_generator.classes
# Get the label to class mapping from the generator
label2index = validation_generator.class_indices
# Getting the mapping from class index to class label
idx2label = dict((v,k) for k,v in label2index.items())
# Get the predictions from the model using the generator
predictions = model.predict_generator(validation_generator, steps=validation_generator.samples/validation_generator.batch_size,verbose=1)
predicted_classes = np.argmax(predictions,axis=1)
errors = np.where(predicted_classes != ground_truth)[0]
print("No of errors = {}/{}".format(len(errors),validation_generator.samples))
correct = np.where(predicted_classes == ground_truth)[0]
print("Correct detections = {}/{}".format(len(correct),validation_generator.samples))
# Show the errors
for i in range(len(errors)):
    pred_class = np.argmax(predictions[errors[i]])
    pred_label = idx2label[pred_class]
    title = 'Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
        fnames[errors[i]].split('/')[0],
        pred_label,
        predictions[errors[i]][pred_class])
    original = image.load_img('{}/{}'.format(val_dir,fnames[errors[i]]))
    plt.figure(figsize=[7,7])
    plt.axis('off')
    plt.title(title)
    plt.imshow(original)
    plt.show()
# Show the correct detections the same way.
for i in range(len(correct)):
    pred_class = np.argmax(predictions[correct[i]])
    pred_label = idx2label[pred_class]
    title = 'Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
        fnames[correct[i]].split('/')[0],
        pred_label,
        predictions[correct[i]][pred_class])
    original = image.load_img('{}/{}'.format(val_dir,fnames[correct[i]]))
    plt.figure(figsize=[7,7])
    plt.axis('off')
    plt.title(title)
    plt.imshow(original)
    plt.show()
33a468d27415a7a28742e5b1a7fd6dbe067057a9 | Python | karthikg92/gnn-robust-image-classification | /baselines/GNN/img2graph.py | UTF-8 | 7,807 | 3.140625 | 3 | [] | no_license | import numpy as np
from scipy import sparse
###############################################################################
# From an image to a graph
def _make_edges(n_x, n_y):
    """
    Returns a list of edges and edge weights for a 2D image with 8-connectivity.

    Fix: ``edges_diag_down_left`` was a copy-paste duplicate of
    ``edges_diag_up_right`` (contradicting its own inline comment), so the
    down-left diagonal edges were wrong; the endpoint order is now reversed
    as the (1, -1) weight requires.

    Parameters
    ----------
    n_x : int
        The size of the grid in the x direction.
    n_y : int
        The size of the grid in the y direction.

    Returns
    -------
    edges : np.ndarray of shape (2, num_edges)
        Pairs of flat vertex indices (from-row, to-row).
    edge_weights : np.ndarray of shape (num_edges, 2)
        Per-edge (x_j - x_i, y_j - y_i) offsets.
    """
    vertices = np.arange(n_x * n_y).reshape((n_x, n_y))
    # vertices [[0, 1, 2, ..., 27],
    #           [28,29,30,..., 55],
    #           [           ]]
    ########### edges ###########
    edges_right = np.vstack((vertices[:, :-1].ravel(),
                             vertices[:, 1:].ravel()))   # edges: ((0,1), (1,2), ...)
    edges_left = np.vstack((vertices[:, 1:].ravel(),
                            vertices[:, :-1].ravel()))   # edges: ((1,0), (2,1), ...)
    edges_down = np.vstack((vertices[:-1].ravel(),
                            vertices[1:].ravel()))       # edges: ((0,28), (1,29), ...)
    edges_up = np.vstack((vertices[1:].ravel(),
                          vertices[:-1].ravel()))        # edges: ((28,0), (29,1), ...)
    edges_diag_down_right = np.vstack((vertices[:-1, :-1].ravel(),
                                       vertices[1:, 1:].ravel()))   # edges: ((0,29), (1,30), ...)
    edges_diag_up_left = np.vstack((vertices[1:, 1:].ravel(),
                                    vertices[:-1, :-1].ravel()))    # edges: ((29,0), (30,1), ...)
    edges_diag_up_right = np.vstack((vertices[1:, :-1].ravel(),
                                     vertices[:-1, 1:].ravel()))    # edges: ((28,1), (29,2), ...)
    edges_diag_down_left = np.vstack((vertices[:-1, 1:].ravel(),
                                      vertices[1:, :-1].ravel()))   # edges: ((1,28), (2,29), ...)
    #################################
    ########### weights ###########
    # edge weights ((x_j-x_i, y_j-y_i))
    weights_right = np.array([[0, 1]] * edges_right.shape[1])                  # ((0,1),(0,1),...)
    weights_left = np.array([[0, -1]] * edges_left.shape[1])                   # ((0,-1),(0,-1),...)
    weights_down = np.array([[1, 0]] * edges_down.shape[1])                    # ((1,0),(1,0),...)
    weights_up = np.array([[-1, 0]] * edges_up.shape[1])                       # ((-1,0),(-1,0),...)
    weights_diag_down_right = np.array([[1, 1]] * edges_diag_down_right.shape[1])   # ((1,1), (1,1),...)
    weights_diag_up_left = np.array([[-1, -1]] * edges_diag_up_left.shape[1])       # ((-1,-1),(-1,-1),...)
    weights_diag_up_right = np.array([[-1, 1]] * edges_diag_up_right.shape[1])      # ((-1,1), (-1,1),...)
    weights_diag_down_left = np.array([[1, -1]] * edges_diag_down_left.shape[1])    # ((1,-1), (1,-1),...)
    #################################
    # shape [2, num_edges]
    edges = np.hstack((edges_right, edges_left, edges_down, edges_up,
                       edges_diag_down_right, edges_diag_up_left,
                       edges_diag_up_right, edges_diag_down_left))
    # shape [num_edges, num_edge_feats(2)]
    edge_weights = np.vstack((weights_right, weights_left, weights_down, weights_up,
                              weights_diag_down_right, weights_diag_up_left,
                              weights_diag_up_right, weights_diag_down_left))
    return edges, edge_weights
# not used (keeping here just in case it is required)
def _compute_gradient_3d(edges, img):
    """Absolute intensity difference between the two endpoints of each edge.

    *edges* holds flat voxel indices into the 3-D *img* of shape
    (n_x, n_y, n_z); index order follows C-contiguous ravelling.
    """
    _, n_y, n_z = img.shape
    plane = n_y * n_z

    def unflatten(flat):
        # flat index -> (x, y, z) coordinate arrays
        return flat // plane, (flat % plane) // n_z, (flat % plane) % n_z

    return np.abs(img[unflatten(edges[0])] - img[unflatten(edges[1])])
def _mask_edges_weights(mask, edges, edge_weights=None):
    """
    Apply a mask to edges (weighted or not): drop every edge touching a
    masked-out vertex, then renumber the surviving vertices densely.

    Fix: the masked weights were computed into a local ``weights`` variable
    but the *unmasked* ``edge_weights`` array was returned, leaving weights
    misaligned (and differently sized) relative to the filtered edges.

    Parameters
    ----------
    mask: np.ndarray
        binary matrix with zeros at coords to remove,
        shape (img_height, img_width)
    edges: np.ndarray
        edges matrix of shape [2, num_edges]
    edge_weights: np.ndarray, optional
        weights matrix of shape [num_edges, num_edge_feats]
    """
    inds = np.arange(mask.size)
    inds = inds[mask.ravel()]
    # Keep only edges whose BOTH endpoints survive the mask.
    ind_mask = np.logical_and(np.in1d(edges[0], inds),
                              np.in1d(edges[1], inds))
    edges = edges[:, ind_mask]
    if edge_weights is not None:
        edge_weights = edge_weights[ind_mask]
    if len(edges.ravel()):
        maxval = edges.max()
    else:
        maxval = 0
    # Compress the surviving vertex ids to 0..k-1.
    order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
    edges = order[edges]
    if edge_weights is None:
        return edges
    else:
        return edges, edge_weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
              return_as=sparse.coo_matrix, dtype=None):
    """Auxiliary function for img_to_graph and grid_to_graph.

    Builds a sparse adjacency matrix whose off-diagonal entries are the 2-D
    edge weights from _make_edges() and whose diagonal holds pixel values.

    NOTE(review): ``n_z`` is accepted but never used (_make_edges is 2-D
    only).  When called with ``img=None`` (the grid_to_graph path) both
    ``img.dtype`` and ``img.ravel()`` raise AttributeError -- TODO confirm
    intended behaviour.  Also, each ``weights`` row is a 2-vector while
    ``diag`` entries are scalars, so the np.hstack below mixes shapes --
    verify against the callers.
    """
    edges, weights = _make_edges(n_x, n_y)
    if dtype is None:
        dtype = img.dtype
    if mask is not None:
        edges, weights = _mask_edges_weights(mask, edges, weights)
        diag = img.squeeze()[mask]
    else:
        diag = img.ravel()
    n_voxels = diag.size
    diag_idx = np.arange(n_voxels)
    # Symmetrise: emit each edge in both directions, plus the diagonal.
    i_idx = np.hstack((edges[0], edges[1]))
    j_idx = np.hstack((edges[1], edges[0]))
    graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
                              (np.hstack((i_idx, diag_idx)),
                               np.hstack((j_idx, diag_idx)))),
                              (n_voxels, n_voxels),
                              dtype=dtype)
    if return_as is np.ndarray:
        return graph.toarray()
    return return_as(graph)
def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
    """
    Graph of the pixel-to-pixel gradient connections.
    Edges are weighted with the gradient values.

    Fixes: ``np.atleast_3d`` returns a 3-D array, so unpacking its shape into
    two values always raised ValueError; and ``_to_graph`` was called
    positionally, which shifted *mask* into the ``n_z`` slot (and *img* into
    ``mask``).  The helper is now called with explicit keywords.

    Parameters
    ----------
    img : ndarray of shape (height, width) or (height, width, channel)
        2D or 3D image.
    mask : ndarray of shape (height, width) or \
            (height, width, channel), dtype=bool, default=None
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, \
            default=sparse.coo_matrix
        The class to use to build the returned adjacency matrix.
    dtype : dtype, default=None
        Data type of the returned matrix; defaults to ``img.dtype``.
    """
    img = np.atleast_3d(img)
    n_x, n_y, _ = img.shape
    return _to_graph(n_x, n_y, 1, mask=mask, img=img,
                     return_as=return_as, dtype=dtype)
def grid_to_graph(n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix,
                  dtype=int):
    """Graph of the pixel-to-pixel connections
    Edges exist if 2 voxels are connected.

    NOTE(review): this forwards to _to_graph() with ``img`` left at None,
    and _to_graph unconditionally reads ``img.ravel()`` (or ``img.dtype``
    when *dtype* is None), so this entry point raises AttributeError as
    written -- TODO confirm against the upstream scikit-learn version this
    was adapted from.

    Parameters
    ----------
    n_x : int
        Dimension in x axis
    n_y : int
        Dimension in y axis
    n_z : int, default=1
        Dimension in z axis
    mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, \
            default=sparse.coo_matrix
        The class to use to build the returned adjacency matrix.
    dtype : dtype, default=int
        The data of the returned sparse matrix. By default it is int
    Notes
    -----
    For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
    handled by returning a dense np.matrix instance. Going forward, np.ndarray
    returns an np.ndarray, as expected.
    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
                     dtype=dtype)
| true |
1aab6437150ef83551e8dd53bfad3b992d8798ec | Python | ramitdour/openCV_test | /opencvTest19.py | UTF-8 | 988 | 2.8125 | 3 | [] | no_license | #https://www.youtube.com/watch?v=aDY4aBLFOIg&list=PLS1QulWo1RIa7D1O6skqDQ-JZ1GGHKK-K&index=21
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
#img = cv.imread('opencv-logo.png',-1)
# Load the sample image as grayscale (flag 0).
img = cv.imread('data/sudoku.png',0)
#Laplacian Gradient
lap = cv.Laplacian(img , cv.CV_64F ,ksize = 3)
lap = np.uint8(np.absolute(lap))  # back to displayable 8-bit magnitudes
#sobel
sobelX = cv.Sobel(img , cv.CV_64F , 1 , 0,ksize = 3)  # d/dx
sobelY = cv.Sobel(img , cv.CV_64F , 0 , 1,ksize = 3)  # d/dy
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
sobelCombined = cv.bitwise_or(sobelX,sobelY)
titles = ['image' ,'Laplacian' ,'sobelX' , 'sobelY' ,'sobelCombined']
images = [img ,lap ,sobelX , sobelY,sobelCombined]
# image count -> (rows, cols) subplot grid layout
dictgrid = {1:(1,1) , 2:(1,2) , 3:(1,3), 4:(2,2), 5:(2,3), 6:(2,3), 7:(4,2) ,8:(4,2), 9:(3,3), 10:(3,4)}
l = len(titles)
for i in range(l):
    plt.subplot(*dictgrid[l],i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
| true |
4a5997eab530fe170fd54fe156ea2bc83759c9f8 | Python | aak-1/Journey-with-Python | /Loops.py | UTF-8 | 192 | 3.796875 | 4 | [] | no_license | if __name__ == '__main__':
n = int(input())
for i in range (n):
print(i**2)
"""
Read an integer . For all non-negative integers , print square. See the sample for details.
""" | true |
d69d177a74dcbb26846afe0588a29b4585750cfe | Python | 15013412747/test_my | /qietu.py | UTF-8 | 4,543 | 2.59375 | 3 | [] | no_license | import cv2
import math
import os
# noinspection PyUnresolvedReferences
import numpy as np
from PIL import Image
from pathlib import Path
Image.MAX_IMAGE_PIXELS = None  # disable PIL's decompression-bomb size limit
IMAGES_FORMAT = ['.png'] # image extensions to process
# src = input('请输入图片文件路径:')
# print(src)
#
# #list = os.listdir(src)
# dstpath = input('请输入图片输出目录(不输入路径则表示使用源图片所在目录):')
# Root of the source images (one sub-directory per group) and the tile
# output root -- NOTE(review): both are empty, so they must be filled in
# (or os.listdir("") will raise) before running.
input_img_path = ""
output_img_path = ""
def cut_image_total():
    """Tile every image group: iterate the sub-directories of
    ``input_img_path`` and run cut_image() on each one.

    Fix: the old body called ``os.path.join(input_img_path_list, q_dir)`` --
    joining a *list* with a path, a guaranteed TypeError -- and the resulting
    ``img_list`` was never used, so the broken listing has been removed.
    """
    input_img_path_list = os.listdir(input_img_path)
    for q_dir in input_img_path_list:
        cut_image(q_dir)
def cut_image(q_dir):
    """Cut every .png in directory *q_dir* into 1000x1000 tiles (no overlap),
    writing each tile under output_img_path/<q_dir>/<image-stem>/ with a
    "row_col_height_width.png" filename.

    NOTE(review): images are listed from ``q_dir`` directly but read from
    ``os.path.join(input_img_path, q_dir, ...)`` -- these only agree when the
    script runs with input_img_path as the working directory; confirm.
    The outer loop variable ``i`` is shadowed by the row loop below (harmless
    here since it is not used after the shadowing, but fragile).
    """
    image_names = [name for name in os.listdir(q_dir) for item in IMAGES_FORMAT if
                   os.path.splitext(name)[1] == item]
    for i, sample in enumerate(image_names):
        print(sample)
        # file_name = src + '/' + str(sample)
        file_name = os.path.join(input_img_path, q_dir, str(sample))
        # save_path = dstpath + str(sample[:-4]) + '/'
        save_path = os.path.join(output_img_path, q_dir, str(sample[:-4]))
        Path(save_path).mkdir(parents=True, exist_ok=True)
        # block size
        height = 1000
        width = 1000
        # overlap
        over_x = 0
        over_y = 0
        h_val = height - over_x
        w_val = width - over_y
        # Set whether to discard an image that does not meet the size
        mandatory = False
        print(str(file_name))
        img = cv2.imread(file_name)
        # print(img.shape)
        # original image size
        original_height = img.shape[0]
        original_width = img.shape[1]
        # max_row = float((original_height - height) / h_val)  # + 1
        # max_col = float((original_width - width) / w_val)  # + 1
        max_row = float(original_height / h_val)  # + 1
        max_col = float(original_width / w_val)  # + 1
        # block number
        max_row = math.ceil(max_row) if mandatory == False else math.floor(max_row)
        max_col = math.ceil(max_col) if mandatory == False else math.floor(max_col)
        print(max_row)
        print(max_col)
        images = []
        for i in range(max_row):
            images_temp = []
            for j in range(max_col):
                temp_path = save_path + '/' + str(i) + '_' + str(j) + '_'
                if ((width + j * w_val) > original_width and (
                        i * h_val + height) <= original_height):  # Judge the right most incomplete part
                    temp = img[i * h_val:i * h_val + height, j * w_val:original_width, :]
                    temp_path = temp_path + str(temp.shape[0]) + '_' + str(temp.shape[1]) + '.png'
                    # temp = temp[:, :, 0]
                    cv2.imwrite(temp_path, temp)
                    images_temp.append(temp)
                elif ((height + i * h_val) > original_height and (
                        j * w_val + width) <= original_width):  # Judge the incomplete part at the bottom
                    temp = img[i * h_val:original_height, j * w_val:j * w_val + width, :]
                    temp_path = temp_path + str(temp.shape[0]) + '_' + str(temp.shape[1]) + '.png'
                    # temp = temp[:, :, 0]
                    cv2.imwrite(temp_path, temp)
                    images_temp.append(temp)
                elif ((width + j * w_val) > original_width and (
                        i * h_val + height) > original_height):  # Judge the last slide
                    temp = img[i * h_val:original_height, j * w_val:original_width, :]
                    temp_path = temp_path + str(temp.shape[0]) + '_' + str(temp.shape[1]) + '.png'
                    # temp = temp[:, :, 0]
                    cv2.imwrite(temp_path, temp)
                    images_temp.append(temp)
                else:
                    temp = img[i * h_val:i * h_val + height, j * w_val:j * w_val + width, :]
                    temp_path = temp_path + str(temp.shape[0]) + '_' + str(temp.shape[1]) + '.png'
                    # temp = temp[:, :, 0]
                    cv2.imwrite(temp_path, temp)
                    images_temp.append(temp)  # The rest of the complete
            images.append(images_temp)
        print(len(images))
if "__main__" == __name__:
cut_image_total()
# file_name = "/Users/liuhongyan/xiangmu/data/gjb_bandao.png"#输入图像路径
# save_path = '/Users/liuhongyan/xiangmu/data/small_0/' # 输出图像的路径
# Path(save_path).mkdir(parents=True, exist_ok=True)
| true |
3b194d22a127fe8e38959eac316ed15922260e57 | Python | Jarantym/portfolio | /randomPassw_1.1.py | UTF-8 | 557 | 3.5625 | 4 | [] | no_license | ## list of the randomly generated passwords
from random import randint
def main():
    """Print 10 candidate passwords, each built from four random
    consonant+vowel pairs (8 characters per line)."""
    for _line in range(10):
        for _pair in range(4):
            consonant = randomCharacter("bcdfghjk1mnpqrstvwxz")
            vowel = randomCharacter("aei0uy")
            print(consonant + vowel, end='')
        print()


def randomCharacter(characters):
    """Return one uniformly random character from *characters*."""
    return characters[randint(0, len(characters) - 1)]
main()
## stop
# Windows-only: "pause" keeps the console window open until a key is pressed.
from os import system
system("pause")
| true |
be4f0e4adcaf0b720a670728ed0f2a1b301a0f4d | Python | Zjhao666/CompQA | /src/kangqi/task/compQA/model/module/seq_helper.py | UTF-8 | 8,222 | 2.609375 | 3 | [] | no_license | """
Author: Kangqi Luo
Goal: Define the sequence-related operations.
"""
import tensorflow as tf
from kangqi.util.tf.cosine_sim import cosine_sim
from kangqi.util.tf.ntn import NeuralTensorNetwork
from kangqi.util.LogUtil import LogInfo
def get_merge_function(merge_config, dim_hidden, reuse):
    """
    Judge whether to use cosine similarity, or use a NTN layer

    Returns a callable merge_func(x, y) scoring two (data_size, dim_hidden)
    tensors, selected by merge_config['name']:
      'cosine' -- plain cosine similarity
      'NTN'    -- NeuralTensorNetwork with merge_config['blocks'] slices
      'FC'     -- concat + ReLU FC of width merge_config['dim_fc'] + linear score
    *reuse* is threaded into the TF variable scopes so the function can be
    applied to several inputs sharing weights.  (TF 1.x / tf.contrib code.)
    """
    assert merge_config['name'] in ('cosine', 'NTN', 'FC')
    if merge_config['name'] == 'cosine':
        merge_func = cosine_sim
        LogInfo.logs('Merge function: cosine')
    elif merge_config['name'] == 'NTN':
        ntn = NeuralTensorNetwork(dim_hidden=dim_hidden,
                                  blocks=merge_config['blocks'])
        LogInfo.logs('Merge function: NTN')

        def merge_func(x, y):
            return ntn.forward(x, y, reuse=reuse)
    else:
        def merge_func(x, y):
            concat_input = tf.concat([x, y], axis=-1, name='concat_input')  # (data_size, 2 * dim_hidden)
            fc_hidden = tf.contrib.layers.fully_connected(
                inputs=concat_input,
                num_outputs=merge_config['dim_fc'],
                activation_fn=tf.nn.relu,
                scope='FC',
                reuse=reuse
            )   # (data_size, dim_fc)
            fc_score = tf.contrib.layers.fully_connected(
                inputs=fc_hidden,
                num_outputs=1,
                activation_fn=None,
                scope='FC_final',
                reuse=reuse
            )   # (data_size, 1)
            fc_score = tf.squeeze(fc_score, axis=-1, name='fc_score')
            return fc_score
    return merge_func
def seq_encoding(emb_input, len_input, encoder, fwbw=False, reuse=tf.AUTO_REUSE):
    """
    Just a small wrapper: given the embedding input, length input and encoder, return the encoded result
    :param emb_input: (data_size, stamps, dim_emb)
    :param len_input: (data_size, ) as int32
    :param encoder: the BidirectionalRNNEncoder instance (or None to skip encoding)
    :param fwbw: only use the concat of last fw & bw state
    :param reuse: reuse flag (used in generating RNN/GRU/LSTM- Cell)
    :return: (data_size, stamps, dim_hidden) when fwbw is False; when fwbw is
             True, the concatenated final fw/bw state of shape (data_size, dim_hidden)
    """
    if encoder is None:
        return emb_input  # just use word embedding, without other operations
    rnn_input = tf.unstack(emb_input, axis=1, name='emb_input')  # stamp * (data_size, dim_emb)
    encoder_output = encoder.encode(inputs=rnn_input,
                                    sequence_length=len_input,
                                    reuse=reuse)
    if not fwbw:
        out_hidden = tf.stack(encoder_output.outputs, axis=1, name='out_hidden')  # (data_size, stamp, dim_hidden)
        return out_hidden
    else:
        out_hidden = tf.concat(encoder_output.final_state, axis=-1, name='out_hidden')  # (data_size, dim_hidden)
        return out_hidden
def schema_encoding(preds_hidden, preds_len, pwords_hidden, pwords_len):
    """
    Given the pred-/pword- sequence embedding after Bidirectional RNN layer,
    return the final representation of the schema.
    The detail implementation is varied (max pooling, average ...), controlled by the detail config.
    Currently we follow yu2017 and use max-pooling.
    :param preds_hidden: (data_size, pred_max_len, dim_hidden)
    :param preds_len: (data_size, )
    :param pwords_hidden: (data_size, pword_max_len, dim_hidden)
    :param pwords_len: (data_size, )
    :return: (data_size, dim_hidden), the final representation of the schema
    """
    # Mask the padded stamps to float32.min so they can never win the max.
    masked_preds_hidden = seq_hidden_masking_before_pooling(seq_hidden_input=preds_hidden,
                                                            len_input=preds_len)
    masked_pwords_hidden = seq_hidden_masking_before_pooling(seq_hidden_input=pwords_hidden,
                                                             len_input=pwords_len)
    masked_merge_hidden = tf.concat(
        [masked_preds_hidden, masked_pwords_hidden],
        axis=1, name='masked_merge_hidden'
    )  # (data_size, pred_max_len + pword_max_len, dim_hidden)
    # Single max-pool over all stamps of BOTH sequences -> one vector per schema.
    schema_hidden = tf.reduce_max(masked_merge_hidden,
                                  axis=1, name='schema_hidden')  # (data_size, dim_hidden)
    return schema_hidden
def seq_hidden_masking(seq_hidden_input, len_input, mask_value):
    """
    According to the length of each data, set the padding hidden vector into some pre-defined mask value
    Then we can perform max_pooling
    :param seq_hidden_input: (data_size, max_len, dim_hidden)
    :param len_input: (data_size, ) as int
    :param mask_value: tf.float32.min (before max-pooling) or 0 (before averaging)
    :return: (data_size, stamps, dim_hidden) with masked.
    """
    max_len = tf.shape(seq_hidden_input)[1]  # could be int or int-tensor
    mask = tf.sequence_mask(lengths=len_input, maxlen=max_len,
                            dtype=tf.float32, name='mask')  # (data_size, max_len)
    exp_mask = tf.expand_dims(mask, axis=-1, name='exp_mask')  # (data_size, max_len, 1)
    # Valid stamps keep their value (mask==1); padded stamps become mask_value.
    masked_hidden = exp_mask * seq_hidden_input + (1.0 - exp_mask) * mask_value
    return masked_hidden
def seq_hidden_masking_before_pooling(seq_hidden_input, len_input):
    # Padded stamps get float32.min so max-pooling ignores them.
    return seq_hidden_masking(seq_hidden_input, len_input, mask_value=tf.float32.min)
def seq_hidden_masking_before_averaging(seq_hidden_input, len_input):
    # Padded stamps get 0 so they add nothing to the sum before averaging.
    return seq_hidden_masking(seq_hidden_input, len_input, mask_value=0)
def seq_hidden_max_pooling(seq_hidden_input, len_input):
    """
    Perform max pooling over the sequences.
    Should perform masking before pooling, due to different length of sequences.
    :param seq_hidden_input: (data_size, max_len, dim_hidden)
    :param len_input: (data_size, ) as int
    :return: (data_size, dim_hidden)
    """
    masked_hidden = seq_hidden_masking_before_pooling(seq_hidden_input, len_input)
    final_hidden = tf.reduce_max(masked_hidden, axis=1, name='final_hidden')  # (-1, dim_hidden)
    return final_hidden
def seq_hidden_averaging(seq_hidden_input, len_input):
    """
    Average hidden vectors over all the stamps of the sequence.
    For the padding position of each sequence, their vectors are always 0 (controlled by BidirectionalRNNEncoder)
    For the padding sequences, their length is 0, we shall avoid dividing by 0.
    Note: the sum is divided by the TRUE sequence length, not max_len, so
    padded stamps (masked to 0) do not dilute the average.
    :param seq_hidden_input: (data_size, max_len, dim_hidden)
    :param len_input: (data_size, ) as int
    :return: (data_size, dim_hidden) as the averaged vector repr.
    """
    masked_hidden = seq_hidden_masking_before_averaging(seq_hidden_input, len_input)
    sum_seq_hidden = tf.reduce_sum(
        masked_hidden, axis=1, name='sum_seq_hidden'
    )  # (-1, dim_hidden)
    seq_len_mat = tf.cast(
        tf.expand_dims(
            tf.maximum(len_input, 1),  # for padding sequence, their length=0, we avoid dividing by 0
            axis=1
        ), dtype=tf.float32, name='seq_len_mat'
    )  # (-1, 1) as float32
    seq_avg_hidden = tf.div(sum_seq_hidden, seq_len_mat,
                            name='seq_avg_hidden')  # (-1, dim_hidden)
    return seq_avg_hidden
def seq_encoding_with_aggregation(emb_input, len_input, rnn_encoder, seq_merge_mode):
    """
    Given sequence embedding, return the aggregated representation of the whole sequence.
    Consider using or not using RNN
    :param emb_input: (ds, max_len, dim_emb)
    :param len_input: (ds, )
    :param rnn_encoder: Xusheng's BidirectionalRNNEncoder (or None to use raw embeddings)
    :param seq_merge_mode: fwbw / max / avg
    :return: (ds, dim_hidden)
    :raises ValueError: if seq_merge_mode is not one of fwbw / max / avg
    """
    if seq_merge_mode not in ('fwbw', 'max', 'avg'):
        # Fail fast: the previous version silently returned None here, which
        # only surfaced later as an obscure downstream error.
        raise ValueError('unknown seq_merge_mode: %r' % (seq_merge_mode,))
    is_fwbw = seq_merge_mode == 'fwbw'
    if rnn_encoder is not None:
        hidden_repr = seq_encoding(emb_input=emb_input, len_input=len_input,
                                   encoder=rnn_encoder, fwbw=is_fwbw)
    else:
        hidden_repr = emb_input  # (ds, max_len, dim_emb)
    if is_fwbw:
        # seq_encoding already returned the concat of the final fw/bw states.
        return hidden_repr
    if seq_merge_mode == 'avg':
        return seq_hidden_averaging(seq_hidden_input=hidden_repr, len_input=len_input)
    return seq_hidden_max_pooling(seq_hidden_input=hidden_repr, len_input=len_input)  # 'max'
| true |
4f849e6dbb7b8a041d4fe3296651b8efccd2264f | Python | razzlepdx/practice-algorithms | /hackerrank/30_days_08.py | UTF-8 | 780 | 3.84375 | 4 | [] | no_license | # Enter your code here. Read input from STDIN. Print output to STDOUT
# create phone book dictionary with known number of inputs
# (Python 2 code: raw_input reads one line from STDIN)
num_entries = int(raw_input())  # first line: how many "name number" pairs follow
phone_book = {}
while num_entries:
    name, number = raw_input().rstrip().split(" ")
    phone_book[name] = number
    num_entries -= 1
def get_phone_numbers(name, phone_book):
    """Return "name=number" for *name* in *phone_book*, or "Not found".

    Uses a membership test instead of the truthiness of .get(), so an entry
    whose stored number is falsy (e.g. the empty string) is still reported.
    """
    if name in phone_book:
        return name + "=" + phone_book[name]
    return "Not found"
while True:
try:
print get_phone_numbers(raw_input(), phone_book)
except:
break
| true |
8f0fd8cf68057a4533a08a4922060bc717b1ab74 | Python | Jaime885/cti110 | /M2HW1_DistanceTraveled_JaimeRodriguezmiller.py | UTF-8 | 611 | 4.15625 | 4 | [] | no_license | #CTI-110
#M2HW1-Distance Traveled
#Jaime Rodriguezmiller
#September 9, 2017
#Value to the speed variable.
# Constant cruising speed in miles per hour.
speed = 70
# Trip durations (hours) we want distances for.
time1, time2, time3 = 6, 10, 15
# distance = rate * time for each duration.
distanceAfter6 = time1 * speed
distanceAfter10 = time2 * speed
distanceAfter15 = time3 * speed
print("Traveling at 70 miles per hour...")
print("After 6 hours of driving you will have traveled ", distanceAfter6, "miles.")
print("After 10 hours of driving you will have traveled ", distanceAfter10, "miles.")
print("After 15 hours of driving you will have traveled ", distanceAfter15, "miles.")
| true |
043be90767bc0510435f136994eaf69b3d38df1a | Python | bradyborkowski/LPTHW | /ex17.0.py | UTF-8 | 2,406 | 3.96875 | 4 | [] | no_license | # imports the argv module from the sys package
from sys import argv
# imports the exists module from the os.path package
from os.path import exists
# assigns variables to arguments passed to the script
# LPTHW ex17: copy one file to another, narrating each step.
# argv: [script name, source path, destination path]
script, from_file, to_file = argv
print(f"Copying from {from_file} to {to_file}")
# Read the whole source file.  A `with` block guarantees the file object is
# closed even if read() raises — the original one-liner open(from_file).read()
# relied on the garbage collector to close the handle, which is not guaranteed
# on every interpreter.
with open(from_file) as in_file:
    indata = in_file.read()
print(indata)
# len() of a str counts characters; for ASCII files that equals bytes.
print(f"The input file is {len(indata)} bytes long")
print(f"Does the output file exist? {exists(to_file)}")
print("Ready, hit RETURN to continue, CTRL-C to abort.")
input()
# Write the copy; leaving the with-block flushes and closes the output file.
with open(to_file, 'w') as out_file:
    out_file.write(indata)
print("Alright, all done.")
| true |
8d854483b13e4a41d38f1553a4a843e9d12dc264 | Python | igrekus/adf3114 | /domain.py | UTF-8 | 2,160 | 2.515625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import serial
from time import sleep
from PyQt5.QtCore import QObject, pyqtSignal
from arduino.arduinospi import ArduinoSpi
from arduino.arduinospimock import ArduinoSpiMock
mock_enabled = False
class Domain(QObject):
    """Owns the serial connection to the SPI programmer board (real or mock)."""
    def __init__(self, parent=None):
        super().__init__(parent)
        # Serial wrapper; None until connectProgr() succeeds.
        self._progr = None
    def connectProgr(self):
        """Probe COM1..COM256 for the STM32 programmer and open a connection.

        Returns True when a connection object was created (real ArduinoSpi, or
        ArduinoSpiMock when the module-level mock_enabled flag is set).
        """
        def available_ports():
            # A port is "available" if it can be opened and closed without error.
            result = list()
            for port in [f'COM{i+1}' for i in range(256)]:
                try:
                    s = serial.Serial(port)
                    s.close()
                    result.append(port)
                except (OSError, serial.SerialException):
                    pass
            print(f'available ports: {result}')
            return result
        def find_arduino():
            # Handshake: send '<n>\n', wait, expect an answer starting with b'SPI'.
            for port in available_ports():
                print(f'trying {port}')
                s = serial.Serial(port=port, baudrate=9600, timeout=1)
                if s.is_open:
                    s.write(b'<n>\n')
                    sleep(0.5)  # give the board time to answer before read_all()
                    ans = s.read_all()
                    s.close()
                    if ans[:3] == b'SPI':
                        print(f'STM32 found on {port}')
                        return port
            # for/else: runs when the loop finished without returning a port.
            else:
                return ''
        if not mock_enabled:
            port = find_arduino()
            if port:
                self._progr = ArduinoSpi(port=port, baudrate=9600, parity=serial.PARITY_NONE, bytesize=8,
                                         stopbits=serial.STOPBITS_ONE, timeout=1)
        else:
            self._progr = ArduinoSpiMock(port=(find_arduino()), baudrate=9600, parity=serial.PARITY_NONE, bytesize=8,
                                         stopbits=serial.STOPBITS_ONE, timeout=1)
        return bool(self._progr)
    def disconnectProgr(self):
        """Close the programmer connection (raises AttributeError if never connected)."""
        self._progr.disconnect()
    def send(self, command):
        """Send *command* and return the device reply; '' on any error (printed)."""
        res = ''
        try:
            res = self._progr.query(command)
        except Exception as ex:
            print(ex)  # NOTE(review): consider logging instead of print
        return res
    @property
    def connected(self):
        # NOTE(review): reaches into the wrapper's private _port attribute.
        return self._progr._port.is_open
| true |
dc01146e0c737d86166a0c8055cd1afbf626b888 | Python | scan3ls/holbertonschool-higher_level_programming | /0x04-python-more_data_structures/8-simple_delete.py | UTF-8 | 130 | 2.84375 | 3 | [] | no_license | #!/usr/bin/python3
def simple_delete(a_dictionary, key=""):
    """Remove *key* from *a_dictionary* if present, then return the dict."""
    # pop with a default removes the entry without raising when it is absent.
    a_dictionary.pop(key, None)
    return a_dictionary
| true |
094369778867f3536fb99a2aa70bf842ce47fe75 | Python | caobaoli/python-primer | /example/primer_5_exception.py | UTF-8 | 1,168 | 3.4375 | 3 | [] | no_license | # 5.1 异常处理概述
# 5.2 Exception-handling syntax
'''
try:
    <code>
except Exception as <name>:
    <exception-handling code>
'''
# URLError vs. HTTPError
'''
Both are exception-handling classes; HTTPError is a subclass of URLError.
HTTPError carries a status code and a reason, while URLError has no status
code, so URLError cannot simply be used in place of HTTPError when handling.
To substitute it you would first have to check whether the status-code
attribute exists.
'''
# try:
#     for i in range(0, 9):
#         if(i == 4):
#             print(i)
# except Exception as e:
#     print(e)
#     print("ok")
# Raises a 403 here: this site blocks crawlers, so we must masquerade as a browser
import urllib.request
import urllib.error
try:
    urllib.request.urlopen('http://blog.csdn.net')
except urllib.error.HTTPError as e:
    print(e.getcode())
try:
    url = 'http://blog.csdn.net'
    req = urllib.request.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
    data = urllib.request.urlopen(req).read().decode()
    print(data)
except urllib.error.HTTPError as e:
    print(e.getcode())
| true |
3b7d081b46c577a46137db47354745d6748da490 | Python | il-giza/Pilgrim | /PilgrimTask/PilgrimCross.py | UTF-8 | 6,643 | 3.71875 | 4 | [] | no_license | class PilgrimCross():
"""Класс Перекресток.
Будем двигаться по перекресткам.
Переменные класса отвечают за параметры города:
min_x, max_x = 1, 5 - Размер города с запада на восток
min_y, max_y = 1, 5 - Размер города с севера на юг
finish_cross = (5,5) - Конечная точка пути
stop_cross = [(1,1), (2,1)] - Эти перекрестки уже пройдены и в силу правил города по ним уже нельзя двигаться.
Атрибуты класса:
id = 0 - Пусть у объекта будет id, так легче будет отслеживать последовательность шагов
name = 'X' - Имя объекта - сюда пишем направление света
x, y = 3, 1 - Начальные координаты пути
balance = -4 - Начальный баланс.
parent = False - Ссылка на родителя (предыдущий перекресток)
street = (None, None) - Здесь название улицы, которая находится между перекрестками. Пишем в виде кортежа.
free_step - Массив названий доступных направлений движения
next - Перекресток имеет свой генератор разрешенных направлений
finish - Состояние объекта. True - поломник дошел до конечной точки с балансом >= 0, иначе - False.
"""
min_x, max_x = 1, 5
min_y, max_y = 1, 5
finish_cross = (5,5)
stop_cross = [(1,1), (2,1)]
    def __init__(self, id = 0, name = 'X', x = 3, y = 1, balance = -4, parent = False, street = (None, None)):
        """Create a crossing node; see the class docstring for attribute meanings."""
        self.id = id
        self.name = name
        self.x = x
        self.y = y
        self.balance = balance
        self.parent = parent
        self.street = street
        # Direction names still walkable from here (index 1 of test_step()).
        self.free_step = self.test_step()[1]
        # Generator yielding the bound step methods for those directions.
        self.next = self.next_step()
        if (x,y) == self.finish_cross and self.balance >= 0:
            self.finish = True
        else:
            self.finish = False
    def test_street(self, street):
        """Check whether this street was already walked by the pilgrim.
        Recurses through all parent objects (the whole path so far)."""
        if self.street == street:
            return True
        if self.parent:
            return self.parent.test_street(street)
        else:
            return False
    def go_W(self):
        """Step west: create the crossing object to the west (balance + 2)."""
        return PilgrimCross(self.id+1, 'W', self.x-1, self.y, self.balance + 2, self, street = (self.x-0.5, self.y))
    def go_E(self):
        """Step east: create the crossing object to the east (balance - 2)."""
        return PilgrimCross(self.id+1, 'E', self.x+1, self.y, self.balance - 2, self, street = (self.x+0.5, self.y))
    def go_N(self):
        """Step north: create the crossing object to the north (balance / 2)."""
        return PilgrimCross(self.id+1, 'N', self.x, self.y-1, self.balance / 2, self, street = (self.x, self.y-0.5))
    def go_S(self):
        """Step south: create the crossing object to the south (balance * 2)."""
        return PilgrimCross(self.id+1, 'S', self.x, self.y+1, self.balance * 2, self, street = (self.x, self.y+0.5))
    def test_step(self):
        """Build the tuple (step methods, direction names) of the allowed moves.
        Checked for every direction:
        1. the city boundary
        2. that the street in that direction has not been walked yet
        3. the forbidden crossings
        Returns the tuple (list of bound step methods, list of direction names)
        """
        free_step_func = []
        free_step_name = []
        if not(self.x == self.min_x or self.test_street((self.x-0.5, self.y)) or (self.x-1, self.y) in self.stop_cross):
            free_step_func.append(self.go_W)
            free_step_name.append('W')
        if not(self.x == self.max_x or self.test_street((self.x+0.5, self.y)) or (self.x+1, self.y) in self.stop_cross):
            free_step_func.append(self.go_E)
            free_step_name.append('E')
        if not(self.y == self.min_y or self.test_street((self.x, self.y-0.5)) or (self.x, self.y-1) in self.stop_cross):
            free_step_func.append(self.go_N)
            free_step_name.append('N')
        if not(self.y == self.max_y or self.test_street((self.x, self.y+0.5)) or (self.x, self.y+1) in self.stop_cross):
            free_step_func.append(self.go_S)
            free_step_name.append('S')
        return (free_step_func, free_step_name)
    def next_step(self):
        """Generator over the allowed step methods built by test_step()."""
        for i in self.test_step()[0]:
            yield i
    def get_next_cross(self):
        """Advance the generator and invoke the step, returning the next crossing."""
        return self.next.__next__()()
    def show_path(self):
        """Return the walked path (newest crossing first) by visiting all parents."""
        path = []
        path.append((self.id, self.name, [self.x, self.y], self.street, self.balance))
        if self.parent:
            path += self.parent.show_path()
        return path
    def get_pid(self):
        """Return the parent's id, or -1 for the root crossing."""
        if self.parent:
            return self.parent.id
        else:
            return -1
    def show_info(self):
        """Print a description of this crossing."""
        print('id', self.id,
              ' pid', self.get_pid(),
              ' name:', self.name,
              ' (x,y) =' , (self.x, self.y),
              ' balance:', self.balance,
              ' street:', self.street,
              ' free_step:', self.free_step)
    def __call__(self):
        """Print a description of this crossing."""
        self.show_info()
5686009968a24bc9478e4510f517b1b43004bd1b | Python | AidanFray/Cryptopals_Crypto_Challenges | /Set2/Challenge16/Challenge16.py | UTF-8 | 2,048 | 3.21875 | 3 | [] | no_license | import sys ; sys.path += ['.', '../..']
from SharedCode import Function
import base64
# Random key and IV are created on every execution
# (ciphertexts are therefore not reproducible across runs; both are base64
#  strings from the project helper — presumably AES key material, TODO confirm)
key = Function.Encryption.AES.randomKeyBase64()
iv = Function.Encryption.AES.randomKeyBase64()
def encrypt(key, data):
    """CBC-encrypt *data* under the module-level IV with the project AES helper."""
    return Function.Encryption.AES.CBC.Encrypt(iv, key, data)
def decrypt_and_admin_search(key, data):
    """
    Simulates granting access to the admin system
    (returns True iff the decrypted plaintext contains ";admin=true;")
    """
    target = ";admin=true;"
    d = Function.Encryption.AES.CBC.Decrypt(iv, key, data)
    plainTextWithPadding = base64.b64decode(d)
    # NOTE(review): str(bytes) yields "b'...'" in Python 3; the substring test
    # still works, but decoding to text before searching would be cleaner.
    return target in str(plainTextWithPadding)
def bitflip(cipherText):
    """Flip bytes of ciphertext block 0 so that block 1's plaintext decrypts
    from ":admin:true:" to ";admin=true;" (classic CBC bit-flipping)."""
    # The block that contains the payload
    targetBlock = 1
    blocks = Function.Encryption.splitBase64IntoBlocks(cipherText)
    # Splits the block into single byte Base64 values
    chars = Function.Encryption.splitBase64IntoBlocks(blocks[targetBlock], 1)
    # The outcome value is determed by XORing the actual cipher text byte (A) with its previous plaintext
    # value (PA) then applying the desired value (PD)
    # A' = A ⊕ PA ⊕ PD
    # https://masterpessimistaa.wordpress.com/2017/05/03/cbc-bit-flipping-attack/
    # Offsets 0, 6 and 11 are the three ':' placeholders in ":admin:true:".
    chars[0] = Function.BitFlippingAttacks.flip(chars[0], ":", ";")
    chars[6] = Function.BitFlippingAttacks.flip(chars[6], ":", "=")
    chars[11] = Function.BitFlippingAttacks.flip(chars[11], ":", ";")
    blocks[targetBlock] = Function.Base64_To.concat(chars)
    return Function.Base64_To.concat(blocks)
def task16():
    """Run the full attack: encrypt the placeholder string, flip the bits,
    and check whether the oracle now sees ";admin=true;". Returns True/False."""
    # Creates the string with placeholders
    data = Function.BitFlippingAttacks.createString(":admin:true:")
    cipherText = bitflip(encrypt(key, data))
    # If the encrypted text contains our ";admin=true;" string we have access
    if decrypt_and_admin_search(key, cipherText):
        Function.BitFlippingAttacks.colouredOutput(access=True)
        return True
    else:
        Function.BitFlippingAttacks.colouredOutput(access=False)
        return False
if __name__ == "__main__":
output = True
task16()
| true |
8f5c06b3e862cae78edfc5d98e5a12b730cc316c | Python | upple/BOJ | /src/10000/10826.py3.py | UTF-8 | 108 | 3.21875 | 3 | [
"MIT"
] | permissive | n=int(input())
# Iterative Fibonacci with tuple unpacking: after the loop `a` holds fib(n)
# (fib(0)=0, fib(1)=1), which removes both the n==0 special case and the
# temporary-variable swap of the original.
a, b = 0, 1
for _ in range(n):
    a, b = b, a + b
print(a)
| true |
bc79ea060d1ab5800b31b5ea26906f362f798214 | Python | lyp741/RaspberryPi-Home | /project/ip.py | UTF-8 | 2,262 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, os, urllib2, json
import re, urllib
import sys
import threading
import time
from static import Redis_conn as rds
reload(sys)
sys.setdefaultencoding('utf8')
class Get_public_ip:
    """Fetch this host's public IP by scraping ip138.com (Python 2 code)."""
    def getip(self):
        """Return the public IP, or a placeholder string on any failure."""
        try:
            myip = self.visit("http://1212.ip138.com/ic.asp")
        except:
            # bare except: best-effort — network/parse errors degrade to a stub
            myip = "So sorry!!!"
        return myip
    def visit(self,url):
        """GET *url* and return the first [...]-bracketed token (the IP)."""
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
        str = response.read()  # NOTE(review): shadows the builtin `str`
        print str
        pattern = re.compile('''\[(.*?)]''')
        items = re.findall(pattern , str)
        return items[0]
def getip():
    """Module-level convenience wrapper around Get_public_ip().getip()."""
    getmyip = Get_public_ip()
    return getmyip.getip()
def get_ip_area(ip):
    """Look up *ip* via the Taobao geo API and return the city name.

    Returns None implicitly when the API reports an error code or on any
    exception (both paths only print diagnostics).
    """
    try:
        apiurl = "http://ip.taobao.com/service/getIpInfo.php?ip=%s" %ip
        content = urllib2.urlopen(apiurl).read()
        # NOTE(review): the JSON is parsed twice; one loads() would suffice.
        data = json.loads(content)['data']
        code = json.loads(content)['code']
        if code == 0: # success
            print(data['country_id'])
            print(data['area'])
            print(data['city'])
            print(data['region'])
            return data['city']
        else:
            print(data)
    except Exception as ex:
        print(ex)
class weather(object):
    """Fetch a weather XML document for the cached current city (Python 2)."""
    weather_uri = "http://apistore.baidu.com/microservice/weather?cityid="
    def mainHandle(self,day):
        """Return the Sina weather XML (as unicode) for day offset *day*."""
        city = rds.get("current_city")
        # Strip the trailing 3 characters of the stored city name
        # (presumably a suffix such as "市" in the cached value — TODO confirm).
        city_name = city[0:len(city)-3]
        print "len city",len(city)
        print "city",city
        print "city_name",city_name
        # The Sina endpoint expects the city name gb2312-encoded in the query.
        code_uri = "http://php.weather.sina.com.cn/xml.php?"+urllib.urlencode({'city':city_name.encode('gb2312')})+"&password=DJOYnieT8234jlsK&day="+day
        uri = code_uri
        print uri
        url = urllib2.urlopen(uri).read().decode('utf-8')
        return url
def getCity():
    """Background loop: refresh the public IP and city in Redis every 600 s."""
    while True:
        try:
            ip = getip()
            city = get_ip_area(ip)
            print ip
            print "len",len(city)
            rds.set("current_ip",ip)
            rds.set("current_city",city)
        except:
            # bare except keeps the poller alive on any failure
            print "error getCity"
        time.sleep(600)
# Started at import time; NOTE(review): not a daemon thread, so it keeps the
# process alive after the main thread exits.
t = threading.Thread(target=getCity)
t.start()
def getWeather(day):
    """Return the weather XML for day offset *day* (see weather.mainHandle)."""
    wt = weather()
    return wt.mainHandle(day)
| true |
ae74cf0ae91b484d98285791828dc30a4805af3e | Python | MattJDavidson/python-adventofcode | /tests/test_05.py | UTF-8 | 2,868 | 3.03125 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | import pytest
from advent.problem_05 import (forbidden_patterns,
has_letter_hop,
nice_string,
nicer_string,
num_vowels,
non_overlapping_pair,
repeated_chars,
total_nice_strings,
total_nicer_strings)
def test_num_vowels():
    """num_vowels counts every vowel occurrence, not distinct vowels."""
    assert num_vowels('') == 0
    assert num_vowels('a') == 1
    assert num_vowels('ba') == 1
    assert num_vowels('aaa') == 3
def test_repeated_chars():
    """repeated_chars is True only for an immediately doubled letter."""
    assert repeated_chars('') is False
    assert repeated_chars('a') is False
    assert repeated_chars('aba') is False
    assert repeated_chars('aa') is True
    assert repeated_chars('abbc') is True
def test_forbidden_patterns():
    """forbidden_patterns flags the substrings ab, cd, pq, xy."""
    assert forbidden_patterns('ab') is True
    assert forbidden_patterns('cd') is True
    assert forbidden_patterns('pq') is True
    assert forbidden_patterns('xy') is True
    assert forbidden_patterns('') is False
    assert forbidden_patterns('acefg') is False
def test_nice_string():
    """nice_string: >=3 vowels, a doubled letter, and no forbidden pattern."""
    assert nice_string('ugknbfddgicrmopn') is True
    assert nice_string('aaa') is True
    assert nice_string('') is False
    assert nice_string('jchzalrnumimnmhp') is False
    assert nice_string('haegwjzuvuyypxyu') is False
    assert nice_string('dvszwmarrgswjxmb') is False
def test_total_nice_strings():
    """total_nice_strings counts nice lines in a newline-separated blob."""
    assert total_nice_strings('') == 0
    assert total_nice_strings('jchzalrnumimnmhp') == 0
    assert total_nice_strings('ab\njchzalrnumimnmhp') == 0
    assert total_nice_strings('aaa\njchzalrnumimnmhp') == 1
    assert total_nice_strings('aaa\nabjchzalrnumimnmhp') == 1
    assert total_nice_strings('aaa\neee\niii') == 3
def test_non_overlapping_pair():
    """non_overlapping_pair: some two-letter pair appears twice without overlap."""
    assert non_overlapping_pair('') is False
    assert non_overlapping_pair('aaa') is False
    assert non_overlapping_pair('aca') is False
    assert non_overlapping_pair('aaaa') is True
    assert non_overlapping_pair('abab') is True
    assert non_overlapping_pair('aaccaa') is True
    assert non_overlapping_pair('xxyxx') is True
def test_has_letter_hop():
    """has_letter_hop: a letter repeats with exactly one letter between (xyx)."""
    assert has_letter_hop('') is False
    assert has_letter_hop('aa') is False
    assert has_letter_hop('abcdefgh') is False
    assert has_letter_hop('abbaabb') is False
    assert has_letter_hop('xyx') is True
    assert has_letter_hop('xxyxx') is True
    assert has_letter_hop('abcdefeghi') is True
    assert has_letter_hop('aaa') is True
def test_nicer_string():
    """nicer_string: both non_overlapping_pair and has_letter_hop must hold."""
    assert nicer_string('') is False
    assert nicer_string('xxyxx') is True
def test_total_nicer_strings():
    """total_nicer_strings counts nicer lines in a newline-separated blob."""
    assert total_nicer_strings('') == 0
    assert total_nicer_strings('aa') == 0
    assert total_nicer_strings('abcdefgh') == 0
    assert total_nicer_strings('abbaabb') == 0
| true |
92971ec03453c9834228561799bd08ecf81c8525 | Python | ftn8205/python-course | /41-7.py | UTF-8 | 624 | 3.46875 | 3 | [] | no_license | """
Threads within one process share their data.
So why still use a queue inside a single process?
Because a queue is a pipe plus a lock,
using a queue also keeps the shared data safe.
"""
import queue
# 1) FIFO queue: first in, first out
# q = queue.Queue(3)
# q.put(1)
# q.get()
# q.get_nowait()
# q.get(timeout=3)
# q.full()
# q.empty()
# 2) Last in first out queue
# q = queue.LifoQueue(3)
# q.put(1)
# q.put(2)
# q.put(3)
# print(q.get())
# 3) Priority queue: entries are (priority, value) tuples; a LOWER priority
#    number is retrieved first.
q = queue.PriorityQueue(3)  # fixed: was `Queue.PriorityQueue` -> NameError (module is `queue`)
q.put((1,'111'))
q.put((100,'222'))
| true |
8433ee0517820eae41d5db0ed08b97396468e1e5 | Python | elimisteve/tent-python-xiaoping | /example_app.py | UTF-8 | 2,405 | 2.984375 | 3 | [
"MIT"
] | permissive | import datetime
import config
from xiaoping.tentapp import TentApp
from xiaoping.posts import AppPost, Post
###############################################################################
# About
###############################################################################
# This app lets you create a status post and then prints your last five
# status posts.
#
# You must already have a Tent server set up for it to work. One place that
# can be done is: https://cupcake.io/
###############################################################################
# Make preparations
###############################################################################
# 1. Run the following if you haven't already:
# $ pip install -r requirements.txt
# $ cp example_config.py config.py
# 2. Set `EXAMPLE_APP_ENTITY` in `config.py`.
# 3. Run the app:
# $ python example_app.py
###############################################################################
# Setup
###############################################################################
# Register this app (with write access to status posts) and begin OAuth-style setup.
app_post = AppPost('Xiaoping Example App',
                   write=['https://tent.io/types/status/v0'])
app = TentApp(config.EXAMPLE_APP_ENTITY, app_post)
go_to_me = app.start_setup()
print 'Now you need to go to:'
print ''
print go_to_me
print ''
print 'and approve the app.'
print "After doing so you'll be redirected to a new page."
print "Get the code parameter from that page's URL and enter it here."
code = raw_input('> ')
app.finish_setup(code)
# A real app would store app.app_id, app.id_value, and app.hawk_key
# at this point.
###############################################################################
# In use
###############################################################################
status_type = 'https://tent.io/types/status/v0#'
status_post = Post(status_type, {'text': raw_input('Type your status post: ')})
app.create_post(status_post)
# Print the five most recent status posts.
for i in app.get_posts_list({'types': status_type, 'limit': 5}):
    text = i['content']['text']
    # published_at is divided by 1000, i.e. the API reports milliseconds.
    unix_time_in_s = i['published_at']/1000.0
    # fromtimestamp() renders in the machine's LOCAL timezone.
    published_at = datetime.datetime.fromtimestamp(unix_time_in_s)
    formatted_time = published_at.strftime('%Y-%m-%d %H:%M:%S')
    print ('======================================='
           '=======================================')
    print 'Text: ' + text
    print 'Published at: ' + formatted_time
    print ''
| true |
516c55f041ce412ddcb985ee03425d68e08698f1 | Python | aureldent/lumapps-sdk | /lumapps/helpers/community.py | UTF-8 | 7,714 | 2.734375 | 3 | [
"MIT"
] | permissive | import logging
from lumapps.helpers.exceptions import BadRequestException
from lumapps.helpers.user import User
class Community(object):
    """ Lumapps community object
    Args:
        api: the ApiClient instance to use for requests
        customer: the customer id of the community, used for autorization
        instance: the instance id, if not defined the community is a customer community (platform level)
        title: the community name
        author: Community object of the community owner
        uid: the lumapps unique id of the community, generated automatically at the first save
        representation: a dictionary of all community attributes from lumapps
    """
    def __init__(
        self,
        api,
        customer="",
        instance="",
        title="",
        uid="",
        author="",
        representation=None,
    ):
        # type: (ApiClient, str, str, str, Community, str, dict) -> None
        # Fall back to the client's customer id when none is given.
        self._customer = customer if customer else api.customerId
        self._uid = uid
        self._title = title
        self._author = author
        self._instance = instance
        self._id = uid
        self._api = api
        self._admins = []
        self._users = []
        if representation is not None:
            self._set_representation(representation)
    @property
    def uid(self):
        # Always returned as str, whatever type the API delivered.
        return str(self._uid)
    @property
    def title(self):
        return self._title
    @staticmethod
    def new(api, customer="", instance="", uid="", title="", representation=None):
        """Factory wrapper around the constructor (same arguments)."""
        return Community(
            api=api,
            instance=instance,
            customer=customer,
            title=title,
            uid=uid,
            representation=representation,
        )
    def get_attribute(self, attr):
        # type: (str) -> (Union[object,str,int])
        """
        Args:
            attr: the attribute to fetch
        Returns:
            the value of this attribute from the full dictionary of the group attributes
            (implicitly None when no such "_attr" attribute exists)
        """
        label = "_{}".format(attr)
        if hasattr(self, label):
            return getattr(self, label, "")
    def set_attribute(self, attr, value, force=False):
        # type: (str, Union[str,int,object], boolean) -> None
        """
        Args:
            attr: feed attribute key to save
            value: feed attribute value to save
            force: whether to force the storage of the attribute
        Returns: None
        """
        # API key names are translated to internal names; id lists/values are
        # wrapped into User objects.
        if attr == "adminKeys":
            attr = "admins"
            value = [User.new(self._api, self._customer, uid=usr) for usr in value]
        if attr == "userKeys":
            attr = "users"
            value = [User.new(self._api, self._customer, uid=usr) for usr in value]
        if attr == "authorId":
            attr = "author"
            value = User.new(self._api, self._customer, uid=value)
        label = "_{}".format(attr)
        authorized_update_fields = (
            "admins",
            "users",
            "author",
            "title",
            "status",
            "instance",
            "type",
            "description",
        )
        if force or attr in authorized_update_fields:
            setattr(self, label, value)
        else:
            # NOTE(review): this exception is constructed but never raised, and
            # the "{}" placeholder is never formatted. _set_representation()
            # currently relies on unknown keys being silently ignored here, so
            # adding `raise` would need a matching change there.
            BadRequestException("attribute {} is not writable", attr)
    def _set_representation(self, result, force=False):
        # type: (dict[str], boolean) -> None
        """
        Update the attribute of the class from a Lumapps Community resource: https://api.lumapps.com/docs/output/_schemas/community
        Args:
            result: Lumapps Community resource dictionnary
            force: save all the attributes from this dictionary
        Returns: None
        """
        self._uid = result.get("uid")
        self._id = result.get("id")
        for k, v in iter(result.items()):
            self.set_attribute(k, v, force)
    def to_lumapps_dict(self):
        """Serialize back to a Lumapps-style dict (inverse of _set_representation)."""
        # we only keep attributes starting with "_" and we strip the "_"
        ignore_fields = ["api", "author", "admins", "users"]
        community = dict(
            (k[1:], v)
            for k, v in iter(vars(self).items())
            if k[0] == "_" and k[1:] not in ignore_fields and v is not None
        )
        community["authorId"] = self._author.uid
        community["adminKeys"] = [usr.uid for usr in self._admins]
        community["userKeys"] = [usr.uid for usr in self._users]
        return community
    def get_posts(self, **params):
        # type: (dict) -> Iterator[dict[str]]
        """
        fetch community posts
        Args:
            **params: optional dictionary of search parameters as in https://api.lumapps.com/docs/output/_schemas/servercontentcommunitypostpostmessagespostlistrequest
        Returns:
            a Community Post Generator object
        """
        params["contentId"] = self._uid
        params["instanceId"] = self._api.instanceId
        if not params.get("lang", None):
            params["lang"] = "en"
        if not params.get("fields", None):
            params[
                "fields"
            ] = "cursor,items(author,content,createdAt,uid,status,tags,title)"
        return self._api.iter_call("community", "post", "search", body=params)
def list_communities(api, **params):
    # type: (ApiClient, dict) -> Iterator[dict[str]]
    """Fetch communities
    Args:
        api: the ApiClient instance to use for requests
        **params: optional dictionary of search parameters as in https://api.lumapps.com/docs/community/list
    Returns:
        a Community Generator object
    """
    # Default field projection / empty request body when the caller gave none.
    if not params.get("fields", None):
        params[
            "fields"
        ] = "cursor,items(adminKeys,instance,status,title,type,uid,userKeys,authorId, description)"
    if not params.get("body", None):
        params["body"] = {}
    return api.iter_call("community", "list", **params)
def build_batch(api, communities, association=None):
    # type: (ApiClient, Iterator[dict[str]], dict[str]) -> Community
    """Yield Community instances built from raw Lumapps community dicts.

    Args:
        api: the ApiClient instance to use for requests
        communities: an iterable of raw Lumapps community dictionaries
        association: optional mapping used to rename the raw dictionary
            keys before each Community instance is built

    Yields:
        Community instances
    """
    logging.info("building batch communities")
    for raw in communities:
        if association:
            representation = {
                association.get(key, key): value for key, value in raw.items()
            }
        else:
            representation = raw
        yield Community(api, representation=representation)
def list_sync(api, instance="", **params):
    # type (ApiClient, str) -> list[dict[str]]
    """Fetch all the communities of an instance in a single call.

    If no instance id is provided, communities of every instance are
    returned.

    Args:
        api: the ApiClient instance to use for requests
        instance: the instance id to filter on (optional)
        **params: optional search parameters, see
            https://api.lumapps.com/docs/community/list

    Returns:
        list: a list of Lumapps community resources
    """
    # `params` is always a dict here (it is built from **kwargs), so the
    # original `if not params: params = dict()` guard was a no-op.
    default_fields = (
        "cursor,items(adminKeys,instance,status,title,type,uid,userKeys,"
        "authorId, description)"
    )
    params["fields"] = params.get("fields") or default_fields
    params["body"] = params.get("body") or {"lang": "en"}
    if instance:
        params["body"]["instanceId"] = instance
    return api.get_call("community", "list", **params)
| true |
836a3ea6d3ae977fa588c098338c04679d064472 | Python | pybites/challenges | /52/henryy07/pomodoro.py | UTF-8 | 3,466 | 3.703125 | 4 | [] | no_license | """
Very simple pomodoro application, you can choose pomodoro duration, break length and how many times you want to repeat
process, to start application you need to install speech dispatcher, you can use command:
sudo apt install speech-dispatcher
Enjoy!
"""
import argparse
from datetime import datetime, timedelta
import time
import os
import sys
# making arg parse arguments with the default values
parser = argparse.ArgumentParser()
parser.add_argument(
"--duration",
help="length of the pomodoro duration in minutes, default value is 20",
type=int,
default=20
)
parser.add_argument(
'--count',
help='how many pomodoros you want to do',
type=int,
default=4
)
parser.add_argument(
'--br_length',
help='length of the break in minutes',
type=int,
default=5
)
args = parser.parse_args()
duration = args.duration
break_length = args.br_length
iteration_count = args.count
print(args.duration, args.br_length, args.count)
# function which makes small animations which shows to user that application is working all the time
def animation(i):
    """Draw one frame of a console spinner (| / - \\) to show activity.

    Args:
        i: frame counter; the glyph is chosen with i modulo 4.
    """
    # Fix: the original local was also named `animation`, shadowing the
    # function itself; also normalized the non-PEP8 call spacing.
    frames = "|/-\\"
    time.sleep(0.2)  # small delay so consecutive frames stay visible
    sys.stdout.write("\r" + frames[i % len(frames)])
    sys.stdout.flush()
# timer for break duration with specific outputs for break
def break_timer(break_length, start_time):
    """Announce a break and spin until `break_length` minutes have elapsed.

    Args:
        break_length: break duration in minutes
        start_time: datetime the break started at
    """
    os.system('spd-say "break"')  # audible announcement via speech-dispatcher
    sys.stdout.write("\n break started at: {} and should end at: {}".format(
        datetime.now(), datetime.now() + timedelta(minutes=break_length)))
    frame = 0
    running = True
    while running:
        # Measure before drawing the frame, check after, exactly like the
        # original loop did.
        elapsed = datetime.now() - start_time
        animation(frame)
        frame += 1
        if elapsed >= timedelta(minutes=break_length):
            running = False
# timer for pomodoro duration with specific outputs for pomodoro
def pomodoro_timer(pomodoro_duration, start_time):
    """Announce a pomodoro and spin until it has elapsed.

    Args:
        pomodoro_duration: pomodoro length in minutes
        start_time: datetime the pomodoro started at
    """
    os.system('spd-say "time to work"')  # audible announcement via speech-dispatcher
    # Bug fix: the projected end time previously used the module-level
    # `duration` instead of the `pomodoro_duration` argument, so a custom
    # duration printed the wrong end time.
    sys.stdout.write("\n pomodoro started at: {} and will end at: {}".format(
        datetime.now(), datetime.now() + timedelta(minutes=pomodoro_duration)))
    timer = True
    i = 0
    while timer:
        work_duration = datetime.now() - start_time
        animation(i)
        i += 1
        if work_duration >= timedelta(minutes=pomodoro_duration):
            timer = False
# main function which uses timers in for loop which is in rango of iteration count choosen by user, or default one
def main():
    """Run `iteration_count` pomodoros with a break after each one except
    the last, using the module-level argparse values `duration`,
    `break_length` and `iteration_count`.
    """
    for pomodoro_iteration in range(iteration_count):
        # there will be different behaviours for first and last iteration
        if pomodoro_iteration == 0:
            # First iteration: print the configuration banner before starting.
            # NOTE(review): when iteration_count == 1 this branch still runs,
            # so the single pomodoro is followed by a break — confirm intended.
            sys.stdout.write ("""\n pomodoro started with the following configuration: \n
            length of one pomodoro iteration: {} min\n
            break length: {} min\n
            number of iterations: {}\n""".format (duration, break_length,
                                                  iteration_count))
            pomodoro_timer(duration, datetime.now())
            break_timer(break_length, datetime.now())
        elif pomodoro_iteration == iteration_count - 1:
            # Last iteration: no trailing break.
            pomodoro_timer(duration, datetime.now())
        else:
            pomodoro_timer(duration, datetime.now())
            break_timer(break_length, datetime.now())
    # speech-dispatcher
    os.system ('spd-say "thats it good job"')
if __name__ == '__main__':
main() | true |
1396c60c4bf671df7385eec5805cc13af519ee18 | Python | Hedgehogues/HoChiMinh | /hochiminh/dev/font_to_image.py | UTF-8 | 5,351 | 2.59375 | 3 | [] | no_license | import cv2
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from copy import deepcopy
from os import listdir
from os.path import isfile, join
from multiprocessing import Process
from numpy.random import randint, choice
class DatasetGenerator:
    """Renders random text sequences as noisy images to build an OCR-style
    dataset: a clean "etalon" image is written to `in_path` and a
    (possibly) degraded version to `out_path`.
    """
    def __init__(self, in_path, out_path):
        # [low, high) ranges fed to numpy.random.randint below.
        self.font_size = [11, 23]
        self.font_path = 'data/fonts/'
        self.fonts = ["1.ttf", "2.ttf", "3.ttf", "4.ttf", "5.ttf", "6.ttf", "7.ttf"]
        # Alphabet: Cyrillic upper/lower, digits, Latin lower/upper.
        self.letters = list(range(ord('А'), ord('Я') + 1)) + \
                       list(range(ord('а'), ord('я') + 1)) + \
                       list(range(ord('0'), ord('9') + 1)) + \
                       list(range(ord('a'), ord('z') + 1)) + \
                       list(range(ord('A'), ord('Z') + 1))
        self.letters = [chr(letter) for letter in self.letters]
        # NOTE(review): the erode/dilate/gauss ranges below are not read by
        # sample(), which hard-codes its kernels — confirm they are still needed.
        self.erode_kernel = [1, 5]
        self.erode_iterate = [1, 5]
        self.dilate_kernel = [1, 5]
        self.dilate_iterate = [1, 5]
        self.gauss_kernel = [1, 5]
        self.gauss_sigma = [0, 4]
        self.seq_len = [1, 8]
        self.sep = [' ', '\n']
        self.seqs = [1, 10]
        self.intensity = [128, 255]
        self.in_path = in_path
        self.out_path = out_path
    def sample(self, inds, id):
        """Generate samples numbered inds[0]..inds[-1]-1 (worker `id`)."""
        num = inds[0]
        print('Process', id, 'was started')
        i = 0
        while num < inds[-1]:
            # Render a random text sequence at a random intensity.
            image = Image.fromarray(np.zeros((160, 160), dtype=np.uint8))
            draw = ImageDraw.Draw(image)
            seq = ''
            for _ in np.arange(randint(self.seqs[0], self.seqs[1])):
                seq_len = randint(self.seq_len[0], self.seq_len[1])
                seq += ''.join([choice(self.letters) for _ in np.arange(seq_len)])
                seq += choice(self.sep)
            font_type = self.font_path + choice(self.fonts)
            font_size = randint(self.font_size[0], self.font_size[1])
            font = ImageFont.truetype(font_type, font_size)
            intensity = randint(self.intensity[0], self.intensity[1])
            draw.text((0, 0), seq, intensity, font=font)
            # NOTE(review): in_image is binarized but never used afterwards.
            in_image = np.array(deepcopy(image))
            in_image[in_image > 0] = 255
            # Clean reference rendered at full intensity, saved to in_path.
            etalon_image = Image.fromarray(np.zeros((100, 100), dtype=np.uint8))
            etalon_draw = ImageDraw.Draw(etalon_image)
            etalon_font = ImageFont.truetype(font_type, font_size)
            etalon_draw.text((0, 0), seq, 255, font=etalon_font)
            cv2.imwrite(self.in_path + str(num) + '.tif', np.array(etalon_image))
            # Pick a degradation; type 4 is unhandled and, like type 0,
            # writes the image unmodified. `continue` discards the sample
            # and resamples without advancing num/i.
            noise_type = randint(0, 9)
            if noise_type == 0:
                pass
            elif noise_type == 1:
                sigma = randint(0, 3)
                image = cv2.GaussianBlur(np.array(image), (3, 3), sigma)
            elif noise_type == 2:
                image = cv2.medianBlur(np.array(image), 3)
            elif noise_type == 3:
                image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
            elif noise_type == 5:
                if font_size > 20:
                    image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
                else:
                    continue
            elif noise_type == 6:
                if font_size > 22:
                    image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
                    image = cv2.GaussianBlur(np.array(image), (3, 3), 0)
                else:
                    continue
            elif noise_type == 7:
                if font_size > 22:
                    image = cv2.GaussianBlur(np.array(image), (3, 3), 0)
                    image = cv2.dilate(np.array(image), np.ones((3, 3), np.uint8), iterations=1)
                else:
                    continue
            elif noise_type == 8:
                if font_size > 22:
                    image = cv2.erode(np.array(image), np.ones((2, 2), np.uint8), iterations=1)
                else:
                    continue
            cv2.imwrite(self.out_path + str(num) + '.tif', np.array(image))
            if i > 0 and i % 500 == 0:
                print('#', id, '. Step:', i)
            num += 1
            i += 1
def extract_non_zero_image(in_image, out_image, max_size, border=0):
    """Crop both images around out_image's non-zero region.

    The crop origin is the first non-zero row/column of out_image shifted
    up/left by `border` (clamped at 0); the crop is pasted into zero-filled
    uint8 canvases of shape `max_size`.

    Args:
        in_image: source image aligned with out_image
        out_image: mask-like image whose non-zero pixels define the crop
        max_size: (height, width) of the returned canvases
        border: margin added before the first non-zero row/column

    Returns:
        (in_crop, out_crop): uint8 arrays of shape max_size
    """
    row_hits = np.where(np.sum(out_image, axis=1) > 0)[0]
    col_hits = np.where(np.sum(out_image, axis=0) > 0)[0]
    top = max(0, np.min(row_hits) - border)
    left = max(0, np.min(col_hits) - border)
    bottom = min(top + max_size[0], len(in_image))
    right = min(left + max_size[1], len(in_image[0]))
    in_crop = np.zeros(max_size, np.uint8)
    out_crop = np.zeros(max_size, np.uint8)
    in_crop[:bottom - top, :right - left] = in_image[top:bottom, left:right]
    out_crop[:bottom - top, :right - left] = out_image[top:bottom, left:right]
    return in_crop, out_crop
if __name__ == "__main__":
in_path = '../rosatom_dataset/in/'
out_path = '../rosatom_dataset/out/'
n = 20000
i = 0
pr_count = 8
DS = DatasetGenerator(in_path, out_path)
step = n // pr_count + 15
process = []
for pr_num in range(pr_count):
inds = range(min(step * pr_num, n), min(step * (pr_num + 1), n))
p = Process(target=DS.sample, args=(inds, pr_num))
p.start()
process.append(p)
for p in process:
p.join()
| true |
21da772b3c69837b1806520fada8b8978bd19b4f | Python | matbra/dist_comp | /python/faktor_packages/src/faktor/dsp/common/sound.py | UTF-8 | 513 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri May 18 22:47:01 2012
@author: Matti
"""
import pygame
import numpy
def sound(x, fs):
    """Play the raw sample array `x` at sample rate `fs` through pygame.

    Args:
        x: 1-D array of samples, converted to signed 16-bit before playback
        fs: sample rate in Hz
    """
    pygame.mixer.init(frequency=fs, size=-16, channels=1, buffer=4096)
    playable = pygame.sndarray.make_sound(numpy.int16(x))
    # NOTE(review): this extra Sound object is created and discarded, which
    # looks redundant — kept for behavioural parity, confirm it is needed.
    pygame.mixer.Sound(playable)
    playable.play()
    return None
def soundsc(x, fs):
    """Normalize `x` to full 16-bit scale and play it at sample rate `fs`."""
    scaled = normalize(x) * 2**15
    sound(scaled, fs)
    return None
def normalize(x):
    """Scale `x` so its largest absolute sample is 1.0.

    Uses numpy's max/abs so it works for arrays of any dimensionality —
    the built-in max() used previously only handles 1-D input.

    Args:
        x: array of samples; must contain at least one non-zero value,
           otherwise a division by zero occurs.

    Returns:
        The normalized array.
    """
    return x / numpy.max(numpy.abs(x))
33944487695e755cd398a04abe0a4f1f5f6a3235 | Python | aotong/auto_dict2 | /auto_dict/views.py | UTF-8 | 1,331 | 2.765625 | 3 | [] | no_license | from django.shortcuts import render
from urllib.request import urlopen
from .models import Word, Translation
def make_url(word):
    """Build the Merriam-Webster collegiate dictionary API URL for `word`."""
    base = "http://www.dictionaryapi.com/api/v1/references/collegiate/xml/"
    key = "?key=e2595b47-f120-4361-8aa7-bb3a7eb3c5f6"
    return base + word + key
def index(request):
    """Dictionary view.

    POST: fetch the submitted word from the Merriam-Webster XML API,
    scrape the first definition out of the response by plain substring
    search, persist it as a Word, and render it.
    GET: render the first and last stored Translation objects.
    """
    if request.method == 'POST':
        word = request.POST.get('word', '').strip()
        print(word * 9)  # debug: echoes the word nine times
        url = make_url(word)
        print("trying to open your url\n\n")
        html = urlopen(url)  # NOTE(review): no timeout — a slow API hangs the request
        print("Opened! our url! Now trying to read\n\n")
        text = html.read()
        print("Read!! Yay!! Finished!\n\n")
        if type(text) != type("string"):
            text = text.decode("utf-8")
        # Naive scrape of the first <def>/<dt> entry by substring search;
        # NOTE(review): breaks if the XML layout changes — an XML parser
        # would be safer. The local `index` shadows this view's own name.
        index = text.find("def")
        index = text.find("dt", index)
        index = text.find(":", index) + 1
        end = text.find("</dt", index)
        definition = text[index : end ]
        word_obj = Word(word=word, definition=definition)
        word_obj.save()
        print(definition)
        return render(request, 'auto_dict/index.html', {'word' : word, 'definition' : definition})
    translations = Translation.objects.all()
    first = translations.first()
    second = translations.last()
    return render(request, 'auto_dict/index.html', {'first_translation': first, 'second_translation': second})
| true |
cb9b256aac26a47abc81e20fd5e402f20746c4af | Python | Pixelus/Programming-Problems-From-Programming-Books | /Python-crash-course/Chapter8/cities.py | UTF-8 | 216 | 3.015625 | 3 | [] | no_license | def describe_city(name, country="Iceland"):
print(name.title() + " is in " + country.title() + ".")
# Demo calls: the country keyword overrides the "Iceland" default.
describe_city("Paris", country="France")
describe_city("Reykjavik")
describe_city("London", country="England")
| true |
7f356da57a47efc300ba0f2161a8229a046fe9ca | Python | kirixh/ConsoleWars | /Creators/MineCreator.py | UTF-8 | 608 | 3.0625 | 3 | [] | no_license | from __future__ import annotations
from Buildings.Mine import Mine
from Creators.BuildingCreator import BuildingCreator
class MineCreator(BuildingCreator):
    """Creator that places a mine on the map and registers it."""
    def create(self, game_map, symb, *coords):
        """Draw `symb` at (coords[0], coords[1]), attach a Mine there and
        return the first object registered at that cell."""
        row, col = coords[0], coords[1]
        cell = (row, col)
        game_map.map[row][col] = symb
        # Record the created object in the map's info registry.
        game_map.info[cell].append(Mine(row, col))
        return game_map.info[cell][0]
| true |
cc7cab51d6ad322f5bc8c6a862a180921e367db6 | Python | jonodrew/matchex | /munkres_test.py | UTF-8 | 379 | 3.296875 | 3 | [
"MIT"
] | permissive | from munkres import Munkres, print_matrix
# 3x3 cost matrix for the assignment-problem demo (Python 2 script).
matrix = [[5, 9, 1],
          [10, 3, 2],
          [8, 7, 4]]
m = Munkres()
# compute() returns the (row, column) pairs of the minimum-cost assignment.
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
# Accumulate and display the cost of each selected cell.
for row, column in indexes:
    value = matrix[row][column]
    total += value
    print '(%d, %d) -> %d' % (row, column, value)
print 'total cost: %d' % total
| true |
a01f42b9ee61ab089a6b5b7d85167ce37f656804 | Python | samsun076/100-days-of-Python | /scripts/16-18-List-comp_and-generators/01-listcom-gen.py | UTF-8 | 2,612 | 3.984375 | 4 | [] | no_license |
# examples from list comp's and generators
# https://github.com/talkpython/100daysofcode-with-python-course/blob/master/days/16-18-listcomprehensions-generators/list-comprehensions-generators.ipynb\\
from collections import Counter
import calendar
import itertools
import random
import re
import string
import requests
# Create a list of names and run it
names = 'pybites mike bob julian tim sara guido'.split()
print(names)
# Typical For loop
for name in names:
print(name.title())
first_half_alpha = string.ascii_lowercase[:13]
print("\n",first_half_alpha)
# typical way to create an empty list, loop through names list
# find names that start with the first half of the alphabet
# and capitalize the name
new_names=[]
for name in names:
if name[0] in first_half_alpha:
new_names.append(name.title())
print(new_names)
# now with list comps
new_names2 = [name.title() for name in names if name[0] in first_half_alpha]
print(new_names2)
assert new_names == new_names2
# another example to get common word requests on an excerpt of Harry Potter
resp = requests.get("http://projects.bobbelderbos.com/pcc/harry.txt")
# get all words in excerpt, make them lowercase, and split them up into a list
potter_words_list = resp.text.lower().split()
print(potter_words_list[:5])
# create a CNT variable and print the number of instances of each letter
cnt = Counter(potter_words_list)
print(cnt)
# 5 most common words
print(cnt.most_common(5))
# basic function with list comp to check if stop words or non alpha numeric exist in excerpt
sample_stop_words = "- the to fuck".split()
def stop_word_check(word_list=potter_words_list):
    """Return a list of booleans: for each word in sample_stop_words, True
    if it occurs in word_list.

    Note: the default binds the module-level potter_words_list at
    definition time.
    """
    # Fixes the missing space after `return`; a set makes each membership
    # test O(1) instead of scanning the whole list.
    vocabulary = set(word_list)
    return [word in vocabulary for word in sample_stop_words]
#check to see if stop words exists. Returns True or False.
print(stop_word_check())
# clean up non alpha numeric chars and set new variable name. \W is any non word character.
# '+' is one or more. So replacing and instances of one or more non word characters with nothing.
new_words = [re.sub(r'\W+', r'', word) for word in potter_words_list]
print(stop_word_check(new_words))
cnt = Counter(new_words)
cnt.most_common(5)
resp = requests.get('http://projects.bobbelderbos.com/pcc/stopwords.txt')
full_stop_words = resp.text.lower().split()
print(full_stop_words[:5])
#traditional for loop
loop_list=[]
for word in new_words:
if word.strip() and word not in full_stop_words:
loop_list.append(word)
print(loop_list)
#comprehension list
comp_list = [word for word in new_words if word.strip() and word not in full_stop_words]
print(comp_list)
cnt = Counter(comp_list)
print(cnt.most_common(10))
| true |
21872ca998933d3fa3c118467ea8470b076695a2 | Python | WolfAuto/NEA-Code | /Pro Maths/test_dates.py | UTF-8 | 12,183 | 3.125 | 3 | [] | no_license | import sqlite3 as sql # python modules used
import datetime as dt
import tkinter as tk
import pandas as pd
from tkinter import messagebox
from tkinter import ttk
with sql.connect("updatedfile.db", detect_types=sql.PARSE_DECLTYPES) as db: # connection made to db file with data type detection
cursor = db.cursor() # creating two cursors for interacting with the db file
cursor1 = db.cursor()
shared_data = {"date": None, # dictionary used for Set Test Date
"type": None,
"level": None,
"comments": "No Further Comments"}
current_date = dt.date.today().strftime("%Y-%m-%d") # Gets the current date (today's date)
# the create table statement is no longer needed as the table already exists
create_date_table = (
"CREATE TABLE IF NOT EXISTS test_dates(test_date DATE, test_type TEXT, test_level TEXT, comments TEXT, time_stamp DATE)")
#creating the test date table with columns
# test_date, text_type, text_level, comments and time_stamp
cursor.execute(create_date_table) # sql execution of creating the table
db.commit() # saves changes made to the db file
title_font = ("Times New Roman", 30) # setting a title font type with font family and size
medium_font = ("Times New Roman", 20) # setting a medium font type with font family and size
def show_details():
    """Open a tkinter window listing today's tests and upcoming tests,
    with Exit and Refresh buttons. Blocks only while dialogs are open;
    the window itself is driven by the caller's mainloop.
    """
    messagebox.showinfo("Window", "After you have finished with this window you can close it") #tkinter pop up message
    root = tk.Tk() # creating a tkinter window
    title_label = tk.Label(root, text="Tests", font=title_font, bg="grey") # title label for the tkinter window
    title_label.config(fg="white",bg="blue")
    title_label.pack()
    # the following code is for test dates that are today it has a title label a label with all the test dates
    # which has been added onto the tkinter window created
    current_label = tk.Label(root, text="Test For Today", bg="grey", font=title_font)
    current_label.config(anchor="center")
    current_label.pack(pady=10)
    today_label = tk.Label(root, text=current_test(), bg="grey", font=medium_font, wraplengt=900)
    today_label.pack()
    separator = ttk.Separator(root, orient="horizontal") # separates the current test and upcoming test from each other
    separator.pack(fill="x") # creates a line that fills the whole x axis
    # the following code is for test dates that are in the future it has a title label and a label with all the test dates
    # which also has been added on the tkinter window created
    upcoming_label = tk.Label(root, text="Upcoming Tests", font=title_font, bg="grey")
    upcoming_label.pack()
    test_upcoming = tk.Label(root, text=upcoming_test(), bg="grey", font=medium_font,wraplengt=900)
    test_upcoming.pack()
    exit_button = tk.Button(root, text="Exit", command=lambda: root.destroy()) # button for closing the window
    exit_button.config(height=3, width=10, bg="blue", fg="white")
    exit_button.place(x=1120, y=745)
    # button for updating the text on the today_label and test_upcoming label if a new test is added or a test
    # needs to be removed
    # NOTE: the lambda references update_labels, which is defined below —
    # valid because the name is only resolved when the button is clicked.
    refresh_button = tk.Button(root, text="Refresh Tests", command=lambda: update_labels())
    refresh_button.config(height=3, width=10, bg="blue", fg="white")
    refresh_button.place(x=0, y=745)
    def update_labels(): # refresh button runs this function
        delete_date() # removes test dates that have past
        today_label["text"] = current_test() # sets the text of the today_label to be current_test
        test_upcoming["text"] = upcoming_test() # sets the text of the test_upcoming to be upcoming_test
    root.geometry("1200x800") # sets the size of the tkinter window
    root.config(bg="grey") # sets the colour of the tkinter window
    root.attributes("-topmost", True) # makes the tkinter window appear at the top
    root.resizable(height=False,width=False) # prevents the window from being resized
def upcoming_test():
    """Return up to four future tests as CSV text, or a placeholder.

    The LIMIT keeps the tkinter label from being flooded with rows.
    """
    query = """ SELECT test_date,test_type,test_level,comments FROM test_dates WHERE test_date > ? LIMIT 4 """
    frame = pd.read_sql_query(query, db, params=(current_date,))
    if frame.empty:
        return "No Test Set for the Future"
    return frame.to_csv(None, index=False)
def current_test():
    """Return up to three of today's tests as CSV text, or a placeholder.

    The LIMIT keeps the tkinter label from being flooded with rows.
    """
    query = """ SELECT test_date,test_type,test_level,comments FROM test_dates WHERE test_date = ? LIMIT 3 """
    frame = pd.read_sql_query(query, db, params=(current_date,))
    if frame.empty:
        return "No Test Set for Today"
    return frame.to_csv(None, index=False)
def update_stamp():
    """Stamp every row in test_dates with today's date."""
    statement = """ UPDATE test_dates SET time_stamp = ? """
    cursor.execute(statement, [current_date])
    db.commit()  # persist the change to every record's time_stamp
def delete_date():
    """Remove tests whose stamped date has moved past their test date."""
    # Renamed the local: the original called it `delete_date`, shadowing
    # this function's own name.
    statement = "DELETE FROM test_dates WHERE time_stamp > test_date"
    cursor.execute(statement)
    db.commit()  # persist the deletions
def set_test(date, type, level, comment):
    """Validate the supplied test details and insert them into test_dates.

    Args:
        date: test date string in YYYY-MM-DD format
        type: 1 for a Pure test, 2 for an Applied test
        level: 1 for AS, 2 for A2
        comment: free-text note, must be under 250 characters

    Returns:
        True when the test was stored; None when validation failed (a
        tkinter error dialog is shown instead).
    """
    # sql for inserting into the test_dates table
    insert_test = """INSERT INTO test_dates (test_date, test_type, test_level, comments, time_stamp)
                     VALUES (?,?,?,?,?)"""
    # Only strptime sits inside the try: the original wrapped everything
    # in a bare `except:`, so database/tkinter failures were misreported
    # as date-format errors.
    try:
        dt.datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        messagebox.showerror("Date", "Date Format should be YYYY-MM-DD and not left blank")
        return
    if date < current_date:  # ISO dates compare correctly as strings
        messagebox.showerror("Date","Date already past set a reasonable date")
        return
    # Map the numeric selections to their display names; the original
    # duplicated the whole insert logic across four nested branches.
    # (`type` shadows the builtin but is kept for interface compatibility.)
    type_names = {1: "Pure", 2: "Applied"}
    level_names = {1: "AS", 2: "A2"}
    if type not in type_names:
        messagebox.showerror("Type", "Test Type cannot be left blank")
        return
    if level not in level_names:
        messagebox.showerror("Level", "Test Level cannot be left blank")
        return
    if len(comment) >= 250:
        messagebox.showerror("Comment","Comment has to be less than 250 characters")
        return
    # All checks passed: record the selection (previously shared_data was
    # partially mutated even when validation failed) and persist it.
    shared_data["date"] = date
    shared_data["type"] = type_names[type]
    shared_data["level"] = level_names[level]
    shared_data["comments"] = comment
    cursor.execute(insert_test, [shared_data["date"],
                                 shared_data["type"],
                                 shared_data["level"],
                                 shared_data["comments"],
                                 current_date])
    db.commit()  # saves changes made to the db file
    return True
7da00e1b9c70960834d85f3fe43e859a3d0568d0 | Python | wenh81/OFDMSim | /GenerateBits.py | UTF-8 | 1,054 | 2.78125 | 3 | [] | no_license | """
@ OFDM仿真
@ 信号产生文件
@ DD
"""
import GlobalParameter
import numpy as np
# #
# @ func: def GetBitsNeed() -> int:
# @ 得到OFDM一个符号所拥有的比特数(信息量)
# @ para void
# @ return OFDMBitsNeed
# #
def getBitsNeed() -> int:
    """Return the number of information bits carried by one OFDM symbol.

    Computed as carriers * symbols-per-carrier * bits-per-symbol, all
    taken from GlobalParameter.
    """
    return (GlobalParameter.OFDMCarrierCount
            * GlobalParameter.SymbolPerCarrier
            * GlobalParameter.BitsPerSymbol)
# #
# @ func: def GetBitsNeed() -> int:
# @ 得到OFDM一个符号所拥有的比特数(信息量)
# @ para void
# @ return OFDMBitsNeed
# #
def generateBits():
    """Generate a random bit stream long enough for one OFDM symbol."""
    needed = getBitsNeed()  # number of bits required
    if GlobalParameter.DEBUG:
        print(f'OFDM仿真: 单个OFDM符号所需的比特数目:{needed}')
    bits = np.random.randint(0, 2, needed)  # uniform random 0/1 stream
    if GlobalParameter.DEBUG:
        print(f'OFDM仿真: 比特流的数据类型:{type(bits)}')
    return bits
# #
# @ Debug(文件内)
# #
if __name__ == "__main__":
pass
| true |
73ced682948dbd7de62a3e71f10f1e6fe5af87b5 | Python | Valdoos/freecodecamp | /Data Analysis with Python Projects/Mean-Variance-Standard Deviation Calculator/mean_var_std.py | UTF-8 | 654 | 3.234375 | 3 | [] | no_license | import numpy as np
def calculate(list):
    """Compute mean/variance/std/max/min/sum of a 3x3 matrix.

    Args:
        list: exactly nine numbers, read row by row.

    Returns:
        dict mapping each statistic to [per-column, per-row, overall].

    Raises:
        ValueError: if the input does not contain nine numbers.
    """
    if len(list) != 9:
        raise ValueError("List must contain nine numbers.")
    grid = np.array(list).reshape(3, 3)
    def _three_way(op):
        # [column-wise, row-wise, flattened] results for one statistic.
        return [[*op(grid, axis=0)], [*op(grid, axis=1)], op(grid)]
    return {
        "mean": _three_way(np.mean),
        "variance": _three_way(np.var),
        "standard deviation": _three_way(np.std),
        "max": _three_way(np.max),
        "min": _three_way(np.min),
        "sum": _three_way(np.sum),
    }
| true |
6c3458dc592043615c15f5f8174e87fbfaa670af | Python | AI-Inspire/Code-for-Workshops-Spring-2018-PL | /NLP Example 1.py | UTF-8 | 1,919 | 4.0625 | 4 | [] | no_license | from nltk.tokenize import sent_tokenize, word_tokenize
from nltk import pos_tag #importing necessary libraries which will be called in code to perform NLP tasks
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
#TASK 1
text = "Hi! How are you today? I am awesome! How about you?"
print(word_tokenize(text)) #tokenizing text by splitting it wherever space is present in the sentence
print() #new line
#TASK 2
print(sent_tokenize(text))#tokenizing text by splitting it wherever a punctuation mark is present
print() #new line
#TASK 3 : tokenizing words in a diff. language, tokenizing so that sentences are separated by punctuation marks
textSpanish = "¡Hola! Estoy feliz porque estoy aprendiendo a codificar. ¿Cómo estás?"
print(sent_tokenize(textSpanish,"spanish"))
print() #new line
#TASK 4
#example with Part Of Speech (POS) Tagging, homynoyms are present in text : words with same spelling but diff. meaning
text1 = "They refuse to permit us to obtain the refuse permit"
print(pos_tag(text1)) #determing POS of each word
print() #new line
#TASK 5 : Word stemming
WordStemmer = PorterStemmer()
print(WordStemmer.stem('coding'))
print(WordStemmer.stem('seesaw'))
print(WordStemmer.stem('purple'))
print() #new line
#TASK 6 : Removing stop words
sentence = "You don't need a pen to write this essay. No pens are allowed. Only pencils are allowed."
stopWords = set(stopwords.words('english')) #generating all stop words in English
words = word_tokenize(sentence) #tokenizing the sentence into individual words
wordsFiltered = [] #this is an array, where all words will be filtered, and stop words will be removed
for w in words: #traversing through
if w not in stopWords: #if word is not a stop word
wordsFiltered.append(w) #add it to the wordsFiltered[]
print(wordsFiltered) #print all of the words filtered (not stop words)
| true |
9ac59768d021ef5fa3e94b49e71bee134febcd55 | Python | hiroto-kazama/cs-362_week_09 | /fizzbuzz.py | UTF-8 | 345 | 3.859375 | 4 | [] | no_license | def fizzBuzz():
i = 1
s = ""
while i < 101:
if i%3 == 0 and i%5 == 0:
s += "FizzBuzz "
elif i%3 == 0:
s += "Fizz "
elif i%5 == 0:
s += "Buzz "
elif i%3 != 0 and i%5 != 0:
s += str(i)
s += " "
i += 1
return s
print(fizzBuzz())
| true |
c73c4abf41d0a8a99cf8032dfe66c26cd04a7cb3 | Python | tskpcp/pythondome | /pandasDome/dropping_entries_from_an_axis.py | UTF-8 | 605 | 3.609375 | 4 | [] | no_license | import numpy as np
from pandas import Series,DataFrame
def droppingEntriesFromAnAxis():
    """Demonstrate pandas drop() on a Series and on a DataFrame (rows and
    columns), printing each intermediate result."""
    print('Series根据索引删除元素')
    series = Series(np.arange(5), index=['a', 'b', 'c', 'd', 'e'])
    without_c = series.drop('c')
    print(series)
    print(without_c)
    print(series.drop(['d', 'c']))
    print('DataFrame删除元素,可指定索引或列')
    frame = DataFrame(
        np.arange(16).reshape((4, 4)),
        index=['Ohio', 'Colorado', 'Utah', 'New York'],
        columns=['one', 'two', 'three', 'four'],
    )
    print(frame)
    print(frame.drop(['Colorado', 'Ohio']))  # drop rows by index label
    print(frame.drop('two', axis=1))         # drop a single column
    print(frame.drop(['two', 'four'], axis=1))
e8c66bcc992450b61db549f9a53983781123b431 | Python | Yuehchang/Python-practice-files | /Machine_learning/data_preprocessing.py | UTF-8 | 8,383 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 19 14:16:22 2017
@author: changyueh
"""
"""
Data Preprocessing page 100
"""
import pandas as pd
from io import StringIO
# Demo DataFrame with missing values (NaN) used throughout this section.
csv_data = '''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''
df = pd.read_csv(StringIO(csv_data))
df.isnull().sum() # missing(True =1), numeric(False =0); here C and D each have one missing value
df.dropna() # by default drops samples (rows), which usually makes more sense than dropping features
df.dropna(axis=1) # drop the feature (column) instead
df.dropna(how='all') # drop rows where all columns are NaN
df.dropna(thresh=4) # thresh=4 == drop rows that do not have at least 4 non-NaN values
df.dropna(subset=['C']) # only drop rows where NaN appears in specific columns, 'C' for example
"""
Imputing missing value 'mean imputation' page 102
"""
# strategy could be median, most_frequent (useful for categorical feature values), etc.
# NOTE(review): sklearn.preprocessing.Imputer was removed in modern
# scikit-learn in favour of sklearn.impute.SimpleImputer -- confirm the
# pinned scikit-learn version before running.
from sklearn.preprocessing import Imputer
imr = Imputer(missing_values='NaN', strategy='mean', axis=0) # axis = 1 == row's mean
imr = imr.fit(df)
impute_data = imr.transform(df.values)
impute_data
"""
Handeling categorical data page 104
"""
df = pd.DataFrame([['green','M',10.1,'class1'],['red','L',13.5,'class2'],['blue','XL',15.3,'class1']])
df.columns = ['color', 'size', 'price', 'classlabel']
# Mapping ordinal features (sizes have a natural order)
size_mapping = {'XL':3, 'L':2, 'M':1}
df['size'] = df['size'].map(size_mapping)
# Encoding class labels as integers (and mapping them back again)
import numpy as np
class_mapping = {label:idx for idx, label in enumerate(np.unique(df['classlabel']))}
df['classlabel'] = df['classlabel'].map(class_mapping)
inv_class_mapping = {v:k for k, v in class_mapping.items()}
df['classlabel'] = df['classlabel'].map(inv_class_mapping)
from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
class_le.inverse_transform(y)
# One-hot encoding on nominal features
X = df[['color', 'size', 'price']].values
color_le = LabelEncoder()
X[:, 0] = color_le.fit_transform(X[:, 0])
# OneHot => dummy columns
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(categorical_features=[0])
ohe.fit_transform(X).toarray()
# second way for OneHot, same output (dense array from the start)
ohe = OneHotEncoder(categorical_features=[0], sparse=False)
ohe.fit_transform(X)
# get_dummies only converts the string columns
pd.get_dummies(df[['price', 'color', 'size']])
"""
Partitioning a dataset in training and test sets page 108
"""
# Load the UCI wine dataset (first column is the class label).
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280 / OD315 of diluted wines', 'Proline']
print('Class label', np.unique(df_wine['Class label']))
df_wine.head()
# NOTE(review): sklearn.cross_validation is the pre-0.18 module name;
# modern releases expose train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
"""
Scaling the features normalization / standardization
"""
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train)
# NOTE(review): fit_transform on the *test* set re-fits the scaler and
# leaks test statistics; this should probably be mms.transform(X_test).
X_test_norm = mms.fit_transform(X_test)
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test) # correctly reuses the training fit
"""
Sparse solution L1/L2
"""
from sklearn.linear_model import LogisticRegression
LogisticRegression(penalty='l1')
lr = LogisticRegression(penalty='l1', C=0.1)
lr.fit(X_train_std, y_train)
print('Training accuracy:', lr.score(X_train_std, y_train))
print('Test accuracy:', lr.score(X_test_std, y_test))
lr.intercept_ #the intercept in each class was fit by different models One-vs-Rest page 116
lr.coef_
"""
Weight coefficients of the different features for different regularization strengths
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.subplot(111)
color = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'pink', 'lightgreen', 'lightblue', 'gray', 'indigo', 'orange']
weights, params = [], []
# Refit for a range of inverse regularization strengths C = 10**-4 .. 10**5
for c in np.arange(-4, 6):
    lr = LogisticRegression(penalty='l1', C=10**c, random_state=0)
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])
    params.append(10**c)
weights = np.array(weights)
# NOTE(review): the loop variable below shadows the `color` list; it
# works because zip() evaluates the list first, but renaming the list
# (e.g. to `colors`) would be clearer.
for column, color in zip(range(weights.shape[1]), color):
    plt.plot(params, weights[:, column], label=df_wine.columns[column+1], color=color)
plt.axhline(0, color='black', linestyle='--', linewidth=3)
plt.xlim([10**(-5), 10**5])
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.xscale('log')
plt.legend(loc='upper left')
ax.legend(loc='upper center', bbox_to_anchor=(1.38, 1.03), ncol=1, fancybox=True)
plt.show()
"""
Sequential Backward Selection(SBS) page 119
"""
from sklearn.base import clone
from itertools import combinations
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
class SBS():
    """Sequential Backward Selection.

    Greedily removes one feature at a time -- always the removal that
    hurts a held-out validation score the least -- until only
    ``k_features`` remain.  After fit():
      subsets_  : feature-index tuples chosen at every subset size
      scores_   : validation score of each subset in subsets_
      k_score_  : score of the final (smallest) subset
    NOTE(review): `clone` is imported by this module but never used; the
    estimator is refit in place for every candidate subset.
    """
    def __init__(self, estimator, k_features, scoring=accuracy_score, test_size=0.25, random_state=1):
        self.scoring = scoring
        self.estimator = estimator
        self.k_features = k_features
        self.test_size = test_size
        self.random_state = random_state
    def fit(self, X, y):
        """Run the backward elimination on an internal validation split."""
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=self.random_state)
        dim = X_train.shape[1]
        self.indices_ = tuple(range(dim))
        self.subsets_ = [self.indices_]
        score = self._calc_score(X_train, y_train, X_test, y_test, self.indices_)
        self.scores_ = [score]
        while dim > self.k_features:
            scores = []
            subsets = []
            # score every subset obtained by dropping exactly one feature
            for p in combinations(self.indices_, r=dim-1):
                score = self._calc_score(X_train, y_train, X_test, y_test, p)
                scores.append(score)
                subsets.append(p)
            # keep the best-scoring reduced subset
            best = np.argmax(scores)
            self.indices_ = subsets[best]
            self.subsets_.append(self.indices_)
            dim -= 1
            self.scores_.append(scores[best])
        self.k_score_ = self.scores_[-1]
        return self
    def transform(self, X):
        """Keep only the finally selected feature columns."""
        return X[:, self.indices_]
    def _calc_score(self, X_train, y_train, X_test, y_test, indices):
        """Fit the estimator on the given feature subset and score it."""
        self.estimator.fit(X_train[:, indices], y_train)
        y_pred = self.estimator.predict(X_test[:, indices])
        score = self.scoring(y_test, y_pred)
        return score
#SBS implementation page 121
from sklearn.neighbors import KNeighborsClassifier
#import matplotlib.pyplot as plt
knn = KNeighborsClassifier(n_neighbors=2)
sbs = SBS(knn, k_features=1)   # shrink from all 13 features down to 1
sbs.fit(X_train_std, y_train)
# plot validation accuracy as a function of the number of kept features
k_feat = [len(k) for k in sbs.subsets_]
plt.plot(k_feat, sbs.scores_, marker='o')
plt.ylim([0.7, 1.1])
plt.ylabel('Accuracy')
plt.xlabel('Number of features')
plt.grid()
plt.show()
# which features give good performance // 5-feature subset sits at index 8
k5 = list(sbs.subsets_[8])
print(df_wine.columns[1:][k5])
# evaluate the performance of the KNN on the full feature set
knn.fit(X_train_std, y_train)
print('Training accuracy:', knn.score(X_train_std, y_train))
print('Test accuracy:', knn.score(X_test_std, y_test))
# ... and on the selected 5-feature subset
knn.fit(X_train_std[:, k5], y_train)
print('Training accuracy:', knn.score(X_train_std[:, k5], y_train))
print('Test accuracy:', knn.score(X_test_std[:, k5], y_test))
"""
Assessing feature importance with random forests, columns distribution
"""
from sklearn.ensemble import RandomForestClassifier
feat_labels = df_wine.columns[1:]
forest = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
# feature indices sorted by decreasing importance
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
    print("%2d %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))
plt.title('Feature Importances')
plt.bar(range(X_train.shape[1]), importances[indices], color='lightblue', align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.show()
| true |
150c1eafec1e1659846bd8304fa0899c2d6d501b | Python | MichaelKim0407/selfhacked-util | /selfhacked/common/sql/middlewares.py | UTF-8 | 1,554 | 2.5625 | 3 | [] | no_license | import logging
from django.db import connection
from selfhacked.util.func import timed
db_logger = logging.getLogger('django.db.debugging')
class SqlQueryCountMiddleWare(object):
    """Django middleware that times every DB cursor call made while
    handling a request and logs the query count / total duration.

    Works by temporarily swapping ``connection.cursor`` for a factory
    that returns a timing proxy around the real cursor.
    """

    class Cursor(object):
        """Proxy around a DB-API cursor that records the duration of
        each call to the methods listed in TIMED_METHOD into the shared
        ``queries`` list."""

        TIMED_METHOD = ['callproc', 'execute', 'executemany']

        def __init__(self, cursor, queries: list):
            self.cursor = cursor
            self.queries = queries

        def timed(self, method):
            # wrap `method` so its elapsed time is appended to self.queries
            return timed(self.queries.append)(method)

        def __getattr__(self, item):
            if item in self.TIMED_METHOD:
                method = self.timed(getattr(self.cursor, item))
                # cache the wrapped method so it is built only once
                setattr(self, item, method)
                return method
            return getattr(self.cursor, item)

        def __iter__(self):
            return iter(self.cursor)

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            # Bug fix: previously this returned close()'s result; a
            # truthy return from __exit__ silently suppresses the
            # in-flight exception.  Close, then never suppress.
            self.close()
            return False

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        original_cursor = connection.cursor
        queries = []

        def new_cursor(*args, **kwargs):
            # wrap every cursor created during this request
            return self.Cursor(original_cursor(*args, **kwargs), queries)

        connection.cursor = new_cursor
        try:
            return self.get_response(request)
        finally:
            # Bug fix: restore the original factory.  Without this every
            # request wrapped the previous request's wrapper, nesting the
            # closures indefinitely and leaking the per-request `queries`
            # lists.
            connection.cursor = original_cursor
            if queries:
                db_logger.debug(
                    f"\"{request.method} {request.path}\" "
                    f"{len(queries)} queries took {sum(queries)} seconds"
                )
| true |
3419b31bea6abd76c31ce36715b13069a46ff8f5 | Python | rhyun9584/BOJ | /python/2493.py | UTF-8 | 362 | 2.71875 | 3 | [] | no_license | import sys
input = sys.stdin.readline
N = int(input())
tops = list(map(int, input().split()))
# Monotonic stack of (height, 1-based index).  Towers on the stack that
# are strictly shorter than the current one can never receive this (or
# any later) signal, so they are popped; the surviving stack top is the
# nearest tower to the left with height >= the current one (0 if none).
stack = []
result = []
for i in range(N):
    while stack and stack[-1][0] < tops[i]:
        stack.pop()
    if stack == []:
        result.append(0)
    else:
        result.append(stack[-1][1])
    stack.append((tops[i], i+1))
print(" ".join(map(str, result)))
7065cfd5026bc0384bd2a2f561b89c5581a4ca5a | Python | jaggerwang/jw-pylib | /pylib/form/validator.py | UTF-8 | 2,303 | 2.828125 | 3 | [
"MIT"
] | permissive | from wtforms import ValidationError
from wtforms.validators import Regexp
from ..string import display_width
def DisplayWidth(min_width=None, max_width=None, length_counter=display_width):
    """WTForms validator factory constraining a field's display width.

    The width is measured by ``length_counter`` (defaults to
    display_width).  Bounds set to None are skipped, and a None field
    value always passes.
    """
    def _check(form, field):
        value = field.data
        if value is None:
            return
        measured = length_counter(value)
        if min_width is not None and measured < min_width:
            raise ValidationError(
                "too short: {} < {}".format(measured, min_width))
        if max_width is not None and measured > max_width:
            raise ValidationError("too long: {} > {}".format(measured, max_width))

    return _check
def TagLength(max_number=None, min_len=None, max_len=None, anyof=None):
    """WTForms validator factory for a list-of-tags field.

    Optionally bounds the number of tags, each tag's character length,
    and (via ``anyof``) the allowed tag vocabulary.  A None field value
    always passes.
    """
    def _check(form, field):
        tags = field.data
        if tags is None:
            return
        count = len(tags)
        if max_number is not None and count > max_number:
            raise ValidationError(
                "too many tags: {} > {}".format(count, max_number))
        for tag in tags:
            length = len(tag)
            if min_len is not None and length < min_len:
                raise ValidationError("tag '{}' too short: {} < {}"
                                      .format(tag, length, min_len))
            if max_len is not None and length > max_len:
                raise ValidationError("tag '{}' too long: {} > {}"
                                      .format(tag, length, max_len))
            if anyof is not None and tag not in anyof:
                raise ValidationError("tag '{}' not in {}"
                                      .format(tag, anyof))

    return _check
class Version(Regexp):
    """Validator for a dotted version string with 1-3 numeric sections
    of 1-2 digits each (e.g. "1", "1.2", "10.20.30")."""
    def __init__(self, message=None):
        # second and third sections are optional
        super().__init__(r'^\d{1,2}(?:\.\d{1,2})?(?:\.\d{1,2})?$', 0, message)

    def __call__(self, form, field):
        # fall back to a translated default message
        message = self.message
        if message is None:
            message = field.gettext('Invalid version.')
        super().__call__(form, field, message)
class Section3Version(Regexp):
    """Validator requiring exactly three dotted numeric sections of
    1-2 digits each (e.g. "1.2.3", "10.20.30")."""
    def __init__(self, message=None):
        super().__init__(r'^\d{1,2}\.\d{1,2}\.\d{1,2}$', 0, message)

    def __call__(self, form, field):
        # fall back to a translated default message
        message = self.message
        if message is None:
            message = field.gettext('Invalid version.')
        super().__call__(form, field, message)
| true |
16e20cc30ea7c62312db3a90ca2c3321beb89c42 | Python | Jerllina/NILM_TEST | /REDD_LoadClassification/LoadClassification_kNNTest.py | UTF-8 | 1,612 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 13:10:16 2019
@author: Jelina
"""
import pandas as pd
import matplotlib.pyplot as plt
#load data
load_information=pd.read_csv('REDD_demo_load_information.csv')
# manually specify column names
load_information.columns = ['0','P', 'Load']
load_information.drop(['0'], axis=1, inplace=True)
#split testset and trainingset (stratified on the Load label)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(load_information.loc[:,load_information.columns!='Load'],
load_information['Load'],stratify=load_information['Load'],random_state=66)
###k-NN test
#1.train the model for every k in 1..9, recording train/test accuracy
from sklearn.neighbors import KNeighborsClassifier
train_accuracy_1=[]
test_accuracy_1=[]
neighbors_settings=range(1,10)
y_predict=[]
error=[]
for n_neighbors in neighbors_settings:
    knn=KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_train,y_train)
    y_predict.append( knn.predict(X_test))
    train_accuracy_1.append(knn.score(X_train,y_train))
    test_accuracy_1.append(knn.score(X_test,y_test))
#2.visualize the results (accuracy vs. k)
plt.plot(neighbors_settings,train_accuracy_1,label='train accuracy')
plt.plot(neighbors_settings,test_accuracy_1,label='test accuracy')
plt.ylabel('Accuracy')
plt.xlabel('n_neighbors')
plt.legend()
plt.savefig('knn_test_model')
# NOTE(review): the prints below score the *last* fitted model (k=9),
# not the best k from the sweep above.
print('Accuracy of k-NN classifier on training set:{:.2f}'.format(knn.score(X_train,y_train)))
print('Accuracy of k-NN classifier on test set:{:.2f}'.format(knn.score(X_test,y_test)))
#print(y_predict)
d9c2efe98f9de6465b078bac59684a2379e33f95 | Python | goodnewsj62/python-for-everyone-exercise | /Exercises/exercise_10.py | UTF-8 | 1,882 | 3.59375 | 4 | [] | no_license | # 1.
# Count messages per sender ("From <addr> <weekday> ..." lines only;
# the len > 3 check filters out the short "From: <addr>" header lines).
message_count = dict()
with open(r'./files/mbox-short.txt', 'rt') as file:
    for line in file:
        if line.startswith("From"):
            line = line.strip().split()
            if len(line) > 3:
                message_count[line[1]] = message_count.get(line[1], 0) + 1
# invert to (count, sender) pairs so sorting finds the busiest sender
list_ = list()
for key,value in message_count.items():
    list_.append((value,key))
list_.sort()
print("The maximum is: ", list_[-1][1], list_[-1][0])
# 2. Count messages per hour (the hour is the HH prefix of the time
# field, which is the second-to-last token of a "From " line).
hour_count = dict()
with open(r'./files/mbox-short.txt', 'rt') as file:
    for line in file:
        if line.startswith("From"):
            line = line.strip().split()
            if len(line) > 3:
                index = line[-2].find(':')
                key = line[-2][ : index]
                hour_count[key] = hour_count.get(key, 0) + 1
hour_list = list()
for key,value in hour_count.items():
    hour_list.append((value, key))
hour_list.sort()
for each in hour_list:
    print(each[1]," ",each[0])
# 3. Count letter frequencies over the whole file.
import string
letter_count = dict()
with open(r'./files/mbox-short.txt', 'rt') as file:
    for line in file:
        # remove punctuation, special characters, digits and spaces
        line = line.translate(str.maketrans('','', string.punctuation))
        line = line.translate(str.maketrans('','','1234567890@#$%^&*()_-+=|][{}\/>< '))
        line = line.lower()
        line = line.strip("\n").split()
        for letters in line:
            letters = letters.split()
            str_ = ''.join(letters)
            for letter in range(len(str_)):
                letter_count[str_[letter]] = letter_count.get(str_[letter], 0) + 1
list_letter = []
for key,value in letter_count.items():
    list_letter.append((value,key))
list_letter.sort()
print('\n\nletters and how many times they appear\n\n')
for n in range(len(list_letter)):
    print(list_letter[n][1],list_letter[n][0])
0e22a8be5fecf4fa7b1989521c471ace8b53016c | Python | johnbukaixin/python-demo | /database1/Connect.py | UTF-8 | 654 | 2.6875 | 3 | [] | no_license | import pymysql
def con():
    """Return a pymysql connection to the local ``hrs`` database (utf8).

    NOTE(review): host/user/password are hard-coded here -- confirm
    whether they should be read from configuration instead.
    """
    # 1. Create the database connection object
    con = pymysql.connect(host='localhost', port=3306,
                          database='hrs', charset='utf8',
                          user='root', password='123456')
    return con
def con_by_param(host, port, user, password, database, charset):
    """Open a pymysql connection with caller-supplied settings.

    When ``database`` or ``charset`` is None or an empty string, they
    fall back to 'hrs' and 'utf8' respectively.
    """
    db_name = 'hrs' if database is None or database == '' else database
    encoding = 'utf8' if charset is None or charset == '' else charset
    return pymysql.connect(host=host, port=port,
                           database=db_name, charset=encoding,
                           user=user, password=password)
| true |
609ccd57cedf754fa7b57bbc544bd41728c8d70f | Python | DeanHe/Practice | /LeetCodePython/SplitArrayLargestSum.py | UTF-8 | 1,442 | 3.96875 | 4 | [] | no_license | """
Given an array nums which consists of non-negative integers and an integer m, you can split the array into m non-empty continuous subarrays.
Write an algorithm to minimize the largest sum among these m subarrays.
Example 1:
Input: nums = [7,2,5,10,8], m = 2
Output: 18
Explanation:
There are four ways to split nums into two subarrays.
The best way is to split it into [7,2,5] and [10,8],
where the largest sum among the two subarrays is only 18.
Example 2:
Input: nums = [1,2,3,4,5], m = 2
Output: 9
Example 3:
Input: nums = [1,4,4], m = 3
Output: 4
Constraints:
1 <= nums.length <= 1000
0 <= nums[i] <= 106
1 <= m <= min(50, nums.length)
"""
class SplitArrayLargestSum(object):
    """Binary search on the answer: find the smallest cap on a subarray
    sum such that nums can be split into at most m pieces."""

    def countValidGroups(self, nums, most):
        """Return the minimum number of contiguous subarrays needed so
        that no subarray sum exceeds `most` (assumes most >= max(nums),
        which the caller guarantees)."""
        cur_sum, cnts = 0, 1
        for n in nums:
            cur_sum += n
            if cur_sum > most:
                # start a new subarray at this element
                cnts += 1
                cur_sum = n
        return cnts

    def splitArray(self, nums, m):
        """
        :type nums: List[int]
        :type m: int
        :rtype: int  -- minimized largest subarray sum over m subarrays
        """
        # The answer lies in [max(nums), sum(nums)]; countValidGroups is
        # non-increasing in the cap, so binary search the boundary.
        s, e = max(nums), sum(nums)
        while s + 1 < e:
            mid = s + (e - s) // 2
            if self.countValidGroups(nums, mid) <= m:
                e = mid
            else:
                s = mid
        # Bug fix: the original returned only when the group count was
        # exactly m, but the count can jump from >m straight to <m
        # (e.g. nums=[2,2,2,2], m=3 jumps 4 -> 2), in which case it
        # wrongly returned an infeasible cap.  Feasibility (count <= m)
        # is the correct criterion, because 1 <= m <= len(nums) means
        # any split into fewer than m pieces can be refined into m.
        if self.countValidGroups(nums, s) <= m:
            return s
        return e
d5830512c69a4acde1c60409d2479a981023e72d | Python | mhvis/pretix | /src/tests/base/test_urls.py | UTF-8 | 1,279 | 2.59375 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | from importlib import import_module
from django.conf import settings
from django.test import TestCase
class URLTestCase(TestCase):
    """
    This test case tests for a name string on all URLs. Unnamed
    URLs will cause a TypeError in the metrics middleware.
    """
    # attribute names under which Django URL objects expose nested patterns
    pattern_attrs = ['urlpatterns', 'url_patterns']
    def test_url_names(self):
        """Fail, listing the offending regexes, if any URL lacks a name."""
        urlconf = import_module(settings.ROOT_URLCONF)
        nameless = self.find_nameless_urls(urlconf)
        # NOTE(review): `.regex` is the pre-Django-2.0 attribute spelling
        # (newer versions use `.pattern.regex`) -- confirm the pinned
        # Django version.
        message = "URL regexes missing names: %s" % " ".join([n.regex.pattern for n in nameless])
        # assertIs only works here because CPython caches small ints;
        # assertEqual would express the intent more directly.
        self.assertIs(len(nameless), 0, message)
    def find_nameless_urls(self, conf):
        """Recursively collect URL patterns that have no name."""
        nameless = []
        patterns = self.get_patterns(conf)
        for u in patterns:
            if self.has_patterns(u):
                # resolver/include: recurse into its nested patterns
                nameless.extend(self.find_nameless_urls(u))
            else:
                if u.name is None:
                    nameless.append(u)
        return nameless
    def get_patterns(self, conf):
        """Return the nested pattern list of `conf`, or [] if none."""
        for pa in self.pattern_attrs:
            if hasattr(conf, pa):
                return getattr(conf, pa)
        return []
    def has_patterns(self, conf):
        """True when `conf` carries nested URL patterns."""
        for pa in self.pattern_attrs:
            if hasattr(conf, pa):
                return True
        return False
| true |
3bc69f4602b2ede5815c7eb0f49e6e9af4a1bdbc | Python | Jananicolodi/Python_ReactNative | /Python_Base/Python_Base/aula_6/trabalho_2.py | UTF-8 | 2,357 | 3.25 | 3 | [] | no_license | # 2. Depois de escolhido o site, realize 10 testes ou mais dentro desse site
# utilizando a linguagem Python juntamente com o Selenium e o Unittest. Os
# seguintes elementos web devem ser testados ao longo dos 10 testes,
# devendo-se utilizar xpath ao menos em 4 desses para busca dos elementos.
# ● Imagens: <img />
# ● Divs: <div> </div>
# ● a: <a />
# ● inputs: <input />
# 3. Além disso, também devem ser utilizadas ao longo dos 10 testes, as
# seguintes funções do unittest:
# ● assertEqual ou assertNotEqual
# ● assertTrue ou assertFalse
# ● assertIsNone ou assertIsNotNone
# ● assertIn ou assertNotIn
# Os requisitos acima cumprem 80% da nota do trabalho, os outros 20% serão dados
# aos trabalhos que incluíram mais elementos web nas buscas do que os
# apresentados no item 2 deste trabalho, e que também incluírem mais tipos de testes
# do que os presentes no item 3 deste trabalho.
# Bom trabalho, e qualquer dúvida estou a disposição
from selenium import webdriver
import unittest
class TestesAsserts(unittest.TestCase):
    """Selenium/unittest exercises against the IFFar website."""
    # NOTE(review): @classmethod on setUp means `inst` receives the
    # class, so the driver becomes a shared class attribute; the
    # conventional forms are a plain setUp(self) or setUpClass(cls) --
    # confirm which is intended.
    @classmethod
    def setUp(inst):
        inst.driver = webdriver.Edge()
        inst.driver.maximize_window()
        inst.driver.get("https://www.iffarroupilha.edu.br/")
    def test_search_field_name(self):
        """The text input on the home page should be named 'q'."""
        search_field = self.driver.find_element_by_xpath("//input[@type='text']")
        search_field_name = search_field.get_attribute("name")
        self.assertEqual(search_field_name,'q')
    # The tests below are kept disabled; they target elements from a
    # different site and would fail against this page.
    # def test_country(self):
    #     country = self.driver.find_element_by_xpath("//span[@class='Q8LRLc']")
    #     self.assertNotEqual(country.text,'Inglaterra')
    # def test_search_field_length(self):
    #     search_field = self.driver.find_element_by_xpath("//input[@type='text']")
    #     search_field_max_length = int( search_field.get_attribute("maxlength"))
    #     self.assertTrue(search_field_max_length > 1000 )
    # def test_attribute(self):
    #     btn = self.driver.find_element_by_class_name('gNO89b')
    #     type_btn = btn.get_attribute("typ")
    #     self.assertIsNone(type_btn)
    # def test_btn_inside_listbtn(self):
    #     inputs = self.driver.find_element_by_class_name("input")
    #     btn_search = self.driver.find_element_by_name("btnk") [1]
    #     self.assertIn(btn_search,inputs)
if __name__ == '__main__':
    unittest.main()
| true |
def count_letter(sentence):
    """Return a dict mapping each character of *sentence* to the number
    of times it occurs.

    Bug/perf fix: the original called sentence.count(ch) for every
    character, which is O(n) per character (O(n*k) overall); a single
    pass with dict.get is O(n) and yields the identical result.
    """
    dicto = {}
    for ch in sentence:
        dicto[ch] = dicto.get(ch, 0) + 1
    return dicto
count_letter('hippo runs to us !')
| true |
ffb5bb1288c449646103f389303b4a33779caf92 | Python | husenzhang/reinvent_the_wheel | /fix_392_plates.py | UTF-8 | 922 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
import csv
from collections import defaultdict
import sys
"""reverse rows labels on a 392 plate
each A to P. Pseudocodes"""
def process(rows):
    """Pair each value with the label from the mirrored row position,
    i.e. reverse the order of the labels while keeping the values where
    they are."""
    column_labels, column_values = zip(*rows)
    mirrored = tuple(reversed(column_labels))
    return [pair for pair in zip(mirrored, column_values)]
if __name__ == '__main__':
    # Usage: fix_392_plates.py <input.csv> <output.csv>
    filin = sys.argv[1]
    fileout = sys.argv[2]
    with open(filin, 'rt') as fr:
        csr = csv.reader(fr)
        rows = [row for row in csr]
    # group rows by the first character of their label column
    # (assumes the label is row[0] -- confirm input format)
    start_with = {row[0][0] for row in rows}
    dd = defaultdict(list)
    for row in rows:
        for sw in start_with:
            if row[0].startswith(sw):
                dd[sw].append(row)
    # reverse the labels within each group, then flatten and sort by label
    new_rows = [process(dd[k]) for k in dd]
    new_rows = [row for sublist in new_rows for row in sublist]
    with open(fileout, 'wt') as fw:
        csw = csv.writer(fw)
        csw.writerows(sorted(new_rows, key=lambda x: x[0]))
| true |
fa1ca53e7e791c33bfa2d82ea0f9a4d163c7a1f3 | Python | ilyankou/gcb-visualizations | /ilyas experiments/mds/mds.py | UTF-8 | 992 | 2.703125 | 3 | [] | no_license | import csv
from numpy import genfromtxt
from collections import OrderedDict
from sklearn import manifold
from sklearn.metrics import euclidean_distances
# Choose columns to be used in PCA:
COLUMNS = OrderedDict([
('total', 4),
('given', 5),
('yellow', 6),
('purple', 7),
('orange', 8),
('blue', 9),
('green', 10),
('abstractions', 11),
('ifs', 12),
('ifelse', 13),
('loops', 14),
('lists', 15),
('proc', 16),
('proc_params', 17),
('variables', 18)
])
data = genfromtxt('pca/2017.csv',
dtype=None,
delimiter=',',
skip_header=1,
usecols=COLUMNS.values()
)
similarities = euclidean_distances(data)
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9,
random_state=3, dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
for i in range(0,len(pos)):
print("%f , %f" % (pos[i][0], pos[i][1]))
| true |
db49417e14db7d18cacbd1941e0576cdecb47e05 | Python | mlaizure/holbertonschool-higher_level_programming | /0x0C-python-almost_a_circle/models/rectangle.py | UTF-8 | 3,444 | 3.78125 | 4 | [] | no_license | #!/usr/bin/python3
"""Module with Rectangle class that inerits from Base class"""
from models.base import Base
class Rectangle(Base):
    """Rectangle shape with validated dimensions and position.

    width/height must be ints > 0; x/y must be ints >= 0.  Id handling
    is inherited from Base.  Supports area(), textual display() with
    '#', update() via args/kwargs and to_dictionary() serialization.
    """

    def __init__(self, width, height, x=0, y=0, id=None):
        """Validate and store all attributes, then let Base set the id."""
        self.width = width
        self.height = height
        self.x = x
        self.y = y
        # idiomatic cooperative call instead of Base.__init__(self, id)
        super().__init__(id)

    @staticmethod
    def _validate_int(name, value, allow_zero):
        """Shared validator (this logic was duplicated in all four
        setters).  Raises TypeError unless `value` is an int, and
        ValueError unless it is > 0 (or >= 0 when allow_zero is True).
        Error messages are identical to the original per-setter ones."""
        if type(value) is not int:
            raise TypeError("{} must be an integer".format(name))
        if allow_zero:
            if value < 0:
                raise ValueError("{} must be >= 0".format(name))
        elif value <= 0:
            raise ValueError("{} must be > 0".format(name))

    @property
    def width(self):
        """retrieves width of rectangle"""
        return self.__width

    @width.setter
    def width(self, value):
        """sets width of rectangle (int > 0)"""
        self._validate_int("width", value, False)
        self.__width = value

    @property
    def height(self):
        """retrieves height of rectangle"""
        return self.__height

    @height.setter
    def height(self, value):
        """sets height of rectangle (int > 0)"""
        self._validate_int("height", value, False)
        self.__height = value

    @property
    def x(self):
        """retrieves x positional coordinate of rectangle"""
        return self.__x

    @x.setter
    def x(self, value):
        """sets x positional coordinate of rectangle (int >= 0)"""
        self._validate_int("x", value, True)
        self.__x = value

    @property
    def y(self):
        """retrieves y positional coordinate of rectangle"""
        return self.__y

    @y.setter
    def y(self, value):
        """sets y positional coordinate of rectangle (int >= 0)"""
        self._validate_int("y", value, True)
        self.__y = value

    def area(self):
        """returns the area value of the Rectangle instance"""
        return self.__width * self.__height

    def display(self):
        """prints in stdout the Rectangle instance with the char #,
        offset by y leading newlines and x leading spaces per row"""
        pad_top = "\n" * self.__y
        row = " " * self.__x + "#" * self.__width + "\n"
        print(pad_top + row * self.__height, end="")

    def __str__(self):
        """returns '[Rectangle] (<id>) <x>/<y> - <width>/<height>'"""
        return "[Rectangle] ({}) {}/{} - {}/{}".format(
            self.id, self.__x, self.__y, self.__width, self.__height)

    def update(self, *args, **kwargs):
        """assigns arguments to attributes: positional args are taken in
        the order id, width, height, x, y; otherwise kwargs are used"""
        if args:
            for attr, value in zip(("id", "width", "height", "x", "y"), args):
                setattr(self, attr, value)
        elif kwargs:
            for key, value in kwargs.items():
                setattr(self, key, value)

    def to_dictionary(self):
        """returns dictionary representation of a Rectangle"""
        return {'id': self.id, 'width': self.__width, 'height': self.__height,
                'x': self.__x, 'y': self.__y}
| true |
class Cell:
    """A cell made up of ``cell`` sub-units.

    Supports +, -, *, / between two cells (returning plain numbers, or a
    Russian error string for invalid operations) and make_order(), which
    lays the units out as '@' characters, five per row.
    """

    def __init__(self, cell):
        # unit count is always stored as an int
        self.cell = int(cell)

    def __add__(self, other):
        """Merging two cells: total number of units."""
        return self.cell + other.cell

    def __sub__(self, other):
        """Unit difference; an error message when it would not be > 0."""
        if self.cell <= other.cell:
            return "Ошибка! Разность количества ячеек двух клеток не больше нуля"
        return self.cell - other.cell

    def __str__(self):
        return f"{round(self.cell)}"

    def __mul__(self, other):
        """Product of the unit counts."""
        return self.cell * other.cell

    def __truediv__(self, other):
        """Rounded ratio of unit counts; an error message on division
        by a cell with no units."""
        if other.cell <= 0:
            return "Ошибка! Ячейки клетки деляться на ноль клеток"
        return round(self.cell / other.cell)

    def make_order(self):
        """Render the units as rows of at most five '@' characters."""
        rows = []
        for offset in range(0, self.cell, 5):
            rows.append("@" * min(5, self.cell - offset))
        return "\n".join(rows)
# Demo: exercises every operator overload (output strings are Russian UI text)
c = Cell(12)
c2 = Cell(7)
print(f"Первая клетка имеет {c} клеток")
print(f"Вторая клетка имеет {c2} клеток")
print(f"Объединение двух клеток {c + c2}")
print(f"Разность двух клеток {c - c2}")
print(f"Результат деления двух клеток {c / c2}")
print(c.make_order())
| true |
c676a6f61b9555b654fa40c6a372123d1aa13b55 | Python | mamemilk/acrc | /プログラミングコンテストチャレンジブック_秋葉,他/src/2-2-1_02_aoj__TLE__.py | UTF-8 | 842 | 2.984375 | 3 | [] | no_license | # https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=DPL_1_A&lang=jp
#
# Splitting every amount n into two smaller amounts as sketched below
# exceeds the judge's time limit (TLE):
# 1 :
# 2 :
# 3 : 1 + 2, 3
# 4 : 1 + 3, 2 + 2, 4
# 5 : 1 + 4, 2 + 3, 5
# ....
#
#
# Even capping the first term of each split at the largest coin value
# (as done below) is still too slow.
val, m = map(int, input().split())
coins = list(map(int, input().split()))
max_coin = max(coins)
# candid[n]: fewest pieces needed to build amount n, computed by
# combining two smaller amounts.  (m is parsed but never used below.)
candid = [0] * (val + 1)
for n in range(1, val+1):
    if n == 1:
        candid[n] = 1
    else:
        if n in coins:
            candid[n] = 1
            continue
        pre = float('inf')
        # for (l,r) in [(i, n-i) for i in range(1, n//2+1)]:
        for (l,r) in [(i, n-i) for i in range(1, min(n//2+1, max_coin+1))]:
            pre = min(candid[l]+candid[r], pre)
        candid[n] = pre
print(candid[val])
d9cbf1961b6053d94c48bbb81ffbd906cdf039b4 | Python | mrzacarias/MoonBunny | /parse.py | UTF-8 | 2,530 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
keywords = ["MUSIC_FILE", "TITLE", "BPM", "DIFFICULTIES", "ARTIST"]
class InvalidKeyword(Exception):
    """Raised when a header.lvl line uses a key outside `keywords`."""
    pass
LEVEL_DIR = "./levels"

def level_list():
    """Return the names of LEVEL_DIR subdirectories that contain a
    ``header.lvl`` file, i.e. the loadable levels.

    Bug fix: removed a stray no-op loop that iterated os.listdir() once
    for no effect before building the real list.
    """
    return [f for f in os.listdir(LEVEL_DIR)
            if os.path.isdir(os.path.join(LEVEL_DIR, f))
            and os.path.exists(os.path.join(LEVEL_DIR, f, 'header.lvl'))]
def level_header(name):
    """Parse ``<LEVEL_DIR>/<name>/header.lvl`` into a metadata dict.

    Every line must be ``KEY=value`` with KEY in `keywords`; BPM is
    converted to float, MUSIC_FILE is resolved relative to the level
    directory, and the directory name is stored under "NAME".
    Raises InvalidKeyword / ValueError on malformed input.
    """
    level_file = open(os.path.join(LEVEL_DIR, name, 'header.lvl'))
    try:
        level_info = {}
        for i, line in enumerate(level_file):
            key, value = line.split("=")
            value = value.strip()
            if key not in keywords:
                raise InvalidKeyword("Invalid keyword '%s' found when parsing level %s at line %d" % (key, name, i))
            if key == "BPM":
                try:
                    value = float(value)
                except ValueError:
                    # NOTE(review): this interpolates the file *object*
                    # into the message (prints its repr); the path was
                    # probably intended.
                    raise ValueError("Error parsing line %d from file '%s': could not convert (%s) to float" % (i, level_file, value))
            if key == "MUSIC_FILE":
                value = os.path.join(LEVEL_DIR, name, value)
            level_info[key]=value
        level_info["NAME"] = os.path.split(name)[1]
        return level_info
    finally:
        level_file.close()
def level_rings(levelname, diff):
    """Parse ``<LEVEL_DIR>/<levelname>/<diff>.rng`` into a list of
    ``((x, y), absolute_time, button)`` tuples.

    Each data line is ``x,y;delta_time;button``; the relative time
    deltas are accumulated into absolute timestamps.  Blank lines and
    lines starting with '#' are skipped.
    """
    ring_list = []
    time_ant = 0.0
    ring_file = "%s.rng" % (diff)
    path = os.path.join(LEVEL_DIR, levelname, ring_file)
    level_file = open(path)
    try:
        for i, line in enumerate(level_file):
            if line.strip() and not line.startswith('#'):
                pos_str, time_str, button = line.split(";")
                x, y = pos_str.split(",")
                try:
                    f_x, f_y = float(x), float(y)
                except ValueError:
                    # Bug fix: the original interpolated the undefined
                    # name `level_addr` here, so a parse error raised a
                    # NameError instead of the intended ValueError.
                    raise ValueError("Error parsing line %d from file '%s': could not convert (%s, %s) to float tuple" % (i, path, x, y))
                try:
                    time_ant += float(time_str)
                except ValueError:
                    # Bug fix: same undefined `level_addr` reference.
                    raise ValueError("Error parsing line %d from file '%s': could not convert (%s) to float" % (i, path, time_str))
                ring_list.append(((f_x, f_y), time_ant, button.strip()))
        return ring_list
    finally:
        level_file.close()
#print level_list()
| true |
4d60b80a33ac63f458516210bf0c21e4a01c5c2d | Python | ultraman-agul/python_demos | /函数/w8_4.py | UTF-8 | 986 | 3.734375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Author : agul
# @Date : 2020/11/11 10:53
# Software : PyCharm
# version: Python 3.7
# @File : w8_4.py
# description :编写与字符串对象的find方法功能相似的函数find(srcString, substring, start, end),
# 作用是在srcString串的下标start到下标end之间的片段中寻找subString串的所有出现。
# 如果有多处出现,各下标位置用西文逗号','隔开。如果一次都没有出现,则输出"none"。
# Input format: "<srcString> <subString> <start> <end>" on one line.
str1 = input().split(" ")
srcString = str1[0]
subString = str1[1]
start = int(str1[2])
end = int(str1[3])
def f1(srcString, subString, start, end):
    """Print every index at which subString occurs inside
    srcString[start:end] (indices relative to the slice, overlapping
    matches included), joined by commas; print "none" when it never
    occurs."""
    window = srcString[start:end]
    if subString in window:
        width = len(subString)
        hits = [str(pos) for pos in range(len(window))
                if window[pos:pos + width] == subString]
        print(",".join(hits))
    else:
        print("none")
f1(srcString, subString, start, end)
| true |
bc99a7599f485e168bce114e6d38776df5cd09b2 | Python | SalmaMeniawy/Dusty-phillips-Python3-OOP | /case_study_4/interface.py | UTF-8 | 2,076 | 3.375 | 3 | [] | no_license | import auth
class Editor:
    """Command-line front end for the auth module: log in, then run the
    permission-checked "test"/"change" actions, or quit."""

    def __init__(self):
        # username of the authenticated user (None until login succeeds)
        self.username = None
        # Dispatch table for menu().  Bug fix: the menu advertises the
        # command "quit" but only the misspelled key "quite" existed, so
        # typing the advertised command always failed; both spellings
        # now dispatch to the same handler.
        self.menu_map = {
            "login": self.login,
            "test": self.test,
            "change": self.change,
            "quit": self.quite,
            "quite": self.quite,
        }

    def login(self):
        """Prompt for credentials until the auth module accepts them."""
        logged_in = False
        while not logged_in:
            username = input("username : ")
            password = input("password : ")
            try:
                auth.authenticator.login(username, password)
            except auth.InvalidUsername:
                print("Sorry user name does not exist ")
            except auth.InvalidPassword:
                print("sorry password is wrong")
            else:
                self.username = username
                # Bug fix: the flag was never set, so the loop kept
                # prompting forever even after a successful login.
                logged_in = True

    def is_permitted(self, permission):
        """Return True when the logged-in user holds `permission`;
        print a diagnostic and return False otherwise."""
        try:
            auth.authorizor.check_permission(permission, self.username)
        except auth.NotLoggedInError:
            print("{} is not logged in ".format(self.username))
            return False
        except auth.NotPermittedError:
            print("{} can not {} ".format(self.username, permission))
            return False
        else:
            return True

    def test(self):
        """Run the "test program" action if permitted."""
        if self.is_permitted("test program"):
            print("testing program now")

    def change(self):
        """Run the "change program" action if permitted."""
        if self.is_permitted("change program"):
            print("Changing program now")

    def quite(self):
        """Exit the application."""
        raise SystemExit()

    def menu(self):
        """Main loop: show the menu, read a command and dispatch it."""
        while True:
            print("""
            Please enter a command:
            \tlogin\tLogin
            \ttest\tTest the program
            \tchange\tChange the program
            \tquit\tquite
            """)
            answer = input("Enter command ").lower()
            try:
                func = self.menu_map[answer]
            except KeyError:
                print("{} is not valied option ".format(answer))
            else:
                func()
            finally:
                print("thank you for testing the auth module")
Editor().menu() | true |
12a5ca46deb0b0c16b8ede2fadda244e8a090898 | Python | labist/plottr | /test/scripts/h5py_concurrent_rw_swmr.py | UTF-8 | 3,216 | 2.984375 | 3 | [
"MIT"
] | permissive | """This is a test script for swmr data write/read.
While this complies with the HDF5 instructions, it causes issues on some Windows machines.
Also, it does seem to cause issues with network drives (this is documented by HDF5).
"""
from multiprocessing import Process
import time
from datetime import datetime
from pathlib import Path
import h5py
import numpy as np
# which path to run this on.
# NOTE(review): the network-drive assignment below is immediately
# overridden by the local path, so it is effectively dead code.
filepath = Path(r'Z:\swmr-testing\testdata.h5')
filepath = Path('./testdata.h5')
def mkdata(start, nrows, npts=1):
    """Return an (nrows, npts) float array whose row i is filled with
    the value start + i (row values broadcast across the npts columns)."""
    row_values = np.arange(start, start + nrows).reshape(-1, 1)
    return row_values * np.ones(npts)
def info(sender, msg):
    """Print a timestamped progress line for the given sender."""
    stamp = datetime.now()
    print("{} : {} : {}".format(stamp, sender, msg))
class Writer(Process):
    """Producer process: (re)creates the HDF5 file and appends rows in
    SWMR mode so a concurrent reader can follow the growing dataset."""
    # dataset geometry and pacing
    ncols = 3
    nrows_per_rep = 1000
    nreps = 100
    delay = 0.01
    def __init__(self):
        super().__init__()
    def run(self):
        """Create my_group/my_dataset, enable SWMR, then repeatedly
        resize + append + flush with a small delay between appends."""
        filepath.unlink(missing_ok=True)
        arr = mkdata(0, self.nrows_per_rep, self.ncols)
        info('writer', 'starting to write data')
        with h5py.File(str(filepath), 'a', libver='latest') as f:
            g = f.create_group('my_group')
            ds = g.create_dataset('my_dataset', maxshape=(None, self.ncols), data=arr)
            # from here on the file may be opened by SWMR readers
            f.swmr_mode = True
            for i in range(self.nreps):
                shp = list(ds.shape)
                arr = mkdata((i+1)*self.nrows_per_rep, self.nrows_per_rep, self.ncols)
                shp[0] += arr.shape[0]
                info('writer', f"Resizing to {tuple(shp)}")
                ds.resize(tuple(shp))
                info('writer', f"Adding data")
                ds[-arr.shape[0]:, ...] = arr
                ds.flush()
                info('writer', f"...Flushed")
                time.sleep(self.delay)
class Reader(Process):
    """Consumer process: polls the dataset written by Writer via SWMR.

    close_always=True reopens the file on every poll; False keeps one
    handle open and only refresh()es the dataset.  With maxruntime=None
    the loop runs until the parent kills the process.
    """
    delay = 0.001
    maxruntime = None
    close_always = True
    def run(self):
        t0 = time.time()
        info('reader', 'starting to read data')
        if not self.close_always:
            # single long-lived SWMR handle
            f = h5py.File(str(filepath), 'r', libver='latest', swmr=True)
            assert f.swmr_mode
        while True:
            if self.close_always:
                # open/close around every poll
                with h5py.File(str(filepath), 'r', libver='latest', swmr=True) as f:
                    assert f.swmr_mode
                    ds = f['my_group/my_dataset']
                    ds.refresh()
                    info('reader', f'shape {ds.shape}')
            else:
                ds = f['my_group/my_dataset']
                ds.refresh()
                info('reader', f'shape {ds.shape}')
            if self.delay is not None:
                time.sleep(self.delay)
            if self.maxruntime is not None and time.time() - t0 > self.maxruntime:
                break
        if not self.close_always:
            f.close()
if __name__ == '__main__':
    # Start the writer first so the file exists (and is in SWMR mode)
    # before the reader opens it.
    writer = Writer()
    reader = Reader()
    reader.maxruntime = None
    reader.delay = 0.01
    reader.close_always = True
    writer.start()
    time.sleep(0.5)  # give the writer time to create the file
    reader.start()
    writer.join()
    # With maxruntime=None the reader loops forever, so it must be killed.
    reader.kill()
    # Final sanity read of the fully written dataset.
    with h5py.File(filepath, 'r', libver='latest', swmr=True) as f:
        ds = f['my_group/my_dataset']
        info('main', f'Retrieved shape {ds.shape}')
| true |
cdf3502f342390e6cd084c0df66f6f7f10160ec4 | Python | AlexMeinke/certified-certain-uncertainty | /utils/adversarial.py | UTF-8 | 10,271 | 2.875 | 3 | [] | no_license | import torch
import torch.nn.functional as F
import torch.utils.data as data_utils
def gen_adv_noise(model, device, seed, epsilon=0.1, restarts=1, perturb=False,
steps=40, step_size=0.01, norm='inf'):
'''
Runs an adversarial noise attack in l_inf norm
Maximizes the confidence in some class (different from adversarial
attack which maximizes confidence in some wrong class)
'''
model.eval()
batch_size = seed.shape[0]
orig_data = torch.tensor(seed.detach().cpu().numpy()).to(device)
if restarts>1:
data = seed.clone()
losses = 1e5*torch.ones(batch_size, device=device)
for _ in range(restarts):
current_data, current_losses = gen_adv_noise(model, device, seed, epsilon=epsilon,
restarts=1, perturb=True, steps=steps)
with torch.no_grad():
index = losses > current_losses
data[index] = current_data[index]
losses[index] = current_losses[index]
return data, losses
else:
with torch.no_grad():
alpha = step_size * torch.ones(batch_size,1,1,1, device=device)
orig_data = seed.to(device)
prev_data = seed.to(device)
data = seed.to(device).requires_grad_()
if perturb:
perturbation = epsilon * (torch.rand_like(prev_data) - .5)
prev_data += perturbation
data += perturbation
prev_losses = 1e5 * torch.ones(batch_size, device=device)
prev_grad = torch.zeros_like(seed, device=device)
for _ in range(steps):
with torch.enable_grad():
y = model(data)
losses = -y.max(1)[0]
#losses = y.sum(1)
grad = -torch.autograd.grad(losses.sum(), data)[0]
with torch.no_grad():
if norm=='inf':
grad = grad.sign()
else:
grad = grad / grad.norm(p=norm)
regret_index = losses > prev_losses
alpha[regret_index] /= 2.
alpha[~regret_index] *= 1.1
data[regret_index] = prev_data[regret_index]
grad[regret_index] = prev_grad[regret_index]
prev_losses = losses
prev_data = data
prev_grad = grad
data += alpha * grad
delta = data - orig_data
if norm=='inf':
delta = torch.clamp(delta, -epsilon, epsilon)
else:
N = delta.norm(dim=-1, p=norm)
index = N > epsilon
delta[index] *= (epsilon / N[index])[:, None]
data = torch.clamp(orig_data + delta, 0, 1).requires_grad_()
with torch.no_grad():
y = model(data)
losses = -y.max(1)[0]
orig_losses = -model(orig_data).max(1)[0]
index = orig_losses < losses
data[index] = orig_data[index]
losses[index] = losses[index]
return torch.clamp(data, 0, 1), losses
def gen_pca_noise(model, device, seed, pca, epsilon, restarts=1, perturb=False, steps=40, alpha=0.01):
    '''
    Runs an adversarial noise attack in Mahalanobis space.

    Takes a models.MyPCA-like object (``trans`` / ``inv_trans``) to get the
    orientation of the ellipsoid. Since simultaneous projection onto a
    rotated ellipsoid and the [0,1]^D box is challenging, the coordinate
    system is rotated back and forth with one projection per gradient step;
    10 extra alternating projection steps are run at the end.

    Args:
        model: classifier returning per-class scores; only evaluated here.
        device: device for attack bookkeeping tensors.
        seed: starting images, shape (batch, C, H, W); left unmodified.
        pca: object with ``trans`` (image -> PCA space) and ``inv_trans``.
        epsilon: per-sample ellipsoid radii, tensor of shape (batch,).
        restarts: number of random restarts; best result per sample kept.
        perturb: if True, start from a random point inside the ball.
        steps: number of gradient steps.
        alpha: initial step size (adapted per sample during the attack).

    Returns:
        (data, losses): attacked images and final per-sample losses.

    Fixes relative to the original implementation:
        * ``prev_data_pca`` is a snapshot (clone) instead of an alias of
          ``data_pca``, so reverting a regretted step actually restores
          the previous point (the in-place update used to leak into it).
        * The final revert-to-original branch updates ``losses`` from
          ``orig_losses`` (``losses[index] = losses[index]`` was a no-op).
    '''
    model.eval()
    batch_size = seed.shape[0]
    orig_data = seed.clone()
    if restarts > 1:
        # Keep the best (lowest-loss) attack over several randomized restarts.
        data = seed.clone()
        losses = 1e5 * torch.ones(batch_size, device=device)
        for _ in range(restarts):
            current_data, current_losses = gen_pca_noise(model, device, seed, pca, epsilon,
                                                         restarts=1, perturb=True, steps=steps, alpha=alpha)
            with torch.no_grad():
                index = losses > current_losses
                data[index] = current_data[index]
                losses[index] = current_losses[index]
        return data, losses
    with torch.no_grad():
        # Per-sample adaptive step size in PCA space.
        alpha = alpha * torch.ones(batch_size, 1, device=device)
        orig_data_pca = pca.trans(seed.clone())
        prev_data_pca = pca.trans(seed.clone()).to(device)
        data_pca = pca.trans(seed.clone()).requires_grad_()
        if perturb:
            perturbation = epsilon[:, None] * (torch.rand_like(prev_data_pca) - .5)
            prev_data_pca += perturbation
            data_pca += perturbation
        prev_losses = 1e5 * torch.ones(batch_size, device=device)
        prev_grad = torch.zeros_like(data_pca, device=device)
        for _ in range(steps):
            with torch.enable_grad():
                y = model(pca.inv_trans(data_pca))
                losses = -y.max(1)[0]
                grad = -torch.autograd.grad(losses.sum(), data_pca)[0]
            with torch.no_grad():
                # Samples that got worse: halve their step and undo it;
                # samples that improved: grow the step slightly.
                regret_index = losses > prev_losses
                alpha[regret_index] /= 2.
                alpha[~regret_index] *= 1.1
                data_pca[regret_index] = prev_data_pca[regret_index]
                grad[regret_index] = prev_grad[regret_index]
                prev_losses = losses
                prev_data_pca = data_pca.detach().clone()
                prev_grad = grad
                # Gradient step, project onto the per-sample epsilon ball in
                # PCA space, then clamp the image back into [0, 1].
                data_pca = data_pca + alpha * grad
                delta = data_pca - orig_data_pca
                N = delta.norm(dim=-1)
                index = N > epsilon
                delta[index] *= (epsilon[index] / N[index])[:, None]
                data_pca = orig_data_pca + delta
                data = pca.inv_trans(data_pca)
                data = torch.clamp(data, 0, 1)
                data_pca = pca.trans(data).requires_grad_()
    # Alternating projections: the box clamp can move the point outside the
    # ellipsoid again, so project a few more times onto each set in turn.
    for _ in range(10):
        with torch.no_grad():
            delta = data_pca - orig_data_pca
            N = delta.norm(dim=-1)
            index = N > epsilon
            delta[index] *= (epsilon[index] / N[index])[:, None]
            data_pca = orig_data_pca + delta
            data = pca.inv_trans(data_pca)
            data = torch.clamp(data, 0, 1).detach()
            data_pca = pca.trans(data)
    with torch.no_grad():
        # If the attack ended up worse than the unperturbed input, keep the
        # original sample (and its loss).
        y = model(data)
        losses = -y.max(1)[0]
        orig_losses = -model(orig_data).max(1)[0]
        index = orig_losses < losses
        data[index] = orig_data[index]
        losses[index] = orig_losses[index]
    return data, losses
def gen_adv_sample(model, device, seed, label, epsilon=0.1, steps=40, step_size=0.001):
    '''
    Runs an adversarial attack in the l_inf norm: maximizes the largest
    score among the 9 *incorrect* classes for each sample.
    (Not used for the results in https://arxiv.org/abs/1909.12180.)

    Args:
        model: classifier producing 10 per-class scores per sample.
        device: device for attack bookkeeping tensors.
        seed: starting images, shape (batch, C, H, W); left unmodified.
        label: ground-truth labels, shape (batch,), values in 0..9.
        epsilon: l_inf radius of the perturbation ball.
        steps: number of gradient steps.
        step_size: initial per-sample step size (halved on regret steps).

    Returns:
        The attacked images, detached and clamped to [0, 1].

    Fixes relative to the original implementation:
        * ``seed`` is cloned before use; ``seed.to(device)`` returns the same
          tensor when seed is already on ``device``, so orig/prev/data all
          aliased the caller's tensor and the attack mutated its input.
        * ``prev_data`` is a snapshot (clone) instead of an alias of
          ``data``, so reverting a regretted step actually restores the
          previous point.
    '''
    model.eval()
    # Boolean mask selecting, per row, the 9 classes that are NOT the label.
    correct_index = label[:, None] != torch.arange(10)[None, :]
    with torch.no_grad():
        batch_size = seed.shape[0]
        alpha = step_size * torch.ones(batch_size, 1, 1, 1, device=device)
        orig_data = seed.detach().clone().to(device)
        prev_data = orig_data.clone()
        data = orig_data.clone().requires_grad_()
        prev_losses = -100000. * torch.ones(batch_size, device=device)
        prev_grad = torch.zeros_like(orig_data)
        for _ in range(steps):
            with torch.enable_grad():
                y = model(data)
                # Objective (maximized): best score among the wrong classes.
                losses = y[correct_index].view(batch_size, 9).max(1)[0]
                losses.sum().backward()
            with torch.no_grad():
                grad = data.grad.sign()
                # Halve the step and undo it for samples that got worse.
                regret_index = losses < prev_losses
                alpha[regret_index] /= 2.
                data = data.detach()
                data[regret_index] = prev_data[regret_index]
                grad[regret_index] = prev_grad[regret_index]
                prev_losses = losses
                prev_data = data.clone()
                prev_grad = grad
                # Gradient ascent step, then projection onto the epsilon
                # ball and the [0, 1] image box.
                data = data + alpha * grad
                delta = torch.clamp(data - orig_data, -epsilon, epsilon)
                data = torch.clamp(orig_data + delta, 0, 1).requires_grad_()
    return data.detach()
# deprecated functions that help evaluate OOD detection on adversaries
# in the same pipeline as regular datasets
def create_adv_noise_loader(model, dataloader, device, batches=50):
    """Build a DataLoader of adversarial-noise images from up to ``batches``
    batches of ``dataloader``.

    Targets are all-zero dummy vectors: these samples are treated as
    out-of-distribution by design.

    Fix: ``gen_adv_noise`` returns a ``(data, losses)`` tuple, so the data
    tensor has to be unpacked before calling ``.detach()`` — the old code
    called ``.detach()`` on the tuple and raised an AttributeError.
    """
    new_data = []
    for batch_idx, (data, target) in enumerate(dataloader):
        if batch_idx > batches:
            break
        adv_data, _ = gen_adv_noise(model, device, data, epsilon=0.3, steps=200)
        new_data.append(adv_data.detach().cpu())
    new_data = torch.cat(new_data, 0)
    adv_noise_set = data_utils.TensorDataset(new_data, torch.zeros(len(new_data), 10))
    return data_utils.DataLoader(adv_noise_set, batch_size=100, shuffle=False)
def create_adv_sample_loader(model, dataloader, device, batches=50):
    """Collect adversarial examples for up to ``batches`` batches of
    ``dataloader`` and wrap them in a new DataLoader (zero dummy targets)."""
    collected = []
    for batch_idx, (data, target) in enumerate(dataloader):
        if batch_idx > batches:
            break
        adv = gen_adv_sample(model, device, data, target, epsilon=0.3, steps=200)
        collected.append(adv.detach().cpu())
    stacked = torch.cat(collected, 0)
    dummy_targets = torch.zeros(len(stacked), 10)
    dataset = data_utils.TensorDataset(stacked, dummy_targets)
    return data_utils.DataLoader(dataset, batch_size=100, shuffle=False)
| true |
5b9745509d0e1bd0be426fd6b888201109d9ac97 | Python | JudgementH/scanner | /graphic/edge.py | UTF-8 | 1,800 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/10/14 13:11
'利用canny算法提取边缘'
__author__ = 'Judgement'
import cv2
from graphic import transform
def getOutline(img_src):
    """Read the image at *img_src*, find its dominant quadrilateral contour
    and return the perspective-corrected (top-down) crop of that region."""
    # Takes an image path; returns the perspective-transformed contour region.
    img = cv2.imread(img_src)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Denoise
    blur = cv2.GaussianBlur(gray, (5, 5), sigmaX=0)
    # Binarize edges
    binary = cv2.Canny(blur, threshold1=30, threshold2=120)
    # Erode
    erode = cv2.erode(binary, kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 1)))
    # Dilate
    dilate = cv2.dilate(erode, kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    # Contour extraction
    contours, hierarchy = cv2.findContours(dilate, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    # Initial outline: draw the largest contour on a black canvas
    outline = cv2.multiply(img, 0)
    cv2.drawContours(outline, contours, 0, (255, 255, 255), 2)
    outline = cv2.cvtColor(outline, cv2.COLOR_RGB2GRAY)
    # Re-extract contours from the drawn outline
    contours, hierarchy = cv2.findContours(outline, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    outline = cv2.multiply(img, 0)
    cv2.drawContours(outline, contours, 0, (255, 255, 255), 2)
    arcLength = cv2.arcLength(contours[0], True)
    # NOTE(review): assumes the polygon approximation yields exactly 4 corner
    # points — reshape(4, 2) below fails otherwise; confirm for real inputs.
    points = cv2.approxPolyDP(contours[0], 0.02 * arcLength, True)
    res = cv2.multiply(img, 0)
    points = points.reshape(4, 2)
    cv2.drawContours(res, [points], -1, (255, 255, 255), 2)
    res = transform.four_point_transform(img, points)
    return res
if __name__ == '__main__':
    # Manual smoke test: extract the outline of a sample image and display it
    # until a key is pressed.
    outline = getOutline("../img/originImage.jpg")
    cv2.imshow("outline", outline)
    cv2.waitKey()
| true |
5fbeb7cc4543d39a130d07a68a52acca08b4e45b | Python | tamuraryo0126/programing | /at/ABC/ABC_028_B.py | UTF-8 | 211 | 3.265625 | 3 | [] | no_license | in_string=input()
st_list=list(in_string)
st_dict={"A":0,"B":0,"C":0,"D":0,"E":0,"F":0}
for st in st_list:
st_dict[st]+=1
print(st_dict["A"],st_dict["B"],st_dict["C"],st_dict["D"],st_dict["E"],st_dict["F"]) | true |
5cb8d929e0d754bfca9a531dfedea219f3466270 | Python | visor517/GeekBrains_python | /lesson4/task2.py | UTF-8 | 369 | 3.9375 | 4 | [] | no_license | # 2. Представлен список чисел. Необходимо вывести элементы исходного списка, значения которых больше предыдущего элемента.
my_list = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55, 77]
print([my_list[i] for i in range(1,len(my_list)) if my_list[i] > my_list[i-1]]) | true |
21e36af89bade6c76c0366ac9f9399db9997bdba | Python | werble7/exercitando-python | /lucro_prejuizo.py | UTF-8 | 353 | 3.765625 | 4 | [] | no_license | valorcompra = float(input("Digite o valor de compra: "))
valorvenda = float(input("Digite o valor de venda: "))
balanco = valorvenda - valorcompra
if balanco > 0:
print("Você teve um lucro de ", balanco)
elif balanco < 0:
print("Você teve um prejuízo de ", balanco)
else:
print("Os valores são iguais, sem lucro nem prejuízo")
| true |
ba0fab4c26be1d0328a4b0aeba23a3bce807eb46 | Python | adsehgal/Verilog_Projects | /8x8_Led_Matrix_Cycler/case_create.py | UTF-8 | 1,221 | 2.59375 | 3 | [] | no_license | def one_hot(num, a):
if num == 0:
return "XXXXXXX" + str(a)
elif num == 1:
return "XXXXXX" + str(a) + "X"
elif num == 2:
return "XXXXX" + str(a) + "XX"
elif num == 3:
return "XXXX" + str(a) + "XXX"
elif num == 4:
return "XXX" + str(a) + "XXXX"
elif num == 5:
return "XX" + str(a) + "XXXXX"
elif num == 6:
return "X" + str(a) + "XXXXXX"
elif num == 7:
return str(a) + "XXXXXXX"
# for full in range(64):
# Emit one Verilog if-block per LED. LED i sits at column i % 8 and
# row i // 8; divmod replaces the original manual x/y counters (identical
# output, no mutable counter state).
for i in range(64):
    y, x = divmod(i, 8)
    print("if(led_num[" + str(i) + "]) begin")
    print("    if(rst_others)begin")
    print("        col = 8'hFF;")
    print("        row = 8'h00;")
    print("    end")
    print("    col[" + str(x) + "] = 1'b0;")
    print("    row[" + str(y) + "] = 1'b1;")
    print("end\n")
# if(led_num[i]) begin
# if(rst_others)begin
# col = 8'hFF;
# row = 8'h00;
# end
# col[x] = 1'b0;
# row[y] = 1'b1;
# end
| true |
f8ecd993688ad451e4065381cd6a89e6c654d52b | Python | cody33231/learnpython | /py_lianxi/2-5-b.py | UTF-8 | 54 | 3.078125 | 3 | [] | no_license | for c in range(1,11):
print "loop c is %d" %(c)
| true |
871a389fd4a817fd600ff41bd39883977fd4b462 | Python | CMPUT466F16T08/otto_classify | /ensemble/get_probs.py | UTF-8 | 9,112 | 2.90625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import sklearn
import time
import csv
import cPickle as pickle
from math import log
#from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
'''
Reference: http://mlwave.com/kaggle-ensembling-guide/
1.Split the training set into two disjoint sets.
2.Train several base learners on the first part.
3.Test the base learners on the second part.
4.Using the predictions from 3) as the inputs, and the correct responses as the outputs, train a higher level learner.
'''
'''
Class 0 log loss = 1.81006222877
Class 1 log loss = 0.500747771575
Class 2 log loss = 1.18498811702
Class 3 log loss = 1.94049107604
Class 4 log loss = 0.329764348788
Class 5 log loss = 0.285778343501
Class 6 log loss = 1.44070043069
Class 7 log loss = 0.404446699186
Class 8 log loss = 0.560933894566
RF: Logloss (with calibration using isotonic) = 0.670662403093
'''
#from useful_functions import predit_result, cal_accuracy, class_accuracy, num_labels, log_loss_implement, write_pred_prob, write_pred_logloss, init_set, update_set
def predit_result(array):
    """Return, for every row of per-class scores, the index of its largest
    entry (the predicted class).

    Entries are passed through float() first, so rows read back from CSV
    as strings are handled too.  Ties resolve to the first (lowest) index,
    matching the original strict-'>' scan.

    (Name kept as-is -- "predit" is a typo, but callers depend on it.)
    """
    choice = []
    for line in array:
        values = [float(v) for v in line]
        choice.append(values.index(max(values)))
    return choice
def cal_accuracy(predict, ytest):
    """Fraction of positions where ``predict`` matches ``ytest``.

    float() keeps the division exact under Python 2's integer division
    (this module still targets Python 2 -- see the cPickle import).
    Raises ZeroDivisionError for an empty ``predict``, like the original.
    """
    correct = sum(1 for p, y in zip(predict, ytest) if p == y)
    return float(correct) / len(predict)
def class_accuracy(predict, ytest):
    """Print the per-class accuracy of ``predict`` against ``ytest``.

    Classes are keyed by the true label; output order follows dict
    iteration order.  (Python 2 print statements -- this module targets
    Python 2.)
    """
    # Tally correct/false predictions per true class.
    classes = {}
    for i in range(len(predict)):
        c = ytest[i]
        if c not in classes:
            classes[c] = {'correct':0,
                          'false':0}
        if c == predict[i]:
            classes[c]['correct'] += 1
        else:
            classes[c]['false'] += 1
    for cla in classes:
        acc = float(classes[cla]['correct'])/(classes[cla]['false'] + classes[cla]['correct'])
        print 'Class', str(cla), 'accuracy =', str(acc)
def num_labels(actual):
    """Number of distinct labels in ``actual``.

    The original manual dict-based scan is equivalent to the size of the
    label set; labels must be hashable (they already were, as dict keys).
    """
    return len(set(actual))
# formula: http://www.exegetic.biz/blog/2015/12/making-sense-logarithmic-loss/
def log_loss_implement(actual, predicted, eps = 1e-15):
    """Multi-class logarithmic loss, plus the per-class breakdown.

    Args:
        actual: true labels, integers in 0..M-1 (M = number of classes).
        predicted: (N, M) array of predicted class probabilities.
        eps: probabilities are clipped into [eps, 1-eps] before taking logs.

    Return:
        (logloss, result_list): overall log loss and a list of
        [class, class log loss] pairs.

    NOTE(review): assumes every class in range(num_labels(actual)) occurs in
    ``actual`` at least once -- an absent class gives count == 0 and a
    ZeroDivisionError; also assumes labels are exactly 0..M-1.  Confirm
    against the callers.  (Python 2 print statement below.)
    """
    # Clip so log() never sees 0 or 1 exactly.
    predicted = np.minimum(np.maximum(predicted,eps),1-eps)
    sum1 = 0
    N = len(actual)
    M = num_labels(actual)
    result_list = []
    for j in range(M):
        sum2 = 0
        count = 0
        for i in range(N):
            # The conditional expression below is immediately overwritten by
            # the if/else; only the if/else assignment of y is effective.
            y = 1 if j==actual[i] else 0
            if j==actual[i]:
                y = 1
                count += 1
            else:
                y = 0
            p = predicted[i][j]
            # Only samples whose true class is j contribute (y is 0 otherwise).
            temp = y*log(p)
            sum2 += temp
        cla_logloss = (-1)*sum2/float(count)
        print 'Class', j, 'log loss =', cla_logloss
        result_list.append([j, cla_logloss])
        sum1 += sum2
    logloss = (-1)*sum1/float(N)
    return logloss, result_list
def write_pred_prob(probs,filename):
    """Write a Kaggle-style submission CSV: an id column (taken from
    '../test.csv') followed by the 9 class probability columns.

    Args:
        probs: (N, 9) array of class probabilities, row-aligned with the
            rows of '../test.csv'.
        filename (str): output CSV path.
    """
    # NOTE(review): assumes the first column of ../test.csv is the sample id
    # and that probs has exactly as many rows -- confirm against the caller.
    ids=pd.read_csv('../test.csv')
    ids=ids.values
    ids=ids[:,0]
    # Prepend the id column; np.insert returns a new array.
    probs=np.insert(probs,0,ids,axis=1)
    rpy=pd.DataFrame(probs,columns=['id','Class_1','Class_2','Class_3','Class_4','Class_5','Class_6','Class_7','Class_8','Class_9'])
    rpy.to_csv(filename,index=False)
def write_pred_logloss(logloss_list):
f = open('logloss.csv', 'wb')
writer = csv.writer(f)
labels = ['class', 'log loss']
writer.writerow(labels)
data = []
for l in logloss_list:
data.append(l)
writer.writerows(data)
f.close()
print 'finish writting <logloss.csv>'
def init_set(prob):
    """Return a matrix shaped like ``prob`` filled with 1e-15 (the same
    probability floor used by log_loss_implement) and dump it to
    'new_set.csv'.

    Args:
        prob: rectangular-ish 2-D sequence whose shape is copied.

    Return:
        list[list[float]]: the epsilon-filled matrix.
    """
    # Comprehension replaces the original append-in-nested-loops build.
    new_set = [[1e-15 for _ in row] for row in prob]
    # 'wb' matches Python 2's csv convention (this module targets Python 2).
    f = open('new_set.csv', 'wb')
    writer = csv.writer(f)
    for l in new_set:
        writer.writerow(l)
    f.close()
    return new_set
def update_set(new_set, prob):
    """Add each row's maximum probability into the matching cell(s) of
    ``new_set`` and dump the result to 'new_set.csv'.

    ``new_set`` is mutated in place and also returned.  Note that on ties
    every entry equal to the row maximum is incremented, not just the
    first one.
    """
    for i in range(len(prob)):
        largest = max(prob[i])
        for j in range(len(prob[i])):
            if prob[i][j] == largest:
                new_set[i][j] += prob[i][j]
    # 'wb' matches Python 2's csv convention (this module targets Python 2).
    f = open('new_set.csv', 'wb')
    writer = csv.writer(f)
    for l in new_set:
        writer.writerow(l)
    f.close()
    return new_set
#====================knn
from sklearn.neighbors import KNeighborsClassifier
from sklearn.grid_search import GridSearchCV
def knn(Xtrain, ytrain):
    """Fit a 142-nearest-neighbour classifier; return (train-set class
    probabilities, fitted model)."""
    model = KNeighborsClassifier(n_neighbors=142, n_jobs=-1).fit(Xtrain, ytrain)
    return model.predict_proba(Xtrain), model
#====================LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.calibration import CalibratedClassifierCV
def lda(Xtrain, ytrain):
    """Fit an isotonic-calibrated LDA classifier; return (train-set class
    probabilities, fitted calibrated model)."""
    base = LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
                                      solver='svd', store_covariance=False, tol=0.0001)
    calibrated = CalibratedClassifierCV(base, method='isotonic', cv=5).fit(Xtrain, ytrain)
    return calibrated.predict_proba(Xtrain), calibrated
#===================NN
import os.path
import sys
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
def nn(Xtrain, ytrain):
    """Fit a small SGD-trained MLP classifier; return (train-set class
    probabilities, fitted model)."""
    net = MLPClassifier(solver='sgd', learning_rate='constant', momentum=0, alpha=1e-5,
                        learning_rate_init=0.2, max_iter=15, verbose=False, random_state=0)
    net = net.fit(Xtrain, ytrain)
    return net.predict_proba(Xtrain), net
#===================RF
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
def rf(Xtrain, ytrain):
    """Fit an isotonic-calibrated 100-tree random forest; return (train-set
    class probabilities, fitted calibrated model)."""
    trees = RandomForestClassifier(n_estimators=100, n_jobs=-1, max_features='sqrt',
                                   min_samples_leaf=1, class_weight='balanced')
    calibrated = CalibratedClassifierCV(trees, method='isotonic', cv=6).fit(Xtrain, ytrain)
    return calibrated.predict_proba(Xtrain), calibrated
#==================SVM
from sklearn import svm
def svm_(Xtrain, ytrain):
    """Fit an RBF-kernel SVM with probability estimates; return (train-set
    class probabilities, fitted model)."""
    classifier = svm.SVC(kernel='rbf', class_weight='balanced', C=2, gamma=0.003,
                         probability=True).fit(Xtrain, ytrain)
    return classifier.predict_proba(Xtrain), classifier
#==================XGB
from sklearn.ensemble import GradientBoostingClassifier, BaggingClassifier
def xgboost(Xtrain, ytrain):
    """Fit a bagged gradient-boosting ensemble (5 bags of 100-round GBMs);
    return (train-set class probabilities, fitted model)."""
    booster = GradientBoostingClassifier(learning_rate=0.4, max_depth=3, n_estimators=100)
    bagged = BaggingClassifier(booster, n_estimators=5).fit(Xtrain, ytrain)
    return bagged.predict_proba(Xtrain), bagged
'''
read and test
'''
# read file, get training data and testing data
# Training set: drop the id column, label-encode the target, keep features.
X = pd.read_csv('../train.csv')
X = X.drop('id', axis=1)
y = X.target.values
y = LabelEncoder().fit_transform(y)
X = X.drop('target', axis=1)
# Kaggle test set: has no target column, only id + features.
Xt = pd.read_csv('../test.csv')
Xt = Xt.drop('id', axis=1)
#yt = Xt.target.values
#yt = LabelEncoder().fit_transform(yt)
#Xt = Xt.drop('target', axis=1)
Xtrain1 = X
ytrain1 = y
Xtest = Xt
#ytest = yt
#first step
# First step: fit every base learner on the full training set.
# (Python 2 print statements -- this module targets Python 2, see the
# cPickle import at the top of the file.)
print 'First step'
prob, knn_model = knn(Xtrain1, ytrain1)
#new_set = init_set(prob)
#new_set = update_set(new_set, prob)
print 'knn'
prob, lda_model = lda(Xtrain1, ytrain1)
#new_set = update_set(new_set, prob)
print 'lda'
prob, nn_model = nn(Xtrain1, ytrain1)
#new_set = update_set(new_set, prob)
print 'nn'
prob, rf_model = rf(Xtrain1, ytrain1)
#new_set = update_set(new_set, prob)
print 'rf'
prob, svm_model = svm_(Xtrain1, ytrain1)
#new_set = update_set(new_set, prob)
print 'svm'
prob, xgb_model = xgboost(Xtrain1, ytrain1)
#new_set = update_set(new_set, prob)
print 'xgboost'
'''
second step
'''
# Second step: predict test-set probabilities with each fitted model and
# write one CSV per model.
print 'Second step'
'''
id_f = open('new_set.csv', 'rb')
id_r = csv.reader(id_f)
Xtrain = [row for row in id_r]
id_f.close()
'''
#======================================
prob = knn_model.predict_proba(Xtest)
write_pred_prob(prob, 'knn_prob.csv')
#new_set = init_set(prob)
#new_set = update_set(new_set, prob)
prob = lda_model.predict_proba(Xtest)
write_pred_prob(prob, 'lda_prob.csv')
#new_set = update_set(new_set, prob)
prob = nn_model.predict_proba(Xtest)
write_pred_prob(prob, 'nn_prob.csv')
#new_set = update_set(new_set, prob)
prob = rf_model.predict_proba(Xtest)
write_pred_prob(prob, 'rf_prob.csv')
#new_set = update_set(new_set, prob)
prob = svm_model.predict_proba(Xtest)
write_pred_prob(prob, 'svm_prob.csv')
#new_set = update_set(new_set, prob)
prob = xgb_model.predict_proba(Xtest)
write_pred_prob(prob, 'xgb_prob.csv')
#new_set = update_set(new_set, prob)
| true |
adf65314f96d3d1d24be5ce507e7192710c1fa96 | Python | xstian/pyimageresearch | /Chapter 1/1.4/bitwise.py | UTF-8 | 799 | 3.890625 | 4 | [] | no_license | # NOTE: AND, OR, XOR, NOT
# cv2.bitwise_and()
# cv2.bitwise_or()
# cv2.bitwise_xor()
# cv2.bitwise_not()
import numpy as np
import cv2
# Build the two binary masks used as operands for the bitwise demos.
rect_mask = np.zeros((300, 300), dtype='uint8')
cv2.rectangle(rect_mask, (25, 25), (275, 275), 255, -1)
cv2.imshow('Rectangle', rect_mask)

circ_mask = np.zeros((300, 300), dtype='uint8')
cv2.circle(circ_mask, (150, 150), 150, 255, -1)
cv2.imshow('Circle', circ_mask)

# Show each bitwise combination, waiting for a key press between windows.
combined = cv2.bitwise_and(rect_mask, circ_mask)
cv2.imshow('AND', combined)
cv2.waitKey(0)

combined = cv2.bitwise_or(rect_mask, circ_mask)
cv2.imshow('OR', combined)
cv2.waitKey(0)

combined = cv2.bitwise_xor(rect_mask, circ_mask)
cv2.imshow('XOR', combined)
cv2.waitKey(0)

combined = cv2.bitwise_not(circ_mask)
cv2.imshow('NOT', combined)
cv2.waitKey(0)
| true |
af7a018393db3b7abc30cae2db6668a19de85bb3 | Python | thatch/arlib | /arlib/__init__.py | UTF-8 | 19,877 | 2.703125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import tarfile
import zipfile
import io
import os
import shutil
import collections
import bisect
import abc
import fnmatch
import sys
import decoutils
if sys.version_info[0] == 2: #pragma no cover
import __builtin__ as builtins
else: #pragma no cover
import builtins
__version__ = '0.1.0'
# Registry of (priority, detector) pairs kept sorted by priority; filled by
# register_auto_engine() and scanned in order by auto_engine().
_auto_engine = []
# Path-like classes accepted as archive paths; os.PathLike exists on 3.6+ only.
if sys.version_info[0] >= 3 and sys.version_info[1] >= 6: #pragma no cover
    _path_classes = (str, bytes, os.PathLike)
else: #pragma no cover
    _path_classes = (str, bytes)
@decoutils.decorator_with_args
def register_auto_engine(func, priority=50, prepend=False):
    """Register an automatic engine-detection function.

    Usable either directly (``register_auto_engine(func, ...)``) or as a
    decorator with arguments (``@register_auto_engine(priority=...)``),
    courtesy of :func:`decoutils.decorator_with_args`.

    Args:
        func (callable): ``func(path, mode)`` where ``path`` is a file-like
            or path-like object and ``mode`` the open-mode string; it
            returns an Archive subclass when it recognizes the file, or
            None otherwise.
        priority (int, float): Smaller numbers are tried first. When several
            functions are registered, auto_engine scans them in priority
            order. Default 50.
        prepend (bool): On a priority tie, insert before (True) or after
            (False) the already-registered entries. Default False.

    Return:
        The registered callable (direct form) or a decorator wrap
        (argument form).
    """
    priorities = [entry[0] for entry in _auto_engine]
    if prepend:
        position = bisect.bisect_left(priorities, priority)
    else:
        position = bisect.bisect_right(priorities, priority)
    _auto_engine.insert(position, (priority, func))
@register_auto_engine
def auto_engine_tar(path, mode):
    """Detect whether *path* should be handled by TarArchive for *mode*.

    Accepts an already-open TarFile (with a compatible mode), an existing
    tar file on disk (read mode), or a path whose extension looks like a
    tar archive (write mode). Returns TarArchive or None.
    """
    if isinstance(path, tarfile.TarFile):
        # An already-open TarFile must have been opened compatibly.
        if 'r' in mode:
            if path.mode != 'r':
                raise ValueError('Mode of TarFile object is not compatible'
                                 ' with the mode argument.')
        elif path.mode not in ['a', 'w', 'x']:
            raise ValueError('Mode of TarFile object is not compatible'
                             ' with the mode argument.')
        return TarArchive
    if isinstance(path, _path_classes):
        if 'r' in mode:
            full_path = os.path.abspath(path)
            if os.path.isfile(full_path) and tarfile.is_tarfile(full_path):
                return TarArchive
        else:
            tar_patterns = ['*.tar', '*.tgz', '*.tar.gz', '*.tar.bz2',
                            '*.tar.xz']
            if any(fnmatch.fnmatch(path, pattern) for pattern in tar_patterns):
                return TarArchive
    return None
@register_auto_engine
def auto_engine_zip(path, mode):
    """Detect whether *path* should be handled by ZipArchive for *mode*."""
    if 'r' in mode:
        if isinstance(path, zipfile.ZipFile):
            # Already-open ZipFile: its own mode must match the request.
            if path.mode != 'r':
                raise ValueError('Mode of ZipFile object is not compatible'
                                 ' with the mode argument.')
            return ZipArchive
        # Opened binary file object: zipfile.is_zipfile accepts file objects
        # on Python 3.1+.
        # NOTE(review): the version test also requires minor >= 1, which would
        # wrongly exclude a hypothetical 4.0 -- confirm intent.
        if (sys.version_info[0] >= 3 and sys.version_info[1] >= 1 and
            isinstance(path, io.IOBase) and
            (hasattr(path, 'mode') and 'b' in path.mode or
             not hasattr(path, 'mode'))):
            if not path.readable():
                # NOTE(review): missing space -- renders as 'the modeargument is'.
                raise ValueError('Opened file is not readable, but the mode'
                                 'argument is '+mode)
            if zipfile.is_zipfile(path):
                return ZipArchive
        if isinstance(path, _path_classes):
            if os.path.isfile(path) and zipfile.is_zipfile(path):
                return ZipArchive
    else:
        if isinstance(path, zipfile.ZipFile):
            if path.mode not in ['a', 'w', 'x']:
                raise ValueError('Mode of ZipFile object is not compatible'
                                 ' with the mode argument.')
            return ZipArchive
        if isinstance(path, _path_classes):
            # For writing, only the file extension can be checked.
            if fnmatch.fnmatch(path, '*.zip'):
                return ZipArchive
    return None
@register_auto_engine
def auto_engine_dir(path, mode):
    """Detect a plain directory (read mode only) and map it to DirArchive."""
    if 'r' not in mode:
        return None
    if isinstance(path, _path_classes) and os.path.isdir(path):
        return DirArchive
    return None
def auto_engine(path, mode='r'):
    """Automatically determine the engine type from file properties and file
    mode using the registered detection functions, tried in priority order.

    Args:
        path (file-like, path-like): Opened file object or path to the
            archive file.
        mode (str): Mode str to open the file. Default to "r".

    Return:
        type, NoneType: an Archive subclass if one detector matched,
        otherwise None.

    See also:
        :func:`is_archive`
    """
    engine = None
    for _, detect in _auto_engine:
        engine = detect(path, mode)
        if engine is not None:
            break
    return engine
def is_archive(path, mode='r'):
    """Tell whether *path* is a valid archive when opened with *mode*.

    This simply asks :func:`auto_engine` for an engine and reports whether
    one was found.

    Args:
        path (file-like, path-like): Opened file object or path to the
            archive file.
        mode (str): Mode str to open the file. Default to "r".

    Return:
        bool: True if some engine accepts the path, False otherwise.

    Examples:
        >>> is_archive('a.tar.gz', 'w')
        True
        >>> is_archive('a.tar.bz2', 'w')
        True
        >>> is_archive('a.txt', 'w')
        False

    See also:
        :func:`auto_engine`
    """
    engine = auto_engine(path, mode)
    return engine is not None
def assert_is_archive(path, mode):
    """Raise ValueError unless *path* can be opened as a valid archive
    with *mode*.

    Args:
        path (file-like, path-like): Opened file object or path to the
            archive file.
        mode (str): Mode str to open the file.

    Examples:
        >>> assert_is_archive('a.tar.gz', 'w')
        >>> assert_is_archive('a.txt', 'w')
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ValueError: a.txt cannot be opened as a valid archive with w

    See also:
        :func:`is_archive`
    """
    if is_archive(path, mode):
        return
    raise ValueError(str(path) + ' cannot be opened as a valid archive '
                     'with ' + mode)
# abc.ABC was added in Python 3.4; on older interpreters fall back to a plain
# object base (on Python 2 abstract enforcement then comes from the
# __metaclass__ hook set inside Archive below).
if sys.version_info[0] > 2 and sys.version_info[1] > 3: # pragma no cover
    base_cls = abc.ABC
else: #pragma no cover
    base_cls = object
class Archive(base_cls):
    """Common-interface to different type of archive files manipulation

    Args:
        path (path-like, file-like): Path of the archive to read or write
        mode (str): The mode to open the member, same as in
            :func:`open`. Default to 'r'.
        engine (type): Class object of a specific subclass Archive which
            implements the logic of processing a specific type of
            Archive. Provided implements:

            * ZipArchive: zip file archive using the `zipfile` module
            * TarArchive: tar file archive using the `tarfile` module
            * DirArchive: directory as an archive using the `pathlib` module
            * None: Automatically determine engines by file properties and
              mode

        kwargs : Additional keyword arguments passed to the underlying
            engine constructor

    Note:
        The constructor of a concrete engine should take at least one
        positional argument `path` and one optional argument `mode` with
        default value to `r`.
    """
    # Python 2 metaclass hook; ignored on Python 3, where base_cls is abc.ABC.
    __metaclass__ = abc.ABCMeta

    @property
    @abc.abstractmethod
    def member_names(self):
        """Get list of names of the members (i.e. files contained in the
        archive)

        Return:
            list[str]: list of member names; names of directory members
            end with a '/'
        """
        pass

    @abc.abstractmethod
    def open_member(self, name, mode='r', **kwargs):
        """Open a member file contained in the archive

        Args:
            name (str): name of the member file to open
            mode (str): The mode to open the member, same as in
                :func:`open`. Default to 'r'.
            kwargs: Additional keyword arguments that will be passed
                to the underlying function.

        Return:
            file-like: A opened file object associated with the member
            file
        """
        pass

    def validate_member_name(self, name):
        """Normalize *name* and verify it refers to an existing member.

        Windows-style separators are normalized to '/', and a trailing '/'
        is appended when the name refers to a directory member.

        Bug fix: str.replace returns a new string, but the original code
        discarded the result, so backslash-separated names were never
        normalized; the result is now assigned back to ``name``.

        Args:
            name (str): Member name to validate; must be non-empty.

        Return:
            str: The normalized member name.

        Raises:
            ValueError: If the archive has no member with this name.
        """
        names = self.member_names
        name = name.replace('\\', '/')
        assert len(name) > 0
        if name in names:
            return name
        elif name[-1] != '/' and name+'/' in names:
            return name + '/'
        else:
            raise ValueError(name+' is not a valid member name.')

    def member_is_dir(self, name):
        """Check if a specific member is a directory

        Args:
            name (str): Member name.

        Returns:
            bool: True if the member is a directory, False otherwise.
        """
        # validate_member_name appends '/' to directory members.
        name = self.validate_member_name(name)
        return name.endswith('/')

    def member_is_file(self, name):
        """Check if a specific member is a regular file

        Args:
            name (str): Member name.

        Returns:
            bool: True if the member is a regular file, False otherwise.
        """
        return not self.member_is_dir(name)

    def extract(self, path=None, members=None): #pragma no cover
        """Extract members to a location

        Args:
            path (path-like): Location of the extracted files. Defaults to
                the current directory.
            members (Seq[str]): Members to extract, specified by a list
                of names. Defaults to all members.
        """
        if path is None: #pragma no cover
            path = '.'
        if members is None:
            members = self.member_names
        else:
            members = [self.validate_member_name(x) for x in members]
        for name in members:
            fname = os.path.join(path, name)
            if self.member_is_dir(name):
                if not os.path.isdir(fname):
                    os.makedirs(fname)
            else:
                # Ensure the parent directory exists before copying bytes out.
                parent = os.path.dirname(fname)
                if not os.path.isdir(parent):
                    os.makedirs(parent)
                with self.open_member(name, 'rb') as src, builtins.open(fname, 'wb') as dst:
                    shutil.copyfileobj(src, dst)

    def close(self):
        """Release resources such as closing files etc
        """
        pass

    def __enter__(self):
        """Context manager enter function

        Return:
            Archive: The archive object itself
        """
        return self

    def __exit__(self, type, value, traceback):
        """Context manager exit function

        Call self.close() then return True
        """
        self.close()
class TarArchive(Archive):
    """Archive engine for *tar* files using the `tarfile` module

    Args:
        path (path-like): Path to the archive; may also be an already-open
            tarfile.TarFile or an opened file object.
        mode (str): The mode to open the member, same as in
            :func:`open`.
        kwargs : Other keyword arguments that will be passed to the
            underlying function.
    """
    def __init__(self, path, mode='r', **kwargs):
        self._need_close = True
        if isinstance(path, tarfile.TarFile):
            # Reuse an already-opened TarFile; the caller keeps ownership,
            # so close() must not close it (see _need_close).
            self._file = path
            self._need_close = False
        elif (isinstance(path, io.IOBase) or
              sys.version_info[0] == 2 and isinstance(path, file)):
            # Opened file object. The Python 2 `file` check is short-circuited
            # away on Python 3, where that name does not exist.
            self._file = tarfile.open(fileobj=path, mode=mode, **kwargs)
        else:
            self._file = tarfile.open(name=path, mode=mode, **kwargs)

    @property
    def member_names(self):
        """List of member names; directory members end with '/'."""
        names = self._file.getnames()
        # normalize names so that name of members which are
        # directories will be appended with a '/'
        names = [x+'/' if self._file.getmember(x).isdir() else x
                 for x in names]
        return names

    def open_member(self, name, mode='r'):
        """Open member file contained in the tar archive

        Args:
            name (str): Name of the member to open
            mode (str): The mode argument to open. Same as in :func:`open`.

        Return:
            file-like: The opened file object associated with the member
            file.

        Note:
            Members of tar archive cannot be opened in write mode.
        """
        mode = mode.lower()
        if 'r' not in mode: #pragma no cover
            raise ValueError('members of tar archive can not be opened in'
                             ' write mode')
        if self.member_is_dir(name):
            raise ValueError('directory member cannot be opened.')
        f = self._file.extractfile(name)
        if 'b' not in mode:
            # extractfile() always yields a binary file object; wrap it for
            # text-mode access on Python 3.
            if sys.version_info[0] >= 3:
                f = io.TextIOWrapper(f)
            else: #pragma no cover
                raise ValueError('I do not know how to wrap binary file'
                                 ' object to text io.')
        return f

    def extract(self, path=None, members=None):
        """Extract members to a location

        Args:
            path (path-like): Location of the extracted files.
            members (Seq[str]): Members to extract, specified by a list
                of names.
        """
        if members is not None:
            # Map validated names to TarInfo objects. The trailing '/' that
            # validate_member_name adds for directories must be stripped,
            # because getmember() stores directory names without it.
            info = []
            for name in members:
                name = self.validate_member_name(name)
                if name.endswith('/'):
                    name = name[:-1]
                info.append(self._file.getmember(name))
            members = info
        if path is None: #pragma no cover
            path = '.'
        self._file.extractall(path, members)

    def close(self):
        """Close the underlying TarFile, but only if this object opened it."""
        if self._need_close:
            self._file.close()
class ZipArchive(Archive):
    """Archive engine for *zip* files backed by the :mod:`zipfile` module.

    Args:
        path (path-like, file-like, zipfile.ZipFile): Path to the
            archive, an open file object, or an existing ZipFile.
    """

    def __init__(self, path, *args, **kwargs):
        # Only take ownership of ZipFile objects we open ourselves; an
        # already-open ZipFile passed in stays the caller's to close.
        if isinstance(path, zipfile.ZipFile):
            self._file = path
            self._need_close = False
        else:
            self._file = zipfile.ZipFile(path, *args, **kwargs)
            self._need_close = True

    @property
    def member_names(self):
        """Names of all members as reported by the zip directory."""
        return self._file.namelist()

    def open_member(self, name, mode='r', **kwargs):
        """Open a member file in the zip archive.

        Args:
            name (str): Name of the member file
            mode (str): The mode argument to open. Same as in :func:`open`.
            kwargs: Additional keyword arguments that will be passed
                to :func:`zipfile.ZipFile.open`
        Return:
            file-like: The opened file object associated with the member
            file.
        Raises:
            ValueError: when a directory member is opened for reading.
        """
        if 'r' in mode:
            name = self.validate_member_name(name)
            if name.endswith('/'):
                raise ValueError('Directory member cannot be opened in read mode.')
        assert 'r' in mode or 'w' in mode
        # zipfile only understands plain 'r'/'w'; text wrapping is ours.
        raw = self._file.open(name, 'r' if 'r' in mode else 'w')
        return raw if 'b' in mode else io.TextIOWrapper(raw)

    def extract(self, path=None, members=None):
        """Extract members to a location.

        Args:
            path (path-like): Location of the extracted files.
            members (Seq[str]): Members to extract, specified by a list
                of names.
        """
        if members is not None:
            members = [self.validate_member_name(m) for m in members]
        self._file.extractall(path, members)

    def close(self):
        # Close the ZipFile only if this instance opened it.
        if self._need_close:
            self._file.close()
class DirArchive(Archive):
    """Archive engine that treats a plain directory as an archive.

    Args:
        path (path-like): Path of the directory.
        mode (str): Accepted for interface compatibility with the other
            engines; a directory needs no open mode, so it is ignored.
    """
    def __init__(self, path, mode='r'):
        self._file = os.path.abspath(path)

    @property
    def member_names(self):
        """Names of all members, relative to the root; directories get '/'."""
        names = []
        for p, dirs, files in os.walk(self._file):
            names += [os.path.relpath(os.path.join(p, d), self._file) + '/'
                      for d in dirs]
            names += [os.path.relpath(os.path.join(p, f), self._file)
                      for f in files]
        # Normalize Windows separators so member names always use '/'.
        return [n.replace('\\', '/') for n in names]

    def open_member(self, name, mode='r', **kwargs):
        """Open a member in the directory.

        Args:
            name (str): Name of the member file
            mode (str): The mode argument to open. Same as in :func:`open`.
            kwargs: Additional keyword arguments that will be passed
                to :func:`open`
        Return:
            file-like: The opened file object associated with the member
            file.
        Raises:
            ValueError: when a directory member is opened for reading.
        """
        if 'r' in mode:
            name = self.validate_member_name(name)
            if name.endswith('/'):
                raise ValueError('Directory member cannot be opened.')
        path = os.path.join(self._file, name)
        return builtins.open(path, mode, **kwargs)

    def extract(self, path=None, members=None):
        """Extract members to a location.

        Args:
            path (path-like): Location of the extracted files.
            members (Seq[str]): Members to extract, specified by a list
                of names.
        """
        if path is None: #pragma no cover
            path = '.'
        # BUG FIX: os.path.samefile raises OSError when *path* does not
        # exist yet; only short-circuit when the destination exists and
        # is this very directory.
        if os.path.exists(path) and os.path.samefile(self._file, path): #pragma no cover
            return
        if members is None:
            members = self.member_names
        else:
            members = [self.validate_member_name(x) for x in members]
        for name in members:
            fname = os.path.join(path, name)
            if self.member_is_dir(name):
                # Directory member: just make sure it exists.
                if not os.path.isdir(fname):
                    os.makedirs(fname)
            else:
                # Regular file: ensure the parent exists, then copy.
                parent = os.path.dirname(fname)
                if not os.path.isdir(parent):
                    os.makedirs(parent)
                shutil.copyfile(os.path.join(self._file, name), fname)
def open(path, mode='r', engine=None, *args, **kwargs):
    """Open an archive file.

    Args:
        path (path-like, file-like): Path of the archive to read or write
        mode (str): The mode to open the member, same as in
            :func:`open`. Default to 'r'.
        engine (type): Class object of a specific subclass Archive which
            implements the logic of processing a specific type of
            Archive. Provided implements:
            * ZipArchive: zip file archive using the `zipfile` module
            * TarArchive: tar file archive using the `tarfile` module
            * DirArchive: directory as an archive using the `pathlib` module
            * None: Automatically determine engines by file properties and
                mode
        args, kwargs : Additional arguments passed to the underlying
            engine constructor.
    Return:
        Archive: the opened archive object.
    Raises:
        RuntimeError: when no engine can be determined automatically.
    """
    if engine is None:
        engine = auto_engine(path, mode)
        if engine is None:
            raise RuntimeError('Cannot automatically determine engine for '
                               'path:', path, ' mode:', mode)
    assert issubclass(engine, Archive)
    # BUG FIX: *args used to be accepted but silently dropped; forward
    # them to the engine constructor like **kwargs.
    return engine(path, mode, *args, **kwargs)
| true |
bdf07ed01648c9ca3d60a2e99ed23be0e2b7c677 | Python | pwilso/Wire_Detector | /Wire_Detector_Refurbished.py | UTF-8 | 6,452 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 17:34:00 2017
@author: Paige
"""
from scipy import ndimage
import matplotlib.pyplot as plt
import numpy as np
### Functions #################################################################
def filter_pic(picture, sigma, threshold, scale, tilt):
    """Load *picture*, Gaussian-smooth it, and threshold its gradient.

    Args:
        picture: path of the image file (loaded greyscale, flattened).
        sigma: width of the Gaussian smoothing kernel.
        threshold: minimum gradient magnitude to keep.
        scale, tilt: accepted for call-site compatibility but unused here.

    Returns:
        2-D array of gradient magnitudes, zeroed wherever the magnitude
        is at or below *threshold*.
    """
    img = ndimage.imread(picture, mode='L', flatten=True)
    img = ndimage.gaussian_filter(img, sigma)
    grad_rows, grad_cols = np.gradient(img)
    magnitude = np.sqrt(grad_rows ** 2 + grad_cols ** 2)
    keep = (magnitude > threshold).astype(int)
    return magnitude * keep
def maximum(picture, row, column, L_or_R):
    """Climb along *row* from *column* to a local maximum of pixel values.

    Args:
        picture: 2-D array of pixel values, indexed picture[row, col].
        row: row index to scan.
        column: starting column index.
        L_or_R: "Right" climbs toward higher columns; any other value
            climbs toward lower columns.

    Returns:
        int: column index of the local maximum.

    Fix: the walk now stops at the image border. The original raised
    IndexError at the right edge and, at the left edge, silently wrapped
    to negative indices (numpy indexing semantics).
    """
    last_col = picture.shape[1] - 1
    if L_or_R == "Right":
        while column < last_col and picture[row, column] <= picture[row, column + 1]:
            column += 1
    else:
        while column > 0 and picture[row, column] <= picture[row, column - 1]:
            column -= 1
    return column
def subpixel(picture, row, column, L_or_R, max_col):
    """Gradient-weighted sub-pixel offset of an edge relative to *max_col*.

    Starting at (row, column), walk outward ("Right" -> increasing
    columns, otherwise decreasing) collecting non-zero pixel values as
    weights; stop at the first zero pixel. The result is the weighted
    mean of the column offsets from *max_col*.
    """
    step = 1 if L_or_R == "Right" else -1
    weights = []
    offsets = []
    while picture[row, column] != 0:
        weights.append(picture[row, column])
        offsets.append(column - max_col)
        column += step
    weights = np.array(weights)
    offsets = np.array(offsets)
    return sum(weights * offsets) / sum(weights)
def plotter(picture_ax, right_point, left_point, height_point):
    """Mark one detected right/left edge pair on the outline figure.

    Relies on the module-global `unfiltered` image.

    NOTE(review): this re-fetches figure 3 and re-draws the background
    image on every call, then scatters onto *picture_ax* (the axes the
    caller passed in), not onto the axes just created here -- presumably
    to guarantee the figure exists, but worth confirming.
    """
    outline_pic = plt.figure(3)
    outline_pic_ax = outline_pic.add_subplot(111)
    outline_pic_ax.imshow(unfiltered)
    outline_pic_ax.axis("off")
    # right edge in cyan, left edge in magenta
    picture_ax.scatter(right_point, height_point, color = 'c', marker = '.')
    picture_ax.scatter(left_point, height_point, color = 'm',
                       marker = '.')
    plt.draw()
def find_edge(m_value, n_value):
    """Locate the sub-pixel right and left edges on row *m_value*.

    Scans outward from column *n_value* in the module-global `pic_array`
    (thresholded gradient image): zero pixels are skipped until the edge
    band is reached, then `maximum` finds the gradient peak and
    `subpixel` refines it below pixel resolution.

    Returns:
        tuple: (right_edge, left_edge, row) in pixel coordinates.
    """
    row = m_value

    def scan(direction):
        # Step over zero pixels until the first non-zero (edge) pixel.
        step = 1 if direction == "Right" else -1
        col = n_value
        while pic_array[row, col] == 0:
            col += step
        peak = maximum(pic_array, row, col, direction)
        return subpixel(pic_array, row, col, direction, peak) + peak

    right = scan("Right")
    left = scan("Left")
    return (right, left, row)
def click(event):
    """Matplotlib mouse-click handler: measure the wire at the clicked point.

    Python 2 code (note the print statement below). Each click measures
    one right/left edge pair via find_edge() and accumulates it; the
    first `special` clicks of a batch are only accumulated, and the final
    click of the batch converts the accumulated pixel values to nm
    (heights taken relative to the first click and divided by sin(tilt)
    to correct for the SEM stage tilt), prints the table, and resets the
    batch state.
    """
    global diam, height, special, specialcount, diam_pix, height_pix
    # clicked pixel coordinates (column, row)
    n_orig = int(event.xdata)
    m_orig = int(event.ydata)
    # if len(diam_pix) < 2:
    # results = find_edge(m_orig, n_orig)
    # plotter(outline_pic_ax, results[0], results[1], results[2])
    # diam_pix.append(abs(results[0] - results[1]))
    # height_pix.append(results[2])
    # elif len(diam_pix) == 2:
    # points = np.arange(height_pix[1], height_pix[0], -1*scale*step)
    # for p in points:
    # results = find_edge(p, n_orig)
    # plotter(outline_pic_ax, results[0], results[1], results[2])
    # diam_pix.insert(-1, abs(results[0] - results[1]))
    # height_pix.append(-1, p)
    # else:
    # if special == 0:
    # pass
    # else:
    if specialcount != 0:
        # Still collecting the "special" measurements of this batch.
        results = find_edge(m_orig, n_orig)
        plotter(outline_pic_ax, results[0], results[1], results[2])
        #plotter(thresh_pic_ax, results[0], results[1], results[2])
        diam_pix.append(abs(results[0] - results[1]))
        height_pix.append(results[2])
        specialcount -= 1
    else:
        # Final click of the batch: measure, convert everything to nm,
        # print the table, and reset the accumulators.
        results = find_edge(m_orig, n_orig)
        plotter(outline_pic_ax, results[0], results[1], results[2])
        diam_pix.append(abs(results[0] - results[1]))
        #print(diam_pix)
        height_pix.append(results[2])
        #print(height_pix)
        diam = np.array(diam_pix)/scale
        height = np.array(height_pix)
        # Heights relative to the first clicked point, tilt-corrected.
        height = ((height - height[0])/scale)/np.sin(np.radians(tilt))
        #print("Height (nm)\tDiameter (nm)")
        for i in range(len(height)):
            print str(height[i]).ljust(15), '\t', diam[i]
        #print '\n'
        diam_pix = []
        height_pix = []
        diam = []
        height = []
        specialcount = special
### Parameters ################################################################
# BUG FIX: the Windows path is now a raw string. Under Python 3 the old
# plain literal "C:\Users\..." is a SyntaxError ("\U" starts a unicode
# escape); the raw string has the identical runtime value under Python 2.
folder = r"C:\Users\Paige\Desktop\Samples\Sample_1953\Oct30"
picture = "\\1953_1000D2_col2_x20_30deg.tif"
img_name = folder + picture
# Raw image shown behind the edge overlays. NOTE(review): ndimage.imread
# was removed from modern scipy releases -- confirm the pinned version.
unfiltered = ndimage.imread(img_name)
tilt = 20.0   # image tilt in SEM, degrees
scale = 0.43  # SEM image scale -- TODO confirm units (pixels per nm?)
sigma = 1.5   # Gaussian smoothing width for filter_pic
t = 4.0       # gradient-magnitude threshold
step = 50     # distance between measurements in nm
special = 2   # number of special features to detect per batch
specialcount = special
diam_pix = []    # accumulated diameters (pixels) for the current batch
height_pix = []  # accumulated row positions (pixels) for the current batch
### Main ######################################################################
if __name__ == "__main__":
    # Figure 3: the raw image that plotter() annotates with edge marks.
    outline_pic = plt.figure(3)
    outline_pic_ax = outline_pic.add_subplot(111)
    outline_pic_ax.imshow(unfiltered)

    # Figure 1: thresholded gradient image that the user clicks on.
    pic_array = filter_pic(img_name, sigma, t, scale, tilt)
    thresh_pic = plt.figure(1)
    thresh_pic_ax = thresh_pic.add_subplot(111)
    thresh_pic_ax.imshow(pic_array, interpolation = "None", cmap = 'pink')
    thresh_pic_ax.axis("off")
    thresh_pic.subplots_adjust(left = 0, right = 1, top = 1, bottom = 0)
    plt.draw()

    # Route mouse clicks on the threshold image to the measurement handler.
    cid = thresh_pic.canvas.mpl_connect('button_press_event', click)  # grab a point
    # BUG FIX: without a blocking show() the script exits immediately when
    # run non-interactively, so no clicks could ever be processed. Also
    # removed the unused `press`/`number` variables.
    plt.show()
| true |
04bda87b7c83a8c8d1dc90832c6935557b19876d | Python | prem168/GUVI | /productofarrayexceptcurrentnumber.py | UTF-8 | 153 | 3.140625 | 3 | [] | no_license | x=int(input())
# Read the array, compute the product of the first x elements, then print
# product // a[i] for every i -- i.e. the product of all *other* elements
# (assumes no zeros, as in the original).
a = list(map(int, input().split()))
product = 1
for i in range(0, x):
    product = product * a[i]
pieces = [str(product // a[i]) for i in range(0, x - 1)]
pieces.append(str(product // a[x - 1]))
print(" ".join(pieces))
| true |
3092dfd52ba763841622d99ccb677cee50a61687 | Python | Aurelienpautrot/Webscraping_project | /scrapy/spider1.py | UTF-8 | 1,051 | 2.859375 | 3 | [] | no_license | import scrapy
from scrapy import Selector
from urllib import request
import pandas as pd
#choose the number of pages to scrape
nb_page = 101
# Item holding a single doctor-profile link scraped from a listing page.
class Link(scrapy.Item):
    link = scrapy.Field()  # absolute URL of one doctor page
class LinkListsSpider(scrapy.Spider):
    """Crawl Doctolib dentist listings for Paris and yield profile links.

    Follows pagination up to the module-level `nb_page` limit, tracking
    the next page number in the class attribute `page_number`.
    """
    name = 'spider1'
    page_number = 2
    start_urls = ['https://www.doctolib.fr/dentiste/paris?page=1']
    output = "output.csv"

    def parse(self, response):
        # Yield one Link item per doctor link found on this listing page.
        for href in response.css('h3 a::attr(href)'):
            item = Link()
            item['link'] = 'https://www.doctolib.fr' + href.get()
            yield item
        # Follow the next listing page until nb_page is reached.
        next_url = 'https://www.doctolib.fr/dentiste/paris?page=' + str(LinkListsSpider.page_number)
        if LinkListsSpider.page_number < nb_page:
            LinkListsSpider.page_number = LinkListsSpider.page_number + 1
            yield scrapy.Request(next_url, callback=self.parse)
| true |
1ecd7634ad263228e8313142172f9aaa96a7406a | Python | kamyu104/LeetCode-Solutions | /Python/score-of-parentheses.py | UTF-8 | 789 | 3.65625 | 4 | [
"MIT"
] | permissive | # Time: O(n)
# Space: O(1)
class Solution(object):
    def scoreOfParentheses(self, S):
        """
        :type S: str
        :rtype: int

        Score rules: "()" = 1, AB = A + B, (A) = 2 * A. Each "()" pair
        at nesting depth d therefore contributes 2**d, which is what the
        single pass below accumulates.

        Fix: use range() instead of the Python-2-only xrange() so the
        solution runs under both Python 2 and Python 3.
        """
        result, depth = 0, 0
        for i in range(len(S)):
            if S[i] == '(':
                depth += 1
            else:
                depth -= 1
                # A ")" immediately after "(" closes a unit pair at
                # the current depth.
                if S[i-1] == '(':
                    result += 2**depth
        return result
# Time:  O(n)
# Space: O(h)
class Solution2(object):
    def scoreOfParentheses(self, S):
        """
        :type S: str
        :rtype: int

        Stack-based variant: keep one running total per open nesting
        level; closing a level folds its score into the level below
        (an empty level is the unit pair "()" worth 1, otherwise the
        wrapped score doubles).
        """
        totals = [0]
        for ch in S:
            if ch == '(':
                totals.append(0)
            else:
                inner = totals.pop()
                totals[-1] += 2 * inner if inner else 1
        return totals[0]
| true |
298e1b65a59a44a8b33d5d51275e6fca041cd467 | Python | petrov-anna/flask_app_prak | /users.py | UTF-8 | 398 | 2.78125 | 3 | [] | no_license | from passw import enc_password
# User management: registers users and looks them up by login.
class Users:
    # NOTE(review): `users` is a *class* attribute, so the list is shared
    # by every Users instance -- confirm this module-wide registry is
    # intended rather than per-instance state.
    users = []

    def get_users(self):
        """Return the list of all registered user records."""
        return self.users

    def set_users(self, login, password, date=None):
        """Register a user and return the record(s) matching *login*.

        The password is stored hashed/encoded via enc_password (from the
        external `passw` module).
        """
        self.users.append({'login': login, 'password': enc_password(password), 'registration date': date})
        return list(filter(lambda user: user['login'] == login, self.users))
| true |
5e519eced0ecafc616024ba968a4db8d7a7c4fb4 | Python | cdong5/Dog-Walking-Excerciser | /GUI.py | UTF-8 | 2,255 | 3.90625 | 4 | [] | no_license | # Created by Calvin Dong - 12/30/2018
# Learning tkinter and random Library
from random import *
from tkinter import *
def exercise():
    """Return a randomly chosen exercise from 'exercises.txt'.

    Each line of the file is one exercise name (stripped of whitespace).

    Fix: the file handle is now closed deterministically via a `with`
    block (the original opened the file and never closed it), and the
    random pick uses random.choice instead of manual index arithmetic.
    """
    with open('exercises.txt', 'r') as ex_file:
        exercise_list = [line.strip() for line in ex_file]
    return choice(exercise_list)
def num_sec_ex():
    """Return a random rep count for an exercise (10 to 20 inclusive)."""
    return randint(10, 20)
def gen_ex():
    """Append a label with a random exercise and rep count to the window."""
    row = Frame(root)
    row.pack()
    Label(row, text=(f'{exercise()} for {num_sec_ex()} reps')).pack()
def num_sec_walk():
    """Return a random walking duration in seconds (120 to 240 inclusive)."""
    return randint(120, 240)
def gen_walking():
    """Append a label telling the user how long to walk; return the label."""
    row = Frame(root)
    row.pack()
    walk_label = Label(row, text=(f'Walk for {round(num_sec_walk()//60, 2)} minutes'))
    walk_label.pack()
    return walk_label
def create_list():
    """Fill the window with walk/exercise entries until the requested
    time budget (minutes, read from the entry box) is used up.

    NOTE(review): each gen_* call and each `current_time +=` line draws
    its *own* random number, so the reps/durations displayed are not the
    values actually summed into the budget; num_sec_ex() is also a rep
    count rather than seconds. Confirm whether this mismatch is intended
    before relying on the total time being accurate.
    """
    time = entry_box.get()
    current_time = 0
    total_time = int(time)*60
    while current_time < total_time:
        gen_walking()
        gen_ex()
        current_time += num_sec_ex()
        current_time += num_sec_walk()
# Formatting and organization of the GUI (built top to bottom):
# title label, "How Long?" entry row, then the Generate button.
root = Tk()
root.title('Dog Walk Exerciser')
labelframe = Frame(root)
labelframe.pack(side=TOP)
# NOTE(review): pack() returns None, so top_label is always None here;
# the widget itself is still created and displayed.
top_label = Label(labelframe, text='Dog Walk Exerciser').pack(side=TOP)
Entry_frame = Frame(root)
Entry_frame.pack(side=TOP)
entry_label = Label(Entry_frame, text='How Long?')
entry_label.pack(side="left", padx=2)
# entry_box is read by create_list() when the button is pressed.
entry_box = Entry(Entry_frame)
entry_box.pack(side="left", padx=2, pady=2)
gen_frame = Frame(root)
gen_frame.pack(side=TOP)
gen_button = Button(gen_frame, text='Generate', command= create_list)
gen_button.pack(pady=2)
# Hand control to the Tk event loop.
root.mainloop()
| true |
a1ddad55f653124b94d8cdf5728546f902419dbb | Python | n0tch/my_uri_problems | /URI_2160.py | UTF-8 | 201 | 3.640625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
# A nickname is valid when it is at most 80 characters long.
nome = input()
print("YES" if len(nome) <= 80 else "NO")