blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
09353e34ffec3547a510acdadb3b8f3316a5e2b6 | Python | Muratam/yukicoder-nim | /yukicoder/inprogress/hoge.py | UTF-8 | 485 | 2.90625 | 3 | [] | no_license | from itertools import permutations
# Python 2 brute-force solver (work in progress): try every ordering of the
# n values in `a` against the m capacities in `b` (sorted descending) and
# report the fewest capacities needed to absorb every value, or -1.
n = int(raw_input())
a = map(int, raw_input().split())
m = int(raw_input())
b = map(int, raw_input().split())
a.sort()
b.sort()
b.reverse()
res = m + 1  # sentinel: "no feasible assignment found yet"
for perm in permutations(a, n):
    print res  # NOTE(review): looks like leftover debug output — confirm
    temp = b[:]  # fresh copy of the capacities for this ordering
    i, j = 0, 0
    # Greedily place values into capacity j until it cannot hold the next one.
    while i < n and j < m:
        if temp[j] >= perm[i]:
            temp[j] -= perm[i]
            i += 1
        else:
            j += 1  # capacity j exhausted; move on to the next one
    res = min(res, j + 1)
if res < m + 1:
    print res
else:
    print -1
| true |
3c8235f2efdf2f208e53a3adfb494adced00f35f | Python | CesarHera/POO-Algoritmos-Py | /polimorfismo5.py | UTF-8 | 625 | 3.640625 | 4 | [] | no_license | class Persona:
    def __init__(self, nombre):
        # Store the person's display name.
        self.nombre = nombre
    def anvanza(self):
        # Base movement behaviour; subclasses override this. (The name
        # looks like a typo of "avanza" but is kept for compatibility.)
        print('Ando caminando')
class Ciclista(Persona):
    """Cyclist: a Persona whose movement message is pedalling."""
    def __init__(self, nombre):
        super().__init__(nombre)
    def anvanza(self):
        # Polymorphic override of Persona.anvanza().
        print('Ando pedaleando')
class Nadador(Persona):
    """Swimmer: a Persona whose movement message is swimming."""
    def __init__(self, nombre):
        super().__init__(nombre)
    def anvanza(self):
        # Polymorphic override of Persona.anvanza().
        print('Ando nadando')
def run():
    """Demonstrate polymorphism: each subclass prints its own message."""
    for mover in (Persona('Cesar'), Ciclista('Tom'), Nadador('Michael')):
        mover.anvanza()
if __name__ == '__main__':
run() | true |
8d53b0f90e12d49f45c26a08d5d85b9db31c38d1 | Python | sksundaram-learning/agate-sql | /tests/test_agatesql.py | UTF-8 | 5,775 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf8 -*-
from decimal import Decimal
import agate
import agatesql # noqa
from sqlalchemy import create_engine
class TestSQL(agate.AgateTestCase):
    """Round-trip and query tests for the agate-sql SQLAlchemy integration."""

    def setUp(self):
        # Small fixture table covering every supported column type,
        # with NULLs in the last row.
        self.rows = (
            (1.123, 'a', True, '11/4/2015', '11/4/2015 12:22 PM'),
            # See issue #18
            # (2, u'👍', False, '11/5/2015', '11/4/2015 12:45 PM'),
            (2, u'c', False, '11/5/2015', '11/4/2015 12:45 PM'),
            (None, 'b', None, None, None)
        )

        self.column_names = [
            'number', 'text', 'boolean', 'date', 'datetime'
        ]

        self.column_types = [
            agate.Number(), agate.Text(), agate.Boolean(),
            agate.Date(), agate.DateTime()
        ]

        self.table = agate.Table(self.rows, self.column_names, self.column_types)
        # In-memory SQLite keeps the tests self-contained.
        self.connection_string = 'sqlite:///:memory:'

    def test_back_and_forth(self):
        # Writing to SQL and reading back must preserve names, types and data.
        engine = create_engine(self.connection_string)
        connection = engine.connect()

        self.table.to_sql(connection, 'test')

        table = agate.Table.from_sql(connection, 'test')

        self.assertSequenceEqual(table.column_names, self.column_names)
        self.assertIsInstance(table.column_types[0], agate.Number)
        self.assertIsInstance(table.column_types[1], agate.Text)
        self.assertIsInstance(table.column_types[2], agate.Boolean)
        self.assertIsInstance(table.column_types[3], agate.Date)
        self.assertIsInstance(table.column_types[4], agate.DateTime)

        self.assertEqual(len(table.rows), len(self.table.rows))
        self.assertSequenceEqual(table.rows[0], self.table.rows[0])

    def test_create_if_not_exists(self):
        # Writing twice into the same SQL table must not fail when
        # create_if_not_exists is set.
        column_names = ['id', 'name']
        column_types = [agate.Number(), agate.Text()]

        rows1 = (
            (1, 'Jake'),
            (2, 'Howard'),
        )

        rows2 = (
            (3, 'Liz'),
            (4, 'Tim'),
        )

        table1 = agate.Table(rows1, column_names, column_types)
        table2 = agate.Table(rows2, column_names, column_types)

        engine = create_engine(self.connection_string)
        connection = engine.connect()

        # Write two agate tables into the same SQL table
        table1.to_sql(connection, 'create_if_not_exists_test', create=True, create_if_not_exists=True, insert=True)
        table2.to_sql(connection, 'create_if_not_exists_test', create=True, create_if_not_exists=True, insert=True)

    def test_to_sql_create_statement(self):
        # Default dialect keeps the length/NOT NULL constraints.
        statement = self.table.to_sql_create_statement('test_table')

        self.assertIn('CREATE TABLE test_table', statement)
        self.assertIn('number DECIMAL,', statement)
        self.assertIn('text VARCHAR(1) NOT NULL,', statement)
        self.assertIn('boolean BOOLEAN,', statement)
        self.assertIn('date DATE,', statement)
        self.assertIn('datetime TIMESTAMP', statement)

    def test_to_sql_create_statement_no_constraints(self):
        # With constraints disabled the text column loses length/NOT NULL.
        statement = self.table.to_sql_create_statement('test_table', constraints=False)

        self.assertIn('CREATE TABLE test_table', statement)
        self.assertIn('number DECIMAL,', statement)
        self.assertIn('text VARCHAR,', statement)
        self.assertIn('boolean BOOLEAN,', statement)
        self.assertIn('date DATE,', statement)
        self.assertIn('datetime TIMESTAMP', statement)

    def test_to_sql_create_statement_with_schema(self):
        # MySQL dialect adds precision/scale and the schema qualifier.
        statement = self.table.to_sql_create_statement('test_table', db_schema='test_schema', dialect='mysql')

        self.assertIn('CREATE TABLE test_schema.test_table', statement)
        self.assertIn('number DECIMAL(38, 3),', statement)
        self.assertIn('text VARCHAR(1) NOT NULL,', statement)
        self.assertIn('boolean BOOL,', statement)
        self.assertIn('date DATE,', statement)
        self.assertIn('datetime TIMESTAMP', statement)

    def test_to_sql_create_statement_with_dialects(self):
        # Smoke test: statement generation must not raise for any dialect.
        for dialect in ['mysql', 'postgresql', 'sqlite']:
            self.table.to_sql_create_statement('test_table', dialect=dialect)

    def test_to_sql_create_statement_zero_width(self):
        # Zero-width text columns still need a positive VARCHAR length.
        rows = (
            (1, ''),
            (2, ''),
        )

        column_names = ['id', 'name']
        column_types = [agate.Number(), agate.Text()]
        table = agate.Table(rows, column_names, column_types)

        statement = table.to_sql_create_statement('test_table', db_schema='test_schema', dialect='mysql')

        self.assertIn('CREATE TABLE test_schema.test_table', statement)
        self.assertIn('id DECIMAL(38, 0) NOT NULL,', statement)
        self.assertIn('name VARCHAR(1)', statement)

    def test_sql_query_simple(self):
        results = self.table.sql_query('select * from agate')

        self.assertColumnNames(results, self.table.column_names)
        self.assertRows(results, self.table.rows)

    def test_sql_query_limit(self):
        results = self.table.sql_query('select * from agate limit 2')

        self.assertColumnNames(results, self.table.column_names)
        self.assertRows(results, self.table.rows[:2])

    def test_sql_query_select(self):
        results = self.table.sql_query('select number, boolean from agate')

        self.assertColumnNames(results, ['number', 'boolean'])
        self.assertColumnTypes(results, [agate.Number, agate.Boolean])
        self.assertRows(results, [
            [Decimal('1.123'), True],
            [2, False],
            [None, None]
        ])

    def test_sql_query_aggregate(self):
        results = self.table.sql_query('select sum(number) as total from agate')

        self.assertColumnNames(results, ['total'])
        self.assertColumnTypes(results, [agate.Number])
        self.assertRows(results, [[Decimal('3.123')]])
| true |
319ef6777f36329faa8ea7a1732b06cccbba1e12 | Python | theoptips/algorithm_exercise | /python_list_comprehension.py | UTF-8 | 497 | 3.625 | 4 | [] | no_license | # python list comprehension
# times table example: products of every pair (row, col) in 1..9,
# produced in the same order as the nested comprehension would yield.
times_table = []
for row in range(1, 10):
    for col in range(1, 10):
        times_table.append(row * col)
print(times_table)

# coursera exercise: all possible ids letterletterdigitdigit, e.g. 'xy04'.
lowercase = 'abcdefghijklmnopqrstuvwxyz'
digits = '0123456789'
answer = [a + b + c + d
          for a in lowercase
          for b in lowercase
          for c in digits
          for d in digits]
print(answer) | true |
4c6c8f9104df28f2b80812069f8be307adb98111 | Python | zara9006/naver_news_search_scraper | /naver_news_search_crawler/press_list.py | UTF-8 | 442 | 2.625 | 3 | [] | no_license | from .utils import get_soup
def get_press_list():
    """Build (office id, office name) pairs from Naver's press-office page,
    sorted numerically by office id."""

    def parse(link):
        # The office id is the value of the 'oid' query-string parameter.
        oid = link['href'].split('oid=')[-1].split('&')[0]
        return oid, link.text

    url = 'https://news.naver.com/main/officeList.nhn'
    soup = get_soup(url)
    # Every anchor inside the office list corresponds to one press outlet.
    pairs = [parse(anchor) for anchor in soup.select('ul[class=group_list] a')]
    press_list = sorted(pairs, key=lambda pair: int(pair[0]))
return press_list | true |
5d7f138202f0d4c83081335ad3b740efde910570 | Python | JCGSoto/Python | /Frecuencia_de_letra.py | UTF-8 | 351 | 4.125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Frecuencia de letra
Este programa emite la frecuencia de una letra en un texto
como pocertanje entero
@author: JCGS
"""
text = input('Ingrese un texto: ')
letter = input('Ingrese una letra: ')
a = text.count(letter)
c = len(text)
if a > 0:
b = (a/c)*(100)
print(int(b),'{}'.format('%'))
| true |
e1201d911955e5f267cdbcdb131b8179a6d97b43 | Python | guori12321/todo | /todo/models.py | UTF-8 | 1,417 | 3.796875 | 4 | [
"MIT"
] | permissive | # coding=utf8
class Task(object):
    """
    One task looks like

        1. (x) Go shopping

    if the task is not done, the marker between parentheses is left blank.

    Attributes:
        id       int  -- unique identifier within a Todo list
        content  str  -- task description
        done     bool -- completion flag
    """
    def __init__(self, id, content, done=False):
        self.id = id
        self.content = content
        self.done = done
class TaskNotFound(Exception):
    """Raised when no task with the requested id exists in a Todo."""
    pass
class Todo(list):
    """A list of tasks with lookup by task id.

    Unlike a plain list, ``todo[task_id]`` returns the task whose ``id``
    equals ``task_id`` (raising TaskNotFound when absent).
    """

    def next_id(self):
        """Return the id the next task should get (max existing id + 1)."""
        existing = [task.id for task in self]
        return max(existing, default=0) + 1

    def new_task(self, content):
        """Append a new, not-yet-done task built from `content`."""
        return self.append(Task(self.next_id(), content, False))

    def __getitem__(self, id):
        """Return the task whose id is `id`, or raise TaskNotFound."""
        for task in self:
            if task.id == id:
                return task
        raise TaskNotFound

    def check_task(self, id):
        """Mark the task with the given id as done."""
        self[id].done = True

    def undo_task(self, id):
        """Mark the task with the given id as not done."""
        self[id].done = False

    def clear(self):
        """Remove every task."""
        del self[:]
| true |
6c1705075cde2498f7acb8804c23929130543072 | Python | Latikeshnikam/zelthy-assignments | /assignment-1/main.py | UTF-8 | 1,078 | 2.609375 | 3 | [] | no_license | from flask import Flask
import requests
import json
from io import BytesIO
from flask import render_template
import csv
# Flask application; HTML templates are loaded from ./template.
app = Flask(__name__, template_folder='template')
# To display the fetched data
@app.route('/', methods=['GET'])
def get_data():
    """Fetch the audit log from the mock API and render it in a template."""
    payload = requests.get('https://606f76d385c3f0001746e93d.mockapi.io/api/v1/auditlog').content
    return render_template('display_info.html', data=json.loads(payload))
#route to import json data to csv
@app.route('/convertCSV')
def response_to_csv():
    """Fetch the audit log and export it to output.csv.

    Returns a short confirmation string for the browser.
    """
    data = requests.get('https://606f76d385c3f0001746e93d.mockapi.io/api/v1/auditlog').content
    data = json.loads(data)
    # Fix: the original csv.writer(open(...)) leaked the file handle; a
    # context manager guarantees flush + close. newline='' is the csv
    # module's documented requirement to avoid extra blank rows on Windows.
    with open("output.csv", "w", newline="") as csvfile:
        f = csv.writer(csvfile)
        f.writerow(["id", "description", "date", "action", "user id", "criticality"])
        for x in data:
            f.writerow([x["id"],
                        x["description"],
                        x["date"],
                        x["action"],
                        x["user_id"],
                        x["criticality"]])
    return "File created as output.csv File name"
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader/debugger.
    app.run(debug=True)
| true |
a07f69d9c8f2603bf46fe1604ec9e23952a11da7 | Python | IvanHornung/IoT-RaspberryPi | /Course 2 - Interfacing with the Raspberry Pi/Module 3 - Protocol Libraries & APIs/TwittersAPI.py | UTF-8 | 688 | 2.625 | 3 | [] | no_license | #Twitter's API
"Twitter's API"
#Raspberry Pi can run several SDKs for Twitter's API
#RPi can make tweets
#RPi can respond to tweets
# -May look for a tag
#Twython is the package we will use
#Installing Twython
'''
sudo apt-get update
sudo apt-get install python-pip
sudo pip install twython
'''
#Pip is an installer for Python packages
'Register Your Twitter App'
#Your app needs to be registered to access Twitter's API
# -Need a Twitter account first
#You will receive several keys needed for authentication
#Twython functions use keys to transmit messages
#Twitter servers authenticate if keys are correct.
#Go to http://apps.twitter.com
'apps.twitter.com'
#Click Create New App | true |
3f64ae51e2ec7a387f20d2a2664b8c73f8cc0c5f | Python | davidma/gizmo | /client.py | UTF-8 | 2,493 | 2.6875 | 3 | [] | no_license | import os
import sys
import apiai
import uuid
import json
import pyowm
APIAI_CLIENT_TOKEN = 'bdbdfb49786b44b7bc4e0eddd4dba9ae'
PYOWM_CLIENT_TOKEN = 'ba58abbdec163813716f64cf4138d36b'
DEBUG = False
def main():
    """Interactive console client for the Gizmo Api.ai agent.

    Reads lines from the user, forwards them to Api.ai and prints the
    agent's reply. Two special reply prefixes are post-processed locally:
      * "WEATHER,<location>" -> current conditions via OpenWeatherMap
        (a "NULL" location falls back to Kilcullen).
      * "HEATING,ON|OFF,<duration|NULL>" -> heating-control confirmation.
    Type 'exit' to quit.
    """
    ai = apiai.ApiAI(APIAI_CLIENT_TOKEN)
    print ("======================================================")
    print ("========= G I Z M O T E S T C L I E N T ==========")
    print ("======================================================")
    print ()
    print ("Type questions for Gizmo, type \'exit\' when done")
    while True:
        print(u"> ", end=u"")
        user_message = input()
        if user_message == '':
            print ("> I didn't catch that? Can you try again?")
            continue
        if user_message == u"exit":
            break
        request = ai.text_request()
        # Bug fix: uuid.uuid4 was assigned without being called, so every
        # request shared the same function-repr "session id". Call it so
        # each request gets a fresh random UUID.
        request.session_id = str(uuid.uuid4())
        request.query = user_message
        response = json.loads(request.getresponse().read().decode())
        if DEBUG:
            print ("DEBUG " + json.dumps(response))
        reply = response['result']['fulfillment']['speech']
        if reply.startswith("WEATHER"):
            data = reply.split(",")
            if data[1] == "NULL":
                location = "Kilcullen, Kildare, Ireland"
            else:
                location = data[1]
            owm = pyowm.OWM(PYOWM_CLIENT_TOKEN)
            obs = owm.weather_at_place(location)
            wet = obs.get_weather()
            # Temperatures come back in Kelvin; 273.15 converts to Celsius.
            reply = "Open Weather Map says "
            reply += wet.get_detailed_status() + ", "
            reply += "high of " + str(wet.get_temperature()['temp_max'] - 273.15) + "C, "
            reply += "low of " + str(wet.get_temperature()['temp_min'] - 273.15) + "C, "
            reply += "wind " + str(wet.get_wind()['speed']) + "km/h, "
            reply += "heading " + str(wet.get_wind()['deg']) + "deg."
            if DEBUG:
                loc = obs.get_location()
                reply += "(" + loc.get_name() + " - " + str(loc.get_lat()) + "," + str(loc.get_lon()) +")"
        if reply.startswith("HEATING"):
            data = reply.split(",")
            if data[1] == "ON":
                if data[2] == "NULL":
                    reply = "OK, Turning heating on..."
                else:
                    reply = "OK, Boosting heating for " + data[2]+"..."
            else:
                reply = "OK, turning heating off..."
        ### Print the final reply
        print ("< " + reply)
if __name__ == '__main__':
    # Run the interactive client only when executed as a script.
    main()
| true |
4044066b62b5814adc21737b77594e9e0ef0b43d | Python | safiyesarac/python_projects | /two_body_simulation/two_body_animation.py | UTF-8 | 3,106 | 2.765625 | 3 | [] | no_license |
import math;
import tkinter;
import sys
import matplotlib.animation as ani
from matplotlib.patches import Circle
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Shared mutable state: `states` holds [x1, x2, y1, y2] rows read from
# test.txt by App.ReadValues(); `steps` appears unused in this file.
states=[]
steps=0
class View :
    """Tkinter/matplotlib window that animates the two precomputed bodies."""
    def animation():
        root = tkinter.Tk()
        label = tkinter.Label(root,text="Two Body Animation").grid(column=0, row=0)
        r = 2  # marker circle radius (display only)
        def circle(phi, phi_off,offset_x, offset_y):
            # Point on a radius-r circle at angle phi+phi_off, shifted to
            # the body's (offset_x, offset_y) position.
            return np.array([r*np.cos(phi+phi_off), r*np.sin(phi+phi_off)]) + np.array([offset_x, offset_y])
        plt.rcParams["figure.figsize"] = 8,6
        fig, ax = plt.subplots()
        # Embed the matplotlib figure inside the Tk window.
        canvas = FigureCanvasTkAgg(fig, master=root)
        canvas.get_tk_widget().grid(column=0,row=1)
        # Pause/Play close over `ani`, which is assigned further below
        # (only valid once the FuncAnimation exists).
        def Pause():
            return ani.event_source.stop()
        #unpause
        def Play():
            return ani.event_source.start()
        a=tkinter.Button( root,text="Play", command=Play).grid(column=0,row=2)
        b=tkinter.Button( root,text="Pause", command=Pause).grid(column=0,row=3)
        def endProgam():
            # raise SystemExit terminates; the sys.exit() below is unreachable.
            raise SystemExit
            sys.exit()
        B = tkinter.Button(root, text = "Exit", command = endProgam).grid(column=1,row=3)
        ax.axis([-30,30,-30,30])
        ax.set_aspect("equal")
        # create initial conditions
        phi_offs = [ np.pi/2, np.pi]
        offset_xs = [states[0][0],states[0][1]]
        offset_ys = [states[0][2],states[0][3]]
        # amount of points
        N = len(phi_offs)
        # create a point in the axes
        points = []
        for i in range(N):
            x,y = circle(0, phi_offs[i], offset_xs[i], offset_ys[i])
            points.append(ax.plot(x, y, marker="o")[0])
        def update(phi, phi_off, offset_x,offset_y):
            # set point coordinates from the next saved state, then consume it
            # (each frame pops one row off the shared `states` list).
            for i in range(N):
                x, y = circle(phi,phi_off[i], offset_x[i], offset_y[i])
                points[i].set_data([states[0][i]],[states[0][i+2]])
            del states[0]
            #points[i].set_data(states1[0][0],states1[0][1],[state["positions"][i]["y"] ])
            return points
        ani = animation.FuncAnimation(fig,update,
                fargs=(phi_offs, offset_xs, offset_ys),
                interval = 2,
                frames=np.linspace(0,2*np.pi,360, endpoint=False),
                blit=True)
        tkinter.mainloop()
class App:
    """Loads precomputed trajectory rows from test.txt into `states`."""
    def ReadValues():
        f = open("test.txt", "r")
        lines = f.readlines()
        for line in lines:
            coords= line.split(",")
            coords[3]=coords[3].rstrip("\n")
            # Note the column reorder: file order is x1,y1,x2,y2 but
            # `states` rows are stored as [x1, x2, y1, y2].
            states.append([float(coords[0]),float(coords[2]),float(coords[1]),float(coords[3])])
        f.close()
        # Truncate the file so the same data is not replayed next run.
        open('test.txt', 'w').close()
# Load the saved trajectory before launching the animation window.
App.ReadValues()
View.animation() | true |
c501ca8f0ece9587841ef244f7fad0dacc710915 | Python | yovanycunha/P1 | /opbancarias/opbancarias.py | UTF-8 | 500 | 3.0625 | 3 | [] | no_license | #coding: utf-8
# Operações Bancárias
# (C) 2016, Yovany Cunha/UFCG, Programaçao I
cliente,saldo = raw_input('').split()
saldo = float(saldo)
while True:
entrada = raw_input('')
if entrada == '3': break
operacao,quantia = entrada.split()
operacao = int(operacao)
quantia = float(quantia)
if operacao == 1:
#quantia = float(raw_input(''))
saldo -= quantia
elif operacao == 2:
#quantia = float(raw_input(''))
saldo += quantia
print 'Saldo de R$ %.2f na conta de %s' % (saldo, cliente)
| true |
508687d46649043a7284031c2baad9e9d6fd508c | Python | daniel-reich/ubiquitous-fiesta | /FwCZpyTZDH3QExXE2_14.py | UTF-8 | 97 | 2.828125 | 3 | [] | no_license |
def amount_fib(n):
    """Count how many Fibonacci numbers (0, 1, 1, 2, ...) are below n."""
    prev, nxt = 0, 1
    count = 0
    while prev < n:
        prev, nxt = nxt, prev + nxt
        count += 1
    return count
| true |
5dcace6aa3a20e1242fc7466550fe2b1d335dce5 | Python | yangjian615/My_python_pro | /python_workspace/CSA_code_demo/cylon_funcs.py | UTF-8 | 10,791 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
import datetime as dt
import glob
import numpy as np
from collections import Counter # for size_mode()
def mon_no(month):
    """Return the 1-based month number for a 'Mmm' abbreviation, as a string.

    E.g. 'Jan' -> '1', 'Dec' -> '12' (not zero-padded).
    """
    abbreviations = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                     'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    return str(abbreviations.index(month) + 1)
def flt2date(decday):
    """Convert a decimal day count (days since 1970-01-01 UTC) to a datetime."""
    # One day is 24*60*60 = 86400 seconds.
    return dt.datetime.utcfromtimestamp(decday * 86400)
def days_in_month(any_day):
    """Given a decimal day, return how many days are in that month."""
    as_date = flt2date(any_day)
    # Day 28 plus 4 days always lands in the following month; stepping
    # back by that date's day-of-month gives the last day of the
    # original month.
    rolled = as_date.replace(day=28) + dt.timedelta(days=4)
    last_day = rolled - dt.timedelta(days=rolled.day)
    return last_day.day
def str2flt(yyyymmddhhmmss):
    """Convert a 'yyyymmddhhmmss' UTC timestamp string to a decimal day."""
    # Append a literal 'UTC' so %Z consumes a zone name; the parsed
    # datetime stays naive.
    parsed = dt.datetime.strptime(yyyymmddhhmmss + 'UTC', '%Y%m%d%H%M%S%Z')
    # Seconds since the Unix epoch, computed directly because
    # datetime.timestamp() would apply the local timezone.
    epoch_seconds = (parsed - dt.datetime(1970, 1, 1)) / dt.timedelta(seconds=1)
    return epoch_seconds / (24 * 60 * 60)
def date2flt(dttime):
    """Convert a naive UTC datetime to a decimal day since 1970-01-01."""
    # Same arithmetic as str2flt(), minus the string parsing.
    epoch_seconds = (dttime - dt.datetime(1970, 1, 1)) / dt.timedelta(seconds=1)
    return epoch_seconds / (24 * 60 * 60)
def days_in_year(year):
    """Given a year as a string, return how many days that year has."""
    # Difference between consecutive New Year's Days, in decimal days.
    this_new_year = str2flt(year + '0101000000')
    next_new_year = str2flt(str(int(year) + 1) + '0101000000')
    return next_new_year - this_new_year
def lineofinventory(lineoffile):
    """Parse one line of an inventory CSV export.

    Example input:
        '"C1_CP_FGM_SPIN","2001-02-28 10:36:32.348","2001-03-01 00:00:00.0","12016","10"'

    Returns a (start_day, end_day, npts) tuple: the coverage interval as
    decimal days plus the number of data points on that line.
    """
    fields = lineoffile.split(',')
    # Timestamps are wrapped in double quotes; strip them before parsing.
    stamp_format = '%Y-%m-%d %H:%M:%S.%f'
    start_day = date2flt(dt.datetime.strptime(fields[1][1:-1], stamp_format))
    end_day = date2flt(dt.datetime.strptime(fields[2][1:-1], stamp_format))
    point_count = int(fields[3][1:-1])
    return (start_day, end_day, point_count)
def read_inventory(sc, path, y, m):
    '''
    Given a spacecraft ID (e.g. 'C1'), a path to the inventory lists, a year and a month,
    return an ndarray containing the times and points in each line.
    '''
    # inventory filename example: C1_CG_FGM_SPIN_200101Inventory.csv
    #                             C1_CP_EFW_L3_E3D_INERT_200101Inventory.csv
    inv_req = sc+'*'+y+m+'Inventory.csv'
    # Find all matching files
    inventory = glob.glob(path+inv_req)
    # NOTE(review): if no file matched, inventory[0] below raises IndexError
    # right after this warning is printed — confirm that is intended.
    if len(inventory) == 0:
        print("Are all the directories correct? read_inventory can't find any inventory files.")
    if len(inventory) > 1:
        print('More than one inventory file?!')
    # Open file and count lines
    with open(inventory[0]) as f:
        size=len([0 for _ in f])
    if size > 0:
        # Set up an np.ndarray (size-1: one slot per data line, header excluded)
        inv_file_data = np.zeros(size-1, dtype=[('time1',float),
                                                ('time2', float),
                                                ('npts', int)])
        # Open the file again and read it with lineofinventory; lines whose
        # second character is 'C' (e.g. '"C1_...') are data rows.
        with open(inventory[0]) as f:
            for c, line in enumerate(f):
                if line[1] == 'C': #ignore the header
                    dataline = (lineofinventory(line))
                    inv_file_data[c-1] = dataline #ignoring the header
    else:
        # Empty file: return a single zeroed record.
        inv_file_data = np.zeros(1, dtype=[('time1',float),
                                           ('time2', float),
                                           ('npts', int)])
    return inv_file_data
def query_inventory(inv_data, plot_st):
    """Return how much of the one-hour plot starting at `plot_st` has data.

    `inv_data` is a structured array with 'time1', 'time2' and 'npts'
    fields. The result is the summed overlap, in decimal days, of every
    inventory line that intersects [plot_st, plot_st + 1h] and actually
    contains data points. Line order does not matter (rev. 20160609,
    after advice from Stratos).
    """
    plot_end = plot_st + (1/24)
    # Keep only lines whose interval intersects the plot hour.
    relevant = [line for line in inv_data
                if line['time1'] <= plot_end and line['time2'] >= plot_st]
    total_overlap = 0.0
    for line in relevant:
        # Standard interval-overlap formula.
        overlap = max(0, min(line['time2'], plot_end) - max(line['time1'], plot_st))
        if overlap > 0 and line['npts'] > 0:
            total_overlap += overlap
    return total_overlap
def lineofplotlist(lineoffile):
    '''
    Takes a line of the plot list; modification date (system time, so LT), size of file, and filename
    containing start and end of plot (1 hour):
    'Sun Nov 1 09:25:57 2015 7366 C1_CG_FGM_BMAG_CAA__20010301_000000_20010301_010000_V01.png'
    Returns modification date in decimal days, size of file and decimal day of start of plot.
    '''
    # Split the whole line by spaces
    lparts = lineoffile.split()
    # Creation date and time
    # Convert and replace the 'Mmm' to 'n' month number (string)
    lparts[1] = mon_no(lparts[1])
    # Join them back together, convert to datetime object (which won't work in a list)
    # using the given format
    creation_time = dt.datetime.strptime(' '.join(lparts[1:5]), '%m %d %H:%M:%S %Y')
    # Then convert the datetime object to decimal day since 1/1/1970 LT (not UTC since this is system time)
    #creation = dt.datetime.timestamp(creation_time)
    creation = date2flt(creation_time)
    # Filesize in bytes
    # NOTE(review): filesize is returned as a string, not an int — the
    # consumer (read_png_lists) stores it into an int field, so numpy does
    # the conversion; confirm that is intended.
    filesize = lparts[5]
    # Split up the plot filename by underscores
    fnparts = lparts[-1].split('_')
    # Start time, taken from end of filename since parameter may contain '_', converted to decimal day
    start = str2flt(fnparts[-5]+fnparts[-4])
    # Return a tuple of the results
    return (creation, filesize, start)
def read_png_lists(sc, path, y, m):
    '''
    Given a spacecraft ID (e.g. 'C1'), a path to the png lists, a year and a month,
    return a dictionary where each key is SCParameter_yyyymm
    '''
    # Filename example: C1FGM_BGSE_PHI200106, C1EFW_L3_E3D_INERT_X200101
    pnglist_req = sc+'*'+y+m+'.txt'
    # Find all matching files; these will include the full path
    plotlistlist = glob.glob(path+pnglist_req)
    # Extract all the parameter names into a list, this should retain its order.
    files = [x[x.find(sc):] for x in plotlistlist]
    parameters = [x[x.find('_')+1:x.find(y+m)-1] for x in files]
    # Set up an empty dictionary
    scym_pngs = {}
    # This assumes that the parameters are singular
    for i in range(0,len(parameters)):
        # Set up the key for the dictionary
        scymkey = sc + parameters[i] + '_' + y + m
        # Open file and count its lines so the array can be pre-sized
        with open(plotlistlist[i]) as f:
            size=len([0 for _ in f])
        # Set up receiving ndarray
        pngs = np.zeros(size, dtype=[('creation',float),
                                     ('filesize', int),
                                     ('plot_time', float)])
        # Open and read file again, send each line to lineofplotlist
        # and save the returned tuple in the array
        with open(plotlistlist[i]) as f:
            for c, line in enumerate(f):
                dataline = (lineofplotlist(line))
                pngs[c] = dataline
        # Once one file (parameter) has been read in, store
        # one month together, 1 s/c, 1 month, all parameters
        scym_pngs[scymkey] = pngs
    # Return whole dictionary
    return scym_pngs
def size_mode(d_sizes):
    """Return the modal value(s) of a list of integers.

    When several values tie for the highest count, all of them are
    returned (in first-appearance order); when every value occurs equally
    often, every distinct value comes back.
    """
    counts = Counter(d_sizes)
    # Count of the single most common value.
    most_common = counts.most_common(1)
    top_count = most_common[0][1]
    # How many distinct values share each occurrence count?
    values_per_count = Counter(counts.values())
    if values_per_count[top_count] > 1:
        # Ties (or no mode at all): return every value with the top count.
        return [value for value in counts if counts[value] == top_count]
    return [most_common[0][0]]
def Tally(all_citizens, key):
    """Count the citizens of each status under `all_citizens[key]`.

    Returns [humans, empties, cylons, twins, suspects, others], where
    "others" is everything not in the five known status labels.
    """
    tally = Counter(all_citizens[key]['status'])
    known = (b'Human', b'Empty', b'Cylon', b'Twin', b'Suspect')
    counts = [tally[status] for status in known]
    others = sum(count for status, count in tally.items() if status not in known)
    counts.append(others)
    return counts
| true |
6c9d391e913387c6337374ccae3c6afb53247b9d | Python | frank6866/python-scripts | /testing/consumeMemory.py | UTF-8 | 410 | 3.03125 | 3 | [] | no_license | import sys
import time
def consume_memory(v_increase_by_mb_per_second):
    """Grow memory without bound, for testing monitors / OOM behaviour.

    Every second, appends one string of `v_increase_by_mb_per_second`
    megabytes and prints how many chunks exist so far. Never returns.
    """
    strs = []
    while True:
        print len(strs)
        # 2**20 bytes per MB; keeping references prevents anything
        # from being freed.
        strs.append(' ' * int(v_increase_by_mb_per_second) * (2 ** 20))
        time.sleep(1)
if __name__ == "__main__":
increase_by_mb_per_second = 10
if len(sys.argv) == 2:
increase_by_mb_per_second = sys.argv[1]
consume_memory(increase_by_mb_per_second)
| true |
95d3960fe267ed450064cdd2f3273407061d4628 | Python | cs-richardson/fahrenheit-albs1010 | /Convert C to F.py | UTF-8 | 640 | 4.5 | 4 | [] | no_license |
# Albert
# It asks for a number input from the user in numeric form
your_number = input("Enter a whole number in Celsius ")
def convert(number):
# Function that tries the number and converts to F, if not a valid number, error.
try:
value = int(number)
if value >= -273:
new_number = value * 9/5 + 32
return "C:" + str(value) + "˚" + "\nF:" + str(new_number) + "˚"
else:
return "Your number " + str(value) + " is under absolute zero!"
except ValueError:
return "You have to enter the number in numeric form!"
print(convert(your_number))
| true |
4abc3f9514815675d0106c0ea96b8a223e6dd1be | Python | montenegrodr/daftlistings | /examples/sale_agreed_properties.py | UTF-8 | 819 | 3.375 | 3 | [] | no_license | # Get the current sale agreed prices for properties in Dublin 15 that are between 200,000 and 250,000.
from daftlistings import Daft, SaleType
daft = Daft()
# Search Dublin 15 (Dublin City) residential sale listings only.
daft.set_county("Dublin City")
daft.set_area("Dublin 15")
daft.set_listing_type(SaleType.PROPERTIES)
# Restrict to sale-agreed properties priced 200,000-250,000.
daft.set_sale_agreed(True)
daft.set_min_price(200000)
daft.set_max_price(250000)
listings = daft.get_listings()

# Print each listing's address and link, plus its facilities/features
# when the listing provides them.
for listing in listings:
    print(listing.get_formalised_address())
    print(listing.get_daft_link())
    facilities = listing.get_facilities()
    if facilities is not None:
        print('Facilities: ')
        for facility in facilities:
            print(facility)
    features = listing.get_features()
    if features is not None:
        print('Features: ')
        for feature in features:
            print(feature)
    print("")
| true |
43c01a8d25b66afa1f1ab5f96f7f15b22d67733d | Python | NeoMindStd/CodingLife | /baekjoon/11772py/a.py | UTF-8 | 88 | 2.984375 | 3 | [] | no_license | s=0
# For each of the given test cases read a token whose last character is an
# exponent digit: add base**exponent to the running total `s` (initialised
# above), then print the total.
for i in range(int(input())):
    p=input()
    s+=int(p[:-1])**int(p[-1])
print(s)
| true |
127f436a22d863413f8b92cbbc33a35177d34ee1 | Python | JoukoRintamaki/mooc-ohjelmointi-21 | /osa05-11_kertomat/src/kertomat.py | UTF-8 | 273 | 3.5 | 4 | [] | no_license | def kertomat(n: int):
kertomatDictionary = {}
i = 1
while i <= n:
j = i
lukuKertoma = 1
while j > 0:
lukuKertoma *= j
j -= 1
kertomatDictionary[i] = lukuKertoma
i += 1
return kertomatDictionary | true |
2494d0f7939e7d40e84e472784de5c1725685455 | Python | andreikee/pyTextGames | /tic_tac_toe/tic_tac_toe.py | UTF-8 | 3,326 | 4 | 4 | [
"MIT"
] | permissive | """
Tic-tac-toe (American English), noughts and crosses (Commonwealth English), or Xs and Os/“X’y O’sies” (Ireland), is a paper-and-pencil game for two players, X and O, who take turns marking the spaces in a 3×3 grid. The player who succeeds in placing three of their marks in a diagonal, horizontal, or vertical row is the winner. It is a solved game with a forced draw assuming best play from both players. (https://en.wikipedia.org/wiki/Tic-tac-toe)
"""
import os
from time import sleep
def cls():
    # Clear the terminal: 'cls' on Windows, 'clear' everywhere else.
    os.system('cls' if os.name=='nt' else 'clear')
# now, to clear the screen
cls()
def get_new_board():
    """Return a fresh board: cells '1'..'9' labelling their own positions."""
    return [str(cell) for cell in range(1, 10)]
def board_rendered(board: list) -> str:
    """Render the 9-cell board as a 3x3 grid with separators.

    Each row is three consecutive cells joined by ' | '; rows are joined
    by a dashed divider line. (Also drops the original's unused `res`
    local variable.)
    """
    rows = []
    for r in range(3):
        start = r * 3
        rows.append(' | '.join(board[start:start + 3]))
    return '\n---------\n'.join(rows)
def get_winning_slices():
    """Return the eight slices (rows, columns, diagonals) that decide a win."""
    slices = []
    # Three rows: contiguous runs of three cells.
    for r in range(3):
        slices.append(slice(3 * r, 3 * r + 3, None))
    # Three columns: start at the column index, step by the row width.
    for c in range(3):
        slices.append(slice(c, c + 7, 3))
    # Both diagonals.
    slices.append(slice(0, 9, 4))
    slices.append(slice(2, 7, 2))
    return slices
def check_win_slice(sl, board):
    """True when every cell in the slice holds the same mark."""
    cells = board[sl]
    first = cells[0]
    return all(first == cell for cell in cells)
def is_winner(board):
    """True when any winning slice is fully occupied by one mark."""
    for sl in get_winning_slices():
        if check_win_slice(sl, board):
            return True
    return False
def player_gen(name_x='Player 1', name_o='Player 2'):
    """Yield the two players forever, X first, alternating each turn."""
    players = ({'name': name_x, 'symb': 'X'},
               {'name': name_o, 'symb': 'O'})
    turn = 0
    while True:
        yield players[turn]
        turn = 1 - turn
def get_input(player, board):
    """Prompt `player` until they give a usable move.

    Returns 'q' (quit), 'n' (new game), or the 0-based index of a free
    board cell. Re-prompts on busy cells and invalid input.
    """
    while True:
        # Bug fix: the prompt previously read the module-global `pl`
        # instead of the `player` parameter; use the parameter so the
        # function is self-contained.
        inp = input(f"{player['name']} type the position for {player['symb']} : ")
        pos = None
        if inp in ['q', 'n']:
            return inp
        try:
            pos = int(inp)
            pos -= 1  # user types 1-9; the board is indexed 0-8
            if board[pos] in 'XO':
                print('This cell is busy.')
                print('Try one more time.')
                print()
                continue
            return pos
        except (ValueError, IndexError):
            print('Wrong input. You should put the number 1-9.')
            print('Try one more time.')
            print()
def init_game(name_x='Player 1', name_o='Player 2'):
    """Return a fresh (board, player-generator) pair for a new game."""
    board = get_new_board()
    player = player_gen(name_x, name_o)
    return board, player
menu_text = """
1 - 9 the number of a cell to put X or O
q - Terminate the current game & Exit
n - Terminate the current game & Start new game
"""
# Main game loop: render the board, prompt the current player, apply the
# move, then check for a win.  NOTE(review): a full board with no winner
# (a draw) is never detected — the loop would keep prompting; confirm
# whether that is intended.
board, player = init_game()
while True:
    cls()
    print(board_rendered(board))
    print(menu_text)
    pl = next(player)
    inp = get_input(pl, board)
    if inp == 'q':
        print('='*30)
        print('Exit')
        print()
        break
    elif inp == 'n':
        print('='*30)
        print('Terminating the current game & starting new one')
        print()
        sleep(4)
        board, player = init_game()
        continue
    else:
        # a 0-based cell index: stamp the player's mark
        board[inp] = pl['symb']
    if is_winner(board):
        print(f"{pl['name']} is 'WINNER!!!!'")
        sleep(4)
        board, player = init_game()
| true |
4fde598ca6b03824625177f00d528f1ed79501a7 | Python | holim0/Algo_Study | /python/boj9935.py | UTF-8 | 824 | 3.125 | 3 | [] | no_license | from collections import deque
# BOJ 9935 ("explosion string"): repeatedly remove every occurrence of
# `explosion` from `string`, including occurrences created by earlier
# removals, using a stack plus a window (`cur`) of the last `exlen`
# characters pushed.
string = input()
explosion = input()
exlen = len(explosion)
# Fix: the original called string.rstrip("\n") / explosion.rstrip("\n")
# and discarded the results (str.rstrip returns a new string).  input()
# never includes the trailing newline anyway, so the no-op calls are removed.
stack = []
cur = deque([])
for s in string:
    stack.append(s)
    # keep `cur` holding exactly the last exlen characters pushed
    if len(cur) < exlen:
        cur.append(s)
    else:
        cur.popleft()
        cur.append(s)
    if len(cur) == exlen:
        if "".join(cur) == explosion:
            # the window matches the bomb: pop it off the stack ...
            for i in range(exlen):
                stack.pop()
            # ... then rebuild the window from what is now on top of the
            # stack, so a bomb straddling the removal point is still caught
            cur = deque([])
            if len(stack) == 0:
                continue
            elif len(stack) >= exlen:
                for i in range(exlen):
                    cur.append(stack[len(stack) - exlen + i])
            else:
                for i in range(len(stack)):
                    cur.append(stack[i])
if len(stack)==0:
print("FRULA")
else:
print("".join(stack)) | true |
f3cb6379f7e624f042c0e5e6b4259111ceaf8005 | Python | wilcockj/AdventOfCode2020 | /day7/main.py | UTF-8 | 3,375 | 3.0625 | 3 | [] | no_license | import fileinput
from collections import Counter
import re
# Read the puzzle input (one bag rule per line) from stdin/argv files.
inputlist = []
for line in fileinput.input():
    inputlist.append(line.strip())
# Module-level state shared with p1() below:
baglist = []
canholdgold = {}      # bag name -> 1 if it can (transitively) hold "shiny gold bag"
bagdict = {}          # bag name -> list of bag names it directly contains
testbagdict = {}
parents = []
bagnumberdict = {}    # bag name -> {inner bag name: count (as string)}
# Plan: mark which bags can contain "shiny gold", then propagate that
# flag to any bag containing one of those bags.
def recursivebagcheck(bags, target, parents):
    """Count the bags that can hold `target`, directly or transitively.

    `bags` maps a bag name to the collection of bag names it directly
    contains.  Every newly discovered container is appended to `parents`
    (mutated in place) and itself searched for; the final length of
    `parents` is returned.
    """
    for bag, contents in bags.items():
        if bag in parents or target not in contents:
            continue
        parents.append(bag)
        recursivebagcheck(bags, bag, parents)
    return len(parents)
def recursivebagcount(bags, parents):
    """Return 1 (the bag itself) plus the total number of bags nested inside it.

    `bags` maps a bag name to {inner bag name: count-as-string}; `parents`
    is the name of the bag to count (kept as-is for caller compatibility,
    though it holds a single bag name).
    """
    return 1 + sum(int(qty) * recursivebagcount(bags, inner)
                   for inner, qty in bags[parents].items())
def p1(inputlist):
    """Solve both parts of an Advent-of-Code-2020-day-7-style puzzle.

    Parses each rule line into (parent bag, [(count, inner bag), ...]),
    fills the module-level dicts, then prints: the number of bags that
    can hold "shiny gold bag" (computed two ways) and the number of bags
    a shiny gold bag must contain.
    NOTE(review): the outer `for x in range(10)` loop re-parses all rules
    10 times — apparently a crude fixed-point iteration so `canholdgold`
    flags propagate upward; confirm 10 passes suffice for the input depth.
    """
    # build the dictionaries from the rule text
    for x in range(10):
        for rule in inputlist:
            innerbags = []
            parent, children = re.match(r'(.+?)s? contain (.+)', rule).groups()
            children = re.findall(r'(\d) ([ a-z]+bag)?', children)
            '''
            outerbag = rule.split("bags")[0].strip()
            baglist.append(outerbag)
            innerbags = []
            if rule.split("contain")[1].strip() == "no other bags.":
                # print(rule)
                canholdgold[outerbag] = 0
            elif Counter(rule)[","] > 0:
                innerbags = rule.split("contain")[1].split(",")
                innerbags = [re.sub('\d', '', bag) for bag in innerbags]
                innerbags = [re.sub('bag.|bags.|contain', '', bag)
                             for bag in innerbags]
                innerbags = [x.strip() for x in innerbags]
                innerbags = [x.rstrip(" .") for x in innerbags]
            else:
                innerbags = rule.split("contain")[1]
                innerbags = re.sub('\d|bags.|bag.', '', innerbags)
                innerbags = innerbags.strip()
                innerbags = [innerbags]
            # print(outerbag,innerbags)
            bagdict[outerbag] = innerbags
            '''
            # print(parent)
            # print(children)
            for x in children:
                innerbags.append(x[1])
            # direct container of shiny gold, or container of a known container
            if "shiny gold bag" in innerbags:
                canholdgold[parent] = 1
            for x in innerbags:
                if x in canholdgold:
                    if canholdgold[x] == 1:
                        canholdgold[parent] = 1
            if parent not in bagdict:
                bagdict[parent] = []
            for number, bag in children:
                bagdict[parent].append(bag)
            if parent not in bagnumberdict:
                bagnumberdict[parent] = {}
            for number, bag in children:
                bagnumberdict[parent][bag] = number
            # print(innerbags)
    # print(len(canholdgold))
    # print(len(canholdgold))
    # NOTE(review): this counting loop is duplicated verbatim below —
    # the first pass is redundant.
    goldholdcount = 0
    for x in canholdgold.values():
        if x == 1:
            goldholdcount += 1
    goldholdcount = 0
    for x in canholdgold.values():
        if x == 1:
            goldholdcount += 1
    print(goldholdcount)
    # part 1 again via the recursive search, then part 2 (minus the
    # shiny gold bag itself)
    parents = []
    print(recursivebagcheck(bagdict, "shiny gold bag", parents))
    parents = {}
    print(recursivebagcount(bagnumberdict, "shiny gold bag") - 1)
p1(inputlist)
# print(set(baglist))
| true |
f8b35f8619628d26c532c6885750ecb3da2d9d93 | Python | ryujaehun/baseline | /code/model/srcnn.py | UTF-8 | 1,832 | 2.75 | 3 | [] | no_license | import torch
import torch.nn as nn
from math import sqrt
class ConvBlock(torch.nn.Module):
    """Conv2d followed by optional normalization and optional activation.

    norm: 'batch', 'instance', or None (skip normalization).
    activation: 'relu', 'prelu', 'lrelu', 'tanh', 'sigmoid', or None.
    NOTE(review): any other non-None value for `norm`/`activation`
    leaves self.bn/self.act unset and forward() would raise
    AttributeError — confirm callers only pass the supported strings.
    """
    def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu', norm='batch'):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.norm = norm
        if self.norm =='batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()
    def forward(self, x):
        """Apply conv, then norm (if configured), then activation (if configured)."""
        if self.norm is not None:
            out = self.bn(self.conv(x))
        else:
            out = self.conv(x)
        if self.activation is not None:
            return self.act(out)
        else:
            return out
class SRCNN(torch.nn.Module):
    """Classic three-layer SRCNN (9-5-5 convolutions) for single-channel
    super-resolution, built from ConvBlock layers without normalization.
    """
    def __init__(self):
        super(SRCNN, self).__init__()
        self.layers = torch.nn.Sequential(
            ConvBlock(1, 64, 9, 1, 0, norm=None),
            ConvBlock(64, 64 // 2, 5, 1, 0, norm=None),
            ConvBlock(64 // 2, 1, 5, 1, 0, activation=None, norm=None)
        )
    def forward(self, x):
        """Run the 3-conv pipeline; padding=0 shrinks spatial dims by 16."""
        out = self.layers(x)
        return out
    def weight_init(self, mean=0.0, std=0.001):
        """Re-initialise every Conv2d with N(mean, std) weights and zero bias.

        Fix: the original body called utils.weights_init_normal, but no
        `utils` module is imported anywhere in this file, so calling
        weight_init raised NameError.  The normal initialisation is now
        done directly with torch.nn.init.
        """
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight, mean=mean, std=std)
                if m.bias is not None:
                    torch.nn.init.zeros_(m.bias)
| true |
374c1f123d22b6b46195e0c05f8361b5ea0830c2 | Python | ryanoh98/pepper_study | /posture2.py | UTF-8 | 1,386 | 2.625 | 3 | [] | no_license | import qi
import argparse
import sys
import time
import almath
def posture2(session):
    """Drive the robot's head and arms into one fixed pose via ALMotion.

    session: a connected qi.Session.  Stiffens the Head/Shoulder/Elbow
    groups, commands the joint angles below (degrees converted to
    radians), then sleeps 5 s so the motion can complete.
    """
    motion_service = session.service("ALMotion")
    # enable full torque on the joint groups we are about to move
    motion_service.setStiffnesses(["Head", "Shoulder", "Elbow"], [1.0, 1.0, 1.0])
    names = ["HeadYaw", "HeadPitch", "LShoulderRoll", "RShoulderRoll", "LElbowRoll", "RElbowRoll", "LElbowYaw", "RElbowYaw"]
    angles = [0.0*almath.TO_RAD, -35.0*almath.TO_RAD, 45.0*almath.TO_RAD, -45.0*almath.TO_RAD,
              -89.0*almath.TO_RAD, 89.0*almath.TO_RAD, 5.0*almath.TO_RAD, -5.0*almath.TO_RAD]
    fractionMaxSpeed = 0.4
    motion_service.setAngles(names,angles,fractionMaxSpeed)
    time.sleep(5)
if __name__ == "__main__":
    # Parse --ip/--port, connect to the NAOqi runtime, then run the pose.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="192.168.1.45",
                        help="Robot IP address. On robot or Local Naoqi: use '192.168.1.45'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    session = qi.Session()
    try:
        session.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        # connection failure: report and exit non-zero
        print("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(
            args.port) + ".\n""Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
posture2(session) | true |
a2d75cbea23dd05d525ba7364ef2d5298bec8d2e | Python | zhaotong139/4043 | /main_car.py | UTF-8 | 2,839 | 3.78125 | 4 | [] | no_license | import time
#导入time,用到系统当前时间
'''使用面向对象的思路实现『停车收费』场景:
1. 车主开车进入停车场,产生停车记录,
2. 车主开车继续向前,将车停到车位上,修改前面的停车记录,
3. 车主停车完成,
一段时间(购物、吃饭...)之后,车主驾车准备离开停车场,
4. 车主开车离开车位,修改停车记录,
5. 车主开车到达出口,系统根据停车的时间生成订单,
6. 车主缴纳停车费,
7. 车主离开停车场。
至此,整个停车收费的场景完成。
'''
#定义car类,属性包含车牌号,车主,联系方式,入库时间,出库时间
class Car():
    """One parked-car record plus a class-level registry of the whole lot.

    Attributes: platenumber, owner, contantway (contact info — typo kept
    for interface compatibility), time_start/time_end (epoch seconds).
    """
    # simulated lot: capacity, and the shared list of currently parked cars
    max_car = 8
    car_lst = []
    def __init__(self, platenumber, owner, contantway, time_start=0, time_end=0):
        self.platenumber = platenumber
        self.owner = owner
        self.contantway = contantway
        self.time_start = time_start
        self.time_end = time_end
    def in_car(self):
        """Park this car: record its entry time and register it in the lot."""
        if len(self.car_lst) < self.max_car:
            self.time_start = time.time()
            self.car_lst.append(self)
            print('停车成功.')
        else:
            print('车库已满.')
    def exit(self):
        """Leave the lot: find the car by plate number, bill it, deregister it.

        Fixes over the original: the search no longer skips the last slot
        when the lot is full, no longer mutates car_lst while indexing
        into it (which could raise IndexError or miss cars), and prints
        the 'never entered' message only after the whole list was searched.
        """
        for parked in self.car_lst:
            if parked.platenumber == self.platenumber:
                self.car_lst.remove(parked)
                parked.time_end = time.time()
                # hours parked; fee formula (hours / 5) kept from the original
                tt = float((parked.time_end - parked.time_start) / 3600)
                print('停车时间%f小时,停车费用%f元.' % (tt, float(tt / 5)))
                break
        else:
            print('该汽车从未进入, 请联系管理员.')
# Main loop: a simple text menu driving the park / exit workflow.
while True:
    chose = input(
        '''
        请选择功能:
        1.停车
        2.出车
        3.退出系统
        '''
    )
    if chose == '3':
        break
    elif chose == '1':
        # park: refuse when the lot is already full
        if len(Car.car_lst) >= Car.max_car:
            print('车库已满.')
        else:
            pla = input('输入车牌号:')
            own = input('输入车主名:')
            cw = input('输入联系方式:')
            c = Car(pla, own, cw)
            c.in_car()
    elif chose == '2':
        # exit: a throwaway Car carries the plate number to search for
        if len(Car.car_lst) == 0:
            print('车库为空, 请联系管理员.')
        else:
            pl = input('输入车牌号:')
            carr = Car(pl, 0, 0)
            carr.exit()
    else:
        print('输入错误,请重新选择.')
        time.sleep(2)
        continue
| true |
160b4c4f45e118280dd8bf2006f9667912d08cdf | Python | Alucardmini/nlp_base_learning | /vae/vae_gen_poem.py | UTF-8 | 3,637 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python
#coding:utf-8
"""
@author: wuxikun
@software: PyCharm Community Edition
@file: vae_gen_poem.py
@time: 12/19/18 11:34 AM
"""
import keras.backend as K
from keras.layers import Dense, Lambda, Conv1D, Embedding, Input, GlobalAveragePooling1D, Reshape
from keras.losses import mse, categorical_crossentropy
import numpy as np
import re
import codecs
n = 7 # 只抽取五言诗
latent_dim = 64 # 隐变量维度
hidden_dim = 64 # 隐层节点数
s = codecs.open('data/shi.txt', encoding='utf-8').read()
# 通过正则表达式找出所有的五言诗
s = re.findall(u' (.{%s},.{%s}。.*?)\r\n'%(n,n), s)
shi = []
for i in s:
for j in i.split(u'。'): # 按句切分
if j:
shi.append(j)
shi = [i[:n] + i[n+1:] for i in shi if len(i) == 2*n+1]
# 构建字与id的相互映射
id2char = dict(enumerate(set(''.join(shi))))
char2id = {j:i for i,j in id2char.items()}
# 诗歌id化
shi2id = [[char2id[j] for j in i] for i in shi]
shi2id = np.array(shi2id)
from keras.engine.topology import Layer
class GCNN(Layer): # 定义GCNN层,结合残差
def __init__(self, output_dim=None, residual=False, **kwargs):
super(GCNN, self).__init__(**kwargs)
self.output_dim = output_dim
self.residual = residual
def build(self, input_shape):
if self.output_dim == None:
self.output_dim = input_shape[-1]
self.kernel = self.add_weight(name='gcnn_kernel',
shape=(3, input_shape[-1],
self.output_dim * 2),
initializer='glorot_uniform',
trainable=True)
def call(self, x):
_ = K.conv1d(x, self.kernel, padding='same')
_ = _[:,:,:self.output_dim] * K.sigmoid(_[:,:,self.output_dim:])
if self.residual:
return _ + x
else:
return _
inputs = Input(shape=(2*n, ), dtype='int32')
embedding = Embedding(len(char2id), hidden_dim)(inputs)
h = GCNN(residual=True)(embedding)
h = GCNN(residual=True)(h)
h = GlobalAveragePooling1D()(h)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0, stddev=1)
return z_mean + K.exp(z_log_var/2)*epsilon
z = Lambda(sampling)([z_mean, z_log_var])
decoder_hidden = Dense(hidden_dim*(2*n))
decoder_cnn = GCNN(residual=True)
decoder_Dense = Dense(len(char2id), activation='softmax')
h = decoder_hidden(z)
h = Reshape((2*n, hidden_dim))(h)
h = decoder_cnn(h)
output = decoder_Dense(h)
from keras.models import Model
vae = Model(inputs, output)
xent_loss = K.sum(K.sparse_categorical_crossentropy(inputs, output), 1)
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
# 重用解码层,构建单独的生成模型
decoder_input = Input(shape=(latent_dim,))
_ = decoder_hidden(decoder_input)
_ = Reshape((2*n, hidden_dim))(_)
_ = decoder_cnn(_)
_output = decoder_Dense(_)
generator = Model(decoder_input, _output)
# 利用生成模型随机生成一首诗
def gen():
r = generator.predict(np.random.randn(1, latent_dim))[0]
r = r.argmax(axis=1)
return ''.join([id2char[i] for i in r[:n]])\
+ u','\
+ ''.join([id2char[i] for i in r[n:]])
vae.fit(shi2id,
shuffle=True,
epochs=20,
batch_size=64)
vae.save_weights('shi.model')
for i in range(20):
print(gen())
| true |
b086a0fe446bc23ee4c3471705392488db3b3390 | Python | Hallis1221/klassekartlager | /liste.py | UTF-8 | 5,697 | 3.625 | 4 | [
"MIT"
] | permissive | # load a Tkinter listbox with data lines from a file,
# sort data lines, select a data line, display the data line,
# edit the data line, update listbox with the edited data line
# add/delete a data line, save the updated listbox to a data file
# used a more modern import to give Tkinter items a namespace
# tested with Python24 vegaseat 16nov2006
import tkinter as tk # gives tk namespace
def add_item():
    """
    Append the text currently in the Entry widget to the end of the
    listbox.  Uses the module-level `listbox1` and `enter1` widgets
    created by main_kjønn().
    """
    listbox1.insert(tk.END, enter1.get())
def delete_item():
    """
    Delete the selected line from the listbox; a no-op when nothing
    is selected (curselection() is then empty and [0] raises IndexError).
    """
    try:
        # get selected line index
        index = listbox1.curselection()[0]
        listbox1.delete(index)
    except IndexError:
        pass
def get_list(event):
    """
    Copy the clicked listbox line into the edit Entry widget.

    Bound to <ButtonRelease-1> on the listbox; `event` is unused.
    Fixes: `enter1.delete(0, 50)` only removed the first 50 characters
    and left the tail of longer lines behind — now clears to tk.END;
    a click that selects nothing no longer raises IndexError.
    """
    try:
        # get selected line index (empty selection -> IndexError)
        index = listbox1.curselection()[0]
    except IndexError:
        return
    # get the line's text
    seltext = listbox1.get(index)
    # delete ALL previous text in enter1
    enter1.delete(0, tk.END)
    # now display the selected text
    enter1.insert(0, seltext)
def set_list(event):
    """
    Write the (possibly edited) Entry text back into the listbox.

    If a line is selected it is replaced in place; otherwise the text
    is appended at the end.  Bound to <Return> and <Double-1> on the
    Entry; `event` is unused.
    """
    try:
        index = listbox1.curselection()[0]
        # delete old listbox line
        listbox1.delete(index)
    except IndexError:
        # nothing selected: append instead of replace
        index = tk.END
    # insert edited item back into listbox1 at index
    listbox1.insert(index, enter1.get())
def sort_list():
    """
    Sort the listbox items case-insensitively, in place: snapshot the
    lines, sort them, then reload the widget.
    """
    temp_list = list(listbox1.get(0, tk.END))
    temp_list.sort(key=str.lower)
    # delete contents of present listbox
    listbox1.delete(0, tk.END)
    # load listbox with sorted data
    for item in temp_list:
        listbox1.insert(tk.END, item)
def save_list_jente():
    """
    Save the current listbox contents to jenter.txt, one line per item.

    Fix: the file is now opened with a context manager so the handle is
    closed even if writelines() fails (the original used open/close).
    """
    # snapshot the listbox lines and terminate each with a newline
    temp_list = [chem + '\n' for chem in listbox1.get(0, tk.END)]
    print("jente")
    with open("jenter.txt", "w") as fout:
        fout.writelines(temp_list)
def save_list_gutter():
    """
    Save the current listbox contents to gutter.txt, one line per item.

    Fix: the file is now opened with a context manager so the handle is
    closed even if writelines() fails (the original used open/close).
    """
    # snapshot the listbox lines and terminate each with a newline
    temp_list = [chem + '\n' for chem in listbox1.get(0, tk.END)]
    print("gutt")
    with open("gutter.txt", "w") as fout:
        fout.writelines(temp_list)
def main_kjønn(kjønn):
    """Build and run the listbox editor window for one gender's data file.

    kjønn: "gutter" selects gutter.txt, anything else selects jenter.txt.
    Injects the widgets into module globals (`listbox1`, `enter1`) so the
    module-level callbacks above can reach them, then blocks in mainloop().
    NOTE(review): the read-then-rewrite of the data file below is a no-op
    copy, and the handle from the first open(...).read() is never closed
    explicitly — relies on garbage collection.
    """
    # create the sample data file
    if kjønn == "gutter":
        str1 = open("gutter.txt", "r").read()
        fout = open("gutter.txt", "w")
    else:
        str1 = open("jenter.txt", "r").read()
        fout = open("jenter.txt", "w")
    fout.write(str1)
    fout.close()
    # read the data file into a list
    if kjønn == "gutter":
        fin = open("gutter.txt", "r")
    else:
        fin = open("jenter.txt", "r")
    chem_list = fin.readlines()
    fin.close()
    # strip the trailing newline char
    chem_list = [chem.rstrip() for chem in chem_list]
    root = tk.Tk()
    root.title("Listbox Operations")
    # create the listbox (note that size is in characters)
    globals()["listbox1"] = tk.Listbox(root, width=50, height=6)
    listbox1.grid(row=0, column=0)
    # create a vertical scrollbar to the right of the listbox
    yscroll = tk.Scrollbar(root, command=listbox1.yview, orient=tk.VERTICAL)
    yscroll.grid(row=0, column=1, sticky=tk.N + tk.S)
    listbox1.configure(yscrollcommand=yscroll.set)
    # use entry widget to display/edit selection
    globals()["enter1"] = tk.Entry(root, width=50, bg='yellow')
    enter1.insert(0, 'Click on an item in the listbox')
    enter1.grid(row=1, column=0)
    # pressing the return key will update edited line
    enter1.bind('<Return>', set_list)
    # or double click left mouse button to update line
    enter1.bind('<Double-1>', set_list)
    # button to sort listbox
    button1 = tk.Button(root, text='Sorter', command=sort_list, width=20)
    button1.grid(row=2, column=0, sticky=tk.W)
    # button to save the listbox's data lines to a file
    if kjønn == "gutter":
        button2 = tk.Button(root, text = "Lagre", command=save_list_gutter, width=20)
    else:
        button2 = tk.Button(root, text='Lagre', command=save_list_jente, width=20)
    button2.grid(row=3, column=0, sticky=tk.W)
    # button to add a line to the listbox
    button3 = tk.Button(root, text='Legg til teksten i listen', command=add_item, width=20)
    button3.grid(row=2, column=0, sticky=tk.E)
    # button to delete a line from listbox
    button4 = tk.Button(root, text='Fjern valgte linje ', command=delete_item, width=20)
    button4.grid(row=3, column=0, sticky=tk.E)
    label1 = tk.Label(root, text="Du må trykke lagre og restarte programmet for å lagre endringer")
    label1.grid(row=4, column=0, sticky=tk.E)
    # load the listbox with data
    for item in chem_list:
        listbox1.insert(tk.END, item)
    # left mouse click on a list item to display selection
    listbox1.bind('<ButtonRelease-1>', get_list)
    root.mainloop()
def main_gutter():
    """Launch the editor on the boys' list (gutter.txt)."""
    main_kjønn("gutter")
def main_jenter():
    """Launch the editor on the girls' list (jenter.txt)."""
    main_kjønn("jenter")
def close():
    """Placeholder shutdown hook; always returns None.

    Fix: the original wrapped a bare `return` in try/except NameError —
    the except branch was unreachable (a plain return cannot raise) and
    the `global listbox1` declaration was unused.  Behavior is unchanged.
    """
    # TODO: implement a real shutdown (destroy the Tk root, save state, ...)
    return None
95a677bc44e8e5e27216419d54ddee262a5d70f1 | Python | jlindsey15/oneshot | /match_net_generalize.py | UTF-8 | 9,431 | 2.546875 | 3 | [] | no_license | import numpy as np
import time
import sys
cur_time = time.time()
mb_dim = 32 #training examples per minibatch
x_dim = 28 #size of one side of square image
y_dim = int(sys.argv[1]) #possible classes
y_dim_alt = int(sys.argv[2])
n_samples_per_class = 1 #samples of each class
eps = 1e-10 #term added for numerical stability of log computations
tie = False #tie the weights of the query network to the labeled network
x_i_learn = True #toggle learning for the query network
learning_rate = 1e-1
data = np.load('data.npy')
data = np.reshape(data,[-1,20,28,28]) #each of the 1600 classes has 20 examples
'''
Samples a minibatch of size mb_dim. Each training example contains
n_samples labeled samples, such that n_samples_per_class samples
come from each of y_dim randomly chosen classes. An additional example
one one of these classes is then chosen to be the query, and its label
is the target of the network.
'''
def get_minibatch(num_classes):
    """Sample one Omniglot-style minibatch for matching-network training.

    For each of the mb_dim training examples: pick `num_classes` random
    classes, take n_samples_per_class randomly-rotated images of each as
    the labeled support set (shuffled via pinds), and one extra image of
    a random chosen class as the query.
    Returns (mb_x_i, mb_y_i, mb_x_hat, mb_y_hat).
    NOTE(review): np.int was removed in NumPy 1.24 — these dtype args
    need to become int/np.int_ on modern NumPy; also storing images in
    an int array truncates any float pixel data — confirm data's dtype.
    """
    n_samples = num_classes * n_samples_per_class
    mb_x_i = np.zeros((mb_dim,n_samples,x_dim,x_dim,1))
    mb_y_i = np.zeros((mb_dim,n_samples))
    mb_x_hat = np.zeros((mb_dim,x_dim,x_dim,1),dtype=np.int)
    mb_y_hat = np.zeros((mb_dim,),dtype=np.int)
    for i in range(mb_dim):
        ind = 0
        pinds = np.random.permutation(n_samples)
        classes = np.random.choice(data.shape[0],num_classes,False)
        x_hat_class = np.random.randint(num_classes)
        for j,cur_class in enumerate(classes): #each class
            example_inds = np.random.choice(data.shape[1],n_samples_per_class,False)
            for eind in example_inds:
                # random 90-degree rotation augments each support image
                mb_x_i[i,pinds[ind],:,:,0] = np.rot90(data[cur_class][eind],np.random.randint(4))
                mb_y_i[i,pinds[ind]] = j
                ind +=1
            if j == x_hat_class:
                # the query is a fresh sample of the chosen class
                mb_x_hat[i,:,:,0] = np.rot90(data[cur_class][np.random.choice(data.shape[1])],np.random.randint(4))
                mb_y_hat[i] = j
    return mb_x_i,mb_y_i,mb_x_hat,mb_y_hat
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('summary_dir', '/tmp/oneshot_logs', 'Summaries directory')
if tf.gfile.Exists(FLAGS.summary_dir):
tf.gfile.DeleteRecursively(FLAGS.summary_dir)
tf.gfile.MakeDirs(FLAGS.summary_dir)
x_hat = tf.placeholder(tf.float32,shape=[None,x_dim,x_dim,1])
x_i = tf.placeholder(tf.float32,shape=[None,y_dim * n_samples_per_class ,x_dim,x_dim,1])
y_i_ind = tf.placeholder(tf.int64,shape=[None,y_dim * n_samples_per_class])
y_i = tf.cast(tf.one_hot(y_i_ind,y_dim, 1, 0), tf.float32)
y_hat_ind = tf.placeholder(tf.int64,shape=[None])
y_hat = tf.cast(tf.one_hot(y_hat_ind,y_dim, 1, 0), tf.float32)
x_hat_alt = tf.placeholder(tf.float32,shape=[None,x_dim,x_dim,1])
x_i_alt = tf.placeholder(tf.float32,shape=[None, y_dim_alt * n_samples_per_class,x_dim,x_dim,1])
y_i_ind_alt = tf.placeholder(tf.int64,shape=[None,y_dim_alt * n_samples_per_class])
y_i_alt = tf.cast(tf.one_hot(y_i_ind_alt,y_dim_alt, 1, 0), tf.float32)
y_hat_ind_alt = tf.placeholder(tf.int64,shape=[None])
y_hat_alt = tf.cast(tf.one_hot(y_hat_ind_alt,y_dim_alt, 1, 0), tf.float32)
'''
creates a stack of 4 layers. Each layer contains a
3x3 conv layers, batch normalization, retified activation,
and then 2x2 max pooling. The net effect is to tranform the
mb_dimx28x28x1 images into a mb_dimx1x1x64 embedding, the extra
dims are removed, resulting in mb_dimx64.
'''
def make_conv_net(inp,scope,reuse=None,stop_grad=False, index=0, alt_reuse=None):
    """Build a 4-layer conv encoder (TF1 graph mode): 3x3 conv -> batch
    norm -> ReLU -> 2x2 max pool, four times, mapping a 28x28x1 image to
    a 64-dim embedding (spatial dims squeezed away).

    reuse: passed to the per-layer variable scopes so the labeled and
    query towers can share (or not share) weights.
    stop_grad: if True, block gradients through the returned embedding.
    NOTE(review): `varscope`, `index`, and `alt_reuse` are currently
    unused (alt_reuse only appears in the commented-out scope line).
    """
    with tf.variable_scope(scope) as varscope:
        cur_input = inp
        cur_filters = 1
        for i in range(4):
            with tf.variable_scope('conv'+str(i), reuse=reuse):
                W = tf.get_variable('W',[3,3,cur_filters,64])
                #with tf.variable_scope('conv' + str(i) + '_' + str(index), reuse = alt_reuse):
                beta = tf.get_variable('beta', shape=[64], initializer=tf.constant_initializer(0.0))
                gamma = tf.get_variable('gamma', shape=[64], initializer=tf.constant_initializer(1.0))
                cur_filters = 64
                pre_norm = tf.nn.conv2d(cur_input,W,strides=[1,1,1,1],padding='SAME')
                # batch statistics over (batch, height, width); eps for stability
                mean,variance = tf.nn.moments(pre_norm,[0,1,2])
                post_norm = tf.nn.batch_normalization(pre_norm,mean,variance,beta,gamma,eps)
                conv = tf.nn.relu(post_norm)
                cur_input = tf.nn.max_pool(conv,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')
        if stop_grad:
            return tf.stop_gradient(tf.squeeze(cur_input,[1,2]))
        else:
            return tf.squeeze(cur_input,[1,2])
'''
assemble a computational graph for processing minibatches of the n_samples labeled examples and one unlabeled sample.
All labeled examples use the same convolutional network, whereas the unlabeled sample defaults to using different parameters.
After using the convolutional networks to encode the input, the pairwise cos similarity is computed. The normalized version of this
is used to weight each label's contribution to the queried label prediction.
'''
scope = 'encode_x'
x_hat_encode = make_conv_net(x_hat,scope)
#x_hat_inv_mag = tf.rsqrt(tf.clip_by_value(tf.reduce_sum(tf.square(x_hat_encode),1,keep_dims=True),eps,float("inf")))
cos_sim_list = []
if not tie:
scope = 'encode_x_i'
for i in range(y_dim * n_samples_per_class):
x_i_encode = make_conv_net(x_i[:,i,:,:,:],scope,tie or i > 0,not x_i_learn, index=i)
x_i_inv_mag = tf.rsqrt(tf.clip_by_value(tf.reduce_sum(tf.square(x_i_encode),1,keep_dims=True),eps,float("inf")))
dotted = tf.squeeze(
tf.batch_matmul(tf.expand_dims(x_hat_encode,1),tf.expand_dims(x_i_encode,2)),[1,])
cos_sim_list.append(dotted
*x_i_inv_mag)
#*x_hat_inv_mag
cos_sim = tf.concat(1,cos_sim_list)
tf.histogram_summary('cos sim',cos_sim)
weighting = tf.nn.softmax(cos_sim)
label_prob = tf.squeeze(tf.batch_matmul(tf.expand_dims(weighting,1),y_i))
tf.histogram_summary('label prob',label_prob)
top_k = tf.nn.in_top_k(label_prob,y_hat_ind,1)
acc = tf.reduce_mean(tf.to_float(top_k))
tf.scalar_summary('avg accuracy',acc)
correct_prob = tf.reduce_sum(tf.log(tf.clip_by_value(label_prob,eps,1.0))*y_hat,1)
loss = tf.reduce_mean(-correct_prob,0)
tf.scalar_summary('loss',loss)
optim = tf.train.GradientDescentOptimizer(learning_rate)
#optim = tf.train.AdamOptimizer(learning_rate)
grads = optim.compute_gradients(loss)
grad_summaries = [tf.histogram_summary(v.name,g) if g is not None else '' for g,v in grads]
train_step = optim.apply_gradients(grads)
'''
End of the construction of the computational graph.
'''
''' Construct alternative (i.e. different y dimension) computational graph '''
scope = 'encode_x'
x_hat_encode_alt = make_conv_net(x_hat_alt,scope, reuse=True, alt_reuse = True)
#x_hat_inv_mag = tf.rsqrt(tf.clip_by_value(tf.reduce_sum(tf.square(x_hat_encode),1,keep_dims=True),eps,float("inf")))
cos_sim_list_alt = []
if not tie:
scope = 'encode_x_i'
for i in range(y_dim_alt * n_samples_per_class):
x_i_encode_alt = make_conv_net(x_i_alt[:,i,:,:,:],scope,True,not x_i_learn, index=i, alt_reuse = True)
x_i_inv_mag_alt = tf.rsqrt(tf.clip_by_value(tf.reduce_sum(tf.square(x_i_encode_alt),1,keep_dims=True),eps,float("inf")))
dotted_alt = tf.squeeze(tf.batch_matmul(tf.expand_dims(x_hat_encode_alt,1),tf.expand_dims(x_i_encode_alt,2)),[1,])
cos_sim_list_alt.append(dotted_alt*x_i_inv_mag_alt)
cos_sim_alt = tf.concat(1,cos_sim_list_alt)
tf.histogram_summary('cos sim alt',cos_sim_alt)
weighting_alt = tf.nn.softmax(cos_sim_alt)
label_prob_alt = tf.squeeze(tf.batch_matmul(tf.expand_dims(weighting_alt,1),y_i_alt))
tf.histogram_summary('label prob alt',label_prob_alt)
top_k_alt = tf.nn.in_top_k(label_prob_alt,y_hat_ind_alt,1)
acc_alt = tf.reduce_mean(tf.to_float(top_k_alt))
tf.scalar_summary('avg accuracy alt',acc_alt)
correct_prob_alt = tf.reduce_sum(tf.log(tf.clip_by_value(label_prob_alt,eps,1.0))*y_hat_alt,1)
loss_alt = tf.reduce_mean(-correct_prob_alt,0)
tf.scalar_summary('loss alt',loss_alt)
''' End of the construction of the alternative computational graph '''
sess = tf.Session()
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter(sys.argv[3], graph=sess.graph)
sess.run(tf.initialize_all_variables())
for v in tf.trainable_variables():
print(v.name)
for i in range(int(1e7)):
mb_x_i,mb_y_i,mb_x_hat,mb_y_hat = get_minibatch(y_dim)
mb_x_i_alt,mb_y_i_alt,mb_x_hat_alt,mb_y_hat_alt = get_minibatch(y_dim_alt)
feed_dict = {x_hat: mb_x_hat,
y_hat_ind: mb_y_hat,
x_i: mb_x_i,
y_i_ind: mb_y_i,
x_hat_alt: mb_x_hat_alt,
y_hat_ind_alt: mb_y_hat_alt,
x_i_alt: mb_x_i_alt,
y_i_ind_alt: mb_y_i_alt}
_,mb_loss, mb_acc, mb_loss_alt, mb_acc_alt, summary,ans = sess.run([train_step,loss, acc, loss_alt, acc_alt, merged,cos_sim],feed_dict=feed_dict)
if i % int(10) == 0:
print(i,'loss: ',mb_loss, 'acc: ', mb_acc, 'loss alt: ', mb_loss_alt, 'acc alt: ', mb_acc_alt, 'time: ',time.time()-cur_time)
cur_time = time.time()
if i % int(100) == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
writer.add_run_metadata(run_metadata, 'step%d' % i)
writer.add_summary(summary,i)
sess.close()
| true |
310c5169df85532a9f2586a1da075233b26443fc | Python | XuHangkun/tianchi_channel_1 | /code/utils/padCollate.py | UTF-8 | 2,355 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
a variant of callate_fn that pads according to the longest sequence in
a batch of sequences
~~~~~~~~~~~~~~~~~~~~~~
:author: Xu Hangkun (许杭锟)
:copyright: © 2020 Xu Hangkun <xuhangkun@ihep.ac.cn>
:license: MIT, see LICENSE for more details.
"""
import torch
def pad_tensor(vec, pad, dim, pad_idx):
    """
    Right-pad `vec` along dimension `dim` up to length `pad`.

    args:
        vec - tensor to pad
        pad - target length along `dim`
        dim - dimension to pad
        pad_idx - token id written into the padding positions
    return:
        a new tensor of length `pad` along `dim`
    """
    fill_shape = list(vec.shape)
    fill_shape[dim] = pad - vec.shape[dim]
    filler = torch.ones(*fill_shape, dtype=torch.long) * pad_idx
    return torch.cat([vec, filler], dim=dim)
class PadCollate:
    """
    Collate callable that right-pads every sequence in a batch to the
    length of the longest one, then stacks inputs and labels.
    """
    def __init__(self, dim=0, pad_idx=858):
        """
        args:
            dim - the dimension to be padded (time dimension of the sequences)
            pad_idx - token id used to fill the padding positions
        """
        self.dim = dim
        self.pad_idx = pad_idx
    def pad_collate(self, batch):
        """
        args:
            batch - list of (sequence, label) pairs
        return:
            (xs, ys): stacked padded sequences and stacked labels,
            both as LongTensors
        """
        # length of the longest sequence decides the padded width
        longest = max(sample.shape[self.dim] for sample, _ in batch)
        padded = [
            (pad_tensor(torch.LongTensor(sample), pad=longest, dim=self.dim,
                        pad_idx=self.pad_idx),
             torch.LongTensor(label))
            for sample, label in batch
        ]
        xs = torch.stack([x for x, _ in padded])
        ys = torch.stack([y for _, y in padded])
        return xs, ys
    def __call__(self, batch):
        return self.pad_collate(batch)
def test():
    """Smoke-test PadCollate on two ragged numpy sequences and print the result."""
    import numpy as np
    pad = PadCollate()
    batch = [(np.array([1.0,2,3,4,5,6,693,11,328,15,380, 53,698, 57, 59, 2, 27, 43, 4, 30, 34, 1, 32, 58, 21, 17, 15, 38, 31, 20, 11, 40, 35, 39, 36, 3, 5, 41, 26, 13, 50, 12, 0, 44, 29, 45, 22, 48, 54, 25, 28, 51, 19, 14, 252, 49, 9, 16, 46, 47, 7,42, 11110]),np.array([1,0,0])),(np.array([1,2,3,4,5,6,7]),np.array([1,1,0]))]
    print(pad(batch))
if __name__ == '__main__':
    test()
| true |
354f60b6d0a26639af1af51a759be5ec1c38bdce | Python | salamwaddah/nd004-1mac | /triangle.py | UTF-8 | 123 | 3.453125 | 3 | [] | no_license | import turtle
# Walk a turtle around an equilateral triangle with 100-pixel sides
# (pen colored white, as in the original).
pen = turtle.Turtle()
pen.color("white")
for _ in range(3):
    pen.forward(100)
    pen.left(120)
| true |
46d051c157a5199f99177df706ed014fab650c5a | Python | kiirudavid/Recipe-project | /app/requests.py | UTF-8 | 3,352 | 2.65625 | 3 | [
"MIT"
] | permissive | import urllib.request,json
from .models import Recipe
# Getting api key
# api_key = None
# app_id = None
# Getting the recipe base url
base_url = None
def configure_request(app):
    """Copy the recipe-API settings from the Flask app config into the
    module-level globals used by the request helpers below."""
    global api_key, app_id, base_url
    api_key = app.config['RECIPE_API_KEY']
    app_id =app.config['APP_ID']
    base_url = app.config['RECIPE_API_BASE_URL']
def get_recipes():
    '''
    Fetch a default page of recipes from the Edamam search API and
    return them as Recipe objects via process_results(), or None when
    the response contains no hits.
    SECURITY NOTE(review): the app_id/app_key are hard-coded into this
    URL instead of using the configure_request() globals — these
    credentials should come from configuration, not source code.
    '''
    get_recipes_url = "https://api.edamam.com/search?q=food&app_id=ee974c44&app_key=417ba80ca6f3032f407ccb1379e65325&from=0"
    with urllib.request.urlopen(get_recipes_url) as url:
        get_recipes_data = url.read()
        get_recipes_response = json.loads(get_recipes_data)
        recipe_results = None
        if get_recipes_response['hits']:
            recipe_results_list = get_recipes_response['hits']
            recipe_results = process_results(recipe_results_list)
    return recipe_results
def get_recipe(id):
    """Fetch one recipe's details from base_url and wrap them in a Recipe.

    NOTE(review): the fields read here (original_title, overview,
    poster_path, vote_average, vote_count) look copied from a
    movie-API project, and this Recipe(...) call's arguments disagree
    with the Recipe(url, label, image, source, ...) call in
    process_results() — confirm against the Recipe model and the actual
    API response before relying on this function.
    """
    get_recipe_details_url = base_url.format(id,api_key)
    with urllib.request.urlopen(get_recipe_details_url) as url:
        recipe_details_data = url.read()
        recipe_details_response = json.loads(recipe_details_data)
        recipe_object = None
        if recipe_details_response:
            id = recipe_details_response.get('id')
            title = recipe_details_response.get('original_title')
            overview = recipe_details_response.get('overview')
            poster = recipe_details_response.get('poster_path')
            vote_average = recipe_details_response.get('vote_average')
            vote_count = recipe_details_response.get('vote_count')
            recipe_object = Recipe(id,title,overview,poster,vote_average,vote_count)
    return recipe_object
def search_recipe(recipe_name):
    """Search Edamam for recipes matching `recipe_name`.

    Returns a list of Recipe objects via process_results(), or None when
    there are no hits.
    Fix: the query was hard-coded to 'food', so `recipe_name` was
    silently ignored; it is now URL-encoded into the q= parameter.
    SECURITY NOTE(review): credentials remain hard-coded in the URL, as
    elsewhere in this module — they should come from configuration.
    """
    from urllib.parse import quote
    search_recipe_url = "https://api.edamam.com/search?q={}&app_id=ee974c44&app_key=417ba80ca6f3032f407ccb1379e65325&from=0".format(quote(recipe_name))
    with urllib.request.urlopen(search_recipe_url) as url:
        search_recipe_data = url.read()
        search_recipe_response = json.loads(search_recipe_data)
        search_recipe_results = None
        if search_recipe_response['hits']:
            search_recipe_list = search_recipe_response['hits']
            search_recipe_results = process_results(search_recipe_list)
    return search_recipe_results
def process_results(recipe_list):
    '''
    Transform raw Edamam search hits into a list of Recipe objects.
    Args:
        recipe_list: list of hit dictionaries, each with a 'recipe' sub-dict
    Returns:
        recipe_results: a list of Recipe objects (hits without an image
        are skipped)
    NOTE(review): 'healthlabels' is read in all-lowercase — the Edamam
    response key is presumably camel-cased ('healthLabels'), which would
    make this field always None; verify against a live response.
    '''
    recipe_results = []
    for recipe_item in recipe_list:
        url = recipe_item['recipe'].get('url')
        label = recipe_item['recipe'].get('label')
        image = recipe_item['recipe'].get('image')
        source = recipe_item['recipe'].get('source')
        healthlabels = recipe_item['recipe'].get('healthlabels')
        ingredients = recipe_item['recipe'].get('ingredients')
        if image:
            recipe_object = Recipe(url,label,image,source,healthlabels,ingredients)
            recipe_results.append(recipe_object)
    # import pdb;pdb.set_trace()
    return recipe_results
b3fdccf6b89290528577d48ba2dbac0c70dd9299 | Python | ceteongvanness/100-Days-of-Code-Python | /Day 4 - Beginner - Randomisation and Python Lists/2. Who's Paying/main.py | UTF-8 | 369 | 3.78125 | 4 | [] | no_license | import random
# Seed the RNG so the same seed always picks the same payer.
test_seed = int(input("Create a seed number: "))
random.seed(test_seed)  # fix: random.see() does not exist and raised AttributeError
namesAsCSV = input("Give me everybody's names, seperated by a comman. ")
names = namesAsCSV.split(",")
# pick one index uniformly at random
num_items = len(names)
random_choice = random.randint(0, num_items - 1)
person_who_will_pay = names[random_choice]
print(person_who_will_pay + "is going to by the meal today!") | true |
6b4d255384f411353e0fe311172c73c80ec0d8fd | Python | JLUVicent/crawler | /1_urllib的基本使用.py | UTF-8 | 518 | 3.765625 | 4 | [] | no_license | # 使用urllib来获取百度首页源码
# (1)定义一个url,就是要访问的地址
import urllib.request
url = 'http://www.baidu.com'
# (2)模拟浏览器向服务器发送请求 response响应
response = urllib.request.urlopen(url)
# (3)获取响应中的页面源码
# read方法 返回字节形式的二进制数据
# 将二进制数据转换为字符串
# 二进制--》字符串 解码 decode('编码的格式')
content = response.read().decode('utf-8')
# (4)打印数据
print(content)
| true |
467361c41a51f8980208d6c91652ebd601ec95a7 | Python | drewverlee/Course-Projects | /topology/test_tools.py | UTF-8 | 2,424 | 3.0625 | 3 | [] | no_license | """
File : test_tools.py
Author : Drew Verlee
Date : 17.10.13.
Email : drew.verlee@gmail.com
GitHub : https://github.com/Midnightcoffee/
Description : test the various tools used for topological sorting
"""
import unittest
from tools import file_to_graph, inverse_graph, find_sources
class TestFileToGraph(unittest.TestCase):
    # NOTE(review): a second class with this exact name is defined below and
    # shadows this one, so unittest discovery never runs these two tests.
    def test_graph7_graph(self):
        a_file = 'graph7.txt'
        # Expected adjacency mapping {vertex: set of successors}.
        expected = {
            0: {1, 2, 4},
            1: {3, 5},
            2: {3, 6},
            3: {7},
            4: {5, 6},
            5: {7},
            6: {7},
            7: set()
        }
        # NOTE(review): unpacks 2 values here but 3 in the test below --
        # one arity must be wrong; confirm file_to_graph's actual return.
        returned, _ = file_to_graph(a_file)
        self.assertEqual(expected, returned)
    def test_graph7_edge_count(self):
        a_file = 'graph7.txt'
        expected = 12
        _, returned, _ = file_to_graph(a_file)
        self.assertEqual(expected, returned)
class TestFileToGraph(unittest.TestCase):
    # NOTE(review): duplicates the class name above; this definition wins and
    # is the one unittest actually runs.  Consider renaming one of the two.
    def test_graph7_graph(self):
        a_file = 'graph7.txt'
        expected = {
            0: {1, 2, 4},
            1: {3, 5},
            2: {3, 6},
            3: {7},
            4: {5, 6},
            5: {7},
            6: {7},
            7: set()
        }
        # NOTE(review): unpacks 3 values here but 2 in the test below --
        # inconsistent with file_to_graph; confirm the intended arity.
        returned, _, _ = file_to_graph(a_file)
        self.assertEqual(expected, returned)
    def test_graph7_edge_count(self):
        a_file = 'graph7.txt'
        expected = 12
        _, returned = file_to_graph(a_file)
        self.assertEqual(expected, returned)
class TestInvertGraph(unittest.TestCase):
    """Go from {v : {outgoing edges}} to {v : incoming} """
    def test_basic(self):
        # Chain 1 -> 2 -> 3: after inversion, 2's predecessor set is {1}
        # and 3's is {2}; the start vertex 1 has no predecessors.
        graph = {
            1: {2},
            2: {3},
            3: set()
        }
        expected = {
            1: set(),
            2: {1},
            3: {2},
        }
        returned = inverse_graph(graph)
        self.assertEqual(expected, returned)
class TestFindSources(unittest.TestCase):
    """make sure we can find sources and degrees of all vertices"""
    def test_basic(self):
        # Input is in {v: incoming-edge set} form; vertex 1 has no incoming
        # edges, so it is the only source expected.
        graph = {
            1: set(),
            2: {1},
            3: {2},
        }
        sources = {1}
        expected = sources
        returned = find_sources(graph)
        self.assertSetEqual(expected, returned)
if __name__ == '__main__':
    unittest.main()
| true |
3686e413f06988fd99a702be595627780f08a2ad | Python | srinivasycplf8/LeetCode | /weight_interval.py | UTF-8 | 3,179 | 2.953125 | 3 | [] | no_license | def merge_sort(given_length,given_array):
n=len(given_array)
if n<2:
return
mid=n//2
left=given_array[0:mid]
right=given_array[mid:n]
merge_sort(given_length,left)
merge_sort(given_length,right)
S=merge_two_sorted_arrays(left,right,given_array)
if len(S)==int(given_length):
return S
return S
def merge_two_sorted_arrays(left_array, right_array, original_array):
    """Merge two lists of triples, each sorted by finish time (index 1),
    back into original_array, overwriting it in place.

    Returns:
        original_array, now holding the merged sequence.
    """
    li = 0
    ri = 0
    for out in range(len(left_array) + len(right_array)):
        left_live = li < len(left_array)
        right_live = ri < len(right_array)
        if left_live and right_live:
            # Ties go to the left half, keeping the merge stable.
            if left_array[li][1] <= right_array[ri][1]:
                original_array[out] = left_array[li]
                li += 1
            else:
                original_array[out] = right_array[ri]
                ri += 1
        elif right_live:
            # Left half exhausted: drain the right half.
            original_array[out] = right_array[ri]
            ri += 1
        else:
            # Right half exhausted: drain the left half.
            original_array[out] = left_array[li]
            li += 1
    return original_array
# Read the number of intervals, then one "start finish weight" line each.
interval=int(input())
x=[]
for i in range(interval):
    interval_input=input()
    interval_input=interval_input.split(" ")
    # Parse the three space-separated fields as integers.
    intervals=[int(interval_input[0]),int(interval_input[1]),int(interval_input[2])]
    x.append(intervals)
'''def binary_search(A,k,a,b):
m = (a+b)//2
if b>=2 :
if A[m][1]<=A[k][0]<=A[m+1][1]:
return m
if A[m][1]>=A[k][0]:
return binary_search(A,k,a,m)
if A[k][0]>A[m+1][1]:
return binary_search(A,k,m+1,b)
if b==1:
if A[m][1]<=A[k][0]:
return m
else:
return 0'''
'''private int latestCompatible(int i){
int low = 0, high = i - 1;
while (low <= high){ //Iterative binary search
int mid = (low + high) / 2; //integer division (floor)
if (jobs[mid][2] <= jobs[i][1]) {
if (jobs[mid + 1][2] <= jobs[i][1])
low = mid + 1;
else
return mid;
}
else
high = mid - 1;
}
return 0; //No compatible job was found. Return 0 so that value of placeholder job in jobs[0] can be used
}'''
def binary_search(A, k):
    """Return the largest index j (1 <= j < k) whose interval finishes no
    later than interval A[k] starts; 0 when no compatible interval exists.

    A is 1-indexed (dummy entry at A[0]) and sorted by finish time (index 1).
    """
    start_k = A[k][0]
    lo = 1
    hi = k - 1
    # Single-candidate case: only A[1] can possibly be compatible.
    if hi == 1:
        return 1 if A[1][1] <= start_k else 0
    while lo <= hi:
        mid = (lo + hi) // 2
        if A[mid][1] > start_k:
            # A[mid] overlaps A[k]; the answer lies strictly to the left.
            hi = mid - 1
        elif A[mid + 1][1] <= start_k:
            # A[mid+1] is also compatible; a later answer exists to the right.
            lo = mid + 1
        else:
            # A[mid] fits but A[mid+1] does not: mid is the latest compatible.
            return mid
    return 0
def weight_interval(interval,A):
    """Weighted interval scheduling via dynamic programming.

    Sorts the intervals by finish time, then for each interval k either takes
    it (its weight plus the best of the latest compatible prefix, located by
    binary_search) or skips it.  Prints the best achievable total weight.
    """
    y = merge_sort(interval,A)
    y.insert(0,0)  # dummy entry so the intervals are 1-indexed
    F = [0]*(interval+1)  # F[k] = best weight using the first k intervals
    F[0] = 0
    pre=[0]*(interval+1)  # pre[k] = latest interval compatible with k
    for k in range(1,len(y)):
        if k>1:
            # For k == 1 the default pre[1] = 0 stands (no earlier interval).
            pre[k]=binary_search(y,k)
        F[k]=max((y[k][2]+F[pre[k]]),F[k-1])
    print(max(F))
    #print(F[-1])
weight_interval(interval,x)
| true |
ddc38f4e35d02b558e0a74984752edee10e4c482 | Python | rsprenkels/python | /aoc/2019/aoc2019_08.py | UTF-8 | 1,458 | 3.390625 | 3 | [] | no_license | from collections import defaultdict
def image_checksum(all_layers, image_size):
    """Split the pixel string into layers of image_size characters, find the
    layer with the fewest '0' pixels, and return (# of '1's) * (# of '2's)
    on that layer (AoC 2019 day 8, part 1)."""
    layer_counts = []
    for start in range(0, len(all_layers), image_size):
        counts = defaultdict(int)
        for pixel in all_layers[start:start + image_size]:
            counts[pixel] += 1
        layer_counts.append(counts)
    fewest_zeros = min(layer_counts, key=lambda c: c['0'])
    return fewest_zeros['1'] * fewest_zeros['2']
def image_render(all_layers, im_w=25, im_h=6):
    """Stack the layers of a Space Image Format string and render it.

    For each pixel position the first non-transparent ('2') value across the
    layers wins; '0' renders as a space and '1' as '*'.

    Returns:
        A list of im_h strings, each im_w characters wide.
    """
    layer_size = im_w * im_h
    layers = [all_layers[i:i + layer_size] for i in range(0, len(all_layers), layer_size)]
    glyph = {'0': ' ', '1': '*'}
    rendered = []
    for y in range(im_h):
        row = []
        for x in range(im_w):
            idx = y * im_w + x
            # First opaque pixel, scanning from the top layer down.
            visible = [layer[idx] for layer in layers if layer[idx] != '2'][0]
            row.append(glyph[visible])
        rendered.append(''.join(row))
    return rendered
def test_1():
    # AoC worked example: the chosen layer has one '1' and one '2' -> 1 * 1.
    assert image_checksum('123456789012', 6) == 1
def test_part1():
    # Part 1 answer for the puzzle input (25x6 = 150 pixels per layer).
    lines = open('aoc2019_08_input.txt', 'r').readlines()
    assert image_checksum((lines[0]), 150) == 1703
def test_part2():
    # Part 2: render the stacked image; the answer is read visually from
    # the printed output (no assertion possible on the letter shapes).
    lines = open('aoc2019_08_input.txt', 'r').readlines()
    result = image_render((lines[0]), 25, 6)
    print()
    for line in result:
        print(line)
| true |
e52a6d4cf9c82c459f1abfcf82cf17ae580593fd | Python | pooyanjamshidi/BO4CO | /utils/unit_tests/test_expconf_merge.py | UTF-8 | 3,281 | 2.65625 | 3 | [
"BSD-2-Clause-Views",
"BSD-3-Clause"
] | permissive | import unittest
import os
import tempfile
from merge_expconfig import *
class TestExpConfMerge(unittest.TestCase):
    # Tests for merge_config: combining a CO configuration with an
    # application configuration into one experiment config ("expconfig").
    # Fixture YAML files live in a "files/" directory next to this test.
    @classmethod
    def setUpClass(cls):
        base_path = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(base_path, 'files')
        def fpath(*names):
            # Shorthand for building absolute fixture paths.
            return os.path.join(file_path, *names)
        cls.test_file_prefix = "files/"
        cls.test_co_config_files = {
            "normal": fpath("co-config.yaml")
        }
        cls.test_app_config_files = {
            "minimal": fpath("app-config-minimal.yaml"),
            "mixed": fpath("app-config-mixed.yaml"),
            "mixed-nimbus": fpath("app-config-nimbus.yaml")
        }
        cls.test_expconfig_files = {
            "full": fpath("expconfig-full.yaml"),
            "full-nimbus": fpath("expconfig-full-nimbus.yaml"),
        }
    def test_full_merge(self):
        """
        Both source files have a disjunct set of parameters.
        """
        co_config_file = self.test_co_config_files["normal"]
        app_config_file = self.test_app_config_files["minimal"]
        # NOTE(review): writing to / reading from the NamedTemporaryFile by
        # name while the handle is still open works on POSIX but not on
        # Windows -- confirm the supported platforms.
        with tempfile.NamedTemporaryFile('w', delete=True) as f_expconfig:
            expconfig_file = f_expconfig.name
            merge_config(co_config_file, app_config_file, expconfig_file)
            expconfig = load_yaml(expconfig_file)
            self.assertIsNotNone(expconfig)
            expconfig_expected = load_yaml(self.test_expconfig_files["full"])
            self.maxDiff = None
            self.assertEqual(expconfig_expected, expconfig)
    def test_mixed_merge(self):
        """
        Some of the parameters in the application's config are the ones
        belonging to the CO configuration. This should be overwritten
        in the merge.
        """
        co_config_file = self.test_co_config_files["normal"]
        app_config_file = self.test_app_config_files["mixed"]
        with tempfile.NamedTemporaryFile('w', delete=True) as f_expconfig:
            expconfig_file = f_expconfig.name
            merge_config(co_config_file, app_config_file, expconfig_file)
            expconfig = load_yaml(expconfig_file)
            expconfig_expected = load_yaml(self.test_expconfig_files["full"])
            self.maxDiff = None
            self.assertEqual(expconfig_expected, expconfig)
    def test_mixed_nimbus_merge(self):
        """
        Some of the parameters in the application's config are the ones
        belonging to the CO configuration. This should be overwritten
        in the merge. Additionally, the application defines the
        Storm nimbus as an external service, which should be preserved
        in the final output.
        """
        co_config_file = self.test_co_config_files["normal"]
        app_config_file = self.test_app_config_files["mixed-nimbus"]
        with tempfile.NamedTemporaryFile('w', delete=True) as f_expconfig:
            expconfig_file = f_expconfig.name
            merge_config(co_config_file, app_config_file, expconfig_file)
            expconfig = load_yaml(expconfig_file)
            expconfig_expected = load_yaml(
                self.test_expconfig_files["full-nimbus"])
            self.maxDiff = None
            self.assertEqual(expconfig_expected, expconfig)
| true |
40a7d4ec850b6bcc0350dd974d5663b1f196ed3c | Python | iilunin/gainloss-calc | /gainloss/ReportLoader.py | UTF-8 | 3,608 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | import io
import time
from functools import wraps
import requests
import datetime
import threading
import yaml
import gdax
import pandas as pd
import numpy as np
def rate_limited(max_per_second: int):
    """Decorator factory: throttle calls to at most max_per_second,
    serializing callers with a per-decorator lock (one process only)."""
    gate = threading.Lock()
    min_interval = 1.0 / max_per_second
    def decorate(func):
        # Baseline timestamp, taken when the function is decorated.
        last_call = time.perf_counter()
        @wraps(func)
        def throttled(*args, **kwargs):
            nonlocal last_call
            with gate:
                try:
                    # Sleep away whatever is left of the minimum interval.
                    wait = min_interval - (time.perf_counter() - last_call)
                    if wait > 0:
                        time.sleep(wait)
                    return func(*args, **kwargs)
                finally:
                    # Stamp the completion time even if func raised.
                    last_call = time.perf_counter()
        return throttled
    return decorate
class ReportLoader:
    """Downloads GDAX fill reports and historical USD prices."""
    # Base delay (seconds) between API calls; doubled by __doublesleep.
    STANDARD_DELAY = 0.5
    def __init__(self, passphrase, key, b64secret):
        # Authenticated client for account reports, public client for rates.
        self.gdax = gdax.AuthenticatedClient(key=key, b64secret=b64secret, passphrase=passphrase)
        self.gdax_public = gdax.PublicClient()
        # self.products = None
    @classmethod
    def from_config(cls, config_path):
        """Alternate constructor: read credentials from a YAML config file
        with 'passphrase', 'key', and 'b64secret' entries."""
        with open(config_path) as f:
            config = yaml.safe_load(f)
        return cls(passphrase=config['passphrase'],
                   key=config['key'],
                   b64secret=config['b64secret'])
    def download_reports(self, products, start_date, end_date):
        """
        Request a "fills" CSV report per product, poll until each report is
        ready, then download and concatenate all of them.
        :param products: iterable of product ids (e.g. "BTC-USD")
        :param start_date: report range start (stringified for the API)
        :param end_date: report range end (stringified for the API)
        :return: all fills for all products in one DataFrame
        @rtype: pd.DataFrame
        """
        report_ids = []
        for p in products:
            result = self.gdax.create_report(
                report_type="fills",
                start_date=str(start_date),
                end_date=str(end_date),
                product_id=p,
                report_format='csv')
            report_ids.append(result['id'])
            self.__doublesleep()
        report_urls = []
        # Poll until every report id has produced a download URL.
        while len(report_ids) > 0:
            for rid in report_ids:
                res = self.gdax.get_report(rid)
                if res['status'] == 'ready':
                    report_urls.append(res['file_url'])
                    # NOTE(review): removing from report_ids while iterating
                    # it can skip the next id this pass; the enclosing while
                    # loop re-scans, so nothing is lost -- just extra polls.
                    report_ids.remove(rid)
                self.__doublesleep()
        data_frames = []
        for url in report_urls:
            print(url)
            s = requests.get(url).content
            data_frames.append(
                pd.read_csv(io.StringIO(s.decode('utf-8')))
            )
        return pd.concat(data_frames, ignore_index=True)
    @rate_limited(1.5)
    def getHistoricalUsdVal(self, currency, date, timedelta=15):
        """Approximate the USD value of `currency` at `date` by averaging
        the 1-minute high/low midpoints in a +/- `timedelta`-second window.
        (The parameter name `timedelta` shadows nothing here: the datetime
        module is accessed as datetime.timedelta.)"""
        #https://min-api.cryptocompare.com/data/pricehistorical?fsym=ETH&tsyms=BTC,USD,EUR&ts=1518723173&e=Coinbase
        start_date = date - datetime.timedelta(seconds=timedelta)
        end_date = date + datetime.timedelta(seconds=timedelta)
        result = self.gdax_public.get_product_historic_rates(
            '{unit}-USD'.format(unit=currency),
            start=start_date,
            end=end_date,
            granularity=60)
        # Each row is [time, low, high, open, close, volume]; average the
        # low/high midpoint across the window and round to cents.
        return round(np.mean([(row[1]+row[2])/2 for row in result]), 2)
    def __sleep(self, t=STANDARD_DELAY):
        time.sleep(t)
    def __doublesleep(self):
        self.__sleep(ReportLoader.STANDARD_DELAY * 2)
    def sleep(self):
        # Public wrapper around the standard delay.
        self.__sleep()
    def dsleep(self):
        # Public wrapper around the doubled delay.
        self.__doublesleep()
| true |
b84e9fd285b29705416a09a07488019c8f703584 | Python | semikite/study_python | /hellp.py | UTF-8 | 1,073 | 3.921875 | 4 | [] | no_license | class Dog:
    def __init__(self, name):
        # Store the dog's name and announce creation.
        self.name = name
        print(self.name, " was Born")
    def speak(self):
        # Overridden by Puppy.speak below.
        print("YELP!", self.name)
    def wag(self):
        # Overridden by Puppy.wag below.
        print("Dog's wag tail")
    def m1(self):
        # Placeholder method; not called anywhere in this file.
        print("m1")
    def m2():
        # NOTE(review): missing `self` -- calling this on an instance
        # (d.m2()) would raise TypeError; only Dog.m2() works as written.
        print("m2")
    def __del__(self):
        # Finalizer: prints when the instance is garbage-collected.
        print("desroy!!")
class Puppy(Dog):
    # Class attribute; the double-underscore name is mangled to
    # _Puppy__hidden, so it is only reachable by name inside this class.
    __hidden = "==========================="
    def __init__(self):
        # NOTE(review): does not call super().__init__(), so Dog's
        # "... was Born" message is skipped for puppies.
        self.name = "Puppy"
        self.color = "red"
        print("Puppy was Born")
    def __read_diary(self):
        # Name-mangled "private" helper; called only from wag().
        print("Diary Content!!!")
    def wag(self):
        # Overrides Dog.wag.
        print("Puppy's wag tail")
        self.__read_diary()
    def speak(self):
        # Overrides Dog.speak.
        print("bow wow!", self.name)
    def change_color(self):
        self.color = "yellow"
    def static_method():
        # NOTE(review): no @staticmethod decorator and no self; this only
        # works when called on the class (Puppy.static_method()), as the
        # demo below does -- calling it on an instance raises TypeError.
        print("xxxxxxxxxxxxxxxxxxxxxxx")
    def hidden(self):
        # Accessor for the name-mangled class attribute.
        print(self.__hidden)
# Demo: the same method calls dispatch to Dog or Puppy overrides.
d = Dog("dog")
p = Puppy()
d.speak()
p.speak()
d.wag()
p.wag()
print(d.name,p.name)
print(p.color)
p.change_color()
print(p.color)
p.hidden()
Puppy.static_method() | true |
46e9ead65dae36152e59ced3e95dbe343c1f5244 | Python | FernandoDGonzalez/My-CookBook | /User_input.py | UTF-8 | 98 | 3.515625 | 4 | [] | no_license | print("Hello")
# Ask for the user's name and greet them.
name = input("Please enter your name: ")
# Fixed typo in the greeting: "Nice top meet you" -> "Nice to meet you".
print("Nice to meet you, " + str(name))
| true |
b02ad173d131f8d5981a015d843d62731c05810e | Python | mink007/structure_learning | /project1.py | UTF-8 | 11,476 | 3.078125 | 3 | [] | no_license | #%matplotlib inline
#Using python 2.7*
#This file is for small.csv data set. Please change the inputfilename and outputfilename paths for medium and large data sets accordingly.
import sys
import networkx as nx
import pandas as pd
import numpy as np
import math
import time
import copy
import ntpath
import matplotlib
import matplotlib.pyplot as plt
import pdb
from collections import defaultdict
start_time = time.time()
def write_gph(good_G, filename):
    """Write the graph's edge list to `filename` ("parent, child" per line),
    then draw the graph and save it as `filename`.png (also shown on screen).
    """
    with open(filename, 'w') as f:
        #Writing all the edges present
        for edge in good_G.edges():
            f.write("{}, {}\n".format(edge[0], edge[1]))
    f.close()  # NOTE(review): redundant -- the with-block already closed f
    #plot_g.clear()
    plot_g = nx.DiGraph()
    list_node = []
    for node in good_G.nodes():
        list_node.append(node)
    #list_node.unique()
    #Remove duplicate nodes if present to deepcopy issues if any
    list_node = list(dict.fromkeys(list_node))
    for node in list_node:
        plot_g.add_node(node)
    for ed in good_G.edges():
        plot_g.add_edge(ed[0],ed[1])
    #posi = nx.spring_layout(plot_g,k=0.15,iterations=20)
    # Layout: feed all-pairs shortest-path distances to kamada_kawai_layout;
    # unreachable pairs are filled with the largest observed distance.
    df = pd.DataFrame(index=plot_g.nodes(), columns=plot_g.nodes())
    for row, data in nx.shortest_path_length(plot_g):
        for col, dist in data.items():
            df.loc[row,col] = dist
    df = df.fillna(df.max().max())
    posi = nx.kamada_kawai_layout(plot_g, dist=df.to_dict())
    nx.draw_networkx(plot_g, arrows=True, with_labels=True, width=2.0,edge_color='red',node_color='orange',pos=posi)
    g_file = filename + ".png"
    print("SHOWING GRAPH")
    plt.axis('off')
    plt.savefig(g_file, bbox_inches='tight')
    plt.show()
def write_score_and_time(graph_score, time_diff, outfile):
    """Overwrite `outfile` with a single "<score>, <elapsed-seconds>" line.

    Args:
        graph_score: Bayesian score of the best graph found so far.
        time_diff: elapsed wall-clock seconds when that score was reached.
        outfile: path of the file to (over)write.
    """
    # The with-block closes the file; the original's extra f.close() after
    # the block was redundant and has been removed.
    with open(outfile, 'w') as f:
        f.write("{}, {}\n".format(graph_score, time_diff))
def write_graph_score(good_G, outfile, graph_score, time_diff):
    """Persist the current best graph to `outfile` (plus its .png plot), and
    write the score/elapsed-time pair to "<basename>.score_and_time" in the
    current working directory."""
    write_gph(good_G, outfile)
    outfile = ntpath.basename(outfile)
    outfile = ntpath.splitext(outfile)[0] + ".score_and_time"
    write_score_and_time(graph_score,time_diff,outfile)
def compute(infile, outfile):
    """Bayesian-network structure search over the CSV dataset in `infile`.

    Starts from an edgeless graph over the data columns, then runs a local
    K2-style search: repeatedly try adding a single directed edge, re-score,
    and keep the graph whenever the Bayesian score improves.  The best graph
    and score are written out via write_graph_score each time they improve.
    """
    # WRITE YOUR CODE HERE
    # FEEL FREE TO CHANGE ANYTHING ANYWHERE IN THE CODE
    # THIS INCLUDES CHANGING THE FUNCTION NAMES, MAKING THE CODE MODULAR, BASICALLY ANYTHING
    data = pd.read_csv(infile)
    #print("COLS", data.columns[0])
    x_i = data.columns
    #Initialize a graph with one node per data column, no edges.
    G = nx.DiGraph()
    #Add all the nodes to the graph
    for i in range(0,len(x_i)):
        G.add_node(x_i[i])
    # Score the initial (empty) graph.
    G, graph_score = find_graph_score(data,G)
    print("Graph Score", graph_score)
    #pdb.set_trace()
    node_list = []
    for n in G.nodes():
        node_list.append(n)
        #print("N-Node", n,node_list)
    good_G=nx.DiGraph()
    good_G = copy.deepcopy(G)
    initial_G=initial_GP=nx.DiGraph()
    initial_G = copy.deepcopy(G)
    print("Time taken to get this best score %s seconds ---" % (time.time() - start_time))
    write_graph_score(G, outfile, graph_score, (time.time() - start_time))
    #top_graph_score = 0
    #Graph iterations and Bayesian score calculations.
    #The Local Directed Graph Search Loop: rotate the node ordering each pass.
    for nn in range(0,len(node_list)+1):
        node_list.append(node_list.pop(0))
        print("NODE LIST", node_list)
        #The K2Search loop starts here
        for n in range(0,len(node_list)):
            initial_G = copy.deepcopy(good_G)
            for o in range(n+1,len(node_list)):
                # Snapshot the candidate graph so a rejected edge can be undone.
                initial_GP = copy.deepcopy(initial_G)
                #print("Adding edge",node_list[o],node_list[n])
                initial_G.add_edge(node_list[o],node_list[n])
                initial_G, new_graph_score = find_graph_score(data,initial_G)
                print("Score", new_graph_score)
                # An empty-string score flags a cyclic graph: revert the edge.
                if new_graph_score == "":
                    #print("Removing edge a",node_list[o],node_list[n])
                    initial_G.clear()
                    initial_G = copy.deepcopy(initial_GP)
                    continue
                if new_graph_score > graph_score:
                    print("Better score {} obtained, than previous best of {}\n".format(new_graph_score, graph_score))
                    good_G.clear()
                    good_G = copy.deepcopy(initial_G)
                    graph_score = new_graph_score
                    print("Time taken to get this best score %s seconds ---" % (time.time() - start_time))
                    write_graph_score(good_G, outfile, graph_score, (time.time() - start_time))
                #print("Removing edge b",node_list[o],node_list[n])
                initial_G.clear()
                initial_G = copy.deepcopy(initial_GP)
def find_graph_score(data,G):
    """Score G when it is acyclic; return (G, score), or (G, "") for a
    cyclic graph.  nx.find_cycle raises when no cycle exists, which the
    except-branch treats as "acyclic"."""
    is_cyclic_graph = 0
    try:
        is_cyclic_graph = nx.find_cycle(G, orientation='original')
        is_cyclic_graph = 1
        #print("cyclic graph")
    except:
        # NOTE(review): bare except also swallows unrelated errors;
        # networkx.exception.NetworkXNoCycle would be the precise target.
        is_cyclic_graph = 0
        #print("Not cyclic graph")
    if(is_cyclic_graph == 0):
        m_ijk, G = find_m_ijk(data, G)
        score = find_bayesian_score(G, m_ijk)
        #print("Score = ", score)
        return G, score
    else:
        return G, ""
def find_bayesian_score(G, m_ijk):
    """Compute the Bayesian log-score of graph G from the m_ijk counts,
    summing log-gamma terms over nodes i, parent-value configurations j, and
    node values k.  The `1 +` inside lgamma shows a uniform Dirichlet prior
    of one pseudo-count per cell."""
    i = 1
    a_ij0 = defaultdict(list)
    m_ij0 = defaultdict(list)
    score = 0
    for n in G.nodes:
        first_term = 0
        for j in range(0,len(G.nodes[n]["list_values_at_j"])):
            index_a = str(i) + str(j+1) + "0"
            a_ij0[index_a] = 0
            m_ij0[index_a] = 0
            # a_ij0 accumulates the prior pseudo-counts (1 per value);
            # m_ij0 sums the observed counts over all values k.
            for k in range(0,len(G.nodes[n]["value_of_ri"])):
                index = str(i) + str(j+1) + str(k+1)
                a_ij0[index_a] = a_ij0[index_a] + 1
                m_ij0[index_a] = m_ij0[index_a] + m_ijk[index]
                #print("INDEX_A", index_a)
            third_term = 0
            for k in range(0,len(G.nodes[n]["value_of_ri"])):
                index = str(i) + str(j+1) + str(k+1)
                #print("INDEX", index)
                third_term = third_term + math.lgamma(1 + m_ijk[index])
            first_term = first_term + math.lgamma(a_ij0[index_a]) - math.lgamma(a_ij0[index_a]+m_ij0[index_a]) + third_term
        i = i + 1
        score = score + first_term
    return score
def find_m_ijk(data, G):
    """Collect sufficient statistics m_ijk: for node i, parent-value
    configuration j, and node value k, the number of samples matching both.

    Also annotates each graph node with its observed value range, sorted
    parent list, and the distinct parent-value combinations seen in the data.
    Returns (m_ijk, G).

    NOTE(review): m_ijk keys are built as str(i)+str(j)+str(k); for indices
    above 9, different (i, j, k) triples can collide (e.g. 1,11,1 vs 11,1,1)
    -- confirm this is safe for the dataset sizes used.
    """
    node_names_dict=defaultdict(list)
    a_count = 1
    min_val = defaultdict(list)
    max_val = defaultdict(list)
    total_count_values = defaultdict(list)
    # Pass 1: number the nodes (1-based) and record each node's value range.
    for i in G.nodes:
        node_names_dict[a_count] = i
        a_count=a_count+1
        min_val[i]=np.min(data[i][:])
        max_val[i]=np.max(data[i][:])
        total_count_values[i] = max_val[i] - min_val[i] + 1
        G.nodes[i]["val_range_count"] = max_val[i] - min_val[i] + 1
        G.nodes[i]["range"] = range(min_val[i],max_val[i]+1, 1)
        G.nodes[i]["list_values_at_j"] = []
        G.nodes[i]["value_of_ri"] = []
        G.nodes[i]["parents"] = []
    # Pass 2: per sample, record each node's value, parent list, and the
    # distinct parent-value combinations (these index j).
    for sam in range(0,data.shape[0]):
        for i in range(1,len(node_names_dict)+1):
            G.nodes[node_names_dict[i]]["data_value"]=data[node_names_dict[i]][sam]
            #G.nodes[node_names_dict[i]]["list_of_parents"] = []
            #G.nodes[node_names_dict[i]]["value_of_k"] = []
        for i in range(1,len(node_names_dict)+1):
            #G.nodes[node_names_dict[i]]["data_value"]=data[node_names_dict[i]][sam]
            list_of_parents = []
            for key in G.predecessors(node_names_dict[i]):
                list_of_parents.append(key)
            list_of_parents.sort()
            if len(list_of_parents)>0:
                G.nodes[node_names_dict[i]]["parents"] = list_of_parents
                #print("PARENTS", len(list_of_parents))
            #else:
                #print("No PARENTS")
            # Concatenate the parents' current values into a configuration key.
            j_value_parent_combination = ""
            for parent_node in list_of_parents:
                j_value_parent_combination = j_value_parent_combination + str(G.nodes[parent_node]["data_value"])
            #print("Node J value parent combination", node_names_dict[i], j_value_parent_combination)
            #Store the parent node value combination and the index will give 'j'
            if len(list_of_parents)>0:
                if (str(j_value_parent_combination) not in G.nodes[node_names_dict[i]]["list_values_at_j"]):
                    G.nodes[node_names_dict[i]]["list_values_at_j"].append(j_value_parent_combination)
                    G.nodes[node_names_dict[i]]["value_of_ri"].append(G.nodes[node_names_dict[i]]["data_value"])
            if len(list_of_parents) == 0:
                G.nodes[node_names_dict[i]]["list_values_at_j"] = [1]
                if (G.nodes[node_names_dict[i]]["data_value"] not in G.nodes[node_names_dict[i]]["value_of_ri"]):
                    G.nodes[node_names_dict[i]]["value_of_ri"].append(G.nodes[node_names_dict[i]]["data_value"])
    # Pass 3: count, for every (i, j, k), the samples whose parent values
    # match configuration j and whose own value matches value k.
    m_ijk = defaultdict(list)
    #for sam in range(0,data.shape[0]):
    #pdb.set_trace()
    for i in range(1,len(node_names_dict)+1):
        for j in range(0,len(G.nodes[node_names_dict[i]]["list_values_at_j"])):
            for k in range(0,len(G.nodes[node_names_dict[i]]["value_of_ri"])):
                #print("IJK", i, j+1 , k+1)
                index = str(i) + str(j+1) + str(k+1)
                m_ijk[index] = 0
                #pdb.set_trace()
                for sam in range(0,data.shape[0]):
                    j_parent_node_val = ""
                    for parents in G.nodes[node_names_dict[i]]["parents"]:
                        j_parent_node_val = j_parent_node_val + str(data[parents][sam])
                    if j_parent_node_val == "":
                        # Parentless nodes use the sentinel configuration 1.
                        j_parent_node_val = 1
                        #print("j_parent_node val", j_parent_node_val )
                        #print("j parane node val from data", G.nodes[node_names_dict[i]]["list_values_at_j"][j] )
                    if j_parent_node_val == G.nodes[node_names_dict[i]]["list_values_at_j"][j]:
                        k_node_value = data[node_names_dict[i]][sam]
                        if k_node_value == G.nodes[node_names_dict[i]]["value_of_ri"][k]:
                            m_ijk[index] = m_ijk[index] + 1
    #print("MIJK", m_ijk)
    return m_ijk, G
def main():
    """Entry point: use <infile>.csv and <outfile>.gph from the command line
    when both are given, otherwise fall back to the original hard-coded
    development paths (backward compatible with running without arguments).
    """
    if len(sys.argv) == 3:
        inputfilename = sys.argv[1]
        outputfilename = sys.argv[2]
    else:
        # Original hard-coded development defaults.
        inputfilename = "C:/Users/mink_/AA228Student/workspace/project1/small.csv"
        outputfilename = "C:/Users/mink_/AA228Student/workspace/project1/small.gph"
    print("IN/OUT FILES", inputfilename, outputfilename)
    compute(inputfilename, outputfilename)
if __name__ == '__main__':
    main()
| true |
54f8ed3f6508de7f15726e68b24229128e4dc85e | Python | Andida7/my_practice-code | /35.py | UTF-8 | 1,217 | 4.46875 | 4 | [] | no_license | """
Write a program that predicts the approximate size of a population of organisms. The
application should use text boxes to allow the user to enter the starting number of organisms,
the average daily population increase (as a percentage), and the number of days the
organisms will be left to multiply. For example, assume the user enters the following values:
Starting number of organisms: 2
Average daily increase: 30%
Number of days to multiply: 10
The program should display the following table of data:
Day Approximate Population
1 2
2 2.6
3 3.38
4 4.394
5 5.7122
6 7.42586
7 9.653619
8 12.5497
9 16.31462
10 21.209
"""
#get the number of starting organisms
organism_num = int(input('Starting number of organisms: '))
#get the average daily increase
# NOTE(review): the growth formula multiplies by this value directly, so a
# 30% increase should be entered as 0.30 -- confirm the intended format.
increament = float(input('Avarage daily increase: '))
#get the number of days the organisms multiply
multiplication_day = int(input('Number of days to multiply: '))
#printing the columns of the data
print('Day Approximate', 'Population', sep=' ')
#loop: compound the population once per day and print the running total
for days in range(multiplication_day):
    # Daily growth: population grows by (population * rate).
    organism_num += organism_num*increament
print(days+1, format(organism_num, '16.4f')) | true |
efb3bb968dbdf62249517eb2a2dc98b8374fafe6 | Python | zht200/KNN-RF | /RandomForest.py | UTF-8 | 739 | 3.5625 | 4 | [] | no_license | '''
This file runs Random forest on the Bank Marketing dataset.
The user will need to input the number of trees and whether to use bagging in the algorithm in the console.
'''
from RF import RF
import RFdataProcessing as RFData
# Whether each tree trains on a bootstrap sample (bagging); defaults to on.
isBagging = True
userInput = input("Please enter the number of trees:")
TreeNum = int(userInput)
userInput1 = input("Use bagging? (Y/N):")
if userInput1== "Y":
    isBagging = True
elif userInput1== "N":
    isBagging = False
# Training (x, y) and test (xt, yt) splits prepared by RFdataProcessing.
x = RFData.x
y = RFData.y
xt = RFData.xt
yt = RFData.yt
# Train the forest, predict on the held-out set, and count correct labels.
rf = RF(B=TreeNum, Bagging = isBagging)
rf.train(x, y)
yPredict = rf.predict(xt)
count = 0
for i in range(len(yPredict)):
    if yPredict[i] == yt[i]:
        count +=1
print(count/float(len(yt))) | true |
91ef2e9ae2f760c9dd273ecd2b473aaa2e0990d9 | Python | gsanz95/sudokusolver | /board.py | UTF-8 | 5,022 | 3.78125 | 4 | [] | no_license | import copy
import math
from misc import *
#from block import Block
class Board:
    """A square (e.g. 9x9) Sudoku board with simple solving helpers."""
    # Creates board and saves ID and difficulty
    # and initiates an empty list for the cells (filled via setCells).
    def __init__(self, boardId, difficulty = "NORMAL"):
        self.id = boardId
        self.difficulty = difficulty
        self.cells = []
        self.cellPossibilities = dict()
    # Deep copies the list passed; also derives the board size (row count).
    def setCells(self, cellsToSet):
        self.cells = copy.deepcopy(cellsToSet)
        self.size = int(len(self.cells))
    # To string: one row of space-separated values per line.
    def __str__(self):
        outputText = ""
        for row in range(0, self.size):
            for col in range(0, self.size):
                outputText += str(self.cells[row][col])
                outputText += " "
            outputText += "\n"
        return outputText
    # Returns a list containing values in the row
    # number passed
    def getRow(self, rowNumber):
        rowHolder = []
        for col in range(0, self.size):
            rowHolder.append(self.cells[rowNumber][col])
        return rowHolder
    # Returns a list containing values in the column
    # number passed
    def getCol(self, colNumber):
        colHolder = []
        for row in range(0, self.size):
            colHolder.append(self.cells[row][colNumber])
        return colHolder
    # Returns a list containing values in the sub-square
    # that contains the cell (rowNumber, colNumber)
    def getSquare(self, rowNumber, colNumber):
        cellHolder = []
        squareSize = int(math.sqrt(self.size))
        # Walk back to the square's top-left corner.
        while rowNumber%squareSize != 0:
            rowNumber -= 1
        while colNumber%squareSize != 0:
            colNumber -= 1
        # Gather the square's cells row by row.
        for i in range(0, squareSize):
            for j in range(0, squareSize):
                cellHolder.append(self.cells[rowNumber+i][colNumber+j])
        return cellHolder
    # Looks if any rows have a single empty
    # cell (0) and fills it with the missing number.
    # hasUniqueNumbers/findMissingNumber come from misc (imported with *).
    def tryCompleteRows(self):
        for rowNumber in range(0, self.size):
            row = self.getRow(rowNumber)
            # No empty cells: nothing to do for this row.
            if 0 not in row:
                continue
            # Single empty cell
            if(hasUniqueNumbers(row)):
                colNumber = row.index(0)
                valueForCell = findMissingNumber(row)
                self.cells[rowNumber][colNumber] = valueForCell
    # Looks if any columns have a single empty
    # cell and fills it with the missing number
    def tryCompleteCols(self):
        for colNumber in range(0, self.size):
            col = self.getCol(colNumber)
            # No empty cells
            if 0 not in col:
                continue
            # Single empty cell
            if(hasUniqueNumbers(col)):
                rowNumber = col.index(0)
                valueForCell = findMissingNumber(col)
                self.cells[rowNumber][colNumber] = valueForCell
    # Looks if any squares have a single empty
    # cell and tries to fill them.
    # NOTE(review): unlike the row/column variants there is no "0 in square"
    # pre-check here -- confirm hasUniqueNumbers handles full squares.
    def tryCompleteSquares(self):
        for row in range(0, self.size, int(math.sqrt(self.size))):
            for col in range(0, self.size, int(math.sqrt(self.size))):
                square = self.getSquare(row, col)
                # Single empty cell
                if(hasUniqueNumbers(square)):
                    valueForCell = findMissingNumber(square)
                    self.completeSquare(row, col, valueForCell)
    # Fills the empty cell(s) of the square containing (rowNumber, colNumber)
    # with the new value passed
    def completeSquare(self, rowNumber, colNumber, newValue):
        squareSize = int(math.sqrt(self.size))
        # Walk back to the square's top-left corner.
        while rowNumber%squareSize != 0:
            rowNumber -= 1
        while colNumber%squareSize != 0:
            colNumber -= 1
        # Fill every empty (0) cell in the square with newValue.
        for i in range(0, squareSize):
            for j in range(0, squareSize):
                if self.cells[rowNumber+i][colNumber+j] == 0:
                    self.updateCell(rowNumber+i,colNumber+j, newValue)
    # Updates a cell's value located
    # at rowNumber and colNumber
    def updateCell(self, rowNumber, colNumber, newValue):
        self.cells[rowNumber][colNumber] = newValue
    # Collects possible values that the cell at
    # rowNumber and colNumber can hold
    def generateOptions(self, rowNumber, colNumber):
        # Start from all candidate values 1..size.
        possibleVals = [x for x in range(1, self.size+1)]
        # Get cells already filled in the same row, column, and square.
        rowVals = self.getRow(rowNumber)
        colVals = self.getCol(colNumber)
        sqrVals = self.getSquare(rowNumber, colNumber)
        # Subtract those values from the list of candidates.
        possibleVals = [value for value in possibleVals if value not in rowVals]
        possibleVals = [value for value in possibleVals if value not in colVals]
        possibleVals = [value for value in possibleVals if value not in sqrVals]
return possibleVals | true |
de6f93e6b8b7e92139e63db6303e38c1c7d4e3ab | Python | aa10402tw/NCTU_Machine-Learning | /4_Expectation–Maximization/HW4_EM.py | UTF-8 | 4,332 | 2.625 | 3 | [] | no_license | from numpy.linalg import inv
import numpy as np
import matplotlib.pyplot as plt
import math
from math import e
from math import log
from numpy import linalg
import sys
import sympy
from sympy import *
from IPython.display import display, HTML
import random
from math import log
from math import exp
# %matplotlib inline
# %config IPCompleter.greedy=True
# sympy.init_printing(use_unicode=False, wrap_line=True)
np.set_printoptions(suppress=True)
from utils import *
# Load mnist data (load_mnist / img_to_vector come from utils, imported *).
print('Load Data...', end=' ')
imgs_train, labels_train = load_mnist(train=True)
imgs_test, labels_test = load_mnist(train=False)
# Change Image to Feature Vector
X_train, Y_train = img_to_vector(imgs_train), labels_train
X_test, Y_test = img_to_vector(imgs_test), labels_test
print('Finish!')
print(X_train.shape)
print(max(X_train[0]))
# E-Step : Compute the probabilities of cluster assignment (r_ik)
# M-step : Update parameters mu, pi givn r
# X = [[x_11...x_1d], ..., [x_n1...x_nd]] where x_ij = 0/1 (success/fail)
# mu : [mu_1...mu_k] where mu_i is the vector of prob of success for cluster k , mu_i's shape = (1, D)
# pi : [pi_1 ... pi_k] where pi_i is the prob to draw cluster k
# r_iK : the prob(expectation) that Xi belong to cluster k
# Zi : [z_i1, ..., z_ik] binary k=dim data(assign to cluster k)
num_cluster = 10
K = num_cluster
X = np.copy(X_train)[:]
Y = np.copy(Y_train)[:]
N, D = X.shape
# init parameters mu & pi (random; pi normalized to sum to 1)
mu = np.random.random((K, D))
pi = np.random.random((K, 1))
pi = pi / pi.sum()
r = np.zeros((N, K))
print(X.max())
print(X.min())
# Binarize: map grayscale 0~255 pixels to 0 or 1 at threshold 128.
X[X < 128.0] = 0
X[X >= 128.0] = 1
print(X.max())
print(X.min())
from numpy import prod
def L2distance(A, B):
    """Euclidean (L2) distance between two equal-sized arrays,
    computed element-wise after flattening both."""
    flat_a = A.reshape(prod(A.shape))
    flat_b = B.reshape(prod(B.shape))
    diff = flat_a - flat_b
    return math.sqrt(np.dot(diff, diff))
def EM(X, mu, pi, r, max_iter=100):
    """EM for a Bernoulli mixture model.

    Args:
        X: (N, D) binary data matrix.
        mu: (K, D) per-cluster Bernoulli success probabilities (updated).
        pi: length-K cluster prior probabilities (updated).
        r: (N, K) responsibility buffer, overwritten each E-step.
        max_iter: maximum number of EM iterations.

    Returns:
        (mu, pi, r) after convergence (mu change < 1e-5) or max_iter rounds.
    """
    N, D = X.shape
    K, _ = mu.shape
    new_mu = np.copy(mu)
    for it in range(max_iter):
        # E-Step : Compute the probabilities of cluster assignment (r_ik)
        for i in range(N):
            for k in range(K):
                r[i][k] = log(pi[k]) # Log scale
                for d in range(D):
                    xid = X[i][d]
                    try:
                        # Bernoulli likelihood term; 1e-7 guards log(0).
                        r[i][k] += log((mu[k][d]**xid) * ((1 - mu[k][d])**(1 - xid)) + 1e-7)
                    except:
                        print('domain error')
                        print(mu[k][d], xid)
                        print((mu[k][d]**xid) * ((1 - mu[k][d])**(1 - xid)))
            r[i] -= r[i].max()  # subtract max for numerical stability
            r[i] = np.exp(r[i]) # Exp, back to origin scale
            r[i] = r[i] / r[i].sum() # normalize to 1
        Nk = r.sum(axis=0) # effective sample count per cluster
        pi = Nk / Nk.sum()
        # M-step : Update mu as the responsibility-weighted mean of the data.
        for k in range(K):
            mu_k = 0
            for i in range(N):
                mu_k += r[i][k] * X[i]
            new_mu[k] = mu_k / Nk[k]
        diff = L2distance(new_mu, mu)
        print(diff)
        mu = np.copy(new_mu)
        if diff < 1e-5:
            print('converge after %d iteration' % (it))
            break
    return mu, pi, r
def EM_inference(X, mu, pi):
    """Assign each sample to its most likely Bernoulli-mixture cluster.

    Args:
        X: (N, D) binary data matrix.
        mu: (K, D) per-cluster Bernoulli success probabilities.
        pi: length-K cluster prior probabilities.

    Returns:
        (N,) array holding the argmax cluster index for each sample.
    """
    N, D = X.shape
    K, _ = mu.shape
    y_pred = np.zeros((N,))
    # Log-posterior scores for one sample.  Allocated locally instead of
    # relying on the module-level `r` array (the original read a global,
    # which broke the function when used standalone).
    scores = np.zeros((K,))
    for i in range(N):
        for k in range(K):
            scores[k] = log(pi[k])  # log prior, then add log-likelihood terms
            for d in range(D):
                xid = X[i][d]
                try:
                    # Bernoulli likelihood term; 1e-7 guards log(0).
                    scores[k] += log((mu[k][d]**xid) * ((1 - mu[k][d])**(1 - xid)) + 1e-7)
                except ValueError:  # log of a non-positive value
                    print('domain error')
                    print(mu[k][d], xid)
                    print((mu[k][d]**xid) * ((1 - mu[k][d])**(1 - xid)))
        y_pred[i] = np.argmax(scores)
    return y_pred
# Fit the mixture, then assign every sample to its most likely cluster.
mu, pi, r = EM(X, mu, pi, r)
y_pred = EM_inference(X, mu, pi)
from sklearn.metrics import confusion_matrix
# Compare true-label counts against per-cluster assignment counts.
count_y = [np.count_nonzero(Y == i) for i in range(10)]
count_y_pred = [np.count_nonzero(y_pred == i) for i in range(num_cluster)]
print(count_y)
print(count_y_pred)
print(confusion_matrix(y_pred, Y))
print(r.shape)
print(mu.shape)
print(mu.max())
print(mu.min())
print("~~")
# Visualize each cluster's mean as a 28x28 grayscale image.
for i in range(num_cluster):
    plt.subplot(2, 5, i + 1)
    p = mu[i].reshape((28, 28))
    plt.imshow(p, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.show()
| true |
e8316c9ccf35bdaa617dd0420661bf34904965a5 | Python | ganeshskudva/Algorithms | /HackerRank/Game_of_Stones.py | UTF-8 | 76 | 3.25 | 3 | [] | no_license | for _ in xrange(input()):
print ["First","Second"][input()%7 in [0,1]]
| true |
f672117746932192d3ec24b9590608d9939cfb8e | Python | Giri-5/Data-Structures-and-Algorithms | /Linked_lists/Doubly_LinkedList.py | UTF-8 | 2,430 | 3.71875 | 4 | [] | no_license | class Node:
    def __init__(self,data):
        """Store *data* and initialise both neighbour links to None."""
        self.data = data
        self.next = None
        self.prev = None
class Linkedlist:
    """Doubly linked list with append/prepend/insert/remove/print.

    Nodes are ``Node`` instances carrying ``data``, ``next`` and ``prev``;
    ``head``/``tail``/``length`` track the list state.
    """

    def __init__(self):
        self.head = None
        self.tail = None
        self.length = 0

    def append(self, data):
        """Add ``data`` at the tail and return the new tail's data."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            self.tail = new_node
            self.length = 1
        else:
            new_node.prev = self.tail
            self.tail.next = new_node
            self.tail = new_node
            self.length += 1
        # Bug fix: the original unconditionally printed self.tail.prev.data,
        # which raised AttributeError on the very first append (prev is None).
        if self.tail.prev is not None:
            print("last", self.tail.prev.data)
        return self.tail.data

    def prepend(self, data):
        """Add ``data`` at the head."""
        new_node = Node(data)
        if self.head is None:
            # Bug fix: prepending into an empty list used to dereference
            # self.head (None) and crash.
            self.head = new_node
            self.tail = new_node
        else:
            new_node.next = self.head
            self.head.prev = new_node
            self.head = new_node
        self.length += 1
        print("Prepended data", self.head.data)

    def insert(self, index, data):
        """Insert ``data`` before position ``index``.

        Index 0 delegates to prepend; indices past the end append.
        """
        if index == 0:
            self.prepend(data)
            return
        if index >= self.length:
            self.append(data)
            return
        leader = self.head
        for _ in range(index - 1):  # walk to the node just before the slot
            leader = leader.next
        follower = leader.next
        new_node = Node(data)
        leader.next = new_node
        new_node.prev = leader
        new_node.next = follower
        follower.prev = new_node
        self.length += 1

    def remove(self, index):
        """Unlink the node at position ``index``."""
        if index == 0:
            self.head = self.head.next
            if self.head is None:
                # list became empty; tail must not keep the removed node alive
                self.tail = None
            else:
                # Bug fix: the old head stayed reachable via the new head's
                # prev pointer in the original implementation.
                self.head.prev = None
            self.length -= 1
            return
        if index == self.length - 1:
            self.tail = self.tail.prev
            self.tail.next = None
            self.length -= 1
            return
        leader = self.head
        for _ in range(index - 1):  # node just before the one to remove
            leader = leader.next
        target = leader.next
        follower = target.next
        leader.next = follower
        follower.prev = leader
        self.length -= 1

    def print(self):
        """Print the node values from head to tail, space separated."""
        node = self.head
        while node:
            print(node.data, end=" ")
            node = node.next
# Smoke test: build the list, insert/remove at a few positions, then dump it.
llist = Linkedlist()
print(llist.append(10))
print(llist.append(20))
llist.prepend(5)
llist.insert(2,15)
llist.insert(0,2)
# remove the element at index 2
llist.remove(2)
print(llist.length)
llist.print()
| true |
059d212f970416edff0a93a844a03a2a9bda2f45 | Python | lwoodyiii/AbstractAlgebra | /Vector.py | UTF-8 | 938 | 3.40625 | 3 | [] | no_license | class Vector:
    def __init__(self, t):
        """Wrap tuple *t* as the vector's components; dim caches its length."""
        super().__init__()
        self.elements = t
        self.dim = len(t)
def scalar_multiplication(self, other):
x = []
for i in self.elements:
x.append(i * other)
return Vector(tuple(x))
    # Also known as the dot product
    def vector_inner_product(self, other):
        """Inner (dot) product with *other* — intentionally not implemented yet."""
        raise NotImplementedError
    def __rmul__(self, other):
        """Support ``scalar * Vector``."""
        return self.scalar_multiplication(other)
    def __mul__(self, other):
        """Support ``Vector * scalar``."""
        return self.scalar_multiplication(other)
    def __eq__(self, value):
        # NOTE(review): assumes *value* is a Vector (no NotImplemented for
        # foreign types); defining __eq__ without __hash__ also makes Vector
        # unhashable — confirm both are intended.
        return (self.elements == value.elements)
def __add__(self, other):
x = []
for i, j in zip(self.elements, other.elements):
x.append(i + j)
return Vector(tuple(x))
    def __radd__(self, other):
        # NOTE(review): adds the scalar *other* to every component, which is
        # not symmetric with __add__ (vector + vector) — verify this is the
        # intended semantics.
        x = []
        for i in self.elements:
            x.append(i + other)
        return Vector(tuple(x))
| true |
df29e29c4f28758bb31e34c4f32fb7b74c31b7a5 | Python | Zimbra/zm-load-testing | /zmmboxsearch/response_time.py | UTF-8 | 1,046 | 2.875 | 3 | [] | no_license | import re
import numpy as np
# Collect SearchMultiMailboxRequest elapsed times from the Zimbra mailbox log
# and report each value plus the mean and the 90th percentile.
log_file_path = '/opt/zimbra/log/mailbox.log'

# Capture the elapsed milliseconds from matching log lines.
pattern = r"SearchMultiMailboxRequest elapsed=(\d+)"

time_elapsed_values = []
with open(log_file_path, "r") as file:
    for line in file:
        match = re.search(pattern, line)
        # The pattern always has exactly one group, so a successful match is
        # sufficient (the old len(match.groups()) > 0 check was redundant).
        if match:
            time_elapsed_values.append(int(match.group(1)))

# Robustness fix: np.percentile/np.mean raise on an empty sequence, so bail
# out with a clear message when the log contains no matching requests.
if not time_elapsed_values:
    print("No SearchMultiMailboxRequest entries found in", log_file_path)
else:
    # Sort the time elapsed values in ascending order
    time_elapsed_values.sort()

    percentile_90 = np.percentile(time_elapsed_values, 90)
    average = np.mean(time_elapsed_values)

    print("Time Elapsed Values for SearchMultiMailboxRequest on mailbox 1:")
    for time_elapsed in time_elapsed_values:
        print(time_elapsed)
    print("Average:", average)
    print("90th Percentile:", percentile_90)
| true |
ab282363edc3f8d97c35bd8bd43034b958d779be | Python | Educorreia932/Pokemon-Origins | /plot.py | UTF-8 | 1,110 | 3.109375 | 3 | [] | no_license | import csv
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.image as mpimg
import numpy as np
# Maps origin name -> list of Pokémon names sharing that origin.
origins = {}

# Read data file: column 0 is the Pokémon name, column 1 its origin
# (assumed from usage — TODO confirm against data.csv).
with open('data.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')

    for row in reader:
        # skip empty origins and the header row
        if row[1] != "" and row[1] != "Origin":
            if row[1] not in origins:
                origins[row[1]] = [row[0]]

            else:
                origins[row[1]].append(row[0])

# Graph plotting: origins sorted by how many Pokémon they have.
origin_name = list(origins)
number_of_pokemon = list(len(origin) for origin in origins.values())

df = pd.DataFrame({"x": origin_name, "y": number_of_pokemon})
df = df.sort_values("y")

plt.xlim(-0.5, len(df.x))
plt.ylim(0, max(df.y))
plt.xticks(np.arange(len(df.x)), df.x, rotation = "vertical")
plt.ylabel("Number of Pokémon")

# Plotting pokémon sprites: stack each origin's icons vertically as a bar.
# NOTE(review): the loop variable shadows the earlier `origin_name` list.
for i, origin_name in enumerate(df.x):
    for j, pokemon_name in enumerate(origins[origin_name]):
        image = mpimg.imread("icons/" + pokemon_name + ".png")
        plt.imshow(image, extent=[i - 0.5, i + 0.5, j, j + 1])
plt.show() | true |
33e31e60ec731aa6093bfbe3f03031432b83946a | Python | wukm/pycake | /merging.py | UTF-8 | 7,901 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python3
import numpy as np
import numpy.ma as ma
from scipy.ndimage import label
from skimage.morphology import remove_small_objects
import matplotlib.pyplot as plt
"""
this module contains implementations of several (simple) strategies of
turning a Frangi stack into a single image, i.e. simple segmentation methods.
Also some older functions which aren't being used now, such as "view_slices"
"""
def nz_percentile(A, q, axis=None, interpolation='linear'):
    """Compute ``np.percentile(..., q)`` over the strictly positive elements of A.

    Parameters
    ----------
    A : ndarray or MaskedArray
        Input array; masked entries are filled with zero and therefore
        excluded.  Only elements with ``A > 0`` contribute, so the shape
        of A is irrelevant.
    q : float
        Percentile to compute, between 0 and 100.0 (inclusive).
    axis, interpolation :
        Forwarded to ``np.percentile``.  On NumPy >= 1.22 the old
        ``interpolation`` keyword is deprecated/removed in favour of
        ``method``, so this wrapper tries the new name first.

    Returns
    -------
    out : float
    """
    if ma.is_masked(A):
        A = A.filled(0)
    nonzero = A[A > 0]
    try:
        # modern NumPy spelling
        return np.percentile(nonzero, q, axis=axis, method=interpolation)
    except TypeError:
        # NumPy < 1.22 only knows the old keyword name
        return np.percentile(nonzero, q, axis=axis,
                             interpolation=interpolation)
def apply_threshold(targets, alphas, return_labels=True):
    """Threshold a Frangi stack scale-wise and reduce it to a 2D mask.

    Parameters
    ----------
    targets : ndarray
        3D array whose last axis indexes scales: ``targets[:, :, k]`` is the
        Frangi response at scale k.  A 2D array is treated as a single scale.
    alphas : float or array_like
        One threshold per scale, or a single threshold broadcast over all
        scales.
    return_labels : bool, optional
        When True (default), also return the 1-based scale index of the
        maximal response wherever the threshold was met.

    Returns
    -------
    passed : ndarray of bool
        True wherever the response met its threshold at any scale.
    labels : ndarray, optional
        0 where nothing passed the threshold; otherwise k+1 where scale k
        held the maximal filter response (so ``targets[:, :, labels - 1]``
        is the max response at passing pixels).
    """
    # broadcastable threshold array (works for a scalar too)
    alphas = np.asarray(alphas)

    # trivially promote a single 2D image to a one-scale stack
    if targets.ndim == 2:
        targets = targets[:, :, np.newaxis]

    # either one alpha per scale, or a single alpha for every scale
    assert alphas.size in (1, targets.shape[-1])

    # pixels whose response met the threshold at any scale
    passed = (targets >= alphas).any(axis=-1)

    if not return_labels:
        return passed

    # 1-based index of the maximal response; 0 is reserved for "no match"
    labels = targets.argmax(axis=-1) + 1
    labels[~passed] = 0

    assert np.all(passed == (labels > 0))

    return passed, labels
def sieve_scales(multiscale, high_percentile, low_percentile, min_size=None,
                 axis=0):
    """Hysteresis-style sieve across the scales of a Frangi stack.

    At each scale, connected regions above the ``low_percentile`` threshold
    are kept only when they contain at least one pixel above the
    ``high_percentile`` threshold; kept regions are written into the output
    with the scale index.

    Parameters
    ----------
    multiscale : ndarray
        3D array; two axes form the image and ``axis`` indexes the scales.
    high_percentile, low_percentile : float
        Percentiles (of nonzero values, via ``nz_percentile``) used as the
        strong / weak thresholds at each scale.
    min_size : int or sequence of int, optional
        Minimum object size passed to ``remove_small_objects`` per scale.
        NOTE(review): skimage expects a boolean/labeled image there, while
        ``v`` is the raw float response — confirm this path is exercised.
    axis : int, optional
        Axis of ``multiscale`` holding the scales (0, or -1/2 for last).

    Returns
    -------
    sieved : ndarray of int32
        Per-pixel index of the (last) scale whose sieve accepted it, else 0.
        NOTE(review): scale index 0 is indistinguishable from background
        here — confirm that is intended.
    """
    assert multiscale.ndim == 3

    if axis in (-1, 2):
        # transpose is a view; the input itself is untouched
        V = np.transpose(multiscale, axes=(2, 0, 1))
    elif axis == 0:
        V = multiscale  # just to use the same variable name
    else:
        raise ValueError('Please make resolution the first or last dimension.')

    # Bug fix: the per-scale min_size list is sized from the *scale* axis of
    # the (possibly transposed) stack, not unconditionally from shape[0].
    if np.isscalar(min_size):
        min_size = [min_size] * V.shape[0]

    # label image with the shape of a single scale slice
    sieved = np.zeros(V.shape[1:], dtype=np.int32)

    print('sieving ', end='')
    for n, v in enumerate(V):
        print('σ', end='', flush=True)

        if min_size is not None:
            z = remove_small_objects(v, min_size=min_size[n])
        else:
            z = v  # relabel to use same variable

        high_thresh = nz_percentile(v, high_percentile)
        low_thresh = nz_percentile(v, low_percentile)
        labeled, n_labels = label(z > low_thresh)
        high_passed = (z > high_thresh)

        # Bug fix: scipy.ndimage.label numbers components 1..n_labels; the
        # old loop iterated range(n_labels) and therefore always skipped the
        # last component (and pointlessly tested the background label 0).
        for lab in range(1, n_labels + 1):
            if np.any(high_passed[labeled == lab]):
                sieved[labeled == lab] = n
    print()
    return sieved
def view_slices(multiscale, axis=0, scales=None, crop=None, cmap='nipy_spectral',
                vmin=0, vmax=1.0, outnames=None, show_colorbar=True):
    """Show each scale of a 3D filter stack interactively, or save to files.

    Parameters
    ----------
    multiscale : ndarray
        3D stack; ``axis`` selects which dimension indexes the scales.
    axis : int, optional
        Scale axis (0, or -1/2 for last).
    scales : sequence of float, optional
        Sigma values, used only for the figure titles.
    crop : slice or tuple of slices, optional
        Applied to each 2D slice before display ("crop before you get in
        here" — the caller builds the slice object).
    cmap, vmin, vmax :
        Passed through to matplotlib.
    outnames : iterable of str, optional
        If given, each slice is written with ``plt.imsave`` instead of being
        shown (non-interactive mode).
    show_colorbar : bool, optional
        Show a colorbar in interactive mode.
    """
    assert multiscale.ndim == 3

    if axis in (-1, 2):
        # this won't change the input, just creates a view
        V = np.transpose(multiscale, axes=(2, 0, 1))
    elif axis == 0:
        V = multiscale  # just to use the same variable name
    else:
        raise ValueError('Please make resolution the first or last dimension.')

    # default to "no title" / "interactive" per slice
    if scales is None:
        scales = [None for x in range(multiscale.shape[0])]
    if outnames is None:
        outnames = [None for x in range(multiscale.shape[0])]

    plt.close('all')

    for v, sigma, outname in zip(V, scales, outnames):
        if crop is None:
            viewable = v
        else:
            viewable = v[crop]
        if outname is None:
            plt.imshow(viewable, cmap=cmap, vmin=vmin, vmax=vmax)
            # NOTE(review): showFullScreen is Qt-backend specific — this will
            # fail on non-Qt matplotlib backends.
            mng = plt.get_current_fig_manager()
            #mng.window.showMaximized()
            mng.window.showFullScreen()
            plt.tight_layout()
            if sigma is not None:
                plt.title(r'$\sigma={:.2f}$'.format(sigma))
            plt.axis('off')
            if show_colorbar:
                plt.colorbar()
                plt.tight_layout()
            plt.show()
            plt.close()
        else:
            # save them non interactively with imsave
            plt.imsave(outname, viewable, cmap=cmap, vmin=vmin, vmax=vmax)
| true |
0e5fee26994b35cd5bd788f42ce452bc9d810c6b | Python | Gabriel-Fernandes1917/lab-the-python | /class 2708/exercise1.py | UTF-8 | 204 | 3.5625 | 4 | [
"MIT"
] | permissive |
# Collect name/CPF pairs until the user stops answering "sim" (yes).
dictionary ={None:None}
loop = "sim"
while(loop == "sim"):
    # NOTE(review): keys 0 and 1 are overwritten on every pass, so only the
    # last person entered survives — confirm that is intended.
    dictionary[0]=input('informe o nome\n')
    dictionary[1]=input('informe o cpf\n')
    loop = input('deseja continuar ?')
print(dictionary)
| true |
e9b8bff691770c8ee9aae307dd495333046f2a00 | Python | Kineolyan/lego-dir | /v2_test.py | UTF-8 | 13,235 | 2.953125 | 3 | [
"MIT"
] | permissive | import unittest
import os
import v2
from fs import TestApi
def simple_entry(location, element):
    """Build a minimal config entry: a location with a single selection."""
    entry = dict(location=location, selection=[element])
    return entry
class V1FormatEntryTest(unittest.TestCase):
def setUp(self):
self.fs = TestApi(cwd = '/a/b/c')
def test_with_absolute_path(self):
entry = v2.format_entry('vdir', '/root/some/path:one/dir', self.fs)
self.assertEqual(entry, simple_entry('/root/some/path', 'one/dir'))
def test_with_user_home(self):
entry = v2.format_entry('vdir', '~/my/root:one/dir', self.fs)
self.assertEqual(entry, simple_entry(f"{self.fs.home}/my/root", 'one/dir'))
def test_with_relative_path(self):
entry = v2.format_entry('vdir', 'd:other', self.fs)
self.assertEqual(entry, simple_entry('/a/b/c/vdir/d', 'other'))
def test_with_rewinding_path(self):
entry = v2.format_entry('vdir', '../../and/up/again:other', self.fs)
self.assertEqual(entry, simple_entry('/a/b/and/up/again', 'other'))
def test_with_leading_base(self):
entry = v2.format_entry('to/vdir', '../path:target', self.fs)
self.assertEqual(entry, simple_entry('/a/b/c/to/path', 'target'))
def test_with_rewinding_base(self):
entry = v2.format_entry('../vdir', '../path:target', self.fs)
self.assertEqual(entry, simple_entry('/a/b/path', 'target'))
def test_with_complex_entry_and_absolute_path(self):
config = {
"location": '/var',
"base": 'log',
"selection": ['nginx', 'apache']
}
entry = v2.format_entry('vdir', config, self.fs)
self.assertEqual(entry, config)
def test_with_complex_entry_and_user_path(self):
config = {
"location": '~/my/root',
"base": 'log',
"selection": ['nginx', 'apache']
}
entry = v2.format_entry('vdir', config, self.fs)
self.assertEqual(
entry,
{
"location": f"{self.fs.home}/my/root",
"base": 'log',
"selection": ['nginx', 'apache']
})
def test_with_complex_entry_and_relative_path(self):
config = {
"location": 'd',
"base": 'log',
"selection": ['nginx', 'apache']
}
entry = v2.format_entry('vdir', config, self.fs)
self.assertEqual(
entry,
{
"location": '/a/b/c/vdir/d',
"base": 'log',
"selection": ['nginx', 'apache']
})
def test_with_complex_entry_and_rewinding_path(self):
config = {
"location": '../../and/up/again',
"base": 'log',
"selection": ['nginx', 'apache']
}
entry = v2.format_entry('vdir', config, self.fs)
self.assertEqual(
entry,
{
"location": '/a/b/and/up/again',
"base": 'log',
"selection": ['nginx', 'apache']
})
def test_with_complex_entry_and_leading_base(self):
config = {
"location": '../path',
"base": 'log',
"selection": ['nginx', 'apache']
}
entry = v2.format_entry('to/vdir', config, self.fs)
self.assertEqual(
entry,
{
"location": '/a/b/c/to/path',
"base": 'log',
"selection": ['nginx', 'apache']
})
def test_with_complex_entry_and_rewinding_base(self):
config = {
"location": '../path',
"base": 'log',
"selection": ['nginx', 'apache']
}
entry = v2.format_entry('../vdir', config, self.fs)
self.assertEqual(
entry,
{
"location": '/a/b/path',
"base": 'log',
"selection": ['nginx', 'apache']
})
def test_with_negated_root_entry(self):
entry = v2.format_entry('../vdir', '~/dd:!f', self.fs)
self.assertEqual(
entry,
{
"location": f"{self.fs.home}/dd",
"selection": ['!f']
})
def test_with_negated_entry(self):
entry = v2.format_entry('../vdir', '~/dd:path/to/!f', self.fs)
self.assertEqual(
entry,
{
"location": f"{self.fs.home}/dd",
"base": 'path/to',
"selection": ['!f']
})
class V2CheckEntryTest(unittest.TestCase):
def setUp(self):
home = '/home/user'
self.fs = TestApi(home = home, cwd = home)
def test_simple_entry(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/file')
config = {
'location': '/path/to/a/dir',
'selection': ['file']
}
v2.check_entries([config], self.fs)
def test_entry_with_base(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
self.fs._set_dir('/path/to/a/dir/d1/d2/file')
config = {
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['file']
}
v2.check_entries([config], self.fs)
def test_multiple_entries(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
self.fs._set_dir('/path/to/a/dir/d1/d2/f1')
self.fs._set_dir('/path/to/a/dir/d1/d2/f2')
config = {
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['f1', 'f2']
}
v2.check_entries([config], self.fs)
def test_with_negate_entries(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
config = {
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['!f1', '!f2']
}
v2.check_entries([config], self.fs)
def test_with_mixed_entries(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
config = {
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['!f1', 'f2']
}
with self.assertRaises(ValueError):
v2.check_entries([config], self.fs)
def test_with_invalid_location(self):
config = {'location': '/not/a/dir'}
with self.assertRaises(ValueError):
v2.check_entries([config], self.fs)
def test_with_invalid_base(self):
self.fs._set_dir('/the/real/dir')
config = {
'location': '/the/real/dir',
'base': 'is/wrong'
}
with self.assertRaises(ValueError):
v2.check_entries([config], self.fs)
def test_with_base_and_invalid_entry(self):
self.fs._set_dir('/the/real')
self.fs._set_dir('/the/real/dir')
self.fs._set_dir('/the/real/dir/valid')
config = {
'location': '/the/real',
'base': 'dir',
'selection': [
'valid',
'invalid'
]
}
with self.assertRaises(ValueError):
v2.check_entries([config], self.fs)
def test_multiple_checks(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
self.fs._set_dir('/path/to/a/dir/d1/d2/f1')
self.fs._set_dir('/path/to/a/dir/d1/d2/f2')
self.fs._set_dir('/path/to/a/dir/d3')
configs = [
{
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['f1', 'f2']
},
{
'location': '/path/to/a/dir',
'base': 'd3',
'selection': ['!f1', '!f2']
}
]
v2.check_entries(configs, self.fs)
def test_multiple_with_errors(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
self.fs._set_dir('/path/to/a/dir/d1/d2/f1')
self.fs._set_dir('/path/to/a/dir/d1/d2/f2')
configs = [
{
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['f1', 'f2']
},
{
'location': '/path/to/a/dir',
'base': 'd3',
'selection': ['!f1', '!f2']
}
]
with self.assertRaises(ValueError):
v2.check_entries(configs, self.fs)
def test_not_has_no_path(self):
self.fs._set_dir('/path/to/a/dir')
self.fs._set_dir('/path/to/a/dir/d1/d2')
config = {
'location': '/path/to/a/dir',
'base': 'd1/d2',
'selection': ['!f1/f2']
}
with self.assertRaises(ValueError):
v2.check_entries([config], self.fs)
class V2BuildEntryTest(unittest.TestCase):
def setUp(self):
home = '/home/user'
self.fs = TestApi(home = home, cwd = home)
def test_for_simple_entry(self):
structure = [simple_entry('/a/b/dir', 'f1')]
v2.build_entries('path/to/vdir', structure, self.fs)
self.assertEqual(
set(self.fs.created_links),
set([('/a/b/dir/f1', 'path/to/vdir/f1')]))
def test_for_simple_entries(self):
config = {
'location': '/a/b/dir',
'selection': ['f1', 'f2']
}
v2.build_entries('path/to/vdir', [config], self.fs)
self.assertEqual(
set(self.fs.created_links),
set([
('/a/b/dir/f1', 'path/to/vdir/f1'),
('/a/b/dir/f2', 'path/to/vdir/f2')
]))
def test_for_simple_entries_with_base(self):
config = {
'location': '/a/b',
'base': 'long/dir',
'selection': ['f1', 'f2']
}
v2.build_entries('path/to/vdir', [config], self.fs)
self.assertEqual(
self.fs.created_dirs,
[
'path/to/vdir/long',
'path/to/vdir/long/dir'
])
self.assertEqual(
set(self.fs.created_links),
set([
('/a/b/long/dir/f1', 'path/to/vdir/long/dir/f1'),
('/a/b/long/dir/f2', 'path/to/vdir/long/dir/f2')
]))
def test_for_not_entry(self):
entries = ['a', 'b', 'not-this', 'c']
self.fs._set_entries('/path', entries)
for e in entries:
self.fs._set_dir(f"/path/{e}")
structure = [simple_entry('/path', '!not-this')]
v2.build_entries('to/vdir', structure, self.fs)
self.assertEqual(
set(self.fs.created_links),
set([
('/path/a', 'to/vdir/a'),
('/path/b', 'to/vdir/b'),
('/path/c', 'to/vdir/c')
]))
def test_for_not_entry_with_base(self):
entries = ['a', 'b', 'not-this', 'c']
self.fs._set_entries('/path/inside/dir', entries)
for e in entries:
self.fs._set_dir(f"/path/inside/dir/{e}")
config = {
'location': '/path',
'base': 'inside/dir',
'selection': ['!not-this']
}
v2.build_entries('to/vdir', [config], self.fs)
self.assertEqual(
self.fs.created_dirs,
[
'to/vdir/inside',
'to/vdir/inside/dir'
])
self.assertEqual(
set(self.fs.created_links),
set([
('/path/inside/dir/a', 'to/vdir/inside/dir/a'),
('/path/inside/dir/b', 'to/vdir/inside/dir/b'),
('/path/inside/dir/c', 'to/vdir/inside/dir/c')
]))
def test_for_many_not_entries_with_base(self):
entries = ['a', 'ab', 'not-this', 'dc']
self.fs._set_entries('/path/inside/dir', entries)
for e in entries:
self.fs._set_dir(f"/path/inside/dir/{e}")
config = {
'location': '/path',
'base': 'inside/dir',
'selection': [
'!not-this',
'!a',
'!c'
]
}
v2.build_entries('to/vdir', [config], self.fs)
self.assertEqual(
self.fs.created_dirs,
[
'to/vdir/inside',
'to/vdir/inside/dir'
])
self.assertEqual(
set(self.fs.created_links),
set([
('/path/inside/dir/ab', 'to/vdir/inside/dir/ab'),
('/path/inside/dir/dc', 'to/vdir/inside/dir/dc')
]))
def test_for_many_not_entries_without_base(self):
entries = ['a', 'ab', 'not-this', 'dc']
self.fs._set_entries('/path', entries)
for e in entries:
self.fs._set_dir(f"/path/{e}")
config = {
'location': '/path',
'selection': [
'!not-this',
'!a',
'!c'
]
}
v2.build_entries('to/vdir', [config], self.fs)
self.assertEqual(
self.fs.created_dirs,
[])
self.assertEqual(
set(self.fs.created_links),
set([
('/path/ab', 'to/vdir/ab'),
('/path/dc', 'to/vdir/dc')
]))
def test_not_entry_ignore_file(self):
dir_entries = ['a', 'not-this', 'b']
for e in dir_entries:
self.fs._set_dir(f"/path/{e}")
file_entries = ['c', 'd']
self.fs._set_entries('/path', dir_entries + file_entries)
config = simple_entry('/path', '!not-this')
v2.build_entries('to/vdir', [config], self.fs)
self.assertEqual(
self.fs.created_dirs,
[])
self.assertEqual(
set(self.fs.created_links),
set([
('/path/a', 'to/vdir/a'),
('/path/b', 'to/vdir/b')
]))
def test_for_many_entries(self):
entries = ['a', 'not-this', 'b']
self.fs._set_entries('/root', entries)
for e in entries:
self.fs._set_dir(f"/root/{e}")
structure = [
simple_entry('/a/b/dir', 'f1'),
simple_entry('/my/home/has/dir', 'f2'),
simple_entry('/root', '!not-this')
]
v2.build_entries('path/to/vdir', structure, self.fs)
self.assertEqual(
set(self.fs.created_links),
set([
('/a/b/dir/f1', 'path/to/vdir/f1'),
('/my/home/has/dir/f2', 'path/to/vdir/f2'),
('/root/a', 'path/to/vdir/a'),
('/root/b', 'path/to/vdir/b')
]))
def test_with_existing_entry(self):
self.fs._set_file('path/to/vdir/f1')
structure = [simple_entry('/a/b/dir', 'f1')]
v2.build_entries('path/to/vdir', structure, self.fs)
self.assertEqual(list(self.fs.created_links), [])
class V2Test(unittest.TestCase):
def test_integration(self):
fs = TestApi(home = '/home/charlie', cwd = '/usr')
entries = ['a', 'not-this', 'b']
fs._set_entries('/root', entries)
for e in ['/home/charlie/dir/f1', '/usr/other/f2', '/root/a', '/root/b']:
fs._set_dir(os.path.dirname(e))
fs._set_dir(e)
fs._set_dir('/var')
fs._set_dir('/log')
log_entries = ['apache', 'nginx', 'tomcat', 'zgc']
fs._set_entries('/var/log', log_entries)
for e in log_entries: fs._set_dir(f"/var/log/{e}")
var_entries = ['log', 'db', 'games']
fs._set_entries('/var', var_entries)
for e in var_entries: fs._set_dir(f"/var/{e}")
structure = [
'~/dir:f1',
'../other:f2',
'/root:!not-this',
{
"location": '/var',
"selection": ['!cache', '!log']
},
{
"location": '/var',
"base": 'log',
"selection": ['nginx', 'apache']
}
]
v2.process('vdir', {"structure": structure}, fs)
self.assertEqual(
set(fs.created_links),
set([
('/home/charlie/dir/f1', 'vdir/f1'),
('/usr/other/f2', 'vdir/f2'),
('/root/a', 'vdir/a'),
('/root/b', 'vdir/b'),
('/var/db', 'vdir/db'),
('/var/games', 'vdir/games'),
('/var/log/apache', 'vdir/log/apache'),
('/var/log/nginx', 'vdir/log/nginx')
]))
if __name__ == '__main__':
unittest.main()
| true |
9076ef8d82d04b336093922a068a6d1e62a7220d | Python | Alexandre1212/Householdfinance- | /Householdfinance.py | UTF-8 | 1,245 | 3.046875 | 3 | [] | no_license | import tkinter as tk
class Main(tk.Frame):
    """Main application frame: a toolbar with an 'add entry' button."""

    def __init__(self,root):
        super().__init__(root)
        self.init_main()

    def init_main(self):
        """Build the toolbar and its 'add position' button."""
        toolbar = tk.Frame(bg='#d7dBe0',bd=2)
        toolbar.pack(side=tk.TOP,fill=tk.X)

        # NOTE(review): hard-coded absolute icon path — confirm it exists on
        # the deployment machine.
        self.add_img = tk.PhotoImage(file="\\home\\Alexsandre\\Документы\\Householdfinance\\755917060980866.gif")
        # Bug fix: 'd7d80' is not a valid Tk color name (missing '#' and a
        # hex digit) and raised TclError; use the toolbar colour instead.
        btn_open_dialog = tk.Button(toolbar,text='Добавить позицию',command=self.open_dialog,bg='#d7dBe0',bd=0,
                                    compound=tk.TOP,image=self.add_img)
        btn_open_dialog.pack(side=tk.LEFT)

    def open_dialog(self):
        """Open the add income/expense dialog."""
        Child()
class Child(tk.Toplevel):
    # Modal dialog for adding an income/expense entry.
    def __init__(self):
        # NOTE(review): relies on the module-level `root` created in the
        # __main__ block rather than an explicit parent argument.
        super().__init__(root)
        self.init_child()
    def init_child(self):
        """Configure and modal-grab the add income/expense dialog window."""
        self.title("Добавить расходы/доходы")
        self.geometry("400x220+400+300")
        self.resizable(False,False)
        self.grab_set()
        self.focus_set()
if __name__=="__main__":
root = tk.Tk()
app = Main(root)
app.pack()
root.title("Household finance")
root.geometry("650x450+300+200")
root.resizable(False,False)
root.mainloop() | true |
be7fcb7c7b52cb70cb2d7d063b05d79466c87792 | Python | Xutete/TransPy | /testgui.py | UTF-8 | 2,284 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: Zhanhong Cheng
"""
import matplotlib as mpl
import numpy as np
# mpl.use('Qt5Agg')
from matplotlib import pyplot as plt
# mpl.matplotlib_fname()
# mpl.rcParams['backend']= 'Qt5Agg'
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.fill_between([1,2,3], [2,3,4], [3,5,4.5], where=None, interpolate=False, step=None)
line = ax.plot([1,2,3],color='r',linestyle='dashed', marker='o',fillstyle='left')
mpl.rcParams['lines.antialiased']=False
mpl.rcParams['patch.antialiased']=False
x = np.random.rand(0,10000)
y = np.random.rand(0,10000)
plt.plot(x,x)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.patheffects as path_effects
import matplotlib.transforms as transforms
fig = plt.figure()
ax=fig.add_subplot(111)
xx = np.random.rand(1,10000)
lines=ax.scatter(xx,xx,'ro')
x = np.arange(0., 2., 0.01)
y = np.sin(2*np.pi*x)
line, = ax.plot(x, y, lw=3, color='blue')
# shift the object over 2 points, and down 2 points
dx, dy = 2/72., -2/72.
offset = transforms.ScaledTranslation(dx, dy,
fig.dpi_scale_trans)
shadow_transform = ax.transData + offset
# now plot the same data with our offset transform;
# use the zorder to make sure we are below the line
ax.plot(x, y, lw=3, color='gray',
transform=shadow_transform,
zorder=0.5*line.get_zorder())
t = ax.text(0.02, 0.5, 'Hatch shadow', fontsize=75, weight=1000, va='center')
t.set_path_effects([path_effects.PathPatchEffect(offset=(4, -4), hatch='xxxx',
facecolor='gray'),
path_effects.PathPatchEffect(edgecolor='white', linewidth=1.1,
facecolor='black')])
plt.show()
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
fig=plt.figure()
ax = fig.add_axes([0,0,1,1])
line = ax.plot([0,2,3])
line[0].set_path_effects([path_effects.Stroke(linewidth=3, foreground='black'),
path_effects.Normal()])
text = plt.text(0.5, 0.5, 'Hello DIR effects world!',
path_effects=[path_effects.withSimplePatchShadow()])
plt.plot([0, 3, 2, 5], linewidth=5, color='blue',
path_effects=[path_effects.SimpleLineShadow(),
path_effects.Normal()])
plt.show() | true |
b9ad511ec2af30de0658b4f7e5650b947f6d9420 | Python | z-kidy/python-app | /socket_server.py | UTF-8 | 1,849 | 2.890625 | 3 | [] | no_license | # encoding=utf-8
# disigned by kidy zhang
import socket
import sys
from pymouse import PyMouse
# import thread
import json
HOST = '' # Symbolic name meaning all available interfaces
PORT = 10002 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket created'
try:
s.bind((HOST, PORT))
except socket.error, msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
s.listen(10)
print 'Socket now listening'
class MouseWork(PyMouse):
    """Dispatch remote commands ('click'/'move') to PyMouse actions."""
    def __init__(self):
        # NOTE(review): PyMouse.__init__ is never called here — confirm the
        # base class tolerates that on all platforms.
        # self.content = content
        self.method = {'click': self.clickmouse,
                       'move': self.movemouse}
    def clickmouse(self, content):
        """Click at the current pointer position.

        content 'left' maps to button 1; anything else maps to button 2.
        NOTE(review): button numbering is PyMouse/platform specific —
        verify that 2 is the intended (right) button on the target OS.
        """
        self.x = self.position()[0]
        self.y = self.position()[1]
        if content == 'left':
            self.click(self.x, self.y, 1)
        else:
            self.click(self.x, self.y, 2)
    def movemouse(self, content):
        """Move the pointer by the 'dx,dy' offset encoded in *content*."""
        self.x = self.position()[0]
        self.y = self.position()[1]
        xy = content.split(',')
        offset_x = int(xy[0])
        offset_y = int(xy[1])
        self.move(self.x + offset_x, self.y + offset_y)
conn, addr = s.accept() # accept a connnection
print 'A New Connected with ' + addr[0] + ':' + str(addr[1])
mymouse = MouseWork()
while 1:
data = conn.recv(2048) # 目前限定最大一次获取2048个字符数据
data = data.split('#') # every data is splited by '#'
for d in data[0:-1]:
d = json.loads(d) # loads json
method = d.get('method')
content = d.get('content')
mymouse.method[method](content)
# while 1:
# now keep talking with the client
# mouse = thread.start_new_thread(pymouse_action, (conn,))
# 一个多线程的设计
s.close()
| true |
68c8ecd0b85b18af05b54239ffc339b013f07107 | Python | pipkin0265/CTI-110 | /P4HW2_MealTipTax_MichaelPipkin (1).py | UTF-8 | 748 | 4.28125 | 4 | [] | no_license | #Calculates the total amount of a meal purchased at a restaurant
#09/24/2020
#CTI-110 P2HW1 - Meal Tip Tax Calculator
#Michael Pipkin II

# Meal cost entered by the user.
food_cost = float(input('Enter food cost: '))

# Bug fix: the original printed "enter tip again" for an unsupported
# percentage but then computed with the invalid value anyway; now we really
# re-prompt until 15, 18 or 20 is entered.
tip_rate = float(input('Enter tip amount of 15, 18, or 20: '))
while tip_rate not in (15, 18, 20):
    print("Invalid! Please enter tip again!")
    tip_rate = float(input('Enter tip amount of 15, 18, or 20: '))
print('%d%%' % tip_rate)

# Sales tax rate, in percent.
tax_rate = 6.0

# Tip and tax amounts derived from the food cost.
tip_amount = food_cost * (tip_rate / 100)
tax_amount = food_cost * (tax_rate / 100)

# Report the results to two decimal places.
print("Calculated Tip: %.2f" % (tip_amount))
print("Calculated Tax: %.2f" % (tax_amount))
print("Total cost including tip and tax: %.2f" % (food_cost + tip_amount + tax_amount))
| true |
581b04b37a1d18f2b5a62b99fd04d6bbceada04d | Python | nasosmon/python-projects | /a1.py | UTF-8 | 343 | 2.84375 | 3 | [] | no_license |
# Read all whitespace-separated words from the file.
# NOTE(review): the file handle is never closed — consider a `with` block.
d = open(r"C:\Users\nasos\Desktop\destiny2.txt").read().split()
# Longest words first.
d.sort(key = len, reverse = True)
# Keep the five longest words.
k = [None] * 5
for i in range (0, 5):
    k[i] = d [i]
j = 0
for x in k:
    a = k[j]
    # Strip lowercase vowels, then print the word reversed.
    a = a.translate({ord(i): None for i in "aeiou"})
    print (a[::-1])
    j = j + 1
| true |
c0ff52dd7a8447af85303cbefba554f748946f0c | Python | smohapatra1/scripting | /python/practice/day46/print_triangle_problem_180_degree_roration.py | UTF-8 | 424 | 3.703125 | 4 | [] | no_license | def triangle_180(n):
    #Number of spaces: row i is indented by k = 2*n - 2 - 2*i columns,
    #so the triangle is right-aligned ("rotated 180 degrees").
    if n <= 50:
        k = 2*n - 2
        for i in range(0,n):
            # leading spaces for the right alignment
            for j in range(0,k):
                print (end =" ")
            # decrementing k after each loop
            k = k - 2
            # i+1 star cells on row i
            for j in range(0, i+1):
                print ("* ", end="")
            # NOTE(review): print("\r") emits carriage-return + newline;
            # a bare print() is probably what was meant.
            print ("\r")
    else:
        print ("Out of range")
triangle_180(int(input("Enter number : ")))
| true |
404add1153ee7a8c6a5d4533f85d279938b19d2e | Python | cpe202fall2018/lab0-darrylyeo | /planets.py | UTF-8 | 349 | 3.8125 | 4 | [] | no_license | # Assignment:
# http://users.csc.calpoly.edu/~phatalsk/202/labs/lab00/lab00.pdf
def weight_on_planets():
    """Prompt for an Earth weight and report the Mars/Jupiter equivalents."""
    earth_weight = float(input('What do you weigh on earth? '))
    mars_weight = earth_weight * 0.38
    jupiter_weight = earth_weight * 2.34
    message = ('\nOn Mars you would weigh {} pounds.'
               '\nOn Jupiter you would weigh {} pounds.').format(mars_weight,
                                                                 jupiter_weight)
    print(message)
if __name__ == '__main__':
weight_on_planets() | true |
47f2adc522c1d050be138349aa5eb3b63b91f3b6 | Python | WarrenWeckesser/odeintw | /setup.py | UTF-8 | 1,937 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# Copyright (c) 2014, Warren Weckesser
# All rights reserved.
# See the LICENSE file for license information.
from os import path
from setuptools import setup
def get_odeintw_version():
"""
Find the value assigned to __version__ in odeintw/__init__.py.
This function assumes that there is a line of the form
__version__ = "version-string"
in odeintw/__init__.py. It returns the string version-string, or None if
such a line is not found.
"""
with open(path.join("odeintw", "__init__.py"), "r") as f:
for line in f:
s = [w.strip() for w in line.split("=", 1)]
if len(s) == 2 and s[0] == "__version__":
return s[1][1:-1]
_descr = ('Solve complex and matrix differential equations '
'with scipy.integrate.odeint.')
_long_descr = """
odeintw
=======
`odeintw` provides a wrapper of `scipy.integrate.odeint` that allows it to
handle complex and matrix differential equations. That is, it can solve
equations of the form
dZ/dt = F(Z, t, param1, param2, ...)
where `t` is real and `Z` is a real or complex array.
Since `odeintw` is just a wrapper of `scipy.integrate.odeint`, it requires
`scipy` to be installed. SciPy 0.15 or greater is required, to avoid a
bug in `scipy.stats.odeint` in older versions of SciPy.
See README.md at https://github.com/WarrenWeckesser/odeintw for examples.
"""
setup(name='odeintw',
version=get_odeintw_version(),
description=_descr,
long_description=_long_descr,
author='Warren Weckesser',
url='https://github.com/WarrenWeckesser/odeintw',
packages=['odeintw'],
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
keywords="scipy odeint",
install_requires=['scipy'])
| true |
f28033c092fc0f4743d9199971f2ce0d70ecbcde | Python | patataofcourse/yaBMGr | /main.py | UTF-8 | 924 | 2.546875 | 3 | [] | no_license | import click
import bmg
# Tool version shown in the startup banner.
VERSION = "v1"
# NOTE(review): this banner prints at import time, not only when run as a
# script -- presumably intentional for a CLI-only module; confirm if the
# module is ever imported as a library.
click.echo(f"yaBMGr {VERSION} by patataofcourse")
# Root click command group; the "unpack" and "pack" subcommands below
# attach to it.
@click.group(help="converts BMG to RBMG and back",options_metavar='')
def cli():
    # Group callback: no shared options, so there is nothing to do here.
    pass
# "unpack" subcommand: convert a binary BMG file to the readable RBMG form.
@cli.command( "unpack",
    help="converts a BMG into the readable format RBMG",
    no_args_is_help = True,
    options_metavar='[-o/--output OUTPUT]'
    )
@click.argument("input")
@click.option("-o", "--output", default=None)
def unpack(input, output):
    # input: path of the BMG file to read; output: optional destination
    # (bmg.unpack presumably picks a default when it is None -- TODO
    # confirm against bmg.py).  The parameter name "input" shadows the
    # builtin, but it is part of the click-generated CLI, so it is kept.
    bmg.unpack(input, output)
    click.echo("Done!")
# "pack" subcommand: the inverse conversion, RBMG back to Nintendo's BMG.
@cli.command( "pack",
    help="converts a RBMG back into Nintendo's BMG",
    no_args_is_help = True,
    options_metavar='[-o/--output OUTPUT]'
    )
@click.argument("input")
@click.option("-o", "--output", default=None)
def pack(input, output):
    # input: path of the RBMG file; output: optional destination path
    # (default handling presumably lives in bmg.pack -- TODO confirm).
    bmg.pack(input, output)
    click.echo("Done!")
# Dispatch to the click group when executed as a script.
if __name__ == "__main__":
    cli()
3a051f8f37a2c44160d8cd94495351b948e83507 | Python | fcchou/tensorflow_models | /tensorflow_models/dbn.py | UTF-8 | 3,540 | 2.78125 | 3 | [] | no_license | import tensorflow as tf
from tensorflow_models.mlp import MultiLayerPerceptron
from tensorflow_models.rbm import RBMLayer, RBM
class DeepBeliefNet(MultiLayerPerceptron):
    """A multi-layer perceptron whose hidden layers can additionally be
    pre-trained, layer by layer, as Restricted Boltzmann Machines before
    the usual supervised training inherited from MultiLayerPerceptron."""
    def __init__(
        self,
        input_size,
        hidden_layer_sizes,
        n_output_class=2,
        learning_rate=0.001,
        regularization=0.0001,
        n_iter=50000,
        batch_size=100,
    ):
        """Deep belief network implemented with TensorFlow.

        It works like a multi-layer perceptron, but the layers are first
        pretrained by RBM.  The input data is assumed to be in [0, 1].

        Args:
            input_size (int): Number of features in the input.
            hidden_layer_sizes (list):
                List of int representing the sizes of the hidden layers.
                The list length determines the depth of the network.
            n_output_class (int):
                Number of output classes in the labels. The training labels
                must be in [0, n_output_class).
            learning_rate (float): Learning rate of the gradient descent.
            regularization (float): Strength of the L2 regularization.
                Use 0 to skip regularization.
            n_iter (int): Positive integer. The number of gradient descent
                iterations.
            batch_size (int): Size of the mini batch.
        """
        # Sigmoid activations keep every hidden layer's output in [0, 1],
        # the same range the RBM pretraining below assumes for its input.
        super().__init__(
            input_size=input_size,
            hidden_layer_sizes=hidden_layer_sizes,
            n_output_class=n_output_class,
            learning_rate=learning_rate,
            regularization=regularization,
            n_iter=n_iter,
            batch_size=batch_size,
            activation=tf.nn.sigmoid,
        )
        # Skip the last log-reg layer for RBM pretraining.  Each RBMLayer is
        # handed the MLP layer's *own* weight/bias variables, so pretraining
        # updates the network's weights in place; the extra zero-vector
        # tf.Variable (sized input_size) appears to be the visible-unit
        # bias -- TODO confirm against RBMLayer's constructor.
        self._rbm_layers = [
            RBMLayer(
                layer.weight,
                layer.bias,
                tf.Variable(tf.zeros([layer.input_size])),
            )
            for layer in self._layers[:-1]
        ]
        # Initialize all variables, including the visible-bias variables
        # created just above.
        self._sess.run(tf.global_variables_initializer())
    def pretrain(
        self,
        x,
        learning_rate=0.001,
        n_iter=5000,
        batch_size=100,
        negative_sample_size=100,
        regularization=0.00001,
        cd_k=1,
    ):
        """Layer-wise RBM pretraining.

        Args:
            x (numpy.ndarray): Feature vectors for the training set.
            learning_rate (float): Learning rate of the gradient descent.
            n_iter (int): Number of gradient descent iterations.
            batch_size (int): Size of the mini batch.
            negative_sample_size (int): Number of negative sample particles
                to kept during CD for each iteration.
            regularization (float): Strength of the L2 regularization.
                Use 0 to skip regularization.
            cd_k (int): Number of CD steps to perform.
        """
        # Greedy layer-wise scheme: each RBM trains on the activations
        # produced by the layers below it (which were pretrained first).
        training_x = x
        for layer_idx, rbm_layer in enumerate(self._rbm_layers):
            # Pretrain the layer
            rbm_training = RBM(
                rbm_layer,
                learning_rate=learning_rate,
                n_iter=n_iter,
                batch_size=batch_size,
                negative_sample_size=negative_sample_size,
                regularization=regularization,
                cd_k=cd_k,
                session=self._sess,
            )
            rbm_training.train(training_x)
            # Input for the next layer: this layer's output computed from
            # the original features (its weights were just updated in
            # place by the RBM training above).
            training_x = self._sess.run(
                self._layers[layer_idx].output,
                feed_dict={self._x: x},
            )
278bde4732c6458eb8e72471064f6e6aed9df920 | Python | webserver3315/Laptop_Ubuntu_PSBOJ | /3-2/PuEn/2020_src/parse_tree.py | UTF-8 | 4,943 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python
class Node():
    """A node of a parse tree for a small C-like language.

    Attributes:
        data: grammar symbol or token text stored at this node.
        children: child Node objects, in left-to-right order.
        parent: parent Node, or None for the root.
        index: this node's position inside ``parent.children``.
        id: lexeme attached to identifier/number tokens (None otherwise).
        scope: scope path this node belongs to (defaults to ["global"]).
    """

    def __init__(self, data, parent, index, id=None, scope=None):
        self.data = data
        self.children = []
        self.parent = parent
        self.index = index
        self.id = id
        # Bug fix: the default used to be the *shared* mutable list
        # ["global"], so mutating one node's scope changed every node that
        # relied on the default.  Each node now gets its own fresh list.
        self.scope = ["global"] if scope is None else scope

    def __repr__(self, level=0):
        """Render this subtree, one node per line, indented by depth."""
        ret = str(level) + "|" + "\t" * level + repr(self.data)
        ret += "\n"
        for child in self.children:
            ret += child.__repr__(level + 1)
        return ret

    def search_inorder(self):
        """Return the node that follows self in a pre-order walk.

        Returns 0 (the historical sentinel, kept for compatibility with
        existing callers) when self is the last node of the tree.
        """
        node = self
        if node.children:
            return node.children[0]
        while node.parent is not None:
            if node.index != len(node.parent.children) - 1:
                return node.parent.children[node.index + 1]
            node = node.parent
        return 0

    def set_child(self, data):
        """Create one child per item of *data*, in order, and return the
        first child created.

        Assumes *data* is non-empty and that this node had no prior
        children (child indices restart at 0 on every call).
        """
        for idx, item in enumerate(data):
            self.children.append(Node(item, self, idx))
        return self.children[0]

    def advance(self):
        """Return the next *leaf* in a left-to-right traversal.

        Climbs while the current node is the last child of its parent; if
        the climb runs off the top of the tree, the subtree root reached
        is returned instead (callers use that as the stop signal).
        Otherwise the walk descends to the left-most leaf of the next
        sibling.
        """
        index = self.index
        node = self
        cur = self.parent
        while True:
            if cur is not None:
                if len(cur.children) - 1 == index:
                    # Last child of its parent: keep climbing.
                    index = cur.index
                    node = cur
                    cur = cur.parent
                else:
                    break
            else:
                # Reached above the root.
                return node
        cur = cur.children[index + 1]
        while len(cur.children) != 0:
            cur = cur.children[0]
        return cur

    def node_print(self):
        """Print the leaves of this subtree left-to-right on one line,
        substituting the stored lexeme (``id``) for number/identifier
        tokens.  (The right-most leaf is printed via ``data`` even for
        token nodes -- preserved from the original implementation.)"""
        node = self
        mem = self
        # mem: right-most leaf (the stopping point).
        while mem.children:
            mem = mem.children[-1]
        # node: left-most leaf (the starting point).
        while len(node.children) != 0:
            node = node.children[0]
        while node != mem:
            if node.data in ['[0-9]*', '([a-z] | [A-Z])*']:
                print(node.id, end=' ')
            else:
                print(node.data, end=' ')
            node = node.advance()
        print(node.data)

    def get_root(self):
        """Return the root of the tree containing this node."""
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    def set_symbol_table(self):
        """Walk the leaves and build a symbol table.

        The left-most leaf is recorded as the function name; afterwards a
        "char"/"int" leaf followed by an identifier token records a
        variable, "IF"/"WHILE"/"ELSE" push a scope level, and "}" pops
        one.

        Returns:
            A list of ``[identifier, type, scope-path]`` triples.
        """
        node = self
        scope = ["global"]
        symbol_table = []
        # Start from the left-most leaf: assumed to be the function name.
        while len(node.children) != 0:
            node = node.children[0]
        symbol_table.append([node.id, "function", list(scope)])
        scope.append(node.id)
        node = node.advance()
        while node.parent is not None:
            if node.data in ["char", "int"]:
                node_type = node.data
                node = node.advance()
                if node.data == "([a-z] | [A-Z])*":
                    symbol_table.append([node.id, node_type, list(scope)])
                    node = node.advance()
                    continue
            elif node.data in ["IF", "WHILE", "ELSE"]:
                scope.append(node.data)
            elif node.data == "}":
                scope.pop()
            node = node.advance()
        return symbol_table

    def get_node_with_keyword(self, str):
        """Collect every node in this subtree whose data equals *str*,
        in pre-order.  (The parameter name shadows the builtin but is
        kept for interface compatibility.)"""
        matches = []
        if self.data == str:
            matches.append(self)
        for child in self.children:
            matches.extend(child.get_node_with_keyword(str))
        return matches

    def getleft(self):
        """Return the subtree immediately to the left of this one,
        climbing past ancestors that are first children."""
        node = self
        while node.index == 0:
            node = node.parent
        return node.parent.children[node.index - 1]

    def getright(self):
        """Return the subtree immediately to the right of this one,
        climbing past ancestors that are last children."""
        node = self
        while True:
            if len(node.parent.children) > node.index + 1:
                break
            else:
                node = node.parent
        return node.parent.children[node.index + 1]

    def get_binarySyntaxTree(self):
        """Convert this expression (sub)parse tree into a binary syntax
        tree rooted at the first operator found in a breadth-first scan.

        Non-operator nodes have their children queued instead; when no
        operator exists, the last node dequeued becomes the root (its
        ``id`` is preferred over ``data`` when it is truthy).
        """
        operators = ["=", "+", "*", ">"]
        queue = []
        root = None
        for child in self.children:
            queue.append(child)
        while True:
            node = queue.pop(0)
            if node.data in operators:
                if root is None:
                    if node.id:
                        root = Node(node.id, node.parent, node.index, node.id)
                    else:
                        root = Node(node.data, node.parent, node.index, node.id)
                if len(root.children) == 0:
                    # Recursively binarize the operand subtrees on either
                    # side of the operator.
                    left = node.getleft()
                    root.children.append(left.get_binarySyntaxTree())
                    right = node.getright()
                    root.children.append(right.get_binarySyntaxTree())
            else:
                for grandchild in node.children:
                    queue.append(grandchild)
            if len(queue) == 0:
                if root is None:
                    if node.id:
                        root = Node(node.id, node.parent, node.index, node.id)
                    else:
                        root = Node(node.data, node.parent, node.index, node.id)
                break
        return root
| true |
a8ad5c484b4119b5fcf07ebb072a96535ed9c8c5 | Python | kokoa-naverAIboostcamp/algorithm | /Algorithm/solution/BOJ2602.py | UTF-8 | 641 | 2.890625 | 3 | [] | no_license | # 2602번: 돌다리 건너기 (라이언)
# BOJ 2602 ("crossing the stone bridge"): count the ways to step across a
# two-row bridge so the stepped-on tiles spell out the scroll string,
# switching rows on every step.  A leading space pads both the scroll and
# the bridge rows so indexing can start at 1.
scroll=' '+input()
bridge=[' '+input() for _ in range(2)]
size=len(bridge[0])
# dp[i][x][y] appears to count the ways to spell scroll[1..i] with the
# last step on row x, cumulative over columns up to y (via the
# dp[i][x][y-1] carry term below) -- standard prefix-sum DP for this
# problem.
dp=[[[0]*size for _ in range(2)] for _ in range(len(scroll))]
# Initialisation: before matching any character there is one way
# everywhere.
for y in range(0,size):
    for x in range(2):
        dp[0][x][y]=1
for i in range(1,len(scroll)):
    for y in range(1,size):
        for x in range(2):
            # Each step must come from the opposite row.
            across=not x
            # When this tile carries the next scroll character, extend
            # every crossing that matched i-1 characters on the other row
            # strictly to the left.
            if bridge[x][y] == scroll[i] :
                dp[i][x][y]=dp[i-1][across][y-1]+dp[i][x][y-1]
            else:
                dp[i][x][y]=dp[i][x][y-1]
print(dp[-1][0][-1]+dp[-1][1][-1]) | true |
def leap_year(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    Accepts anything ``int()`` can convert (an int or a numeric string,
    as the original did).  A year is a leap year when it is divisible by
    4, except century years, which must also be divisible by 400.
    """
    # Convert once instead of re-parsing the argument for every
    # divisibility test, and collapse the nested ifs into the standard
    # single boolean expression.
    y = int(year)
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
| true |
451bea51cc194fd8e9cd54fa89041d3c5f63a483 | Python | nlml/liamlib | /liamlib/labelimg_tools.py | UTF-8 | 1,748 | 2.734375 | 3 | [] | no_license | import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def plot_img_with_rects(im, rects, edgecolor='r'):
    """Show image *im* with one rectangle drawn per entry of *rects*.

    Each entry of *rects* is an (xmin, ymin, xmax, ymax) box; boxes are
    outlined (no fill) in *edgecolor* with a line width of 3.
    """
    fig, ax = plt.subplots(1, figsize=[7, 7])
    # Display the image itself, then overlay the boxes.
    ax.imshow(im)
    for x_min, y_min, x_max, y_max in rects:
        box = patches.Rectangle(
            (x_min, y_min),
            x_max - x_min,
            y_max - y_min,
            linewidth=3,
            edgecolor=edgecolor,
            facecolor='none',
        )
        ax.add_patch(box)
    plt.show()
def make_labelimg_xml(imgfilename, boxes):
    """Build a labelImg / Pascal-VOC style XML annotation string.

    Bug fix: the original template completely ignored *imgfilename* and
    always emitted a hard-coded test filename/path; the argument is now
    substituted into both the <filename> and <path> elements.  The image
    <size> values (474x621x3) remain hard-coded from the original
    template -- TODO: parameterize if images of other sizes are used.

    Args:
        imgfilename: image file name written into the annotation.
        boxes: iterable of (xmin, ymin, xmax, ymax) tuples; each box's
            position in the iteration order becomes its <name> label.

    Returns:
        The annotation document as a string.
    """
    out = '\n<annotation>\n  <folder>labelimgtest</folder>\n  <filename>{0}</filename>\n  <path>/home/liam/labelimgtest/{0}</path>\n  <source>\n    <database>Unknown</database>\n  </source>\n  <size>\n    <width>474</width>\n    <height>621</height>\n    <depth>3</depth>\n  </size>\n  <segmented>0</segmented>'.format(imgfilename)  # noqa
    for i, b in enumerate(boxes):
        out += '\n\t<object>\n\t\t<name>{0}</name>\n\t\t<pose>Unspecified</pose>\n\t\t<truncated>0</truncated>\n\t\t<difficult>0</difficult>\n\t\t<bndbox>\n\t\t\t<xmin>{1}</xmin>\n\t\t\t<ymin>{2}</ymin>\n\t\t\t<xmax>{3}</xmax>\n\t\t\t<ymax>{4}</ymax>\n\t\t</bndbox>\n\t</object>\n'.format(i, *b)  # noqa
    out += '\n</annotation>'
    return out
def labelimg_xml_to_json(labelimgxml_path):
    """Parse a labelImg annotation file into a list of bbox records.

    Each <object> element yields one ``{"id": 0, "bbox": [xmin, ymin,
    xmax, ymax]}`` dict (coordinates as ints, in that fixed order).
    """
    annotation = ET.parse(labelimgxml_path).getroot()
    records = []
    for obj in annotation.findall('object'):
        box = obj.find('bndbox')
        coords = [int(box.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        records.append({"id": 0, "bbox": coords})
    return records
| true |
f2099c99f5613cc8ef8c2d8dbc568c5f0eca7af3 | Python | mmngreco/py_est2 | /1 - VA DISCRETA /EST2_practica_t1.py | UTF-8 | 1,331 | 4.28125 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import scipy.stats as st
# NOTE: Python 2 syntax (bare print statements); run under a Python 2
# interpreter.  Worked classroom example of Poisson probabilities using
# scipy.stats; the printed strings (in Spanish) are program output and are
# therefore left untouched.
print '''
20. El **numero de maquinas reparadas** por un tecnico en un dia de trabajo sigue una **distribucion de Poisson de media 3**, calcular la probabilidad de que:
a. un dia cualquiera repare al menos 5.
b. un dia repare 5 sabiendo que ya ha reparado mas de 2
c. en una semana (5 dias) repare entre 12 y 16 maquinas
'''
# The number of machines repaired per day follows a Poisson distribution
# with rate lambda = 3 (machines per day).
lambda_a = 3
# Part (a): P(X >= 5), computed as the complement of the CDF at 4.
print "# APARTADO A:\n## TENEMOS QUE CALCULAR:"
print "P(X ≥ 5) = 1 - P(X ≤ 4)"
print "\nBuscamos en Tablas: "
print "P(X ≥ 5) =", 1 - st.poisson.cdf(4, lambda_a)
# Part (b): conditional probability P(X = 5 | X > 2) via Bayes' rule;
# P(X > 2 | X = 5) = 1 because X = 5 already implies X > 2.
print "\n# APARTADOS B: \n## TENEMOS QUE HALLAR:\n"
print ">>> P(X = 5 | X > 2) \n= P(X > 2 | X = 5 ) * P(X = 5) / P(X > 2)"
print "P(X > 2 | X = 5 ) = 1"
print "P(X = 5) =", st.poisson.pmf(5, lambda_a)
print "P(X > 2) = 1 - P(X ≤ 2) =", 1 - st.poisson.cdf(2, lambda_a)
print "P(X = 5 | X > 2) =", st.poisson.pmf(5, lambda_a) / (1 - st.poisson.cdf(2, lambda_a))
# Part (c): 12 to 16 machines over 5 days; Poisson rates add, so
# lambda = 3 * 5 = 15.
print """\n# APARTADO C: \n## TENEMOS QUE HALLAR:\n
En 5 días repare entre 12 y 16 maquinas:
Esto se traduce en: λ = 3 · 5 = 15
:
"""
# P(12 <= X <= 16) = F(16) - F(11); the lower CDF is evaluated at 11 so
# that the value 12 itself is included in the interval.
a = 11
b = 16
lambda_c = 15
p_a = st.poisson.cdf(a, lambda_c)
p_b = st.poisson.cdf(b, lambda_c)
print "P(12 ≤ x ≤ 16) = P(x ≤ 16) - P(x ≤ 11) =", p_b - p_a
135f8c337cc2981187e0f822566f0cff50c3efdb | Python | StefanBratovanov/PythonCourse | /Lecture5/Test2_sys_os.py | UTF-8 | 406 | 2.6875 | 3 | [] | no_license | import sys
import os
# print(sys.platform)
# print(sys.argv)
# print(os.access('.../.../..', os.W_OK))
# print(os.path.dirname('../Lecture5/Tests.py'))
# print(os.path.basename('/Lecture5/Tests.py'))
# print(os.path.exists('../Lecture5/Tests.py'))
# Walk the lecture directory tree, printing each directory entry and then
# the full path of every file it contains.
for dirpath, dirnames, filenames in os.walk('../Lecture5'):
    print(dirpath, '-----', dirnames, '-----', filenames)
    for name in filenames:
        print(os.path.join(dirpath, name))
| true |
58697aa5d5a1b224aea90fc01bb11646f2aed02e | Python | UVoggenberger/CEUAS | /CEUAS/public/uncertainties/analyse_DistributionsPlevels.py | UTF-8 | 5,525 | 2.765625 | 3 | [] | no_license | """ Plotting the errors distributions for different pressure levels
Author: Ambrogi Federico, federico.ambrogi@univie.ac.at
"""
from uncertainties_utils import *
""" Dirs, definitions, select datasets """
cov_file = 'data/covariance_matrices.npy'
variables = ['temp', 'uwind','vwind','speed','direction', 'rh']
variables = ['temp']
stations = ['Lindenberg']
netCDF = DataHandler()
Cov = Covariance(netCDF)
Plot = Plotter()
Plot.initialize_dirs()
matrices = np.load(cov_file, allow_pickle = True).item()
print('The matrices are', matrices.keys())
print('The matrices are', matrices['Lindenberg'].keys())
""" pressure levels to be considered """
LEVELS = [0,3,5,8,11,13,15]
res = {}
'''
""" Plotting the time series of the running mean """
for s in stations:
print ('***** Processing the station: ', s , '\n')
datums = matrices[s]['datums']
for v in variables:
res[v] = {}
print ('**** Variable: ', v )
Plot.plot_prop(var=v)
matrices_dic = matrices[s][v]
for h in [1]:
plevels_i = LEVELS
plevels_j = LEVELS # 11 is 500 hPa
for i,j in zip(plevels_i,plevels_j): # no off-diagonal elements
res[v][i] = {}
Plot.plot_prop(var = v, fg_p = i, an_p = j , hour = str(h))
print("*** Processing the i,j entries: ", i , j )
values = Cov.select_ijentry(matrices = matrices_dic[str(h)], i = i , j = j)
values_cleaned, outliers, lower, upper, median = netCDF.remove_outliers(values, cut= 1.5 )
""" The errors are the sqrt of the covariances; note that you must take the abs since they might be negative """
#values_cleaned = [ abs(np.sqrt(v)) for v in values ]
means, dates = [], []
for n in [60,365]:
res[v][i][n] = {}
res[v][i][n]['means'] = []
runningmean, date = Cov.running_mean_old(data= values_cleaned, n= n, datums= datums)
res[v][i][n]['means'].append(runningmean)
print('the results is', res)
np.save('results_for_distributions',res)
'''
res = np.load('data/plevels_distributions.npy', allow_pickle = True).item()
### PLOTTING
colors = ['blue','slateblue','lime','gold','orange','magenta','cyan']
labels_f = [10,20,30,50,70,100,150,200,250,300,400,500, 700,850,925, 1000]
labels = [10, 50, 100, 250, 500, 700, 1000]
bins = 25
FONT = 13
range = [0.,3.]
dic = {'uwind':'[m/s]' , 'vwind':'[m/s]' , 'speed':'[m/s]', 'temp':'[K]' , 'direction':'[Degree]', 'rh':''}
variables = ['temp','speed']
h = 1
for v in variables:
for c,i in zip(colors, LEVELS ):
data_60 = res[v][h][i][60]['means']
data_365 = res[v][h][i][365]['means']
plt.hist( data_365, histtype = 'stepfilled', range = range, bins = bins , color= c , alpha = 0.2, density = True)
plt.hist( data_365, histtype = 'step', range = range ,bins = bins , color= c , ls = '-', density = True, alpha=0.6)
v_n = v.replace('temp','Temperature').replace('uwind','Wind u-component').replace('vwind','Wind v-component')
for c,l in zip (colors, labels):
plt.plot([-100,-50], [-100,-50], color = c, label = str(l) + ' [hPa]' )
plt.title(v_n + ' errors comparison for different pressure levels', fontsize = FONT, y = 1.03)
plt.text(0.2 , 9.2 , 'Desroziers 1 year ', fontsize = FONT )
plt.legend(loc = 'upper right', fontsize = FONT-4)
plt.xlabel('Error ' + dic[v], fontsize = FONT)
plt.ylabel('Normalized Counts', fontsize = FONT)
plt.xlim(0.,3)
plt.ylim(0,10)
plt.grid(linestyle= ':', color = 'lightgray', lw = 1.2 )
plt.savefig('plots/plevels_histo/'+ v + '_Errors_Comparison_365.png', bbox_inches = 'tight')
plt.close()
for c,i in zip(colors, LEVELS ):
data_60 = res[v][h][i][60]['means']
data_365 = res[v][h][i][365]['means']
plt.hist( data_60, histtype = 'stepfilled', bins = bins , color= c , alpha = 0.2, density = True)
plt.hist( data_60, histtype = 'step', bins = bins , color= c , ls = '-', density = True, alpha=0.6)
for c,l in zip (colors, labels):
plt.plot([-100,-50], [-100,-50], color = c, label = str(l) + ' [hPa]' )
plt.text(0.2 , 9.2 , 'Desroziers 2 months', fontsize = FONT )
v_n = v.replace('temp','Temperature').replace('uwind','Wind u-component').replace('vwind','Wind v-component')
plt.title(v_n + ' errors comparison for different pressure levels', fontsize = FONT, y = 1.03)
plt.legend(loc = 'upper right', fontsize = FONT-4)
plt.xlabel('Error ' + dic[v], fontsize = FONT)
plt.ylabel('Normalized Counts', fontsize = FONT)
plt.xlim(0., 3)
plt.ylim(0, 10)
plt.grid(linestyle= ':', color = 'lightgray', lw = 1.2 )
plt.savefig('plots/plevels_histo/'+ v + '_Errors_Comparison_60.png', bbox_inches = 'tight')
plt.close()
| true |
76f2e70f25e5ea393ddabef04bee5dc2395e6b63 | Python | shnizzedy/SM_openSMILE | /utilities/recurse/recurse.py | UTF-8 | 2,971 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
recurse.py
Script to recursively collect files.
Author:
– Jon Clucas, 2017 (jon.clucas@childmind.org)
© 2017, Child Mind Institute, Apache v2.0 License
@author: jon.clucas
"""
import argparse, os
def recurse(top_dir):
    """Collect the paths of all files under `top_dir`, printing each one.

    Parameters
    ----------
    top_dir : string
        path of the top directory for files we want to read from

    Returns
    -------
    result : list
        either a list of all files to act upon or an empty list
    """
    # initialize result
    file_list = []
    print(''.join(["Getting files from `", top_dir, "`"]))
    # os.walk already descends into every subdirectory, so one pass
    # collects everything.  (Bug fix: the old code additionally called
    # recurse(dir) with a bare directory *name*, which resolved against
    # the current working directory and could duplicate or miss files.)
    for root, dirs, files in os.walk(top_dir):
        for file in files:
            filepath = os.path.join(root, file)
            print(''.join(["  ", filepath]))
            file_list.append(filepath)
    return file_list
def filter_list(file_list, requirement):
    """Keep only the entries of `file_list` that satisfy `requirement`.

    Parameters
    ----------
    file_list : list
        a list of filepaths; filtered **in place** (and also returned).
    requirement : string
        a Python expression fragment applied to each quoted file string,
        e.g. ".endswith('.wav')".

    Returns
    -------
    result : list
        the filtered `file_list`.
    """
    # SECURITY NOTE: `requirement` is eval'd verbatim -- never pass
    # untrusted input.  Kept because callers rely on the string-expression
    # interface.
    # Bug fix: the old code removed items from the list *while iterating
    # it*, which silently skips the element that follows every removal.
    kept = [file for file in file_list
            if eval(''.join(["'", file, "'", requirement]))]
    # Preserve the original in-place-mutation contract.
    file_list[:] = kept
    return file_list
def filter_recurse(top_dir, requirement):
    """Collect paths of files under `top_dir` that satisfy `requirement`.

    Parameters
    ----------
    top_dir : string
        path of the top directory for files we want to read from
    requirement : string
        a Python expression fragment applied to each quoted file *name*,
        e.g. ".endswith('.wav')".  SECURITY NOTE: it is eval'd verbatim;
        never pass untrusted input.

    Returns
    -------
    result : list
        either a list of matching files or an empty list
    """
    file_list = []
    print(''.join(["Getting files from `", top_dir, "`"]))
    # os.walk already recurses into subdirectories.  (Bug fix: the old
    # code also extended the result with recurse(dir) -- the *unfiltered*
    # collector, called with a bare directory name -- which could add
    # files that do not satisfy `requirement` at all.)
    for root, dirs, files in os.walk(top_dir):
        for file in files:
            if eval(''.join(["'", file, "'", requirement])):
                filepath = os.path.join(root, file)
                print(''.join(["  ", filepath]))
                file_list.append(filepath)
    return file_list
def main():
    """Command-line entry point: report files under IN_DIR, optionally
    filtered by the --req expression."""
    # script can be run from the command line to see results
    parser = argparse.ArgumentParser(description='get directory')
    parser.add_argument('in_dir', type=str)
    # --req is a Python expression fragment, e.g. ".endswith('.csv')",
    # evaluated against each file name by filter_recurse.
    parser.add_argument('--req', type=str)
    arg = parser.parse_args()
    if arg.req:
        filter_recurse(arg.in_dir, arg.req)
    else:
        recurse(arg.in_dir)
# ============================================================================
if __name__ == '__main__':
    main()
83279da6a64ad1b2ba4dc0dcb9920b0a941e6d7d | Python | Aasthaengg/IBMdataset | /Python_codes/p03549/s197691985.py | UTF-8 | 257 | 2.65625 | 3 | [] | no_license | import sys
def solve():
    """Read N and M from stdin and print the expected total time in ms.

    AtCoder ABC076C-style formula: each of the M resubmitted cases takes
    1900 ms and the remaining N-M cases take 100 ms, and the whole run is
    repeated until the M coin-flips all succeed, giving an expected total
    of (100*(N-M) + 1900*M) * 2**M.
    """
    readline = sys.stdin.buffer.readline
    n, m = map(int, readline().split())
    # Pure integer arithmetic replaces the old float expression
    # int(total / (1/2)**m), whose floating-point division could lose
    # precision even though the exact answer is always an integer.
    # (An unused `mod` constant was also dropped.)
    print((100 * (n - m) + 1900 * m) * 2 ** m)


if __name__ == '__main__':
    solve()
| true |
f7d7d1235097a8f1ee7312aeba97c671628675d2 | Python | madengr00/Mission_to_Mars | /mission_to_mars.py | UTF-8 | 4,166 | 2.953125 | 3 | [] | no_license | #Dependencies
from bs4 import BeautifulSoup
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
import requests
import pandas as pd
def scrape_info():
    """Scrape several NASA/Mars-related pages with a splinter-driven
    Chrome browser and return the results as a dict with keys
    "news_story", "featured_Image", "mars_weather", "mars_facts" and
    "mars_hemispheres"."""
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=False)
    # A dictionary to store the scraped data.
    mars_data = {}
    ################### NEWS STORY #####################
    # URL of page to be scraped
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # This finds only the top or newest news story.
    news_story = soup.find('li', 'slide')
    # Scrape the title and teaser paragraph.
    # NOTE(review): only the values from the *last* loop iteration
    # survive, and news_title/news_para would be undefined (NameError
    # below) if the loop body never runs -- confirm the page structure
    # guarantees at least one match.
    for story in news_story:
        news_title = story.find('div','content_title').text
        news_para = story.find('div','article_teaser_body').text
    mars_data["news_story"] = (news_title,news_para)
    print(news_title)
    print(news_para)
    ################## Featured Image ##################
    # URL of page to be scraped
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars/'
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # The image URL lives in the article's inline style attribute.
    background_image = soup.find('article')['style']
    print(background_image)
    # [23:-3] appears to strip the "background-image: url('...')" CSS
    # wrapper around the relative path -- depends on the exact attribute
    # format; TODO confirm.
    featured_image_url = "https://www.jpl.nasa.gov" + background_image[23:-3]
    print(featured_image_url)
    mars_data["featured_Image"] = (featured_image_url)
    #################### Mars Weather ###################
    # URL of page to be scraped
    url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # First tweet body on the Mars weather account.
    mars_weather = soup.find('p','TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text
    print(mars_weather)
    mars_data["mars_weather"] = (mars_weather)
    ###################### Mars Facts ####################
    # URL of page to be scraped
    url = 'http://space-facts.com/mars/'
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    # Read in any tables on the webpage (pandas fetches the URL itself).
    Mars_table = pd.read_html(url)
    # Name the columns of the first table found.
    mars_df = Mars_table[0]
    mars_df.columns = ['descriptor','value']
    # Set descriptor as index.
    mars_df.set_index('descriptor',inplace=True)
    # Notebook-style bare expression; has no effect when run as a script.
    mars_df
    # Convert dataframe to an HTML table string.
    mars_html_table = mars_df.to_html()
    mars_html_table
    # Strip unwanted newlines (\n).
    mars_html_table = mars_html_table.replace('\n','')
    final_mars_table = mars_html_table
    mars_data["mars_facts"] = (final_mars_table)
    print(final_mars_table)
    ################### Mars Hemispheres ##################
    # URL of page to be scraped
    url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    products = soup.find('div', class_='collapsible results')
    hemisphere = products.find_all('h3')
    titles_and_urls = []
    # Scrape the title and sample-image url for each hemisphere entry.
    # NOTE(review): click_link_by_partial_text('Enhanced') clicks the
    # *first* matching link on every iteration -- confirm this actually
    # advances through the four hemisphere pages as intended.
    for record in hemisphere:
        try:
            # Capture the title.
            title = record.text
            # Click on the link.
            browser.click_link_by_partial_text('Enhanced')
            # Find the Sample image link on the new page.
            downloads = browser.find_link_by_text('Sample').first
            image_url = downloads['href']
            # Store the title -> sample-image url pair.
            title_and_url = {title:image_url}
            titles_and_urls.append(title_and_url)
            print(title_and_url)
        except ElementDoesNotExist:
            print("Scraping Complete")
    print(titles_and_urls)
    mars_data["mars_hemispheres"] = (titles_and_urls)
    # Close the browser after scraping
    browser.quit()
    # Return mars data dictionary
    return(mars_data)
# NOTE(review): this prints the function *object*, not its result;
# scrape_info() (with parentheses) was probably intended -- confirm
# before relying on this line.
print(scrape_info)
03728ab0e4ae6295b93ef686e0ec4f85440396e9 | Python | arpithaupd/TrifonovRS.Deep_Learning_Portfolio.github.io | /Project 20: Sms spam collection/Dense_models.py | UTF-8 | 9,702 | 3.234375 | 3 | [] | no_license | """
PROJECT 20: Sms spam collection
TASK: Natural Language Processing to predict a SMS is spam or not spam
PROJECT GOALS AND OBJECTIVES
PROJECT GOAL
- Studying Feed-forward neural network for NPL
- Studying tokenization, text vectorisation and embedding
PROJECT OBJECTIVES
1. Exploratory Data Analysis
2. Training several dense model
3. Predict a SMS is spam or not spam
"""
# %%
# IMPORT LIBRARIES
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# %%
# LOAD AND EXPLORE DATASET
# Path to data
train_data_path = "data/train.csv"
test_data_path = "data/test.csv"
# Create dataframe
train_df = pd.read_csv(train_data_path, index_col=0)
test_df = pd.read_csv(test_data_path, index_col=0)
train_df.head()
# %%
train_df.columns = ["label","message"]
train_df.info()
train_df['label'].value_counts()
# %%
code_spam_ham = {"ham": 0,
"spam": 1}
train_df['label'] = train_df['label'].map(code_spam_ham)
train_df.head()
# %%
# Shuffle training dataframe
train_df_shuffled = train_df.sample(frac=1, random_state=42)
train_df_shuffled.head()
# %%
test_df.head()
test_df.info()
# %%
print(f"Total training samples: {len(train_df)}")
print(f"Total test samples: {len(test_df)}")
print(f"Total samples: {len(train_df) + len(test_df)}")
# %%
# Random training examples
random_index = random.randint(0, len(train_df) - 5)
for row in train_df_shuffled[["label", "message"]][random_index:random_index + 5].itertuples():
_, label, message = row
print(f"Target: {label}", "(spam)" if label > 0 else "(not spam)")
print(f"Text:\n{message}\n")
print("---\n")
# %%
# Split data
train_sentences, val_sentences, train_labels, val_labels = train_test_split(train_df_shuffled["message"].to_numpy(),
train_df_shuffled["label"].to_numpy(),
test_size=0.1,
random_state=42)
# Check the lengths
len(train_sentences), len(train_labels), len(val_sentences), len(val_labels)
# %%
# The first 10 training sentences and their labels
train_sentences[:10]
train_labels[:10]
# %%
# MULTINOMIAL NAIVE BAYES CLASSIFIER
# Baseline model: TF-IDF features feeding a Multinomial Naive Bayes
# classifier, wrapped in a single sklearn Pipeline.
model_0 = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("clf", MultinomialNB())
])
# .astype('str') guards against non-string entries in the sentence array.
model_0.fit(train_sentences.astype('str'), train_labels)
# %%
# EVALUATION RESULTS
baseline_score = model_0.score(val_sentences, val_labels)
print(f"Model accuracy: {baseline_score*100:.2f}%")
# %%
# Predictions
baseline_preds = model_0.predict(val_sentences)
# Notebook-style cell: the bare expression displays the first 20
# predictions when run interactively.
baseline_preds[:20]
# %%
def calculate_results(y_true, y_pred):
    """Return a dict with accuracy (in percent) and weighted-average
    precision, recall and F1 for the given true/predicted labels."""
    accuracy_pct = accuracy_score(y_true, y_pred) * 100
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average="weighted", labels=np.unique(y_pred))
    return {
        "accuracy": accuracy_pct,
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
baseline_results = calculate_results(y_true=val_labels,
y_pred=baseline_preds)
baseline_results
# %%
# EVALUATION AND VISUALIZATION OF MODEL PARAMETERS
def learning_curves(history):
    """Plot every metric recorded in a Keras *history* object on one
    figure, with the y-axis clamped to [0, 1]."""
    metrics_frame = pd.DataFrame(history.history)
    metrics_frame.plot(figsize=(20, 8))
    plt.grid(True)
    plt.title('Learning curves')
    plt.gca().set_ylim(0, 1)
    plt.show()
def evaluation_model(history):
    """Plot training/validation loss (left panel) and accuracy (right
    panel) from a Keras *history* object side by side."""
    fig, (axL, axR) = plt.subplots(ncols=2, figsize=(20, 8))
    panels = [
        (axL, 'loss', 'val_loss', "Training loss", "Validation loss",
         'Training and Validation loss', 'Epochs', 'Loss'),
        (axR, 'accuracy', 'val_accuracy', "Training accuracy", "Validation accuracy",
         'Training and Validation accuracy', 'Epoch', 'Accuracy'),
    ]
    for axis, train_key, val_key, train_label, val_label, title, xlabel, ylabel in panels:
        axis.plot(history.history[train_key], label=train_label)
        axis.plot(history.history[val_key], label=val_label)
        axis.set_title(title)
        axis.set_xlabel(xlabel)
        axis.set_ylabel(ylabel)
        axis.legend(loc='upper right')
    plt.show()
# %%
# DENSE MODEL №1
max_vocab_length = 10000
max_length = 50
# TextVectorization layer
text_vectorizer = TextVectorization(max_tokens=max_vocab_length,
output_mode="int",
output_sequence_length=max_length)
text_vectorizer.adapt(train_sentences.astype('str'))
# Embedding Layer
embedding = layers.Embedding(input_dim=max_vocab_length,
output_dim=128,
embeddings_initializer="uniform",
input_length=max_length,
name="embedding_1")
# Model
inputs = layers.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model_1 = tf.keras.Model(inputs, outputs, name="model_1_dense")
model_1.compile(loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
model_1.summary()
tf.keras.utils.plot_model(model_1, to_file='model_1_dense.png')
# %%
# Train model
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3,
restore_best_weights=True)
model_1_history = model_1.fit(train_sentences.astype('str'),
train_labels,
epochs=200,
validation_data=(val_sentences, val_labels),
callbacks=[early_stopping_cb])
# %%
# EVALUATION RESULT
# Learning curves
learning_curves(model_1_history)
# Evaluation model
evaluation_model(model_1_history)
# Check the results
model_1.evaluate(val_sentences, val_labels)
# Predictions
model_1_pred_probs = model_1.predict(val_sentences)
model_1_preds = tf.squeeze(tf.round(model_1_pred_probs))
model_1_preds[:20]
model_1_results = calculate_results(y_true=val_labels,
y_pred=model_1_preds)
model_1_results
# %%
# DENSE MODEL №2
# Model
inputs = layers.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dense(128, activation="relu")(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model_2 = tf.keras.Model(inputs, outputs, name="model_2_dense")
model_2.compile(loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
model_2.summary()
tf.keras.utils.plot_model(model_2, to_file='model_2_dense.png')
# %%
# Train model
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3,
restore_best_weights=True)
model_2_history = model_2.fit(train_sentences.astype('str'),
train_labels,
epochs=200,
validation_data=(val_sentences, val_labels),
callbacks=[early_stopping_cb])
# %%
# EVALUATION RESULT
# Learning curves
learning_curves(model_2_history)
# Evaluation model
evaluation_model(model_2_history)
# Check the results
model_2.evaluate(val_sentences, val_labels)
# Predictions
model_2_pred_probs = model_2.predict(val_sentences)
model_2_preds = tf.squeeze(tf.round(model_2_pred_probs))
model_2_preds[:20]
model_2_results = calculate_results(y_true=val_labels,
y_pred=model_2_preds)
model_2_results
# %%
# DENSE MODEL №3
# Model
inputs = layers.Input(shape=(1,), dtype="string")
x = text_vectorizer(inputs)
x = embedding(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dense(512, activation="relu")(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model_3 = tf.keras.Model(inputs, outputs, name="model_3_dense")
model_3.compile(loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
model_3.summary()
tf.keras.utils.plot_model(model_3, to_file='model_3_dense.png')
# %%
# Train model
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3,
restore_best_weights=True)
model_3_history = model_3.fit(train_sentences.astype('str'),
train_labels,
epochs=200,
validation_data=(val_sentences, val_labels),
callbacks=[early_stopping_cb])
# %%
# EVALUATION RESULT
# Learning curves
learning_curves(model_3_history)
# Evaluation model
evaluation_model(model_3_history)
# Check the results
model_3.evaluate(val_sentences, val_labels)
# Predictions
model_3_pred_probs = model_3.predict(val_sentences)
model_3_preds = tf.squeeze(tf.round(model_3_pred_probs))
model_3_preds[:20]
model_3_results = calculate_results(y_true=val_labels,
y_pred=model_3_preds)
model_3_results | true |
10a3e53e3bc065d7097e7dc4e010ae162f861841 | Python | gynvael/arcanesector | /attic/keytest.py | UTF-8 | 304 | 3.0625 | 3 | [] | no_license | class A(object):
    def __init__(self, a, b):
        """Store the two components that define equality and hashing."""
        self.a = a
        self.b = b
    def __hash__(self):
        # XOR the component hashes so equal (a, b) pairs hash alike.
        return hash(self.a) ^ hash(self.b)
    def __eq__(self, obj):
        # Two instances are equal when both components match.
        return self.a == obj.a and self.b == obj.b
a = A("ala", "kot")
b = A("ala", "kot")  # equal to (and hashes like) a
c = A("ala", "kxt")  # differs in the second component
d = {a: "costam"}
# Python 2 print: an equal-but-distinct key still finds the dict value.
print d[a]
| true |
a81dd8829e719d8c43aa977063fa4ff6f4756dde | Python | alejandrozorita/Python-Pensamiento-computacional | /doc_string.py | UTF-8 | 166 | 2.734375 | 3 | [] | no_license | def funcion_1(param_1):
"""descripción de la función
param_1 int cualquier entero
:return param_1 + 5
"""
return param_1 + 5
help(funcion_1)
| true |
39a78544f35be6ef9bef4d434c0609cc0f0f6d53 | Python | svebk/DeepSentiBank_memex | /workflows/images-incremental-update/image_dl.py | UTF-8 | 11,040 | 2.8125 | 3 | [
"BSD-2-Clause"
] | permissive | import requests
imagedltimeout=3
class UnknownImageFormat(Exception):
    """Raised when the header of an image stream cannot be parsed."""
    pass
def get_image_size_and_format(input):
    # adapted from https://github.com/scardine/image_size
    """
    Return (width, height, format) for a given img file content stream.
    No external dependencies except the struct modules from core.
    """
    # NOTE(review): written for Python 2 -- the signature prefixes below are
    # compared against 'str' literals; under Python 3 a bytes stream would
    # never match and fall through to UnknownImageFormat.
    import struct
    height = -1
    width = -1
    format = None
    data = input.read(25)
    if data[:6] in ('GIF87a', 'GIF89a'):
        # GIFs
        w, h = struct.unpack("<HH", data[6:10])
        width = int(w)
        height = int(h)
        format = 'GIF'
    elif data.startswith('\211PNG\r\n\032\n') and (data[12:16] == 'IHDR'):
        # PNGs
        w, h = struct.unpack(">LL", data[16:24])
        width = int(w)
        height = int(h)
        format = 'PNG'
    elif data.startswith('\211PNG\r\n\032\n'):
        # older PNGs?
        w, h = struct.unpack(">LL", data[8:16])
        width = int(w)
        height = int(h)
        format = 'PNG'
    elif data.startswith('\377\330'):
        # JPEG: walk the segment chain; markers 0xC0..0xC3 carry height/width.
        format = 'JPEG'
        msg = " raised while trying to decode as JPEG."
        input.seek(0)
        input.read(2)
        b = input.read(1)
        try:
            while (b and ord(b) != 0xDA):
                while (ord(b) != 0xFF): b = input.read(1)
                while (ord(b) == 0xFF): b = input.read(1)
                if (ord(b) >= 0xC0 and ord(b) <= 0xC3):
                    input.read(3)
                    h, w = struct.unpack(">HH", input.read(4))
                    break
                else:
                    # Skip this segment: length field includes its own 2 bytes.
                    input.read(int(struct.unpack(">H", input.read(2))[0])-2)
                b = input.read(1)
            width = int(w)
            height = int(h)
        except struct.error:
            raise UnknownImageFormat("StructError" + msg)
        except ValueError:
            raise UnknownImageFormat("ValueError" + msg)
        except Exception as e:
            raise UnknownImageFormat(e.__class__.__name__ + msg)
    else:
        raise UnknownImageFormat("Sorry, don't know how to get information from this file.")
    return width, height, format
def mkpath(outpath):
    """Create every missing parent directory of the file path ``outpath``.

    The final path component is treated as a file name and is NOT created.
    Existing directories are accepted silently.

    The original looped over every "/" position calling os.mkdir with a bare
    ``except:`` (which also swallowed KeyboardInterrupt/SystemExit); a single
    os.makedirs call with a targeted OSError handler is equivalent.
    """
    import os
    parent = os.path.dirname(outpath)
    if not parent:
        return  # bare file name: nothing to create
    try:
        os.makedirs(parent)
    except OSError:
        # Directory (or a prefix) already exists -- same tolerance as before,
        # without hiding unrelated exceptions.
        pass
def dlimage(url,verbose=False):
    """Download `url` to a unique local file; return its path, or None on error.

    The destination is "<timestamp>_<random>_<original filename>" so that
    concurrent downloads cannot collide.  (Python 2: print statements.)
    """
    import numpy as np
    import shutil
    import time
    import os
    pos_slash=[pos for pos,c in enumerate(url) if c=="/"]
    file_img=url[pos_slash[-1]+1:]
    # path with time and random to ensure unique names
    outpath=os.path.join('./'+str(time.time())+'_'+str(np.int32(np.random.random()*(10e6)))+'_'+file_img)
    mkpath(outpath)
    if verbose:
        print "Downloading image from {} to {}.".format(url,outpath)
    try:
        r = requests.get(url, stream=True, timeout=imagedltimeout)
        if r.status_code == 200:
            with open(outpath, 'wb') as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
            return outpath
        # NOTE(review): a non-200 status falls through and returns None implicitly.
    except Exception as inst:
        if verbose:
            print "Download failed for img that should be saved at {} from url {}.".format(outpath,url)
            print inst
        return None
def get_SHA1_from_data(data):
    """Return the upper-case hex SHA-1 digest of ``data`` (a byte string).

    Returns None (after logging a message) when the digest cannot be
    computed, e.g. for a non-bytes argument.

    Fixes: the Python-2-only ``print`` statement is replaced with the
    2/3-compatible call form, and the bare ``except:`` is narrowed so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    import hashlib
    sha1hash = None
    try:
        sha1 = hashlib.sha1()
        sha1.update(data)
        sha1hash = sha1.hexdigest().upper()
    except Exception:
        print("Could not read data to compute SHA1.")
    return sha1hash
def get_SHA1_from_URL_StringIO(url,verbose=0):
    """Download `url` into memory and return the upper-case SHA1 hex digest,
    or None on any failure.  (Python 2 only: cStringIO, print statements.)"""
    from cStringIO import StringIO
    import sys
    if verbose>1:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, timeout=imagedltimeout)
        if r.status_code == 200:
            r_sio = StringIO(r.content)
            # A 200 answer with an empty body is treated as a failure.
            if int(r.headers['content-length']) == 0:
                del r
                raise ValueError("Empty image.")
            else:
                data = r_sio.read()
                sha1hash = get_SHA1_from_data(data)
                del r,r_sio,data
                return sha1hash
        else:
            raise ValueError("Incorrect status_code: {}.".format(r.status_code))
    except Exception as inst:
        print "Download failed from url {}. [{}]".format(url, inst)
    return None
def get_SHA1_imginfo_from_URL_StringIO_PIL(url,verbose=0):
    """Download `url`; return (sha1_uppercase_hex, img_info dict) using PIL to
    read width/height/format.  On failure returns a single None.
    NOTE(review): the non-PIL variant below returns (None, None) on failure --
    inconsistent; callers unpacking two values would crash here."""
    from cStringIO import StringIO
    import requests
    if verbose>1:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, timeout=imagedltimeout)
        if r.status_code == 200:
            r_sio = StringIO(r.content)
            if int(r.headers['content-length']) == 0:
                del r
                raise ValueError("Empty image.")
            else:
                # with PIL, takes 1 second...
                #start = time.time()
                from PIL import Image
                img = Image.open(r_sio)
                w,h = img.size
                format = img.format
                del img
                #print "PIL get image size and format:",time.time()-start
                # Seek back so the SHA1 covers the whole binary content.
                r_sio.seek(0)
                data = r_sio.read()
                # use a dict for img info so we can store any other info we may need
                img_info = dict()
                img_info['size'] = dict()
                img_info['size']['width'] = w
                img_info['size']['height'] = h
                img_info['format'] = format
                sha1hash = get_SHA1_from_data(data)
                del r,r_sio,data
                return sha1hash,img_info
        else:
            raise ValueError("Incorrect status_code: {}.".format(r.status_code))
    except Exception as inst:
        print "Download failed from url {}. [{}]".format(url, inst)
    return None
def get_SHA1_imginfo_from_URL_StringIO(url,verbose=0):
    """Download `url`; return (sha1_uppercase_hex, img_info dict) using the
    dependency-free header parser.  Returns (None, None) on any failure."""
    from cStringIO import StringIO
    import requests
    if verbose>1:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, timeout=imagedltimeout)
        if r.status_code == 200:
            r_sio = StringIO(r.content)
            if int(r.headers['content-length']) == 0:
                del r
                raise ValueError("Empty image.")
            else:
                # No PIL dependency, 10-5s.
                #start = time.time()
                w,h,format = get_image_size_and_format(r_sio)
                #print "get_image_size_and_format:",time.time()-start
                # Seek back to compute SHA1 on the whole binary content!
                r_sio.seek(0)
                data = r_sio.read()
                # use a dict for img info so we can store any other info we may need
                img_info = dict()
                img_info['size'] = dict()
                img_info['size']['width'] = w
                img_info['size']['height'] = h
                img_info['format'] = format
                sha1hash = get_SHA1_from_data(data)
                del r,r_sio,data
                return sha1hash,img_info
        else:
            raise ValueError("Incorrect status_code: {}.".format(r.status_code))
    except Exception as inst:
        print "Download failed from url {}. [{}]".format(url, inst)
    return None,None
def get_SHA1_from_URL(url,verbose=False):
    """Stream `url` and return the SHA1 of the raw response data, or None."""
    if verbose:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, stream=True, timeout=imagedltimeout)
        if r.status_code == 200:
            sha1hash = get_SHA1_from_data(r.raw.data)
            return sha1hash
    except Exception as inst:
        if verbose:
            print "Download failed from url {}.".format(url)
            print inst
    return None
def get_b64_from_data(data):
    """Return the base64 encoding of ``data`` (a byte string), or None on failure.

    Fixes: the Python-2-only ``print`` statement is replaced with the
    2/3-compatible call form, and the bare ``except:`` is narrowed so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    import base64
    b64_from_data = None
    try:
        b64_from_data = base64.b64encode(data)
    except Exception:
        print("Could not read data to compute base64 string.")
    return b64_from_data
def get_b64_from_URL(url,verbose=False):
    """Stream `url` and return the base64 of the raw response data, or None."""
    if verbose:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, stream=True, timeout=imagedltimeout)
        if r.status_code == 200:
            b64_from_data = get_b64_from_data(r.raw.data)
            return b64_from_data
    except Exception as inst:
        if verbose:
            print "Download failed from url {}.".format(url)
            print inst
    return None
def get_b64_from_URL_StringIO(url,verbose=False):
    """Download `url` into memory and return its base64 encoding, or None.
    (Python 2 only: StringIO, print statements.)"""
    from StringIO import StringIO
    if verbose:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, timeout=imagedltimeout)
        if r.status_code == 200:
            r_sio = StringIO(r.content)
            data = r_sio.read()
            b64_from_data = get_b64_from_data(data)
            return b64_from_data
        else:
            # NOTE(review): printed even when verbose is False -- confirm intent.
            print "Incorrect status_code {} for url {}".format(r.status_code,url)
    except Exception as inst:
        if verbose:
            print "Download failed from url {}.".format(url)
            print inst
    return None
def get_SHA1_b64_from_URL(url,verbose=False):
    """Stream `url`; return (sha1_hex, base64) of the raw response data,
    or (None, None) on failure."""
    if verbose:
        print "Downloading image from {}.".format(url)
    try:
        r = requests.get(url, stream=True, timeout=imagedltimeout)
        if r.status_code == 200:
            sha1hash = get_SHA1_from_data(r.raw.data)
            b64_from_data = get_b64_from_data(r.raw.data)
            return sha1hash,b64_from_data
    except Exception as inst:
        if verbose:
            print "Download failed from url {}.".format(url)
            print inst
    return None,None
if __name__ == "__main__":
import profile
import time
#profile.run('sha1 = get_SHA1_from_URL("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")')
#profile.run('sha1_sio = get_SHA1_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")')
#profile.run('sha1_sio, img_info = get_SHA1_imginfo_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")')
start = time.time()
sha1 = get_SHA1_from_URL("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1,time.time()-start
start = time.time()
sha1_sio = get_SHA1_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1_sio,time.time()-start
start = time.time()
sha1_sio, img_info = get_SHA1_imginfo_from_URL_StringIO("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1_sio,img_info,time.time()-start
start = time.time()
sha1_sio, img_info = get_SHA1_imginfo_from_URL_StringIO_PIL("https://s3.amazonaws.com/memex-images/full/581ed33d3e12498f12c86b44010306b172f4ad6a.jpg")
print sha1_sio,img_info,time.time()-start | true |
2f9a190a4faaa89b9c141cf6069f77bf1d849746 | Python | juliendossantos/course-RIL13 | /Python/CesiRIL13/exo28.py | UTF-8 | 309 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Dos Santos Julien'
import threading
import time
def MyTimer(tempo = 5.0):
    ## initialise the timer: re-arm this function every `tempo` seconds
    threading.Timer(tempo, MyTimer, [tempo]).start()
    ## display the current time (Python 2 print statement)
    print time.strftime('%d/%m/%y %H:%M:%S',time.localtime())
# Kick off the self-rescheduling clock.
MyTimer()
e0a460f5e36d2ccf04e160af423ecd68fa14ca08 | Python | domishana/hato-bot | /library/hukidasi.py | UTF-8 | 1,257 | 3.21875 | 3 | [
"MIT"
] | permissive | # coding: utf-8
import unicodedata
# Sudden Death(元にしたコードのライセンスは以下の通り)
# MIT License
# Copyright (c) 2016 koluku
# https://github.com/koluku/sudden-death/blob/master/LICENSE
def text_length_list(text):
    """Return the display width of each line; East-Asian wide characters
    (east_asian_width in 'F', 'W', 'A') count as two columns, others as one.
    Used by the "sudden death" bubble renderer."""
    widths = []
    for line in text:
        width = sum(2 if unicodedata.east_asian_width(ch) in 'FWA' else 1
                    for ch in line)
        widths.append(width)
    return widths
def generator(msg):
    """Render *msg* (may contain newlines) inside a "sudden death" speech
    bubble; each line is centred to the width of the widest line."""
    lines = msg.split('\n')
    widths = text_length_list(lines)
    widest = max(widths)
    half_widest = widest // 2

    parts = ['_' + '人' * (half_widest + 2) + '_\n']
    for width, line in zip(widths, lines):
        left_pad = (widest - width) // 2
        right_pad = widest - left_pad - width
        parts.append('>' + ' ' * (left_pad + 2) + line + ' ' * (right_pad + 2) + '<\n')
    parts.append(' ̄' + '^Y' * (half_widest + 2) + ' ̄')
    return ''.join(parts)
| true |
a1c4a432c92a3392faef71aca30c7b3ce8afa8e5 | Python | akamakim/synology | /script/folderit | UTF-8 | 1,985 | 3 | 3 | [] | no_license | #!/usr/bin/env python
'''
use this script to group files in a "tagged" folder.
'''
import os
import sys
import re
import shutil
def system_mv(src, dst):
    """Move ``src`` to ``dst`` using the external ``mv`` binary
    (kept instead of shutil.move for speed, per the original comment).

    Fix: the original interpolated the names into an os.system shell string
    with single quotes, which breaks (and permits command injection) for any
    filename containing a quote; subprocess with an argument list avoids the
    shell entirely.
    """
    import subprocess
    subprocess.call(["mv", src, dst])
# Change into the directory given as the first CLI argument.
try:
    os.chdir(sys.argv[1]);
except:
    print "chdir failed"
    exit()
# display current working path
print "working dir=[%s]"%os.path.abspath(os.curdir)
# regex to filter file extension
rex_ext_video=re.compile(r"avi$|wmv$",re.IGNORECASE)
rex_ext_pic=re.compile(r"jpg$|gif$|png$",re.IGNORECASE)
# regex strip [^\w\d]
rex_strip=re.compile(r"[^\w\d]",re.IGNORECASE| re.MULTILINE)
# regex to generate tag: format [a-z]+[\d]+
rex_tag=re.compile(r"[a-zA-Z]+\d+",re.IGNORECASE)
#parse files and group them by tag
tags=dict()
for fname in os.listdir(os.curdir):
    #match video or pic
    if rex_ext_video.search(fname) or rex_ext_pic.search(fname):
        #create tag: strip punctuation, upper-case, take first letters+digits run
        tag=rex_tag.search( rex_strip.sub("",fname).upper() )
        if tag:
            dname=tag.group()
            print "dname:[%-024s] <= fname:[%-024s]"%(dname,fname)
            if tags.has_key(dname):
                tags[dname].append(fname);
            else:
                tags[dname] = list([fname])
        else:
            print "no tag for %s"%(fname)
    else:
        #print "no match extension file"
        pass
# remove tags without grouped files.
# NOTE(review): Python 2 only -- items() returns a list here, so deleting
# while looping is safe; under Python 3 this would raise RuntimeError.
for dname,fnames in tags.items():
    if len(fnames) <= 1 :
        del tags[dname]
# show tags
for dname,fnames in tags.items():
    print "[%-016s] <= ["%dname,
    for fname in fnames:
        print "%-016s,"%fname,
    print "]"
if len(tags) == 0:
    print "Nothing to process. Exit."
    exit()
#prompt to continue
run=raw_input("process? (y/n): ")
if run != "y":
    exit()
# Move each grouped file into its tag directory.
for dname,fnames in tags.items():
    if not os.path.exists(dname):
        os.mkdir(dname)
    if os.path.isdir(dname):
        #move files to dir
        for fname in fnames:
            system_mv(fname,dname)
88f851f702cd8583c813f556659281e60e9ca655 | Python | spather/CousinsCodingCamp | /Lessons/03/lesson3.py | UTF-8 | 583 | 2.6875 | 3 | [
"MIT"
] | permissive | WIDTH = 500
HEIGHT = 300
# Player sprite, placed near the top-left of the window.
alien = Actor("alien")
alien.pos = 100, 56
# Incoming asteroid starts just past the right edge.
asteroid = Actor("asteroid")
asteroid.left = WIDTH
asteroid.top = 50
def draw():
    # Pygame Zero hook: repaint the background and both sprites each frame.
    screen.fill((0, 102, 255))
    alien.draw()
    asteroid.draw()
def update():
    # Pygame Zero hook: slide the asteroid left each frame.
    asteroid.left -= 2
    if alien.colliderect(asteroid):
        # On collision, show the hurt sprite for one second.
        alien.image = "alien_hurt"
        # sounds.eep.play()
        clock.schedule_unique(set_alien_normal, 1.0)
def set_alien_normal():
    # Restore the normal sprite (scheduled from update()).
    alien.image = "alien"
def on_key_down(key):
    # Pygame Zero hook: move the alien 10 px down/up on arrow keys.
    if key == keys.DOWN:
        alien.top += 10
    elif key == keys.UP:
        alien.top -= 10
| true |
bf711fb4287ed41f82b00d034794259a3e2da46e | Python | jimgrund/capstone | /top_universities/scrape_top_university_list.py | UTF-8 | 4,327 | 2.765625 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
from pathlib import Path
import pandas as pd
import re
import os
import urllib
import time
from random import randint
# Cache file holding one university name per line.
UNIVERSITIES_CACHE_FILE = "universities.txt"
base_url = "https://www.usnews.com/"
# The ten paginated US News "national universities" ranking pages to scrape.
university_urls = ['https://www.usnews.com/best-colleges/rankings/national-universities?_page=1',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=2',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=3',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=4',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=5',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=6',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=7',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=8',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=9',\
                   'https://www.usnews.com/best-colleges/rankings/national-universities?_page=10'
                  ]
# placeholder to store the top N universities
universities = []
def firstGoogleResult(query_url):
    """Open `query_url` in a (visible) Firefox/Selenium session, scrape the
    Google results markup and return the first hit's href, or '' if none.
    NOTE(review): depends on Google's 'srg'/'r' CSS classes -- fragile."""
    # Firefox session
    driver = webdriver.Firefox()
    driver.get(query_url)
    driver.implicitly_wait(20)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    code_soup = soup.find_all('div', attrs={'class': 'srg'})
    # print("Code Soup: " + str(code_soup))
    search_results = []
    for i in code_soup:
        # print(str(i.getText()))
        result_links = i.find_all('div', attrs={'class':'r'})
        # print("Result Links: " + str(result_links))
        for link in result_links:
            # print(str(link))
            search_results.append(link.find('a')['href'])
    # random sleep to try and prevent Google from blocking us
    time.sleep(randint(20,60))
    # close the browser
    driver.close()
    # return the first result if results are obtained..
    if (len(search_results) > 0):
        return search_results[0]
    # else return empty string
    return ''
def googleUniversity(university_name):
    """Google the university's data-science master program and return the
    first result URL (via firstGoogleResult)."""
    raw_query = '"{}" "data science" "master program" "site:edu"'.format(university_name)
    encoded_query = urllib.parse.quote_plus(raw_query)
    return firstGoogleResult('https://www.google.com/search?q=' + encoded_query)
def scrapeUrl(universities_url):
    """Scrape one US News ranking page and return the university names
    found in its heading links."""
    #print("Scraping ", universities_url)
    # Firefox session
    driver = webdriver.Firefox()
    driver.get(universities_url)
    driver.implicitly_wait(10)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    code_soup = soup.find_all('div', attrs={'class': 'shadow-dark block-flush'})
    universities = []
    for i in code_soup:
        # print(str(i.getText()))
        university_links = i.find_all('h3', attrs={'class':'heading-large block-tighter'})
        for link in university_links:
            universities.append(link.find('a').contents[0])
    # close the browser
    driver.close()
    return universities
# Load the cached university list when present; otherwise scrape and cache it.
if Path(UNIVERSITIES_CACHE_FILE).exists():
    print("cachefile found, reading it")
    # cache file exists, read it rather than scraping the usnews site again
    universities_fh = open(UNIVERSITIES_CACHE_FILE,"r")
    for university in universities_fh:
        universities.append(university)
    universities_fh.close()
# save all universities to text file
if not Path(UNIVERSITIES_CACHE_FILE).is_file():
    # cache file did not exist, scrape the usnews site to grab the universities
    for university_url in university_urls:
        universities = universities + scrapeUrl(university_url)
    # store the universities info to a cache file
    universities_fh = open(UNIVERSITIES_CACHE_FILE,"w")
    for university in universities:
        universities_fh.write(university+"\n")
    universities_fh.close()
# Emit "<name>,<first Google hit>" per university (slow: one browser each).
for university in universities:
    print(university.rstrip(),end=',')
    print(googleUniversity(university))
| true |
bceefb396269e3cd7698d40c908a9239991f89b2 | Python | GraceRonnie/my-isc-work | /python/TempProbeSerial.py | UTF-8 | 1,187 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python2.7
import serial as ser
import time
import datetime
# NOTE(review): `ser` is rebound from the module to the port object below;
# the keyword constants (EIGHTBITS, ...) are read before the rebinding.
ser = ser.Serial(
    port = '/dev/ttyUSB0',
    baudrate = 9600,
    bytesize = ser.EIGHTBITS,
    parity = ser.PARITY_NONE,
    stopbits = ser.STOPBITS_ONE
)
#for i in range(3): #here we created a for loop to read the temp every 10 seconds for n times in range(n)
#    reading = ser.read(size=8)
#    dt = datetime.datetime.utcnow().isoformat()
#    print dt, reading
#time.sleep(10)
#for i in range(3): #here we did as above but this introduces a time lag
#    print datetime.datetime.utcnow().isoformat(), ser.read(size=8)
#    time.sleep(10)
#while ser.isOpen(): #here we created a for loop to read the temp every 10 seconds for n times in range(n)
#    reading = ser.read(size=8)
#    dt = datetime.datetime.utcnow().isoformat()
#    print dt, reading
#time.sleep(10)
import io
sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser, 1), encoding='ascii', newline='\r')
# Poll the probe forever: read 8 bytes and print them with a UTC timestamp.
while ser.isOpen(): #here we created a for loop to read the temp every 10 seconds for n times in range(n)
    reading = ser.read(size=8)
    dt = datetime.datetime.utcnow().isoformat()
    print dt, reading
| true |
702c4e0f0159f9a30c59fb2a7277a6a0bc438538 | Python | Mabo-IoT/every2mqtt | /MqttClient.py | UTF-8 | 920 | 2.515625 | 3 | [] | no_license | import paho.mqtt.client as mqtt
import yaml
import logger
class MQTTclient:
    """Thin wrapper around a paho-mqtt client configured from config.yaml."""

    client = None
    clientid = None
    host = ''
    port = None
    keepalive = None

    def __init__(self):
        '''
        Initialise: read the MQTT connection parameters from the config file.
        '''
        try:
            # Fix: the original opened config.yaml without ever closing it;
            # the context manager guarantees the handle is released.
            with open("config.yaml", "r+", encoding="utf-8") as f:
                configobj = yaml.safe_load(f.read())
            self.host = configobj['mqtt']['host']
            self.port = configobj['mqtt']['port']
            self.keepalive = configobj['mqtt']['keepalive']
            self.clientid = configobj['mqtt']['clientid']
        except Exception as e:
            # Log initialisation failures (message text kept verbatim).
            logger.writeLog("MQTT组件初始化失败" + str(e),'mqtt.log')

    # Create and return the underlying MQTT client instance.
    def genMQTTClient(self):
        self.client = mqtt.Client(client_id=self.clientid)
        return self.client
750dc375504f55ca53365c29b6e70348a2b94d78 | Python | BrunoPedrinha/Discord-Music-Bot | /UnknownPyBot/UnknownPyBot/MusicCommands.py | UTF-8 | 909 | 2.796875 | 3 | [] | no_license | import discord
from discord.ext import commands
import MusicPlayer
music_player = MusicPlayer.MusicPlayer()
#cog class for the music commands.
class MusicCommands:
    """Discord cog exposing the music playback commands."""
    # NOTE(review): `global music_player` in a class body is a no-op here;
    # the methods below read the module-level music_player directly.
    global music_player
    def __init__(self, client):
        self.client = client
    # Play a track from a URL or a quoted song name.
    @commands.command(pass_context=True, brief='Paste url or use "SONG NAME" with quotes')
    async def play(self, context, url):
        await music_player.Create_Player(url, context, self.client)
    # Stop playback.
    @commands.command(pass_context = True)
    async def stop(self, context):
        music_player.Stop_Playing()
    # Set playback volume (integer value).
    @commands.command(pass_context=True)
    async def volume(self, context, value):
        music_player.Set_Volume(context, int(value))
    # Show the currently playing track.
    @commands.command(pass_context=True)
    async def current(self, context):
        await music_player.Now_Playing(context, self.client)
def setup(client):
    """discord.py extension entry point: register the cog on the bot."""
    client.add_cog(MusicCommands(client))
| true |
4e5eeaa8fd3eaa2057e8f4ddf7937b7387bf04f8 | Python | yjthay/Python | /Codility/codility_MinAvgTwoSlice.py | UTF-8 | 3,047 | 4.125 | 4 | [] | no_license | """A non-empty array A consisting of N integers is given. A pair of integers (P, Q), such that 0 ≤ P < Q < N, is called a slice of array A (notice that the slice contains at least two elements). The average of a slice (P, Q) is the sum of A[P] + A[P + 1] + ... + A[Q] divided by the length of the slice. To be precise, the average equals (A[P] + A[P + 1] + ... + A[Q]) / (Q − P + 1).
For example, array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
contains the following example slices:
slice (1, 2), whose average is (2 + 2) / 2 = 2;
slice (3, 4), whose average is (5 + 1) / 2 = 3;
slice (1, 4), whose average is (2 + 2 + 5 + 1) / 4 = 2.5.
The goal is to find the starting position of a slice whose average is minimal.
Write a function:
def solution(A)
that, given a non-empty array A consisting of N integers, returns the starting position of the slice with the minimal average. If there is more than one slice with a minimal average, you should return the smallest starting position of such a slice.
For example, given array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
the function should return 1, as explained above.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [2..100,000];
each element of array A is an integer within the range [−10,000..10,000].
Copyright 2009–2023 by Codility Limited. All Rights Reserved. Unauthorized copying, publication or disclosure prohibited."""
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
    """Return the smallest starting index of a slice (length >= 2) of A
    whose average is minimal (Codility MinAvgTwoSlice).

    Key fact: any slice of length >= 4 can be split into two shorter
    slices, at least one of which has an average no larger than the
    whole slice's.  Hence only slices of length 2 and 3 need checking,
    giving an O(n) scan.

    Replaces the original two-pointer heuristic, which left a debug
    print() inside the loop, initialised min_avg to a *sum* rather than
    an average, and recomputed sum(A[l:r]) per step (O(n^2) worst case).
    """
    best_start = 0
    best_avg = (A[0] + A[1]) / 2.0
    for i in range(len(A) - 1):
        # Slice of length 2 starting at i.
        avg2 = (A[i] + A[i + 1]) / 2.0
        if avg2 < best_avg:
            best_avg = avg2
            best_start = i
        # Slice of length 3 starting at i (when it fits).
        if i + 2 < len(A):
            avg3 = (A[i] + A[i + 1] + A[i + 2]) / 3.0
            if avg3 < best_avg:
                best_avg = avg3
                best_start = i
    return best_start
if __name__ == '__main__':
    # Quick manual checks; expected answers in the trailing comments.
    print(solution([4, 2, 2, 5, 1, 5, 8]))  # 1
    # print(solution([4, 4, 2, 3.5, 4, 5, 1, 5, 8]))  # 2
    # print(solution([4, 4, 2, 2, 3.5, 4, 5, 1, 5, 8]))  # 2
    # print(solution([4, 4, 2, 2, 2, 4, 5, 1, 5, 8]))  # 2
    # print(solution([10, 10, 10, 1, 9, 1, 10, 10, 10]))  # 2
| true |
cc8ab0feb2b622d086e8a91a7042c7ca422ae546 | Python | EvaldoFilho098/ProjetoAGR | /example.py | UTF-8 | 17,328 | 2.578125 | 3 | [] | no_license | #encoding: utf-8
from tkinter import Tk, StringVar, Frame,Entry,Label,Button,Menu,BooleanVar,Checkbutton,PhotoImage,END,RIGHT,LEFT,TOP,BOTTOM,CENTER,VERTICAL,Y,HORIZONTAL,X
from tkinter import messagebox
from tkinter import ttk
from Banco import Banco
import pandas as pd
from variaveis import *
from Classes import AutocompleteCombobox
#Create the main window
jan = Tk()
#CONFIGURATION ----
#Title
jan.title(titulos)
#Window size
jan.geometry(str(largura)+"x"+str(altura))
#Background colour
jan.configure(background = cor_meta)
#Fixed size (not resizable)
jan.resizable(width = False, height = False)
#Transparency
jan.attributes("-alpha",0.95)
#Icon
jan.iconbitmap(default="Icons/icon.ico")
#Logo
logo = PhotoImage(file="icons/logo_.png")
#FUNÇÕES
def Mensagem_Aviso(txt):
    ''' Error popup shown when some required information is missing or invalid'''
    messagebox.showerror(title="Impossível Cadastrar Atendimento", message= txt)
def Inserir():
    """Validate the form fields and insert a new service record in the Banco.

    Validation failures are collected in ``txt``; if anything is invalid the
    record is rejected with an error popup, otherwise it is appended to the
    database, the counters/listing are refreshed and the form is cleared.
    """
    global lista_locais, lista_atendimentos, lista_certificados, lista_solicitantes, qtd, listagem
    txt = ""
    #PLACE
    local = localEntry.get().upper()
    if local == "":
        txt += "Local Inválido!\n"
    elif local not in lista_locais :
        add_local = messagebox.askyesno(title="Aviso!", message="Esse Local Não Está Cadastrado. Deseja Cadastrá-lo?" )
        if add_local:
            lista_locais.append(local)
            localEntry.set_completion_list(lista_locais)
        else:
            localEntry.delete(0,END)
            return
    #REQUESTER
    solicitante = solEntry.get().upper()
    if solicitante not in lista_solicitantes:
        txt = txt + "Solicitante Inválido!\n"
    #SERVICE TYPE
    atendimento = atendEntry.get().upper()
    if atendimento == "":
        txt += "Atendimento Inválido!\n"
    elif atendimento not in lista_atendimentos:
        add_atend = messagebox.askyesno(title="Aviso!", message="Esse Atendimento Não Está Cadastrado. Deseja Cadastrá-lo?" )
        if add_atend:
            # BUGFIX: the original appended `local` here, silently registering
            # the place name as a new service type.
            lista_atendimentos.append(atendimento)
            atendEntry.set_completion_list(lista_atendimentos)
        else:
            atendEntry.delete(0,END)
            # NOTE(review): unlike the place branch above there is no `return`
            # here -- presumably intentional; confirm.
    #CERTIFICATE TYPE
    certificado = certEntry.get().upper()
    if certificado not in lista_certificados:
        txt = txt + "Tipo de Certificado Inválido!\n"
    #META DEVICE FLAG
    meta = "SIM"
    if not chkValueMeta.get():
        meta = "NAO"
    #PROBLEM SOLVED FLAG
    resolv = "SIM"
    if not chkValueResol.get():
        resolv = "NAO"
    #IF EVERYTHING IS VALID
    if txt == "":
        #INSERT INTO THE DATABASE (next Id = last Id + 1, or 1 when empty)
        banco = Banco()
        ultimo = banco.current
        x = banco.dados["Id"]
        if ultimo:
            id_ = str(int(x[ultimo-1]) + 1)
        else:
            id_ = str(1)
        nova_linha = [id_,local, solicitante, atendimento, certificado, meta, resolv, data]
        banco.dados.loc[banco.current] = nova_linha
        banco.Atualiza()
        #banco.Save()
        #SHOW SUCCESS MESSAGE
        messagebox.showinfo(title="SUCESSO!", message="Atendimento Cadastrado com Sucesso!")
        #CLEAR SELECTIONS AND TEXT FIELDS
        localEntry.delete(0,END)
        solEntry.delete(0,END)
        atendEntry.delete(0,END)
        certEntry.delete(0,END)
        chkValueMeta.set(False)
        chkValueResol.set(False)
        #REFRESH THE SERVICE COUNTERS (total and today's)
        qtd_atendimentos = banco.current
        qtd['text'] = qtd_atendimentos
        qtd.place(relx=0.5, rely=0.5,anchor=CENTER)
        qtd_h = banco.dados.loc[banco.dados["Data"] == data].count()
        qtd_hj['text'] = qtd_h[0]
        qtd_hj.place(relx=0.5, rely=0.5,anchor=CENTER)
        #REFRESH THE LISTING
        listagem.insert('', 'end', values=tuple(banco.dados.loc[qtd_atendimentos-1]))
        listagem.pack(side=LEFT)
    else:
        #ON VALIDATION FAILURE
        Mensagem_Aviso(txt)
def Mostrar(event):
    """Open a detail window for the record selected in the main listing,
    with a button to delete it from the database."""
    try:
        global listagem, lista_atendimentos,lista_certificados,lista_locais,lista_solicitantes
        #Grab the selected item
        nodeId_1 = listagem.focus()
        #Extract the item's column values
        id_ = listagem.item(nodeId_1)['values'][0]
        local = listagem.item(nodeId_1)['values'][1]
        solicitante = listagem.item(nodeId_1)['values'][2]
        atendimento = listagem.item(nodeId_1)['values'][3]
        certificado = listagem.item(nodeId_1)['values'][4]
        meta = listagem.item(nodeId_1)['values'][5]
        resolvido = listagem.item(nodeId_1)['values'][6]
        data = listagem.item(nodeId_1)['values'][7]
        #Open the new window
        mostrar_jan = Tk()
        #CONFIGURATION ----
        #Title
        mostrar_jan.title(titulos)
        #Window size
        mostrar_jan.geometry("500x450")
        #Background colour
        mostrar_jan.configure(background = cor)
        #Fixed size (not resizable)
        mostrar_jan.resizable(width = False, height = False)
        #Transparency
        mostrar_jan.attributes("-alpha",0.95)
        #Icon
        mostrar_jan.iconbitmap(default="Icons/icon.ico")
        cor_more = 'grey8'
        x_l = 40
        x_e = 200
        y_i = 30
        #Information labels (label column at x_l, value column at x_e)
        #Place
        localLabel_ = Label(mostrar_jan,text="Local: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        localLabel_.place(x=x_l, y = y_i)
        localEntry_ = Label(mostrar_jan,text=local,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        localEntry_.place(x=x_e, y = y_i)
        #Requester
        solLabel_ = Label(mostrar_jan,text ="Solicitante: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        solLabel_.place(x=x_l, y = y_i+50)
        solEntry_ = Label(mostrar_jan,text=solicitante,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        solEntry_.place(x=x_e, y = y_i+50)
        #Service
        atendLabel_ = Label(mostrar_jan,text="Atendiemento: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        atendLabel_.place(x=x_l, y = y_i+100)
        atendEntry_ = Label(mostrar_jan,text = atendimento,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        atendEntry_.place(x=x_e, y = y_i+100)
        #Certificate
        certLabel_ = Label(mostrar_jan,text="Certificado: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        certLabel_.place(x=x_l, y = y_i+150)
        certEntry_ = Label(mostrar_jan,text = certificado,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        certEntry_.place(x=x_e, y = y_i+150)
        #Meta device flag
        metaLabel_ = Label(mostrar_jan,text="Dispositivo Meta: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        metaLabel_.place(x=x_l, y = y_i+200)
        meta_Entry_ = Label(mostrar_jan,text=meta,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        meta_Entry_.place(x=x_e, y = y_i+200)
        #Problem solved flag
        resolv_Label_ = Label(mostrar_jan,text="Resolvido: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        resolv_Label_.place(x=x_l, y = y_i+250)
        resolv_Entry_ = Label(mostrar_jan,text = resolvido,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        resolv_Entry_.place(x=x_e, y = y_i+250)
        #Date
        data_Label_ = Label(mostrar_jan,text="Data: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
        data_Label_.place(x=x_l, y = y_i+300)
        data_Entry_ = Label(mostrar_jan,text = data,font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor_more)
        data_Entry_.place(x=x_e, y = y_i+300)
        #Delete callback
        def Excluir():
            """Remove the shown record from the database and refresh the UI."""
            global listagem
            #Open the database
            banco = Banco()
            #Find the row matching the selected item's Id
            x = banco.dados.query("Id == {} ".format(id_))
            #Drop it from the dataframe
            banco.dados = banco.dados.drop(x.index)
            banco.Atualiza()
            #Refresh the service counters
            qtd_atendimentos = banco.current
            qtd['text'] = qtd_atendimentos
            qtd_h = banco.dados.loc[banco.dados["Data"] == data].count()
            qtd_hj['text'] = qtd_h[0]
            #Refresh the listing
            listagem.delete(nodeId_1)
            listagem.pack(side=LEFT)
            #Persist the database
            banco.Save()
            #Success message
            messagebox.showinfo(title="Sucesso!", message="Cadastro Removido com Sucesso!")
            #Close this window
            mostrar_jan.destroy()
        #Delete button
        ex_button = Button(mostrar_jan,text="Excluir" , width = 20,bg=cor, fg=cor_contraste,relief="raise",command=Excluir)
        ex_button.place(x=x_e-25,y = y_i+350)
        mostrar_jan.mainloop()
    except:
        # NOTE(review): bare except silently ignores any failure (e.g. no
        # selection); consider narrowing/logging.
        pass
def Visualizar():
    """Open a read-only window listing every record in the database."""
    banco = Banco()
    #Open the new window
    visualizar_janela = Tk()
    #CONFIGURATION ----
    #Title
    visualizar_janela.title(titulos)
    #Window size
    visualizar_janela.geometry("{}x{}".format(largura,altura))
    #Background colour
    visualizar_janela.configure(background = cor)
    #Fixed size (not resizable)
    visualizar_janela.resizable(width = False, height = False)
    #Transparency
    visualizar_janela.attributes("-alpha",0.95)
    #Icon
    visualizar_janela.iconbitmap(default="Icons/icon.ico")
    # One Treeview column per dataframe column, with fixed widths.
    dadosCols = tuple(banco.dados.columns)
    listagem_v = ttk.Treeview(visualizar_janela,columns = dadosCols, show='headings', height = 25)
    listagem_v.column("Id", width = 25,anchor=CENTER)
    listagem_v.heading("Id",text="ID",anchor=CENTER)
    listagem_v.column("Local", width = 150,anchor=CENTER)
    listagem_v.heading("Local",text="Local",anchor=CENTER)
    listagem_v.column("Solicitante", width = 100,anchor=CENTER)
    listagem_v.heading("Solicitante",text="Solicitante",anchor=CENTER)
    listagem_v.column("Atendimento", width = 250,anchor=CENTER)
    listagem_v.heading("Atendimento",text="Atendimento",anchor=CENTER)
    listagem_v.column("Certificado", width = 150,anchor=CENTER)
    listagem_v.heading("Certificado",text="Certificado",anchor=CENTER)
    listagem_v.column("Meta", width = 70,anchor=CENTER)
    listagem_v.heading("Meta",text="Meta",anchor=CENTER)
    listagem_v.column("Resolvido", width = 70,anchor=CENTER)
    listagem_v.heading("Resolvido",text="Resolvido",anchor=CENTER)
    listagem_v.column("Data", width = 100,anchor=CENTER)
    listagem_v.heading("Data",text="Data",anchor=CENTER)
    listagem_v.place(x=45,y=35)
    #Vertical scrollbar for the listing
    ysb = ttk.Scrollbar(visualizar_janela, orient=VERTICAL, command=listagem_v.yview)
    listagem_v['yscroll'] = ysb.set
    ysb.pack(side = RIGHT, fill = Y)
    # Header texts
    for c in dadosCols:
        listagem_v.heading(c, text=c.title())
    # Insert every row of the database
    for item in banco.dados.values:
        listagem_v.insert('', 'end', values=tuple(item))
    visualizar_janela.mainloop()
def Sobre():
    """Display the About dialog with the application credits."""
    about_text = "Software para controle de Suporte\n2021\nMeta Certificado Digital"
    messagebox.showinfo(title="SOBRE", message=about_text)
# Menu bar
menubar = Menu(jan)
# "Options" menu
opmenu = Menu(menubar,tearoff=0)
opmenu.add_command(label="Visualizar Atendimentos",command=Visualizar)
menubar.add_cascade(label = "Opções", menu = opmenu)
# "About" menu
sobremenu = Menu(menubar,tearoff=0)
sobremenu.add_command(label="Sobre", command=Sobre)
menubar.add_cascade(label = "?", menu = sobremenu)
# Title area
TopFrame = Frame(jan, width = largura, height = 100, bg = cor, relief = "raise" )
TopFrame.pack(side=TOP)
# Meta logo and program title
logo_meta = Label(TopFrame, image=logo,bg=cor)
logo_meta.place(x=5,y=5)
meta = Label(TopFrame,text = "Controle de Atendimentos",font=fonte_Titulos, fg= cor_contraste, bg=cor)
meta.place(x=280,y=25)
# Info panel 1: current date and ticket counters
infosFrame = Frame(jan, width = 450, height = 150, bg=cor,relief="raise")
infosFrame.place(x = 540,y=150)
# Current date
date = Label(infosFrame,text=data,fg=cor_contraste,bg=cor,font=fonte_Destaques)
date.place(x=140,y=5)
# Total number of tickets handled
n_atendiLabel = Label(infosFrame, text="Atendimentos Realizados", fg = cor_contraste, bg=cor, font=fonte_Textos)
n_atendiLabel.place(x=15,y=50)
frame_aux = Frame(infosFrame, width = 200, height = 50, bg = "grey8", relief="raise")
frame_aux.place(x=15, y=85)
qtd = Label(frame_aux,text=qtd_atendimentos, bg = 'grey8' , fg="red", font=fonte_Destaques)
qtd.place(relx=0.5, rely=0.5,anchor=CENTER)
# Number of tickets opened today
n_atendiHLabel = Label(infosFrame, text="Atendimentos de Hoje", fg = cor_contraste, bg=cor, font=fonte_Textos)
n_atendiHLabel.place(x=235,y=50)
frame_auxH = Frame(infosFrame, width = 200, height = 50, bg = "grey8", relief="raise")
frame_auxH.place(x=235, y=85)
qtd_hj = Label(frame_auxH,text=qtd_h, bg = 'grey8' , fg="red", font=fonte_Destaques)
qtd_hj.place(relx=0.5, rely=0.5,anchor=CENTER)
# Info panel 2: quick list of today's records
infos_2Frame = Frame(jan, width = 450, height = 225, bg=cor,relief="raise")
infos_2Frame.place(x = 540,y=325)
# Quick view of registered tickets.
# NOTE: `listagem` is a module-level name; it is also read by the
# double-click handler Mostrar (bound below) and by the Excluir handler.
dadosCols = tuple(banco.dados.columns)
listagem = ttk.Treeview(infos_2Frame,columns = dadosCols, show='headings', height = 10, selectmode='extended')
listagem.bind('<Double-1>',Mostrar)
listagem.column("Id", width = 25)
listagem.heading("Id",text="ID")
listagem.column("Local", width = 70)
listagem.heading("Local",text="Local")
listagem.column("Solicitante", width = 70)
listagem.heading("Solicitante",text="Solicitante")
listagem.column("Atendimento", width = 70)
listagem.heading("Atendimento",text="Atendimento")
listagem.column("Certificado", width = 70)
listagem.heading("Certificado",text="Certificado")
listagem.column("Meta", width = 35)
listagem.heading("Meta",text="Meta")
listagem.column("Resolvido", width = 35)
listagem.heading("Resolvido",text="Resolvido")
listagem.column("Data", width = 70)
listagem.heading("Data",text="Data")
listagem.pack(side=LEFT)
# Scrollbar for the quick view
ysb = ttk.Scrollbar(infos_2Frame, orient=VERTICAL, command=listagem.yview)
listagem['yscroll'] = ysb.set
ysb.pack(side = RIGHT, fill = Y)
# Header captions (overwrites the labels above with Title Case)
for c in dadosCols:
    listagem.heading(c, text=c.title())
# Insert only today's rows (filtered on the Data column)
for item in dados.loc[dados["Data"]==data].values:
    listagem.insert('', 'end', values=tuple(item))
# New-ticket registration panel
cadastroFrame = Frame(jan, width = 450, height = altura-200, bg=cor,relief="raise")
cadastroFrame.place(x = 40,y=150)
# Location field (autocompletes from previously used locations)
localLabel = Label(cadastroFrame,text = "Local: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
localLabel.place(x = xLabels,y = yInicialCadastro )
localEntry = AutocompleteCombobox(cadastroFrame, width = entrysWidth)
lista_locais = list(banco.dados["Local"].drop_duplicates())
localEntry.set_completion_list(lista_locais)
localEntry.place(x = xEntrys, y = yInicialCadastro)
# Requester field
solLabel = Label(cadastroFrame,text = "Solicitante: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
solLabel.place(x = xLabels,y =yInicialCadastro + 70 )
solEntry = AutocompleteCombobox(cadastroFrame, width = entrysWidth)
solEntry.set_completion_list(lista_solicitantes)
solEntry.place(x = xEntrys, y = yInicialCadastro + 70 )
# Service/ticket type field
atendLabel = Label(cadastroFrame,text = "Atendimento: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
atendLabel.place(x = xLabels,y = yInicialCadastro + 140 )
atendEntry = AutocompleteCombobox(cadastroFrame, width = entrysWidth)
atendEntry.set_completion_list(lista_atendimentos)
atendEntry.place(x = xEntrys, y = yInicialCadastro + 140 )
# Certificate field
certLabel = Label(cadastroFrame,text = "Certificado: ",font=fonte_Textos, anchor="w", fg=cor_contraste, bg=cor)
certLabel.place(x = xLabels,y = yInicialCadastro + 210 )
certEntry = AutocompleteCombobox(cadastroFrame, width = entrysWidth)
certEntry.set_completion_list(lista_certificados)
certEntry.place(x = xEntrys, y = yInicialCadastro + 210 )
# "Meta device" checkbox
chkValueMeta = BooleanVar()
chkValueMeta.set(False)
chkMeta = Checkbutton(cadastroFrame, text='Dispostivo da Meta',var = chkValueMeta,bg=cor, activebackground = cor, fg=cor_contraste, selectcolor= cor)
chkMeta.place(x= xLabels,y = yInicialCadastro + 260)
# "Problem resolved" checkbox
chkValueResol = BooleanVar()
chkValueResol.set(False)
chkResolv = Checkbutton(cadastroFrame, text='Problema Resolvido',var = chkValueResol,bg=cor, activebackground = cor, fg=cor_contraste, selectcolor = cor)
chkResolv.place(x = xEntrys + 100 ,y = yInicialCadastro + 260 )
# Insert button (Inserir is presumably defined earlier in the file — TODO confirm)
cadastroButton = Button(cadastroFrame, text = "Inserir Atendimento", bg=cor, fg=cor_contraste, width = entrysWidth,command = Inserir)
cadastroButton.place(x = xEntrys - 50, y = yInicialCadastro + 300)
# Attach the menu bar and start the Tk event loop
jan.config(menu = menubar)
jan.mainloop()
5400235e1abf8dabe1e1f5dc0ae70163bf63a932 | Python | g0pher98/CTF | /2019/x-mas/web/rigged_Election/md5table_gen.py | UTF-8 | 492 | 3.203125 | 3 | [] | no_license | from hashlib import md5
md5table = {}  # candidate plaintext -> md5(salt + plaintext) hex digest
elements = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"  # candidate alphabet
salt = "watch__bisqwit__"  # fixed prefix hashed before every candidate
def hashing(depth, now=0, string=""):
if depth == now:
enc = md5()
enc.update(salt+string)
md5table[string] = enc.hexdigest()
return
for e in elements:
hashing(depth, now = now+1, string = string+e)
# length = 7~25
# NOTE(review): 62**7 candidates already exceed 3.5e12 hashes, so this
# loop is not practically runnable to completion as written — confirm
# the intended search space.
for i in range(7,25):
    print("=== %d ===" % i)
    hashing(i)
# Dump the accumulated table as a Python dict literal.
with open("./md5table.txt", "w") as f:
    f.write(str(md5table))
d703fe94d0f1a7a7d3815b053d6674cbdeb5d169 | Python | thiagomayllart/Flipper | /Flipper/HTTPRequester.py | UTF-8 | 2,211 | 2.578125 | 3 | [
"MIT"
] | permissive | import re
import urllib2
import base64
from base64 import b64encode, b64decode
# Module-level cache shared by set_file()/get_key()/HTTP_request():
requests_file = None  # file handle last opened by set_file()
arr = None            # '@@@@'-separated sections of the request template
file_string = None    # raw contents of the loaded request file
def set_file(file_location):
    """Load *file_location* into the module-level cache used by get_key().

    Stores the raw file contents in the global ``file_string``. The 'with'
    statement guarantees the handle is closed even if read() raises (the
    previous open/read/close sequence leaked the handle on error).
    """
    global requests_file, file_string
    with open(file_location, "r") as requests_file:
        file_string = requests_file.read()
def get_key():
    """Extract and base64-decode the key stored in section 1 of the cached file.

    Side effect: refreshes the module-level ``arr`` with the '@@@@'-separated
    sections of ``file_string`` (newlines re-encoded as '!!!').

    Returns:
        The decoded key (bytes on Python 3, str on Python 2).
    """
    global requests_file, arr, file_string
    # Work on a transformed copy so the cached contents stay untouched.
    data = file_string.replace('\n', '!!!')
    arr = data.split('@@@@')
    print("Original Key: " + arr[1])
    # base64.b64decode works on Python 2 and 3; the 'base64' str codec
    # used previously was removed in Python 3.
    return base64.b64decode(arr[1])
def HTTP_request(key, block, position):
    """Splice *key* (base64) into the cached request template, replay it
    over HTTP and print a comma-separated result line.

    Args:
        key: raw key to embed; re-encoded into section 1 of the template.
        block, position: caller-supplied identifiers echoed in the output.

    Side effects: mutates the module-level ``arr`` (section 1 is replaced)
    and prints either the result line or the HTTP error.
    NOTE(review): relies on Python 2 only APIs (urllib2, str.encode('base64')).
    """
    arr_aux = arr  # NOTE: this aliases (not copies) the global template list
    arr_aux[1] = key.encode('base64').replace('\n', '')
    new_req = ''.join(arr_aux)
    s = new_req
    # Pull the raw request blob out of <request base64=...>.../request>.
    start = 'request base64='
    end = '/request'
    method = ''
    ax = s
    result = ax.split(start)[-1]
    result = result.split(end)[0]
    # Strip the CDATA wrapper around the raw request text.
    start = 'CDATA['
    end = ']]><'
    result = result.split(start)[-1]
    result = result.split(end)[0]
    # The target URL lives in its own <url> CDATA element.
    start = '<url><![CDATA['
    end = ']]></url>'
    path = s.split(start)[-1].split(end)[0]
    # '!!!!!!' marks the blank line between headers and body ('!!!' == '\n').
    raw_header, data = result.split('!!!!!!')
    lines = raw_header.split('!!!')
    if 'POST' in lines[0]:
        method = 'POST'
    else:
        if 'GET' in lines[0]:
            method = 'GET'
    headers = {}
    i = 1
    while i < len(lines):
        info = lines[i].split(': ', 1)
        # Drop the stale Content-Length; urllib2 recomputes it.
        if 'Content-Length' not in info[0]:
            headers.update({info[0]: info[1]})
        i = i + 1
    the_page = ''
    code = ''
    try:
        if method == 'POST':
            req = urllib2.Request(path, data=data, headers=headers)
            response = urllib2.urlopen(req)
            the_page = response.read()
            code = response.getcode()
        if method == 'GET':
            req = urllib2.Request(path, headers=headers)
            response = urllib2.urlopen(req)
            the_page = response.read()
            code = response.getcode()
        # Previously this printed the argument tuple itself instead of
        # interpolating the values; use real %-formatting.
        print("%s, %s, %s, %s, %s" % (arr_aux[1], block, position, code, len(the_page)))
    except urllib2.HTTPError as err:
        print(err)
| true |
6a899d675d7e33b367cb2343674a8f78a0aebb8d | Python | S-GH/Keras_Basic | /keras21_1save.py | UTF-8 | 551 | 2.625 | 3 | [] | no_license | from numpy import array
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Training data: 13 samples, each a window of 3 sequential values (x) with
# the next value in the sequence as the target (y). The arrays are defined
# but unused here — this script only builds and saves an untrained model.
x = array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],
           [5,6,7],[6,7,8],[7,8,9],[8,9,10],
           [9,10,11],[10,11,12],
           [20,30,40],[30,40,50],[40,50,60]])
y = array([4,5,6,7,8,9,10,11,12,13,50,60,70])
# Small fully connected regressor: 3 inputs -> 1 output, built in one go
# instead of repeated model.add() calls.
model = Sequential([
    Dense(20, activation='relu', input_shape=(3,)),
    Dense(20),
    Dense(10),
    Dense(5),
    Dense(1),
])
# Persist the untrained model (architecture + initial weights) to disk.
model.save('./save/save.h5')
7eb23b0ce7dfb1410a9686d5313071e3bdd235cc | Python | lingyk/Code_Interview | /CompetitiveGaming.py | UTF-8 | 679 | 3.140625 | 3 | [] | no_license | class solution(object):
def CompetitiveGaming(self, k, score):
rank = []
count = 0
level = 0
score = sorted(score, reverse=True)
for i in range(len(score)):
if score[i] != score[i-1]:
rank.append(i+1)
else:
count += 1
rank.append(i+1-count)
if rank[i] < rank[i-1]:
rank[i] = rank[i-1]
for i in range(len(rank)):
if rank[i] <= k:
level = level+1
return level, rank
# Smoke checks: expected (7, [1,2,3,4,4,4,4]) and (4, [1,2,2,2,5,5,7,7,7]).
print(solution().CompetitiveGaming(4, [5,2,2,3,4,2,2]))
print(solution().CompetitiveGaming(4, [2,2,2,3,3,4,4,4,5]))
437233cdcff0200bb4cf3ada91972a508be43c25 | Python | SitaramRyali/SDCND_Particle_Filters | /Particle_Filters_in_python/robot_with_particles_ingrid.py | UTF-8 | 925 | 3.203125 | 3 | [] | no_license | from math import pi
import random
from robot_class import robot
# Simulate one robot step and take a sensor reading.
myrobot = robot()
myrobot = myrobot.move(0.1, 5.0)
Z = myrobot.sense()  # landmark measurements from the true robot
print(Z)
print(myrobot)
# Build N particles with forward/turn/sense noise.
N = 1000
p = []
for _ in range(N):
    x = robot()
    x.set_noise(0.05, 0.05, 5.0)
    p.append(x)
# Motion update: move every particle exactly like the robot moved.
p2 = []
for i in range(N):
    p2.append(p[i].move(0.1, 5.0))
p = p2
# Now we want to give weight to our
# particles. This program will print a
# list of 1000 particle weights.
w = []
W = 0
for i in range(N):
    weight = p[i].measurement_prob(Z)
    w.append(weight)
    W += weight
# You should make sure that p3 contains a list with particles
# resampled according to their weights.
# Also, DO NOT MODIFY p.
# normalize weights
w_norm = []
for i in range(N):
    w_norm.append(w[i] / W)
# Importance resampling: draw each of the N slots with probability
# proportional to the particle's normalized weight. (The previous code
# picked a weight uniformly with random.choice and mapped it back with
# list.index(), which ignores the weights and collapses equal weights
# onto the first particle carrying that weight.)
p3 = [p[idx] for idx in random.choices(range(N), weights=w_norm, k=N)]
| true |
2dd4e4bf4e668ab96283e1a4be34ef3913e75b5a | Python | Bobowski/crypto-protocols | /oblivious-transfer/dh_simple.py | UTF-8 | 1,342 | 2.8125 | 3 | [] | no_license | import random
from charm.toolbox.ecgroup import ECGroup, ZR, G
from charm.toolbox.eccurve import prime192v1, prime256v1
class DHSimple:
    """Diffie-Hellman based 1-out-of-n oblivious-transfer key exchange.

    Party "a" (sender) and party "b" (receiver) keep their state in plain
    dicts. The demo below checks b['m'] == a["m'"][b['b']], i.e. the
    receiver's derived key matches exactly one of the sender's masked
    values — the one at the chosen index.
    NOTE(review): the protocol semantics described here are inferred from
    the arithmetic; confirm against the intended OT construction.
    """
    def __init__(self, group_obj):
        # Publish the charm ECGroup to all methods via a module global.
        global group
        group = group_obj
    def gen_params(self):
        """Create both parties' state dicts sharing one random generator g."""
        g = group.random(G)
        a = {'g': g}
        b = {'g': g}
        return a, b
    def gen_msgs(self, a, count):
        """Fill the sender's state with *count* random group elements."""
        a['m'] = [group.random(G) for _ in range(count)]
    def setup(self, a):
        """Sender: pick a secret exponent a and publish A = g**a.

        Also precomputes A2 = A**a and the table A3[i-1] = A2**i
        (i = 1..len(m)-1) used later by mask(). Returns the public part.
        """
        a['a'] = group.random(ZR)
        a['A'] = a['g'] ** a['a']
        a['A2'] = a['A'] ** a['a']
        a['A3'] = [a['A2'] ** i for i in range(1, len(a['m']))]
        return {'A': a['A']}
    def choose(self, b):
        """Receiver: hide the choice index b['b'] inside v = A**b * g**k.

        The random k blinds the choice; the receiver's unmasking key is
        m = A**k. Returns the public commitment v.
        """
        b['k'] = group.random(ZR)
        b['v'] = (b['A'] ** b['b']) * (b['g'] ** b['k'])
        b['m'] = b['A'] ** b['k']
        return {"v": b['v']}
    def mask(self, a):
        """Sender: derive the masked values m'[0] = v**a, m'[i] = v**a / A3[i-1].

        Only the slot matching the receiver's committed index equals the
        receiver's key A**k.
        """
        a["m'"] = [a['v'] ** a['a']]
        a["m'"] += [a["m'"][0] / A3 for A3 in a['A3']]
        # a["m'"] += [a['v'] ** a['a'] / a['A2'] ** i for i in range(1, len(a['m']))]
        return {"m'": a["m'"]}
if __name__ == "__main__":
    # Just testing
    OT = DHSimple(ECGroup(prime256v1))
    a, b = OT.gen_params()
    OT.gen_msgs(a, 55)
    # Receiver chooses index 42 (must be < the 55 messages generated).
    b.update({'b': 42})
    b.update(OT.setup(a))
    a.update(OT.choose(b))
    b.update(OT.mask(a))
    # True iff the receiver's derived key matches the masked slot at
    # the chosen index.
    print(b["m"] == a["m'"][b["b"]])
| true |
587dc91f670b02cda9a0ef72f48535b6a848ca90 | Python | zohmg/zohmg | /src/zohmg/utils.py | UTF-8 | 2,011 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import string, sys, time
from random import Random
def random_string(size):
    """Return a random alphabetic string of exactly *size* characters.

    Random.sample picks without replacement, so one call can yield at most
    len(ascii_letters) characters; longer strings are built by joining two
    recursive halves. The halves are split as floor(size/2) and the
    remainder, so odd sizes no longer lose a character (the old
    size/2 + size/2 split silently returned size-1 characters).
    """
    letters = string.ascii_letters  # locale-independent, works on Py2 and Py3
    if size > len(letters):
        half = size // 2
        return random_string(half) + random_string(size - half)
    return ''.join(Random().sample(letters, size))
def timing(func):
    """Decorator: calling the wrapped function returns the elapsed
    wall-clock time in milliseconds; the wrapped function's own return
    value is discarded."""
    def wrapper(*arg):
        started = time.time()
        func(*arg)
        return (time.time() - started) * 1000.00
    return wrapper
def timing_p(func):
    """Decorator: like timing(), but also prints the elapsed time.

    The wrapped call returns the elapsed wall-clock time in milliseconds;
    the wrapped function's own return value is discarded.
    """
    def wrapper(*arg):
        t0 = time.time()
        func(*arg)
        elapsed = (time.time() - t0) * 1000.00
        # print() call form works on both Python 2 and 3; the old bare
        # 'print "..."' statement is a SyntaxError under Python 3.
        print("=> %.2f ms" % elapsed)
        return elapsed
    return wrapper
#
# General helpers
#
def compare_triples(p, q):
    """
    Compare two triples by their first element, then their second; the
    third element is ignored. Returns 1, 0 or -1 when p sorts after,
    equal to, or before q, respectively.
    """
    a, b, _ = p
    x, y, _ = q
    primary = (a > x) - (a < x)
    if primary != 0:
        return primary
    return (b > y) - (b < y)
def fail(msg, errno=1):
    """Print *msg* to stderr and terminate the process with exit code *errno*.

    Uses sys.stderr/sys.exit directly: this works on Python 2 and 3 (the
    old 'print >>sys.stderr' form is Py2-only syntax) and does not depend
    on the 'exit' helper injected by the site module.
    """
    sys.stderr.write("%s\n" % (msg,))
    sys.exit(errno)
# strip whitespaces.
def strip(str):
    """Return *str* with surrounding whitespace removed."""
    stripped = str.strip()
    return stripped
| true |
fc783a46ad42c4fae45b0518373608b083398e88 | Python | SaLuX8/Koneoppiminen | /T11.py | UTF-8 | 2,394 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 17:25:22 2020
@author: Sami
"""
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Fetch Keras' ready-made MNIST dataset (28x28 grayscale digit images).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# The code can be split into cells separated by #%% and run with ctrl+enter.
#%%
# Show one training image to sanity-check the data.
plt.imshow(x_train[1], cmap='Greys')
# Flatten each 28x28 image in x_train/x_test into a 784-element vector (28*28=784).
x_train_flat=x_train.reshape(60000, 784)
x_test_flat = x_test.reshape(10000, 784)
x_train_flat = x_train_flat/255 # each pixel is 0-255; dividing by 255 scales it into 0-1
x_test_flat = x_test_flat/255
# One-hot encode the labels (10 digit classes); keep the raw test labels
# around for the later comparison cells.
y_train = pd.get_dummies(y_train)
y_test_orig = y_test
y_test = pd.get_dummies(y_test)
# Fully connected classifier: 784 -> 1000 -> 100 -> 10.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1000, activation='relu', input_shape=(x_train_flat.shape[1],)),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax') # 10 output digits; a classifier net uses softmax on the output layer
    ])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])
model.fit(x_train_flat, y_train, validation_data=(x_test_flat, y_test), epochs=10, batch_size=100)
#%%
# Predict class probabilities for the test set and derive per-sample labels.
ennuste_test = model.predict(x_test_flat)
enn = pd.DataFrame(ennuste_test)
enn['max'] = enn.max(axis=1)  # highest class probability per sample
enn['ennuste'] = enn.idxmax(axis=1)  # predicted digit = argmax column
enn['oikea'] = y_test_orig  # true label
enn['tulos'] = enn['ennuste'] == enn['oikea']  # True when the prediction is correct
#%%
import random  # NOTE(review): only needed for the commented-out random sampling below
# Show six hand-picked test digits with their predicted ('pr') and true labels.
fig, axs = plt.subplots(2, 3)
axs = axs.ravel()
x=0
lista = [115,124,149,151,247,3893]  # indices of interesting test samples
for i in lista:
    # i = random.randint(0,len(x_test))
    # number = y_test.loc[i][lambda x: x == 1].index[0]
    number = y_test_orig[i]
    #print(number)
    # pred_number = ennuste_test[i].max()
    pred_number = enn['ennuste'][i]
    print(pred_number)
    axs[x].imshow(x_test[i])
    axs[x].text(27,-1, i, size=9, ha="right")
    axs[x].text(0,-1, ('pr',pred_number), size=9 )
    axs[x].text(12,-1, number, size=9 )
    axs[x].set_xticks([])
    axs[x].set_yticks([])
    x+=1
#%%
# Tally predictions against the true labels and print a score.
right = 0
wrong = 0
for truth, guess in zip(y_test_orig, enn['ennuste']):
    if truth == guess:
        right += 1
    else:
        wrong += 1
# NOTE(review): this prints 100 - wrong/right*100, i.e. 100 minus the
# wrong-to-right ratio — not the usual accuracy right/(right+wrong)*100.
# Preserved as-is; confirm the intended metric.
print(100-(wrong/right*100))
2a83c602a344b078ba5d38d18139e9bff7b2b0aa | Python | aineko-macx/python | /ch7/7_3.py | UTF-8 | 483 | 3.5 | 4 | [] | no_license | import math
def square_root(a):
    """Approximate the square root of *a* with Newton's (Babylonian) method.

    Args:
        a: non-negative number.
    Returns:
        A float approximation of sqrt(a) (0.0 for a == 0).
    Raises:
        ValueError: if *a* is negative (the iteration would not converge).
    """
    if a < 0:
        raise ValueError("square_root() requires a non-negative number")
    if a == 0:
        # The Newton update divides by x; starting from a/3 == 0 would
        # divide by zero, so handle zero directly.
        return 0.0
    x = a / 3  # first guess
    epsilon = 0.00000001
    while True:
        y = (x + a / x) / 2  # Newton update: average of x and a/x
        if abs(y - x) < epsilon:
            break
        x = y
    return x
def test_square_root(a):
    """Print *a*, the Newton approximation, math.sqrt's value and their gap."""
    approx = square_root(a)
    exact = math.sqrt(a)
    print(a, approx, exact, exact - approx)
# Compare Newton's method against math.sqrt for a couple of inputs.
test_square_root(1)
test_square_root(2)
| true |
fdba14f05aea694ee1b27a516c8661044b55b840 | Python | xlyw236/python_exercise | /loops_start.py | UTF-8 | 194 | 3.203125 | 3 | [] | no_license | #
# Example file for working with loops
# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)
def main():
    """Print the integers 5 through 9, one per line."""
    for x in range(5, 10):
        # print(x) is valid on Python 2 (parenthesised expression) and
        # Python 3 (function call); the bare 'print x' statement was a
        # SyntaxError under Python 3.
        print(x)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
  main()
| true |
136afc572ad1c2b81295e89305b400c4fde3e333 | Python | PiCoder314/kickass-tui | /src/main.py | UTF-8 | 2,205 | 2.984375 | 3 | [] | no_license | #!/bin/python3
from requests import get, RequestException
from contextlib import closing
from bs4 import BeautifulSoup as soup
import inquirer
import os
def simple_get(url):
    """
    Attempts to get the content at `url` by making an HTTP GET request.
    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None.
    """
    try:
        with closing(get(url, stream=True)) as response:
            # Only hand back bodies that look like HTML.
            return response.content if is_good_response(response) else None
    except RequestException as exc:
        log_error('Error during requests to {0} : {1}'.format(url, str(exc)))
        return None
def is_good_response(resp):
    """
    Returns True if the response seems to be HTML, False otherwise.
    """
    content_type = resp.headers['Content-Type'].lower()
    if resp.status_code != 200:
        return False
    if content_type is None:
        return False
    return 'html' in content_type
def log_error(e):
    """Report an error; for now it is simply printed to stdout."""
    print(e)
def get_query():
    """Prompt the user for a search term, URL-encoding spaces as %20."""
    return input("Enter search term: ").replace(" ", "%20")
if __name__ == "__main__":
    # Search the site and parse the result page.
    query = get_query()
    raw_html = simple_get(f'https://katcr.to/usearch/{query}/')
    html = soup(raw_html, 'html.parser')
    # Each result row's main anchor carries the title and detail-page link.
    links = html.find_all('a', attrs={
        'class': 'cellMainLink'
    })
    shows = []
    for i, a in enumerate(links):
        shows.append({})
        shows[i]['title'] = a.text.replace('\n', '').strip()
        shows[i]['link'] = a.get('href')
    # Prompt to choose torrent
    print('Choose a torrent to download')
    questions = [
        inquirer.List('title',
                      '==> ',
                      [show['title'] for show in shows],
                      carousel=True
                      )
    ]
    title = inquirer.prompt(questions)['title']
    link = list(filter(lambda show: show['title'] == title, shows))[0]['link']
    # Fetch the detail page and extract the magnet-link button's href.
    raw_html = simple_get(f'https://katcr.to/{link}')
    html = soup(raw_html, 'html.parser')
    link = html.find('a', attrs={
        'class': 'kaGiantButton',
        'title': 'Magnet link'
    }).get('href')
    # NOTE(review): the scraped magnet URI is interpolated into a shell
    # command; a crafted link containing a quote could inject commands.
    # Consider subprocess.run(["xdg-open", link]) instead.
    os.system(f"xdg-open '{link}'")
| true |
8229d9189886177c88c57b13b52effd07cfc901f | Python | hrishikeshshenoy/website | /server.py | UTF-8 | 843 | 2.765625 | 3 | [] | no_license | from flask import Flask,render_template,request,redirect
import csv
app = Flask(__name__)
@app.route('/')
def hello_world2():
    """Serve the site's landing page."""
    home_template = 'index.html'
    return render_template(home_template)
@app.route('/<page_name>')
def hello_world6(page_name):
    # Renders whatever template the URL names, e.g. /about.html.
    # NOTE(review): page_name is user-controlled; Flask's Jinja loader
    # confines lookups to the templates folder, but a missing template
    # raises TemplateNotFound (500) — confirm this catch-all is intended.
    return render_template(page_name)
def write_to_data(data):
    """Append one contact-form submission to database.csv.

    Args:
        data: mapping with 'email', 'subject' and 'message' keys
              (raises KeyError if any is missing).
    """
    # newline='' is required by the csv module so rows are not written
    # with spurious blank lines on Windows.
    with open('database.csv', mode='a', newline='') as database:
        writer = csv.writer(database, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow([data["email"], data["subject"], data["message"]])
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
    """Persist a POSTed contact form and redirect; reject other methods."""
    if request.method != "POST":
        return 'Something Went Wrong'
    write_to_data(request.form.to_dict())
    return redirect('thankyou.html')
f4211182c83661cc63d3c3d38e66ab45f4a3bdc6 | Python | JamesP2/adventofcode-2020 | /day/12/day12-2.py | UTF-8 | 2,134 | 3.640625 | 4 | [] | no_license | import argparse
from enum import IntEnum
import math
# Command-line interface: one required input file plus an optional -v flag.
parser = argparse.ArgumentParser(description="Rain Risk")
parser.add_argument("file", metavar="INPUT_FILE", type=str, help="Input filename")
parser.add_argument(
    "--verbose", "-v", action="store_true", default=False, help="Verbose output"
)
args = parser.parse_args()
def vprint(*x):
    """Print *x* only when --verbose was given; a single argument is
    unwrapped so it is not shown as a 1-tuple."""
    if not args.verbose:
        return
    if len(x) == 1:
        print(x[0])
    else:
        print(x)
class Direction(IntEnum):
    """Compass headings expressed as clockwise degrees from East.

    NOTE(review): not referenced anywhere in this part-2 solution (the
    waypoint encodes the heading instead); presumably kept for parity
    with part 1 — confirm before removing.
    """
    EAST = 0
    SOUTH = 90
    WEST = 180
    NORTH = 270
def rotate_waypoint(old_position, angle):
    """Rotate the waypoint's (x, y) offset *angle* degrees counter-clockwise
    about the ship, rounding back to integer coordinates."""
    radians = math.radians(angle)
    cos_a = math.cos(radians)
    sin_a = math.sin(radians)
    x, y = old_position
    return [round(cos_a * x - sin_a * y), round(sin_a * x + cos_a * y)]
# Ship state. NOTE(review): ship_direction is never read in this part-2
# version (the waypoint encodes the heading) — confirm before removing.
ship_direction = 0
ship_position = [0, 0]
waypoint_relative_position = [10, 1]
# Replay every instruction: N/S/E/W translate the waypoint, L/R rotate it
# about the ship, F moves the ship toward the waypoint *value* times.
with open(args.file) as file:
    for line in file:
        line = line.replace("\n", "")
        action = line[0]       # single-letter instruction
        value = int(line[1:])  # numeric operand
        if action == "N":
            waypoint_relative_position[1] += value
        elif action == "S":
            waypoint_relative_position[1] -= value
        elif action == "E":
            waypoint_relative_position[0] += value
        elif action == "W":
            waypoint_relative_position[0] -= value
        elif action == "L":
            waypoint_relative_position = rotate_waypoint(
                waypoint_relative_position, value
            )
        elif action == "R":
            # R rotates clockwise, i.e. a negative CCW angle.
            waypoint_relative_position = rotate_waypoint(
                waypoint_relative_position, -value
            )
        elif action == "F":
            ship_position[0] += waypoint_relative_position[0] * value
            ship_position[1] += waypoint_relative_position[1] * value
        vprint(
            "Instruction: {}{}, Ship Postion: {}, Waypoint Position: {}".format(
                action, value, ship_position, waypoint_relative_position
            )
        )
vprint("At the end of the instructions the Manhattan distance is:")
print((abs(ship_position[0]) + abs(ship_position[1])))