repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/home/date_time_testing_try_2.py | import time
from datetime import date
# Interactive birthday countdown: asks for a birth month/day and reports how
# long until the next occurrence of that date.
days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

while True:
    today = date.fromtimestamp(time.time())
    month = int(input('what is the month of your birthday?\nJanuary is 1, February is 2, March is 3, April is 4, May is 5, June is 6, July is 7, August is 8, September is 9, October is 10, November is 11, and December is 12\n'))
    my_birthday = date(today.year, month, int(input('what is the day of your birthday?\n')))
    # If this year's birthday already passed, the next one is next year.
    # BUG FIX: the original adjusted the year AFTER computing the delta, and
    # referenced my_birthday before it was ever assigned on the first pass.
    if my_birthday < today:
        my_birthday = my_birthday.replace(year=today.year + 1)
    # February length for the birthday's year (simple %4 rule, as in the
    # original; not century-accurate).
    days_in_month[1] = 29 if my_birthday.year % 4 == 0 else 28
    time_to_birthday = abs(my_birthday - today)
    # BUG FIX: the list is 0-indexed; the original indexed days_in_month[today.month],
    # which read the NEXT month's length and crashed with IndexError in December.
    days_in = days_in_month[today.month - 1]
    months = int(time_to_birthday.days / days_in)
    answer = months
    # Append a remainder-in-days suffix unless the wait is an exact number of
    # "months".  (The original compared an int against a timedelta here.)
    if months * days_in != time_to_birthday.days:
        answer = (str(months) + ' months and ' + str(time_to_birthday.days - months * days_in) + ' days')
    print(answer)
    print('or')
    print(str(time_to_birthday.days) + ' days')
    continue_ = input('would you like a new bday? (y/n)\n')
    if continue_ == 'n':
        break
    print('reloading')
    time.sleep(1)
print('SHUTTING DOWN')
|
jonahmakowski/PyWrskp | src/web21/app/routes.py | <filename>src/web21/app/routes.py
from flask import request, render_template, redirect
from app import app
import sys
import os
# Resolve the pyWrskp repo root: prefer the PYWRKSP env var, otherwise build
# the path interactively (optionally rooted at $HOME).
try:
    pyWrkspLoc = os.environ["PYWRKSP"]
except KeyError:
    os_ = input('Would you like to use the "HOME" env varible?')
    if os_ == 'y':
        # Path typed by the user, relative to the home directory.
        pyWrkspLoc = os.environ["HOME"] + input('Since you do not have the PYWRSKP env var '
                                                '\nPlease enter the pwd for the pyWrskp repo not including the '
                                                '"home" section')
    else:
        # Absolute path typed by the user.
        pyWrkspLoc = input('Since you do not have the PYWRSKP env var \nPlease enter the pwd for the pyWrskp repo)')
# pyWrskpLoc = '/Users/jonahmakowski/Desktop/Github/pyWrskp' #for debug, so you don't have to enter info
# Username of the currently logged-in user ('' = anonymous); set by login().
User = ''

# Static list of blog posts rendered by the /blogposts pages.
# 'creator'->'user' is the URL slug matched by the /post/<creator> route.
blogPosts_list = [
    {
        'creator': {'username': 'Jonah', 'user': 'Jonah'},
        'content': 'Why this website is better then the other',
        'info': 'It is home made by me! It also has many pages, unlike the first website I made!',
        'link': '/post/Jonah'
    },
    {
        'creator': {'username': "Mr. Payne's 4/5 class 2020-21", 'user': "Mr.Payne's+45+class+2020-21"},
        'content': 'Why in-person school is better then the online version',
        'info': "In-person school is so much better than online school "
                "because when we are in person we can chat, and eat together "
                "(note this was not written by Mr.Payne's class, it was written by Jonah)",
        'link': "/post/Mr.Payne's+45+class"
    },
    {
        'creator': {'username': 'Jonah', 'user': 'Jonah1'},
        'content': 'What does www stand for?',
        'info': 'What does www (like https://www...) mean? www means World Wide Web!',
        'link': '/post/Jonah1'
    },
    {
        'creator': {'username': 'Jonah', 'user': 'Jonah2'},
        'content': 'How to show the CEO info',
        'info': 'in your web address bar, remove "/link/Jonah2", and put "/ceo+info" '
                'instead, then press enter, and bingo, you got the CEO info!',
        'link': '/post/Jonah2'
    }]

# Placeholder contact details shown by the /ceo+info page.
CEO = {'name': '<NAME>',
       'emails': {
           '1': '<EMAIL>',
           '2': '<EMAIL>',
           '3': '<EMAIL>'},
       'link': 'https://www.youtube.com/channel/UC1ti62i-uMnBVAh9b_Pp3UA/'}
@app.route('/', methods=['POST', 'GET'])
@app.route('/welcome', methods=['POST', 'GET'])
def welcome():
    """Landing page."""
    return render_template('index.html', title='Welcome', User=User)


@app.route('/home', methods=['POST', 'GET'])
def home():
    """Main home page."""
    return render_template('home.html', title='Home', User=User)


@app.route('/home/blogposts', methods=['POST', 'GET'])
@app.route('/home/blogposts/', methods=['POST', 'GET'])
@app.route('/blogposts', methods=['POST', 'GET'])
def blog_posts():
    """Full-size listing of every blog post."""
    return render_template('posts.html', title='blog posts', posts=blogPosts_list)
@app.route('/post/<creator>', methods=['POST', 'GET'])
def post(creator):
    """Render the blog post whose creator slug matches *creator*.

    Falls back to a redirect home when no post matches.
    """
    found = None
    for item in blogPosts_list:
        if item['creator']['user'] == creator:
            found = item
            break
    if found is None:
        # NOTE(review): 404 is not a redirect status code — confirm intent.
        return redirect('/', code=404)
    # BUG FIX: the original rendered `post` — this view function itself —
    # instead of the matched post dict, so the template got a function object.
    return render_template('Blog_outline.html', title=found['creator']['username'] + "'s blog post", post=found)
@app.route('/home/blogposts/small', methods=['POST', 'GET'])
@app.route('/home/blogposts/small/', methods=['POST', 'GET'])
@app.route('/blogposts/small', methods=['POST', 'GET'])
@app.route('/blogposts+small', methods=['POST', 'GET'])
@app.route('/home/blogposts+small', methods=['POST', 'GET'])
def blog_post_small():
    """Compact listing of every blog post."""
    return render_template('posts_small.html', title='blog posts', posts=blogPosts_list)


@app.route('/music', methods=['POST', 'GET'])
def music():
    """Music page."""
    return render_template('music.html', title='Music')


@app.route('/calendar', methods=['POST', 'GET'])
def calendar():
    """Calendar page."""
    return render_template('calendar.html', title='calendar')
@app.route('/ceo+info', methods=['POST', 'GET'])
def ceo_info():
    """GET renders the CEO's contact details; POST redirects to CEO['link']."""
    if request.method == "POST":
        return redirect(CEO['link'])
    elif request.method == 'GET':
        name = 'Name: ' + CEO['name']
        email = 'Emails: {}, {}, {}'.format(CEO['emails']['1'], CEO['emails']['2'], CEO['emails']['3'])
        link = 'Youtube channel: ' + CEO['link']
        # Server-side echo for debugging.
        print(name + '\n' + email + '\n' + link)
        return render_template('CEO_info.html', title='CEO info', name=name, email=email, link=link)
    else:
        # Defensive: Flask only dispatches the declared methods, so this
        # branch should be unreachable.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/youtube', methods=['POST', 'GET'])
def youtube():
    """Youtube page."""
    return render_template('youtube.html', title='youtube')


@app.route('/page', methods=['POST', 'GET'])
def page():
    """Directory of all pages on this website."""
    return render_template('page.html', title='List of Pages | This is a list of pages on this website')
@app.route('/cac', methods=['POST', 'GET'])
def calculator():
    """Web calculator: POST computes num1 <type> num2, GET shows the form.

    Renders the question *q* and answer *a*; bad numeric input produces an
    error message instead of a 500.
    """
    if request.method == "POST":
        global pyWrkspLoc
        t = request.form.get('type')
        try:
            # BUG FIX: the original converted num1 with int() outside any
            # try (ValueError -> 500) and float(None) on a blank num2 raised
            # an uncaught TypeError.  Both are handled below now.
            num1 = float(request.form.get('num1'))
            try:
                num2 = float(request.form.get("num2"))
            except (TypeError, ValueError):
                num2 = None  # blank/invalid second operand; only valid for '^'
            if t == '+':
                a = num1 + num2
                q = '{} + {}'.format(num1, num2)
            elif t == '-':
                a = num1 - num2
                q = '{} - {}'.format(num1, num2)
            elif t == '*':
                a = num1 * num2
                q = '{} * {}'.format(num1, num2)
            elif t == '/':
                a = num1 / num2
                q = '{} / {}'.format(num1, num2)
            elif t == '**':
                a = num1 ** num2
                q = '{} ** {}'.format(num1, num2)
            elif t == '^':
                # '^' means square root of num1; num2 is ignored.
                from math import sqrt
                a = sqrt(num1)
                q = '√{}'.format(num1)
            else:
                a = "ISSUE CODE CAN NOT FIND NUMBERS NECESSARY, TALK TO THE OWNER OF THIS WEBSITE"
                q = a
        except (TypeError, ValueError) as error:
            # TypeError: an operation needed num2 but it was blank/invalid.
            # ValueError: num1 was not numeric (or sqrt of a negative).
            q = error
            a = 'The first or other string is not a int or float'
        return render_template('calculator.html', title='Calculator', a=a, q=q)
    elif request.method == 'GET':
        return render_template('calculator_redirect.html', title='Calculator Sender')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/passwords', methods=['GET', 'POST'])
def passwords():
    """Password generator: GET shows the form, POST builds the password."""
    if request.method == "POST":
        global pyWrkspLoc
        # The generator module lives elsewhere in the repo.
        sys.path.append(pyWrkspLoc + '/src/password_maker')
        from Creater import create
        password = create(request.form.get("letters"),
                          request.form.get("numbers"),
                          request.form.get("sc"),
                          request.form.get("super_c"),
                          int(request.form.get("length")),
                          'n',
                          False,
                          pyWrkspLoc)
        return render_template('password_show.html', title='Passwords', password=password)
    elif request.method == 'GET':
        return render_template('passwords.html', title='Passwords')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; sets the module-level User on success."""
    global User
    if request.method == "POST":
        global pyWrkspLoc
        password = request.form.get("password")
        User = request.form.get("username")
        # NOTE(review): hard-coded plaintext credential check — placeholder
        # only; User is set even when the password is wrong.
        if User == 'WhiteSwine' and password == '<PASSWORD>':
            return render_template('logged_in.html', title='logged in!', user=User)
        else:
            return '<h1>INCORRECT</h1>'
    elif request.method == 'GET':
        return render_template('login.html', title='login')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/noah', methods=['GET', 'POST'])
@app.route('/turtle', methods=['GET', 'POST'])
def turtle():
    """POST runs the turtle drawing script on the server; GET shows the page."""
    if request.method == "POST":
        global pyWrkspLoc
        sys.path.append(pyWrkspLoc + '/src/other/Other_from_2020-2021/home')
        try:
            from noah_and_me_turtle import run
            run()
        except: # I know there is a warning here, but I am not sure how to fix it, if you know how please do
            # (reviewer: `except Exception:` would silence the lint warning
            # without changing behaviour for ordinary errors)
            return redirect('/')
        return '<h1>Done</h1> <p>if nothing is happening, you have an error</p> <a href="/">return to home page</a>'
    elif request.method == 'GET':
        return render_template('turtle.html', title='Noah and me turtle')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/pygame/draw', methods=['GET', 'POST'])
def draw():
    """POST launches the pygame drawing program on the server; GET shows the page."""
    if request.method == "POST":
        global pyWrkspLoc
        sys.path.append(pyWrkspLoc + '/src/other/Other_from_2020-2021/classes')
        # NOTE(review): the imported name shadows this view function inside
        # the handler body.
        from Class_one import draw
        draw()
        return '<h1>Done</h1> <p>if nothing is happening, you have an error</p> <a href="/">return to home page</a>'
    elif request.method == 'GET':
        return render_template('pygame_draw.html', title='DRAW!')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
    """Feedback form page."""
    return render_template('feedback.html', title='Feedback')


@app.route('/fire', methods=['GET', 'POST'])
def fire():
    """Fire page."""
    return render_template('fire.html', title='Fire')
@app.route('/alarm', methods=['GET', 'POST'])
def alarm():
    """POST runs the alarm helper for the submitted time; GET shows the form."""
    if request.method == 'POST':
        global pyWrkspLoc
        sys.path.append(pyWrkspLoc + '/src/alarm')
        from time_only import work
        # NOTE(review): work() appears to run the alarm inside the request —
        # if it blocks until the alarm time, the HTTP request will time out;
        # confirm against src/alarm/time_only.py.
        work(request.form.get('hour'), request.form.get('min'), print_info=False)
        return '<h1>DING-DONG</h1> <a href="/">return to home page</a>'
    elif request.method == 'GET':
        return render_template('alarm.html', title='Alarm clock!')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/lola', methods=['GET', 'POST'])
def lola():
    """Lola image page."""
    return render_template('lola.html', title='Lola Image')
@app.route('/code', methods=['GET', 'POST'])
def code():
    """Encode/decode a message with the repo's coder-decoder module."""
    if request.method == 'POST':
        sys.path.append(pyWrkspLoc + '/src/coder-decoder')
        from coder import coderDecoder
        key = int(request.form.get('key'))
        coder_decoder = request.form.get('type')  # 'code' or 'decode'
        coder = coderDecoder(print_info=False)
        coder.add_vars(message=request.form.get('message'), key=key, )
        if coder_decoder == 'code':
            message = coder.code()
        elif coder_decoder == 'decode':
            message = coder.decode()
        else:
            # Unknown mode from the form; surface the error in the page.
            print('error within code')
            print('coder_decoder did not equal code or decode, exiting')
            message = 'error within code\ncoder_decoder did not equal code or decode, exiting'
        return render_template('coder_show.html', title='coder decoder show', message=message, key=key)
    elif request.method == 'GET':
        return render_template('coder.html', title='coder decoder')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/fun', methods=['GET', 'POST'])
def fun():
    """Fun page."""
    return render_template('fun.html', title='Fun')
@app.route('/translater', methods=['GET', 'POST'])
def translater():
    """Translate submitted text via the third-party `translate` package."""
    if request.method == 'POST':
        language = request.form.get('language')
        text = request.form.get('text')
        from translate import Translator
        translator = Translator(to_lang=language)
        trans = translator.translate(text)
        # NOTE(review): this assumes the library echoes the input back when
        # the language is unsupported — confirm against the `translate` docs.
        if text != trans:
            return trans
        elif text == trans:
            return '<h1>Language not supported</h1> ' \
                   '<p>This language you used is not one of the languages supported on this translater</p>'
    elif request.method == 'GET':
        return render_template('translate.html', title='translater')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/christmas', methods=['GET', 'POST'])
def christmas_tree():
    """Christmas tree page."""
    return render_template('christmas_tree.html', title='Christmas')
@app.route('/notes/write', methods=['GET', 'POST'])
def notes_write():
    """POST appends a note to the site's notes file; GET shows the editor."""
    if request.method == 'POST':
        sys.path.append(pyWrkspLoc + '/src/notes')
        from notes import Notes
        n = Notes(name=pyWrkspLoc + '/docs/txt-files/web21_notes.txt')
        # Read the form field once and reuse it.  (The original also carried
        # an unused `import json` here, now removed.)
        note = request.form.get('note')
        n.add_note(note)
        n.save_notes()
        return 'Your note {} was saved'.format(note)
    elif request.method == 'GET':
        return render_template('notes.html', title='Notes writer')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/notes/read', methods=['GET', 'POST'])
def notes_read():
    """Render every saved note ('You have 0 notes' when none exist)."""
    import json
    try:
        with open(pyWrkspLoc + '/docs/txt-files/web21_notes.txt') as json_file:
            notes = json.load(json_file)
    except (OSError, ValueError):
        # Narrowed from a bare except: a missing/unreadable file or invalid
        # JSON means there is nothing to show yet (json.JSONDecodeError is a
        # ValueError subclass).
        notes = ['You have 0 notes']
    return render_template('notes_read.html', title='Notes reader', notes=notes)
@app.route('/schedule/create', methods=['GET', 'POST'])
def schedule_create():
    """POST adds an event to the web schedule, then shows the schedule."""
    if request.method == 'POST':
        sys.path.append(pyWrkspLoc + '/src/other')
        import schedule
        s = schedule.Schedule(show=False, name='web21_schedule_data.txt')
        # Form sends hour and minute separately; store as "HH:MM".
        start_time = '{}:{}'.format(request.form.get('start hr'), request.form.get('start min'))
        end_time = '{}:{}'.format(request.form.get('end hr'), request.form.get('end min'))
        name = request.form.get('name')
        s.create(start_time, end_time, name)
        return redirect('/schedule/read')
    elif request.method == 'GET':
        return render_template('schedule.html', title='schedule creater')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
@app.route('/schedule/read', methods=['GET', 'POST'])
def schedule_read():
    """Render every stored schedule event (empty list before any are saved)."""
    sys.path.append(pyWrkspLoc + '/src/other')
    import schedule
    s = schedule.Schedule(show=False, name='web21_schedule_data.txt')
    info = s.read()
    if info is None:
        # Schedule.read() returns None when the data file does not exist yet.
        info = []
    return render_template('schedule_read.html', title='schedule read', schedule=info)
@app.route('/schedule/empty', methods=['GET', 'POST'])
def schedule_empty():
    """Wipe the stored schedule, then return to the create form."""
    sys.path.append(pyWrkspLoc + '/src/other')
    import schedule
    s = schedule.Schedule(show=False, name='web21_schedule_data.txt')
    s.empty()
    return redirect('/schedule/create')
@app.route('/is-it-the-word', methods=['GET', 'POST'])
def is_it_the_word():
    """POST checks the submitted word; GET shows the form."""
    sys.path.append(pyWrkspLoc + '/src/other')
    from is_it_the_word import IsItAWord
    if request.method == 'POST':
        # FIX: renamed the local (formerly also `is_it_the_word`) so it no
        # longer shadows this view function.
        checker = IsItAWord(request.form.get('w'))
        info = checker.check_blind()
        return render_template('is_it_a_word_a.html', title='Is It The Word', info=info)
    elif request.method == 'GET':
        return render_template('is_it_the_word.html', title='Is It The Word')
    else:
        # Defensive: unreachable with the declared methods.
        print('request.method is not get or post it is {}'.format(request.method))
        exit(5)
|
jonahmakowski/PyWrskp | src/other/school_notes.py | <reponame>jonahmakowski/PyWrskp
import os
import datetime
import json
# Resolve the pyWrskp repo root from $PYWRKSP, else prompt for the path
# relative to the home directory.
try:
    pyWrksp = os.environ["PYWRKSP"]
except KeyError:
    pyWrksp = os.environ["HOME"] + input('Since you do not have the PYWRSKP env var '
                                         '\nPlease enter the pwd for the pyWrskp repo not including the '
                                         '"home" section')
# pyWrskp = '/Users/jonahmakowski/Desktop/Github/pyWrskp' #for debug, so you don't have to enter info
class SchoolNotes:
    """JSON-file-backed log of timestamped school notes."""

    def __init__(self, pywrskploc, name='school_notes.txt'):
        self.pywrskploc = pywrskploc
        # Notes are stored as one JSON list in the repo's txt-files folder.
        self.name = pywrskploc + '/docs/txt-files/' + name

    def add(self):
        """Prompt for a note and teacher, stamp them with now, and persist."""
        now = datetime.datetime.now()
        date = now.strftime("%d/%m/%Y")
        current_time = now.strftime("%H:%M:%S")
        print("current time is: " + current_time)
        print("today's date is: " + date)
        note = input('What is the note?\n')
        teacher = input("What is the teacher's name?\n")
        self.group = {'date': date, 'current_time': current_time, 'note': note, 'teacher': teacher}
        self.save()

    def save(self, custom=False):
        """Write all events to disk; unless *custom*, append self.group first."""
        if not custom:
            self.read()
            self.all_events.append(self.group)
        with open(self.name, 'w') as outfile:
            json.dump(self.all_events, outfile)

    def read(self):
        """Load events from disk into self.all_events and return them.

        BUG FIX: the original crashed with FileNotFoundError on first use;
        a missing file now just means there are no notes yet.
        """
        try:
            with open(self.name) as json_file:
                self.all_events = json.load(json_file)
        except FileNotFoundError:
            self.all_events = []
        return self.all_events

    def p(self):
        """Print all notes, newest date first, then offer to add one."""
        self.read()
        self.all_events = sorted(self.all_events, key=lambda i: i['date'], reverse=True)
        # BUG FIX: the original tested `self.all_events is []`, which is
        # always False (identity vs a fresh list), so it always claimed the
        # list was empty and never printed the notes.
        if self.all_events:
            print('date, time, teacher, note')
            for item in self.all_events:
                # BUG FIX: column order now matches the header (teacher
                # before note; the original printed them swapped).
                print('{}, {}, {}, {}'.format(item['date'], item['current_time'], item['teacher'], item['note']))
        else:
            print('There is nothing in this list')
        info = input('Would you like to add a new event (y/n)?')
        if info == 'y':
            self.add()

    def empty(self):
        """Erase every stored note."""
        self.all_events = []
        self.save(custom=True)
if __name__ == "__main__":
    # Simple CLI: add a note, print all notes, or wipe the file.
    s = SchoolNotes(pywrskploc=pyWrksp)
    do = input('Would you like to add, print or empty?')
    if do == 'add':
        s.add()
    elif do == 'print':
        s.p()
    elif do == 'empty':
        s.empty()
|
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/classes/polygon.py | import turtle
class Polygon:
    """A regular polygon drawable with a turtle-like pen object."""

    def __init__(self, sides, side_length):
        self.sides = sides
        self.side_length = side_length

    def draw(self, t, loc=(0, 0), fill=False):
        """Draw the polygon with pen *t*, starting from *loc*.

        When *fill* is true the shape is filled.
        """
        t.pu()
        t.goto(loc)
        t.pd()
        if fill:
            t.begin_fill()
        for _ in range(self.sides):
            t.fd(self.side_length)
            t.left(360 / self.sides)
        if fill:
            # BUG FIX: the original called end_fill() unconditionally, even
            # when begin_fill() was never called.
            t.end_fill()
class Square(Polygon):
    """A four-sided regular polygon."""

    def __init__(self, side_length):
        # A square is simply a regular polygon with four sides.
        super().__init__(4, side_length)

    def show_area(self, t):
        """Write the square's area at the pen's current position."""
        t.write(self.side_length * self.side_length)
# Demo: draw an outlined decagon, a filled decagon, and a square with its area.
t = turtle.Turtle()
shape = Polygon(10, 1)
shape.draw(t)
shape.draw(t, loc=(100, 100), fill=True)
square = Square(20)
square.draw(t, loc=(200, 200))
square.show_area(t)
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/classes/class_1_extras.py | <gh_stars>0
import pygame

# Initialise all pygame modules before the drawing helpers are used.
pygame.init()
def which_color(x, radius, current_color, screen):
    """Map a toolbar x-coordinate to a new brush radius or colour.

    The toolbar is a row of 50-pixel-wide bands: x in [700, 750) selects a
    radius of 25, bands from 650 down select smaller radii then colours,
    and anything below 50 is red.  (radius, current_color and screen are
    accepted for the caller's convenience but never read.)
    """
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    RED = (255, 0, 0)
    GREEN = (0, 255, 0)
    BLUE = (0, 0, 255)
    PURPLE = (251, 0, 255)
    YELLOW = (255, 247, 0)
    TEAL = (0, 255, 255)
    ORANGE = (255, 196, 0)
    LIME = (132, 255, 0)
    # Radius-25 band checked first: only 700 <= x < 750 selects it, exactly
    # like the original leading `if x < 750 and x >= 700` test (an x of 750
    # or more falls through to the bands below, as before).
    if 700 <= x < 750:
        return 25
    # Remaining bands, highest threshold first; the first match wins.
    bands = [
        (650, 20), (600, 15), (550, 10), (500, 5),
        (450, WHITE), (400, LIME), (350, ORANGE), (300, TEAL),
        (250, YELLOW), (200, PURPLE), (150, BLACK), (100, BLUE),
        (50, GREEN),
    ]
    for threshold, value in bands:
        if x >= threshold:
            return value
    return RED
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/classes/Combo.py | import tkinter as tk
class app(tk.Frame):
    """Launcher window with buttons for the various class projects."""

    def __init__(self, i, master=None):
        super().__init__(master)
        self.master = master
        # Per-feature first-click counters (0 means "first click is a no-op",
        # matching the original behaviour).
        self.i = i
        self.i_2 = i
        self.i_3 = i
        self.pack()
        self.createWidgets()

    def createWidgets(self):
        """Build one column of launcher buttons."""
        self.btnFrame = tk.Frame(self)
        self.btnpaint = tk.Button(self.btnFrame, text='Paint', command=self.paint)
        self.btnpaint_2 = tk.Button(self.btnFrame, text='Paint v2', command=self.paint_2)
        self.btnhome = tk.Button(self.btnFrame, text='Home stuff', command=self.home)
        self.btntextedit = tk.Button(self.btnFrame, text='text editer', command=self.text_edit)
        # BUG FIX: the original passed command=self.master.destroy() — the
        # trailing () destroyed the window immediately at construction time
        # instead of binding the method as the click handler.
        self.btnclose = tk.Button(self.btnFrame, text='close', command=self.master.destroy)
        self.btns = [self.btnpaint, self.btnpaint_2, self.btnhome, self.btntextedit, self.btnclose]
        self.btnFrame.grid(row=0, column=0, sticky='ns')
        for row, item in enumerate(self.btns):
            item.grid(row=row, column=0, sticky='ew')

    def paint(self):
        """Launch the Class_one paint program (after the first click)."""
        import Class_one
        if self.i != 0:
            Class_one.draw()
        self.i += 1

    def paint_2(self):
        """Launch the class_2 paint program (after the first click)."""
        import class_2
        if self.i_2 != 0:
            class_2.paint()
        self.i_2 += 1

    def home(self):
        """Launch the class_3 homework program (after the first click)."""
        import class_3_homework as class_3
        if self.i_3 != 0:
            class_3.extra()
        self.i_3 += 1

    def text_edit(self):
        """Open the Class_4 text editor in its own window."""
        import Class_4 as class_4
        self.df_save_loc = '/home/jonah/Thonny files/TXT_files/'  # change to folder name where you want auto saves as def
        self.df_name = 'testing'  # you can chnage def save name
        self.root_2 = tk.Tk()
        self.root_2.title('Text Editer')
        self.root_2.rowconfigure(0, minsize=800, weight=1)
        self.root_2.columnconfigure(1, minsize=600, weight=1)
        # BUG FIX: the original referenced bare df_name/df_save_loc/root_2,
        # which are undefined at module scope (NameError when clicked).
        self.app_2 = class_4.Application(self.df_name, self.df_save_loc, master=self.root_2)
        self.app_2.mainloop()
# Build the root window and hand it to the launcher frame.
root = tk.Tk()
i = 0  # feature counters start at 0 (see app.paint etc.)
root.rowconfigure(0, minsize=9999, weight=1)
root.columnconfigure(1, minsize=9999, weight=1)
app = app(i, master=root)
app.mainloop()
# Interactive group picker: reads names until a blank line, then forms
# partners, trios, two half-and-half teams, or draws one name at a time.


def _collect_names():
    """Read names from stdin until the user enters a blank line."""
    names = []
    while True:
        name = input('What name would you like to add?\n')
        if name == '':
            break
        names.append(name)
    return names


def _pop_random(names):
    """Remove and return a uniformly random entry from *names*."""
    from random import randint
    return names.pop(randint(0, len(names) - 1))


a_or_b = input('would you like partners, three person groups, half and half, or one person?\n')

if a_or_b == 'partners':
    names = _collect_names()
    while len(names) >= 2:
        b = _pop_random(names)
        d = _pop_random(names)
        print(d + ' and ' + b + ' are working as a group!')
        input()  # pause between announcements
    if len(names) == 1:
        print('you have an uneven amount of people in your list so we could not find a partner for ' + names[0])
    print('those are all your names')
    exit()

if a_or_b == 'one person':
    names = _collect_names()
    while names:
        print('The name that was chosen is ' + _pop_random(names) + '!')
        input()  # pause between announcements
    print('Those are all your names!')
    exit()

if a_or_b == 'three person groups':
    names = _collect_names()
    while len(names) >= 3:
        b = _pop_random(names)
        d = _pop_random(names)
        f = _pop_random(names)
        print(d + ', ' + b + ' and ' + f + ' are working as a group!')
        input()  # pause between announcements
    if len(names) == 1:
        print('you have an uneven amount of people in your list so we could not find a group for ' + names[0])
    if len(names) == 2:
        print('you have an uneven amount of people in your list so we could not find a group for ' + names[0] + ' and ' + names[1])
    print('those are all your names')
    exit()

if a_or_b == 'half and half':
    import time
    names = _collect_names()
    # An odd-sized roster leaves one leftover name for team 1.
    half_float = len(names) / 2
    half_int = int(half_float)
    team_1 = []
    team_2 = []
    while len(names) >= 2:
        team_1.append(_pop_random(names))
        team_2.append(_pop_random(names))
    if half_float != float(half_int):
        team_1.append(names[0])
    print('Team 1')
    for member in team_1:
        print(member)
        time.sleep(0.5)
    print('Team 2')
    for member in team_2:
        print(member)
        time.sleep(0.5)
|
jonahmakowski/PyWrskp | src/other/docs.py | <filename>src/other/docs.py<gh_stars>0
class docs:
    """Tiny line-oriented text-file editor driven by input()."""

    def __init__(self):
        self.file_name = input('What is the file name?')

    def read(self):
        """Print the file's contents and return them as a string.

        BUG FIX: the original returned the already-exhausted file object
        (and never closed it), so add() appended nothing; returning the
        text makes add() actually preserve the old contents.
        """
        with open(self.file_name, 'r') as file:
            contents = file.read()
        print(contents)
        return contents

    def create(self, pr=True, add=None, text=None):
        """Write the file from lines typed by the user (a lone space ends input).

        *add*  — existing text appended after the typed lines.
        *text* — optional starting text (the original silently discarded it).
        """
        if text is None:
            text = ''
        if pr:
            print('What would you like to write in your file')
        while True:
            temp_text = input('')
            if temp_text == ' ':
                break
            text += temp_text + '\n'
        if add is not None:
            text += add
        with open(self.file_name, 'w') as file:
            file.write(text)

    def add(self):
        """Prepend new typed lines to the file's current contents."""
        existing = self.read()
        self.create(pr=False, add=existing)
# Simple CLI driver for the docs editor.
doc = docs()
info = input("Would you like read, create, or add (for add, you need to have a file and you can't remove old sections)")
if info == 'add':
    doc.add()
elif info == 'create':
    doc.create()
elif info == 'read':
    doc.read()
jonahmakowski/PyWrskp | src/other/schedule.py | <gh_stars>0
import os
import json
class Schedule:
    """JSON-file-backed daily schedule stored under the pyWrskp repo."""

    def __init__(self, show=True, name='schedule_data.txt'):
        # Locate the repo root from $PYWRKSP, falling back to a prompt.
        try:
            self.pyWrskp = os.environ["PYWRKSP"]
        except KeyError:
            self.pyWrskp = os.environ["HOME"] + input('Since you do not have the PYWRSKP env var '
                                                      '\nPlease enter the pwd for the pyWrskp repo not including the '
                                                      '"home" section')
        self.name = self.pyWrskp + '/docs/txt-files/' + name
        if show:
            # Interactive mode: ask which operation to run right away.
            info = input('What would you like to do? \noptions: print, create, or empty (p/c/e)')
            if info == 'c':
                self.create_built_in()
            elif info == 'p':
                self.show()
            elif info == 'e':
                self.empty()

    def create_built_in(self):
        """Prompt for an event's times and name, then store it.

        De-duplicated: delegates to create() instead of repeating its
        load-append-save logic (as the original did).
        """
        end_time = input('What is the end time for this event?\nplease use 24hr clock')
        name = input('What is the name of this event?')
        start_time = input('What is the start time for this event?\nplease use 24hr clock')
        self.create(start_time, end_time, name)

    def create(self, start_time, end_time, name):
        """Append one event and persist the whole list."""
        all_events = self.read()
        if all_events is None:
            all_events = []
        all_events.append({'start_time': start_time, 'end_time': end_time, 'name': name})
        with open(self.name, 'w') as outfile:
            json.dump(all_events, outfile)

    def read(self):
        """Return events sorted by start_time (descending), or None if no file."""
        try:
            with open(self.name) as json_file:
                info = json.load(json_file)
            info = sorted(info, key=lambda i: i['start_time'], reverse=True)
        except FileNotFoundError:
            info = None
        return info

    def show(self):
        """Pretty-print every stored event."""
        events = self.read()
        if events is None or events == []:
            print('you have no events saved')
        else:
            print('Name, start, end')
            for item in events:
                print('{}, {}, {}'.format(item['name'], item['start_time'], item['end_time']))

    def empty(self):
        """Erase all stored events."""
        with open(self.name, 'w') as outfile:
            json.dump([], outfile)
if __name__ == "__main__":
    # Interactive mode: the constructor prompts for print/create/empty.
    s = Schedule()
|
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/class2020/class_4_2.0_.py | <reponame>jonahmakowski/PyWrskp
import tkinter as tk
from tkinter.filedialog import askopenfilename, asksaveasfilename
class Application(tk.Frame):
    """Minimal text-editor frame with open / save-as / quick-save buttons."""

    def __init__(self, name, save_loc, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.createWidgets()
        self.name = name          # file name relative to save_loc, used by quick save
        self.save_loc = save_loc  # folder used by the quick 'save' button

    def createWidgets(self):
        """Build the text area and the button column."""
        self.txtEdit = tk.Text(self)
        self.btnFrame = tk.Frame(self)
        self.btnOpen = tk.Button(self.btnFrame, text='Open', command=self.openFile)
        self.btnSaveas = tk.Button(self.btnFrame, text='Save as', command=self.saveFile)
        self.btnquit = tk.Button(self.btnFrame, text='Close', command=self.master.destroy)
        self.btnsave = tk.Button(self.btnFrame, text='save', command=self.save)
        self.txtEdit.grid(row=0, column=1, sticky='nsew')
        self.btnFrame.grid(row=0, column=0, sticky='ns')
        self.btnOpen.grid(row=0, column=0, sticky='ew')
        self.btnSaveas.grid(row=1, column=0, sticky='ew')
        self.btnsave.grid(row=2, column=0, sticky='ew')
        self.btnquit.grid(row=3, column=0, sticky='ew')

    def openFile(self):
        """Load a user-chosen file into the text area."""
        print('open in TXT folder only!!!')
        self.fp = askopenfilename(filetypes=[('all files that work with this code', ['*.py', '*.txt', '*.docx']), ('Text Files', '*.txt'), ('Python Files', '*.py'), ('All Files', '*.*')])
        # NOTE(review): slicing at a fixed offset of 35 assumes one specific
        # folder-prefix length — confirm against the actual TXT folder path.
        self.name = self.fp[35:]
        if not self.fp:
            return
        self.txtEdit.delete(1.0, tk.END)
        with open(self.fp, 'r') as inFile:
            txt = inFile.read()
        self.txtEdit.insert(tk.END, txt)

    def saveFile(self):
        """Save the text area to a user-chosen path (also updates the quick-save name)."""
        self.fp = asksaveasfilename(filetypes=[('all files that work with this code', ['*.py', '*.txt', '*.docx']), ('Text Files', '*.txt'), ('Python Files', '*.py'), ('All Files', '*.*')])
        self.name = self.fp[len(self.save_loc):]
        if not self.fp:
            return
        with open(self.fp, 'w') as outFile:
            txt = self.txtEdit.get(1.0, tk.END)
            outFile.write(txt)

    def save(self):
        """Quick-save the text area into the configured auto-save folder."""
        # BUG FIX: the original read the module-level global `save_loc`
        # instead of the per-instance folder passed to __init__, so the
        # constructor argument was silently ignored.
        with open(self.save_loc + self.name, "w") as f:
            try:
                txt = self.txtEdit.get(1.0, tk.END)
                f.write(txt)
                print('FYI it saved')
            except Exception:
                # Narrowed from a bare except: keep the best-effort save but
                # don't swallow KeyboardInterrupt/SystemExit.
                print('FYI, your file did not save')
# Default auto-save folder for the quick 'save' button.
save_loc = '/home/jonah/Thonny files/TXT_files/' # change to folder name where you want auto saves

root = tk.Tk()
root.title('Text Editer')
root.rowconfigure(0, minsize=800, weight=1)
root.columnconfigure(1, minsize=600, weight=1)
app = Application(None, save_loc, master=root)
app.mainloop()
jonahmakowski/PyWrskp | src/classroom_tools/classroom_tools.py | from random import randint as r
import classroom_tools_extras
import sys
sys.path.append('../notes')
import notes
class Classroom:
    """Interactive classroom helpers: random teams, math games, a note log."""

    def __init__(self):
        self.roster = []
        self.teacher = ''
        self.princaple = ''
        self.school_name = ''
        self.add_roster()

    def add_roster(self):
        """Read student names from stdin until a blank line."""
        print('Enter all of the students names:')
        while True:
            name = input('')
            if name == '':
                break
            self.roster.append(name)

    def team_maker(self):
        """Randomly split the roster into a user-chosen number of teams."""
        num_of_teams = int(input('How many teams would you like?'))
        teams = [[] for _ in range(num_of_teams)]
        # BUG FIX: the original aliased the list (roster_copy = self.roster),
        # so making teams permanently emptied the real roster.
        roster_copy = list(self.roster)
        while len(roster_copy) >= num_of_teams:
            for team in teams:
                person_num = r(0, len(roster_copy) - 1)
                team.append(roster_copy.pop(person_num))
        # Hand any leftovers out one per team, starting from the first team.
        i = 0
        while roster_copy:
            teams[i].append(roster_copy.pop(0))
            i += 1
        for team_num, team in enumerate(teams, start=1):
            print('Team {}'.format(team_num))
            for member in team:
                print(member)
            print('\n\n')

    def math_game(self):
        """Launch one of the math games from classroom_tools_extras."""
        ty = input('What type of game would you like?')
        if ty == 'normal':
            classroom_tools_extras.basic_math_game()
        elif ty == 'challange':
            classroom_tools_extras.challange_math_game(self.roster)

    def classroom_log(self):
        """Show the class log, then optionally add a note or clear the log."""
        log = notes.Notes(name='log.jonahtext')
        log.print_notes()
        add = input('would you like to add a note? (y/n/c)')
        if add == 'y':
            info = input('what is your note?')
            log.add_note(info)
            log.save_notes()
            print('your note, "{}" has been added to your saved notes!'.format(info))
        elif add == 'c':
            log.clear()
# CLI driver: build a roster interactively, then run the chosen tool.
testing_class = Classroom()
do = input('What do you want to do?')
if do == 'team maker':
    testing_class.team_maker()
elif do == 'math game':
    testing_class.math_game()
elif do == 'classroom log':
    testing_class.classroom_log()
|
jonahmakowski/PyWrskp | src/math-game/game_extras.py | <gh_stars>0
from random import randint
def add(min_num, max_num):
    """Return two random addends in [min_num, max_num] and their sum.

    BUG FIX: the original called randint(max_num, min_num) with the bounds
    reversed, which raises ValueError whenever min_num < max_num.
    """
    num1 = randint(min_num, max_num)
    num2 = randint(min_num, max_num)
    return num1, num2, num1 + num2
def sub(min_num, max_num, neg):
    """Return two random operands in [min_num, max_num] and their difference.

    When neg == 'n' the operands are swapped (or re-rolled) so the answer
    is never negative.

    BUG FIXES: the original reversed the randint bounds (ValueError for any
    min_num < max_num) and its recursive retry assigned the whole
    (num1, num2, a) tuple to `a` instead of unpacking it.
    """
    num1 = randint(min_num, max_num)
    num2 = randint(min_num, max_num)
    a = num1 - num2
    if neg == 'n' and a < 0:
        # Swap so the larger operand comes first.
        num1, num2 = num2, num1
        a = num1 - num2
    if neg == 'n' and a < 0:
        # Safety net kept from the original structure: re-roll entirely.
        num1, num2, a = sub(min_num, max_num, neg)
    return num1, num2, a
def multi(min_num, max_num):
    """Return two random factors in [min_num, max_num] and their product.

    BUG FIX: the original called randint(max_num, min_num) with the bounds
    reversed, which raises ValueError whenever min_num < max_num.
    """
    num1 = randint(min_num, max_num)
    num2 = randint(min_num, max_num)
    return num1, num2, num1 * num2
def div(min_num, max_num, dec):
    """Return two random operands in [min_num, max_num] and their quotient.

    When dec == 'n', re-rolls until the quotient is a whole number.

    BUG FIXES: the original reversed the randint bounds; its "no decimals"
    check used isinstance(a, float), which is True for EVERY result of `/`
    in Python 3 (infinite recursion); and num2 == 0 raised
    ZeroDivisionError — all three are handled here.
    """
    num1 = randint(min_num, max_num)
    num2 = randint(min_num, max_num)
    if num2 == 0:
        return div(min_num, max_num, dec)
    a = num1 / num2
    if dec == 'n' and not a.is_integer():
        num1, num2, a = div(min_num, max_num, dec)
    return num1, num2, a
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/classes/class_3_homework.py | <reponame>jonahmakowski/PyWrskp
class House:
    """A household of people and pets with small interactive helpers."""

    def __init__(self, family_members_list, pets_list):
        self.family = family_members_list
        self.pets = pets_list

    def new_family_member(self):
        """Interactively add a pet or a person."""
        pet = input('Are you adding a pet? (y/n) ')
        if pet == 'y':
            name = input('What is the name of the pet? ')
            self.pets.append(name)
        elif pet == 'n':
            name = input('What is the name of the person you want to add? ')
            self.family.append(name)
        else:
            # BUG FIX: on any other answer the original fell through to the
            # print below with `name` undefined (NameError).
            return
        print(name + ' has been added')

    def show_family(self):
        """Print pets, then people."""
        print('Pet(s):')
        for pets in self.pets:
            print(pets)
        print('people:')
        for people in self.family:
            print(people)

    def food_ideas(self):
        """Suggest dinner ideas until one is accepted or the ideas run out."""
        from random import randint as r
        foods = ['Pad Thai', 'Salmon', 'Chicken Wings', 'Tomato Soup', 'Hamburgers', 'Steak', 'Bullgogi', 'eggs', 'home made pizza', 'Lasagna', 'sandwichs', 'Pancakes']
        while True:
            if len(foods) <= 0:
                print('those are all our ideas, sorry')
                break
            a = r(0, len(foods) - 1)
            print('your food plan for today is: ' + foods[a])
            del foods[a]
            not_working = input('Is this plan not working?\ndo you need a new idea? (y/n) ')
            if not_working == 'n':
                break

    def what_to_do(self):
        """Suggest activities until one is accepted or the ideas run out."""
        ideas = ['go on a walk to pick up the mail', 'play on your device', 'watch a movie', 'go to a park']
        if len(self.pets) >= 1:
            # BUG FIX: the original referenced the bare name `pets`,
            # which is undefined here (NameError); it meant self.pets.
            ideas.append('take ' + self.pets[0] + ' on a walk')
        from random import randint as r
        while True:
            if len(ideas) <= 0:
                print('those are all our ideas, sorry')
                break
            a = r(0, len(ideas) - 1)
            print('you should: ' + ideas[a])
            not_working = input('Is this idea not working?\ndo you need a new idea? (y/n) ')
            del ideas[a]
            if not_working == 'n':
                break
def new_house():
    """Interactively collect people and pet names (blank line ends each list)
    and build a House from them."""
    humans = []
    pets = []
    print('enter the names of all the people')
    while True:
        entry = input('')
        if entry == '':
            break
        humans.append(entry)
    print('enter the names of all the pets')
    while True:
        entry = input('')
        if entry == '':
            break
        pets.append(entry)
    return House(humans, pets)
def extra():
    """Interactive driver: manage one or more House objects until 'close'."""
    print('please create your house')
    houses = [new_house()]
    current = 0
    while True:
        choice = input('What would you like to do? ')
        if choice == 'add new family members':
            houses[current].new_family_member()
        elif choice == 'print family list':
            houses[current].show_family()
        elif choice == 'food ideas':
            houses[current].food_ideas()
        elif choice == 'what do':
            houses[current].what_to_do()
        elif choice == 'change house':
            target = int(input('What number would you like to change to? '))
            if target <= len(houses) - 1:
                current = target
            else:
                current = len(houses) - 1
                print('The valuse you entered is too high, you have been put at your possible value.')
            print('To create more house please enter "create new house" the next time the "What would you like to do?" question pops up')
        elif choice == 'create new house':
            houses.append(new_house())
        elif choice == 'close':
            return
        else:
            print('That is not a option')
extra() |
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/home/caculator.py | <gh_stars>0
# This is a simple calculator in the shell
def use(num1, t, num2):
    """Evaluate a simple two-operand calculation.

    num1/num2 are numeric strings; t is one of '+', '-', '*', '/', '**'
    or '^' (square root of the first operand only).
    Returns (question, answer); on bad numeric input the question is the
    raised exception and the answer is an explanatory message.
    """
    try:
        x = float(num1)
        y = float(num2)
        if t == '+':
            answer = x + y
            question = '{} + {}'.format(x, y)
        elif t == '-':
            answer = x - y
            question = '{} - {}'.format(x, y)
        elif t == '*':
            answer = x * y
            question = '{} * {}'.format(x, y)
        elif t == '/':
            answer = x / y
            question = '{} / {}'.format(x, y)
        elif t == '**':
            answer = x ** y
            question = '{} ** {}'.format(x, y)
        elif t == '^':
            from math import sqrt
            answer = sqrt(x)
            question = '√{}'.format(x)
        else:
            answer = "ISSUE CODE CAN NOT FIND NUMBERS NECESSARY, TALK TO THE OWNER OF THIS WEBSITE"
            question = answer
    except AssertionError as error:
        question = error
        answer = 'The linux_interaction() function was not executed'
    except ValueError as error:
        question = error
        answer = 'The first or other string is not a int or float'
    return question, answer
# interactive driver: collect the operands/operator and show the result
first = input('What is the first number?')
op = input('What is the type?')
second = input('what is the other number?')
question, answer = use(first, op, second)
print(str(question) + ' = ' + str(answer))
|
jonahmakowski/PyWrskp | src/other/random_gen.py | from random import randint as r
from time import sleep as wait
# RANDOM GEN
class RandomGen:
def __init__(self, version):
self.lis = []
if version == 'num':
length = int(input('What do you want to range to be, 0 to'))
self.lis = range(length)
self.num()
exit()
elif version == 'names':
print('Enter the names, nothing means end')
while True:
name = input()
if name == '':
break
self.lis.append(name)
self.names()
def num(self):
current_num = r(0, len(self.lis))
print('The number is {}'.format(current_num))
def names(self):
current_num = r(0, len(self.lis) - 1)
current_name = self.lis[current_num]
print('THE LUCKY WINNER IS: {}'.format(current_name.upper()))
wait(5)
print('Name was entered as {}'.format(current_name))
# entry point: ask which mode to run ('num' or 'names')
Random = RandomGen(input('What version'))
|
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/home/fun_game.py | <gh_stars>0
import time

# tiny name/password gate followed by a couple of toy activities
name = input('What is your name?\n')
known_passwords = {'Jonah': 'Jo', 'Noah': 'No'}
if name not in known_passwords:
    exit()
if input('What is the password?\n') != known_passwords[name]:
    exit()
print('Hello ' + name + ' welcome back!')
do = input('What would you like to do?\n')
if do == 'count':
    count_how_long = int(input('What number should I count to ' + name + '?\n'))
    for i in range(count_how_long):
        time.sleep(1)
        print(str(i + 1))
    exit()
if do == 'turtle draw':
    import turtle
    t = turtle.Turtle()
    t.speed(0)
    sides = int(input('How many sides would you like?\n'))
    length = int(input('How long would you like your sides to be?\n'))
    for i in range(sides):
        t.forward(length)
        t.right(360 / sides)
|
jonahmakowski/PyWrskp | src/other/game.py | <reponame>jonahmakowski/PyWrskp<gh_stars>0
from time import sleep
class Game:
    """Endless terminal monster-fighting game.

    The player picks attacks by name and fights random monsters until death.
    When cheat is True the names 'Jonah' and 'Noah' receive stat multipliers.
    """

    def __init__(self, debug=False, health=200, cheat=True):
        self.number = 1         # encounters started
        self.number_killed = 1  # monsters defeated
        self.health = health
        self.attacks = [{'name': 'blast', 'damage min': 1, 'damage max': 20},
                        {'name': 'punch', 'damage min': 5, 'damage max': 10},
                        {'name': 'super blast', 'damage min': -100, 'damage max': 100}]
        self.debug = debug
        self.name = input('What is your name?\n')
        if cheat and self.name == 'Jonah':
            self.health = health * 10
            self.attacks.append({'name': 'total destruction', 'damage min': 999999999999998, 'damage max': 999999999999999})
            for item in self.attacks:
                if item['damage min'] > 0:
                    item['damage min'] = item['damage min'] * 10
                if item['damage min'] < 0:
                    item['damage min'] = item['damage max'] - 1
                if item['damage max'] > 0:
                    item['damage max'] = item['damage max'] * 10
        if cheat and self.name == 'Noah':
            self.health = health * 3
            for item in self.attacks:
                if item['damage min'] > 0:
                    item['damage min'] = item['damage min'] * 3
                if item['damage min'] < 0:
                    item['damage min'] = item['damage max'] - 1
                if item['damage max'] > 0:
                    item['damage max'] = item['damage max'] * 3
            if self.debug:
                print('debug info: self.name == Jonah, so cheat mode has been enabled')
        self.print_info()
        if self.debug:
            print('debug info: self.print_info, done')
        sleep(2)
        while True:  # fight forever; monster() calls exit() when the player dies
            self.choose_bad()
            if self.debug:
                print('debug info: self.choose_bad, done \nnum {}'.format(self.number))
            self.number += 1

    def print_info(self):
        """Print the player's stats, attack list and the monster roster."""
        print('Hello {}!'.format(self.name))
        print('You have {} health'.format(self.health))
        print('Weapons you can use:')
        for item in self.attacks:
            name = item['name']
            name = name.upper()
            print('\t{} ({} - {} damage)'.format(name, item['damage min'], item['damage max']))
        print('Bad Guys:')
        print('\tGLOP - 10-20 attack damage - 50 health')
        print('\tBlock - 50 attack damage - 20 health')
        print('\tRANDOM KILLER - UNKNOWN - UNKOWN')

    def choose_bad(self):
        """Pick a random monster and run one fight."""
        from random import randint
        num = randint(1, 3)
        if self.debug:
            print('debug info: num in self.choose_bad is {}'.format(num))
        if num == 1:
            self.monster('glop', 20, 30, 50)
            self.number_killed += 1
        elif num == 2:
            self.monster('block', 50, 50, 20)
            self.number_killed += 1
        elif num == 3:
            self.monster('<NAME>LER', randint(1, 100), randint(100, 200), randint(100, 200))
        elif self.debug:
            print('debug info: issue, code could not find nums nessary\nsection: self.choose_bad')
        print('\n')

    def monster(self, name, attack_max, attack_min, monster_health):
        """Fight one monster.

        attack_max/attack_min bound the monster's damage (either order is
        accepted) and monster_health is its starting HP.
        """
        from random import randint
        # fixed: a stray `monster_health = 50` overrode the parameter, giving
        # every monster 50 HP no matter what choose_bad specified
        # fixed: callers pass the bounds in (max, min) order, which made
        # randint(attack_min, attack_max) raise ValueError; normalise them once
        lo, hi = min(attack_min, attack_max), max(attack_min, attack_max)
        print('you meet a {}!'.format(name))
        while monster_health > 0 and self.health > 0:
            skip = False
            attack = input('What attack do you use {}? \n'.format(self.name))
            attack = attack.lower()
            if self.debug:
                print('debug info: attack input = {}'.format(attack))
            for item in self.attacks:
                if item['name'] == attack:
                    attack_dic = item
                    break
            else:
                attack_dic = False
            if attack_dic is False:
                print('{} is not an attack name'.format(attack))
                print('Printing info again:')
                print('\n')
                self.print_info()
                skip = True
            if not skip:
                damage = randint(attack_dic['damage min'], attack_dic['damage max'])
                print('The {} lost {} health'.format(name, damage))
                monster_health -= damage
                print('The {} now has {} health'.format(name, monster_health))
                if monster_health > 0:
                    damage = hi if lo == hi else randint(lo, hi)
                    print('The {} did {} damage'.format(name, damage))
                    self.health -= damage
                    print('You now have {} health'.format(self.health))
        if self.health <= 0:
            print('you died!')
            print('GAME OVER')
            print('You killed {} bad guys'.format(self.number_killed))
            exit()
        else:
            print('GOOD JOB!')
            print('You killed the {} with {} health to spare!'.format(name, self.health))
            print('You killed {} bad guys so far'.format(self.number_killed))
User = Game(debug=False)  # add debug=True for debug info; fixed: __init__ has no 'jonah' parameter
|
jonahmakowski/PyWrskp | src/password_maker/password_saver.py | <reponame>jonahmakowski/PyWrskp
import json
import os
# locate the PyWrskp repo root: prefer the PYWRKSP env var, otherwise ask the user
try:
    pyWrkspLoc = os.environ["PYWRKSP"]
except KeyError:
    pyWrkspLoc = os.environ["HOME"] + input('Since you do not have the PYWRSKP env var '
                                            '\nPlease enter the pwd for the pyWrskp repo not including the '
                                            '"home" section')
passwords = []  # in-memory list of {'website': ..., 'password': ...} records
def add_password(passw, web):
    """Append a {website, password} record to the module-level password list."""
    global passwords
    entry = {'website': web, 'password': passw}
    passwords.append(entry)
def print_passwords(pywrskp):
    """Load saved passwords from <pywrskp>/docs/txt-files/passwords.txt into
    the global list and print them, or report that none are saved."""
    global passwords
    try:
        with open(pywrskp + '/docs/txt-files/passwords.txt') as json_file:
            passwords += json.load(json_file)
    except FileNotFoundError:
        pass  # first run: nothing saved yet
    # fixed: `passwords is []` is an identity test against a fresh list and is
    # always False, so the listing never printed and the empty-list message
    # never showed; use truthiness instead
    if passwords:
        for item in passwords:
            print('{}: {}'.format(item['website'], item['password']))
    else:
        print("you don't have any passwords saved!")
def save_passwords(pywrskp):
    """Write the in-memory password list as JSON under the repo's docs folder."""
    path = pywrskp + '/docs/txt-files/passwords.txt'
    with open(path, 'w') as outfile:
        json.dump(passwords, outfile)
# interactive entry point: show existing passwords, optionally add one
print_passwords(pyWrkspLoc)
if input('would you like to add any passwords? (y/n)') == 'y':
    password = input('<PASSWORD> your password?')
    website = input('what webiste is this password being used by?')
    add_password(password, website)
    save_passwords(pyWrkspLoc)
    print('your password, {} has been added to your saved passwords!'.format(password))
|
from random import randint as rand

# Roll an n-sided die a chosen number of times and print a tally per face.
print('Welcome to DICE SPIN!')
while True:
    try:
        sides = int(input('How many sides would you like your dice to have? '))
        break
    except ValueError:  # fixed: bare except also swallowed Ctrl-C
        print('you did not enter a number')
# fixed: range(1, sides) left out the top face, so e.g. a d6 could never roll 6
nums = list(range(1, sides + 1))
spin_nums = []
while True:
    try:
        spins = int(input('How many times would you like to spin your die? '))
        break
    except ValueError:
        print('you did not enter a number')
for _ in range(spins):
    spin_nums.append(nums[rand(0, len(nums) - 1)])
print('Now displaying numbers!')
spin_nums.sort()
# tally how many times each face came up
counts = {}
for num in spin_nums:
    counts[num] = counts.get(num, 0) + 1
for face in range(1, sides + 1):
    if face in counts:
        print('\n' + str(face) + ', was rolled ' + str(counts[face]) + ' times!')
from time_only import work

# collect a 24-hour time from the user and hand it to the alarm helper
hour = int(input('What is the hour number - form 1 to 24'))
minute = int(input('What is the min number'))
work(hour, minute)
|
import tkinter as tk

# registries of created widgets so handlers can look them up / destroy them later
labels = {}
buttons = {}
entrys = {}

root= tk.Tk()
# main drawing surface; all widgets are positioned through canvas windows
canvas1 = tk.Canvas(root, width = 1000, height = 1000/2, relief = 'raised')
canvas1.pack()
label1 = tk.Label(root, text='Calculate')
label1.config(font=('helvetica', 14))
canvas1.create_window(1000/2, 25, window=label1)
labels['label1'] = label1
# first operand entry
entry1 = tk.Entry(root, justify='center')
entry1.place(x=0, y=100, width=1000, height=25)
#canvas1.create_window(1000/2, 100, window=entry1)
# second operand entry
entry2 = tk.Entry(root, justify='center')
entry2.place(x=0, y=150, width=1000, height=25)
#canvas1.create_window(1000/2, 150, window=entry2)
e3 = 0      # currently selected operator symbol ('+', '-', '*', '/')
label4 = 0  # label showing the selected operator (replaced by fuction())
def fuction(symbol):
    """Show the currently selected operator symbol between the entry boxes."""
    global label4
    label4 = tk.Label(root, text=symbol, font=('helvetica', 9))
    canvas1.create_window(1000 / 2, 140, window=label4)
    labels['label4'] = label4
# button callbacks: each stores its operator in the global `e3`
# and refreshes the on-screen operator label via fuction()
def m():
    # multiply
    global e3
    e3 = '*'
    fuction(e3)
def d():
    # divide
    global e3
    e3 = '/'
    fuction(e3)
def a():
    # add
    global e3
    e3 = '+'
    fuction(e3)
def s():
    # subtract
    global e3
    e3 = '-'
    fuction(e3)
def cac():
    """'=' button: read both entries, apply the operator in `e3` and display
    the result (split across several labels when the text is very long)."""
    global a
    global e1
    global e2
    global e3
    if e1 == '' or e2 == '':
        return
    e1 = int(entry1.get())
    e2 = int(entry2.get())
    if e3 == '+':
        a = e1 + e2
    elif e3 == '-':
        a = e1 - e2
    elif e3 == '*':
        a = e1 * e2
    elif e3 == '/':
        a = e1 / e2
    text = '{} {} {} = {}'.format(e1, e3, e2, a)
    if len(text) > 150:
        # too long for one label: show each part on its own line
        text = '{}'.format(e1)
        text2 = '{}'.format(e3)
        text3 = '{}'.format(e2)
        text4 = '='
        text5 = '{}'.format(a)
        height = 270
        adding = 25
        global label5
        global label6
        global label7
        global label8
        global label4
        label4 = tk.Label(root, text=text, font=('helvetica', 10))
        canvas1.create_window(1000/2, height, window=label4)
        labels['label4'] = label4
        height += adding
        label5 = tk.Label(root, text=text2, font=('helvetica', 10))
        canvas1.create_window(1000/2, height, window=label5)
        labels['label5'] = label5  # fixed: stored label4 under 'label4' again
        height += adding
        label6 = tk.Label(root, text=text3, font=('helvetica', 10))
        canvas1.create_window(1000/2, height, window=label6)
        labels['label6'] = label6
        height += adding
        label7 = tk.Label(root, text=text4, font=('helvetica', 10))
        canvas1.create_window(1000/2, height, window=label7)
        labels['label7'] = label7
        height += adding
        label8 = tk.Label(root, text=text5, font=('helvetica', 10))
        canvas1.create_window(1000/2, height, window=label8)
        labels['label8'] = label8
    else:
        global label3
        label3 = tk.Label(root, text=text, font=('helvetica', 10))
        canvas1.create_window(1000/2, 270, window=label3)
        labels['label3'] = label3
    print('{} {} {} = {}'.format(e1, e3, e2, a))
def clear():
    """'C' button: empty both entry fields and remove every result label."""
    entry1.delete(0, 'end')
    entry2.delete(0, 'end')
    label4.destroy()
    try:
        # single-label result path
        label3.destroy()
        del labels['label3']
    except:
        # multi-label result path
        label4.destroy()
        del labels['label4']
        label5.destroy()
        del labels['label5']
        label6.destroy()
        del labels['label6']
        label7.destroy()  # fixed: was label7.destory() -> AttributeError
        del labels['label7']
        label8.destroy()  # fixed: was label8.destory() -> AttributeError
        del labels['label8']
# lay out the operator buttons in a row, plus clear ('C') and equals ('=')
num = 430     # x position of the first operator button
height = 225  # y position of the operator row
add = 37      # horizontal spacing between buttons
button6 = tk.Button(text='C', command=clear, font=('helvetica', 11, 'bold'))
canvas1.create_window(1000/2, 450, window=button6)
button2 = tk.Button(text='*', command=m, font=('helvetica', 9, 'bold'))
canvas1.create_window(num, height, window=button2)
num += add
button3 = tk.Button(text='/', command=d, font=('helvetica', 9, 'bold'))
canvas1.create_window(num, height, window=button3)
num += add
button4 = tk.Button(text='+', command=a, font=('helvetica', 9, 'bold'))
canvas1.create_window(num, height, window=button4)
num += add
button5 = tk.Button(text='-', command=s, font=('helvetica', 9, 'bold'))
canvas1.create_window(num, height, window=button5)
num += add
button1 = tk.Button(text='=', command=cac, font=('helvetica', 9, 'bold'))
canvas1.create_window(num, height, window=button1)
# NOTE(review): `a = 0` rebinds the module name that previously held the '+'
# handler function; button4 keeps its reference, but nothing else can call a()
a = 0
e1 = 0
e2 = 0
root.mainloop()
|
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/classes/Class_one.py | <filename>src/other/Other_from_2020-2021/classes/Class_one.py
import class_1_extras
# here we start with pygame:
import pygame
def draw():
    """Simple pygame paint program: a colour/brush-size palette along the top
    edge and free-hand drawing everywhere else.

    Keys: SPACE fills the screen with the current colour (new background),
    'a' clears back to the current background.
    NOTE(review): indentation reconstructed; per-frame palette redraw assumed
    to run inside the main while loop -- confirm against the original file.
    """
    pygame.init()
    screen = pygame.display.set_mode([1375,750])
    pygame.display.set_caption("Paint")
    keep_going = True
    mouse_down = False
    # defines colors
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    RED = (255, 0, 0)
    GREEN = (0, 255, 0)
    BLUE = (0, 0, 255)
    PURPLE = (251, 0, 255)
    YELLOW = (255, 247, 0)
    TEAL = (0, 255, 255)
    ORANGE = (255, 196, 0)
    LIME = (132, 255, 0)
    currentColour = (0, 0, 0)
    radius = 10
    # defines rectangles (the clickable palette squares along the top edge)
    redRectangle = pygame.Rect((0, 0), (50, 50))
    greenRectangle = pygame.Rect((50, 0), (50, 50))
    blueRectangle = pygame.Rect((100, 0), (50, 50))
    blackRectangle = pygame.Rect((150, 0), (50, 50))
    purpleRectangle = pygame.Rect((200, 0), (50, 50))
    yellowRectangle = pygame.Rect((250, 0), (50, 50))
    tealRectangle = pygame.Rect((300, 0), (50, 50))
    orangeRectangle = pygame.Rect((350, 0), (50, 50))
    limeRectangle = pygame.Rect((400, 0), (50, 50))
    whiteRectangle = pygame.Rect((450, 0), (50, 50))
    dotRectangle5 = pygame.Rect((500, 0), (50,50))
    dotRectangle10 = pygame.Rect((550, 0), (50,50))
    dotRectangle15 = pygame.Rect((600, 0), (50,50))
    dotRectangle20 = pygame.Rect((650, 0), (50,50))
    dotRectangle25 = pygame.Rect((700, 0), (50,50))
    current_background = BLACK
    while keep_going:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                keep_going = False
            elif event.type == pygame.MOUSEBUTTONDOWN:
                mouse_down = True
            elif event.type == pygame.MOUSEBUTTONUP:
                mouse_down = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    # flood the screen with the brush colour and adopt it as background
                    screen.fill(currentColour)
                    current_background = currentColour
                    pygame.display.update()
                if event.key == pygame.K_a:
                    # clear the canvas back to the background colour
                    screen.fill(current_background)
                    pygame.display.update()
        if mouse_down:
            # if the event is pressing the mouse
            # get the current position of the mouse
            spot = pygame.mouse.get_pos()
            x = spot[0]
            y = spot[1]
            if y > 50 or x >= 750:
                # outside the palette strip: paint a dot
                pygame.draw.circle(screen, currentColour, spot, radius)
                pygame.display.update()
            elif y < 50 or x <= 750:
                # inside the palette strip: which_color returns either a new
                # brush radius (int) or a new colour tuple (or None)
                temp = class_1_extras.which_color(x, radius, currentColour, screen)
                temp_true = isinstance(temp, int)
                if temp_true == True:
                    radius = temp
                elif temp_true == False:
                    if temp != None:
                        currentColour = temp
        # draws rectangles
        pygame.draw.rect(screen, RED, redRectangle)
        pygame.draw.rect(screen, GREEN, greenRectangle)
        pygame.draw.rect(screen, BLUE, blueRectangle)
        pygame.draw.rect(screen, BLACK, blackRectangle)
        pygame.draw.rect(screen, PURPLE, purpleRectangle)
        pygame.draw.rect(screen, YELLOW, yellowRectangle)
        pygame.draw.rect(screen, TEAL, tealRectangle)
        pygame.draw.rect(screen, ORANGE, orangeRectangle)
        pygame.draw.rect(screen, LIME, limeRectangle)
        pygame.draw.rect(screen, WHITE, whiteRectangle)
        pygame.draw.rect(screen, current_background, dotRectangle5)
        pygame.draw.rect(screen, current_background, dotRectangle10)
        pygame.draw.rect(screen, current_background, dotRectangle15)
        pygame.draw.rect(screen, current_background, dotRectangle20)
        pygame.draw.rect(screen, current_background, dotRectangle25)
        # draws circles (brush-size swatches, shown in a contrasting colour)
        if current_background != WHITE:
            pygame.draw.circle(screen, WHITE, (525, 25), 5)
            pygame.draw.circle(screen, WHITE, (575, 25), 10)
            pygame.draw.circle(screen, WHITE, (625, 25), 15)
            pygame.draw.circle(screen, WHITE, (675, 25), 20)
            pygame.draw.circle(screen, WHITE, (725, 25), 25)
        else:
            pygame.draw.circle(screen, BLACK, (525, 25), 5)
            pygame.draw.circle(screen, BLACK, (575, 25), 10)
            pygame.draw.circle(screen, BLACK, (625, 25), 15)
            pygame.draw.circle(screen, BLACK, (675, 25), 20)
            pygame.draw.circle(screen, BLACK, (725, 25), 25)
        pygame.display.update()
    pygame.quit()
draw()
jonahmakowski/PyWrskp | src/catalog/catalogue.py | <gh_stars>0
# This is a simple catalog app
import json
import time
import fuction
import tkinter as tk
from fuzzywuzzy import process
class catalog:
    """Interactive book catalogue.

    Collects new books from the user, merges previously saved books from
    data.txt, and offers fuzzy search, web search and an admin login.
    """

    def __init__(self, books):
        self.books = books
        # keep asking for books until both fields are left blank
        while True:
            new_title = input('Please write the title of your book: ')
            new_author = input('Please write the name of the author that the book is written by: ')
            if new_title == '' and new_author == '':
                break
            self.books.append({'title': new_title, 'author': new_author})
        self.active = False  # becomes True only after a successful admin login
        self.start()

    def start(self):
        """Main flow: load old books, show all, run searches, then log in and save."""
        try:
            self.read()
        except Exception:  # missing or unreadable data.txt: start fresh
            print('could not find your "data.txt" file, please make sure it is in the same folder as this file')
            print('if you have not created a "data.txt" file or, have not run this code before, ignore this message')
            self.old_books = []
        fuction.wait()
        self.show()
        s = input('Would you like to search the books? (y/n) ')
        while s == 'y':
            self.search()
            s = input('Would you like to search the books again? (y/n) ')
        s = input('Would you like to search the web? (y/n) ')
        if s == 'y':
            self.online_search()
        self.login()
        self.save()

    def save(self):
        """Write the new-book list to data.txt as JSON."""
        with open('data.txt', 'w') as outfile:
            json.dump(self.books, outfile)

    def read(self):
        """Load previously saved books from data.txt into self.old_books."""
        with open('data.txt') as json_file:
            self.old_books = json.load(json_file)

    def show(self):
        """List the new and previously saved book titles."""
        print('NEW BOOKS')
        print('=========')
        for book in self.books:
            print(book['title'])
        fuction.wait()
        print('OLD BOOKS:')
        print('==========')
        for book in self.old_books:
            print(book['title'])

    def search(self):
        """Fuzzy-match a title/author query against all known books."""
        qs = self.books + self.old_books
        query_t = input('What is the title of the book you are looking for?')
        query_a = input('What author of the book you are looking for?')
        query = {'title': query_t, 'author': query_a}
        ratios = process.extract(query, qs)
        for item in ratios:
            print(item[0]['title'] + ', ' + item[0]['author'])

    def online_search(self):
        """Delegate a web search to the fuction helper module."""
        fuction.online_search()

    def login(self):
        """Open the tkinter login window; set self.active on success."""
        import login
        self.root = tk.Tk()
        self.root.title('Login')
        self.root.rowconfigure(0, minsize=800, weight=1)
        self.root.columnconfigure(1, minsize=600, weight=1)
        # fixed: was `Application(master=root)` -- `root` was undefined here and
        # Application was never imported into this namespace.
        # NOTE(review): assumes the login module exposes Application -- confirm.
        self.app = login.Application(master=self.root)
        self.app.mainloop()
        if self.app.active == True and self.app.pas == self.app.code:
            self.active = True

    def admin(self):
        """Admin console: add or delete books (only after a successful login)."""
        if self.active:
            while True:
                do = input('what do you want to do admin')
                if do == 'change books':
                    do = input('what type?')
                    if do == 'del':
                        num = int(input('What number do you want to del?'))
                        del self.books[num]
                    if do == 'add':
                        new_book_a = input('What is the author?')
                        new_book_t = input('What is the title?')
                        # fixed: referenced undefined names new_title/new_author
                        self.books.append({'title': new_book_t, 'author': new_book_a})
                if do == 'n':
                    break
# start the interactive catalogue with an empty list of new books
cat = catalog([])
|
def draw_hex(sides, length_of_sides, t):
    """Draw a regular polygon with `sides` edges of `length_of_sides` using the
    turtle-like object `t` (must provide forward() and right())."""
    angle = 360 / sides  # exterior angle of a regular polygon
    for _ in range(sides):
        t.forward(length_of_sides)
        t.right(angle)
def turn(length, turn, left_or_right, t):
    """Advance `length` unit steps, rotating `turn` degrees after each one.

    left_or_right selects t.right or t.left; any other value prints an error
    and does nothing.
    """
    if left_or_right == 'right':
        rotate = t.right
    elif left_or_right == 'left':
        rotate = t.left
    else:
        print('you have an error with the fuction "turn", becasue for left_or_right, you put in {}, not left or right'.format(left_or_right))
        return
    for _ in range(length):
        t.forward(1)
        rotate(turn)
jonahmakowski/PyWrskp | src/other/Other_from_2020-2021/class2020/class_4_2.0.py | <reponame>jonahmakowski/PyWrskp
import tkinter as tk
from tkinter.filedialog import askopenfilename, asksaveasfilename
class Application(tk.Frame):
    """Minimal text-editor window: a Text widget beside Open / Save as /
    save / Close buttons."""
    def __init__(self, name, save_loc, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.create_widgets()
        self.name = name          # file name relative to save_loc (updated on save-as)
        self.save_loc = save_loc  # base folder used to derive self.name
    def create_widgets(self):
        """Build the text area and the button column, then grid them."""
        self.txtEdit = tk.Text(self)
        self.btnFrame = tk.Frame(self)
        self.btnOpen = tk.Button(self.btnFrame, text='Open', command=self.open_file)
        self.btnSaveas = tk.Button(self.btnFrame, text='Save as', command=self.save_file)
        self.btnquit = tk.Button(self.btnFrame, text='Close', command=self.master.destroy)
        self.btnsave = tk.Button(self.btnFrame, text='save', command=self.save)
        self.txtEdit.grid(row=0, column=1, sticky='nsew')
        self.btnFrame.grid(row=0, column=0, sticky='ns')
        self.btnOpen.grid(row=0, column=0, sticky='ew')
        self.btnSaveas.grid(row=1, column=0, sticky='ew')
        self.btnsave.grid(row=2, column=0, sticky='ew')
        self.btnquit.grid(row=3, column=0, sticky='ew')
    def open_file(self):
        """Ask for a file and load its contents into the editor."""
        print('open in TXT folder only!!!')
        self.fp = askopenfilename(filetypes=[('all files that work with this code', ['*.py', '*.txt', '*.docx']),
                                             ('Text Files', '*.txt'),
                                             ('Python Files', '*.py'),
                                             ('All Files', '*.*')])
        if not self.fp:
            return  # dialog cancelled
        self.txtEdit.delete(1.0, tk.END)
        with open(self.fp, 'r') as inFile:
            txt = inFile.read()
        self.txtEdit.insert(tk.END, txt)
    def save_file(self):
        """Ask for a destination and write the editor contents there."""
        self.fp = asksaveasfilename(filetypes=[('all files that work with this code', ['*.py', '*.txt', '*.docx']),
                                               ('Text Files', '*.txt'), ('Python Files', '*.py'),
                                               ('All Files', '*.*')])
        # NOTE(review): assumes the chosen path lies under save_loc -- confirm
        self.name = self.fp[len(self.save_loc):]
        if not self.fp:
            return
        with open(self.fp, 'w') as outFile:
            txt = self.txtEdit.get(1.0, tk.END)
            outFile.write(txt)
    def save(self):
        """Re-save to the last opened/saved path.

        NOTE(review): raises AttributeError if called before open_file or
        save_file ever set self.fp -- confirm this is acceptable.
        """
        with open(self.fp, "w") as f:
            try:
                txt = self.txtEdit.get(1.0, tk.END)
                f.write(txt)
                print('FYI it saved')
            except:
                print('FYI, your file did not save')
# build the editor window and start the tkinter event loop
save_loc = '/home/jonah/Thonny files/TXT_files/' # change to folder name where you want auto saves
root = tk.Tk()
root.title('Text Editer')
root.rowconfigure(0, minsize=800, weight=1)
root.columnconfigure(1, minsize=600, weight=1)
app = Application(None, save_loc, master=root)
app.mainloop()
|
jonahmakowski/PyWrskp | src/sudoku/solve_mm1.py | <gh_stars>0
"""
Early prototype of the sudoku solver, witten by <NAME>.
A simple 'brute force' algorithm was implemented; the prototype appears to work, but
enhancements and testing is desired (cf comments at the bottom of this file).
"""
import numpy as np
class Board:
    """A 9x9 sudoku board filled by recursive brute force (see solve)."""

    def __init__(self, nrows=9, ncols=9):
        self.nrows = nrows
        self.ncols = ncols
        self.board = np.zeros((nrows, ncols), dtype='i1')
        self.finished = False  # True, if all slots filled
        self.depth = 0         # current depth of the recursion
        self.debug = 0         # debug level
        self.nzeros = 0        # number of non-zeros (i.e., predefined slots)

    def define_tst(self, board=None):
        """Install `board` (9 rows of 9 ints, 0 = empty) or a built-in test puzzle.

        fixed: the None-checks used `!= None`/`== None`; with a numpy array
        argument those evaluate element-wise and raise "the truth value of an
        array ... is ambiguous" -- identity tests are the correct form.
        """
        if board is not None:
            self.board = board
        else:
            self.board = [
                [5, 3, 0, 0, 7, 0, 1, 0, 0],  # slot in col 6 set to 1 (to assure a unique solution?)
                [6, 0, 0, 1, 9, 5, 0, 0, 0],
                [0, 9, 8, 0, 0, 0, 0, 6, 0],
                [8, 0, 0, 0, 6, 0, 0, 0, 3],
                [4, 0, 0, 8, 0, 3, 0, 0, 1],
                [7, 0, 0, 0, 2, 0, 0, 0, 6],
                [0, 6, 0, 0, 0, 0, 2, 8, 0],
                [0, 0, 0, 4, 1, 9, 0, 0, 5],
                [0, 0, 0, 0, 8, 0, 0, 0, 0]]

    def accepted(self, num, row, col):
        """Return True if `num` can be placed at (row, col): no duplicate in
        the row, the column, or the 3x3 box containing the cell."""
        for j in range(0, self.ncols):  # check all cols in the row
            if self.board[row][j] == num:
                return False
        for i in range(0, self.nrows):  # check all rows in the column
            if self.board[i][col] == num:
                return False
        y0 = (row // 3) * 3  # first row of the 3x3 box
        x0 = (col // 3) * 3  # first col of the 3x3 box
        for i in range(0, 3):
            for j in range(0, 3):
                if self.board[y0 + i][x0 + j] == num:
                    return False
        return True  # new entry num can be accepted

    def solve(self):
        """Fill empty slots by depth-first search with backtracking.

        NOTE(review): nesting reconstructed from the original comments; the
        `return` after the digit loop is the backtracking step -- confirm.
        """
        if self.debug > 2:
            print('recursion depth = ', self.depth)
        if self.finished:
            return
        for row in range(0, self.nrows):
            for col in range(0, self.ncols):
                if self.board[row][col] == 0:  # skip already filled slots/cells
                    for num in range(1, 10):
                        if self.accepted(num, row, col):
                            self.board[row][col] = num  # fill and continue with next free slot
                            if self.debug and row == 8:
                                print('reached 8-th row, now at col = ', str(col))
                                self.print()
                            if row == self.nrows - 1 and row == col:
                                self.finished = True  # all slots filled
                                print('Finished: all slots assigned. The recursion depth = ', self.depth)
                                return
                            if self.finished:
                                print('ERROR: should not come here after all slots are filled.')
                                print('num = ', str(num), ' row = ', str(row), ' col = ', str(col))
                                return
                            self.depth += 1
                            self.solve()  # try the next free slot(s)
                            # returns here from the lower recursion level
                            self.depth -= 1
                            if self.finished:
                                return
                            self.board[row][col] = 0  # current board is infeasible, reset the slot
                    return  # no digit fits this empty slot: backtrack
        return

    def info(self):
        """Count and report the predefined (non-zero) slots."""
        self.nzeros = 0
        for row in range(0, self.nrows):
            for col in range(0, self.ncols):
                if self.board[row][col] > 0:
                    self.nzeros += 1
        print('The board has ', str(self.nzeros), 'defined slots, i.e., ', str(round(self.nzeros/81., 3)), ' density.')

    def print(self, comm=None):
        """Pretty-print the board, preceded by the optional comment `comm`."""
        if comm:
            print(comm)
        for row in range(0, self.nrows):
            print(' row[' + str(row) + ']: ', self.board[row])
if __name__ == '__main__':
    # build the demo board, report its density, solve it and print dev notes
    board = Board()
    board.define_tst()
    board.info()
    board.print('Initial board:')
    board.solve()
    board.print('Filled board:')
    print('Finished.')
    print('\nInfo: this is an early prototype of the sudoku solver. The following enhancements are desired/planned:')
    print('\t* Code review; refactorization, if desired.')
    print('\t* Input the board definition in flexible formats (currently the definition is hard-coded)')
    print('\t* Handling infeasible board definitions.')
    print('\t* Handling non-unique solutions.')
    print('\t* Extensive testing, especially on hard problems.')
    print('\t* Consider improvements to the solver algorithm, if needed for hard problems.')
    print('\t* Optional integration with the board generator (to be developed).')
    print('\t* User-friendly output (maybe through the Web browser).')
    print('\t* Optional integration (as hint/checker) with the user-control interactive solver.')
# -*- coding: utf-8 -*-
"""
NyaaScraper

NyaaScraper is a feed parser and a web scraper for nyaa.si and sukebei.nyaa.

NyaaScraper modules:
    DataProcess.py --> contains the DataProcess class: the methods needed to
        extract data from both the RSS feed and the website.
    NyaaRSS.py --> contains the NyaaRSS class: the methods needed to get data
        from the RSS feed.
    NyaaScraper.py --> contains the NyaaScraper class: the methods needed to
        get data from the website using BeautifulSoup.
"""
__version__ = "1.0.0"
__license__ = "MIT"
__source_url__ = "https://github.com/MichaelAlexanderDenton/nyaa-scrapper"
__author__ = "<NAME>"
# imports
from .DataProcess import DataProcess
from .NyaaRSS import NyaaRSS
from .NyaaScraper import NyaaScraper
MichaelAlexanderDenton/Nyaa-scrapper | NyaaPy/DataProcess.py | <reponame>MichaelAlexanderDenton/Nyaa-scrapper<filename>NyaaPy/DataProcess.py
"""
----Creating the automated directory needs its own function called by __init__
----requests.exceptions.HTTPError: 404 Client Error: NOT FOUND for url: https://nyaa.si/download/647951
"""
import helpers
import requests
import urllib.parse
import urllib3
import re
import os
import pprint
from collections import OrderedDict
from bs4 import BeautifulSoup
class DataProcess(object):
def __init__(self, directory=None):
    """Store the nyaa.si endpoint URLs and ensure the download directory exists.

    directory: optional custom download directory; when None a default
    'automated' folder is created next to this file (filesystem side effect).
    """
    self.base__url = "http://nyaa.si/?"
    self.base__rss_url = "https://nyaa.si/?page=rss"
    self.base__torrent__link = "https://nyaa.si/download/"
    self.base__view__link = "https://nyaa.si/view/"
    self.registration__link = "https://nyaa.si/register"
    self.base__dir = os.path.dirname(__file__)
    self.default__dir = self._create_default_directory(dirname=directory)
def _check_registration(self):
    """Return a message saying whether nyaa.si account registration is open.

    The registration page shows a <pre> block while sign-ups are disabled.
    """
    page = BeautifulSoup(requests.get(self.registration__link).content, 'lxml')
    if page.find('pre'):
        return "Registations are currently closed."
    return "Registrations are now open."
def _create_default_directory(self, dirname=None):
if dirname is not None:
if os.path.exists(dirname) == False:
print('custom directory does not exist')
os.mkdir(dirname)
return dirname
else:
mdir = os.path.join(self.base__dir, "automated")
if os.path.exists(mdir) == False:
os.mkdir(mdir)
print('Default directory created.')
else:
print('Default directory exists.')
return mdir
def _get_torrent_link(self, url):
BASE_TORRENT_LINK = self.base__torrent__link
torrent_id = re.findall(r'([0-9]+)', url)[0]
return '{0}{1}.torrent'.format(BASE_TORRENT_LINK, torrent_id)
# def create_torrent_link_by_id(self, id=int()):
# return '{0}{1}.torrent'.format(self.base__torrent__link, id)
def get_magnet_link(self, url):
    """Scrape the magnet URI from the torrent view page at `url`."""
    page = BeautifulSoup(requests.get(url).content, 'lxml')
    anchor = page.find('a', 'card-footer-item')
    return anchor.get('href').strip()
def _parse_rss_feed(self, url=None, limit=None):
if isinstance(limit, str):
if str(limit).isnumeric() == False:
raise ValueError('limit should not be a string.')
else:
limit = int(limit)
url = self.base__rss_url if url is None else url
html = requests.get(url).content
soup = BeautifulSoup(html, features='lxml')
# saving data as an ordered list
obj = OrderedDict({
"title" : 'Nyaa - Home - Torrent File Feed Parser',
"description": f'Feed Parser for Home',
"atom": {
'link': soup.find('atom:link').get('href'),
'rel': soup.find('atom:link').get('rel'),
'type': soup.find('atom:link').get('type'),
},
"data": list(),
})
_count = 0
# Find all torrent files and magnets
items = soup.find_all('item')
for item in (items[:limit] if limit is not None else items):
anime = OrderedDict()
anime['title'] = item.title.text.strip()
anime['torrent_file'] = self._get_torrent_link(item.guid.text.strip())
anime['info_link'] = {
"url" : item.guid.text.strip(),
"isPermaLink" : item.guid.get('isPermaLink')
}
anime['Published_at'] = item.pubdate.text.strip()
anime['seeders'] = item.find('nyaa:seeders').text.strip()
anime['leechers'] = item.find('nyaa:leechers').text.strip()
anime['downloads'] = item.find('nyaa:downloads').text.strip()
anime['infoHash'] = item.find('nyaa:infohash').text.strip()
anime['category'] ={
'id' : item.find('nyaa:categoryid').text.strip(),
'category__name' : item.find('nyaa:category').text.strip()
},
anime['file__size'] = item.find('nyaa:size').text.strip()
anime['comments'] = item.find('nyaa:comments').text.strip()
anime['is__trusted__torrent'] = {
'text' : item.find('nyaa:trusted').text.strip(),
'value': False if item.find('nyaa:trusted').text.strip() == 'No' else True,
},
anime['is__remake'] = item.find('nyaa:remake').text.strip()
_count += 1
obj['data'].append(anime)
print(f"Total of {_count} object(s) were created from the RSS feed.")
return obj
def _create_search_query(self, filter_=None, search_query=None, category=None, username=None, search_type=None):
base_url = self.base__rss_url if search_type == 'rss' else self.base__url
query_array = list()
query = str()
if filter_ is not None:
query_array.append(dict({"f" :helpers._create_filters_query(_filter=filter_)}))
if search_query is not None:
search_query = search_query.replace(' ', '+')
query_array.append(dict({"q": search_query}))
if category is not None:
query_array.append(dict({"c" : helpers._create_category_query(category=category)}))
if username is not None:
query_array.append(dict({"u" : username}))
for q in query_array:
for key, value in q.items():
query += f"&{key}={value}"
link = base_url + query
print(f"Search link: {link}")
return link
# RSS torrent file retrieval
def _rss_get_torrent_files(self, url=None, limit=None):
feed_data = self._parse_rss_feed(url=url, limit=limit)
return self._get_data(feed_data)
def _get_file(self, id_):
try:
# get file name first
html = requests.get((self.base__view__link + str(id_))).content
soup = BeautifulSoup(html, 'lxml')
title = soup.find('h3', 'panel-title').text.strip()
url = f"{self.base__torrent__link}{id_}.torrent"
mdir = self.default__dir
print(f"file name: {title}")
with requests.get(url, stream=True) as r:
r.raise_for_status()
invalid_chars = f'<>:"\/|?*'
pattern = r'[' + invalid_chars + ']'
new_name = re.sub(pattern, ' ', title)[:155] # As Windows files are 155 character-limited.
with open(os.path.join(mdir, 'log.txt'), 'a', encoding='utf-8') as log:
log.write(f"File saved: {new_name}.torrent \n")
with open(os.path.join(mdir, f"{new_name}.torrent"), "wb") as f:
for chunk in r.iter_content():
if chunk:
f.write(chunk)
finally:
print('file saved.')
# get multiple files from structure
def _get_data(self, item_list):
try:
_count = 0
mdir = self.default__dir
print(mdir)
for item in item_list['data']:
with requests.get(item['torrent_file'], stream=True) as r:
r.raise_for_status()
invalid_chars = f'<>:"\/|?*'
pattern = r'[' + invalid_chars + ']'
new_name = re.sub(pattern, ' ', item['title'])[:155] # As Windows files are 155 character-limited.
with open(os.path.join(mdir, 'log.txt'), 'a', encoding='utf-8') as log:
log.write(f"File saved: {new_name}.torrent \n")
with open(os.path.join(mdir, f"{new_name}.torrent"), "wb") as f:
for chunk in r.iter_content():
if chunk:
f.write(chunk)
_count += 1
finally:
print(f"Downloaded {_count} torrent files.")
def _get_magnet(self, id_, file=False):
view_link = "{0}{1}".format(self.base__view__link, str(id_))
html = requests.get(view_link).content
soup = BeautifulSoup(html, 'lxml')
if file == True:
with open(os.path.join((self.base__dir + r'\automated'), 'magnet.txt'), "w") as f:
f.write(soup.find('a', 'card-footer-item').get('href'))
f.close()
return
if file == False:
return print(soup.find('a', 'card-footer-item').get('href'))
# This is purely exprimental, not guaranteed to
# work properly long-term due to trackers changing their
# udp/port, but we'll see...
def create_magnet_link(self, infohash=str(), title=str()):
magnet_prefix = "magnet:?xt=urn:"
torrent_infohash = f"btih:{infohash}"
torrent_title = f"&dn={title}"
# Gathering trackers from torrents in the main page and the upload page
html = requests.get("https://nyaa.si/").content
soup = BeautifulSoup(html, 'lxml')
magnets = soup.find_all('i', "fa-magnet")
for m in magnets[:3]:
x = m.parent['href']
x = urllib.parse.unquote(x)
# print(f"{x}\n")
test = re.findall(r"&tr=(.+)announce", x)[0]
test = test.split("&")
# Constructing links
# ---- Under construction ----
magnet = f"{magnet_prefix}{torrent_infohash}{urllib.parse.quote(torrent_title)}"
for m in test:
magnet += f"&{urllib.parse.quote(m)}"
return magnet
########################################################
# Nyaa Scraper methods/properties
########################################################
def _parse_scraper_data(self, url="http://nyaa.si/", pages=None, per_page=None):
_count = 0
if pages == None:
print("Pages value was not provided.")
print("Scraping only the first page.")
else:
print(f"----Number of pages to scrape > {pages}")
data = OrderedDict({'data': list()})
try:
for p in range(1, (2 if pages is None else (pages + 1))):
if pages is not None:
# kind of a hack, but it works
if url[-1] == "/":
url = url + "?"
create_url = url + f"&p={p}"
print(create_url)
html = requests.get(create_url if pages is not None else url).content
soup = BeautifulSoup(html, "lxml")
items_list = soup.find('tbody').find_all('tr')
for i in items_list[0:per_page] if per_page is not None else items_list:
anime = OrderedDict()
anime_category = i.select('td:nth-of-type(1)') # Done
anime_name_info = i.select('td:nth-of-type(2)') # Done
anime_torrent_magnet = i.select('td:nth-of-type(3)') # Done
data_size = i.select('td:nth-of-type(4)') # Done
anime_timestamp = i.select('td:nth-of-type(5)') # Done
anime_seeders = i.select('td:nth-of-type(6)') # Done
anime_leechers = i.select('td:nth-of-type(7)') # Done
number_of_downloads = i.select('td:nth-of-type(8)') # Done
# Scrape title/hyperlink
for info in anime_name_info:
link = self.base__url + info.find('a')['href']
if info.find("a", 'comments'):
anime['title'] = info.find('a').findNext('a').get('title')
anime['link'] = link.split("#")[0].strip()
anime['comments'] = int(info.find('a', 'comments').get('title').split(' ')[0])
else:
anime['title'] = info.find('a').get('title')
anime['link'] = link
anime['comments'] = 0
# Scrape category
for find_category in anime_category:
anime['category'] = OrderedDict({
'category__name' : find_category.find('img')['alt'],
'category__tag' : find_category.find('a')['href'].split('=')[1]
})
# Scrape torrent/magnet links
for link in anime_torrent_magnet:
torrent__link = self.base__url + link.find('i', 'fa-download').parent['href']
magnet__link = link.find('i', 'fa-magnet').parent['href']
anime['torrent_file'] = torrent__link
anime['magnet_link'] = magnet__link
# Scrape filesize
anime['size'] = data_size[0].text
# Scrape timestamp
time = OrderedDict({
"created_at" : anime_timestamp[0].text,
"timestamp": anime_timestamp[0].get('data-timestamp'),
# "real_time": anime_timestamp[0].get('title') #JS-executed
})
anime['date'] = time
# Seeders/Leechers
seeders = anime_seeders[0].text
leechers = anime_leechers[0].text
anime['seeders'] = seeders
anime['leechers'] = leechers
# Downloads
dnwlds = number_of_downloads[0].text
anime['downloads'] = dnwlds
_count += 1
data['data'].append(anime)
if pages is not None:
print(f"End of page {p}")
print(f"Total data scraped: {_count} in {pages} pages.") if pages is not None else print(f'Total data scraped: {_count} in one page.')
return data
except (urllib3.exceptions.NewConnectionError, urllib3.exceptions.MaxRetryError, requests.exceptions.ConnectionError ) as e:
print('no connection error')
def _get_magnet_links(self, item_list):
try:
_count = 0
mdir = os.path.join(self.base__dir, "automated")
if os.path.exists(mdir) == False:
os.mkdir(mdir)
print('Directory created.')
else:
print('directory exists.')
with open(os.path.join(mdir, 'magnets.txt'), "w") as f:
for i in item_list['data']:
f.write(f"{i['magnet_link']} \n")
_count += 1
f.close()
finally:
print(f"Saved {_count} magnet links.")
|
# MichaelAlexanderDenton/Nyaa-scrapper | NyaaPy/helpers.py
# Search-filter names in the order nyaa.si expects: the tuple index is the
# numeric value used for the "f=" query parameter (0, 1, 2).
# BUG FIX: this was `tuple({...})` -- built from a set literal, so the
# ordering (and therefore the numeric filter code each name maps to) was
# arbitrary between interpreter runs. A tuple literal makes it deterministic.
filters = ('no filter', 'no remake', 'trusted only')
# m_cat = "Anime"
# s_cat = "Raw"
# Main categories and their sub-categories. The list index of the main
# category and the index of the sub-category form the "c=<main>_<sub>"
# query value consumed by _create_category_query().
cats = [
    {"All Categories": [None]},
    {"Anime": [
        "None",
        "Anime Music Video",
        "English-translated",
        "Non-English-translated",
        "Raw",
    ]},
    {"Audio": [
        "None",
        "Lossless",
        "Lossy",
    ]},
    {"Literature": [
        "None",
        "English-translated",
        "Non-English-translated",
        "Raw",
    ]},
    # NOTE(review): nyaa.si labels this category "Live Action"; "Raw Action"
    # looks like a typo but is kept so existing lookups keep working.
    {"Raw Action": [
        "None",
        "English-translated",
        "Idol/Promotional Video",
        "Non-English-translated",
        "Raw",
    ]},
    {"Pictures": [
        "None",
        "Graphics",
        "Photos",
    ]},
    {"Software": [
        "None",
        "Applications",
        "Games",
    ]},
]
def _create_category_query(category=()) -> str:
    """Translate a (main_category, sub_category) pair into nyaa's "c=" value.

    Returns "<main>_<sub>" where both parts are list indices into ``cats``;
    a main category of "All Categories" or None maps to "0_0".

    BUG FIX: the signature previously used ``-> str()`` (annotating with the
    empty string, not the type) and a ``tuple()`` call as the default.
    """
    user_main_cat = category[0]
    user_sub_cat = category[1]
    print("main cat {0}. sub cat {1}".format(user_main_cat, user_sub_cat))
    main_cat = str()
    sub_cat = str()
    for i, c in enumerate(cats):
        if user_main_cat == "All Categories" or user_main_cat is None:
            main_cat = "0"
            sub_cat = "0"
            break
        for value, key in c.items():
            if value == user_main_cat:
                main_cat = i
                for v, k in enumerate(key):
                    if k == user_sub_cat:
                        sub_cat = v
    print("Main category: {0} - sub-category:{1}".format(main_cat, sub_cat))
    return "{0}_{1}".format(main_cat, sub_cat)
def _create_filters_query(_filter='') -> int:
    """Return the numeric code for a named search filter.

    The code is the filter's index in the module-level ``filters`` tuple.
    Raises ValueError for an unknown filter name.

    BUG FIX: the signature previously used ``-> int()`` (annotating with the
    integer 0, not the type) and ``str()`` as the default value.
    """
    if _filter in filters:
        return filters.index(_filter)
    raise ValueError('Invalid filter input. check documentation for more info.')
|
MichaelAlexanderDenton/Nyaa-scrapper | NyaaPy/NyaaRSS.py | <gh_stars>0
"""
TODO:
---overwrite/not existing torrent files/data
---Check query if user has submitted valid input
---Add more debug console data.
---if page returns empty, put an exception.
---category needs to be parsed and converted.
---get files by their tier (trusted, success, not-trusted, neutral...)
"""
from DataProcess import DataProcess
import json
import requests
import re
from json import JSONDecodeError
import pprint
import string
class NyaaRSS(DataProcess):
    """RSS client for nyaa.si.

    Thin convenience layer over DataProcess: each public method builds a
    search URL (or uses the default feed) and delegates parsing/downloading.
    """
    def __init__(self):
        super().__init__()

    def get_latest_feed_data(self, rtype='dict', limit=None):
        """Return the newest feed items.

        rtype: 'dict' returns the parsed OrderedDict, 'json' a JSON string,
               'debug' pretty-prints the data and returns None.
        limit: optional cap on the number of feed items parsed.
        Raises ValueError for an unknown rtype or unserializable data.
        """
        feed_data = self._parse_rss_feed(limit=limit)
        if rtype == 'dict':
            return feed_data
        if rtype == 'json':
            try:
                return json.dumps(feed_data)
            except (TypeError, JSONDecodeError) as err:
                # BUG FIX: was `raise ('...')`, which raises a TypeError
                # because a str does not derive from BaseException.
                raise ValueError('Error while parsing data to JSON notation.') from err
        if rtype == 'debug':
            print(f"Object type: {feed_data.__class__()}")
            pprint.PrettyPrinter(indent=4).pprint(feed_data)
            # BUG FIX: previously fell through into the invalid-rtype check
            # below and raised even though 'debug' is a valid option.
            return None
        # BUG FIX: was `rtype is not ['json', 'debug', 'dict']` -- an identity
        # comparison against a fresh list, which is always True.
        raise ValueError('Invalid value for rtype. Try again.')

    def get_latest_torrent_files(self, limit=None):
        """Download the newest .torrent files from the default feed."""
        return self._rss_get_torrent_files(limit=limit)

    def get_data_by_query(self,
                          filter_=None,
                          search_query=None,
                          category=None,
                          username=None,
                          limit=None):
        """Return parsed feed data matching a search query."""
        search_url = self._create_search_query(filter_=filter_,
                                               search_query=search_query,
                                               category=category,
                                               username=username,
                                               search_type='rss')
        return self._parse_rss_feed(search_url, limit=limit)

    def get_torrents_by_query(self,
                              filter_=None,
                              search_query=None,
                              category=None,
                              username=None,
                              limit=None):
        """Download .torrent files matching a search query."""
        search_url = self._create_search_query(filter_=filter_,
                                               search_query=search_query,
                                               category=category,
                                               username=username,
                                               search_type='rss')
        # Consistency: return like get_latest_torrent_files (value is None).
        return self._rss_get_torrent_files(url=search_url, limit=limit)

    def get_data_by_username(self, username=None, limit=None):
        """Return parsed feed data for a single uploader."""
        search_url = self._create_search_query(username=username, search_type='rss')
        return self._parse_rss_feed(search_url, limit=limit)

    def get_torrents_by_username(self, username=None, limit=None):
        """Download .torrent files uploaded by a single user."""
        search_url = self._create_search_query(username=username, search_type='rss')
        return self._rss_get_torrent_files(search_url, limit=limit)
|
MichaelAlexanderDenton/Nyaa-scrapper | NyaaPy/NyaaScraper.py | """
TODO:
---if page returns empty, put an exception.
---category needs to be parsed and converted.
---get files by their tier (trusted, success, not-trusted, neutral...)
---Add an exception if the page scrapped returned nothing
---Magnet links file can have more file info as optional
---"optional" add exceeding pages exception
"""
from bs4 import BeautifulSoup
from json import JSONDecodeError
from DataProcess import DataProcess
from collections import OrderedDict
import json
import pprint
class NyaaScraper(DataProcess):
    """HTML-scraping client for nyaa.si, built on top of DataProcess."""
    def __init__(self, directory=None):
        super().__init__(directory=directory)

    ##################################################################
    ## Debug Methods for NyaaScraper
    ## You shouldn't be using these methods to get your data
    ##################################################################
    def _debug_show_titles(self):
        """Return the torrent titles from the first listing page."""
        # BUG FIX: was self.parse_scraper_data(), a method that does not
        # exist (missing leading underscore) and raised AttributeError.
        page_data = self._parse_scraper_data()
        return [item['title'] for item in page_data['data']]

    def get_latest_data(self, rtype='dict', pages=None, per_page=None):
        """Return scraped listing data.

        rtype: 'dict' returns the OrderedDict, 'json' a JSON string,
               'debug' pretty-prints the data and returns None.
        Raises TypeError for an unknown rtype.
        """
        page_data = self._parse_scraper_data(pages=pages, per_page=per_page)
        if rtype == 'dict':
            return page_data
        if rtype == 'json':
            try:
                return json.dumps(page_data)
            except (TypeError, JSONDecodeError) as err:
                # BUG FIX: was `raise ('...')`, which raises TypeError about
                # a non-exception being raised instead of the message.
                raise ValueError('Invalid type, try again. i.e --> rtype="dict"/rtype="json"') from err
        if rtype == 'debug':
            # BUG FIX: was pp.pprint(type(obj)) with `obj` undefined (NameError).
            pprint.PrettyPrinter(indent=4).pprint(page_data)
            return None
        # BUG FIX: was `rtype is not ['json', 'dict', 'debug']` -- an identity
        # comparison against a fresh list, which is always True.
        raise TypeError('Invalid type, try again. i.e --> type="dict"/type="json"')

    def get_latest_torrent_files(self, pages=None, per_page=None):
        """Download .torrent files for the latest listings."""
        pages_data = self._parse_scraper_data(pages=pages, per_page=per_page)
        self._get_data(pages_data)

    def get_latest_magnet_links(self, pages=None, per_page=None):
        """Save magnet links for the latest listings."""
        pages_data = self._parse_scraper_data(pages=pages, per_page=per_page)
        self._get_magnet_links(pages_data)

    ##########################################################
    ## search_data_by_pages was removed due to be a spammy
    ## method to use
    ## Might add later.
    ##########################################################
    def get_data_by_query(self, filter_=None, search_query=None, category=None, username=None, pages=None, per_page=None):
        """Return scraped data matching a search query."""
        # Removed an unused local OrderedDict that was built and discarded.
        search_url = self._create_search_query(
            filter_=filter_,
            search_query=search_query,
            category=category,
            username=username,
            search_type="scraper")
        return self._parse_scraper_data(url=search_url, pages=pages, per_page=per_page)

    def get_torrent_files_by_query(self,
                                   filter_=None,
                                   search_query=None,
                                   category=None,
                                   username=None,
                                   pages=None,
                                   per_page=None):
        """Download .torrent files matching a search query."""
        search_url = self._create_search_query(filter_=filter_,
                                               search_query=search_query,
                                               category=category,
                                               username=username,
                                               search_type='scraper')
        # BUG FIX: pages/per_page were accepted but silently ignored here.
        data = self._parse_scraper_data(url=search_url, pages=pages, per_page=per_page)
        return self._get_data(data)

    def get_magnet_links_by_query(self,
                                  filter_=None,
                                  search_query=None,
                                  category=None,
                                  username=None,
                                  pages=None,
                                  per_page=None):
        """Save magnet links matching a search query."""
        search_url = self._create_search_query(filter_=filter_,
                                               search_query=search_query,
                                               category=category,
                                               username=username,
                                               search_type='scraper')
        data = self._parse_scraper_data(url=search_url, pages=pages, per_page=per_page)
        return self._get_magnet_links(data)

    def get_data_by_username(self, username, rtype='dict', pages=None, per_page=None):
        """Return scraped data for one uploader as a dict or JSON string."""
        # BUG FIX: was `rtype is not ["dict, json"]` -- an identity check
        # against a single-element list containing the string "dict, json".
        if rtype not in ('dict', 'json'):
            raise TypeError("Specify data type for 'rtype' argument. 'dict' to return a dictionary, 'json' for JSON object notation.")
        search_url = self._create_search_query(username=username, search_type='scraper')
        data = self._parse_scraper_data(url=search_url, pages=pages, per_page=per_page)
        return data if rtype == 'dict' else json.dumps(data)

    def get_files_by_username(self, username=None, rtype='torrent', pages=None, per_page=None):
        """Download one uploader's files as 'torrent' files or 'magnet' links."""
        # BUG FIX: parameter was declared `username:None` (an annotation with
        # no default); now an optional keyword like the other methods.
        if rtype not in ('magnet', 'torrent'):
            raise TypeError("Please specify return type. either 'magnet' for links / 'torrent' for files ")
        search_url = self._create_search_query(username=username, search_type='scraper')
        data = self._parse_scraper_data(url=search_url, pages=pages, per_page=per_page)
        if rtype == 'magnet':
            return self._get_magnet_links(data)
        return self._get_data(data)

    def get_torrent_by_id(self, id_=None):
        """Download a single .torrent by its numeric id."""
        self._get_file(id_=id_)

    def get_magnet_by_id(self, id_=None, file=None):
        """Print (or save, if file is truthy) the magnet link for one torrent."""
        return self._get_magnet(id_=id_, file=file)
|
vsaveris/common-data-model-schema | test.py | <filename>test.py
'''
File name: test.py
Example usage code of the Common Data Model Schema Extraction class.
Author: <NAME>
email: <EMAIL>
License: MIT
Date last modified: 03.04.2020
Python Version: 3.7
'''
import cdm
import sys
# Build the schema extractor from command-line arguments:
#   argv[1] = entity cdm.json path, argv[2] = core path, argv[3] = base path.
c = cdm.CDM(path = sys.argv[1], core_path = sys.argv[2], base_path = sys.argv[3])
# Schema can be accessed by: c.getSchema()
c.printSchema()
vsaveris/common-data-model-schema | cdm.py | <reponame>vsaveris/common-data-model-schema
'''
File name: cdm.py
Common Data Model Schema Extraction class.
Author: <NAME>
email: <EMAIL>
License: MIT
Date last modified: 03.04.2020
Python Version: 3.7
'''
import json
class CDM():
    '''
    CDM Schema Extraction class implementation.

    Recursively resolves 'extendsEntity' references: base documents are
    parsed by constructing a nested CDM instance; core entities are looked
    up in the wellKnownCDSAttributeGroups document.

    Args:
        path (string): The cdm.json file from which the schema should be
            extracted.
        core_path (string): The core path of the CDM directory
            (i.e. ./CDM/schemaDocuments/core)
        base_path (string): The base path of the CDM directory
            (i.e. ./CDM/schemaDocuments/applicationCommon)

    Public Attributes:
        -

    Private Attributes:
        See constructor (self._*)

    Public Methods:
        getSchema() -> dictionary: Returns the extracted schema in a dictionary
            where key is the attribute name and value is the attribute type.
        printSchema() -> None: Prints the schema in a formatted way.

    Private Methods:
        See methods docstring (def _*)

    Raises:
        ValueError: In case input cdm.json file was not found.
    '''
    def __init__(self, path, core_path = None, base_path = None):
        print('\nCDM class, path = ', path, ', core_path = ', core_path,
              ', base_path = ', base_path, sep = '')
        # Core document (reference is not sure yet)
        # NOTE(review): assumes core_path ends with a path separator.
        self._core_document = core_path + 'wellKnownCDSAttributeGroups{Version}.cdm.json'
        # Read the contents of the CDM file
        self._cdm_file = self._readCdmFile(path)
        # Read document id (dictionary with keys: entityName, extendsEntity,
        # versionNumber)
        self._cdm_id = self._readDocumentId()
        if self._cdm_id['entityName'] is None:
            raise ValueError('Entity Name was not found in the CDM file.')
        print('Document ID: ', self._cdm_id, sep = '')
        # Initialize the Core reference file
        self._core_file = None
        # Create schema, first read attributes from the reference entity if
        # defined and afterwards read attributes from the document itself
        self._schema = {}
        # Parse base document
        if self._cdm_id['extendsEntity'] is not None and \
           'base' in self._cdm_id['extendsEntity']:
            if self._cdm_id['versionNumber'] is not None:
                base_document = base_path + self._cdm_id['extendsEntity'].split('/')[-1] + \
                    '.' + self._cdm_id['versionNumber'] + '.cdm.json'
            else:
                base_document = base_path + self._cdm_id['extendsEntity'].split('/')[-1] + \
                    '.cdm.json'
            # Recursive: the parent entity's schema is merged in first, so
            # local attributes override inherited ones.
            self._schema.update(CDM(base_document, core_path).getSchema())
        # Parse core document
        elif self._cdm_id['extendsEntity'] is not None:
            self._schema.update(self._readCoreAttributes(
                self._cdm_id['extendsEntity']))
        self._schema.update(self._readDocumentAttributes())
    def getSchema(self):
        '''
        Returns a dictionary with the extracted schema (getter).

        Args:
            -

        Raises:
            -

        Returns:
            dictionary: Returns the extracted schema in a dictionary where key
                is the attribute name and value is the attribute type.
        '''
        return self._schema
    def printSchema(self):
        '''
        Prints the schema in a formatted way.

        Args:
            -

        Raises:
            -

        Returns:
            -
        '''
        print('\nNumber of attributes in the schema ',
              self._cdm_id['entityName'], ', version ',
              self._cdm_id['versionNumber'], ': ',
              len(self._schema.keys()), sep = '')
        # Column widths: longest attribute name and widest index number.
        max_key_len = len(max(self._schema.keys(), key = len))
        index_len = len(str(len(self._schema.keys())))
        i=1
        for k, v in self._schema.items():
            print('{:{}d}. {:{}} : {}'.format(i, index_len, k, max_key_len, v))
            i += 1
    def _readCoreAttributes(self, core_entity):
        '''
        Extracts schema from the core document for the core entities referenced
        in the cdm.json file.

        Args:
            core_entity (string): The core entity references in the cdm.json
                file.

        Raises:
            -

        Returns:
            dictionary: Extracted schema.
        '''
        print('\nRead attributes of the core_entity = ', core_entity, sep = '')
        # Load core file
        core_file_path = self._core_document.replace('{Version}', '.' +
            self._cdm_id['versionNumber'] if self._cdm_id['versionNumber'] is
            not None else '')
        print('- Core file path: ', core_file_path, sep = '')
        self._core_file = self._readCdmFile(core_file_path)
        # Read attributes of the core entity
        attributes = None
        for item in self._core_file['definitions']:
            # NOTE(review): bare except silently skips malformed definitions;
            # a KeyError-specific handler would be safer.
            try:
                if item['entityName'] == core_entity:
                    attributes = item['hasAttributes']
                    break
            except:
                pass
        print('- Attributes: ', attributes, sep = '')
        # Read schema for the selected attributes
        schema = {}
        if attributes is not None:
            for item in self._core_file['definitions']:
                try:
                    if item['attributeGroupName'] in attributes:
                        print('- Parsing members of ', item['attributeGroupName'],
                              sep = '')
                        schema.update(self._parseMembers(item['members']))
                except:
                    pass
        return schema
    def _readDocumentAttributes(self):
        '''
        Extracts schema from the cdm.json for the local attributes.

        Args:
            -

        Raises:
            -

        Returns:
            dictionary: Extracted schema.
        '''
        print('\nRead attributes of the document, parsing members.')
        return self._parseMembers(self._cdm_file['definitions'][0]
            ['hasAttributes'][0]['attributeGroupReference']['members'])
    def _parseMembers(self, members):
        '''
        Parses the members of the input members object (cdm decoding).

        Args:
            members (dictionary): The members part from a cdm.json file.

        Raises:
            -

        Returns:
            dictionary: Extracted members.
        '''
        schema = {}
        for m in members:
            # Attribute is defined in the core document, search for it
            if isinstance(m, str) and self._core_file is not None:
                for d in self._core_file['definitions']:
                    try:
                        if d['attributeGroupName'] == m:
                            m = d['members'][0]
                            break
                    except:
                        pass
            # Check and parse for entities
            try:
                a_name = m['name']
                a_type = m['dataType']
                # If type is a dictionary, parse it for the type
                if not isinstance(a_type, str):
                    a_type = a_type['dataTypeReference']
                schema[a_name] = a_type
            except:
                pass
            # Check and parse for resolution guidance
            try:
                resolution_guidance = m['resolutionGuidance']
                try:
                    # Resolution guidance is entity by reference
                    rg = resolution_guidance['entityByReference']
                    a_name = rg['foreignKeyAttribute']['sourceName']
                    a_type = rg['foreignKeyAttribute']['dataType']
                    schema[a_name] = a_type
                except:
                    pass
                try:
                    # Resolution guidance is selects sub attribute
                    rg = resolution_guidance['selectsSubAttribute']
                    a_name = rg['selectedTypeAttribute']['name']
                    a_type = rg['selectedTypeAttribute']['dataType']
                    schema[a_name] = a_type
                except:
                    pass
                # Resolution guidance is add supporting attribute
                try:
                    rg = resolution_guidance['addSupportingAttribute']
                    a_name = rg['name']
                    a_type = rg['dataType']
                    schema[a_name] = a_type
                except:
                    pass
            except:
                pass
        return schema
    def _readDocumentId(self):
        '''
        Reads the identification part of a cdm.json file. Searches for
        'entityName', 'extendsEntity' and 'versionNumber' values.

        Args:
            -

        Raises:
            -

        Returns:
            dictionary: Values found for the 'entityName', 'extendsEntity' and
                'versionNumber' attributes.
        '''
        en = self._cdm_file['definitions'][0].get('entityName', None)
        ee = self._cdm_file['definitions'][0].get('extendsEntity', None)
        # Look for the version number
        version = None
        # NOTE(review): default of None means a document WITHOUT
        # 'exhibitsTraits' raises TypeError here (iterating None); the
        # default should probably be [] -- confirm against sample documents.
        for t in self._cdm_file['definitions'][0].get('exhibitsTraits', None):
            if t['traitReference'] == 'is.CDM.entityVersion':
                try:
                    version = t['arguments'][0]['value']
                    break
                except:
                    pass
        return {'entityName': en, 'extendsEntity': ee, 'versionNumber': version}
    def _readCdmFile(self, path):
        '''
        Loads a cdm.json file using the json python library.

        Args:
            path: The cdm.json file to be loaded.

        Raises:
            -

        Returns:
            object: The loaded file.
        '''
        with open(path) as f:
            return json.load(f)
|
Xvezda/vishop | setup.py | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Xvezda <<EMAIL>>
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
from os import path
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup script.
here = path.abspath(path.dirname(__file__))
# Single-source the package metadata: executing __about__.py defines
# __version__, __author__ and __email__ used below. exec() is safe here only
# because the file is part of this very package.
with open(path.join(here, 'vishop', '__about__.py')) as f:
    exec(f.read())
# The PyPI long description comes straight from the README.
with open(path.join(here, 'README.md')) as f:
    long_description = f.read()
setup(
    name='vishop',
    version=__version__,
    description='Vishop is command line VIM script publisher client.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='MIT',
    url='https://github.com/Xvezda/vishop',
    author=__author__,
    author_email=__email__,
    classifiers=[
        'Environment :: Console',
        'Topic :: Text Editors',
        'Topic :: System :: Archiving :: Packaging',
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    # Installs the `vishop` console command pointing at vishop.core:main.
    entry_points='''
    [console_scripts]
    vishop=vishop.core:main
    ''',
    keywords='VIM, VI, editor, plugin, package manager, utility, publishing',
    packages=find_packages(),
    install_requires=['requests', 'BeautifulSoup4'],
    zip_safe=False
)
|
huzhijiang123/testprj | restful_server/images.py | <reponame>huzhijiang123/testprj
"""
/images endpoint for Glance v1 API
"""
import copy
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPMethodNotAllowed
from webob.exc import HTTPNotFound
from webob.exc import HTTPRequestEntityTooLarge
from webob.exc import HTTPServiceUnavailable
from webob.exc import HTTPUnauthorized
from webob import Response
import wsgi
class Controller(object):
    """Stub WSGI controller for the Glance v1 ``images`` resource.

    The images resource API is a RESTful web service for image data::

        GET    /images         -- brief metadata about a set of images
        GET    /images/detail  -- detailed metadata about a set of images
        HEAD   /images/<ID>    -- metadata about the image with id <ID>
        GET    /images/<ID>    -- image data for the image with id <ID>
        POST   /images         -- store image data, return its metadata
        PUT    /images/<ID>    -- update metadata and/or upload image data
        DELETE /images/<ID>    -- delete the image with id <ID>

    All handlers are placeholders returning fixed responses.
    """

    def __init__(self):
        # Stateless stub: nothing to set up.
        pass

    def create(self, req, image_meta, image_data):
        # Placeholder standing in for the newly-stored image's metadata.
        return {'image_meta': "aaa"}

    def update(self, req, id, image_meta, image_data):
        # Placeholder standing in for the updated image's metadata.
        return {'image_meta': "bbb"}

    def delete(self, req, id):
        # Empty 200 response acknowledging the deletion.
        return Response(body='', status=200)
class ImageDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller method requests."""
    def create(self, request):
        # Pass-through stub: the controller consumes the raw request unchanged.
        return request
    def update(self, request):
        # Pass-through stub, same as create.
        return request
class ImageSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""
    def __init__(self):
        # Stateless stub: nothing to initialize.
        pass
    def meta(self, response, result):
        # Pass-through stub: the prepared response is returned unchanged.
        return response
    def show(self, response, result):
        return response
    def update(self, response, result):
        return response
    def create(self, response, result):
        return response
def create_resource():
    """Build the WSGI images resource, wiring the controller to its
    request deserializer and response serializer."""
    return wsgi.Resource(Controller(), ImageDeserializer(), ImageSerializer())
|
huzhijiang123/testprj | restful_client/http.py | <reponame>huzhijiang123/testprj
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import socket
import requests
import six
try:
import json
except ImportError:
import simplejson as json
from oslo_utils import encodeutils
# Module-level logger for this HTTP client.
LOG = logging.getLogger(__name__)
# User-Agent header attached to every outgoing request.
USER_AGENT = 'python-daisyclient'
CHUNKSIZE = 1024 * 64  # 64kB
# Header used to propagate an OpenStack request ID across services.
REQ_ID_HEADER = 'X-OpenStack-Request-ID'
def encode_headers(headers):
    """Encodes headers.

    Note: This should be used right before
    sending anything out.

    :param headers: Headers to encode
    :returns: Dictionary with encoded headers'
              names and values
    """
    # Entries whose value is None are dropped rather than encoded.
    return {
        encodeutils.safe_encode(name): encodeutils.safe_encode(value)
        for name, value in headers.items()
        if value is not None
    }
class _BaseHTTPClient(object):
    """Shared request/response plumbing for the HTTP client classes."""
    @staticmethod
    def _chunk_body(body):
        # Generator yielding `body` in CHUNKSIZE pieces until read() returns
        # an empty value.
        # NOTE(review): seeding `chunk` with the body object itself only makes
        # the first `while` test truthy; it is overwritten before use.
        chunk = body
        while chunk:
            chunk = body.read(CHUNKSIZE)
            if not chunk:
                break
            yield chunk
    def _set_common_request_kwargs(self, headers, kwargs):
        """Handle the common parameters used to send the request."""
        # Default Content-Type is octet-stream
        content_type = headers.get('Content-Type', 'application/octet-stream')
        # NOTE(jamielennox): remove this later. Managers should pass json= if
        # they want to send json data.
        data = kwargs.pop("data", None)
        if data is not None and not isinstance(data, six.string_types):
            try:
                data = json.dumps(data)
                content_type = 'application/json'
            except TypeError:
                # Here we assume it's
                # a file-like object
                # and we'll chunk it
                data = self._chunk_body(data)
        headers['Content-Type'] = content_type
        # Stream the response only for raw octet-stream payloads.
        kwargs['stream'] = content_type == 'application/octet-stream'
        return data
    def _handle_response(self, resp):
        # Returns (resp, body) where body is decoded JSON, a streaming
        # iterator for binary payloads, or None for non-JSON text.
        if not resp.ok:
            # NOTE(review): print with two args does not interpolate %s --
            # presumably copied from a LOG call; confirm intent.
            print("Request returned failure status %s.", resp.status_code)
        content_type = resp.headers.get('Content-Type')
        # Read body into string if it isn't obviously image data
        if content_type == 'application/octet-stream':
            # Do not read all response in memory when downloading an image.
            body_iter = _close_after_stream(resp, CHUNKSIZE)
        else:
            content = resp.text
            if content_type and content_type.startswith('application/json'):
                # Let's use requests json method, it should take care of
                # response encoding
                body_iter = resp.json()
            else:
                body_iter = six.StringIO(content)
                # Last-ditch attempt to decode JSON served with a wrong
                # Content-Type; yields None if the text is not JSON.
                try:
                    body_iter = json.loads(''.join([c for c in body_iter]))
                except ValueError:
                    body_iter = None
        return resp, body_iter
class HTTPClient(_BaseHTTPClient):
    """Thin wrapper around requests.Session bound to a single endpoint."""

    def __init__(self, endpoint):
        self.endpoint = endpoint
        self.session = requests.Session()
        self.session.headers["User-Agent"] = USER_AGENT
        print("Init: endpoint:%s" % (self.endpoint))

    def _request(self, method, url, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
        as setting headers and error handling.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        headers = copy.deepcopy(kwargs.pop('headers', {}))
        data = self._set_common_request_kwargs(headers, kwargs)
        # Note(flaper87): Before letting headers / url fly,
        # they should be encoded otherwise httplib will
        # complain.
        headers = encode_headers(headers)
        # Join endpoint and path with exactly one slash between them.
        if self.endpoint.endswith("/") or url.startswith("/"):
            conn_url = "%s%s" % (self.endpoint, url)
        else:
            conn_url = "%s/%s" % (self.endpoint, url)
        # BUG FIX: every except branch below used to fall through to
        # _handle_response(resp) with `resp` unbound, masking the real error
        # behind an UnboundLocalError. Each branch now logs and re-raises.
        try:
            print("Call: method:%s, conn_url:%s, data:%s, headers:%s, kwargs:%r" % (method, conn_url, data, headers, kwargs))
            resp = self.session.request(method,
                                        conn_url,
                                        data=data,
                                        headers=headers,
                                        **kwargs)
        except requests.exceptions.Timeout as e:
            message = ("Error communicating with %(url)s: %(e)s" %
                       dict(url=conn_url, e=e))
            print(message)
            raise
        except requests.exceptions.ConnectionError as e:
            message = ("Error finding address for %(url)s: %(e)s" %
                       dict(url=conn_url, e=e))
            print(message)
            raise
        except socket.gaierror as e:
            # BUG FIX: referenced the non-existent attribute
            # `self.endpoint_hostname`; use the endpoint itself.
            message = "Error finding address for %s: %s" % (
                self.endpoint, e)
            print(message)
            raise
        except (socket.error, socket.timeout, IOError) as e:
            endpoint = self.endpoint
            message = ("Error communicating with %(endpoint)s %(e)s" %
                       {'endpoint': endpoint, 'e': e})
            print(message)
            raise
        resp, body_iter = self._handle_response(resp)
        return resp, body_iter

    def head(self, url, **kwargs):
        return self._request('HEAD', url, **kwargs)

    def get(self, url, **kwargs):
        return self._request('GET', url, **kwargs)

    def post(self, url, **kwargs):
        return self._request('POST', url, **kwargs)

    def put(self, url, **kwargs):
        return self._request('PUT', url, **kwargs)

    def patch(self, url, **kwargs):
        return self._request('PATCH', url, **kwargs)

    def delete(self, url, **kwargs):
        return self._request('DELETE', url, **kwargs)
def _close_after_stream(response, chunk_size):
"""Iterate over the content and ensure the response is closed after."""
# Yield each chunk in the response body
for chunk in response.iter_content(chunk_size=chunk_size):
yield chunk
# Once we're done streaming the body, ensure everything is closed.
# This will return the connection to the HTTPConnectionPool in urllib3
# and ideally reduce the number of HTTPConnectionPool full warnings.
response.close()
|
huzhijiang123/testprj | restful_client/client.py | import os
import sys
import re
import http
class Client(object):
    """Client for the OpenStack Images v1 API.

    :param string endpoint: A user-supplied endpoint URL for the glance
                            service. Such as http://10.20.11.2:35357
    """

    def __init__(self, endpoint):
        """Initialize a new client for the daisy v1 API."""
        http_client = http.HTTPClient(endpoint)
        self.http_client = http_client
        self.images = ImageManager(http_client)
class ImageManager(object):
    """Issues image-related REST calls through an HTTP client."""

    def __init__(self, client):
        """
        :param client: instance of BaseClient descendant for HTTP requests
        """
        self.client = client

    def delete(self, imageid):
        """Delete an image."""
        resp, _body = self.client.delete("/images/%s" % imageid)
        print(resp)
# Demo: point the client at a local daisy/glance endpoint and fire a DELETE.
# Requires a server listening on 127.0.0.1:35357.
client = Client("http://127.0.0.1:35357")
client.images.delete("xxxxxxxx") # This is a Restful call to server URL : http://10.20.11.2:35357/images/xxxxxxxx DELETE
|
huzhijiang123/testprj | restful_server/server.py | """
RestFul API Server
"""
import os
import sys
import eventlet
import router
# Monkey patch socket, time, select, threads
# NOTE(jokke): As per the eventlet commit
# b756447bab51046dfc6f1e0e299cc997ab343701 there's circular import happening
# which can be solved making sure the hubs are properly and fully imported
# before calling monkey_patch(). This is solved in eventlet 0.22.0 but we
# need to address it before that is widely used around.
eventlet.hubs.get_hub()
eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
                              select=True, thread=True, os=True)
# wsgi is imported only after monkey-patching so it binds the green
# (eventlet-patched) socket/thread/select modules.
import wsgi
def fail():
    """Abort the process with a non-zero exit status."""
    raise SystemExit(-1)
def main():
    """Start the WSGI server on port 35357 and block until it stops."""
    try:
        wsgi.set_eventlet_hub()
        server = wsgi.Server()
        # This equals to:
        # vi /etc/paste.ini
        # [app:server]
        # paste.app_factory = router:API.factory
        # from paste import deploy
        # server.start(deploy.loadapp("config:/etc/paste.ini", name="server"), default_port=35357)
        server.start(router.API.factory(None), default_port=35357)
        # Or server.start(router.API(wsgi.APIMapper()), default_port=35357)
        server.wait()
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors and exit non-zero.
        fail()
# Script entry point.
if __name__ == '__main__':
    main()
|
huzhijiang123/testprj | restful_server/router.py | import images
import wsgi
class API(wsgi.Router):
    """WSGI router for Glance v1 API requests."""

    def __init__(self, mapper):
        images_resource = images.create_resource()
        # (path, action, HTTP method) routing table for the images resource;
        # registration order matches the original explicit connect() calls.
        routes = [
            ("/images", 'create', 'POST'),
            ("/images/{id}", "show", "GET"),
            ("/images/{id}", "update", "PUT"),
            ("/images/{id}", "delete", "DELETE"),
        ]
        for route, action, method in routes:
            mapper.connect(route,
                           controller=images_resource,
                           action=action,
                           conditions={'method': [method]})
        super(API, self).__init__(mapper)
|
nancymukuiya14/Monty-blog | tests/user_test.py | <reponame>nancymukuiya14/Monty-blog
import unittest
from app.models import User
class UserModelTest(unittest.TestCase):
    # Unit tests for password hashing/verification on the User model.

    def setUp(self):
        '''
        testcase to create an instance of User class.
        '''
        self.new_user = User(password = '<PASSWORD>')

    def test_password_setter(self):
        '''
        testcase to check password is being hashed and the pass_secure
        '''
        # NOTE(review): test_no_access_password below expects reading
        # ``password`` to raise AttributeError, so this read looks wrong —
        # presumably it should assert on ``pass_secure``; confirm against
        # the User model.
        self.assertTrue(self.new_user.password is not None)

    def test_no_access_password(self):
        '''
        testcase to comfirm the application raises Attribute error
        '''
        with self.assertRaises(AttributeError):
            self.new_user.password

    def test_password_verification(self):
        '''
        testcase to confirm that password_hash can be verified when a user uses correct password
        '''
        # NOTE(review): ``password`` appears to be a property, not callable —
        # presumably ``verify_password`` was intended here; confirm.
        self.assertTrue(self.new_user.password('<PASSWORD>')) |
Adeon18/Solver_Algorithms | main.py | <reponame>Adeon18/Solver_Algorithms
import os
from sys import platform

# Check for user system
linux = False
bad_os = False

if platform == "linux" or platform == "linux2":
    linux = True
elif platform == "win32":
    bad_os = True
# NOTE(review): on macOS (platform == "darwin") neither flag is set, so the
# chosen script is never launched — confirm whether that is intentional.

# Menu number -> solver script path launched below.
scripts = {
    1: "maze/main.py",
    2: "sudoku/main.py",
    3: "crossword/main.py",
    4: "graph_colorizer/main.py",
}

print("Hello! This is a script launcher. Choose a number of the script you'd like to run.")
print("Before you choose, close down the program and edit the coresponding file in data folder if you want to solve your problem\n")
print("\
1. Maze solver\n\
2. Sudoku solver\n\
3. Crossword solver\n\
4. Graph Colorer\n")

# Keep prompting until the input is an int that maps to a known script.
while True:
    try:
        choice = int(input("Enter a number: "))
        command = scripts[choice]
        break
    except KeyError:
        print("Enter a correct number")
    except ValueError:
        print("Enter a NUMBER")

# Windows runs the script as a module; Linux invokes the file directly.
if bad_os:
    os.system("python -m " + command)
elif linux:
    os.system("python " + command)
Adeon18/Solver_Algorithms | sudoku/sudoku_ADT.py | <reponame>Adeon18/Solver_Algorithms<gh_stars>1-10
import pygame
class Sudoku:
    """A 9x9 sudoku board with a recursive backtracking solver.

    :param path: path to a text file with nine whitespace-separated rows,
                 ``0`` marking an empty cell.
    :param vizual: optional pygame-based visualiser; when given, the solver
                   redraws the board after every tentative placement.
    """

    def __init__(self, path, vizual=None):
        self.grid = self.read_grid(path)
        # BUG FIX: the attribute was previously assigned only when a
        # visualiser was passed, so solve() crashed with AttributeError
        # when the class was used without visuals.
        self.vizual = vizual

    def read_grid(self, path):
        """
        takes the path to conditions
        and convert them to grid --
        -- list of lines where each line is a list of numbers in it
        saves all the numbers as integers
        returns the grid (list of rows)
        """
        grid = []
        with open(path, 'r', encoding='utf-8') as raw_grid:
            for line in raw_grid:
                # BUG FIX: split() copes with a missing trailing newline; the
                # previous ``line[:-1]`` slicing silently dropped the last
                # digit of a file's final row.
                grid.append([int(x) for x in line.split()])
        return grid

    def safe_to_place_in_row(self, row, col, number):
        """
        checks whether the number on the coodinates (col, row)
        is the only one like that in the row
        return bool
        """
        if number in self.grid[row]:
            return False
        return True

    def safe_to_place_in_box(self, row, col, number):
        """
        checks whether the number on the coodinates (col, row)
        is the only one like that in the box 3x3
        return bool
        """
        # Offset of the cell inside its 3x3 box.
        box_col = col % 3
        box_row = row % 3
        for row_index in range(row - box_row, row - box_row + 3):
            for col_index in range(col - box_col, col - box_col + 3):
                if self.cell_is_number(row_index, col_index, number):
                    return False
        return True

    def safe_to_place_in_col(self, row, col, number):
        """
        checks whether the number on the coodinates (col, row)
        is the only one like that in the column
        return bool
        """
        for line_index in range(len(self.grid)):
            if self.cell_is_number(line_index, col, number):
                return False
        return True

    def cell_is_number(self, row, col, number):
        """
        checks whether the element on the coodinates (col, row)
        is equal to number
        return bool
        """
        if self.grid[row][col] == number:
            return True
        return False

    def safe_to_place(self, row, col, number):
        """
        checks whether the number on the coodinates (col, row)
        fits all the sudoku criteria
        return bool
        """
        if self.cell_is_number(row, col, 0) \
                and self.safe_to_place_in_box(row, col, number) \
                and self.safe_to_place_in_col(row, col, number) \
                and self.safe_to_place_in_row(row, col, number):
            return True
        return False

    def empty_cell(self):
        """
        returns the coordinates of the first empty cell on the grid
        if there is no such cell, return False
        """
        for row_index in range(9):
            for col_index in range(9):
                if self.cell_is_number(row_index, col_index, 0):
                    return [row_index, col_index]
        return False

    def solve(self):
        """
        solves the sudoku with backtraking using recurtion
        return True is there's no empty cells left
        return False if it is impossible to solve Sudoku
        """
        cell = self.empty_cell()
        if not cell:
            return True
        row = cell[0]
        col = cell[1]
        for number in range(1, 10):
            if self.safe_to_place(row, col, number):
                self.grid[row][col] = number
                # the place where the new number is placed
                # Visuals
                if self.vizual:
                    self.vizual.draw()
                    self.vizual.events()
                    pygame.time.wait(self.vizual.TIMESTEP)
                if self.solve():
                    return True
                self.grid[row][col] = 0
                # the place when the assumption did not work and algorithm goes back
        return False

    def __str__(self):
        line_splitter = ' -------------------------\n'
        col_splitter = ' | '
        # line_splitter = ' *************************\n'
        # col_splitter = ' * '
        to_print = ''
        for row, line in enumerate(self.grid):
            if row % 3 == 0:
                to_print += line_splitter
            for col, number in enumerate(line):
                if col % 3 == 0:
                    to_print += col_splitter
                else:
                    to_print += ' '
                if number == 0:
                    number = '.'
                to_print += f'{number}'
            to_print += f'{col_splitter}\n'
        to_print += line_splitter
        return to_print
if __name__ == "__main__":
    # Quick manual check: solve the bundled sample puzzle without visuals.
    sudoku = Sudoku("condition1.txt")
    print(sudoku)
    sudoku.solve()
    print(sudoku.grid)
|
Adeon18/Solver_Algorithms | maze/maze.py | """
Implemention of the Maze ADT.
"""
import turtle
import pygame
from dataclasses import dataclass
from lliststack import Stack
class Maze:
    """
    Class for representing Maze objects.
    Attributes:
        maze: The list of lists representing the maze.
        _start: The starting position of the maze; marked as -1 on the maze.
        _exit: The exit position of the maze; marked as 2 on the maze.
    """
    # Grid cell tokens used throughout find_path().
    FREE = 0
    MAZE_WALL = 1
    EXIT = 2
    PATH_TOKEN = 3
    TRIED_TOKEN = 4

    def __init__(self, maze_file: str, visualization=None):
        """
        Creates a maze object using _build_maze func.
        """
        self._start = None
        self._exit = None
        self.maze = self._build_maze(maze_file)
        # Optional pygame front-end; when set, find_path() redraws each step.
        self.visualization = visualization

    def _build_maze(self, filename: str):
        """
        Builds a maze based on a text format in the given file.
        The initial cell and the final cell must be present.
        Otherwise an error with the corresponding message will be raised.
        """
        maze = []
        with open(filename, 'r') as f:
            lines = f.readlines()
            for i in range(len(lines)):
                line = list(map(int, lines[i].split()))
                # -1 marks the start cell, 2 marks the exit cell.
                if -1 in line:
                    self._start = _CellPosition(i, line.index(-1))
                if 2 in line:
                    self._exit = _CellPosition(i, line.index(2))
                maze.append(line)
        assert self._start is not None, "No starting position found."
        assert self._exit is not None, "No final position found."
        return maze

    def find_path(self):
        """
        Attempts to solve the maze by finding a path from the starting cell
        to the exit. Returns True if a path is found and False otherwise.
        Change the self.maze where:
        0 - not tried cells;
        1 - walls;
        2 - exit;
        3 - found path;
        4 - tried cells.
        """
        # Depth-first search: the stack remembers cells to backtrack to.
        stack = Stack()
        current_cell = _CellPosition(
            self._start.row, self._start.col)
        self._mark_path(current_cell.row, current_cell.col)
        while not self._exit_found(current_cell.row, current_cell.col):
            # step up
            if self._valid_move(current_cell.row - 1, current_cell.col):
                stack.push(current_cell)
                self._mark_path(current_cell.row - 1, current_cell.col)
                current_cell = _CellPosition(
                    current_cell.row - 1, current_cell.col)
            # step right
            elif self._valid_move(current_cell.row, current_cell.col + 1):
                stack.push(current_cell)
                self._mark_path(current_cell.row, current_cell.col + 1)
                current_cell = _CellPosition(
                    current_cell.row, current_cell.col + 1)
            # step down
            elif self._valid_move(current_cell.row + 1, current_cell.col):
                stack.push(current_cell)
                self._mark_path(current_cell.row + 1, current_cell.col)
                current_cell = _CellPosition(
                    current_cell.row + 1, current_cell.col)
            # step left
            elif self._valid_move(current_cell.row, current_cell.col - 1):
                stack.push(current_cell)
                self._mark_path(current_cell.row, current_cell.col - 1)
                current_cell = _CellPosition(
                    current_cell.row, current_cell.col - 1)
            # step back if no other options are valid.
            else:
                # HERE SHOULD BE THE GRAPHICS
                if self.visualization:
                    self.visualization.all_sprites.update()
                    self.visualization.draw()
                    self.visualization.events()
                    pygame.time.wait(self.visualization.TIMESTEP)
                self._mark_tried(current_cell.row, current_cell.col)
                # if current cell is start cell and there are no other valid moves.
                if (current_cell == self._start and not
                        (self._valid_move(current_cell.row - 1, current_cell.col) or
                         self._valid_move(current_cell.row, current_cell.col + 1) or
                         self._valid_move(current_cell.row + 1, current_cell.col) or
                         self._valid_move(current_cell.row, current_cell.col - 1))):
                    self._mark_tried(self._start.row,
                                     self._start.col)
                    # Search exhausted: the maze has no path to the exit.
                    return False
                current_cell = stack.pop()
            # HERE SHOULD BE THE GRAPHICS
            if self.visualization:
                self.visualization.all_sprites.update()
                self.visualization.draw()
                self.visualization.events()
                pygame.time.wait(self.visualization.TIMESTEP)
        if self._exit_found(current_cell.row, current_cell.col):
            self._mark_path(current_cell.row, current_cell.col)
        return True

    def __str__(self):
        """
        Returns a text-based representation of the maze.
        """
        grid = ''
        for row in range(len(self.maze)):
            for col in range(len(self.maze[0])):
                grid += str(self.maze[row][col]) + ' '
            if row != len(self.maze) - 1:
                grid += '\n'
        return grid

    def _valid_move(self, row: int, col: int):
        """
        Returns True if the given cell position is a valid move.
        """
        # In bounds, and either untried or the exit itself.
        return (row >= 0 and row < len(self.maze)
                and col >= 0 and col < len(self.maze[0])
                and (self.maze[row][col] == self.FREE or
                     self.maze[row][col] == self.EXIT))

    def _exit_found(self, row: int, col: int):
        """
        Helper method to determine if the exit was found.
        """
        return row == self._exit.row and col == self._exit.col

    def _mark_tried(self, row: int, col: int):
        """
        Drops a "tried" token at the given cell.
        """
        self.maze[row][col] = self.TRIED_TOKEN

    def _mark_path(self, row: int, col: int):
        """
        Drops a "path" token at the given cell.
        """
        self.maze[row][col] = self.PATH_TOKEN
@dataclass
class _CellPosition():
"""
Private storage class for holding a cell position.
"""
row: int
col: int
|
Adeon18/Solver_Algorithms | graph_colorizer/main.py | '''
Realizing graph colorizing by backtracking.
'''
def read_file(path):
    '''
    Reads from file and converts info into graph.

    Returns the adjacency matrix as a list of rows, each row being the list
    of space-separated tokens ('0'/'1') found on the corresponding line.
    '''
    with open(path) as source:
        return [row.strip().split(" ") for row in source]
class Graph:
    '''
    Represents a graph built from a 0/1 adjacency matrix as a dict
    ``{Vertex: [neighbour Vertex, ...]}``, plus an m-colouring
    backtracking solver.
    '''

    class Vertex:
        '''
        Represents a vertex of the graph.
        '''

        def __init__(self, number, color=None):
            '''
            Initializes class.
            '''
            self.number = number
            self.color = color

        def __hash__(self):
            # Hash by number only, so a vertex stays findable after
            # its colour changes.
            return hash(self.number)

        def __str__(self):
            '''
            Returns information about vertex in string.
            '''
            return f"Vertex(number:{self.number}, color:{self.color})"

    def __init__(self, new_lines, num_colors):
        '''
        :param new_lines: adjacency matrix rows (lists of '0'/'1' strings).
        :param num_colors: number of colours available to the solver.
        '''
        self.new_lines = new_lines
        self.num_colors = num_colors
        graph = dict()
        vertexes = {i: self.Vertex(i) for i in range(len(new_lines))}
        # BUG FIX: the original located rows/cells with list.index(), which
        # returns the *first* equal row/cell and therefore corrupted graphs
        # containing duplicate rows; enumerate() keeps the true coordinates.
        # It also no longer destructively rewrites the caller's matrix.
        for row_idx, row in enumerate(new_lines):
            for col_idx, element in enumerate(row):
                if element == '1':
                    graph.setdefault(vertexes[row_idx], []).append(vertexes[col_idx])
        self.graph = graph

    def is_safe(self, num_vertex, col):
        '''
        Checks if it is ok to set the given colour to the given vertex
        (no adjacent vertex already carries colour ``col``).
        '''
        for key in self.graph.keys():
            if key.number == num_vertex:
                for val in self.graph[key]:
                    if val.color == col:
                        return False
        return True

    def graph_color_recursive(self, num_colors, num_vertex):
        '''
        Recursive function to colorize the graph by backtracking.
        Returns True on success, False when no colouring exists.
        '''
        if num_vertex == len(self.graph.keys()):
            return True
        for col in range(1, num_colors + 1):
            if self.is_safe(num_vertex, col):
                for key in self.graph.keys():
                    if key.number == num_vertex:
                        key.color = col
                if self.graph_color_recursive(num_colors, num_vertex + 1):
                    return True
        # BUG FIX: undo the tentative colour before backtracking so a stale
        # colour cannot make is_safe() reject valid assignments higher up,
        # and return an explicit False instead of falling through to None.
        for key in self.graph.keys():
            if key.number == num_vertex:
                key.color = None
        return False

    def graph_color_final(self):
        '''
        Shows result of the colorizing.
        '''
        # Any falsy result (False, or None from older callers) means failure.
        if not self.graph_color_recursive(self.num_colors, 0):
            return "It is not possible to colour given graph in this number of colours."
        print("Solution exists and here are the assigned colours:")
        for vertex in self.graph.keys():
            print(vertex)
        return "Congratulations!"
if __name__ == "__main__":
    # Demo: 4-colour the sample adjacency matrix shipped with the repo.
    graph = Graph(read_file("data/graph_matrix.txt"), 4)
    print(graph.graph_color_final())
|
Adeon18/Solver_Algorithms | maze/main.py | '''
Main script with visualization
'''
import pygame
import sys
from os import path, pardir
import time
from maze import *
from pygame_helpers import *
class Game:
    """
    A general visualization class for pygame
    """
    # Palette used by the maze tiles and HUD text.
    DARKPINK = (219, 0, 189)
    DARKBLUE = (95, 0, 219)
    LIGHTBLUE = (138, 138, 219)
    LIGHTPINK = (192, 138, 219)
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)

    def __init__(self):
        pygame.mixer.pre_init(44100, -16, 2, 2048)
        pygame.mixer.init()
        pygame.init()
        # Make screen
        self.screen = pygame.display.set_mode((800, 800))
        #self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
        self.scr_width, self.scr_height = pygame.display.get_surface().get_size()
        pygame.display.set_caption("Maze Solver")
        # Load data and start te clock
        self.clock = pygame.time.Clock()
        self.load_data()

    def load_data(self):
        """
        Load all the external data
        """
        # NOTE(review): these paths are computed but never stored or used —
        # presumably leftovers from an earlier asset-loading step; confirm
        # before removing.
        game_folder = path.dirname(path.join(__file__, pardir))
        data_folder = path.join(game_folder, 'data')

    def new(self):
        """
        New game
        """
        # initialize all variables and do all the setup for a new game
        self.all_sprites = pygame.sprite.LayeredUpdates()
        self.maze_obj = Maze('data/maze.txt', visualization=self)
        # Delay (ms) between redraws; adjustable at runtime with arrow keys.
        self.TIMESTEP = 50
        self.create_maze()

    def run(self):
        """
        Run the graphics
        """
        # game loop - set self.playing = False to end the game
        self.playing = True
        while self.playing:
            self.dt = self.clock.tick(60) / 1000.0
            self.events()
            self.draw()
            self.update()

    def create_maze(self):
        """
        Just draw the maze begginning and then just update the tiles
        """
        # One Tile sprite per grid cell; note (j, i) = (col, row) order.
        for i in range(0, len(self.maze_obj.maze)):
            for j in range(0, len(self.maze_obj.maze[i])):
                Tile(self, self.maze_obj, j, i)

    def quit(self):
        pygame.quit()
        sys.exit()

    def update(self):
        """
        The whole visualization
        """
        # Pause, run the whole search (which redraws via this object),
        # pause again to show the result, then exit the process.
        pygame.time.wait(2000)
        self.maze_obj.find_path()
        pygame.time.wait(5000)
        self.quit()

    def draw(self):
        """
        Blit everything to the screen each frame
        """
        self.screen.fill((125, 100, 158))
        self.draw_text(f"Delay: {self.TIMESTEP} ms", 40, (51, 16, 97), self.scr_width//2, 50)
        # self.all_sprites.draw(self.screen)
        for sprite in self.all_sprites:
            self.screen.blit(sprite.image, (sprite.rect.x, sprite.rect.y))
        pygame.display.flip()

    def events(self):
        # catch all events here
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_F4:
                    self.quit()
                # Arrow keys speed up / slow down the animation.
                if event.key == pygame.K_UP:
                    self.TIMESTEP -= 10
                if event.key == pygame.K_DOWN:
                    self.TIMESTEP += 10

    def draw_text(self, text, size, color, x, y, align='center', fontname="Consolas"):
        """
        Helper for drawing text on the screen
        """
        # NOTE(review): joining "Consolas" with fontname looks accidental;
        # SysFont falls back to a default font for unknown names, so the
        # call still works — confirm the intended font lookup.
        font = pygame.font.SysFont(path.join("Consolas", fontname), size)
        text_surface = font.render(text, True, color)
        text_rect = text_surface.get_rect()
        if align == "nw":
            text_rect.topleft = (x, y)
        if align == "ne":
            text_rect.topright = (x, y)
        if align == "sw":
            text_rect.bottomleft = (x, y)
        if align == "se":
            text_rect.bottomright = (x, y)
        if align == "n":
            text_rect.midtop = (x, y)
        if align == "s":
            text_rect.midbottom = (x, y)
        if align == "e":
            text_rect.midright = (x, y)
        if align == "w":
            text_rect.midleft = (x, y)
        if align == "center":
            text_rect.center = (x, y)
        self.screen.blit(text_surface, text_rect)
if __name__ == "__main__":
    # create the game object
    g = Game()
    # Solve mazes forever: each iteration rebuilds the board and runs once.
    while True:
        g.new()
        g.run() |
Adeon18/Solver_Algorithms | crossword/crossword.py | '''
module containing crossword solving algorithm using backtracking
'''
import pygame
import random
# The eight (row, col) offsets of a cell's neighbours, diagonals included.
DIRECTIONS = [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]
class Crossword:
    '''
    class representing crossword
    has methods to read it from file and solve it
    '''

    def __init__(self, file_path, vizual=None):
        '''
        initialise crossword with its field and target words
        '''
        self.field, self.target_words = self.read_from_file(file_path)
        # Coordinates that belong to already-found words (never un-uppercased).
        self.boocked_positions = []
        # One list of coordinates per completed word.
        self.completed_words = []
        # Coordinates of the word currently being confirmed.
        self.temp_pos = []
        self.vizual = vizual

    def read_from_file(self, file_path):
        '''
        read crossword from file
        first block of file is its field with words
        separated by empty line goes target words, each in new line
        '''
        # read the game field
        field = []
        file = open(file_path, 'r')
        while True:
            line = file.readline().split()
            field.append(line)
            if not line:
                break
        # read targt words
        target_words = []
        # NOTE(review): this readline discards one line right after the blank
        # separator — verify the input format contains an extra blank line
        # there, otherwise the first target word is skipped.
        line = file.readline().split()
        while True:
            line = file.readline().strip()
            target_words.append(line)
            if not line:
                break
        file.close()
        return (field, target_words)

    def solve(self):
        '''
        solve the crossword
        '''
        # Depth-first match of ``word`` starting at ``pos`` in direction
        # ``dirr``; letters are uppercased tentatively and lowered on
        # backtrack unless already booked by another word.
        def recurse(dirr, letter, pos, word):
            new_pos = [pos[0] + dirr[0], pos[1] + dirr[1]]
            try:
                # if letter on field by its direction equals requiered letter
                if self.field[new_pos[0]][new_pos[1]].lower() == word[letter]:
                    # make this letter upper, to show that it's in
                    self.field[new_pos[0]][new_pos[1]] = self.field[new_pos[0]][new_pos[1]].upper()
                    if recurse(dirr, letter + 1, new_pos, word):
                        # if we've got the whole word, start adding useful information
                        self.boocked_positions.append(new_pos)
                        self.temp_pos.append(new_pos)
                        # For visual repesentation
                        if self.vizual:
                            self.vizual.draw(self.vizual.LIGHTRED)
                            self.vizual.events()
                            pygame.time.wait(self.vizual.TIMESTEP)
                        return True
                    else:
                        if new_pos not in self.boocked_positions:
                            # if this word wasn't found on the needed position
                            # backtrack and lower all the letters
                            # if this leter is a part of other word, do not erase it
                            self.field[new_pos[0]][new_pos[1]] = self.field[new_pos[0]][new_pos[1]].lower()
                            # For visual repesentation
                            if self.vizual:
                                self.vizual.draw()
                                self.vizual.events()
                                pygame.time.wait(self.vizual.TIMESTEP)
                return False
            except IndexError:
                # Walked off the grid: success iff the whole word matched.
                if letter == len(word):
                    return True
                return False
        i = 0
        j = 0
        for row in self.field:
            for letter in row:
                for word in self.target_words:
                    if word and word[0] == letter:
                        # if the first letter of word is our current letter on field
                        # make it upper and go recurse through all directions to find the word
                        # For visual repesentation
                        if self.vizual:
                            self.vizual.draw()
                            self.vizual.events()
                            pygame.time.wait(self.vizual.TIMESTEP)
                        for dirr in DIRECTIONS:
                            self.field[i][j] = self.field[i][j].upper()
                            if recurse(dirr, 1, [i, j], word):
                                self.boocked_positions.append([i, j])
                                self.temp_pos.append([i, j])
                                self.completed_words.append(self.temp_pos)
                                self.temp_pos = []
                            else:
                                if [i, j] not in self.boocked_positions:
                                    self.field[i][j] = self.field[i][j].lower()
                j += 1
            i += 1
            j = 0
        # double check if all found words are ok
        for pos in self.boocked_positions:
            self.field[pos[0]][pos[1]] = self.field[pos[0]][pos[1]].upper()
            # For visual repesentation
            if self.vizual:
                self.vizual.draw(self.vizual.LIGHTRED)
                self.vizual.events()
                pygame.time.wait(self.vizual.TIMESTEP)

    def __str__(self):
        res = ""
        for line in self.field:
            for item in line:
                res += str(item) + " "
            res += "\n"
        res += "\n"
        for word in self.target_words:
            res += str(word) + "\n"
        return res
if __name__ == "__main__":
    # Demo run against the bundled sample puzzle (no visualiser).
    crossword = Crossword("test_1.txt")
    crossword.solve()
    print(crossword) |
Adeon18/Solver_Algorithms | maze/pygame_helpers.py | """
This module contains pygame helpers for visualization
"""
import pygame
from maze import *
class Tile(pygame.sprite.Sprite):
    """
    A tile object that gets colored during the process
    """
    # NOTE(review): TILESIZE is never read; TILEWIDTH/TILEHEIGHT computed in
    # __init__ are used instead — confirm before removing.
    TILESIZE = 0

    def __init__(self, game, maze, grid_x, grid_y):
        self.groups = game.all_sprites
        pygame.sprite.Sprite.__init__(self, self.groups)
        # Some constants
        self.game = game
        self.maze = maze
        self.grid_x = grid_x
        self.grid_y = grid_y
        # Fit the whole maze into a 600x600 area.
        self.TILEWIDTH = 600 // len(self.maze.maze)
        self.TILEHEIGHT = 600 // len(self.maze.maze[0])
        # Get the "image"
        self.image = pygame.Surface((self.TILEWIDTH, self.TILEWIDTH))
        # Color the images
        if self.maze.maze[self.grid_y][self.grid_x] == self.maze.MAZE_WALL:
            self.image.fill(self.game.BLACK)
        elif self.maze.maze[self.grid_y][self.grid_x] == self.maze.EXIT:
            self.image.fill(self.game.DARKBLUE)
        elif self.maze.maze[self.grid_y][self.grid_x] == -1:
            self.image.fill(self.game.DARKPINK)
        else:
            self.image.fill(self.game.WHITE)
        # Get the rect
        self.rect = self.image.get_rect()
        # Position it
        self.rect.x = 100 + (self.grid_x) * self.TILEWIDTH
        self.rect.y = 100 + (self.grid_y) * self.TILEWIDTH

    def update(self):
        """
        Update the tile view in realtime
        """
        # NOTE(review): EXIT/start colours are swapped relative to __init__
        # (EXIT: DARKBLUE -> DARKPINK, start: DARKPINK -> DARKBLUE) — confirm
        # whether this colour swap once the search starts is intentional.
        # Fill each tile the needed color
        if self.maze.maze[self.grid_y][self.grid_x] == self.maze.MAZE_WALL:
            self.image.fill(self.game.BLACK)
        elif self.maze.maze[self.grid_y][self.grid_x] == self.maze.EXIT:
            self.image.fill(self.game.DARKPINK)
        elif self.maze.maze[self.grid_y][self.grid_x] == -1:
            self.image.fill(self.game.DARKBLUE)
        elif self.maze.maze[self.grid_y][self.grid_x] == self.maze.PATH_TOKEN:
            self.image.fill(self.game.LIGHTPINK)
        elif self.maze.maze[self.grid_y][self.grid_x] == self.maze.TRIED_TOKEN:
            self.image.fill(self.game.LIGHTBLUE)
        else:
            self.image.fill(self.game.WHITE) |
Mu-L/ArchiveBox | archivebox/core/urls.py | from django.contrib import admin
from django.urls import path, include
from django.views import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.views.generic.base import RedirectView
from core.views import HomepageView, SnapshotView, PublicIndexView, AddView, HealthCheckView
# print('DEBUG', settings.DEBUG)

# Top-level URL routing table for the ArchiveBox Django app: static assets,
# snapshot views, add/admin redirects, auth, and the health check.
urlpatterns = [
    path('public/', PublicIndexView.as_view(), name='public-index'),
    path('robots.txt', static.serve, {'document_root': settings.STATICFILES_DIRS[0], 'path': 'robots.txt'}),
    path('favicon.ico', static.serve, {'document_root': settings.STATICFILES_DIRS[0], 'path': 'favicon.ico'}),
    path('docs/', RedirectView.as_view(url='https://github.com/ArchiveBox/ArchiveBox/wiki'), name='Docs'),
    path('archive/', RedirectView.as_view(url='/')),
    path('archive/<path:path>', SnapshotView.as_view(), name='Snapshot'),
    path('admin/core/snapshot/add/', RedirectView.as_view(url='/add/')),
    path('add/', AddView.as_view(), name='add'),
    path('accounts/login/', RedirectView.as_view(url='/admin/login/')),
    path('accounts/logout/', RedirectView.as_view(url='/admin/logout/')),
    path('accounts/', include('django.contrib.auth.urls')),
    path('admin/', admin.site.urls),
    path('health/', HealthCheckView.as_view(), name='healthcheck'),
    path('index.html', RedirectView.as_view(url='/')),
    path('index.json', static.serve, {'document_root': settings.OUTPUT_DIR, 'path': 'index.json'}),
    path('', HomepageView.as_view(), name='Home'),
]

urlpatterns += staticfiles_urlpatterns()

# Mount django-debug-toolbar only when enabled in settings.
if settings.DEBUG_TOOLBAR:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]

# # Proposed FUTURE URLs spec
# path('', HomepageView)
# path('/add', AddView)
# path('/public', PublicIndexView)
# path('/snapshot/:slug', SnapshotView)
# path('/admin', admin.site.urls)
# path('/accounts', django.contrib.auth.urls)

# # Prposed REST API spec
# # :slugs can be uuid, short_uuid, or any of the unique index_fields
# path('api/v1/'),
# path('api/v1/core/' [GET])
# path('api/v1/core/snapshot/', [GET, POST, PUT]),
# path('api/v1/core/snapshot/:slug', [GET, PATCH, DELETE]),
# path('api/v1/core/archiveresult', [GET, POST, PUT]),
# path('api/v1/core/archiveresult/:slug', [GET, PATCH, DELETE]),
# path('api/v1/core/tag/', [GET, POST, PUT]),
# path('api/v1/core/tag/:slug', [GET, PATCH, DELETE]),

# path('api/v1/cli/', [GET])
# path('api/v1/cli/{add,list,config,...}', [POST]), # pass query as kwargs directly to `run_subcommand` and return stdout, stderr, exitcode

# path('api/v1/extractors/', [GET])
# path('api/v1/extractors/:extractor/', [GET]),
# path('api/v1/extractors/:extractor/:func', [GET, POST]), # pass query as args directly to chosen function

# future, just an idea:
# path('api/v1/scheduler/', [GET])
# path('api/v1/scheduler/task/', [GET, POST, PUT]),
# path('api/v1/scheduler/task/:slug', [GET, PATCH, DELETE]),
nanda-nainadurai/powerbi-generate-pdf-api | GeneratePDF.py | <gh_stars>0
import msal
import requests
import json
import time
import sys
# Load deployment configuration from data.json (tenant, workspace, report,
# service-principal credentials, and Power BI REST API URL templates).
# BUG FIX: ``f.close`` without parentheses was a no-op attribute access and
# the file was never closed; a ``with`` block closes it deterministically.
with open('data.json', "r") as f:
    config = json.load(f)

# Tenant/Report specific configurations
TENANT_ID = config['tenant_id']
WORKSPACE_ID = config['workspace_id']
REPORT_ID = config['report_id']
# Service Principal Credentials
CLIENT_ID = config['client_id']
CLIENT_SECRET = config['client_secret']
# Scopes defined for authentication
SCOPE = config['scope']
AUTHORITY = config['authority'].replace('organizations', TENANT_ID)
URL_EXPORT_TO_FILE = config['url_export_to_file'].replace('WORKSPACE_ID', WORKSPACE_ID).replace('REPORT_ID', REPORT_ID)
URL_EXPORT_FILE_PATH = config['url_export_file_path'].replace('WORKSPACE_ID', WORKSPACE_ID).replace('REPORT_ID', REPORT_ID)
def main():
    """Authenticate with AAD, trigger a PDF export of the report, download it."""
    clientapp = msal.ConfidentialClientApplication(CLIENT_ID, CLIENT_SECRET, authority=AUTHORITY)
    response = clientapp.acquire_token_for_client(scopes=SCOPE)
    token = response['access_token']
    header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + token}
    ### POST call to invoke Export To File option
    data = {}
    data['format'] = 'pdf'
    json_data = json.dumps(data)
    api_response = requests.post(URL_EXPORT_TO_FILE, headers=header, data=json_data)
    try:
        export_id = api_response.json()['id']
    except (ValueError, KeyError):
        # BUG FIX: a bare ``except:`` hid every error (including Ctrl-C);
        # only a non-JSON body or a missing 'id' means the export request
        # itself failed — report it and stop.
        print(api_response.content)
        return
    download_file_path = get_download_file_path(export_id, header)
    file_location = download_file(download_file_path, header)
    # print(file_location)
def get_download_file_path(export_id, header):
    """Poll the export-status endpoint until the export succeeds.

    :param export_id: id returned by the Export To File API call.
    :param header: auth headers to send with each status request.
    :returns: the ``resourceLocation`` URL to download the file from,
              or None if a status response could not be parsed.
    """
    file_status = 'Running'
    file_location = ''
    counter = 0
    # BUG FIX: the original condition used ``counter++ < 100``, which is a
    # SyntaxError in Python and never incremented anything; poll at most
    # 100 times with an explicit increment instead.
    while file_status.lower() != 'succeeded' and counter < 100:
        counter += 1
        ### GET call to get status of exportId
        url = URL_EXPORT_FILE_PATH + export_id
        try:
            api_response = requests.get(url, headers=header)
            resp = api_response.json()
            percent = resp['percentComplete']
            print(f'Percentage complete: {percent}')
            file_status = resp['status']
            time.sleep(3)
        except (ValueError, KeyError):
            # Malformed status payload: report it and give up (narrowed from
            # a bare ``except:`` which also masked network errors and Ctrl-C).
            print(api_response.content)
            return
    file_location = api_response.json()['resourceLocation']
    # print(f'The file can be downloaded from the URL: {file_location}')
    return file_location
def download_file(url, header):
    """Stream the exported file to disk and return the local file name."""
    local_filename = url.split('/')[-1] + ".pdf"
    # NOTE the stream=True parameter below
    with requests.get(url, headers=header, stream=True) as resp:
        resp.raise_for_status()
        with open(local_filename, 'wb') as out:
            # If you have chunk encoded response, set chunk_size=None and
            # guard each write with ``if chunk:``.
            for chunk in resp.iter_content(chunk_size=8192):
                out.write(chunk)
    return local_filename
# Script entry point: run the export-and-download workflow.
if __name__ == "__main__":
    main()
|
Karryanna/res-counterfitting | counterfitting.py | <reponame>Karryanna/res-counterfitting<filename>counterfitting.py
import ConfigParser
import numpy
import sys
import time
import random
import math
import os
from copy import deepcopy
import json
from numpy.linalg import norm
from numpy import dot
from scipy.stats import spearmanr
class ExperimentRun:
    """
    This class stores all of the data and hyperparameters required for a counterfitting run.
    """
    def __init__(self, config_filepath):
        """
        To initialise the class, we need to supply the config file, which contains the location of
        the pretrained word vectors, of the vocabulary to use, the location of (potentially many)
        collections of linguistic constraints (one pair per line), the location of the dialogue
        domain ontology to inject (optional, needs to respect DSTC format), as well as the six
        hyperparameters of the counterfitting procedure (as detailed in the NAACL paper).
        """
        self.config = ConfigParser.RawConfigParser()
        try:
            self.config.read(config_filepath)
        except:
            print "Couldn't read config file from", config_filepath
            return None
        pretrained_vectors_filepath = self.config.get("data", "pretrained_vectors_filepath")
        vocabulary_filepath = self.config.get("data", "vocabulary_filepath")
        # The vocabulary file holds one word per line; duplicates are dropped
        # by the set() conversion below.
        vocabulary = []
        with open(vocabulary_filepath, "r+") as f_in:
            for line in f_in:
                vocabulary.append(line.strip())
        vocabulary = set(vocabulary)
        # load pretrained word vectors and initialise their (restricted) vocabulary.
        self.pretrained_word_vectors = load_word_vectors(pretrained_vectors_filepath, vocabulary)
        # if no vectors were loaded, exit gracefully:
        if not self.pretrained_word_vectors:
            return
        # Restrict the working vocabulary to words that actually have vectors.
        self.vocabulary = set(self.pretrained_word_vectors.keys())
        # load list of filenames for synonyms and antonyms.
        # Config values look like "[file1, file2]": strip brackets/spaces, then split.
        synonym_list = self.config.get("data", "synonyms").replace("[","").replace("]", "").replace(" ", "").split(",")
        antonym_list = self.config.get("data", "antonyms").replace("[","").replace("]", "").replace(" ", "").split(",")
        self.synonyms = set()
        self.antonyms = set()
        # We check if a dialogue ontology has been supplied (this supplies extra antonyms):
        try:
            ontology_filepath = self.config.get("data", "ontology_filepath").replace(" ", "")
            dialogue_ontology = json.load(open(ontology_filepath, "rb"))
            print "\nExtracting antonyms from the dialogue ontology specified in", ontology_filepath
            ontology_antonyms = extract_antonyms_from_dialogue_ontology(dialogue_ontology, self.vocabulary)
            print "Extracted", len(ontology_antonyms), "antonyms from", ontology_filepath, "\n"
            self.antonyms |= ontology_antonyms
        except:
            print "No dialogue ontology supplied: using just the supplied synonyms and antonyms.\n"
        # and we then have all the information to collect all the linguistic constraints:
        for syn_filepath in synonym_list:
            self.synonyms = self.synonyms | load_constraints(syn_filepath, self.vocabulary)
        for ant_filepath in antonym_list:
            self.antonyms = self.antonyms | load_constraints(ant_filepath, self.vocabulary)
        # finally, load the experiment hyperparameters:
        self.load_experiment_hyperparameters()

    def load_experiment_hyperparameters(self):
        """
        This method loads/sets the hyperparameters of the procedure as specified in the paper.
        """
        # k1-k3 weight the antonym (AR), synonym (SA) and VSP cost terms;
        # delta/gamma are the antonym/synonym distance margins and rho the
        # VSP radius (see one_step_SGD / compute_vsp_pairs).
        self.hyper_k1 = self.config.getfloat("hyperparameters", "hyper_k1")
        self.hyper_k2 = self.config.getfloat("hyperparameters", "hyper_k2")
        self.hyper_k3 = self.config.getfloat("hyperparameters", "hyper_k3")
        self.delta = self.config.getfloat("hyperparameters", "delta")
        self.gamma = self.config.getfloat("hyperparameters", "gamma")
        self.rho = self.config.getfloat("hyperparameters", "rho")
        print "\nExperiment hyperparameters (k_1, k_2, k_3, delta, gamma, rho):", \
            self.hyper_k1, self.hyper_k2, self.hyper_k3, self.delta, self.gamma, self.rho
def load_word_vectors(file_destination, vocabulary):
    """
    This method loads the word vectors from the supplied file destination.
    It loads the dictionary of word vectors and prints its size and the vector dimensionality.
    Only words present in the supplied vocabulary are kept, and the returned
    vectors are L2-normalised (see normalise_word_vectors).
    """
    print "Loading pretrained word vectors from", file_destination
    word_dictionary = {}
    try:
        with open(file_destination, "r") as f:
            for line in f:
                # Each line is "<word> <val1> <val2> ...": split off the word only.
                line = line.split(" ", 1)
                key = line[0].lower()
                if key in vocabulary:
                    word_dictionary[key] = numpy.fromstring(line[1], dtype="float32", sep=" ")
    except:
        print "Word vectors could not be loaded from:", file_destination
        if file_destination == "word_vectors/glove.txt" or file_destination == "word_vectors/paragram.txt":
            print "Please unzip the provided glove/paragram vectors in the word_vectors directory.\n"
        return {}
    print len(word_dictionary), "vectors loaded from", file_destination
    return normalise_word_vectors(word_dictionary)
def print_word_vectors(word_vectors, write_path):
    """
    This function prints the collection of word vectors to file, in a plain textual format.
    One line per word: the word followed by its vector components rounded to 6 decimals.
    """
    print "Saving the counter-fitted word vectors to", write_path, "\n"
    with open(write_path, "wb") as f_write:
        for key in word_vectors:
            print >>f_write, key, " ".join(map(str, numpy.round(word_vectors[key], decimals=6)))
def normalise_word_vectors(word_vectors, norm=1.0):
    """
    Rescale every vector in the word_vectors dictionary so its Euclidean
    length equals the requested norm, and return the same dictionary.
    The small 1e-6 term guards against division by zero for all-zero vectors.
    """
    for key in word_vectors:
        vector = word_vectors[key]
        # In-place division first (mirrors the original aliasing behaviour),
        # then rebind the scaled copy.
        vector /= math.sqrt((vector ** 2).sum() + 1e-6)
        word_vectors[key] = vector * norm
    return word_vectors
def load_constraints(constraints_filepath, vocabulary):
    """
    This methods reads a collection of constraints from the specified file, and returns a set with
    all constraints for which both of their constituent words are in the specified vocabulary.
    Each accepted pair is stored in both orders, (a, b) and (b, a).
    """
    # NOTE(review): .strip() returns a new string; this call discards its
    # result and has no effect.
    constraints_filepath.strip()
    constraints = set()
    with open(constraints_filepath, "r+") as f:
        for line in f:
            # One whitespace-separated word pair per line.
            word_pair = line.split()
            if word_pair[0] in vocabulary and word_pair[1] in vocabulary and word_pair[0] != word_pair[1]:
                constraints |= {(word_pair[0], word_pair[1])}
                constraints |= {(word_pair[1], word_pair[0])}
    print constraints_filepath, "yielded", len(constraints), "constraints."
    return constraints
def extract_antonyms_from_dialogue_ontology(dialogue_ontology, vocabulary):
    """
    Returns a list of antonyms for the supplied dialogue ontology, which needs to be provided as a dictionary.
    The dialogue ontology must follow the DST Challenges format: we only care about goal slots, i.e. informables.
    """
    # We are only interested in the goal slots of the ontology:
    dialogue_ontology = dialogue_ontology["informable"]
    slot_names = set(dialogue_ontology.keys())
    # Forcing antonymous relations between different entity names does not make much sense.
    if "name" in slot_names:
        slot_names.remove("name")
    # Binary slots - we do not know how to handle - there is no point enforcing antonymy relations there.
    binary_slots = set()
    for slot_name in slot_names:
        current_values = dialogue_ontology[slot_name]
        if len(current_values) == 2 and "true" in current_values and "false" in current_values:
            binary_slots |= {slot_name}
    if binary_slots:
        print "Removing binary slots:", binary_slots
    else:
        print "There are no binary slots to ignore."
    slot_names = slot_names - binary_slots
    antonym_list = set()
    # add antonymy relations between each pair of slot values for each non-binary slot.
    for slot_name in slot_names:
        current_values = dialogue_ontology[slot_name]
        for index_1, value in enumerate(current_values):
            for index_2 in range(index_1 + 1, len(current_values)):
                # note that this will ignore all multi-value words.
                # Pairs are stored in both orders, mirroring load_constraints.
                if value in vocabulary and current_values[index_2] in vocabulary:
                    antonym_list |= {(value, current_values[index_2])}
                    antonym_list |= {(current_values[index_2], value)}
    return antonym_list
def distance(v1, v2, normalised_vectors=True):
    """
    Return the cosine distance (1 - cosine similarity) between two vectors.
    When both vectors are unit-length the denominator is 1, so it is skipped.
    """
    similarity = dot(v1, v2)
    if not normalised_vectors:
        similarity /= norm(v1) * norm(v2)
    return 1 - similarity
def compute_vsp_pairs(word_vectors, vocabulary, rho=0.2):
    """
    This method returns a dictionary with all word pairs which are closer together than rho.
    Each pair maps to the original distance in the vector space.
    In order to manage memory, this method computes dot-products of different subsets of word
    vectors and then reconstructs the indices of the word vectors that are deemed to be similar.
    """
    print "Pre-computing word pairs relevant for Vector Space Preservation (VSP). Rho =", rho
    vsp_pairs = {}
    # For unit-norm vectors: cosine distance < rho  <=>  dot product >= 1 - rho.
    threshold = 1 - rho
    vocabulary = list(vocabulary)
    num_words = len(vocabulary)
    step_size = 1000 # Number of word vectors to consider at each iteration.
    vector_size = random.choice(word_vectors.values()).shape[0]
    # ranges of word vector indices to consider:
    list_of_ranges = []
    left_range_limit = 0
    while left_range_limit < num_words:
        curr_range = (left_range_limit, min(num_words, left_range_limit + step_size))
        list_of_ranges.append(curr_range)
        left_range_limit += step_size
    range_count = len(list_of_ranges)
    # now compute similarities between words in each word range:
    for left_range in range(range_count):
        for right_range in range(left_range, range_count):
            # offsets of the current word ranges:
            left_translation = list_of_ranges[left_range][0]
            right_translation = list_of_ranges[right_range][0]
            # copy the word vectors of the current word ranges:
            vectors_left = numpy.zeros((step_size, vector_size), dtype="float32")
            vectors_right = numpy.zeros((step_size, vector_size), dtype="float32")
            # two iterations as the two ranges need not be same length (implicit zero-padding):
            full_left_range = range(list_of_ranges[left_range][0], list_of_ranges[left_range][1])
            full_right_range = range(list_of_ranges[right_range][0], list_of_ranges[right_range][1])
            for iter_idx in full_left_range:
                vectors_left[iter_idx - left_translation, :] = word_vectors[vocabulary[iter_idx]]
            for iter_idx in full_right_range:
                vectors_right[iter_idx - right_translation, :] = word_vectors[vocabulary[iter_idx]]
            # now compute the correlations between the two sets of word vectors:
            dot_product = vectors_left.dot(vectors_right.T)
            # find the indices of those word pairs whose dot product is above the threshold:
            indices = numpy.where(dot_product >= threshold)
            num_pairs = indices[0].shape[0]
            left_indices = indices[0]
            right_indices = indices[1]
            for iter_idx in range(0, num_pairs):
                left_word = vocabulary[left_translation + left_indices[iter_idx]]
                right_word = vocabulary[right_translation + right_indices[iter_idx]]
                if left_word != right_word:
                    # reconstruct the cosine distance and add word pair (both permutations):
                    score = 1 - dot_product[left_indices[iter_idx], right_indices[iter_idx]]
                    vsp_pairs[(left_word, right_word)] = score
                    vsp_pairs[(right_word, left_word)] = score
    # print "There are", len(vsp_pairs), "VSP relations to enforce for rho =", rho, "\n"
    return vsp_pairs
def vector_partial_gradient(u, v, normalised_vectors=True):
    """
    Return d dist(u, v) / du, the partial gradient of cosine distance w.r.t. u.
    For unit-norm vectors the expression collapses to u * (u . v) - v, which
    saves recomputing the norms at every step of the full-batch optimisation.
    """
    uv = dot(u, v)
    if normalised_vectors:
        return u * uv - v
    len_u = norm(u)
    len_v = norm(v)
    numerator = u * uv - v * len_u ** 2
    return numerator / (len_v * len_u ** 3)
def one_step_SGD(word_vectors, synonym_pairs, antonym_pairs, vsp_pairs, current_experiment):
    """
    This method performs a step of SGD to optimise the counterfitting cost function.
    Gradients for all three terms (AR, SA, VSP) are accumulated per word and
    averaged before being applied; the result is re-normalised to unit length.
    """
    new_word_vectors = deepcopy(word_vectors)
    gradient_updates = {}
    update_count = {}
    # NOTE(review): oa_updates and vsp_updates are never used below.
    oa_updates = {}
    vsp_updates = {}
    # AR term: push antonym pairs that are closer than delta further apart.
    for (word_i, word_j) in antonym_pairs:
        current_distance = distance(new_word_vectors[word_i], new_word_vectors[word_j])
        if current_distance < current_experiment.delta:
            gradient = vector_partial_gradient( new_word_vectors[word_i], new_word_vectors[word_j])
            gradient = gradient * current_experiment.hyper_k1
            if word_i in gradient_updates:
                gradient_updates[word_i] += gradient
                update_count[word_i] += 1
            else:
                gradient_updates[word_i] = gradient
                update_count[word_i] = 1
    # SA term: pull synonym pairs that are further apart than gamma together
    # (note the negated gradient, applied to word_j).
    for (word_i, word_j) in synonym_pairs:
        current_distance = distance(new_word_vectors[word_i], new_word_vectors[word_j])
        if current_distance > current_experiment.gamma:
            gradient = vector_partial_gradient(new_word_vectors[word_j], new_word_vectors[word_i])
            gradient = gradient * current_experiment.hyper_k2
            if word_j in gradient_updates:
                gradient_updates[word_j] -= gradient
                update_count[word_j] += 1
            else:
                gradient_updates[word_j] = -gradient
                update_count[word_j] = 1
    # VSP term: keep pairs no further apart than in the original vector space.
    for (word_i, word_j) in vsp_pairs:
        original_distance = vsp_pairs[(word_i, word_j)]
        new_distance = distance(new_word_vectors[word_i], new_word_vectors[word_j])
        if original_distance <= new_distance:
            gradient = vector_partial_gradient(new_word_vectors[word_i], new_word_vectors[word_j])
            gradient = gradient * current_experiment.hyper_k3
            if word_i in gradient_updates:
                gradient_updates[word_i] -= gradient
                update_count[word_i] += 1
            else:
                gradient_updates[word_i] = -gradient
                update_count[word_i] = 1
    for word in gradient_updates:
        # we've found that scaling the update term for each word helps with convergence speed.
        update_term = gradient_updates[word] / (update_count[word])
        new_word_vectors[word] += update_term
    return normalise_word_vectors(new_word_vectors)
def counter_fit(current_experiment):
    """
    This method repeatedly applies SGD steps to counter-fit word vectors to linguistic constraints.
    Returns the counter-fitted word-vector dictionary after max_iter steps.
    """
    word_vectors = current_experiment.pretrained_word_vectors
    vocabulary = current_experiment.vocabulary
    antonyms = current_experiment.antonyms
    synonyms = current_experiment.synonyms
    current_iteration = 0
    vsp_pairs = {}
    if current_experiment.hyper_k3 > 0.0: # if we need to compute the VSP terms.
        vsp_pairs = compute_vsp_pairs(word_vectors, vocabulary, rho=current_experiment.rho)
    # Post-processing: remove synonym pairs which are deemed to be both synonyms and antonyms:
    for antonym_pair in antonyms:
        if antonym_pair in synonyms:
            synonyms.remove(antonym_pair)
        if antonym_pair in vsp_pairs:
            del vsp_pairs[antonym_pair]
    max_iter = 20
    print "\nAntonym pairs:", len(antonyms), "Synonym pairs:", len(synonyms), "VSP pairs:", len(vsp_pairs)
    print "Running the optimisation procedure for", max_iter, "SGD steps..."
    while current_iteration < max_iter:
        current_iteration += 1
        word_vectors = one_step_SGD(word_vectors, synonyms, antonyms, vsp_pairs, current_experiment)
    return word_vectors
def simlex_analysis(word_vectors):
    """
    This method computes the Spearman's rho correlation (with p-value) of the supplied word vectors.
    The method also prints the gold standard SimLex-999 ranking to results/simlex_ranking.txt,
    and the ranking produced using the counter-fitted vectors to results/counter_ranking.txt
    """
    fread_simlex = open("linguistic_constraints/SimLex-999.txt", "rb")
    pair_list = []
    line_number = 0
    for line in fread_simlex:
        if line_number > 0:  # skip the SimLex header line
            tokens = line.split()
            word_i = tokens[0]
            word_j = tokens[1]
            score = float(tokens[3])
            # Only pairs fully covered by the supplied vectors are scored.
            if word_i in word_vectors and word_j in word_vectors:
                pair_list.append( ((word_i, word_j), score) )
        line_number += 1
    # Rank the gold pairs by descending similarity score.
    pair_list.sort(key=lambda x: - x[1])
    f_out_simlex = open("results/simlex_ranking.txt", "wb")
    f_out_counterfitting = open("results/counter_ranking.txt", "wb")
    extracted_list = []
    extracted_scores = {}
    for (x,y) in pair_list:
        (word_i, word_j) = x
        current_distance = distance(word_vectors[word_i], word_vectors[word_j])
        extracted_scores[(word_i, word_j)] = current_distance
        extracted_list.append(((word_i, word_j), current_distance))
    # Rank the same pairs by ascending distance under the supplied vectors.
    extracted_list.sort(key=lambda x: x[1])
    # print both the gold standard ranking and the produced ranking to files in the results folder:
    def parse_pair(pair_of_words):
        # Render a word pair as "word_i, word_j" for the ranking files.
        return str(pair_of_words[0] + ", " + str(pair_of_words[1]))
    for idx, element in enumerate(pair_list):
        clean_elem = str(parse_pair(element[0])) + " : " + str(round(element[1], 2))
        print >>f_out_simlex, idx, ":", clean_elem
    for idx, element in enumerate(extracted_list):
        clean_elem = str(parse_pair(element[0])) + " : " + str(round(element[1], 2))
        print >>f_out_counterfitting, idx, ":", clean_elem
    spearman_original_list = []
    spearman_target_list = []
    # Build the two rank lists (gold position vs. produced position) and correlate.
    for position_1, (word_pair, score_1) in enumerate(pair_list):
        score_2 = extracted_scores[word_pair]
        position_2 = extracted_list.index((word_pair, score_2))
        spearman_original_list.append(position_1)
        spearman_target_list.append(position_2)
    spearman_rho = spearmanr(spearman_original_list, spearman_target_list)
    return round(spearman_rho[0], 3)
def run_experiment(config_filepath):
    """
    This method runs the counterfitting experiment, printing the SimLex-999 score of the initial
    vectors, then counter-fitting them using the supplied linguistic constraints.
    We then print the SimLex-999 score of the final vectors, and save them to a .txt file in the
    results directory.
    """
    current_experiment = ExperimentRun(config_filepath)
    # Abort if no vectors were loaded (ExperimentRun already reported why).
    if not current_experiment.pretrained_word_vectors:
        return
    #print "SimLex score (Spearman's rho coefficient) of initial vectors is:", \
    #      simlex_analysis(current_experiment.pretrained_word_vectors), "\n"
    transformed_word_vectors = counter_fit(current_experiment)
    #print "\nSimLex score (Spearman's rho coefficient) the counter-fitted vectors is:", \
    #      simlex_analysis(transformed_word_vectors), "\n"
    print_word_vectors(transformed_word_vectors, "results/counter_fitted_vectors.txt")
def main():
    """
    The user can provide the location of the config file as an argument.
    If no location is specified, the default config file (experiment_parameters.cfg) is used.
    """
    try:
        config_filepath = sys.argv[1]
    except:
        print "\nUsing the default config file: experiment_parameters.cfg"
        config_filepath = "experiment_parameters.cfg"
    run_experiment(config_filepath)


# Script entry point.
if __name__=='__main__':
    main()
|
tago-io/analysis-example-downlinkDashboard-py | analysis.py | # Analysis Example
# Sending downlink using dashboard
# Using an Input Widget in the dashboard, you will be able to trigger a downlink to
# any LoraWaN network server.
# You can get the dashboard template to use here: http://admin.tago.io/template/5f514218d4555600278023c4
#
# Environment Variables
# In order to use this analysis, you must setup the Environment Variable table.
#
# account_token: Your account token. Check below how to get this.
# default_PORT: The default port to be used if not sent by the dashboard.
# device_id: The default device id to be used if not sent by the dashboard (OPTIONAL).
# payload: The default payload to be used if not sent by the dashboard (OPTIONAL).
#
# Steps to generate an account_token:
# 1 - Enter the following link: https://admin.tago.io/account/
# 2 - Select your Profile.
# 3 - Enter Tokens tab.
# 4 - Generate a new Token with Expires Never.
# 5 - Press the Copy Button and place at the Environment Variables tab of this analysis.
from tago import Analysis
from tago import Account
from os import error
import requests
# The function myAnalysis will run when you execute your analysis
def myAnalysis(context, scope):
    """Send a LoRaWAN downlink for the device/payload selected on the dashboard.

    Reads form_payload and form_port from the widget scope, resolves the
    device's network middleware, sets the device's 'downlink' parameter when
    present (some network servers, e.g. Everynet, need this), then POSTs the
    downlink to the middleware endpoint.
    """
    # Guard list accesses: a missing environment variable previously raised
    # IndexError before the friendly error message could be logged.
    env_tokens = [item for item in context.environment if item['key'] == 'account_token']
    if not env_tokens or not env_tokens[0]['value']:
        return context.log("Missing account_token Environment Variable.")
    account_token = env_tokens[0]['value']
    my_account = Account(account_token)
    # Get the variables form_payload, form_port and device_id sent by the widget/dashboard.
    payload_rows = [row for row in scope if row['variable'] == 'form_payload']
    if not payload_rows or not payload_rows[0]['value']:
        return context.log("Payload not found")
    device_id = payload_rows[0]['origin']
    payload = payload_rows[0]['value']
    port_rows = [row for row in scope if row['variable'] == 'form_port']
    if not port_rows or not port_rows[0]['value']:
        return context.log("Port not found")
    port = port_rows[0]['value']
    network_id = my_account.devices.info(device_id)
    network_id = network_id['result']['network']
    context.log(network_id)
    # get network info (middleware_endpoint) using the api
    middleware_endpoint = my_account.integration_network.info(network_id, ['id', 'middleware_endpoint', "name"])
    middleware_endpoint = middleware_endpoint['result']['middleware_endpoint']
    if not middleware_endpoint:
        return context.log("Couldn't find a network middleware for this device.")
    # Set the parameters for the device. Some NS like Everynet need this.
    # Only set the parameter when it exists; indexing an empty match used to crash.
    params = my_account.devices.paramList(device_id, "false")
    downlink_params = [p for p in params['result'] if p['key'] == 'downlink']
    if downlink_params:
        my_account.devices.paramSet(device_id, downlink_params[0])
    # Find the token containing the authorization code used.
    device_tokens = my_account.devices.tokenList(device_id, 1, 10, {}, ['name', 'serie_number', 'last_authorization'])
    token_result = device_tokens['result']
    if not token_result:
        return context.log("Couldn't find a token for this device")
    token_serie_number = token_result[0]['serie_number']
    token_last_authorization = token_result[0]['last_authorization']
    if not token_serie_number:
        return context.log("Couldn't find a token with serial for this device")
    if not token_last_authorization:
        return context.log("Couldn't find a token with last authorization for this device")
    context.log('Trying to send the downlink')
    data = {'device': token_serie_number, 'authorization': token_last_authorization, 'payload': payload, 'port': port}
    context.log(data)
    try:
        result = requests.post("https://" + middleware_endpoint + "/downlink", data)
        context.log(result)
    except error as err:
        # `error` is os.error (an alias of OSError). The original logged the
        # exception *class* and then referenced `result`, which is unbound
        # when the POST fails; log the instance and keep `result` inside try.
        context.log(err)
# The analysis token is only necessary to run the analysis outside TagoIO.
Analysis('my analysis token here').init(myAnalysis)
|
EndlessTrax/spondy-news | tests/conftest.py | from datetime import timedelta
import pytest
from django.utils import timezone
from aggregator.models import Entry
@pytest.fixture
def example_entry(db):
    """Fixture: a freshly-published test Entry dated now."""
    fields = {
        "title": "Awesome Article Title",
        "description": "A great article about all the things.",
        "pub_date": timezone.now(),
        "link": "http://myawesomeblog.com",
        "category": "ARTICLE",
    }
    return Entry.objects.create(**fields)
@pytest.fixture
def example_old_entry(db):
    """Fixture: a stale test Entry dated 21 days in the past."""
    fields = {
        "title": "Crap Article Title",
        "description": "A terrible article about irrelevant things.",
        "pub_date": timezone.now() - timedelta(days=21),
        "link": "http://mydodgyblog.com/",
        "category": "ARTICLE",
    }
    return Entry.objects.create(**fields)
|
EndlessTrax/spondy-news | aggregator/management/commands/startjobs.py | # Standard Lib
import re
import logging
from datetime import timedelta
# Third Party
import feedparser
from dateutil import parser
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
from django_apscheduler.jobstores import DjangoJobStore
from django_apscheduler.models import DjangoJobExecution
# Django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
# Apps
from aggregator.models import Entry
logger = logging.getLogger(__name__)

# Google Alert RSS feeds, keyed by the alert keyword they track.
GOOGLE_ALERT_FEEDS = {
    "axspa": "https://www.google.com/alerts/feeds/12301481115898191089/13630488004246171818",
    "spondylitis": "https://www.google.com/alerts/feeds/12301481115898191089/5579286489481723714",
    "spondyloarthritis": "https://www.google.com/alerts/feeds/12301481115898191089/5989929665899114034",
    "spondyloarthropathy": "https://www.google.com/alerts/feeds/12301481115898191089/3186404085116501193",
}

# PubMed search RSS feeds, keyed by the search phrase.
PUBMED_FEEDS = {
    "axial spondyloarthritis": "https://pubmed.ncbi.nlm.nih.gov/rss/search/14CrWYUMC68Kd_QhNo0LutvubuiZrdL47utc2tIJJ8pCWGNMyR/?limit=20&utm_campaign=pubmed-2&fc=20210117223700",
    "ankylosing spondylitis": "https://pubmed.ncbi.nlm.nih.gov/rss/search/1pabLar0q26GwV21NSLZ__LYXTO1Ur5WgUsuRUtJ8aJnHsugMd/?limit=20&utm_campaign=pubmed-2&fc=20210117223855",
}

# RSS feed for AnkylosingSpondylitisNews.com.
AS_NEWS_DOTCOM_FEED = "https://ankylosingspondylitisnews.com/feed/"
def delete_old_job_executions(max_age=604_800):
    """This job deletes all apscheduler job executions older than `max_age` from the database.

    Args:
        max_age: Maximum age in seconds to retain (default 604,800 s = 7 days).
    """
    DjangoJobExecution.objects.delete_old_job_executions(max_age)
def remove_html_elements(string: str) -> str:
    """Strip every HTML tag (anything between '<' and '>') from *string*."""
    # Non-greedy match so adjacent tags are removed individually.
    return re.compile("<.*?>").sub("", string)
def parse_google_alert_feed(url: str) -> None:
    """Parse a Google Alert RSS feed and store any new items.

    All entries gathered from these feeds are automatically given the
    ARTICLE category. Entries whose link already exists in the database
    are skipped, so the job is safe to re-run.
    """
    feed = feedparser.parse(url)
    try:
        for item in feed.entries:
            if not Entry.objects.filter(link=item.link).exists():
                entry = Entry(
                    title=remove_html_elements(item.title),
                    description=remove_html_elements(item.content[0]["value"]),
                    pub_date=parser.parse(item.updated),
                    link=item.link,
                    category="ARTICLE",
                )
                entry.save()
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt) and switched from the deprecated logger.warn;
        # exc_info records what actually went wrong instead of the misleading
        # "No items in the Feed" message.
        logger.warning("Failed to parse feed entries from %s", url, exc_info=True)
def parse_pubmed_feed(url: str) -> None:
    """Parse a PubMed RSS feed and store any new items.

    All entries gathered from these feeds are automatically given the
    RESEARCH category.
    """
    feed = feedparser.parse(url)
    try:
        for item in feed.entries:
            # Pubmed uses query strings in its RSS feeds which leads to
            # multiple duplicates when items appear on more than one feed.
            # Therefore, for pubmed feeds the title is the unique identifier.
            if not Entry.objects.filter(title=item.title).exists():
                entry = Entry(
                    title=remove_html_elements(item.title),
                    description=remove_html_elements(item.description),
                    pub_date=parser.parse(item.published),
                    link=item.link,
                    category="RESEARCH",
                )
                entry.save()
    except Exception:
        # Narrowed from a bare `except:`; logger.warn is deprecated, and the
        # traceback identifies the actual failure.
        logger.warning("Failed to parse feed entries from %s", url, exc_info=True)
def delete_rejected_entries() -> None:
    """Delete stale entries that were never marked is_published.

    Targets unpublished entries between 14 and 365 days old, leaving recent
    entries available for editorial review.
    """
    start_range = timezone.now() - timedelta(days=365)
    end_range = timezone.now() - timedelta(days=14)
    to_be_deleted = Entry.objects.filter(
        is_published=False, pub_date__range=[start_range, end_range]
    )
    for entry in to_be_deleted:
        try:
            entry.delete()
            logger.info(f"Deleted entry: {entry.title}")
        except Exception:
            # Narrowed from a bare `except:`; a failed delete is logged as a
            # warning (with traceback) rather than as routine info.
            logger.warning(f"Unable to delete entry: {entry.title}", exc_info=True)
def axspa_feed() -> None:
    """Scheduler job: parse the 'axspa' Google Alert feed for new articles."""
    logger.info("Parsing axspa feed...")
    parse_google_alert_feed(GOOGLE_ALERT_FEEDS["axspa"])
def spondylitis_feed() -> None:
    """Scheduler job: parse the 'spondylitis' Google Alert feed for new articles."""
    logger.info("Parsing spondylitis feed...")
    parse_google_alert_feed(GOOGLE_ALERT_FEEDS["spondylitis"])
def spondyloarthritis_feed() -> None:
    """Scheduler job: parse the 'spondyloarthritis' Google Alert feed for new articles."""
    logger.info("Parsing spondyloarthritis feed...")
    parse_google_alert_feed(GOOGLE_ALERT_FEEDS["spondyloarthritis"])
def spondyloarthropathy_feed() -> None:
    """Scheduler job: parse the 'spondyloarthropathy' Google Alert feed for new articles."""
    logger.info("Parsing spondyloarthropathy feed...")
    parse_google_alert_feed(GOOGLE_ALERT_FEEDS["spondyloarthropathy"])
def research_axspa_feed() -> None:
    """Scheduler job: parse the 'axial spondyloarthritis' PubMed feed for new research."""
    logger.info("Parsing axial spondyloarthritis PUBMED feed...")
    parse_pubmed_feed(PUBMED_FEEDS["axial spondyloarthritis"])
def research_as_feed() -> None:
    """Scheduler job: parse the 'ankylosing spondylitis' PubMed feed for new research."""
    logger.info("Parsing ankylosing spondylitis PUBMED feed...")
    parse_pubmed_feed(PUBMED_FEEDS["ankylosing spondylitis"])
def as_news_dotcom_feed() -> None:
    """Scheduler job: parse the AnkylosingSpondylitisNews.com feed.

    NOTE(review): this reuses parse_google_alert_feed, so entries get the
    ARTICLE category; presumably the feed's item shape matches (notably
    item.content[0]["value"]) — confirm against the live feed.
    """
    logger.info("Parsing AnkylosingSpondylitisNews.com feed...")
    parse_google_alert_feed(AS_NEWS_DOTCOM_FEED)
class Command(BaseCommand):
    """Management command that registers all recurring jobs and starts APScheduler."""

    help = "Runs apscheduler."

    def handle(self, *args, **options):
        """Register the feed-parsing and maintenance jobs, then block on the scheduler."""
        scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)
        scheduler.add_jobstore(DjangoJobStore(), "default")

        # (job function, interval kwargs, job id). The minute offsets stagger
        # jobs that share the same interval so they do not all fire at once.
        # Replaces seven near-identical add_job blocks with one data-driven loop.
        interval_jobs = [
            (axspa_feed, {"hours": 12, "minutes": 30}, "Keyword: axspa"),
            (spondylitis_feed, {"hours": 12}, "Keyword: spondylitis"),
            (spondyloarthritis_feed, {"hours": 12, "minutes": 10}, "Keyword: spondyloarthritis"),
            (spondyloarthropathy_feed, {"hours": 12, "minutes": 20}, "Keyword: spondyloarthropathy"),
            (research_axspa_feed, {"hours": 6}, "Research: AxSpa"),
            (research_as_feed, {"hours": 6, "minutes": 10}, "Research: AS"),
            (as_news_dotcom_feed, {"hours": 6}, "AS News dotcom feed"),
        ]
        for job_func, interval, job_id in interval_jobs:
            scheduler.add_job(
                job_func,
                trigger="interval",
                id=job_id,
                max_instances=1,
                replace_existing=True,
                **interval,
            )
            logger.info(f"Added job: {job_func.__name__}")

        # Weekly housekeeping jobs, run early Monday morning.
        weekly_jobs = [
            (delete_old_job_executions, CronTrigger(day_of_week="mon", hour="00", minute="00"), "delete old job executions"),
            (delete_rejected_entries, CronTrigger(day_of_week="mon", hour="01", minute="00"), "delete old rejected entries"),
        ]
        for job_func, trigger, job_id in weekly_jobs:
            scheduler.add_job(
                job_func,
                trigger=trigger,
                id=job_id,
                max_instances=1,
                replace_existing=True,
            )
            logger.info(f"Added weekly job: {job_func.__name__}")

        try:
            logger.info("Starting scheduler...")
            scheduler.start()
        except KeyboardInterrupt:
            logger.info("Stopping scheduler...")
            scheduler.shutdown()
            logger.info("Scheduler shut down successfully!")
|
EndlessTrax/spondy-news | spondy_news/sitemap.py | <filename>spondy_news/sitemap.py
from django.contrib.sitemaps import Sitemap
from django.shortcuts import reverse
class StaticPageSitemap(Sitemap):
    """Sitemap covering the site's static pages (currently only the homepage)."""

    changefreq = "daily"
    priority = 0.5

    def items(self):
        # URL names to include in the sitemap.
        return ["homepage"]

    def location(self, item):
        # Resolve each URL name to its path.
        return reverse(item)
|
EndlessTrax/spondy-news | aggregator/apps.py | <reponame>EndlessTrax/spondy-news<filename>aggregator/apps.py
from django.apps import AppConfig
class ContentAggregatorConfig(AppConfig):
    """Django app configuration for the feed-aggregator app."""

    # Importable name of the app package ("aggregator").
    name = "aggregator"
|
EndlessTrax/spondy-news | spondy_news/feeds.py | from django.contrib.syndication.views import Feed
from aggregator.models import Entry
class LatestEntriesFeed(Feed):
    """RSS feed exposing the 20 most recently published entries."""

    title = "Spondy News Feed"
    link = "https://spondy.news"
    description = "Published entries on Spondy News"

    def items(self):
        # Newest 20 entries that have been approved for publication.
        return Entry.objects.filter(is_published=True).order_by("-pub_date")[:20]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return item.description

    def item_link(self, item):
        # Link out to the original article rather than a local detail page.
        return item.link

    def item_categories(self, item):
        # Single-element list holding the entry's category code (e.g. "ARTICLE").
        return [item.category]
EndlessTrax/spondy-news | tests/test_spondy_news.py | <reponame>EndlessTrax/spondy-news<filename>tests/test_spondy_news.py
from django.urls import reverse
import pytest
from spondy_news.feeds import LatestEntriesFeed
@pytest.mark.django_db
def test_admin_action_publish_selected(client, example_entry):
    """Posts the custom 'publish_selected' admin action for an entry.

    NOTE(review): only the response status is asserted; the entry's
    is_published flag is never re-checked, and an unauthenticated client is
    likely redirected to the admin login (also 200 after follow=True) —
    confirm this test exercises what its name claims.
    """
    change_url = reverse("admin:aggregator_entry_change", args=(example_entry.id,))
    data = {"action": "publish_selected", "_selected_action": [example_entry.id]}
    response = client.post(change_url, data, follow=True)
    assert response.status_code == 200
def test_sitemap_status_code(client):
    """Checks that /sitemap.xml responds with HTTP 200."""
    response = client.get("/sitemap.xml")
    assert response.status_code == 200
def test_sitemap_content():
    """Checks the static metadata of the RSS feed.

    NOTE(review): despite the name, this verifies LatestEntriesFeed (the RSS
    feed), not the sitemap — consider renaming to test_feed_content.
    """
    feed = LatestEntriesFeed()
    assert feed.title == "Spondy News Feed"
    assert feed.link == "https://spondy.news"
    assert feed.description == "Published entries on Spondy News"
def test_robots_txt_status_code(client):
    """Checks that /robots.txt responds with HTTP 200."""
    # Docstring previously said "sitemap.xml", which did not match the request.
    response = client.get("/robots.txt")
    assert response.status_code == 200
@pytest.mark.django_db
def test_rss_feed_latest_status_code(client):
    """Checks the response code of the RSS feed for latest entries."""
    # Presumably routed to LatestEntriesFeed in the URL conf — not visible here.
    response = client.get("/feeds/latest/rss.xml")
    assert response.status_code == 200
|
EndlessTrax/spondy-news | aggregator/models.py | <gh_stars>1-10
from django.db import models
# (stored value, human-readable label) pairs for Entry.category.
CATEGORY_CHOICES = [
    ("ARTICLE", "Article"),
    ("RESEARCH", "Research"),
    ("EVENT", "Event"),
]
class Entry(models.Model):
    """A single aggregated feed item (article, research paper or event)."""

    class Meta:
        verbose_name_plural = "entries"

    title = models.CharField(max_length=200)
    description = models.TextField()
    # Publication date of the item (as supplied when the entry is created).
    pub_date = models.DateTimeField()
    # unique=True allows feed parsers to deduplicate on link.
    link = models.URLField(unique=True, max_length=300)
    # Defaults to False; flipped when an entry is approved for publication.
    is_published = models.BooleanField(default=False)
    is_featured = models.BooleanField(default=False)
    # One of CATEGORY_CHOICES; may be blank.
    category = models.CharField(max_length=10, choices=CATEGORY_CHOICES, blank=True)

    def __str__(self) -> str:
        return f"{self.title}"
|
EndlessTrax/spondy-news | tests/test_aggregator.py | from django.urls import reverse
import pytest
from pytest_django.asserts import assertTemplateUsed
from aggregator.management.commands.startjobs import delete_rejected_entries
from aggregator.models import Entry
def test_class_str_repr(example_entry):
    """str(entry) renders the entry title."""
    expected = "Awesome Article Title"
    assert str(example_entry) == expected
def test_entry_content(example_entry):
    """The fixture entry's fields are persisted as given."""
    for attr, expected in (("link", "http://myawesomeblog.com"),
                           ("category", "ARTICLE")):
        assert getattr(example_entry, attr) == expected
@pytest.mark.django_db
def test_homepage_status_code(client):
    """The homepage responds with HTTP 200."""
    assert client.get("/").status_code == 200
@pytest.mark.django_db
def test_homepage_uses_correct_template(client):
    """The homepage is rendered from homepage.html."""
    resp = client.get(reverse("homepage"))
    assertTemplateUsed(resp, "homepage.html")
@pytest.mark.django_db
def test_delete_rejected_entries_job(example_entry, example_old_entry):
    """Tests that the delete_rejected_entries function only deletes entries
    that have is_published=False and a pub_date older than 14 days."""
    # Both fixture entries exist before the job runs.
    assert Entry.objects.filter(title=example_entry.title).exists()
    assert Entry.objects.filter(title=example_old_entry.title).exists()
    # Delete only unpublished entries over 14 days old.
    delete_rejected_entries()
    # The recent entry survives; the old unpublished one is removed.
    assert Entry.objects.filter(title=example_entry.title).exists()
    assert not Entry.objects.filter(title=example_old_entry.title).exists()
|
EndlessTrax/spondy-news | aggregator/views.py | from django.views.generic import ListView
from .models import Entry
class HomePageView(ListView):
    """Main homepage view.

    Renders homepage.html with three querysets of published entries:
    the newest articles, the newest research items, and all events.
    """
    template_name = "homepage.html"
    model = Entry
    def get_context_data(self, **kwargs):
        """Add the article / research / event querysets to the template context."""
        context = super().get_context_data(**kwargs)
        # Filters the newest 25 articles to be displayed on homepage
        context["articles"] = Entry.objects.filter(
            is_published=True, category="ARTICLE"
        ).order_by("-pub_date")[:25]
        # Filters the newest 25 research article to be displayed on homepage
        context["researches"] = Entry.objects.filter(
            is_published=True, category="RESEARCH"
        ).order_by("-pub_date")[:25]
        # Filters all events (no limit) to be displayed on homepage
        context["events"] = Entry.objects.filter(
            is_published=True, category="EVENT"
        ).order_by("-pub_date")
        return context
|
EndlessTrax/spondy-news | aggregator/admin.py | <filename>aggregator/admin.py<gh_stars>1-10
from django.contrib import admin
from .models import Entry
def publish_selected(modeladmin, request, queryset):
    """Admin action: mark every selected entry as published."""
    queryset.update(is_published=True)
# Label shown for this action in the admin "Action" dropdown.
publish_selected.short_description = "Publish the selected posts"
@admin.register(Entry)
class EntryAdmin(admin.ModelAdmin):
    """Admin configuration for Entry: newest first, with a bulk publish action."""
    list_display = ("pub_date", "title", "category", "is_featured", "is_published")
    actions = [publish_selected]
    # Newest entries listed first.
    ordering = ("-pub_date",)
|
A-Bak/remove-dirs | test/directory_test_case.py | import unittest
import os
import shutil
class DirectoryTestCase(unittest.TestCase):
    """TestCase mixin that materialises ``self.dir_paths`` and
    ``self.file_contents`` on disk before each test and removes the
    ``test/test_dirs`` scratch tree afterwards."""

    def setUp(self) -> None:
        # Create every requested directory (including parents).
        for directory in self.dir_paths:
            if not os.path.exists(directory):
                os.makedirs(directory)
        # Create each file; write its content only when one is given.
        for path, content in self.file_contents.items():
            if not os.path.exists(path):
                with open(path, 'w') as handle:
                    if content is not None:
                        handle.write(content)

    def tearDown(self) -> None:
        # Remove the whole scratch tree created by setUp().
        shutil.rmtree('test/test_dirs')
|
A-Bak/remove-dirs | test/logging_test_case.py | <filename>test/logging_test_case.py
import unittest
import logging
import os
class LoggingTestCase(unittest.TestCase):
    """TestCase mixin that provides ``self.logger`` and configures root
    logging to write to ``test/test_log.txt``."""

    def setUp(self) -> None:
        super().setUp()
        self.logger = logging.getLogger(__name__)
        self.log_file_path = os.path.join('test', 'test_log.txt')
        # basicConfig is a no-op if the root logger is already configured.
        logging.basicConfig(
            filename=self.log_file_path,
            format="[%(levelname)s] - %(asctime)s : %(message)s",
            level=logging.DEBUG,
        )
A-Bak/remove-dirs | rmdirs/__main__.py | <reponame>A-Bak/remove-dirs
from .utils import *
import sys
import argparse
if __name__ == '__main__':
    # Command-line entry point: parse options and delegate to remove()
    # (brought into scope above by `from .utils import *`).
    description = '''
    Python utility for removing all subdirectories of a directory while
    preserving all the files located in the subdirectories. The files are
    renamed according to the relative path from the root_dir to given file.
    The '\' and '/' in the relative path are replaced by the separator char.
    '''
    parser = argparse.ArgumentParser(description=description)
    help_string = 'Path to root directory.'
    parser.add_argument('-r', '--root', help=help_string, required=True)
    help_string = 'Separator that is placed into the new file names instead of \'\\\' and \'/\'. Default is \'_\'.'
    parser.add_argument('-s', '--sep', help=help_string, default='_')
    args = parser.parse_args()
    # --root is required, so args.root is always set; this guard is defensive.
    if args.root is not None:
        remove(args.root, args.sep)
|
A-Bak/remove-dirs | test/rmdirs_string_util_test.py | <gh_stars>0
import unittest
import os
import rmdirs.utils
from logging_test_case import LoggingTestCase
from directory_test_case import DirectoryTestCase
class TestrmdirsStringUtil(LoggingTestCase, DirectoryTestCase):
    """Tests for the rename logic and private string helpers in rmdirs.utils."""

    def setUp(self) -> None:
        """Create the scratch files consumed by the rename tests."""
        self.dir_paths = [
            'test/test_dirs/test_rename_if_exists/',
        ]
        self.file_contents = {
            'test/test_dirs/test_rename_if_exists/(1)' : None,
            'test/test_dirs/test_rename_if_exists/(0).ext' : None,
            'test/test_dirs/test_rename_if_exists/file.txt' : None,
            'test/test_dirs/test_rename_if_exists/file_file.txt' : None,
            'test/test_dirs/test_rename_if_exists/file_file (1).txt' : None,
            'test/test_dirs/test_rename_if_exists/file_file (2).txt' : None,
        }
        return super().setUp()

    def test_rename_if_exists(self):
        """Existing paths get an ' (i)' suffix; free paths are returned unchanged."""
        # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
        # replaced with assertEqual throughout this class.
        file_path = 'test/test_dirs/test_rename_if_exists/(1)'
        result = os.path.basename(rmdirs.utils.rename_if_exists(file_path))
        expected = '(1) (1)'
        self.assertEqual(result, expected)

        file_path = 'test/test_dirs/test_rename_if_exists/(2)'
        result = os.path.basename(rmdirs.utils.rename_if_exists(file_path))
        expected = os.path.basename(file_path)
        self.assertEqual(result, expected)

        file_path = 'test/test_dirs/test_rename_if_exists/(0).ext'
        result = os.path.basename(rmdirs.utils.rename_if_exists(file_path))
        expected = '(0) (1).ext'
        self.assertEqual(result, expected)

        file_path = 'test/test_dirs/test_rename_if_exists/file.txt'
        result = os.path.basename(rmdirs.utils.rename_if_exists(file_path))
        expected = 'file (1).txt'
        self.assertEqual(result, expected)

        file_path = 'test/test_dirs/test_rename_if_exists/file_file.txt'
        result = os.path.basename(rmdirs.utils.rename_if_exists(file_path))
        expected = 'file_file (3).txt'
        self.assertEqual(result, expected)

    def test_replace_chars(self):
        """Characters from char_list are replaced; bad argument types raise TypeError."""
        # BUG FIX: the original passed a single tuple positional argument to
        # assertRaises, so the TypeError came from a missing-argument call error
        # rather than from the validation under test.  Arguments are now passed
        # individually.
        self.assertRaises(TypeError, rmdirs.utils._replace_chars, 123456, ['1'])
        self.assertRaises(TypeError, rmdirs.utils._replace_chars, "123456", [1])

        self.assertEqual(rmdirs.utils._replace_chars('1100110011', ['0']), '11__11__11')
        self.assertEqual(rmdirs.utils._replace_chars('1100110011', ['0', '1']), '__________')
        self.assertEqual(rmdirs.utils._replace_chars('1100110011', ['1'], '0'), '0000000000')
        self.assertEqual(
            rmdirs.utils._replace_chars('C:\\Users\\Public/Libraries/file.ext', ['\\', '/']),
            'C:_Users_Public_Libraries_file.ext')

    def test_string_contains(self):
        """Membership of any char_list character; bad argument types raise TypeError."""
        # BUG FIX: arguments passed individually (see test_replace_chars).
        self.assertRaises(TypeError, rmdirs.utils._string_contains, -1, [])
        self.assertRaises(TypeError, rmdirs.utils._string_contains, "", [1])
        self.assertRaises(TypeError, rmdirs.utils._string_contains, "1", [1])

        self.assertTrue(rmdirs.utils._string_contains("", []))
        self.assertFalse(rmdirs.utils._string_contains("", ['a']))
        self.assertTrue(rmdirs.utils._string_contains("a", ['a']))
        self.assertTrue(rmdirs.utils._string_contains("ababba", ['a']))
        self.assertFalse(rmdirs.utils._string_contains("a", ['b']))
        self.assertFalse(rmdirs.utils._string_contains("ababba", ['c']))
if __name__ == "__main__":
    # Allow running this test module directly without a test runner.
    unittest.main()
|
A-Bak/remove-dirs | test/rmdirs_test.py | import unittest
import os
import rmdirs
from logging_test_case import LoggingTestCase
from directory_test_case import DirectoryTestCase
class TestRmdirs(LoggingTestCase, DirectoryTestCase):
    """End-to-end tests for rmdirs.remove() on scratch directory trees."""
    def setUp(self) -> None:
        # Scratch tree layout; DirectoryTestCase.setUp() creates it on disk.
        self.dir_paths = [
            'test/test_dirs/test_empty',
            'test/test_dirs/test_empty/empty_dir',
            'test/test_dirs/test_empty/dir/1',
            'test/test_dirs/test_empty/dir/2',
            'test/test_dirs/test_empty/dir/3',
            'test/test_dirs/test_subdirs/1/',
            'test/test_dirs/test_subdirs/2/21',
            'test/test_dirs/test_subdirs/3/31',
            'test/test_dirs/test_subdirs/3/32/321',
            'test/test_dirs/test_subdirs/3/33',
            'test/test_dirs/test_renaming/',
            'test/test_dirs/test_renaming/1/',
        ]
        self.file_contents = {
            'test/test_dirs/test_subdirs/1/empty' : None,
            'test/test_dirs/test_subdirs/1/cfg.txt' : '"python.testing.pytestEnabled": false',
            'test/test_dirs/test_subdirs/2/same_name.txt' : None,
            'test/test_dirs/test_subdirs/2/21/same_name.txt' : None,
            'test/test_dirs/test_renaming/1_1.txt' : 'existing file',
            'test/test_dirs/test_renaming/1_1 (1).txt' : 'existing file',
            'test/test_dirs/test_renaming/1/1.txt' : 'renamed_file',
        }
        return super().setUp()
    def test_logging(self):
        """The LoggingTestCase mixin provides a working logger and log file."""
        self.assertTrue(self.logger is not None)
        self.logger.debug("Logged message.")
        self.assertTrue(os.path.exists(self.log_file_path))
    def test_empty_dir(self):
        """remove() on a directory with only (nested) empty subdirs leaves it empty."""
        dir_path = 'test/test_dirs/test_empty'
        self.assertTrue(os.path.exists(dir_path))
        _, dirs, files = next(os.walk(dir_path))
        self.assertNotEqual(dirs, [])
        self.assertEqual(files, [])
        rmdirs.remove(dir_path)
        _, dirs, files = next(os.walk(dir_path))
        self.assertEqual(dirs, [])
        self.assertEqual(files, [])
    def test_remove_subdirs(self):
        """remove() deletes every immediate subdirectory of the target."""
        target_dir = 'test/test_dirs/test_subdirs'
        self.assertTrue(os.path.exists(target_dir))
        _, subdirs, _ = next(os.walk(target_dir))
        self.assertEqual(subdirs, ['1', '2', '3'])
        rmdirs.remove(target_dir)
        _, subdirs, _ = next(os.walk(target_dir))
        self.assertEqual(subdirs, [])
    def test_renaming(self):
        """Files moved up that collide with existing names get an ' (i)' suffix."""
        target_dir = 'test/test_dirs/test_renaming'
        self.assertTrue(os.path.exists(target_dir))
        _, _, files = next(os.walk(target_dir))
        self.assertEqual(sorted(files), ['1_1 (1).txt', '1_1.txt'])
        rmdirs.remove(target_dir)
        _, _, files = next(os.walk(target_dir))
        self.assertEqual(sorted(files), ['1_1 (1).txt', '1_1 (2).txt', '1_1.txt'])
if __name__ == "__main__":
    # Allow running this test module directly without a test runner.
    unittest.main()
A-Bak/remove-dirs | rmdirs/utils.py | from typing import List
import os
import shutil
import logging
__all__ = ['remove', 'new_file_name', 'rename_if_exists']
class Char(str):
    """Single character. (Type-hint alias of str; length is not enforced.)"""
class FilePath(str):
    """An absolute or a relative path to a file system resource."""
class FileName(str):
    """Full file name of a file in a directory."""
def remove(root_dir: FilePath, separator: Char='_') -> None:
    """Flatten *root_dir*: move every file found in any subdirectory up into
    root_dir, then delete all subdirectories.

    Each moved file is renamed with its relative path as a prefix, with
    '\\\\' and '/' replaced by *separator*, e.g.
    'root_dir/dir1/dir2/file.ext' -> 'dir1_dir2_file.ext'.

    Parameters
    -------------------------------------------------------------------------
    root_dir : FilePath
        path to the root directory
    separator : Char
        character placed into the new file names instead of '\\\\' and '/'

    Returns
    -------------------------------------------------------------------------
    None

    Raises
    -------------------------------------------------------------------------
    ValueError
        if root_dir does not exist
    """
    if not os.path.exists(root_dir):
        raise ValueError("Invalid path to directory.")
    # First directory returned by os.walk() is the root_dir itself:
    # capture its immediate subdirs now for later deletion via shutil.rmtree().
    dirtree_iter = os.walk(root_dir)
    _, subdirs, _ = next(dirtree_iter, None)
    # Move every nested file up into root_dir under its flattened name
    # BEFORE any directory is removed.
    for current_dir, _, files in dirtree_iter:
        for file_name in files:
            source_file_path = os.path.join(current_dir, file_name)
            target_file_path = new_file_name(file_name, current_dir, root_dir, separator)
            shutil.move(source_file_path, target_file_path)
            logging.debug(f'New name is "{target_file_path}".')
    # The subtrees now contain only directories; delete them recursively.
    for dir_name in subdirs:
        shutil.rmtree(os.path.join(root_dir, dir_name))
def new_file_name(file_name: FileName, current_dir: FilePath, root_dir: FilePath, separator: Char='_'):
    """Build the flattened destination path for *file_name*.

    The relative path from *root_dir* to *current_dir* becomes a prefix of
    the file name, with '\\\\' and '/' replaced by *separator*; the result
    is then de-conflicted against existing files via rename_if_exists().

    E.g. 'root_dir/dir1/dir2/file.ext' -> 'root_dir/dir1_dir2_file.ext'.
    """
    prefixed = os.path.join(os.path.relpath(current_dir, start=root_dir), file_name)
    flattened = _replace_chars(prefixed, ['\\', '/'], separator)
    return rename_if_exists(os.path.join(root_dir, flattened))
def rename_if_exists(target_file_path: FilePath) -> FilePath:
    """Return a file path that does not collide with an existing file.

    When *target_file_path* is free it is returned unchanged; otherwise a
    suffix ' (i)' is inserted before the extension, using the smallest
    i >= 1 that yields an unused path, e.g.
    'target/file/path.ext' -> 'target/file/path (i).ext'.
    """
    if not os.path.exists(target_file_path):
        return target_file_path
    # Split off the extension and probe increasing indices until free.
    stem, extension = os.path.splitext(target_file_path)
    index = 1
    candidate = '{0} ({1}){2}'.format(stem, index, extension)
    while os.path.exists(candidate):
        index += 1
        candidate = '{0} ({1}){2}'.format(stem, index, extension)
    return candidate
def _replace_chars(string: str, char_list: List[Char], replacement: Char='_') -> str:
    """Return a copy of *string* with every occurrence of each character in
    *char_list* replaced by *replacement*.

    Parameters
    -------------------------------------------------------------------------
    string : str
        input string
    char_list : List[Char]
        list of characters to replace in the input string
    replacement : Char
        character substituted for each occurrence

    Raises
    -------------------------------------------------------------------------
    ValueError
        if char_list is an empty list
    TypeError
        if string or any char_list element is not of type str
        (raised by _string_contains during validation)
    """
    if not char_list:
        raise ValueError('Argument char_list is an empty list.')
    new_string = string
    # _string_contains validates the argument types and raises TypeError on
    # bad input.  BUG FIX: the original wrapped this in a pointless
    # `except TypeError as e: raise e` (a no-op re-raise), which was removed.
    if _string_contains(string, char_list):
        for ch in char_list:
            new_string = new_string.replace(ch, replacement)
    return new_string
def _string_contains(string: str, char_list: List[Char]) -> bool:
    """Return True when *string* contains any character from *char_list*.

    An empty char_list yields True (vacuous match, preserved behaviour).

    Raises
    -------------------------------------------------------------------------
    TypeError
        if string or any element of char_list is not of type str
    """
    if not isinstance(string, str):
        raise TypeError('String argument is not of type str.')
    if not char_list:
        return True
    # Validate every element before testing membership.
    for ch in char_list:
        if not isinstance(ch, str):
            raise TypeError(('All elements of char_list must be of type str.'))
    return any(ch in string for ch in char_list)
|
A-Bak/remove-dirs | setup.py | <reponame>A-Bak/remove-dirs
# Package metadata / build configuration for the rmdirs utility.
import setuptools
# Long description shown on PyPI is taken straight from the ReadMe.
with open('ReadMe.md', 'r') as f:
    long_description = f.read()
# NOTE(review): requirements.txt is read as UTF-16 — presumably saved by a
# Windows editor; confirm the file's actual encoding if installs fail.
with open('requirements.txt', 'r', encoding='UTF-16') as f:
    required = f.readlines()
setuptools.setup(
    name="rmdirs",
    version="0.1.1",
    author="A-Bak",
    author_email="<EMAIL>",
    description="Python utility for removing all subdirectories of a directory while preserving all the files located in the subdirectories.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='utility util file-system file-structure remove-directories remove-dirs',
    url='https://github.com/A-Bak/remove-dirs',
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
    install_requires=required,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
karakuri-musha/JetRunStep | Common/kmg_common.py | <gh_stars>0
#!/usr/bin/python3
#-----------------------------------------------------------------------------
# KMG Common Create 2021.09
#
# 共通関数を定義したファイルです。他のプログラムから呼び出して使用します。
#
# Author : <EMAIL>ROKU@Karakuri-musha
# License : See the license file for the license.
#
# [変数記名規則]
# i_ : 関数の引数
# p_ : 関数内でのみ使用
# o_ : 関数の戻り値
#
# --------------------------------------------------------------------------
# 収録関数
# --------------------------------------------------------------------------
# [logger function]
# logger_init(i_logdir_path) : logger設定処理
#
# [Ubuntu環境確認用関数]
# get_pkglist()
#
#
# Author : <EMAIL>ROKU@Karakuri-musha
# License : See the license file for the license.
#
#-----------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# ライブラリインポート部
# -------------------------------------------------------------------------------------------
import os
import sys
import subprocess
import json
import shutil
import platform
from datetime import datetime
import logging
from logging import StreamHandler, FileHandler, Formatter
from logging import INFO, DEBUG, NOTSET
from argparse import ArgumentParser
from typing import Match
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement, Comment
from xml.dom import minidom
from Common.kmg_subprocess import *
# -------------------------------------------------------------------------------------------
# 変数/定数 定義部
# -------------------------------------------------------------------------------------------
# 実行環境ラベル
SYSTEM_LABEL_RASPI = 1
SYSTEM_LABEL_JETSON = 2
SYSTEM_LABEL_LINUX = 3
SYSTEM_LABEL_LINUX_OTHER = 4
SYSTEM_LABEL_WIN10 = 5
SYSTEM_LABEL_WIN_OTHER = 6
# -------------------------------------------------------------------------------------------
# 関数定義部
# -------------------------------------------------------------------------------------------
#---------------------------------------------
# logger function
#---------------------------------------------
def logger_init(i_logdir_path):
    """Configure root logging with a console handler (INFO, message only)
    and a timestamped file handler (DEBUG, detailed format).

    i_logdir_path: directory that receives the log file; created if missing.
    Returns this module's logger.
    """
    # Console handler: INFO and above, bare message.
    p_stream_hundler = StreamHandler()
    p_stream_hundler.setLevel(INFO)
    p_stream_hundler.setFormatter(Formatter("%(message)s"))
    # Ensure the log directory exists before opening the file handler.
    if not os.path.isdir(i_logdir_path):
        os.makedirs(i_logdir_path, exist_ok = True)
    # File handler: DEBUG and above.
    # BUG FIX: the log file is now created inside i_logdir_path; the original
    # hard-coded "./Log" and ignored the parameter (they only coincided for
    # the default caller).
    p_file_handler = FileHandler(
        os.path.join(i_logdir_path, f"log{datetime.now():%Y%m%d%H%M%S}.log")
    )
    p_file_handler.setLevel(DEBUG)
    p_file_handler.setFormatter(
        Formatter("%(asctime)s@ %(name)s [%(levelname)s] %(funcName)s: %(message)s")
    )
    # Attach both handlers to the root logger.
    logging.basicConfig(level=NOTSET, handlers=[p_stream_hundler, p_file_handler])
    o_logger = logging.getLogger(__name__)
    return o_logger
#---------------------------------------------
# Ubuntu environment check function
#---------------------------------------------
# 実行環境のシステム判定処理
def check_system_env(logger, p_passphrase=''):
    """Identify the runtime platform and return its SYSTEM_LABEL_* constant.

    logger: logger used for progress output.
    p_passphrase: sudo password, used only on generic Linux where
        `sudo lshw` identifies the hardware.  (New optional parameter; the
        default keeps the original one-argument call signature working.)

    Returns one of the SYSTEM_LABEL_* module constants, or '' when the
    platform could not be classified.
    """
    logger.info('System Enviroment Check Process Begin')
    system_label = ''
    os_name = platform.system()
    logger.info('The operating system is [' + os_name + ']')
    if os_name == 'Linux':
        # Raspberry Pi / Jetson expose their model string in the device tree.
        if os.path.exists('/proc/device-tree/model'):
            res = call_subprocess_run('cat /proc/device-tree/model', logger)
            os_info = res.__next__()
            if 'Raspberry Pi' in os_info:
                system_label = SYSTEM_LABEL_RASPI
                logger.info('The model name is [' + os_info + ']')
            elif 'NVIDIA Jetson' in os_info:
                system_label = SYSTEM_LABEL_JETSON
                logger.info('The model name is [' + os_info + ']')
            else:
                system_label = SYSTEM_LABEL_LINUX_OTHER
                logger.info('The model name is [' + os_info + ']')
        else:
            # Generic Linux without a device tree: identify hardware via lshw.
            # BUG FIX: the original assigned the int label to os_info (causing
            # a TypeError on string concatenation), never set system_label,
            # logged at ERROR level, and called get_system_data() without its
            # required passphrase argument.
            system_label = SYSTEM_LABEL_LINUX
            for product in read_data(get_system_data(p_passphrase)):
                logger.info('The model name is [' + str(product) + ']')
    elif os_name == 'Windows':
        # Parse `systeminfo` output into "key: value" pairs; index [5][1]
        # holds the OS name line (checked against 'Microsoft Windows 10').
        systeminfo_l = win_call_subprocess_run('systeminfo', logger)
        systeminfo_dict = []
        for line in systeminfo_l:
            info_l = line.split(': ')
            for i in range(len(info_l)):
                info_l[i] = info_l[i].strip()
            systeminfo_dict.append(info_l)
        if 'Microsoft Windows 10' in systeminfo_dict[5][1]:
            system_label = SYSTEM_LABEL_WIN10
            logger.info('The model name is [' + systeminfo_dict[5][1] + ']')
        else:
            system_label = SYSTEM_LABEL_WIN_OTHER
            logger.info('The model name is [' + systeminfo_dict[5][1] + ']')
    return system_label
# Ubuntuパッケージリスト取得処理([0]パッケージ名、[1]バージョン、[2]アーキテクチャ)
def get_pkglist(logger):
    """Return the installed dpkg packages as [name, version, architecture] lists
    (one list per package, parsed from `dpkg-query -l`)."""
    raw_lines = call_subprocess_run("dpkg-query -l | awk -F, '6<=NR' | awk '{print $2\",\"$3\",\"$4}'", logger)
    return [entry.split(",") for entry in raw_lines]
|
karakuri-musha/JetRunStep | Common/kmg_json.py | #!/usr/bin/python3
#-----------------------------------------------------------------------------
# KMG JSON Create 2021.09
#
# 共通関数を定義したファイルです。他のプログラムから呼び出して使用します。
#
# Author : <EMAIL>ROKU<EMAIL>
# License : See the license file for the license.
#
# [変数記名規則]
# i_ : 関数の引数
# p_ : 関数内でのみ使用
# o_ : 関数の戻り値
#
# --------------------------------------------------------------------------
# 収録関数
# --------------------------------------------------------------------------
# [jsonファイル利用関数]
# read_json_entry(dir_path, p_input_file_name) : jsonファイルの読み込み(To Dict)
# read_json_dict_entry(p_json_data_dict:dict, p_dict_entry_name:str) : jsonデータエントリの読み出し(From Dict)
#
#
# Author : <EMAIL>KU<EMAIL>
# License : See the license file for the license.
#
#-----------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# ライブラリインポート部
# -------------------------------------------------------------------------------------------
import os
import json
#---------------------------------------------
# json function
#---------------------------------------------
# json read to dict
def read_json_entry(dir_path, p_input_file_name):
    """Load the JSON file at dir_path/p_input_file_name and return it as a dict.

    The file is read as UTF-8.
    """
    json_file_path = os.path.join(dir_path, p_input_file_name)
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(json_file_path, 'r', encoding="utf-8") as json_open:
        p_json_data_dict = json.load(json_open)
    return p_json_data_dict
# Read dict(from json)
def read_json_dict_entry(p_json_data_dict: dict, p_dict_entry_name: str):
    """Return the named entry from the JSON dict, or "" when the key is absent."""
    return p_json_data_dict.get(p_dict_entry_name, "")
|
karakuri-musha/JetRunStep | Common/kmg_subprocess.py | <reponame>karakuri-musha/JetRunStep<filename>Common/kmg_subprocess.py
#!/usr/bin/python3
#-----------------------------------------------------------------------------
# KMG Subprocess Create 2021.09
#
# 共通関数を定義したファイルです。他のプログラムから呼び出して使用します。
#
# Author : GENROKU@<EMAIL>a
# License : See the license file for the license.
#
# [変数記名規則]
# i_ : 関数の引数
# p_ : 関数内でのみ使用
# o_ : 関数の戻り値
#
# --------------------------------------------------------------------------
# 収録関数
# --------------------------------------------------------------------------
# [Subprocess function]
# win_call_subprocess_run(origin_cmd, logger) : Windows向け外部コマンド実行処理関数
# call_subprocess_run(origin_cmd, logger) : Linux向け外部コマンド実行処理関数(1)
# call_subprocess_run_sudo(origin_cmd, p_passphrase, logger) : Linux向け外部コマンド実行処理関数(2)
# get_system_data(p_passphrase) : Linux実行環境確認関数(1)
# read_data(proc_output) : Linux実行環境確認関数(2)
#
#
# Author : <EMAIL>ROKU@<EMAIL>
# License : See the license file for the license.
#
#-----------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# ライブラリインポート部
# -------------------------------------------------------------------------------------------
import sys
import subprocess
import json
#---------------------------------------------
# Subprocess function
#---------------------------------------------
# Windows向け 外部コマンドの実行処理用の関数 Function for executing external commands.
# Windowsはロケールによってコマンドプロンプトの言語設定が違うため、英語出力に変更して出力する
def win_call_subprocess_run(origin_cmd, logger):
    """Run *origin_cmd* in a Windows cmd.exe switched to code page 437 and
    yield each line of its output.

    The console code page is forced to 437 (US English) so command output is
    in English regardless of the system locale, then restored afterwards.
    On CalledProcessError the failure is logged and the process exits with 1.
    """
    try:
        # Capture the current code page ("Active code page: NNN") so it
        # can be restored after the command runs.
        pre_p = subprocess.Popen("chcp",
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True
                                 )
        chcp_res, _ = pre_p.communicate()
        chcp_origin = chcp_res.split(':')
        # Start cmd.exe with code page 437 and feed it the target command.
        res = subprocess.Popen("cmd.exe /k \"chcp 437\"",
                               shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=True
                               )
        res.stdin.write(origin_cmd + "\n")
        stdout_t, _ = res.communicate()
        # Restore the original console code page.
        cmd = "chcp " + str(chcp_origin[1])
        after_p = subprocess.Popen(cmd,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   universal_newlines=True
                                   )
        after_p.communicate()
        for line in stdout_t.splitlines():
            yield line
    except subprocess.CalledProcessError:
        # BUG FIX: logging.Logger.error() does not accept a `file` keyword
        # (that is print()'s signature); passing it raised a TypeError here.
        logger.error('Failed to execute the external command.[' + origin_cmd + ']')
        sys.exit(1)
# Linux/Raspberry Pi OS用の外部コマンド実行関数(1)
# 通常外部コマンドの実行処理用の関数 Function for executing external commands.
def call_subprocess_run(origin_cmd, logger):
    """Run *origin_cmd* through the shell and yield each line of its stdout.

    On a non-zero exit status the failure is logged and the process exits
    with status 1.
    """
    try:
        # BUG FIX: check=True is required for CalledProcessError to be raised;
        # without it the except branch below was unreachable dead code.
        res = subprocess.run(origin_cmd,
                             shell=True,
                             check=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True
                             )
        for line in res.stdout.splitlines():
            yield line
    except subprocess.CalledProcessError:
        # BUG FIX: Logger.error() has no `file` keyword (that is print()'s
        # signature); passing it raised a TypeError inside the handler.
        logger.error('Failed to execute the external command.[' + origin_cmd + ']')
        sys.exit(1)
# Linux/Raspberry Pi OS用の外部コマンド実行関数(2)
# Sudo系コマンドの実行処理用の関数 Function for executing external commands.
def call_subprocess_run_sudo(origin_cmd, p_passphrase, logger):
    """Run *origin_cmd* through the shell, feeding *p_passphrase* (plus a
    newline) on stdin — intended for sudo-prefixed commands — and yield each
    line of its stdout.

    On a non-zero exit status (check=True) the failure is logged and the
    process exits with status 1.
    """
    try:
        res = subprocess.run(origin_cmd,
                             shell=True,
                             check=True,
                             input=p_passphrase + '\n',
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True
                             )
        for line in res.stdout.splitlines():
            yield line
    except subprocess.CalledProcessError:
        # BUG FIX: Logger.error() has no `file` keyword (that is print()'s
        # signature); passing it raised a TypeError inside the handler.
        logger.error('Failed to execute the external command.[' + origin_cmd + ']')
        sys.exit(1)
# システム情報の取得
# Rassbery PiとJetson以外のLinuxで実行された場合に実行環境を取得するための処理
def get_system_data(p_passphrase):
lshw_cmd = ['sudo', 'lshw', '-json']
proc = subprocess.Popen(lshw_cmd,
stdin=p_passphrase + '/n',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return proc.communicate()[0]
# End Function
# Rassbery PiとJetson以外のLinuxで実行された場合に実行環境を読み込むための処理
def read_data(proc_output):
    """Parse lshw's JSON output and return the 'product' field of each entry
    ("" when an entry has no product key)."""
    proc_json = json.loads(proc_output)
    return [entry.get('product', '') for entry in proc_json]
# End Function |
karakuri-musha/JetRunStep | JetRunStep.py | #!/usr/bin/python3
#-----------------------------------------------------------------------------
# Jetson Nano Run Step cmd Tool (JetRunStep) Create 2021.09
#
# このツールは、Jetson Nanoの環境の更新履歴をトレースできる情報を管理するツールです。
# Ubuntuにおいてインストールコマンドなどを実行する際に、このコマンドを通して実行する
# ことで、下記の管理情報をもとに環境更新履歴を生成します。
# また、あらかじめ定義しておいた、インストールコマンド定義ファイル(.json形式)を使い
# 複数パッケージのインストール処理などを自動で実行することができます。
# ツールの動作条件は、JetRunStepSetting.jsonファイルで指定できます。
#
# 環境変更履歴は以下のコマンドで得られる情報をもとに生成します。
# 【管理情報】
# インストールパッケージ情報 (dpkg-query -l)
#
# オプション指定
# -i [Install] : コマンドを指定して実行する場合に指定します。実行するコマンドを指定します。
# -a [auto Install] : 自動インストールを行う場合に指定します。インストールコマンド定義ファイルを指定します。
# -s [Setup file name] : 動作設定用ファイル(json)を指定します。
#
# 処理結果出力
# 出力:環境変更履歴は、ツール実行フォルダの配下に「env_files」フォルダを生成し
# 出力されます。出力されるファイルは以下の構成です。
# (1) Env_trace.xml : 変更履歴のXML形式出力
# (2) Env_trace_view.xml : 変更履歴のXML形式出力(参照用)
# (3) Env_browse.html : 変更履歴のHtml形式出力
# (4) Env_latest_pack : 最新のパッケージ構成情報
#
# ログ:ツール実行フォルダの配下に「Log」というフォルダを作成し出力されます。
# ログファイルは指定したファイル数に達すると古いものから削除されます。
#
# Author : <EMAIL>ROKU<EMAIL>a
# License : See the license file for the license.
#
#-----------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# ライブラリインポート部
# -------------------------------------------------------------------------------------------
import sys
import csv
# 共通関数の呼び出し
from Common.kmg_common import *
from Common.kmg_json import *
from Common.kmg_file import *
from Common.kmg_subprocess import *
# ツール独自関数の呼び出し
from JetRunStep_func import *
# Directory (relative to the tool) where log files are written.
SETTING_LOG_DIR = "./Log"
# Sub-directory name used for backups of generated XML/HTML output files.
SETTING_BK_DIR = "xml_bk"
# User-facing status / error messages.
MSG_TOOL_RUN = "Start the Jetson Nano Run Setup Tool (JetRunStep) ."
MSG_TOOL_END = "Exit the Jetson Nano Run Setup Tool (JetRunStep) "
MSG_TOOL_ENV_ERROR = "This tool is not available in your environment."
MSG_TOOL_OPTION_ERROR = "The specified parameter is incorrect. To display the help, execute it with the -h option."
# -----------------------------------------------------------------------------
# main処理(main.pyが起動された場合に処理される内容)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Initialise the logger (files go to SETTING_LOG_DIR).
    logger = logger_init(SETTING_LOG_DIR)
    # Resolve the directory this tool runs from.
    dir_path = get_current_path()
    # Determine which system environment we are running on.
    system_label = check_system_env(logger)
    # Parse the command-line options.
    args = get_option()
    cmd_run_flg = args.Install             # -i : single command to execute
    auto_install_flg = args.AutoInstall    # -a : auto-install definition file
    input_file_name = args.json            # -s : tool settings file
    xml_view_file_name = args.XMLViewFile  # -v : human-readable XML output file
    # Load the tool's operating parameters (JSON settings file).
    p_run_env, p_env_t_int, p_env_t_out_d, p_env_t_xml_f, p_env_b_f, p_env_l_pkg_f, p_max_log_cnt = read_parameters(dir_path, input_file_name)
    # ---------------------------------------------------------------
    # Delete old log files, keeping at most p_max_log_cnt of them.
    # ---------------------------------------------------------------
    files = os.listdir(SETTING_LOG_DIR)
    if len(files) >= int(p_max_log_cnt) + 1:
        del_files = len(files) - int(p_max_log_cnt)
        files.sort()  # ascending name order: oldest (timestamped) first
        for i in range(del_files):
            del_file_name = os.path.join(SETTING_LOG_DIR, files[i])
            logger.info("delete log file : " + del_file_name)
            os.remove(del_file_name)  # remove from the oldest file onwards
    # Build the output file paths.
    p_env_xml_f_p = os.path.join(p_env_t_out_d, p_env_t_xml_f)
    p_html_f_p = os.path.join(p_env_t_out_d, p_env_b_f)
    p_pl_f_p = os.path.join(p_env_t_out_d, p_env_l_pkg_f)
    p_ai_f_p = os.path.join(dir_path, auto_install_flg)
    # Only run when the detected system matches the configured environment.
    if system_label == int(p_run_env):
        logger.info(MSG_TOOL_RUN)
        # "1" is the argparse default, i.e. the option was not supplied.
        # BUGFIX: compare with != — identity tests ('is not') against string
        # literals rely on interning and raise SyntaxWarning on Python 3.8+.
        if cmd_run_flg != "1":
            # -i given: run the single command and record the change history.
            p_update_list, p_pkg_list = run_one_cmd(cmd_run_flg, p_env_t_int, p_env_xml_f_p, logger)
            # Generate the output files.
            # 1-xml (human-readable copy, only when -v was supplied)
            if xml_view_file_name != "1":
                create_xml_view(xml_view_file_name, p_env_xml_f_p, SETTING_BK_DIR, logger)
            # 2-html
            create_html(p_env_xml_f_p, p_html_f_p, SETTING_BK_DIR, logger)
        else:
            # -a given: run the auto-install definition file.
            if auto_install_flg != "1":
                p_update_list, p_pkg_list = run_auto_setup(p_env_t_int, p_env_t_out_d, p_ai_f_p, p_env_xml_f_p, SETTING_BK_DIR, logger)
                # Generate the output files.
                # 1-xml
                if xml_view_file_name != "1":
                    create_xml_view(xml_view_file_name, p_env_xml_f_p, SETTING_BK_DIR, logger)
                # 2-html
                create_html(p_env_xml_f_p, p_html_f_p, SETTING_BK_DIR, logger)
            else:
                # Neither -i nor -a supplied: nothing to do.
                logger.error(MSG_TOOL_OPTION_ERROR)
    else:
        logger.error(MSG_TOOL_ENV_ERROR)
    sys.exit()
# End Function |
karakuri-musha/JetRunStep | Common/kmg_file.py | <filename>Common/kmg_file.py
#!/usr/bin/python3
#-----------------------------------------------------------------------------
# KMG file Create 2021.09
#
# 共通関数を定義したファイルです。他のプログラムから呼び出して使用します。
#
# Author : <EMAIL>ROKU@Karakuri-musha
# License : See the license file for the license.
#
# [変数記名規則]
# i_ : 関数の引数
# p_ : 関数内でのみ使用
# o_ : 関数の戻り値
#
# --------------------------------------------------------------------------
# 収録関数
# --------------------------------------------------------------------------
# [外部ファイル編集用関数]
# update_file(file_d, original_d, after_d, logger) : 編集元文字列指定
# update_file_firstline(file_d, original_d, after_d, logger) : 先頭行追加
# update_file_endline(file_d, original_d, after_d, logger) : 最終行追加
#
#
# Author : <EMAIL>ROKU@Karakuri-musha
# License : See the license file for the license.
#
#-----------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# ライブラリインポート部
# -------------------------------------------------------------------------------------------
import os
import shutil
import csv
#---------------------------------------------
# file custom function
#---------------------------------------------
# 外部ファイルの新規作成処理
# Create a brand-new external file containing i_after_d.
# Refuses to overwrite an existing file.
# Returns 0 on success, 1 on failure (file exists or OS error).
def create_file(i_file_d, i_after_d, logger):
    try:
        # Guard clause: never clobber an existing file.
        if os.path.exists(i_file_d):
            logger.error("The specified file name already exists.")
            return 1
        logger.info('---- Create file ----')
        with open(i_file_d, "w") as fs:
            fs.write(i_after_d)
        logger.info('---- Success Create file ----')
        return 0
    except OSError as e:
        logger.error(e)
        return 1
# 外部ファイルの更新処理[1](指定行の更新) Function for updating external files.
# Update an external file in place: every line that STARTS WITH i_original_d
# is replaced by i_after_d. A backup of the untouched file is stored in the
# i_bk_dirname sub-directory next to the file.
# Returns 0 on success, 1 on OS error.
def update_file(i_file_d, i_original_d, i_after_d, i_bk_dirname, logger):
    try:
        # Back up the original file before touching it.
        p_dir_name = os.path.dirname(i_file_d)
        p_file_name = os.path.basename(i_file_d)
        # BUGFIX: join with a separator. The previous string concatenation
        # (p_dir_name + i_bk_dirname) produced a sibling path like
        # "dirxml_bk" instead of "dir/xml_bk"; update_file_full already
        # joined correctly — this makes the backup location consistent.
        p_mk_dir_name = os.path.join(p_dir_name, i_bk_dirname)
        p_bk_file_namepath = os.path.join(p_mk_dir_name, p_file_name)
        if not os.path.isdir(p_mk_dir_name):
            os.makedirs(p_mk_dir_name, exist_ok=True)
        shutil.copy2(i_file_d, p_bk_file_namepath)
        # Rebuild the file line by line, swapping in the replacement text.
        file_line = []
        logger.info('---- Scan file ----')
        with open(i_file_d, "r") as fs:
            for fsline in fs:
                if fsline.find(i_original_d) == 0:  # line starts with target
                    file_line.append(i_after_d + '\n')
                else:
                    file_line.append(fsline)
        logger.info('---- Update file ----')
        with open(i_file_d, "w") as fs:
            for line in file_line:
                fs.write(line)
        logger.info('---- Success update file ----')
        return 0
    except OSError as e:
        logger.error(e)
        return 1
# 外部ファイルの更新処理[2](先頭行追加)
# Update an external file in place by PREPENDING i_after_d as the first line.
# A backup of the untouched file is stored in the i_bk_dirname sub-directory
# next to the file.
# Returns 0 on success, 1 on OS error.
def update_file_firstline(i_file_d, i_after_d, i_bk_dirname, logger):
    try:
        # Back up the original file before touching it.
        p_dir_name = os.path.dirname(i_file_d)
        p_file_name = os.path.basename(i_file_d)
        # BUGFIX: join with a separator instead of concatenating
        # (p_dir_name + i_bk_dirname), which created a sibling directory
        # such as "dirxml_bk" rather than "dir/xml_bk".
        p_mk_dir_name = os.path.join(p_dir_name, i_bk_dirname)
        p_bk_file_namepath = os.path.join(p_mk_dir_name, p_file_name)
        if not os.path.isdir(p_mk_dir_name):
            os.makedirs(p_mk_dir_name, exist_ok=True)
        shutil.copy2(i_file_d, p_bk_file_namepath)
        # Collect the new first line followed by the existing content.
        file_line = []
        file_line.append(i_after_d + '\n')
        logger.info('---- Scan file ----')
        with open(i_file_d, "r") as fs:
            for fsline in fs:
                file_line.append(fsline)
        logger.info('---- Update file ----')
        with open(i_file_d, "w") as fs:
            for line in file_line:
                fs.write(line)
        logger.info('---- Success update file ----')
        return 0
    except OSError as e:
        logger.error(e)
        return 1
# 外部ファイルの更新処理[3](末尾追記) Function for updating external files.
# Update an external file in place by APPENDING i_after_d at the end
# (preceded by a blank line). A backup of the untouched file is stored in
# the i_bk_dirname sub-directory next to the file.
# Returns 0 on success, 1 on OS error.
def update_file_endline(i_file_d, i_after_d, i_bk_dirname, logger):
    try:
        # Back up the original file before touching it.
        p_dir_name = os.path.dirname(i_file_d)
        p_file_name = os.path.basename(i_file_d)
        # BUGFIX: join with a separator instead of concatenating
        # (p_dir_name + i_bk_dirname), which created a sibling directory
        # such as "dirxml_bk" rather than "dir/xml_bk".
        p_mk_dir_name = os.path.join(p_dir_name, i_bk_dirname)
        p_bk_file_namepath = os.path.join(p_mk_dir_name, p_file_name)
        if not os.path.isdir(p_mk_dir_name):
            os.makedirs(p_mk_dir_name, exist_ok=True)
        shutil.copy2(i_file_d, p_bk_file_namepath)
        logger.info('---- Update file ----')
        with open(i_file_d, "a") as fs:
            fs.write('\n' + i_after_d + '\n')
        logger.info('---- Success update file ----')
        return 0
    except OSError as e:
        logger.error(e)
        return 1
# 外部ファイルの更新処理[4](ファイル 有:全上書き、無:新規作成)
# 指定名のファイルが存在する場合は、バックアップを取得して更新
# Write i_after_d to i_file_d: create the file when it does not exist
# (with a trailing newline), otherwise back the old file up into the
# i_bk_dirname sub-directory and overwrite it (content written verbatim,
# preserving the original asymmetry).
# Returns 0 on success, 1 on OS error.
def update_file_full(i_file_d, i_after_d, i_bk_dirname, logger):
    try:
        # No file with the given name yet: create it.
        if not os.path.exists(i_file_d):
            logger.info('---- Create file ----')
            with open(i_file_d, "w") as fs:
                fs.write(i_after_d + '\n')
            logger.info('---- Success Create file ----')
            return 0
        # File exists: back it up, then overwrite.
        else:
            p_dir_name = os.path.dirname(i_file_d)
            p_file_name = os.path.basename(i_file_d)
            p_mk_dir_name = os.path.join(p_dir_name, i_bk_dirname)
            p_bk_file_namepath = os.path.join(p_mk_dir_name, p_file_name)
            if not os.path.isdir(p_mk_dir_name):
                os.makedirs(p_mk_dir_name, exist_ok=True)
            shutil.copy2(i_file_d, p_bk_file_namepath)
            logger.info('---- Update file ----')
            with open(i_file_d, "w") as fs:
                fs.write(i_after_d)
            logger.info('---- Success Update file ----')
            # BUGFIX: the overwrite branch previously fell through and
            # returned None; report success like every sibling function.
            return 0
    except OSError as e:
        logger.error(e)
        return 1
karakuri-musha/JetRunStep | JetRunStep_func.py | <gh_stars>0
#!/usr/bin/python3
#-----------------------------------------------------------------------------
# JetRunStep Function Create 2021.09
#
# JetRunStepツールで使用する独自関数を定義したファイルです。
# ツールのメインプログラムから呼び出して使用します。
#
# Author : GENROKU@Karakuri-musha
# License : See the license file for the license.
#
# [記名規則]
# i_ : 関数の引数
# p_ : 関数内でのみ使用
# o_ : 関数の戻り値
#
# --------------------------------------------------------------------------
# 収録関数
# --------------------------------------------------------------------------
# get_option(): :ツール実行時オプションの設定
# read_parameters(i_input_file_name): :動作パラメータの取得(jsonファイルからの読み出し)
# prettify(i_elem): :XML判読化
# create_xml(i_pkg_list, i_xml_file_d, logger): :管理用XML生成(初期化)
# update_xml(i_xml_file_d, i_new_pkg_list, cmd_str, logger): :管理用XML更新
# create_xml_view(i_xml_view_file_name, i_xml_trace_file, i_xml_bk_dir, logger): :判読化済みXMLファイル生成
# create_html(i_xml_file_path, i_html_file_path, logger): :HTMLファイル生成
# run_one_cmd(i_cmd_string, i_env_t_int, i_xml_path, logger): :コマンド単体実行処理
# run_auto_setup(i_env_t_int, i_dir_path, i_json_name, i_xml_path, logger): :Auto Setup実行処理(パラメータ設定)
# iterate_env_setting(i_res_status, i_type_d, i_comment_d, i_cmd_d, logger, i_original_d = '', i_after_d = ''): :Auto Setuo実行処理(実行)
#
#
# Author : GENROKU@Karakuri-musha
# License : See the license file for the license.
#
#-----------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# ライブラリインポート部
# -------------------------------------------------------------------------------------------
import os
from datetime import datetime
from logging import INFO, DEBUG, NOTSET
from argparse import ArgumentParser
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement, Comment
from xml.dom import minidom
# 共通関数の呼び出し
from Common.kmg_common import *
from Common.kmg_json import *
from Common.kmg_file import *
from Common.kmg_subprocess import *
# -------------------------------------------------------------------------------------------
# 変数/定数 定義部
# -------------------------------------------------------------------------------------------
MSG_OPTIONS_HELP_INSTALL = "Specify when executing by specifying a command. Specifies the command to execute."
MSG_OPTIONS_HELP_AUTOINS = "Specify when performing automatic installation. Specify the installation command definition file."
MSG_OPTIONS_HELP_SETUP = "Specify the operation setting file (json)."
SETTING_FILE_NAME = "JetRunStepSetting.json"
XML_ROOTENTITY_NAME = "Pkglist"
XML_MGMENTITY_NAME = "Package"
XML_MGMENTITY_ATTR1 = "name"
XML_MGMENTITY_ATTR2 = "flg"
XML_ITEM_NAME = "Trace"
XML_ITEM_ATTR = "order"
XML_ITEM_VERSION = "Versions"
XML_ITEM_DATE = "Updatedate"
XML_ITEM_REASON = "Reasonforup"
# ----------------------------------------------------------
# 関数定義部
# ----------------------------------------------------------
# 実行時オプションの構成
# Build and evaluate the command-line option parser.
#   -i : run a single command        -a : auto-install definition file
#   -s : tool settings JSON file     -v : human-readable XML output file
# Returns the parsed argparse namespace.
def get_option():
    o_argparser = ArgumentParser()
    o_argparser.add_argument("-i", "--Install", default="1", help=MSG_OPTIONS_HELP_INSTALL)
    o_argparser.add_argument("-a", "--AutoInstall", default="1", help=MSG_OPTIONS_HELP_AUTOINS)
    o_argparser.add_argument("-s", "--json", default=SETTING_FILE_NAME, help=MSG_OPTIONS_HELP_SETUP)
    # BUGFIX: -v previously reused the -s help text (MSG_OPTIONS_HELP_SETUP);
    # describe the option itself.
    o_argparser.add_argument("-v", "--XMLViewFile", default="1",
                             help="Specify the output file name of the human-readable (pretty-printed) XML.")
    return o_argparser.parse_args()
# 実行環境のカレントパス取得処理
# Return the directory this program runs from: the executable's folder for
# a frozen (PyInstaller-style) build, otherwise the folder of this source file.
def get_current_path():
    p_frozen = getattr(sys, 'frozen', False)
    p_base = sys.executable if p_frozen else __file__
    return os.path.dirname(os.path.abspath(p_base))
# End Function
# 動作パラメータの取得(jsonファイルからの読み出し)
# Load the tool's operating parameters from the settings JSON file.
# Returns a 7-tuple in fixed order: (run_env, trace interval, output dir,
# trace xml file, browse file, latest-package file, max log count).
def read_parameters(i_dir_path, i_input_file_name):
    # Read the JSON settings into a dict, then pull the keys in order.
    p_json_data_dict = read_json_entry(i_dir_path, i_input_file_name)
    p_keys = ('run_env', 'env_trace_interval', 'env_trace_output_dir',
              'env_trace_xml_file', 'env_browse_file',
              'env_latest_pkg_file', 'max_log_cnt')
    return tuple(read_json_dict_entry(p_json_data_dict, p_key) for p_key in p_keys)
# End Function
#---------------------------------------------
# XML function
#---------------------------------------------
# XML成型用関数
# Serialise *i_elem* as a pretty-printed (space-indented) XML string.
def prettify(i_elem):
    p_raw = ElementTree.tostring(i_elem, 'utf-8')
    return minidom.parseString(p_raw).toprettyxml(indent=" ")
# End Function
# XML構造生成処理(初回実行時)
# XMLファイルが存在しない場合に実行する処理(XML構造を持ったリストを返す)
# Build the initial management XML on the tool's first run.
#   i_pkg_list   : sequence of (package name, version, ...) entries.
#   i_xml_file_d : path of the XML file to write.
def create_xml(i_pkg_list, i_xml_file_d, logger):
    logger.info("The environment configuration check (init) will start.")
    # XML root and generator comment.
    xml_root = Element(XML_ROOTENTITY_NAME)
    comment = Comment('Generated for JetRunSetup by GENROKU@Karakuri-Musha')
    xml_root.append(comment)
    # One <Package> entity per installed package; iterate entries directly
    # instead of indexing via range(len(...)).
    for p_entry in i_pkg_list:
        xml_pkg = SubElement(xml_root, XML_MGMENTITY_NAME)
        xml_pkg.set(XML_MGMENTITY_ATTR1, p_entry[0])
        xml_pkg.set(XML_MGMENTITY_ATTR2, "1")
        xml_item = SubElement(xml_pkg, XML_ITEM_NAME)
        xml_item.set(XML_ITEM_ATTR, "0")
        xml_ver = SubElement(xml_item, XML_ITEM_VERSION)
        xml_ver.text = p_entry[1]
        xml_date = SubElement(xml_item, XML_ITEM_DATE)
        xml_date.text = f"{datetime.now():%Y.%m.%d}"
        xml_reason = SubElement(xml_item, XML_ITEM_REASON)
        xml_reason.text = "Initial creation"
    # Public ElementTree(root) constructor instead of the private _setroot().
    p_elemtree = ElementTree.ElementTree(xml_root)
    p_elemtree.write(i_xml_file_d, encoding="utf-8", xml_declaration=None, method="xml")
    logger.info("The environment configuration check (init) is complete.")
# End Function
# XMLファイル更新処理
# 指定のXMLファイルに対して新しいバージョンを追加する処理
# Update the management XML with the current package list.
# For every (name, version) entry in i_new_pkg_list:
#   * unknown package -> add a new <Package> entity with an initial <Trace>
#   * known, same ver -> just re-flag the package as still installed
#   * known, new ver  -> append a new <Trace> entry (reason = cmd_str)
# Packages absent from i_new_pkg_list are removed from the XML.
# Returns a list of "name : old -> new\n" strings describing version changes.
def update_xml(i_xml_file_d, i_new_pkg_list, cmd_str, logger):
    logger.info("The environment configuration check will start.")
    # Load the existing trace XML.
    xml_tree = ElementTree.parse(i_xml_file_d)
    o_update_list = []
    xml_root = xml_tree.getroot()
    # Flag every package '0' (= not seen yet); entries still '0' at the end
    # were uninstalled and get dropped below.
    for child in xml_root.iter(XML_MGMENTITY_NAME):
        child.set(XML_MGMENTITY_ATTR2, '0')
    for p_entry in i_new_pkg_list:
        pkg_n = p_entry[0]  # package name from the current environment
        pkg_v = p_entry[1]  # package version from the current environment
        # All <Trace> entries recorded so far for this package.
        q_str = "./" + XML_MGMENTITY_NAME + "[@" + XML_MGMENTITY_ATTR1 + "=" + "\'" + pkg_n + "\'" + "]/" + XML_ITEM_NAME + "[@" + XML_ITEM_ATTR + "]"
        res_find = xml_tree.findall(q_str)
        # Newly installed package: create its entity and first trace.
        if len(res_find) == 0:
            Add_pack_ent = SubElement(xml_root, XML_MGMENTITY_NAME)
            Add_pack_ent.set(XML_MGMENTITY_ATTR1, pkg_n)
            Add_pack_ent.set(XML_MGMENTITY_ATTR2, "1")
            Add_mgm_ent = SubElement(Add_pack_ent, XML_ITEM_NAME)
            Add_mgm_ent.set(XML_ITEM_ATTR, str(0))
            Add_xml_ver = SubElement(Add_mgm_ent, XML_ITEM_VERSION)
            Add_xml_ver.text = pkg_v
            Add_xml_date = SubElement(Add_mgm_ent, XML_ITEM_DATE)
            Add_xml_date.text = f"{datetime.now():%Y.%m.%d}"
            Add_xml_reason = SubElement(Add_mgm_ent, XML_ITEM_REASON)
            Add_xml_reason.text = cmd_str
        # Package already tracked.
        else:
            # 'cnt' ends up holding the highest recorded order attribute.
            cnt = "0"
            for elem in res_find:
                cnt = elem.attrib[XML_ITEM_ATTR]
            # Latest recorded version of the package.
            q_str = "./" + XML_MGMENTITY_NAME + "[@" + XML_MGMENTITY_ATTR1 + "=" + "\'" + pkg_n + "\'" + "]/" + XML_ITEM_NAME + "[@" + XML_ITEM_ATTR + "=\'" + cnt + "\']/" + XML_ITEM_VERSION
            least_v = xml_tree.find(q_str).text
            # Version unchanged: only mark the package as still present.
            if pkg_v == least_v:
                q_str = "./" + XML_MGMENTITY_NAME + "[@" + XML_MGMENTITY_ATTR1 + "=" + "\'" + pkg_n + "\']"
                res_pkg = xml_tree.findall(q_str)
                for elem in res_pkg:
                    elem.set(XML_MGMENTITY_ATTR2, '1')
            # Version changed: record it and append a new <Trace> entry.
            else:
                o_update_list.append(pkg_n + " : " + least_v + " -> " + pkg_v + "\n")
                q_str = "./" + XML_MGMENTITY_NAME + "[@" + XML_MGMENTITY_ATTR1 + "=" + "\'" + pkg_n + "\']"
                for a_elem in xml_tree.findall(q_str):
                    # BUGFIX: flag the <Package> element itself (a_elem).
                    # Previously this wrote the flag onto 'elem' — a stale
                    # <Trace> element left over from the loop above — so
                    # version-updated packages kept flg='0' and were deleted
                    # by the cleanup pass below.
                    a_elem.set(XML_MGMENTITY_ATTR2, '1')
                    Add_mgm_ent = SubElement(a_elem, XML_ITEM_NAME)
                    Add_mgm_ent.set(XML_ITEM_ATTR, str(int(cnt) + 1))
                    Add_xml_ver = SubElement(Add_mgm_ent, XML_ITEM_VERSION)
                    Add_xml_ver.text = pkg_v
                    Add_xml_date = SubElement(Add_mgm_ent, XML_ITEM_DATE)
                    Add_xml_date.text = f"{datetime.now():%Y.%m.%d}"
                    Add_xml_reason = SubElement(Add_mgm_ent, XML_ITEM_REASON)
                    Add_xml_reason.text = cmd_str
    # Drop packages that were not seen in the new package list.
    q_str = "./" + XML_MGMENTITY_NAME + "[@" + XML_MGMENTITY_ATTR2 + "=" + "\'0\'" + "]"
    res = xml_tree.findall(q_str)
    if len(res) != 0:  # '!=' replaces the unreliable "is not 0" identity test
        for elem in res:
            xml_root.remove(elem)
    xml_tree.write(i_xml_file_d, encoding="utf-8", xml_declaration=None, method="xml")
    logger.info("The environment configuration check is complete.")
    return o_update_list
# End Function
# Generate the human-readable (indented) copy of the trace XML.
def create_xml_view(i_xml_view_file_name, i_xml_trace_file, i_xml_bk_dir, logger):
    logger.info("Start generating XML file for reference.")
    # Parse the trace file and pretty-print its root element.
    p_root = ElementTree.parse(i_xml_trace_file).getroot()
    p_pretty = prettify(p_root)
    # Persist via the common file helper (backs up any previous copy).
    update_file_full(i_xml_view_file_name, p_pretty, i_xml_bk_dir, logger)
    logger.info("Complete generating XML file for reference.")
# HTML(親属性)生成
# Render the trace XML as a browsable HTML table (one row per package with a
# collapsible version history) and write it via update_file_full.
def create_html(i_xml_file_path, i_html_file_path, i_xml_bk_dir, logger):
    logger.info("Start HTML file generation.")
    p_html_str_list = []
    # Fixed boilerplate: document head, table header, column titles.
    p_html_str_list.append('<!DOCTYPE HTML>')
    p_html_str_list.append('<html lang=\"ja\" class=\"pc\">')
    p_html_str_list.append('<head>')
    p_html_str_list.append('<meta charset="UTF-8">')
    p_html_str_list.append('<title>Package environment update history for Ubuntu</title>')
    p_html_str_list.append('<link rel=\"stylesheet\" type=\"text/css\" href=\"js/JetRunStep.css\">')
    p_html_str_list.append('<script src=\"js/jquery-3.6.0.min.js\"></script>')
    p_html_str_list.append('</head>')
    p_html_str_list.append('<body>')
    p_html_str_list.append('<b style=\"font-size: 20pt;\">Package environment update history for Ubuntu</b><br />')
    p_html_str_list.append('<a style=\"float: right;\">Generated in '+ f"{datetime.now():%Y.%m.%d}" + '</a><br />')
    p_html_str_list.append('<a style=\"float: right;\">JetRunStep Tool: genroku @ Karakuri-Musha</a>')
    p_html_str_list.append('<div id=\'Title\'></div>')
    p_html_str_list.append('<table class=\'package-list\'>')
    p_html_str_list.append('<thead>')
    p_html_str_list.append('<tr>')
    p_html_str_list.append('<th class=\"pkg-n\">Package Name</th>')
    p_html_str_list.append('<th class=\"pkg-v\">Version</th>')
    p_html_str_list.append('<th class=\"plg-ud\">least Update Date</th>')
    p_html_str_list.append('<th class=\"show-hide\">Show/<br />Hide</th>')
    p_html_str_list.append('<th class=\"up-his\">Update History</th>')
    p_html_str_list.append('</tr>')
    p_html_str_list.append('</thead>')
    p_html_str_list.append('<tbody>')
    # Walk the trace XML: one <tr> per <Package>, newest <Trace> first.
    xml_tree = ElementTree.parse(i_xml_file_path)
    xml_root = xml_tree.getroot()
    for child in xml_root:
        p_pkg_name = child.attrib['name']
        p_html_str_list.append('<tr>')
        p_html_str_list.append('<td>' + p_pkg_name + '</td>')
        cnt = 0
        least_cnt = str(len(child))
        # reversed(): iterate traces newest-first so the latest version
        # fills the summary cells; assumes child order Versions /
        # Updatedate / Reasonforup as written by create_xml / update_xml.
        for e in reversed(child):
            p_pkg_v = e[0].text
            p_pkg_update = e[1].text
            p_pkg_note = e[2].text
            # Only the first (newest) trace opens the summary cells and
            # the collapsible <dl>; subsequent traces add history rows.
            if cnt == 0:
                p_html_str_list.append('<td>' + p_pkg_v + '</td>')
                p_html_str_list.append('<td>' + p_pkg_update + '</td>')
                p_html_str_list.append('<td><a href=\"#\" class=\"open\">+</a><a href=\"#\" class=\"close\">-</a></td>')
                p_html_str_list.append('<td>' + least_cnt + '<br />')
                p_html_str_list.append('<dl class=\"versions\">')
            p_html_str_list.append('<dt>' + p_pkg_v + ' </dt><dd><--' + p_pkg_update + ' [' + p_pkg_note + ']</dd>')
            cnt = cnt + 1
        p_html_str_list.append('</dl>')
        p_html_str_list.append('</td>')
        p_html_str_list.append('</tr>')
    # Fixed boilerplate: close table and document.
    p_html_str_list.append('</tbody>')
    p_html_str_list.append('</table>')
    p_html_str_list.append('<script src=\"js/JetRunStep.js\"></script>')
    p_html_str_list.append('</body>')
    p_html_str_list.append('</html>')
    logger.info("End HTML file generation.")
    # Everything is joined without separators; persisted via the common
    # file helper (which backs up any previous copy).
    p_join_str = "".join(p_html_str_list)
    update_file_full(i_html_file_path, p_join_str, i_xml_bk_dir, logger)
#---------------------------------------------
# One Command Execute function
#---------------------------------------------
# 単体コマンドの実行と環境変更情報の生成
# Run a single shell command and refresh the environment trace XML.
# Only acts when the trace interval is "All" or "Once".
# Returns (list of "pkg : old -> new" change lines, current package list).
def run_one_cmd(i_cmd_string, i_env_t_int, i_xml_path, logger):
    # Initialise the results so an unexpected trace-interval value returns
    # empty lists instead of raising NameError at the return statement.
    o_update_list = []
    o_pkg_list = []
    if i_env_t_int in {"All", "Once"}:
        # Execute the command and log its output.
        res = call_subprocess_run(i_cmd_string, logger)
        for line in res:
            logger.info(line)
        # Capture the resulting package list and fold it into the XML.
        o_pkg_list = get_pkglist(logger)
        if os.path.exists(i_xml_path):
            o_update_list = update_xml(i_xml_path, o_pkg_list, i_cmd_string, logger)
            # '!=' replaces the unreliable "is not 0" identity comparison.
            if len(o_update_list) != 0:
                for line in o_update_list:
                    logger.info(line)
        else:
            # First run: no trace file yet, create it from scratch.
            create_xml(o_pkg_list, i_xml_path, logger)
            logger.info("Since this tool was executed for the first time, an administrative file (Env_trace.xml) was created.")
    return o_update_list, o_pkg_list
# End Function
#---------------------------------------------
# Auto Setup Execute function
#---------------------------------------------
# インストールコマンド定義ファイルをもとにした自動インストール処理
# Run every step of an auto-install definition file (JSON list of steps).
# Trace interval: "All" refreshes the trace XML after every command,
# "Once" refreshes it a single time after the whole run.
# Returns (list of "pkg : old -> new" change lines, current package list).
def run_auto_setup(i_env_t_int, i_dir_path, i_json_name, i_xml_path, i_bk_dirname, logger):
    res_status = 0
    # Initialise the results (and the last-command holder) so an empty
    # definition file or an unexpected interval value cannot raise
    # NameError at the return statement.
    o_update_list = []
    o_pkg_list = []
    p_cmd_d = ''
    p_json_read = read_json_entry(i_dir_path, i_json_name)
    logger.info('Setup will start. The environment is Jetson and the file name is [' + i_json_name + ']')
    # Execute the command list in definition-file order.
    for json_s in p_json_read:
        p_type_d = json_s.get('type', '')
        p_comment_d = json_s.get('comment', '')
        p_cmd_d = json_s.get('cmd', '')
        p_original_d = json_s.get('original', '')
        p_after_d = json_s.get('after', '')
        logger.info('The following command is being executed. [' + p_cmd_d + ']')
        res_status = iterate_env_setting(res_status, p_type_d, p_comment_d, p_cmd_d, i_bk_dirname, logger, p_original_d, p_after_d)
        # "All": regenerate the package trace after every single command.
        if i_env_t_int == "All":
            o_pkg_list = get_pkglist(logger)
            if os.path.exists(i_xml_path):
                o_update_list = update_xml(i_xml_path, o_pkg_list, p_cmd_d, logger)
                # '!=' replaces the unreliable "is not 0" identity test.
                if len(o_update_list) != 0:
                    for line in o_update_list:
                        logger.info(line)
            else:
                create_xml(o_pkg_list, i_xml_path, logger)
                logger.info("Since this tool was executed for the first time, an administrative file (Env_trace.xml) was created.")
    # "Once": regenerate the package trace a single time after all commands.
    if i_env_t_int == "Once":
        o_pkg_list = get_pkglist(logger)
        if os.path.exists(i_xml_path):
            o_update_list = update_xml(i_xml_path, o_pkg_list, p_cmd_d, logger)
            if len(o_update_list) != 0:
                for line in o_update_list:
                    logger.info(line)
        else:
            # BUGFIX: the logger argument was missing here, so the first
            # "Once" run crashed with TypeError inside create_xml.
            create_xml(o_pkg_list, i_xml_path, logger)
            logger.info("Since this tool was executed for the first time, an administrative file (Env_trace.xml) was created.")
    return o_update_list, o_pkg_list
# End Function
#---------------------------------------------
# Auto Install function
#---------------------------------------------
# 実行処理と結果対処 Execution processing and result handling functions.
# Execute one auto-setup step and fold its result into the running status.
# Step types: 'cmd' = shell command, 'frc' = replace a line in a file,
# 'ffc' = prepend a line to a file, 'fec' = append a line to a file.
# Returns 0 while everything succeeds, 1 once any step has failed.
def iterate_env_setting(i_res_status, i_type_d, i_comment_d, i_cmd_d, i_bk_dirname, logger, i_original_d = '', i_after_d = ''):
    logger.info(i_comment_d)
    o_status = i_res_status
    if i_type_d == 'cmd':
        # Run the shell command; stop logging output at the first failure.
        for p_line in call_subprocess_run(i_cmd_d, logger):
            if 'Failed to execute' in p_line:
                logger.error(p_line)
                o_status = 1
                break
            logger.info(p_line)
    elif i_type_d == 'frc':
        o_status = update_file(i_cmd_d, i_original_d, i_after_d, i_bk_dirname, logger)
    elif i_type_d == 'ffc':
        o_status = update_file_firstline(i_cmd_d, i_after_d, i_bk_dirname, logger)
    elif i_type_d == 'fec':
        o_status = update_file_endline(i_cmd_d, i_after_d, i_bk_dirname, logger)
    return o_status
# End Function
|
KyleBringmans/Pac-Man | featureExtractors.py | <gh_stars>1-10
# featureExtractors.py
# --------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
"Feature extractors for Pacman game states"
from game import Directions, Actions, random
import util
import math
class FeatureExtractor:
    """Abstract base for the feature extractors used by approximate Q-agents."""
    def __init__(self):
        # Shortest-path lookup table keyed by position pairs; populated
        # externally (by the 'run' function in game.py) before features
        # are queried.
        self.paths = {}
    def getFeatures(self, state, action):
        """Return a dict mapping features to counts (usually 1.0 for
        indicator functions). Subclasses must override."""
        util.raiseNotDefined()
class IdentityExtractor(FeatureExtractor):
    """Single indicator feature for the exact (state, action) pair."""
    def getFeatures(self, state, action):
        features = util.Counter()
        features[(state, action)] = 1.0
        return features
class CoordinateExtractor(FeatureExtractor):
    """Indicator features for the state, its coordinates, and the action."""
    def getFeatures(self, state, action):
        features = util.Counter()
        features[state] = 1.0
        features['x=%d' % state[0]] = 1.0
        # NOTE(review): the y-feature is keyed on state[0] (not state[1]);
        # behavior kept as-is — confirm whether this is intentional.
        features['y=%d' % state[0]] = 1.0
        features['action=%s' % action] = 1.0
        return features
def closestFood(pos, food, walls):
    """
    closestFood -- BFS for the maze distance from pos to the nearest food
    pellet; returns None when no food is reachable. Same search as in the
    search project, kept in one place.
    """
    # deque gives O(1) popleft; list.pop(0) shifted the whole fringe (O(n))
    # on every expansion. Local import keeps this change self-contained.
    from collections import deque
    fringe = deque([(pos[0], pos[1], 0)])
    expanded = set()
    while fringe:
        pos_x, pos_y, dist = fringe.popleft()
        if (pos_x, pos_y) in expanded:
            continue
        expanded.add((pos_x, pos_y))
        # if we find a food at this location then exit
        if food[pos_x][pos_y]:
            return dist
        # otherwise spread out from the location to its neighbours
        nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)
        for nbr_x, nbr_y in nbrs:
            fringe.append((nbr_x, nbr_y, dist + 1))
    # no food found
    return None
def distToClosestCapsule(pos, capsules, distMap):
    """Return the precomputed distance from pos to the nearest capsule,
    or +inf when there are no capsules.

    distMap is indexed by (position, capsule) pairs.
    """
    if not capsules:
        return float('inf')
    # min over the lookup table replaces the manual running-minimum loop.
    return min(distMap[pos, capsule] for capsule in capsules)
def chooseNextCrossroad(crossroads, ghostsPos, pacmanPos, distMap, walls):
    """Pick the nearest crossroad Pacman can reach without meeting a ghost.

    Crossroads are ordered by precomputed distance from pacmanPos; the first
    one reachable per util.accessibleAStar wins. When none is safely
    reachable, the farthest crossroad is returned as a fallback.
    """
    # Selection-sort the crossroads by distance from Pacman, nearest first.
    # BUGFIX: the loop-termination flag was named 'sorted', shadowing the
    # builtin; renamed to 'done'.
    sortedCrossroads = []
    done = False
    while not done:
        if len(sortedCrossroads) < len(crossroads):
            closest = random.choice(crossroads)
            for crossroad in crossroads:
                if distMap[crossroad, pacmanPos] < distMap[closest, pacmanPos] and crossroad not in sortedCrossroads:
                    closest = crossroad
            sortedCrossroads.append(closest)
        else:
            done = True
    # Return the first crossroad reachable without crossing a ghost.
    for i in range(0, len(sortedCrossroads)):
        reachable = util.accessibleAStar(pacmanPos, sortedCrossroads[i], ghostsPos, walls)
        if reachable:
            return sortedCrossroads[i]
    # Nothing safely reachable: fall back to the last (farthest) crossroad.
    return sortedCrossroads[-1]
def vectorSum(vec1, vec2):
    """Return the component-wise sum of two 2-D vectors as a tuple."""
    x1, y1 = vec1
    x2, y2 = vec2
    return (x1 + x2, y1 + y2)
class SimpleExtractor(FeatureExtractor):
    """
    Returns simple features for a basic reflex Pacman:
    - whether food will be eaten
    - how far away the next food is
    - whether a ghost collision is imminent
    - whether a ghost is one step away
    """
    def __init__(self):
        FeatureExtractor.__init__(self)
    def getFeatures(self, state, action):
        # Build the feature vector for taking `action` in `state`.
        # NOTE(review): this method relies on Python 2 semantics
        # (list-returning filter/map, tuple-parameter lambda below) and
        # will not run under Python 3 — confirm the target interpreter.
        # extract the grid of food and wall locations and get the ghost locations
        food = state.getFood()
        walls = state.getWalls()
        ghosts = state.getGhostPositions()
        ghostStates = state.getGhostStates()
        sTime = state.getScaredTime()
        # Longest possible straight path, used to normalise distances to [0,1].
        maxPathLen = max([walls.height, walls.width]) * 1.0
        n = 3  # distance instead of 1
        features = util.Counter()
        # compute the location of pacman after he takes the action
        x, y = state.getPacmanPosition()
        dx, dy = Actions.directionToVector(action)
        next_x, next_y = int(x + dx), int(y + dy)
        # Fraction of scare time already elapsed (averaged over all ghosts).
        features["scared"] = (sTime - (self.avgScaredTime(ghostStates))) / (sTime * 1.0)
        features["bias"] = 1.0
        # Ghosts adjacent to Pacman's next position.
        features["#-of-ghosts-1-step-away"] = sum(
            (next_x, next_y) in Actions.getLegalNeighbors(g, walls) for g in ghosts)
        # (position, state) pairs of ghosts that are currently NOT scared.
        notScared = list(filter(lambda q: q[1].scaredTimer == 0, zip(ghosts, ghostStates)))
        features["#-of-not-scared-ghosts-n-steps-away"] = sum(
            self.euclDist(x, y, g[0][0], g[0][1]) < n for g in notScared)
        # Scared ghosts within n = all nearby ghosts minus the not-scared ones.
        features["#-of-ghosts-scared"] = len(filter(lambda q: self.euclDist(x, y, q[0], q[1]) < n, ghosts)) - len(
            notScared)
        # if there is no danger of ghosts then add the food feature
        if not features["#-of-ghosts-1-step-away"] and food[next_x][next_y]:
            features["eats-food"] = 1.0
        # Boolean grid of capsule positions, same shape as the food grid.
        capsules = map(lambda q: [False] * len(q), food)
        for cap in state.getCapsules():
            capsules[cap[0]][cap[1]] = True
        distFood = closestFood((next_x, next_y), food, walls)
        if distFood is not None:
            # make the distance a number less than one otherwise the update
            # will diverge wildly
            features["closest-food"] = float(distFood) / (walls.width * walls.height)
        # The four cells adjacent to Pacman, one per compass direction.
        positions = self.generateAllNeighboursSimple(x, y)
        # FEATURE: HALLWAY — normalised hallway length in each direction.
        for i in range(0, 4):
            features["hallway-%i" % i] = (self.inHallwayRec(positions[i][0], positions[i][1], (x, y),walls) if self.notWall(positions[i][0], positions[i][1],walls) else 0) / maxPathLen
        # FEATURE: NEIGHBOURING WALLS — 1 when the adjacent cell is a wall.
        for i in range(0,4):
            if self.notWall(positions[i][0], positions[i][1], walls):
                features["isWall-%i" % i] = 0
            else:
                features["isWall-%i" % i] = 1
        # FEATURE: CLOSEST GHOST — normalised path distance per direction.
        for i in range(0, 4):
            dist = self.closestGhostDist(positions[i][0], positions[i][1], ghosts, walls)
            if dist is not None:
                features["closest-ghost-%i" % i] = dist / maxPathLen
        # FEATURE: DANGER VALUE — how close the nearest hostile ghost is to
        # the intersection at the end of each direction's hallway.
        distHallwayGhost = [None] * 4
        intersect = [None] * 4
        # Keep only the positions of the not-scared ghosts.
        # NOTE(review): tuple-parameter lambda — Python 2 only syntax.
        notScared = map(lambda (a, b): a, notScared)
        for i in range(0, 4):
            nearest = self.closestGhost(positions[i][0], positions[i][1], notScared, walls)
            intersect[i] = self.closestIntersect(positions[i][0], positions[i][1], (x, y), walls)
            if intersect[i] is not None and nearest is not None:
                distHallwayGhost[i] = self.paths[(nearest[0], nearest[1]), (intersect[i][0], intersect[i][1])]
        distHallwayGhost = [0 if q is None else q for q in distHallwayGhost]
        for i in range(0, 4):
            if intersect[i] is not None:
                features["danger-value-%i" % i] = (maxPathLen + features["hallway-%i" % i] - distHallwayGhost[i]) / maxPathLen
        # Shrink all feature values to keep the weight updates stable.
        features.divideAll(10.0)
        return features
    # ------------------------------------------------------------------------------------------------------------------
    def avgScaredTime(self, states):
        # Mean remaining scaredTimer over all ghost states.
        # NOTE(review): '/' is integer division under Python 2 when the
        # timers are ints — confirm whether truncation is intended.
        tot = 0
        for i in range(len(states)):
            tot += states[i].scaredTimer
        a = tot / len(states)
        return a
    # ------------------------------------------------------------------------------------------------------------------
    def inHallway(self, x, y, origin, walls):
        # Length of the hallway containing (x, y), exploring both ways.
        nbrs = self.getNeighboursSimple(x, y, walls)
        # don't count doubles (spots counted in a previous iteration)
        nbrs = filter(lambda q: q != origin, nbrs)
        # check if there are the correct # of nbr walls -> hallway
        if len(nbrs) == 2:
            fst = nbrs[0]
            snd = nbrs[1]
            a = 1 + self.inHallwayRec(fst[0], fst[1], (x, y), walls)
            b = self.inHallwayRec(snd[0], snd[1], (x, y), walls)
            return a + b
        elif len(nbrs) == 1:
            fst = nbrs[0]
            return 1 + self.inHallwayRec(fst[0], fst[1], (x, y), walls)
        elif len(nbrs) == 0:
            return 1
        else:
            return 0
    def inHallwayRec(self, x, y, origin, walls):
        # One-directional hallway walk: count cells until an intersection
        # (more than one onward neighbour) or a dead end.
        nbrs = self.getNeighboursSimple(x, y, walls)
        # don't count doubles (spots counted in a previous iteration)
        nbrs = filter(lambda q: q != origin, nbrs)
        # check if there are the correct # of nbr walls -> hallway
        if len(nbrs) == 1:
            fst = nbrs[0]
            return 1 + self.inHallwayRec(fst[0], fst[1], (x, y), walls)
        elif len(nbrs) == 0:
            return 1
        else:
            return 0
    def closestIntersect(self, x, y, origin, walls):
        # Follow the hallway from (x, y) away from origin and return the
        # first cell with two onward neighbours (an intersection); None on
        # a dead end.
        nbrs = self.getNeighboursSimple(x, y, walls)
        # don't count doubles (spots counted in a previous iteration)
        nbrs = filter(lambda q: q != origin, nbrs)
        # check if there are the correct # of nbr walls -> hallway
        if len(nbrs) == 1:
            fst = nbrs[0]
            return self.closestIntersect(fst[0], fst[1], (x, y), walls)
        elif len(nbrs) == 2:
            return x, y
        else:
            return None
    def getDirectionalNeighbour(self, x, y, direction):
        # Cell reached from (x, y) by the (dx, dy) direction vector.
        return x + direction[0], y + direction[1]
    def euclDist(self, x1, y1, x2, y2):
        # Euclidean distance between two points.
        return math.sqrt(((x1 - x2) ** 2) + ((y1 - y2) ** 2))
    # ------------------------------------------------------------------------------------------------------------------
    def notWall(self, x, y, walls):
        # True when (x, y) lies inside the grid and is not a wall cell.
        if x > walls.width - 1 or y > walls.height - 1:
            return False
        return not walls[int(x)][int(y)]
    def getNeighboursSimple(self, x, y, walls):
        # Orthogonal neighbours of (x, y) that are in-grid and not walls.
        width = walls.width
        height = walls.height
        nbrs = self.generateAllNeighboursSimple(x, y)
        # NOTE(review): the bounds check looks swapped — q[0] is x but is
        # compared to height, q[1] is y but compared to width; the chained
        # '< width >= 0' also bounds width, not the coordinate. notWall()
        # re-checks the upper bounds, which may mask this — confirm.
        nbrs = filter(lambda q: q[1] < width >= 0 and q[0] < height >= 0, nbrs)  # keep nbrs in grid
        nbrs = filter(lambda q: self.notWall(q[0], q[1], walls), nbrs)  # keep only non-wall neighbours
        return nbrs
    def getAllNeighboursSimple(self, x, y, walls):
        # Orthogonal neighbours of (x, y) that are in-grid (walls included).
        width = walls.width
        height = walls.height
        nbrs = self.generateAllNeighboursSimple(x, y)
        # NOTE(review): same possibly-swapped bounds check as above.
        nbrs = filter(lambda q: q[1] < width >= 0 and q[0] < height >= 0, nbrs)  # keep nbrs in grid
        return nbrs
    def generateAllNeighboursSimple(self, x, y):
        # The four orthogonal neighbours: East, North, West, South.
        return [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]
    # ------------------------------------------------------------------------------------------------------------------
    def calculateCorners(self, path, walls):
        # Count the corner turns along a path (consecutive triples whose
        # endpoints are a diagonal sqrt(2) apart).
        corners = 0
        if len(path) < 3:
            return 0
        for i in range(0, len(path) - 2):
            bCorner = path[i]
            corner = path[i + 1]
            aCorner = path[i + 2]
            if self.euclDist(bCorner[0], bCorner[1], aCorner[0], aCorner[1]) == math.sqrt(2):
                if self.isCorner([bCorner, corner, aCorner], walls):
                    corners += 1
        return corners
    # TODO(review): corner detection only checks that both endpoints touch
    # some wall — verify this is sufficient.
    def isCorner(self, corner, walls):
        if self.wallNeighbours(corner[0], walls) > 0 and self.wallNeighbours(corner[2], walls) > 0:
            return True
        else:
            return False
    def wallNeighbours(self, pos, walls):
        # Number of wall cells among the in-grid orthogonal neighbours.
        nbrs = self.getAllNeighboursSimple(pos[0], pos[1], walls)
        w = 0
        for p in nbrs:
            if not self.notWall(p[0], p[1], walls):  # so if it is a wall
                w += 1
        return w
    def closestGhostDist(self, x, y, ghosts, walls):
        # Precomputed path distance from (x, y) to the nearest ghost, or
        # None when (x, y) is a wall.
        # NOTE(review): min([]) raises ValueError when ghosts is empty;
        # closestGhost below guards this case but this method does not —
        # confirm ghosts is always non-empty for callers.
        distances = []
        if not self.notWall(x, y, walls):
            return None
        for g in ghosts:
            distances.append(self.paths[(int(x), int(y)), (int(g[0]), int(g[1]))])
        return min(distances)
    def closestGhost(self, x, y, ghosts, walls):
        # Integer position of the ghost with the smallest precomputed path
        # distance from (x, y); None when (x, y) is a wall or no ghosts.
        distances = []
        if not self.notWall(x, y, walls):
            return None
        for g in ghosts:
            distances.append((self.paths[(int(x), int(y)), (int(g[0]), int(g[1]))], g))
        if distances == []:
            return None
        gPos0, gPos1 = min(distances, key=lambda q: q[0])[1]
        return int(gPos0), int(gPos1)
|
KyleBringmans/Pac-Man | analysis.py | <reponame>KyleBringmans/Pac-Man
# analysis.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
######################
# ANALYSIS QUESTIONS #
######################
# Set the given parameters to obtain the specified policies through
# value iteration.
def question2():
    """Bridge crossing: discount/noise making the agent cross the bridge."""
    discount = 0.9
    noise = 0
    # With zero noise the intended action always succeeds, so the barely
    # discounted reward on the far side is worth the walk.
    return discount, noise
# ----------------------------------------------------------------------------------------------------------------------
# these values were eyeballed and weren't outright calculated
def question3a():
    """Prefer the close exit while risking the cliff."""
    # Heavy discounting makes the distant reward not worth the trip, zero
    # noise makes hugging the cliff safe, and a negative living reward
    # pushes the agent to reach an end-state quickly.  (Values eyeballed.)
    return 0.1, 0, -1
def question3b():
    """Prefer the close exit while avoiding the cliff."""
    # Moderate noise makes the cliff path too risky, so the agent detours;
    # the negative living reward still makes it stop at the near exit.
    return 0.5, 0.4, -1
def question3c():
    """Prefer the distant exit while risking the cliff."""
    # Mild discounting keeps the distant reward attractive, zero noise
    # removes the cliff risk, and the negative living reward keeps the
    # agent moving toward an end-state.
    return 0.9, 0, -1
# If not possible, return 'NOT POSSIBLE'
def question3d():
    """Prefer the distant exit while avoiding the cliff."""
    # Mild discounting makes the distant reward attractive, while noise
    # makes the cliff route too risky; negative living reward keeps the
    # agent heading for an end-state.
    return 0.9, 0.4, -1
def question3e():
    """Avoid both exits and the cliff (never terminate)."""
    # A large positive living reward makes every end-state unattractive,
    # so the agent just keeps living.  (Discount/noise eyeballed.)
    return 0.5, 0.4, 5
def question6():
    """Epsilon/learning-rate pair guaranteeing the optimal bridge policy.

    No such pair exists, hence the sentinel string.
    """
    return 'NOT POSSIBLE'
if __name__ == '__main__':
    # Print every question's answer when run as a script.  print() call
    # syntax is valid under both Python 2 (single pre-formatted argument)
    # and Python 3, unlike the original print statements.
    print('Answers to analysis questions:')
    import analysis
    for q in [q for q in dir(analysis) if q.startswith('question')]:
        response = getattr(analysis, q)()
        print(' Question %s:\t%s' % (q, str(response)))
|
KyleBringmans/Pac-Man | valueIterationAgents.py | # valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
    """
        * Please read learningAgents.py before reading this.*

        A ValueIterationAgent takes a Markov decision process
        (see mdp.py) on initialization and runs value iteration
        for a given number of iterations using the supplied
        discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
          Your value iteration agent should take an mdp on
          construction, run the indicated number of iterations
          and then act according to the resulting policy.

          Some useful mdp methods you will use:
              mdp.getStates()
              mdp.getPossibleActions(state)
              mdp.getTransitionStatesAndProbs(state, action)
              mdp.getReward(state, action, nextState)
              mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter() # A Counter is a dict with default 0

        # Batch value iteration: every sweep reads the *previous* sweep's
        # values (self.values) and writes into a fresh Counter, so updates
        # within one sweep never see each other.
        for _ in range(iterations):
            vals = util.Counter()
            for state in self.mdp.getStates():
                if self.mdp.isTerminal(state):
                    vals[state] = 0
                else:
                    # One-step lookahead: V(s) = max_a Q(s, a).  Reuses
                    # computeQValueFromValues instead of repeating the
                    # Bellman backup inline (it reads self.values, i.e.
                    # the previous sweep, which is exactly what we want).
                    actions = self.mdp.getPossibleActions(state)
                    if actions:
                        vals[state] = max(self.computeQValueFromValues(state, a)
                                          for a in actions)
                    else:
                        # Non-terminal state with no legal actions: keep the
                        # original -inf behaviour.
                        vals[state] = float("-inf")
            self.values = vals  # commit this sweep's values

    def getValue(self, state):
        """
          Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
          Compute the Q-value of action in state from the
          value function stored in self.values.
        """
        tot = 0
        # Q(s,a) = sum_s' P(s'|s,a) * (R(s,a,s') + gamma * V(s'))
        for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            tot += prob * (self.mdp.getReward(state, action, nextState) + (self.discount * self.values[nextState]))
        return tot

    def computeActionFromValues(self, state):
        """
          The policy is the best action in the given state
          according to the values currently stored in self.values.

          You may break ties any way you see fit.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return None.
        """
        if self.mdp.isTerminal(state):
            return None
        value = float("-inf")
        choice = None
        # Act greedily w.r.t. the value function.  The >= keeps the
        # original tie-breaking (the *last* maximising action wins).
        for action in self.mdp.getPossibleActions(state):
            qVal = self.computeQValueFromValues(state, action)
            if qVal >= value:
                value = qVal
                choice = action
        return choice

    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
|
KyleBringmans/Pac-Man | qlearningAgents.py | # qlearningAgents.py
# ------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
from learningAgents import ReinforcementAgent
from featureExtractors import *
import pacman
import random,math
from util import sigmoid, relu, tanh, sigmoidBackward, reluBackward, tanhBackward
import numpy as np
class QLearningAgent(ReinforcementAgent):
    """
      Q-Learning Agent

      Functions you should fill in:
        - computeValueFromQValues
        - computeActionFromQValues
        - getQValue
        - getAction
        - update

      Instance variables you have access to
        - self.epsilon (exploration prob)
        - self.alpha (learning rate)
        - self.discount (discount rate)

      Functions you should use
        - self.getLegalActions(state)
          which returns legal actions for a state
    """
    def __init__(self, **args):
        "Initialize Q-values."
        ReinforcementAgent.__init__(self, **args)
        self.qValues = util.Counter()  # (state, action) -> Q-value, defaults to 0

    def getQValue(self, state, action):
        """
          Returns Q(state,action).
          Returns 0.0 for never-seen (state, action) pairs.
        """
        # util.Counter is "a dict with default 0", so no explicit
        # "have we seen this pair" bookkeeping is needed.
        return self.qValues[(state, action)]

    def computeValueFromQValues(self, state):
        """
          Returns max_action Q(state,action) over legal actions, or 0.0
          when there are none (the terminal state).
        """
        actions = self.getLegalActions(state)
        if not actions:
            return 0.0
        return max(self.getQValue(state, a) for a in actions)

    def computeActionFromQValues(self, state):
        """
          Compute the best action to take in a state.  Returns None when
          there are no legal actions (the terminal state).
        """
        actions = self.getLegalActions(state)
        if not actions:
            return None
        qVals = util.Counter()
        for action in actions:
            qVals[action] = self.getQValue(state, action)
        # argMax keeps the original (first-seen) tie-breaking behaviour.
        return qVals.argMax()

    def getAction(self, state):
        """
          Epsilon-greedy action selection: with probability self.epsilon
          take a random legal action, otherwise the greedy policy action.
          Returns None when there are no legal actions.
        """
        legalActions = self.getLegalActions(state)
        if not legalActions:
            # Terminal state: the original fell through to
            # random.choice([]) here, which raises IndexError.
            return None
        if util.flipCoin(self.epsilon):
            return random.choice(legalActions)
        return self.computeActionFromQValues(state)

    def update(self, state, action, nextState, reward):
        """
          Observe a state => action => nextState, reward transition and
          perform the Q-learning update.  Called on your behalf; never
          call it yourself.
        """
        # Q(s,a) <- (1-alpha) * Q(s,a) + alpha * (R + gamma * max_a' Q(s',a'))
        sample = reward + self.discount * self.computeValueFromQValues(nextState)
        self.qValues[(state, action)] = \
            (1 - self.alpha) * self.getQValue(state, action) + self.alpha * sample

    def getPolicy(self, state):
        return self.computeActionFromQValues(state)

    def getValue(self, state):
        return self.computeValueFromQValues(state)
class PacmanQAgent(QLearningAgent):
    "Exactly the same as QLearningAgent, but with different default parameters"

    def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):
        """
        These default parameters can be changed from the pacman.py command line.
        For example, to change the exploration rate, try:
            python pacman.py -p PacmanQLearningAgent -a epsilon=0.1

        alpha    - learning rate
        epsilon  - exploration rate
        gamma    - discount factor
        numTraining - number of training episodes, i.e. no learning after these many episodes
        """
        # Fold the defaults into the keyword args handed to the parent.
        args.update(epsilon=epsilon, gamma=gamma, alpha=alpha,
                    numTraining=numTraining)
        self.index = 0  # This is always Pacman
        QLearningAgent.__init__(self, **args)

    def getAction(self, state):
        """
        Simply calls the getAction method of QLearningAgent and then
        informs parent of action for Pacman.  Do not change or remove this
        method.
        """
        chosen = QLearningAgent.getAction(self, state)
        self.doAction(state, chosen)
        return chosen
class ApproximateQAgent(PacmanQAgent):
    """
       ApproximateQLearningAgent

       Q-values are a linear function of extracted features.  Two weight
       vectors are maintained: a normal one, and a separate one used while
       the ghost nearest to Pacman is scared (edible), since good play
       flips in that situation.
    """
    def __init__(self, extractor='IdentityExtractor', **args):
        self.featExtractor = util.lookup(extractor, globals())()
        PacmanQAgent.__init__(self, **args)
        self.weights = util.Counter()        # weights for normal play
        self.weightsScared = util.Counter()  # weights while hunting scared ghosts

    def getWeights(self):
        return self.weights

    def getWeightsScared(self):
        return self.weightsScared

    def _useScaredWeights(self, state):
        """True when the ghost closest to Pacman (by precomputed path
        distance) is currently scared, i.e. the scared weight vector
        applies.  Assumes exactly two ghosts at agent indices 1 and 2.

        Extracted helper: this decision was previously duplicated in
        getQValue and update (along with dead commented-out variants).
        """
        p = state.data.agentStates[0].configuration.pos
        g1 = state.data.agentStates[1]
        g2 = state.data.agentStates[2]
        distg1 = self.featExtractor.paths[(p[0], p[1]), (int(g1.configuration.pos[0]), int(g1.configuration.pos[1]))]
        distg2 = self.featExtractor.paths[(p[0], p[1]), (int(g2.configuration.pos[0]), int(g2.configuration.pos[1]))]
        return (g1.scaredTimer > 0 and distg1 < distg2) or \
               (g2.scaredTimer > 0 and distg2 < distg1)

    def getQValue(self, state, action):
        """
          Returns Q(state,action) = w * featureVector, where * is the
          dot-product operator, using the scared or normal weight vector
          depending on the closest ghost's state.
        """
        features = self.featExtractor.getFeatures(state, action)
        if self._useScaredWeights(state):
            weights = self.getWeightsScared()
        else:
            weights = self.getWeights()
        q = 0
        for feat in features:  # Q(s,a) = sum_i f_i(s,a) * w_i
            q += features[feat] * weights[feat]
        return q

    def update(self, state, action, nextState, reward):
        """
          Gradient update of the active weight vector:
          w_i += alpha * (target - Q(s,a)) * f_i(s,a)
        """
        features = self.featExtractor.getFeatures(state, action)
        difference = reward + self.discount * self.computeValueFromQValues(nextState) \
            - self.getQValue(state, action)
        if self._useScaredWeights(state):
            weights = self.weightsScared
        else:
            weights = self.weights
        for feat in features:
            weights[feat] += self.alpha * difference * features[feat]

    def final(self, state):
        "Called at the end of each game."
        # call the super-class final method
        PacmanQAgent.final(self, state)

        # did we finish training?
        if self.episodesSoFar == self.numTraining:
            # weights could be printed here for debugging
            pass
emadbagheri96/PyTse | setup.py | import setuptools
from pathlib import Path
# Package metadata for the PyTse crawler (library code lives in pytse/).
setuptools.setup(
    name="pytse",
    author="<NAME>",
    version="1.3.0",
    # Reuse the README verbatim as the PyPI long description.
    long_description=Path("README.md").read_text(),
    long_description_content_type='text/markdown',
    description="A small web crawler for tsetmc.com",
    # Ship only the library packages; tests and bundled data stay out.
    packages=setuptools.find_packages(exclude=["tests", "data"]),
    project_urls={
        'Source': 'https://github.com/miladj/PyTse',
    },
    # Runtime dependency: HTTP access to tsetmc.com endpoints.
    install_requires=[
        'requests',
    ]
)
|
emadbagheri96/PyTse | tests/test_response.py |
from pytse.pytse import PyTse,SymbolData
from unittest.mock import patch
from pathlib import Path
from nose.tools import assert_equal,assert_is_not_none,assert_is_instance
def test_server_response():
    """Parse a canned MarketWatchInit payload and verify one symbol's fields."""
    with patch('pytse.pytse.PyTse._PyTse__get_data_from_server') as mock_get:
        # Serve a recorded server response instead of hitting tsetmc.com.
        mock_get.return_value = Path("tests/sampledata.txt").read_text()
        pytse = PyTse()
        symbols = pytse.symbols_data
        symbol = symbols["IRO1NIKI0001"]
        assert_is_not_none(symbol)
        assert_is_instance(symbol, SymbolData)
        assert_equal(symbol.tvol, 8566607)
        assert_equal(symbol.pmax, 11396)
        assert_equal(symbol.pmin, 10570)
        # Removed: a stray debug print and a leftover `assert False` that
        # forced this test to fail unconditionally.
|
emadbagheri96/PyTse | pytse/constants.py | BASE_URL = "http://www.tsetmc.com/tsev2/data/MarketWatchInit.aspx?h=0&r=0"
# Endpoint returning client-type (retail vs. institutional) trade data for all symbols.
CLIENT_TYPE_URL="http://www.tsetmc.com/tsev2/data/ClientTypeAll.aspx"
SYMBOL_PAGE_URL="http://www.tsetmc.com/loader.aspx?ParTree=151311&i={inscode}" |
MichaelSeaman/char-rnn | frequent_flush.py | from time import sleep
import sys
import threading
class Frequent_flush(threading.Thread):
    """Daemon thread that flushes stdout every `delay` seconds.

    Keeps interleaved print output visible promptly when stdout is
    block-buffered (e.g. redirected to a file).
    """

    def __init__(self, delay):
        super(Frequent_flush, self).__init__()
        self.delay = delay
        # Daemon: don't keep the interpreter alive for this thread.
        self.daemon = True

    def run(self):
        while True:
            sys.stdout.flush()
            sleep(self.delay)
def main():
    """Demo: start the background flusher, then print once a second.

    Each line should appear promptly because the flush thread keeps
    emptying stdout's buffer between prints.
    """
    delay = 1   # seconds between flushes (and between demo prints)
    n = 15      # number of demo iterations
    flushThread = Frequent_flush(delay)
    flushThread.start()
    for i in range(n):
        print("Second ", i)
        sleep(delay)
if __name__ == '__main__':
main()
|
MichaelSeaman/char-rnn | callback.py | <gh_stars>0
import numpy as np
from keras.utils import np_utils
import sys
import os
from keras.callbacks import Callback
from generate import sample, generate
class Generate_Text(Callback):
    """Keras callback that prints a sample of generated text after each epoch.

    Useful for eyeballing how the char-RNN's output evolves during training.
    """

    def __init__(self, gen_text_len, idx_to_char, char_to_idx, text, maxlen, vocab):
        # gen_text_len: number of characters to generate per epoch
        # idx_to_char / char_to_idx: vocabulary index <-> character mappings
        # text: training corpus, used to pick a random seed sequence
        # maxlen: model input sequence length
        # vocab: vocabulary size (one-hot dimension)
        self.gen_text_len = gen_text_len
        self.idx_to_char = idx_to_char
        self.char_to_idx = char_to_idx
        self.text = text
        self.maxlen = maxlen
        self.vocab = vocab

    def on_epoch_end(self, epoch, logs={}):
        """Sample gen_text_len characters from the model and stream them to stdout."""
        # Random seed window from the corpus.
        start_index = np.random.randint(0, len(self.text) - self.maxlen - 1)
        diversity = 1.0  # sampling temperature
        print()
        print('----- diversity:', diversity)

        generated = ''
        sentence = self.text[start_index: start_index + self.maxlen]
        print('----- Generating with seed: "' + sentence + '"')

        for i in range(self.gen_text_len):
            # One-hot encode the current window: shape (1, maxlen, vocab).
            x = np.zeros((1, self. maxlen, self.vocab))
            for t, char in enumerate(sentence):
                x[0,t,self.char_to_idx[char]] = 1.
            preds = self.model.predict(x,verbose=0)[0]
            next_idx = sample(preds, diversity)
            next_char = self.idx_to_char[next_idx]

            generated += next_char
            # Slide the window forward by one character.
            sentence = sentence[1:] + next_char

            sys.stdout.write(next_char)
            sys.stdout.flush()
        print()
class Write_Text(Callback):
    """Keras callback that writes a generated text sample to a file after each epoch."""

    def __init__(self, idx_to_char, char_to_idx, text, seq_len, vocab,
            filepath, gen_text_len=300, temperature=.5):
        # filepath may contain an {epoch} placeholder, e.g. "out_{epoch:03d}.txt".
        self.gen_text_len = gen_text_len
        self.idx_to_char = idx_to_char
        self.char_to_idx = char_to_idx
        self.text = text
        self.seq_len = seq_len
        self.vocab = vocab
        self.filepath = filepath
        self.temperature = temperature

    def on_epoch_end(self, epoch, logs={}):
        """Generate a sample and save it; failures are reported, not fatal."""
        generated = generate(model=self.model, idx_to_char=self.idx_to_char,
            char_to_idx=self.char_to_idx, gen_text_len=self.gen_text_len,
            seq_len=self.seq_len, vocab=self.vocab, text=self.text,
            temperature=self.temperature)
        # Resolve the filename *before* the try block: the original computed
        # it inside, so a failing .format() crashed the except handler with
        # an unbound `filename`.
        filename = self.filepath.format(epoch=epoch)
        try:
            # `with` guarantees the file is closed even if write() fails.
            with open(filename, 'w') as f:
                f.write(generated)
            print("Saved composition to", filename)
        except Exception as e:
            print("Could not save", filename)
            print(e)
|
MichaelSeaman/char-rnn | model_file.py | import os
import time
import numpy as np
class ModelFile():
    """Locates the artefacts of a saved model directory.

    Resolves paths for the serialized model ('model.h5'), the training
    history ('model_history.npy') and the most recent checkpoint weights
    ('weights-improvement-<epoch>-<loss>.hdf5').  When no directory is
    given, a fresh timestamped directory name under 'models/' is chosen
    (but not created).
    """

    def __init__(self, model_directory='', model='', weights='', history=''):
        self.model = model
        self.weights = weights
        self.history = history
        if(model_directory):
            self.model_directory = model_directory
            files = os.listdir(model_directory)
            if('model_history.npy' in files):
                self.history = os.path.join(model_directory, 'model_history.npy')
            if('model.h5' in files):
                self.model = os.path.join(model_directory, 'model.h5')
            weights_files = [f for f in files if 'weights-improvement' in f]
            if(weights_files):
                # Filenames look like weights-improvement-<epoch>-<loss>.hdf5.
                # Compare epochs numerically: the original compared the raw
                # strings, which ranks e.g. '9' above '100' once epoch
                # numbers outgrow the zero padding.
                epochs = [int(w.split('-')[2]) for w in weights_files]
                idx_max = np.argmax(epochs)
                self.weights = os.path.join(model_directory, \
                    weights_files[idx_max])
        else:
            # No directory given: propose a new timestamped model directory.
            timestr = time.strftime("%m%d-%H%M")
            model_name = "CharRNN_" + timestr
            self.model_directory = os.path.join('models', model_name)

    def check_exists(self):
        '''
        Returns true if model_directory, model, weights, and history all return
        true from os.path.exists()
        '''
        paths = (self.model_directory, self.model, self.weights, self.history)
        return all(os.path.exists(p) for p in paths)
|
MichaelSeaman/char-rnn | rnn.py | <reponame>MichaelSeaman/char-rnn
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import Adam
from keras.utils import np_utils
import numpy as np
def preprocess_text(filename, SEQ_LEN):
    """Read a corpus and cut it into training sequences.

    Returns (char_to_idx, idx_to_char, vocab, text, sentences, next_chars)
    where `sentences` are overlapping SEQ_LEN-character windows taken with
    stride 3, and next_chars[i] is the character following sentences[i].

    The original had unreachable vectorization code after the return
    statement (referencing undefined locals); it has been removed.
    """
    text = open(filename).read()
    n_chars = len(text)
    chars = sorted(list(set(text)))
    vocab = len(chars)
    char_to_idx = dict( (c, i) for i,c in enumerate(chars) )
    idx_to_char = dict( (i, c) for i,c in enumerate(chars) )

    step = 3  # stride between successive training windows
    sentences = []
    next_chars = []
    for i in range(0, n_chars - SEQ_LEN, step):
        sentences.append("".join(text[i:i+SEQ_LEN]))
        next_chars.append(text[i+SEQ_LEN])
    return (char_to_idx, idx_to_char, vocab, text, sentences, next_chars)
def vectorize_data(SEQ_LEN, vocab, sentences, char_to_idx, next_chars):
    """One-hot encode the training windows.

    Returns X of shape (n, SEQ_LEN, vocab) and y of shape (n, vocab).
    """
    lookup = np.vectorize(char_to_idx.get)
    # Explode each sentence into a row of characters, then map to indices.
    rows = np.asarray([list(sentence) for sentence in sentences])
    X = np_utils.to_categorical(lookup(rows), num_classes=vocab)
    X = X.reshape((len(sentences), SEQ_LEN, vocab))
    y = np_utils.to_categorical(lookup(np.asarray(next_chars)), num_classes=vocab)
    return X, y
def build_model(num_layers, seq_len, vocab, layer_size, dropout, lr, decay):
    """Build and compile a stacked-LSTM character model.

    Input shape is (seq_len, vocab) one-hot characters; output is a softmax
    over the vocabulary.  `dropout` of 0/None disables the dropout layer.
    """
    model = Sequential()
    if(num_layers == 1):
        model.add( LSTM(layer_size, input_shape=(seq_len, vocab), return_sequences=False) )
    else:
        # All but the last LSTM must return full sequences so the next
        # LSTM layer receives a time dimension.
        for i in range(num_layers - 1):
            model.add(LSTM(layer_size, input_shape=(seq_len, vocab), return_sequences=True))
        model.add(LSTM(layer_size, return_sequences=False))
    if(dropout):
        model.add(Dropout(dropout))
    model.add(Dense(vocab))
    model.add(Activation('softmax'))
    # clipvalue guards against exploding gradients in the RNN.
    optimizer = Adam(lr=lr, decay=decay, clipvalue=0.5)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
|
MichaelSeaman/char-rnn | generate.py | '''
Copied from keras tutorials' char-rnn
Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from model_file import ModelFile
from rnn import preprocess_text
from keras.utils import np_utils
import numpy as np
import random
import sys
import os
import getopt
TRAIN_FILE = 'quotes/author-quote.txt'
MODEL_DIR = ''
diversity = 1.0
def main(argv):
    """Interactive entry point: load a trained model and generate text.

    Flags: -t/--trainFile corpus, -m/--modelDirectory saved model,
    -d/--diversity sampling temperature.
    """
    global TRAIN_FILE
    global diversity
    global MODEL_DIR
    try:
        opts, args = getopt.getopt(argv, 't:d:m:', ['trainFile=','diversity=',
            'modelDirectory='])
        for opt, arg in opts:
            if opt in ('-t', '--trainFile'):
                TRAIN_FILE = arg
            elif opt in ('-m', '--modelDirectory'):
                MODEL_DIR = arg
            elif opt in ('-d', '--diversity'):
                diversity = float(arg)
    except getopt.GetoptError as e:
        print("No train/weights file provided")
        print(e)

    print("Using Training File: ", TRAIN_FILE)
    assert(os.path.exists(TRAIN_FILE))
    print("Using model: ", MODEL_DIR)
    model_file = ModelFile(model_directory=MODEL_DIR)
    if(model_file.check_exists()):
        print("Loading model")
    else:
        print("Incomplete model. Make sure that the folder exists and" +
            " contains weights, and model.h5")
        sys.exit(0)
    model_dir = model_file.model_directory
    model_filename = model_file.model
    weights_file = model_file.weights

    print("Building model")
    model = load_model(model_filename)
    model.load_weights(weights_file)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # Recover the sequence length the model was trained with.
    maxlen = model.layers[0].batch_input_shape[1]

    char_to_idx, idx_to_char, vocab, text, sentences, next_chars = \
        preprocess_text(filename=TRAIN_FILE, SEQ_LEN= maxlen)

    gen_text_len = int(input('Length of generated text: '))
    output_file = input('Enter output filename (Leave blank to leave text unsaved) : ')
    output_file_desired = output_file != ''
    input_seed = input('Enter an input seed, max 40 chars: ').lower()
    # Left-pad the seed so it is exactly `maxlen` characters long.
    sentence = "a" * maxlen + input_seed
    sentence = sentence[-1*maxlen:]
    print("Seed: '", sentence, "'")

    generated = generate(model, idx_to_char, char_to_idx, gen_text_len, maxlen,
        vocab, sentence)
    print(generated)
    if(output_file_desired):
        # BUG FIX: open for *writing* -- the original used the default read
        # mode, which cannot save and fails outright for new files.
        with open(output_file, 'w') as f:
            f.write(generated)
def generate(model, idx_to_char, char_to_idx, gen_text_len, seq_len, vocab,
        sentence="", text="", temperature=.5):
    """Generate `gen_text_len` characters from the model, one at a time.

    When no seed `sentence` is supplied, a random seq_len-long slice of
    `text` is used as the seed instead.
    """
    to_indices = np.vectorize(char_to_idx.get)
    if not sentence:
        start = np.random.randint(0, len(text) - seq_len - 1)
        sentence = text[start: start + seq_len]

    generated = ""
    for _ in range(gen_text_len):
        # One-hot encode the current window: shape (1, seq_len, vocab).
        one_hot = np_utils.to_categorical(to_indices([list(sentence)]),
            num_classes=vocab)
        one_hot = one_hot.reshape((1, seq_len, vocab))
        preds = model.predict(one_hot,verbose=0)[0]
        next_char = idx_to_char[sample(preds, temperature=temperature)]
        generated += next_char
        sentence = sentence[1:] + next_char  # slide the window forward
    return generated
def sample(preds, temperature=1.0):
    """Sample an index from a probability distribution.

    `temperature` sharpens (<1, greedier) or flattens (>1, more random)
    the distribution before sampling.
    """
    scaled = np.asarray(preds).astype('float64')
    # Rescale in log space, then renormalize back to a distribution.
    scaled = np.log(scaled) / temperature
    scaled = np.exp(scaled)
    scaled = scaled / np.sum(scaled)
    draw = np.random.multinomial(1, scaled, 1)
    return np.argmax(draw)
if __name__ == "__main__":
main(sys.argv[1:])
|
MichaelSeaman/char-rnn | train_rnn.py | #!/usr/bin/env python
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from callback import *
from sklearn.model_selection import train_test_split
from rnn import preprocess_text, build_model, vectorize_data
from frequent_flush import Frequent_flush
from model_file import ModelFile
import numpy as np
import sys
import os
import getopt
import time
TRAIN_FILE = 'quotes/author-quote.txt'
MODEL_DIR = ''
NUM_EPOCHS = 30
QUICK_MODE = False
LEARNING_RATE = .001
DECAY = .5
LAYER_SIZE = 256
NUM_LAYERS = 2
DROPOUT = 0.2
SEQ_LEN = 90
BUFFER_OUTPUT = False
# Dealing with runtime options
def main(argv):
    """Train (or resume training) the char-RNN from command-line options.

    Builds a fresh model unless -m/--modelDirectory points at an existing
    one, then trains and saves the accumulated history alongside it.
    """
    training_file, new_model, model_dir, num_epochs, quick_mode, \
        learning_rate, decay, layer_size, num_layers, dropout, seq_len, \
        buffer_output = parse_options(argv)
    if(not buffer_output):
        # Keep stdout flowing (useful when output is redirected to a file).
        Frequent_flush(1).start()
    print("Using Input File: ", training_file)
    print("Preprocessing...")
    # NOTE(review): the module-level SEQ_LEN is used here; parse_options
    # mutates it, and `seq_len` returns the same value -- confirm they
    # cannot diverge.
    char_to_idx, idx_to_char, vocab, text, sentences, next_chars = \
        preprocess_text(filename=training_file, SEQ_LEN=SEQ_LEN)
    X, y = vectorize_data(SEQ_LEN, vocab, sentences, char_to_idx, next_chars)
    if(quick_mode):
        # Tiny subset for smoke-testing the pipeline.
        X = X[:1000]
        y = y[:1000]
    print("Using SEQ_LEN of", SEQ_LEN)
    print("Vocab size:", vocab)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    # Building a simple LSTM
    if new_model:
        print("Building model")
        print("Size of Layers: ", layer_size)
        print("Depth of Network: ", num_layers)
        print("Using dropout: ", dropout)
        model = build_model(num_layers=num_layers, seq_len=seq_len, vocab=vocab,
            layer_size=layer_size, dropout=dropout, lr=LEARNING_RATE,
            decay=DECAY)
        model.summary()
        model_dir = create_model_dir(model)
        print("Saved model data to:", os.path.abspath(model_dir))
        # Fresh (empty) training history.
        history = {'acc':[], 'val_acc':[], 'loss':[], 'val_loss':[]}
        initial_epoch = 0
    else:
        # Resume: model, history and starting epoch come from disk.
        model, history, initial_epoch = load_model_dir(model_dir)
        model.summary()
    print("Creating callbacks")
    callbacks_list = create_callbacks(model_dir, writer=True,
        idx_to_char=idx_to_char, char_to_idx=char_to_idx, text=text,
        seq_len=SEQ_LEN, vocab=vocab)
    print("\nTraining model\n")
    history = train_model(model, X_train, y_train, X_test, y_test,
        callbacks_list, num_epochs, history, initial_epoch)
    print("Saving training history")
    history_filename = os.path.join(model_dir, 'model_history.npy')
    np.save(history_filename, history)
def create_model_dir(model):
    """Create a timestamped directory under models/ and save `model` there.

    Returns the directory path.
    """
    if not os.path.exists('models'):
        os.makedirs('models')
    stamp = time.strftime("%m%d-%H%M")
    model_dir = os.path.join('models', "CharRNN_" + stamp)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    model.save(os.path.join(model_dir, "model.h5"))
    return model_dir
def load_model_dir(model_dir):
    """Load model, weights and training history from an existing directory.

    Exits the program when any required artefact is missing.
    Returns (model, history_dict, initial_epoch).
    """
    model_file = ModelFile(model_directory=model_dir)
    if(model_file.check_exists()):
        print("Loading model")
    else:
        print("Incomplete model. Make sure that the folder exists and" +
            " contains weights, history, and model.h5")
        sys.exit(0)

    model = load_model(model_file.model)
    print("Using weights file ", model_file.weights)
    model.load_weights(model_file.weights)
    print("Using history file ", model_file.history)
    # allow_pickle=True is required to load a pickled history dict on
    # NumPy >= 1.16.3, where pickled loads are disabled by default.
    history = np.load(model_file.history, allow_pickle=True).item()
    # Resume counting epochs from where the saved history left off.
    initial_epoch = len(history['acc'])
    print("Starting from epoch ", initial_epoch)
    return model,history,initial_epoch
def create_callbacks(model_dir, history=True, checkp=True, earlyStop=False,
        writer=False, idx_to_char = {}, char_to_idx = {}, text = "",
        seq_len = 300 , vocab = 0):
    """Assemble the Keras callback list for training.

    Flags select which callbacks to include: history recording,
    best-only weight checkpoints, early stopping, and (with writer=True)
    per-epoch text compositions written under model_dir/compositions/.
    The idx_to_char/char_to_idx/text/seq_len/vocab arguments are only
    needed by the writer callback.
    """
    callbacks = []
    weights_filepath = os.path.join(model_dir,
        "weights-improvement-{epoch:03d}-{loss:.4f}.hdf5")
    composition_dir = os.path.join(model_dir, 'compositions')
    composition_filepath = os.path.join(composition_dir,
        "epoch_{epoch:03d}_composition_reduced.txt")
    # Only keep a checkpoint when the monitored loss improves.
    checkpoint = ModelCheckpoint(weights_filepath, save_best_only=True,
        verbose=1)
    esCallback = EarlyStopping(min_delta=0, patience=10, verbose=1)
    hisCallback = History()
    writerCallback = Write_Text(idx_to_char, char_to_idx, text, seq_len, vocab,
        filepath=composition_filepath)
    if history:
        callbacks.append(hisCallback)
    if checkp:
        callbacks.append(checkpoint)
    if earlyStop:
        callbacks.append(esCallback)
    if writer:
        # The writer needs its output directory to exist up front.
        if not os.path.exists(composition_dir):
            os.makedirs(composition_dir)
        callbacks.append(writerCallback)
    return callbacks
def train_model(model, X_train, y_train, X_test, y_test, callbacks_list,
        num_epochs, history, initial_epoch=0):
    """Train one epoch at a time, accumulating metrics into `history`.

    Running epoch-by-epoch (epochs=epochs+1 with initial_epoch=epochs)
    lets Ctrl-C stop training cleanly while keeping all metrics gathered
    so far.  Returns the updated history dict.
    """
    for e in range(num_epochs):
        epochs = e + initial_epoch
        try:
            print("\nEPOCH {}\n".format(epochs))
            hist = model.fit(X_train, y_train, validation_data=(X_test,y_test),
                batch_size=128, epochs=epochs+1, callbacks=callbacks_list,
                initial_epoch=epochs)
            # Append this epoch's metrics to the accumulated history.
            for k, v in hist.history.items():
                history[k] = history[k] + v
        except KeyboardInterrupt:
            # Allow the operator to stop training without losing history.
            print("Exiting training loop")
            break
    return history
def parse_options(argv):
    '''
    Takes a list of CLI argument strings, parses them with getopt, and returns
    the hyper-parameters as a tuple in this order:

        (TRAIN_FILE, new_model, MODEL_DIR, NUM_EPOCHS, QUICK_MODE,
         LEARNING_RATE, DECAY, LAYER_SIZE, NUM_LAYERS, DROPOUT,
         SEQ_LEN, BUFFER_OUTPUT)

    new_model is False when an existing model directory was supplied via
    -m/--modelDirectory. On a getopt error the message is printed and the
    current module-global defaults are returned unchanged.
    '''
    global TRAIN_FILE
    global MODEL_DIR
    global NUM_EPOCHS
    global QUICK_MODE
    global DECAY
    global LEARNING_RATE
    global LAYER_SIZE
    global NUM_LAYERS
    global DROPOUT
    global SEQ_LEN
    global BUFFER_OUTPUT
    new_model = True
    try:
        # FIX: 'modelDirectory' was declared without a trailing '=', so getopt
        # treated --modelDirectory as a bare flag and its directory argument
        # was never captured (MODEL_DIR ended up empty / parsing stopped).
        # NOTE(review): short opts 'w:'/'h:' and long 'historyFile=' are
        # declared but have no handler below -- kept for compatibility.
        opts, args = getopt.getopt(argv, 'i:e:qm:w:h:b', ['inputFile=','epochs=',
            'quickmode' , 'learningRate=','decay=', 'numLayers=', 'layerSize=',
            'sequenceLength=', 'historyFile=', 'bufferOutput', 'dropout=',
            'modelDirectory='])
        for opt, arg in opts:
            if opt in ('-i', '--inputFile'):
                TRAIN_FILE = arg
            elif opt in ('-m', '--modelDirectory'):
                MODEL_DIR = arg
                new_model = False
            elif opt in ('-e', '--epochs'):
                NUM_EPOCHS = int(arg)
            elif opt in ('-q', '--quickmode'):
                QUICK_MODE = True
            elif opt == '--learningRate':
                LEARNING_RATE = float(arg)
            elif opt == '--decay':
                DECAY = float(arg)
            elif opt == '--layerSize':
                LAYER_SIZE = int(arg)
            elif opt == '--numLayers':
                NUM_LAYERS = int(arg)
            elif opt == '--dropout':
                DROPOUT = float(arg)
            elif opt == '--sequenceLength':
                SEQ_LEN = int(arg)
            elif opt in ('-b', '--bufferOutput'):
                BUFFER_OUTPUT = True
    except getopt.GetoptError as e:
        print(e)
    return (TRAIN_FILE, new_model, MODEL_DIR, NUM_EPOCHS, QUICK_MODE,
            LEARNING_RATE, DECAY, LAYER_SIZE, NUM_LAYERS, DROPOUT,
            SEQ_LEN, BUFFER_OUTPUT)
if __name__ == "__main__":
    # Script entry point: forward CLI args (without the program name) to main().
    main(sys.argv[1:])
|
MichaelSeaman/char-rnn | graph_training.py | import numpy as np
import matplotlib.pyplot as plt
import os
import getopt
import sys
HISTORY_FILE = 'model_history.npy'
def main(argv):
    """Load a training-history dict and save accuracy/loss plots.

    argv[0] is either a history .npy file, or a model directory containing the
    default HISTORY_FILE ('model_history.npy'). Output is <model_name>.png in
    the current working directory.
    """
    global HISTORY_FILE
    if argv:
        inputFile = argv[0]
    else:
        print("History file or model directory needed")
        sys.exit(0)
    if os.path.isdir(inputFile):
        # Directory given: use its basename as the model name and look for the
        # default history file inside it.
        model_name = os.path.basename(inputFile)
        HISTORY_FILE = os.path.join(inputFile, HISTORY_FILE)
    else:
        model_name = 'model'
        HISTORY_FILE = inputFile
    print("Model: ", model_name)
    print("Using History File: ", HISTORY_FILE)
    # The history is a pickled dict stored in a 0-d object array; numpy >=
    # 1.16.3 refuses to unpickle unless allow_pickle=True is passed explicitly.
    history = np.load(HISTORY_FILE, allow_pickle=True).item()
    plot_history(history, model_name)
    print("Saved output to ", os.path.abspath(model_name + '.png'))
def plot_history(history, model_name):
    """Render train/test accuracy (top) and loss (bottom) curves from the
    `history` dict and save them to '<model_name>.png'."""
    plt.figure(1)

    # Top panel: accuracy per epoch (train first so legend colors match).
    plt.subplot(211)
    for series in ('acc', 'val_acc'):
        plt.plot(history[series])
    plt.title('{model_name} accuracy'.format(model_name=model_name))
    plt.ylabel('accuracy')
    plt.legend(['train', 'test'], loc='upper left')

    # Bottom panel: loss per epoch.
    plt.subplot(212)
    for series in ('loss', 'val_loss'):
        plt.plot(history[series])
    plt.title('{model_name} loss'.format(model_name=model_name))
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')

    plt.tight_layout()
    plt.savefig('{model_name}.png'.format(model_name=model_name))
if __name__ == "__main__":
    # Script entry point: forward CLI args (without the program name) to main().
    main(sys.argv[1:])
|
apastoriza/tf-ninja | tfninja/tf102/softmax101_numpy.py | <reponame>apastoriza/tf-ninja<gh_stars>0
# coding=utf-8
import numpy as np
def run_softmax():
    """Print two vectors and their softmax distributions, illustrating how a
    single large component (4.0 in the second vector) dominates the result."""
    demo_vectors = (
        [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3],
        [0.1, 0.2, 0.3, 4.0, 0.1, 0.2, 0.3],
    )
    for vector in demo_vectors:
        exponentials = np.exp(vector)
        print(vector)
        print(exponentials / exponentials.sum())
if __name__ == '__main__':
    # Run the softmax demo only when executed directly.
    run_softmax()
|
apastoriza/tf-ninja | tfninja/resources/mnist_input_data.py | <filename>tfninja/resources/mnist_input_data.py
# coding=utf-8
from tensorflow.examples.tutorials.mnist import input_data
from tfninja.resources import config
def gather_data():
    """Load (downloading if needed) the MNIST data sets with one-hot labels,
    stored under '<configured dir>/data'."""
    return input_data.read_data_sets(
        train_dir=config.paths['dir'] + 'data', one_hot=True)
|
apastoriza/tf-ninja | tfninja/resources/config.py | <filename>tfninja/resources/config.py<gh_stars>0
# coding=utf-8
# Central path configuration shared by the tfninja modules.
paths = dict(
    # Base scratch directory for data downloads and TensorBoard logs.
    dir='/tmp/tf-ninja/'
)
|
apastoriza/tf-ninja | tfninja/utils/tensorfactory.py | # coding=utf-8
import tensorflow as tf
DEFAULT_STD_DEV = 0.01
def random_normal_variable(shape, variable_name, std_dev=DEFAULT_STD_DEV):
    """Create a tf.Variable of the given shape/name, initialised from a
    normal distribution with standard deviation `std_dev`."""
    initial_value = tf.random_normal(shape, stddev=std_dev)
    return tf.Variable(initial_value, name=variable_name)
|
apastoriza/tf-ninja | tfninja/tf101/session105.py | # coding=utf-8
# single neuron and TensorBoard
# --(weight)--(input)-->[output=f(input, weight)]--(output)-->
import tensorflow as tf
from tfninja.resources import config
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
# Training hyper-parameters.
MAX_ITERATIONS = 100000  # hard cap on optimisation steps
TARGET_RESULT = 0.0000000001  # stop once the weight value drops below this
DESCENT_OPTIMIZER = 0.001  # gradient-descent learning rate
# An input value that stimulates the neuron; a constant to keep it simple.
INPUT_VALUE = tf.constant(0.5, name='INPUT_VALUE')
# The value we expect the neuron to produce.
EXPECTED_OUTPUT = tf.constant(0.0, name='EXPECTED_OUTPUT')
# A weight, multiplied by the input to provide the output of the neuron.
weight = tf.Variable(1.0, name='weight')
# The neuron model: output = input * weight.
model = tf.multiply(INPUT_VALUE, weight, 'model')
# Squared-error loss between expected and actual output.
loss_function = tf.pow((EXPECTED_OUTPUT - model), 2, name='loss_function')
optimizer = tf.train.GradientDescentOptimizer(DESCENT_OPTIMIZER).minimize(loss_function)
def setup_tensor_board(session):
    """Register a scalar summary for every graph value of interest and return
    the merged summary op plus a FileWriter bound to the session's graph."""
    log_dir = config.paths['dir'] + '/logs/tfninja_session105'
    # define the parameters to be displayed in TensorBoard
    tracked = (INPUT_VALUE, EXPECTED_OUTPUT, weight, model, loss_function)
    for tensor in tracked:
        tf.summary.scalar(tensor.op.name, tensor)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(log_dir, session.graph)
    return merged, writer
def run_session():
    """Run gradient descent, driving the weight from 1.0 toward 0, until it
    falls below TARGET_RESULT or MAX_ITERATIONS is reached; every step logs
    the weight and writes the merged summaries for TensorBoard."""
    with tf.Session() as session:
        summaries, summary_writer = setup_tensor_board(session)
        session.run(tf.global_variables_initializer())
        i = 0
        result = session.run(weight)
        logger.debug('Adjusted weight(%s): %s', i, result)
        # NOTE(review): the loop watches the *weight* value, not the loss --
        # it stops once the weight itself drops below TARGET_RESULT.
        while (i < MAX_ITERATIONS) and (result >= TARGET_RESULT):
            current_summary = session.run(summaries)
            summary_writer.add_summary(current_summary, i)
            session.run(optimizer)
            result = session.run(weight)
            logger.debug('Adjusted weight(%s): %s', i, result)
            i += 1
if __name__ == '__main__':
    # Run the training demo only when executed directly.
    run_session()
|
apastoriza/tf-ninja | tfninja/utils/loggerfactory.py | <gh_stars>0
# coding=utf-8
import logging
import logging.config
def get_logger(name):
    """Return a DEBUG-level logger named `name` with a single stream handler.

    Handler setup runs only once per logger: previously each call attached a
    fresh StreamHandler to the (cached) logger, so calling get_logger twice
    with the same name made every record print twice.
    """
    logger = logging.getLogger(name)
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s (%(threadName)-10s) %(name)-12s %(levelname)-8s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.debug('Instance logger for %s', name)
    return logger
|
apastoriza/tf-ninja | tfninja/tf101/session103.py | # coding=utf-8
import tensorflow as tf
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
# Constant input tensors for the demo graph.
CONSTANT_A = tf.constant([100.0])
CONSTANT_B = tf.constant([200.0])
CONSTANT_C = tf.constant([10.0])
# Two independent ops evaluated together: A + B and A * C.
add_operation = tf.add(CONSTANT_A, CONSTANT_B)
multiply_operation = tf.multiply(CONSTANT_A, CONSTANT_C)
def run_session():
    """Evaluate the add and multiply ops in a single run call and log both
    results (a list of two one-element arrays)."""
    with tf.Session() as session:
        logger.info(session.run([add_operation, multiply_operation]))
if __name__ == '__main__':
    # Run the demo only when executed directly.
    run_session()
|
apastoriza/tf-ninja | tfninja/tf102/softmax102.py | <reponame>apastoriza/tf-ninja
# coding=utf-8
import numpy as np
import tensorflow as tf
from random import randint
from tfninja.resources import config
from tfninja.resources import mnist_input_data
from tfninja.utils import loggerfactory
from tfninja.utils import time
logger = loggerfactory.get_logger(__name__)
# Training hyper-parameters.
BATCH_SIZE = 100
TRAINING_EPOCHS = 1000
EXPECTED_ACCURACY = 0.90
LEARNING_RATE = 0.005
LAYER_NEURONS = 10  # one output neuron per digit class
# About MNIST database
IMAGE_PX_WIDTH = 28
IMAGE_PX_HEIGHT = 28
# Placeholders: flattened 784-pixel images and one-hot label probabilities.
X_image = tf.placeholder(tf.float32, [None, IMAGE_PX_WIDTH * IMAGE_PX_HEIGHT], name='input')
Y_probabilities = tf.placeholder(tf.float32, [None, LAYER_NEURONS])
# Single-layer weights and bias, zero-initialised.
W = tf.Variable(tf.zeros([IMAGE_PX_WIDTH * IMAGE_PX_HEIGHT, LAYER_NEURONS]))
bias_tensor = tf.Variable(tf.zeros([LAYER_NEURONS]))
XX_flatten_images = tf.reshape(X_image, [-1, IMAGE_PX_WIDTH * IMAGE_PX_HEIGHT])
# Raw class scores (logits) and the softmax output distribution.
evidence = tf.matmul(XX_flatten_images, W) + bias_tensor
Y = tf.nn.softmax(evidence, name='output')
# FIX: softmax_cross_entropy_with_logits_v2 applies softmax internally, so it
# must be fed the raw `evidence` (logits). It was previously fed
# Y = softmax(evidence), i.e. a double softmax, which flattens gradients and
# hampers learning.
softmax_cross_entropy_with_logits = tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_probabilities, logits=evidence)
cross_entropy = tf.reduce_mean(softmax_cross_entropy_with_logits)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_probabilities, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# try to switch between gradient and adam optimizer to see the effect
# train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)
def setup_tensor_board(session):
    """Attach cost and accuracy scalar summaries, and return the merged
    summary op plus a FileWriter bound to the session's graph."""
    logs_path = config.paths['dir'] + '/logs/tfninja_softmax102'
    for label, tensor in (('cost', cross_entropy), ('accuracy', accuracy)):
        tf.summary.scalar(label, tensor)
    summary_writer = tf.summary.FileWriter(logs_path, graph=session.graph)
    return tf.summary.merge_all(), summary_writer
def run_session():
    """Train the single-layer softmax classifier on MNIST until it reaches
    EXPECTED_ACCURACY on the test set or TRAINING_EPOCHS elapse, then run a
    random spot-check via predict_numbers. Logs total wall-clock time."""
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        summaries, summary_writer = setup_tensor_board(session)
        logger.info('-------TRAINING INIT-------')
        init_time_in_millis = time.current_time_in_millis()
        epoch = 0
        accuracy_value = 0.0
        data_sets = mnist_input_data.gather_data()
        while (epoch < TRAINING_EPOCHS) and (accuracy_value <= EXPECTED_ACCURACY):
            batch_count = int(data_sets.train.num_examples / BATCH_SIZE)
            for i in range(batch_count):
                batch_x, batch_y = data_sets.train.next_batch(BATCH_SIZE)
                # One optimisation step; the summary is written with a global
                # step index so TensorBoard shows one continuous curve.
                _, summary = session.run([train_step, summaries], feed_dict={
                    X_image: batch_x,
                    Y_probabilities: batch_y
                })
                summary_writer.add_summary(summary, epoch * batch_count + i)
            # Evaluate on the full test set after every epoch.
            accuracy_value = accuracy.eval(feed_dict={
                X_image: data_sets.test.images,
                Y_probabilities: data_sets.test.labels
            })
            if epoch % 10 == 0:
                logger.info('Epoch: %s', epoch)
                logger.info('Current accuracy: %s', accuracy_value)
            epoch += 1
        end_time_in_millis = time.current_time_in_millis()
        logger.info('Epoch: %s', epoch)
        logger.info('-------TRAINING DONE-------')
        logger.info('Total time: %s millis', (end_time_in_millis - init_time_in_millis))
        logger.info('Expected accuracy: %s', accuracy_value)
        predict_numbers(session, data_sets.test)
def predict_numbers(session, test_data_set):
    """Spot-check the trained network on `trials` random test images, logging
    each misprediction and the measured accuracy.

    session       -- an open tf.Session with the trained graph.
    test_data_set -- MNIST test split exposing .images and .labels arrays.
    """
    trials = 1000
    rights = 0
    # Build the prediction op once: creating tf.argmax inside the loop added a
    # new graph node on every iteration.
    prediction_op = tf.argmax(Y, 1)
    num_images = test_data_set.images.shape[0]
    for _ in range(trials):
        # randint is inclusive on both ends, so the upper bound must be
        # num_images - 1; using num_images could raise IndexError.
        num = randint(0, num_images - 1)
        img = test_data_set.images[num]
        classification = session.run(prediction_op, feed_dict={
            X_image: [img]
        })
        expected = np.argmax(test_data_set.labels[num])
        if classification[0] == expected:
            rights += 1
        else:
            logger.error('Neural Network predicted %s', classification[0])
            logger.error('Real label is: %s', expected)
    logger.info('Real accuracy: %s/%s = %s', rights, trials, (rights / trials))
if __name__ == '__main__':
    # Run training + spot-check only when executed directly.
    run_session()
|
apastoriza/tf-ninja | tfninja/hello/hello.py | <filename>tfninja/hello/hello.py
# coding=utf-8
import tensorflow as tf
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
# Smoke test: build a constant op holding the greeting, evaluate it in a
# session, and log the result.
hello = tf.constant('hello ninjas!!')
session = tf.Session()
logger.info(session.run(hello))
|
apastoriza/tf-ninja | tfninja/tf103/lenet101_softmax.py | <gh_stars>0
# coding=utf-8
import tensorflow as tf
import numpy as np
from tfninja.tf103 import lenet101_softmax_model
from tfninja.resources import mnist_input_data
from tfninja.utils import loggerfactory
from tfninja.utils import tensorfactory
# Training hyper-parameters.
BATCH_SIZE = 128
TEST_SIZE = 256  # number of random test images evaluated per epoch
NUM_CLASSES = 10
LEARNING_RATE = 0.001
DECAY = 0.9  # RMSProp decay
EXPECTED_ACCURACY = 0.99
TRAINING_EPOCHS = 1000
# About MNIST database
IMAGE_PX_WIDTH = 28
IMAGE_PX_HEIGHT = 28
IMAGE_SIZE = IMAGE_PX_WIDTH * IMAGE_PX_HEIGHT
# Features map (number of convolution filters per conv stage).
FEATURES_MAP_CONV_1 = 32
FEATURES_MAP_CONV_2 = 64
FEATURES_MAP_CONV_3 = 128
# Flattened conv output feeding the hidden layer; the 4*4 assumes 28x28
# shrinks to 4x4 through the model's poolings -- TODO confirm against
# lenet101_softmax_model.
LAYER_NEURONS_1 = FEATURES_MAP_CONV_3 * 4 * 4
LAYER_NEURONS_2 = 512
# NOTE(review): MNIST is downloaded/loaded at import time as a module-level
# side effect.
mnist = mnist_input_data.gather_data()
train_x = mnist.train.images
train_y = mnist.train.labels
test_x = mnist.test.images
test_y = mnist.test.labels
# Reshape flat pixel rows into (batch, 28, 28, 1) images for conv2d.
train_x_reshape = train_x.reshape(-1, IMAGE_PX_WIDTH, IMAGE_PX_HEIGHT, 1)
test_x_reshape = test_x.reshape(-1, IMAGE_PX_WIDTH, IMAGE_PX_HEIGHT, 1)
X = tf.placeholder('float', [None, IMAGE_PX_WIDTH, IMAGE_PX_HEIGHT, 1])
Y = tf.placeholder('float', [None, NUM_CLASSES])
# Convolution kernels: 3x3 windows mapping in-channels to out feature maps.
W_conv_layer_1 = tensorfactory.random_normal_variable([
    3, 3, 1, FEATURES_MAP_CONV_1
], 'weight_conv_layer_1')
W_conv_layer_2 = tensorfactory.random_normal_variable([
    3, 3, FEATURES_MAP_CONV_1, FEATURES_MAP_CONV_2
], 'weight_conv_layer_2')
W_conv_layer_3 = tensorfactory.random_normal_variable([
    3, 3, FEATURES_MAP_CONV_2, FEATURES_MAP_CONV_3
], 'weight_conv_layer_3')
W_layer_4 = tensorfactory.random_normal_variable([
    LAYER_NEURONS_1, LAYER_NEURONS_2
], 'weight_layer_4')
W_layer_output = tensorfactory.random_normal_variable([
    LAYER_NEURONS_2, NUM_CLASSES
], 'weight_layer_output')
# Dropout keep-probabilities, fed at run time (1.0 disables dropout).
keep_prob_conv = tf.placeholder('float')
keep_prob_hidden = tf.placeholder('float')
py_x = lenet101_softmax_model.model(X,
    W_conv_layer_1, W_conv_layer_2, W_conv_layer_3, W_layer_4, W_layer_output,
    keep_prob_conv, keep_prob_hidden
)
# py_x is the raw (pre-softmax) output, as *_with_logits expects.
softmax_cross_entropy_with_logits = tf.nn.softmax_cross_entropy_with_logits_v2(logits=py_x, labels=Y)
cost = tf.reduce_mean(softmax_cross_entropy_with_logits)
optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, DECAY).minimize(cost)
# Predicted class = index of the largest output score.
predict_op = tf.argmax(py_x, 1)
def run_session():
    """Train the LeNet-style CNN in mini-batches until the spot-check accuracy
    on TEST_SIZE random test images reaches EXPECTED_ACCURACY, or the epoch
    cap TRAINING_EPOCHS is hit."""
    with tf.Session() as session:
        logger = loggerfactory.get_logger(__name__)
        session.run(tf.global_variables_initializer())
        mean = 0
        epoch = 0
        while (epoch < TRAINING_EPOCHS) and (mean <= EXPECTED_ACCURACY):
            # Pairs of (start, end) slice bounds covering the training set.
            training_batch = zip(
                range(0, len(train_x_reshape), BATCH_SIZE),
                range(BATCH_SIZE, len(train_x_reshape) + 1, BATCH_SIZE)
            )
            for start, end in training_batch:
                # Dropout active during training (keep 0.8 conv / 0.5 hidden).
                session.run(optimizer, feed_dict={
                    X: train_x_reshape[start:end],
                    Y: train_y[start:end],
                    keep_prob_conv: 0.8,
                    keep_prob_hidden: 0.5
                })
            test_indices = np.arange(len(test_x_reshape))  # Get A Test Batch
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:TEST_SIZE]
            # Dropout disabled (keep probability 1.0) for evaluation.
            session_prediction = session.run(predict_op, feed_dict={
                X: test_x_reshape[test_indices],
                Y: test_y[test_indices],
                keep_prob_conv: 1.0,
                keep_prob_hidden: 1.0
            })
            mean = np.mean(np.argmax(test_y[test_indices], axis=1) == session_prediction)
            logger.info('Epoch: %s - accuracy: %s', epoch, mean)
            epoch += 1
if __name__ == '__main__':
    # Run training only when executed directly.
    run_session()
|
apastoriza/tf-ninja | tfninja/tf101/session101.py | <reponame>apastoriza/tf-ninja
# coding=utf-8
import tensorflow as tf
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
# Minimal placeholder demo: evaluate y = x * 2 for x = [100] and log it.
with tf.Session() as session:
    # create a placeholder tensor x, fed at run time
    x = tf.placeholder(
        tf.float32,  # data type
        [1],  # tensor shape (one dimension)
        name='x'
    )
    # create a constant z (the multiplier)
    z = tf.constant(2.0)
    y = x * z
    x_in = [100]
    y_output = session.run(y, {
        x: x_in
    })
    logger.info(y_output)
|
apastoriza/tf-ninja | tfninja/tf103/lenet101_softmax_model.py | <gh_stars>0
# coding=utf-8
import tensorflow as tf
# Convolution/pooling configuration shared by all layers.
PADDING_TYPE = 'SAME'
IMAGE_NUMBER = 1  # stride/ksize component along the batch dimension
INPUT_CHANNEL = 1  # stride/ksize component along the channel dimension
KERNEL_SIZE = [1, 2, 2, 1]  # 2x2 max-pool window
def model(x, w1, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    """Assemble the LeNet-style graph: three 3x3 conv stages, one hidden
    fully-connected layer, and a linear output layer.

    x: input image batch (4-D, channels-last, as fed by the caller).
    w1..w3: convolution kernels; w4: hidden-layer weights; w_o: output weights.
    p_keep_conv / p_keep_hidden: dropout keep-probabilities.
    Returns the raw (pre-softmax) output scores.
    """
    conv1 = _create_conv_layer_dropout(x, w1, p_keep_conv)
    conv2 = _create_conv_layer_dropout(conv1, w2, p_keep_conv)
    conv3 = _create_conv_layer_relu(conv2, w3)
    # Final 2x2 max-pool, then flatten to match w4's input dimension.
    fully_connected_layer = tf.nn.max_pool(conv3, ksize=KERNEL_SIZE, strides=[
        IMAGE_NUMBER, 2, 2, INPUT_CHANNEL
    ], padding=PADDING_TYPE)
    fully_connected_layer = tf.reshape(fully_connected_layer, [-1, w4.get_shape().as_list()[0]])
    fully_connected_layer = tf.nn.dropout(fully_connected_layer, p_keep_conv)
    output_layer = tf.nn.relu(tf.matmul(fully_connected_layer, w4))
    output_layer = tf.nn.dropout(output_layer, p_keep_hidden)
    result = tf.matmul(output_layer, w_o)
    return result
def _create_conv_layer_relu(input_data, weight):
    """2-D convolution (stride 1, SAME padding) followed by ReLU."""
    conv_strides = [IMAGE_NUMBER, 1, 1, INPUT_CHANNEL]
    convolved = tf.nn.conv2d(input_data, weight, strides=conv_strides,
                             padding=PADDING_TYPE)
    return tf.nn.relu(convolved)
def _create_conv_layer_dropout(input_data, weight, keep_prob):
    """Conv + ReLU, then a 2x2 max-pool (stride 2) and dropout at `keep_prob`."""
    activated = _create_conv_layer_relu(input_data, weight)
    pool_strides = [IMAGE_NUMBER, 2, 2, INPUT_CHANNEL]
    pooled = tf.nn.max_pool(activated, ksize=KERNEL_SIZE,
                            strides=pool_strides, padding=PADDING_TYPE)
    return tf.nn.dropout(pooled, keep_prob)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.